Skip to content

Commit

Permalink
Fix inference c api PD_GetZeroCopyOutput lod (PaddlePaddle#22768)
Browse files Browse the repository at this point in the history
* fix inference c api lod, test=develop

* fix capi lod problem and enrich tests, test=develop

* delete useless header files and alter const_cast, test=develop
  • Loading branch information
FrostML authored Mar 2, 2020
1 parent 7578fcb commit 324f2b3
Show file tree
Hide file tree
Showing 6 changed files with 135 additions and 17 deletions.
23 changes: 13 additions & 10 deletions paddle/fluid/inference/capi/pd_predictor.cc
Original file line number Diff line number Diff line change
Expand Up @@ -233,7 +233,8 @@ void PD_SetZeroCopyInput(PD_Predictor* predictor,

if (tensor->lod.length) {
auto* lod_ptr = reinterpret_cast<size_t*>(tensor->lod.data);
std::vector<size_t> lod(lod_ptr, lod_ptr + tensor->lod.length);
std::vector<size_t> lod;
lod.assign(lod_ptr, lod_ptr + tensor->lod.length / sizeof(size_t));
input->SetLoD({std::move(lod)});
}
}
Expand Down Expand Up @@ -266,17 +267,19 @@ void PD_GetZeroCopyOutput(PD_Predictor* predictor, PD_ZeroCopyTensor* tensor) {
tensor->data.length = length;

auto lod = output->lod();
tensor->lod.length = lod.front().size() * sizeof(size_t);
if (tensor->lod.capacity < lod.front().size()) {
if (tensor->lod.data) {
std::free(tensor->lod.data);
}
if (!lod.empty()) {
tensor->lod.length = lod.front().size() * sizeof(size_t);
if (tensor->lod.capacity < lod.front().size()) {
if (tensor->lod.data) {
std::free(tensor->lod.data);
}

tensor->lod.data = std::malloc(lod.front().size() * sizeof(size_t));
tensor->lod.capacity = lod.front().size() * sizeof(size_t);
tensor->lod.data = std::malloc(lod.front().size() * sizeof(size_t));
tensor->lod.capacity = lod.front().size() * sizeof(size_t);
}
std::copy(lod.front().begin(), lod.front().end(),
reinterpret_cast<size_t*>(tensor->lod.data));
}
std::copy(lod.front().begin(), lod.front().end(),
reinterpret_cast<size_t*>(tensor->lod.data));
switch (tensor->dtype) {
case PD_FLOAT32:
output->copy_to_cpu(reinterpret_cast<float*>(tensor->data.data));
Expand Down
4 changes: 4 additions & 0 deletions paddle/fluid/inference/tests/api/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -399,3 +399,7 @@ if(WITH_MKLDNN)
EXTRA_DEPS ${INFERENCE_EXTRA_DEPS} paddle_fluid_c
ARGS --infer_model=${INT8_DATA_DIR}/resnet50/model)
endif()

# C-API coverage for LoD-carrying outputs: runs the Chinese NER model through
# the zero-copy C interface (analyzer_capi_ner_tester.cc), linked against the
# paddle_fluid_c library and pointed at the installed NER model directory.
inference_analysis_test(test_analyzer_capi_ner SRCS analyzer_capi_ner_tester.cc
        EXTRA_DEPS ${INFERENCE_EXTRA_DEPS} paddle_fluid_c
        ARGS --infer_model=${CHINESE_NER_INSTALL_DIR}/model)
2 changes: 0 additions & 2 deletions paddle/fluid/inference/tests/api/analyzer_capi_gpu_tester.cc
Original file line number Diff line number Diff line change
Expand Up @@ -15,8 +15,6 @@ limitations under the License. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <fstream>
#include <iostream>
#include <string>
#include <vector>
#include "paddle/fluid/inference/capi/paddle_c_api.h"
Expand Down
2 changes: 0 additions & 2 deletions paddle/fluid/inference/tests/api/analyzer_capi_int_tester.cc
Original file line number Diff line number Diff line change
Expand Up @@ -15,8 +15,6 @@ limitations under the License. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <fstream>
#include <iostream>
#include <string>
#include <vector>
#include "paddle/fluid/inference/capi/paddle_c_api.h"
Expand Down
117 changes: 117 additions & 0 deletions paddle/fluid/inference/tests/api/analyzer_capi_ner_tester.cc
Original file line number Diff line number Diff line change
@@ -0,0 +1,117 @@
// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <cstdlib>
#include <cstring>
#include <string>
#include <vector>
#include "paddle/fluid/inference/capi/paddle_c_api.h"
#include "paddle/fluid/inference/tests/api/tester_helper.h"

namespace paddle {
namespace inference {
namespace analysis {

// Configure the analysis config for the CPU NER model: combined-file model
// loading, zero-copy feed/fetch (feed/fetch ops disabled), named inputs.
void SetConfig(PD_AnalysisConfig *config) {
  const std::string prog_path = FLAGS_infer_model + "/__model__";
  const std::string params_path = FLAGS_infer_model + "/param";
  PD_SetModel(config, prog_path.c_str(), params_path.c_str());
  // Zero-copy tensors require feed/fetch ops off and inputs addressed by name.
  PD_SwitchUseFeedFetchOps(config, false);
  PD_SwitchSpecifyInputNames(config, true);
  PD_DisableGpu(config);
}

// End-to-end zero-copy inference through the C API on the Chinese NER model:
// feeds two int64 LoD inputs ("word" and "mention") and fetches one output,
// exercising the LoD round-trip in PD_GetZeroCopyOutput.
TEST(PD_ZeroCopyRun, zero_copy_run) {
  PD_AnalysisConfig *config = PD_NewAnalysisConfig();
  SetConfig(config);
  PD_Predictor *predictor = PD_NewPredictor(config);

  int input_num = PD_GetInputNum(predictor);
  printf("Input num: %d\n", input_num);
  int output_num = PD_GetOutputNum(predictor);
  printf("Output num: %d\n", output_num);

  PD_ZeroCopyTensor inputs[2];

  // inputs[0]: word
  PD_InitZeroCopyTensor(&inputs[0]);
  const char *name0 = PD_GetInputName(predictor, 0);
  // Size the name buffer from the actual name instead of a hard-coded length.
  inputs[0].name = new char[strlen(name0) + 1];
  snprintf(inputs[0].name, strlen(name0) + 1, "%s", name0);

  std::vector<int64_t> ref_word(
      {12673, 9763, 905, 284, 45, 7474, 20, 17, 1, 4, 9});
  inputs[0].data.capacity = sizeof(int64_t) * ref_word.size();
  inputs[0].data.length = inputs[0].data.capacity;
  // Copy into an owned buffer. The original malloc'd a buffer and then
  // overwrote the pointer with the vector's data, leaking the allocation
  // and aliasing a local vector.
  inputs[0].data.data = std::malloc(inputs[0].data.capacity);
  std::memcpy(inputs[0].data.data, ref_word.data(), inputs[0].data.capacity);

  int shape0[] = {11, 1};
  inputs[0].shape.data = reinterpret_cast<void *>(shape0);
  inputs[0].shape.capacity = sizeof(shape0);
  inputs[0].shape.length = sizeof(shape0);
  inputs[0].dtype = PD_INT64;

  // One LoD level, one sequence spanning all 11 tokens.
  size_t lod0[] = {0, 11};
  inputs[0].lod.data = reinterpret_cast<void *>(lod0);
  inputs[0].lod.capacity = sizeof(lod0);
  inputs[0].lod.length = sizeof(lod0);

  PD_SetZeroCopyInput(predictor, &inputs[0]);

  // inputs[1]: mention
  PD_InitZeroCopyTensor(&inputs[1]);
  const char *name1 = PD_GetInputName(predictor, 1);
  inputs[1].name = new char[strlen(name1) + 1];
  snprintf(inputs[1].name, strlen(name1) + 1, "%s", name1);

  std::vector<int64_t> ref_mention({27, 0, 0, 33, 34, 33, 0, 0, 0, 1, 2});
  inputs[1].data.capacity = sizeof(int64_t) * ref_mention.size();
  inputs[1].data.length = inputs[1].data.capacity;
  inputs[1].data.data = std::malloc(inputs[1].data.capacity);
  std::memcpy(inputs[1].data.data, ref_mention.data(),
              inputs[1].data.capacity);

  int shape1[] = {11, 1};
  inputs[1].shape.data = reinterpret_cast<void *>(shape1);
  inputs[1].shape.capacity = sizeof(shape1);
  inputs[1].shape.length = sizeof(shape1);
  inputs[1].dtype = PD_INT64;

  size_t lod1[] = {0, 11};
  inputs[1].lod.data = reinterpret_cast<void *>(lod1);
  inputs[1].lod.capacity = sizeof(lod1);
  inputs[1].lod.length = sizeof(lod1);

  PD_SetZeroCopyInput(predictor, &inputs[1]);

  PD_ZeroCopyRun(predictor);

  PD_ZeroCopyTensor output;
  PD_InitZeroCopyTensor(&output);
  const char *out_name = PD_GetOutputName(predictor, 0);
  output.name = new char[strlen(out_name) + 1];
  snprintf(output.name, strlen(out_name) + 1, "%s", out_name);

  // Not necessary, just for coverage: pre-seed a small lod buffer so
  // PD_GetZeroCopyOutput takes its grow-and-reallocate path.
  output.lod.data = std::malloc(sizeof(size_t));

  PD_GetZeroCopyOutput(predictor, &output);
  PD_DestroyZeroCopyTensor(&output);

  // The inputs' shape/lod point at stack arrays, so only the buffers we
  // allocated above need releasing (the original leaked these).
  for (auto &in : inputs) {
    std::free(in.data.data);
    delete[] in.name;
  }

  PD_DeleteAnalysisConfig(config);
  PD_DeletePredictor(predictor);
}

} // namespace analysis
} // namespace inference
} // namespace paddle
4 changes: 1 addition & 3 deletions paddle/fluid/inference/tests/api/analyzer_capi_tester.cc
Original file line number Diff line number Diff line change
Expand Up @@ -15,8 +15,6 @@ limitations under the License. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <fstream>
#include <iostream>
#include <string>
#include <vector>
#include "paddle/fluid/inference/capi/paddle_c_api.h"
Expand Down Expand Up @@ -71,7 +69,7 @@ void zero_copy_run() {
delete[] outputs;
}

TEST(PD_ZeroCopyRun, zero_copy_run) { zero_copy_run(); }
TEST(PD_PredictorZeroCopyRun, zero_copy_run) { zero_copy_run(); }

#ifdef PADDLE_WITH_MKLDNN
TEST(PD_AnalysisConfig, profile_mkldnn) {
Expand Down

0 comments on commit 324f2b3

Please sign in to comment.