Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add capi for fluid inference api #20092

Merged
merged 24 commits into from
Oct 5, 2019
Merged
Show file tree
Hide file tree
Changes from 22 commits
Commits
Show all changes
24 commits
Select commit Hold shift + click to select a range
cbd2aa3
add capi for fluid inference api, including AnalysisConfig, AnalysisP…
FrostML Sep 25, 2019
e8b8f3c
undo unexpected change caused by pre-commit
FrostML Sep 25, 2019
e2d23d0
undo unexpected changes caused by pre-commit
FrostML Sep 25, 2019
a0aeed4
add and fill CMakeLists.txt file
FrostML Sep 25, 2019
7accc6a
add subdirectory
FrostML Sep 25, 2019
af68090
fix some problem according to offline meeting
FrostML Sep 25, 2019
48768e8
delete some redundancy
FrostML Sep 29, 2019
8b0a45d
delete some unexpected changes made during programming
FrostML Sep 29, 2019
c4951c5
delete some useless comment and trigger ci, test=develop
FrostML Sep 29, 2019
15b8985
correct some namespace errors in c_api_internal.h
FrostML Sep 29, 2019
dcfdd63
correct the errors because of NHZIX's change about api EnableMemoryOptim
FrostML Sep 29, 2019
a6e669b
Merge branch 'develop' into infer_c_api update PaddlePaddle
FrostML Sep 29, 2019
89e1072
delete gtest include
FrostML Sep 29, 2019
c264b87
improve the coverage of unit tests
FrostML Sep 30, 2019
b9dad8f
correct Cmakelists file
FrostML Sep 30, 2019
a2f5e3b
correct Cmakelists file
FrostML Sep 30, 2019
82e2b11
trigger ci, test=develop
FrostML Sep 30, 2019
8fd9ea7
trigger ci, test=develop
FrostML Sep 30, 2019
8342483
trigger ci, test=develop
FrostML Sep 30, 2019
31da862
add more unit tests for CI coverage, test=develop
FrostML Sep 30, 2019
2203a7f
add numeric for pd_predictor.cc, test=develop
FrostML Oct 1, 2019
bfa8064
deal with PADDLE_ENFORCE, test=develop
FrostML Oct 1, 2019
fc961c6
add the PADDLE_ENFORCE_NOT_NULL(config) for config in case the input …
FrostML Oct 1, 2019
6e6f3e6
for ci, test=develop
FrostML Oct 1, 2019
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions paddle/fluid/inference/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -36,6 +36,7 @@ else(WIN32)
endif(WIN32)

add_subdirectory(api)
add_subdirectory(capi)

if(WITH_MKLDNN)
set(mkldnn_quantizer_src ${CMAKE_CURRENT_SOURCE_DIR}/api/mkldnn_quantizer.cc)
Expand Down
9 changes: 9 additions & 0 deletions paddle/fluid/inference/capi/CMakeLists.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,9 @@

# Per-source static libraries for the fluid-inference C API.
cc_library(pd_config SRCS pd_config.cc)
cc_library(pd_predictor SRCS pd_predictor.cc)
cc_library(pd_tensor SRCS pd_tensor.cc)
cc_library(pd_c_api SRCS c_api.cc)

# Aggregate static and shared C-API libraries linked against the full
# paddle_fluid inference library.
# NOTE(review): c_api.cc is compiled into pd_c_api AND into both aggregate
# targets below, which also DEPend on pd_c_api — confirm this does not
# produce duplicate symbols when consumers link the static library.
cc_library(paddle_fluid_c SRCS c_api.cc DEPS paddle_fluid pd_config pd_predictor pd_tensor pd_c_api)
cc_library(paddle_fluid_c_shared SHARED SRCS c_api.cc DEPS paddle_fluid pd_config pd_predictor pd_tensor pd_c_api)
# The shared library is installed under the same base name as the static one.
set_target_properties(paddle_fluid_c_shared PROPERTIES OUTPUT_NAME paddle_fluid_c)
97 changes: 97 additions & 0 deletions paddle/fluid/inference/capi/c_api.cc
Original file line number Diff line number Diff line change
@@ -0,0 +1,97 @@
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/inference/capi/c_api.h"
#include <algorithm>
#include <vector>
#include "paddle/fluid/inference/capi/c_api_internal.h"

using paddle::ConvertToPaddleDType;
using paddle::ConvertToPDDataType;
using paddle::ConvertToACPrecision;

extern "C" {

PD_PaddleBuf* PD_NewPaddleBuf() {
  // Heap-allocate an empty buffer wrapper; the caller must release it
  // with PD_DeletePaddleBuf.
  PD_PaddleBuf* created = new PD_PaddleBuf;
  return created;
}

// Destroys a buffer wrapper created by PD_NewPaddleBuf.
// Safe to call with a null pointer (no-op).
void PD_DeletePaddleBuf(PD_PaddleBuf* buf) {
  // `delete` on nullptr is already a no-op, so no explicit null check is
  // needed.  The original also assigned `buf = nullptr;` afterwards, but
  // `buf` is a by-value parameter, so that store was dead code with no
  // caller-visible effect — removed.
  delete buf;
}

// Resizes the wrapped buffer's owned storage to `length` bytes.
// NOTE(review): `buf` is dereferenced unchecked — a null handle is the
// caller's error.
void PD_PaddleBufResize(PD_PaddleBuf* buf, size_t length) {
  auto& wrapped = buf->buf;
  wrapped.Resize(length);
}

// Rebinds the wrapped buffer to the memory at `data` of `length` bytes.
// NOTE(review): ownership semantics follow paddle::PaddleBuf::Reset —
// presumably the caller retains ownership of `data`; confirm upstream.
void PD_PaddleBufReset(PD_PaddleBuf* buf, void* data, size_t length) {
  auto& wrapped = buf->buf;
  wrapped.Reset(data, length);
}

// Returns true when the wrapped buffer currently holds no bytes.
bool PD_PaddleBufEmpty(PD_PaddleBuf* buf) {
  auto& wrapped = buf->buf;
  return wrapped.empty();
}

// Exposes the raw pointer to the wrapped buffer's storage.
void* PD_PaddleBufData(PD_PaddleBuf* buf) {
  auto& wrapped = buf->buf;
  return wrapped.data();
}

// Reports the wrapped buffer's current length in bytes.
size_t PD_PaddleBufLength(PD_PaddleBuf* buf) {
  auto& wrapped = buf->buf;
  return wrapped.length();
}

} // extern "C"

namespace paddle {
// Maps the public C enum PD_DataType onto the engine-internal
// paddle::PaddleDType.  An unrecognized value trips CHECK(false); the
// trailing return keeps compilers happy about the non-void return path.
paddle::PaddleDType ConvertToPaddleDType(PD_DataType dtype) {
  switch (dtype) {
    case PD_FLOAT32:
      return PD_PaddleDType::FLOAT32;
    case PD_INT32:
      return PD_PaddleDType::INT32;
    case PD_INT64:
      return PD_PaddleDType::INT64;
    case PD_UINT8:
      return PD_PaddleDType::UINT8;
    default:
      // Message typo fixed: "Unsupport" -> "Unsupported".
      CHECK(false) << "Unsupported dtype.";
      return PD_PaddleDType::FLOAT32;
  }
}

// Inverse of ConvertToPaddleDType: maps the engine-internal dtype back to
// the public C enum.  Unknown values trip CHECK(false) and fall back to
// PD_UNKDTYPE.
PD_DataType ConvertToPDDataType(PD_PaddleDType dtype) {
  switch (dtype) {
    case PD_PaddleDType::FLOAT32:
      return PD_DataType::PD_FLOAT32;
    case PD_PaddleDType::INT32:
      return PD_DataType::PD_INT32;
    case PD_PaddleDType::INT64:
      return PD_DataType::PD_INT64;
    case PD_PaddleDType::UINT8:
      return PD_DataType::PD_UINT8;
    default:
      // Message typo fixed: "Unsupport" -> "Unsupported".
      CHECK(false) << "Unsupported dtype.";
      return PD_DataType::PD_UNKDTYPE;
  }
}

// Maps the public C `Precision` enum onto the AnalysisConfig precision
// type.  Unknown values trip CHECK(false) and fall back to kFloat32.
PD_ACPrecision ConvertToACPrecision(Precision dtype) {
  switch (dtype) {
    case Precision::kFloat32:
      return PD_ACPrecision::kFloat32;
    case Precision::kInt8:
      return PD_ACPrecision::kInt8;
    case Precision::kHalf:
      return PD_ACPrecision::kHalf;
    default:
      // Message typo fixed: "Unsupport" -> "Unsupported".
      CHECK(false) << "Unsupported precision.";
      return PD_ACPrecision::kFloat32;
  }
}
} // namespace paddle
255 changes: 255 additions & 0 deletions paddle/fluid/inference/capi/c_api.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,255 @@
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#if defined(_WIN32)
#ifdef PADDLE_ON_INFERENCE
#define PADDLE_CAPI_EXPORT __declspec(dllexport)
#else
#define PADDLE_CAPI_EXPORT __declspec(dllimport)
#endif // PADDLE_ON_INFERENCE
#else
#define PADDLE_CAPI_EXPORT __attribute__((visibility("default")))
#endif // _WIN32

#ifdef __cplusplus
extern "C" {
#endif

// Public tensor element-type tags; PD_UNKDTYPE marks an unknown/unset type.
enum PD_DataType { PD_FLOAT32, PD_INT32, PD_INT64, PD_UINT8, PD_UNKDTYPE };
// Opaque handles; the definitions live in c_api_internal.h.
typedef struct PD_PaddleBuf PD_PaddleBuf;
typedef struct PD_AnalysisConfig PD_AnalysisConfig;

// Describes one zero-copy input/output tensor.
typedef struct PD_ZeroCopyData {
// NOTE(review): a default member initializer with `new` makes this header
// C++-only (invalid in C despite the extern "C" guard) and leaks the
// 50-byte allocation if the caller reassigns `name` — confirm intent.
char* name = new char[50];
void* data;        // raw tensor payload
PD_DataType dtype; // element type of `data`
int* shape;        // dimension sizes, `shape_size` entries
int shape_size;    // number of entries in `shape`
} PD_ZeroCopyData;
// Name plus shape for one input tensor.
typedef struct InTensorShape {
char* name;
int* tensor_shape; // dimension sizes, `shape_size` entries
int shape_size;    // number of entries in `tensor_shape`
} InTensorShape;

PADDLE_CAPI_EXPORT extern PD_PaddleBuf* PD_NewPaddleBuf();

PADDLE_CAPI_EXPORT extern void PD_DeletePaddleBuf(PD_PaddleBuf* buf);

PADDLE_CAPI_EXPORT extern void PD_PaddleBufResize(PD_PaddleBuf* buf,
size_t length);

PADDLE_CAPI_EXPORT extern void PD_PaddleBufReset(PD_PaddleBuf* buf, void* data,
size_t length);

PADDLE_CAPI_EXPORT extern bool PD_PaddleBufEmpty(PD_PaddleBuf* buf);

PADDLE_CAPI_EXPORT extern void* PD_PaddleBufData(PD_PaddleBuf* buf);

PADDLE_CAPI_EXPORT extern size_t PD_PaddleBufLength(PD_PaddleBuf* buf);

// PaddleTensor
typedef struct PD_Tensor PD_Tensor;

PADDLE_CAPI_EXPORT extern PD_Tensor* PD_NewPaddleTensor();

PADDLE_CAPI_EXPORT extern void PD_DeletePaddleTensor(PD_Tensor* tensor);

PADDLE_CAPI_EXPORT extern void PD_SetPaddleTensorName(PD_Tensor* tensor,
char* name);

PADDLE_CAPI_EXPORT extern void PD_SetPaddleTensorDType(PD_Tensor* tensor,
PD_DataType dtype);

PADDLE_CAPI_EXPORT extern void PD_SetPaddleTensorData(PD_Tensor* tensor,
PD_PaddleBuf* buf);

PADDLE_CAPI_EXPORT extern void PD_SetPaddleTensorShape(PD_Tensor* tensor,
int* shape, int size);

PADDLE_CAPI_EXPORT extern const char* PD_GetPaddleTensorName(
const PD_Tensor* tensor);

PADDLE_CAPI_EXPORT extern PD_DataType PD_GetPaddleTensorDType(
const PD_Tensor* tensor);

PADDLE_CAPI_EXPORT extern PD_PaddleBuf* PD_GetPaddleTensorData(
const PD_Tensor* tensor);

PADDLE_CAPI_EXPORT extern int* PD_GetPaddleTensorShape(const PD_Tensor* tensor,
int** size);

// AnalysisPredictor
PADDLE_CAPI_EXPORT extern bool PD_PredictorRun(const PD_AnalysisConfig* config,
PD_Tensor* inputs, int in_size,
PD_Tensor* output_data,
int** out_size, int batch_size);

PADDLE_CAPI_EXPORT extern bool PD_PredictorZeroCopyRun(
const PD_AnalysisConfig* config, PD_ZeroCopyData* inputs, int in_size,
PD_ZeroCopyData* output, int** out_size);

// AnalysisConfig
// NOTE(review): a plain (unscoped, unprefixed) enum named `Precision` in a
// public header risks name collisions for consumers — consider a PD_ prefix.
enum Precision { kFloat32 = 0, kInt8, kHalf };

PADDLE_CAPI_EXPORT extern PD_AnalysisConfig* PD_NewAnalysisConfig();

PADDLE_CAPI_EXPORT extern void PD_DeleteAnalysisConfig(
PD_AnalysisConfig* config);

// NOTE(review): default arguments (`= NULL`, and the `= true`/`= 0`
// defaults used below) are not valid C; a C translation unit including
// this header will fail to parse these declarations — confirm the header
// is meant to be consumable from C.
PADDLE_CAPI_EXPORT extern void PD_SetModel(PD_AnalysisConfig* config,
const char* model_dir,
const char* params_path = NULL);

PADDLE_CAPI_EXPORT
extern void PD_SetProgFile(PD_AnalysisConfig* config, const char* x);

PADDLE_CAPI_EXPORT extern void PD_SetParamsFile(PD_AnalysisConfig* config,
const char* x);

PADDLE_CAPI_EXPORT extern void PD_SetOptimCacheDir(PD_AnalysisConfig* config,
const char* opt_cache_dir);

PADDLE_CAPI_EXPORT extern const char* PD_ModelDir(
const PD_AnalysisConfig* config);

PADDLE_CAPI_EXPORT extern const char* PD_ProgFile(
const PD_AnalysisConfig* config);

PADDLE_CAPI_EXPORT extern const char* PD_ParamsFile(
const PD_AnalysisConfig* config);

PADDLE_CAPI_EXPORT extern void PD_EnableUseGpu(
PD_AnalysisConfig* config, uint64_t memory_pool_init_size_mb,
int device_id = 0);

PADDLE_CAPI_EXPORT extern void PD_DisableGpu(PD_AnalysisConfig* config);

PADDLE_CAPI_EXPORT extern bool PD_UseGpu(const PD_AnalysisConfig* config);

PADDLE_CAPI_EXPORT extern int PD_GpuDeviceId(const PD_AnalysisConfig* config);

PADDLE_CAPI_EXPORT extern int PD_MemoryPoolInitSizeMb(
const PD_AnalysisConfig* config);

PADDLE_CAPI_EXPORT extern float PD_FractionOfGpuMemoryForPool(
const PD_AnalysisConfig* config);

PADDLE_CAPI_EXPORT extern void PD_EnableCUDNN(PD_AnalysisConfig* config);

PADDLE_CAPI_EXPORT extern bool PD_CudnnEnabled(const PD_AnalysisConfig* config);

PADDLE_CAPI_EXPORT extern void PD_SwitchIrOptim(PD_AnalysisConfig* config,
bool x = true);

PADDLE_CAPI_EXPORT extern bool PD_IrOptim(const PD_AnalysisConfig* config);

PADDLE_CAPI_EXPORT extern void PD_SwitchUseFeedFetchOps(
PD_AnalysisConfig* config, bool x = true);

PADDLE_CAPI_EXPORT extern bool PD_UseFeedFetchOpsEnabled(
const PD_AnalysisConfig* config);

PADDLE_CAPI_EXPORT extern void PD_SwitchSpecifyInputNames(
PD_AnalysisConfig* config, bool x = true);

PADDLE_CAPI_EXPORT extern bool PD_SpecifyInputName(
const PD_AnalysisConfig* config);

PADDLE_CAPI_EXPORT extern void PD_EnableTensorRtEngine(
PD_AnalysisConfig* config, int workspace_size = 1 << 20,
int max_batch_size = 1, int min_subgraph_size = 3,
Precision precision = Precision::kFloat32, bool use_static = false,
bool use_calib_mode = false);

PADDLE_CAPI_EXPORT extern bool PD_TensorrtEngineEnabled(
const PD_AnalysisConfig* config);

// Maximum input shape for one named tensor (used by PD_EnableAnakinEngine).
typedef struct PD_MaxInputShape {
char* name;
int* shape;     // dimension sizes, `shape_size` entries
int shape_size; // number of entries in `shape`
} PD_MaxInputShape;

PADDLE_CAPI_EXPORT extern void PD_EnableAnakinEngine(
PD_AnalysisConfig* config, int max_batch_size = 1,
PD_MaxInputShape* max_input_shape = NULL, int max_input_shape_size = 0,
int min_subgraph_size = 6, Precision precision = Precision::kFloat32,
bool auto_config_layout = false, char** passes_filter = NULL,
int passes_filter_size = 0, char** ops_filter = NULL,
int ops_filter_size = 0);

PADDLE_CAPI_EXPORT extern bool PD_AnakinEngineEnabled(
const PD_AnalysisConfig* config);

PADDLE_CAPI_EXPORT extern void PD_SwitchIrDebug(PD_AnalysisConfig* config,
bool x = true);

PADDLE_CAPI_EXPORT extern void PD_EnableNgraph(PD_AnalysisConfig* config);

PADDLE_CAPI_EXPORT extern bool PD_NgraphEnabled(
const PD_AnalysisConfig* config);

PADDLE_CAPI_EXPORT extern void PD_EnableMKLDNN(PD_AnalysisConfig* config);

PADDLE_CAPI_EXPORT extern void PD_SetMkldnnCacheCapacity(
PD_AnalysisConfig* config, int capacity);

PADDLE_CAPI_EXPORT extern bool PD_MkldnnEnabled(
const PD_AnalysisConfig* config);

PADDLE_CAPI_EXPORT extern void PD_SetCpuMathLibraryNumThreads(
PD_AnalysisConfig* config, int cpu_math_library_num_threads);

PADDLE_CAPI_EXPORT extern int PD_CpuMathLibraryNumThreads(
const PD_AnalysisConfig* config);

PADDLE_CAPI_EXPORT extern void PD_EnableMkldnnQuantizer(
PD_AnalysisConfig* config);

PADDLE_CAPI_EXPORT extern bool PD_MkldnnQuantizerEnabled(
const PD_AnalysisConfig* config);

PADDLE_CAPI_EXPORT extern void PD_SetModelBuffer(PD_AnalysisConfig* config,
const char* prog_buffer,
size_t prog_buffer_size,
const char* params_buffer,
size_t params_buffer_size);

PADDLE_CAPI_EXPORT extern bool PD_ModelFromMemory(
const PD_AnalysisConfig* config);

PADDLE_CAPI_EXPORT extern void PD_EnableMemoryOptim(PD_AnalysisConfig* config);

PADDLE_CAPI_EXPORT extern bool PD_MemoryOptimEnabled(
const PD_AnalysisConfig* config);

PADDLE_CAPI_EXPORT extern void PD_EnableProfile(PD_AnalysisConfig* config);

PADDLE_CAPI_EXPORT extern bool PD_ProfileEnabled(
const PD_AnalysisConfig* config);

PADDLE_CAPI_EXPORT extern void PD_SetInValid(PD_AnalysisConfig* config);

PADDLE_CAPI_EXPORT extern bool PD_IsValid(const PD_AnalysisConfig* config);

#ifdef __cplusplus
} // extern "C"
#endif
Loading