[docs] fixed prebuilt libs typos and update readme #24

Merged · 5 commits · Aug 17, 2022
8 changes: 7 additions & 1 deletion CMakeLists.txt
@@ -133,7 +133,13 @@ if(ENABLE_PADDLE_BACKEND)
   add_definitions(-DENABLE_PADDLE_BACKEND)
   list(APPEND ALL_DEPLOY_SRCS ${DEPLOY_PADDLE_SRCS})
   include(external/paddle_inference.cmake)
-  list(APPEND DEPEND_LIBS external_paddle_inference external_dnnl external_omp)
+  if(NOT APPLE)
+    list(APPEND DEPEND_LIBS external_paddle_inference external_dnnl external_omp)
+  else()
+    # No third-party libs (mkldnn and omp) need to be linked into
+    # paddle_inference on macOS.
+    list(APPEND DEPEND_LIBS external_paddle_inference)
+  endif()
 endif()

 if(WITH_GPU)
9 changes: 6 additions & 3 deletions FastDeploy.cmake.in
@@ -33,13 +33,16 @@ if(ENABLE_PADDLE_BACKEND)
     set(DNNL_LIB "${CMAKE_CURRENT_LIST_DIR}/third_libs/install/paddle_inference/third_party/install/mkldnn/lib/mkldnn.lib")
     set(IOMP_LIB "${CMAKE_CURRENT_LIST_DIR}/third_libs/install/paddle_inference/third_party/install/mklml/lib/libiomp5md.lib")
   elseif(APPLE)
-    set(DNNL_LIB "")
-    set(IOMP_LIB "")
+    message(STATUS "No third-party libs (mkldnn and omp) need to be linked into paddle_inference on macOS.")
   else()
     set(DNNL_LIB "${CMAKE_CURRENT_LIST_DIR}/third_libs/install/paddle_inference/third_party/install/mkldnn/lib/libmkldnn.so.0")
     set(IOMP_LIB "${CMAKE_CURRENT_LIST_DIR}/third_libs/install/paddle_inference/third_party/install/mklml/lib/libiomp5.so")
   endif()
-  list(APPEND FASTDEPLOY_LIBS ${PADDLE_LIB} ${DNNL_LIB} ${IOMP_LIB})
+  if(NOT APPLE)
+    list(APPEND FASTDEPLOY_LIBS ${PADDLE_LIB} ${DNNL_LIB} ${IOMP_LIB})
+  else()
+    list(APPEND FASTDEPLOY_LIBS ${PADDLE_LIB})
+  endif()
 endif()

 if(WITH_GPU)
170 changes: 85 additions & 85 deletions README.md

Large diffs are not rendered by default.

8 changes: 4 additions & 4 deletions csrc/fastdeploy/backends/common/multiclass_nms.cc
@@ -162,11 +162,11 @@ void MultiClassNMS::Compute(const float* boxes_data, const float* scores_data,
   int64_t out_dim = box_dim + 2;

   int num_nmsed_out = 0;
-  FDASSERT(score_size == 3, "Require rank of input scores be 3, but now it's " +
-                                std::to_string(score_size) + ".");
+  FDASSERT(score_size == 3,
+           "Require the rank of input scores to be 3, but now it's %d.",
+           score_size);
   FDASSERT(boxes_dim[2] == 4,
-           "Require the 3-dimension of input boxes be 4, but now it's " +
-               std::to_string(boxes_dim[2]) + ".");
+           "Require the 3rd dimension of input boxes to be 4, but now it's %lld.",
+           box_dim);
   out_num_rois_data.resize(batch_size);

   std::vector<std::map<int, std::vector<int>>> all_indices;
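A note on the `%lld` specifiers introduced in these messages: `%lld` expects `long long`, but `int64_t` can be plain `long` on LP64 platforms, so the portable spelling is the `PRId64` macro from `<cinttypes>`. A minimal sketch (the variable name is illustrative, not from this patch):

```cpp
#include <cinttypes>  // PRId64
#include <cstdint>
#include <cstdio>

int main() {
  int64_t third_dim = 4;
  // Formats an int64_t correctly whether the platform defines it
  // as long or as long long.
  std::printf("Require the 3rd dimension of input boxes to be 4, "
              "but now it's %" PRId64 ".\n", third_dim);
  return 0;
}
```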
8 changes: 4 additions & 4 deletions csrc/fastdeploy/backends/ort/ops/multiclass_nms.cc
@@ -178,11 +178,11 @@ void MultiClassNmsKernel::Compute(OrtKernelContext* context) {
   int64_t out_dim = box_dim + 2;

   int num_nmsed_out = 0;
-  FDASSERT(score_size == 3, "Require rank of input scores be 3, but now it's " +
-                                std::to_string(score_size) + ".");
+  FDASSERT(score_size == 3,
+           "Require the rank of input scores to be 3, but now it's %d.",
+           score_size);
   FDASSERT(boxes_dim[2] == 4,
-           "Require the 3-dimension of input boxes be 4, but now it's " +
-               std::to_string(boxes_dim[2]) + ".");
+           "Require the 3rd dimension of input boxes to be 4, but now it's %lld.",
+           box_dim);
   std::vector<int64_t> out_num_rois_dims = {batch_size};
   OrtValue* out_num_rois = ort_.KernelContext_GetOutput(
       context, 2, out_num_rois_dims.data(), out_num_rois_dims.size());
17 changes: 9 additions & 8 deletions csrc/fastdeploy/backends/ort/ort_backend.cc
@@ -191,8 +191,10 @@ void OrtBackend::CopyToCpu(const Ort::Value& value, FDTensor* tensor) {
                numel * sizeof(double));
     tensor->dtype = FDDataType::FP64;
   } else {
-    FDASSERT(false, "Unrecognized data type of " + std::to_string(data_type) +
-                        " while calling OrtBackend::CopyToCpu().");
+    FDASSERT(
+        false,
+        "Unrecognized data type of %d while calling OrtBackend::CopyToCpu().",
+        data_type);
   }
 }

@@ -237,9 +239,9 @@ bool OrtBackend::Infer(std::vector<FDTensor>& inputs,
 }

 TensorInfo OrtBackend::GetInputInfo(int index) {
-  FDASSERT(index < NumInputs(), "The index:" + std::to_string(index) +
-                                    " should less than the number of inputs:" +
-                                    std::to_string(NumInputs()) + ".");
+  FDASSERT(index < NumInputs(),
+           "The index: %d should be less than the number of inputs: %d.",
+           index, NumInputs());
   TensorInfo info;
   info.name = inputs_desc_[index].name;
   info.shape.assign(inputs_desc_[index].shape.begin(),
@@ -250,9 +252,8 @@ TensorInfo OrtBackend::GetInputInfo(int index) {

 TensorInfo OrtBackend::GetOutputInfo(int index) {
   FDASSERT(index < NumOutputs(),
-           "The index:" + std::to_string(index) +
-               " should less than the number of outputs:" +
-               std::to_string(NumOutputs()) + ".");
+           "The index: %d should be less than the number of outputs: %d.",
+           index, NumOutputs());
   TensorInfo info;
   info.name = outputs_desc_[index].name;
   info.shape.assign(outputs_desc_[index].shape.begin(),
2 changes: 1 addition & 1 deletion csrc/fastdeploy/core/fd_type.cc
@@ -34,7 +34,7 @@ int FDDataTypeSize(const FDDataType& data_type) {
   } else if (data_type == FDDataType::UINT8) {
     return sizeof(uint8_t);
   } else {
-    FDASSERT(false, "Unexpected data type: " + Str(data_type));
+    FDASSERT(false, "Unexpected data type: %s", Str(data_type).c_str());
   }
   return -1;
 }
4 changes: 2 additions & 2 deletions csrc/fastdeploy/fastdeploy_model.cc
@@ -89,12 +89,12 @@ bool FastDeployModel::InitRuntime() {
             << Str(runtime_option.backend) << "." << std::endl;
   if (use_gpu) {
     FDASSERT(valid_gpu_backends.size() > 0,
-             "There's no valid gpu backend for " + ModelName() + ".");
+             "There's no valid gpu backend for %s.", ModelName().c_str());
     FDWARNING << "FastDeploy will choose " << Str(valid_gpu_backends[0])
               << " for model inference." << std::endl;
   } else {
     FDASSERT(valid_gpu_backends.size() > 0,
-             "There's no valid cpu backend for " + ModelName() + ".");
+             "There's no valid cpu backend for %s.", ModelName().c_str());
     FDWARNING << "FastDeploy will choose " << Str(valid_cpu_backends[0])
               << " for model inference." << std::endl;
   }
11 changes: 6 additions & 5 deletions csrc/fastdeploy/function/reduce.cc
@@ -310,15 +310,16 @@ void ArgMinMax(const FDTensor& x, FDTensor* out, int64_t axis,
   const auto& x_dims = x.shape;
   int64_t x_rank = x_dims.size();
   FDASSERT(axis >= -x_rank,
-           "'axis'(%d) must be greater than or equal to -Rank(X)(%d).", axis,
-           -x_rank);
+           "'axis'(%lld) must be greater than or equal to -Rank(X)(%lld).",
+           axis, -x_rank);
   FDASSERT(axis < x_rank,
-           "'axis'(%d) must be less than or equal to Rank(X)(%d).", axis,
+           "'axis'(%lld) must be less than Rank(X)(%lld).", axis,
            x_rank);
   FDASSERT(output_dtype == FDDataType::INT32 || FDDataType::INT64,
            "The attribute of dtype in argmin/argmax must be [%s] or [%s], but "
            "received [%s].",
-           Str(FDDataType::INT32), Str(FDDataType::INT64), Str(output_dtype));
+           Str(FDDataType::INT32).c_str(), Str(FDDataType::INT64).c_str(),
+           Str(output_dtype).c_str());
   if (axis < 0) axis += x_rank;
   if (output_dtype == FDDataType::INT32) {
     int64_t all_element_num = 0;
@@ -330,7 +331,7 @@
     }
     FDASSERT(all_element_num <= std::numeric_limits<int>::max(),
              "The element num of the argmin/argmax input at axis is "
-             "%d, is larger than int32 maximum value:%d, you must "
+             "%lld, is larger than int32 maximum value:%d, you must "
              "set the dtype of argmin/argmax to 'int64'.",
              all_element_num, std::numeric_limits<int>::max());
   }
2 changes: 1 addition & 1 deletion csrc/fastdeploy/function/softmax.cc
@@ -117,7 +117,7 @@ void Softmax(const FDTensor& x, FDTensor* out, int axis) {
   FDASSERT(
       std::abs(axis) < x.shape.size(),
       "The absolute given axis should be smaller than the input's "
-      "dimension. Expected absolute axis is smaller than %d, but receive %d.",
+      "dimension. Expected absolute axis is smaller than %lu, but receive %d.",
       x.shape.size(), std::abs(axis));
   FD_VISIT_FLOAT_TYPES(x.dtype, "SoftmaxKernel",
                        ([&] { SoftmaxKernel<data_t>(x, out, axis); }));
4 changes: 2 additions & 2 deletions csrc/fastdeploy/function/transpose.cc
@@ -95,12 +95,12 @@ void Transpose(const FDTensor& x, FDTensor* out,
   size_t dims_size = dims.size();
   FDASSERT(dims_size == x.shape.size(),
            "The input tensor's dimension should be equal to the dims's size. "
-           "Expect dims size is %d, but receive %d.",
+           "Expect dims size is %lu, but receive %lu.",
            x.shape.size(), dims_size);
   std::vector<int> count(dims_size, 0);
   for (size_t i = 0; i < dims_size; i++) {
     FDASSERT(dims[i] >= 0,
-             "The dims should be greater than or equal to 0, but receive %d.",
+             "The dims should be greater than or equal to 0, but receive %lld.",
              dims[i]);
     FDASSERT(dims[i] < static_cast<int>(dims_size) && ++count[dims[i]] == 1,
              "Each element of Attribute axis should be a unique value range "
7 changes: 4 additions & 3 deletions csrc/fastdeploy/pybind/main.cc
@@ -33,8 +33,8 @@ pybind11::dtype FDDataTypeToNumpyDataType(const FDDataType& fd_dtype) {
   } else if (fd_dtype == FDDataType::UINT8) {
     dt = pybind11::dtype::of<uint8_t>();
   } else {
-    FDASSERT(false, "The function doesn't support data type of " +
-                        Str(fd_dtype) + ".");
+    FDASSERT(false, "The function doesn't support data type of %s.",
+             Str(fd_dtype).c_str());
   }
   return dt;
 }
@@ -73,7 +73,8 @@ void PyArrayToTensor(pybind11::array& pyarray, FDTensor* tensor,
 pybind11::array TensorToPyArray(const FDTensor& tensor) {
   auto numpy_dtype = FDDataTypeToNumpyDataType(tensor.dtype);
   auto out = pybind11::array(numpy_dtype, tensor.shape);
-  memcpy(out.mutable_data(), tensor.Data(), tensor.Numel() * FDDataTypeSize(tensor.dtype));
+  memcpy(out.mutable_data(), tensor.Data(),
+         tensor.Numel() * FDDataTypeSize(tensor.dtype));
   return out;
 }

4 changes: 2 additions & 2 deletions csrc/fastdeploy/pybind/main.cc.in
@@ -33,8 +33,8 @@ pybind11::dtype FDDataTypeToNumpyDataType(const FDDataType& fd_dtype) {
   } else if (fd_dtype == FDDataType::UINT8) {
     dt = pybind11::dtype::of<uint8_t>();
   } else {
-    FDASSERT(false, "The function doesn't support data type of " +
-                        Str(fd_dtype) + ".");
+    FDASSERT(false, "The function doesn't support data type of %s.",
+             Str(fd_dtype).c_str());
   }
   return dt;
 }
23 changes: 11 additions & 12 deletions csrc/fastdeploy/utils/utils.h
@@ -86,14 +86,13 @@ FASTDEPLOY_DECL bool ReadBinaryFromFile(const std::string& file,
   FDLogger(true, "[INFO]") << __REL_FILE__ << "(" << __LINE__ \
                            << ")::" << __FUNCTION__ << "\t"

-#define FDASSERT(condition, format, ...) \
-  if (!(condition)) { \
-    std::string format_string(format); \
-    int n = std::snprintf(nullptr, 0, format_string.data(), ##__VA_ARGS__); \
-    std::vector<char> buffer(n + 1); \
-    std::snprintf(buffer.data(), n + 1, format_string.data(), ##__VA_ARGS__); \
-    FDERROR << buffer.data() << std::endl; \
-    std::abort(); \
+#define FDASSERT(condition, format, ...) \
+  if (!(condition)) { \
+    int n = std::snprintf(nullptr, 0, format, ##__VA_ARGS__); \
+    std::vector<char> buffer(n + 1); \
+    std::snprintf(buffer.data(), n + 1, format, ##__VA_ARGS__); \
+    FDERROR << buffer.data() << std::endl; \
+    std::abort(); \
   }

 ///////// Basic Macro ///////////
@@ -128,7 +127,7 @@ FASTDEPLOY_DECL bool ReadBinaryFromFile(const std::string& file,
             false, \
             "Invalid enum data type. Expect to accept data type BOOL, INT32, " \
             "INT64, FP32, FP64, but receive type %s.", \
-            Str(__dtype__)); \
+            Str(__dtype__).c_str()); \
     } \
   }()

@@ -148,7 +147,7 @@ FASTDEPLOY_DECL bool ReadBinaryFromFile(const std::string& file,
       FDASSERT(false, \
                "Invalid enum data type. Expect to accept data type INT32, " \
                "INT64, FP32, FP64, but receive type %s.", \
-               Str(__dtype__)); \
+               Str(__dtype__).c_str()); \
     } \
   }()

@@ -164,7 +163,7 @@ FASTDEPLOY_DECL bool ReadBinaryFromFile(const std::string& file,
       FDASSERT(false, \
                "Invalid enum data type. Expect to accept data type FP32, " \
                "FP64, but receive type %s.", \
-               Str(__dtype__)); \
+               Str(__dtype__).c_str()); \
     } \
   }()

@@ -180,7 +179,7 @@ FASTDEPLOY_DECL bool ReadBinaryFromFile(const std::string& file,
       FDASSERT(false, \
                "Invalid enum data type. Expect to accept data type INT32, " \
                "INT64, but receive type %s.", \
-               Str(__dtype__)); \
+               Str(__dtype__).c_str()); \
     } \
   }()

Expand Down
15 changes: 9 additions & 6 deletions csrc/fastdeploy/vision/common/processors/mat.cc
@@ -97,8 +97,9 @@ FDDataType Mat::Type() {
   } else if (type == 1) {
     return FDDataType::INT8;
   } else if (type == 2) {
-    FDASSERT(false, "While calling Mat::Type(), get UINT16 type which is not "
-                    "supported now.");
+    FDASSERT(false,
+             "While calling Mat::Type(), get UINT16 type which is not "
+             "supported now.");
   } else if (type == 3) {
     return FDDataType::INT16;
   } else if (type == 4) {
@@ -108,10 +109,12 @@ FDDataType Mat::Type() {
   } else if (type == 6) {
     return FDDataType::FP64;
   } else {
-    FDASSERT(false, "While calling Mat::Type(), get type = " +
-                        std::to_string(type) + ", which is not expected!.");
+    FDASSERT(
+        false,
+        "While calling Mat::Type(), get type = %d, which is not expected!",
+        type);
   }
 }

-}  // namespace vision
-}  // namespace fastdeploy
+}  // namespace vision
+}  // namespace fastdeploy
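For context on the bare integers 0–6 that `Mat::Type()` switches over: they are OpenCV depth codes, i.e. the value of `cv::Mat::depth()`. A sketch of the same mapping written against OpenCV's named constants, with `FDDataType` replaced by strings so it stays self-contained:

```cpp
#include <opencv2/core.hpp>

#include <stdexcept>
#include <string>

// Same mapping as Mat::Type(), but with OpenCV's named depth
// constants instead of magic numbers.
std::string DepthToName(const cv::Mat& m) {
  switch (m.depth()) {
    case CV_8U:  return "UINT8";  // 0
    case CV_8S:  return "INT8";   // 1
    case CV_16U:                  // 2, unsupported by Mat::Type()
      throw std::runtime_error("UINT16 is not supported");
    case CV_16S: return "INT16";  // 3
    case CV_32S: return "INT32";  // 4
    case CV_32F: return "FP32";   // 5
    case CV_64F: return "FP64";   // 6
    default:
      throw std::runtime_error("unexpected depth " +
                               std::to_string(m.depth()));
  }
}
```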
4 changes: 2 additions & 2 deletions csrc/fastdeploy/vision/detection/ppdet/ppyoloe.cc
@@ -99,8 +99,8 @@ bool PPYOLOE::BuildPreprocessPipelineFromConfig() {
       auto target_size = op["target_size"].as<std::vector<int>>();
       int interp = op["interp"].as<int>();
       FDASSERT(target_size.size(),
-               "Require size of target_size be 2, but now it's " +
-                   std::to_string(target_size.size()) + ".");
+               "Require size of target_size to be 2, but now it's %lu.",
+               target_size.size());
       if (!keep_ratio) {
         int width = target_size[1];
         int height = target_size[0];
4 changes: 2 additions & 2 deletions csrc/fastdeploy/vision/segmentation/ppseg/model.cc
@@ -127,8 +127,8 @@ bool PaddleSegModel::Postprocess(
   // 3. shape: 2-D HW
   FDASSERT(infer_result.dtype == FDDataType::INT64 ||
                infer_result.dtype == FDDataType::FP32,
-           "Require the data type of output is int64 or fp32, but now it's " +
-               Str(infer_result.dtype) + ".");
+           "Require the data type of output to be int64 or fp32, but now it's %s.",
+           Str(infer_result.dtype).c_str());
   result->Clear();

   if (infer_result.shape.size() == 4) {
12 changes: 6 additions & 6 deletions docs/compile/prebuilt_libraries.md
@@ -14,15 +14,15 @@ FastDeploy provides prebuilt C++ deployment libraries for Windows/Linux/Mac

 | Download link | Hardware | Notes |
 | :------------- | :--- | :--- |
-| [fastdeploy-linux-x64-0.2.0](https://bj.bcebos.com/paddlehub/fastdeploy/cpp/fastdeploy-linux-x64-0.2.0.tgz) | CPU | Built with g++ 8.2 |
-| [fastdeploy-linux-x64-gpu-0.2.0](https://bj.bcebos.com/paddlehub/fastdeploy/cpp/fastdeploy-linux-x64-gpu-0.2.0.tgz) | CPU/GPU | Built with g++ 8.2, CUDA 11.2, cuDNN 8.2 |
+| [fastdeploy-linux-x64-0.2.0.tgz](https://bj.bcebos.com/paddlehub/fastdeploy/cpp/fastdeploy-linux-x64-0.2.0.tgz) | CPU | Built with g++ 8.2 |
+| [fastdeploy-linux-x64-gpu-0.2.0.tgz](https://bj.bcebos.com/paddlehub/fastdeploy/cpp/fastdeploy-linux-x64-gpu-0.2.0.tgz) | CPU/GPU | Built with g++ 8.2, CUDA 11.2, cuDNN 8.2 |

 ### Windows 10 x64

 | Download link | Hardware | Notes |
 | :------------- | :--- | :--- |
-| [fastdeploy-win-x64-0.2.0](https://bj.bcebos.com/paddlehub/fastdeploy/cpp/fastdeploy-win-x64-0.2.0.zip) | CPU | Built with Visual Studio 16 2019 |
-| [fastdeploy-win-x64-gpu-0.2.0](https://bj.bcebos.com/paddlehub/fastdeploy/cpp/fastdeploy-win-x64-gpu-0.2.0.zip) | CPU/GPU | Built with Visual Studio 16 2019, CUDA 11.2, cuDNN 8.2 |
+| [fastdeploy-win-x64-0.2.0.zip](https://bj.bcebos.com/paddlehub/fastdeploy/cpp/fastdeploy-win-x64-0.2.0.zip) | CPU | Built with Visual Studio 16 2019 |
+| [fastdeploy-win-x64-gpu-0.2.0.zip](https://bj.bcebos.com/paddlehub/fastdeploy/cpp/fastdeploy-win-x64-gpu-0.2.0.zip) | CPU/GPU | Built with Visual Studio 16 2019, CUDA 11.2, cuDNN 8.2 |

 ### Linux aarch64

@@ -35,8 +35,8 @@ FastDeploy provides prebuilt C++ deployment libraries for Windows/Linux/Mac

 | Download link | Architecture | Hardware |
 | :---- | :-- | :------ |
-| [comming...] | x86 | CPU |
-| [comming...] | arm64 | CPU |
+| [fastdeploy-osx-x86_64-0.2.0.tgz](https://bj.bcebos.com/paddlehub/fastdeploy/cpp/fastdeploy-osx-x86_64-0.2.0.tgz) | x86 | CPU |
+| [fastdeploy-osx-arm64-0.2.0.tgz](https://bj.bcebos.com/paddlehub/fastdeploy/cpp/fastdeploy-osx-arm64-0.2.0.tgz) | arm64 | CPU |

 ## Other documentation
