Skip to content

Commit fedb609

Browse files
authored
[cherry-pick] fluid-lite subgraph resnet50 test. test=develop test=release/1.7 (#22224)
[cherry-pick] #22191 - Added a unit test that runs ResNet via the fluid-lite subgraph engine - Updated the git commit id of the Paddle-Lite dependency
1 parent 3acb995 commit fedb609

File tree

3 files changed

+77
-1
lines changed

3 files changed

+77
-1
lines changed

cmake/external/lite.cmake

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -43,7 +43,7 @@ if (NOT LITE_SOURCE_DIR OR NOT LITE_BINARY_DIR)
4343
${LITE_PROJECT}
4444
${EXTERNAL_PROJECT_LOG_ARGS}
4545
GIT_REPOSITORY "https://github.com/PaddlePaddle/Paddle-Lite.git"
46-
GIT_TAG 947cda26637d46dc23f4e39d2b52e7d9a1fa6eef
46+
GIT_TAG b30dc65b264f7bc3753ba862ff4e529ea2af6665
4747
PREFIX ${LITE_SOURCES_DIR}
4848
UPDATE_COMMAND ""
4949
BUILD_COMMAND ${LITE_BUILD_COMMAND}

paddle/fluid/inference/tests/api/CMakeLists.txt

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -367,6 +367,10 @@ download_data(${LITE_MODEL_INSTALL_DIR} "mul_model_fp32.tgz")
367367
inference_analysis_test(lite_mul_model_test SRCS lite_mul_model_test.cc
368368
EXTRA_DEPS ${INFERENCE_EXTRA_DEPS}
369369
ARGS --infer_model=${LITE_MODEL_INSTALL_DIR})
370+
inference_analysis_test(lite_resnet50_test SRCS lite_resnet50_test.cc
371+
EXTRA_DEPS ${INFERENCE_EXTRA_DEPS}
372+
ARGS --infer_model=${RESNET50_MODEL_DIR})
373+
370374
inference_analysis_test(test_analyzer_capi SRCS analyzer_capi_tester.cc
371375
EXTRA_DEPS ${INFERENCE_EXTRA_DEPS} paddle_fluid_c
372376
ARGS --infer_model=${RESNET50_MODEL_DIR}/model)
Lines changed: 72 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,72 @@
1+
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
2+
3+
Licensed under the Apache License, Version 2.0 (the "License");
4+
you may not use this file except in compliance with the License.
5+
You may obtain a copy of the License at
6+
7+
http://www.apache.org/licenses/LICENSE-2.0
8+
9+
Unless required by applicable law or agreed to in writing, software
10+
distributed under the License is distributed on an "AS IS" BASIS,
11+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12+
See the License for the specific language governing permissions and
13+
limitations under the License. */
14+
15+
#include <gflags/gflags.h>
16+
#include <glog/logging.h>
17+
#include <gtest/gtest.h>
18+
#include <cmath>
19+
20+
#include "paddle/fluid/inference/tests/api/tester_helper.h"
21+
22+
namespace paddle {
23+
namespace inference {
24+
25+
TEST(AnalysisPredictor, use_gpu) {
26+
std::string model_dir = FLAGS_infer_model + "/" + "model";
27+
AnalysisConfig config;
28+
config.EnableUseGpu(100, 0);
29+
config.SetModel(model_dir + "/model", model_dir + "/params");
30+
config.EnableLiteEngine(paddle::AnalysisConfig::Precision::kFloat32);
31+
32+
std::vector<PaddleTensor> inputs;
33+
auto predictor = CreatePaddlePredictor(config);
34+
const int batch = 1;
35+
const int channel = 3;
36+
const int height = 318;
37+
const int width = 318;
38+
const int input_num = batch * channel * height * width;
39+
std::vector<float> input(input_num, 1);
40+
41+
PaddleTensor in;
42+
in.shape = {1, 3, 318, 318};
43+
in.data =
44+
PaddleBuf(static_cast<void*>(input.data()), input_num * sizeof(float));
45+
in.dtype = PaddleDType::FLOAT32;
46+
inputs.emplace_back(in);
47+
48+
std::vector<PaddleTensor> outputs;
49+
ASSERT_TRUE(predictor->Run(inputs, &outputs));
50+
51+
const std::vector<float> truth_values = {
52+
127.780396, 738.16656, 1013.2264, -438.17206, 366.4022, 927.66187,
53+
736.2241, -633.68567, -329.92737, -430.15637, -633.0639, -146.54858,
54+
-1324.2804, -1349.3661, -242.67671, 117.44864, -801.7251, -391.51495,
55+
-404.8202, 454.16132, 515.48206, -133.03114, 69.293076, 590.09753,
56+
-1434.6917, -1070.8903, 307.0744, 400.52573, -316.12177, -587.1265,
57+
-161.05742, 800.3663, -96.47157, 748.708, 868.17645, -447.9403,
58+
112.73656, 1127.1992, 47.43518, 677.7219, 593.1881, -336.4011,
59+
551.3634, 397.82474, 78.39835, -715.4006, 405.96988, 404.25684,
60+
246.01978, -8.430191, 131.36617, -648.0528};
61+
62+
const size_t expected_size = 1;
63+
EXPECT_EQ(outputs.size(), expected_size);
64+
float* data_o = static_cast<float*>(outputs[0].data.data());
65+
for (size_t j = 0; j < outputs[0].data.length() / sizeof(float); j += 10) {
66+
EXPECT_NEAR((data_o[j] - truth_values[j / 10]) / truth_values[j / 10], 0.,
67+
10e-5);
68+
}
69+
}
70+
71+
} // namespace inference
72+
} // namespace paddle

0 commit comments

Comments
 (0)