#!/bin/bash
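# TIPC test script for C++ serving inference.
# Usage: bash test_serving_infer_cpp.sh <config_file> <mode> [gpu_id]
#   <config_file>  TIPC config whose first lines hold the serving params
#   <mode>         test mode string, used in the log output path
#   [gpu_id]       optional CUDA device id exported via CUDA_VISIBLE_DEVICES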
source test_tipc/common_func.sh
FILENAME=$1
MODE=$2
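# the first 18 lines of the config file describe the cpp serving test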
dataline=$(awk 'NR==1, NR==18{print}' $FILENAME)
# split the config into per-line params
IFS=$'\n'
lines=(${dataline})
# parse the serving params from the config lines
model_name=$(func_parser_value "${lines[1]}")
python_list=$(func_parser_value "${lines[2]}")
trans_model_py=$(func_parser_value "${lines[3]}")
infer_model_dir_key=$(func_parser_key "${lines[4]}")
infer_model_dir_value=$(func_parser_value "${lines[4]}")
model_filename_key=$(func_parser_key "${lines[5]}")
model_filename_value=$(func_parser_value "${lines[5]}")
params_filename_key=$(func_parser_key "${lines[6]}")
params_filename_value=$(func_parser_value "${lines[6]}")
serving_server_key=$(func_parser_key "${lines[7]}")
serving_server_value=$(func_parser_value "${lines[7]}")
serving_client_key=$(func_parser_key "${lines[8]}")
serving_client_value=$(func_parser_value "${lines[8]}")
serving_dir_value=$(func_parser_value "${lines[9]}")
run_model_path_key=$(func_parser_key "${lines[10]}")
run_model_path_value=$(func_parser_value "${lines[10]}")
port_key=$(func_parser_key "${lines[11]}")
port_value=$(func_parser_value "${lines[11]}")
cpp_client_value=$(func_parser_value "${lines[12]}")
input_video_key=$(func_parser_key "${lines[13]}")
input_video_value=$(func_parser_value "${lines[13]}")
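# logs are collected under test_tipc/output/log/<model_name>/<mode>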
LOG_PATH="./test_tipc/output/log/${model_name}/${MODE}"
mkdir -p ${LOG_PATH}
status_log="${LOG_PATH}/results_serving.log"
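
# Export the inference model for serving, patch the fetch_var alias in the generated
# prototxt files, start the C++ serving server, run the client once, then record the
# result via status_check and stop the server.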
function func_serving(){
    IFS='|'
    _python=$1
    _script=$2
    _model_dir=$3
    # phase 1: save model
    set_dirname=$(func_set_params "${infer_model_dir_key}" "${infer_model_dir_value}")
    set_model_filename=$(func_set_params "${model_filename_key}" "${model_filename_value}")
    set_params_filename=$(func_set_params "${params_filename_key}" "${params_filename_value}")
    set_serving_server=$(func_set_params "${serving_server_key}" "${serving_server_value}")
    set_serving_client=$(func_set_params "${serving_client_key}" "${serving_client_value}")
    python_list=(${python_list})
    python=${python_list[0]}
    trans_model_cmd="${python} ${trans_model_py} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_serving_server} ${set_serving_client}"
    eval ${trans_model_cmd}
    # modify the alias name of fetch_var to "outputs" in both server and client prototxt
    server_fetch_var_line_cmd="sed -i '/fetch_var/,/is_lod_tensor/s/alias_name: .*/alias_name: \"outputs\"/' $serving_server_value/serving_server_conf.prototxt"
    eval ${server_fetch_var_line_cmd}
    client_fetch_var_line_cmd="sed -i '/fetch_var/,/is_lod_tensor/s/alias_name: .*/alias_name: \"outputs\"/' $serving_client_value/serving_client_conf.prototxt"
    eval ${client_fetch_var_line_cmd}
    cd ${serving_dir_value}
    echo $PWD
    unset https_proxy
    unset http_proxy
    _save_log_path="${LOG_PATH}/server_infer_gpu_batchsize_1.log"
    # phase 2: run server in the background
    cpp_server_cmd="${python} -m paddle_serving_server.serve ${run_model_path_key} ${run_model_path_value} ${port_key} ${port_value} &"
    eval ${cpp_server_cmd}
    sleep 20s
    # phase 3: run client
    real_model_name=${model_name/PP-/PP}
    # make the client config path relative to the serving directory: strip the
    # deploy/cpp_serving/ prefix and any double slash, then append serving_client_conf.prototxt
    serving_client_conf_path="${serving_client_value/deploy\/cpp_serving\/}"
    serving_client_conf_path="${serving_client_conf_path/\/\//}serving_client_conf.prototxt"
    cpp_client_cmd="${python} ${cpp_client_value} -n ${real_model_name} -c ${serving_client_conf_path} ${input_video_key} ${input_video_value} > ${_save_log_path} 2>&1 "
    eval ${cpp_client_cmd}
    last_status=${PIPESTATUS[0]}
    eval "cat ${_save_log_path}"
    cd ../../
    status_check $last_status "${cpp_server_cmd}" "${status_log}"
    # stop the serving server
    ps ux | grep -i 'paddle_serving_server' | awk '{print $2}' | xargs kill -s 9
}
# set cuda device
GPUID=$3
if [ ${#GPUID} -le 0 ]; then
    env=" "
else
    env="export CUDA_VISIBLE_DEVICES=${GPUID}"
fi
eval $env
echo "################### run test ###################"
export Count=0
IFS="|"
func_serving