import fastdeploy as fd
import cv2
import os
from tqdm import trange
import numpy as np
import datetime
import json


def parse_arguments():
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model", required=True, help="Path of PaddleClas model.")
    parser.add_argument(
        "--image", type=str, required=False, help="Path of test image file.")
    parser.add_argument(
        "--input_name",
        type=str,
        required=False,
        default="inputs",
        help="Input name of the inference model.")
    parser.add_argument(
        "--topk", type=int, default=1, help="Return topk results.")
    parser.add_argument(
        "--cpu_num_thread",
        type=int,
        default=12,
        help="Number of CPU threads for inference.")
    parser.add_argument(
        "--size",
        nargs='+',
        type=int,
        default=[1, 3, 224, 224],
        help="Shape of the inference input array.")
    parser.add_argument(
        "--iter_num",
        required=True,
        type=int,
        help="Number of iterations for computing performance.")
    parser.add_argument(
        "--device",
        nargs='+',
        type=str,
        default=['cpu', 'cpu', 'cpu', 'gpu', 'gpu', 'gpu'],
        help="Type of inference device, support 'cpu' or 'gpu'.")
    parser.add_argument(
        "--backend",
        nargs='+',
        type=str,
        default=['ort', 'paddle', 'ov', 'ort', 'trt', 'paddle'],
        help="Inference backend, support 'ort', 'paddle', 'ov' or 'trt'.")
    args = parser.parse_args()
    backend_list = ['ov', 'trt', 'ort', 'paddle']
    device_list = ['cpu', 'gpu']
    assert len(args.device) == len(args.backend), \
        "--device and --backend must have the same number of values"
    assert args.iter_num > 10, "--iter_num has to be bigger than 10"
    assert len(args.size) == 4, \
        "--size should include 4 values, e.g., --size 1 3 300 300"
    for b in args.backend:
        assert b in backend_list, "%s backend is not supported" % b
    for d in args.device:
        assert d in device_list, "%s device is not supported" % d
    return args
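

# Example invocation (the script name and model directory here are
# hypothetical; any flag combination accepted above works the same way):
#   python benchmark_ppcls.py --model ./ResNet50_vd_infer \
#       --iter_num 100 --device cpu gpu --backend ov trt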


def build_option(index, args):
    option = fd.RuntimeOption()
    device = args.device[index]
    backend = args.backend[index]
    option.set_cpu_thread_num(args.cpu_num_thread)
    if device == "gpu":
        option.use_gpu()

    if backend == "trt":
        assert device == "gpu", "the trt backend requires device=='gpu'"
        option.use_trt_backend()
        option.set_trt_input_shape(args.input_name, args.size)
    elif backend == "ov":
        assert device == "cpu", "the openvino backend requires device=='cpu'"
        option.use_openvino_backend()
    elif backend == "paddle":
        option.use_paddle_backend()
    elif backend == "ort":
        option.use_ort_backend()
    else:
        print("%s is an unsupported backend" % backend)

    print("============= inference using %s backend on %s device ============="
          % (args.backend[index], args.device[index]))
    return option
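
# With the default --device/--backend lists, for example, index 3 maps to
# device='gpu' with backend='ort', so build_option(3, args) configures:
#   option = fd.RuntimeOption()
#   option.set_cpu_thread_num(args.cpu_num_thread)
#   option.use_gpu()
#   option.use_ort_backend()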


args = parse_arguments()

save_dict = dict()

for index, device_name in enumerate(args.device):
    if device_name not in save_dict:
        save_dict[device_name] = dict()

    # Configure the runtime option and load the model
    runtime_option = build_option(index, args)

    model_file = os.path.join(args.model, "inference.pdmodel")
    params_file = os.path.join(args.model, "inference.pdiparams")
    config_file = os.path.join(args.model, "inference_cls.yaml")
    model = fd.vision.classification.PaddleClasModel(
        model_file, params_file, config_file, runtime_option=runtime_option)

    # Build a random input array to feed the model
    channel = args.size[1]
    height = args.size[2]
    width = args.size[3]
    input_array = np.random.randint(
        0, high=255, size=(height, width, channel), dtype=np.uint8)
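    # The random array mimics a cv2.imread result: HWC layout with uint8
    # values in [0, 255], so the model receives the same kind of input
    # whether a real image is supplied or not.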

    # If an input image is provided, run inference on it instead
    if args.image:
        input_array = cv2.imread(args.image)
    model_name = args.model.split('/')
    model_name = model_name[-1] if model_name[-1] else model_name[-2]
    print(" Model: ", model_name, " Input shape: ", input_array.shape)
    start_time = datetime.datetime.now()
    model.enable_record_time_of_runtime()
    warmup_iter = args.iter_num // 5
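    # The first ~20% of iterations serve as warmup and are excluded from
    # the averaged timings reported below.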
    warmup_end2end_time = 0
    if "iter_num" not in save_dict:
        save_dict["iter_num"] = args.iter_num
    if "warmup_iter" not in save_dict:
        save_dict["warmup_iter"] = warmup_iter
    if "cpu_num_thread" not in save_dict:
        save_dict["cpu_num_thread"] = args.cpu_num_thread
    for i in trange(args.iter_num, desc="Inference Progress"):
        if i == warmup_iter:
            # Total end-to-end time of the warmup steps (ms)
            warmup_time = datetime.datetime.now()
            warmup_end2end_time = (
                warmup_time - start_time).total_seconds() * 1000
        result = model.predict(input_array, args.topk)
    end_time = datetime.datetime.now()
    statis_info_of_runtime_dict = model.print_statis_info_of_runtime()
    # Total end-to-end time (preprocess, inference, postprocess) in ms
    end2end_time = (end_time - start_time).total_seconds() * 1000
    remain_end2end_time = end2end_time - warmup_end2end_time
    end2end = remain_end2end_time / (args.iter_num - warmup_iter)
    runtime = statis_info_of_runtime_dict["avg_time"] * 1000
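    # 'end2end' averages the full pipeline (pre-process, inference,
    # post-process) over the non-warmup iterations; 'runtime' is the average
    # inference-only time reported by FastDeploy. Both are in milliseconds.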
    print("Total time of end2end: %s ms" % str(end2end_time))
    print("Average time of end2end excluding warmup steps: %s ms" %
          str(end2end))
    print("Total time of preprocess and postprocess in warmup steps: %s ms" %
          str(warmup_end2end_time - statis_info_of_runtime_dict["warmup_time"]
              * 1000))
    print(
        "Average time of preprocess and postprocess excluding warmup steps: %s ms"
        % str((remain_end2end_time - statis_info_of_runtime_dict["remain_time"]
               * 1000) / (args.iter_num - warmup_iter)))
    # Structured output: store results per device and backend
    backend_name = args.backend[index]
    save_dict[device_name][backend_name] = {
        "end2end": end2end,
        "runtime": runtime
    }
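    # The saved JSON ends up shaped like this (values illustrative):
    # {"cpu": {"ort": {"end2end": 12.3, "runtime": 10.1}, ...},
    #  "gpu": {...}, "iter_num": 30, "warmup_iter": 6, "cpu_num_thread": 12}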
    with open("%s.json" % model_name, 'w', encoding='utf-8') as fw:
        json.dump(save_dict, fw, indent=4, ensure_ascii=False)