diff --git a/examples/vision/detection/paddledetection/coco_label_list.txt b/examples/vision/detection/paddledetection/coco_label_list.txt
new file mode 100644
index 0000000000..941cb4e139
--- /dev/null
+++ b/examples/vision/detection/paddledetection/coco_label_list.txt
@@ -0,0 +1,80 @@
+person
+bicycle
+car
+motorcycle
+airplane
+bus
+train
+truck
+boat
+traffic light
+fire hydrant
+stop sign
+parking meter
+bench
+bird
+cat
+dog
+horse
+sheep
+cow
+elephant
+bear
+zebra
+giraffe
+backpack
+umbrella
+handbag
+tie
+suitcase
+frisbee
+skis
+snowboard
+sports ball
+kite
+baseball bat
+baseball glove
+skateboard
+surfboard
+tennis racket
+bottle
+wine glass
+cup
+fork
+knife
+spoon
+bowl
+banana
+apple
+sandwich
+orange
+broccoli
+carrot
+hot dog
+pizza
+donut
+cake
+chair
+couch
+potted plant
+bed
+dining table
+toilet
+tv
+laptop
+mouse
+remote
+keyboard
+cell phone
+microwave
+oven
+toaster
+sink
+refrigerator
+book
+clock
+vase
+scissors
+teddy bear
+hair drier
+toothbrush
diff --git a/examples/vision/detection/yolov7/README.md b/examples/vision/detection/yolov7/README.md
index b240568079..925c26100c 100644
--- a/examples/vision/detection/yolov7/README.md
+++ b/examples/vision/detection/yolov7/README.md
@@ -18,8 +18,8 @@ wget https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7.pt
 # Export onnx file (Tips: in accordance with YOLOv7 release v0.1 code)
 python models/export.py --grid --dynamic --weights PATH/TO/yolov7.pt
 
-# If your code supports exporting ONNX files with NMS, please use the following command to export ONNX files (do not use "--end2end" for now; we will support deployment of ONNX models with NMS later)
-python models/export.py --grid --dynamic --weights PATH/TO/yolov7.pt
+# If your code supports exporting ONNX files with NMS, please use the following command to export ONNX files, then refer to the `yolov7end2end_ort` or `yolov7end2end_trt` examples
+python models/export.py --grid --dynamic --end2end --weights PATH/TO/yolov7.pt
 ```
diff --git a/examples/vision/detection/yolov7/README_EN.md b/examples/vision/detection/yolov7/README_EN.md
index de15de3528..83394f52cc 100644
--- a/examples/vision/detection/yolov7/README_EN.md
+++ b/examples/vision/detection/yolov7/README_EN.md
@@ -3,7 +3,7 @@
 # YOLOv7 Prepare the model for Deployment
 
 - YOLOv7 deployment is based on [YOLOv7](https://github.com/WongKinYiu/yolov7/tree/v0.1) branching code, and [COCO Pre-Trained Models](https://github.com/WongKinYiu/yolov7/releases/tag/v0.1).
- 
+
 - (1)The *.pt provided by the [Official Library](https://github.com/WongKinYiu/yolov7/releases/tag/v0.1) can be deployed after the [export ONNX model](#export ONNX model) operation; *.trt and *.pose models do not support deployment.
 - (2)As for YOLOv7 model trained on customized data, please follow the operations guidelines in [Export ONNX model](#Export-ONNX-Model) and then refer to [Detailed Deployment Tutorials](#Detailed-Deployment-Tutorials) to complete the deployment.
@@ -16,8 +16,8 @@ wget https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7.pt
 # Export onnx file (Tips: in accordance with YOLOv7 release v0.1 code)
 python models/export.py --grid --dynamic --weights PATH/TO/yolov7.pt
 
-# If your code supports exporting ONNX files with NMS, please use the following command to export ONNX files (do not use "--end2end" for now. We will support deployment of ONNX models with NMS in the future)
-python models/export.py --grid --dynamic --weights PATH/TO/yolov7.pt
+# If your code supports exporting ONNX files with NMS, please use the following command to export ONNX files, then refer to the `yolov7end2end_ort` or `yolov7end2end_trt` examples
+python models/export.py --grid --dynamic --end2end --weights PATH/TO/yolov7.pt
 ```
 
 ## Download the pre-trained ONNX model
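For reference, a minimal sketch of deploying the `--end2end` (NMS-included) ONNX export along the lines of the `yolov7end2end_ort` example referenced above. The `YOLOv7End2EndORT` class usage, file names, and image path are assumptions about that example's API, not part of this diff:

```python
import cv2
import fastdeploy as fd

# Hypothetical file names: yolov7-end2end.onnx is whatever
#   python models/export.py --grid --dynamic --end2end --weights PATH/TO/yolov7.pt
# produced, and test.jpg is any local test image.
model = fd.vision.detection.YOLOv7End2EndORT("yolov7-end2end.onnx")

im = cv2.imread("test.jpg")
result = model.predict(im)  # NMS already ran inside the exported graph
print(result)
```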
diff --git a/fastdeploy/vision/visualize/visualize_pybind.cc b/fastdeploy/vision/visualize/visualize_pybind.cc
old mode 100755
new mode 100644
index 739fa7e809..802790c30b
--- a/fastdeploy/vision/visualize/visualize_pybind.cc
+++ b/fastdeploy/vision/visualize/visualize_pybind.cc
@@ -18,10 +18,17 @@ namespace fastdeploy {
 void BindVisualize(pybind11::module& m) {
   m.def("vis_detection",
         [](pybind11::array& im_data, vision::DetectionResult& result,
-           float score_threshold, int line_size, float font_size) {
+           std::vector<std::string>& labels, float score_threshold,
+           int line_size, float font_size) {
           auto im = PyArrayToCvMat(im_data);
-          auto vis_im = vision::VisDetection(im, result, score_threshold,
-                                             line_size, font_size);
+          cv::Mat vis_im;
+          if (labels.empty()) {
+            vis_im = vision::VisDetection(im, result, score_threshold,
+                                          line_size, font_size);
+          } else {
+            vis_im = vision::VisDetection(im, result, labels, score_threshold,
+                                          line_size, font_size);
+          }
           FDTensor out;
           vision::Mat(vis_im).ShareWithTensor(&out);
           return TensorToPyArray(out);
@@ -40,8 +47,7 @@ void BindVisualize(pybind11::module& m) {
            [](pybind11::array& im_data, vision::FaceAlignmentResult& result,
               int line_size) {
              auto im = PyArrayToCvMat(im_data);
-             auto vis_im =
-                 vision::VisFaceAlignment(im, result, line_size);
+             auto vis_im = vision::VisFaceAlignment(im, result, line_size);
              FDTensor out;
              vision::Mat(vis_im).ShareWithTensor(&out);
              return TensorToPyArray(out);
            })
@@ -86,12 +92,13 @@ void BindVisualize(pybind11::module& m) {
              return TensorToPyArray(out);
            })
       .def("vis_mot",
-           [](pybind11::array& im_data, vision::MOTResult& result,float score_threshold, vision::tracking::TrailRecorder record) {
-            auto im = PyArrayToCvMat(im_data);
-            auto vis_im = vision::VisMOT(im, result, score_threshold, &record);
-            FDTensor out;
-            vision::Mat(vis_im).ShareWithTensor(&out);
-            return TensorToPyArray(out);
+           [](pybind11::array& im_data, vision::MOTResult& result,
+              float score_threshold, vision::tracking::TrailRecorder record) {
+             auto im = PyArrayToCvMat(im_data);
+             auto vis_im = vision::VisMOT(im, result, score_threshold, &record);
+             FDTensor out;
+             vision::Mat(vis_im).ShareWithTensor(&out);
+             return TensorToPyArray(out);
            })
       .def("vis_matting",
            [](pybind11::array& im_data, vision::MattingResult& result,
@@ -107,8 +114,7 @@ void BindVisualize(pybind11::module& m) {
            [](pybind11::array& im_data, vision::HeadPoseResult& result,
               int size, int line_size) {
              auto im = PyArrayToCvMat(im_data);
-             auto vis_im =
-                 vision::VisHeadPose(im, result, size, line_size);
+             auto vis_im = vision::VisHeadPose(im, result, size, line_size);
              FDTensor out;
              vision::Mat(vis_im).ShareWithTensor(&out);
              return TensorToPyArray(out);
@@ -131,8 +137,8 @@ void BindVisualize(pybind11::module& m) {
            [](pybind11::array& im_data, vision::KeyPointDetectionResult& result,
               float conf_threshold) {
              auto im = PyArrayToCvMat(im_data);
-             auto vis_im = vision::VisKeypointDetection(
-                 im, result, conf_threshold);
+             auto vis_im =
+                 vision::VisKeypointDetection(im, result, conf_threshold);
              FDTensor out;
              vision::Mat(vis_im).ShareWithTensor(&out);
              return TensorToPyArray(out);
            })
@@ -194,15 +200,16 @@ void BindVisualize(pybind11::module& m) {
              vision::Mat(vis_im).ShareWithTensor(&out);
              return TensorToPyArray(out);
            })
-      .def_static("vis_mot",
-                  [](pybind11::array& im_data, vision::MOTResult& result,float score_threshold,
-                     vision::tracking::TrailRecorder* record) {
-                    auto im = PyArrayToCvMat(im_data);
-                    auto vis_im = vision::VisMOT(im, result, score_threshold, record);
-                    FDTensor out;
-                    vision::Mat(vis_im).ShareWithTensor(&out);
-                    return TensorToPyArray(out);
-                  })
+      .def_static(
+          "vis_mot",
+          [](pybind11::array& im_data, vision::MOTResult& result,
+             float score_threshold, vision::tracking::TrailRecorder* record) {
+            auto im = PyArrayToCvMat(im_data);
+            auto vis_im = vision::VisMOT(im, result, score_threshold, record);
+            FDTensor out;
+            vision::Mat(vis_im).ShareWithTensor(&out);
+            return TensorToPyArray(out);
+          })
       .def_static("vis_matting_alpha",
                   [](pybind11::array& im_data, vision::MattingResult& result,
                      bool remove_small_connected_area) {
diff --git a/python/fastdeploy/vision/visualize/__init__.py b/python/fastdeploy/vision/visualize/__init__.py
index a7f7c69cf2..ac640bf274 100755
--- a/python/fastdeploy/vision/visualize/__init__.py
+++ b/python/fastdeploy/vision/visualize/__init__.py
@@ -20,10 +20,11 @@
 
 def vis_detection(im_data,
                   det_result,
+                  labels=[],
                   score_threshold=0.0,
                   line_size=1,
                   font_size=0.5):
-    return C.vision.vis_detection(im_data, det_result, score_threshold,
+    return C.vision.vis_detection(im_data, det_result, labels, score_threshold,
                                   line_size, font_size)
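A minimal usage sketch of the new `labels` argument wired up above, assuming a PP-YOLOE detector exported to `ppyoloe_model/` and a local `test.jpg`; the detector class and file paths are placeholders, while `vis_detection` and `coco_label_list.txt` come from this diff:

```python
import cv2
import fastdeploy as fd

# Placeholder detector; any FastDeploy detection model that produces a
# DetectionResult should work the same way with vis_detection.
model = fd.vision.detection.PPYOLOE("ppyoloe_model/model.pdmodel",
                                    "ppyoloe_model/model.pdiparams",
                                    "ppyoloe_model/infer_cfg.yml")

# The class-id -> name mapping added by this PR, one label per line.
with open("examples/vision/detection/paddledetection/coco_label_list.txt") as f:
    labels = [line.strip() for line in f if line.strip()]

im = cv2.imread("test.jpg")
result = model.predict(im)

# New: pass `labels` so boxes are annotated with class names instead of
# bare class ids; omitting it (empty list) keeps the old behavior.
vis_im = fd.vision.vis_detection(im, result, labels=labels, score_threshold=0.5)
cv2.imwrite("visualized_result.jpg", vis_im)
```

The empty-list default is also why the binding branches on `labels.empty()`: existing callers that pass no labels keep the previous id-only rendering.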