|
"""
Copyright (R) @huawei.com, all rights reserved
-*- coding:utf-8 -*-
CREATED: 2023-05-25 09:12:13
MODIFIED: 2023-05-25 10:10:55
"""
| 7 | + |
| 8 | + |
| 9 | +# import videocapture as video |
| 10 | +import numpy as np |
| 11 | +import cv2 |
| 12 | +import sys |
| 13 | +sys.path.append("/root/ACLLite/python") # 添加到文件开头 |
| 14 | +import time |
| 15 | + |
| 16 | +from acllite_resource import AclLiteResource |
| 17 | +from acllite_model import AclLiteModel |
| 18 | +from acllite_imageproc import AclLiteImageProc |
| 19 | +from acllite_image import AclLiteImage |
| 20 | +from acllite_logger import log_error, log_info |
| 21 | + |
# Class-id -> label lookup: the model's 10 classes are the digits "0".."9".
labels = [str(digit) for digit in range(10)]
| 23 | + |
class sampleYOLOV7(object):
    """Offline (.om) detection model wrapper: resource init, preprocess,
    inference, postprocess (draw + show), and resource teardown.

    NOTE(review): postprocess assumes the model output rows are laid out as
    [x1, y1, x2, y2, obj_conf, class_score_0, ...] with corner-format boxes
    in model-input pixel coordinates — confirm against the exported model.
    """

    def __init__(self, model_path, model_width, model_height):
        # Path to the offline model and the input resolution it expects.
        self.model_path = model_path
        self.model_width = model_width
        self.model_height = model_height

    def init_resource(self):
        """Initialize the ACL runtime, create the DVPP image processor,
        and load the offline model."""
        self._resource = AclLiteResource()
        self._resource.init()

        self._dvpp = AclLiteImageProc(self._resource)
        self._model = AclLiteModel(self.model_path)

    def preprocess(self, frame):
        """Resize *frame* to the model input size, keeping the original
        frame for drawing results later.

        The resized image is transposed HWC -> CHW and given a batch axis,
        yielding shape (1, C, model_height, model_width).
        """
        self.src_image = frame
        resized = cv2.resize(frame, (self.model_width, self.model_height))
        # HWC -> CHW, then prepend a batch dimension of 1.
        self.resized_image = resized.transpose(2, 0, 1)[np.newaxis, :]

    def infer(self):
        """Run the model on the preprocessed frame.

        self.result holds the list of output arrays from the model.
        """
        self.result = self._model.execute([self.resized_image])

    @staticmethod
    def nms(boxes, scores, iou_threshold=0.5):
        """Non-maximum suppression.

        Args:
            boxes: (N, 4) array of [x1, y1, x2, y2] corner coordinates.
            scores: (N,) per-box confidence.
            iou_threshold: remaining boxes whose IoU with a kept box exceeds
                this value are suppressed.

        Returns:
            List of indices of the kept boxes, in descending score order.
        """
        order = scores.argsort()[::-1]  # indices sorted by confidence, desc
        keep = []

        while order.size > 0:
            top = order[0]
            keep.append(top)  # always keep the current best box
            if order.size == 1:
                break

            # Intersection rectangle of the top box with every remaining box.
            xx1 = np.maximum(boxes[top, 0], boxes[order[1:], 0])
            yy1 = np.maximum(boxes[top, 1], boxes[order[1:], 1])
            xx2 = np.minimum(boxes[top, 2], boxes[order[1:], 2])
            yy2 = np.minimum(boxes[top, 3], boxes[order[1:], 3])

            inter = np.maximum(0.0, xx2 - xx1) * np.maximum(0.0, yy2 - yy1)

            area_top = (boxes[top, 2] - boxes[top, 0]) * \
                       (boxes[top, 3] - boxes[top, 1])
            area_rest = (boxes[order[1:], 2] - boxes[order[1:], 0]) * \
                        (boxes[order[1:], 3] - boxes[order[1:], 1])
            iou = inter / (area_top + area_rest - inter)

            # Drop boxes overlapping the kept box beyond the threshold;
            # +1 shifts indices back into `order` (iou excludes order[0]).
            order = order[np.where(iou <= iou_threshold)[0] + 1]

        return keep

    def postprocess(self):
        """Select the best box per class, filter by confidence, then draw
        the detections on the original frame and display it."""
        predictions = self.result[0]
        h, w, _ = self.src_image.shape
        # Scale factors from model-input space back to the original frame.
        scale_x = w / self.model_width
        scale_y = h / self.model_height

        # Flatten to (num_boxes, 5 + num_classes).
        predictions = predictions.reshape(-1, predictions.shape[-1])
        confidences = predictions[:, 4]
        class_ids = np.argmax(predictions[:, 5:], axis=1)

        # Keep only the single highest-confidence box for each class.
        keep_boxes = []
        for cls in np.unique(class_ids):
            cls_indices = np.where(class_ids == cls)[0]
            keep_boxes.append(cls_indices[np.argmax(confidences[cls_indices])])

        # FIX: force an integer dtype. np.array([]) defaults to float64, and
        # indexing with a float array raises IndexError when nothing was kept.
        keep_boxes = np.array(keep_boxes, dtype=np.intp)

        # Drop detections below the confidence threshold.
        final_mask = confidences[keep_boxes] > 0.70
        boxes = predictions[keep_boxes, :4][final_mask]
        confidences = confidences[keep_boxes][final_mask]
        class_ids = class_ids[keep_boxes][final_mask]

        # Draw each surviving detection, rescaled to the original frame.
        for box, conf, cls_id in zip(boxes, confidences, class_ids):
            x1, y1, x2, y2 = (box * [scale_x, scale_y, scale_x, scale_y]).astype(int)
            label = f"{labels[cls_id]} {conf:.2f}"
            cv2.rectangle(self.src_image, (x1, y1), (x2, y2), (0, 255, 0), 2)
            cv2.putText(self.src_image, label, (x1, max(y1 - 20, 10)),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)

        cv2.imshow('Detection', self.src_image)

    def release_resource(self):
        """Release ACL resources: unload the model and image processor first,
        then finalize the ACL runtime context.

        FIX: the original deleted self._resource before the model/dvpp that
        were created on it; the dependents must be torn down first.
        """
        del self._model
        del self._dvpp
        del self._resource
        del self.resized_image
| 127 | + |
def find_camera_index():
    """Return the index of the first camera (0..9) that delivers a frame.

    Raises:
        ValueError: if no working camera is found.
    """
    max_index_to_check = 10  # maximum device index to probe
    for index in range(max_index_to_check):
        cap = cv2.VideoCapture(index)
        try:
            ok = cap.read()[0]
        finally:
            # FIX: the original leaked the capture handle whenever read()
            # failed — release it on every path, not only on success.
            cap.release()
        if ok:
            return index
    # If no camera is found
    raise ValueError("No camera found.")
| 137 | + |
| 138 | + |
if __name__ == '__main__':
    model_path = '/root/thuei-1/EdgeAndRobotics/Samples/YOLOV5USBCamera/model/numbers.om'
    model_width = 640
    model_height = 640

    model = sampleYOLOV7(model_path, model_width, model_height)
    model.init_resource()

    # camera_index = find_camera_index()
    cap = cv2.VideoCapture(0)
    # FIX: postprocess() shows its output in a window named 'Detection';
    # the original configured a window named 'out' that was never used.
    cv2.namedWindow('Detection', cv2.WINDOW_NORMAL)
    try:
        while True:
            ret, frame = cap.read()
            if not ret:
                print("Can't receive frame (stream end?). Exiting ...")
                break
            model.preprocess(frame)
            model.infer()
            model.postprocess()
            # Quit on 'q'.
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
    finally:
        # FIX: release the camera, windows and ACL resources even if the
        # loop raises — the original skipped cleanup on any exception.
        cap.release()
        cv2.destroyAllWindows()
        model.release_resource()
0 commit comments