#! /usr/bin/env python
# coding=utf-8
import cv2
import time
import numpy as np
import core.utils as utils
import tensorflow as tf
from PIL import Image
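# Note: this script targets the TensorFlow 1.x API (tf.Graph / tf.Session).
# Under TensorFlow 2.x it would typically run through the compat layer, e.g.:
#
#     import tensorflow.compat.v1 as tf
#     tf.disable_v2_behavior()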
# Input tensor name plus the three YOLOv3 output heads (small/medium/large).
return_elements = ["input/input_data:0", "pred_sbbox/concat_2:0", "pred_mbbox/concat_2:0", "pred_lbbox/concat_2:0"]
pb_file = "./yolov3_visdrone.pb"
video_path = "./docs/images/1.mp4"
# video_path = 0  # uncomment to read from the webcam instead
num_classes = 12
input_size = 416
graph = tf.Graph()
return_tensors = utils.read_pb_return_tensors(graph, pb_file, return_elements)
output_path = './output/demo.mp4'
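# For reference, utils.read_pb_return_tensors presumably wraps the standard
# TF1 frozen-graph import (a sketch of the usual pattern; the actual helper
# lives in core/utils.py and may differ):
#
#     with tf.gfile.GFile(pb_file, 'rb') as f:
#         graph_def = tf.GraphDef()
#         graph_def.ParseFromString(f.read())
#     with graph.as_default():
#         tensors = tf.import_graph_def(graph_def, return_elements=return_elements)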
# Image preprocessing helper: adjust contrast and brightness.
# c: contrast gain, b: brightness offset
def contrast_brightness_image(img, c, b):
    h, w, ch = img.shape  # height, width, channels
    # All-zero (black) image with the same shape and dtype as the input.
    blank = np.zeros([h, w, ch], img.dtype)
    # Weighted sum of the two arrays: dst = src1*alpha + src2*beta + gamma.
    dst = cv2.addWeighted(img, c, blank, 1 - c, b)
    return dst
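# Hypothetical usage (the helper is defined but never called in the loop
# below; the values are illustrative): c > 1 boosts contrast, b shifts
# brightness.
#
#     frame = contrast_brightness_image(frame, 1.2, 30)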
with tf.Session(graph=graph) as sess:
    writeVideo_flag = True
    if writeVideo_flag:
        vid = cv2.VideoCapture(video_path)
        if not vid.isOpened():
            raise IOError("Couldn't open webcam or video")
        video_FourCC = cv2.VideoWriter_fourcc(*'MP4V')
        video_fps = vid.get(cv2.CAP_PROP_FPS)
        video_size = (int(vid.get(cv2.CAP_PROP_FRAME_WIDTH)),
                      int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT)))
        isOutput = output_path != ""
        if isOutput:
            out = cv2.VideoWriter(output_path, video_FourCC, video_fps, video_size)
            list_file = open('detection.txt', 'w')
            frame_index = -1  # reserved for per-frame logging (unused below)
    while True:
        return_value, frame = vid.read()
        if not return_value:
            break  # end of stream
        # frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        image = Image.fromarray(frame)
        frame_size = frame.shape[:2]
        # Letterbox-resize to the network input size and add a batch axis.
        image_data = utils.image_preporcess(np.copy(frame), [input_size, input_size])
        image_data = image_data[np.newaxis, ...]
        prev_time = time.time()
        # Run the three YOLOv3 detection heads in a single session call.
        pred_sbbox, pred_mbbox, pred_lbbox = sess.run(
            [return_tensors[1], return_tensors[2], return_tensors[3]],
            feed_dict={return_tensors[0]: image_data})
        pred_time = time.time()
        # print('time:', pred_time - prev_time)
        # Each row is (x, y, w, h, objectness, per-class probabilities).
        pred_bbox = np.concatenate([np.reshape(pred_sbbox, (-1, 5 + num_classes)),
                                    np.reshape(pred_mbbox, (-1, 5 + num_classes)),
                                    np.reshape(pred_lbbox, (-1, 5 + num_classes))], axis=0)
        # Score threshold 0.45, then non-maximum suppression at IoU 0.45.
        bboxes = utils.postprocess_boxes(pred_bbox, frame_size, input_size, 0.45)
        bboxes = utils.nms(bboxes, 0.45, method='nms')
        image = utils.draw_bbox(frame, bboxes)
        curr_time = time.time()
        exec_time = curr_time - prev_time
        result = np.asarray(image)
        info = "time: " + str(round(1000 * exec_time, 2)) + " ms, FPS: " + str(round(1 / exec_time, 1))
        cv2.putText(result, text=info, org=(50, 70), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                    fontScale=1, color=(255, 0, 0), thickness=2)
        cv2.namedWindow("result", cv2.WINDOW_AUTOSIZE)
        # result = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
        if writeVideo_flag and isOutput:
            out.write(result)  # save the annotated frame
        cv2.imshow("result", result)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # Release the capture and writer and close the window once the loop exits.
    vid.release()
    if isOutput:
        out.release()
        list_file.close()
    cv2.destroyAllWindows()
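# To run (assuming the .pb graph and test video exist at the paths above):
#     python video_mult_thread.py
# Press 'q' in the "result" window to stop early.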