Commit 4c4866b
This project will help you do social distance detection in this COVID-19 situation at your premises. This is very basic code; feel free to check it out and make any improvements.
17 changed files with 2,022 additions and 0 deletions.
README.md
This code will help you do social distance detection in this COVID-19 situation at your premises. This is very basic code; feel free to check it out and make any improvements.

### Steps to run the code:

1- Open a command prompt and change directory (cd) into the project folder.

2- Install all of the dependencies below; you will need the following to run this code (a one-line install command is sketched after these steps):

Python, Cython, imutils, numpy, opencv-python, Pillow, PyYAML, six, torch, torchvision

3- For human detection, download the yolov3.weights and yolov3.cfg files. (The weights file is not included in this repository because of its size; it can be downloaded here: https://pjreddie.com/media/files/yolov3.weights)

4- Put the downloaded yolov3.weights in the project's yolo-coco folder.

5- Run the code: python social_distance_detector.py --input pedestrians.mp4 --output output.avi

For running: a good GPU gives faster results, but a CPU is also fine (I have tried it on a CPU).
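
Assuming pip and curl are available on the command line, steps 2 and 3 can be scripted roughly like this (exact torch/torchvision builds vary by platform, so treat it as a sketch):

```
pip install Cython imutils numpy opencv-python Pillow PyYAML six torch torchvision
curl -L -o yolo-coco/yolov3.weights https://pjreddie.com/media/files/yolov3.weights
```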
yolo-coco/coco.names
person
bicycle
car
motorcycle
airplane
bus
train
truck
boat
traffic light
fire hydrant
stop sign
parking meter
bench
bird
cat
dog
horse
sheep
cow
elephant
bear
zebra
giraffe
backpack
umbrella
handbag
tie
suitcase
frisbee
skis
snowboard
sports ball
kite
baseball bat
baseball glove
skateboard
surfboard
tennis racket
bottle
wine glass
cup
fork
knife
spoon
bowl
banana
apple
sandwich
orange
broccoli
carrot
hot dog
pizza
donut
cake
chair
couch
potted plant
bed
dining table
toilet
tv
laptop
mouse
remote
keyboard
cell phone
microwave
oven
toaster
sink
refrigerator
book
clock
vase
scissors
teddy bear
hair drier
toothbrush
social_distance_detector.py
# USAGE
# python social_distance_detector.py --input pedestrians.mp4
# python social_distance_detector.py --input pedestrians.mp4 --output output.avi

# import the necessary packages
from vinayedula import social_distancing_config as config
from vinayedula.detection import detect_people
from scipy.spatial import distance as dist
import numpy as np
import argparse
import imutils
import cv2
import os

# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--input", type=str, default="",
    help="path to (optional) input video file")
ap.add_argument("-o", "--output", type=str, default="",
    help="path to (optional) output video file")
ap.add_argument("-d", "--display", type=int, default=1,
    help="whether or not output frame should be displayed")
args = vars(ap.parse_args())

# load the COCO class labels our YOLO model was trained on
labelsPath = os.path.sep.join([config.MODEL_PATH, "coco.names"])
LABELS = open(labelsPath).read().strip().split("\n")

# derive the paths to the YOLO weights and model configuration
weightsPath = os.path.sep.join([config.MODEL_PATH, "yolov3.weights"])
configPath = os.path.sep.join([config.MODEL_PATH, "yolov3.cfg"])

# load our YOLO object detector trained on the COCO dataset (80 classes)
print("[INFO] loading YOLO from disk...")
net = cv2.dnn.readNetFromDarknet(configPath, weightsPath)

# check if we are going to use GPU
if config.USE_GPU:
    # set CUDA as the preferable backend and target
    print("[INFO] setting preferable backend and target to CUDA...")
    net.setPreferableBackend(cv2.dnn.DNN_BACKEND_CUDA)
    net.setPreferableTarget(cv2.dnn.DNN_TARGET_CUDA)

# determine only the *output* layer names that we need from YOLO;
# getUnconnectedOutLayers() returns 1-based indices and its return
# shape changed across OpenCV versions, so flatten() keeps this
# working on both older (Nx1) and newer (1-D) releases
ln = net.getLayerNames()
ln = [ln[i - 1] for i in net.getUnconnectedOutLayers().flatten()]

# initialize the video stream and pointer to output video file
print("[INFO] accessing video stream...")
vs = cv2.VideoCapture(args["input"] if args["input"] else 0)
writer = None

# loop over the frames from the video stream
while True:
    # read the next frame from the file
    (grabbed, frame) = vs.read()

    # if the frame was not grabbed, then we have reached the end
    # of the stream
    if not grabbed:
        break

    # resize the frame and then detect people (and only people) in it
    frame = imutils.resize(frame, width=700)
    results = detect_people(frame, net, ln,
        personIdx=LABELS.index("person"))

    # initialize the set of indexes that violate the minimum social
    # distance
    violate = set()

    # ensure there are *at least* two people detections (required in
    # order to compute our pairwise distance maps)
    if len(results) >= 2:
        # extract all centroids from the results and compute the
        # Euclidean distances between all pairs of the centroids
        centroids = np.array([r[2] for r in results])
        D = dist.cdist(centroids, centroids, metric="euclidean")

        # loop over the upper triangular of the distance matrix
        for i in range(0, D.shape[0]):
            for j in range(i + 1, D.shape[1]):
                # check to see if the distance between any two
                # centroid pairs is less than the configured number
                # of pixels
                if D[i, j] < config.MIN_DISTANCE:
                    # update our violation set with the indexes of
                    # the centroid pairs
                    violate.add(i)
                    violate.add(j)

    # loop over the results
    for (i, (prob, bbox, centroid)) in enumerate(results):
        # extract the bounding box and centroid coordinates, then
        # initialize the color of the annotation
        (startX, startY, endX, endY) = bbox
        (cX, cY) = centroid
        color = (0, 255, 0)

        # if the index exists within the violation set, then
        # update the color
        if i in violate:
            color = (0, 0, 255)

        # draw (1) a bounding box around the person and (2) the
        # centroid coordinates of the person
        cv2.rectangle(frame, (startX, startY), (endX, endY), color, 2)
        cv2.circle(frame, (cX, cY), 5, color, 1)

    # draw the total number of social distancing violations on the
    # output frame
    text = "Social Distancing Violations: {}".format(len(violate))
    cv2.putText(frame, text, (10, frame.shape[0] - 25),
        cv2.FONT_HERSHEY_SIMPLEX, 0.85, (0, 0, 255), 3)

    # check to see if the output frame should be displayed to our
    # screen
    if args["display"] > 0:
        # show the output frame
        cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF

        # if the `q` key was pressed, break from the loop
        if key == ord("q"):
            break

    # if an output video file path has been supplied and the video
    # writer has not been initialized, do so now
    if args["output"] != "" and writer is None:
        # initialize our video writer
        fourcc = cv2.VideoWriter_fourcc(*"MJPG")
        writer = cv2.VideoWriter(args["output"], fourcc, 25,
            (frame.shape[1], frame.shape[0]), True)

    # if the video writer is not None, write the frame to the output
    # video file
    if writer is not None:
        writer.write(frame)
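
Because --display is an integer flag defaulting to 1, the script can also be run headless (for example on a remote server), writing only to the output file:

```
python social_distance_detector.py --input pedestrians.mp4 --output output.avi --display 0
```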
vinayedula/detection.py
# import the necessary packages
from .social_distancing_config import NMS_THRESH
from .social_distancing_config import MIN_CONF
import numpy as np
import cv2

def detect_people(frame, net, ln, personIdx=0):
    # grab the dimensions of the frame and initialize the list of
    # results
    (H, W) = frame.shape[:2]
    results = []

    # construct a blob from the input frame and then perform a forward
    # pass of the YOLO object detector, giving us our bounding boxes
    # and associated probabilities
    blob = cv2.dnn.blobFromImage(frame, 1 / 255.0, (416, 416),
        swapRB=True, crop=False)
    net.setInput(blob)
    layerOutputs = net.forward(ln)

    # initialize our lists of detected bounding boxes, centroids, and
    # confidences, respectively
    boxes = []
    centroids = []
    confidences = []

    # loop over each of the layer outputs
    for output in layerOutputs:
        # loop over each of the detections
        for detection in output:
            # extract the class ID and confidence (i.e., probability)
            # of the current object detection
            scores = detection[5:]
            classID = np.argmax(scores)
            confidence = scores[classID]

            # filter detections by (1) ensuring that the object
            # detected was a person and (2) that the minimum
            # confidence is met
            if classID == personIdx and confidence > MIN_CONF:
                # scale the bounding box coordinates back relative to
                # the size of the image, keeping in mind that YOLO
                # actually returns the center (x, y)-coordinates of
                # the bounding box followed by the boxes' width and
                # height
                box = detection[0:4] * np.array([W, H, W, H])
                (centerX, centerY, width, height) = box.astype("int")

                # use the center (x, y)-coordinates to derive the
                # top-left corner of the bounding box
                x = int(centerX - (width / 2))
                y = int(centerY - (height / 2))

                # update our list of bounding box coordinates,
                # centroids, and confidences
                boxes.append([x, y, int(width), int(height)])
                centroids.append((centerX, centerY))
                confidences.append(float(confidence))

    # apply non-maxima suppression to suppress weak, overlapping
    # bounding boxes
    idxs = cv2.dnn.NMSBoxes(boxes, confidences, MIN_CONF, NMS_THRESH)

    # ensure at least one detection exists
    if len(idxs) > 0:
        # loop over the indexes we are keeping
        for i in idxs.flatten():
            # extract the bounding box coordinates
            (x, y) = (boxes[i][0], boxes[i][1])
            (w, h) = (boxes[i][2], boxes[i][3])

            # update our results list to consist of the person
            # prediction probability, bounding box coordinates,
            # and the centroid
            r = (confidences[i], (x, y, x + w, y + h), centroids[i])
            results.append(r)

    # return the list of results
    return results
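
As a quick standalone check of detect_people(), a sketch like the following could be used. It assumes the yolo-coco folder from the README and a hypothetical test image test.jpg, and relies on "person" being index 0 in coco.names:

```python
import cv2
from vinayedula.detection import detect_people

# load the network and resolve its output layer names (see the main script)
net = cv2.dnn.readNetFromDarknet("yolo-coco/yolov3.cfg", "yolo-coco/yolov3.weights")
ln = net.getLayerNames()
ln = [ln[i - 1] for i in net.getUnconnectedOutLayers().flatten()]

# run detection on a single image and print each person found;
# test.jpg is a placeholder name, not a file shipped with this repo
frame = cv2.imread("test.jpg")
for prob, (startX, startY, endX, endY), (cX, cY) in detect_people(frame, net, ln, personIdx=0):
    print("person %.2f at (%d, %d)-(%d, %d), centroid (%d, %d)"
        % (prob, startX, startY, endX, endY, cX, cY))
```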
vinayedula/social_distancing_config.py
# base path to YOLO directory
MODEL_PATH = "yolo-coco"

# initialize minimum probability to filter weak detections along with
# the threshold when applying non-maxima suppression
MIN_CONF = 0.3
NMS_THRESH = 0.3

# boolean indicating if NVIDIA CUDA GPU should be used
USE_GPU = False

# define the minimum safe distance (in pixels) that two people can be
# from each other
MIN_DISTANCE = 50
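
MIN_DISTANCE is measured in pixels, so a sensible value depends on camera placement and on the 700-pixel frame width used in social_distance_detector.py. A minimal calibration sketch, assuming a hypothetical PIXELS_PER_METER value measured from a known reference object in the frame (not part of this repo):

```python
# Hypothetical calibration: measure a known object in the resized frame,
# e.g., a 1 m floor marking that spans roughly 70 pixels.
PIXELS_PER_METER = 70.0  # assumed measurement, adjust for your camera

# a 2-meter social distance expressed in pixels for MIN_DISTANCE
MIN_DISTANCE = int(2.0 * PIXELS_PER_METER)  # -> 140 pixels
print(MIN_DISTANCE)
```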