최성환

Delete Run.py

from mylib import config, thread
from mylib.mailer import Mailer
from mylib.detection import detect_people
from imutils.video import VideoStream, FPS
from scipy.spatial import distance as dist
import numpy as np
import argparse, imutils, cv2, os, time, schedule  # VideoStream and schedule are currently unused here

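mylib/config.py itself is not part of this diff. Inferred from the attributes Run.py reads, it would look roughly like the sketch below; every value is an illustrative placeholder, not the project's actual default.

# mylib/config.py -- reconstructed sketch; all values are placeholders
MODEL_PATH = "yolo"       # folder containing coco.names, yolov3.weights, yolov3.cfg
MIN_DISTANCE = 50         # pixels; pairs closer than this are 'serious' violations
MAX_DISTANCE = 80         # pixels; pairs closer than this (but not serious) are 'abnormal'
Threshold = 15            # serious-violation count that triggers the alert
USE_GPU = False           # route the DNN through the CUDA backend/target
Thread = False            # read frames via thread.ThreadingClass instead of plain cv2.VideoCapture
url = 0                   # camera index or stream URL for the live feed
ALERT = False             # send an email once Threshold is exceeded
MAIL = "receiver@example.com"  # recipient passed to Mailer().send()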
#----------------------------Parse req. arguments------------------------------#
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--input", type=str, default="",
    help="path to (optional) input video file")
ap.add_argument("-o", "--output", type=str, default="",
    help="path to (optional) output video file")
ap.add_argument("-d", "--display", type=int, default=1,
    help="whether or not output frame should be displayed")
args = vars(ap.parse_args())
#------------------------------------------------------------------------------#

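For reference, the parser above supports invocations such as "python Run.py -i input.mp4 -o output.avi" (process a saved video and write the annotated result) or "python Run.py -d 0" (run without a display window); with no -i flag, the script falls back to the live stream configured in mylib/config.py. The file names here are placeholders.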
# load the COCO class labels our YOLO model was trained on
labelsPath = os.path.sep.join([config.MODEL_PATH, "coco.names"])
LABELS = open(labelsPath).read().strip().split("\n")

# derive the paths to the YOLO weights and model configuration
weightsPath = os.path.sep.join([config.MODEL_PATH, "yolov3.weights"])
configPath = os.path.sep.join([config.MODEL_PATH, "yolov3.cfg"])

# load our YOLO object detector trained on the COCO dataset (80 classes)
net = cv2.dnn.readNetFromDarknet(configPath, weightsPath)

# check if we are going to use GPU
if config.USE_GPU:
    # set CUDA as the preferable backend and target
    print("")
    print("[INFO] Looking for GPU")
    net.setPreferableBackend(cv2.dnn.DNN_BACKEND_CUDA)
    net.setPreferableTarget(cv2.dnn.DNN_TARGET_CUDA)

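Note that setPreferableBackend/setPreferableTarget do not raise if CUDA is unavailable: inference typically just falls back to the CPU. A quick sanity check, assuming an OpenCV build that exposes the cv2.cuda module:

# optional: verify a CUDA device is actually visible before relying on the GPU path
if cv2.cuda.getCudaEnabledDeviceCount() > 0:
    print("[INFO] CUDA device found")
else:
    print("[INFO] No CUDA device detected; DNN inference will fall back to CPU")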
# determine only the *output* layer names that we need from YOLO
ln = net.getLayerNames()
ln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]

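One portability note: from OpenCV 4.5.4 onward, getUnconnectedOutLayers() returns a flat array of integers, so the i[0] indexing above raises an error on newer builds. A version-agnostic alternative available in OpenCV 4.x:

# works on both old and new OpenCV builds
ln = net.getUnconnectedOutLayersNames()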
# if a video path was not supplied, grab a reference to the camera
if not args.get("input", False):
    print("[INFO] Starting the live stream..")
    vs = cv2.VideoCapture(config.url)
    if config.Thread:
        cap = thread.ThreadingClass(config.url)
    time.sleep(2.0)

# otherwise, grab a reference to the video file
else:
    print("[INFO] Starting the video..")
    vs = cv2.VideoCapture(args["input"])
    if config.Thread:
        cap = thread.ThreadingClass(args["input"])

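mylib/thread.py is not shown in this diff. Given how cap.read() is used below (it returns a frame directly, with no grabbed flag), ThreadingClass is presumably the familiar pattern of a daemon thread that keeps only the freshest frame so a live stream never backs up. A minimal sketch under that assumption:

# sketch of mylib/thread.py (assumed, not from this diff)
import queue
import threading
import cv2

class ThreadingClass:
    def __init__(self, name):
        self.cap = cv2.VideoCapture(name)
        self.q = queue.Queue()
        t = threading.Thread(target=self._reader)
        t.daemon = True
        t.start()

    def _reader(self):
        while True:
            ret, frame = self.cap.read()
            if not ret:
                break
            if not self.q.empty():
                try:
                    self.q.get_nowait()   # drop the stale frame
                except queue.Empty:
                    pass
            self.q.put(frame)

    def read(self):
        # matches how Run.py calls it: returns a frame, no 'grabbed' flag
        return self.q.get()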
writer = None
# start the FPS counter
fps = FPS().start()

# loop over the frames from the video stream
while True:
    # read the next frame from the file
    if config.Thread:
        frame = cap.read()
    else:
        (grabbed, frame) = vs.read()
        # if the frame was not grabbed, we have reached the end of the stream
        if not grabbed:
            break

    # resize the frame and then detect people (and only people) in it
    frame = imutils.resize(frame, width=700)
    results = detect_people(frame, net, ln,
        personIdx=LABELS.index("person"))

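detect_people comes from mylib/detection.py, which this diff does not include. Sketched below is the usual shape of such a helper: one YOLO forward pass, filtered to the person class, then non-maxima suppression. The confidence and NMS thresholds are assumptions, and it returns the (confidence, bounding box, centroid) tuples that the loop below unpacks.

# sketch of mylib/detection.py (assumed; thresholds are placeholder values)
import cv2
import numpy as np

def detect_people(frame, net, ln, personIdx=0,
                  min_conf=0.3, nms_thresh=0.3):
    (H, W) = frame.shape[:2]

    # one forward pass: image blob in, YOLO output layers out
    blob = cv2.dnn.blobFromImage(frame, 1 / 255.0, (416, 416),
                                 swapRB=True, crop=False)
    net.setInput(blob)
    layerOutputs = net.forward(ln)

    boxes, centroids, confidences = [], [], []
    for output in layerOutputs:
        for detection in output:
            # class scores start at index 5 of each detection vector
            scores = detection[5:]
            classID = np.argmax(scores)
            confidence = scores[classID]
            # keep only sufficiently confident 'person' detections
            if classID == personIdx and confidence > min_conf:
                # YOLO emits the box center plus width/height,
                # scaled relative to the frame size
                box = detection[0:4] * np.array([W, H, W, H])
                (centerX, centerY, width, height) = box.astype("int")
                x = int(centerX - (width / 2))
                y = int(centerY - (height / 2))
                boxes.append([x, y, int(width), int(height)])
                centroids.append((centerX, centerY))
                confidences.append(float(confidence))

    # non-maxima suppression prunes overlapping boxes
    idxs = cv2.dnn.NMSBoxes(boxes, confidences, min_conf, nms_thresh)
    results = []
    if len(idxs) > 0:
        for i in np.array(idxs).flatten():
            (x, y, w, h) = boxes[i]
            results.append((confidences[i], (x, y, x + w, y + h), centroids[i]))
    return results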
    # initialize the sets of indexes that violate the max/min social distance limits
    serious = set()
    abnormal = set()

    # ensure there are *at least* two people detections (required in
    # order to compute our pairwise distance maps)
    if len(results) >= 2:
        # extract all centroids from the results and compute the
        # Euclidean distances between all pairs of the centroids
        centroids = np.array([r[2] for r in results])
        D = dist.cdist(centroids, centroids, metric="euclidean")

        # loop over the upper triangular of the distance matrix
        for i in range(0, D.shape[0]):
            for j in range(i + 1, D.shape[1]):
                # check to see if the distance between any two
                # centroid pairs is less than the configured number of pixels
                if D[i, j] < config.MIN_DISTANCE:
                    # update our violation set with the indexes of the centroid pairs
                    serious.add(i)
                    serious.add(j)
                # otherwise, if the pair is still below the max distance
                # limit, mark it as abnormal
                elif D[i, j] < config.MAX_DISTANCE:
                    abnormal.add(i)
                    abnormal.add(j)

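To see the two-threshold logic in isolation, here is a self-contained toy example; the centroids and thresholds are made-up values, not ones from the project:

import numpy as np
from scipy.spatial import distance as dist

MIN_DISTANCE, MAX_DISTANCE = 50, 80                          # illustrative thresholds
centroids = np.array([(100, 100), (130, 100), (300, 300)])   # toy centroids
D = dist.cdist(centroids, centroids, metric="euclidean")

serious, abnormal = set(), set()
for i in range(D.shape[0]):
    for j in range(i + 1, D.shape[1]):
        if D[i, j] < MIN_DISTANCE:
            serious.update((i, j))
        elif D[i, j] < MAX_DISTANCE:
            abnormal.update((i, j))

print(serious)    # {0, 1}: those centroids are 30 px apart, under MIN_DISTANCE
print(abnormal)   # set(): every other pair is more than MAX_DISTANCE apart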
    # loop over the results
    for (i, (prob, bbox, centroid)) in enumerate(results):
        # extract the bounding box and centroid coordinates, then
        # initialize the color of the annotation
        (startX, startY, endX, endY) = bbox
        (cX, cY) = centroid
        color = (0, 255, 0)

        # if the index exists within the violation/abnormal sets, update the color
        if i in serious:
            color = (0, 0, 255)
        elif i in abnormal:
            color = (0, 255, 255)  # yellow; orange would be (0, 165, 255)

        # draw (1) a bounding box around the person and (2) the
        # centroid coordinates of the person
        cv2.rectangle(frame, (startX, startY), (endX, endY), color, 2)
        cv2.circle(frame, (cX, cY), 5, color, 2)

    # draw some of the parameters
    Safe_Distance = "Safe distance: >{} px".format(config.MAX_DISTANCE)
    cv2.putText(frame, Safe_Distance, (470, frame.shape[0] - 25),
        cv2.FONT_HERSHEY_SIMPLEX, 0.60, (255, 0, 0), 2)
    Threshold = "Threshold limit: {}".format(config.Threshold)
    cv2.putText(frame, Threshold, (470, frame.shape[0] - 50),
        cv2.FONT_HERSHEY_SIMPLEX, 0.60, (255, 0, 0), 2)

    # draw the total number of social distancing violations on the output frame
    text = "Total serious violations: {}".format(len(serious))
    cv2.putText(frame, text, (10, frame.shape[0] - 55),
        cv2.FONT_HERSHEY_SIMPLEX, 0.70, (0, 0, 255), 2)

    text1 = "Total abnormal violations: {}".format(len(abnormal))
    cv2.putText(frame, text1, (10, frame.shape[0] - 25),
        cv2.FONT_HERSHEY_SIMPLEX, 0.70, (0, 255, 255), 2)

#------------------------------Alert function----------------------------------#
    if len(serious) >= config.Threshold:
        cv2.putText(frame, "-ALERT: Violations over limit-", (10, frame.shape[0] - 80),
            cv2.FONT_HERSHEY_COMPLEX, 0.60, (0, 0, 255), 2)
        if config.ALERT:
            print("")
            print('[INFO] Sending mail...')
            Mailer().send(config.MAIL)
            print('[INFO] Mail sent')
            #config.ALERT = False
#------------------------------------------------------------------------------#
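mylib/mailer.py is also absent from this diff. Mailer().send(config.MAIL) presumably wraps an SMTP client along these lines; the host, port, and credentials below are placeholders, not the project's real settings:

# speculative sketch of mylib/mailer.py
import smtplib
from email.message import EmailMessage

class Mailer:
    def send(self, mail):
        msg = EmailMessage()
        msg["Subject"] = "ALERT: social distancing violations over limit"
        msg["From"] = "sender@example.com"                          # placeholder
        msg["To"] = mail
        msg.set_content("The number of serious violations exceeded the threshold.")
        with smtplib.SMTP_SSL("smtp.example.com", 465) as server:   # placeholder host
            server.login("sender@example.com", "app-password")      # placeholder creds
            server.send_message(msg)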
    # check to see if the output frame should be displayed to our screen
    if args["display"] > 0:
        # show the output frame
        cv2.imshow("Real-Time Monitoring/Analysis Window", frame)
        key = cv2.waitKey(1) & 0xFF

        # if the `q` key was pressed, break from the loop
        if key == ord("q"):
            break

    # update the FPS counter
    fps.update()

    # if an output video file path has been supplied and the video
    # writer has not been initialized, do so now
    if args["output"] != "" and writer is None:
        # initialize our video writer
        fourcc = cv2.VideoWriter_fourcc(*"MJPG")
        writer = cv2.VideoWriter(args["output"], fourcc, 25,
            (frame.shape[1], frame.shape[0]), True)

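The writer above hard-codes 25 fps, so footage recorded at a different rate plays back too fast or too slow. If that matters, the source rate can be queried first (a sketch; CAP_PROP_FPS returns 0 for some live streams, hence the fallback):

# match the output rate to the input; fall back to 25 when the
# capture cannot report its FPS (common for live streams)
src_fps = vs.get(cv2.CAP_PROP_FPS) or 25
writer = cv2.VideoWriter(args["output"], fourcc, src_fps,
    (frame.shape[1], frame.shape[0]), True)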
    # if the video writer is not None, write the frame to the output video file
    if writer is not None:
        writer.write(frame)

# stop the timer and display FPS information
fps.stop()
print("===========================")
print("[INFO] Elapsed time: {:.2f}".format(fps.elapsed()))
print("[INFO] Approx. FPS: {:.2f}".format(fps.fps()))

# release the capture/writer handles and close any open windows
vs.release()
if writer is not None:
    writer.release()
cv2.destroyAllWindows()