Showing 1 changed file with 249 additions and 0 deletions
source/detect_mask_video_test.py
0 → 100644
# USAGE
# python detect_mask_video_test.py

# import the necessary packages
from tensorflow.keras.applications.mobilenet_v2 import preprocess_input
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.models import load_model
import numpy as np
import argparse
import os

import cv2
import sys
from PyQt5 import QtCore
from PyQt5 import QtWidgets
from PyQt5 import QtGui

class ShowVideo(QtCore.QObject):

    flag = 0

    camera = cv2.VideoCapture(0)  # index of the attached video device; default is 0

    ret, image = camera.read()  # returns two values: whether a frame was read, and the frame itself
    height, width = image.shape[:2]

    VideoSignal1 = QtCore.pyqtSignal(QtGui.QImage)  # custom signal named VideoSignal1
    VideoSignal2 = QtCore.pyqtSignal(QtGui.QImage)  # custom signal named VideoSignal2

    def __init__(self, parent=None):
        super(ShowVideo, self).__init__(parent)

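    # NOTE: ShowVideo subclasses QObject so it can be moved to a worker QThread
    # (see moveToThread() in the __main__ block); publishing frames through the
    # two signals above keeps the capture loop decoupled from the GUI widgets.
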
    @QtCore.pyqtSlot()
    def startVideo(self, faceNet, maskNet):
        run_video = True
        while run_video:
            ret, image = self.camera.read()
            if not ret:  # skip iterations where no frame could be read
                continue

            # detect faces in the frame and determine if they are wearing a
            # face mask or not
            (locs, preds) = detect_and_predict_mask(image, faceNet, maskNet)

            frame = image
            # loop over the detected face locations and their corresponding
            # predictions
            for (box, pred) in zip(locs, preds):
                # unpack the bounding box and predictions
                (startX, startY, endX, endY) = box
                (mask, withoutMask) = pred

                # determine the class label and color we'll use to draw
                # the bounding box and text
                label = "Mask" if mask > withoutMask else "No Mask"
                color = (0, 255, 0) if label == "Mask" else (0, 0, 255)

                # include the probability in the label
                label = "{}: {:.2f}%".format(label, max(mask, withoutMask) * 100)

                # display the label and bounding box rectangle on the output
                # frame
                cv2.putText(frame, label, (startX, startY - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.45, color, 2)
                cv2.rectangle(frame, (startX, startY), (endX, endY), color, 2)

            color_swapped_image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

            qt_image1 = QtGui.QImage(color_swapped_image.data,
                                     self.width,
                                     self.height,
                                     color_swapped_image.strides[0],
                                     QtGui.QImage.Format_RGB888)
            self.VideoSignal1.emit(qt_image1)

            if self.flag:
                img_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
                img_canny = cv2.Canny(img_gray, 50, 100)

                qt_image2 = QtGui.QImage(img_canny.data,
                                         self.width,
                                         self.height,
                                         img_canny.strides[0],
                                         QtGui.QImage.Format_Grayscale8)

                self.VideoSignal2.emit(qt_image2)

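            # a 25 ms single-shot timer plus a nested QEventLoop hands control
            # back to Qt on every iteration, so paint events and pending signal
            # deliveries still run while this while-loop blocks (~40 fps cap)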
            loop = QtCore.QEventLoop()
            QtCore.QTimer.singleShot(25, loop.quit)  # 25 ms
            loop.exec_()

    @QtCore.pyqtSlot()
    def canny(self):
        self.flag = 1 - self.flag  # toggle the Canny-edge output on/off

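# the `canny` slot above is intended to be driven by a button; see the
# commented-out `push_button2.clicked.connect(vid.canny)` line in __main__
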
class ImageViewer(QtWidgets.QWidget):
    def __init__(self, parent=None):
        super(ImageViewer, self).__init__(parent)
        self.image = QtGui.QImage()
        self.setAttribute(QtCore.Qt.WA_OpaquePaintEvent)

    def paintEvent(self, event):
        painter = QtGui.QPainter(self)
        painter.drawImage(0, 0, self.image)
        self.image = QtGui.QImage()

    def initUI(self):
        self.setWindowTitle('Webcam')

    @QtCore.pyqtSlot(QtGui.QImage)
    def setImage(self, image):
        if image.isNull():
            print("Viewer Dropped frame!")

        self.image = image
        if image.size() != self.size():
            self.setFixedSize(image.size())
        self.update()

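# ImageViewer keeps only the most recent frame: setImage() stores the incoming
# QImage, resizes the widget to match it, and schedules a repaint; paintEvent()
# draws that image and then clears the buffer so stale frames are not redrawn.
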
def detect_and_predict_mask(frame, faceNet, maskNet):
    # grab the dimensions of the frame and then construct a blob from it;
    # (104.0, 177.0, 123.0) are the per-channel BGR means the face detector
    # was trained with
    (h, w) = frame.shape[:2]
    blob = cv2.dnn.blobFromImage(frame, 1.0, (300, 300),
        (104.0, 177.0, 123.0))

    # pass the blob through the network and obtain the face detections
    faceNet.setInput(blob)
    detections = faceNet.forward()

    # initialize our list of faces, their corresponding locations,
    # and the list of predictions from our face mask network
    faces = []
    locs = []
    preds = []

    # loop over the detections
    for i in range(0, detections.shape[2]):
        # extract the confidence (i.e., probability) associated with
        # the detection
        confidence = detections[0, 0, i, 2]

        # filter out weak detections by ensuring the confidence is
        # greater than the minimum confidence (`args` is the module-level
        # dict parsed in the __main__ block)
        if confidence > args["confidence"]:
            # compute the (x, y)-coordinates of the bounding box for
            # the object
            box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
            (startX, startY, endX, endY) = box.astype("int")

            # ensure the bounding boxes fall within the dimensions of
            # the frame
            (startX, startY) = (max(0, startX), max(0, startY))
            (endX, endY) = (min(w - 1, endX), min(h - 1, endY))

            # skip degenerate boxes that would yield an empty face ROI
            if startX >= endX or startY >= endY:
                continue

            # extract the face ROI, convert it from BGR to RGB channel
            # ordering, resize it to 224x224, and preprocess it
            face = frame[startY:endY, startX:endX]
            face = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)
            face = cv2.resize(face, (224, 224))
            face = img_to_array(face)
            face = preprocess_input(face)

            # add the face and bounding boxes to their respective
            # lists
            faces.append(face)
            locs.append((startX, startY, endX, endY))

    # only make predictions if at least one face was detected
    if len(faces) > 0:
        # for faster inference we'll make batch predictions on *all*
        # faces at the same time rather than one-by-one predictions
        # in the above `for` loop
        faces = np.array(faces, dtype="float32")
        preds = maskNet.predict(faces, batch_size=32)

    # return a 2-tuple of the face locations and their corresponding
    # predictions
    return (locs, preds)


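# A minimal single-image usage sketch (illustrative only, not part of this
# file's GUI flow). It assumes faceNet/maskNet are loaded as in the __main__
# block below; note detect_and_predict_mask reads the module-level `args`
# dict, and "example.jpg" is a hypothetical input path:
#
#   args = {"confidence": 0.5}
#   frame = cv2.imread("example.jpg")
#   (locs, preds) = detect_and_predict_mask(frame, faceNet, maskNet)
#   for (box, (mask, withoutMask)) in zip(locs, preds):
#       print(box, "Mask" if mask > withoutMask else "No Mask")
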
if __name__ == '__main__':

    # construct the argument parser and parse the arguments
    ap = argparse.ArgumentParser()
    ap.add_argument("-f", "--face", type=str, default="face_detector",
        help="path to face detector model directory")
    ap.add_argument("-m", "--model", type=str, default="mask_detector.model",
        help="path to trained face mask detector model")
    ap.add_argument("-c", "--confidence", type=float, default=0.5,
        help="minimum probability to filter weak detections")
    args = vars(ap.parse_args())

    # load our serialized face detector model from disk
    print("[INFO] loading face detector model...")
    prototxtPath = os.path.sep.join([args["face"], "deploy.prototxt"])
    weightsPath = os.path.sep.join([args["face"],
        "res10_300x300_ssd_iter_140000.caffemodel"])
    faceNet = cv2.dnn.readNet(prototxtPath, weightsPath)

    # load the face mask detector model from disk
    print("[INFO] loading face mask detector model...")
    maskNet = load_model(args["model"])

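    # faceNet is now the OpenCV-DNN res10 SSD face detector (Caffe files above)
    # and maskNet the Keras mask classifier; the mobilenet_v2 preprocess_input
    # import at the top suggests the classifier uses a MobileNetV2 backbone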

    app = QtWidgets.QApplication(sys.argv)  # create the Qt application

    thread = QtCore.QThread()
    thread.start()
    vid = ShowVideo()
    vid.moveToThread(thread)

    image_viewer1 = ImageViewer()
    #image_viewer2 = ImageViewer()

    vid.VideoSignal1.connect(image_viewer1.setImage)
    #vid.VideoSignal2.connect(image_viewer2.setImage)

    #push_button1 = QtWidgets.QPushButton('Start')
    #push_button2 = QtWidgets.QPushButton('Canny')
    #push_button1.clicked.connect(vid.startVideo)
    #push_button2.clicked.connect(vid.canny)

    vertical_layout = QtWidgets.QVBoxLayout()
    horizontal_layout = QtWidgets.QHBoxLayout()
    horizontal_layout.addWidget(image_viewer1)
    #horizontal_layout.addWidget(image_viewer2)
    vertical_layout.addLayout(horizontal_layout)
    #vertical_layout.addWidget(push_button1)
    #vertical_layout.addWidget(push_button2)

    layout_widget = QtWidgets.QWidget()
    layout_widget.setLayout(vertical_layout)

    main_window = QtWidgets.QMainWindow()
    main_window.setCentralWidget(layout_widget)
    main_window.setWindowTitle('Webcam Test')  # main window title
    main_window.show()

    # calling startVideo() directly runs its loop in the main thread; the
    # nested QEventLoop inside it keeps the GUI responsive meanwhile
    vid.startVideo(faceNet, maskNet)

    sys.exit(app.exec_())  # enter Qt's event loop; runs until the app exits