Showing 1 changed file with 650 additions and 0 deletions
source/detect_mask_video_v1.py (new file, mode 100644)
# USAGE
# python detect_mask_video_v1.py [--face face_detector] [--model mask_detector.model] [--confidence 0.5]

# import the necessary packages
from tensorflow.keras.applications.mobilenet_v2 import preprocess_input
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.models import load_model
import numpy as np
import argparse
import os
import cv2
import sys
from PyQt5 import QtCore
from PyQt5 import QtWidgets
from PyQt5 import QtGui
from PyQt5 import QtTest
import pyaudio
import wave
import requests
import time

# Audio
# input_device_index in RecordAudio.startRecording differs from machine to machine.
FORMAT = pyaudio.paInt16
CHANNELS = 1
RATE = 16000
CHUNK = 1024
MAX_RECORD_SECONDS = 30
WAVE_OUTPUT_FILENAME = "saved_voice\\audiofile\\file.wav"
WAVE_ENROLL_FILENAME = "saved_voice\\enrollfile\\file.wav"
# URL
URL = 'http://163.180.146.68:7777/{}'
# SpeakerRecognition
THRESHOLD = 0.8
SPEAKER_ID = 'NA'

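# Note: the recording loops below read int(RATE / CHUNK * MAX_RECORD_SECONDS)
# chunks per recording: int(16000 / 1024 * 30) = int(468.75) = 468 reads, i.e.
# 468 * 1024 / 16000 ≈ 29.95 s of audio, just under MAX_RECORD_SECONDS because
# of the integer truncation.
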
class ShowVideo(QtCore.QObject):
    flag_detect_mask = True
    run_video = True

    camera = cv2.VideoCapture(0)  # index of the attached video device; 0 is the default camera

    ret, image = camera.read()  # returns two values: whether a frame was read, and the frame itself
    height, width = image.shape[:2]

    VideoSignal1 = QtCore.pyqtSignal(QtGui.QImage)  # custom signal that carries the rendered frame

    def __init__(self, parent=None):
        super(ShowVideo, self).__init__(parent)

    @QtCore.pyqtSlot()
    def startVideo(self, faceNet, maskNet):
        run_video = True
        self.flag_detect_mask = True
        while run_video:
            ret, image = self.camera.read()

            # detect faces in the frame and determine if they are wearing a
            # face mask or not
            QtWidgets.QApplication.processEvents()
            if self.flag_detect_mask:
                (locs, preds) = detect_and_predict_mask(image, faceNet, maskNet)

                frame = image
                # loop over the detected face locations and their corresponding
                # predictions
                for (box, pred) in zip(locs, preds):
                    # unpack the bounding box and predictions
                    (startX, startY, endX, endY) = box
                    (mask, withoutMask) = pred

                    # determine the class label and color we'll use to draw
                    # the bounding box and text
                    label = "Mask" if mask > withoutMask else "No Mask"  # string drawn above the box
                    color = (0, 255, 0) if label == "Mask" else (0, 0, 255)

                    # include the probability in the label
                    label = "{}: {:.2f}%".format(label, max(mask, withoutMask) * 100)

                    # display the label and bounding box rectangle on the output
                    # frame
                    cv2.putText(frame, label, (startX, startY - 10),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.45, color, 2)
                    cv2.rectangle(frame, (startX, startY), (endX, endY), color, 2)
                image = frame

            color_swapped_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

            qt_image1 = QtGui.QImage(color_swapped_image.data,
                                     self.width,
                                     self.height,
                                     color_swapped_image.strides[0],
                                     QtGui.QImage.Format_RGB888)
            self.VideoSignal1.emit(qt_image1)

            loop = QtCore.QEventLoop()
            QtCore.QTimer.singleShot(25, loop.quit)  # ~25 ms per frame, i.e. roughly 40 fps pacing
            loop.exec_()

    @QtCore.pyqtSlot()
    def maskdetectionoff(self):
        self.flag_detect_mask = False


class ImageViewer(QtWidgets.QWidget):
    def __init__(self, parent=None):
        super(ImageViewer, self).__init__(parent)
        self.image = QtGui.QImage()
        self.setAttribute(QtCore.Qt.WA_OpaquePaintEvent)

    def paintEvent(self, event):
        painter = QtGui.QPainter(self)
        painter.drawImage(0, 0, self.image)
        self.image = QtGui.QImage()

    def initUI(self):
        self.setWindowTitle('Webcam')

    @QtCore.pyqtSlot(QtGui.QImage)
    def setImage(self, image):
        if image.isNull():
            print("Viewer Dropped frame!")

        self.image = image
        if image.size() != self.size():
            self.setFixedSize(image.size())
        self.update()


def detect_and_predict_mask(frame, faceNet, maskNet):
    # grab the dimensions of the frame and then construct a blob
    # from it
    (h, w) = frame.shape[:2]
    blob = cv2.dnn.blobFromImage(frame, 1.0, (300, 300),
                                 (104.0, 177.0, 123.0))
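    # (104.0, 177.0, 123.0) are the per-channel BGR mean values commonly used
    # with this res10 SSD Caffe face detector; blobFromImage subtracts them.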

    # pass the blob through the network and obtain the face detections
    faceNet.setInput(blob)
    detections = faceNet.forward()

    # initialize our list of faces, their corresponding locations,
    # and the list of predictions from our face mask network
    faces = []
    locs = []
    preds = []

    # loop over the detections
    for i in range(0, detections.shape[2]):
        # extract the confidence (i.e., probability) associated with
        # the detection
        confidence = detections[0, 0, i, 2]

        # filter out weak detections by ensuring the confidence is
        # greater than the minimum confidence
        if confidence > args["confidence"]:
            # compute the (x, y)-coordinates of the bounding box for
            # the object
            box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
            (startX, startY, endX, endY) = box.astype("int")

            # ensure the bounding boxes fall within the dimensions of
            # the frame
            (startX, startY) = (max(0, startX), max(0, startY))
            (endX, endY) = (min(w - 1, endX), min(h - 1, endY))

            # extract the face ROI, convert it from BGR to RGB channel
            # ordering, resize it to 224x224, and preprocess it
            face = frame[startY:endY, startX:endX]
            face = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)
            face = cv2.resize(face, (224, 224))
            face = img_to_array(face)
            face = preprocess_input(face)

            # add the face and bounding boxes to their respective
            # lists
            faces.append(face)
            locs.append((startX, startY, endX, endY))

    # only make predictions if at least one face was detected
    if len(faces) > 0:
        # for faster inference we'll make batch predictions on *all*
        # faces at the same time rather than one-by-one predictions
        # in the above `for` loop
        faces = np.array(faces, dtype="float32")
        preds = maskNet.predict(faces, batch_size=32)

    # return a 2-tuple of the face locations and their corresponding
    # predictions
    return (locs, preds)


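# Shape note (illustrative; not part of the original code): locs is a list of
# (startX, startY, endX, endY) pixel tuples and preds is a parallel sequence of
# [mask, withoutMask] probabilities, so a caller can do, e.g.:
#   locs, preds = detect_and_predict_mask(frame, faceNet, maskNet)
#   for (startX, startY, endX, endY), (mask, withoutMask) in zip(locs, preds):
#       print((startX, startY, endX, endY), "Mask" if mask > withoutMask else "No Mask")
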
class SpeakerRecognition(QtWidgets.QWidget):
    verification_url = URL.format('verification')
    identification_url = URL.format('identification')
    enrollment_url = URL.format('enroll')
    speaker_id = ''

    def __init__(self, parent=None):
        super(SpeakerRecognition, self).__init__(parent)
        self.initUI()

    def initUI(self):
        self.label_1_1 = QtWidgets.QLabel('Result Message: ', self)
        self.label_1_2 = QtWidgets.QLabel('', self)
        self.push_button5 = QtWidgets.QPushButton('Authenticate', self)
        self.push_button5.clicked.connect(self.doAction)

        self.dialog_button = QtWidgets.QPushButton('화자 ID 입력:', self)  # "Enter speaker ID:"
        self.dialog_button.clicked.connect(self.showDialog)
        self.le = QtWidgets.QLineEdit(self)

        self.register_button = QtWidgets.QPushButton('Register new voice', self)
        self.register_button.clicked.connect(self.switch_enrollment)

    def verification(self, speaker):
        try:
            with open(WAVE_OUTPUT_FILENAME, 'rb') as file_opened:
                files = {'file': file_opened}
                data = {'enroll_speaker': speaker}
                r = requests.post(self.verification_url, files=files, data=data)
                print(r.text)
                return r.text
        except FileNotFoundError:
            return False

    def identification(self):
        try:
            with open(WAVE_OUTPUT_FILENAME, 'rb') as file_opened:
                files = {'file': file_opened}
                r = requests.post(self.identification_url, files=files)
                print(r.text)
                return r.text
        except FileNotFoundError:
            return False

    def recognition(self):
        speaker = self.identification()
        if speaker is False:
            print('Record voice first!')
            return False

        percentage = self.verification(speaker)
        print(speaker, percentage)

        if float(percentage) >= THRESHOLD:
            result = '승인! 등록된 화자입니다.(ID: {})'.format(speaker)  # "Approved! Registered speaker."
        else:
            result = '등록되지 않은 화자입니다!'  # "Speaker is not registered!"
        return result

    @QtCore.pyqtSlot()
    def doAction(self):
        start = time.time()
        recog = self.recognition()
        print("Inference time : ", time.time() - start)
        if recog is False:
            self.label_1_2.setText('Voice not recorded, record voice first!')
        else:
            self.label_1_2.setText(recog)

    def enrollment(self, speaker_id):
        try:
            if speaker_id == '':
                return 0
            with open(WAVE_ENROLL_FILENAME, 'rb') as file_opened:
                files = {'file': file_opened}
                data = {'enroll_speaker': speaker_id}
                r = requests.post(self.enrollment_url, files=files, data=data)
                print(r.text)
                return r.text
        except FileNotFoundError:
            return 1

    def switch_enrollment(self):
        enroll = self.enrollment(self.speaker_id)
        if enroll == 1:
            self.label_1_2.setText('Voice not recorded, record voice first!')
        elif enroll == 0:
            self.label_1_2.setText('No speaker ID input!')
        else:
            self.label_1_2.setText("New speaker registered!('%s')" % self.speaker_id)
            self.speaker_id = ''
            self.le.setText(self.speaker_id)

    def showDialog(self):
        # dialog title: "Speaker enrollment"; prompt: "Enter the (unique) speaker ID to enroll:"
        text, ok = QtWidgets.QInputDialog.getText(self, '화자 등록',
                                                  '등록할 화자 ID(Unique 값)을 입력하십시오:')
        if ok:
            self.le.setText(str(text))
            self.speaker_id = str(text)

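# Assumed server contract, inferred from the client calls above (the server is
# not part of this commit): POST a multipart WAV to /identification to get the
# best-matching speaker id as plain text; POST to /verification with form field
# enroll_speaker to get a similarity score in [0, 1], compared against
# THRESHOLD; POST to /enroll with enroll_speaker to register a voice. E.g.
# (hypothetical speaker id "alice"):
#   curl -F "file=@file.wav" -F "enroll_speaker=alice" http://163.180.146.68:7777/verification
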
class RecordAudio(QtCore.QObject):
    isrecording = False
    frames = []

    def __init__(self, parent=None):
        super(RecordAudio, self).__init__(parent)

    @QtCore.pyqtSlot()
    def startRecording(self):
        # start recording
        self.audio = pyaudio.PyAudio()
        self.stream = self.audio.open(format=FORMAT,
                                      channels=CHANNELS,
                                      rate=RATE,
                                      input=True,
                                      input_device_index=1,  # the microphone index differs per machine
                                      frames_per_buffer=CHUNK)
        self.isrecording = True
        print("recording...")

        self.frames.clear()

        for i in range(0, int(RATE / CHUNK * MAX_RECORD_SECONDS)):
            QtWidgets.QApplication.processEvents()
            if self.isrecording:
                data = self.stream.read(CHUNK)
                self.frames.append(data)
            else:
                print("Stopped recording")
                break
        print("finished recording")

        # stop recording
        self.stream.stop_stream()
        self.stream.close()
        self.audio.terminate()
        waveFile = wave.open(WAVE_OUTPUT_FILENAME, 'wb')
        waveFile.setnchannels(CHANNELS)
        waveFile.setsampwidth(self.audio.get_sample_size(FORMAT))
        waveFile.setframerate(RATE)
        waveFile.writeframes(b''.join(self.frames))
        waveFile.close()
        self.frames.clear()

    def stopRecording(self):
        print("stop called")
        self.isrecording = False

    def switch(self):
        if self.isrecording:
            QtTest.QTest.qWait(1 * 1000)
            self.stopRecording()
        else:
            self.startRecording()

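# Note: RecordAudio (above) and RecordAudio_enroll (below) are identical except
# for the file they write. A minimal consolidation sketch, assuming nothing
# beyond what both classes already do (hypothetical class name; not wired into
# the UI, both original classes are kept below):
#
#   class RecordAudioTo(QtCore.QObject):
#       def __init__(self, filename, parent=None):
#           super(RecordAudioTo, self).__init__(parent)
#           self.filename = filename  # WAVE_OUTPUT_FILENAME or WAVE_ENROLL_FILENAME
#       # startRecording/stopRecording/switch exactly as in RecordAudio,
#       # with wave.open(self.filename, 'wb') in place of the hard-coded path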
class RecordAudio_enroll(QtCore.QObject):
    isrecording = False
    frames = []

    def __init__(self, parent=None):
        super(RecordAudio_enroll, self).__init__(parent)

    @QtCore.pyqtSlot()
    def startRecording(self):
        # start recording
        self.audio = pyaudio.PyAudio()
        self.stream = self.audio.open(format=FORMAT,
                                      channels=CHANNELS,
                                      rate=RATE,
                                      input=True,
                                      input_device_index=1,  # the microphone index differs per machine
                                      frames_per_buffer=CHUNK)
        self.isrecording = True
        print("recording...")

        self.frames.clear()

        for i in range(0, int(RATE / CHUNK * MAX_RECORD_SECONDS)):
            QtWidgets.QApplication.processEvents()
            if self.isrecording:
                data = self.stream.read(CHUNK)
                self.frames.append(data)
            else:
                print("Stopped recording")
                break
        print("finished recording")

        # stop recording
        self.stream.stop_stream()
        self.stream.close()
        self.audio.terminate()
        waveFile = wave.open(WAVE_ENROLL_FILENAME, 'wb')
        waveFile.setnchannels(CHANNELS)
        waveFile.setsampwidth(self.audio.get_sample_size(FORMAT))
        waveFile.setframerate(RATE)
        waveFile.writeframes(b''.join(self.frames))
        waveFile.close()
        self.frames.clear()

    def stopRecording(self):
        print("stop called")
        self.isrecording = False

    def switch(self):
        if self.isrecording:
            QtTest.QTest.qWait(1 * 1000)
            self.stopRecording()
        else:
            self.startRecording()

class RecordViewer(QtWidgets.QWidget):
    def __init__(self, parent=None):
        super(RecordViewer, self).__init__(parent)
        self.initUI()

    def initUI(self):
        self.pbar = QtWidgets.QProgressBar(self)
        self.pbar.setFixedWidth(400)
        self.pbar.setMaximum(MAX_RECORD_SECONDS)
        self.pbar.setAlignment(QtCore.Qt.AlignCenter)

        self.push_button3 = QtWidgets.QPushButton('Start Audio Record', self)
        self.push_button3.clicked.connect(self.doAction)

        self.timer = QtCore.QBasicTimer()
        self.step = 0

    def timerEvent(self, e):
        if self.step >= MAX_RECORD_SECONDS:
            self.timer.stop()
            self.push_button3.setText("Restart")
            return
        self.step = self.step + 1
        self.pbar.setValue(self.step)
        self.pbar.setFormat("%d sec" % self.step)

    @QtCore.pyqtSlot()
    def doAction(self):
        if self.timer.isActive():
            self.timer.stop()
            self.push_button3.setText("Restart")
        else:
            self.pbar.reset()
            self.step = 0
            self.timer.start(1000, self)  # the timer fires every 1000 ms (once per second)
            self.push_button3.setText("Stop")


if __name__ == '__main__':
    # construct the argument parser and parse the arguments
    ap = argparse.ArgumentParser()
    ap.add_argument("-f", "--face", type=str, default="face_detector",
                    help="path to face detector model directory")
    ap.add_argument("-m", "--model", type=str, default="mask_detector.model",
                    help="path to trained face mask detector model")
    ap.add_argument("-c", "--confidence", type=float, default=0.5,
                    help="minimum probability to filter weak detections")
    args = vars(ap.parse_args())

    # load our serialized face detector model from disk
    print("[INFO] loading face detector model...")
    prototxtPath = os.path.sep.join([args["face"], "deploy.prototxt"])
    weightsPath = os.path.sep.join([args["face"],
                                    "res10_300x300_ssd_iter_140000.caffemodel"])
    faceNet = cv2.dnn.readNet(prototxtPath, weightsPath)

    # load the face mask detector model from disk
    print("[INFO] loading face mask detector model...")
    maskNet = load_model(args["model"])

    app = QtWidgets.QApplication(sys.argv)  # create the Qt application

    thread = QtCore.QThread()
    thread.start()
    vid = ShowVideo()
    vid.moveToThread(thread)

    thread2 = QtCore.QThread()
    thread2.start()
    aud = RecordViewer()
    aud.moveToThread(thread2)

    thread3 = QtCore.QThread()
    thread3.start()
    mic = RecordAudio_enroll()
    mic.moveToThread(thread3)

    thread4 = QtCore.QThread()
    thread4.start()
    sr = SpeakerRecognition()
    sr.moveToThread(thread4)

    thread5 = QtCore.QThread()
    thread5.start()
    aud2 = RecordViewer()
    aud2.moveToThread(thread5)

    thread6 = QtCore.QThread()
    thread6.start()
    mic2 = RecordAudio()
    mic2.moveToThread(thread6)

    thread7 = QtCore.QThread()
    thread7.start()
    sr2 = SpeakerRecognition()
    sr2.moveToThread(thread7)

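    # Caveat (general Qt rule, not specific to this code): QWidget subclasses
    # such as RecordViewer and SpeakerRecognition must stay in the GUI thread;
    # Qt refuses moveToThread for widgets ("Widgets cannot be moved to a new
    # thread"), so only the QObject-based workers above actually run threaded.
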
    image_viewer1 = ImageViewer()

    vid.VideoSignal1.connect(image_viewer1.setImage)

    push_button1 = QtWidgets.QPushButton('Start Mask Detection')
    push_button2 = QtWidgets.QPushButton('Mask Detection Off')
    push_button4 = QtWidgets.QPushButton('Close')

    push_button1.clicked.connect(lambda: vid.startVideo(faceNet, maskNet))
    push_button2.clicked.connect(vid.maskdetectionoff)
    aud.push_button3.clicked.connect(mic.switch)
    push_button4.clicked.connect(sys.exit)
    aud2.push_button3.clicked.connect(mic2.switch)

    # a QWidget can only sit in one layout at a time, so each spacer below gets
    # a fresh QLabel from this helper instead of sharing a single empty_label
    def empty_label():
        return QtWidgets.QLabel('')

    L_groupBox = QtWidgets.QGroupBox("Mask Detection")
    LR_layout = QtWidgets.QVBoxLayout()
    LR_layout.addWidget(push_button1)
    LR_layout.addWidget(push_button2)
    LR_layout.addStretch(1)

    L_horizontal_layout1 = QtWidgets.QHBoxLayout()
    L_horizontal_layout1.addWidget(image_viewer1)
    L_horizontal_layout1.addLayout(LR_layout)
    L_groupBox.setLayout(L_horizontal_layout1)

    RU_groupBox = QtWidgets.QGroupBox("Speaker Registration")
    pbar_layout = QtWidgets.QHBoxLayout()
    pbar_layout.addWidget(aud.pbar)
    pbar_layout.addStretch(1)
    ##
    dialog_layout = QtWidgets.QHBoxLayout()
    dialog_layout.addWidget(sr2.dialog_button)
    dialog_layout.addWidget(sr2.le)
    dialog_layout.addStretch(1)

    register_layout = QtWidgets.QHBoxLayout()
    register_layout.addWidget(sr2.register_button)

    result_1_layout = QtWidgets.QHBoxLayout()
    result_1_layout.addWidget(sr2.label_1_1)
    result_1_layout.addWidget(sr2.label_1_2)
    result_1_layout.addStretch(1)
    ##
    RL_label1 = QtWidgets.QLabel()
    RL_label1.setText("Max Record Time: 30 sec")
    RL_label2 = QtWidgets.QLabel()
    RL_label2.setText("Press Start/Restart to begin recording")

    RL_layout = QtWidgets.QVBoxLayout()
    RL_layout.addLayout(pbar_layout)
    RL_layout.addWidget(RL_label1)
    RL_layout.addWidget(RL_label2)
    RL_layout.addLayout(dialog_layout)
    RL_layout.addLayout(result_1_layout)
    RL_layout.addStretch(1)

    push_button3_layout = QtWidgets.QHBoxLayout()
    push_button3_layout.addWidget(aud.push_button3)

    RR_layout = QtWidgets.QVBoxLayout()
    RR_layout.addLayout(push_button3_layout)
    RR_layout.addWidget(empty_label())
    RR_layout.addWidget(empty_label())
    RR_layout.addLayout(register_layout)
    RR_layout.addStretch(1)

    R_horizontal_layout2 = QtWidgets.QHBoxLayout()
    R_horizontal_layout2.addLayout(RL_layout)
    R_horizontal_layout2.addLayout(RR_layout)
    RU_groupBox.setLayout(R_horizontal_layout2)

    RD_groupBox = QtWidgets.QGroupBox("Speaker Recognition")
    ##
    pbar_2_layout = QtWidgets.QHBoxLayout()
    pbar_2_layout.addWidget(aud2.pbar)
    pbar_2_layout.addStretch(1)

    RDL_label1 = QtWidgets.QLabel()
    RDL_label1.setText("Max Record Time: 30 sec")
    RDL_label2 = QtWidgets.QLabel()
    RDL_label2.setText("Press Start/Restart to begin recording")

    push_button3_2_layout = QtWidgets.QHBoxLayout()
    push_button3_2_layout.addWidget(aud2.push_button3)
    ##
    result_2_layout = QtWidgets.QHBoxLayout()
    result_2_layout.addWidget(sr.label_1_1)
    result_2_layout.addWidget(sr.label_1_2)
    result_2_layout.addStretch(1)

    RDL_layout = QtWidgets.QVBoxLayout()
    RDL_layout.addLayout(pbar_2_layout)
    RDL_layout.addWidget(RDL_label1)
    RDL_layout.addWidget(RDL_label2)
    RDL_layout.addWidget(empty_label())
    RDL_layout.addLayout(result_2_layout)
    RDL_layout.addStretch(1)

    push_button5_layout = QtWidgets.QHBoxLayout()
    push_button5_layout.addWidget(sr.push_button5)

    close_layout = QtWidgets.QHBoxLayout()
    close_layout.addWidget(push_button4)

    RDR_layout = QtWidgets.QVBoxLayout()
    RDR_layout.addLayout(push_button3_2_layout)
    RDR_layout.addWidget(empty_label())
    RDR_layout.addWidget(empty_label())
    RDR_layout.addWidget(empty_label())
    RDR_layout.addLayout(push_button5_layout)
    RDR_layout.addStretch(1)
    RDR_layout.addLayout(close_layout)

    RD_horizontal_layout = QtWidgets.QHBoxLayout()
    RD_horizontal_layout.addLayout(RDL_layout)
    RD_horizontal_layout.addLayout(RDR_layout)
    RD_groupBox.setLayout(RD_horizontal_layout)

    R_layout = QtWidgets.QVBoxLayout()
    R_layout.addWidget(RU_groupBox)
    R_layout.addWidget(RD_groupBox)

    layout = QtWidgets.QHBoxLayout()
    layout.addWidget(L_groupBox)
    layout.addLayout(R_layout)

    layout_widget = QtWidgets.QWidget()
    layout_widget.setLayout(layout)

    main_window = QtWidgets.QMainWindow()
    main_window.setGeometry(150, 150, 500, 500)  # x, y, width, height
    main_window.setCentralWidget(layout_widget)
    # window title: "Entrance control system using mask detection and speaker identification"
    main_window.setWindowTitle('마스크 디텍션 및 화자 식별을 통한 입출입 시스템')
    main_window.show()
    sys.exit(app.exec_())  # enter the Qt event loop and block until the app exits
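
# Dependencies inferred from the imports above (assumption: this commit ships
# no requirements file): tensorflow, numpy, opencv-python (cv2), PyQt5,
# PyAudio, requests; argparse, os, sys, wave, and time are standard library.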