
Modify client.py

##################################################
# 1. Detect faces from the webcam.
# 2. Send images whose face probability is at least 97% and whose
#    bounding-box area is at least 15000 pixels to the server.
##################################################
import torch
import numpy as np
import cv2
import asyncio
import websockets
import json
import os
import timeit
import base64
import time

from PIL import Image
from io import BytesIO
import requests

from models.mtcnn import MTCNN

device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print('Running on device: {}'.format(device))

mtcnn = MTCNN(keep_all=True, post_process=True, device=device)

uri = 'ws://localhost:8765'

async def send_face(face_list, image_list):
    async with websockets.connect(uri) as websocket:
        for face, image in zip(face_list, image_list):
            # type: np.float32
            send = json.dumps({'action': 'verify', 'MTCNN': face.tolist()})
            await websocket.send(send)
            recv = await websocket.recv()
            data = json.loads(recv)
            if data['status'] == 'success':
                # verified
                print(data['student_id'], 'is attend')
            else:
                print('verification failed:', data['status'])
                if data['status'] == 'failed':
                    send = json.dumps({'action': 'save_image', 'image': image.tolist()})
                    await websocket.send(send)

def detect_face(frame):
    results = mtcnn.detect(frame)
    faces = mtcnn(frame, return_prob=False)
    image_list = []
    face_list = []
    if results[1][0] is None:
        return [], []
    for box, face, prob in zip(results[0], faces, results[1]):
        if prob < 0.97:
            continue
        print('face detected. prob:', prob)
        x1, y1, x2, y2 = box
        if (x2 - x1) * (y2 - y1) < 15000:
            # skip faces whose resolution is too low
            continue
        # keep a margin of 3 pixels around the face
        image = frame[int(y1-3):int(y2+3), int(x1-3):int(x2+3)]
        image_list.append(image)
        # keep the MTCNN face data as well
        face_list.append(face.numpy())
    return image_list, face_list

def make_face_list(frame):
    results, probs = mtcnn(frame, return_prob=True)
    face_list = []
    if probs[0] is None:
        return []
    for result, prob in zip(results, probs):
        if prob < 0.97:
            continue
        # np.float32
        face_list.append(result.numpy())
    return face_list

if __name__ == '__main__':
    cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)
    cap.set(3, 720)
    cap.set(4, 480)
    #cv2.namedWindow("img", cv2.WINDOW_NORMAL)
    while True:
        try:
            ret, frame = cap.read()
            #cv2.imshow('img', frame)
            #cv2.waitKey(10)
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            image_list, face_list = detect_face(frame)
            if not face_list:
                continue
            asyncio.get_event_loop().run_until_complete(send_face(face_list, image_list))
            time.sleep(1)
        except Exception as ex:
            print(ex)
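
For context, this version of client.py assumes a verification server listening at ws://localhost:8765 that answers each 'verify' message with a JSON status ('success' plus a student_id, or 'failed'). That server is not part of this change; the sketch below only mirrors the message format the client sends and expects back, with the actual face matching left as a stub.

# Hypothetical counterpart to client.py (not included in this change); it only
# reflects the protocol used above: 'verify' -> {'status', 'student_id'}, 'save_image' -> stored.
import asyncio
import json
import numpy as np
import websockets

async def handler(websocket):  # websockets >= 10; older versions also pass a `path` argument
    async for message in websocket:
        data = json.loads(message)
        if data['action'] == 'verify':
            face = np.array(data['MTCNN'], dtype=np.float32)  # the MTCNN face crop sent by the client
            student_id = None  # placeholder: embed `face` and look up the matching student here
            if student_id is not None:
                await websocket.send(json.dumps({'status': 'success', 'student_id': student_id}))
            else:
                await websocket.send(json.dumps({'status': 'failed'}))
        elif data['action'] == 'save_image':
            np.save('unknown_face.npy', np.array(data['image'], dtype=np.uint8))  # placeholder persistence

async def main():
    async with websockets.serve(handler, 'localhost', 8765):
        await asyncio.Future()  # run forever

if __name__ == '__main__':
    asyncio.run(main())
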
##################################################
# 1. Detect faces from the webcam.
# 2. Send images whose face probability is at least 95% to the image server.
# 3. Send the preprocessed data to the verification server.
##################################################
import torch
import numpy as np
import cv2
import asyncio
import websockets
import json
import os
import timeit
import base64

from PIL import Image
from io import BytesIO
import requests

from models.mtcnn import MTCNN

device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print('Running on device: {}'.format(device))

mtcnn = MTCNN(keep_all=True, device=device)

uri = 'ws://localhost:8765'

async def send_face(face_list, image_list):
    global uri
    async with websockets.connect(uri) as websocket:
        for face, image in zip(face_list, image_list):
            # type: np.float32
            send = json.dumps({"action": "verify", "MTCNN": face.tolist()})
            await websocket.send(send)
            recv = await websocket.recv()
            data = json.loads(recv)
            if data['status'] == 'success':
                # verified
                print(data['id'], 'is attend')
            else:
                print('verification failed')
                send = json.dumps({'action': 'save_image', 'image': image.tolist(), 'shape': image.shape})
                await websocket.send(send)

async def send_image(image_list):
    global uri
    async with websockets.connect(uri) as websocket:
        for image in image_list:
            data = json.dumps({'action': 'save_image', 'image': image.tolist(), 'shape': image.shape})
            await websocket.send(data)
        print('send', len(image_list), 'image(s)')
        code = await websocket.recv()
        print('code:', code)

def detect_face(frame):
    # If required, create a face detection pipeline using MTCNN:
    global mtcnn
    results = mtcnn.detect(frame)
    image_list = []
    if results[1][0] is None:
        return []
    for box, prob in zip(results[0], results[1]):
        if prob < 0.95:
            continue
        print('face detected. prob:', prob)
        x1, y1, x2, y2 = box
        image = frame[int(y1-10):int(y2+10), int(x1-10):int(x2+10)]
        image_list.append(image)
    return image_list

def make_face_list(frame):
    global mtcnn
    results, probs = mtcnn(frame, return_prob=True)
    face_list = []
    if probs[0] is None:
        return []
    for result, prob in zip(results, probs):
        if prob < 0.95:
            continue
        # np.float32
        face_list.append(result.numpy())
    return face_list

cap = cv2.VideoCapture(0)
cap.set(3, 720)
cap.set(4, 480)
while True:
    try:
        #start = timeit.default_timer()
        ret, frame = cap.read()
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        face_list = make_face_list(frame)
        image_list = detect_face(frame)
        ## send to the embedding server ##
        if face_list:
            asyncio.get_event_loop().run_until_complete(send_face(face_list, image_list))
        ###################
        ## send to the image server ##
        #if image_list:
        #    asyncio.get_event_loop().run_until_complete(send_image(image_list))
        ###################
        #end = timeit.default_timer()
        #print('delta time: ', end - start)
    except Exception as ex:
        print(ex)
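
This version also ships the raw face crop as a nested list together with its shape in the 'save_image' payload. The receiving side is not shown here, but rebuilding the ndarray from that payload could look roughly like the sketch below (the helper name and the BGR conversion are assumptions; the client converts frames to RGB before cropping).

# Hypothetical receiver-side helper for the 'save_image' payload above.
import json
import numpy as np
import cv2

def restore_image(message: str) -> np.ndarray:
    data = json.loads(message)
    img = np.array(data['image'], dtype=np.uint8)   # nested list -> H x W x 3 array
    assert list(img.shape) == list(data['shape'])   # 'shape' is used only as a sanity check
    return cv2.cvtColor(img, cv2.COLOR_RGB2BGR)     # back to BGR if it is to be saved with OpenCV
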
##################################################
# 1. Detect faces from the webcam.
# 2. Send images whose face probability is at least 97% to the image server.
# 3. Send the preprocessed data to the verification server.
##################################################
import torch
import numpy as np
import cv2
import asyncio
import websockets
import json
import os
import timeit
import base64

from PIL import Image
from io import BytesIO
import requests

from models.mtcnn import MTCNN

device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print('Running on device: {}'.format(device))

mtcnn = MTCNN(keep_all=True, device=device)

uri = 'ws://localhost:8765'

async def send_face(face_list, image_list):
    async with websockets.connect(uri) as websocket:
        for face, image in zip(face_list, image_list):
            # type: np.float32
            send = json.dumps({'action': 'verify', 'image': image.tolist(), 'MTCNN': face.tolist()})
            await websocket.send(send)
            recv = await websocket.recv()
            data = json.loads(recv)
            if data['status'] == 'success':
                # verified
                print(data['student_id'], 'is attend')
            elif data['status'] == 'failed':
                print('verification failed:', data['status'])

def detect_face(frame):
    results = mtcnn.detect(frame)
    image_list = []
    if results[1][0] is None:
        return []
    for box, prob in zip(results[0], results[1]):
        if prob < 0.97:
            continue
        print('face detected. prob:', prob)
        x1, y1, x2, y2 = box
        image = frame[int(y1-3):int(y2+3), int(x1-3):int(x2+3)]
        image_list.append(image)
        print(image.shape)
    return image_list

def make_face_list(frame):
    results, probs = mtcnn(frame, return_prob=True)
    face_list = []
    if probs[0] is None:
        return []
    for result, prob in zip(results, probs):
        if prob < 0.97:
            continue
        # np.float32
        face_list.append(result.numpy())
    return face_list

if __name__ == '__main__':
    cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)
    cap.set(3, 720)
    cap.set(4, 480)
    cv2.namedWindow("img", cv2.WINDOW_NORMAL)
    while True:
        try:
            ret, frame = cap.read()
            cv2.imshow('img', frame)
            cv2.waitKey(10)
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            image_list = detect_face(frame)
            if not image_list:
                continue
            face_list = make_face_list(frame)
            ## send to the embedding server ##
            if face_list:
                asyncio.get_event_loop().run_until_complete(send_face(face_list, image_list))
        except Exception as ex:
            print(ex)
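
One usage note on the main loop: it drives the coroutine with asyncio.get_event_loop().run_until_complete(...). That works, but the current asyncio documentation recommends asyncio.run() as the entry point, and it would be a drop-in replacement here since each call opens its own websocket connection anyway.

# Equivalent call with the recommended API (Python 3.7+):
if face_list:
    asyncio.run(send_face(face_list, image_list))
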