final version

.idea/.gitignore:
# Default ignored files
/shelf/
/workspace.xml

.idea/KHY_Project1.iml:
<?xml version="1.0" encoding="UTF-8"?>
<module type="PYTHON_MODULE" version="4">
  <component name="NewModuleRootManager">
    <content url="file://$MODULE_DIR$" />
    <orderEntry type="inheritedJdk" />
    <orderEntry type="sourceFolder" forTests="false" />
  </component>
  <component name="PyDocumentationSettings">
    <option name="format" value="GOOGLE" />
    <option name="myDocStringFormat" value="Google" />
  </component>
</module>
\ No newline at end of file

.idea/inspectionProfiles/profiles_settings.xml:
<component name="InspectionProjectProfileManager">
  <settings>
    <option name="USE_PROJECT_PROFILE" value="false" />
    <version value="1.0" />
  </settings>
</component>
\ No newline at end of file

.idea/misc.xml:
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
  <component name="ProjectRootManager" version="2" project-jdk-name="Python 3.7" project-jdk-type="Python SDK" />
</project>
\ No newline at end of file

.idea/modules.xml:
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
  <component name="ProjectModuleManager">
    <modules>
      <module fileurl="file://$PROJECT_DIR$/.idea/KHY_Project1.iml" filepath="$PROJECT_DIR$/.idea/KHY_Project1.iml" />
    </modules>
  </component>
</project>
\ No newline at end of file

.idea/vcs.xml:
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
  <component name="VcsDirectoryMappings">
    <mapping directory="$PROJECT_DIR$" vcs="Git" />
  </component>
</project>
\ No newline at end of file
@@ -115,7 +115,7 @@ class Client(tk.Frame):
                 continue
             image = frame[int(y1):int(y2), int(x1):int(x2)]
             image_list.append(image)
-            # save the MTCNN data
+            # save the tensor data
             face_list.append(face.numpy())
         return face_list, image_list
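
The `face` tensors appended above are what the renamed 'tensor' key carries over the wire. A minimal sketch of the detection step that produces them, assuming facenet_pytorch's MTCNN — the (3, 160, 160) tensors and bounding boxes in this diff match its output, but the detector itself never appears in these hunks:

import numpy as np
from facenet_pytorch import MTCNN

mtcnn = MTCNN(keep_all=True)                          # keep every face in the frame
rgb_frame = np.zeros((480, 640, 3), dtype=np.uint8)   # stand-in for a camera frame

boxes, probs = mtcnn.detect(rgb_frame)   # [x1, y1, x2, y2] per face, or None
faces = mtcnn(rgb_frame)                 # aligned (3, 160, 160) float tensors, or None
if faces is not None:
    face_list = [face.numpy() for face in faces]   # same conversion as detect_face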
@@ -128,13 +128,14 @@ class Client(tk.Frame):
         y2 = int(self.cam_height / 2 + self.detecting_square[1] / 2)
         while getattr(t, "do_run", True):
             ret, frame = self.cap.read()
-            # convert for use by the model
+            # BGR to RGB
             converted = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
             face_list, image_list = self.detect_face(converted[y1:y2, x1:x2])
             # request attendance when a face is recognized
-            self.event_loop.run_until_complete(self.send_face(face_list, image_list))
+            if face_list:
+                self.event_loop.run_until_complete(self.send_face(face_list, image_list))
             # show image
+            # mark the detection rectangle
             frame = cv2.rectangle(frame, (x1, y1), (x2, y2), self.rectangle_color, 3)
             converted = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
             # display as a mirror image
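
The new `if face_list:` guard means the blocking websocket round trip only happens on frames that actually contain a face. A minimal sketch of the surrounding thread-plus-event-loop pattern; the names are illustrative, only the `run_until_complete` usage is taken from the diff:

import asyncio
import threading

async def send_face(face_list, image_list):
    await asyncio.sleep(0)   # placeholder for the real websocket exchange

def capture_loop(event_loop, get_faces):
    asyncio.set_event_loop(event_loop)   # bind the loop to this worker thread
    face_list, image_list = get_faces()
    if face_list:                        # skip the server entirely on empty frames
        event_loop.run_until_complete(send_face(face_list, image_list))

loop = asyncio.new_event_loop()
t = threading.Thread(target=capture_loop, args=(loop, lambda: ([], [])))
t.start()
t.join()
loop.close()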
@@ -147,18 +148,15 @@ class Client(tk.Frame):
     @asyncio.coroutine
     def set_rectangle(self):
         self.rectangle_color = (255, 0, 0)
-        yield from asyncio.sleep(3)
+        yield from asyncio.sleep(2)
         self.rectangle_color = (0, 0, 255)

-    async def wait(self, n):
-        await asyncio.sleep(n)
-
     async def send_face(self, face_list, image_list):
         try:
             async with websockets.connect(uri) as websocket:
                 for face, image in zip(face_list, image_list):
                     #type: np.float32
-                    send = json.dumps({'action': 'verify', 'MTCNN': face.tolist()})
+                    send = json.dumps({'action': 'verify', 'tensor': face.tolist()})
                     await websocket.send(send)
                     recv = await websocket.recv()
                     data = json.loads(recv)
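
`set_rectangle` still uses the generator-based coroutine style; `@asyncio.coroutine` was deprecated in Python 3.8 and removed in 3.11, so on current interpreters the equivalent would be an `async def`. A sketch, not code from the repo:

import asyncio

class ClientSketch:
    def __init__(self):
        self.rectangle_color = (0, 0, 255)   # red in OpenCV's BGR order

    async def set_rectangle(self):
        self.rectangle_color = (255, 0, 0)   # flash blue (BGR) on recognition
        await asyncio.sleep(2)               # the 2-second delay from this commit
        self.rectangle_color = (0, 0, 255)   # back to red

asyncio.run(ClientSketch().set_rectangle())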
......
@@ -135,13 +135,12 @@ class Register(tk.Frame):
                 continue
             image = frame
             image_list.append(image)
-            # save the MTCNN data
+            # save the tensor data
             face_list.append(face.numpy())
         return face_list, image_list

     def mainthread(self):
         t = threading.currentThread()
-        #asyncio.set_event_loop(self.event_loop)
         x1 = int(self.cam_width / 2 - self.detecting_square[0] / 2)
         x2 = int(self.cam_width / 2 + self.detecting_square[0] / 2)
         y1 = int(self.cam_height / 2 - self.detecting_square[1] / 2)
@@ -153,7 +152,7 @@ class Register(tk.Frame):
             # convert for use by the model
             converted = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
-            # scan only the rectangle region (much faster)
+            # scan only the rectangle region
             face_list, image_list = self.detect_face(converted[y1:y2, x1:x2])
             # draw a blue rectangle when a face is recognized
@@ -162,7 +161,7 @@ class Register(tk.Frame):
             else:
                 frame = cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 0, 255), 3)
-            # show image
+            # convert from BGR to RGB
             converted = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
             # show the user a mirror image
             converted = cv2.flip(converted,1)
@@ -198,7 +197,10 @@ class Register(tk.Frame):
             async with websockets.connect(self.uri) as websocket:
                 for face, image in zip(self.face_list, self.image_list):
                     #type: np.float32
-                    send = json.dumps({'action': 'register', 'student_id':self.studentID.get(), 'student_name':self.studentName.get(), 'MTCNN': face.tolist()})
+                    send = json.dumps({'action': 'register',
+                                       'student_id':self.studentID.get(),
+                                       'student_name':self.studentName.get(),
+                                       'tensor': face.tolist()})
                     await websocket.send(send)
                     recv = await websocket.recv()
                     data = json.loads(recv)
......
@@ -35,8 +35,7 @@ clients = set()
 async def get_embeddings(face_list):
     global model
     x = torch.Tensor(face_list).to(device)
-    yhat = model(x)
-    return yhat
+    return model(x)

 async def get_distance(arr1, arr2):
     distance = np.linalg.norm(arr1 - arr2)
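
`get_embeddings` returns the model's embedding batch and `get_distance` is a plain L2 norm, so verification reduces to a nearest-neighbor search over stored embeddings. A hedged sketch of that comparison; the threshold and the registered-embedding dict are illustrative assumptions, not values from the repo:

import numpy as np

def best_match(embedding, registered, threshold=1.0):
    # return (student_id, distance) for the closest stored embedding,
    # or (None, distance) if nothing falls under the threshold
    best_id, best_dist = None, float('inf')
    for student_id, stored in registered.items():
        dist = np.linalg.norm(embedding - stored)   # same metric as get_distance
        if dist < best_dist:
            best_id, best_dist = student_id, dist
    if best_dist < threshold:
        return best_id, best_dist
    return None, best_dist

registered = {'20150001': np.zeros(512, dtype=np.float32)}   # facenet-style 512-dim vectors
print(best_match(np.zeros(512, dtype=np.float32), registered))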
@@ -78,7 +77,7 @@ async def thread(websocket, path):
         # load json
         student_id = data['student_id']
         student_name = data['student_name']
-        face = np.asarray(data['MTCNN'], dtype = np.float32)
+        face = np.asarray(data['tensor'], dtype = np.float32)
         face = face.reshape((1,3,160,160))
         # connect to the DB
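
The rename from 'MTCNN' to 'tensor' touches both sides of the protocol, and the reshape above restores the batch dimension the model expects. A small standalone round-trip sketch with a random stand-in tensor: json preserves the nested list structure of tolist(), so the server only needs to restore the dtype and prepend the batch axis:

import json
import numpy as np

face = np.random.rand(3, 160, 160).astype(np.float32)    # stand-in face tensor
wire = json.dumps({'action': 'verify', 'tensor': face.tolist()})

data = json.loads(wire)
restored = np.asarray(data['tensor'], dtype=np.float32).reshape((1, 3, 160, 160))
assert np.allclose(restored[0], face)                    # lossless round trip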
@@ -113,7 +112,7 @@ async def thread(websocket, path):
         print(msg)
         # load json
-        face = np.asarray(data['MTCNN'], dtype = np.float32)
+        face = np.asarray(data['tensor'], dtype = np.float32)
         face = face.reshape((1,3,160,160))
         embedding = await get_embeddings(face)
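
`thread(websocket, path)` is a connection handler in the legacy websockets API (newer releases of the library pass only the connection object). A minimal sketch of a server dispatching the same action-tagged JSON messages; the reply payloads are illustrative:

import asyncio
import json
import websockets

async def thread(websocket, path):
    async for message in websocket:
        data = json.loads(message)
        if data['action'] == 'verify':
            await websocket.send(json.dumps({'status': 'attend'}))    # illustrative reply
        elif data['action'] == 'register':
            await websocket.send(json.dumps({'status': 'success'}))   # illustrative reply

async def main():
    async with websockets.serve(thread, 'localhost', 8765):
        await asyncio.Future()   # serve until cancelled

# asyncio.run(main())  # commented out: starts a real server on port 8765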
@@ -171,7 +170,6 @@ async def thread(websocket, path):
         print(msg)
         arr = np.asarray(data['image'], dtype = np.uint8)
         blob = arr.tobytes()
-        # TODO: insert a tuple into the lecture DB first, or the code below will not run
         # the table's last attribute is a datetime; it defaults to the server's current time
         cursor = attendance_db.cursor(pymysql.cursors.DictCursor)
         sql = "INSERT INTO undefined_image(lecture_id, image, width, height) VALUES (%s, _binary %s, %s, %s)"
......