Authored by 허진호 on 2020-06-21 18:05:58 +0900
Commit 7a967319ca6814ff2212b7a403ac0112dec70a4e
2 parents: 71833cfe, 405057c4
Merge branch 'master' of http://khuhub.khu.ac.kr/2020-1-capstone-design1/KHY_Project1
Showing 9 changed files with 405 additions and 67 deletions.
DB/SQL/create_table_lecture.sql
README.md
client/client.py
client/client(window).py → client/legacy/client(window)0617.py
client/legacy/clinet(window)-06042035.py → client/legacy/clinet(window)0604.py
client/legacy/clinet(window)200605.py → client/legacy/clinet(window)0605(2).py
register/register.py
server/server.py
webserver/myapp/conf/db.js
DB/SQL/create_table_lecture.sql
CREATE TABLE lecture(
    lecture_id VARCHAR(20) NOT NULL,
    lecture_name VARCHAR(50),
    lecture_room VARCHAR(50) NOT NULL,
    PRIMARY KEY (lecture_id)
);
...
@@ -32,7 +31,8 @@ FOREIGN KEY (lecture_id) REFERENCES lecture(lecture_id)
CREATE TABLE lecture_schedule(
    lecture_id VARCHAR(20) NOT NULL,
    lecture_day VARCHAR(20) NOT NULL,
    lecture_day TINYINT NOT NULL,
    lecture_room VARCHAR(50) NOT NULL,
    lecture_start_time TIME NOT NULL,
    lecture_end_time TIME NOT NULL,
    FOREIGN KEY (lecture_id) REFERENCES lecture(lecture_id)
...
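As a usage sketch (not part of this commit), the schedule table can be queried from Python with PyMySQL, which the README below lists as a dependency. The connection credentials mirror the localhost entries in webserver/myapp/conf/db.js further down, and the lecture_id value is a made-up example.

import pymysql

# Connect with the localhost credentials from webserver/myapp/conf/db.js (assumed to be the active ones).
conn = pymysql.connect(host='localhost', user='root', password='1234', db='attendance')
try:
    with conn.cursor(pymysql.cursors.DictCursor) as cursor:
        # Look up the weekly schedule of one (hypothetical) lecture.
        cursor.execute(
            "SELECT lecture_day, lecture_start_time, lecture_end_time, lecture_room "
            "FROM lecture_schedule WHERE lecture_id = %s",
            ('CSE2020-1',))
        for row in cursor.fetchall():
            print(row)
finally:
    conn.close()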
README.md
# 주제
얼굴 인식 전자 출결 시스템
[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)

# Topic
**얼굴 인식 전자 출결 시스템**

# 팀원
# Team
- 정해갑(컴퓨터공학과, 2014104149)
- 허진호(컴퓨터공학과, 2014104161)

# 개발환경
- Windows, IBM Cloud(Ubuntu 18.04.4 LTS), MySQL
# Hardware
- server: IBM Cloud(2 vCPU | 4 GB | Ubuntu 18.04.4 LTS)
- client: (i7-7700HQ | 16 GB | Windows)

# 활용기술
# License
- pytorch(https://github.com/pytorch/pytorch)
- facenet(https://github.com/davidsandberg/facenet)
- facenet-pytorch(https://github.com/timesler/facenet-pytorch)
...
@@ -16,3 +18,11 @@
- NodeJS(https://nodejs.org)
- MySQL(https://www.mysql.com)
- PyMySQL(https://github.com/PyMySQL/PyMySQL)

# Usage
## Server
- python3 server/server.py & npm start --prefix webserver/myapp &
## Client(windows)
- execute register/register.py
- execute client/client(window).py
\ No newline at end of file
...
client/client.py
0 → 100644
##################################################
# 1. Detect faces from the webcam.
# 2. Send an image to the server when the face probability is at least 97% and the face area is at least 15000.
##################################################
import tkinter as tk
import tkinter.font
import tkinter.messagebox
import tkinter.scrolledtext
import threading
import torch
import numpy as np
import cv2
import asyncio
import websockets
import json
import os
import timeit
import base64
import time
from PIL import Image, ImageTk
from io import BytesIO
import requests

from models.mtcnn import MTCNN

device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print('Running on device: {}'.format(device))

mtcnn = MTCNN(keep_all=True, post_process=True, device=device)

uri = 'ws://169.56.95.131:8765'


class Client(tk.Frame):
    def __init__(self, parent, *args, **kwargs):
        tk.Frame.__init__(self, parent, *args, **kwargs)

        # URI
        self.uri = 'ws://169.56.95.131:8765'

        # Pytorch Model
        self.device = device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
        self.mtcnn = MTCNN(keep_all=True, device=device)

        # OpenCV
        self.cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)
        self.cam_width = 640
        self.cam_height = 480
        self.cap.set(3, self.cam_width)
        self.cap.set(4, self.cam_height)

        # Application Function
        # region of the camera frame that MTCNN is applied to
        self.detecting_square = (500, 300)
        # color of the rectangle drawn on the video
        self.rectangle_color = (0, 0, 255)

        # tkinter GUI
        self.width = 740
        self.height = 700
        self.parent = parent
        self.parent.title("출석시스템")
        self.parent.geometry("%dx%d+100+100" % (self.width, self.height))
        self.pack()
        self.create_widgets()

        # Event loop and Thread
        self.event_loop = asyncio.new_event_loop()
        self.thread = threading.Thread(target=self.mainthread)
        self.thread.start()

    def create_widgets(self):
        image = np.zeros([self.cam_height, self.cam_width, 3], dtype=np.uint8)
        image = Image.fromarray(image)
        image = ImageTk.PhotoImage(image)

        font = tk.font.Font(family="맑은 고딕", size=15)

        self.alert = tk.Label(self, text="출석시스템", font=font)
        self.alert.grid(row=0, column=0, columnspan=20)
        self.label = tk.Label(self, image=image)
        self.label.grid(row=1, column=0, columnspan=20)
        self.log = tk.scrolledtext.ScrolledText(self, wrap=tk.WORD, state=tk.DISABLED, width=96, height=10)
        self.log.grid(row=2, column=0, columnspan=20)

        self.quit = tk.Button(self, text="나가기", fg="red", command=self.stop)
        self.quit.grid(row=3, column=10)

    def logging(self, text):
        self.log.config(state=tk.NORMAL)
        self.log.insert(tkinter.CURRENT, text)
        self.log.insert(tkinter.CURRENT, '\n')
        self.log.config(state=tk.DISABLED)

    def detect_face(self, frame):
        results = self.mtcnn.detect(frame)
        faces = self.mtcnn(frame, return_prob=False)
        image_list = []
        face_list = []
        if results[1][0] == None:
            return [], []
        for box, face, prob in zip(results[0], faces, results[1]):
            if prob < 0.97:
                continue
            # for debug
            # print('face detected. prob:', prob)
            x1, y1, x2, y2 = box
            if (x2 - x1) * (y2 - y1) < 15000:
                # ignore the face if its resolution is too low
                continue
            image = frame[int(y1):int(y2), int(x1):int(x2)]
            image_list.append(image)
            # store the MTCNN data
            face_list.append(face.numpy())
        return face_list, image_list

    def mainthread(self):
        t = threading.currentThread()
        asyncio.set_event_loop(self.event_loop)
        x1 = int(self.cam_width / 2 - self.detecting_square[0] / 2)
        x2 = int(self.cam_width / 2 + self.detecting_square[0] / 2)
        y1 = int(self.cam_height / 2 - self.detecting_square[1] / 2)
        y2 = int(self.cam_height / 2 + self.detecting_square[1] / 2)
        while getattr(t, "do_run", True):
            ret, frame = self.cap.read()
            # convert so the frame can be fed to the model
            converted = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            face_list, image_list = self.detect_face(converted[y1:y2, x1:x2])
            # when a face is detected, request attendance
            self.event_loop.run_until_complete(self.send_face(face_list, image_list))

            # show image
            frame = cv2.rectangle(frame, (x1, y1), (x2, y2), self.rectangle_color, 3)
            converted = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            # show a mirrored image
            converted = cv2.flip(converted, 1)
            image = Image.fromarray(converted)
            image = ImageTk.PhotoImage(image)
            self.label.configure(image=image)
            self.label.image = image  # kind of double buffering

    @asyncio.coroutine
    def set_rectangle(self):
        self.rectangle_color = (255, 0, 0)
        yield from asyncio.sleep(3)
        self.rectangle_color = (0, 0, 255)

    async def wait(self, n):
        await asyncio.sleep(n)

    async def send_face(self, face_list, image_list):
        try:
            async with websockets.connect(uri) as websocket:
                for face, image in zip(face_list, image_list):
                    # type: np.float32
                    send = json.dumps({'action': 'verify', 'MTCNN': face.tolist()})
                    await websocket.send(send)
                    recv = await websocket.recv()
                    data = json.loads(recv)
                    if data['status'] == 'success':
                        # success
                        self.logging('출석확인: ' + data['student_id'])
                        asyncio.ensure_future(self.set_rectangle())
                    else:
                        # saving the image to the DB is on hold for now
                        # if data['status'] == 'fail':
                        #     send = json.dumps({'action': 'save_image', 'image': image.tolist()})
                        #     await websocket.send(send)
                        if data['status'] == 'already':
                            asyncio.ensure_future(self.set_rectangle())
        except Exception as e:
            self.logging(e)

    def stop(self):
        self.thread.do_run = False
        # self.thread.join()  # there is a freeze problem
        self.event_loop.close()
        self.cap.release()
        self.parent.destroy()


if __name__ == '__main__':
    root = tk.Tk()
    Client(root)
    root.mainloop()
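For reference (not part of the commit), the 'verify' protocol spoken above can be exercised without a webcam. The sketch below sends a single dummy message and prints the reply; it assumes the server from server.py is listening at the same ws:// address and that a random 3x160x160 float array is an acceptable stand-in for the MTCNN face tensor.

import asyncio
import json
import numpy as np
import websockets

async def send_dummy_verify(uri='ws://169.56.95.131:8765'):
    # A real request carries the detected face tensor; a random array stands in here.
    fake_face = np.random.rand(3, 160, 160).astype(np.float32)
    async with websockets.connect(uri) as websocket:
        await websocket.send(json.dumps({'action': 'verify', 'MTCNN': fake_face.tolist()}))
        reply = json.loads(await websocket.recv())
        print('status:', reply.get('status'))  # expected: 'success', 'already', or 'fail'

if __name__ == '__main__':
    asyncio.get_event_loop().run_until_complete(send_dummy_verify())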
client/client(window).py → client/legacy/client(window)0617.py
File moved
client/legacy/clinet(window)-06042035.py → client/legacy/clinet(window)0604.py
File moved
client/legacy/clinet(window)200605.py → client/legacy/clinet(window)0605(2).py
File moved
register/register.py
...
@@ -2,6 +2,10 @@
# 1. Detect faces from the webcam
# 2. Register the detected face
##################################################
import tkinter as tk
import tkinter.font
import tkinter.messagebox
import threading
import torch
import numpy as np
import cv2
...
@@ -11,52 +15,109 @@ import json
import os
import timeit
import base64
import time
from PIL import Image
from PIL import Image, ImageTk
from io import BytesIO
import requests

from models.mtcnn import MTCNN

device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print('Running on device: {}'.format(device))


class Register(tk.Frame):
    def __init__(self, parent, *args, **kwargs):
        tk.Frame.__init__(self, parent, *args, **kwargs)

mtcnn = MTCNN(keep_all=True, device=device)

        # URI
        self.uri = 'ws://169.56.95.131:8765'

uri = 'ws://169.56.95.131:8765'

        # Pytorch Model
        self.device = device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
        self.mtcnn = MTCNN(keep_all=True, device=device)

async def send_face(face_list, image_list):
    global uri
    async with websockets.connect(uri) as websocket:
        for face, image in zip(face_list, image_list):
            # type: np.float32
            send = json.dumps({'action': 'register', 'student_id': '2014101898',
                               'student_name': '김다솜', 'MTCNN': face.tolist()})
            await websocket.send(send)
            recv = await websocket.recv()
            data = json.loads(recv)
            if data['status'] == 'success':
                # success
                print(data['student_id'], 'is registered')

        # OpenCV
        self.cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)
        self.cam_width = 640
        self.cam_height = 480
        self.cap.set(3, self.cam_width)
        self.cap.set(4, self.cam_height)

def detect_face(frame):
    # If required, create a face detection pipeline using MTCNN:
    global mtcnn
    results = mtcnn.detect(frame)
    image_list = []
    if results[1][0] == None:
        return []
    for box, prob in zip(results[0], results[1]):
        if prob < 0.95:
            continue
        print('face detected. prob:', prob)
        x1, y1, x2, y2 = box
        image = frame[int(y1 - 10):int(y2 + 10), int(x1 - 10):int(x2 + 10)]
        image_list.append(image)
    return image_list

        # Application Function
        self.detecting_square = (200, 200)
        self.detected = False
        self.face_list = []
        self.image_list = []

        # tkinter GUI
        self.width = 740
        self.height = 640
        self.parent = parent
        self.parent.title("출석 데이터 등록")
        self.parent.geometry("%dx%d+100+100" % (self.width, self.height))
        self.pack()
        self.create_widgets()

        # Event loop and Thread
        # self.event_loop = asyncio.new_event_loop()
        self.thread = threading.Thread(target=self.mainthread)
        self.thread.start()

    def create_widgets(self):
        image = np.zeros([self.cam_height, self.cam_width, 3], dtype=np.uint8)
        image = Image.fromarray(image)
        image = ImageTk.PhotoImage(image)

        font = tk.font.Font(family="맑은 고딕", size=15)

        self.alert = tk.Label(self, text="카메라를 정면으로 향하고 화면의 사각형에 얼굴을 맞춰주세요", font=font)
        self.alert.grid(row=0, column=0, columnspan=20)
        self.label = tk.Label(self, image=image)
        self.label.grid(row=1, column=0, columnspan=20)

        self.studentID = tk.StringVar()
        self.studentIdLabel = tk.Label(self, text="학번")
        self.studentIdLabel.grid(row=2, column=10)
        self.studentIdEntry = tk.Entry(self, width=20, textvariable=self.studentID)
        self.studentIdEntry.grid(row=2, column=11)

        self.studentName = tk.StringVar()
        self.studentNameLabel = tk.Label(self, text="이름")
        self.studentNameLabel.grid(row=3, column=10)
        self.studentNameEntry = tk.Entry(self, width=20, textvariable=self.studentName)
        self.studentNameEntry.grid(row=3, column=11)

        self.registerButton = tk.Button(self, text="등록", fg="blue", command=self.register_face)
        self.registerButton.grid(row=4, column=10)
        self.registerButton = tk.Button(self, text="다시촬영", command=self.restart)
        self.registerButton.grid(row=4, column=11)

        self.quit = tk.Button(self, text="나가기", fg="red", command=self.stop)
        self.quit.grid(row=5, column=10)

    def register_face(self):
        if not self.detected:
            tk.messagebox.showinfo("경고", "얼굴이 인식되지 않았습니다.")
            return
        asyncio.get_event_loop().run_until_complete(self.send_face())

def detect_face(frame):
    results = mtcnn.detect(frame)
    faces = mtcnn(frame, return_prob=False)

    def restart(self):
        if not self.thread.isAlive():
            self.cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)
            self.cap.set(3, self.cam_width)
            self.cap.set(4, self.cam_height)
            self.detected = False
            self.face_list = []
            self.image_list = []
            self.thread = threading.Thread(target=self.mainthread)
            self.thread.start()

    def detect_face(self, frame):
        results = self.mtcnn.detect(frame)
        faces = self.mtcnn(frame, return_prob=False)
        image_list = []
        face_list = []
        if results[1][0] == None:
...
@@ -64,23 +125,98 @@ def detect_face(frame):
        for box, face, prob in zip(results[0], faces, results[1]):
            if prob < 0.97:
                continue
        print('face detected. prob:', prob)
            # for debug
            # print('face detected. prob:', prob)
            x1, y1, x2, y2 = box
            if (x2 - x1) * (y2 - y1) < 15000:
                # ignore the face if its resolution is too low
                self.alert.config(text="인식된 얼굴이 너무 작습니다. 카메라에 더 가까이 접근해주세요.", fg="red")
                self.alert.update()
                continue
            # keep the region ±3 pixels around the face
            image = frame[int(y1 - 3):int(y2 + 3), int(x1 - 3):int(x2 + 3)]
        image = frame
            image_list.append(image)
            # store the MTCNN data
            face_list.append(face.numpy())
    return image_list, face_list

cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)
cap.set(3, 720)
cap.set(4, 480)
ret, frame = cap.read()
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
image_list, face_list = detect_face(frame)
if face_list:
    asyncio.get_event_loop().run_until_complete(send_face(face_list, image_list))
\ No newline at end of file
        return face_list, image_list

    def mainthread(self):
        t = threading.currentThread()
        # asyncio.set_event_loop(self.event_loop)
        x1 = int(self.cam_width / 2 - self.detecting_square[0] / 2)
        x2 = int(self.cam_width / 2 + self.detecting_square[0] / 2)
        y1 = int(self.cam_height / 2 - self.detecting_square[1] / 2)
        y2 = int(self.cam_height / 2 + self.detecting_square[1] / 2)
        detected_time = None
        while getattr(t, "do_run", True):
            ret, frame = self.cap.read()
            # convert so the frame can be fed to the model
            converted = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            # only scan the rectangle region (makes a big speed difference)
            face_list, image_list = self.detect_face(converted[y1:y2, x1:x2])

            # draw a blue rectangle when a face is detected
            if face_list:
                frame = cv2.rectangle(frame, (x1, y1), (x2, y2), (255, 0, 0), 3)
            else:
                frame = cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 0, 255), 3)

            # show image
            converted = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            # show the user a mirrored image
            converted = cv2.flip(converted, 1)
            image = Image.fromarray(converted)
            image = ImageTk.PhotoImage(image)
            self.label.configure(image=image)
            self.label.image = image  # kind of double buffering

            # when faces are detected, store them in the member variables
            if face_list:
                self.face_list = face_list
                self.image_list = image_list
                # the picture is taken 2 seconds later
                if detected_time is None:
                    detected_time = time.time()
                else:
                    self.alert.config(text="얼굴이 인식되었습니다. %f초 후 사진을 촬영합니다" % (2 - (time.time() - detected_time)), fg="red")
                if time.time() - detected_time >= 2:
                    self.thread.do_run = False
                    self.detected = True
                    self.alert.config(text="얼굴을 등록해주세요. 올바르게 촬영되지 않았을 경우 다시촬영을 눌러주세요.", fg="blue")
            else:
                detected_time = None
                self.face_list = []
                self.image_list = []

    async def wait(self, n):
        await asyncio.sleep(n)

    async def send_face(self):
        try:
            async with websockets.connect(self.uri) as websocket:
                for face, image in zip(self.face_list, self.image_list):
                    # type: np.float32
                    send = json.dumps({'action': 'register', 'student_id': self.studentID.get(),
                                       'student_name': self.studentName.get(), 'MTCNN': face.tolist()})
                    await websocket.send(send)
                    recv = await websocket.recv()
                    data = json.loads(recv)
                    if data['status'] == 'success':
                        tk.messagebox.showinfo("등록완료", self.studentID.get() + ' ' + self.studentName.get())
        except Exception as e:
            tk.messagebox.showinfo("등록실패", e)

    def stop(self):
        self.thread.do_run = False
        # self.thread.join()  # there is a freeze problem
        # self.event_loop.close()
        self.cap.release()
        self.parent.destroy()


if __name__ == '__main__':
    root = tk.Tk()
    Register(root)
    root.mainloop()
...
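As a complementary sketch (not in the commit), a face could also be registered from a still image instead of the webcam, reusing the vendored MTCNN module and the 'register' message format above. The file path, the student data, and the assumption that MTCNN(keep_all=True)(frame) returns a tensor of faces (or None) are all illustrative.

import asyncio
import json
import cv2
import websockets
from models.mtcnn import MTCNN  # same vendored module register.py imports

async def register_from_file(path, student_id, student_name, uri='ws://169.56.95.131:8765'):
    frame = cv2.cvtColor(cv2.imread(path), cv2.COLOR_BGR2RGB)
    faces = MTCNN(keep_all=True)(frame)  # assumed: tensor of detected faces, or None
    if faces is None:
        print('no face found in', path)
        return
    message = json.dumps({'action': 'register', 'student_id': student_id,
                          'student_name': student_name, 'MTCNN': faces[0].tolist()})
    async with websockets.connect(uri) as websocket:
        await websocket.send(message)
        print(json.loads(await websocket.recv()).get('status'))

# Hypothetical usage:
# asyncio.get_event_loop().run_until_complete(register_from_file('face.jpg', '2021000000', 'Hong Gildong'))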
server/server.py
...
@@ -51,18 +51,18 @@ async def register(websocket):
    global clients
    async with lock:
        clients.add(websocket)
        remote_ip = websocket.remote_address[0]
        msg = '[{ip}] connected'.format(ip=remote_ip)
        print(msg)
        # remote_ip = websocket.remote_address[0]
        # msg='[{ip}] connected'.format(ip=remote_ip)
        # print(msg)


async def unregister(websocket):
    global lock
    global clients
    async with lock:
        clients.remove(websocket)
        remote_ip = websocket.remote_address[0]
        msg = '[{ip}] disconnected'.format(ip=remote_ip)
        print(msg)
        # remote_ip = websocket.remote_address[0]
        # msg='[{ip}] disconnected'.format(ip=remote_ip)
        # print(msg)


async def thread(websocket, path):
    await register(websocket)
...
@@ -130,12 +130,12 @@ async def thread(websocket, path):
            db_embedding = np.frombuffer(row_data['embedding'], dtype=np.float32)
            db_embedding = db_embedding.reshape((1, 512))
            distance = await get_distance(embedding, db_embedding)
            if (distance < distance_min):
            if (distance < 0.4):
                verified_id = row_data['student_id']
                distance_min = distance
                break

        # send the attendance data
        print('[debug] distance:', distance_min)
        send = ''
        if distance_min < 0.4:
            # verification succeeded
...
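The hunk above compares the incoming FaceNet embedding against each stored 512-dimensional embedding and accepts the closest match below a 0.4 threshold. get_distance itself is not shown in this diff; a plausible sketch of the check, assuming a plain Euclidean distance, is:

import numpy as np

def euclidean_distance(embedding, db_embedding):
    # Both arrays are shaped (1, 512), matching the reshape((1, 512)) above.
    return float(np.linalg.norm(embedding - db_embedding))

embedding = np.random.rand(1, 512).astype(np.float32)     # embedding of the face being verified
db_embedding = np.random.rand(1, 512).astype(np.float32)  # embedding loaded from the database
print('match' if euclidean_distance(embedding, db_embedding) < 0.4 else 'no match')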
webserver/myapp/conf/db.js
module.exports = (function(){
    return {
        local: {
            host: 'yapp.cmarogp1dz0t.ap-northeast-2.rds.amazonaws.com',
            user: 'admin',
            password: 'findmyzone!',
            host: 'localhost',
            user: 'root',
            password: '1234',
            database: 'attendance'
        }
    }
...