2020-1-capstone-design1 / KHY_Project1
Commit 96abc08ec80b2c3c838a2f8b8d3ef2bc28408dcf (96abc08e), 1 parent: f70a1fd4
Authored by Graduate, 2020-06-05 17:05:39 +0900

Modify client.py

Summary: raises the MTCNN face-probability threshold from 0.95 to 0.97, adds a minimum face-area filter of 15000, makes detect_face() return both image crops and face tensors, moves the capture loop under a __main__ guard with a preview window, and archives the previous client under client/legacy/.
Showing 2 changed files with 127 additions and 33 deletions:

  client/clinet(window).py
  client/legacy/clinet(window)0605.py
client/clinet(window).py @ 96abc08:
 ##################################################
-# 1. Detect faces from the webcam.                                            #
-# 2. Send images that are at least 95% likely to be a face to the image server. #
-# 3. Send the preprocessed data to the verification server.                   #
+# 1. Detect faces from the webcam.
+# 2. Send images that are at least 97% likely to be a face and whose area is at least 15000 to the server.
 ##################################################
 import torch
 import numpy as np
 ...

@@ -27,11 +26,10 @@ mtcnn = MTCNN(keep_all=True, device=device)
 uri = 'ws://localhost:8765'

 async def send_face(face_list, image_list):
     global uri
     async with websockets.connect(uri) as websocket:
         for face, image in zip(face_list, image_list):
             # type: np.float32
-            send = json.dumps({"action": "verify", "MTCNN": face.tolist()})
+            send = json.dumps({'action': 'verify', 'MTCNN': face.tolist()})
             await websocket.send(send)
             recv = await websocket.recv()
             data = json.loads(recv)
 ...

@@ -39,53 +37,58 @@ async def send_face(face_list, image_list):
                 # Success.
                 print(data['student_id'], 'is attend')
             else:
-                print('verification failed')
-                send = json.dumps({'action': 'save_image', 'image': image.tolist()})
-                await websocket.send(send)
+                print('verification failed:', data['status'])
+                if data['status'] == 'failed':
+                    send = json.dumps({'action': 'save_image', 'image': image.tolist()})
+                    await websocket.send(send)

 def detect_face(frame):
     # If required, create a face detection pipeline using MTCNN:
     global mtcnn
     results = mtcnn.detect(frame)
+    faces = mtcnn(frame, return_prob=False)
     image_list = []
+    face_list = []
     if results[1][0] == None:
-        return []
-    for box, prob in zip(results[0], results[1]):
-        if prob < 0.95:
+        return [], []
+    for box, face, prob in zip(results[0], faces, results[1]):
+        if prob < 0.97:
             continue
         print('face detected. prob:', prob)
         x1, y1, x2, y2 = box
-        image = frame[int(y1 - 10):int(y2 + 10), int(x1 - 10):int(x2 + 10)]
+        if (x2 - x1) * (y2 - y1) < 15000:
+            # Ignore faces whose resolution is too low.
+            continue
+        # Save the region around the face with a small margin.
+        image = frame[int(y1 - 3):int(y2 + 3), int(x1 - 3):int(x2 + 3)]
         image_list.append(image)
-    return image_list
+        # Save the MTCNN face data.
+        face_list.append(face.numpy())
+    return image_list, face_list

 def make_face_list(frame):
     global mtcnn
     results, prob = mtcnn(frame, return_prob=True)
     face_list = []
     if prob[0] == None:
         return []
     for result, prob in zip(results, prob):
-        if prob < 0.95:
+        if prob < 0.97:
             continue
         # np.float32
         face_list.append(result.numpy())
     return face_list

-cap = cv2.VideoCapture(0)
-cap.set(3, 720)
-cap.set(4, 480)
-while True:
-    try:
-        #start = timeit.default_timer()
-        ret, frame = cap.read()
-        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
-        face_list = make_face_list(frame)
-        image_list = detect_face(frame)
-        ## Send to the embedding server ##
-        if face_list:
-            asyncio.get_event_loop().run_until_complete(send_face(face_list, image_list))
-        #end = timeit.default_timer()
-        #print('delta time: ', end - start)
-    except Exception as ex:
-        print(ex)
+if __name__ == '__main__':
+    cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)
+    cap.set(3, 720)
+    cap.set(4, 480)
+    cv2.namedWindow("img", cv2.WINDOW_NORMAL)
+    while True:
+        try:
+            ret, frame = cap.read()
+            cv2.imshow('img', frame)
+            cv2.waitKey(10)
+            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+            image_list, face_list = detect_face(frame)
+            if not face_list:
+                continue
+            asyncio.get_event_loop().run_until_complete(send_face(face_list, image_list))
+        except Exception as ex:
+            print(ex)
 ...
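For context, the client above expects a websocket endpoint at ws://localhost:8765 that answers each 'verify' message with a JSON status (plus 'student_id' on success) and silently accepts 'save_image' messages. The real verification server is not part of this commit; the sketch below is a minimal stand-in for that endpoint, assuming the websockets handler signature of the era (handler(websocket, path)). Only the message shapes are taken from client.py; the reply logic is a placeholder.

import asyncio
import json

import numpy as np
import websockets

async def handler(websocket, path):
    # One connection per client; the client sends a sequence of JSON messages.
    async for message in websocket:
        data = json.loads(message)
        if data['action'] == 'verify':
            # 'MTCNN' holds the nested list built by face.tolist() on the client.
            face = np.array(data['MTCNN'], dtype=np.float32)
            # Placeholder: a real server would embed `face` and match it against
            # enrolled students, replying {'status': 'success', 'student_id': ...}.
            await websocket.send(json.dumps({'status': 'failed'}))
        elif data['action'] == 'save_image':
            # The client sends the cropped RGB region for failed verifications.
            image = np.array(data['image'], dtype=np.uint8)
            # Placeholder: persist `image` for later review.

start_server = websockets.serve(handler, 'localhost', 8765)
asyncio.get_event_loop().run_until_complete(start_server)
asyncio.get_event_loop().run_forever()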
client/legacy/clinet(window)0605.py (new file, 0 → 100644) @ 96abc08:
##################################################
# 1. Detect faces from the webcam.                                            #
# 2. Send images that are at least 95% likely to be a face to the image server. #
# 3. Send the preprocessed data to the verification server.                   #
##################################################
import torch
import numpy as np
import cv2
import asyncio
import websockets
import json
import os
import timeit
import base64

from PIL import Image
from io import BytesIO
import requests

from models.mtcnn import MTCNN

device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print('Running on device: {}'.format(device))

mtcnn = MTCNN(keep_all=True, device=device)

uri = 'ws://localhost:8765'

async def send_face(face_list, image_list):
    global uri
    async with websockets.connect(uri) as websocket:
        for face, image in zip(face_list, image_list):
            # type: np.float32
            send = json.dumps({"action": "verify", "MTCNN": face.tolist()})
            await websocket.send(send)
            recv = await websocket.recv()
            data = json.loads(recv)
            if data['status'] == 'success':
                # Success.
                print(data['student_id'], 'is attend')
            else:
                print('verification failed')
                send = json.dumps({'action': 'save_image', 'image': image.tolist()})
                await websocket.send(send)

def detect_face(frame):
    # If required, create a face detection pipeline using MTCNN:
    global mtcnn
    results = mtcnn.detect(frame)
    image_list = []
    if results[1][0] == None:
        return []
    for box, prob in zip(results[0], results[1]):
        if prob < 0.95:
            continue
        print('face detected. prob:', prob)
        x1, y1, x2, y2 = box
        image = frame[int(y1 - 10):int(y2 + 10), int(x1 - 10):int(x2 + 10)]
        image_list.append(image)
    return image_list

def make_face_list(frame):
    global mtcnn
    results, prob = mtcnn(frame, return_prob=True)
    face_list = []
    if prob[0] == None:
        return []
    for result, prob in zip(results, prob):
        if prob < 0.95:
            continue
        # np.float32
        face_list.append(result.numpy())
    return face_list

cap = cv2.VideoCapture(0)
cap.set(3, 720)
cap.set(4, 480)
while True:
    try:
        #start = timeit.default_timer()
        ret, frame = cap.read()
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        face_list = make_face_list(frame)
        image_list = detect_face(frame)
        ## Send to the embedding server ##
        if face_list:
            asyncio.get_event_loop().run_until_complete(send_face(face_list, image_list))
        #end = timeit.default_timer()
        #print('delta time: ', end - start)
    except Exception as ex:
        print(ex)
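One detail common to both versions: send_face() serializes each face tensor with tolist() and ships it as raw JSON, which makes the messages large (the unused base64/BytesIO/requests imports in the legacy file hint that a more compact transport may have been considered). A quick sketch of the cost, assuming MTCNN's default 160x160 float32 face crop; the shape is an assumption, only the payload structure comes from the code above:

import json
import numpy as np

# Dummy stand-in for one face tensor from make_face_list(); facenet-pytorch's
# MTCNN crops to 160x160 by default, so the assumed shape is 3x160x160.
face = np.zeros((3, 160, 160), dtype=np.float32)

# Identical to the 'verify' payload built in send_face().
send = json.dumps({'action': 'verify', 'MTCNN': face.tolist()})
print(len(send), 'bytes')  # hundreds of KB of JSON for a single face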