Graduate

make client

##################################################################
# 1. Detect faces from the webcam feed.
# 2. Send images that are at least 95% likely to contain a face
#    to the image server.
# 3. Send the preprocessed face data to the verification server.
##################################################################
import torch
import numpy as np
import cv2
import asyncio
import websockets
import json
import timeit

from models.mtcnn import MTCNN

device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print('Running on device: {}'.format(device))

mtcnn = MTCNN(keep_all=True, device=device)

uri = 'ws://localhost:8765'

async def send_face(face_list):
    # Send each aligned face to the verification server over a websocket.
    async with websockets.connect(uri) as websocket:
        for face in face_list:
            # face is an np.float32 array, (3, 160, 160) with MTCNN's default image_size.
            print(face.shape)
            data = json.dumps({"action": "verify", "MTCNN": face.tolist()})
            await websocket.send(data)
        print('send: verify', len(face_list), 'face(s)')
        code = await websocket.recv()
        print('code:', code)


async def send_image(image_list):
    # Send each raw face crop to the image server, shape included alongside
    # the nested pixel list.
    async with websockets.connect(uri) as websocket:
        for image in image_list:
            data = json.dumps({"action": "save_image", "image": image.tolist(), "shape": image.shape})
            await websocket.send(data)
        print('send', len(image_list), 'image(s)')
        code = await websocket.recv()
        print('code:', code)

def detect_face(frame):
    # Detect face bounding boxes with MTCNN and keep crops whose detection
    # probability is at least 0.95.
    boxes, probs = mtcnn.detect(frame)
    image_list = []
    if boxes is None:
        return image_list
    for box, prob in zip(boxes, probs):
        if prob < 0.95:
            continue
        print('face detected. prob:', prob)
        x1, y1, x2, y2 = box
        # Pad the box by 10 pixels, clamping at 0 so negative indices
        # cannot wrap around to the other side of the frame.
        y1, x1 = max(int(y1) - 10, 0), max(int(x1) - 10, 0)
        image_list.append(frame[y1:int(y2) + 10, x1:int(x2) + 10])
    return image_list

def make_face_list(frame):
    # Run the full MTCNN pipeline (detect, crop, align, prewhiten) and keep
    # faces whose detection probability is at least 0.95.
    faces, probs = mtcnn(frame, return_prob=True)
    face_list = []
    if faces is None:
        return face_list
    for face, prob in zip(faces, probs):
        if prob < 0.95:
            continue
        # Each face is a float32 tensor; move it off the GPU before
        # converting to numpy for JSON serialization.
        face_list.append(face.cpu().numpy())
    return face_list

cap = cv2.VideoCapture(0)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 720)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
while True:
    try:
        #start = timeit.default_timer()
        ret, frame = cap.read()
        if not ret:
            continue
        # MTCNN expects RGB input; OpenCV captures in BGR order.
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        face_list = make_face_list(frame)
        image_list = detect_face(frame)
        # Send aligned faces to the verification (embedding) server.
        if face_list:
            asyncio.get_event_loop().run_until_complete(send_face(face_list))
        # Send raw crops to the image server.
        if image_list:
            asyncio.get_event_loop().run_until_complete(send_image(image_list))
        #end = timeit.default_timer()
        #print('delta time: ', end - start)
    except Exception as ex:
        print(ex)
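
The receiving end of this exchange is not part of the commit. As a rough sketch, a server that accepts the "verify" and "save_image" actions above could look like the following; the handler structure and the "200" reply are assumptions, only the JSON message format mirrors the client (the legacy handler signature matches the client's asyncio-loop style of the websockets library):

# Hypothetical receiver for the messages sent above -- not part of this
# commit; only the JSON format is taken from the client.
import asyncio
import json

import numpy as np
import websockets

async def handler(websocket, path):
    async for message in websocket:
        request = json.loads(message)
        if request["action"] == "verify":
            face = np.array(request["MTCNN"], dtype=np.float32)
            # ... run the verification model on `face` here ...
            await websocket.send("200")
        elif request["action"] == "save_image":
            image = np.array(request["image"], dtype=np.uint8)
            # ... persist the crop to the image store here ...
            await websocket.send("200")

start_server = websockets.serve(handler, "localhost", 8765)
asyncio.get_event_loop().run_until_complete(start_server)
asyncio.get_event_loop().run_forever()
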
import torch
import numpy as np
import matplotlib.pyplot as plt
import os

from PIL import Image

from models import mtcnn
from models import inception_resnet_v1

device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print('Running on device: {}'.format(device))

def extract_face(filename, required_size=(224, 224)):
    # Create a face detection pipeline using MTCNN and return every detected
    # face as an array resized to `required_size`.
    mtcnn_model = mtcnn.MTCNN(keep_all=True, device=device)
    pixels = plt.imread(os.path.join(os.path.abspath(''), filename))
    boxes, probs = mtcnn_model.detect(pixels)
    face_array = []
    if boxes is None:
        return face_array
    for box, prob in zip(boxes, probs):
        print('face detected. prob:', prob)
        x1, y1, x2, y2 = box
        face = pixels[int(y1):int(y2), int(x1):int(x2)]
        image = Image.fromarray(face).resize(required_size)
        face_array.append(np.asarray(image))
    return face_array

face_array = extract_face('image/test1.jpg')
for face in face_array:
    plt.figure()
    plt.imshow(face)
    plt.show()

face_array = extract_face('image/test2.jpg')
for face in face_array:
    plt.figure()
    plt.imshow(face)
    plt.show()

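inception_resnet_v1 is imported above but never called, so the embedding step this test script is presumably building toward is missing. A minimal sketch of that step, assuming the facenet-pytorch InceptionResnetV1 API with pretrained VGGFace2 weights; the (x - 127.5) / 128 normalization mirrors that library's fixed image standardization, and embed_faces is a hypothetical helper, not code from this commit:

# Hypothetical embedding step (assumes the facenet-pytorch
# InceptionResnetV1 API; not part of this commit).
resnet = inception_resnet_v1.InceptionResnetV1(pretrained='vggface2').eval().to(device)

def embed_faces(face_array):
    # Stack the HxWx3 uint8 crops into an NxCxHxW float batch.
    batch = torch.stack([torch.tensor(face, dtype=torch.float32).permute(2, 0, 1)
                         for face in face_array])
    batch = (batch - 127.5) / 128.0  # fixed image standardization
    with torch.no_grad():
        return resnet(batch.to(device))  # one 512-d embedding per face

embeddings = embed_faces(extract_face('image/test1.jpg'))
print(embeddings.shape)
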
import torch
import numpy as np
import time


class Logger(object):
    """Prints a running single-line loss/metric readout for each batch."""

    def __init__(self, mode, length, calculate_mean=False):
        self.mode = mode
        self.length = length
        self.calculate_mean = calculate_mean
        if self.calculate_mean:
            self.fn = lambda x, i: x / (i + 1)
        else:
            self.fn = lambda x, i: x

    def __call__(self, loss, metrics, i):
        track_str = '\r{} | {:5d}/{:<5d}| '.format(self.mode, i + 1, self.length)
        loss_str = 'loss: {:9.4f} | '.format(self.fn(loss, i))
        metric_str = ' | '.join('{}: {:9.4f}'.format(k, self.fn(v, i)) for k, v in metrics.items())
        print(track_str + loss_str + metric_str + '    ', end='')
        if i + 1 == self.length:
            print('')


class BatchTimer(object):
    """Batch timing class.

    Use this class for tracking training and testing time/rate per batch or
    per sample.

    Keyword Arguments:
        rate {bool} -- Whether to report a rate (batches or samples per second)
            or a time (seconds per batch or sample). (default: {True})
        per_sample {bool} -- Whether to report times or rates per sample or
            per batch. (default: {True})
    """

    def __init__(self, rate=True, per_sample=True):
        self.start = time.time()
        self.end = None
        self.rate = rate
        self.per_sample = per_sample

    def __call__(self, y_pred, y):
        self.end = time.time()
        elapsed = self.end - self.start
        self.start = self.end
        self.end = None

        if self.per_sample:
            elapsed /= len(y_pred)
        if self.rate:
            elapsed = 1 / elapsed

        return torch.tensor(elapsed)


def accuracy(logits, y):
    _, preds = torch.max(logits, 1)
    return (preds == y).float().mean()


def pass_epoch(
    model, loss_fn, loader, optimizer=None, scheduler=None,
    batch_metrics={'time': BatchTimer()}, show_running=True,
    device='cpu', writer=None
):
    """Train or evaluate over a data epoch.

    Arguments:
        model {torch.nn.Module} -- Pytorch model.
        loss_fn {callable} -- A function to compute (scalar) loss.
        loader {torch.utils.data.DataLoader} -- A pytorch data loader.

    Keyword Arguments:
        optimizer {torch.optim.Optimizer} -- A pytorch optimizer, required when the
            model is in training mode. (default: {None})
        scheduler {torch.optim.lr_scheduler._LRScheduler} -- LR scheduler (default: {None})
        batch_metrics {dict} -- Dictionary of metric functions to call on each batch. The
            default is a simple timer. A progressive average of these metrics, along with
            the average loss, is printed every batch. (default: {{'time': BatchTimer()}})
        show_running {bool} -- Whether to print losses and metrics as running averages
            (True) or for the current batch only (False). (default: {True})
        device {str or torch.device} -- Device for pytorch to use. (default: {'cpu'})
        writer {torch.utils.tensorboard.SummaryWriter} -- Tensorboard SummaryWriter; must
            carry custom `iteration` and `interval` attributes set by the caller (see the
            sketch after this file). (default: {None})

    Returns:
        tuple(torch.Tensor, dict) -- A tuple of the average loss and a dictionary of
            average metric values across the epoch.
    """

    mode = 'Train' if model.training else 'Valid'
    logger = Logger(mode, length=len(loader), calculate_mean=show_running)
    loss = 0
    metrics = {}

    for i_batch, (x, y) in enumerate(loader):
        x = x.to(device)
        y = y.to(device)
        y_pred = model(x)
        loss_batch = loss_fn(y_pred, y)

        # Only backprop and step the optimizer in training mode.
        if model.training:
            loss_batch.backward()
            optimizer.step()
            optimizer.zero_grad()

        metrics_batch = {}
        for metric_name, metric_fn in batch_metrics.items():
            metrics_batch[metric_name] = metric_fn(y_pred, y).detach().cpu()
            metrics[metric_name] = metrics.get(metric_name, 0) + metrics_batch[metric_name]

        if writer is not None and model.training:
            if writer.iteration % writer.interval == 0:
                writer.add_scalars('loss', {mode: loss_batch.detach().cpu()}, writer.iteration)
                for metric_name, metric_batch in metrics_batch.items():
                    writer.add_scalars(metric_name, {mode: metric_batch}, writer.iteration)
            writer.iteration += 1

        loss_batch = loss_batch.detach().cpu()
        loss += loss_batch
        if show_running:
            logger(loss, metrics, i_batch)
        else:
            logger(loss_batch, metrics_batch, i_batch)

    if model.training and scheduler is not None:
        scheduler.step()

    loss = loss / (i_batch + 1)
    metrics = {k: v / (i_batch + 1) for k, v in metrics.items()}

    if writer is not None and not model.training:
        writer.add_scalars('loss', {mode: loss.detach()}, writer.iteration)
        for metric_name, metric in metrics.items():
            writer.add_scalars(metric_name, {mode: metric}, writer.iteration)

    return loss, metrics


def collate_pil(x):
    # Collate function that keeps PIL images (and their labels) in plain
    # lists instead of stacking them into tensors.
    out_x, out_y = [], []
    for xx, yy in x:
        out_x.append(xx)
        out_y.append(yy)
    return out_x, out_y
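
pass_epoch expects the SummaryWriter to carry two extra attributes, iteration and interval, which are not part of the stock tensorboard API, so the caller must set them before the first epoch. A minimal, self-contained sketch of a loop driving pass_epoch; the linear model, synthetic data, and hyperparameters below are placeholders, not part of this commit:

# Hypothetical driver for pass_epoch (model, data, and hyperparameters
# below are synthetic placeholders, not part of this commit).
import torch
from torch.utils.data import DataLoader, TensorDataset
from torch.utils.tensorboard import SummaryWriter

device = 'cuda:0' if torch.cuda.is_available() else 'cpu'

# Tiny synthetic 10-class problem, just to exercise the loop.
x = torch.randn(256, 32)
y = torch.randint(0, 10, (256,))
train_loader = DataLoader(TensorDataset(x[:192], y[:192]), batch_size=32)
val_loader = DataLoader(TensorDataset(x[192:], y[192:]), batch_size=32)

model = torch.nn.Linear(32, 10).to(device)
loss_fn = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[5, 10])

writer = SummaryWriter()
writer.iteration = 0   # pass_epoch reads and increments this custom attribute
writer.interval = 10   # log to tensorboard every 10 training batches

for epoch in range(3):
    model.train()
    pass_epoch(model, loss_fn, train_loader, optimizer, scheduler,
               batch_metrics={'acc': accuracy, 'fps': BatchTimer()},
               show_running=True, device=device, writer=writer)
    model.eval()
    with torch.no_grad():
        pass_epoch(model, loss_fn, val_loader,
                   batch_metrics={'acc': accuracy, 'fps': BatchTimer()},
                   show_running=True, device=device, writer=writer)

writer.close()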