장규범

feat: add pose-estimation

import argparse
import logging
import time

import cv2
import numpy as np

from tf_pose.estimator import TfPoseEstimator
from tf_pose.networks import get_graph_path, model_wh

logger = logging.getLogger('TfPoseEstimator-WebCam')
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('[%(asctime)s] [%(name)s] [%(levelname)s] %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)

fps_time = 0


def str2bool(v):
    return v.lower() in ("yes", "true", "t", "1")

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='tf-pose-estimation realtime webcam')
    parser.add_argument('--video', type=str, default='',
                        help='path to a video file. If omitted, the default webcam (device 0) is used.')
    parser.add_argument('--resize', type=str, default='0x0',
                        help='if provided, resize images before they are processed. '
                             'default=0x0. Recommended: 432x368, 656x368 or 1312x736.')
    parser.add_argument('--resize-out-ratio', type=float, default=4.0,
                        help='if provided, resize heatmaps before they are post-processed. default=4.0')
    parser.add_argument('--model', type=str, default='mobilenet_thin',
                        help='cmu / mobilenet_thin / mobilenet_v2_large / mobilenet_v2_small')
    parser.add_argument('--show-process', type=bool, default=False,
                        help='for debug purposes; if enabled, inference speed drops.')
    parser.add_argument('--tensorrt', type=str, default='False',
                        help='use TensorRT for inference.')
    args = parser.parse_args()

    logger.debug('initialization %s : %s' % (args.model, get_graph_path(args.model)))
    w, h = model_wh(args.resize)
    if w > 0 and h > 0:
        e = TfPoseEstimator(get_graph_path(args.model), target_size=(w, h), trt_bool=str2bool(args.tensorrt))
    else:
        e = TfPoseEstimator(get_graph_path(args.model), target_size=(432, 368), trt_bool=str2bool(args.tensorrt))

    logger.debug('cam read+')
    # fall back to the default webcam when no video file is given
    cam = cv2.VideoCapture(args.video if args.video else 0)
    ret_val, image = cam.read()
    if not ret_val:
        raise RuntimeError('could not read a frame from %s' % (args.video if args.video else 'webcam 0'))
    logger.info('cam image=%dx%d' % (image.shape[1], image.shape[0]))

    # create a video writer matching the input frame size and the .mp4 container
    out = cv2.VideoWriter('output.mp4', cv2.VideoWriter_fourcc(*'mp4v'), 30.0,
                          (image.shape[1], image.shape[0]))

    while True:
        ret_val, image = cam.read()
        if not ret_val:
            # end of the video file or camera stream
            break

        logger.debug('image process+')
        humans = e.inference(image, resize_to_default=(w > 0 and h > 0), upsample_size=args.resize_out_ratio)

        logger.debug('postprocess+')
        image = TfPoseEstimator.draw_humans(image, humans, imgcopy=False)

        logger.debug('show+')
        cv2.putText(image,
                    "FPS: %f" % (1.0 / (time.time() - fps_time)),
                    (10, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                    (0, 255, 0), 2)
        cv2.imshow('tf-pose-estimation result', image)

        fps_time = time.time()
        out.write(image)  # write the annotated frame to output.mp4

        if cv2.waitKey(1) == 27:  # ESC key quits
            break

        logger.debug('finished+')

    cam.release()
    out.release()
    cv2.destroyAllWindows()
-Subproject commit 0c6703de0aa441c50605a2484bfcc26aa043de9b
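
As a quick sanity check of the estimator setup outside the video loop, a minimal single-image sketch using the same tf_pose calls as the script might look like the following. The file names sample.jpg / sample_pose.jpg and the mobilenet_thin model choice are placeholders, not part of this commit, and it assumes tf-pose-estimation and its graph files are installed:

# Minimal single-image sketch (assumptions: tf-pose-estimation installed,
# 'sample.jpg' is any test image you provide; names are placeholders).
import cv2

from tf_pose.estimator import TfPoseEstimator
from tf_pose.networks import get_graph_path

estimator = TfPoseEstimator(get_graph_path('mobilenet_thin'), target_size=(432, 368))
frame = cv2.imread('sample.jpg')
humans = estimator.inference(frame, resize_to_default=True, upsample_size=4.0)
annotated = TfPoseEstimator.draw_humans(frame, humans, imgcopy=False)
cv2.imwrite('sample_pose.jpg', annotated)
print('detected %d humans' % len(humans))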