acmyu committed
Commit 194a721 · 1 Parent(s): 317a82a

use own easy_dwpose library
libs/easy_dwpose/__init__.py ADDED
@@ -0,0 +1,3 @@
+ from easy_dwpose.dwpose import DWposeDetector
+
+ __all__ = ["DWposeDetector"]
libs/easy_dwpose/body_estimation/__init__.py ADDED
@@ -0,0 +1,4 @@
+ from .utils import resize_image
+ from .wholebody import Wholebody
+
+ __all__ = ["Wholebody", "resize_image"]
libs/easy_dwpose/body_estimation/detector.py ADDED
@@ -0,0 +1,146 @@
+ import cv2
+ import numpy as np
+
+
+ def nms(boxes, scores, nms_thr):
+     """Single-class NMS implemented in NumPy.
+
+     Args:
+         boxes (np.ndarray): shape=(N, 4); N is the number of boxes
+         scores (np.ndarray): the scores of the bboxes
+         nms_thr (float): the IoU threshold for NMS
+
+     Returns:
+         List[int]: indices of the kept bboxes
+     """
+     x1 = boxes[:, 0]
+     y1 = boxes[:, 1]
+     x2 = boxes[:, 2]
+     y2 = boxes[:, 3]
+
+     areas = (x2 - x1 + 1) * (y2 - y1 + 1)
+     order = scores.argsort()[::-1]
+
+     keep = []
+     while order.size > 0:
+         i = order[0]
+         keep.append(i)
+         xx1 = np.maximum(x1[i], x1[order[1:]])
+         yy1 = np.maximum(y1[i], y1[order[1:]])
+         xx2 = np.minimum(x2[i], x2[order[1:]])
+         yy2 = np.minimum(y2[i], y2[order[1:]])
+
+         w = np.maximum(0.0, xx2 - xx1 + 1)
+         h = np.maximum(0.0, yy2 - yy1 + 1)
+         inter = w * h
+         ovr = inter / (areas[i] + areas[order[1:]] - inter)
+
+         inds = np.where(ovr <= nms_thr)[0]
+         order = order[inds + 1]
+
+     return keep
+
+
+ def multiclass_nms(boxes, scores, nms_thr, score_thr):
+     """Multiclass NMS implemented in NumPy. Class-aware version.
+
+     Args:
+         boxes (np.ndarray): shape=(N, 4); N is the number of boxes
+         scores (np.ndarray): per-class scores of the bboxes, shape=(N, num_classes)
+         nms_thr (float): the IoU threshold for NMS
+         score_thr (float): the class-score threshold
+
+     Returns:
+         np.ndarray: detections as (x1, y1, x2, y2, score, class_id) rows
+     """
+     final_dets = []
+     num_classes = scores.shape[1]
+     for cls_ind in range(num_classes):
+         cls_scores = scores[:, cls_ind]
+         valid_score_mask = cls_scores > score_thr
+         if valid_score_mask.sum() == 0:
+             continue
+         valid_scores = cls_scores[valid_score_mask]
+         valid_boxes = boxes[valid_score_mask]
+         keep = nms(valid_boxes, valid_scores, nms_thr)
+         if len(keep) > 0:
+             cls_inds = np.ones((len(keep), 1)) * cls_ind
+             dets = np.concatenate([valid_boxes[keep], valid_scores[keep, None], cls_inds], 1)
+             final_dets.append(dets)
+     if len(final_dets) == 0:
+         return None
+     return np.concatenate(final_dets, 0)
+
+
+ def demo_postprocess(outputs, img_size, p6=False):
+     grids = []
+     expanded_strides = []
+     strides = [8, 16, 32] if not p6 else [8, 16, 32, 64]
+
+     hsizes = [img_size[0] // stride for stride in strides]
+     wsizes = [img_size[1] // stride for stride in strides]
+
+     for hsize, wsize, stride in zip(hsizes, wsizes, strides):
+         xv, yv = np.meshgrid(np.arange(wsize), np.arange(hsize))
+         grid = np.stack((xv, yv), 2).reshape(1, -1, 2)
+         grids.append(grid)
+         shape = grid.shape[:2]
+         expanded_strides.append(np.full((*shape, 1), stride))
+
+     grids = np.concatenate(grids, 1)
+     expanded_strides = np.concatenate(expanded_strides, 1)
+     outputs[..., :2] = (outputs[..., :2] + grids) * expanded_strides
+     outputs[..., 2:4] = np.exp(outputs[..., 2:4]) * expanded_strides
+
+     return outputs
+
+
+ def preprocess(img, input_size, swap=(2, 0, 1)):
+     if len(img.shape) == 3:
+         padded_img = np.ones((input_size[0], input_size[1], 3), dtype=np.uint8) * 114
+     else:
+         padded_img = np.ones(input_size, dtype=np.uint8) * 114
+
+     r = min(input_size[0] / img.shape[0], input_size[1] / img.shape[1])
+     resized_img = cv2.resize(
+         img,
+         (int(img.shape[1] * r), int(img.shape[0] * r)),
+         interpolation=cv2.INTER_LINEAR,
+     ).astype(np.uint8)
+     padded_img[: int(img.shape[0] * r), : int(img.shape[1] * r)] = resized_img
+
+     padded_img = padded_img.transpose(swap)
+     padded_img = np.ascontiguousarray(padded_img, dtype=np.float32)
+     return padded_img, r
+
+
+ def inference_detector(session, oriImg):
+     """Run the YOLOX human detector."""
+     input_shape = (640, 640)
+     img, ratio = preprocess(oriImg, input_shape)
+
+     ort_inputs = {session.get_inputs()[0].name: img[None, :, :, :]}
+     output = session.run(None, ort_inputs)
+     predictions = demo_postprocess(output[0], input_shape)[0]
+
+     boxes = predictions[:, :4]
+     scores = predictions[:, 4:5] * predictions[:, 5:]
+
+     boxes_xyxy = np.ones_like(boxes)
+     boxes_xyxy[:, 0] = boxes[:, 0] - boxes[:, 2] / 2.0
+     boxes_xyxy[:, 1] = boxes[:, 1] - boxes[:, 3] / 2.0
+     boxes_xyxy[:, 2] = boxes[:, 0] + boxes[:, 2] / 2.0
+     boxes_xyxy[:, 3] = boxes[:, 1] + boxes[:, 3] / 2.0
+     boxes_xyxy /= ratio
+     dets = multiclass_nms(boxes_xyxy, scores, nms_thr=0.45, score_thr=0.1)
+     if dets is not None:
+         final_boxes, final_scores, final_cls_inds = dets[:, :4], dets[:, 4], dets[:, 5]
+         isscore = final_scores > 0.3
+         iscat = final_cls_inds == 0
+         isbbox = [i and j for (i, j) in zip(isscore, iscat)]
+         final_boxes = final_boxes[isbbox]
+     else:
+         final_boxes = np.array([])
+
+     return final_boxes
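
For intuition about the NMS above: with a 0.5 IoU threshold, two heavily overlapping boxes collapse to the higher-scoring one while a distant box survives. A toy check, with the expected result worked out by hand:

import numpy as np

boxes = np.array([[0, 0, 10, 10], [1, 1, 11, 11], [50, 50, 60, 60]], dtype=np.float32)
scores = np.array([0.9, 0.8, 0.7])
keep = nms(boxes, scores, nms_thr=0.5)
# keep == [0, 2]: the second box overlaps the first at IoU ≈ 0.70 and is suppressed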
libs/easy_dwpose/body_estimation/pose.py ADDED
@@ -0,0 +1,374 @@
+ from typing import List, Tuple
+
+ import cv2
+ import numpy as np
+ import onnxruntime as ort
+
+
+ def preprocess(
+     img: np.ndarray, out_bbox, input_size: Tuple[int, int] = (192, 256)
+ ) -> Tuple[list, list, list]:
+     """Do preprocessing for RTMPose model inference.
+
+     Args:
+         img (np.ndarray): Input image in shape (H, W, C).
+         out_bbox: Detected person bboxes as (x1, y1, x2, y2) rows; if empty,
+             the whole image is used as a single bbox.
+         input_size (tuple): Model input size in shape (w, h).
+
+     Returns:
+         tuple:
+         - out_img (list[np.ndarray]): Preprocessed image crops.
+         - out_center (list[np.ndarray]): Center of each bbox.
+         - out_scale (list[np.ndarray]): Scale of each bbox.
+     """
+     # get shape of image
+     img_shape = img.shape[:2]
+     out_img, out_center, out_scale = [], [], []
+     if len(out_bbox) == 0:
+         out_bbox = [[0, 0, img_shape[1], img_shape[0]]]
+     for i in range(len(out_bbox)):
+         x0 = out_bbox[i][0]
+         y0 = out_bbox[i][1]
+         x1 = out_bbox[i][2]
+         y1 = out_bbox[i][3]
+         bbox = np.array([x0, y0, x1, y1])
+
+         # get center and scale
+         center, scale = bbox_xyxy2cs(bbox, padding=1.25)
+
+         # do affine transformation
+         resized_img, scale = top_down_affine(input_size, scale, center, img)
+
+         # normalize image
+         mean = np.array([123.675, 116.28, 103.53])
+         std = np.array([58.395, 57.12, 57.375])
+         resized_img = (resized_img - mean) / std
+
+         out_img.append(resized_img)
+         out_center.append(center)
+         out_scale.append(scale)
+
+     return out_img, out_center, out_scale
+
+
+ def inference(sess: ort.InferenceSession, img: np.ndarray) -> list:
+     """Run RTMPose model inference.
+
+     Args:
+         sess (ort.InferenceSession): ONNXRuntime session.
+         img (list[np.ndarray]): Preprocessed image crops.
+
+     Returns:
+         list: RTMPose model outputs, one entry per crop.
+     """
+     all_out = []
+     # build input
+     for i in range(len(img)):
+         model_input = [img[i].transpose(2, 0, 1)]
+
+         # build output
+         sess_input = {sess.get_inputs()[0].name: model_input}
+         sess_output = []
+         for out in sess.get_outputs():
+             sess_output.append(out.name)
+
+         # run model
+         outputs = sess.run(sess_output, sess_input)
+         all_out.append(outputs)
+
+     return all_out
+
+
+ def postprocess(
+     outputs: List[np.ndarray],
+     model_input_size: Tuple[int, int],
+     center: list,
+     scale: list,
+     simcc_split_ratio: float = 2.0,
+ ) -> Tuple[np.ndarray, np.ndarray]:
+     """Postprocess the RTMPose model outputs.
+
+     Args:
+         outputs (list): Outputs of the RTMPose model, one entry per crop.
+         model_input_size (tuple): RTMPose model input image size.
+         center (list): Center (x, y) of each bbox.
+         scale (list): Scale (w, h) of each bbox.
+         simcc_split_ratio (float): Split ratio of SimCC.
+
+     Returns:
+         tuple:
+         - keypoints (np.ndarray): Rescaled keypoints.
+         - scores (np.ndarray): Model prediction scores.
+     """
+     all_key = []
+     all_score = []
+     for i in range(len(outputs)):
+         # use simcc to decode
+         simcc_x, simcc_y = outputs[i]
+         keypoints, scores = decode(simcc_x, simcc_y, simcc_split_ratio)
+
+         # rescale keypoints
+         keypoints = keypoints / model_input_size * scale[i] + center[i] - scale[i] / 2
+         all_key.append(keypoints[0])
+         all_score.append(scores[0])
+
+     return np.array(all_key), np.array(all_score)
+
+
+ def bbox_xyxy2cs(bbox: np.ndarray, padding: float = 1.0) -> Tuple[np.ndarray, np.ndarray]:
+     """Transform the bbox format from (x1, y1, x2, y2) into (center, scale).
+
+     Args:
+         bbox (ndarray): Bounding box(es) in shape (4,) or (n, 4), formatted
+             as (left, top, right, bottom)
+         padding (float): BBox padding factor that will be multiplied to scale.
+             Default: 1.0
+
+     Returns:
+         tuple: A tuple containing center and scale.
+         - np.ndarray[float32]: Center (x, y) of the bbox in shape (2,) or (n, 2)
+         - np.ndarray[float32]: Scale (w, h) of the bbox in shape (2,) or (n, 2)
+     """
+     # convert single bbox from (4, ) to (1, 4)
+     dim = bbox.ndim
+     if dim == 1:
+         bbox = bbox[None, :]
+
+     # get bbox center and scale
+     x1, y1, x2, y2 = np.hsplit(bbox, [1, 2, 3])
+     center = np.hstack([x1 + x2, y1 + y2]) * 0.5
+     scale = np.hstack([x2 - x1, y2 - y1]) * padding
+
+     if dim == 1:
+         center = center[0]
+         scale = scale[0]
+
+     return center, scale
+
+
+ def _fix_aspect_ratio(bbox_scale: np.ndarray, aspect_ratio: float) -> np.ndarray:
+     """Extend the scale to match the given aspect ratio.
+
+     Args:
+         bbox_scale (np.ndarray): The image scale (w, h) in shape (2, )
+         aspect_ratio (float): The ratio of ``w/h``
+
+     Returns:
+         np.ndarray: The reshaped image scale in (2, )
+     """
+     w, h = np.hsplit(bbox_scale, [1])
+     bbox_scale = np.where(w > h * aspect_ratio, np.hstack([w, w / aspect_ratio]), np.hstack([h * aspect_ratio, h]))
+     return bbox_scale
+
+
+ def _rotate_point(pt: np.ndarray, angle_rad: float) -> np.ndarray:
+     """Rotate a point by an angle.
+
+     Args:
+         pt (np.ndarray): 2D point coordinates (x, y) in shape (2, )
+         angle_rad (float): rotation angle in radians
+
+     Returns:
+         np.ndarray: Rotated point in shape (2, )
+     """
+     sn, cs = np.sin(angle_rad), np.cos(angle_rad)
+     rot_mat = np.array([[cs, -sn], [sn, cs]])
+     return rot_mat @ pt
+
+
+ def _get_3rd_point(a: np.ndarray, b: np.ndarray) -> np.ndarray:
+     """To calculate the affine matrix, three pairs of points are required. This
+     function is used to get the 3rd point, given 2D points a & b.
+
+     The 3rd point is defined by rotating the vector `a - b` by 90 degrees
+     anticlockwise, using b as the rotation center.
+
+     Args:
+         a (np.ndarray): The 1st point (x, y) in shape (2, )
+         b (np.ndarray): The 2nd point (x, y) in shape (2, )
+
+     Returns:
+         np.ndarray: The 3rd point.
+     """
+     direction = a - b
+     c = b + np.r_[-direction[1], direction[0]]
+     return c
+
+
+ def get_warp_matrix(
+     center: np.ndarray,
+     scale: np.ndarray,
+     rot: float,
+     output_size: Tuple[int, int],
+     shift: Tuple[float, float] = (0.0, 0.0),
+     inv: bool = False,
+ ) -> np.ndarray:
+     """Calculate the affine transformation matrix that can warp the bbox area
+     in the input image to the output size.
+
+     Args:
+         center (np.ndarray[2, ]): Center of the bounding box (x, y).
+         scale (np.ndarray[2, ]): Scale of the bounding box
+             wrt [width, height].
+         rot (float): Rotation angle (degrees).
+         output_size (np.ndarray[2, ] | list(2,)): Size of the
+             destination heatmaps.
+         shift (0-100%): Shift translation ratio wrt the width/height.
+             Default (0., 0.).
+         inv (bool): Option to invert the affine transform direction.
+             (inv=False: src->dst or inv=True: dst->src)
+
+     Returns:
+         np.ndarray: A 2x3 transformation matrix
+     """
+     shift = np.array(shift)
+     src_w = scale[0]
+     dst_w = output_size[0]
+     dst_h = output_size[1]
+
+     # compute transformation matrix
+     rot_rad = np.deg2rad(rot)
+     src_dir = _rotate_point(np.array([0.0, src_w * -0.5]), rot_rad)
+     dst_dir = np.array([0.0, dst_w * -0.5])
+
+     # get three corners of the src rectangle in the original image
+     src = np.zeros((3, 2), dtype=np.float32)
+     src[0, :] = center + scale * shift
+     src[1, :] = center + src_dir + scale * shift
+     src[2, :] = _get_3rd_point(src[0, :], src[1, :])
+
+     # get three corners of the dst rectangle in the input image
+     dst = np.zeros((3, 2), dtype=np.float32)
+     dst[0, :] = [dst_w * 0.5, dst_h * 0.5]
+     dst[1, :] = np.array([dst_w * 0.5, dst_h * 0.5]) + dst_dir
+     dst[2, :] = _get_3rd_point(dst[0, :], dst[1, :])
+
+     if inv:
+         warp_mat = cv2.getAffineTransform(np.float32(dst), np.float32(src))
+     else:
+         warp_mat = cv2.getAffineTransform(np.float32(src), np.float32(dst))
+
+     return warp_mat
+
+
+ def top_down_affine(
+     input_size: Tuple[int, int], bbox_scale: np.ndarray, bbox_center: np.ndarray, img: np.ndarray
+ ) -> Tuple[np.ndarray, np.ndarray]:
+     """Get the bbox image as the model input by affine transform.
+
+     Args:
+         input_size (tuple): The input size (w, h) of the model.
+         bbox_scale (np.ndarray): The bbox scale of the img.
+         bbox_center (np.ndarray): The bbox center of the img.
+         img (np.ndarray): The original image.
+
+     Returns:
+         tuple: A tuple containing the warped image and the adjusted scale.
+         - np.ndarray[float32]: img after affine transform.
+         - np.ndarray[float32]: bbox scale after affine transform.
+     """
+     w, h = input_size
+     warp_size = (int(w), int(h))
+
+     # reshape bbox to fixed aspect ratio
+     bbox_scale = _fix_aspect_ratio(bbox_scale, aspect_ratio=w / h)
+
+     # get the affine matrix
+     center = bbox_center
+     scale = bbox_scale
+     rot = 0
+     warp_mat = get_warp_matrix(center, scale, rot, output_size=(w, h))
+
+     # do affine transform
+     img = cv2.warpAffine(img, warp_mat, warp_size, flags=cv2.INTER_LINEAR)
+
+     return img, bbox_scale
+
+
+ def get_simcc_maximum(simcc_x: np.ndarray, simcc_y: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
+     """Get maximum response locations and values from SimCC representations.
+
+     Note:
+         instance number: N
+         num_keypoints: K
+         heatmap height: H
+         heatmap width: W
+
+     Args:
+         simcc_x (np.ndarray): x-axis SimCC in shape (N, K, Wx)
+         simcc_y (np.ndarray): y-axis SimCC in shape (N, K, Wy)
+
+     Returns:
+         tuple:
+         - locs (np.ndarray): locations of maximum heatmap responses in shape (N, K, 2)
+         - vals (np.ndarray): values of maximum heatmap responses in shape (N, K)
+     """
+     N, K, Wx = simcc_x.shape
+     simcc_x = simcc_x.reshape(N * K, -1)
+     simcc_y = simcc_y.reshape(N * K, -1)
+
+     # get maximum value locations
+     x_locs = np.argmax(simcc_x, axis=1)
+     y_locs = np.argmax(simcc_y, axis=1)
+     locs = np.stack((x_locs, y_locs), axis=-1).astype(np.float32)
+     max_val_x = np.amax(simcc_x, axis=1)
+     max_val_y = np.amax(simcc_y, axis=1)
+
+     # keep the smaller of the x- and y-axis maxima as the keypoint score
+     mask = max_val_x > max_val_y
+     max_val_x[mask] = max_val_y[mask]
+     vals = max_val_x
+     locs[vals <= 0.0] = -1
+
+     # reshape
+     locs = locs.reshape(N, K, 2)
+     vals = vals.reshape(N, K)
+
+     return locs, vals
+
+
+ def decode(simcc_x: np.ndarray, simcc_y: np.ndarray, simcc_split_ratio) -> Tuple[np.ndarray, np.ndarray]:
+     """Decode SimCC representations into keypoint locations and scores.
+
+     Args:
+         simcc_x (np.ndarray[K, Wx]): model-predicted SimCC in x.
+         simcc_y (np.ndarray[K, Wy]): model-predicted SimCC in y.
+         simcc_split_ratio (int): The split ratio of SimCC.
+
+     Returns:
+         tuple: A tuple containing keypoints and scores.
+         - np.ndarray[float32]: keypoints in shape (K, 2) or (n, K, 2)
+         - np.ndarray[float32]: scores in shape (K,) or (n, K)
+     """
+     keypoints, scores = get_simcc_maximum(simcc_x, simcc_y)
+     keypoints /= simcc_split_ratio
+
+     return keypoints, scores
+
+
+ def inference_pose(session, out_bbox, oriImg):
+     """Run pose estimation.
+
+     Args:
+         session (ort.InferenceSession): ONNXRuntime session.
+         out_bbox (np.ndarray): bbox list
+         oriImg (np.ndarray): Input image in shape (H, W, C).
+
+     Returns:
+         tuple:
+         - keypoints (np.ndarray): Rescaled keypoints.
+         - scores (np.ndarray): Model prediction scores.
+     """
+     h, w = session.get_inputs()[0].shape[2:]
+     model_input_size = (w, h)
+     # preprocess for RTMPose model inference
+     resized_img, center, scale = preprocess(oriImg, out_bbox, model_input_size)
+     # run pose estimation on the processed crops
+     outputs = inference(session, resized_img)
+     # postprocess the RTMPose model outputs
+     keypoints, scores = postprocess(outputs, model_input_size, center, scale)
+
+     return keypoints, scores
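
The helper worth internalizing here is bbox_xyxy2cs, since every crop the pose model sees is defined by the (center, scale) pair it produces. For a single box:

bbox = np.array([100, 40, 300, 240])  # (x1, y1, x2, y2), a 200 x 200 box
center, scale = bbox_xyxy2cs(bbox, padding=1.25)
# center == [200., 140.]  (box midpoint)
# scale  == [250., 250.]  (200-pixel sides scaled by the 1.25 padding factor)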
libs/easy_dwpose/body_estimation/utils.py ADDED
@@ -0,0 +1,18 @@
+ import cv2
+ import numpy as np
+
+
+ def resize_image(input_image: np.ndarray, target_resolution: int = 512, dividable_by: int = 64) -> np.ndarray:
+     height, width, _ = input_image.shape
+
+     # scale so the short edge matches the target resolution
+     k = float(target_resolution) / min(height, width)
+
+     target_width = width * k
+     target_width = int(np.round(target_width / dividable_by)) * dividable_by
+
+     target_height = height * k
+     target_height = int(np.round(target_height / dividable_by)) * dividable_by
+
+     return cv2.resize(
+         input_image, (target_width, target_height), interpolation=cv2.INTER_LANCZOS4 if k > 1 else cv2.INTER_AREA
+     )
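
Worked numbers for the rounding above: a 720 x 1280 (H x W) frame with the defaults gives k = 512 / 720 ≈ 0.711, so the width maps to 1280 * k ≈ 910 and rounds to 896 (14 * 64), while the height lands exactly on 512 (8 * 64):

resized = resize_image(frame, target_resolution=512, dividable_by=64)  # (720, 1280, 3) -> (512, 896, 3)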
libs/easy_dwpose/body_estimation/wholebody.py ADDED
@@ -0,0 +1,55 @@
+ import numpy as np
+ import onnxruntime
+
+ from .detector import inference_detector
+ from .pose import inference_pose
+
+
+ class Wholebody:
+     """Detect whole-body human pose with DWPose (person detector + pose model)."""
+
+     def __init__(self, model_det, model_pose, device="cpu"):
+         device = str(device)
+
+         if device == "cpu":
+             providers = ["CPUExecutionProvider"]
+             provider_options = None
+         else:
+             providers = ["CUDAExecutionProvider"]
+             if ":" in device:
+                 gpu_id = int(device.split(":")[1])
+                 provider_options = [{"device_id": gpu_id}]
+             else:
+                 provider_options = [{"device_id": 0}]
+
+         self.session_det = onnxruntime.InferenceSession(
+             path_or_bytes=model_det, providers=providers, provider_options=provider_options
+         )
+         self.session_pose = onnxruntime.InferenceSession(
+             path_or_bytes=model_pose, providers=providers, provider_options=provider_options
+         )
+
+     def __call__(self, oriImg):
+         """Run DWPose detection on an image.
+
+         Args:
+             oriImg (np.ndarray): input image
+
+         Returns:
+             tuple: keypoints and their scores for each detected person
+         """
+         det_result = inference_detector(self.session_det, oriImg)
+         keypoints, scores = inference_pose(self.session_pose, det_result, oriImg)
+
+         keypoints_info = np.concatenate((keypoints, scores[..., None]), axis=-1)
+         # compute the neck joint as the midpoint of the shoulders
+         neck = np.mean(keypoints_info[:, [5, 6]], axis=1)
+         # neck score used when visualizing the prediction
+         neck[:, 2:4] = np.logical_and(keypoints_info[:, 5, 2:4] > 0.3, keypoints_info[:, 6, 2:4] > 0.3).astype(int)
+         new_keypoints_info = np.insert(keypoints_info, 17, neck, axis=1)
+         # remap from MMPose to OpenPose keypoint order
+         mmpose_idx = [17, 6, 8, 10, 7, 9, 12, 14, 16, 13, 15, 2, 1, 4, 3]
+         openpose_idx = [1, 2, 3, 4, 6, 7, 8, 9, 10, 12, 13, 14, 15, 16, 17]
+         new_keypoints_info[:, openpose_idx] = new_keypoints_info[:, mmpose_idx]
+         keypoints_info = new_keypoints_info
+
+         keypoints, scores = keypoints_info[..., :2], keypoints_info[..., 2]
+
+         return keypoints, scores
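
Wholebody can also be driven directly; a sketch, assuming the two ONNX files are already on disk under checkpoints/:

model = Wholebody(model_det="checkpoints/yolox_l.onnx", model_pose="checkpoints/dw-ll_ucoco_384.onnx", device="cuda:0")
keypoints, scores = model(image)  # image: np.ndarray of shape (H, W, 3)
# keypoints: (num_people, num_keypoints, 2) pixel coordinates in OpenPose joint order
# scores:    (num_people, num_keypoints) per-joint confidences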
libs/easy_dwpose/draw/__init__.py ADDED
@@ -0,0 +1,3 @@
+ from .openpose import draw_pose as draw_openpose
+
+ __all__ = ["draw_openpose"]
libs/easy_dwpose/draw/mimic_motion.py ADDED
@@ -0,0 +1,202 @@
+ """
+ Reference drawing function from MimicMotion
+ https://github.com/Tencent/MimicMotion/blob/main/mimicmotion/dwpose/util.py
+ """
+
+ import math
+
+ import cv2
+ import matplotlib
+ import numpy as np
+
+ eps = 0.01
+
+
+ def alpha_blend_color(color, alpha):
+     """Blend a color according to the point confidence."""
+     return [int(c * alpha) for c in color]
+
+
+ def draw_bodypose(canvas, candidate, subset, score):
+     H, W, C = canvas.shape
+     candidate = np.array(candidate)
+     subset = np.array(subset)
+
+     stickwidth = 4
+
+     limbSeq = [
+         [2, 3], [2, 6], [3, 4], [4, 5], [6, 7], [7, 8], [2, 9], [9, 10],
+         [10, 11], [2, 12], [12, 13], [13, 14], [2, 1], [1, 15], [15, 17],
+         [1, 16], [16, 18], [3, 17], [6, 18],
+     ]
+
+     colors = [
+         [255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, 0], [170, 255, 0],
+         [85, 255, 0], [0, 255, 0], [0, 255, 85], [0, 255, 170], [0, 255, 255],
+         [0, 170, 255], [0, 85, 255], [0, 0, 255], [85, 0, 255], [170, 0, 255],
+         [255, 0, 255], [255, 0, 170], [255, 0, 85],
+     ]
+
+     for i in range(17):
+         for n in range(len(subset)):
+             index = subset[n][np.array(limbSeq[i]) - 1]
+             conf = score[n][np.array(limbSeq[i]) - 1]
+             if conf[0] < 0.3 or conf[1] < 0.3:
+                 continue
+             Y = candidate[index.astype(int), 0] * float(W)
+             X = candidate[index.astype(int), 1] * float(H)
+             mX = np.mean(X)
+             mY = np.mean(Y)
+             length = ((X[0] - X[1]) ** 2 + (Y[0] - Y[1]) ** 2) ** 0.5
+             angle = math.degrees(math.atan2(X[0] - X[1], Y[0] - Y[1]))
+             polygon = cv2.ellipse2Poly((int(mY), int(mX)), (int(length / 2), stickwidth), int(angle), 0, 360, 1)
+             cv2.fillConvexPoly(canvas, polygon, alpha_blend_color(colors[i], conf[0] * conf[1]))
+
+     canvas = (canvas * 0.6).astype(np.uint8)
+
+     for i in range(18):
+         for n in range(len(subset)):
+             index = int(subset[n][i])
+             if index == -1:
+                 continue
+             x, y = candidate[index][0:2]
+             conf = score[n][i]
+             x = int(x * W)
+             y = int(y * H)
+             cv2.circle(canvas, (x, y), 4, alpha_blend_color(colors[i], conf), thickness=-1)
+
+     return canvas
+
+
+ def draw_handpose(canvas, all_hand_peaks, all_hand_scores):
+     H, W, C = canvas.shape
+
+     edges = [
+         [0, 1], [1, 2], [2, 3], [3, 4], [0, 5], [5, 6], [6, 7], [7, 8],
+         [0, 9], [9, 10], [10, 11], [11, 12], [0, 13], [13, 14], [14, 15],
+         [15, 16], [0, 17], [17, 18], [18, 19], [19, 20],
+     ]
+
+     for peaks, scores in zip(all_hand_peaks, all_hand_scores):
+         for ie, e in enumerate(edges):
+             x1, y1 = peaks[e[0]]
+             x2, y2 = peaks[e[1]]
+             x1 = int(x1 * W)
+             y1 = int(y1 * H)
+             x2 = int(x2 * W)
+             y2 = int(y2 * H)
+             score = int(scores[e[0]] * scores[e[1]] * 255)
+             if x1 > eps and y1 > eps and x2 > eps and y2 > eps:
+                 cv2.line(
+                     canvas,
+                     (x1, y1),
+                     (x2, y2),
+                     matplotlib.colors.hsv_to_rgb([ie / float(len(edges)), 1.0, 1.0]) * score,
+                     thickness=2,
+                 )
+
+         for i, keypoint in enumerate(peaks):
+             x, y = keypoint
+             x = int(x * W)
+             y = int(y * H)
+             score = int(scores[i] * 255)
+             if x > eps and y > eps:
+                 cv2.circle(canvas, (x, y), 4, (0, 0, score), thickness=-1)
+     return canvas
+
+
+ def draw_facepose(canvas, all_lmks, all_scores):
+     H, W, C = canvas.shape
+     for lmks, scores in zip(all_lmks, all_scores):
+         for lmk, score in zip(lmks, scores):
+             x, y = lmk
+             x = int(x * W)
+             y = int(y * H)
+             conf = int(score * 255)
+             if x > eps and y > eps:
+                 cv2.circle(canvas, (x, y), 3, (conf, conf, conf), thickness=-1)
+     return canvas
+
+
+ def draw_pose(pose, height, width, ref_w=2160):
+     """Visualize DWPose outputs.
+
+     Args:
+         pose (dict): DWposeDetector output, see dwpose.py
+         height (int): height of the source image
+         width (int): width of the source image
+         ref_w (int, optional): reference resolution for the canvas. Defaults to 2160.
+
+     Returns:
+         np.ndarray: image pixel values in RGB mode
+     """
+     bodies = pose["bodies"]
+     body_scores = pose["body_scores"]
+     faces = pose["faces"]
+     hands = pose["hands"]
+
+     sz = min(height, width)
+     sr = (ref_w / sz) if sz != ref_w else 1
+
+     # create an empty canvas at the reference resolution
+     canvas = np.zeros(shape=(int(height * sr), int(width * sr), 3), dtype=np.uint8)
+
+     # draw body, hand and face keypoints
+     canvas = draw_bodypose(canvas, bodies, body_scores, score=body_scores)
+     canvas = draw_handpose(canvas, hands, pose["hands_scores"])
+     canvas = draw_facepose(canvas, faces, pose["faces_scores"])
+
+     return cv2.cvtColor(cv2.resize(canvas, (width, height)), cv2.COLOR_BGR2RGB)
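
Because DWposeDetector forwards extra keyword arguments to whichever drawer it is given, this renderer can be swapped in for the default; a sketch, given a DWposeDetector instance named detector:

from libs.easy_dwpose.draw.mimic_motion import draw_pose as draw_mimic_motion

skeleton, pose = detector(image, draw_pose=draw_mimic_motion, output_type="np", ref_w=2160)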
libs/easy_dwpose/draw/musepose.py ADDED
@@ -0,0 +1,232 @@
+ """
+ Reference drawing function from MusePose
+ https://github.com/TMElyralab/MusePose/blob/main/pose/script/dwpose.py
+ """
+
+ import math
+
+ import cv2
+ import numpy as np
+
+ eps = 0.01
+
+
+ def smart_width(d):
+     """Pick a stroke width that grows with the limb length d (in pixels)."""
+     if d < 5:
+         return 1
+     elif d < 10:
+         return 2
+     elif d < 20:
+         return 3
+     elif d < 40:
+         return 4
+     elif d < 80:
+         return 5
+     elif d < 160:
+         return 6
+     elif d < 320:
+         return 7
+     else:
+         return 8
+
+
+ def draw_bodypose(canvas, candidate, subset):
+     H, W, C = canvas.shape
+     candidate = np.array(candidate)
+     subset = np.array(subset)
+
+     limbSeq = [
+         [2, 3], [2, 6], [3, 4], [4, 5], [6, 7], [7, 8], [2, 9], [9, 10],
+         [10, 11], [2, 12], [12, 13], [13, 14], [2, 1], [1, 15], [15, 17],
+         [1, 16], [16, 18], [3, 17], [6, 18],
+     ]
+
+     colors = [
+         [255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, 0], [170, 255, 0],
+         [85, 255, 0], [0, 255, 0], [0, 255, 85], [0, 255, 170], [0, 255, 255],
+         [0, 170, 255], [0, 85, 255], [0, 0, 255], [85, 0, 255], [170, 0, 255],
+         [255, 0, 255], [255, 0, 170], [255, 0, 85],
+     ]
+
+     for i in range(17):
+         for n in range(len(subset)):
+             index = subset[n][np.array(limbSeq[i]) - 1]
+             if -1 in index:
+                 continue
+             Y = candidate[index.astype(int), 0] * float(W)
+             X = candidate[index.astype(int), 1] * float(H)
+             mX = np.mean(X)
+             mY = np.mean(Y)
+             length = ((X[0] - X[1]) ** 2 + (Y[0] - Y[1]) ** 2) ** 0.5
+             angle = math.degrees(math.atan2(X[0] - X[1], Y[0] - Y[1]))
+
+             width = smart_width(length)
+             polygon = cv2.ellipse2Poly((int(mY), int(mX)), (int(length / 2), width), int(angle), 0, 360, 1)
+             cv2.fillConvexPoly(canvas, polygon, colors[i])
+
+     canvas = (canvas * 0.6).astype(np.uint8)
+
+     for i in range(18):
+         for n in range(len(subset)):
+             index = int(subset[n][i])
+             if index == -1:
+                 continue
+             x, y = candidate[index][0:2]
+             x = int(x * W)
+             y = int(y * H)
+             radius = 4
+             cv2.circle(canvas, (x, y), radius, colors[i], thickness=-1)
+
+     return canvas
+
+
+ def draw_handpose(canvas, all_hand_peaks):
+     import matplotlib
+
+     H, W, C = canvas.shape
+
+     edges = [
+         [0, 1], [1, 2], [2, 3], [3, 4], [0, 5], [5, 6], [6, 7], [7, 8],
+         [0, 9], [9, 10], [10, 11], [11, 12], [0, 13], [13, 14], [14, 15],
+         [15, 16], [0, 17], [17, 18], [18, 19], [19, 20],
+     ]
+
+     # all_hand_peaks: (person_number*2, 21, 2)
+     for i in range(len(all_hand_peaks)):
+         peaks = all_hand_peaks[i]
+         peaks = np.array(peaks)
+
+         for ie, e in enumerate(edges):
+             x1, y1 = peaks[e[0]]
+             x2, y2 = peaks[e[1]]
+
+             x1 = int(x1 * W)
+             y1 = int(y1 * H)
+             x2 = int(x2 * W)
+             y2 = int(y2 * H)
+             if x1 > eps and y1 > eps and x2 > eps and y2 > eps:
+                 length = ((x1 - x2) ** 2 + (y1 - y2) ** 2) ** 0.5
+                 width = smart_width(length)
+                 cv2.line(
+                     canvas,
+                     (x1, y1),
+                     (x2, y2),
+                     matplotlib.colors.hsv_to_rgb([ie / float(len(edges)), 1.0, 1.0]) * 255,
+                     thickness=width,
+                 )
+
+         for keypoint in peaks:
+             x, y = keypoint
+
+             x = int(x * W)
+             y = int(y * H)
+             if x > eps and y > eps:
+                 radius = 3
+                 cv2.circle(canvas, (x, y), radius, (0, 0, 255), thickness=-1)
+     return canvas
+
+
+ def draw_facepose(canvas, all_lmks):
+     H, W, C = canvas.shape
+     for lmks in all_lmks:
+         lmks = np.array(lmks)
+         for lmk in lmks:
+             x, y = lmk
+             x = int(x * W)
+             y = int(y * H)
+             if x > eps and y > eps:
+                 radius = 3
+                 cv2.circle(canvas, (x, y), radius, (255, 255, 255), thickness=-1)
+     return canvas
+
+
+ # Calculate the output resolution
+ def size_calculate(h, w, resolution):
+     H = float(h)
+     W = float(w)
+
+     # resize the short edge to the target resolution
+     k = float(resolution) / min(H, W)  # short edge
+     H *= k
+     W *= k
+
+     # round to the nearest integer multiple of 64
+     H = int(np.round(H / 64.0)) * 64
+     W = int(np.round(W / 64.0)) * 64
+     return H, W
+
+
+ def warpAffine_kps(kps, M):
+     a = M[:, :2]
+     t = M[:, 2]
+     kps = np.dot(kps, a.T) + t
+     return kps
+
+
+ def draw_pose(pose, height, width, draw_face):
+     # keep only the most significant person
+     faces = pose["faces"][:1]
+     hands = pose["hands"][:2]
+     bodies = pose["bodies"][:18]
+     body_scores = pose["body_scores"][:1]
+
+     # draw
+     canvas = np.zeros(shape=(height, width, 3), dtype=np.uint8)
+     canvas = draw_bodypose(canvas, bodies, body_scores)
+     canvas = draw_handpose(canvas, hands)
+     if draw_face:
+         canvas = draw_facepose(canvas, faces)
+
+     return canvas
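
This renderer keeps only the most significant person and exposes a draw_face toggle, which reaches it through the detector's **kwargs; a sketch, again given a DWposeDetector instance named detector:

from libs.easy_dwpose.draw.musepose import draw_pose as draw_musepose

skeleton, pose = detector(image, draw_pose=draw_musepose, output_type="np", draw_face=False)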
libs/easy_dwpose/draw/openpose.py ADDED
@@ -0,0 +1,176 @@
+ import math
+
+ import cv2
+ import numpy as np
+
+ eps = 0.01
+
+
+ def draw_bodypose(canvas, candidate, subset):
+     H, W, C = canvas.shape
+     candidate = np.array(candidate)
+     subset = np.array(subset)
+
+     stickwidth = 4
+
+     limbSeq = [
+         [2, 3], [2, 6], [3, 4], [4, 5], [6, 7], [7, 8], [2, 9], [9, 10],
+         [10, 11], [2, 12], [12, 13], [13, 14], [2, 1], [1, 15], [15, 17],
+         [1, 16], [16, 18], [3, 17], [6, 18],
+     ]
+
+     colors = [
+         [255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, 0], [170, 255, 0],
+         [85, 255, 0], [0, 255, 0], [0, 255, 85], [0, 255, 170], [0, 255, 255],
+         [0, 170, 255], [0, 85, 255], [0, 0, 255], [85, 0, 255], [170, 0, 255],
+         [255, 0, 255], [255, 0, 170], [255, 0, 85],
+     ]
+
+     for i in range(17):
+         for n in range(len(subset)):
+             index = subset[n][np.array(limbSeq[i]) - 1]
+             if -1 in index:
+                 continue
+             Y = candidate[index.astype(int), 0] * float(W)
+             X = candidate[index.astype(int), 1] * float(H)
+             mX = np.mean(X)
+             mY = np.mean(Y)
+             length = ((X[0] - X[1]) ** 2 + (Y[0] - Y[1]) ** 2) ** 0.5
+             angle = math.degrees(math.atan2(X[0] - X[1], Y[0] - Y[1]))
+             polygon = cv2.ellipse2Poly((int(mY), int(mX)), (int(length / 2), stickwidth), int(angle), 0, 360, 1)
+             cv2.fillConvexPoly(canvas, polygon, colors[i])
+
+     canvas = (canvas * 0.6).astype(np.uint8)
+
+     for i in range(18):
+         for n in range(len(subset)):
+             index = int(subset[n][i])
+             if index == -1:
+                 continue
+             x, y = candidate[index][0:2]
+             x = int(x * W)
+             y = int(y * H)
+             cv2.circle(canvas, (x, y), 4, colors[i], thickness=-1)
+
+     return canvas
+
+
+ def draw_handpose(canvas, all_hand_peaks):
+     import matplotlib
+
+     H, W, C = canvas.shape
+
+     edges = [
+         [0, 1], [1, 2], [2, 3], [3, 4], [0, 5], [5, 6], [6, 7], [7, 8],
+         [0, 9], [9, 10], [10, 11], [11, 12], [0, 13], [13, 14], [14, 15],
+         [15, 16], [0, 17], [17, 18], [18, 19], [19, 20],
+     ]
+
+     # all_hand_peaks: (person_number*2, 21, 2)
+     for i in range(len(all_hand_peaks)):
+         peaks = all_hand_peaks[i]
+         peaks = np.array(peaks)
+
+         for ie, e in enumerate(edges):
+             x1, y1 = peaks[e[0]]
+             x2, y2 = peaks[e[1]]
+
+             x1 = int(x1 * W)
+             y1 = int(y1 * H)
+             x2 = int(x2 * W)
+             y2 = int(y2 * H)
+             if x1 > eps and y1 > eps and x2 > eps and y2 > eps:
+                 cv2.line(
+                     canvas,
+                     (x1, y1),
+                     (x2, y2),
+                     matplotlib.colors.hsv_to_rgb([ie / float(len(edges)), 1.0, 1.0]) * 255,
+                     thickness=2,
+                 )
+
+         for keypoint in peaks:
+             x, y = keypoint
+
+             x = int(x * W)
+             y = int(y * H)
+             if x > eps and y > eps:
+                 cv2.circle(canvas, (x, y), 4, (0, 0, 255), thickness=-1)
+     return canvas
+
+
+ def draw_facepose(canvas, all_lmks):
+     H, W, C = canvas.shape
+     for lmks in all_lmks:
+         lmks = np.array(lmks)
+         for lmk in lmks:
+             x, y = lmk
+             x = int(x * W)
+             y = int(y * H)
+             if x > eps and y > eps:
+                 cv2.circle(canvas, (x, y), 3, (255, 255, 255), thickness=-1)
+     return canvas
+
+
+ def draw_pose(pose, height: int, width: int, include_face: bool = True, include_hands: bool = True) -> np.ndarray:
+     canvas = np.zeros(shape=(height, width, 3), dtype=np.uint8)
+
+     candidate = pose["bodies"]
+     subset = pose["body_scores"]
+     canvas = draw_bodypose(canvas, candidate, subset)
+
+     if include_face:
+         faces = pose["faces"]
+         canvas = draw_facepose(canvas, faces)
+
+     if include_hands:
+         hands = pose["hands"]
+         canvas = draw_handpose(canvas, hands)
+
+     return canvas
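
draw_openpose is the drawer DWposeDetector uses by default, so its include_face / include_hands flags can be passed straight through the detector call; this is the form main.py uses below:

skeleton, pose = detector(image, include_hands=True, include_face=False)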
libs/easy_dwpose/dwpose.py ADDED
@@ -0,0 +1,91 @@
+ from typing import Callable, Dict, Optional, Union
+
+ import cv2
+ import numpy as np
+ import PIL
+ import PIL.Image
+ import torch
+ from huggingface_hub import hf_hub_download
+
+ from easy_dwpose.body_estimation import Wholebody, resize_image
+ from easy_dwpose.draw import draw_openpose
+
+
+ class DWposeDetector:
+     def __init__(self, device: str = "cpu"):
+         hf_hub_download("RedHash/DWPose", "yolox_l.onnx", local_dir="./checkpoints")
+         hf_hub_download("RedHash/DWPose", "dw-ll_ucoco_384.onnx", local_dir="./checkpoints")
+         self.pose_estimation = Wholebody(
+             device=device, model_det="checkpoints/yolox_l.onnx", model_pose="checkpoints/dw-ll_ucoco_384.onnx"
+         )
+
+     def _format_pose(self, candidates, scores, width, height):
+         num_candidates, _, locs = candidates.shape
+
+         # normalize coordinates to [0, 1]
+         candidates[..., 0] /= float(width)
+         candidates[..., 1] /= float(height)
+
+         bodies = candidates[:, :18].copy()
+         bodies = bodies.reshape(num_candidates * 18, locs)
+
+         # encode flat joint indices for confident joints, -1 otherwise
+         body_scores = scores[:, :18]
+         for i in range(len(body_scores)):
+             for j in range(len(body_scores[i])):
+                 if body_scores[i][j] > 0.3:
+                     body_scores[i][j] = int(18 * i + j)
+                 else:
+                     body_scores[i][j] = -1
+
+         faces = candidates[:, 24:92]
+         faces_scores = scores[:, 24:92]
+
+         hands = np.vstack([candidates[:, 92:113], candidates[:, 113:]])
+         hands_scores = np.vstack([scores[:, 92:113], scores[:, 113:]])
+
+         pose = dict(
+             bodies=bodies,
+             body_scores=body_scores,
+             hands=hands,
+             hands_scores=hands_scores,
+             faces=faces,
+             faces_scores=faces_scores,
+         )
+
+         return pose
+
+     @torch.inference_mode()
+     def __call__(
+         self,
+         image: Union[PIL.Image.Image, np.ndarray],
+         detect_resolution: int = 512,
+         draw_pose: Optional[Callable] = draw_openpose,
+         output_type: str = "pil",
+         **kwargs,
+     ) -> Union[PIL.Image.Image, np.ndarray, Dict]:
+         if not isinstance(image, np.ndarray):
+             image = np.array(image.convert("RGB"))
+
+         image = image.copy()
+         original_height, original_width, _ = image.shape
+
+         image = resize_image(image, target_resolution=detect_resolution)
+         height, width, _ = image.shape
+
+         candidates, scores = self.pose_estimation(image)
+
+         pose = self._format_pose(candidates, scores, width, height)
+
+         if not draw_pose:
+             return pose
+
+         pose_image = draw_pose(pose, height=height, width=width, **kwargs)
+         pose_image = cv2.resize(pose_image, (original_width, original_height), interpolation=cv2.INTER_LANCZOS4)
+
+         if output_type == "pil":
+             pose_image = PIL.Image.fromarray(pose_image)
+         elif output_type == "np":
+             pass
+         else:
+             raise ValueError("output_type should be 'pil' or 'np'")
+
+         return pose_image, pose
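
Note the return convention: with a drawer attached, __call__ returns the rendered image together with the raw pose dict, while draw_pose=None returns the dict alone. A short sketch:

detector = DWposeDetector(device="cpu")
pose_image, pose = detector(pil_image)             # rendered with draw_openpose by default
pose_only = detector(pil_image, draw_pose=None)    # skip drawing, keep only the keypoint dict

This two-value return is why the main.py hunk below unpacks out_img, _ from the dwpose(...) call.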
main.py CHANGED
@@ -48,7 +48,7 @@ from src.pipelines.PCDMs_pipeline import PCDMsPipeline
 
 
  import spaces
- from easy_dwpose import DWposeDetector
+ from .libs.easy_dwpose import DWposeDetector
  from PIL import Image
  import cv2
  import os
@@ -212,7 +212,7 @@ def get_pose(img, dwpose, outfile, crop=False):
  #skeleton = dwpose(pil_image, output_type="np", include_hands=True, include_face=False)
 
  #img.thumbnail((512,512))
- out_img = dwpose(img, include_hands=True, include_face=False)
+ out_img, _ = dwpose(img, include_hands=True, include_face=False)
 
  #print(pose['bodies'])