id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
13,206 | import torch
import torch.nn as nn
from torch import searchsorted
def sample_pdf(bins, weights, N_samples, det=False):
    """Hierarchical sampling: draw `N_samples` positions from the
    piecewise-constant PDF defined by `weights` over `bins` by inverting
    the CDF (standard NeRF fine-sampling step).

    Args:
        bins: (batch, n_bins) bin edge positions; n_bins == weights.shape[-1] + 1.
        weights: (batch, n_bins - 1) unnormalized per-bin weights.
        N_samples: number of samples to draw per batch row.
        det: if True use evenly spaced samples in [0, 1] (deterministic);
             otherwise draw uniform random samples.

    Returns:
        samples: (batch, N_samples) positions along the bins.
    """
    # Build the normalized PDF and its CDF; the epsilon prevents NaNs when a
    # row of weights is all zero.
    weights = weights + 1e-5
    pdf = weights / torch.sum(weights, -1, keepdim=True)
    cdf = torch.cumsum(pdf, -1)
    cdf = torch.cat([torch.zeros_like(cdf[..., :1]), cdf], -1)  # (batch, n_bins)
    # Uniform samples in [0, 1].
    if det:
        u = torch.linspace(0., 1., steps=N_samples)
        u = u.expand(list(cdf.shape[:-1]) + [N_samples])
    else:
        u = torch.rand(list(cdf.shape[:-1]) + [N_samples])
    # Invert the CDF: bracket each u between two consecutive CDF values.
    u = u.contiguous().to(cdf.device)
    inds = torch.searchsorted(cdf, u, right=True)
    below = torch.max(torch.zeros_like(inds - 1), inds - 1)
    above = torch.min((cdf.shape[-1] - 1) * torch.ones_like(inds), inds)
    inds_g = torch.stack([below, above], -1)  # (batch, N_samples, 2)
    # Gather the bracketing CDF values and bin edges.
    matched_shape = [inds_g.shape[0], inds_g.shape[1], cdf.shape[-1]]
    cdf_g = torch.gather(cdf.unsqueeze(1).expand(matched_shape), 2, inds_g)
    bins_g = torch.gather(bins.unsqueeze(1).expand(matched_shape), 2, inds_g)
    # Linear interpolation inside the selected bin; zero-width CDF intervals
    # get their denominator clamped to 1 so t stays finite.
    denom = cdf_g[..., 1] - cdf_g[..., 0]
    denom = torch.where(denom < 1e-5, torch.ones_like(denom), denom)
    t = (u - cdf_g[..., 0]) / denom
    samples = bins_g[..., 0] + t * (bins_g[..., 1] - bins_g[..., 0])
    return samples
13,207 | import torch
import torch.nn as nn
from torch import searchsorted
def get_near_far(ray_o, ray_d, bounds):
    # NOTE(review): as excerpted, the body of `get_near_far` is only this
    # nested definition, so calling it returns None and the recursive call
    # below cannot terminate usefully. The real ray/AABB intersection body is
    # presumably defined elsewhere — confirm against the full source.
    def get_near_far_RTBBox(ray_o, ray_d, bounds, R, T):
        # Sample near/far in the canonical coordinate frame: rotate and
        # translate the rays into the box frame, then intersect with `bounds`.
        ray_o_rt = (ray_o - T) @ R
        ray_d_rt = ray_d @ R
        near, far, mask_at_box = get_near_far(ray_o_rt, ray_d_rt, bounds)
        return near, far, mask_at_box
13,208 | import torch
import torch.nn as nn
from torch import searchsorted
def concat(retlist, dim=0, unsqueeze=True, mask=None):
    """Concatenate a list of dicts of tensors key-wise.

    Every entry of `retlist` must share the keys of the first entry. When
    `mask` is given and the concatenated tensor is shorter than the mask,
    the values are scattered into a zero tensor of the mask's length. When
    `unsqueeze` is set, a leading batch dimension is added to each result.
    """
    merged = {}
    if not retlist:
        return merged
    for key in retlist[0].keys():
        stacked = torch.cat([item[key] for item in retlist], dim=dim)
        if mask is not None and stacked.shape[0] != mask.shape[0]:
            # scatter the valid rows into a zero buffer of the mask's length
            padded = torch.zeros((mask.shape[0], *stacked.shape[1:]),
                                 device=stacked.device, dtype=stacked.dtype)
            padded[mask] = stacked
            stacked = padded
        merged[key] = stacked.unsqueeze(0) if unsqueeze else stacked
    return merged
13,209 | from .nerf import Nerf, EmbedMLP
import torch
import spconv
import torch.nn as nn
import torch.nn.functional as F
The provided code snippet includes necessary dependencies for implementing the `pts_to_can_pts` function. Write a Python function `def pts_to_can_pts(pts, sp_input)` to solve the following problem:
transform pts from the world coordinate to the smpl coordinate
Here is the function:
def pts_to_can_pts(pts, sp_input):
    """transform pts from the world coordinate to the smpl coordinate"""
    # translate by Th, then rotate by R; optionally undo a global scale
    canonical = torch.matmul(pts - sp_input['Th'], sp_input['R'])
    if 'scale' in sp_input:
        canonical = canonical / sp_input['scale'].float()
    return canonical
13,210 | from .nerf import Nerf, EmbedMLP
import torch
import spconv
import torch.nn as nn
import torch.nn.functional as F
def get_grid_coords(pts, sp_input, voxel_size):
    """Map world xyz points to normalized [-1, 1] voxel-grid coordinates."""
    # xyz -> dhw: the occupancy volume is indexed depth/height/width
    dhw = pts[..., [2, 1, 0]]
    dhw = (dhw - sp_input['min_dhw'][:, None]) / voxel_size
    # normalize the voxel indices to [-1, 1]
    out_sh = torch.tensor(sp_input['out_sh']).to(dhw)
    dhw = dhw / out_sh * 2 - 1
    # grid_sample expects whd ordering
    grid_coords = dhw[..., [2, 1, 0]]
    # clamp out-of-volume points onto the boundary
    grid_coords = torch.clamp(grid_coords, min=-1., max=1.)
    return grid_coords
13,211 | from .nerf import Nerf, EmbedMLP
import torch
import spconv
try:
if spconv.__version__.split('.')[0] == '2':
import spconv.pytorch as spconv
except:
pass
import torch.nn as nn
import torch.nn.functional as F
def encode_sparse_voxels(xyzc_net, sp_input, code):
    """Run the sparse 3D CNN `xyzc_net` over per-point latent codes.

    `code` holds one feature vector per occupied voxel; `sp_input` supplies
    the voxel coordinates, dense output shape and batch size. Returns
    whatever `xyzc_net` produces (presumably a multi-scale feature volume —
    depends on the network, not shown here).
    """
    coord = sp_input['coord']           # (N, 4): batch_idx + zyx voxel index
    out_sh = sp_input['out_sh']         # dense spatial shape of the volume
    batch_size = sp_input['batch_size']
    # wrap the per-point codes as a sparse tensor for spconv
    xyzc = spconv.SparseConvTensor(code, coord, out_sh, batch_size)
    feature_volume = xyzc_net(xyzc)
    return feature_volume
13,212 | from .nerf import Nerf, EmbedMLP
import torch
import spconv
import torch.nn as nn
import torch.nn.functional as F
def my_grid_sample(feat, grid, mode='bilinear', align_corners=True, padding_mode='border'):
    """Manual trilinear (bilinear-3D) grid sampling for a single batch.

    Equivalent to ``F.grid_sample(feat, grid, mode='bilinear',
    align_corners=True)`` for in-range points; out-of-range coordinates are
    clamped to the border. The `mode`, `align_corners` and `padding_mode`
    parameters are accepted for signature compatibility but only this one
    configuration is implemented.

    Args:
        feat: (1, C, D, H, W) feature volume.
        grid: (1, 1, 1, N, 3) normalized sample coordinates in xyz order.

    Returns:
        (1, C, 1, 1, N) sampled features.

    Fix over the original: drops an unused `weights` list that was built but
    never read.
    """
    B, C, ID, IH, IW = feat.shape
    assert(B == 1)
    feat = feat[0]
    grid = grid[0, 0, 0]
    N_g, _ = grid.shape
    # map normalized [-1, 1] coords to voxel indices (align_corners=True)
    ix, iy, iz = grid[..., 0], grid[..., 1], grid[..., 2]
    ix = ((ix+1)/2) * (IW-1)
    iy = ((iy+1)/2) * (IH-1)
    iz = ((iz+1)/2) * (ID-1)
    with torch.no_grad():
        ix_floor = torch.floor(ix).long()
        iy_floor = torch.floor(iy).long()
        iz_floor = torch.floor(iz).long()
        ix_ceil = ix_floor + 1
        iy_ceil = iy_floor + 1
        iz_ceil = iz_floor + 1
    # Trilinear weights; w_abc weights the corner at (x=floor+a, y=floor+b,
    # z=floor+c). Computed from the UNclamped indices so that border points
    # get weight 1 on the clamped corner (border padding behaviour).
    w_111 = (ix-ix_floor) * (iy-iy_floor) * (iz-iz_floor)
    w_110 = (ix-ix_floor) * (iy-iy_floor) * (iz_ceil-iz)
    w_101 = (ix-ix_floor) * (iy_ceil-iy) * (iz-iz_floor)
    w_011 = (ix_ceil-ix) * (iy-iy_floor) * (iz-iz_floor)
    w_100 = (ix-ix_floor) * (iy_ceil-iy) * (iz_ceil-iz)
    w_010 = (ix_ceil-ix) * (iy-iy_floor) * (iz_ceil-iz)
    w_001 = (ix_ceil-ix) * (iy_ceil-iy) * (iz-iz_floor)
    w_000 = (ix_ceil-ix) * (iy_ceil-iy) * (iz_ceil-iz)
    with torch.no_grad():
        # clamp the gather indices into the volume (weights stay unclamped)
        torch.clamp(ix_floor, 0, IW-1, out=ix_floor)
        torch.clamp(iy_floor, 0, IH-1, out=iy_floor)
        torch.clamp(iz_floor, 0, ID-1, out=iz_floor)
        torch.clamp(ix_ceil, 0, IW-1, out=ix_ceil)
        torch.clamp(iy_ceil, 0, IH-1, out=iy_ceil)
        torch.clamp(iz_ceil, 0, ID-1, out=iz_ceil)
    # gather the 8 corner values; advanced indexing gives (C, N_g) each
    v_000 = feat[:, iz_floor, iy_floor, ix_floor]
    v_001 = feat[:, iz_ceil, iy_floor, ix_floor]
    v_010 = feat[:, iz_floor, iy_ceil, ix_floor]
    v_100 = feat[:, iz_floor, iy_floor, ix_ceil]
    v_011 = feat[:, iz_ceil, iy_ceil, ix_floor]
    v_101 = feat[:, iz_ceil, iy_floor, ix_ceil]
    v_110 = feat[:, iz_floor, iy_ceil, ix_ceil]
    v_111 = feat[:, iz_ceil, iy_ceil, ix_ceil]
    val = v_000 * w_000[None] + v_001 * w_001[None] + v_010 * w_010[None] + v_100 * w_100[None] + \
        v_011 * w_011[None] + v_101 * w_101[None] + v_110 * w_110[None] + v_111 * w_111[None]
    # reshape to match F.grid_sample's (1, C, 1, 1, N) output layout
    return val[None, :, None, None]
13,213 | from .nerf import Nerf, EmbedMLP
import torch
import spconv
import torch.nn as nn
import torch.nn.functional as F
def interpolate_features(grid_coords, feature_volume, padding_mode):
    """Sample every volume in `feature_volume` at `grid_coords` and stack the
    results channel-wise; returns a (nPoints, nFeatures) tensor."""
    sampled = [
        F.grid_sample(vol, grid_coords,
                      padding_mode=padding_mode,
                      align_corners=True)
        for vol in feature_volume
    ]
    features = torch.cat(sampled, dim=1)
    # (1, C, 1, 1, N) -> (C, N) -> (N, C)
    features = features.view(-1, features.size(4))
    return features.transpose(0, 1)
13,214 | from .nerf import Nerf, EmbedMLP
import torch
import spconv
import torch.nn as nn
import torch.nn.functional as F
def prepare_sp_input(batch, voxel_pad, voxel_size):
    """Build the sparse-convolution input dict (voxel coordinates, dense
    output shape and the canonical bounding box in xyz/dhw order) from the
    posed SMPL vertices in `batch`."""
    vertices = batch['vertices'][0]
    R, Th = batch['R'][0], batch['Th'][0]
    # Here: R^-1 @ (X - T) => (X - T) @ R^-1.T
    # NOTE(review): transpose(0, 1).transpose(0, 1) is a no-op, so this is
    # (vertices - Th) @ R — consistent with the comment above when R is a
    # rotation (R^-1.T == R), but the double transpose looks accidental;
    # confirm the intended transform.
    can_xyz = torch.matmul(vertices - Th, R.transpose(0, 1).transpose(0, 1))
    # construct the padded bounding box of the canonical vertices
    min_xyz, _ = torch.min(can_xyz - voxel_pad, dim=0)
    max_xyz, _ = torch.max(can_xyz + voxel_pad, dim=0)
    dhw = can_xyz[:, [2, 1, 0]]
    min_dhw = min_xyz[[2, 1, 0]]
    max_dhw = max_xyz[[2, 1, 0]]
    # voxel index of each vertex in the canonical space
    coord = torch.round((dhw - min_dhw)/voxel_size).to(torch.int)
    # construct the output shape, rounded up to a multiple of 32
    out_sh = torch.ceil((max_dhw - min_dhw) / voxel_size).to(torch.int)
    x = 32
    out_sh = (out_sh | (x - 1)) + 1
    # feature, coordinate, shape, batch size
    sp_input = {}
    # coordinate: [N, 4], batch_idx, z, y, x
    coord = coord[None]
    sh = coord.shape
    idx = [torch.full([sh[1]], i, dtype=torch.long) for i in range(sh[0])]
    idx = torch.cat(idx).to(coord)
    # NOTE(review): out_sh is a (3,) tensor here, so max over dim 0 collapses
    # it to a scalar and 'out_sh' below becomes a single int; the batched
    # original presumably maxed over a (B, 3) stack — verify downstream.
    out_sh, _ = torch.max(out_sh, dim=0)
    sp_input = {
        'coord': torch.cat([idx[:, None], coord[0]], dim=1),
        'out_sh': out_sh.tolist(),
        'batch_size': sh[0],
        # used for feature interpolation
        'min_dhw': min_dhw[None],
        'max_dhw': max_dhw[None],
        'min_xyz': min_xyz[None],
        'max_xyz': max_xyz[None],
        'R': R,
        'Th': Th,
        # 'scale': ,
    }
    return sp_input
13,215 | from .nerf import Nerf, EmbedMLP
import torch
import spconv
try:
if spconv.__version__.split('.')[0] == '2':
import spconv.pytorch as spconv
except:
pass
import torch.nn as nn
import torch.nn.functional as F
def single_conv(in_channels, out_channels, indice_key=None):
    """1x1x1 submanifold sparse conv -> BatchNorm -> ReLU."""
    layers = [
        spconv.SubMConv3d(in_channels, out_channels, 1, bias=False,
                          indice_key=indice_key),
        nn.BatchNorm1d(out_channels, eps=1e-3, momentum=0.01),
        nn.ReLU(),
    ]
    return spconv.SparseSequential(*layers)
13,216 | from .nerf import Nerf, EmbedMLP
import torch
import spconv
try:
if spconv.__version__.split('.')[0] == '2':
import spconv.pytorch as spconv
except:
pass
import torch.nn as nn
import torch.nn.functional as F
def double_conv(in_channels, out_channels, indice_key=None):
    """Two stacked (3x3x3 submanifold conv -> BatchNorm -> ReLU) stages."""
    layers = []
    for ch_in in (in_channels, out_channels):
        layers += [
            spconv.SubMConv3d(ch_in, out_channels, 3, bias=False,
                              indice_key=indice_key),
            nn.BatchNorm1d(out_channels, eps=1e-3, momentum=0.01),
            nn.ReLU(),
        ]
    return spconv.SparseSequential(*layers)
13,217 | from .nerf import Nerf, EmbedMLP
import torch
import spconv
try:
if spconv.__version__.split('.')[0] == '2':
import spconv.pytorch as spconv
except:
pass
import torch.nn as nn
import torch.nn.functional as F
def triple_conv(in_channels, out_channels, indice_key=None):
    """Three stacked (3x3x3 submanifold conv -> BatchNorm -> ReLU) stages."""
    layers = []
    for ch_in in (in_channels, out_channels, out_channels):
        layers += [
            spconv.SubMConv3d(ch_in, out_channels, 3, bias=False,
                              indice_key=indice_key),
            nn.BatchNorm1d(out_channels, eps=1e-3, momentum=0.01),
            nn.ReLU(),
        ]
    return spconv.SparseSequential(*layers)
13,218 | from .nerf import Nerf, EmbedMLP
import torch
import spconv
try:
if spconv.__version__.split('.')[0] == '2':
import spconv.pytorch as spconv
except:
pass
import torch.nn as nn
import torch.nn.functional as F
def stride_conv(in_channels, out_channels, indice_key=None):
    """Strided 3x3x3 sparse conv (stride 2, pad 1) -> BatchNorm -> ReLU;
    downsamples the sparse volume by a factor of two."""
    conv = spconv.SparseConv3d(in_channels, out_channels, 3, 2,
                               padding=1, bias=False, indice_key=indice_key)
    norm = nn.BatchNorm1d(out_channels, eps=1e-3, momentum=0.01)
    return spconv.SparseSequential(conv, norm, nn.ReLU())
13,219 | import torch
import torch.nn as nn
from .nerf import Nerf, EmbedMLP, MultiLinear
from os.path import join
from ...mytools.file_utils import read_json
import numpy as np
class EmbedMLP(nn.Module):
    """MLP over a positionally-encoded scalar (time/frame) input.

    NOTE(review): this shadows the `EmbedMLP` imported from .nerf above —
    presumably an intentional local reimplementation; confirm. `get_embedder`
    is not defined in this snippet and is assumed to come from the nerf
    module (returns the embedding fn and its output channel count).
    """
    def __init__(self, input_ch, output_ch, multi_res, W, D, bounds) -> None:
        super().__init__()
        # positional embedding of the input; ch_time is the embedded width
        self.embed, ch_time = get_embedder(multi_res, input_ch)
        # normalization constant so time/bounds falls in [0, 1]
        self.bounds = bounds
        self.linear = MultiLinear(
            input_ch=ch_time,
            output_ch=output_ch, init_bias=0, act_fn='none',
            D=D, W=W, skips=[])
    def forward(self, time):
        # normalize, embed, then map through the MLP
        embed = self.embed(time.reshape(1, -1).float()/self.bounds)
        output = self.linear(embed)
        return output
def create_dynamic_embedding(mode, embed):
    """Build a per-frame embedding: a dense `nn.Embedding` table or an MLP.

    `embed` provides `.shape` (n_frames, dim); for the 'mlp' mode it may also
    carry `D`/`W` overriding the default depth/width.
    """
    if mode == 'dense':
        return nn.Embedding(embed.shape[0], embed.shape[1])
    if mode != 'mlp':
        raise NotImplementedError
    # MLP variant: depth/width may be overridden through the config
    if 'D' not in embed.keys():
        width, depth = 128, 2
    else:
        width, depth = embed.W, embed.D
    return EmbedMLP(
        input_ch=1,
        multi_res=32,
        W=width,
        D=depth,
        bounds=embed.shape[0],
        output_ch=embed.shape[1])
13,220 | import numpy as np
import cv2
import os
from os.path import join
from ..mytools import plot_cross, plot_line, plot_bbox, plot_keypoints, get_rgb, merge
from ..mytools.file_utils import get_bbox_from_pose
from ..dataset import CONFIG
def plot_bbox_body(img, annots, **kwargs):
    """Draw every person bbox as an X-cross plus a border, a filled center
    patch and the person ID label; modifies `img` in place and returns it."""
    annots = annots['annots']
    for data in annots:
        bbox = data['bbox']
        # draw an X shape across the bbox
        x1, y1, x2, y2 = bbox[:4]
        pid = data['personID']
        color = get_rgb(pid)
        # line width scales with the bbox width
        lw = max(1, int((x2 - x1)//100))
        plot_line(img, (x1, y1), (x2, y2), lw, color)
        plot_line(img, (x1, y2), (x2, y1), lw, color)
        # border
        cv2.rectangle(img, (int(x1), int(y1)), (int(x2), int(y2)), color, lw+1)
        # filled patch at the bbox center, same aspect ratio as the bbox
        ratio = (y2-y1)/(x2-x1)
        w = 10*lw
        cv2.rectangle(img,
            (int((x1+x2)/2-w), int((y1+y2)/2-w*ratio)),
            (int((x1+x2)/2+w), int((y1+y2)/2+w*ratio)),
            color, -1)
        cv2.putText(img, '{}'.format(pid), (int(x1), int(y1)+20), cv2.FONT_HERSHEY_SIMPLEX, 5, color, 2)
    return img
13,221 | import os
import json
import numpy as np
from os.path import join
import shutil
from ..mytools.file_utils import myarray2string
def read_json(path):
    """Load and return the JSON content of `path`."""
    with open(path, 'r') as f:
        return json.load(f)
def load_annot_to_tmp(annotname):
    """Load a temporary annotation file; on first access it is created by
    copying the non-'_tmp' original. List-style annots are wrapped into the
    dict layout the annotator expects."""
    if annotname is None:
        return {}
    if not os.path.exists(annotname):
        # first visit: copy the source annotation to its _tmp counterpart
        os.makedirs(os.path.dirname(annotname), exist_ok=True)
        shutil.copyfile(annotname.replace('_tmp', ''), annotname)
    annot = read_json(annotname)
    if isinstance(annot, list):
        annot = {'annots': annot, 'isKeyframe': False, 'isList': True}
    return annot
13,222 | import cv2
The provided code snippet includes necessary dependencies for implementing the `point_callback` function. Write a Python function `def point_callback(event, x, y, flags, param)` to solve the following problem:
OpenCV使用的简单的回调函数,主要实现两个基础功能: 1. 对于按住拖动的情况,记录起始点与终止点(当前点) 2. 对于点击的情况,记录选择的点 3. 记录当前是否按住了键
Here is the function:
def point_callback(event, x, y, flags, param):
    """
    Simple OpenCV mouse callback implementing the basic interactions:
    1. While dragging, record the start and end (current) point.
    2. On a plain click, record the selected point.
    3. Track whether the left mouse button is currently held down.
    """
    if event not in [cv2.EVENT_LBUTTONDOWN, cv2.EVENT_MOUSEMOVE, cv2.EVENT_LBUTTONUP]:
        return 0
    # NOTE(review): equality ignores modifier bits in `flags`;
    # `flags & cv2.EVENT_FLAG_LBUTTON` may be intended — confirm.
    param['button_down'] = flags == cv2.EVENT_FLAG_LBUTTON
    # record the position of the pressed point directly
    if event == cv2.EVENT_LBUTTONDOWN:
        # when a box is already selected, only reset the click state on press
        param['click'] = None
        param['start'] = (x, y)
        param['end'] = (x, y)
    elif event == cv2.EVENT_MOUSEMOVE and flags == cv2.EVENT_FLAG_LBUTTON:
        param['end'] = (x, y)
    elif event == cv2.EVENT_LBUTTONUP:
        # released at the press position -> treat as a plain click
        if x == param['start'][0] and y == param['start'][1]:
            param['click'] = param['start']
            param['start'] = None
            param['end'] = None
        else:
            param['click'] = None
    return 1
13,223 | import numpy as np
import cv2
from func_timeout import func_set_timeout
# BGR colors cycled per chessboard row when drawing the detected pattern
colors_chessboard_bar = [
    [0, 0, 255],
    [0, 128, 255],
    [0, 200, 200],
    [0, 255, 0],
    [200, 200, 0],
    [255, 0, 0],
    [255, 0, 250]
]

def get_lines_chessboard(pattern=(9, 6)):
    """Return (index pairs, per-line colors) linking consecutive chessboard
    corners; the color changes every `pattern[0]` corners (one row)."""
    n_cols, n_rows = pattern
    n_colors = len(colors_chessboard_bar)
    n_lines = n_cols * n_rows - 1
    lines = [[i, i + 1] for i in range(n_lines)]
    lines_cols = [colors_chessboard_bar[(i // n_cols) % n_colors]
                  for i in range(n_lines)]
    return lines, lines_cols
13,224 | import numpy as np
import cv2
from func_timeout import func_set_timeout
def detect_charuco(image, aruco_type, long, short, squareLength, aruco_len):
    """Detect ChArUco board corners in `image`.

    Returns (retval, keypoints2d, corners3d): a success flag, the detected
    2D corners as (N, 3) with confidence 1 in the last column (0 where a
    corner was not found), and the board's 3D corner template with X/Y
    swapped.
    """
    ARUCO_DICT = {
        "4X4_50": cv2.aruco.DICT_4X4_50,
        "4X4_100": cv2.aruco.DICT_4X4_100,
        "5X5_100": cv2.aruco.DICT_5X5_100,
        "5X5_250": cv2.aruco.DICT_5X5_250,
    }
    # build the ChArUco calibration board
    dictionary = cv2.aruco.getPredefinedDictionary(dict=ARUCO_DICT[aruco_type])
    board = cv2.aruco.CharucoBoard_create(
        squaresY=long,
        squaresX=short,
        squareLength=squareLength,
        markerLength=aruco_len,
        dictionary=dictionary,
    )
    corners = board.chessboardCorners
    # ATTN: exchange the XY
    corners3d = corners[:, [1, 0, 2]]
    keypoints2d = np.zeros_like(corners3d)
    # detect the aruco marker blocks (their upper-left corners)
    corners, ids, _ = cv2.aruco.detectMarkers(
        image=image, dictionary=dictionary, parameters=None
    )
    # refine to the interior corners between the black/white squares
    if ids is not None:
        retval, charucoCorners, charucoIds = cv2.aruco.interpolateCornersCharuco(
            markerCorners=corners, markerIds=ids, image=image, board=board
        )
        if retval:
            ids = charucoIds[:, 0]
            pts = charucoCorners[:, 0]
            keypoints2d[ids, :2] = pts
            keypoints2d[ids, 2] = 1.
    else:
        retval = False
    return retval, keypoints2d, corners3d
13,225 | from glob import glob
from tqdm import tqdm
from .basic_callback import get_key
def print_help(annotator, **kwargs):
    """print the help"""
    print('Here is the help:')
    print( '------------------')
    for key, val in annotator.register_keys.items():
        # a key may be bound to a single callback or a list of callbacks
        callbacks = val if isinstance(val, list) else [val]
        print(' {}: '.format(key, ': '), str(callbacks[0].__doc__))
        for cb in callbacks[1:]:
            print(' ', str(cb.__doc__))
for key in 'wasdfg':
register_keys[key] = get_move(key)
def print_help_mv(annotator, **kwargs):
    """Print the global help followed by the per-view key bindings."""
    print_help(annotator)
    separator = '------------------'
    print(separator)
    print('Here is the help for each view:')
    print(separator)
    for key, callback in annotator.register_keys_view.items():
        print(' {}: '.format(key, ': '), str(callback.__doc__))
13,226 | from glob import glob
from tqdm import tqdm
from .basic_callback import get_key
The provided code snippet includes necessary dependencies for implementing the `close` function. Write a Python function `def close(annotator, **kwargs)` to solve the following problem:
quit the annotation
Here is the function:
def close(annotator, **kwargs):
    """quit the annotation"""
    # while the annotator is still working, re-set the frame instead of quitting
    if not annotator.working:
        annotator.save_and_quit()
    else:
        annotator.set_frame(annotator.frame)
13,227 | from glob import glob
from tqdm import tqdm
from .basic_callback import get_key
for key in 'wasdfg':
register_keys[key] = get_move(key)
The provided code snippet includes necessary dependencies for implementing the `close_wo_save` function. Write a Python function `def close_wo_save(annotator, **kwargs)` to solve the following problem:
quit the annotation without saving
Here is the function:
def close_wo_save(annotator, **kwargs):
    """quit the annotation without saving"""
    # 'n' tells save_and_quit to discard pending changes
    annotator.save_and_quit(key='n')
13,228 | from glob import glob
from tqdm import tqdm
from .basic_callback import get_key
for key in 'wasdfg':
register_keys[key] = get_move(key)
The provided code snippet includes necessary dependencies for implementing the `skip` function. Write a Python function `def skip(annotator, **kwargs)` to solve the following problem:
skip the annotation
Here is the function:
def skip(annotator, **kwargs):
    """skip the annotation"""
    # 'y' tells save_and_quit to accept and move on
    annotator.save_and_quit(key='y')
13,229 | from glob import glob
from tqdm import tqdm
from .basic_callback import get_key
def get_move(wasd):
    """Build a frame-navigation callback for key `wasd`: a relative jump of
    -1/+1/-10/+10/+100/-100 frames, clipped to the annotator's valid range."""
    offset = {'a': -1, 'd': 1, 'w': -10, 's': 10, 'f': 100, 'g': -100}[wasd]
    doc = {
        'a': 'Move to last frame',
        'd': 'Move to next frame',
        'w': 'Move to last step frame',
        's': 'Move to next step frame',
        'f': 'Move to last step frame',
        'g': 'Move to next step frame'
    }[wasd]
    def move(annotator, **kwargs):
        target = annotator.frame + offset
        # clip into [start, min(end, nFrames) - 1]
        target = max(annotator.start,
                     min(annotator.nFrames - 1, min(annotator.end - 1, target)))
        annotator.frame = target
    move.__doc__ = doc
    return move
13,230 | from glob import glob
from tqdm import tqdm
from .basic_callback import get_key
def set_personID(i):
    """Return a callback that assigns person ID `i` to the currently
    selected bbox.

    Bug fix: the original guard used `and`, so an invalid selection fell
    through to the assignment — a selection of -1 silently mutated
    `annots[-1]` (the last bbox) and an out-of-range index raised
    IndexError. The guard must be `or`.
    """
    def func(self, param, **kwargs):
        active = param['select']['bbox']
        # nothing selected, or the selection index is out of range -> no-op
        if active == -1 or active >= len(param['annots']['annots']):
            return 0
        param['annots']['annots'][active]['personID'] = i
        return 0
    func.__doc__ = "set the bbox ID to {}".format(i)
    return func
13,231 | from glob import glob
from tqdm import tqdm
from .basic_callback import get_key
def choose_personID(i):
    """Return a callback that selects the first bbox whose personID is `i`."""
    def func(self, param, **kwargs):
        for index, annot in enumerate(param['annots']['annots']):
            if annot['personID'] != i:
                continue
            param['select']['bbox'] = index
            return 0
    func.__doc__ = "choose the bbox of ID {}".format(i)
    return func
13,232 | from glob import glob
from tqdm import tqdm
from .basic_callback import get_key
remain = 0
keys_pre = []
for key in 'wasdfg':
register_keys[key] = get_move(key)
def get_key():
k = cv2.waitKey(10) & 0xFF
if k == CV_KEY.LSHIFT:
key1 = cv2.waitKey(500) & 0xFF
if key1 == CV_KEY.NONE:
return key1
# 转换为大写
k = key1 - ord('a') + ord('A')
return k
The provided code snippet includes necessary dependencies for implementing the `cont_automatic` function. Write a Python function `def cont_automatic(self, param)` to solve the following problem:
continue automatic
Here is the function:
def cont_automatic(self, param):
    "continue automatic"
    # Resume a previously interrupted automatic run if `remain` frames are
    # left; otherwise prompt the user for a key sequence and repeat count,
    # then replay the keys per frame, advancing with 'd' after each pass.
    global remain, keys_pre
    if remain > 0:
        keys = keys_pre
        repeats = remain
    else:
        print('Examples: ')
        print(' - noshow r t: automatic removing and tracking')
        print(' - noshow nostop r t r c: automatic removing and tracking, if missing, just copy')
        keys = input('Enter the ordered key(separate with blank): ').split(' ')
        keys_pre = keys
        try:
            repeats = int(input('Input the repeat times(0->{}): '.format(len(self.dataset)-self.frame)))
        except:
            repeats = 0
        if repeats == -1:
            # -1 means "run to the end of the dataset"
            repeats = len(self.dataset)
    repeats = min(repeats, len(self.dataset)-self.frame+1)
    if len(keys) < 1:
        return 0
    noshow = 'noshow' in keys
    if noshow:
        # suppress rendering for speed
        self.no_img = True
    nostop = 'nostop' in keys
    param['stop'] = False
    for nf in tqdm(range(repeats), desc='auto {}'.format('->'.join(keys))):
        for key in keys:
            self.run(key=key, noshow=noshow)
        # 'q' aborts; a stop request aborts unless 'nostop' was given;
        # the remaining count is saved so the run can be resumed
        if chr(get_key()) == 'q' or (param['stop'] and not nostop):
            remain = repeats - nf
            break
        self.run(key='d', noshow=noshow)
    else:
        # completed all repeats: clear the resume state
        remain = 0
        keys_pre = []
    self.no_img = False
13,233 | from glob import glob
from tqdm import tqdm
from .basic_callback import get_key
remain = 0
keys_pre = []
for key in 'wasdfg':
register_keys[key] = get_move(key)
def get_key():
k = cv2.waitKey(10) & 0xFF
if k == CV_KEY.LSHIFT:
key1 = cv2.waitKey(500) & 0xFF
if key1 == CV_KEY.NONE:
return key1
# 转换为大写
k = key1 - ord('a') + ord('A')
return k
The provided code snippet includes necessary dependencies for implementing the `automatic` function. Write a Python function `def automatic(self, param)` to solve the following problem:
Automatic running
Here is the function:
def automatic(self, param):
    "Automatic running"
    # Prompt the user for a key sequence and repeat count, then replay the
    # keys on each frame, advancing with 'd' after every pass.
    global remain, keys_pre
    print('Examples: ')
    print(' - noshow r t: automatic removing and tracking')
    print(' - noshow nostop r t r c: automatic removing and tracking, if missing, just copy')
    keys = input('Enter the ordered key(separate with blank): ').split(' ')
    keys_pre = keys
    try:
        repeats = int(input('Input the repeat times(0->{}): '.format(self.nFrames-self.frame)))
    except:
        repeats = 0
    repeats = min(repeats, self.nFrames-self.frame+1)
    if len(keys) < 1:
        return 0
    noshow = 'noshow' in keys
    if noshow:
        # suppress rendering for speed
        self.no_img = True
    nostop = 'nostop' in keys
    param['stop'] = False
    for nf in tqdm(range(repeats), desc='auto {}'.format('->'.join(keys))):
        for key in keys:
            self.run(key=key, noshow=noshow)
        # 'q' aborts; a stop request aborts unless 'nostop' was given;
        # the remaining count is saved so cont_automatic can resume
        if chr(get_key()) == 'q' or (param['stop'] and not nostop):
            remain = repeats - nf
            break
        self.run(key='d', noshow=noshow)
    else:
        # completed all repeats: clear the resume state
        remain = 0
        keys_pre = []
    self.no_img = False
13,234 | from glob import glob
from tqdm import tqdm
from .basic_callback import get_key
The provided code snippet includes necessary dependencies for implementing the `set_keyframe` function. Write a Python function `def set_keyframe(self, param)` to solve the following problem:
set/unset the key-frame
Here is the function:
def set_keyframe(self, param):
    "set/unset the key-frame"
    # toggle the key-frame flag of the current annotation
    current = param['annots']['isKeyframe']
    param['annots']['isKeyframe'] = not current
13,235 | import numpy as np
from ..dataset.config import CONFIG
def findNearestPoint(points, click):
    # NOTE(review): as excerpted, the body of `findNearestPoint` is only this
    # nested definition, so the function itself returns None; the distance
    # search it is recursively called for below (returning (flag, min_index))
    # is presumably defined elsewhere — confirm against the full source.
    def callback_select_bbox_corner(start, end, annots, select, bbox_name, **kwargs):
        # Drag callback to select and move a bbox corner (indices 0-3) or its
        # center (index 4). State machine over (selected bbox, selected corner).
        if start is None or end is None:
            select['corner'] = -1
            return 0
        if start[0] == end[0] and start[1] == end[1]:
            return 0
        # determine which corner was selected
        annots = annots['annots']
        if len(annots) == 0:
            return 0
        # not select a bbox: search corners of every bbox for the drag start
        if select[bbox_name] == -1 and select['corner'] == -1:
            corners = []
            for i in range(len(annots)):
                l, t, r, b = annots[i][bbox_name][:4]
                corner = np.array([(l, t), (l, b), (r, t), (r, b), ((l+r)/2, (t+b)/2)])
                corners.append(corner)
            corners = np.stack(corners)
            flag, minid = findNearestPoint(corners, start)
            if flag:
                select[bbox_name] = minid[0]
                select['corner'] = minid[1]
            else:
                select['corner'] = -1
        # have selected a bbox, not select a corner: search only its corners
        elif select[bbox_name] != -1 and select['corner'] == -1:
            i = select[bbox_name]
            l, t, r, b = annots[i][bbox_name][:4]
            corners = np.array([(l, t), (l, b), (r, t), (r, b), ((l+r)/2, (t+b)/2)])
            flag, minid = findNearestPoint(corners, start)
            if flag:
                select['corner'] = minid[0]
        # have selected a bbox, and select a corner: drag it to `end`
        elif select[bbox_name] != -1 and select['corner'] != -1:
            x, y = end
            # Move the corner (write the matching l/t/r/b components)
            if select['corner'] < 4:
                (i, j) = [(0, 1), (0, 3), (2, 1), (2, 3)][select['corner']]
                data = annots[select[bbox_name]]
                data[bbox_name][i] = x
                data[bbox_name][j] = y
            # Move the center (translate the whole box, size unchanged)
            else:
                bbox = annots[select[bbox_name]][bbox_name]
                w = (bbox[2] - bbox[0])/2
                h = (bbox[3] - bbox[1])/2
                bbox[0] = x - w
                bbox[1] = y - h
                bbox[2] = x + w
                bbox[3] = y + h
        elif select[bbox_name] == -1 and select['corner'] != -1:
            # inconsistent state: clear the stale corner selection
            select['corner'] = -1
13,236 | import numpy as np
from ..dataset.config import CONFIG
def get_auto_track(mode='kpts'):
    """Build a callback that propagates person IDs from the previous frame by
    greedy nearest-neighbour matching.

    mode='kpts': confidence-weighted mean keypoint distance;
    mode equal to the bbox key: (1 - IoU) distance.
    NOTE(review): the threshold is lowered only when mode == 'bbox' literally,
    while the distance branch compares against the runtime `bbox_name` —
    confirm these always coincide.
    """
    MAX_SPEED = 100
    if mode == 'bbox':
        MAX_SPEED = 0.2
    def auto_track(self, param, **kwargs):
        if self.frame == 0:
            return 0
        previous = self.previous()
        annots = param['annots']['annots']
        bbox_name = param['bbox_name']
        kpts_name = param['kpts_name']
        if len(annots) == 0:
            return 0
        if len(previous['annots']) == 0:
            return 0
        if mode == 'kpts':
            keypoints_pre = np.array([d[kpts_name] for d in previous['annots']])
            keypoints_now = np.array([d[kpts_name] for d in annots])
            # confidence-weighted mean joint distance for all (now, pre) pairs
            conf = np.sqrt(keypoints_now[:, None, :, -1] * keypoints_pre[None, :, :, -1])
            diff = np.linalg.norm(keypoints_now[:, None, :, :2] - keypoints_pre[None, :, :, :2], axis=-1)
            dist = np.sum(diff * conf, axis=-1)/np.sum(conf, axis=-1)
        elif mode == bbox_name:
            # compute the IoU
            bbox_pre = np.array([d[bbox_name] for d in previous['annots']])
            bbox_now = np.array([d[bbox_name] for d in annots])
            bbox_pre = bbox_pre[None]
            bbox_now = bbox_now[:, None]
            areas_pre = (bbox_pre[..., 2] - bbox_pre[..., 0]) * (bbox_pre[..., 3] - bbox_pre[..., 1])
            areas_now = (bbox_now[..., 2] - bbox_now[..., 0]) * (bbox_now[..., 3] - bbox_now[..., 1])
            # larger of the left/top boundaries
            xx1 = np.maximum(bbox_pre[..., 0], bbox_now[..., 0])
            yy1 = np.maximum(bbox_pre[..., 1], bbox_now[..., 1])
            # smaller of the right/bottom boundaries
            xx2 = np.minimum(bbox_pre[..., 2], bbox_now[..., 2])
            yy2 = np.minimum(bbox_pre[..., 3], bbox_now[..., 3])
            w = np.maximum(0.0, xx2 - xx1)
            h = np.maximum(0.0, yy2 - yy1)
            inter = w * h
            over = inter / (areas_pre + areas_now - inter)
            dist = 1 - over
        else:
            raise NotImplementedError
        # greedy one-to-one assignment: sort candidate pairs by distance
        nows, pres = np.where(dist < MAX_SPEED)
        edges = []
        for n, p in zip(nows, pres):
            edges.append((n, p, dist[n, p]))
        edges.sort(key=lambda x:x[2])
        used_n, used_p = [], []
        for n, p, _ in edges:
            if n in used_n or p in used_p:
                continue
            annots[n]['personID'] = previous['annots'][p]['personID']
            used_n.append(n)
            used_p.append(p)
        # TODO:stop when missing
        pre_ids = [d['personID'] for d in previous['annots']]
        if len(used_p) != len(pre_ids):
            param['stop'] = True
            # NOTE(review): this compares person IDs against `used_p`, which
            # holds indices into previous['annots'] — the printed "missing"
            # list is only correct when IDs equal indices; verify.
            print('>>> Stop because missing key: {}'.format(
                [i for i in pre_ids if i not in used_p]))
            print(dist)
        # unmatched current detections receive fresh IDs above the prev max
        max_id = max(pre_ids) + 1
        for i in range(len(annots)):
            if i in used_n:
                continue
            annots[i]['personID'] = max_id
            max_id += 1
    auto_track.__doc__ = 'auto track the {}'.format(mode)
    return auto_track
13,237 | import numpy as np
from ..dataset.config import CONFIG
The provided code snippet includes necessary dependencies for implementing the `copy_previous_missing` function. Write a Python function `def copy_previous_missing(self, param, **kwargs)` to solve the following problem:
copy the missing person of previous frame
Here is the function:
def copy_previous_missing(self, param, **kwargs):
    "copy the missing person of previous frame"
    if self.frame == 0:
        return 0
    prev_annots = self.previous()['annots']
    annots = param['annots']['annots']
    # IDs present in the current frame (fall back to 'id' when no 'personID')
    current_ids = {d.get('personID', d.get('id')) for d in annots}
    for prev in prev_annots:
        if prev.get('personID', prev.get('id')) not in current_ids:
            annots.append(prev)
13,238 | import numpy as np
from ..dataset.config import CONFIG
The provided code snippet includes necessary dependencies for implementing the `copy_previous_bbox` function. Write a Python function `def copy_previous_bbox(self, param, **kwargs)` to solve the following problem:
copy the annots of previous frame
Here is the function:
def copy_previous_bbox(self, param, **kwargs):
    "copy the annots of previous frame"
    if self.frame == 0:
        return 0
    # overwrite the current annots with those of the previous frame
    param['annots']['annots'] = self.previous()['annots']
13,239 | import numpy as np
from ..dataset.config import CONFIG
The provided code snippet includes necessary dependencies for implementing the `create_bbox` function. Write a Python function `def create_bbox(self, param, **kwargs)` to solve the following problem:
add new boundbox
Here is the function:
def create_bbox(self, param, **kwargs):
    "add new boundbox"
    start, end = param['start'], param['end']
    if start is None or end is None:
        return 0
    annots = param['annots']['annots']
    bbox_name, kpts_name = param['bbox_name'], param['kpts_name']
    # new person gets an ID above the current maximum (0 when empty)
    existing = [d['personID'] for d in annots]
    new_id = max(existing) + 1 if existing else 0
    annots.append({
        'personID': new_id,
        bbox_name: [start[0], start[1], end[0], end[1], 1],
        kpts_name: [[0., 0., 0.] for _ in range(25)],
    })
    # consume the drag so the same rectangle is not added twice
    param['start'], param['end'] = None, None
13,240 | import numpy as np
from ..dataset.config import CONFIG
# Registry of skeleton definitions: each entry maps a body-model name to at
# least 'nJoints' (number of keypoints) and 'kintree' (pairs of joint indices
# describing bone connectivity). Further entries are added below.
CONFIG = {
    'points': {
        'nJoints': 1,
        'kintree': []
    }
}
CONFIG['smpl'] = {'nJoints': 24, 'kintree':
[
[ 0, 1 ],
[ 0, 2 ],
[ 0, 3 ],
[ 1, 4 ],
[ 2, 5 ],
[ 3, 6 ],
[ 4, 7 ],
[ 5, 8 ],
[ 6, 9 ],
[ 7, 10],
[ 8, 11],
[ 9, 12],
[ 9, 13],
[ 9, 14],
[12, 15],
[13, 16],
[14, 17],
[16, 18],
[17, 19],
[18, 20],
[19, 21],
[20, 22],
[21, 23],
],
'joint_names': [
'MidHip', # 0
'LUpLeg', # 1
'RUpLeg', # 2
'spine', # 3
'LLeg', # 4
'RLeg', # 5
'spine1', # 6
'LFoot', # 7
'RFoot', # 8
'spine2', # 9
'LToeBase', # 10
'RToeBase', # 11
'neck', # 12
'LShoulder', # 13
'RShoulder', # 14
'head', # 15
'LArm', # 16
'RArm', # 17
'LForeArm', # 18
'RForeArm', # 19
'LHand', # 20
'RHand', # 21
'LHandIndex1', # 22
'RHandIndex1', # 23
]
}
CONFIG['smplh'] = {'nJoints': 52, 'kintree':
[
[ 1, 0],
[ 2, 0],
[ 3, 0],
[ 4, 1],
[ 5, 2],
[ 6, 3],
[ 7, 4],
[ 8, 5],
[ 9, 6],
[ 10, 7],
[ 11, 8],
[ 12, 9],
[ 13, 9],
[ 14, 9],
[ 15, 12],
[ 16, 13],
[ 17, 14],
[ 18, 16],
[ 19, 17],
[ 20, 18],
[ 21, 19],
[ 22, 20],
[ 23, 22],
[ 24, 23],
[ 25, 20],
[ 26, 25],
[ 27, 26],
[ 28, 20],
[ 29, 28],
[ 30, 29],
[ 31, 20],
[ 32, 31],
[ 33, 32],
[ 34, 20],
[ 35, 34],
[ 36, 35],
[ 37, 21],
[ 38, 37],
[ 39, 38],
[ 40, 21],
[ 41, 40],
[ 42, 41],
[ 43, 21],
[ 44, 43],
[ 45, 44],
[ 46, 21],
[ 47, 46],
[ 48, 47],
[ 49, 21],
[ 50, 49],
[ 51, 50]
],
'joint_names': [
'MidHip', # 0
'LUpLeg', # 1
'RUpLeg', # 2
'spine', # 3
'LLeg', # 4
'RLeg', # 5
'spine1', # 6
'LFoot', # 7
'RFoot', # 8
'spine2', # 9
'LToeBase', # 10
'RToeBase', # 11
'neck', # 12
'LShoulder', # 13
'RShoulder', # 14
'head', # 15
'LArm', # 16
'RArm', # 17
'LForeArm', # 18
'RForeArm', # 19
'LHand', # 20
'RHand', # 21
'LHandIndex1', # 22
'RHandIndex1', # 23
]
}
CONFIG['coco'] = {
'nJoints': 17,
'kintree': [
[0, 1], [0, 2], [1, 3], [2, 4], [0, 5], [0, 6], [5, 6], [5, 7], [6, 8], [7, 9], [8, 10], [5, 11], [5, 12], [11, 12], [11, 13], [12, 14], [13, 15], [14, 16]
],
}
CONFIG['coco_17'] = CONFIG['coco']
CONFIG['body25'] = {'nJoints': 25, 'kintree':
[[ 1, 0],
[ 2, 1],
[ 3, 2],
[ 4, 3],
[ 5, 1],
[ 6, 5],
[ 7, 6],
[ 8, 1],
[ 9, 8],
[10, 9],
[11, 10],
[12, 8],
[13, 12],
[14, 13],
[15, 0],
[16, 0],
[17, 15],
[18, 16],
[19, 14],
[20, 19],
[21, 14],
[22, 11],
[23, 22],
[24, 11]],
'joint_names': [
"Nose", "Neck", "RShoulder", "RElbow", "RWrist", "LShoulder", "LElbow", "LWrist", "MidHip", "RHip","RKnee","RAnkle","LHip","LKnee","LAnkle","REye","LEye","REar","LEar","LBigToe","LSmallToe","LHeel","RBigToe","RSmallToe","RHeel"]}
CONFIG['body25']['kintree_order'] = [
[1, 8], # 躯干放在最前面
[1, 2],
[2, 3],
[3, 4],
[1, 5],
[5, 6],
[6, 7],
[8, 9],
[8, 12],
[9, 10],
[10, 11],
[12, 13],
[13, 14],
[1, 0],
[0, 15],
[0, 16],
[15, 17],
[16, 18],
[11, 22],
[11, 24],
[22, 23],
[14, 19],
[19, 20],
[14, 21]
]
CONFIG['body25']['colors'] = ['k', 'r', 'r', 'r', 'b', 'b', 'b', 'k', 'r', 'r', 'r', 'b', 'b', 'b', 'r', 'b', 'r', 'b', 'b', 'b', 'b', 'r', 'r', 'r']
CONFIG['body25']['skeleton'] = \
{
( 0, 1): {'mean': 0.228, 'std': 0.046}, # Nose ->Neck
( 1, 2): {'mean': 0.144, 'std': 0.029}, # Neck ->RShoulder
( 2, 3): {'mean': 0.283, 'std': 0.057}, # RShoulder->RElbow
( 3, 4): {'mean': 0.258, 'std': 0.052}, # RElbow ->RWrist
( 1, 5): {'mean': 0.145, 'std': 0.029}, # Neck ->LShoulder
( 5, 6): {'mean': 0.281, 'std': 0.056}, # LShoulder->LElbow
( 6, 7): {'mean': 0.258, 'std': 0.052}, # LElbow ->LWrist
( 1, 8): {'mean': 0.483, 'std': 0.097}, # Neck ->MidHip
( 8, 9): {'mean': 0.106, 'std': 0.021}, # MidHip ->RHip
( 9, 10): {'mean': 0.438, 'std': 0.088}, # RHip ->RKnee
(10, 11): {'mean': 0.406, 'std': 0.081}, # RKnee ->RAnkle
( 8, 12): {'mean': 0.106, 'std': 0.021}, # MidHip ->LHip
(12, 13): {'mean': 0.438, 'std': 0.088}, # LHip ->LKnee
(13, 14): {'mean': 0.408, 'std': 0.082}, # LKnee ->LAnkle
( 0, 15): {'mean': 0.043, 'std': 0.009}, # Nose ->REye
( 0, 16): {'mean': 0.043, 'std': 0.009}, # Nose ->LEye
(15, 17): {'mean': 0.105, 'std': 0.021}, # REye ->REar
(16, 18): {'mean': 0.104, 'std': 0.021}, # LEye ->LEar
(14, 19): {'mean': 0.180, 'std': 0.036}, # LAnkle ->LBigToe
(19, 20): {'mean': 0.038, 'std': 0.008}, # LBigToe ->LSmallToe
(14, 21): {'mean': 0.044, 'std': 0.009}, # LAnkle ->LHeel
(11, 22): {'mean': 0.182, 'std': 0.036}, # RAnkle ->RBigToe
(22, 23): {'mean': 0.038, 'std': 0.008}, # RBigToe ->RSmallToe
(11, 24): {'mean': 0.044, 'std': 0.009}, # RAnkle ->RHeel
}
CONFIG['body25vis'] = {
'nJoints': 25,
'kintree': [
[8, 1], # 躯干放在最前面
[8, 9],
[8, 12],
[9, 10],
[12, 13],
[10, 11],
[13, 14],
[11, 22],
[14, 19],
[1, 2],
[1, 5],
[2, 3],
[3, 4],
[5, 6],
[6, 7],
[1, 0]]
}
CONFIG['handvis'] = {
'nJoints': 21,
'kintree': [
[0, 1],
[0, 5],
[0, 9],
[0, 13],
[0, 17],
[1, 2],
[2, 3],
[3, 4],
[5, 6],
[6, 7],
[7, 8],
[9, 10],
[10, 11],
[11, 12],
[13, 14],
[14, 15],
[15, 16],
[17, 18],
[18, 19],
[19, 20]
]
}
CONFIG['body15'] = {'nJoints': 15, 'root': 8,
'kintree':
[[ 1, 0],
[ 2, 1],
[ 3, 2],
[ 4, 3],
[ 5, 1],
[ 6, 5],
[ 7, 6],
[ 8, 1],
[ 9, 8],
[10, 9],
[11, 10],
[12, 8],
[13, 12],
[14, 13]], 'root': 8,}
CONFIG['body15']['joint_names'] = CONFIG['body25']['joint_names'][:15]
CONFIG['body15']['skeleton'] = {key: val for key, val in CONFIG['body25']['skeleton'].items() if key[0] < 15 and key[1] < 15}
CONFIG['body15']['kintree_order'] = CONFIG['body25']['kintree_order'][:14]
CONFIG['body15']['colors'] = CONFIG['body25']['colors'][:15]
CONFIG['body19'] = {'nJoints': 19, 'kintree': [[i, j] for (i, j) in CONFIG['body25']['kintree'] if i < 19 and j < 19]}
CONFIG['body19']['skeleton'] = {key: val for key, val in CONFIG['body25']['skeleton'].items() if key[0] < 19 and key[1] < 19}
CONFIG['panoptic'] = {
'nJoints': 19,
'joint_names': ['Neck', 'Nose', 'MidHip', 'LShoulder', 'LElbow', 'LWrist', 'LHip', 'LKnee', 'LAnkle', 'RShoulder','RElbow', 'RWrist', 'RHip','RKnee', 'RAnkle', 'LEye', 'LEar', 'REye', 'REar'],
'kintree': [[0, 1],
[0, 2],
[0, 3],
[3, 4],
[4, 5],
[0, 9],
[9, 10],
[10, 11],
[2, 6],
[2, 12],
[6, 7],
[7, 8],
[12, 13],
[13, 14]],
'colors': ['b' for _ in range(19)]
}
CONFIG['panoptic15'] = {
'nJoints': 15,
'root': 2,
'joint_names': CONFIG['panoptic']['joint_names'][:15],
'kintree': [[i, j] for (i, j) in CONFIG['panoptic']['kintree'] if i < 15 and j < 15],
'limb_mean': [0.1129,0.4957,0.1382,0.2547,0.2425,0.1374,0.2549,0.2437,0.1257,0.1256, 0.4641,0.4580,0.4643,0.4589],
'limb_std': [0.0164,0.0333,0.0078,0.0237,0.0233,0.0085,0.0233,0.0237,0.0076,0.0076, 0.0273,0.0247,0.0272,0.0242],
'colors': CONFIG['panoptic']['colors'][:15]
}
CONFIG['mpii_16'] = {
'nJoints': 16,
'joint_names': ['rankle', 'rknee', 'rhip', 'lhip', 'lknee', 'lankle', 'pelvis', 'thorax', 'upper_neck', 'head_top', 'rwrist', 'relbow', 'rshoulder', 'lshoulder', 'lelbow', 'lwrist'],
'kintree': [[0, 1], [1, 2], [2, 6], [6, 3], [3, 4], [4, 5], [6, 7], [7, 8], [8, 9], [10, 11], [11, 12], [12, 7], [13, 14], [14, 15], [13, 7]],
'colors': ['b' for _ in range(16)]
}
CONFIG['ochuman_19'] = {
'nJoints': 19,
'joint_names': ["right_shoulder", "right_elbow", "right_wrist",
"left_shoulder", "left_elbow", "left_wrist",
"right_hip", "right_knee", "right_ankle",
"left_hip", "left_knee", "left_ankle",
"head", "neck"] + ['right_ear', 'left_ear', 'nose', 'right_eye', 'left_eye'],
'kintree': [
[0, 1], [1, 2], [3, 4], [4, 5],
[6, 7], [7, 8], [9, 10], [10, 11],
[13, 0], [13, 3], [0, 3], [6, 9],
[12, 16], [16, 13], [16, 17], [16, 18], [18, 15], [17, 14],
],
'colors': ['b' for _ in range(19)]
}
CONFIG['chi3d_25'] = {
'nJoints': 25,
'joint_names': [],
'kintree': [[10, 9], [9, 8], [8, 11], [8, 14], [11, 12], [14, 15], [12, 13], [15, 16],
[8, 7], [7, 0], [0, 1], [0, 4], [1, 2], [4, 5], [2, 3], [5, 6],
[13, 21], [13, 22], [16, 23], [16, 24], [3, 17], [3, 18], [6, 19], [6, 20]],
'colors': ['b' for _ in range(25)]
}
CONFIG['chi3d_17'] = {
'nJoints': 17,
'joint_names': [],
'kintree': [[10, 9], [9, 8], [8, 11], [8, 14], [11, 12], [14, 15], [12, 13], [15, 16],
[8, 7], [7, 0], [0, 1], [0, 4], [1, 2], [4, 5], [2, 3], [5, 6],
],
'colors': ['b' for _ in range(17)]
}
CONFIG['hand'] = {'nJoints': 21, 'kintree':
[[ 1, 0],
[ 2, 1],
[ 3, 2],
[ 4, 3],
[ 5, 0],
[ 6, 5],
[ 7, 6],
[ 8, 7],
[ 9, 0],
[10, 9],
[11, 10],
[12, 11],
[13, 0],
[14, 13],
[15, 14],
[16, 15],
[17, 0],
[18, 17],
[19, 18],
[20, 19]],
'colors': [
'_k', '_k', '_k', '_k', '_r', '_r', '_r', '_r',
'_g', '_g', '_g', '_g', '_b', '_b', '_b', '_b',
'_y', '_y', '_y', '_y'],
'colorsrhand': [
'_pink', '_pink', '_pink', '_pink', '_mint', '_mint', '_mint', '_mint',
'_orange', '_orange', '_orange', '_orange', '_mint2', '_mint2', '_mint2', '_mint2',
'purple', 'purple', 'purple', 'purple'],
'joint_names':[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]
}
CONFIG['handl'] = CONFIG['hand']
CONFIG['handr'] = CONFIG['hand']
CONFIG['handlr'] = {
'nJoints': 42,
'colors': CONFIG['hand']['colors'] + CONFIG['hand']['colorsrhand'],
'joint_names': CONFIG['hand']['joint_names'] + CONFIG['hand']['joint_names'],
'kintree': np.vstack((np.array(CONFIG['hand']['kintree']), np.array(CONFIG['hand']['kintree'])+21)).tolist()
}
CONFIG['bodyhand'] = {'kintree':
[[ 1, 0],
[ 2, 1],
[ 3, 2],
[ 4, 3],
[ 5, 1],
[ 6, 5],
[ 7, 6],
[ 8, 1],
[ 9, 8],
[10, 9],
[11, 10],
[12, 8],
[13, 12],
[14, 13],
[15, 0],
[16, 0],
[17, 15],
[18, 16],
[19, 14],
[20, 19],
[21, 14],
[22, 11],
[23, 22],
[24, 11],
[26, 7], # handl
[27, 26],
[28, 27],
[29, 28],
[30, 7],
[31, 30],
[32, 31],
[33, 32],
[34, 7],
[35, 34],
[36, 35],
[37, 36],
[38, 7],
[39, 38],
[40, 39],
[41, 40],
[42, 7],
[43, 42],
[44, 43],
[45, 44],
[47, 4], # handr
[48, 47],
[49, 48],
[50, 49],
[51, 4],
[52, 51],
[53, 52],
[54, 53],
[55, 4],
[56, 55],
[57, 56],
[58, 57],
[59, 4],
[60, 59],
[61, 60],
[62, 61],
[63, 4],
[64, 63],
[65, 64],
[66, 65]
],
'nJoints': 67,
'colors': CONFIG['body25']['colors'] + CONFIG['hand']['colors'] + CONFIG['hand']['colors'],
'skeleton':{
( 0, 1): {'mean': 0.251, 'std': 0.050},
( 1, 2): {'mean': 0.169, 'std': 0.034},
( 2, 3): {'mean': 0.292, 'std': 0.058},
( 3, 4): {'mean': 0.275, 'std': 0.055},
( 1, 5): {'mean': 0.169, 'std': 0.034},
( 5, 6): {'mean': 0.295, 'std': 0.059},
( 6, 7): {'mean': 0.278, 'std': 0.056},
( 1, 8): {'mean': 0.566, 'std': 0.113},
( 8, 9): {'mean': 0.110, 'std': 0.022},
( 9, 10): {'mean': 0.398, 'std': 0.080},
(10, 11): {'mean': 0.402, 'std': 0.080},
( 8, 12): {'mean': 0.111, 'std': 0.022},
(12, 13): {'mean': 0.395, 'std': 0.079},
(13, 14): {'mean': 0.403, 'std': 0.081},
( 0, 15): {'mean': 0.053, 'std': 0.011},
( 0, 16): {'mean': 0.056, 'std': 0.011},
(15, 17): {'mean': 0.107, 'std': 0.021},
(16, 18): {'mean': 0.107, 'std': 0.021},
(14, 19): {'mean': 0.180, 'std': 0.036},
(19, 20): {'mean': 0.055, 'std': 0.011},
(14, 21): {'mean': 0.065, 'std': 0.013},
(11, 22): {'mean': 0.169, 'std': 0.034},
(22, 23): {'mean': 0.052, 'std': 0.010},
(11, 24): {'mean': 0.061, 'std': 0.012},
( 7, 26): {'mean': 0.045, 'std': 0.009},
(26, 27): {'mean': 0.042, 'std': 0.008},
(27, 28): {'mean': 0.035, 'std': 0.007},
(28, 29): {'mean': 0.029, 'std': 0.006},
( 7, 30): {'mean': 0.102, 'std': 0.020},
(30, 31): {'mean': 0.040, 'std': 0.008},
(31, 32): {'mean': 0.026, 'std': 0.005},
(32, 33): {'mean': 0.023, 'std': 0.005},
( 7, 34): {'mean': 0.101, 'std': 0.020},
(34, 35): {'mean': 0.043, 'std': 0.009},
(35, 36): {'mean': 0.029, 'std': 0.006},
(36, 37): {'mean': 0.024, 'std': 0.005},
( 7, 38): {'mean': 0.097, 'std': 0.019},
(38, 39): {'mean': 0.041, 'std': 0.008},
(39, 40): {'mean': 0.027, 'std': 0.005},
(40, 41): {'mean': 0.024, 'std': 0.005},
( 7, 42): {'mean': 0.095, 'std': 0.019},
(42, 43): {'mean': 0.033, 'std': 0.007},
(43, 44): {'mean': 0.020, 'std': 0.004},
(44, 45): {'mean': 0.018, 'std': 0.004},
( 4, 47): {'mean': 0.043, 'std': 0.009},
(47, 48): {'mean': 0.041, 'std': 0.008},
(48, 49): {'mean': 0.034, 'std': 0.007},
(49, 50): {'mean': 0.028, 'std': 0.006},
( 4, 51): {'mean': 0.101, 'std': 0.020},
(51, 52): {'mean': 0.041, 'std': 0.008},
(52, 53): {'mean': 0.026, 'std': 0.005},
(53, 54): {'mean': 0.024, 'std': 0.005},
( 4, 55): {'mean': 0.100, 'std': 0.020},
(55, 56): {'mean': 0.044, 'std': 0.009},
(56, 57): {'mean': 0.029, 'std': 0.006},
(57, 58): {'mean': 0.023, 'std': 0.005},
( 4, 59): {'mean': 0.096, 'std': 0.019},
(59, 60): {'mean': 0.040, 'std': 0.008},
(60, 61): {'mean': 0.028, 'std': 0.006},
(61, 62): {'mean': 0.023, 'std': 0.005},
( 4, 63): {'mean': 0.094, 'std': 0.019},
(63, 64): {'mean': 0.032, 'std': 0.006},
(64, 65): {'mean': 0.020, 'std': 0.004},
(65, 66): {'mean': 0.018, 'std': 0.004},
}
}
CONFIG['bodyhandface'] = {'kintree':
[[ 1, 0],
[ 2, 1],
[ 3, 2],
[ 4, 3],
[ 5, 1],
[ 6, 5],
[ 7, 6],
[ 8, 1],
[ 9, 8],
[10, 9],
[11, 10],
[12, 8],
[13, 12],
[14, 13],
[15, 0],
[16, 0],
[17, 15],
[18, 16],
[19, 14],
[20, 19],
[21, 14],
[22, 11],
[23, 22],
[24, 11],
[26, 7], # handl
[27, 26],
[28, 27],
[29, 28],
[30, 7],
[31, 30],
[32, 31],
[33, 32],
[34, 7],
[35, 34],
[36, 35],
[37, 36],
[38, 7],
[39, 38],
[40, 39],
[41, 40],
[42, 7],
[43, 42],
[44, 43],
[45, 44],
[47, 4], # handr
[48, 47],
[49, 48],
[50, 49],
[51, 4],
[52, 51],
[53, 52],
[54, 53],
[55, 4],
[56, 55],
[57, 56],
[58, 57],
[59, 4],
[60, 59],
[61, 60],
[62, 61],
[63, 4],
[64, 63],
[65, 64],
[66, 65],
[ 67, 68],
[ 68, 69],
[ 69, 70],
[ 70, 71],
[ 72, 73],
[ 73, 74],
[ 74, 75],
[ 75, 76],
[ 77, 78],
[ 78, 79],
[ 79, 80],
[ 81, 82],
[ 82, 83],
[ 83, 84],
[ 84, 85],
[ 86, 87],
[ 87, 88],
[ 88, 89],
[ 89, 90],
[ 90, 91],
[ 91, 86],
[ 92, 93],
[ 93, 94],
[ 94, 95],
[ 95, 96],
[ 96, 97],
[ 97, 92],
[ 98, 99],
[ 99, 100],
[100, 101],
[101, 102],
[102, 103],
[103, 104],
[104, 105],
[105, 106],
[106, 107],
[107, 108],
[108, 109],
[109, 98],
[110, 111],
[111, 112],
[112, 113],
[113, 114],
[114, 115],
[115, 116],
[116, 117],
[117, 110]
],
'nJoints': 118,
'skeleton':{
( 0, 1): {'mean': 0.251, 'std': 0.050},
( 1, 2): {'mean': 0.169, 'std': 0.034},
( 2, 3): {'mean': 0.292, 'std': 0.058},
( 3, 4): {'mean': 0.275, 'std': 0.055},
( 1, 5): {'mean': 0.169, 'std': 0.034},
( 5, 6): {'mean': 0.295, 'std': 0.059},
( 6, 7): {'mean': 0.278, 'std': 0.056},
( 1, 8): {'mean': 0.566, 'std': 0.113},
( 8, 9): {'mean': 0.110, 'std': 0.022},
( 9, 10): {'mean': 0.398, 'std': 0.080},
(10, 11): {'mean': 0.402, 'std': 0.080},
( 8, 12): {'mean': 0.111, 'std': 0.022},
(12, 13): {'mean': 0.395, 'std': 0.079},
(13, 14): {'mean': 0.403, 'std': 0.081},
( 0, 15): {'mean': 0.053, 'std': 0.011},
( 0, 16): {'mean': 0.056, 'std': 0.011},
(15, 17): {'mean': 0.107, 'std': 0.021},
(16, 18): {'mean': 0.107, 'std': 0.021},
(14, 19): {'mean': 0.180, 'std': 0.036},
(19, 20): {'mean': 0.055, 'std': 0.011},
(14, 21): {'mean': 0.065, 'std': 0.013},
(11, 22): {'mean': 0.169, 'std': 0.034},
(22, 23): {'mean': 0.052, 'std': 0.010},
(11, 24): {'mean': 0.061, 'std': 0.012},
( 7, 26): {'mean': 0.045, 'std': 0.009},
(26, 27): {'mean': 0.042, 'std': 0.008},
(27, 28): {'mean': 0.035, 'std': 0.007},
(28, 29): {'mean': 0.029, 'std': 0.006},
( 7, 30): {'mean': 0.102, 'std': 0.020},
(30, 31): {'mean': 0.040, 'std': 0.008},
(31, 32): {'mean': 0.026, 'std': 0.005},
(32, 33): {'mean': 0.023, 'std': 0.005},
( 7, 34): {'mean': 0.101, 'std': 0.020},
(34, 35): {'mean': 0.043, 'std': 0.009},
(35, 36): {'mean': 0.029, 'std': 0.006},
(36, 37): {'mean': 0.024, 'std': 0.005},
( 7, 38): {'mean': 0.097, 'std': 0.019},
(38, 39): {'mean': 0.041, 'std': 0.008},
(39, 40): {'mean': 0.027, 'std': 0.005},
(40, 41): {'mean': 0.024, 'std': 0.005},
( 7, 42): {'mean': 0.095, 'std': 0.019},
(42, 43): {'mean': 0.033, 'std': 0.007},
(43, 44): {'mean': 0.020, 'std': 0.004},
(44, 45): {'mean': 0.018, 'std': 0.004},
( 4, 47): {'mean': 0.043, 'std': 0.009},
(47, 48): {'mean': 0.041, 'std': 0.008},
(48, 49): {'mean': 0.034, 'std': 0.007},
(49, 50): {'mean': 0.028, 'std': 0.006},
( 4, 51): {'mean': 0.101, 'std': 0.020},
(51, 52): {'mean': 0.041, 'std': 0.008},
(52, 53): {'mean': 0.026, 'std': 0.005},
(53, 54): {'mean': 0.024, 'std': 0.005},
( 4, 55): {'mean': 0.100, 'std': 0.020},
(55, 56): {'mean': 0.044, 'std': 0.009},
(56, 57): {'mean': 0.029, 'std': 0.006},
(57, 58): {'mean': 0.023, 'std': 0.005},
( 4, 59): {'mean': 0.096, 'std': 0.019},
(59, 60): {'mean': 0.040, 'std': 0.008},
(60, 61): {'mean': 0.028, 'std': 0.006},
(61, 62): {'mean': 0.023, 'std': 0.005},
( 4, 63): {'mean': 0.094, 'std': 0.019},
(63, 64): {'mean': 0.032, 'std': 0.006},
(64, 65): {'mean': 0.020, 'std': 0.004},
(65, 66): {'mean': 0.018, 'std': 0.004},
(67, 68): {'mean': 0.012, 'std': 0.002},
(68, 69): {'mean': 0.013, 'std': 0.003},
(69, 70): {'mean': 0.014, 'std': 0.003},
(70, 71): {'mean': 0.012, 'std': 0.002},
(72, 73): {'mean': 0.014, 'std': 0.003},
(73, 74): {'mean': 0.014, 'std': 0.003},
(74, 75): {'mean': 0.015, 'std': 0.003},
(75, 76): {'mean': 0.013, 'std': 0.003},
(77, 78): {'mean': 0.014, 'std': 0.003},
(78, 79): {'mean': 0.014, 'std': 0.003},
(79, 80): {'mean': 0.015, 'std': 0.003},
(81, 82): {'mean': 0.009, 'std': 0.002},
(82, 83): {'mean': 0.010, 'std': 0.002},
(83, 84): {'mean': 0.010, 'std': 0.002},
(84, 85): {'mean': 0.010, 'std': 0.002},
(86, 87): {'mean': 0.009, 'std': 0.002},
(87, 88): {'mean': 0.009, 'std': 0.002},
(88, 89): {'mean': 0.008, 'std': 0.002},
(89, 90): {'mean': 0.008, 'std': 0.002},
(90, 91): {'mean': 0.009, 'std': 0.002},
(86, 91): {'mean': 0.008, 'std': 0.002},
(92, 93): {'mean': 0.009, 'std': 0.002},
(93, 94): {'mean': 0.009, 'std': 0.002},
(94, 95): {'mean': 0.009, 'std': 0.002},
(95, 96): {'mean': 0.009, 'std': 0.002},
(96, 97): {'mean': 0.009, 'std': 0.002},
(92, 97): {'mean': 0.009, 'std': 0.002},
(98, 99): {'mean': 0.016, 'std': 0.003},
(99, 100): {'mean': 0.013, 'std': 0.003},
(100, 101): {'mean': 0.008, 'std': 0.002},
(101, 102): {'mean': 0.008, 'std': 0.002},
(102, 103): {'mean': 0.012, 'std': 0.002},
(103, 104): {'mean': 0.014, 'std': 0.003},
(104, 105): {'mean': 0.015, 'std': 0.003},
(105, 106): {'mean': 0.012, 'std': 0.002},
(106, 107): {'mean': 0.009, 'std': 0.002},
(107, 108): {'mean': 0.009, 'std': 0.002},
(108, 109): {'mean': 0.013, 'std': 0.003},
(98, 109): {'mean': 0.016, 'std': 0.003},
(110, 111): {'mean': 0.021, 'std': 0.004},
(111, 112): {'mean': 0.009, 'std': 0.002},
(112, 113): {'mean': 0.008, 'std': 0.002},
(113, 114): {'mean': 0.019, 'std': 0.004},
(114, 115): {'mean': 0.018, 'std': 0.004},
(115, 116): {'mean': 0.008, 'std': 0.002},
(116, 117): {'mean': 0.009, 'std': 0.002},
(110, 117): {'mean': 0.020, 'std': 0.004},
}
}
CONFIG['face'] = {'nJoints': 70,
'kintree':[ [0,1],[1,2],[2,3],[3,4],[4,5],[5,6],[6,7],[7,8],[8,9],[9,10],[10,11],[11,12],[12,13],[13,14],[14,15],[15,16], #outline (ignored)
[17,18],[18,19],[19,20],[20,21], #right eyebrow
[22,23],[23,24],[24,25],[25,26], #left eyebrow
[27,28],[28,29],[29,30], #nose upper part
[31,32],[32,33],[33,34],[34,35], #nose lower part
[36,37],[37,38],[38,39],[39,40],[40,41],[41,36], #right eye
[42,43],[43,44],[44,45],[45,46],[46,47],[47,42], #left eye
[48,49],[49,50],[50,51],[51,52],[52,53],[53,54],[54,55],[55,56],[56,57],[57,58],[58,59],[59,48], #Lip outline
[60,61],[61,62],[62,63],[63,64],[64,65],[65,66],[66,67],[67,60] #Lip inner line
], 'colors': ['g' for _ in range(100)]}
CONFIG['h36m'] = {
'kintree': [[0, 1], [1, 2], [2, 3], [0, 4], [4, 5], [5, 6], [0, 7], [7, 8], [8, 9], [9, 10], [8, 11], [11, 12], [
12, 13], [8, 14], [14, 15], [15, 16]],
'color': ['r', 'r', 'r', 'g', 'g', 'g', 'k', 'k', 'k', 'k', 'g', 'g', 'g', 'r', 'r', 'r'],
'joint_names': [
'hip', # 0
'LHip', # 1
'LKnee', # 2
'LAnkle', # 3
'RHip', # 4
'RKnee', # 5
'RAnkle', # 6
'Spine (H36M)', # 7
'Neck', # 8
'Head (H36M)', # 9
'headtop', # 10
'LShoulder', # 11
'LElbow', # 12
'LWrist', # 13
'RShoulder', # 14
'RElbow', # 15
'RWrist', # 16
],
'nJoints': 17}
CONFIG['h36m_17'] = CONFIG['h36m']
CONFIG['total'] = compose(['body25', 'hand', 'hand', 'face'])
CONFIG['bodyhandface']['joint_names'] = CONFIG['body25']['joint_names']
CONFIG['keypoints2d'] = CONFIG['body25']
CONFIG['handl2d'] = CONFIG['hand']
CONFIG['handr2d'] = CONFIG['hand']
CONFIG['face2d'] = CONFIG['face']
CONFIG['mpbody'] = {}
CONFIG['mpbody']['kintree'] = [
(0, 1),
(0, 4),
(1, 2),
(2, 3),
(3, 7),
(4, 5),
(5, 6),
(6, 8),
(9, 10),
(11, 12),
(11, 13),
(11, 23),
(12, 14),
(12, 24),
(13, 15),
(14, 16),
(15, 17),
(15, 19),
(15, 21),
(16, 18),
(16, 20),
(16, 22),
(17, 19),
(18, 20),
(23, 24),
(23, 25),
(24, 26),
(25, 27),
(26, 28),
(27, 29),
(27, 31),
(28, 30),
(28, 32),
(29, 31),
(30, 32)
]
CONFIG['mpbody']['nJoints'] = 33
CONFIG['mpbody']['colors'] = ['b', 'r', 'b', 'b', 'b', 'r', 'r', 'r', 'k', 'k', 'b', 'b', 'r', 'r', 'b', 'r',
'y', 'r', 'y', 'g', 'b', 'g', 'y', 'g', 'k', 'b', 'r', 'b', 'r', 'b', 'b', 'r', 'r', 'b', 'b']
CONFIG['mpface'] = {}
CONFIG['mpface']['kintree'] = [(270, 409), (176, 149), (37, 0), (84, 17), (318, 324), (293, 334), (386, 385), (7, 163), (33, 246), (17, 314), (374, 380), (251, 389), (390, 373), (267, 269), (295, 285), (389, 356), (173, 133), (33, 7), (377, 152), (158, 157), (405, 321), (54, 103), (263, 466), (324, 308), (67, 109), (409, 291), (157, 173), (454, 323), (388, 387), (78, 191), (148, 176), (311, 310), (39, 37), (249, 390), (144, 145), (402, 318), (80, 81), (310, 415), (153, 154), (384, 398), (397, 365), (234, 127), (103, 67), (282, 295), (338, 297), (378, 400), (127, 162), (321, 375), (375, 291), (317, 402), (81, 82), (154, 155), (91, 181), (334, 296), (297, 332), (269, 270), (150, 136), (109, 10), (356, 454), (58, 132), (312, 311), (152, 148), (415, 308), (161, 160), (296, 336), (65, 55), (61, 146), (78, 95), (380, 381), (398, 362), (361, 288), (246, 161), (162, 21), (0, 267), (82, 13), (132, 93), (314, 405), (10, 338), (178, 87), (387, 386), (381, 382), (70, 63), (61, 185), (14, 317), (105, 66), (300, 293), (382, 362), (88, 178), (185, 40), (46, 53), (284, 251), (400, 377), (136, 172), (323, 361), (13, 312), (21, 54), (172, 58), (373, 374), (163, 144), (276, 283), (53, 52), (365, 379), (379, 378), (146, 91), (263, 249), (283, 282), (87, 14), (145, 153), (155, 133), (93, 234), (66, 107), (95, 88), (159, 158), (52, 65), (332, 284), (40, 39), (191, 80), (63, 105), (181, 84), (466, 388), (149, 150), (288, 397), (160, 159), (385, 384)]
CONFIG['mpface']['nJoints'] = 468
CONFIG['mptotal'] = compose(['mpbody', 'hand', 'hand', 'mpface'])
CONFIG['bodyhandmpface'] = compose(['body25', 'hand', 'hand', 'mpface'])
CONFIG['iris'] = {
'nJoints': 10,
'kintree': [[0, 1], [1, 2], [2, 3], [3, 4]]
}
CONFIG['onepoint'] = {
'nJoints': 1,
'kintree': []
}
CONFIG['up'] = {
'nJoints': 79,
'kintree': []
}
CONFIG['ochuman'] = {
'nJoints': 19,
'kintree': [[0, 1], [1, 2], [3, 4], [4, 5], [6, 7], [7, 8], [9, 10], [10, 11], [12, 13], [14, 17], [15, 18], [17, 16], [18, 16]]
}
CONFIG['mpii'] = {
'nJoints': 16,
'kintree': [[0, 1], [1, 2], [3, 4], [4, 5], [2, 6], [3, 6], [6, 7], [7, 8], [8, 9], [10, 11], [11, 12], [7, 12], [7, 13], \
[13, 14], [14, 15]],
'joint_names': ['rank', 'rkne', 'rhip', 'lhip', 'lkne', 'lank', 'pelv', 'thrx', 'neck', 'head', 'rwri', 'relb', 'rsho', 'lsho', 'lelb', 'lwri'],
}
CONFIG['h36mltri_17'] = {
'kintree': [(0, 1), (1, 2), (2, 6), (5, 4), (4, 3), (3, 6), (6, 7), (7, 8), (8, 16), (9, 16), (8, 12), (11, 12), (10, 11), (8, 13), (13, 14), (14, 15)],
'color': ['r', 'r', 'r', 'g', 'g', 'g', 'k', 'k', 'k', 'k', 'g', 'g', 'g', 'r', 'r', 'r'],
'joint_names': CONFIG['mpii']['joint_names'] + ['Neck/Nose'],
'nJoints': 17}
The provided code snippet includes necessary dependencies for implementing the `create_bbox_mv` function. Write a Python function `def create_bbox_mv(self, param, **kwargs)` to solve the following problem:
add a new bounding box
Here is the function:
def create_bbox_mv(self, param, **kwargs):
    """Append a new person annotation in the selected multi-view camera.

    The drag rectangle (param['start']..param['end']) is expressed in the
    merged-canvas coordinates; it is shifted by the selected view's offset
    from param['ranges'] before being stored. The keypoint list is sized by
    CONFIG[param['body']]['nJoints']. Clears the drag state afterwards.
    """
    start, end = param['start'], param['end']
    if start is None or end is None:
        return 0
    nv = param['select']['camera']
    if nv == -1:
        return 0
    ranges = param['ranges']
    # Convert from merged-canvas coordinates into the view's local frame.
    start = (start[0]-ranges[nv][0], start[1]-ranges[nv][1])
    end = (end[0]-ranges[nv][0], end[1]-ranges[nv][1])
    annots = param['annots'][nv]['annots']
    # Use .get so records keyed with 'id' instead of 'personID' (as handled
    # elsewhere in this module) do not KeyError.
    nowids = [d.get('personID', d.get('id')) for d in annots]
    body = param['body']
    bbox_name, kpts_name = param['bbox_name'], param['kpts_name']
    maxID = 0 if len(nowids) == 0 else max(nowids) + 1
    data = {
        'personID': maxID,
        bbox_name: [start[0], start[1], end[0], end[1], 1],
        kpts_name: [[0., 0., 0.] for _ in range(CONFIG[body]['nJoints'])]
    }
    annots.append(data)
    # Reset the drag state so the same rectangle is not inserted twice.
    param['start'], param['end'] = None, None
13,241 | import numpy as np
from ..dataset.config import CONFIG
The provided code snippet includes necessary dependencies for implementing the `delete_bbox` function. Write a Python function `def delete_bbox(self, param, **kwargs)` to solve the following problem:
delete the person
Here is the function:
def delete_bbox(self, param, **kwargs):
    """Remove the currently selected person, then clear the selection."""
    bbox_name = param['bbox_name']
    idx = param['select'][bbox_name]
    if idx != -1:
        param['annots']['annots'].pop(idx)
        param['select'][bbox_name] = -1
    return 0
13,242 | import numpy as np
from ..dataset.config import CONFIG
The provided code snippet includes necessary dependencies for implementing the `delete_all_bbox` function. Write a Python function `def delete_all_bbox(self, param, **kwargs)` to solve the following problem:
delete the person
Here is the function:
def delete_all_bbox(self, param, **kwargs):
    """Remove every person annotation and reset the current selection."""
    param['annots']['annots'] = []
    param['select'][param['bbox_name']] = -1
    return 0
13,243 | import numpy as np
from ..dataset.config import CONFIG
def callback_select_image(click, select, ranges, **kwargs):
    """Store in select['camera'] the index of the view containing *click*.

    ranges holds one (left, top, right, bottom) rectangle per view in the
    merged canvas. Does nothing when click is None or lies in no view.
    """
    if click is None:
        return 0
    ranges = np.asarray(ranges)
    pt = np.asarray(click).reshape(1, -1)
    inside = (pt[:, 0] > ranges[:, 0]) & (pt[:, 0] < ranges[:, 2]) & \
             (pt[:, 1] > ranges[:, 1]) & (pt[:, 1] < ranges[:, 3])
    if inside.any():
        # flatnonzero()[0] takes the first matching view. The previous
        # int(np.where(inside)[0]) raised when several views overlapped and
        # relied on size-1 ndarray -> int conversion, deprecated in NumPy.
        select['camera'] = int(np.flatnonzero(inside)[0])
13,244 | import numpy as np
from ..dataset.config import CONFIG
MIN_PIXEL = 50
def callback_select_bbox_center(click, annots, select, bbox_name, min_pixel=-1, **kwargs):
def callback_select_image_bbox(click, start, end, select, ranges, annots, bbox_name='bbox', **kwargs):
    """Select the camera view under *click*, then try to select a bbox in it.

    First resolves which view rectangle in *ranges* contains the click and
    stores its index in select['camera']; then converts the click into that
    view's local coordinates and delegates bbox picking to
    callback_select_bbox_center with a doubled pixel tolerance.
    """
    if click is None:
        return 0
    ranges = np.asarray(ranges)
    pt = np.asarray(click).reshape(1, -1)
    inside = (pt[:, 0] > ranges[:, 0]) & (pt[:, 0] < ranges[:, 2]) & \
             (pt[:, 1] > ranges[:, 1]) & (pt[:, 1] < ranges[:, 3])
    if inside.any():
        # flatnonzero()[0] takes the first matching view. The previous
        # int(np.where(inside)[0]) raised when several views overlapped and
        # relied on size-1 ndarray -> int conversion, deprecated in NumPy.
        select['camera'] = int(np.flatnonzero(inside)[0])
    # Check whether the click also lands inside a person bbox of that view
    nv = select['camera']
    if nv == -1:
        return 0
    click_view = pt[0] - ranges[nv][:2]
    callback_select_bbox_center(click_view, annots[nv], select, bbox_name, min_pixel=MIN_PIXEL*2)
13,245 | import numpy as np
from ..dataset.config import CONFIG
def findNearestPoint(points, click):
    """Find the point nearest to a click within the MIN_PIXEL tolerance.

    points: ndarray of shape (N, 2) or (M, N, 2); click: [x, y].
    Returns (found, index): found is True only when the closest point lies
    within MIN_PIXEL; index is the (possibly multi-dimensional) position of
    that point, or (-1, -1) when nothing is close enough.
    """
    click = np.asarray(click)
    # Broadcast the click against the point array's dimensionality.
    target = click
    if points.ndim == 2:
        target = click[None, :]
    elif points.ndim == 3:
        target = click[None, None, :]
    dist = np.linalg.norm(points - target, axis=-1)
    if dist.min() >= MIN_PIXEL:
        return False, (-1, -1)
    return True, np.unravel_index(dist.argmin(), dist.shape)
def callback_move_bbox(start, end, click, select, annots, ranges, bbox_name='bbox', **kwargs):
    """Drag-edit the selected bbox: move a grabbed corner or the whole box.

    start/end are the drag endpoints in merged-canvas coordinates; ranges[nv]
    gives each view's offsets inside that canvas. The grabbed handle index is
    cached in select['corner'] so it persists across calls during one drag.
    Mutates the selected annotation's bbox in place.
    """
    if start is None or end is None:
        return 0
    nv, nb = select['camera'], select[bbox_name]
    if nv == -1 or nb == -1:
        return 0
    # Convert both drag endpoints into the selected view's local frame.
    start = (start[0]-ranges[nv][0], start[1]-ranges[nv][1])
    end = (end[0]-ranges[nv][0], end[1]-ranges[nv][1])
    annots = annots[nv]['annots']
    # Check whether the drag start point is near one of the bbox handles
    i = select[bbox_name]
    if select['corner'] == -1:
        l, t, r, b = annots[i][bbox_name][:4]
        # Four corners plus the box center (handle index 4).
        corners = np.array([(l, t), (l, b), (r, t), (r, b), ((l+r)/2, (t+b)/2)])
        flag, minid = findNearestPoint(corners, start)
        if flag:
            select['corner'] = minid[0]
        else:
            # Fall back to testing the drag end point against the handles.
            flag, minid = findNearestPoint(corners, end)
            if flag:
                select['corner'] = minid[0]
            else:
                select['corner'] = -1
    if select['corner'] == -1:
        return 0
    x, y = end
    # Move the corner
    if select['corner'] < 4:
        # Map the grabbed corner to the (x, y) slots it controls in the bbox.
        (i, j) = [(0, 1), (0, 3), (2, 1), (2, 3)][select['corner']]
        data = annots[select[bbox_name]]
        data[bbox_name][i] = x
        data[bbox_name][j] = y
    # Move the center
    else:
        # Handle 4 is the center: translate, preserving width and height.
        bbox = annots[select[bbox_name]][bbox_name]
        w = (bbox[2] - bbox[0])/2
        h = (bbox[3] - bbox[1])/2
        bbox[0] = x - w
        bbox[1] = y - h
        bbox[2] = x + w
        bbox[3] = y + h
13,246 | import numpy as np
The provided code snippet includes necessary dependencies for implementing the `set_face_unvisible` function. Write a Python function `def set_face_unvisible(self, param, **kwargs)` to solve the following problem:
mark the face keypoints of the selected person as invisible
Here is the function:
def set_face_unvisible(self, param, **kwargs):
    """Zero the confidence of the selected person's face keypoints."""
    select = param['select']
    bbox_name, kpts_name = param['bbox_name'], param['kpts_name']
    active = select[bbox_name]
    if active == -1:
        return 0
    keypoints = param['annots']['annots'][active][kpts_name]
    # Indices 15-18 are the eye/ear keypoints in the body25 layout
    # used elsewhere in this module — TODO confirm for other skeletons.
    for joint in (15, 16, 17, 18):
        keypoints[joint][-1] = 0.
13,247 | import shutil
import cv2
import os
from tqdm import tqdm
from .basic_keyboard import print_help, register_keys
from .basic_visualize import plot_text, resize_to_screen, merge
from .basic_callback import point_callback, CV_KEY, get_key
from .bbox_callback import callback_select_image
from .file_utils import load_annot_to_tmp, read_json, save_annot
import copy
def get_valid_yn():
    """Prompt on stdin until the user answers 'y' or 'n'; return the answer."""
    while True:
        answer = input('Saving this annotations? [y/n]')
        if answer in ('y', 'n'):
            return answer
        print('Please specify [y/n]')
13,248 | import shutil
import cv2
import os
from tqdm import tqdm
from .basic_keyboard import print_help, register_keys
from .basic_visualize import plot_text, resize_to_screen, merge
from .basic_callback import point_callback, CV_KEY, get_key
from .bbox_callback import callback_select_image
from .file_utils import load_annot_to_tmp, read_json, save_annot
import copy
def load_parser():
    """Construct the shared command-line parser for the annotation tools."""
    import argparse
    parser = argparse.ArgumentParser()
    add = parser.add_argument
    add('path', type=str)
    add('--out', type=str, default=None)
    add('--sub', type=str, nargs='+', default=[],
        help='the sub folder lists when in video mode')
    add('--from_file', type=str, default=None)
    add('--image', type=str, default='images')
    add('--annot', type=str, default='annots')
    add('--body', type=str, default='body25')
    add('--step', type=int, default=100)
    add('--vis', action='store_true')
    add('--debug', action='store_true')
    add('--ext', type=str, default='.jpg', choices=['.jpg', '.png'])
    # frame-range selection
    add('--start', type=int, default=0, help='frame start')
    add('--end', type=int, default=100000, help='frame end')
    return parser
13,249 | import shutil
import cv2
import os
from tqdm import tqdm
from .basic_keyboard import print_help, register_keys
from .basic_visualize import plot_text, resize_to_screen, merge
from .basic_callback import point_callback, CV_KEY, get_key
from .bbox_callback import callback_select_image
from .file_utils import load_annot_to_tmp, read_json, save_annot
import copy
def read_json(path):
    """Load and return the JSON document stored at *path*."""
    with open(path) as fp:
        return json.load(fp)
def parse_parser(parser):
    """Parse command-line args and resolve the list of sub-folders to process.

    Three sources for args.sub, in priority order:
    1. --from_file *.txt: non-comment lines (YouTube URLs stripped to ids),
       matched against folders under <path>/<image>, including 'id+clip' splits.
    2. --from_file *.json: the 'vid' field of each record.
    3. neither: every sub-directory of <path>/<image> (numeric names sorted
       numerically); raises FileNotFoundError if the image root is missing.
    Prints a summary and returns the parsed namespace.
    """
    import os
    from os.path import join
    args = parser.parse_args()
    if args.from_file is not None and args.from_file.endswith('.txt'):
        assert os.path.exists(args.from_file), args.from_file
        with open(args.from_file) as f:
            datas = f.readlines()
        # Skip comment lines; reduce full YouTube URLs to their video ids.
        subs = [d for d in datas if not d.startswith('#')]
        subs = [d.rstrip().replace('https://www.youtube.com/watch?v=', '') for d in subs]
        newsubs = sorted(os.listdir(join(args.path, args.image)))
        clips = []
        for newsub in newsubs:
            if newsub in subs:
                continue
            # Folders named '<id>+<clip>' belong to video '<id>'.
            if newsub.split('+')[0] in subs:
                clips.append(newsub)
        for sub in subs:
            if os.path.exists(join(args.path, args.image, sub)):
                clips.append(sub)
        args.sub = sorted(clips)
    elif args.from_file is not None and args.from_file.endswith('.json'):
        data = read_json(args.from_file)
        args.sub = sorted([v['vid'] for v in data])
    elif len(args.sub) == 0:
        if not os.path.exists(join(args.path, args.image)):
            print('{} not exists, Please run extract_image first'.format(join(args.path, args.image)))
            raise FileNotFoundError
        subs = sorted(os.listdir(join(args.path, args.image)))
        # Keep real directories only; '._*' are macOS metadata artifacts.
        subs = [s for s in subs if os.path.isdir(join(args.path, args.image, s)) and not s.startswith('._')]
        # Numeric folder names sort numerically (so '2' precedes '10').
        if len(subs) > 0 and subs[0].isdigit():
            subs = sorted(subs, key=lambda x:int(x))
        args.sub = subs
    helps = """
    Demo code for annotation:
    - Input : {}
    - => "{}"
    - => {}
    """.format(args.path, '", "'.join(args.sub), args.annot)
    print(helps)
    return args
13,250 | from os.path import join
import os
from glob import glob
import numpy as np
from easymocap.dataset.config import coco17tobody25
from ..mytools.vis_base import merge, plot_keypoints_auto, plot_keypoints_total
from ..mytools.camera_utils import Undistort, unproj, read_cameras
from ..mytools.file_utils import read_json, write_keypoints3d, save_json
import cv2
from tqdm import tqdm
from ..estimator.wrapper_base import bbox_from_keypoints
from ..annotator.file_utils import save_annot
from ..mytools.debug_utils import log_time, myerror, mywarn, log
import time
# Per-track history of recent bboxes, keyed by track name (module-level state).
# NOTE(review): the history list is never trimmed, so it grows unboundedly.
smooth_bbox_cache = {}
def smooth_bbox(bbox, name, W=5):
    """Temporally smooth a bbox [l, t, r, b, conf] over a window of W frames.

    Keeps a per-*name* history in smooth_bbox_cache and returns the
    confidence-weighted mean box extrapolated by the mean velocity,
    with the mean confidence appended.
    """
    # first sighting: seed the history with W copies of the current box
    if name not in smooth_bbox_cache.keys():
        smooth_bbox_cache[name] = [bbox] * W
    smooth_bbox_cache[name].append(bbox)
    # last W entries plus the current box again (current frame counted twice)
    bbox_ = np.stack(smooth_bbox_cache[name][-W:] + [bbox])
    # confidence-weighted average of the four coordinates
    bbox_mean = np.sum(bbox_[:, :4] * bbox_[:, 4:], axis=0)/(1e-5 + np.sum(bbox_[:, 4:], axis=0))
    # NOTE(review): .mean() averages over frames AND coordinates, giving one
    # scalar velocity shared by all four coords — confirm intended
    # (a per-coordinate velocity would be .mean(axis=0)).
    vel_mean = (bbox_[1:, :4] - bbox_[:-1, :4]).mean()
    # NOTE(review): '*' binds tighter than '//', so this is
    # (vel_mean * (W-1)) // 2 — a floor division on floats; verify.
    bbox_pred = bbox_mean[:4] + vel_mean * (W-1)//2
    conf_mean = bbox_[:, 4].mean()
    bbox_ = list(bbox_pred[:4]) + [conf_mean]
    return bbox_
13,251 | from os.path import join
import os
from glob import glob
import numpy as np
from easymocap.dataset.config import coco17tobody25
from ..mytools.vis_base import merge, plot_keypoints_auto, plot_keypoints_total
from ..mytools.camera_utils import Undistort, unproj, read_cameras
from ..mytools.file_utils import read_json, write_keypoints3d, save_json
import cv2
from tqdm import tqdm
from ..estimator.wrapper_base import bbox_from_keypoints
from ..annotator.file_utils import save_annot
from ..mytools.debug_utils import log_time, myerror, mywarn, log
import time
def myerror(text):
    # Report *text* at 'error' level; delegates to myprint, which is not
    # defined in this snippet (presumably the debug_utils printer — confirm).
    myprint(text, 'error')
def get_allname(root0, subs, ranges, root, ext, **kwargs):
    """Collect one record per selected frame of every sub-sequence.

    Args:
        root0: dataset root directory.
        subs: list of sub-sequence (view) names.
        ranges: [start, end, step]; end == -1 means "until the last image".
        root: folder under root0 that holds the per-sub image folders.
        ext: image extension including the dot, e.g. '.jpg'.
        **kwargs: ignored (kept for caller compatibility).

    Returns:
        list of dicts with keys sub/index/frame/nv/nf/imgname, where
        'index' is a running counter, 'nv' the view index and 'nf' the
        frame index inside the selected range.

    Raises:
        ValueError: when a sub-sequence has fewer images than the range end.
    """
    image_names = []
    count = 0
    # enumerate yields the view index directly instead of the original
    # repeated O(n) subs.index(sub) lookups (one of which was unused)
    for nv, sub in enumerate(subs):
        imgnames = sorted(glob(join(root0, root, sub, '*'+ext)))
        if len(imgnames) == 0:
            myerror('No image found in {}'.format(join(root0, root, sub)))
            continue
        if ranges[1] == -1:
            _ranges = [ranges[0], len(imgnames), ranges[-1]]
        else:
            _ranges = ranges
        if len(imgnames) < _ranges[1]:
            raise ValueError('The number of images in {} is less than the range: {} vs {}'.format(join(root0, root, sub), len(imgnames), _ranges[1]))
        for nnf, nf in enumerate(range(*_ranges)):
            image_names.append({
                'sub': sub,
                'index': count,
                # frame id parsed from the numeric file stem, e.g. '000012.jpg' -> 12
                'frame': int(os.path.basename(imgnames[nf]).split('.')[0]),
                'nv': nv,
                'nf': nnf,
                'imgname': imgnames[nf],
            })
            count += 1
    return image_names
13,252 | from os.path import join
import os
from glob import glob
import numpy as np
from easymocap.dataset.config import coco17tobody25
from ..mytools.vis_base import merge, plot_keypoints_auto, plot_keypoints_total
from ..mytools.camera_utils import Undistort, unproj, read_cameras
from ..mytools.file_utils import read_json, write_keypoints3d, save_json
import cv2
from tqdm import tqdm
from ..estimator.wrapper_base import bbox_from_keypoints
from ..annotator.file_utils import save_annot
from ..mytools.debug_utils import log_time, myerror, mywarn, log
import time
def crop_image(img, bbox, crop_square=True):
    """Crop *img* to *bbox*; optionally squarify, pad with black and resize to 256x256.

    *bbox* is (left, top, right, bottom, confidence); a confidence below
    0.001 is treated as a failed detection and falls back to the full frame.
    """
    x0, y0, x1, y1, conf = bbox
    H, W = img.shape[0], img.shape[1]
    # a (nearly) zero-confidence box means the detection failed
    if conf < 0.001:
        x0, y0 = 0, 0
        x1, y1 = W, H
    if crop_square:
        # grow the shorter side symmetrically so the box becomes square
        gap = (y1 - y0) - (x1 - x0)
        if gap > 0:
            x0 -= gap//2
            x1 += gap//2
        else:
            x_gap = -gap
            y0 -= x_gap//2
            y1 += x_gap//2
    # round to the nearest pixel and clamp to the image bounds
    x0 = max(0, int(x0+0.5))
    y0 = max(0, int(y0+0.5))
    x1 = min(W, int(x1+0.5))
    y1 = min(H, int(y1+0.5))
    patch = img[y0:y1, x0:x1, :]
    if crop_square:
        # pad with black so the patch is (nearly) square before resizing
        h, w = patch.shape[0], patch.shape[1]
        if h < w - 1:
            miss = w - h
            top = np.zeros((miss//2, w, 3), dtype=np.uint8)
            bottom = np.zeros((miss - miss//2, w, 3), dtype=np.uint8)
            patch = np.concatenate([top, patch, bottom], axis=0)
        elif h > w + 1:
            miss = h - w
            left = np.zeros((h, miss//2, 3), dtype=np.uint8)
            right = np.zeros((h, miss - miss//2, 3), dtype=np.uint8)
            patch = np.concatenate([left, patch, right], axis=1)
        patch = cv2.resize(patch, (256, 256))
    return patch
13,253 | from os.path import join
import os
from glob import glob
import numpy as np
from easymocap.dataset.config import coco17tobody25
from ..mytools.vis_base import merge, plot_keypoints_auto, plot_keypoints_total
from ..mytools.camera_utils import Undistort, unproj, read_cameras
from ..mytools.file_utils import read_json, write_keypoints3d, save_json
import cv2
from tqdm import tqdm
from ..estimator.wrapper_base import bbox_from_keypoints
from ..annotator.file_utils import save_annot
from ..mytools.debug_utils import log_time, myerror, mywarn, log
import time
# Project logo loaded once at import time, with alpha channel preserved.
# NOTE(review): cv2.imread returns None when logo.png is missing, which
# would make add_logo fail — confirm the file ships with the package.
logo = cv2.imread(join(os.path.dirname(__file__), '..', '..', 'logo.png'), cv2.IMREAD_UNCHANGED)
def add_logo(img, logo_size=0.1):
    """Alpha-blend the project logo into the top-left corner of *img*.

    The logo is scaled so its height is *logo_size* of the image height.
    Modifies *img* in place and also returns it.
    """
    H, W = img.shape[:2]
    scale = H*logo_size / logo.shape[0]
    logo_ = cv2.resize(logo, (int(logo.shape[1]*scale), int(logo.shape[0]*scale)), interpolation=cv2.INTER_NEAREST)
    local = img[:logo_.shape[0], :logo_.shape[1], :]
    # 4th channel (alpha) drives the blend weight
    mask = logo_[..., 3:]/255.
    local = 1.*logo_[..., :3]*mask + local*(1.-mask)
    local = local.astype(np.uint8)
    img[:logo_.shape[0], :logo_.shape[1], :] = local
    return img
13,254 | from os.path import join
import os
from glob import glob
import numpy as np
from easymocap.dataset.config import coco17tobody25
from ..mytools.vis_base import merge, plot_keypoints_auto, plot_keypoints_total
from ..mytools.camera_utils import Undistort, unproj, read_cameras
from ..mytools.file_utils import read_json, write_keypoints3d, save_json
import cv2
from tqdm import tqdm
from ..estimator.wrapper_base import bbox_from_keypoints
from ..annotator.file_utils import save_annot
from ..mytools.debug_utils import log_time, myerror, mywarn, log
import time
def create_skeleton_model(nJoints):
    """Build a SkelModelFast configured for a body (default) or, when
    nJoints == 21, for a hand skeleton with thinner joints."""
    from ..visualize.skelmodel import SkelModelFast
    if nJoints == 21:
        body_type, joint_radius = 'handvis', 0.005
    else:
        body_type, joint_radius = 'body25vis', 0.02
    return SkelModelFast(body_type=body_type, joint_radius=joint_radius,
                         vis_type='cone', res=20)
13,255 | from ..annotator.file_utils import read_json
from .wrapper_base import check_result, create_annot_file, save_annot
from glob import glob
from os.path import join
from tqdm import tqdm
import os
import cv2
import numpy as np
def detect_frame(detector, img, pid=0, only_bbox=False):
    """Run *detector* on a single frame and convert its detections into
    annotation dicts.

    Person IDs are assigned sequentially starting from *pid*; keypoints
    are included unless *only_bbox* is set.
    """
    detections = detector.detect([img], only_bbox=only_bbox)[0]
    annots = []
    for offset, det in enumerate(detections):
        record = {
            'bbox': [float(v) for v in det['bbox']],
            'personID': pid + offset,
            'isKeyframe': False
        }
        if not only_bbox:
            record['keypoints'] = det['keypoints'].tolist()
        annots.append(record)
    return annots
13,256 | from ..annotator.file_utils import read_json
from .wrapper_base import check_result, create_annot_file, save_annot
from glob import glob
from os.path import join
from tqdm import tqdm
import os
import cv2
import numpy as np
def check_result(image_root, annot_root):
    """Return True when *annot_root* exists and holds exactly as many
    entries as *image_root* (one annotation per image)."""
    if not os.path.exists(annot_root):
        return False
    # compare the number of images with the number of annotation files
    n_images = len(os.listdir(image_root))
    n_annots = len(os.listdir(annot_root))
    print('Check {} == {}'.format(n_images, n_annots))
    return n_images == n_annots
def create_annot_file(annotname, imgname):
    """Create, save and return a fresh (empty) annotation dict for one image.

    The stored filename is the image path relative to its 'images'
    directory. NOTE(review): `index('images')` raises ValueError when the
    path contains no 'images' component — confirm all callers comply.
    """
    assert os.path.exists(imgname), imgname
    # read the image only to record its dimensions
    img = cv2.imread(imgname)
    height, width = img.shape[0], img.shape[1]
    imgnamesep = imgname.split(os.sep)
    filename = os.sep.join(imgnamesep[imgnamesep.index('images'):])
    annot = {
        'filename':filename,
        'height':height,
        'width':width,
        'annots': [],
        'isKeyframe': False
    }
    save_annot(annotname, annot)
    return annot
def extract_bbox(image_root, annot_root, ext, **config):
    """Run YOLOv4 person detection on every image under *image_root* and
    write one annotation JSON per image into *annot_root*.

    Skips the work when existing results look complete, unless
    config['force'] is set. Remaining config entries are passed to YOLOv4.
    """
    force = config.pop('force')
    if check_result(image_root, annot_root) and not force:
        return 0
    import torch
    from .YOLOv4 import YOLOv4
    # fall back to CPU when CUDA is unavailable
    device = torch.device('cuda') \
        if torch.cuda.is_available() else torch.device('cpu')
    detector = YOLOv4(device=device, **config)
    imgnames = sorted(glob(join(image_root, '*'+ext)))
    # retry with .png when the expected extension matches nothing
    if len(imgnames) == 0:
        ext = '.png'
        imgnames = sorted(glob(join(image_root, '*'+ext)))
    # run_yolo(image_root, )
    for imgname in tqdm(imgnames, desc='{:10s}'.format(os.path.basename(annot_root))):
        base = os.path.basename(imgname).replace(ext, '')
        annotname = join(annot_root, base+'.json')
        annot = create_annot_file(annotname, imgname)
        image = cv2.imread(imgname)
        # detector expects RGB input
        image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        detections = detector.predict_single(image_rgb)
        annots = []
        pid = 0
        for i in range(len(detections)):
            annot_ = {
                'bbox': [float(d) for d in detections[i]],
                'isKeyframe': False
            }
            # squared longer side as a size proxy for ordering people
            annot_['area'] = max(annot_['bbox'][2] - annot_['bbox'][0], annot_['bbox'][3] - annot_['bbox'][1])**2
            annots.append(annot_)
        # largest person first
        annots.sort(key=lambda x:-x['area'])
        # re-assign the person ID
        for i in range(len(annots)):
            annots[i]['personID'] = i + pid
        annot['annots'] = annots
        save_annot(annotname, annot)
13,257 | from ..annotator.file_utils import read_json
from .wrapper_base import check_result, create_annot_file, save_annot
from glob import glob
from os.path import join
from tqdm import tqdm
import os
import cv2
import numpy as np
def read_json(path):
    """Parse the JSON file at *path* and return the decoded object."""
    with open(path, 'r') as handle:
        content = json.load(handle)
    return content
def extract_hrnet(image_root, annot_root, ext, **config):
    """Estimate 2D keypoints with HRNet for people already detected.

    Reads the existing per-image annotation JSONs (which must contain
    bboxes), fills in 'keypoints' for each detection and saves them back.
    """
    config.pop('force')
    import torch
    imgnames = sorted(glob(join(image_root, '*'+ext)))
    import torch  # NOTE(review): duplicate import, harmless
    device = torch.device('cuda') \
        if torch.cuda.is_available() else torch.device('cpu')
    from .HRNet import SimpleHRNet
    estimator = SimpleHRNet(device=device, **config)
    for imgname in tqdm(imgnames, desc='{:10s}'.format(os.path.basename(annot_root))):
        base = os.path.basename(imgname).replace(ext, '')
        annotname = join(annot_root, base+'.json')
        annots = read_json(annotname)
        detections = np.array([data['bbox'] for data in annots['annots']])
        image = cv2.imread(imgname)
        # HRNet expects RGB input
        image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        points2d = estimator.predict(image_rgb, detections)
        # attach keypoints to the corresponding detection records
        for i in range(detections.shape[0]):
            annot_ = annots['annots'][i]
            annot_['keypoints'] = points2d[i]
        save_annot(annotname, annots)
13,258 | from ..annotator.file_utils import read_json
from .wrapper_base import check_result, create_annot_file, save_annot
from glob import glob
from os.path import join
from tqdm import tqdm
import os
import cv2
import numpy as np
def create_annot_file(annotname, imgname):
    """Write an empty annotation JSON for *imgname* and return the dict.

    Records the image size and its path relative to the 'images' folder.
    NOTE(review): paths without an 'images' component raise ValueError.
    """
    assert os.path.exists(imgname), imgname
    # image is read only to obtain height/width
    img = cv2.imread(imgname)
    height, width = img.shape[0], img.shape[1]
    imgnamesep = imgname.split(os.sep)
    filename = os.sep.join(imgnamesep[imgnamesep.index('images'):])
    annot = {
        'filename':filename,
        'height':height,
        'width':width,
        'annots': [],
        'isKeyframe': False
    }
    save_annot(annotname, annot)
    return annot
def extract_yolo_hrnet(image_root, annot_root, ext, config_yolo, config_hrnet):
    """Detect people with YOLOv4 and estimate their 2D keypoints with HRNet
    for every image in *image_root*, writing one annotation JSON per image
    into *annot_root*.

    Fixes over the previous version: the annotation file was created twice
    per image, the image was read twice (one copy unused), and `device`
    was assigned twice (first unconditionally to CUDA).
    """
    config_yolo.pop('ext', None)
    imgnames = sorted(glob(join(image_root, '*{}'.format(ext))))
    import torch
    # fall back to CPU when CUDA is unavailable
    device = torch.device('cuda') \
        if torch.cuda.is_available() else torch.device('cpu')
    from .YOLOv4 import YOLOv4
    detector = YOLOv4(device=device, **config_yolo)
    from .HRNet import SimpleHRNet
    estimator = SimpleHRNet(device=device, **config_hrnet)
    for nf, imgname in enumerate(tqdm(imgnames, desc=os.path.basename(image_root))):
        base = os.path.basename(imgname).replace(ext, '')
        annotname = join(annot_root, base+'.json')
        annot = create_annot_file(annotname, imgname)
        # read once; both networks consume the RGB image
        image = cv2.imread(imgname)
        image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        detections = detector.predict_single(image_rgb)
        # forward_hrnet
        points2d = estimator.predict(image_rgb, detections)
        annots = []
        pid = 0
        for i in range(len(detections)):
            annot_ = {
                'bbox': [float(d) for d in detections[i]],
                'keypoints': points2d[i],
                'isKeyframe': False
            }
            # squared longer side as a size proxy for ordering people
            annot_['area'] = max(annot_['bbox'][2] - annot_['bbox'][0], annot_['bbox'][3] - annot_['bbox'][1])**2
            annots.append(annot_)
        # largest person first
        annots.sort(key=lambda x:-x['area'])
        # re-assign the person ID
        for i in range(len(annots)):
            annots[i]['personID'] = i + pid
        annot['annots'] = annots
        save_annot(annotname, annot)
13,259 | import torch
import torch.nn as nn
import torchvision.models.resnet as resnet
import numpy as np
import math
from torch.nn import functional as F
The provided code snippet includes necessary dependencies for implementing the `rot6d_to_rotmat` function. Write a Python function `def rot6d_to_rotmat(x)` to solve the following problem:
Convert 6D rotation representation to 3x3 rotation matrix. Based on Zhou et al., "On the Continuity of Rotation Representations in Neural Networks", CVPR 2019 Input: (B,6) Batch of 6-D rotation representations Output: (B,3,3) Batch of corresponding rotation matrices
Here is the function:
def rot6d_to_rotmat(x):
    """Convert a 6D rotation representation into 3x3 rotation matrices.

    Gram-Schmidt orthonormalization of two 3D column vectors, as in
    Zhou et al., "On the Continuity of Rotation Representations in Neural
    Networks", CVPR 2019.

    Input:
        (B,6) Batch of 6-D rotation representations
    Output:
        (B,3,3) Batch of corresponding rotation matrices
    """
    cols = x.view(-1, 3, 2)
    u = cols[..., 0]
    v = cols[..., 1]
    # first basis vector: normalized u
    e1 = F.normalize(u)
    # second: v with its component along e1 removed, then normalized
    proj = (e1 * v).sum(dim=1, keepdim=True)
    e2 = F.normalize(v - proj * e1)
    # third: completes the right-handed orthonormal frame
    e3 = torch.cross(e1, e2)
    return torch.stack((e1, e2, e3), dim=-1)
13,260 | import torch
import torch.nn as nn
import torchvision.models.resnet as resnet
import numpy as np
import math
from torch.nn import functional as F
class Bottleneck(nn.Module):
    """ Redefinition of Bottleneck residual block
    Adapted from the official PyTorch implementation
    """
    # output channels are `planes * expansion`
    expansion = 4
    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        # 1x1 reduce -> 3x3 (possibly strided) -> 1x1 expand
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        # projection used when the identity shortcut has a mismatched shape
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        """Residual forward pass: conv stack plus (possibly projected) shortcut."""
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv3(out)
        out = self.bn3(out)
        if self.downsample is not None:
            residual = self.downsample(x)
        out += residual
        out = self.relu(out)
        return out
class HMR(nn.Module):
    """ SMPL Iterative Regressor with ResNet50 backbone

    Extracts ResNet features and iteratively refines the SMPL pose
    (24 joints in 6D rotation form), shape (10 betas) and camera
    (3 params), starting from mean parameters loaded from an .npz file.
    """
    def __init__(self, block, layers, smpl_mean_params):
        self.inplanes = 64
        super(HMR, self).__init__()
        # 24 joints x 6D rotation representation
        npose = 24 * 6
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = nn.AvgPool2d(7, stride=1)
        # regressor input: image features + current pose (144) + shape (10) + cam (3)
        self.fc1 = nn.Linear(512 * block.expansion + npose + 13, 1024)
        self.drop1 = nn.Dropout()
        self.fc2 = nn.Linear(1024, 1024)
        self.drop2 = nn.Dropout()
        # heads predict residual updates to pose, shape and camera
        self.decpose = nn.Linear(1024, npose)
        self.decshape = nn.Linear(1024, 10)
        self.deccam = nn.Linear(1024, 3)
        nn.init.xavier_uniform_(self.decpose.weight, gain=0.01)
        nn.init.xavier_uniform_(self.decshape.weight, gain=0.01)
        nn.init.xavier_uniform_(self.deccam.weight, gain=0.01)
        # Kaiming-style init for convs; unit-gamma/zero-beta for batchnorms
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
        # mean SMPL parameters serve as the iteration starting point
        mean_params = np.load(smpl_mean_params)
        init_pose = torch.from_numpy(mean_params['pose'][:]).unsqueeze(0)
        init_shape = torch.from_numpy(mean_params['shape'][:].astype('float32')).unsqueeze(0)
        init_cam = torch.from_numpy(mean_params['cam']).unsqueeze(0)
        self.register_buffer('init_pose', init_pose)
        self.register_buffer('init_shape', init_shape)
        self.register_buffer('init_cam', init_cam)
    def _make_layer(self, block, planes, blocks, stride=1):
        """Stack `blocks` residual blocks; the first one may downsample."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)
    def forward(self, x, init_pose=None, init_shape=None, init_cam=None, n_iter=3):
        """Run the backbone once, then `n_iter` refinement iterations.

        Returns (pred_rotmat (B,24,3,3), pred_shape (B,10), pred_cam (B,3)).
        """
        batch_size = x.shape[0]
        # default starting estimates: registered mean parameters
        if init_pose is None:
            init_pose = self.init_pose.expand(batch_size, -1)
        if init_shape is None:
            init_shape = self.init_shape.expand(batch_size, -1)
        if init_cam is None:
            init_cam = self.init_cam.expand(batch_size, -1)
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x1 = self.layer1(x)
        x2 = self.layer2(x1)
        x3 = self.layer3(x2)
        x4 = self.layer4(x3)
        xf = self.avgpool(x4)
        xf = xf.view(xf.size(0), -1)
        pred_pose = init_pose
        pred_shape = init_shape
        pred_cam = init_cam
        # iterative error feedback: each step predicts a residual update
        for i in range(n_iter):
            xc = torch.cat([xf, pred_pose, pred_shape, pred_cam],1)
            xc = self.fc1(xc)
            xc = self.drop1(xc)
            xc = self.fc2(xc)
            xc = self.drop2(xc)
            pred_pose = self.decpose(xc) + pred_pose
            pred_shape = self.decshape(xc) + pred_shape
            pred_cam = self.deccam(xc) + pred_cam
        pred_rotmat = rot6d_to_rotmat(pred_pose).view(batch_size, 24, 3, 3)
        return pred_rotmat, pred_shape, pred_cam
The provided code snippet includes necessary dependencies for implementing the `hmr` function. Write a Python function `def hmr(smpl_mean_params, pretrained=True, **kwargs)` to solve the following problem:
Constructs an HMR model with ResNet50 backbone. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet
Here is the function:
def hmr(smpl_mean_params, pretrained=True, **kwargs):
    """ Constructs an HMR model with ResNet50 backbone.
    Args:
        smpl_mean_params: path to the .npz file holding mean pose/shape/cam.
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    model = HMR(Bottleneck, [3, 4, 6, 3], smpl_mean_params, **kwargs)
    if pretrained:
        # copy only the overlapping ResNet weights; strict=False leaves the
        # SMPL regression head at its freshly-initialized values
        resnet_imagenet = resnet.resnet50(pretrained=True)
        model.load_state_dict(resnet_imagenet.state_dict(),strict=False)
    return model
13,261 | import torch
import numpy as np
import cv2
from .models import hmr
The provided code snippet includes necessary dependencies for implementing the `normalize` function. Write a Python function `def normalize(tensor, mean, std, inplace: bool = False)` to solve the following problem:
Normalize a tensor image with mean and standard deviation. .. note:: This transform acts out of place by default, i.e., it does not mutates the input tensor. See :class:`~torchvision.transforms.Normalize` for more details. Args: tensor (Tensor): Tensor image of size (C, H, W) or (B, C, H, W) to be normalized. mean (sequence): Sequence of means for each channel. std (sequence): Sequence of standard deviations for each channel. inplace(bool,optional): Bool to make this operation inplace. Returns: Tensor: Normalized Tensor image.
Here is the function:
def normalize(tensor, mean, std, inplace: bool = False):
    """Normalize a tensor image with per-channel mean and standard deviation.

    .. note::
        Acts out of place by default, i.e., it does not mutate the input
        tensor. See :class:`~torchvision.transforms.Normalize` for details.

    Args:
        tensor (Tensor): Tensor image of size (C, H, W) or (B, C, H, W) to be normalized.
        mean (sequence): Sequence of means for each channel.
        std (sequence): Sequence of standard deviations for each channel.
        inplace(bool,optional): Bool to make this operation inplace.

    Returns:
        Tensor: Normalized Tensor image.
    """
    if not isinstance(tensor, torch.Tensor):
        raise TypeError('Input tensor should be a torch tensor. Got {}.'.format(type(tensor)))
    if tensor.ndim < 3:
        raise ValueError('Expected tensor to be a tensor image of size (..., C, H, W). Got tensor.size() = '
                         '{}.'.format(tensor.size()))
    out = tensor if inplace else tensor.clone()
    dtype = out.dtype
    mean_t = torch.as_tensor(mean, dtype=dtype, device=out.device)
    std_t = torch.as_tensor(std, dtype=dtype, device=out.device)
    # a zero std would silently produce inf/nan — refuse instead
    if (std_t == 0).any():
        raise ValueError('std evaluated to zero after conversion to {}, leading to division by zero.'.format(dtype))
    # reshape 1-D stats to (C, 1, 1) so they broadcast over H and W
    if mean_t.ndim == 1:
        mean_t = mean_t.view(-1, 1, 1)
    if std_t.ndim == 1:
        std_t = std_t.view(-1, 1, 1)
    out.sub_(mean_t).div_(std_t)
    return out
13,262 | import torch
import numpy as np
import cv2
from .models import hmr
class constants:
    """Camera and image-preprocessing constants for the SPIN/HMR pipeline."""
    # fixed focal length — presumably for the weak-perspective camera model
    # used by SPIN; confirm at the usage sites
    FOCAL_LENGTH = 5000.
    # square network input resolution in pixels
    IMG_RES = 224
    # Mean and standard deviation for normalizing input image
    # (the standard ImageNet normalization statistics)
    IMG_NORM_MEAN = [0.485, 0.456, 0.406]
    IMG_NORM_STD = [0.229, 0.224, 0.225]
class Normalize(torch.nn.Module):
    """Normalize a tensor image with mean and standard deviation.
    Given mean: ``(mean[1],...,mean[n])`` and std: ``(std[1],..,std[n])`` for ``n``
    channels, this transform will normalize each channel of the input
    ``torch.*Tensor`` i.e.,
    ``output[channel] = (input[channel] - mean[channel]) / std[channel]``
    .. note::
        This transform acts out of place, i.e., it does not mutate the input tensor.
    Args:
        mean (sequence): Sequence of means for each channel.
        std (sequence): Sequence of standard deviations for each channel.
        inplace(bool,optional): Bool to make this operation in-place.
    """
    def __init__(self, mean, std, inplace=False):
        super().__init__()
        self.mean = mean
        self.std = std
        self.inplace = inplace
    def forward(self, tensor):
        """
        Args:
            tensor (Tensor): Tensor image to be normalized.
        Returns:
            Tensor: Normalized Tensor image.
        """
        # delegate to the module-level normalize() helper
        return normalize(tensor, self.mean, self.std, self.inplace)
    def __repr__(self):
        return self.__class__.__name__ + '(mean={0}, std={1})'.format(self.mean, self.std)
def crop(img, center, scale, res, rot=0, bias=0):
    """Crop image according to the supplied bounding box."""
    # NOTE(review): relies on a `transform` helper that is not defined in
    # this snippet (maps output-space coords back into the image) — confirm.
    # Upper left point
    ul = np.array(transform([1, 1], center, scale, res, invert=1))-1
    # Bottom right point
    br = np.array(transform([res[0]+1,
                             res[1]+1], center, scale, res, invert=1))-1
    # Padding so that when rotated proper amount of context is included
    pad = int(np.linalg.norm(br - ul) / 2 - float(br[1] - ul[1]) / 2)
    if not rot == 0:
        ul -= pad
        br += pad
    new_shape = [br[1] - ul[1], br[0] - ul[0]]
    if len(img.shape) > 2:
        new_shape += [img.shape[2]]
    # destination canvas pre-filled with `bias` (covers out-of-frame crops)
    new_img = np.zeros(new_shape) + bias
    # Range to fill new array
    new_x = max(0, -ul[0]), min(br[0], len(img[0])) - ul[0]
    new_y = max(0, -ul[1]), min(br[1], len(img)) - ul[1]
    # Range to sample from original image
    old_x = max(0, ul[0]), min(len(img[0]), br[0])
    old_y = max(0, ul[1]), min(len(img), br[1])
    new_img[new_y[0]:new_y[1], new_x[0]:new_x[1]] = img[old_y[0]:old_y[1],
                                                        old_x[0]:old_x[1]]
    if not rot == 0:
        # Remove padding
        # NOTE(review): scipy.misc.imrotate was removed in modern SciPy and
        # `scipy` is not imported in this snippet — the rot != 0 path will
        # fail at runtime; verify before relying on rotation.
        new_img = scipy.misc.imrotate(new_img, rot)
        new_img = new_img[pad:-pad, pad:-pad]
    new_img = cv2.resize(new_img, (res[0], res[1]))
    return new_img
The provided code snippet includes necessary dependencies for implementing the `process_image` function. Write a Python function `def process_image(img, bbox, input_res=224)` to solve the following problem:
Read image, do preprocessing and possibly crop it according to the bounding box. If there are bounding box annotations, use them to crop the image. If no bounding box is specified but openpose detections are available, use them to get the bounding box.
Here is the function:
def process_image(img, bbox, input_res=224):
    """Read image, do preprocessing and possibly crop it according to the bounding box.
    If there are bounding box annotations, use them to crop the image.
    If no bounding box is specified but openpose detections are available, use them to get the bounding box.

    Returns (img, norm_img): the cropped [0, 1] float tensor (C, H, W) and
    its ImageNet-normalized copy with a leading batch dimension.
    """
    # reverse the channel order (BGR -> RGB, assuming an OpenCV image — confirm)
    img = img[:, :, ::-1].copy()
    normalize_img = Normalize(mean=constants.IMG_NORM_MEAN, std=constants.IMG_NORM_STD)
    l, t, r, b = bbox[:4]
    center = [(l+r)/2, (t+b)/2]
    width = max(r-l, b-t)
    # scale presumably follows the 200-pixel reference-box convention — TODO confirm
    scale = width/200.0
    img = crop(img, center, scale, (input_res, input_res))
    img = img.astype(np.float32) / 255.
    # HWC -> CHW for the network
    img = torch.from_numpy(img).permute(2,0,1)
    norm_img = normalize_img(img.clone())[None]
    return img, norm_img
13,263 | import torch
import numpy as np
import cv2
from .models import hmr
def solve_translation(X, x, K):
    """Solve the camera translation t so that projecting X + t with
    intrinsics K matches the 2D observations x, weighting each point's
    equations by its confidence x[:, 2]. Weighted linear least squares
    via the normal equations.

    Args:
        X: (N, 3) 3D points.
        x: (N, 3) observations as (u, v, confidence).
        K: (3, 3) intrinsic matrix.
    Returns:
        (3,) translation vector.
    """
    fx, fy = K[0, 0], K[1, 1]
    cx, cy = K[0, 2], K[1, 2]
    nPoints = X.shape[0]
    # normalized image coordinates
    u = (x[:, 0] - cx) / fx
    v = (x[:, 1] - cy) / fy
    A = np.zeros((2 * nPoints, 3))
    b = np.zeros((2 * nPoints, 1))
    A[0::2, 0] = 1
    A[1::2, 1] = 1
    A[0::2, 2] = -u
    A[1::2, 2] = -v
    b[0::2, 0] = X[:, 2] * u - X[:, 0]
    b[1::2, 0] = X[:, 2] * v - X[:, 1]
    # weight each point's pair of rows by its confidence
    weights = np.repeat(x[:, 2], 2)
    A *= weights[:, None]
    b *= weights[:, None]
    trans = np.linalg.inv(A.T @ A) @ A.T @ b
    return trans.T[0]
13,264 | import torch
import numpy as np
import cv2
from .models import hmr
def estimate_translation_np(S, joints_2d, joints_conf, K):
def init_with_spin(body_model, spin_model, img, bbox, kpts, camera):
    """Initialize SMPL parameters for one person via a SPIN forward pass.

    Regresses pose/shape from the crop, solves for the camera-space
    translation from 2D keypoints, then converts the global orientation
    and translation to world coordinates with the camera extrinsics.
    Returns {'body_params', 'vertices', 'keypoints3d'}.
    """
    body_params = spin_model.forward(img.copy(), bbox)
    body_params = body_model.check_params(body_params)
    # only use body joints to estimation translation
    nJoints = 15
    keypoints3d = body_model(return_verts=False, return_tensor=False, **body_params)[0]
    trans = estimate_translation_np(keypoints3d[:nJoints], kpts[:nJoints, :2], kpts[:nJoints, 2], camera['K'])
    body_params['Th'] += trans[None, :]
    # convert to world coordinate
    Rhold = cv2.Rodrigues(body_params['Rh'])[0]
    Thold = body_params['Th']
    # apply the inverse camera extrinsics: R_world = R_cam^T @ R, etc.
    Rh = camera['R'].T @ Rhold
    Th = (camera['R'].T @ (Thold.T - camera['T'])).T
    body_params['Th'] = Th
    body_params['Rh'] = cv2.Rodrigues(Rh)[0].reshape(1, 3)
    # re-evaluate the model with the world-space parameters
    vertices = body_model(return_verts=True, return_tensor=False, **body_params)[0]
    keypoints3d = body_model(return_verts=False, return_tensor=False, **body_params)[0]
    results = {'body_params': body_params, 'vertices': vertices, 'keypoints3d': keypoints3d}
    return results
13,265 | import numpy as np
import cv2
import mediapipe as mp
from ..mytools import Timer
The provided code snippet includes necessary dependencies for implementing the `bbox_from_keypoints` function. Write a Python function `def bbox_from_keypoints(keypoints, rescale=1.2, detection_thresh=0.05, MIN_PIXEL=5)` to solve the following problem:
Get center and scale for bounding box from openpose detections.
Here is the function:
def bbox_from_keypoints(keypoints, rescale=1.2, detection_thresh=0.05, MIN_PIXEL=5):
    """Compute a bounding box [l, t, r, b, conf] around detected keypoints.

    The tight box around keypoints whose confidence exceeds
    *detection_thresh* is enlarged by *rescale*; its confidence is the
    mean of the valid keypoint confidences. The placeholder
    [0, 0, 100, 100, 0] is returned when fewer than 3 keypoints are
    valid or the tight box is under MIN_PIXEL in either dimension.
    """
    valid = keypoints[:, -1] > detection_thresh
    if valid.sum() < 3:
        return [0, 0, 100, 100, 0]
    pts = keypoints[valid][:, :-1]
    lo = pts.min(axis=0)
    hi = pts.max(axis=0)
    center = (hi + lo) / 2
    size = hi - lo
    # reject (nearly) degenerate boxes as well
    if size[0] < MIN_PIXEL or size[1] < MIN_PIXEL:
        return [0, 0, 100, 100, 0]
    size = size * rescale
    half = size / 2
    return [
        center[0] - half[0],
        center[1] - half[1],
        center[0] + half[0],
        center[1] + half[1],
        keypoints[valid, 2].mean()
    ]
13,266 | import numpy as np
import cv2
import mediapipe as mp
from ..mytools import Timer
class Detector:
def __init__(self, nViews, to_openpose, model_type, show=False, **cfg) -> None:
def to_array(pose, W, H, start=0):
def get_body(self, pose, W, H):
def get_hand(self, pose, W, H):
def get_face(self, pose, W, H):
def vis(self, image, annots, nv=0):
def process_body(self, data, results, image_width, image_height):
def process_hand(self, data, results, image_width, image_height):
def process_face(self, data, results, image_width, image_height, image=None):
def __call__(self, images):
def check_result(image_root, annot_root):
def extract_2d(image_root, annot_root, config, mode='holistic'):
    """Run MediaPipe 2D keypoint detection on every image in *image_root*
    and save one annotation JSON per image into *annot_root*.

    *mode* selects the MediaPipe model ('holistic', 'pose', or other
    variants); holistic/pose outputs are converted to OpenPose ordering.
    Skips the work when existing results look complete, unless
    config['force'] is set.
    """
    from .wrapper_base import check_result, save_annot
    force = config.pop('force')
    if check_result(image_root, annot_root) and not force:
        return 0
    from glob import glob
    from os.path import join
    ext = config.pop('ext')
    import os
    from tqdm import tqdm
    # only full-body models are mapped onto the OpenPose keypoint layout
    if mode == 'holistic' or mode == 'pose':
        to_openpose = True
    else:
        to_openpose = False
    detector = Detector(nViews=1, to_openpose=to_openpose, model_type=mode, show=False, **config)
    imgnames = sorted(glob(join(image_root, '*'+ext)))
    for imgname in tqdm(imgnames, desc='{:10s}'.format(os.path.basename(annot_root))):
        base = os.path.basename(imgname).replace(ext, '')
        annotname = join(annot_root, base+'.json')
        image = cv2.imread(imgname)
        annots = detector([image])[0]
        # store the path as its last two components (folder/file)
        annots['filename'] = os.sep.join(imgname.split(os.sep)[-2:])
        save_annot(annotname, annots)
13,267 | import sys
import os
import time
import math
import numpy as np
import itertools
import struct
import imghdr
def sigmoid(x):
    """Logistic sigmoid 1 / (1 + exp(-x)); works elementwise on arrays."""
    return 1.0 / (1. + np.exp(-x))
13,268 | import sys
import os
import time
import math
import numpy as np
import itertools
import struct
import imghdr
def softmax(x):
    """Row-wise softmax of a 2D array, stabilized by subtracting each row's max."""
    shifted = x - np.max(x, axis=1, keepdims=True)
    exps = np.exp(shifted)
    return exps / np.sum(exps, axis=1, keepdims=True)
13,269 | import sys
import os
import time
import math
import numpy as np
import itertools
import struct
import imghdr
def bbox_iou(box1, box2, x1y1x2y2=True):
    """Intersection-over-union of two boxes.

    With x1y1x2y2=True boxes are (x1, y1, x2, y2); otherwise they are
    (x, y, w, h). Returns 0.0 when the boxes do not overlap.
    """
    if x1y1x2y2:
        w1, h1 = box1[2] - box1[0], box1[3] - box1[1]
        w2, h2 = box2[2] - box2[0], box2[3] - box2[1]
        span_w = max(box1[2], box2[2]) - min(box1[0], box2[0])
        span_h = max(box1[3], box2[3]) - min(box1[1], box2[1])
    else:
        w1, h1 = box1[2], box1[3]
        w2, h2 = box2[2], box2[3]
        span_w = max(box1[0] + w1, box2[0] + w2) - min(box1[0], box2[0])
        span_h = max(box1[1] + h1, box2[1] + h2) - min(box1[1], box2[1])
    # overlap per axis: total widths minus the bounding span;
    # non-positive means no intersection on that axis
    inter_w = w1 + w2 - span_w
    inter_h = h1 + h2 - span_h
    if inter_w <= 0 or inter_h <= 0:
        return 0.0
    inter_area = inter_w * inter_h
    union_area = w1 * h1 + w2 * h2 - inter_area
    return inter_area / union_area
13,270 | import sys
import os
import time
import math
import numpy as np
import itertools
import struct
import imghdr
def plot_boxes_cv2(img, boxes, savename=None, class_names=None, color=None):
    """Draw detection boxes (and class labels, if present) on a copy of *img*.

    Box coordinates are scaled by the image size (presumably normalized
    to [0, 1] — confirm against the detector output). Boxes with at least
    7 entries carry (..., cls_conf, cls_id) and get a per-class color
    unless an explicit *color* is given. Optionally saves to *savename*.
    Returns the annotated image copy.
    """
    import cv2
    img = np.copy(img)
    colors = np.array([[1, 0, 1], [0, 0, 1], [0, 1, 1], [0, 1, 0], [1, 1, 0], [1, 0, 0]], dtype=np.float32)
    def get_color(c, x, max_val):
        # interpolate channel c of the color wheel at position x/max_val
        ratio = float(x) / max_val * 5
        i = int(math.floor(ratio))
        j = int(math.ceil(ratio))
        ratio = ratio - i
        r = (1 - ratio) * colors[i][c] + ratio * colors[j][c]
        return int(r * 255)
    width = img.shape[1]
    height = img.shape[0]
    for i in range(len(boxes)):
        box = boxes[i]
        x1 = int(box[0] * width)
        y1 = int(box[1] * height)
        x2 = int(box[2] * width)
        y2 = int(box[3] * height)
        if color:
            rgb = color
        else:
            rgb = (255, 0, 0)
        if len(box) >= 7 and class_names:
            cls_conf = box[5]
            cls_id = box[6]
            print('%s: %f' % (class_names[cls_id], cls_conf))
            classes = len(class_names)
            # hash the class id onto the color wheel for a stable per-class color
            offset = cls_id * 123457 % classes
            red = get_color(2, offset, classes)
            green = get_color(1, offset, classes)
            blue = get_color(0, offset, classes)
            if color is None:
                rgb = (red, green, blue)
            img = cv2.putText(img, class_names[cls_id], (x1, y1), cv2.FONT_HERSHEY_SIMPLEX, 1.2, rgb, 1)
        img = cv2.rectangle(img, (x1, y1), (x2, y2), rgb, 1)
    if savename:
        print("save plot results to %s" % savename)
        cv2.imwrite(savename, img)
    return img
13,271 | import sys
import os
import time
import math
import numpy as np
import itertools
import struct
import imghdr
def read_truths(lab_path):
    """Load ground-truth labels from a darknet-style text file.

    Each row of the file holds 5 numbers (class id plus a box). Returns an
    ``(n, 5)`` ndarray, or an empty array when the file is missing or empty.
    """
    if not os.path.exists(lab_path):
        return np.array([])
    if os.path.getsize(lab_path):
        truths = np.loadtxt(lab_path)
        # Integer division: ``truths.size / 5`` is a float on Python 3 and
        # makes reshape raise. Also forces a single-row file into (1, 5).
        truths = truths.reshape(truths.size // 5, 5)
        return truths
    else:
        return np.array([])
13,272 | import sys
import os
import time
import math
import numpy as np
import itertools
import struct
import imghdr
def nms_cpu(boxes, confs, nms_thresh=0.5, min_mode=False):
    """Greedy non-maximum suppression on corner-format boxes.

    boxes : (n, 4) ndarray of [x1, y1, x2, y2]; confs : (n,) scores.
    Boxes overlapping a kept box by more than ``nms_thresh`` are discarded.
    ``min_mode`` divides the intersection by the smaller area instead of the
    union. Returns the kept indices as an ndarray.
    """
    x1, y1, x2, y2 = boxes[:, 0], boxes[:, 1], boxes[:, 2], boxes[:, 3]
    areas = (x2 - x1) * (y2 - y1)
    order = confs.argsort()[::-1]  # highest score first

    keep = []
    while order.size > 0:
        best = order[0]
        rest = order[1:]
        keep.append(best)
        # Intersection of the best box with every remaining candidate.
        ix1 = np.maximum(x1[best], x1[rest])
        iy1 = np.maximum(y1[best], y1[rest])
        ix2 = np.minimum(x2[best], x2[rest])
        iy2 = np.minimum(y2[best], y2[rest])
        inter = np.maximum(0.0, ix2 - ix1) * np.maximum(0.0, iy2 - iy1)
        if min_mode:
            overlap = inter / np.minimum(areas[best], areas[rest])
        else:
            overlap = inter / (areas[best] + areas[rest] - inter)
        # Keep only candidates that do not overlap the chosen box too much.
        order = rest[np.where(overlap <= nms_thresh)[0]]
    return np.array(keep)
def post_processing(img, conf_thresh, nms_thresh, output):
    """Turn raw network output into per-image lists of NMS-filtered detections.

    output : pair of (boxes [batch, num, 1, 4], confs [batch, num, num_classes]),
    given as torch tensors or ndarrays. ``img`` is unused here. Each returned
    detection is ``[x1, y1, x2, y2, conf, conf, cls_id]``. Prints stage timings
    as a side effect.
    """
    box_array = output[0]   # [batch, num, 1, 4]
    confs = output[1]       # [batch, num, num_classes]

    t1 = time.time()
    if type(box_array).__name__ != 'ndarray':
        box_array = box_array.cpu().detach().numpy()
        confs = confs.cpu().detach().numpy()

    num_classes = confs.shape[2]
    box_array = box_array[:, :, 0]          # -> [batch, num, 4]
    max_conf = np.max(confs, axis=2)        # best score per candidate
    max_id = np.argmax(confs, axis=2)       # best class per candidate
    t2 = time.time()

    bboxes_batch = []
    for i in range(box_array.shape[0]):
        over_thresh = max_conf[i] > conf_thresh
        img_boxes = box_array[i, over_thresh, :]
        img_confs = max_conf[i, over_thresh]
        img_ids = max_id[i, over_thresh]

        bboxes = []
        # Run NMS independently for each class.
        for cls in range(num_classes):
            cls_mask = img_ids == cls
            c_boxes = img_boxes[cls_mask, :]
            c_confs = img_confs[cls_mask]
            c_ids = img_ids[cls_mask]
            keep = nms_cpu(c_boxes, c_confs, nms_thresh)
            if keep.size > 0:
                c_boxes = c_boxes[keep, :]
                c_confs = c_confs[keep]
                c_ids = c_ids[keep]
                for k in range(c_boxes.shape[0]):
                    bboxes.append([c_boxes[k, 0], c_boxes[k, 1], c_boxes[k, 2],
                                   c_boxes[k, 3], c_confs[k], c_confs[k], c_ids[k]])
        bboxes_batch.append(bboxes)
    t3 = time.time()

    print('-----------------------------------')
    print(' max and argmax : %f' % (t2 - t1))
    print(' nms : %f' % (t3 - t2))
    print('Post processing total : %f' % (t3 - t1))
    print('-----------------------------------')
    return bboxes_batch
13,273 | import sys
import os
import time
import math
import torch
import numpy as np
from torch.autograd import Variable
def get_region_boxes(boxes_and_confs):
    """Merge per-scale (boxes, confs) pairs along the detection dimension.

    boxes_and_confs : iterable of pairs where item[0] is a boxes tensor
    [batch, num_i, 1, 4] and item[1] a confidence tensor [batch, num_i, classes].
    Returns ``[boxes, confs]`` concatenated over dim 1.
    """
    boxes = torch.cat([pair[0] for pair in boxes_and_confs], dim=1)
    confs = torch.cat([pair[1] for pair in boxes_and_confs], dim=1)
    return [boxes, confs]
13,274 | import sys
import os
import time
import math
import torch
import numpy as np
from torch.autograd import Variable
def convert2cpu_long(gpu_matrix):
    """Return a CPU ``LongTensor`` copy of ``gpu_matrix`` with the same shape."""
    host = torch.LongTensor(gpu_matrix.size())
    host.copy_(gpu_matrix)
    return host
13,275 | import sys
import os
import time
import math
import torch
import numpy as np
from torch.autograd import Variable
def do_detect(model, img, conf_thresh, nms_thresh, use_cuda=1):
    """Run one forward pass of ``model`` on ``img`` and post-process detections.

    ``img`` may be a single HWC ndarray (cv2 image) or an NHWC batch; it is
    converted to a [0, 1] NCHW float tensor before inference. Prints
    per-stage timings as a side effect and exits the process on an
    unrecognised image type.
    """
    model.eval()
    t0 = time.time()

    if type(img) == np.ndarray and len(img.shape) == 3:  # cv2 image
        img = torch.from_numpy(img.transpose(2, 0, 1)).float().div(255.0).unsqueeze(0)
    elif type(img) == np.ndarray and len(img.shape) == 4:
        img = torch.from_numpy(img.transpose(0, 3, 1, 2)).float().div(255.0)
    else:
        print("unknow image type")
        exit(-1)
    if use_cuda:
        img = img.cuda()
    img = torch.autograd.Variable(img)
    t1 = time.time()
    output = model(img)
    t2 = time.time()
    print('-----------------------------------')
    print(' Preprocess : %f' % (t1 - t0))
    print(' Model Inference : %f' % (t2 - t1))
    print('-----------------------------------')
    # NOTE(review): ``utils`` is not among this module's visible imports —
    # confirm that ``utils.post_processing`` resolves at runtime (or whether
    # this should call the local ``post_processing`` directly).
    return utils.post_processing(img, conf_thresh, nms_thresh, output)
13,276 | import torch
from .torch_utils import convert2cpu
def parse_cfg(cfgfile):
    """Parse a darknet ``.cfg`` file into an ordered list of block dicts.

    A ``[section]`` header opens a new dict with ``'type'`` set to the section
    name (convolutional blocks get ``batch_normalize`` defaulted to 0);
    subsequent ``key=value`` lines are stored as stripped strings, with a
    ``type`` key renamed to ``_type`` to avoid clobbering the section type.
    Blank lines and ``#`` comments are ignored.
    """
    blocks = []
    current = None
    with open(cfgfile, 'r') as fh:
        for raw in fh:
            stripped = raw.rstrip()
            if not stripped or stripped.startswith('#'):
                continue
            if stripped.startswith('['):
                if current:
                    blocks.append(current)
                current = dict()
                current['type'] = stripped.lstrip('[').rstrip(']')
                # Default that darknet assumes when the key is absent.
                if current['type'] == 'convolutional':
                    current['batch_normalize'] = 0
            else:
                key, value = stripped.split('=')
                key = key.strip()
                if key == 'type':
                    key = '_type'
                current[key] = value.strip()
    if current:
        blocks.append(current)
    return blocks
13,277 | import torch
from .torch_utils import convert2cpu
def print_cfg(blocks):
    """Pretty-print a parsed darknet cfg (output of ``parse_cfg``) as a table.

    Tracks width/height/filter counts layer by layer so each row can show the
    input and output tensor sizes. Purely informational: prints to stdout and
    returns None.
    """
    print('layer filters size input output');
    # Defaults until the [net] block overrides them.
    prev_width = 416
    prev_height = 416
    prev_filters = 3
    # Per-layer output sizes, indexed by layer number (used by route/shortcut).
    out_filters = []
    out_widths = []
    out_heights = []
    ind = -2  # the [net] block is skipped, so the first real layer is index 0
    for block in blocks:
        ind = ind + 1
        if block['type'] == 'net':
            prev_width = int(block['width'])
            prev_height = int(block['height'])
            continue
        elif block['type'] == 'convolutional':
            filters = int(block['filters'])
            kernel_size = int(block['size'])
            stride = int(block['stride'])
            is_pad = int(block['pad'])
            pad = (kernel_size - 1) // 2 if is_pad else 0
            width = (prev_width + 2 * pad - kernel_size) // stride + 1
            height = (prev_height + 2 * pad - kernel_size) // stride + 1
            print('%5d %-6s %4d %d x %d / %d %3d x %3d x%4d -> %3d x %3d x%4d' % (
                ind, 'conv', filters, kernel_size, kernel_size, stride, prev_width, prev_height, prev_filters, width,
                height, filters))
            prev_width = width
            prev_height = height
            prev_filters = filters
            out_widths.append(prev_width)
            out_heights.append(prev_height)
            out_filters.append(prev_filters)
        elif block['type'] == 'maxpool':
            pool_size = int(block['size'])
            stride = int(block['stride'])
            width = prev_width // stride
            height = prev_height // stride
            # NOTE(review): ``filters`` here is whatever a previous
            # convolutional branch left behind (equal to prev_filters after a
            # conv); if a maxpool appears before any conv this raises
            # NameError. Inherited darknet quirk — confirm before changing.
            print('%5d %-6s %d x %d / %d %3d x %3d x%4d -> %3d x %3d x%4d' % (
                ind, 'max', pool_size, pool_size, stride, prev_width, prev_height, prev_filters, width, height,
                filters))
            prev_width = width
            prev_height = height
            prev_filters = filters
            out_widths.append(prev_width)
            out_heights.append(prev_height)
            out_filters.append(prev_filters)
        elif block['type'] == 'avgpool':
            width = 1
            height = 1
            print('%5d %-6s %3d x %3d x%4d -> %3d' % (
                ind, 'avg', prev_width, prev_height, prev_filters, prev_filters))
            prev_width = width
            prev_height = height
            # NOTE(review): same leftover-``filters`` pattern as maxpool above.
            prev_filters = filters
            out_widths.append(prev_width)
            out_heights.append(prev_height)
            out_filters.append(prev_filters)
        elif block['type'] == 'softmax':
            print('%5d %-6s -> %3d' % (ind, 'softmax', prev_filters))
            out_widths.append(prev_width)
            out_heights.append(prev_height)
            out_filters.append(prev_filters)
        elif block['type'] == 'cost':
            print('%5d %-6s -> %3d' % (ind, 'cost', prev_filters))
            out_widths.append(prev_width)
            out_heights.append(prev_height)
            out_filters.append(prev_filters)
        elif block['type'] == 'reorg':
            # Space-to-depth: spatial dims shrink, channels grow by stride^2.
            stride = int(block['stride'])
            filters = stride * stride * prev_filters
            width = prev_width // stride
            height = prev_height // stride
            print('%5d %-6s / %d %3d x %3d x%4d -> %3d x %3d x%4d' % (
                ind, 'reorg', stride, prev_width, prev_height, prev_filters, width, height, filters))
            prev_width = width
            prev_height = height
            prev_filters = filters
            out_widths.append(prev_width)
            out_heights.append(prev_height)
            out_filters.append(prev_filters)
        elif block['type'] == 'upsample':
            stride = int(block['stride'])
            filters = prev_filters
            width = prev_width * stride
            height = prev_height * stride
            print('%5d %-6s * %d %3d x %3d x%4d -> %3d x %3d x%4d' % (
                ind, 'upsample', stride, prev_width, prev_height, prev_filters, width, height, filters))
            prev_width = width
            prev_height = height
            prev_filters = filters
            out_widths.append(prev_width)
            out_heights.append(prev_height)
            out_filters.append(prev_filters)
        elif block['type'] == 'route':
            # Negative layer indices are relative to the current layer.
            layers = block['layers'].split(',')
            layers = [int(i) if int(i) > 0 else int(i) + ind for i in layers]
            if len(layers) == 1:
                print('%5d %-6s %d' % (ind, 'route', layers[0]))
                prev_width = out_widths[layers[0]]
                prev_height = out_heights[layers[0]]
                prev_filters = out_filters[layers[0]]
            elif len(layers) == 2:
                print('%5d %-6s %d %d' % (ind, 'route', layers[0], layers[1]))
                prev_width = out_widths[layers[0]]
                prev_height = out_heights[layers[0]]
                assert (prev_width == out_widths[layers[1]])
                assert (prev_height == out_heights[layers[1]])
                prev_filters = out_filters[layers[0]] + out_filters[layers[1]]
            elif len(layers) == 4:
                print('%5d %-6s %d %d %d %d' % (ind, 'route', layers[0], layers[1], layers[2], layers[3]))
                prev_width = out_widths[layers[0]]
                prev_height = out_heights[layers[0]]
                assert (prev_width == out_widths[layers[1]] == out_widths[layers[2]] == out_widths[layers[3]])
                assert (prev_height == out_heights[layers[1]] == out_heights[layers[2]] == out_heights[layers[3]])
                prev_filters = out_filters[layers[0]] + out_filters[layers[1]] + out_filters[layers[2]] + out_filters[
                    layers[3]]
            else:
                # NOTE(review): ``sys`` must be imported at module level for
                # this diagnostic path — confirm it is in the real module.
                print("route error !!! {} {} {}".format(sys._getframe().f_code.co_filename,
                                                        sys._getframe().f_code.co_name, sys._getframe().f_lineno))
            out_widths.append(prev_width)
            out_heights.append(prev_height)
            out_filters.append(prev_filters)
        elif block['type'] in ['region', 'yolo']:
            print('%5d %-6s' % (ind, 'detection'))
            out_widths.append(prev_width)
            out_heights.append(prev_height)
            out_filters.append(prev_filters)
        elif block['type'] == 'shortcut':
            from_id = int(block['from'])
            from_id = from_id if from_id > 0 else from_id + ind
            print('%5d %-6s %d' % (ind, 'shortcut', from_id))
            prev_width = out_widths[from_id]
            prev_height = out_heights[from_id]
            prev_filters = out_filters[from_id]
            out_widths.append(prev_width)
            out_heights.append(prev_height)
            out_filters.append(prev_filters)
        elif block['type'] == 'connected':
            filters = int(block['output'])
            print('%5d %-6s %d -> %3d' % (ind, 'connected', prev_filters, filters))
            prev_filters = filters
            out_widths.append(1)
            out_heights.append(1)
            out_filters.append(prev_filters)
        else:
            print('unknown type %s' % (block['type']))
13,278 | import torch
from .torch_utils import convert2cpu
def load_conv(buf, start, conv_model):
    """Load bias then weight of a (non-BN) conv layer from a flat float buffer.

    ``buf`` is a 1-D float ndarray (darknet weight file layout). Returns the
    offset just past the consumed values.
    """
    n_bias = conv_model.bias.numel()
    n_weight = conv_model.weight.numel()
    conv_model.bias.data.copy_(torch.from_numpy(buf[start:start + n_bias]))
    start += n_bias
    conv_model.weight.data.copy_(
        torch.from_numpy(buf[start:start + n_weight]).reshape(conv_model.weight.data.shape))
    start += n_weight
    return start
13,279 | import torch
from .torch_utils import convert2cpu
def convert2cpu(gpu_matrix):
    """Return a CPU ``FloatTensor`` copy of ``gpu_matrix`` with the same shape."""
    host = torch.FloatTensor(gpu_matrix.size())
    host.copy_(gpu_matrix)
    return host
def save_conv(fp, conv_model):
    """Append a conv layer's bias then weight to an open binary file (darknet order)."""
    on_gpu = conv_model.bias.is_cuda
    for tensor in (conv_model.bias.data, conv_model.weight.data):
        if on_gpu:
            # .numpy() only works on host tensors, so copy off the GPU first.
            tensor = convert2cpu(tensor)
        tensor.numpy().tofile(fp)
13,280 | import torch
from .torch_utils import convert2cpu
def load_conv_bn(buf, start, conv_model, bn_model):
    """Load a conv+batchnorm pair from a flat float buffer (darknet order).

    Order on disk: BN bias, BN weight, running mean, running var, then the
    conv weight. Returns the offset just past the consumed values.
    """
    n_bn = bn_model.bias.numel()
    n_weight = conv_model.weight.numel()
    for dest in (bn_model.bias.data, bn_model.weight.data,
                 bn_model.running_mean, bn_model.running_var):
        dest.copy_(torch.from_numpy(buf[start:start + n_bn]))
        start += n_bn
    conv_model.weight.data.copy_(
        torch.from_numpy(buf[start:start + n_weight]).reshape(conv_model.weight.data.shape))
    return start + n_weight
13,281 | import torch
from .torch_utils import convert2cpu
def convert2cpu(gpu_matrix):
    """Copy ``gpu_matrix`` into a freshly-allocated CPU ``FloatTensor``."""
    cpu_tensor = torch.FloatTensor(gpu_matrix.size())
    return cpu_tensor.copy_(gpu_matrix)
def save_conv_bn(fp, conv_model, bn_model):
    """Append a conv+batchnorm pair to an open binary file (darknet order).

    Writes BN bias, BN weight, running mean, running var, then the conv weight.
    """
    tensors = (bn_model.bias.data, bn_model.weight.data,
               bn_model.running_mean, bn_model.running_var,
               conv_model.weight.data)
    on_gpu = bn_model.bias.is_cuda
    for tensor in tensors:
        if on_gpu:
            # .numpy() requires host memory.
            tensor = convert2cpu(tensor)
        tensor.numpy().tofile(fp)
13,282 | import torch
from .torch_utils import convert2cpu
def load_fc(buf, start, fc_model):
    """Load bias then weight of a fully-connected layer from a flat float buffer.

    Returns the offset just past the consumed values. Note the weight is
    copied without an explicit reshape, matching the original darknet loader.
    """
    n_bias = fc_model.bias.numel()
    n_weight = fc_model.weight.numel()
    fc_model.bias.data.copy_(torch.from_numpy(buf[start:start + n_bias]))
    start += n_bias
    fc_model.weight.data.copy_(torch.from_numpy(buf[start:start + n_weight]))
    start += n_weight
    return start
13,283 | import torch
from .torch_utils import convert2cpu
def save_fc(fp, fc_model):
    """Append an FC layer's bias then weight to an open binary file."""
    for tensor in (fc_model.bias.data, fc_model.weight.data):
        tensor.numpy().tofile(fp)
13,284 | import torch.nn as nn
import torch.nn.functional as F
from .torch_utils import *
import math
import torch
from torch.autograd import Variable
def bbox_ious(boxes1, boxes2, x1y1x2y2=True):
def build_targets(pred_boxes, target, anchors, num_anchors, num_classes, nH, nW, noobject_scale, object_scale,
                  sil_thresh, seen):
    """Build YOLOv2-style regression/confidence/class targets for one batch.

    Parameters (layout inferred from the indexing below — confirm vs caller):
        pred_boxes: (nB*nA*nH*nW, 4) predicted boxes, flattened per image.
        target: (nB, 250) ground truth — 50 slots of [cls, cx, cy, w, h] in
            normalised coordinates; a slot whose x is 0 terminates the list.
        anchors: flat list with ``anchor_step`` values per anchor.
        seen: images processed so far; below 12800 every cell is pushed
            towards the anchor centre (darknet warm-up).

    Returns (nGT, nCorrect, coord_mask, conf_mask, cls_mask,
             tx, ty, tw, th, tconf, tcls) with masks/targets of shape
    (nB, nA, nH, nW).
    """
    nB = target.size(0)
    nA = num_anchors
    nC = num_classes
    # Fix: use integer division. ``len(anchors) / num_anchors`` is a float on
    # Python 3, which breaks both ``anchors[anchor_step * n]`` indexing and
    # ``.view(nA, anchor_step)`` below.
    anchor_step = len(anchors) // num_anchors
    conf_mask = torch.ones(nB, nA, nH, nW) * noobject_scale
    coord_mask = torch.zeros(nB, nA, nH, nW)
    cls_mask = torch.zeros(nB, nA, nH, nW)
    tx = torch.zeros(nB, nA, nH, nW)
    ty = torch.zeros(nB, nA, nH, nW)
    tw = torch.zeros(nB, nA, nH, nW)
    th = torch.zeros(nB, nA, nH, nW)
    tconf = torch.zeros(nB, nA, nH, nW)
    tcls = torch.zeros(nB, nA, nH, nW)

    nAnchors = nA * nH * nW
    nPixels = nH * nW
    # Pass 1: drop the no-object penalty for predictions that already overlap
    # some ground-truth box by more than sil_thresh.
    for b in range(nB):
        cur_pred_boxes = pred_boxes[b * nAnchors:(b + 1) * nAnchors].t()
        cur_ious = torch.zeros(nAnchors)
        for t in range(50):
            if target[b][t * 5 + 1] == 0:
                break
            gx = target[b][t * 5 + 1] * nW
            gy = target[b][t * 5 + 2] * nH
            gw = target[b][t * 5 + 3] * nW
            gh = target[b][t * 5 + 4] * nH
            cur_gt_boxes = torch.FloatTensor([gx, gy, gw, gh]).repeat(nAnchors, 1).t()
            cur_ious = torch.max(cur_ious, bbox_ious(cur_pred_boxes, cur_gt_boxes, x1y1x2y2=False))
        conf_mask[b][cur_ious > sil_thresh] = 0
    # Warm-up phase: bias every cell towards the anchor centre.
    if seen < 12800:
        if anchor_step == 4:
            # 4-value anchors carry explicit (x, y) offsets in slots 2/3.
            tx = torch.FloatTensor(anchors).view(nA, anchor_step).index_select(1, torch.LongTensor([2])).view(
                1, nA, 1, 1).repeat(nB, 1, nH, nW)
            ty = torch.FloatTensor(anchors).view(num_anchors, anchor_step).index_select(1, torch.LongTensor([2])).view(
                1, nA, 1, 1).repeat(nB, 1, nH, nW)
        else:
            tx.fill_(0.5)
            ty.fill_(0.5)
        tw.zero_()
        th.zero_()
        coord_mask.fill_(1)
    # Pass 2: assign each ground-truth box to its best-matching anchor cell.
    nGT = 0
    nCorrect = 0
    for b in range(nB):
        for t in range(50):
            if target[b][t * 5 + 1] == 0:
                break
            nGT = nGT + 1
            best_iou = 0.0
            best_n = -1
            min_dist = 10000
            gx = target[b][t * 5 + 1] * nW
            gy = target[b][t * 5 + 2] * nH
            gi = int(gx)
            gj = int(gy)
            gw = target[b][t * 5 + 3] * nW
            gh = target[b][t * 5 + 4] * nH
            # Compare shapes only (both boxes centred at the origin).
            gt_box = [0, 0, gw, gh]
            for n in range(nA):
                aw = anchors[anchor_step * n]
                ah = anchors[anchor_step * n + 1]
                anchor_box = [0, 0, aw, ah]
                iou = bbox_iou(anchor_box, gt_box, x1y1x2y2=False)
                if anchor_step == 4:
                    ax = anchors[anchor_step * n + 2]
                    ay = anchors[anchor_step * n + 3]
                    dist = pow(((gi + ax) - gx), 2) + pow(((gj + ay) - gy), 2)
                if iou > best_iou:
                    best_iou = iou
                    best_n = n
                elif anchor_step == 4 and iou == best_iou and dist < min_dist:
                    best_iou = iou
                    best_n = n
                    min_dist = dist
            gt_box = [gx, gy, gw, gh]
            pred_box = pred_boxes[b * nAnchors + best_n * nPixels + gj * nW + gi]
            coord_mask[b][best_n][gj][gi] = 1
            cls_mask[b][best_n][gj][gi] = 1
            conf_mask[b][best_n][gj][gi] = object_scale
            tx[b][best_n][gj][gi] = target[b][t * 5 + 1] * nW - gi
            ty[b][best_n][gj][gi] = target[b][t * 5 + 2] * nH - gj
            tw[b][best_n][gj][gi] = math.log(gw / anchors[anchor_step * best_n])
            th[b][best_n][gj][gi] = math.log(gh / anchors[anchor_step * best_n + 1])
            iou = bbox_iou(gt_box, pred_box, x1y1x2y2=False)  # best_iou
            tconf[b][best_n][gj][gi] = iou
            tcls[b][best_n][gj][gi] = target[b][t * 5]
            if iou > 0.5:
                nCorrect = nCorrect + 1
    return nGT, nCorrect, coord_mask, conf_mask, cls_mask, tx, ty, tw, th, tconf, tcls
13,285 | from .darknet2pytorch import Darknet
import cv2
import torch
from os.path import join
import os
import numpy as np
def load_class_names(namesfile):
    """Read class names from ``namesfile``, one per line (trailing whitespace stripped)."""
    with open(namesfile, 'r') as fp:
        return [line.rstrip() for line in fp.readlines()]
13,286 | from .darknet2pytorch import Darknet
import cv2
import torch
from os.path import join
import os
import numpy as np
def nms_cpu(boxes, confs, nms_thresh=0.5, min_mode=False):
    """Greedy NMS over corner-format boxes; returns the kept indices.

    boxes : (n, 4) ndarray [x1, y1, x2, y2]; confs : (n,) scores.
    ``min_mode`` normalises the intersection by the smaller area rather than
    the union.
    """
    corners = (boxes[:, 0], boxes[:, 1], boxes[:, 2], boxes[:, 3])
    x1, y1, x2, y2 = corners
    areas = (x2 - x1) * (y2 - y1)
    order = confs.argsort()[::-1]  # descending by confidence

    keep = []
    while order.size > 0:
        top = order[0]
        others = order[1:]
        keep.append(top)
        # Clipped intersection between the top box and the remainder.
        iw = np.maximum(0.0, np.minimum(x2[top], x2[others]) - np.maximum(x1[top], x1[others]))
        ih = np.maximum(0.0, np.minimum(y2[top], y2[others]) - np.maximum(y1[top], y1[others]))
        inter = iw * ih
        if min_mode:
            ratio = inter / np.minimum(areas[top], areas[others])
        else:
            ratio = inter / (areas[top] + areas[others] - inter)
        order = others[np.where(ratio <= nms_thresh)[0]]
    return np.array(keep)
def post_processing(conf_thresh, nms_thresh, output):
    """Convert raw detector output to per-image arrays of class-0 ("person") boxes.

    output : pair of (boxes [batch, num, 1, 4], confs [batch, num, num_classes]),
    torch tensors or ndarrays. For each image, returns either a (k, 5) ndarray
    of [x1, y1, x2, y2, conf] or an empty Python list when nothing survives.
    """
    box_array = output[0]
    confs = output[1]
    if type(box_array).__name__ != 'ndarray':
        box_array = box_array.cpu().detach().numpy()
        confs = confs.cpu().detach().numpy()

    box_array = box_array[:, :, 0]      # -> [batch, num, 4]
    max_conf = np.max(confs, axis=2)
    max_id = np.argmax(confs, axis=2)

    bboxes_batch = []
    for i in range(box_array.shape[0]):
        over_thresh = max_conf[i] > conf_thresh
        img_boxes = box_array[i, over_thresh, :]
        img_confs = max_conf[i, over_thresh]
        img_ids = max_id[i, over_thresh]

        bboxes = []
        # Only class 0 (person) is retained.
        person = img_ids == 0
        p_boxes = img_boxes[person, :]
        p_confs = img_confs[person]
        keep = nms_cpu(p_boxes, p_confs, nms_thresh)
        if keep.size > 0:
            bboxes = np.hstack([p_boxes[keep, :], p_confs[keep][:, None]])
        bboxes_batch.append(bboxes)
    return bboxes_batch
13,287 | import torch.nn as nn
import torch.nn.functional as F
from .torch_utils import *
import torch
from torch.autograd import Variable
def yolo_forward(output, conf_thresh, num_classes, anchors, num_anchors, scale_x_y, only_objectness=1,
                 validation=False):
    """Decode one YOLO head's raw feature map into boxes and class confidences.

    output : [batch, num_anchors * (5 + num_classes), H, W] raw conv output;
    per anchor the channel layout is (tx, ty, tw, th, objectness, classes...).
    Returns (boxes [batch, num_anchors*H*W, 1, 4] as normalised
    (x1, y1, x2, y2), confs [batch, num_anchors*H*W, num_classes] as
    objectness * class probability).
    NOTE(review): ``conf_thresh``, ``only_objectness`` and ``validation`` are
    unused in this body — confirm whether they are vestigial.
    """
    # Output would be invalid if it does not satisfy this assert
    # assert (output.size(1) == (5 + num_classes) * num_anchors)

    # print(output.size())

    # Slice the second dimension (channel) of output into:
    # [ 2, 2, 1, num_classes, 2, 2, 1, num_classes, 2, 2, 1, num_classes ]
    # And then into
    # bxy = [ 6 ]   bwh = [ 6 ]   det_conf = [ 3 ]   cls_conf = [ num_classes * 3 ]
    batch = output.size(0)
    H = output.size(2)
    W = output.size(3)

    bxy_list = []
    bwh_list = []
    det_confs_list = []
    cls_confs_list = []

    for i in range(num_anchors):
        begin = i * (5 + num_classes)
        end = (i + 1) * (5 + num_classes)

        bxy_list.append(output[:, begin : begin + 2])
        bwh_list.append(output[:, begin + 2 : begin + 4])
        det_confs_list.append(output[:, begin + 4 : begin + 5])
        cls_confs_list.append(output[:, begin + 5 : end])

    # Shape: [batch, num_anchors * 2, H, W]
    bxy = torch.cat(bxy_list, dim=1)
    # Shape: [batch, num_anchors * 2, H, W]
    bwh = torch.cat(bwh_list, dim=1)

    # Shape: [batch, num_anchors, H, W]
    det_confs = torch.cat(det_confs_list, dim=1)
    # Shape: [batch, num_anchors * H * W]
    det_confs = det_confs.view(batch, num_anchors * H * W)

    # Shape: [batch, num_anchors * num_classes, H, W]
    cls_confs = torch.cat(cls_confs_list, dim=1)
    # Shape: [batch, num_anchors, num_classes, H * W]
    cls_confs = cls_confs.view(batch, num_anchors, num_classes, H * W)
    # Shape: [batch, num_anchors, num_classes, H * W] --> [batch, num_anchors * H * W, num_classes]
    cls_confs = cls_confs.permute(0, 1, 3, 2).reshape(batch, num_anchors * H * W, num_classes)

    # Apply sigmoid(), exp() and softmax() to slices
    #
    bxy = torch.sigmoid(bxy) * scale_x_y - 0.5 * (scale_x_y - 1)
    bwh = torch.exp(bwh)
    det_confs = torch.sigmoid(det_confs)
    cls_confs = torch.sigmoid(cls_confs)

    # Prepare C-x, C-y, P-w, P-h (None of them are torch related)
    grid_x = np.expand_dims(np.expand_dims(np.expand_dims(np.linspace(0, W - 1, W), axis=0).repeat(H, 0), axis=0), axis=0)
    grid_y = np.expand_dims(np.expand_dims(np.expand_dims(np.linspace(0, H - 1, H), axis=1).repeat(W, 1), axis=0), axis=0)
    # grid_x = torch.linspace(0, W - 1, W).reshape(1, 1, 1, W).repeat(1, 1, H, 1)
    # grid_y = torch.linspace(0, H - 1, H).reshape(1, 1, H, 1).repeat(1, 1, 1, W)

    anchor_w = []
    anchor_h = []
    for i in range(num_anchors):
        anchor_w.append(anchors[i * 2])
        anchor_h.append(anchors[i * 2 + 1])

    device = None
    cuda_check = output.is_cuda
    if cuda_check:
        device = output.get_device()

    bx_list = []
    by_list = []
    bw_list = []
    bh_list = []

    # Apply C-x, C-y, P-w, P-h
    # NOTE(review): the grid tensors are re-created from numpy on every anchor
    # iteration — works, but could be hoisted out of the loop.
    for i in range(num_anchors):
        ii = i * 2
        # Shape: [batch, 1, H, W]
        bx = bxy[:, ii : ii + 1] + torch.tensor(grid_x, device=device, dtype=torch.float32) # grid_x.to(device=device, dtype=torch.float32)
        # Shape: [batch, 1, H, W]
        by = bxy[:, ii + 1 : ii + 2] + torch.tensor(grid_y, device=device, dtype=torch.float32) # grid_y.to(device=device, dtype=torch.float32)
        # Shape: [batch, 1, H, W]
        bw = bwh[:, ii : ii + 1] * anchor_w[i]
        # Shape: [batch, 1, H, W]
        bh = bwh[:, ii + 1 : ii + 2] * anchor_h[i]

        bx_list.append(bx)
        by_list.append(by)
        bw_list.append(bw)
        bh_list.append(bh)

    ########################################
    #   Figure out bboxes from slices     #
    ########################################

    # Shape: [batch, num_anchors, H, W]
    bx = torch.cat(bx_list, dim=1)
    # Shape: [batch, num_anchors, H, W]
    by = torch.cat(by_list, dim=1)
    # Shape: [batch, num_anchors, H, W]
    bw = torch.cat(bw_list, dim=1)
    # Shape: [batch, num_anchors, H, W]
    bh = torch.cat(bh_list, dim=1)

    # Shape: [batch, 2 * num_anchors, H, W]
    bx_bw = torch.cat((bx, bw), dim=1)
    # Shape: [batch, 2 * num_anchors, H, W]
    by_bh = torch.cat((by, bh), dim=1)

    # normalize coordinates to [0, 1]
    bx_bw /= W
    by_bh /= H

    # Shape: [batch, num_anchors * H * W, 1]
    bx = bx_bw[:, :num_anchors].view(batch, num_anchors * H * W, 1)
    by = by_bh[:, :num_anchors].view(batch, num_anchors * H * W, 1)
    bw = bx_bw[:, num_anchors:].view(batch, num_anchors * H * W, 1)
    bh = by_bh[:, num_anchors:].view(batch, num_anchors * H * W, 1)

    # Convert centre/size to corner coordinates.
    bx1 = bx - bw * 0.5
    by1 = by - bh * 0.5
    bx2 = bx1 + bw
    by2 = by1 + bh

    # Shape: [batch, num_anchors * h * w, 4] -> [batch, num_anchors * h * w, 1, 4]
    boxes = torch.cat((bx1, by1, bx2, by2), dim=2).view(batch, num_anchors * H * W, 1, 4)
    # boxes = boxes.repeat(1, 1, num_classes, 1)

    # boxes:     [batch, num_anchors * H * W, 1, 4]
    # cls_confs: [batch, num_anchors * H * W, num_classes]
    # det_confs: [batch, num_anchors * H * W]

    det_confs = det_confs.view(batch, num_anchors * H * W, 1)
    confs = cls_confs * det_confs

    # boxes: [batch, num_anchors * H * W, 1, 4]
    # confs: [batch, num_anchors * H * W, num_classes]

    return boxes, confs
13,288 | import torch.nn as nn
import torch.nn.functional as F
from .torch_utils import *
import torch
from torch.autograd import Variable
def yolo_forward_dynamic(output, conf_thresh, num_classes, anchors, num_anchors, scale_x_y, only_objectness=1,
                         validation=False):
    """Decode a YOLO head's raw feature map into boxes and class confidences.

    Identical in logic to ``yolo_forward`` but reads batch/H/W from the input
    tensor at every use instead of caching them — this keeps the graph
    shape-dynamic (e.g. for ONNX export with variable input sizes).
    output : [batch, num_anchors * (5 + num_classes), H, W].
    Returns (boxes [batch, num_anchors*H*W, 1, 4] normalised corners,
    confs [batch, num_anchors*H*W, num_classes]).
    NOTE(review): ``conf_thresh``, ``only_objectness`` and ``validation`` are
    unused in this body — confirm whether they are vestigial.
    """
    # Output would be invalid if it does not satisfy this assert
    # assert (output.size(1) == (5 + num_classes) * num_anchors)

    # print(output.size())

    # Slice the second dimension (channel) of output into:
    # [ 2, 2, 1, num_classes, 2, 2, 1, num_classes, 2, 2, 1, num_classes ]
    # And then into
    # bxy = [ 6 ]   bwh = [ 6 ]   det_conf = [ 3 ]   cls_conf = [ num_classes * 3 ]
    # batch = output.size(0)
    # H = output.size(2)
    # W = output.size(3)

    bxy_list = []
    bwh_list = []
    det_confs_list = []
    cls_confs_list = []

    for i in range(num_anchors):
        begin = i * (5 + num_classes)
        end = (i + 1) * (5 + num_classes)

        bxy_list.append(output[:, begin : begin + 2])
        bwh_list.append(output[:, begin + 2 : begin + 4])
        det_confs_list.append(output[:, begin + 4 : begin + 5])
        cls_confs_list.append(output[:, begin + 5 : end])

    # Shape: [batch, num_anchors * 2, H, W]
    bxy = torch.cat(bxy_list, dim=1)
    # Shape: [batch, num_anchors * 2, H, W]
    bwh = torch.cat(bwh_list, dim=1)

    # Shape: [batch, num_anchors, H, W]
    det_confs = torch.cat(det_confs_list, dim=1)
    # Shape: [batch, num_anchors * H * W]
    det_confs = det_confs.view(output.size(0), num_anchors * output.size(2) * output.size(3))

    # Shape: [batch, num_anchors * num_classes, H, W]
    cls_confs = torch.cat(cls_confs_list, dim=1)
    # Shape: [batch, num_anchors, num_classes, H * W]
    cls_confs = cls_confs.view(output.size(0), num_anchors, num_classes, output.size(2) * output.size(3))
    # Shape: [batch, num_anchors, num_classes, H * W] --> [batch, num_anchors * H * W, num_classes]
    cls_confs = cls_confs.permute(0, 1, 3, 2).reshape(output.size(0), num_anchors * output.size(2) * output.size(3), num_classes)

    # Apply sigmoid(), exp() and softmax() to slices
    #
    bxy = torch.sigmoid(bxy) * scale_x_y - 0.5 * (scale_x_y - 1)
    bwh = torch.exp(bwh)
    det_confs = torch.sigmoid(det_confs)
    cls_confs = torch.sigmoid(cls_confs)

    # Prepare C-x, C-y, P-w, P-h (None of them are torch related)
    grid_x = np.expand_dims(np.expand_dims(np.expand_dims(np.linspace(0, output.size(3) - 1, output.size(3)), axis=0).repeat(output.size(2), 0), axis=0), axis=0)
    grid_y = np.expand_dims(np.expand_dims(np.expand_dims(np.linspace(0, output.size(2) - 1, output.size(2)), axis=1).repeat(output.size(3), 1), axis=0), axis=0)
    # grid_x = torch.linspace(0, W - 1, W).reshape(1, 1, 1, W).repeat(1, 1, H, 1)
    # grid_y = torch.linspace(0, H - 1, H).reshape(1, 1, H, 1).repeat(1, 1, 1, W)

    anchor_w = []
    anchor_h = []
    for i in range(num_anchors):
        anchor_w.append(anchors[i * 2])
        anchor_h.append(anchors[i * 2 + 1])

    device = None
    cuda_check = output.is_cuda
    if cuda_check:
        device = output.get_device()

    bx_list = []
    by_list = []
    bw_list = []
    bh_list = []

    # Apply C-x, C-y, P-w, P-h
    for i in range(num_anchors):
        ii = i * 2
        # Shape: [batch, 1, H, W]
        bx = bxy[:, ii : ii + 1] + torch.tensor(grid_x, device=device, dtype=torch.float32) # grid_x.to(device=device, dtype=torch.float32)
        # Shape: [batch, 1, H, W]
        by = bxy[:, ii + 1 : ii + 2] + torch.tensor(grid_y, device=device, dtype=torch.float32) # grid_y.to(device=device, dtype=torch.float32)
        # Shape: [batch, 1, H, W]
        bw = bwh[:, ii : ii + 1] * anchor_w[i]
        # Shape: [batch, 1, H, W]
        bh = bwh[:, ii + 1 : ii + 2] * anchor_h[i]

        bx_list.append(bx)
        by_list.append(by)
        bw_list.append(bw)
        bh_list.append(bh)

    ########################################
    #   Figure out bboxes from slices     #
    ########################################

    # Shape: [batch, num_anchors, H, W]
    bx = torch.cat(bx_list, dim=1)
    # Shape: [batch, num_anchors, H, W]
    by = torch.cat(by_list, dim=1)
    # Shape: [batch, num_anchors, H, W]
    bw = torch.cat(bw_list, dim=1)
    # Shape: [batch, num_anchors, H, W]
    bh = torch.cat(bh_list, dim=1)

    # Shape: [batch, 2 * num_anchors, H, W]
    bx_bw = torch.cat((bx, bw), dim=1)
    # Shape: [batch, 2 * num_anchors, H, W]
    by_bh = torch.cat((by, bh), dim=1)

    # normalize coordinates to [0, 1]
    bx_bw /= output.size(3)
    by_bh /= output.size(2)

    # Shape: [batch, num_anchors * H * W, 1]
    bx = bx_bw[:, :num_anchors].view(output.size(0), num_anchors * output.size(2) * output.size(3), 1)
    by = by_bh[:, :num_anchors].view(output.size(0), num_anchors * output.size(2) * output.size(3), 1)
    bw = bx_bw[:, num_anchors:].view(output.size(0), num_anchors * output.size(2) * output.size(3), 1)
    bh = by_bh[:, num_anchors:].view(output.size(0), num_anchors * output.size(2) * output.size(3), 1)

    # Convert centre/size to corner coordinates.
    bx1 = bx - bw * 0.5
    by1 = by - bh * 0.5
    bx2 = bx1 + bw
    by2 = by1 + bh

    # Shape: [batch, num_anchors * h * w, 4] -> [batch, num_anchors * h * w, 1, 4]
    boxes = torch.cat((bx1, by1, bx2, by2), dim=2).view(output.size(0), num_anchors * output.size(2) * output.size(3), 1, 4)
    # boxes = boxes.repeat(1, 1, num_classes, 1)

    # boxes:     [batch, num_anchors * H * W, 1, 4]
    # cls_confs: [batch, num_anchors * H * W, num_classes]
    # det_confs: [batch, num_anchors * H * W]

    det_confs = det_confs.view(output.size(0), num_anchors * output.size(2) * output.size(3), 1)
    confs = cls_confs * det_confs

    # boxes: [batch, num_anchors * H * W, 1, 4]
    # confs: [batch, num_anchors * H * W, num_classes]

    return boxes, confs
13,289 | from os.path import join
import cv2
import numpy as np
import torch
from torchvision.transforms import transforms
from .hrnet import HRNet
# Position of each COCO-17 joint inside the 25-joint BODY-25 keypoint layout.
COCO17_IN_BODY25 = [0, 16, 15, 18, 17, 5, 2, 6, 3, 7, 4, 12, 9, 13, 10, 14, 11]
import math
def coco17tobody25(points2d):
    """Convert (N, 17, 3) COCO-17 keypoints into the (N, 25, 3) BODY-25 layout.

    Columns are (x, y, confidence). Two extra joints are synthesised — joint 8
    from joints 9/12 and joint 1 from joints 2/5 (mid-hip and neck in the
    BODY-25 convention): coordinates are the pair average, confidence the pair
    minimum. Joints with no COCO counterpart stay zero.
    """
    out = np.zeros((points2d.shape[0], 25, 3))
    out[:, COCO17_IN_BODY25, :2] = points2d[:, :, :2]
    out[:, COCO17_IN_BODY25, 2:3] = points2d[:, :, 2:3]
    # Joint 8: midpoint of joints 9 and 12, confidence = weaker of the two.
    out[:, 8, :2] = out[:, [9, 12], :2].mean(axis=1)
    out[:, 8, 2] = out[:, [9, 12], 2].min(axis=1)
    # Joint 1: midpoint of joints 2 and 5.
    out[:, 1, :2] = out[:, [2, 5], :2].mean(axis=1)
    out[:, 1, 2] = out[:, [2, 5], 2].min(axis=1)
    return out
13,290 | from os.path import join
import cv2
import numpy as np
import torch
from torchvision.transforms import transforms
from .hrnet import HRNet
tmp_size = sigma * 3
size = 2 * tmp_size + 1
x = np.arange(0, size, 1, np.float32)
y = x[:, np.newaxis]
x0 = y0 = size // 2
g = np.exp(- ((x - x0) ** 2 + (y - y0) ** 2) / (2 * sigma ** 2))
return g, tmp_siz
import math
def generate_gauss(sigma):
tmp_size = sigma * 3
size = 2 * tmp_size + 1
x = np.arange(0, size, 1, np.float32)
y = x[:, np.newaxis]
x0 = y0 = size // 2
# The gaussian is not normalized, we want the center value to equal 1
g = np.exp(- ((x - x0) ** 2 + (y - y0) ** 2) / (2 * sigma ** 2))
return g, tmp_size | null |
13,291 | from os.path import join
import cv2
import numpy as np
import torch
from torchvision.transforms import transforms
from .hrnet import HRNet
import math
The provided code snippet includes necessary dependencies for implementing the `box_to_center_scale` function. Write a Python function `def box_to_center_scale(box, model_image_width, model_image_height, scale_factor=1.25)` to solve the following problem:
convert a box to center,scale information required for pose transformation Parameters ---------- box : list of tuple list of length 2 with two tuples of floats representing bottom left and top right corner of a box model_image_width : int model_image_height : int Returns ------- (numpy array, numpy array) Two numpy arrays, coordinates for the center of the box and the scale of the box
Here is the function:
def box_to_center_scale(box, model_image_width, model_image_height, scale_factor=1.25):
    """Convert a box to the (center, scale) pair used by pose-transform cropping.

    Parameters
    ----------
    box : sequence of four floats (x1, y1, x2, y2)
    model_image_width : int
    model_image_height : int
    scale_factor : float
        Padding applied around the box (default 1.25).

    Returns
    -------
    (numpy array, numpy array)
        Box centre ``[cx, cy]`` and scale ``[w, h] / 200 * scale_factor``,
        after the box has been grown to match the model's aspect ratio.
    """
    x1, y1 = box[0], box[1]
    x2, y2 = box[2], box[3]
    box_w = x2 - x1
    box_h = y2 - y1

    center = np.zeros((2), dtype=np.float32)
    center[0] = x1 + box_w * 0.5
    center[1] = y1 + box_h * 0.5

    aspect_ratio = model_image_width * 1.0 / model_image_height
    pixel_std = 200  # scale is expressed in units of 200 px (HRNet convention)
    # Grow the shorter side so the crop matches the model aspect ratio.
    if box_w > aspect_ratio * box_h:
        box_h = box_w * 1.0 / aspect_ratio
    elif box_w < aspect_ratio * box_h:
        box_w = box_h * aspect_ratio

    scale = np.array([box_w * 1.0 / pixel_std, box_h * 1.0 / pixel_std],
                     dtype=np.float32)
    scale = scale * scale_factor
    return center, scale
13,292 | from os.path import join
import cv2
import numpy as np
import torch
from torchvision.transforms import transforms
from .hrnet import HRNet
import math
def affine_transform(pt, t):
    """Apply the 2x3 affine matrix *t* to the 2D point *pt*; return the new (x, y)."""
    homogeneous = np.array([pt[0], pt[1], 1.])
    return (t @ homogeneous)[:2]
13,293 | from os.path import join
import cv2
import numpy as np
import torch
from torchvision.transforms import transforms
from .hrnet import HRNet
# NOTE(review): stray fragment — `tmp_size` is undefined at this scope; this line
# looks like residue from a Gaussian-kernel helper lost in extraction. Confirm
# against the original file before running this module.
size = 2 * tmp_size + 1
def get_max_preds(batch_heatmaps):
    '''
    Locate the per-joint maximum of each heatmap.

    heatmaps: numpy.ndarray([batch_size, num_joints, height, width])

    Returns
    -------
    preds : (batch, joints, 2) float32 array of (x, y) peak coordinates;
        joints whose peak value is not positive are zeroed out.
    maxvals : (batch, joints, 1) peak heatmap values.
    '''
    assert isinstance(batch_heatmaps, np.ndarray), \
        'batch_heatmaps should be numpy.ndarray'
    assert batch_heatmaps.ndim == 4, 'batch_images should be 4-ndim'

    batch, joints = batch_heatmaps.shape[0], batch_heatmaps.shape[1]
    width = batch_heatmaps.shape[3]

    flat = batch_heatmaps.reshape((batch, joints, -1))
    idx = np.argmax(flat, 2).reshape((batch, joints, 1))
    maxvals = np.amax(flat, 2).reshape((batch, joints, 1))

    # Unravel the flat argmax index into (x, y) pixel coordinates.
    preds = np.tile(idx, (1, 1, 2)).astype(np.float32)
    preds[:, :, 0] = preds[:, :, 0] % width
    preds[:, :, 1] = np.floor(preds[:, :, 1] / width)

    # Suppress joints whose peak response is not positive.
    valid = np.tile(np.greater(maxvals, 0.0), (1, 1, 2)).astype(np.float32)
    preds *= valid
    return preds, maxvals
# Skeleton tree used when drawing 2D poses: 'kintree' holds (child, parent)
# joint-index pairs — presumably COCO-17 ordering, TODO confirm — and 'color'
# a per-limb key into colors_table below.
config_ = {'kintree': [[1, 0], [2, 0], [3, 1], [4, 2], [5, 0], [6, 0], [7, 5], [8, 6], [9, 7], [10, 8], [11, 5], [12, 6], [13, 11], [
    14, 12], [15, 13], [16, 14], [6, 5], [12, 11]], 'color': ['g', 'r', 'g', 'r', 'g', 'r', 'g', 'r', 'g', 'r', 'g', 'r', 'g', 'r', 'g', 'r', 'k', 'k']}
# Named RGB colors in the 0-1 range, looked up via the 'color' keys above.
colors_table = {
    # colorblind/print/copy safe:
    '_blue': [0.65098039, 0.74117647, 0.85882353],
    '_pink': [.9, .7, .7],
    '_mint': [ 166/255., 229/255., 204/255.],
    '_mint2': [ 202/255., 229/255., 223/255.],
    '_green': [ 153/255., 216/255., 201/255.],
    '_green2': [ 171/255., 221/255., 164/255.],
    '_red': [ 251/255., 128/255., 114/255.],
    '_orange': [ 253/255., 174/255., 97/255.],
    '_yellow': [ 250/255., 230/255., 154/255.],
    'r':[255/255,0,0],
    'g':[0,255/255,0],
    'b':[0,0,255/255],
    'k':[0,0,0],
    'y':[255/255,255/255,0],
    'purple':[128/255,0,128/255]
}
import math
The provided code snippet includes necessary dependencies for implementing the `save_batch_heatmaps` function. Write a Python function `def save_batch_heatmaps(batch_image, batch_heatmaps, file_name, normalize=True)` to solve the following problem:
batch_image: [batch_size, channel, height, width] batch_heatmaps: [batch_size, num_joints, height, width] file_name: saved file name
Here is the function:
def save_batch_heatmaps(batch_image, batch_heatmaps, file_name,
                        normalize=True):
    '''
    Render a debug grid of predicted heatmaps and write it to *file_name*.

    batch_image: [batch_size, channel, height, width] input tensor
    batch_heatmaps: [batch_size, num_joints, height, width] predicted heatmaps
    file_name: saved file name

    Each row of the grid corresponds to one batch item: the resized input with
    predicted joints, the same image with the skeleton drawn, then one
    heatmap-overlaid panel per joint.
    '''
    if normalize:
        # Min-max normalize a copy so mul(255) below maps into displayable range.
        batch_image = batch_image.clone()
        min = float(batch_image.min())
        max = float(batch_image.max())
        batch_image.add_(-min).div_(max - min + 1e-5)
    batch_size = batch_heatmaps.size(0)
    num_joints = batch_heatmaps.size(1)
    heatmap_height = batch_heatmaps.size(2)
    heatmap_width = batch_heatmaps.size(3)
    # One row per batch item; two leading panels (joints, skeleton) + one per joint.
    grid_image = np.zeros((batch_size*heatmap_height,
                           (num_joints+2)*heatmap_width,
                           3),
                          dtype=np.uint8)
    preds, maxvals = get_max_preds(batch_heatmaps.detach().cpu().numpy())
    for i in range(batch_size):
        image = batch_image[i].mul(255)\
                              .clamp(0, 255)\
                              .byte()\
                              .permute(1, 2, 0)\
                              .cpu().numpy()
        heatmaps = batch_heatmaps[i].mul(255)\
                                    .clamp(0, 255)\
                                    .byte()\
                                    .cpu().numpy()
        resized_image = cv2.resize(image,
                                   (int(heatmap_width), int(heatmap_height)))
        resized_image_copy = resized_image.copy()
        height_begin = heatmap_height * i
        height_end = heatmap_height * (i + 1)
        # Draw skeleton limbs on the copy, skipping low-confidence endpoints.
        # NOTE(review): plot_line is defined elsewhere in this module.
        for ip in range(len(config_['kintree'])):
            src, dst = config_['kintree'][ip]
            c = config_['color'][ip]
            if maxvals[i][src] < 0.1 or maxvals[i][dst] < 0.1:
                continue
            plot_line(resized_image_copy, preds[i][src], preds[i][dst], colors_table[c], 1)
        for j in range(num_joints):
            cv2.circle(resized_image,
                       (int(preds[i][j][0]), int(preds[i][j][1])),
                       1, [0, 0, 255], 1)
            heatmap = heatmaps[j, :, :]
            # Blend the colored heatmap only where the response exceeds 0.1.
            mask = (heatmap > 0.1)[:,:,None]
            colored_heatmap = cv2.applyColorMap(heatmap, cv2.COLORMAP_JET)
            masked_image = (colored_heatmap*0.7 + resized_image*0.3)*mask + resized_image*(1-mask)
            cv2.circle(masked_image,
                       (int(preds[i][j][0]), int(preds[i][j][1])),
                       1, [0, 0, 255], 1)
            width_begin = heatmap_width * (j+2)
            width_end = heatmap_width * (j+2+1)
            grid_image[height_begin:height_end, width_begin:width_end, :] = \
                masked_image
            # grid_image[height_begin:height_end, width_begin:width_end, :] = \
            #     colored_heatmap*0.7 + resized_image*0.3
        grid_image[height_begin:height_end, 0:heatmap_width, :] = resized_image
        grid_image[height_begin:height_end, heatmap_width:heatmap_width+heatmap_width, :] = resized_image_copy
    cv2.imwrite(file_name, grid_image)
13,294 | from os.path import join
import cv2
import numpy as np
import torch
from torchvision.transforms import transforms
from .hrnet import HRNet
def get_max_preds(batch_heatmaps):
def transform_preds(coords, center, scale, rot, output_size):
import math
def get_final_preds(batch_heatmaps, center, scale, rot=None, flip=None):
    """Decode heatmaps into keypoint coordinates in the original image frame.

    batch_heatmaps: numpy.ndarray([batch_size, num_joints, height, width])
    center, scale: per-item crop parameters used to undo the input transform
    rot: optional per-item rotation angles applied at inference time
    flip: optional per-item horizontal-flip flags applied at inference time

    Returns (preds, maxvals): coordinates (batch, joints, 2) mapped back via
    transform_preds, and the per-joint peak heatmap values.
    """
    coords, maxvals = get_max_preds(batch_heatmaps)
    heatmap_height = batch_heatmaps.shape[2]
    heatmap_width = batch_heatmaps.shape[3]
    # post-processing
    if True:
        for n in range(coords.shape[0]):
            for p in range(coords.shape[1]):
                hm = batch_heatmaps[n][p]
                px = int(math.floor(coords[n][p][0] + 0.5))
                py = int(math.floor(coords[n][p][1] + 0.5))
                if 1 < px < heatmap_width-1 and 1 < py < heatmap_height-1:
                    # Quarter-pixel refinement: nudge the integer argmax toward
                    # the neighbor with the larger response along each axis.
                    diff = np.array(
                        [
                            hm[py][px+1] - hm[py][px-1],
                            hm[py+1][px]-hm[py-1][px]
                        ]
                    )
                    coords[n][p] += np.sign(diff) * .25
    preds = coords.copy()
    # Transform back
    for i in range(coords.shape[0]):
        if flip is not None:
            if flip[i]:
                # Undo the horizontal flip applied to the network input.
                coords[i, :, 0] = heatmap_width - 1 - coords[i, :, 0]
        if rot is None:
            _rot = 0
        else:
            _rot = rot[i]
        preds[i] = transform_preds(
            coords[i], center[i], scale[i], _rot, [heatmap_width, heatmap_height]
        )
    return preds, maxvals
13,295 | from os.path import join
import cv2
import numpy as np
import torch
from torchvision.transforms import transforms
from .hrnet import HRNet
# NOTE(review): truncated in extraction — presumably a dict mapping sigma to
# {'radius': ..., 'kernel': ...} (a precomputed truncated Gaussian), as read
# by get_gaussian_maps. Restore the right-hand side before using this module.
gauss =
import math
def get_gaussian_maps(net_out, keypoints, sigma):
    """Build per-joint Gaussian maps centered at *keypoints*.

    net_out: (batch, joints, height, width) array whose shape is reused.
    keypoints: (batch, joints, 3) of (x, y, conf); joints with conf < 0.01
        get an all-zero map.
    sigma: key into the module-level `gauss` table providing a precomputed
        'radius' and 'kernel' for that sigma.
    Returns a float32 array shaped like net_out.
    """
    radius, kernel = gauss[sigma]['radius'], gauss[sigma]['kernel']
    weights = np.ones(net_out.shape, dtype=np.float32)
    for i in range(weights.shape[0]):
        for nj in range(weights.shape[1]):
            # Skip invalid or low-confidence joints: leave an all-zero map.
            if keypoints[i][nj][2] < 0:
                weights[i][nj] = 0
                continue
            elif keypoints[i][nj][2] < 0.01:
                weights[i][nj] = 0
                continue
            # Clear the map, then paste the kernel around the rounded center.
            weights[i][nj] = 0
            mu_x, mu_y = keypoints[i][nj][:2]
            mu_x, mu_y = int(mu_x + 0.5), int(mu_y + 0.5)
            # Usable gaussian range
            ul = [mu_x - radius, mu_y - radius]
            br = [mu_x + radius + 1, mu_y + radius + 1]
            # Kernel-window coordinates clipped to the map bounds.
            g_x = max(0, -ul[0]), min(br[0], weights.shape[3]) - ul[0]
            g_y = max(0, -ul[1]), min(br[1], weights.shape[2]) - ul[1]
            # Image range
            img_x = max(0, ul[0]), min(br[0], weights.shape[3])
            img_y = max(0, ul[1]), min(br[1], weights.shape[2])
            # Paste the visible part of the kernel into the map.
            weights[i][nj][img_y[0]:img_y[1], img_x[0]:img_x[1]] = \
                kernel[g_y[0]:g_y[1], g_x[0]:g_x[1]]
    return weights
13,296 | import os
import shutil
from tqdm import tqdm
from .wrapper_base import bbox_from_keypoints, create_annot_file, check_result
from ..mytools import read_json
from ..annotator.file_utils import save_annot
from os.path import join
import numpy as np
import cv2
from glob import glob
from multiprocessing import Process
def run_openpose(image_root, annot_root, config):
def convert_from_openpose(src, dst, image_root, ext):
global_tasks = []
def check_result(image_root, annot_root):
def extract_2d(image_root, annot_root, tmp_root, config):
    """Run OpenPose on *image_root* (if not already done) and convert its output.

    Raw OpenPose JSON goes to *tmp_root*; converted annotations to *annot_root*.
    The conversion runs in a background Process appended to `global_tasks`,
    which is returned so the caller can join the workers later.
    """
    # Already converted: nothing to do.
    if check_result(image_root, annot_root):
        return global_tasks
    if not check_result(image_root, tmp_root):
        run_openpose(image_root, tmp_root, config)
    # TODO: add current task to global_tasks
    thread = Process(target=convert_from_openpose,
        args=(tmp_root, annot_root, image_root, config['ext'])) # there should be no data race here
    thread.start()
    global_tasks.append(thread)
    return global_tasks
13,297 | import os
import shutil
from tqdm import tqdm
from .wrapper_base import bbox_from_keypoints, create_annot_file, check_result
from ..mytools import read_json
from ..annotator.file_utils import save_annot
from os.path import join
import numpy as np
import cv2
from glob import glob
from multiprocessing import Process
def get_crop(image, bbox, rot, scale=1.2):
    """Crop *image* around *bbox* = (l, t, r, b, conf), enlarged by *scale*.

    If rot == 180 the crop is flipped in both axes (a 180-degree rotation).
    Returns (crop, (left, top)) where (left, top) is the crop origin in *image*.
    """
    left, top, right, bottom, _ = bbox
    center_x = (left + right) / 2
    center_y = (top + bottom) / 2
    half_w = (right - left) * scale / 2
    half_h = (bottom - top) * scale / 2

    # Round the enlarged box and clamp it to the image bounds.
    x0 = max(0, int(center_x - half_w + 0.5))
    y0 = max(0, int(center_y - half_h + 0.5))
    x1 = min(image.shape[1], int(center_x + half_w + 0.5))
    y1 = min(image.shape[0], int(center_y + half_h + 0.5))

    crop = np.ascontiguousarray(image[y0:y1, x0:x1].copy())
    if rot == 180:
        # Flipping both axes is equivalent to rotating the patch 180 degrees.
        crop = cv2.flip(crop, -1)
    return crop, (x0, y0)
13,298 | import os
import shutil
from tqdm import tqdm
from .wrapper_base import bbox_from_keypoints, create_annot_file, check_result
from ..mytools import read_json
from ..annotator.file_utils import save_annot
from os.path import join
import numpy as np
import cv2
from glob import glob
from multiprocessing import Process
def transoform_foot(crop_shape, start, rot, keypoints, kpts_old=None):
    """Map OpenPose keypoints detected on a crop back to full-image coordinates
    and optionally merge their foot joints into existing HRNet keypoints.

    crop_shape: (height, width) of the crop
    start: (left, top) of the crop within the full image
    rot: crop rotation; only 180 is handled
    keypoints: (nPersons, 25, 3) body25-style detections on the crop
    kpts_old: optional existing keypoints whose feet (indices 19+) get replaced

    Returns the first person's keypoints if kpts_old is None, otherwise
    kpts_old with the foot joints copied from the best-matching person.
    """
    l, t = start
    if rot == 180:
        # Undo the 180-degree crop rotation before applying the offset.
        keypoints[..., 0] = crop_shape[1] - keypoints[..., 0] - 1
        keypoints[..., 1] = crop_shape[0] - keypoints[..., 1] - 1
    keypoints[..., 0] += l
    keypoints[..., 1] += t
    if kpts_old is None:
        kpts_op = keypoints[0]
        return kpts_op
    # Pick the detected person closest to the existing keypoints:
    # confidence-weighted mean distance over the first 15 body joints.
    kpts_np = np.array(kpts_old)
    dist = np.linalg.norm(kpts_np[None, :15, :2] - keypoints[:, :15, :2], axis=-1)
    conf = np.minimum(kpts_np[None, :15, 2], keypoints[:, :15, 2])
    dist = (dist * conf).sum(axis=-1)/conf.sum(axis=-1)*conf.shape[1]/(conf>0).sum(axis=-1)
    best = dist.argmin()
    kpts_op = keypoints[best]
    # TODO: sanity-check the keypoints
    # The HRNet estimate takes precedence here.
    # WARN: disable feet
    # Check whether OpenPose's feet are swapped relative to HRNet's ankles
    # (indices 11/14): if left matches right and vice versa, swap the foot
    # triplets 19-21 <-> 22-24.
    if (kpts_np[[11, 14], -1] > 0.3).all() and (kpts_op[[11, 14], -1] > 0.3).all():
        dist_ll = np.linalg.norm(kpts_np[11, :2] - kpts_op[11, :2])
        dist_rr = np.linalg.norm(kpts_np[14, :2] - kpts_op[14, :2])
        dist_lr = np.linalg.norm(kpts_np[11, :2] - kpts_op[14, :2])
        dist_rl = np.linalg.norm(kpts_np[14, :2] - kpts_op[11, :2])
        if dist_lr < dist_ll and dist_rl < dist_rr:
            kpts_op[[19, 20, 21, 22, 23, 24]] = kpts_op[[22, 23, 24, 19, 20, 21]]
    # if (kpts_np[[11, 14], -1] > 0.3).all() and (kpts_op[[19, 22], -1] > 0.3).all():
    #     if np.linalg.norm(kpts_op[19, :2] - kpts_np[11, :2]) \
    #         < np.linalg.norm(kpts_op[19, :2] - kpts_np[14, :2])\
    #         and np.linalg.norm(kpts_op[22, :2] - kpts_np[11, :2]) \
    #         > np.linalg.norm(kpts_op[22, :2] - kpts_np[14, :2]):
    #         kpts_op[[19, 20, 21, 22, 23, 24]] = kpts_op[[22, 23, 24, 19, 20, 21]]
    #     print('[info] swap left and right')
    # Copy only the foot joints (19 onward) from OpenPose into the result.
    kpts_np[19:] = kpts_op[19:]
    return kpts_np
13,299 | import os
import shutil
from tqdm import tqdm
from .wrapper_base import bbox_from_keypoints, create_annot_file, check_result
from ..mytools import read_json
from ..annotator.file_utils import save_annot
from os.path import join
import numpy as np
import cv2
from glob import glob
from multiprocessing import Process
def filter_feet(kpts):
    """Zero out foot keypoints whose reach from the ankle is implausibly large
    compared with the lower-leg length (likely detection noise).

    kpts: (25, 3) body25-style keypoints; modified in place and returned.
    """
    # Left foot: knee/ankle are indices 13/14, foot triplet is 19-21.
    if (kpts[[13, 14, 19], -1] > 0).all():
        foot_reach = ((kpts[[19, 20, 21], -1] > 0) *
                      np.linalg.norm(kpts[[19, 20, 21], :2] - kpts[14, :2], axis=-1)).max()
        leg_len = np.linalg.norm(kpts[13, :2] - kpts[14, :2])
        if leg_len < 1.5 * foot_reach:
            kpts[[19, 20, 21]] = 0.
            print('[LOG] remove left ankle {} < {}'.format(leg_len, foot_reach))
    # Right foot: knee/ankle are indices 10/11, foot triplet is 22-24.
    if (kpts[[10, 11], -1] > 0).all():
        foot_reach = ((kpts[[22, 23, 24], -1] > 0) *
                      np.linalg.norm(kpts[[22, 23, 24], :2] - kpts[11, :2], axis=-1)).max()
        leg_len = np.linalg.norm(kpts[10, :2] - kpts[11, :2])
        if leg_len < 1.5 * foot_reach:
            kpts[[22, 23, 24]] = 0.
            print('[LOG] remove right ankle {} < {}'.format(leg_len, foot_reach))
    return kpts
13,300 | import numpy as np
def dist_pl(query_points, line, moment):
    """Distance from each query point to a line given in Pluecker form.

    query_points: (N, 3); line: (3,) direction (assumed unit); moment: (3,).
    Returns an (N,) array: the norm of the line's moment about each point.
    """
    residual_moment = moment - np.cross(query_points, line)
    return np.linalg.norm(residual_moment, axis=1)
13,301 | import numpy as np
def reciprocal_product(l1, m1, l2, m2):
    """Pairwise absolute reciprocal product of two sets of Pluecker lines.

    (l1, m1): (N, 3) directions and moments; (l2, m2): (M, 3).
    Returns an (N, M) array; a zero entry means the lines are coplanar.
    """
    # Broadcast set 1 along rows and set 2 along columns.
    d1, mo1 = l1[:, None], m1[:, None]
    d2, mo2 = l2[None, :], m2[None, :]
    return np.abs((d1 * mo2).sum(axis=2) + (d2 * mo1).sum(axis=2))
13,302 | import numpy as np
def dist_pl_pointwise(p0, p1):
    """Elementwise distance from points *p0* (..., 3+) to Pluecker lines *p1* (..., 6+).

    p1[..., :3] is the line direction (assumed unit), p1[..., 3:6] its moment.
    """
    residual = p1[..., 3:6] - np.cross(p0[..., :3], p1[..., :3])
    return np.linalg.norm(residual, axis=-1)
13,303 | import numpy as np
def dist_ll_pointwise(p0, p1):
    """Elementwise reciprocal product of Pluecker lines p0, p1 (..., 6+ each).

    Zero means the two lines are coplanar (they intersect or are parallel).
    """
    cross_terms = (p0[..., :3] * p1[..., 3:6]).sum(axis=-1) \
        + (p1[..., :3] * p0[..., 3:6]).sum(axis=-1)
    return np.abs(cross_terms)
def dist_ll_pointwise_conf(p0, p1):
    """Confidence-weighted mean line-line distance over the last axis.

    p0, p1: (..., K, 7) with index 6 holding per-line confidence. Groups whose
    total confidence falls below 0.1 are assigned a large sentinel (1e5).
    """
    per_pair = dist_ll_pointwise(p0, p1)
    conf = np.sqrt(p0[..., -1] * p1[..., -1])
    total_conf = conf.sum(axis=-1)
    mean_dist = (per_pair * conf).sum(axis=-1) / (1e-5 + total_conf)
    mean_dist[total_conf < 0.1] = 1e5
    return mean_dist
13,304 | import numpy as np
def plucker_from_pp(point1, point2):
    """Pluecker coordinates of the line through *point1* and *point2*.

    Delegates to `plucker_from_pl` (defined elsewhere in this module) with the
    base point and the direction point2 - point1.
    """
    line = point2 - point1
    return plucker_from_pl(point1, line)
def computeRay(keypoints2d, invK, R, T):
    """Convert 2D keypoints to Pluecker-coordinate rays in the world frame.

    keypoints2d: (nJoints, 3) pixel coordinates plus confidence; an extra
        leading batch dimension of size 1 is squeezed away.
    invK: (3, 3) inverse camera intrinsics
    R: (3, 3) world-to-camera rotation
    T: (3, 1) world-to-camera translation
    cam_center: (3, 1) camera center in world coordinates

    Returns a (1, nJoints, 7) array: direction (3), moment (3), confidence (1).
    """
    if len(keypoints2d.shape) == 3:
        keypoints2d = keypoints2d[0]
    conf = keypoints2d[..., -1:]
    # Camera center in world coordinates: -R^T T.
    cam_center = - R.T @ T
    N = keypoints2d.shape[0]
    # Back-project each homogeneous pixel to a world-space point on its ray.
    kp_pixel = np.hstack([keypoints2d[..., :2], np.ones_like(conf)])
    kp_all_3d = (kp_pixel @ invK.T - T.T) @ R
    l, m = plucker_from_pp(cam_center.T, kp_all_3d)
    res = np.hstack((l, m, conf))
    # Add a leading dimension for compatibility with the cpp version.
    return res[None, :, :]
13,305 | import numpy as np
def plucker_from_pp(point1, point2):
def computeRaynd(keypoints2d, invK, R, T):
    """Batched variant of computeRay: accepts keypoints with any leading shape.

    keypoints2d: (..., 3) pixel coordinates plus confidence.
    invK, R, T: camera parameters as in computeRay.
    Returns (..., 7): Pluecker direction (3), moment (3), confidence (1).
    """
    # keypoints2d: (..., 3)
    conf = keypoints2d[..., 2:]
    # cam_center: (1, 3) camera center in world coordinates (-R^T T, transposed)
    cam_center = - (R.T @ T).T
    kp_pixel = np.concatenate([keypoints2d[..., :2], np.ones_like(conf)], axis=-1)
    kp_all_3d = (kp_pixel @ invK.T - T.T) @ R
    # Broadcast the camera center against the keypoint batch dimensions.
    while len(cam_center.shape) < len(kp_all_3d.shape):
        cam_center = cam_center[None]
    l, m = plucker_from_pp(cam_center, kp_all_3d)
    res = np.concatenate((l, m, conf), axis=-1)
    return res
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.