id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
13,106 | import torch
from torch.nn import functional as F
import numpy as np
def quaternion_to_axis_angle(quaternion):
    """
    Convert quaternion to axis angle.
    based on: https://github.com/facebookresearch/QuaterNet/blob/master/common/quaternion.py#L138
    Args:
        quaternion: torch tensor of shape (batch_size, 4) in (w, x, y, z) representation.
    Returns:
        axis_angle: torch tensor of shape (batch_size, 3)
    Raises:
        TypeError: if the input is not a torch tensor.
        ValueError: if the last dimension is not 4.
    """
    eps = 1.e-8
    if not torch.is_tensor(quaternion):
        raise TypeError("Input type is not a torch.Tensor. Got {}".format(
            type(quaternion)))
    if not quaternion.shape[-1] == 4:
        raise ValueError("Input must be a tensor of shape Nx4 or 4. Got {}"
                         .format(quaternion.shape))
    # Split into scalar (w) and vector (x, y, z) parts.
    cos_theta = quaternion[..., 0]
    vec_x = quaternion[..., 1]
    vec_y = quaternion[..., 2]
    vec_z = quaternion[..., 3]
    sin_sq_theta = vec_x * vec_x + vec_y * vec_y + vec_z * vec_z
    # eps keeps sqrt differentiable / non-zero at the identity quaternion.
    sin_theta = torch.sqrt(sin_sq_theta + eps)
    # Negating both atan2 arguments when w < 0 selects the shorter rotation.
    full_angle = 2.0 * torch.where(
        cos_theta < 0.0,
        torch.atan2(-sin_theta, -cos_theta),
        torch.atan2(sin_theta, cos_theta))
    # Near sin(theta) = 0 fall back to the first-order factor 2 to avoid blow-up.
    scale = torch.where(sin_sq_theta > 0.0,
                        full_angle / sin_theta,
                        2.0 * torch.ones_like(sin_theta))
    return torch.stack((vec_x * scale, vec_y * scale, vec_z * scale), dim=-1)
def quaternion_mul(q, r):
    """
    Multiply quaternion(s) q with quaternion(s) r (Hamilton product q*r).
    Expects two equally-sized tensors of shape (*, 4), where * denotes any number of dimensions.
    Returns q*r as a tensor of shape (*, 4).
    """
    assert q.shape[-1] == 4
    assert r.shape[-1] == 4
    out_shape = q.shape
    # Flatten leading dims and unpack (w, x, y, z) components.
    qw, qx, qy, qz = q.reshape(-1, 4).unbind(dim=-1)
    rw, rx, ry, rz = r.reshape(-1, 4).unbind(dim=-1)
    # Standard Hamilton product, written out component-wise.
    w = qw * rw - qx * rx - qy * ry - qz * rz
    x = qw * rx + qx * rw + qy * rz - qz * ry
    y = qw * ry - qx * rz + qy * rw + qz * rx
    z = qw * rz + qx * ry - qy * rx + qz * rw
    return torch.stack((w, x, y, z), dim=1).view(out_shape)
The provided code snippet includes necessary dependencies for implementing the `geodesic_dist` function. Write a Python function `def geodesic_dist(q1,q2)` to solve the following problem:
@q1: torch tensor of shape (frame, joints, 4) quaternion @q2: same as q1 @output: torch tensor of shape (frame, joints)
Here is the function:
def geodesic_dist(q1, q2):
    """
    Geodesic distance (relative rotation angle) between two quaternion tensors.
    @q1: torch tensor of shape (frame, joints, 4) quaternion
    @q2: same as q1
    @output: torch tensor of shape (frame, joints)
    """
    # Inverse of a quaternion is conj(q) / |q|^2.
    q1_conjugate = q1.clone()
    q1_conjugate[:, :, 1:] *= -1
    # BUG FIX: the squared norm is w^2 + x^2 + y^2 + z^2; the previous code
    # added the *unsquared* vector norm to w^2.  The returned distance is
    # unchanged for unit quaternions (the axis-angle magnitude is invariant
    # to positive scaling of q_between), but the inverse itself is now correct.
    q1_sq_norm = (q1 ** 2).sum(dim=-1)
    q1_inverse = q1_conjugate / q1_sq_norm.unsqueeze(dim=-1)
    # Relative rotation taking q1 to q2, then its rotation angle.
    q_between = quaternion_mul(q1_inverse, q2)
    return quaternion_to_axis_angle(q_between).norm(dim=-1)
13,107 | import torch
from torch.nn import functional as F
import numpy as np
def get_extrinsic(translation, rotation):
    """
    Convert a camera-to-world pose (R, T) into world-to-camera extrinsics.

    Args:
        translation: (batch, 3) tensor.
        rotation: (batch, 3, 3) tensor.
    Returns:
        tuple of (batch, 3) extrinsic translation and (batch, 3, 3)
        extrinsic rotation, taken from the inverse of the 4x4 pose.
    """
    batch_size = translation.shape[0]
    # BUG FIX: allocate on the input's device/dtype; the previous code always
    # built the pose on CPU with the default dtype, which fails for CUDA
    # (or float64) inputs.
    pose = torch.zeros((batch_size, 4, 4),
                       device=translation.device, dtype=translation.dtype)
    pose[:, :3, :3] = rotation
    pose[:, :3, 3] = translation
    pose[:, 3, 3] = 1
    extrinsic = torch.inverse(pose)
    return extrinsic[:, :3, 3], extrinsic[:, :3, :3]
13,108 | import torch
from torch.nn import functional as F
import numpy as np
def euler_fix_old(euler):
    """
    Unwrap Euler-angle curves over time to remove 2*pi discontinuities.

    For every channel (axis l of joint j) an accumulated offset is carried
    along the frames; at each frame the candidate offset (unchanged, +2*pi,
    or -2*pi) whose unwrapped value stays closest to the previous frame wins.
    Operates in place on `euler` (torch tensor of shape (frame, joints, 3))
    and also returns it.
    """
    frame_num = euler.shape[0]
    joint_num = euler.shape[1]
    for l in range(3):
        for j in range(joint_num):
            # Running offset (multiple of 2*pi) applied to this channel.
            overall_add = 0.
            for i in range(1,frame_num):
                # Three candidate offsets: keep, add a turn, subtract a turn.
                add1 = overall_add
                add2 = overall_add + 2*np.pi
                add3 = overall_add - 2*np.pi
                previous = euler[i-1,j,l]
                value1 = euler[i,j,l] + add1
                value2 = euler[i,j,l] + add2
                value3 = euler[i,j,l] + add3
                # Distance of each candidate to the previous (unwrapped) value.
                e1 = torch.abs(value1 - previous)
                e2 = torch.abs(value2 - previous)
                e3 = torch.abs(value3 - previous)
                # NOTE: three independent `if`s (not elif), so on exact ties
                # the *last* matching candidate wins.
                if (e1 <= e2) and (e1 <= e3):
                    euler[i,j,l] = value1
                    overall_add = add1
                if (e2 <= e1) and (e2 <= e3):
                    euler[i, j, l] = value2
                    overall_add = add2
                if (e3 <= e1) and (e3 <= e2):
                    euler[i, j, l] = value3
                    overall_add = add3
    return euler | null |
13,109 | from ..annotator.file_utils import read_json
from ..mytools import Timer
from .lossbase import print_table
from ..config.baseconfig import load_object
from ..bodymodel.base import Params
from torch.utils.data import DataLoader
from tqdm import tqdm
def dict_of_tensor_to_numpy(body_params):
    """Detach every tensor in the dict and return numpy copies on the CPU."""
    converted = {}
    for name, tensor in body_params.items():
        converted[name] = tensor.detach().cpu().numpy()
    return converted
13,110 | from ..annotator.file_utils import read_json
from ..mytools import Timer
from .lossbase import print_table
from ..config.baseconfig import load_object
from ..bodymodel.base import Params
from torch.utils.data import DataLoader
from tqdm import tqdm
def grad_require(params, flag=False):
    """Set requires_grad=flag on every tensor in a list or dict of params."""
    if isinstance(params, list):
        tensors = params
    elif isinstance(params, dict):
        tensors = list(params.values())
    else:
        # Anything else is silently ignored, matching the original contract.
        return
    for tensor in tensors:
        tensor.requires_grad = flag
13,111 | from ..annotator.file_utils import read_json
from ..mytools import Timer
from .lossbase import print_table
from ..config.baseconfig import load_object
from ..bodymodel.base import Params
from torch.utils.data import DataLoader
from tqdm import tqdm
def rel_change(prev_val, curr_val):
    """Relative change between two successive loss values, with the
    denominator clamped away from zero."""
    denom = max(1e-5, abs(prev_val), abs(curr_val))
    return (prev_val - curr_val) / denom
13,112 | from ..annotator.file_utils import read_json
from ..mytools import Timer
from .lossbase import print_table
from ..config.baseconfig import load_object
from ..bodymodel.base import Params
from torch.utils.data import DataLoader
from tqdm import tqdm
class LBFGS(Optimizer):
def __init__(self,
params,
lr=1,
max_iter=20,
max_eval=None,
tolerance_grad=1e-5,
tolerance_change=1e-9,
history_size=100,
line_search_fn=None):
def _numel(self):
def _gather_flat_grad(self):
def _add_grad(self, step_size, update):
def _clone_param(self):
def _set_param(self, params_data):
def _directional_evaluate(self, closure, x, t, d):
def step(self, closure):
def obj_func(x, t, d):
def make_optimizer(opt_params, optim_type='lbfgs', max_iter=20,
    lr=1e-3, betas=(0.9, 0.999), weight_decay=0.0, **kwargs):
    """
    Build an optimizer over opt_params.

    opt_params may be a dict of tensors (converted to a plain list, since
    LBFGS does not accept parameter dicts) or a list of tensors.
    Raises NotImplementedError for unknown optim_type values.
    """
    if isinstance(opt_params, dict):
        # LBFGS does not support parameter dicts.
        opt_params = list(opt_params.values())
    if optim_type == 'adam':
        return torch.optim.Adam(opt_params, lr=lr, betas=betas,
                                weight_decay=weight_decay)
    if optim_type == 'lbfgs':
        from ..pyfitting.lbfgs import LBFGS
        return LBFGS(opt_params, line_search_fn='strong_wolfe',
                     max_iter=max_iter, **kwargs)
    raise NotImplementedError
13,113 | from ..annotator.file_utils import read_json
from ..mytools import Timer
from .lossbase import print_table
from ..config.baseconfig import load_object
from ..bodymodel.base import Params
from torch.utils.data import DataLoader
from tqdm import tqdm
def load_object(module_name, module_args, **extra_args):
    """Import the dotted path `module_name` and instantiate the named
    attribute with the merged keyword arguments."""
    module_path, _, attr_name = module_name.rpartition('.')
    module = importlib.import_module(module_path)
    return getattr(module, attr_name)(**extra_args, **module_args)
def make_lossfuncs(stage, infos, device, irepeat, verbose=False):
    """
    Instantiate the loss modules of a fitting stage.

    Args:
        stage: config object whose .loss maps name -> loss config
               (with .module, .args and optional .infos/.weights/.weight).
        infos: shared data forwarded into loss args declared in val.infos.
        device: torch device the loss modules are moved to.
        irepeat: index into a per-repeat weight schedule (val.weights).
        verbose: print the resulting table of losses and weights.
    Returns:
        (loss_funcs, weights) dicts keyed by loss name; entries whose weight
        is negative are dropped from both.
    """
    loss_funcs, weights = {}, {}
    for key, val in stage.loss.items():
        loss_args = dict(val.args)
        # Forward shared information (e.g. cached tensors) into the loss args.
        if 'infos' in val.keys():
            for k in val.infos:
                loss_args[k] = infos[k]
        module = load_object(val.module, loss_args)
        module.to(device)
        # A per-repeat weight schedule takes precedence over a scalar weight.
        if 'weights' in val.keys():
            weights[key] = val.weights[irepeat]
        else:
            weights[key] = val.weight
        # A negative weight disables the loss term entirely.
        if weights[key] < 0:
            weights.pop(key)
        else:
            loss_funcs[key] = module
    # BUG FIX: was `if verbose or True:`, which printed unconditionally and
    # made the `verbose` parameter dead.
    if verbose:
        print('Loss functions: ')
        for key, func in loss_funcs.items():
            print(' - {:15s}: {}, {}'.format(key, weights[key], func))
    return loss_funcs, weights
13,114 | from ..annotator.file_utils import read_json
from ..mytools import Timer
from .lossbase import print_table
from ..config.baseconfig import load_object
from ..bodymodel.base import Params
from torch.utils.data import DataLoader
from tqdm import tqdm
def load_object(module_name, module_args, **extra_args):
    """Dynamically import a dotted path and call the trailing attribute
    with the given keyword arguments."""
    parts = module_name.split('.')
    module = importlib.import_module('.'.join(parts[:-1]))
    factory = getattr(module, parts[-1])
    return factory(**extra_args, **module_args)
def make_before_after(before_after, body_model, body_params, infos):
    """
    Build the list of pre-/post-processing modules for a fitting stage.

    Args:
        before_after: mapping name -> config with .module and .args.
        body_model: injected into args when they declare a 'body_model' slot.
        body_params: unused here; kept for a uniform call signature.
        infos: attached to each instantiated module as `module.infos`.
    Returns:
        list of instantiated modules.
    Raises:
        NotImplementedError: when a configured module fails to load.
    """
    modules = []
    for key, val in before_after.items():
        args = dict(val.args)
        if 'body_model' in args.keys():
            args['body_model'] = body_model
        try:
            module = load_object(val.module, args)
        except Exception as err:
            # BUG FIX: narrowed from a bare `except:` (which also swallowed
            # KeyboardInterrupt/SystemExit) and chained so the root cause
            # survives in the traceback.
            print('[Fitting] Failed to load module {}'.format(key))
            raise NotImplementedError from err
        module.infos = infos
        modules.append(module)
    return modules
13,115 | from ..annotator.file_utils import read_json
from ..mytools import Timer
from .lossbase import print_table
from ..config.baseconfig import load_object
from ..bodymodel.base import Params
from torch.utils.data import DataLoader
from tqdm import tqdm
def dict_of_numpy_to_tensor(body_model, body_params, *args, **kwargs):
    """Convert every array in body_params to a float tensor on the body
    model's device."""
    target_device = body_model.device
    converted = {}
    for name, value in body_params.items():
        converted[name] = torch.Tensor(value).to(target_device)
    return converted
class AddExtra:
    """Ensure rotation/translation parameters ('R_*'/'T_*' keys) exist in a
    body-params dict, filling missing ones with zero 3-vectors shaped like
    the pose batch."""
    def __init__(self, vals) -> None:
        # Names of parameters that must exist after this step.
        self.vals = vals

    def __call__(self, body_model, body_params, *args, **kwargs):
        leading = body_params['poses'].shape[:-1]
        for name in self.vals:
            if name in body_params.keys():
                continue
            # Only rotation/translation-style keys get a zero default.
            if name.startswith(('R_', 'T_')):
                body_params[name] = np.zeros((*leading, 3), dtype=np.float32)
        return body_params
def load_object(module_name, module_args, **extra_args):
    """Resolve `pkg.mod.Name` to an object and construct it with the merged
    keyword arguments."""
    *pkg_parts, attr = module_name.split('.')
    module = importlib.import_module('.'.join(pkg_parts))
    return getattr(module, attr)(**extra_args, **module_args)
def process(start_or_end, body_model, body_params, infos):
    """
    Apply the configured pre-/post-processing steps to body_params in order.

    Each config entry is either a dict-like module spec (instantiated via
    load_object) or a shorthand: convert='numpy_to_tensor' or add=[names].
    Returns the processed body_params.
    """
    for key, val in start_or_end.items():
        if isinstance(val, dict):
            # NOTE(review): presumably val is an attribute-style dict
            # (e.g. EasyDict), since plain dicts have no .module — confirm.
            module = load_object(val.module, val.args)
        else:
            if key == 'convert' and val == 'numpy_to_tensor':
                module = dict_of_numpy_to_tensor
            if key == 'add':
                module = AddExtra(val)
        # NOTE(review): if no branch above matched, `module` is stale from
        # the previous iteration (or unbound on the first) — confirm every
        # config key is one of the handled cases.
        body_params = module(body_model, body_params, infos)
    return body_params | null |
13,116 | from ..annotator.file_utils import read_json
from ..mytools import Timer
from .lossbase import print_table
from ..config.baseconfig import load_object
from ..bodymodel.base import Params
from torch.utils.data import DataLoader
from tqdm import tqdm
def plot_meshes(img, meshes, K, R, T):
    """
    Draw a sparse subset of projected mesh vertices onto an image.

    Args:
        img: BGR image array, drawn on in place.
        meshes: iterable of dicts each holding an (N, 3) 'vertices' array.
        K: (3, 3) camera intrinsics.
        R: (3, 3) world-to-camera rotation.
        T: (3, 1) world-to-camera translation.
    Returns:
        the image with every 10th projected vertex drawn as a red dot.
    """
    import cv2
    # NOTE: removed dead local `mesh_camera = []` (was never used).
    for mesh in meshes:
        # World -> camera coordinates, then pinhole projection.
        vertices = mesh['vertices'] @ R.T + T.T
        v2d = vertices @ K.T
        v2d[:, :2] = v2d[:, :2] / v2d[:, 2:3]
        lw = 1
        col = (0, 0, 255)
        # Subsample every 10th vertex; +0.5 rounds to the nearest pixel.
        for (x, y, d) in v2d[::10]:
            cv2.circle(img, (int(x+0.5), int(y+0.5)), lw*2, col, -1)
    return img
13,117 | import torch
from ..bodymodel.lbs import batch_rodrigues
from .torchgeometry import rotation_matrix_to_axis_angle, rotation_matrix_to_quaternion, quaternion_to_rotation_matrix, quaternion_to_axis_angle
import numpy as np
from .base_ops import BeforeAfterBase
def quaternion_to_rotation_matrix(quaternion):
    """
    Convert quaternion coefficients to rotation matrix.
    Args:
        quaternion: torch tensor of shape (batch_size, 4) in (w, x, y, z) representation.
    Returns:
        rotation matrix corresponding to the quaternion, torch tensor of shape (batch_size, 3, 3)
    """
    # Normalize so non-unit inputs still yield proper rotations.
    unit = quaternion / quaternion.norm(p=2, dim=1, keepdim=True)
    w, x, y, z = unit[:, 0], unit[:, 1], unit[:, 2], unit[:, 3]
    ww, xx, yy, zz = w.pow(2), x.pow(2), y.pow(2), z.pow(2)
    wx, wy, wz = w * x, w * y, w * z
    xy, xz, yz = x * y, x * z, y * z
    # Rows of the standard quaternion-to-matrix formula.
    row0 = [ww + xx - yy - zz, 2 * xy - 2 * wz, 2 * wy + 2 * xz]
    row1 = [2 * wz + 2 * xy, ww - xx + yy - zz, 2 * yz - 2 * wx]
    row2 = [2 * xz - 2 * wy, 2 * wx + 2 * yz, ww - xx - yy + zz]
    flat = torch.stack(row0 + row1 + row2, dim=1)
    return flat.view(quaternion.size(0), 3, 3)
def rotation_matrix_to_quaternion(rotation_matrix, eps=1e-6):
    """
    Convert rotation matrix to corresponding quaternion
    Args:
        rotation_matrix: torch tensor of shape (batch_size, 3, 3)
        eps: threshold used when selecting the numerically safest formula
    Returns:
        quaternion: torch tensor of shape(batch_size, 4) in (w, x, y, z) representation.
    """
    # Work on the transpose so the indexing below matches the reference
    # row/column convention.
    rmat_t = torch.transpose(rotation_matrix, 1, 2)
    # Branch selection: pick one of four equivalent formulas depending on
    # which diagonal combination is largest, to avoid catastrophic
    # cancellation in the sqrt below.
    mask_d2 = rmat_t[:, 2, 2] < eps
    mask_d0_d1 = rmat_t[:, 0, 0] > rmat_t[:, 1, 1]
    mask_d0_nd1 = rmat_t[:, 0, 0] < -rmat_t[:, 1, 1]
    # Candidate 0: dominant x component.
    t0 = 1 + rmat_t[:, 0, 0] - rmat_t[:, 1, 1] - rmat_t[:, 2, 2]
    q0 = torch.stack([rmat_t[:, 1, 2] - rmat_t[:, 2, 1],
                      t0, rmat_t[:, 0, 1] + rmat_t[:, 1, 0],
                      rmat_t[:, 2, 0] + rmat_t[:, 0, 2]], -1)
    t0_rep = t0.repeat(4, 1).t()
    # Candidate 1: dominant y component.
    t1 = 1 - rmat_t[:, 0, 0] + rmat_t[:, 1, 1] - rmat_t[:, 2, 2]
    q1 = torch.stack([rmat_t[:, 2, 0] - rmat_t[:, 0, 2],
                      rmat_t[:, 0, 1] + rmat_t[:, 1, 0],
                      t1, rmat_t[:, 1, 2] + rmat_t[:, 2, 1]], -1)
    t1_rep = t1.repeat(4, 1).t()
    # Candidate 2: dominant z component.
    t2 = 1 - rmat_t[:, 0, 0] - rmat_t[:, 1, 1] + rmat_t[:, 2, 2]
    q2 = torch.stack([rmat_t[:, 0, 1] - rmat_t[:, 1, 0],
                      rmat_t[:, 2, 0] + rmat_t[:, 0, 2],
                      rmat_t[:, 1, 2] + rmat_t[:, 2, 1], t2], -1)
    t2_rep = t2.repeat(4, 1).t()
    # Candidate 3: dominant w component (trace-based; used for most inputs).
    t3 = 1 + rmat_t[:, 0, 0] + rmat_t[:, 1, 1] + rmat_t[:, 2, 2]
    q3 = torch.stack([t3, rmat_t[:, 1, 2] - rmat_t[:, 2, 1],
                      rmat_t[:, 2, 0] - rmat_t[:, 0, 2],
                      rmat_t[:, 0, 1] - rmat_t[:, 1, 0]], -1)
    t3_rep = t3.repeat(4, 1).t()
    # Exactly one of the four masks is true per batch element.
    mask_c0 = mask_d2 * mask_d0_d1
    mask_c1 = mask_d2 * (~ mask_d0_d1)
    mask_c2 = (~ mask_d2) * mask_d0_nd1
    mask_c3 = (~ mask_d2) * (~ mask_d0_nd1)
    mask_c0 = mask_c0.view(-1, 1).type_as(q0)
    mask_c1 = mask_c1.view(-1, 1).type_as(q1)
    mask_c2 = mask_c2.view(-1, 1).type_as(q2)
    mask_c3 = mask_c3.view(-1, 1).type_as(q3)
    # Blend the (one-hot masked) candidates, then normalize: q * 0.5 / sqrt(t).
    q = q0 * mask_c0 + q1 * mask_c1 + q2 * mask_c2 + q3 * mask_c3
    q /= torch.sqrt(t0_rep * mask_c0 + t1_rep * mask_c1 +  # noqa
                    t2_rep * mask_c2 + t3_rep * mask_c3)  # noqa
    q *= 0.5
    return q
def quaternion_to_axis_angle(quaternion):
    """
    Convert quaternion to axis angle.
    based on: https://github.com/facebookresearch/QuaterNet/blob/master/common/quaternion.py#L138
    Args:
        quaternion: torch tensor of shape (batch_size, 4) in (w, x, y, z) representation.
    Returns:
        axis_angle: torch tensor of shape (batch_size, 3)
    """
    eps = 1.e-8
    if not torch.is_tensor(quaternion):
        raise TypeError("Input type is not a torch.Tensor. Got {}".format(
            type(quaternion)))
    if not quaternion.shape[-1] == 4:
        raise ValueError("Input must be a tensor of shape Nx4 or 4. Got {}"
                         .format(quaternion.shape))
    # Scalar part w and vector part (x, y, z).
    w = quaternion[..., 0]
    xyz = (quaternion[..., 1], quaternion[..., 2], quaternion[..., 3])
    sin_sq = xyz[0] * xyz[0] + xyz[1] * xyz[1] + xyz[2] * xyz[2]
    # eps keeps the sqrt non-zero (and differentiable) at the identity.
    sin_t = torch.sqrt(sin_sq + eps)
    # Flipping both atan2 arguments for w < 0 picks the shorter rotation.
    angle = 2.0 * torch.where(
        w < 0.0,
        torch.atan2(-sin_t, -w),
        torch.atan2(sin_t, w))
    # Where sin^2 is exactly zero use the first-order factor 2 instead of
    # dividing by a vanishing sin(theta).
    factor = torch.where(sin_sq > 0.0,
                         angle / sin_t,
                         2.0 * torch.ones_like(sin_t))
    return torch.stack([component * factor for component in xyz], dim=-1)
The provided code snippet includes necessary dependencies for implementing the `compute_twist_rotation` function. Write a Python function `def compute_twist_rotation(rotation_matrix, twist_axis)` to solve the following problem:
Compute the twist component of given rotation and twist axis https://stackoverflow.com/questions/3684269/component-of-a-quaternion-rotation-around-an-axis Parameters ---------- rotation_matrix : Tensor (B, 3, 3,) The rotation to convert twist_axis : Tensor (B, 3,) The twist axis Returns ------- Tensor (B, 3, 3) The twist rotation
Here is the function:
def compute_twist_rotation(rotation_matrix, twist_axis):
    '''
    Compute the twist component of given rotation and twist axis
    https://stackoverflow.com/questions/3684269/component-of-a-quaternion-rotation-around-an-axis
    Parameters
    ----------
    rotation_matrix : Tensor (B, 3, 3,)
        The rotation to convert
    twist_axis : Tensor (B, 3,)
        The twist axis
    Returns
    -------
    twist_rotation : Tensor (B, 3, 3)
        The twist rotation
    twist_angle : Tensor (B, 1)
        The signed twist angle about the axis
    '''
    quaternion = rotation_matrix_to_quaternion(rotation_matrix)
    twist_axis = twist_axis / (torch.norm(twist_axis, dim=1, keepdim=True) + 1e-9)
    # Swing-twist decomposition: project the quaternion's vector part onto
    # the twist axis, keep the scalar part, then re-normalize.
    projection = torch.einsum('bi,bi->b', twist_axis, quaternion[:, 1:]).unsqueeze(-1) * twist_axis
    twist_quaternion = torch.cat([quaternion[:, 0:1], projection], dim=1)
    twist_quaternion = twist_quaternion / (torch.norm(twist_quaternion, dim=1, keepdim=True) + 1e-9)
    twist_rotation = quaternion_to_rotation_matrix(twist_quaternion)
    twist_aa = quaternion_to_axis_angle(twist_quaternion)
    # axis-angle = angle * axis, so the component-sum ratio recovers the
    # signed angle.  NOTE(review): this breaks down when the axis components
    # sum to ~0 — confirm callers never pass such axes.
    twist_angle = torch.sum(twist_aa, dim=1, keepdim=True) / torch.sum(twist_axis, dim=1, keepdim=True)
    return twist_rotation, twist_angle | Compute the twist component of given rotation and twist axis https://stackoverflow.com/questions/3684269/component-of-a-quaternion-rotation-around-an-axis Parameters ---------- rotation_matrix : Tensor (B, 3, 3,) The rotation to convert twist_axis : Tensor (B, 3,) The twist axis Returns ------- Tensor (B, 3, 3) The twist rotation |
13,118 | import numpy as np
from os.path import join
import os
import cv2
def flipPoint2D(point):
def mirrorPoint3D(point, M):
    """Apply the homogeneous mirror transform M to (N, 3) points, then swap
    left/right joint labels via flipPoint2D."""
    homogeneous = np.concatenate([point, np.ones([point.shape[0], 1])], axis=-1)
    mirrored = (M @ homogeneous.T).T[..., :3]
    return flipPoint2D(mirrored)
13,119 | import numpy as np
from os.path import join
import os
import cv2
def get_rotation_from_two_directions(direc0, direc1):
    """Smallest rotation matrix taking direction direc0 onto direc1.

    NOTE: for (nearly) parallel inputs the cross product vanishes and the
    identity is returned; this also silently covers the anti-parallel case.
    """
    u = direc0 / np.linalg.norm(direc0)
    v = direc1 / np.linalg.norm(direc1)
    axis = np.cross(u, v)
    axis_len = np.linalg.norm(axis)
    if axis_len < 1e-2:
        return np.eye(3)
    # Rotation vector: unit axis scaled by the angle between the directions.
    rotvec = (axis / axis_len) * np.arccos(np.dot(u, v))
    rotmat, _ = cv2.Rodrigues(rotvec)
    return rotmat
13,120 | import numpy as np
class ComposedFilter:
    """Run a chain of keypoint filters; a detection passes only if every
    sub-filter accepts it."""
    def __init__(self, filters, min_conf) -> None:
        # filters: list of callables(keypoints=..., **kwargs) -> bool
        # min_conf: confidence below which a keypoint is zeroed out
        self.filters = filters
        self.min_conf = min_conf
    def __call__(self, keypoints, **kwargs) -> bool:
        """Sanitize low-confidence joints in place, then apply each filter.

        NOTE: `conf` is a view into `keypoints`, so thresholding below
        mutates the caller's array.
        """
        conf = keypoints[:, 2]
        conf[conf<self.min_conf] = 0
        valid = conf>self.min_conf
        # Park unreliable joints at the centroid of the reliable ones so
        # they cannot skew downstream geometric checks.
        center = keypoints[valid, :2].mean(axis=0, keepdims=True)
        keypoints[conf<self.min_conf, :2] = center
        for filt in self.filters:
            if not filt(keypoints=keypoints, **kwargs):
                return False
        return True
    def nms(self, annots):
        # This function does nothing: the pairwise distances are computed
        # but the input annotations are returned unchanged.
        if len(annots) < 2:
            return annots
        keypoints = np.stack([annot['keypoints'] for annot in annots])
        bbox = np.stack([annot['bbox'] for annot in annots])
        # Per-pair normalization scale from the larger bbox extent.
        bbox_size = np.max(np.abs(bbox[:, [1, 3]] - bbox[:, [0, 2]]), axis=1)
        bbox_size = np.maximum(bbox_size[:, None], bbox_size[None, :])
        dist = np.linalg.norm(keypoints[:, None, :, :2] - keypoints[None, :, :, :2], axis=-1)
        conf = (keypoints[:, None, :, 2] > 0) * (keypoints[None, :, :, 2] > 0)
        dist = (dist * conf).sum(axis=2)/conf.sum(axis=2)/bbox_size
        return annots
    def __str__(self) -> str:
        # One line per sub-filter, indented under a header.
        indent = ' ' * 4
        res = indent + 'Composed Filters: \n'
        for filt in self.filters:
            res_ = indent + indent + '{:15s}'.format(filt.__class__.__name__) + ': ' + str(filt) + '\n'
            res += res_
        return res
def make_filter(param):
    """Instantiate every configured keypoint filter by name (looked up in
    this module's globals) and wrap them in a ComposedFilter."""
    shared = dict(log=param.log, width=param.width, height=param.height)
    filters = [globals()[name](**shared, **args)
               for name, args in param.filter.items()]
    comp = ComposedFilter(filters, param.min_conf)
    print(comp)
    return comp
13,121 | import numpy as np
CONFIG = {
'points': {
'nJoints': 1,
'kintree': []
}
}
CONFIG['smpl'] = {'nJoints': 24, 'kintree':
[
[ 0, 1 ],
[ 0, 2 ],
[ 0, 3 ],
[ 1, 4 ],
[ 2, 5 ],
[ 3, 6 ],
[ 4, 7 ],
[ 5, 8 ],
[ 6, 9 ],
[ 7, 10],
[ 8, 11],
[ 9, 12],
[ 9, 13],
[ 9, 14],
[12, 15],
[13, 16],
[14, 17],
[16, 18],
[17, 19],
[18, 20],
[19, 21],
[20, 22],
[21, 23],
],
'joint_names': [
'MidHip', # 0
'LUpLeg', # 1
'RUpLeg', # 2
'spine', # 3
'LLeg', # 4
'RLeg', # 5
'spine1', # 6
'LFoot', # 7
'RFoot', # 8
'spine2', # 9
'LToeBase', # 10
'RToeBase', # 11
'neck', # 12
'LShoulder', # 13
'RShoulder', # 14
'head', # 15
'LArm', # 16
'RArm', # 17
'LForeArm', # 18
'RForeArm', # 19
'LHand', # 20
'RHand', # 21
'LHandIndex1', # 22
'RHandIndex1', # 23
]
}
CONFIG['smplh'] = {'nJoints': 52, 'kintree':
[
[ 1, 0],
[ 2, 0],
[ 3, 0],
[ 4, 1],
[ 5, 2],
[ 6, 3],
[ 7, 4],
[ 8, 5],
[ 9, 6],
[ 10, 7],
[ 11, 8],
[ 12, 9],
[ 13, 9],
[ 14, 9],
[ 15, 12],
[ 16, 13],
[ 17, 14],
[ 18, 16],
[ 19, 17],
[ 20, 18],
[ 21, 19],
[ 22, 20],
[ 23, 22],
[ 24, 23],
[ 25, 20],
[ 26, 25],
[ 27, 26],
[ 28, 20],
[ 29, 28],
[ 30, 29],
[ 31, 20],
[ 32, 31],
[ 33, 32],
[ 34, 20],
[ 35, 34],
[ 36, 35],
[ 37, 21],
[ 38, 37],
[ 39, 38],
[ 40, 21],
[ 41, 40],
[ 42, 41],
[ 43, 21],
[ 44, 43],
[ 45, 44],
[ 46, 21],
[ 47, 46],
[ 48, 47],
[ 49, 21],
[ 50, 49],
[ 51, 50]
],
'joint_names': [
'MidHip', # 0
'LUpLeg', # 1
'RUpLeg', # 2
'spine', # 3
'LLeg', # 4
'RLeg', # 5
'spine1', # 6
'LFoot', # 7
'RFoot', # 8
'spine2', # 9
'LToeBase', # 10
'RToeBase', # 11
'neck', # 12
'LShoulder', # 13
'RShoulder', # 14
'head', # 15
'LArm', # 16
'RArm', # 17
'LForeArm', # 18
'RForeArm', # 19
'LHand', # 20
'RHand', # 21
'LHandIndex1', # 22
'RHandIndex1', # 23
]
}
CONFIG['coco'] = {
'nJoints': 17,
'kintree': [
[0, 1], [0, 2], [1, 3], [2, 4], [0, 5], [0, 6], [5, 6], [5, 7], [6, 8], [7, 9], [8, 10], [5, 11], [5, 12], [11, 12], [11, 13], [12, 14], [13, 15], [14, 16]
],
}
CONFIG['coco_17'] = CONFIG['coco']
CONFIG['body25'] = {'nJoints': 25, 'kintree':
[[ 1, 0],
[ 2, 1],
[ 3, 2],
[ 4, 3],
[ 5, 1],
[ 6, 5],
[ 7, 6],
[ 8, 1],
[ 9, 8],
[10, 9],
[11, 10],
[12, 8],
[13, 12],
[14, 13],
[15, 0],
[16, 0],
[17, 15],
[18, 16],
[19, 14],
[20, 19],
[21, 14],
[22, 11],
[23, 22],
[24, 11]],
'joint_names': [
"Nose", "Neck", "RShoulder", "RElbow", "RWrist", "LShoulder", "LElbow", "LWrist", "MidHip", "RHip","RKnee","RAnkle","LHip","LKnee","LAnkle","REye","LEye","REar","LEar","LBigToe","LSmallToe","LHeel","RBigToe","RSmallToe","RHeel"]}
CONFIG['body25']['kintree_order'] = [
[1, 8], # 躯干放在最前面
[1, 2],
[2, 3],
[3, 4],
[1, 5],
[5, 6],
[6, 7],
[8, 9],
[8, 12],
[9, 10],
[10, 11],
[12, 13],
[13, 14],
[1, 0],
[0, 15],
[0, 16],
[15, 17],
[16, 18],
[11, 22],
[11, 24],
[22, 23],
[14, 19],
[19, 20],
[14, 21]
]
CONFIG['body25']['colors'] = ['k', 'r', 'r', 'r', 'b', 'b', 'b', 'k', 'r', 'r', 'r', 'b', 'b', 'b', 'r', 'b', 'r', 'b', 'b', 'b', 'b', 'r', 'r', 'r']
CONFIG['body25']['skeleton'] = \
{
( 0, 1): {'mean': 0.228, 'std': 0.046}, # Nose ->Neck
( 1, 2): {'mean': 0.144, 'std': 0.029}, # Neck ->RShoulder
( 2, 3): {'mean': 0.283, 'std': 0.057}, # RShoulder->RElbow
( 3, 4): {'mean': 0.258, 'std': 0.052}, # RElbow ->RWrist
( 1, 5): {'mean': 0.145, 'std': 0.029}, # Neck ->LShoulder
( 5, 6): {'mean': 0.281, 'std': 0.056}, # LShoulder->LElbow
( 6, 7): {'mean': 0.258, 'std': 0.052}, # LElbow ->LWrist
( 1, 8): {'mean': 0.483, 'std': 0.097}, # Neck ->MidHip
( 8, 9): {'mean': 0.106, 'std': 0.021}, # MidHip ->RHip
( 9, 10): {'mean': 0.438, 'std': 0.088}, # RHip ->RKnee
(10, 11): {'mean': 0.406, 'std': 0.081}, # RKnee ->RAnkle
( 8, 12): {'mean': 0.106, 'std': 0.021}, # MidHip ->LHip
(12, 13): {'mean': 0.438, 'std': 0.088}, # LHip ->LKnee
(13, 14): {'mean': 0.408, 'std': 0.082}, # LKnee ->LAnkle
( 0, 15): {'mean': 0.043, 'std': 0.009}, # Nose ->REye
( 0, 16): {'mean': 0.043, 'std': 0.009}, # Nose ->LEye
(15, 17): {'mean': 0.105, 'std': 0.021}, # REye ->REar
(16, 18): {'mean': 0.104, 'std': 0.021}, # LEye ->LEar
(14, 19): {'mean': 0.180, 'std': 0.036}, # LAnkle ->LBigToe
(19, 20): {'mean': 0.038, 'std': 0.008}, # LBigToe ->LSmallToe
(14, 21): {'mean': 0.044, 'std': 0.009}, # LAnkle ->LHeel
(11, 22): {'mean': 0.182, 'std': 0.036}, # RAnkle ->RBigToe
(22, 23): {'mean': 0.038, 'std': 0.008}, # RBigToe ->RSmallToe
(11, 24): {'mean': 0.044, 'std': 0.009}, # RAnkle ->RHeel
}
CONFIG['body25vis'] = {
'nJoints': 25,
'kintree': [
[8, 1], # 躯干放在最前面
[8, 9],
[8, 12],
[9, 10],
[12, 13],
[10, 11],
[13, 14],
[11, 22],
[14, 19],
[1, 2],
[1, 5],
[2, 3],
[3, 4],
[5, 6],
[6, 7],
[1, 0]]
}
CONFIG['handvis'] = {
'nJoints': 21,
'kintree': [
[0, 1],
[0, 5],
[0, 9],
[0, 13],
[0, 17],
[1, 2],
[2, 3],
[3, 4],
[5, 6],
[6, 7],
[7, 8],
[9, 10],
[10, 11],
[11, 12],
[13, 14],
[14, 15],
[15, 16],
[17, 18],
[18, 19],
[19, 20]
]
}
CONFIG['body15'] = {'nJoints': 15, 'root': 8,
'kintree':
[[ 1, 0],
[ 2, 1],
[ 3, 2],
[ 4, 3],
[ 5, 1],
[ 6, 5],
[ 7, 6],
[ 8, 1],
[ 9, 8],
[10, 9],
[11, 10],
[12, 8],
[13, 12],
[14, 13]], 'root': 8,}
CONFIG['body15']['joint_names'] = CONFIG['body25']['joint_names'][:15]
CONFIG['body15']['skeleton'] = {key: val for key, val in CONFIG['body25']['skeleton'].items() if key[0] < 15 and key[1] < 15}
CONFIG['body15']['kintree_order'] = CONFIG['body25']['kintree_order'][:14]
CONFIG['body15']['colors'] = CONFIG['body25']['colors'][:15]
CONFIG['body19'] = {'nJoints': 19, 'kintree': [[i, j] for (i, j) in CONFIG['body25']['kintree'] if i < 19 and j < 19]}
CONFIG['body19']['skeleton'] = {key: val for key, val in CONFIG['body25']['skeleton'].items() if key[0] < 19 and key[1] < 19}
CONFIG['panoptic'] = {
'nJoints': 19,
'joint_names': ['Neck', 'Nose', 'MidHip', 'LShoulder', 'LElbow', 'LWrist', 'LHip', 'LKnee', 'LAnkle', 'RShoulder','RElbow', 'RWrist', 'RHip','RKnee', 'RAnkle', 'LEye', 'LEar', 'REye', 'REar'],
'kintree': [[0, 1],
[0, 2],
[0, 3],
[3, 4],
[4, 5],
[0, 9],
[9, 10],
[10, 11],
[2, 6],
[2, 12],
[6, 7],
[7, 8],
[12, 13],
[13, 14]],
'colors': ['b' for _ in range(19)]
}
CONFIG['panoptic15'] = {
'nJoints': 15,
'root': 2,
'joint_names': CONFIG['panoptic']['joint_names'][:15],
'kintree': [[i, j] for (i, j) in CONFIG['panoptic']['kintree'] if i < 15 and j < 15],
'limb_mean': [0.1129,0.4957,0.1382,0.2547,0.2425,0.1374,0.2549,0.2437,0.1257,0.1256, 0.4641,0.4580,0.4643,0.4589],
'limb_std': [0.0164,0.0333,0.0078,0.0237,0.0233,0.0085,0.0233,0.0237,0.0076,0.0076, 0.0273,0.0247,0.0272,0.0242],
'colors': CONFIG['panoptic']['colors'][:15]
}
CONFIG['mpii_16'] = {
'nJoints': 16,
'joint_names': ['rankle', 'rknee', 'rhip', 'lhip', 'lknee', 'lankle', 'pelvis', 'thorax', 'upper_neck', 'head_top', 'rwrist', 'relbow', 'rshoulder', 'lshoulder', 'lelbow', 'lwrist'],
'kintree': [[0, 1], [1, 2], [2, 6], [6, 3], [3, 4], [4, 5], [6, 7], [7, 8], [8, 9], [10, 11], [11, 12], [12, 7], [13, 14], [14, 15], [13, 7]],
'colors': ['b' for _ in range(16)]
}
CONFIG['ochuman_19'] = {
'nJoints': 19,
'joint_names': ["right_shoulder", "right_elbow", "right_wrist",
"left_shoulder", "left_elbow", "left_wrist",
"right_hip", "right_knee", "right_ankle",
"left_hip", "left_knee", "left_ankle",
"head", "neck"] + ['right_ear', 'left_ear', 'nose', 'right_eye', 'left_eye'],
'kintree': [
[0, 1], [1, 2], [3, 4], [4, 5],
[6, 7], [7, 8], [9, 10], [10, 11],
[13, 0], [13, 3], [0, 3], [6, 9],
[12, 16], [16, 13], [16, 17], [16, 18], [18, 15], [17, 14],
],
'colors': ['b' for _ in range(19)]
}
CONFIG['chi3d_25'] = {
'nJoints': 25,
'joint_names': [],
'kintree': [[10, 9], [9, 8], [8, 11], [8, 14], [11, 12], [14, 15], [12, 13], [15, 16],
[8, 7], [7, 0], [0, 1], [0, 4], [1, 2], [4, 5], [2, 3], [5, 6],
[13, 21], [13, 22], [16, 23], [16, 24], [3, 17], [3, 18], [6, 19], [6, 20]],
'colors': ['b' for _ in range(25)]
}
CONFIG['chi3d_17'] = {
'nJoints': 17,
'joint_names': [],
'kintree': [[10, 9], [9, 8], [8, 11], [8, 14], [11, 12], [14, 15], [12, 13], [15, 16],
[8, 7], [7, 0], [0, 1], [0, 4], [1, 2], [4, 5], [2, 3], [5, 6],
],
'colors': ['b' for _ in range(17)]
}
CONFIG['hand'] = {'nJoints': 21, 'kintree':
[[ 1, 0],
[ 2, 1],
[ 3, 2],
[ 4, 3],
[ 5, 0],
[ 6, 5],
[ 7, 6],
[ 8, 7],
[ 9, 0],
[10, 9],
[11, 10],
[12, 11],
[13, 0],
[14, 13],
[15, 14],
[16, 15],
[17, 0],
[18, 17],
[19, 18],
[20, 19]],
'colors': [
'_k', '_k', '_k', '_k', '_r', '_r', '_r', '_r',
'_g', '_g', '_g', '_g', '_b', '_b', '_b', '_b',
'_y', '_y', '_y', '_y'],
'colorsrhand': [
'_pink', '_pink', '_pink', '_pink', '_mint', '_mint', '_mint', '_mint',
'_orange', '_orange', '_orange', '_orange', '_mint2', '_mint2', '_mint2', '_mint2',
'purple', 'purple', 'purple', 'purple'],
'joint_names':[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]
}
CONFIG['handl'] = CONFIG['hand']
CONFIG['handr'] = CONFIG['hand']
CONFIG['handlr'] = {
'nJoints': 42,
'colors': CONFIG['hand']['colors'] + CONFIG['hand']['colorsrhand'],
'joint_names': CONFIG['hand']['joint_names'] + CONFIG['hand']['joint_names'],
'kintree': np.vstack((np.array(CONFIG['hand']['kintree']), np.array(CONFIG['hand']['kintree'])+21)).tolist()
}
CONFIG['bodyhand'] = {'kintree':
[[ 1, 0],
[ 2, 1],
[ 3, 2],
[ 4, 3],
[ 5, 1],
[ 6, 5],
[ 7, 6],
[ 8, 1],
[ 9, 8],
[10, 9],
[11, 10],
[12, 8],
[13, 12],
[14, 13],
[15, 0],
[16, 0],
[17, 15],
[18, 16],
[19, 14],
[20, 19],
[21, 14],
[22, 11],
[23, 22],
[24, 11],
[26, 7], # handl
[27, 26],
[28, 27],
[29, 28],
[30, 7],
[31, 30],
[32, 31],
[33, 32],
[34, 7],
[35, 34],
[36, 35],
[37, 36],
[38, 7],
[39, 38],
[40, 39],
[41, 40],
[42, 7],
[43, 42],
[44, 43],
[45, 44],
[47, 4], # handr
[48, 47],
[49, 48],
[50, 49],
[51, 4],
[52, 51],
[53, 52],
[54, 53],
[55, 4],
[56, 55],
[57, 56],
[58, 57],
[59, 4],
[60, 59],
[61, 60],
[62, 61],
[63, 4],
[64, 63],
[65, 64],
[66, 65]
],
'nJoints': 67,
'colors': CONFIG['body25']['colors'] + CONFIG['hand']['colors'] + CONFIG['hand']['colors'],
'skeleton':{
( 0, 1): {'mean': 0.251, 'std': 0.050},
( 1, 2): {'mean': 0.169, 'std': 0.034},
( 2, 3): {'mean': 0.292, 'std': 0.058},
( 3, 4): {'mean': 0.275, 'std': 0.055},
( 1, 5): {'mean': 0.169, 'std': 0.034},
( 5, 6): {'mean': 0.295, 'std': 0.059},
( 6, 7): {'mean': 0.278, 'std': 0.056},
( 1, 8): {'mean': 0.566, 'std': 0.113},
( 8, 9): {'mean': 0.110, 'std': 0.022},
( 9, 10): {'mean': 0.398, 'std': 0.080},
(10, 11): {'mean': 0.402, 'std': 0.080},
( 8, 12): {'mean': 0.111, 'std': 0.022},
(12, 13): {'mean': 0.395, 'std': 0.079},
(13, 14): {'mean': 0.403, 'std': 0.081},
( 0, 15): {'mean': 0.053, 'std': 0.011},
( 0, 16): {'mean': 0.056, 'std': 0.011},
(15, 17): {'mean': 0.107, 'std': 0.021},
(16, 18): {'mean': 0.107, 'std': 0.021},
(14, 19): {'mean': 0.180, 'std': 0.036},
(19, 20): {'mean': 0.055, 'std': 0.011},
(14, 21): {'mean': 0.065, 'std': 0.013},
(11, 22): {'mean': 0.169, 'std': 0.034},
(22, 23): {'mean': 0.052, 'std': 0.010},
(11, 24): {'mean': 0.061, 'std': 0.012},
( 7, 26): {'mean': 0.045, 'std': 0.009},
(26, 27): {'mean': 0.042, 'std': 0.008},
(27, 28): {'mean': 0.035, 'std': 0.007},
(28, 29): {'mean': 0.029, 'std': 0.006},
( 7, 30): {'mean': 0.102, 'std': 0.020},
(30, 31): {'mean': 0.040, 'std': 0.008},
(31, 32): {'mean': 0.026, 'std': 0.005},
(32, 33): {'mean': 0.023, 'std': 0.005},
( 7, 34): {'mean': 0.101, 'std': 0.020},
(34, 35): {'mean': 0.043, 'std': 0.009},
(35, 36): {'mean': 0.029, 'std': 0.006},
(36, 37): {'mean': 0.024, 'std': 0.005},
( 7, 38): {'mean': 0.097, 'std': 0.019},
(38, 39): {'mean': 0.041, 'std': 0.008},
(39, 40): {'mean': 0.027, 'std': 0.005},
(40, 41): {'mean': 0.024, 'std': 0.005},
( 7, 42): {'mean': 0.095, 'std': 0.019},
(42, 43): {'mean': 0.033, 'std': 0.007},
(43, 44): {'mean': 0.020, 'std': 0.004},
(44, 45): {'mean': 0.018, 'std': 0.004},
( 4, 47): {'mean': 0.043, 'std': 0.009},
(47, 48): {'mean': 0.041, 'std': 0.008},
(48, 49): {'mean': 0.034, 'std': 0.007},
(49, 50): {'mean': 0.028, 'std': 0.006},
( 4, 51): {'mean': 0.101, 'std': 0.020},
(51, 52): {'mean': 0.041, 'std': 0.008},
(52, 53): {'mean': 0.026, 'std': 0.005},
(53, 54): {'mean': 0.024, 'std': 0.005},
( 4, 55): {'mean': 0.100, 'std': 0.020},
(55, 56): {'mean': 0.044, 'std': 0.009},
(56, 57): {'mean': 0.029, 'std': 0.006},
(57, 58): {'mean': 0.023, 'std': 0.005},
( 4, 59): {'mean': 0.096, 'std': 0.019},
(59, 60): {'mean': 0.040, 'std': 0.008},
(60, 61): {'mean': 0.028, 'std': 0.006},
(61, 62): {'mean': 0.023, 'std': 0.005},
( 4, 63): {'mean': 0.094, 'std': 0.019},
(63, 64): {'mean': 0.032, 'std': 0.006},
(64, 65): {'mean': 0.020, 'std': 0.004},
(65, 66): {'mean': 0.018, 'std': 0.004},
}
}
CONFIG['bodyhandface'] = {'kintree':
[[ 1, 0],
[ 2, 1],
[ 3, 2],
[ 4, 3],
[ 5, 1],
[ 6, 5],
[ 7, 6],
[ 8, 1],
[ 9, 8],
[10, 9],
[11, 10],
[12, 8],
[13, 12],
[14, 13],
[15, 0],
[16, 0],
[17, 15],
[18, 16],
[19, 14],
[20, 19],
[21, 14],
[22, 11],
[23, 22],
[24, 11],
[26, 7], # handl
[27, 26],
[28, 27],
[29, 28],
[30, 7],
[31, 30],
[32, 31],
[33, 32],
[34, 7],
[35, 34],
[36, 35],
[37, 36],
[38, 7],
[39, 38],
[40, 39],
[41, 40],
[42, 7],
[43, 42],
[44, 43],
[45, 44],
[47, 4], # handr
[48, 47],
[49, 48],
[50, 49],
[51, 4],
[52, 51],
[53, 52],
[54, 53],
[55, 4],
[56, 55],
[57, 56],
[58, 57],
[59, 4],
[60, 59],
[61, 60],
[62, 61],
[63, 4],
[64, 63],
[65, 64],
[66, 65],
[ 67, 68],
[ 68, 69],
[ 69, 70],
[ 70, 71],
[ 72, 73],
[ 73, 74],
[ 74, 75],
[ 75, 76],
[ 77, 78],
[ 78, 79],
[ 79, 80],
[ 81, 82],
[ 82, 83],
[ 83, 84],
[ 84, 85],
[ 86, 87],
[ 87, 88],
[ 88, 89],
[ 89, 90],
[ 90, 91],
[ 91, 86],
[ 92, 93],
[ 93, 94],
[ 94, 95],
[ 95, 96],
[ 96, 97],
[ 97, 92],
[ 98, 99],
[ 99, 100],
[100, 101],
[101, 102],
[102, 103],
[103, 104],
[104, 105],
[105, 106],
[106, 107],
[107, 108],
[108, 109],
[109, 98],
[110, 111],
[111, 112],
[112, 113],
[113, 114],
[114, 115],
[115, 116],
[116, 117],
[117, 110]
],
'nJoints': 118,
'skeleton':{
( 0, 1): {'mean': 0.251, 'std': 0.050},
( 1, 2): {'mean': 0.169, 'std': 0.034},
( 2, 3): {'mean': 0.292, 'std': 0.058},
( 3, 4): {'mean': 0.275, 'std': 0.055},
( 1, 5): {'mean': 0.169, 'std': 0.034},
( 5, 6): {'mean': 0.295, 'std': 0.059},
( 6, 7): {'mean': 0.278, 'std': 0.056},
( 1, 8): {'mean': 0.566, 'std': 0.113},
( 8, 9): {'mean': 0.110, 'std': 0.022},
( 9, 10): {'mean': 0.398, 'std': 0.080},
(10, 11): {'mean': 0.402, 'std': 0.080},
( 8, 12): {'mean': 0.111, 'std': 0.022},
(12, 13): {'mean': 0.395, 'std': 0.079},
(13, 14): {'mean': 0.403, 'std': 0.081},
( 0, 15): {'mean': 0.053, 'std': 0.011},
( 0, 16): {'mean': 0.056, 'std': 0.011},
(15, 17): {'mean': 0.107, 'std': 0.021},
(16, 18): {'mean': 0.107, 'std': 0.021},
(14, 19): {'mean': 0.180, 'std': 0.036},
(19, 20): {'mean': 0.055, 'std': 0.011},
(14, 21): {'mean': 0.065, 'std': 0.013},
(11, 22): {'mean': 0.169, 'std': 0.034},
(22, 23): {'mean': 0.052, 'std': 0.010},
(11, 24): {'mean': 0.061, 'std': 0.012},
( 7, 26): {'mean': 0.045, 'std': 0.009},
(26, 27): {'mean': 0.042, 'std': 0.008},
(27, 28): {'mean': 0.035, 'std': 0.007},
(28, 29): {'mean': 0.029, 'std': 0.006},
( 7, 30): {'mean': 0.102, 'std': 0.020},
(30, 31): {'mean': 0.040, 'std': 0.008},
(31, 32): {'mean': 0.026, 'std': 0.005},
(32, 33): {'mean': 0.023, 'std': 0.005},
( 7, 34): {'mean': 0.101, 'std': 0.020},
(34, 35): {'mean': 0.043, 'std': 0.009},
(35, 36): {'mean': 0.029, 'std': 0.006},
(36, 37): {'mean': 0.024, 'std': 0.005},
( 7, 38): {'mean': 0.097, 'std': 0.019},
(38, 39): {'mean': 0.041, 'std': 0.008},
(39, 40): {'mean': 0.027, 'std': 0.005},
(40, 41): {'mean': 0.024, 'std': 0.005},
( 7, 42): {'mean': 0.095, 'std': 0.019},
(42, 43): {'mean': 0.033, 'std': 0.007},
(43, 44): {'mean': 0.020, 'std': 0.004},
(44, 45): {'mean': 0.018, 'std': 0.004},
( 4, 47): {'mean': 0.043, 'std': 0.009},
(47, 48): {'mean': 0.041, 'std': 0.008},
(48, 49): {'mean': 0.034, 'std': 0.007},
(49, 50): {'mean': 0.028, 'std': 0.006},
( 4, 51): {'mean': 0.101, 'std': 0.020},
(51, 52): {'mean': 0.041, 'std': 0.008},
(52, 53): {'mean': 0.026, 'std': 0.005},
(53, 54): {'mean': 0.024, 'std': 0.005},
( 4, 55): {'mean': 0.100, 'std': 0.020},
(55, 56): {'mean': 0.044, 'std': 0.009},
(56, 57): {'mean': 0.029, 'std': 0.006},
(57, 58): {'mean': 0.023, 'std': 0.005},
( 4, 59): {'mean': 0.096, 'std': 0.019},
(59, 60): {'mean': 0.040, 'std': 0.008},
(60, 61): {'mean': 0.028, 'std': 0.006},
(61, 62): {'mean': 0.023, 'std': 0.005},
( 4, 63): {'mean': 0.094, 'std': 0.019},
(63, 64): {'mean': 0.032, 'std': 0.006},
(64, 65): {'mean': 0.020, 'std': 0.004},
(65, 66): {'mean': 0.018, 'std': 0.004},
(67, 68): {'mean': 0.012, 'std': 0.002},
(68, 69): {'mean': 0.013, 'std': 0.003},
(69, 70): {'mean': 0.014, 'std': 0.003},
(70, 71): {'mean': 0.012, 'std': 0.002},
(72, 73): {'mean': 0.014, 'std': 0.003},
(73, 74): {'mean': 0.014, 'std': 0.003},
(74, 75): {'mean': 0.015, 'std': 0.003},
(75, 76): {'mean': 0.013, 'std': 0.003},
(77, 78): {'mean': 0.014, 'std': 0.003},
(78, 79): {'mean': 0.014, 'std': 0.003},
(79, 80): {'mean': 0.015, 'std': 0.003},
(81, 82): {'mean': 0.009, 'std': 0.002},
(82, 83): {'mean': 0.010, 'std': 0.002},
(83, 84): {'mean': 0.010, 'std': 0.002},
(84, 85): {'mean': 0.010, 'std': 0.002},
(86, 87): {'mean': 0.009, 'std': 0.002},
(87, 88): {'mean': 0.009, 'std': 0.002},
(88, 89): {'mean': 0.008, 'std': 0.002},
(89, 90): {'mean': 0.008, 'std': 0.002},
(90, 91): {'mean': 0.009, 'std': 0.002},
(86, 91): {'mean': 0.008, 'std': 0.002},
(92, 93): {'mean': 0.009, 'std': 0.002},
(93, 94): {'mean': 0.009, 'std': 0.002},
(94, 95): {'mean': 0.009, 'std': 0.002},
(95, 96): {'mean': 0.009, 'std': 0.002},
(96, 97): {'mean': 0.009, 'std': 0.002},
(92, 97): {'mean': 0.009, 'std': 0.002},
(98, 99): {'mean': 0.016, 'std': 0.003},
(99, 100): {'mean': 0.013, 'std': 0.003},
(100, 101): {'mean': 0.008, 'std': 0.002},
(101, 102): {'mean': 0.008, 'std': 0.002},
(102, 103): {'mean': 0.012, 'std': 0.002},
(103, 104): {'mean': 0.014, 'std': 0.003},
(104, 105): {'mean': 0.015, 'std': 0.003},
(105, 106): {'mean': 0.012, 'std': 0.002},
(106, 107): {'mean': 0.009, 'std': 0.002},
(107, 108): {'mean': 0.009, 'std': 0.002},
(108, 109): {'mean': 0.013, 'std': 0.003},
(98, 109): {'mean': 0.016, 'std': 0.003},
(110, 111): {'mean': 0.021, 'std': 0.004},
(111, 112): {'mean': 0.009, 'std': 0.002},
(112, 113): {'mean': 0.008, 'std': 0.002},
(113, 114): {'mean': 0.019, 'std': 0.004},
(114, 115): {'mean': 0.018, 'std': 0.004},
(115, 116): {'mean': 0.008, 'std': 0.002},
(116, 117): {'mean': 0.009, 'std': 0.002},
(110, 117): {'mean': 0.020, 'std': 0.004},
}
}
CONFIG['face'] = {'nJoints': 70,
'kintree':[ [0,1],[1,2],[2,3],[3,4],[4,5],[5,6],[6,7],[7,8],[8,9],[9,10],[10,11],[11,12],[12,13],[13,14],[14,15],[15,16], #outline (ignored)
[17,18],[18,19],[19,20],[20,21], #right eyebrow
[22,23],[23,24],[24,25],[25,26], #left eyebrow
[27,28],[28,29],[29,30], #nose upper part
[31,32],[32,33],[33,34],[34,35], #nose lower part
[36,37],[37,38],[38,39],[39,40],[40,41],[41,36], #right eye
[42,43],[43,44],[44,45],[45,46],[46,47],[47,42], #left eye
[48,49],[49,50],[50,51],[51,52],[52,53],[53,54],[54,55],[55,56],[56,57],[57,58],[58,59],[59,48], #Lip outline
[60,61],[61,62],[62,63],[63,64],[64,65],[65,66],[66,67],[67,60] #Lip inner line
], 'colors': ['g' for _ in range(100)]}
CONFIG['h36m'] = {
'kintree': [[0, 1], [1, 2], [2, 3], [0, 4], [4, 5], [5, 6], [0, 7], [7, 8], [8, 9], [9, 10], [8, 11], [11, 12], [
12, 13], [8, 14], [14, 15], [15, 16]],
'color': ['r', 'r', 'r', 'g', 'g', 'g', 'k', 'k', 'k', 'k', 'g', 'g', 'g', 'r', 'r', 'r'],
'joint_names': [
'hip', # 0
'LHip', # 1
'LKnee', # 2
'LAnkle', # 3
'RHip', # 4
'RKnee', # 5
'RAnkle', # 6
'Spine (H36M)', # 7
'Neck', # 8
'Head (H36M)', # 9
'headtop', # 10
'LShoulder', # 11
'LElbow', # 12
'LWrist', # 13
'RShoulder', # 14
'RElbow', # 15
'RWrist', # 16
],
'nJoints': 17}
CONFIG['h36m_17'] = CONFIG['h36m']
CONFIG['total'] = compose(['body25', 'hand', 'hand', 'face'])
CONFIG['bodyhandface']['joint_names'] = CONFIG['body25']['joint_names']
CONFIG['keypoints2d'] = CONFIG['body25']
CONFIG['handl2d'] = CONFIG['hand']
CONFIG['handr2d'] = CONFIG['hand']
CONFIG['face2d'] = CONFIG['face']
CONFIG['mpbody'] = {}
CONFIG['mpbody']['kintree'] = [
(0, 1),
(0, 4),
(1, 2),
(2, 3),
(3, 7),
(4, 5),
(5, 6),
(6, 8),
(9, 10),
(11, 12),
(11, 13),
(11, 23),
(12, 14),
(12, 24),
(13, 15),
(14, 16),
(15, 17),
(15, 19),
(15, 21),
(16, 18),
(16, 20),
(16, 22),
(17, 19),
(18, 20),
(23, 24),
(23, 25),
(24, 26),
(25, 27),
(26, 28),
(27, 29),
(27, 31),
(28, 30),
(28, 32),
(29, 31),
(30, 32)
]
CONFIG['mpbody']['nJoints'] = 33
CONFIG['mpbody']['colors'] = ['b', 'r', 'b', 'b', 'b', 'r', 'r', 'r', 'k', 'k', 'b', 'b', 'r', 'r', 'b', 'r',
'y', 'r', 'y', 'g', 'b', 'g', 'y', 'g', 'k', 'b', 'r', 'b', 'r', 'b', 'b', 'r', 'r', 'b', 'b']
CONFIG['mpface'] = {}
CONFIG['mpface']['kintree'] = [(270, 409), (176, 149), (37, 0), (84, 17), (318, 324), (293, 334), (386, 385), (7, 163), (33, 246), (17, 314), (374, 380), (251, 389), (390, 373), (267, 269), (295, 285), (389, 356), (173, 133), (33, 7), (377, 152), (158, 157), (405, 321), (54, 103), (263, 466), (324, 308), (67, 109), (409, 291), (157, 173), (454, 323), (388, 387), (78, 191), (148, 176), (311, 310), (39, 37), (249, 390), (144, 145), (402, 318), (80, 81), (310, 415), (153, 154), (384, 398), (397, 365), (234, 127), (103, 67), (282, 295), (338, 297), (378, 400), (127, 162), (321, 375), (375, 291), (317, 402), (81, 82), (154, 155), (91, 181), (334, 296), (297, 332), (269, 270), (150, 136), (109, 10), (356, 454), (58, 132), (312, 311), (152, 148), (415, 308), (161, 160), (296, 336), (65, 55), (61, 146), (78, 95), (380, 381), (398, 362), (361, 288), (246, 161), (162, 21), (0, 267), (82, 13), (132, 93), (314, 405), (10, 338), (178, 87), (387, 386), (381, 382), (70, 63), (61, 185), (14, 317), (105, 66), (300, 293), (382, 362), (88, 178), (185, 40), (46, 53), (284, 251), (400, 377), (136, 172), (323, 361), (13, 312), (21, 54), (172, 58), (373, 374), (163, 144), (276, 283), (53, 52), (365, 379), (379, 378), (146, 91), (263, 249), (283, 282), (87, 14), (145, 153), (155, 133), (93, 234), (66, 107), (95, 88), (159, 158), (52, 65), (332, 284), (40, 39), (191, 80), (63, 105), (181, 84), (466, 388), (149, 150), (288, 397), (160, 159), (385, 384)]
CONFIG['mpface']['nJoints'] = 468
CONFIG['mptotal'] = compose(['mpbody', 'hand', 'hand', 'mpface'])
CONFIG['bodyhandmpface'] = compose(['body25', 'hand', 'hand', 'mpface'])
CONFIG['iris'] = {
'nJoints': 10,
'kintree': [[0, 1], [1, 2], [2, 3], [3, 4]]
}
CONFIG['onepoint'] = {
'nJoints': 1,
'kintree': []
}
CONFIG['up'] = {
'nJoints': 79,
'kintree': []
}
CONFIG['ochuman'] = {
'nJoints': 19,
'kintree': [[0, 1], [1, 2], [3, 4], [4, 5], [6, 7], [7, 8], [9, 10], [10, 11], [12, 13], [14, 17], [15, 18], [17, 16], [18, 16]]
}
CONFIG['mpii'] = {
'nJoints': 16,
'kintree': [[0, 1], [1, 2], [3, 4], [4, 5], [2, 6], [3, 6], [6, 7], [7, 8], [8, 9], [10, 11], [11, 12], [7, 12], [7, 13], \
[13, 14], [14, 15]],
'joint_names': ['rank', 'rkne', 'rhip', 'lhip', 'lkne', 'lank', 'pelv', 'thrx', 'neck', 'head', 'rwri', 'relb', 'rsho', 'lsho', 'lelb', 'lwri'],
}
CONFIG['h36mltri_17'] = {
'kintree': [(0, 1), (1, 2), (2, 6), (5, 4), (4, 3), (3, 6), (6, 7), (7, 8), (8, 16), (9, 16), (8, 12), (11, 12), (10, 11), (8, 13), (13, 14), (14, 15)],
'color': ['r', 'r', 'r', 'g', 'g', 'g', 'k', 'k', 'k', 'k', 'g', 'g', 'g', 'r', 'r', 'r'],
'joint_names': CONFIG['mpii']['joint_names'] + ['Neck/Nose'],
'nJoints': 17}
def compose(names, configs=None):
    """Concatenate several skeleton configs into a single skeleton.

    Each named skeleton is appended after the previous ones, and its
    kintree joint indices are offset by the number of joints already
    placed, so the merged kintree indexes into the stacked joint array.

    Args:
        names: list of config names, e.g. ['body25', 'hand', 'hand', 'face'].
        configs: mapping of name -> {'kintree': ..., 'nJoints': ...};
            defaults to the module-level CONFIG (backward compatible).

    Returns:
        dict with keys 'kintree' (list of [child, parent] index pairs)
        and 'nJoints' (total joint count).
    """
    if configs is None:
        configs = CONFIG
    kintrees = []
    nJoints = 0
    for name in names:
        # shift this skeleton's joint ids past all joints placed so far
        kintrees.append(np.array(configs[name]['kintree']) + nJoints)
        nJoints += configs[name]['nJoints']
    kintrees = np.vstack(kintrees)
    return {
        'kintree': kintrees.tolist(),
        'nJoints': nJoints,
    }
13,122 | import numpy as np
CONFIG = {
'points': {
'nJoints': 1,
'kintree': []
}
}
CONFIG['smpl'] = {'nJoints': 24, 'kintree':
[
[ 0, 1 ],
[ 0, 2 ],
[ 0, 3 ],
[ 1, 4 ],
[ 2, 5 ],
[ 3, 6 ],
[ 4, 7 ],
[ 5, 8 ],
[ 6, 9 ],
[ 7, 10],
[ 8, 11],
[ 9, 12],
[ 9, 13],
[ 9, 14],
[12, 15],
[13, 16],
[14, 17],
[16, 18],
[17, 19],
[18, 20],
[19, 21],
[20, 22],
[21, 23],
],
'joint_names': [
'MidHip', # 0
'LUpLeg', # 1
'RUpLeg', # 2
'spine', # 3
'LLeg', # 4
'RLeg', # 5
'spine1', # 6
'LFoot', # 7
'RFoot', # 8
'spine2', # 9
'LToeBase', # 10
'RToeBase', # 11
'neck', # 12
'LShoulder', # 13
'RShoulder', # 14
'head', # 15
'LArm', # 16
'RArm', # 17
'LForeArm', # 18
'RForeArm', # 19
'LHand', # 20
'RHand', # 21
'LHandIndex1', # 22
'RHandIndex1', # 23
]
}
CONFIG['smplh'] = {'nJoints': 52, 'kintree':
[
[ 1, 0],
[ 2, 0],
[ 3, 0],
[ 4, 1],
[ 5, 2],
[ 6, 3],
[ 7, 4],
[ 8, 5],
[ 9, 6],
[ 10, 7],
[ 11, 8],
[ 12, 9],
[ 13, 9],
[ 14, 9],
[ 15, 12],
[ 16, 13],
[ 17, 14],
[ 18, 16],
[ 19, 17],
[ 20, 18],
[ 21, 19],
[ 22, 20],
[ 23, 22],
[ 24, 23],
[ 25, 20],
[ 26, 25],
[ 27, 26],
[ 28, 20],
[ 29, 28],
[ 30, 29],
[ 31, 20],
[ 32, 31],
[ 33, 32],
[ 34, 20],
[ 35, 34],
[ 36, 35],
[ 37, 21],
[ 38, 37],
[ 39, 38],
[ 40, 21],
[ 41, 40],
[ 42, 41],
[ 43, 21],
[ 44, 43],
[ 45, 44],
[ 46, 21],
[ 47, 46],
[ 48, 47],
[ 49, 21],
[ 50, 49],
[ 51, 50]
],
'joint_names': [
'MidHip', # 0
'LUpLeg', # 1
'RUpLeg', # 2
'spine', # 3
'LLeg', # 4
'RLeg', # 5
'spine1', # 6
'LFoot', # 7
'RFoot', # 8
'spine2', # 9
'LToeBase', # 10
'RToeBase', # 11
'neck', # 12
'LShoulder', # 13
'RShoulder', # 14
'head', # 15
'LArm', # 16
'RArm', # 17
'LForeArm', # 18
'RForeArm', # 19
'LHand', # 20
'RHand', # 21
'LHandIndex1', # 22
'RHandIndex1', # 23
]
}
CONFIG['coco'] = {
'nJoints': 17,
'kintree': [
[0, 1], [0, 2], [1, 3], [2, 4], [0, 5], [0, 6], [5, 6], [5, 7], [6, 8], [7, 9], [8, 10], [5, 11], [5, 12], [11, 12], [11, 13], [12, 14], [13, 15], [14, 16]
],
}
CONFIG['coco_17'] = CONFIG['coco']
CONFIG['body25'] = {'nJoints': 25, 'kintree':
[[ 1, 0],
[ 2, 1],
[ 3, 2],
[ 4, 3],
[ 5, 1],
[ 6, 5],
[ 7, 6],
[ 8, 1],
[ 9, 8],
[10, 9],
[11, 10],
[12, 8],
[13, 12],
[14, 13],
[15, 0],
[16, 0],
[17, 15],
[18, 16],
[19, 14],
[20, 19],
[21, 14],
[22, 11],
[23, 22],
[24, 11]],
'joint_names': [
"Nose", "Neck", "RShoulder", "RElbow", "RWrist", "LShoulder", "LElbow", "LWrist", "MidHip", "RHip","RKnee","RAnkle","LHip","LKnee","LAnkle","REye","LEye","REar","LEar","LBigToe","LSmallToe","LHeel","RBigToe","RSmallToe","RHeel"]}
CONFIG['body25']['kintree_order'] = [
    [1, 8], # torso first (draw/process it before the limbs)
    [1, 2],
    [2, 3],
    [3, 4],
    [1, 5],
    [5, 6],
    [6, 7],
    [8, 9],
    [8, 12],
    [9, 10],
    [10, 11],
    [12, 13],
    [13, 14],
    [1, 0],
    [0, 15],
    [0, 16],
    [15, 17],
    [16, 18],
    [11, 22],
    [11, 24],
    [22, 23],
    [14, 19],
    [19, 20],
    [14, 21]
]
CONFIG['body25']['colors'] = ['k', 'r', 'r', 'r', 'b', 'b', 'b', 'k', 'r', 'r', 'r', 'b', 'b', 'b', 'r', 'b', 'r', 'b', 'b', 'b', 'b', 'r', 'r', 'r']
CONFIG['body25']['skeleton'] = \
{
( 0, 1): {'mean': 0.228, 'std': 0.046}, # Nose ->Neck
( 1, 2): {'mean': 0.144, 'std': 0.029}, # Neck ->RShoulder
( 2, 3): {'mean': 0.283, 'std': 0.057}, # RShoulder->RElbow
( 3, 4): {'mean': 0.258, 'std': 0.052}, # RElbow ->RWrist
( 1, 5): {'mean': 0.145, 'std': 0.029}, # Neck ->LShoulder
( 5, 6): {'mean': 0.281, 'std': 0.056}, # LShoulder->LElbow
( 6, 7): {'mean': 0.258, 'std': 0.052}, # LElbow ->LWrist
( 1, 8): {'mean': 0.483, 'std': 0.097}, # Neck ->MidHip
( 8, 9): {'mean': 0.106, 'std': 0.021}, # MidHip ->RHip
( 9, 10): {'mean': 0.438, 'std': 0.088}, # RHip ->RKnee
(10, 11): {'mean': 0.406, 'std': 0.081}, # RKnee ->RAnkle
( 8, 12): {'mean': 0.106, 'std': 0.021}, # MidHip ->LHip
(12, 13): {'mean': 0.438, 'std': 0.088}, # LHip ->LKnee
(13, 14): {'mean': 0.408, 'std': 0.082}, # LKnee ->LAnkle
( 0, 15): {'mean': 0.043, 'std': 0.009}, # Nose ->REye
( 0, 16): {'mean': 0.043, 'std': 0.009}, # Nose ->LEye
(15, 17): {'mean': 0.105, 'std': 0.021}, # REye ->REar
(16, 18): {'mean': 0.104, 'std': 0.021}, # LEye ->LEar
(14, 19): {'mean': 0.180, 'std': 0.036}, # LAnkle ->LBigToe
(19, 20): {'mean': 0.038, 'std': 0.008}, # LBigToe ->LSmallToe
(14, 21): {'mean': 0.044, 'std': 0.009}, # LAnkle ->LHeel
(11, 22): {'mean': 0.182, 'std': 0.036}, # RAnkle ->RBigToe
(22, 23): {'mean': 0.038, 'std': 0.008}, # RBigToe ->RSmallToe
(11, 24): {'mean': 0.044, 'std': 0.009}, # RAnkle ->RHeel
}
CONFIG['body25vis'] = {
    'nJoints': 25,
    'kintree': [
        [8, 1], # torso first (draw/process it before the limbs)
        [8, 9],
        [8, 12],
        [9, 10],
        [12, 13],
        [10, 11],
        [13, 14],
        [11, 22],
        [14, 19],
        [1, 2],
        [1, 5],
        [2, 3],
        [3, 4],
        [5, 6],
        [6, 7],
        [1, 0]]
}
CONFIG['handvis'] = {
'nJoints': 21,
'kintree': [
[0, 1],
[0, 5],
[0, 9],
[0, 13],
[0, 17],
[1, 2],
[2, 3],
[3, 4],
[5, 6],
[6, 7],
[7, 8],
[9, 10],
[10, 11],
[11, 12],
[13, 14],
[14, 15],
[15, 16],
[17, 18],
[18, 19],
[19, 20]
]
}
# BODY15: the first 15 BODY25 joints (no eye/ear or foot detail joints).
# NOTE: the original literal specified 'root': 8 twice; Python keeps the
# last occurrence, so dropping the duplicate is behavior-preserving.
CONFIG['body15'] = {
    'nJoints': 15,
    'root': 8,  # MidHip is the kinematic root
    'kintree':
        [[ 1, 0],
         [ 2, 1],
         [ 3, 2],
         [ 4, 3],
         [ 5, 1],
         [ 6, 5],
         [ 7, 6],
         [ 8, 1],
         [ 9, 8],
         [10, 9],
         [11, 10],
         [12, 8],
         [13, 12],
         [14, 13]],
}
CONFIG['body15']['joint_names'] = CONFIG['body25']['joint_names'][:15]
CONFIG['body15']['skeleton'] = {key: val for key, val in CONFIG['body25']['skeleton'].items() if key[0] < 15 and key[1] < 15}
CONFIG['body15']['kintree_order'] = CONFIG['body25']['kintree_order'][:14]
CONFIG['body15']['colors'] = CONFIG['body25']['colors'][:15]
CONFIG['body19'] = {'nJoints': 19, 'kintree': [[i, j] for (i, j) in CONFIG['body25']['kintree'] if i < 19 and j < 19]}
CONFIG['body19']['skeleton'] = {key: val for key, val in CONFIG['body25']['skeleton'].items() if key[0] < 19 and key[1] < 19}
CONFIG['panoptic'] = {
'nJoints': 19,
'joint_names': ['Neck', 'Nose', 'MidHip', 'LShoulder', 'LElbow', 'LWrist', 'LHip', 'LKnee', 'LAnkle', 'RShoulder','RElbow', 'RWrist', 'RHip','RKnee', 'RAnkle', 'LEye', 'LEar', 'REye', 'REar'],
'kintree': [[0, 1],
[0, 2],
[0, 3],
[3, 4],
[4, 5],
[0, 9],
[9, 10],
[10, 11],
[2, 6],
[2, 12],
[6, 7],
[7, 8],
[12, 13],
[13, 14]],
'colors': ['b' for _ in range(19)]
}
CONFIG['panoptic15'] = {
'nJoints': 15,
'root': 2,
'joint_names': CONFIG['panoptic']['joint_names'][:15],
'kintree': [[i, j] for (i, j) in CONFIG['panoptic']['kintree'] if i < 15 and j < 15],
'limb_mean': [0.1129,0.4957,0.1382,0.2547,0.2425,0.1374,0.2549,0.2437,0.1257,0.1256, 0.4641,0.4580,0.4643,0.4589],
'limb_std': [0.0164,0.0333,0.0078,0.0237,0.0233,0.0085,0.0233,0.0237,0.0076,0.0076, 0.0273,0.0247,0.0272,0.0242],
'colors': CONFIG['panoptic']['colors'][:15]
}
CONFIG['mpii_16'] = {
'nJoints': 16,
'joint_names': ['rankle', 'rknee', 'rhip', 'lhip', 'lknee', 'lankle', 'pelvis', 'thorax', 'upper_neck', 'head_top', 'rwrist', 'relbow', 'rshoulder', 'lshoulder', 'lelbow', 'lwrist'],
'kintree': [[0, 1], [1, 2], [2, 6], [6, 3], [3, 4], [4, 5], [6, 7], [7, 8], [8, 9], [10, 11], [11, 12], [12, 7], [13, 14], [14, 15], [13, 7]],
'colors': ['b' for _ in range(16)]
}
CONFIG['ochuman_19'] = {
'nJoints': 19,
'joint_names': ["right_shoulder", "right_elbow", "right_wrist",
"left_shoulder", "left_elbow", "left_wrist",
"right_hip", "right_knee", "right_ankle",
"left_hip", "left_knee", "left_ankle",
"head", "neck"] + ['right_ear', 'left_ear', 'nose', 'right_eye', 'left_eye'],
'kintree': [
[0, 1], [1, 2], [3, 4], [4, 5],
[6, 7], [7, 8], [9, 10], [10, 11],
[13, 0], [13, 3], [0, 3], [6, 9],
[12, 16], [16, 13], [16, 17], [16, 18], [18, 15], [17, 14],
],
'colors': ['b' for _ in range(19)]
}
CONFIG['chi3d_25'] = {
'nJoints': 25,
'joint_names': [],
'kintree': [[10, 9], [9, 8], [8, 11], [8, 14], [11, 12], [14, 15], [12, 13], [15, 16],
[8, 7], [7, 0], [0, 1], [0, 4], [1, 2], [4, 5], [2, 3], [5, 6],
[13, 21], [13, 22], [16, 23], [16, 24], [3, 17], [3, 18], [6, 19], [6, 20]],
'colors': ['b' for _ in range(25)]
}
CONFIG['chi3d_17'] = {
'nJoints': 17,
'joint_names': [],
'kintree': [[10, 9], [9, 8], [8, 11], [8, 14], [11, 12], [14, 15], [12, 13], [15, 16],
[8, 7], [7, 0], [0, 1], [0, 4], [1, 2], [4, 5], [2, 3], [5, 6],
],
'colors': ['b' for _ in range(17)]
}
CONFIG['hand'] = {'nJoints': 21, 'kintree':
[[ 1, 0],
[ 2, 1],
[ 3, 2],
[ 4, 3],
[ 5, 0],
[ 6, 5],
[ 7, 6],
[ 8, 7],
[ 9, 0],
[10, 9],
[11, 10],
[12, 11],
[13, 0],
[14, 13],
[15, 14],
[16, 15],
[17, 0],
[18, 17],
[19, 18],
[20, 19]],
'colors': [
'_k', '_k', '_k', '_k', '_r', '_r', '_r', '_r',
'_g', '_g', '_g', '_g', '_b', '_b', '_b', '_b',
'_y', '_y', '_y', '_y'],
'colorsrhand': [
'_pink', '_pink', '_pink', '_pink', '_mint', '_mint', '_mint', '_mint',
'_orange', '_orange', '_orange', '_orange', '_mint2', '_mint2', '_mint2', '_mint2',
'purple', 'purple', 'purple', 'purple'],
'joint_names':[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]
}
CONFIG['handl'] = CONFIG['hand']
CONFIG['handr'] = CONFIG['hand']
CONFIG['handlr'] = {
'nJoints': 42,
'colors': CONFIG['hand']['colors'] + CONFIG['hand']['colorsrhand'],
'joint_names': CONFIG['hand']['joint_names'] + CONFIG['hand']['joint_names'],
'kintree': np.vstack((np.array(CONFIG['hand']['kintree']), np.array(CONFIG['hand']['kintree'])+21)).tolist()
}
CONFIG['bodyhand'] = {'kintree':
[[ 1, 0],
[ 2, 1],
[ 3, 2],
[ 4, 3],
[ 5, 1],
[ 6, 5],
[ 7, 6],
[ 8, 1],
[ 9, 8],
[10, 9],
[11, 10],
[12, 8],
[13, 12],
[14, 13],
[15, 0],
[16, 0],
[17, 15],
[18, 16],
[19, 14],
[20, 19],
[21, 14],
[22, 11],
[23, 22],
[24, 11],
[26, 7], # handl
[27, 26],
[28, 27],
[29, 28],
[30, 7],
[31, 30],
[32, 31],
[33, 32],
[34, 7],
[35, 34],
[36, 35],
[37, 36],
[38, 7],
[39, 38],
[40, 39],
[41, 40],
[42, 7],
[43, 42],
[44, 43],
[45, 44],
[47, 4], # handr
[48, 47],
[49, 48],
[50, 49],
[51, 4],
[52, 51],
[53, 52],
[54, 53],
[55, 4],
[56, 55],
[57, 56],
[58, 57],
[59, 4],
[60, 59],
[61, 60],
[62, 61],
[63, 4],
[64, 63],
[65, 64],
[66, 65]
],
'nJoints': 67,
'colors': CONFIG['body25']['colors'] + CONFIG['hand']['colors'] + CONFIG['hand']['colors'],
'skeleton':{
( 0, 1): {'mean': 0.251, 'std': 0.050},
( 1, 2): {'mean': 0.169, 'std': 0.034},
( 2, 3): {'mean': 0.292, 'std': 0.058},
( 3, 4): {'mean': 0.275, 'std': 0.055},
( 1, 5): {'mean': 0.169, 'std': 0.034},
( 5, 6): {'mean': 0.295, 'std': 0.059},
( 6, 7): {'mean': 0.278, 'std': 0.056},
( 1, 8): {'mean': 0.566, 'std': 0.113},
( 8, 9): {'mean': 0.110, 'std': 0.022},
( 9, 10): {'mean': 0.398, 'std': 0.080},
(10, 11): {'mean': 0.402, 'std': 0.080},
( 8, 12): {'mean': 0.111, 'std': 0.022},
(12, 13): {'mean': 0.395, 'std': 0.079},
(13, 14): {'mean': 0.403, 'std': 0.081},
( 0, 15): {'mean': 0.053, 'std': 0.011},
( 0, 16): {'mean': 0.056, 'std': 0.011},
(15, 17): {'mean': 0.107, 'std': 0.021},
(16, 18): {'mean': 0.107, 'std': 0.021},
(14, 19): {'mean': 0.180, 'std': 0.036},
(19, 20): {'mean': 0.055, 'std': 0.011},
(14, 21): {'mean': 0.065, 'std': 0.013},
(11, 22): {'mean': 0.169, 'std': 0.034},
(22, 23): {'mean': 0.052, 'std': 0.010},
(11, 24): {'mean': 0.061, 'std': 0.012},
( 7, 26): {'mean': 0.045, 'std': 0.009},
(26, 27): {'mean': 0.042, 'std': 0.008},
(27, 28): {'mean': 0.035, 'std': 0.007},
(28, 29): {'mean': 0.029, 'std': 0.006},
( 7, 30): {'mean': 0.102, 'std': 0.020},
(30, 31): {'mean': 0.040, 'std': 0.008},
(31, 32): {'mean': 0.026, 'std': 0.005},
(32, 33): {'mean': 0.023, 'std': 0.005},
( 7, 34): {'mean': 0.101, 'std': 0.020},
(34, 35): {'mean': 0.043, 'std': 0.009},
(35, 36): {'mean': 0.029, 'std': 0.006},
(36, 37): {'mean': 0.024, 'std': 0.005},
( 7, 38): {'mean': 0.097, 'std': 0.019},
(38, 39): {'mean': 0.041, 'std': 0.008},
(39, 40): {'mean': 0.027, 'std': 0.005},
(40, 41): {'mean': 0.024, 'std': 0.005},
( 7, 42): {'mean': 0.095, 'std': 0.019},
(42, 43): {'mean': 0.033, 'std': 0.007},
(43, 44): {'mean': 0.020, 'std': 0.004},
(44, 45): {'mean': 0.018, 'std': 0.004},
( 4, 47): {'mean': 0.043, 'std': 0.009},
(47, 48): {'mean': 0.041, 'std': 0.008},
(48, 49): {'mean': 0.034, 'std': 0.007},
(49, 50): {'mean': 0.028, 'std': 0.006},
( 4, 51): {'mean': 0.101, 'std': 0.020},
(51, 52): {'mean': 0.041, 'std': 0.008},
(52, 53): {'mean': 0.026, 'std': 0.005},
(53, 54): {'mean': 0.024, 'std': 0.005},
( 4, 55): {'mean': 0.100, 'std': 0.020},
(55, 56): {'mean': 0.044, 'std': 0.009},
(56, 57): {'mean': 0.029, 'std': 0.006},
(57, 58): {'mean': 0.023, 'std': 0.005},
( 4, 59): {'mean': 0.096, 'std': 0.019},
(59, 60): {'mean': 0.040, 'std': 0.008},
(60, 61): {'mean': 0.028, 'std': 0.006},
(61, 62): {'mean': 0.023, 'std': 0.005},
( 4, 63): {'mean': 0.094, 'std': 0.019},
(63, 64): {'mean': 0.032, 'std': 0.006},
(64, 65): {'mean': 0.020, 'std': 0.004},
(65, 66): {'mean': 0.018, 'std': 0.004},
}
}
CONFIG['bodyhandface'] = {'kintree':
[[ 1, 0],
[ 2, 1],
[ 3, 2],
[ 4, 3],
[ 5, 1],
[ 6, 5],
[ 7, 6],
[ 8, 1],
[ 9, 8],
[10, 9],
[11, 10],
[12, 8],
[13, 12],
[14, 13],
[15, 0],
[16, 0],
[17, 15],
[18, 16],
[19, 14],
[20, 19],
[21, 14],
[22, 11],
[23, 22],
[24, 11],
[26, 7], # handl
[27, 26],
[28, 27],
[29, 28],
[30, 7],
[31, 30],
[32, 31],
[33, 32],
[34, 7],
[35, 34],
[36, 35],
[37, 36],
[38, 7],
[39, 38],
[40, 39],
[41, 40],
[42, 7],
[43, 42],
[44, 43],
[45, 44],
[47, 4], # handr
[48, 47],
[49, 48],
[50, 49],
[51, 4],
[52, 51],
[53, 52],
[54, 53],
[55, 4],
[56, 55],
[57, 56],
[58, 57],
[59, 4],
[60, 59],
[61, 60],
[62, 61],
[63, 4],
[64, 63],
[65, 64],
[66, 65],
[ 67, 68],
[ 68, 69],
[ 69, 70],
[ 70, 71],
[ 72, 73],
[ 73, 74],
[ 74, 75],
[ 75, 76],
[ 77, 78],
[ 78, 79],
[ 79, 80],
[ 81, 82],
[ 82, 83],
[ 83, 84],
[ 84, 85],
[ 86, 87],
[ 87, 88],
[ 88, 89],
[ 89, 90],
[ 90, 91],
[ 91, 86],
[ 92, 93],
[ 93, 94],
[ 94, 95],
[ 95, 96],
[ 96, 97],
[ 97, 92],
[ 98, 99],
[ 99, 100],
[100, 101],
[101, 102],
[102, 103],
[103, 104],
[104, 105],
[105, 106],
[106, 107],
[107, 108],
[108, 109],
[109, 98],
[110, 111],
[111, 112],
[112, 113],
[113, 114],
[114, 115],
[115, 116],
[116, 117],
[117, 110]
],
'nJoints': 118,
'skeleton':{
( 0, 1): {'mean': 0.251, 'std': 0.050},
( 1, 2): {'mean': 0.169, 'std': 0.034},
( 2, 3): {'mean': 0.292, 'std': 0.058},
( 3, 4): {'mean': 0.275, 'std': 0.055},
( 1, 5): {'mean': 0.169, 'std': 0.034},
( 5, 6): {'mean': 0.295, 'std': 0.059},
( 6, 7): {'mean': 0.278, 'std': 0.056},
( 1, 8): {'mean': 0.566, 'std': 0.113},
( 8, 9): {'mean': 0.110, 'std': 0.022},
( 9, 10): {'mean': 0.398, 'std': 0.080},
(10, 11): {'mean': 0.402, 'std': 0.080},
( 8, 12): {'mean': 0.111, 'std': 0.022},
(12, 13): {'mean': 0.395, 'std': 0.079},
(13, 14): {'mean': 0.403, 'std': 0.081},
( 0, 15): {'mean': 0.053, 'std': 0.011},
( 0, 16): {'mean': 0.056, 'std': 0.011},
(15, 17): {'mean': 0.107, 'std': 0.021},
(16, 18): {'mean': 0.107, 'std': 0.021},
(14, 19): {'mean': 0.180, 'std': 0.036},
(19, 20): {'mean': 0.055, 'std': 0.011},
(14, 21): {'mean': 0.065, 'std': 0.013},
(11, 22): {'mean': 0.169, 'std': 0.034},
(22, 23): {'mean': 0.052, 'std': 0.010},
(11, 24): {'mean': 0.061, 'std': 0.012},
( 7, 26): {'mean': 0.045, 'std': 0.009},
(26, 27): {'mean': 0.042, 'std': 0.008},
(27, 28): {'mean': 0.035, 'std': 0.007},
(28, 29): {'mean': 0.029, 'std': 0.006},
( 7, 30): {'mean': 0.102, 'std': 0.020},
(30, 31): {'mean': 0.040, 'std': 0.008},
(31, 32): {'mean': 0.026, 'std': 0.005},
(32, 33): {'mean': 0.023, 'std': 0.005},
( 7, 34): {'mean': 0.101, 'std': 0.020},
(34, 35): {'mean': 0.043, 'std': 0.009},
(35, 36): {'mean': 0.029, 'std': 0.006},
(36, 37): {'mean': 0.024, 'std': 0.005},
( 7, 38): {'mean': 0.097, 'std': 0.019},
(38, 39): {'mean': 0.041, 'std': 0.008},
(39, 40): {'mean': 0.027, 'std': 0.005},
(40, 41): {'mean': 0.024, 'std': 0.005},
( 7, 42): {'mean': 0.095, 'std': 0.019},
(42, 43): {'mean': 0.033, 'std': 0.007},
(43, 44): {'mean': 0.020, 'std': 0.004},
(44, 45): {'mean': 0.018, 'std': 0.004},
( 4, 47): {'mean': 0.043, 'std': 0.009},
(47, 48): {'mean': 0.041, 'std': 0.008},
(48, 49): {'mean': 0.034, 'std': 0.007},
(49, 50): {'mean': 0.028, 'std': 0.006},
( 4, 51): {'mean': 0.101, 'std': 0.020},
(51, 52): {'mean': 0.041, 'std': 0.008},
(52, 53): {'mean': 0.026, 'std': 0.005},
(53, 54): {'mean': 0.024, 'std': 0.005},
( 4, 55): {'mean': 0.100, 'std': 0.020},
(55, 56): {'mean': 0.044, 'std': 0.009},
(56, 57): {'mean': 0.029, 'std': 0.006},
(57, 58): {'mean': 0.023, 'std': 0.005},
( 4, 59): {'mean': 0.096, 'std': 0.019},
(59, 60): {'mean': 0.040, 'std': 0.008},
(60, 61): {'mean': 0.028, 'std': 0.006},
(61, 62): {'mean': 0.023, 'std': 0.005},
( 4, 63): {'mean': 0.094, 'std': 0.019},
(63, 64): {'mean': 0.032, 'std': 0.006},
(64, 65): {'mean': 0.020, 'std': 0.004},
(65, 66): {'mean': 0.018, 'std': 0.004},
(67, 68): {'mean': 0.012, 'std': 0.002},
(68, 69): {'mean': 0.013, 'std': 0.003},
(69, 70): {'mean': 0.014, 'std': 0.003},
(70, 71): {'mean': 0.012, 'std': 0.002},
(72, 73): {'mean': 0.014, 'std': 0.003},
(73, 74): {'mean': 0.014, 'std': 0.003},
(74, 75): {'mean': 0.015, 'std': 0.003},
(75, 76): {'mean': 0.013, 'std': 0.003},
(77, 78): {'mean': 0.014, 'std': 0.003},
(78, 79): {'mean': 0.014, 'std': 0.003},
(79, 80): {'mean': 0.015, 'std': 0.003},
(81, 82): {'mean': 0.009, 'std': 0.002},
(82, 83): {'mean': 0.010, 'std': 0.002},
(83, 84): {'mean': 0.010, 'std': 0.002},
(84, 85): {'mean': 0.010, 'std': 0.002},
(86, 87): {'mean': 0.009, 'std': 0.002},
(87, 88): {'mean': 0.009, 'std': 0.002},
(88, 89): {'mean': 0.008, 'std': 0.002},
(89, 90): {'mean': 0.008, 'std': 0.002},
(90, 91): {'mean': 0.009, 'std': 0.002},
(86, 91): {'mean': 0.008, 'std': 0.002},
(92, 93): {'mean': 0.009, 'std': 0.002},
(93, 94): {'mean': 0.009, 'std': 0.002},
(94, 95): {'mean': 0.009, 'std': 0.002},
(95, 96): {'mean': 0.009, 'std': 0.002},
(96, 97): {'mean': 0.009, 'std': 0.002},
(92, 97): {'mean': 0.009, 'std': 0.002},
(98, 99): {'mean': 0.016, 'std': 0.003},
(99, 100): {'mean': 0.013, 'std': 0.003},
(100, 101): {'mean': 0.008, 'std': 0.002},
(101, 102): {'mean': 0.008, 'std': 0.002},
(102, 103): {'mean': 0.012, 'std': 0.002},
(103, 104): {'mean': 0.014, 'std': 0.003},
(104, 105): {'mean': 0.015, 'std': 0.003},
(105, 106): {'mean': 0.012, 'std': 0.002},
(106, 107): {'mean': 0.009, 'std': 0.002},
(107, 108): {'mean': 0.009, 'std': 0.002},
(108, 109): {'mean': 0.013, 'std': 0.003},
(98, 109): {'mean': 0.016, 'std': 0.003},
(110, 111): {'mean': 0.021, 'std': 0.004},
(111, 112): {'mean': 0.009, 'std': 0.002},
(112, 113): {'mean': 0.008, 'std': 0.002},
(113, 114): {'mean': 0.019, 'std': 0.004},
(114, 115): {'mean': 0.018, 'std': 0.004},
(115, 116): {'mean': 0.008, 'std': 0.002},
(116, 117): {'mean': 0.009, 'std': 0.002},
(110, 117): {'mean': 0.020, 'std': 0.004},
}
}
CONFIG['face'] = {'nJoints': 70,
'kintree':[ [0,1],[1,2],[2,3],[3,4],[4,5],[5,6],[6,7],[7,8],[8,9],[9,10],[10,11],[11,12],[12,13],[13,14],[14,15],[15,16], #outline (ignored)
[17,18],[18,19],[19,20],[20,21], #right eyebrow
[22,23],[23,24],[24,25],[25,26], #left eyebrow
[27,28],[28,29],[29,30], #nose upper part
[31,32],[32,33],[33,34],[34,35], #nose lower part
[36,37],[37,38],[38,39],[39,40],[40,41],[41,36], #right eye
[42,43],[43,44],[44,45],[45,46],[46,47],[47,42], #left eye
[48,49],[49,50],[50,51],[51,52],[52,53],[53,54],[54,55],[55,56],[56,57],[57,58],[58,59],[59,48], #Lip outline
[60,61],[61,62],[62,63],[63,64],[64,65],[65,66],[66,67],[67,60] #Lip inner line
], 'colors': ['g' for _ in range(100)]}
CONFIG['h36m'] = {
'kintree': [[0, 1], [1, 2], [2, 3], [0, 4], [4, 5], [5, 6], [0, 7], [7, 8], [8, 9], [9, 10], [8, 11], [11, 12], [
12, 13], [8, 14], [14, 15], [15, 16]],
'color': ['r', 'r', 'r', 'g', 'g', 'g', 'k', 'k', 'k', 'k', 'g', 'g', 'g', 'r', 'r', 'r'],
'joint_names': [
'hip', # 0
'LHip', # 1
'LKnee', # 2
'LAnkle', # 3
'RHip', # 4
'RKnee', # 5
'RAnkle', # 6
'Spine (H36M)', # 7
'Neck', # 8
'Head (H36M)', # 9
'headtop', # 10
'LShoulder', # 11
'LElbow', # 12
'LWrist', # 13
'RShoulder', # 14
'RElbow', # 15
'RWrist', # 16
],
'nJoints': 17}
CONFIG['h36m_17'] = CONFIG['h36m']
NJOINTS_BODY = 25
NJOINTS_HAND = 21
NLIMBS_BODY = len(CONFIG['body25']['kintree'])
NLIMBS_HAND = len(CONFIG['hand']['kintree'])
CONFIG['total'] = compose(['body25', 'hand', 'hand', 'face'])
CONFIG['bodyhandface']['joint_names'] = CONFIG['body25']['joint_names']
CONFIG['keypoints2d'] = CONFIG['body25']
CONFIG['handl2d'] = CONFIG['hand']
CONFIG['handr2d'] = CONFIG['hand']
CONFIG['face2d'] = CONFIG['face']
CONFIG['mpbody'] = {}
CONFIG['mpbody']['kintree'] = [
(0, 1),
(0, 4),
(1, 2),
(2, 3),
(3, 7),
(4, 5),
(5, 6),
(6, 8),
(9, 10),
(11, 12),
(11, 13),
(11, 23),
(12, 14),
(12, 24),
(13, 15),
(14, 16),
(15, 17),
(15, 19),
(15, 21),
(16, 18),
(16, 20),
(16, 22),
(17, 19),
(18, 20),
(23, 24),
(23, 25),
(24, 26),
(25, 27),
(26, 28),
(27, 29),
(27, 31),
(28, 30),
(28, 32),
(29, 31),
(30, 32)
]
CONFIG['mpbody']['nJoints'] = 33
CONFIG['mpbody']['colors'] = ['b', 'r', 'b', 'b', 'b', 'r', 'r', 'r', 'k', 'k', 'b', 'b', 'r', 'r', 'b', 'r',
'y', 'r', 'y', 'g', 'b', 'g', 'y', 'g', 'k', 'b', 'r', 'b', 'r', 'b', 'b', 'r', 'r', 'b', 'b']
CONFIG['mpface'] = {}
CONFIG['mpface']['kintree'] = [(270, 409), (176, 149), (37, 0), (84, 17), (318, 324), (293, 334), (386, 385), (7, 163), (33, 246), (17, 314), (374, 380), (251, 389), (390, 373), (267, 269), (295, 285), (389, 356), (173, 133), (33, 7), (377, 152), (158, 157), (405, 321), (54, 103), (263, 466), (324, 308), (67, 109), (409, 291), (157, 173), (454, 323), (388, 387), (78, 191), (148, 176), (311, 310), (39, 37), (249, 390), (144, 145), (402, 318), (80, 81), (310, 415), (153, 154), (384, 398), (397, 365), (234, 127), (103, 67), (282, 295), (338, 297), (378, 400), (127, 162), (321, 375), (375, 291), (317, 402), (81, 82), (154, 155), (91, 181), (334, 296), (297, 332), (269, 270), (150, 136), (109, 10), (356, 454), (58, 132), (312, 311), (152, 148), (415, 308), (161, 160), (296, 336), (65, 55), (61, 146), (78, 95), (380, 381), (398, 362), (361, 288), (246, 161), (162, 21), (0, 267), (82, 13), (132, 93), (314, 405), (10, 338), (178, 87), (387, 386), (381, 382), (70, 63), (61, 185), (14, 317), (105, 66), (300, 293), (382, 362), (88, 178), (185, 40), (46, 53), (284, 251), (400, 377), (136, 172), (323, 361), (13, 312), (21, 54), (172, 58), (373, 374), (163, 144), (276, 283), (53, 52), (365, 379), (379, 378), (146, 91), (263, 249), (283, 282), (87, 14), (145, 153), (155, 133), (93, 234), (66, 107), (95, 88), (159, 158), (52, 65), (332, 284), (40, 39), (191, 80), (63, 105), (181, 84), (466, 388), (149, 150), (288, 397), (160, 159), (385, 384)]
CONFIG['mpface']['nJoints'] = 468
CONFIG['mptotal'] = compose(['mpbody', 'hand', 'hand', 'mpface'])
CONFIG['bodyhandmpface'] = compose(['body25', 'hand', 'hand', 'mpface'])
CONFIG['iris'] = {
'nJoints': 10,
'kintree': [[0, 1], [1, 2], [2, 3], [3, 4]]
}
CONFIG['onepoint'] = {
'nJoints': 1,
'kintree': []
}
CONFIG['up'] = {
'nJoints': 79,
'kintree': []
}
CONFIG['ochuman'] = {
'nJoints': 19,
'kintree': [[0, 1], [1, 2], [3, 4], [4, 5], [6, 7], [7, 8], [9, 10], [10, 11], [12, 13], [14, 17], [15, 18], [17, 16], [18, 16]]
}
CONFIG['mpii'] = {
'nJoints': 16,
'kintree': [[0, 1], [1, 2], [3, 4], [4, 5], [2, 6], [3, 6], [6, 7], [7, 8], [8, 9], [10, 11], [11, 12], [7, 12], [7, 13], \
[13, 14], [14, 15]],
'joint_names': ['rank', 'rkne', 'rhip', 'lhip', 'lkne', 'lank', 'pelv', 'thrx', 'neck', 'head', 'rwri', 'relb', 'rsho', 'lsho', 'lelb', 'lwri'],
}
CONFIG['h36mltri_17'] = {
'kintree': [(0, 1), (1, 2), (2, 6), (5, 4), (4, 3), (3, 6), (6, 7), (7, 8), (8, 16), (9, 16), (8, 12), (11, 12), (10, 11), (8, 13), (13, 14), (14, 15)],
'color': ['r', 'r', 'r', 'g', 'g', 'g', 'k', 'k', 'k', 'k', 'g', 'g', 'g', 'r', 'r', 'r'],
'joint_names': CONFIG['mpii']['joint_names'] + ['Neck/Nose'],
'nJoints': 17}
def getKintree(name='total'):
if name == 'total':
# order: body25, face, rhand, lhand
kintree = CONFIG['body25']['kintree'] + CONFIG['hand']['kintree'] + CONFIG['hand']['kintree'] + CONFIG['face']['kintree']
kintree = np.array(kintree)
kintree[NLIMBS_BODY:NLIMBS_BODY + NLIMBS_HAND] += NJOINTS_BODY
kintree[NLIMBS_BODY + NLIMBS_HAND:NLIMBS_BODY + 2*NLIMBS_HAND] += NJOINTS_BODY + NJOINTS_HAND
kintree[NLIMBS_BODY + 2*NLIMBS_HAND:] += NJOINTS_BODY + 2*NJOINTS_HAND
elif name == 'smplh':
# order: body25, lhand, rhand
kintree = CONFIG['body25']['kintree'] + CONFIG['hand']['kintree'] + CONFIG['hand']['kintree']
kintree = np.array(kintree)
kintree[NLIMBS_BODY:NLIMBS_BODY + NLIMBS_HAND] += NJOINTS_BODY
kintree[NLIMBS_BODY + NLIMBS_HAND:NLIMBS_BODY + 2*NLIMBS_HAND] += NJOINTS_BODY + NJOINTS_HAND
return kintree | null |
13,123 | import numpy as np
COCO17_IN_BODY25 = [0,16,15,18,17,5,2,6,3,7,4,12,9,13,10,14,11]
def coco17tobody25(points2d):
dim = 3
if len(points2d.shape) == 2:
points2d = points2d[None, :, :]
dim = 2
kpts = np.zeros((points2d.shape[0], 25, 3))
kpts[:, COCO17_IN_BODY25, :2] = points2d[:, :, :2]
kpts[:, COCO17_IN_BODY25, 2:3] = points2d[:, :, 2:3]
kpts[:, 8, :2] = kpts[:, [9, 12], :2].mean(axis=1)
kpts[:, 8, 2] = kpts[:, [9, 12], 2].min(axis=1)
kpts[:, 1, :2] = kpts[:, [2, 5], :2].mean(axis=1)
kpts[:, 1, 2] = kpts[:, [2, 5], 2].min(axis=1)
if dim == 2:
kpts = kpts[0]
return kpts | null |
13,124 | import os
from os.path import join
from glob import glob
import cv2
import os, sys
import numpy as np
from ..mytools.camera_utils import read_camera, get_fundamental_matrix, Undistort
from ..mytools import FileWriter, read_annot, getFileList, save_json
from ..mytools.reader import read_keypoints3d, read_json, read_smpl
from ..mytools.file_utils import merge_params, select_nf, save_annot
def crop_image(img, annot, vis_2d=False, config={}, crop_square=True):
for det in annot:
bbox = det['bbox']
l, t, r, b = det['bbox'][:4]
if crop_square:
if b - t > r - l:
diff = (b - t) - (r - l)
l -= diff//2
r += diff//2
else:
diff = (r - l) - (b - t)
t -= diff//2
b += diff//2
l = max(0, int(l+0.5))
t = max(0, int(t+0.5))
r = min(img.shape[1], int(r+0.5))
b = min(img.shape[0], int(b+0.5))
det['bbox'][:4] = [l, t, r, b]
if vis_2d:
crop_img = img.copy()
from easymocap.mytools import plot_keypoints
plot_keypoints(crop_img, det['keypoints'], pid=det['id'],
config=config, use_limb_color=True, lw=2)
else:
crop_img = img
crop_img = crop_img[t:b, l:r, :]
if crop_square:
crop_img = cv2.resize(crop_img, (256, 256))
else:
crop_img = cv2.resize(crop_img, (128, 256))
det['crop'] = crop_img
det['img'] = img
return 0 | null |
13,125 | import os
from os.path import join
from glob import glob
import cv2
import os, sys
import numpy as np
from ..mytools.camera_utils import read_camera, get_fundamental_matrix, Undistort
from ..mytools import FileWriter, read_annot, getFileList, save_json
from ..mytools.reader import read_keypoints3d, read_json, read_smpl
from ..mytools.file_utils import merge_params, select_nf, save_annot
def read_camera(intri_name, extri_name, cam_names=[]):
assert os.path.exists(intri_name), intri_name
assert os.path.exists(extri_name), extri_name
intri = FileStorage(intri_name)
extri = FileStorage(extri_name)
cams, P = {}, {}
cam_names = intri.read('names', dt='list')
for cam in cam_names:
# 内参只读子码流的
cams[cam] = {}
cams[cam]['K'] = intri.read('K_{}'.format( cam))
cams[cam]['invK'] = np.linalg.inv(cams[cam]['K'])
H = intri.read('H_{}'.format(cam), dt='int')
W = intri.read('W_{}'.format(cam), dt='int')
if H is None or W is None:
print('[camera] no H or W for {}'.format(cam))
H, W = -1, -1
cams[cam]['H'] = H
cams[cam]['W'] = W
Rvec = extri.read('R_{}'.format(cam))
Tvec = extri.read('T_{}'.format(cam))
assert Rvec is not None, cam
R = cv2.Rodrigues(Rvec)[0]
RT = np.hstack((R, Tvec))
cams[cam]['RT'] = RT
cams[cam]['R'] = R
cams[cam]['Rvec'] = Rvec
cams[cam]['T'] = Tvec
cams[cam]['center'] = - Rvec.T @ Tvec
P[cam] = cams[cam]['K'] @ cams[cam]['RT']
cams[cam]['P'] = P[cam]
cams[cam]['dist'] = intri.read('dist_{}'.format(cam))
if cams[cam]['dist'] is None:
cams[cam]['dist'] = intri.read('D_{}'.format(cam))
if cams[cam]['dist'] is None:
print('[camera] no dist for {}'.format(cam))
cams['basenames'] = cam_names
return cams
def load_cameras(path):
# 读入相机参数
intri_name = join(path, 'intri.yml')
extri_name = join(path, 'extri.yml')
if os.path.exists(intri_name) and os.path.exists(extri_name):
cameras = read_camera(intri_name, extri_name)
cams = cameras.pop('basenames')
else:
print('\n\n!!!there is no camera parameters, maybe bug: \n', intri_name, extri_name, '\n')
cameras = None
return cameras | null |
13,126 | import os
from os.path import join
from glob import glob
import cv2
import os, sys
import numpy as np
from ..mytools.camera_utils import read_camera, get_fundamental_matrix, Undistort
from ..mytools import FileWriter, read_annot, getFileList, save_json
from ..mytools.reader import read_keypoints3d, read_json, read_smpl
from ..mytools.file_utils import merge_params, select_nf, save_annot
def numpy_to_list(array, precision=3):
return np.round(array, precision).tolist() | null |
13,127 |
def load_weight_pose2d(model, opts):
if model == 'smpl':
weight = {
'k2d': 2e-4,
'init_poses': 1e-3, 'init_shapes': 1e-2,
'smooth_body': 5e-1, 'smooth_poses': 1e-1,
}
elif model == 'smplh':
raise NotImplementedError
elif model == 'smplx':
raise NotImplementedError
else:
weight = {}
for key in opts.keys():
if key in weight.keys():
weight[key] = opts[key]
return weight | null |
13,128 | from ..pyfitting import optimizeShape, optimizePose2D, optimizePose3D
from ..mytools import Timer
from ..dataset import CONFIG
from .weight import load_weight_pose, load_weight_shape
from .config import Config
class Config:
OPT_R = False
OPT_T = False
OPT_POSE = False
OPT_SHAPE = False
OPT_HAND = False
OPT_EXPR = False
ROBUST_3D_ = False
ROBUST_3D = False
verbose = False
model = 'smpl'
device = None
def __init__(self, args=None) -> None:
if args is not None:
self.verbose = args.verbose
self.model = args.model
self.ROBUST_3D_ = args.robust3d
def multi_stage_optimize2d(body_model, params, kp2ds, bboxes, Pall, weight={}, args=None):
cfg = Config(args)
cfg.device = body_model.device
cfg.device = body_model.device
cfg.model = body_model.model_type
with Timer('Optimize global RT'):
cfg.OPT_R = True
cfg.OPT_T = True
params = optimizePose2D(body_model, params, bboxes, kp2ds, Pall, weight=weight, cfg=cfg)
with Timer('Optimize 2D Pose/{} frames'.format(kp2ds.shape[0])):
cfg.OPT_POSE = True
cfg.OPT_SHAPE = True
# bboxes => (nFrames, nViews, 5), keypoints2d => (nFrames, nViews, nJoints, 3)
params = optimizePose2D(body_model, params, bboxes, kp2ds, Pall, weight=weight, cfg=cfg)
return params | null |
13,129 | from ..pyfitting import optimizeShape, optimizePose2D, optimizePose3D
from ..mytools import Timer
from ..dataset import CONFIG
from .weight import load_weight_pose, load_weight_shape
from .config import Config
def multi_stage_optimize(body_model, params, kp3ds, kp2ds=None, bboxes=None, Pall=None, weight={}, cfg=None):
with Timer('Optimize global RT'):
cfg.OPT_R = True
cfg.OPT_T = True
params = optimizePose3D(body_model, params, kp3ds, weight=weight, cfg=cfg)
# params = optimizePose(body_model, params, kp3ds, weight_loss=weight, kintree=config['kintree'], cfg=cfg)
with Timer('Optimize 3D Pose/{} frames'.format(kp3ds.shape[0])):
cfg.OPT_POSE = True
cfg.ROBUST_3D = False
params = optimizePose3D(body_model, params, kp3ds, weight=weight, cfg=cfg)
if False:
cfg.ROBUST_3D = True
params = optimizePose3D(body_model, params, kp3ds, weight=weight, cfg=cfg)
if cfg.model in ['smplh', 'smplx']:
cfg.OPT_HAND = True
params = optimizePose3D(body_model, params, kp3ds, weight=weight, cfg=cfg)
if cfg.model == 'smplx':
cfg.OPT_EXPR = True
params = optimizePose3D(body_model, params, kp3ds, weight=weight, cfg=cfg)
if kp2ds is not None:
with Timer('Optimize 2D Pose/{} frames'.format(kp3ds.shape[0])):
# bboxes => (nFrames, nViews, 5), keypoints2d => (nFrames, nViews, nJoints, 3)
params = optimizePose2D(body_model, params, bboxes, kp2ds, Pall, weight=weight, cfg=cfg)
return params
def load_weight_shape(model, opts):
if model in ['smpl', 'smplh', 'smplx']:
weight = {'s3d': 1., 'reg_shapes': 5e-3}
elif model == 'mano':
weight = {'s3d': 1e2, 'reg_shapes': 5e-5}
else:
raise NotImplementedError
for key in opts.keys():
if key in weight.keys():
weight[key] = opts[key]
return weight
def load_weight_pose(model, opts):
if model == 'smpl':
weight = {
'k3d': 1., 'reg_poses_zero': 1e-2, 'smooth_body': 5e0,
'smooth_poses': 1e0, 'reg_poses': 1e-3,
'k2d': 1e-4
}
elif model == 'smplh':
weight = {
'k3d': 1., 'k3d_hand': 5.,
'reg_poses_zero': 1e-2,
'smooth_body': 5e-1, 'smooth_poses': 1e-1, 'smooth_hand': 1e-3,
'reg_hand': 1e-4,
'k2d': 1e-4
}
elif model == 'smplx':
weight = {
'k3d': 1., 'k3d_hand': 5., 'k3d_face': 2.,
'reg_poses_zero': 1e-2,
'smooth_body': 5e-1, 'smooth_poses': 1e-1, 'smooth_hand': 1e-3,
'reg_hand': 1e-4, 'reg_expr': 1e-2, 'reg_head': 1e-2,
'k2d': 1e-4
}
elif model == 'mano':
weight = {
'k3d': 1e2, 'k2d': 2e-3,
'reg_poses': 1e-3, 'smooth_body': 1e2,
# 'collision': 1 # If the frame number is too large (more than 1000), then GPU oom
}
# weight = {
# 'k3d': 1., 'k2d': 1e-4,
# 'reg_poses': 1e-4, 'smooth_body': 0
# }
else:
print(model)
raise NotImplementedError
for key in opts.keys():
if key in weight.keys():
weight[key] = opts[key]
return weight
class Config:
OPT_R = False
OPT_T = False
OPT_POSE = False
OPT_SHAPE = False
OPT_HAND = False
OPT_EXPR = False
ROBUST_3D_ = False
ROBUST_3D = False
verbose = False
model = 'smpl'
device = None
def __init__(self, args=None) -> None:
if args is not None:
self.verbose = args.verbose
self.model = args.model
self.ROBUST_3D_ = args.robust3d
def smpl_from_keypoints3d2d(body_model, kp3ds, kp2ds, bboxes, Pall, config, args,
weight_shape=None, weight_pose=None):
model_type = body_model.model_type
params_init = body_model.init_params(nFrames=1)
if weight_shape is None:
weight_shape = load_weight_shape(model_type, args.opts)
if model_type in ['smpl', 'smplh', 'smplx']:
# when use SMPL model, optimize the shape only with first 1-14 limbs,
# don't use (nose, neck)
params_shape = optimizeShape(body_model, params_init, kp3ds,
weight_loss=weight_shape, kintree=CONFIG['body15']['kintree'][1:])
else:
params_shape = optimizeShape(body_model, params_init, kp3ds,
weight_loss=weight_shape, kintree=config['kintree'])
# optimize 3D pose
cfg = Config(args)
cfg.device = body_model.device
params = body_model.init_params(nFrames=kp3ds.shape[0])
params['shapes'] = params_shape['shapes'].copy()
if weight_pose is None:
weight_pose = load_weight_pose(model_type, args.opts)
# We divide this step to two functions, because we can have different initialization method
params = multi_stage_optimize(body_model, params, kp3ds, kp2ds, bboxes, Pall, weight_pose, cfg)
return params | null |
13,130 | from ..pyfitting import optimizeShape, optimizePose2D, optimizePose3D
from ..mytools import Timer
from ..dataset import CONFIG
from .weight import load_weight_pose, load_weight_shape
from .config import Config
def multi_stage_optimize(body_model, params, kp3ds, kp2ds=None, bboxes=None, Pall=None, weight={}, cfg=None):
with Timer('Optimize global RT'):
cfg.OPT_R = True
cfg.OPT_T = True
params = optimizePose3D(body_model, params, kp3ds, weight=weight, cfg=cfg)
# params = optimizePose(body_model, params, kp3ds, weight_loss=weight, kintree=config['kintree'], cfg=cfg)
with Timer('Optimize 3D Pose/{} frames'.format(kp3ds.shape[0])):
cfg.OPT_POSE = True
cfg.ROBUST_3D = False
params = optimizePose3D(body_model, params, kp3ds, weight=weight, cfg=cfg)
if False:
cfg.ROBUST_3D = True
params = optimizePose3D(body_model, params, kp3ds, weight=weight, cfg=cfg)
if cfg.model in ['smplh', 'smplx']:
cfg.OPT_HAND = True
params = optimizePose3D(body_model, params, kp3ds, weight=weight, cfg=cfg)
if cfg.model == 'smplx':
cfg.OPT_EXPR = True
params = optimizePose3D(body_model, params, kp3ds, weight=weight, cfg=cfg)
if kp2ds is not None:
with Timer('Optimize 2D Pose/{} frames'.format(kp3ds.shape[0])):
# bboxes => (nFrames, nViews, 5), keypoints2d => (nFrames, nViews, nJoints, 3)
params = optimizePose2D(body_model, params, bboxes, kp2ds, Pall, weight=weight, cfg=cfg)
return params
def load_weight_shape(model, opts):
if model in ['smpl', 'smplh', 'smplx']:
weight = {'s3d': 1., 'reg_shapes': 5e-3}
elif model == 'mano':
weight = {'s3d': 1e2, 'reg_shapes': 5e-5}
else:
raise NotImplementedError
for key in opts.keys():
if key in weight.keys():
weight[key] = opts[key]
return weight
def load_weight_pose(model, opts):
if model == 'smpl':
weight = {
'k3d': 1., 'reg_poses_zero': 1e-2, 'smooth_body': 5e0,
'smooth_poses': 1e0, 'reg_poses': 1e-3,
'k2d': 1e-4
}
elif model == 'smplh':
weight = {
'k3d': 1., 'k3d_hand': 5.,
'reg_poses_zero': 1e-2,
'smooth_body': 5e-1, 'smooth_poses': 1e-1, 'smooth_hand': 1e-3,
'reg_hand': 1e-4,
'k2d': 1e-4
}
elif model == 'smplx':
weight = {
'k3d': 1., 'k3d_hand': 5., 'k3d_face': 2.,
'reg_poses_zero': 1e-2,
'smooth_body': 5e-1, 'smooth_poses': 1e-1, 'smooth_hand': 1e-3,
'reg_hand': 1e-4, 'reg_expr': 1e-2, 'reg_head': 1e-2,
'k2d': 1e-4
}
elif model == 'mano':
weight = {
'k3d': 1e2, 'k2d': 2e-3,
'reg_poses': 1e-3, 'smooth_body': 1e2,
# 'collision': 1 # If the frame number is too large (more than 1000), then GPU oom
}
# weight = {
# 'k3d': 1., 'k2d': 1e-4,
# 'reg_poses': 1e-4, 'smooth_body': 0
# }
else:
print(model)
raise NotImplementedError
for key in opts.keys():
if key in weight.keys():
weight[key] = opts[key]
return weight
class Config:
OPT_R = False
OPT_T = False
OPT_POSE = False
OPT_SHAPE = False
OPT_HAND = False
OPT_EXPR = False
ROBUST_3D_ = False
ROBUST_3D = False
verbose = False
model = 'smpl'
device = None
def __init__(self, args=None) -> None:
if args is not None:
self.verbose = args.verbose
self.model = args.model
self.ROBUST_3D_ = args.robust3d
def smpl_from_keypoints3d(body_model, kp3ds, config, args,
weight_shape=None, weight_pose=None):
model_type = body_model.model_type
params_init = body_model.init_params(nFrames=1)
if weight_shape is None:
weight_shape = load_weight_shape(model_type, args.opts)
if model_type in ['smpl', 'smplh', 'smplx']:
# when use SMPL model, optimize the shape only with first 1-14 limbs,
# don't use (nose, neck)
params_shape = optimizeShape(body_model, params_init, kp3ds,
weight_loss=weight_shape, kintree=CONFIG['body15']['kintree'][1:])
else:
params_shape = optimizeShape(body_model, params_init, kp3ds,
weight_loss=weight_shape, kintree=config['kintree'])
# optimize 3D pose
cfg = Config(args)
cfg.device = body_model.device
cfg.model_type = model_type
params = body_model.init_params(nFrames=kp3ds.shape[0])
params['shapes'] = params_shape['shapes'].copy()
if weight_pose is None:
weight_pose = load_weight_pose(model_type, args.opts)
# We divide this step to two functions, because we can have different initialization method
params = multi_stage_optimize(body_model, params, kp3ds, None, None, None, weight_pose, cfg)
return params | null |
13,131 | from .yacs import CfgNode as CN
import importlib
def load_object(module_name, module_args, **extra_args):
def load_renderer(cfg, network):
if cfg.split == 'mesh':
return load_object(cfg.renderer_mesh_module, cfg.renderer_mesh_args, net=network)
else:
return load_object(cfg.renderer_module, cfg.renderer_args, net=network) | null |
13,132 | from .yacs import CfgNode as CN
import importlib
def load_object(module_name, module_args, **extra_args):
module_path = '.'.join(module_name.split('.')[:-1])
module = importlib.import_module(module_path)
name = module_name.split('.')[-1]
obj = getattr(module, name)(**extra_args, **module_args)
return obj
def load_visualizer(cfg):
if cfg.split == 'mesh':
return load_object(cfg.visualizer_mesh_module, cfg.visualizer_mesh_args)
else:
return load_object(cfg.visualizer_module, cfg.visualizer_args) | null |
13,133 | from .yacs import CfgNode as CN
import importlib
def load_object(module_name, module_args, **extra_args):
module_path = '.'.join(module_name.split('.')[:-1])
module = importlib.import_module(module_path)
name = module_name.split('.')[-1]
obj = getattr(module, name)(**extra_args, **module_args)
return obj
def load_evaluator(cfg):
if cfg.evaluator_args.skip_eval:
return None
else:
return load_object(cfg.evaluator_module, cfg.evaluator_args) | null |
13,134 | from .yacs import CfgNode as CN
class Config:
def load_from_args(cls, default_cfg='config/vis/base.yml'):
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--cfg', type=str, default=default_cfg)
parser.add_argument('--local_rank', type=int, default=0)
parser.add_argument('--debug', action='store_true')
parser.add_argument("--opts", default=[], nargs='+')
args = parser.parse_args()
return cls.load(filename=args.cfg, opts=args.opts, debug=args.debug)
def load_args(cls, usage=None):
import argparse
parser = argparse.ArgumentParser(usage=usage)
parser.add_argument('--cfg', type=str, default='config/vis/base.yml')
parser.add_argument('--local_rank', type=int, default=0)
parser.add_argument('--debug', action='store_true')
parser.add_argument('--slurm', action='store_true')
parser.add_argument("opts", default=None, nargs='+')
args = parser.parse_args()
return args, cls.load(filename=args.cfg, opts=args.opts, debug=args.debug)
def load(cls, filename=None, opts=[], debug=False) -> CN:
cfg = CN()
cfg = cls.init(cfg)
if filename is not None:
cfg.merge_from_file(filename)
if len(opts) > 0:
cfg.merge_from_list(opts)
cls.parse(cfg)
if debug:
cls.print(cfg)
return cfg
def init(cfg):
return cfg
def parse(cfg):
pass
def print(cfg):
print('[Info] --------------')
print('[Info] Configuration:')
print('[Info] --------------')
print(cfg)
import importlib
def load_config_from_index(config_dict, mode):
if isinstance(config_dict, str):
config_dict = Config.load(config_dict, [])
config_ori = config_dict[mode]
_cfg = CN()
if 'exp' in config_ori.keys():
config_ = config_dict[config_ori.pop('exp')]
opts = config_.get('opts', []) + config_ori.get('opts', [])
config = config_
config.opts = opts
else:
config = config_ori
_cfg['parents'] = []
opts = config.pop('opts', [])
for key in list(config.keys()):
if config[key].endswith('.yml'):
_cfg['parents'].append(config[key])
tmp_name = 'tmp_config.yml'
print(_cfg, file=open(tmp_name, 'w'))
print(config)
config['alias'] = config_ori['alias']
return Config.load(tmp_name, opts=opts), config | null |
13,135 | import copy
import io
import logging
import os
from ast import literal_eval
import yaml
try:
_FILE_TYPES = (file, io.IOBase)
_PY2 = True
except NameError:
_FILE_TYPES = (io.IOBase,)
def _load_cfg_from_file(file_obj):
"""Load a config from a YAML file or a Python source file."""
_, file_extension = os.path.splitext(file_obj.name)
if file_extension in _YAML_EXTS:
return _load_cfg_from_yaml_str(file_obj.read())
elif file_extension in _PY_EXTS:
return _load_cfg_py_source(file_obj.name)
else:
raise Exception(
"Attempt to load from an unsupported file type {}; "
"only {} are supported".format(file_obj, _YAML_EXTS.union(_PY_EXTS))
)
def _load_cfg_from_yaml_str(str_obj):
"""Load a config from a YAML string encoding."""
cfg_as_dict = yaml.safe_load(str_obj)
return CfgNode(cfg_as_dict)
def _assert_with_logging(cond, msg):
if not cond:
logger.debug(msg)
assert cond, msg
The provided code snippet includes necessary dependencies for implementing the `load_cfg` function. Write a Python function `def load_cfg(cfg_file_obj_or_str)` to solve the following problem:
Load a cfg. Supports loading from: - A file object backed by a YAML file - A file object backed by a Python source file that exports an attribute "cfg" that is either a dict or a CfgNode - A string that can be parsed as valid YAML
Here is the function:
def load_cfg(cfg_file_obj_or_str):
"""Load a cfg. Supports loading from:
- A file object backed by a YAML file
- A file object backed by a Python source file that exports an attribute
"cfg" that is either a dict or a CfgNode
- A string that can be parsed as valid YAML
"""
_assert_with_logging(
isinstance(cfg_file_obj_or_str, _FILE_TYPES + (str,)),
"Expected first argument to be of type {} or {}, but it was {}".format(
_FILE_TYPES, str, type(cfg_file_obj_or_str)
),
)
if isinstance(cfg_file_obj_or_str, str):
return _load_cfg_from_yaml_str(cfg_file_obj_or_str)
elif isinstance(cfg_file_obj_or_str, _FILE_TYPES):
return _load_cfg_from_file(cfg_file_obj_or_str)
else:
raise NotImplementedError("Impossible to reach here (unless there's a bug)") | Load a cfg. Supports loading from: - A file object backed by a YAML file - A file object backed by a Python source file that exports an attribute "cfg" that is either a dict or a CfgNode - A string that can be parsed as valid YAML |
13,136 | import copy
import io
import logging
import os
from ast import literal_eval
import yaml
_VALID_TYPES = {tuple, list, str, int, float, bool}
class CfgNode(dict):
"""
CfgNode represents an internal node in the configuration tree. It's a simple
dict-like container that allows for attribute-based access to keys.
"""
IMMUTABLE = "__immutable__"
DEPRECATED_KEYS = "__deprecated_keys__"
RENAMED_KEYS = "__renamed_keys__"
def __init__(self, init_dict=None, key_list=None):
# Recursively convert nested dictionaries in init_dict into CfgNodes
init_dict = {} if init_dict is None else init_dict
key_list = [] if key_list is None else key_list
for k, v in init_dict.items():
if type(v) is dict:
# Convert dict to CfgNode
init_dict[k] = CfgNode(v, key_list=key_list + [k])
if '_parent_' in v.keys():
parent_ = CfgNode()
parent_.merge_from_file(v['_parent_'])
init_dict[k].pop('_parent_')
parent_.merge_from_other_cfg(init_dict[k])
init_dict[k] = parent_
if '_parents_' in v.keys():
parent_ = CfgNode()
for parent in v['_parents_']:
parent_.merge_from_file(parent)
init_dict[k].pop('_parents_')
parent_.merge_from_other_cfg(init_dict[k])
init_dict[k] = parent_
if '_const_' in v.keys() and v['_const_']:
init_dict[k].__dict__[CfgNode.IMMUTABLE] = True
init_dict[k].pop('_const_')
elif type(v) is str and v.startswith('_file_/'):
filename = v.replace('_file_/', '')
init_dict[k] = CfgNode()
init_dict[k].merge_from_file(filename)
else:
# Check for valid leaf type or nested CfgNode
_assert_with_logging(
_valid_type(v, allow_cfg_node=True),
"Key {} with value {} is not a valid type; valid types: {}".format(
".".join(key_list + [k]), type(v), _VALID_TYPES
),
)
super(CfgNode, self).__init__(init_dict)
# Manage if the CfgNode is frozen or not
self.__dict__[CfgNode.IMMUTABLE] = False
# Deprecated options
# If an option is removed from the code and you don't want to break existing
# yaml configs, you can add the full config key as a string to the set below.
self.__dict__[CfgNode.DEPRECATED_KEYS] = set()
# Renamed options
# If you rename a config option, record the mapping from the old name to the new
# name in the dictionary below. Optionally, if the type also changed, you can
# make the value a tuple that specifies first the renamed key and then
# instructions for how to edit the config file.
self.__dict__[CfgNode.RENAMED_KEYS] = {
# 'EXAMPLE.OLD.KEY': 'EXAMPLE.NEW.KEY', # Dummy example to follow
# 'EXAMPLE.OLD.KEY': ( # A more complex example to follow
# 'EXAMPLE.NEW.KEY',
# "Also convert to a tuple, e.g., 'foo' -> ('foo',) or "
# + "'foo:bar' -> ('foo', 'bar')"
# ),
}
def __getattr__(self, name):
if name in self:
return self[name]
else:
raise AttributeError(name)
def __setattr__(self, name, value):
if self.is_frozen():
raise AttributeError(
"Attempted to set {} to {}, but CfgNode is immutable".format(
name, value
)
)
_assert_with_logging(
name not in self.__dict__,
"Invalid attempt to modify internal CfgNode state: {}".format(name),
)
_assert_with_logging(
_valid_type(value, allow_cfg_node=True),
"Invalid type {} for key {}; valid types = {}".format(
type(value), name, _VALID_TYPES
),
)
self[name] = value
def __str__(self):
def _indent(s_, num_spaces):
s = s_.split("\n")
if len(s) == 1:
return s_
first = s.pop(0)
s = [(num_spaces * " ") + line for line in s]
s = "\n".join(s)
s = first + "\n" + s
return s
r = ""
s = []
if len(self.keys()) == 0:
return "{}"
for k, v in self.items():
seperator = "\n" if isinstance(v, CfgNode) and len(v.keys()) > 0 else " "
if isinstance(v, float):
attr_str = "{}:{}{:f}".format(str(k), seperator, v)
else:
attr_str = "{}:{}{}".format(str(k), seperator, str(v))
attr_str = _indent(attr_str, 4)
s.append(attr_str)
r += "\n".join(s)
return r
def __repr__(self):
return "{}({})".format(self.__class__.__name__, super(CfgNode, self).__repr__())
def dump(self):
"""Dump to a string."""
self_as_dict = _to_dict(self)
return yaml.safe_dump(self_as_dict)
def merge_from_file(self, cfg_filename):
"""Load a yaml config file and merge it this CfgNode."""
with open(cfg_filename, "r", encoding='utf8') as f:
cfg = load_cfg(f)
if 'parent' in cfg.keys():
if cfg.parent != 'none':
print('[Config] merge from parent file: {}'.format(cfg.parent))
self.merge_from_file(cfg.parent)
if 'parents' in cfg.keys():
for parent in cfg['parents']:
print('[Config] merge from parent file: {}'.format(parent))
self.merge_from_file(parent)
cfg.pop('parents')
self.merge_from_other_cfg(cfg)
def merge_from_other_cfg(self, cfg_other):
"""Merge `cfg_other` into this CfgNode."""
_merge_a_into_b(cfg_other, self, self, [])
def merge_from_list(self, cfg_list):
"""Merge config (keys, values) in a list (e.g., from command line) into
this CfgNode. For example, `cfg_list = ['FOO.BAR', 0.5]`.
"""
_assert_with_logging(
len(cfg_list) % 2 == 0,
"Override list has odd length: {}; it must be a list of pairs".format(
cfg_list
),
)
root = self
cfg_list_new = []
alias = self.pop('_alias_', {})
for i in range(len(cfg_list)//2):
if cfg_list[2*i] in alias.keys():
for name in alias[cfg_list[2*i]]:
cfg_list_new.append(name)
cfg_list_new.append(cfg_list[2*i+1])
else:
cfg_list_new.append(cfg_list[2*i])
cfg_list_new.append(cfg_list[2*i+1])
cfg_list = cfg_list_new
for full_key, v in zip(cfg_list[0::2], cfg_list[1::2]):
if root.key_is_deprecated(full_key):
continue
if root.key_is_renamed(full_key):
root.raise_key_rename_error(full_key)
key_list = full_key.split(".")
d = self
for subkey in key_list[:-1]:
_assert_with_logging(
subkey in d, "Non-existent key: {}".format(full_key)
)
d = d[subkey]
subkey = key_list[-1]
value = _decode_cfg_value(v)
if subkey not in d.keys():
logger.warning("Key is not in the template: {}".format(full_key))
d[subkey] = value
else:
value = _decode_cfg_value(v)
value = _check_and_coerce_cfg_value_type(value, d[subkey], subkey, full_key)
d[subkey] = value
def freeze(self):
"""Make this CfgNode and all of its children immutable."""
self._immutable(True)
def defrost(self):
"""Make this CfgNode and all of its children mutable."""
self._immutable(False)
def is_frozen(self):
"""Return mutability."""
return self.__dict__[CfgNode.IMMUTABLE]
def _immutable(self, is_immutable):
"""Set immutability to is_immutable and recursively apply the setting
to all nested CfgNodes.
"""
self.__dict__[CfgNode.IMMUTABLE] = is_immutable
# Recursively set immutable state
for v in self.__dict__.values():
if isinstance(v, CfgNode):
v._immutable(is_immutable)
for v in self.values():
if isinstance(v, CfgNode):
v._immutable(is_immutable)
def clone(self):
"""Recursively copy this CfgNode."""
return copy.deepcopy(self)
def register_deprecated_key(self, key):
"""Register key (e.g. `FOO.BAR`) a deprecated option. When merging deprecated
keys a warning is generated and the key is ignored.
"""
_assert_with_logging(
key not in self.__dict__[CfgNode.DEPRECATED_KEYS],
"key {} is already registered as a deprecated key".format(key),
)
self.__dict__[CfgNode.DEPRECATED_KEYS].add(key)
def register_renamed_key(self, old_name, new_name, message=None):
"""Register a key as having been renamed from `old_name` to `new_name`.
When merging a renamed key, an exception is thrown alerting to user to
the fact that the key has been renamed.
"""
_assert_with_logging(
old_name not in self.__dict__[CfgNode.RENAMED_KEYS],
"key {} is already registered as a renamed cfg key".format(old_name),
)
value = new_name
if message:
value = (new_name, message)
self.__dict__[CfgNode.RENAMED_KEYS][old_name] = value
def key_is_deprecated(self, full_key):
"""Test if a key is deprecated."""
if full_key in self.__dict__[CfgNode.DEPRECATED_KEYS]:
logger.warning("Deprecated config key (ignoring): {}".format(full_key))
return True
return False
def key_is_renamed(self, full_key):
"""Test if a key is renamed."""
return full_key in self.__dict__[CfgNode.RENAMED_KEYS]
def raise_key_rename_error(self, full_key):
new_key = self.__dict__[CfgNode.RENAMED_KEYS][full_key]
if isinstance(new_key, tuple):
msg = " Note: " + new_key[1]
new_key = new_key[0]
else:
msg = ""
raise KeyError(
"Key {} was renamed to {}; please update your config.{}".format(
full_key, new_key, msg
)
)
def _valid_type(value, allow_cfg_node=False):
return (type(value) in _VALID_TYPES) or (allow_cfg_node and type(value) == CfgNode)
def _assert_with_logging(cond, msg):
if not cond:
logger.debug(msg)
assert cond, msg
The provided code snippet includes necessary dependencies for implementing the `_to_dict` function. Write a Python function `def _to_dict(cfg_node)` to solve the following problem:
Recursively convert all CfgNode objects to dict objects.
Here is the function:
def _to_dict(cfg_node):
"""Recursively convert all CfgNode objects to dict objects."""
def convert_to_dict(cfg_node, key_list):
if not isinstance(cfg_node, CfgNode):
_assert_with_logging(
_valid_type(cfg_node),
"Key {} with value {} is not a valid type; valid types: {}".format(
".".join(key_list), type(cfg_node), _VALID_TYPES
),
)
return cfg_node
else:
cfg_dict = dict(cfg_node)
for k, v in cfg_dict.items():
cfg_dict[k] = convert_to_dict(v, key_list + [k])
return cfg_dict
return convert_to_dict(cfg_node, []) | Recursively convert all CfgNode objects to dict objects. |
13,137 | import copy
import io
import logging
import os
from ast import literal_eval
import yaml
class CfgNode(dict):
"""
CfgNode represents an internal node in the configuration tree. It's a simple
dict-like container that allows for attribute-based access to keys.
"""
IMMUTABLE = "__immutable__"
DEPRECATED_KEYS = "__deprecated_keys__"
RENAMED_KEYS = "__renamed_keys__"
def __init__(self, init_dict=None, key_list=None):
# Recursively convert nested dictionaries in init_dict into CfgNodes
init_dict = {} if init_dict is None else init_dict
key_list = [] if key_list is None else key_list
for k, v in init_dict.items():
if type(v) is dict:
# Convert dict to CfgNode
init_dict[k] = CfgNode(v, key_list=key_list + [k])
if '_parent_' in v.keys():
parent_ = CfgNode()
parent_.merge_from_file(v['_parent_'])
init_dict[k].pop('_parent_')
parent_.merge_from_other_cfg(init_dict[k])
init_dict[k] = parent_
if '_parents_' in v.keys():
parent_ = CfgNode()
for parent in v['_parents_']:
parent_.merge_from_file(parent)
init_dict[k].pop('_parents_')
parent_.merge_from_other_cfg(init_dict[k])
init_dict[k] = parent_
if '_const_' in v.keys() and v['_const_']:
init_dict[k].__dict__[CfgNode.IMMUTABLE] = True
init_dict[k].pop('_const_')
elif type(v) is str and v.startswith('_file_/'):
filename = v.replace('_file_/', '')
init_dict[k] = CfgNode()
init_dict[k].merge_from_file(filename)
else:
# Check for valid leaf type or nested CfgNode
_assert_with_logging(
_valid_type(v, allow_cfg_node=True),
"Key {} with value {} is not a valid type; valid types: {}".format(
".".join(key_list + [k]), type(v), _VALID_TYPES
),
)
super(CfgNode, self).__init__(init_dict)
# Manage if the CfgNode is frozen or not
self.__dict__[CfgNode.IMMUTABLE] = False
# Deprecated options
# If an option is removed from the code and you don't want to break existing
# yaml configs, you can add the full config key as a string to the set below.
self.__dict__[CfgNode.DEPRECATED_KEYS] = set()
# Renamed options
# If you rename a config option, record the mapping from the old name to the new
# name in the dictionary below. Optionally, if the type also changed, you can
# make the value a tuple that specifies first the renamed key and then
# instructions for how to edit the config file.
self.__dict__[CfgNode.RENAMED_KEYS] = {
# 'EXAMPLE.OLD.KEY': 'EXAMPLE.NEW.KEY', # Dummy example to follow
# 'EXAMPLE.OLD.KEY': ( # A more complex example to follow
# 'EXAMPLE.NEW.KEY',
# "Also convert to a tuple, e.g., 'foo' -> ('foo',) or "
# + "'foo:bar' -> ('foo', 'bar')"
# ),
}
def __getattr__(self, name):
if name in self:
return self[name]
else:
raise AttributeError(name)
def __setattr__(self, name, value):
if self.is_frozen():
raise AttributeError(
"Attempted to set {} to {}, but CfgNode is immutable".format(
name, value
)
)
_assert_with_logging(
name not in self.__dict__,
"Invalid attempt to modify internal CfgNode state: {}".format(name),
)
_assert_with_logging(
_valid_type(value, allow_cfg_node=True),
"Invalid type {} for key {}; valid types = {}".format(
type(value), name, _VALID_TYPES
),
)
self[name] = value
def __str__(self):
def _indent(s_, num_spaces):
s = s_.split("\n")
if len(s) == 1:
return s_
first = s.pop(0)
s = [(num_spaces * " ") + line for line in s]
s = "\n".join(s)
s = first + "\n" + s
return s
r = ""
s = []
if len(self.keys()) == 0:
return "{}"
for k, v in self.items():
seperator = "\n" if isinstance(v, CfgNode) and len(v.keys()) > 0 else " "
if isinstance(v, float):
attr_str = "{}:{}{:f}".format(str(k), seperator, v)
else:
attr_str = "{}:{}{}".format(str(k), seperator, str(v))
attr_str = _indent(attr_str, 4)
s.append(attr_str)
r += "\n".join(s)
return r
def __repr__(self):
return "{}({})".format(self.__class__.__name__, super(CfgNode, self).__repr__())
def dump(self):
"""Dump to a string."""
self_as_dict = _to_dict(self)
return yaml.safe_dump(self_as_dict)
    def merge_from_file(self, cfg_filename):
        """Load a yaml config file and merge it this CfgNode.

        A file may name one parent ('parent') or several ('parents'); parents
        are merged first, so this file's own values take precedence.
        """
        with open(cfg_filename, "r", encoding='utf8') as f:
            cfg = load_cfg(f)
        if 'parent' in cfg.keys():
            if cfg.parent != 'none':
                print('[Config] merge from parent file: {}'.format(cfg.parent))
                self.merge_from_file(cfg.parent)
        if 'parents' in cfg.keys():
            for parent in cfg['parents']:
                print('[Config] merge from parent file: {}'.format(parent))
                self.merge_from_file(parent)
            cfg.pop('parents')
        # NOTE(review): 'parent' is not popped before the final merge (unlike
        # 'parents'), so it survives as a key of the merged node — confirm intended.
        self.merge_from_other_cfg(cfg)
    def merge_from_other_cfg(self, cfg_other):
        """Merge `cfg_other` into this CfgNode."""
        # self is passed twice: once as the merge target, once as the root used
        # for deprecated/renamed key lookups during the recursive merge.
        _merge_a_into_b(cfg_other, self, self, [])
    def merge_from_list(self, cfg_list):
        """Merge config (keys, values) in a list (e.g., from command line) into
        this CfgNode. For example, `cfg_list = ['FOO.BAR', 0.5]`.

        Keys listed in the node's `_alias_` mapping are expanded into one
        (full_key, value) pair per aliased target before merging.
        """
        _assert_with_logging(
            len(cfg_list) % 2 == 0,
            "Override list has odd length: {}; it must be a list of pairs".format(
                cfg_list
            ),
        )
        # root is used only for deprecated/renamed bookkeeping lookups
        root = self
        cfg_list_new = []
        # `_alias_` maps a short option name to a list of full dotted keys;
        # note it is popped, so aliases apply to this call only.
        alias = self.pop('_alias_', {})
        for i in range(len(cfg_list)//2):
            if cfg_list[2*i] in alias.keys():
                # expand the alias: each target key receives the same value
                for name in alias[cfg_list[2*i]]:
                    cfg_list_new.append(name)
                    cfg_list_new.append(cfg_list[2*i+1])
            else:
                cfg_list_new.append(cfg_list[2*i])
                cfg_list_new.append(cfg_list[2*i+1])
        cfg_list = cfg_list_new
        for full_key, v in zip(cfg_list[0::2], cfg_list[1::2]):
            if root.key_is_deprecated(full_key):
                continue
            if root.key_is_renamed(full_key):
                root.raise_key_rename_error(full_key)
            # walk down the dotted path to the parent node of the final key
            key_list = full_key.split(".")
            d = self
            for subkey in key_list[:-1]:
                _assert_with_logging(
                    subkey in d, "Non-existent key: {}".format(full_key)
                )
                d = d[subkey]
            subkey = key_list[-1]
            value = _decode_cfg_value(v)
            if subkey not in d.keys():
                # unknown leaf keys are inserted with a warning rather than rejected
                logger.warning("Key is not in the template: {}".format(full_key))
                d[subkey] = value
            else:
                value = _decode_cfg_value(v)
                # coerce the new value to the existing value's type where sensible
                value = _check_and_coerce_cfg_value_type(value, d[subkey], subkey, full_key)
                d[subkey] = value
    def freeze(self):
        """Make this CfgNode and all of its children immutable."""
        self._immutable(True)
    def defrost(self):
        """Make this CfgNode and all of its children mutable."""
        self._immutable(False)
    def is_frozen(self):
        """Return mutability."""
        # The flag lives in __dict__ (not in the dict payload) so it can never
        # collide with a config key.
        return self.__dict__[CfgNode.IMMUTABLE]
def _immutable(self, is_immutable):
"""Set immutability to is_immutable and recursively apply the setting
to all nested CfgNodes.
"""
self.__dict__[CfgNode.IMMUTABLE] = is_immutable
# Recursively set immutable state
for v in self.__dict__.values():
if isinstance(v, CfgNode):
v._immutable(is_immutable)
for v in self.values():
if isinstance(v, CfgNode):
v._immutable(is_immutable)
    def clone(self):
        """Recursively copy this CfgNode."""
        # deepcopy also copies the immutability flag and all nested nodes
        return copy.deepcopy(self)
    def register_deprecated_key(self, key):
        """Register key (e.g. `FOO.BAR`) a deprecated option. When merging deprecated
        keys a warning is generated and the key is ignored.
        """
        # refuse double registration to surface copy-paste mistakes early
        _assert_with_logging(
            key not in self.__dict__[CfgNode.DEPRECATED_KEYS],
            "key {} is already registered as a deprecated key".format(key),
        )
        self.__dict__[CfgNode.DEPRECATED_KEYS].add(key)
def register_renamed_key(self, old_name, new_name, message=None):
"""Register a key as having been renamed from `old_name` to `new_name`.
When merging a renamed key, an exception is thrown alerting to user to
the fact that the key has been renamed.
"""
_assert_with_logging(
old_name not in self.__dict__[CfgNode.RENAMED_KEYS],
"key {} is already registered as a renamed cfg key".format(old_name),
)
value = new_name
if message:
value = (new_name, message)
self.__dict__[CfgNode.RENAMED_KEYS][old_name] = value
def key_is_deprecated(self, full_key):
"""Test if a key is deprecated."""
if full_key in self.__dict__[CfgNode.DEPRECATED_KEYS]:
logger.warning("Deprecated config key (ignoring): {}".format(full_key))
return True
return False
    def key_is_renamed(self, full_key):
        """Test if a key is renamed."""
        # full_key is the dotted path, e.g. 'MODEL.BACKBONE'
        return full_key in self.__dict__[CfgNode.RENAMED_KEYS]
def raise_key_rename_error(self, full_key):
new_key = self.__dict__[CfgNode.RENAMED_KEYS][full_key]
if isinstance(new_key, tuple):
msg = " Note: " + new_key[1]
new_key = new_key[0]
else:
msg = ""
raise KeyError(
"Key {} was renamed to {}; please update your config.{}".format(
full_key, new_key, msg
)
)
def _decode_cfg_value(v):
    """Decodes a raw config value (e.g., from a yaml config files or command
    line argument) into a Python object.
    """
    # yaml mappings become nested CfgNodes
    if isinstance(v, dict):
        return CfgNode(v)
    # all remaining processing only applies to strings
    if not isinstance(v, str):
        return v
    # literal_eval turns "1" / "[1, 2]" / "True" into real objects. Bare words
    # (v = 'foo', which yaml hands over unquoted) raise ValueError and paths
    # (v = 'foo/bar') raise SyntaxError — both are kept verbatim as strings.
    try:
        return literal_eval(v)
    except (ValueError, SyntaxError):
        return v
def _check_and_coerce_cfg_value_type(replacement, original, key, full_key):
    """Checks that `replacement`, which is intended to replace `original` is of
    the right type. The type is correct if it matches exactly or is one of a few
    cases in which the type can be easily coerced.
    """
    original_type = type(original)
    replacement_type = type(replacement)
    # exact type match needs no coercion
    if replacement_type == original_type:
        return replacement
    # allowed (from_type, to_type) coercions between closely related types
    casts = [(tuple, list), (list, tuple), (int, float), (float, int)]
    # For py2: allow converting from str (bytes) to a unicode string
    try:
        casts.append((str, unicode))  # noqa: F821
    except Exception:
        pass
    for (from_type, to_type) in casts:
        if replacement_type == from_type and original_type == to_type:
            return to_type(replacement)
    raise ValueError(
        "Type mismatch ({} vs. {}) with values ({} vs. {}) for config "
        "key: {}".format(
            original_type, replacement_type, original, replacement, full_key
        )
    )
def _assert_with_logging(cond, msg):
    # Log the failure message at debug level before the assert fires, so the
    # reason is also captured in logs (not only in the AssertionError).
    if not cond:
        logger.debug(msg)
    assert cond, msg
The provided code snippet includes necessary dependencies for implementing the `_merge_a_into_b` function. Write a Python function `def _merge_a_into_b(a, b, root, key_list)` to solve the following problem:
Merge config dictionary a into config dictionary b, clobbering the options in b whenever they are also specified in a.
Here is the function:
def _merge_a_into_b(a, b, root, key_list):
    """Merge config dictionary a into config dictionary b, clobbering the
    options in b whenever they are also specified in a.

    `root` is the top-level node used for deprecated/renamed key lookups;
    `key_list` is the dotted-path prefix reached so far (for error messages).
    """
    _assert_with_logging(
        isinstance(a, CfgNode),
        "`a` (cur type {}) must be an instance of {}".format(type(a), CfgNode),
    )
    _assert_with_logging(
        isinstance(b, CfgNode),
        "`b` (cur type {}) must be an instance of {}".format(type(b), CfgNode),
    )
    # `_no_merge_` wipes the target first so `a` fully replaces b's content
    if '_no_merge_' in a.keys() and a['_no_merge_']:
        b.clear()
    # TODO: b sometimes seems to be a copy of a and sometimes not — confirm.
    if '_no_merge_' in a.keys():
        a.pop('_no_merge_')
    for k, v_ in a.items():
        full_key = ".".join(key_list + [k])
        # a must specify keys that are in b
        if k not in b:
            if root.key_is_deprecated(full_key):
                continue
            elif root.key_is_renamed(full_key):
                root.raise_key_rename_error(full_key)
            else:
                # unknown key: insert it (after decoding) rather than reject
                v = copy.deepcopy(v_)
                v = _decode_cfg_value(v)
                b.update({k: v})
        else:
            v = copy.deepcopy(v_)
            v = _decode_cfg_value(v)
            v = _check_and_coerce_cfg_value_type(v, b[k], k, full_key)
            # Recursively merge dicts
            if isinstance(v, CfgNode):
                try:
                    _merge_a_into_b(v, b[k], root, key_list + [k])
                except BaseException:
                    raise
            else:
                b[k] = v
13,138 | from .optimize_simple import _optimizeSMPL, deepcopy_tensor, get_prepare_smplx, dict_of_tensor_to_numpy
from .lossfactory import LossRepro, LossInit, LossSmoothBody, LossSmoothPoses, LossSmoothBodyMulti, LossSmoothPosesMulti
from ..dataset.mirror import flipSMPLPoses, flipPoint2D, flipSMPLParams
import torch
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `calc_mirror_transform` function. Write a Python function `def calc_mirror_transform(m_)` to solve the following problem:
From mirror vector to mirror matrix Args: m (bn, 4): (a, b, c, d) Returns: M: (bn, 3, 4)
Here is the function:
def calc_mirror_transform(m_):
    """ From mirror vector to mirror matrix
    Args:
        m_ (bn, 4): plane coefficients (a, b, c, d)
    Returns:
        M: (bn, 3, 4) reflection transform [I - 2nn^T | -2dn]
    """
    # normalize the plane normal; d is used as-is
    # NOTE(review): d is not rescaled by the norm — assumes callers pass a
    # consistently scaled plane; confirm.
    normal = m_[:, :3] / torch.norm(m_[:, :3], dim=1, keepdim=True)
    d = m_[:, 3]
    M = torch.zeros((normal.shape[0], 3, 4), device=normal.device)
    # Householder reflection about the plane: R = I - 2 n n^T, t = -2 d n
    M[:, :3, :3] = torch.eye(3, device=normal.device) - 2 * torch.einsum(
        'bi,bj->bij', normal, normal)
    M[:, :3, 3] = -2 * normal * d.unsqueeze(1)
    return M
13,139 | from .optimize_simple import _optimizeSMPL, deepcopy_tensor, get_prepare_smplx, dict_of_tensor_to_numpy
from .lossfactory import LossRepro, LossInit, LossSmoothBody, LossSmoothPoses, LossSmoothBodyMulti, LossSmoothPosesMulti
from ..dataset.mirror import flipSMPLPoses, flipPoint2D, flipSMPLParams
import torch
import numpy as np
def flipSMPLParamsV(params, mirror):
    """Stack each SMPL parameter with its mirrored counterpart.

    `shapes` is shared between the two people, so it is kept as-is; every
    other parameter is vertically stacked as [real; mirrored].
    """
    mirrored = flipSMPLParams(params, mirror)
    stacked = {}
    for key, val in params.items():
        stacked[key] = val if key == 'shapes' else np.vstack([val, mirrored[key]])
    return stacked
class LossKeypointsMirror2DDirect(LossKeypointsMirror2D):
    """Mirror reprojection loss that optimizes the mirror plane itself.

    The mirror is a (nFrames, 4) tensor of plane coefficients (a, b, c, d);
    the mirrored person is generated on the fly by reflecting the real
    person's keypoints, instead of being optimized as a separate person.
    """
    def __init__(self, keypoints2d, bboxes, Pall, normal=None, cfg=None, mirror=None) -> None:
        super().__init__(keypoints2d, bboxes, Pall, cfg)
        nFrames = 1
        if mirror is None:
            self.mirror = torch.zeros([nFrames, 4], device=cfg.device)
            if normal is not None:
                self.mirror[:, :3] = torch.Tensor(normal).to(cfg.device)
            else:
                # rough initialization: normal n = (0, 0, 1) with d = -10,
                # i.e. the plane z = 10
                self.mirror[:, 2] = 1.
                self.mirror[:, 3] = -10.
        else:
            self.mirror = torch.Tensor(mirror).to(cfg.device)
        self.norm = 'l2'
    def __call__(self, kpts_est, **kwargs):
        "reprojection error for direct mirror"
        # kpts_est: (nFrames, 25, 3)
        M = calc_mirror_transform(self.mirror)
        # broadcast a single mirror over all frames if needed
        if M.shape[0] != kpts_est.shape[0]:
            M = M.expand(kpts_est.shape[0], -1, -1)
        homo = torch.ones((kpts_est.shape[0], kpts_est.shape[1], 1), device=kpts_est.device)
        kpts_homo = torch.cat([kpts_est, homo], dim=2)
        # reflect across the plane, then swap left/right joint labels
        kpts_mirror = flipPoint2D(torch.bmm(M, kpts_homo.transpose(1, 2)).transpose(1, 2))
        # NOTE: keep this concatenation order (real first, mirrored second)
        # consistent with the stacking convention used for video sequences.
        kpts_new = torch.cat([kpts_est, kpts_mirror])
        # score both halves with the parent's reprojection loss
        return super().__call__(kpts_new)
    def __str__(self) -> str:
        return 'Loss function for Reprojection error of Mirror '
def deepcopy_tensor(body_params):
    """Clone every tensor value in place so later edits don't alias the originals."""
    for key, val in body_params.items():
        body_params[key] = val.clone()
    return body_params
def dict_of_tensor_to_numpy(body_params):
    """Return a new dict with every tensor detached, moved to CPU and converted to numpy."""
    converted = {}
    for key, val in body_params.items():
        converted[key] = val.detach().cpu().numpy()
    return converted
def get_prepare_smplx(body_params, cfg, nFrames):
    """Return a closure that re-packs `poses` before each forward pass: the
    global orientation (first 3 dims) is always zeroed, and the hand dims are
    zeroed for smplh/smplx models when hands are not optimized."""
    zero_pose = torch.zeros((nFrames, 3), device=cfg.device)
    if not cfg.OPT_HAND and cfg.model in ['smplh', 'smplx']:
        zero_pose_hand = torch.zeros((nFrames, body_params['poses'].shape[1] - 66), device=cfg.device)
    elif cfg.OPT_HAND and not cfg.OPT_EXPR and cfg.model == 'smplx':
        # NOTE(review): zero_pose_face is computed but never used by pack();
        # face dims are currently left untouched — confirm this is intended.
        zero_pose_face = torch.zeros((nFrames, body_params['poses'].shape[1] - 78), device=cfg.device)
    def pack(new_params):
        # rebuild the pose vector with the zeroed segments spliced in
        if not cfg.OPT_HAND and cfg.model in ['smplh', 'smplx']:
            new_params['poses'] = torch.cat([zero_pose, new_params['poses'][:, 3:66], zero_pose_hand], dim=1)
        else:
            new_params['poses'] = torch.cat([zero_pose, new_params['poses'][:, 3:]], dim=1)
        return new_params
    return pack
def _optimizeSMPL(body_model, body_params, prepare_funcs, postprocess_funcs,
                  loss_funcs, extra_params=None,
                  weight_loss={}, cfg=None):
    """ A common interface for different optimization.

    Args:
        body_model (SMPL model)
        body_params (DictParam): poses(1, 72), shapes(1, 10), Rh(1, 3), Th(1, 3)
        prepare_funcs (List): functions for prepare
        postprocess_funcs (List): functions applied to body_params after fitting
        loss_funcs (Dict): functions for loss
        extra_params (List|None): extra tensors to optimize jointly
        weight_loss (Dict): weight
        cfg (Config): Config Node controlling running mode

    Returns:
        body_params after L-BFGS fitting and post-processing.
    """
    # NOTE: weight_loss={} is a mutable default, but it is only read here.
    # Keep only the losses that actually carry a positive weight.
    loss_funcs = {key: val for key, val in loss_funcs.items() if key in weight_loss.keys() and weight_loss[key] > 0.}
    if cfg.verbose:
        print('Loss Functions: ')
        for key, func in loss_funcs.items():
            print(' -> {:15s}: {}'.format(key, func.__doc__))
    opt_params = get_optParams(body_params, cfg, extra_params)
    grad_require(opt_params, True)
    optimizer = LBFGS(opt_params,
                      line_search_fn='strong_wolfe')
    PRINT_STEP = 100
    records = []
    def closure(debug=False):
        # 0. Prepare body parameters => new_params
        optimizer.zero_grad()
        new_params = body_params.copy()
        for func in prepare_funcs:
            new_params = func(new_params)
        # 1. Compute keypoints => kpts_est
        kpts_est = body_model(return_verts=False, return_tensor=True, **new_params)
        # 2. Compute loss => loss_dict
        loss_dict = {key:func(kpts_est=kpts_est, **new_params) for key, func in loss_funcs.items()}
        # 3. Summary and log
        cnt = len(records)
        if cfg.verbose and cnt % PRINT_STEP == 0:
            print('{:-6d}: '.format(cnt) + ' '.join([key + ' %f'%(loss_dict[key].item()*weight_loss[key])
                for key in loss_dict.keys() if weight_loss[key]>0]))
        loss = sum([loss_dict[key]*weight_loss[key]
                    for key in loss_dict.keys()])
        records.append(loss.item())
        if debug:
            return loss_dict
        loss.backward()
        return loss
    # run L-BFGS until the monitor's relative-change criterion (ftol) is met
    fitting = FittingMonitor(ftol=1e-4)
    final_loss = fitting.run_fitting(optimizer, closure, opt_params)
    fitting.close()
    grad_require(opt_params, False)
    # one extra forward pass (no backward) to report the per-term losses
    loss_dict = closure(debug=True)
    if cfg.verbose:
        print('{:-6d}: '.format(len(records)) + ' '.join([key + ' %f'%(loss_dict[key].item()*weight_loss[key])
            for key in loss_dict.keys() if weight_loss[key]>0]))
    loss_dict = {key:val.item() for key, val in loss_dict.items()}
    # post-process the body_parameters
    for func in postprocess_funcs:
        body_params = func(body_params)
    return body_params
class LossInit:
    """Keep the optimized parameters close to their initial values."""

    def __init__(self, params, cfg) -> None:
        self.norm = 'l2'
        device = cfg.device
        self.poses = torch.Tensor(params['poses']).to(device)
        self.shapes = torch.Tensor(params['shapes']).to(device)

    def _sq_distance(self, current, target):
        # summed squared difference, averaged over the leading (frame) axis
        return torch.sum((current - target) ** 2) / current.shape[0]

    def init_poses(self, poses, **kwargs):
        "distance to poses_0"
        if self.norm == 'l2':
            return self._sq_distance(poses, self.poses)

    def init_shapes(self, shapes, **kwargs):
        "distance to shapes_0"
        if self.norm == 'l2':
            return self._sq_distance(shapes, self.shapes)
The provided code snippet includes necessary dependencies for implementing the `optimizeMirrorDirect` function. Write a Python function `def optimizeMirrorDirect(body_model, params, bboxes, keypoints2d, Pall, normal, weight, cfg)` to solve the following problem:
simple function for optimizing mirror # 先写图片的 Args: body_model (SMPL model) params (DictParam): poses(2, 72), shapes(1, 10), Rh(2, 3), Th(2, 3) bboxes (nFrames, nViews, nJoints, 4): 2D bbox of each view,输入的时候是按照时序叠起来的 keypoints2d (nFrames, nViews, nJoints, 4): 2D keypoints of each view,输入的时候是按照时序叠起来的 weight (Dict): string:float cfg (Config): Config Node controling running mode
Here is the function:
def optimizeMirrorDirect(body_model, params, bboxes, keypoints2d, Pall, normal, weight, cfg):
    """
    simple function for optimizing mirror
    (image-only version for now; the mirror plane is optimized directly)
    Args:
        body_model (SMPL model)
        params (DictParam): poses(2, 72), shapes(1, 10), Rh(2, 3), Th(2, 3)
        bboxes (nFrames, nViews, nJoints, 4): 2D bbox of each view, stacked along time
        keypoints2d (nFrames, nViews, nJoints, 4): 2D keypoints of each view, stacked along time
        weight (Dict): string:float
        cfg (Config): Config Node controlling running mode
    """
    # NOTE(review): shape[:2] is unpacked as (nViews, nFrames), so the shapes
    # documented above look transposed — confirm against the callers.
    nViews, nFrames = keypoints2d.shape[:2]
    assert nViews == 2, 'Please make sure that there exists only 2 views'
    # keep the parameters of the real person
    for key in ['poses', 'Rh', 'Th']:
        # select the parameters of first person
        params[key] = params[key][:nFrames]
    prepare_funcs = [
        deepcopy_tensor,
        get_prepare_smplx(params, cfg, nFrames),
    ]
    # the mirror plane itself is an extra optimizable parameter of the loss
    loss_repro = LossKeypointsMirror2DDirect(keypoints2d, bboxes, Pall, normal, cfg,
        mirror=params.pop('mirror', None))
    loss_funcs = {
        'k2d': loss_repro,
        'init_poses': LossInit(params, cfg).init_poses,
        'init_shapes': LossInit(params, cfg).init_shapes,
    }
    postprocess_funcs = [
        dict_of_tensor_to_numpy,
    ]
    params = _optimizeSMPL(body_model, params, prepare_funcs, postprocess_funcs, loss_funcs,
        extra_params=[loss_repro.mirror],
        weight_loss=weight, cfg=cfg)
    # recover the mirrored person's parameters from the fitted mirror plane
    mirror = loss_repro.mirror.detach().cpu().numpy()
    params = flipSMPLParamsV(params, mirror)
    params['mirror'] = mirror
    return params
13,140 | from .optimize_simple import _optimizeSMPL, deepcopy_tensor, get_prepare_smplx, dict_of_tensor_to_numpy
from .lossfactory import LossRepro, LossInit, LossSmoothBody, LossSmoothPoses, LossSmoothBodyMulti, LossSmoothPosesMulti
from ..dataset.mirror import flipSMPLPoses, flipPoint2D, flipSMPLParams
import torch
import numpy as np
def flipSMPLPosesV(params, reverse=False):
    """Copy flipped poses between the two stacked halves of `params['poses']`.

    The first half holds the person outside the mirror, the second half the
    person inside. reverse=False writes outside -> mirror; reverse=True
    writes mirror -> outside.
    """
    half = params['poses'].shape[0] // 2
    if reverse:
        src, dst = slice(half, None), slice(None, half)
    else:
        src, dst = slice(None, half), slice(half, None)
    params['poses'][dst] = flipSMPLPoses(params['poses'][src])
    return params
class LossKeypointsMirror2D(LossRepro):
    """2D reprojection loss for a person and their mirrored counterpart.

    keypoints2d is stacked as (nViews=2, nFrames, nJoints, 3); estimated 3D
    joints are projected with the single 3x4 camera matrix `Pall`.
    """
    def __init__(self, keypoints2d, bboxes, Pall, cfg) -> None:
        # self.keypoints2d / self.conf are set by LossRepro's __init__ —
        # assumed from the (bboxes, keypoints2d) argument order; confirm there.
        super().__init__(bboxes, keypoints2d, cfg)
        self.Pall = torch.Tensor(Pall).to(cfg.device)
        self.nJoints = keypoints2d.shape[-2]
        self.nViews, self.nFrames = self.keypoints2d.shape[0], self.keypoints2d.shape[1]
        # reusable homogeneous-1 column for projecting nViews*nFrames frames
        self.kpt_homo = torch.ones((keypoints2d.shape[0]*keypoints2d.shape[1], keypoints2d.shape[2], 1), device=cfg.device)
        self.norm = 'l2'
    def residual(self, kpts_est):
        # kpts_est: (2xnFrames, nJoints, 3)
        kpts_homo = torch.cat([kpts_est[..., :self.nJoints, :], self.kpt_homo], dim=2)
        # pinhole projection: x_img = (P X)_{xy} / (P X)_z
        point_cam = torch.einsum('ab,fnb->fna', self.Pall, kpts_homo)
        img_points = point_cam[..., :2]/point_cam[..., 2:]
        img_points = img_points.view(self.nViews, self.nFrames, self.nJoints, 2)
        # confidence-weighted pixel residual against the 2D detections
        residual = (img_points - self.keypoints2d) * self.conf
        return residual
    def __call__(self, kpts_est, **kwargs):
        "reprojection error for mirror"
        # kpts_est: (2xnFrames, 25, 3)
        kpts_homo = torch.cat([kpts_est[..., :self.nJoints, :], self.kpt_homo], dim=2)
        point_cam = torch.einsum('ab,fnb->fna', self.Pall, kpts_homo)
        img_points = point_cam[..., :2]/point_cam[..., 2:]
        img_points = img_points.view(self.nViews, self.nFrames, self.nJoints, 2)
        # average the parent (LossRepro) loss over views and frames
        return super().__call__(img_points)/self.nViews/self.nFrames
    def __str__(self) -> str:
        return 'Loss function for Reprojection error of Mirror'
class LossMirrorSymmetry:
    """Geometric consistency losses between a person and their mirror image.

    All three terms operate on the per-joint correspondence vectors
    d_j = flip(inside_j) - outside_j joining matched joints across the mirror.
    """
    def __init__(self, N_JOINTS=25, normal=None, cfg=None) -> None:
        # all ordered joint index pairs (idx0, idx1); idx00/idx11 exclude i == j
        idx0, idx1 = np.meshgrid(np.arange(N_JOINTS), np.arange(N_JOINTS))
        idx0, idx1 = idx0.reshape(-1), idx1.reshape(-1)
        idx_diff = np.where(idx0!=idx1)[0]
        self.idx00, self.idx11 = idx0[idx_diff], idx1[idx_diff]
        self.N_JOINTS = N_JOINTS
        self.idx0 = idx0
        self.idx1 = idx1
        if normal is not None:
            self.normal = torch.Tensor(normal).to(cfg.device)
            # assumes `normal` is (1, 1, 3) so expand gives (1, N_JOINTS, 3) — TODO confirm
            self.normal = self.normal.expand(-1, N_JOINTS, -1)
        else:
            self.normal = None
        self.device = cfg.device
    def parallel_mirror(self, kpts_est, **kwargs):
        "encourage parallel to mirror"
        # kpts_est: (nFramesxnViews, nJoints, 3)
        if self.normal is None:
            return torch.tensor(0.).to(self.device)
        nFrames = kpts_est.shape[0] // 2
        kpts_out = kpts_est[:nFrames, ...]
        kpts_in = kpts_est[nFrames:, ...]
        kpts_in = flipPoint2D(kpts_in)
        direct = kpts_in - kpts_out
        direct_norm = direct/torch.norm(direct, dim=-1, keepdim=True)
        # the cross product vanishes when a correspondence vector is parallel
        # to the given mirror normal
        loss = torch.sum(torch.norm(torch.cross(self.normal, direct_norm), dim=2))
        return loss / nFrames / kpts_est.shape[1]
    def parallel_self(self, kpts_est, **kwargs):
        "encourage parallel to self"
        # kpts_est: (nFramesxnViews, nJoints, 3)
        nFrames = kpts_est.shape[0] // 2
        kpts_out = kpts_est[:nFrames, ...]
        kpts_in = kpts_est[nFrames:, ...]
        kpts_in = flipPoint2D(kpts_in)
        direct = kpts_in - kpts_out
        direct_norm = direct/torch.norm(direct, dim=-1, keepdim=True)
        # all joints' correspondence vectors should share a single direction
        loss = torch.sum(torch.norm(
            torch.cross(direct_norm[:, self.idx0, :], direct_norm[:, self.idx1, :]), dim=2))/self.idx0.shape[0]
        return loss / nFrames
    def vertical_self(self, kpts_est, **kwargs):
        "encourage vertical to self"
        # kpts_est: (nFramesxnViews, nJoints, 3)
        nFrames = kpts_est.shape[0] // 2
        kpts_out = kpts_est[:nFrames, ...]
        kpts_in = kpts_est[nFrames:, ...]
        kpts_in = flipPoint2D(kpts_in)
        direct = kpts_in - kpts_out
        direct_norm = direct/torch.norm(direct, dim=-1, keepdim=True)
        # midpoints of matched joints lie on the mirror plane, so vectors
        # between midpoints should be orthogonal to the correspondence vectors
        mid_point = (kpts_in + kpts_out)/2
        inner = torch.abs(torch.sum((mid_point[:, self.idx00, :] - mid_point[:, self.idx11, :])*direct_norm[:, self.idx11, :], dim=2))
        loss = torch.sum(inner)/self.idx00.shape[0]
        return loss / nFrames
    def __str__(self) -> str:
        return 'Loss function for Mirror Symmetry'
def viewSelection(params, body_model, loss_repro, nFrames):
    """Per frame, decide whether the outside pose or the in-mirror pose
    explains the 2D detections better, and overwrite the worse half of
    `params['poses']` with the flipped version of the better one."""
    # view selection
    params_inp = {key: val.copy() for key, val in params.items()}
    params_inp = flipSMPLPosesV(params_inp)
    kpts_est = body_model(return_verts=False, return_tensor=True, **params_inp)
    residual = loss_repro.residual(kpts_est)
    # reprojection error when the mirrored half is derived from the outside pose
    res_i = torch.norm(residual, dim=-1).mean(dim=-1).sum(dim=0)
    params_rev = {key: val.copy() for key, val in params.items()}
    params_rev = flipSMPLPosesV(params_rev, reverse=True)
    kpts_est = body_model(return_verts=False, return_tensor=True, **params_rev)
    residual = loss_repro.residual(kpts_est)
    # reprojection error when the outside half is derived from the mirrored pose
    res_o = torch.norm(residual, dim=-1).mean(dim=-1).sum(dim=0)
    for nf in range(res_i.shape[0]):
        if res_i[nf] < res_o[nf]:  # the outside pose fits better: propagate it
            params['poses'][[nFrames+nf]] = flipSMPLPoses(params['poses'][[nf]])
        else:
            params['poses'][[nf]] = flipSMPLPoses(params['poses'][[nFrames+nf]])
    return params
def deepcopy_tensor(body_params):
    """Replace each tensor value with a clone (in place) to avoid aliasing."""
    for name, tensor in body_params.items():
        body_params[name] = tensor.clone()
    return body_params
def dict_of_tensor_to_numpy(body_params):
    """Build a new dict whose tensor values are detached CPU numpy arrays."""
    result = {}
    for name, tensor in body_params.items():
        result[name] = tensor.detach().cpu().numpy()
    return result
def get_prepare_smplx(body_params, cfg, nFrames):
    """Return a closure that re-packs `poses` before each forward pass: the
    global orientation (first 3 dims) is always zeroed, and the hand dims are
    zeroed for smplh/smplx models when hands are not optimized.

    NOTE(review): this file contains duplicate definitions of this helper.
    """
    zero_pose = torch.zeros((nFrames, 3), device=cfg.device)
    if not cfg.OPT_HAND and cfg.model in ['smplh', 'smplx']:
        zero_pose_hand = torch.zeros((nFrames, body_params['poses'].shape[1] - 66), device=cfg.device)
    elif cfg.OPT_HAND and not cfg.OPT_EXPR and cfg.model == 'smplx':
        # NOTE(review): zero_pose_face is computed but never used by pack();
        # face dims are currently left untouched — confirm this is intended.
        zero_pose_face = torch.zeros((nFrames, body_params['poses'].shape[1] - 78), device=cfg.device)
    def pack(new_params):
        # rebuild the pose vector with the zeroed segments spliced in
        if not cfg.OPT_HAND and cfg.model in ['smplh', 'smplx']:
            new_params['poses'] = torch.cat([zero_pose, new_params['poses'][:, 3:66], zero_pose_hand], dim=1)
        else:
            new_params['poses'] = torch.cat([zero_pose, new_params['poses'][:, 3:]], dim=1)
        return new_params
    return pack
def _optimizeSMPL(body_model, body_params, prepare_funcs, postprocess_funcs,
                  loss_funcs, extra_params=None,
                  weight_loss={}, cfg=None):
    """ A common interface for different optimization.

    Args:
        body_model (SMPL model)
        body_params (DictParam): poses(1, 72), shapes(1, 10), Rh(1, 3), Th(1, 3)
        prepare_funcs (List): functions for prepare
        postprocess_funcs (List): functions applied to body_params after fitting
        loss_funcs (Dict): functions for loss
        extra_params (List|None): extra tensors to optimize jointly
        weight_loss (Dict): weight
        cfg (Config): Config Node controlling running mode

    Returns:
        body_params after L-BFGS fitting and post-processing.

    NOTE(review): this file contains duplicate definitions of this helper.
    """
    # NOTE: weight_loss={} is a mutable default, but it is only read here.
    # Keep only the losses that actually carry a positive weight.
    loss_funcs = {key: val for key, val in loss_funcs.items() if key in weight_loss.keys() and weight_loss[key] > 0.}
    if cfg.verbose:
        print('Loss Functions: ')
        for key, func in loss_funcs.items():
            print(' -> {:15s}: {}'.format(key, func.__doc__))
    opt_params = get_optParams(body_params, cfg, extra_params)
    grad_require(opt_params, True)
    optimizer = LBFGS(opt_params,
                      line_search_fn='strong_wolfe')
    PRINT_STEP = 100
    records = []
    def closure(debug=False):
        # 0. Prepare body parameters => new_params
        optimizer.zero_grad()
        new_params = body_params.copy()
        for func in prepare_funcs:
            new_params = func(new_params)
        # 1. Compute keypoints => kpts_est
        kpts_est = body_model(return_verts=False, return_tensor=True, **new_params)
        # 2. Compute loss => loss_dict
        loss_dict = {key:func(kpts_est=kpts_est, **new_params) for key, func in loss_funcs.items()}
        # 3. Summary and log
        cnt = len(records)
        if cfg.verbose and cnt % PRINT_STEP == 0:
            print('{:-6d}: '.format(cnt) + ' '.join([key + ' %f'%(loss_dict[key].item()*weight_loss[key])
                for key in loss_dict.keys() if weight_loss[key]>0]))
        loss = sum([loss_dict[key]*weight_loss[key]
                    for key in loss_dict.keys()])
        records.append(loss.item())
        if debug:
            return loss_dict
        loss.backward()
        return loss
    # run L-BFGS until the monitor's relative-change criterion (ftol) is met
    fitting = FittingMonitor(ftol=1e-4)
    final_loss = fitting.run_fitting(optimizer, closure, opt_params)
    fitting.close()
    grad_require(opt_params, False)
    # one extra forward pass (no backward) to report the per-term losses
    loss_dict = closure(debug=True)
    if cfg.verbose:
        print('{:-6d}: '.format(len(records)) + ' '.join([key + ' %f'%(loss_dict[key].item()*weight_loss[key])
            for key in loss_dict.keys() if weight_loss[key]>0]))
    loss_dict = {key:val.item() for key, val in loss_dict.items()}
    # post-process the body_parameters
    for func in postprocess_funcs:
        body_params = func(body_params)
    return body_params
class LossSmoothBodyMulti(LossSmoothBody):
    """Temporal body smoothness applied independently to each segment.

    `dimGroups` gives the frame boundaries of the segments (e.g. the real and
    mirrored halves) inside the stacked keypoint batch.
    """
    def __init__(self, dimGroups, cfg) -> None:
        super().__init__(cfg)
        self.cfg = cfg
        self.dimGroups = dimGroups
    def __call__(self, kpts_est, **kwargs):
        "Smooth body"
        assert kpts_est.shape[0] > 1, 'If you use smooth loss, it must be more than 1 frames'
        loss = 0
        for nv in range(len(self.dimGroups) - 1):
            # apply the parent single-sequence smoothness to each segment
            kpts = kpts_est[self.dimGroups[nv]:self.dimGroups[nv+1]]
            loss += super().__call__(kpts_est=kpts)
        # average over segments
        return loss/(len(self.dimGroups) - 1)
    def __str__(self) -> str:
        return 'Loss function for Multi Smooth of Body'
class LossSmoothPosesMulti:
    """Temporal pose smoothness per segment: each interior frame is pulled
    toward the (detached) 3-frame moving average around it."""
    def __init__(self, dimGroups, cfg) -> None:
        # NOTE(review): `cfg` is accepted but unused here — kept for interface
        # symmetry with the other loss classes; confirm.
        self.dimGroups = dimGroups
        self.norm = 'l2'
    def __call__(self, poses, **kwargs):
        "Smooth poses"
        loss = 0
        for nv in range(len(self.dimGroups) - 1):
            poses_ = poses[self.dimGroups[nv]:self.dimGroups[nv+1]]
            # detached moving average serves as a fixed smoothing target
            poses_interp = poses_.clone().detach()
            poses_interp[1:-1] = (poses_interp[1:-1] + poses_interp[:-2] + poses_interp[2:])/3
            loss += funcl2(poses_[1:-1] - poses_interp[1:-1])/(poses_.shape[0] - 2)
        return loss/(len(self.dimGroups) - 1)
    def __str__(self) -> str:
        return 'Loss function for Multi Smooth of Poses'
class LossInit:
    """Penalize deviation of the optimized poses/shapes from their initialization."""

    def __init__(self, params, cfg) -> None:
        self.norm = 'l2'
        self.poses = torch.Tensor(params['poses']).to(cfg.device)
        self.shapes = torch.Tensor(params['shapes']).to(cfg.device)

    @staticmethod
    def _l2_to(current, target):
        # summed squared difference, averaged over the leading (frame) axis
        return torch.sum((current - target) ** 2) / current.shape[0]

    def init_poses(self, poses, **kwargs):
        "distance to poses_0"
        if self.norm == 'l2':
            return self._l2_to(poses, self.poses)

    def init_shapes(self, shapes, **kwargs):
        "distance to shapes_0"
        if self.norm == 'l2':
            return self._l2_to(shapes, self.shapes)
The provided code snippet includes necessary dependencies for implementing the `optimizeMirrorSoft` function. Write a Python function `def optimizeMirrorSoft(body_model, params, bboxes, keypoints2d, Pall, normal, weight, cfg)` to solve the following problem:
simple function for optimizing mirror Args: body_model (SMPL model) params (DictParam): poses(2, 72), shapes(1, 10), Rh(2, 3), Th(2, 3) bboxes (nViews, nFrames, 5): 2D bbox of each view,输入的时候是按照时序叠起来的 keypoints2d (nViews, nFrames, nJoints, 3): 2D keypoints of each view,输入的时候是按照时序叠起来的 weight (Dict): string:float cfg (Config): Config Node controling running mode
Here is the function:
def optimizeMirrorSoft(body_model, params, bboxes, keypoints2d, Pall, normal, weight, cfg):
    """
    simple function for optimizing mirror
    Args:
        body_model (SMPL model)
        params (DictParam): poses(2, 72), shapes(1, 10), Rh(2, 3), Th(2, 3)
        bboxes (nViews, nFrames, 5): 2D bbox of each view, stacked along time at input
        keypoints2d (nViews, nFrames, nJoints, 3): 2D keypoints of each view, stacked along time at input
        weight (Dict): string:float
        cfg (Config): Config Node controlling running mode
    """
    nViews, nFrames = keypoints2d.shape[:2]
    assert nViews == 2, 'Please make sure that there exists only 2 views'
    prepare_funcs = [
        deepcopy_tensor,
        # keep the mirrored half of the poses tied to the real half
        flipSMPLPosesV,
        get_prepare_smplx(params, cfg, nFrames*nViews)
    ]
    loss_sym = LossMirrorSymmetry(normal=normal, cfg=cfg)
    loss_repro = LossKeypointsMirror2D(keypoints2d, bboxes, Pall, cfg)
    # pick, per frame, whether the outside or mirrored pose fits the 2D better
    params = viewSelection(params, body_model, loss_repro, nFrames)
    init = LossInit(params, cfg)
    loss_funcs = {
        'k2d': loss_repro.__call__,
        'init_poses': init.init_poses,
        'init_shapes': init.init_shapes,
        'par_self': loss_sym.parallel_self,
        'ver_self': loss_sym.vertical_self,
        'par_mirror': loss_sym.parallel_mirror,
    }
    # temporal smoothness terms only make sense for sequences
    if nFrames > 1:
        loss_funcs['smooth_body'] = LossSmoothBodyMulti([0, nFrames, nFrames*2], cfg)
        loss_funcs['smooth_poses'] = LossSmoothPosesMulti([0, nFrames, nFrames*2], cfg)
    postprocess_funcs = [
        dict_of_tensor_to_numpy,
        flipSMPLPosesV
    ]
    params = _optimizeSMPL(body_model, params, prepare_funcs, postprocess_funcs, loss_funcs, weight_loss=weight, cfg=cfg)
    return params
13,141 | import torch
from functools import reduce
from torch.optim.optimizer import Optimizer
def _cubic_interpolate(x1, f1, g1, x2, f2, g2, bounds=None):
    # ported from https://github.com/torch/optim/blob/master/polyinterp.lua
    # Find the minimizer of the cubic interpolant through (x1, f1, g1) and
    # (x2, f2, g2), clamped to `bounds` (or to [min(x1,x2), max(x1,x2)]);
    # fall back to the midpoint when the cubic has no real minimum.
    # NOTE(review): d2_square.sqrt() implies f/g are torch tensors — confirm.
    # Compute bounds of interpolation area
    if bounds is not None:
        xmin_bound, xmax_bound = bounds
    else:
        xmin_bound, xmax_bound = (x1, x2) if x1 <= x2 else (x2, x1)
    # Code for most common case: cubic interpolation of 2 points
    #   w/ function and derivative values for both
    # Solution in this case (where x2 is the farthest point):
    #   d1 = g1 + g2 - 3*(f1-f2)/(x1-x2);
    #   d2 = sqrt(d1^2 - g1*g2);
    #   min_pos = x2 - (x2 - x1)*((g2 + d2 - d1)/(g2 - g1 + 2*d2));
    #   t_new = min(max(min_pos,xmin_bound),xmax_bound);
    d1 = g1 + g2 - 3 * (f1 - f2) / (x1 - x2)
    d2_square = d1**2 - g1 * g2
    if d2_square >= 0:
        d2 = d2_square.sqrt()
        # pick the formula anchored at the farther point for stability
        if x1 <= x2:
            min_pos = x2 - (x2 - x1) * ((g2 + d2 - d1) / (g2 - g1 + 2 * d2))
        else:
            min_pos = x1 - (x1 - x2) * ((g1 + d2 - d1) / (g1 - g2 + 2 * d2))
        return min(max(min_pos, xmin_bound), xmax_bound)
    else:
        # no real root: bisect the interval
        return (xmin_bound + xmax_bound) / 2.
def _strong_wolfe(obj_func,
                  x,
                  t,
                  d,
                  f,
                  g,
                  gtd,
                  c1=1e-4,
                  c2=0.9,
                  tolerance_change=1e-9,
                  max_ls=25):
    """Line search satisfying the strong Wolfe conditions.

    Two phases: first *bracket* an interval that must contain an acceptable
    step, then *zoom* into it with cubic interpolation.

    Args:
        obj_func: callable (x, t, d) -> (f, grad) evaluating the objective at
            x + t*d and restoring x afterwards.
        x: current parameter snapshot (passed through to obj_func).
        t: initial step size.
        d: search direction (flat tensor).
        f: objective value at t=0.
        g: gradient at t=0 (flat tensor).
        gtd: directional derivative g.dot(d) at t=0.
        c1: sufficient-decrease (Armijo) constant.
        c2: curvature constant.
        tolerance_change: minimal bracket width (scaled by |d|_inf).
        max_ls: max function evaluations per phase.

    Returns:
        (f_new, g_new, t, ls_func_evals): objective, gradient, accepted step,
        and the number of objective evaluations used.
    """
    # ported from https://github.com/torch/optim/blob/master/lswolfe.lua
    d_norm = d.abs().max()
    g = g.clone()
    # evaluate objective and gradient using initial step
    f_new, g_new = obj_func(x, t, d)
    ls_func_evals = 1
    gtd_new = g_new.dot(d)
    # bracket an interval containing a point satisfying the Wolfe criteria
    t_prev, f_prev, g_prev, gtd_prev = 0, f, g, gtd
    done = False
    ls_iter = 0
    while ls_iter < max_ls:
        # check conditions: Armijo violated or no longer decreasing => bracket
        if f_new > (f + c1 * t * gtd) or (ls_iter > 1 and f_new >= f_prev):
            bracket = [t_prev, t]
            bracket_f = [f_prev, f_new]
            bracket_g = [g_prev, g_new.clone()]
            bracket_gtd = [gtd_prev, gtd_new]
            break
        # strong Wolfe satisfied directly: accept t
        if abs(gtd_new) <= -c2 * gtd:
            bracket = [t]
            bracket_f = [f_new]
            bracket_g = [g_new]
            done = True
            break
        # derivative turned positive: minimum is bracketed
        if gtd_new >= 0:
            bracket = [t_prev, t]
            bracket_f = [f_prev, f_new]
            bracket_g = [g_prev, g_new.clone()]
            bracket_gtd = [gtd_prev, gtd_new]
            break
        # interpolate (extrapolate forward, at most 10x the current step)
        min_step = t + 0.01 * (t - t_prev)
        max_step = t * 10
        tmp = t
        t = _cubic_interpolate(
            t_prev,
            f_prev,
            gtd_prev,
            t,
            f_new,
            gtd_new,
            bounds=(min_step, max_step))
        # next step
        t_prev = tmp
        f_prev = f_new
        g_prev = g_new.clone()
        gtd_prev = gtd_new
        f_new, g_new = obj_func(x, t, d)
        ls_func_evals += 1
        gtd_new = g_new.dot(d)
        ls_iter += 1
    # reached max number of iterations?
    # (zoom loop below is then skipped, so bracket_gtd is not needed here)
    if ls_iter == max_ls:
        bracket = [0, t]
        bracket_f = [f, f_new]
        bracket_g = [g, g_new]
    # zoom phase: we now have a point satisfying the criteria, or
    # a bracket around it. We refine the bracket until we find the
    # exact point satisfying the criteria
    insuf_progress = False
    # find high and low points in bracket
    low_pos, high_pos = (0, 1) if bracket_f[0] <= bracket_f[-1] else (1, 0)
    while not done and ls_iter < max_ls:
        # compute new trial value
        t = _cubic_interpolate(bracket[0], bracket_f[0], bracket_gtd[0],
                               bracket[1], bracket_f[1], bracket_gtd[1])
        # test that we are making sufficient progress:
        # in case `t` is so close to boundary, we mark that we are making
        # insufficient progress, and if
        #   + we have made insufficient progress in the last step, or
        #   + `t` is at one of the boundary,
        # we will move `t` to a position which is `0.1 * len(bracket)`
        # away from the nearest boundary point.
        eps = 0.1 * (max(bracket) - min(bracket))
        if min(max(bracket) - t, t - min(bracket)) < eps:
            # interpolation close to boundary
            if insuf_progress or t >= max(bracket) or t <= min(bracket):
                # evaluate at 0.1 away from boundary
                if abs(t - max(bracket)) < abs(t - min(bracket)):
                    t = max(bracket) - eps
                else:
                    t = min(bracket) + eps
                insuf_progress = False
            else:
                insuf_progress = True
        else:
            insuf_progress = False
        # Evaluate new point
        f_new, g_new = obj_func(x, t, d)
        ls_func_evals += 1
        gtd_new = g_new.dot(d)
        ls_iter += 1
        if f_new > (f + c1 * t * gtd) or f_new >= bracket_f[low_pos]:
            # Armijo condition not satisfied or not lower than lowest point
            bracket[high_pos] = t
            bracket_f[high_pos] = f_new
            bracket_g[high_pos] = g_new.clone()
            bracket_gtd[high_pos] = gtd_new
            low_pos, high_pos = (0, 1) if bracket_f[0] <= bracket_f[1] else (1, 0)
        else:
            if abs(gtd_new) <= -c2 * gtd:
                # Wolfe conditions satisfied
                done = True
            elif gtd_new * (bracket[high_pos] - bracket[low_pos]) >= 0:
                # old high becomes new low
                bracket[high_pos] = bracket[low_pos]
                bracket_f[high_pos] = bracket_f[low_pos]
                bracket_g[high_pos] = bracket_g[low_pos]
                bracket_gtd[high_pos] = bracket_gtd[low_pos]
            # new point becomes new low
            bracket[low_pos] = t
            bracket_f[low_pos] = f_new
            bracket_g[low_pos] = g_new.clone()
            bracket_gtd[low_pos] = gtd_new
        # line-search bracket is so small
        if abs(bracket[1] - bracket[0]) * d_norm < tolerance_change:
            break
    # return stuff (single-element bracket from early success has low_pos == 0)
    t = bracket[low_pos]
    f_new = bracket_f[low_pos]
    g_new = bracket_g[low_pos]
    return f_new, g_new, t, ls_func_evals
13,142 | import numpy as np
import torch
from .lbfgs import LBFGS
from .optimize import FittingMonitor, grad_require, FittingLog
from .lossfactory import LossSmoothBodyMean, LossRegPoses
from .lossfactory import LossKeypoints3D, LossKeypointsMV2D, LossSmoothBody, LossRegPosesZero, LossInit, LossSmoothPoses
class LBFGS(Optimizer):
    """Implements L-BFGS algorithm, heavily inspired by `minFunc
    <https://www.cs.ubc.ca/~schmidtm/Software/minFunc.html>`.
    .. warning::
        This optimizer doesn't support per-parameter options and parameter
        groups (there can be only one).
    .. warning::
        Right now all parameters have to be on a single device. This will be
        improved in the future.
    .. note::
        This is a very memory intensive optimizer (it requires additional
        ``param_bytes * (history_size + 1)`` bytes). If it doesn't fit in memory
        try reducing the history size, or use a different algorithm.
    Arguments:
        lr (float): learning rate (default: 1)
        max_iter (int): maximal number of iterations per optimization step
            (default: 20)
        max_eval (int): maximal number of function evaluations per optimization
            step (default: max_iter * 1.25).
        tolerance_grad (float): termination tolerance on first order optimality
            (default: 1e-5).
        tolerance_change (float): termination tolerance on function
            value/parameter changes (default: 1e-9).
        history_size (int): update history size (default: 100).
        line_search_fn (str): either 'strong_wolfe' or None (default: None).
    """
    def __init__(self,
                 params,
                 lr=1,
                 max_iter=20,
                 max_eval=None,
                 tolerance_grad=1e-5,
                 tolerance_change=1e-9,
                 history_size=100,
                 line_search_fn=None):
        if max_eval is None:
            max_eval = max_iter * 5 // 4
        defaults = dict(
            lr=lr,
            max_iter=max_iter,
            max_eval=max_eval,
            tolerance_grad=tolerance_grad,
            tolerance_change=tolerance_change,
            history_size=history_size,
            line_search_fn=line_search_fn)
        super(LBFGS, self).__init__(params, defaults)
        if len(self.param_groups) != 1:
            raise ValueError("LBFGS doesn't support per-parameter options "
                             "(parameter groups)")
        self._params = self.param_groups[0]['params']
        self._numel_cache = None

    def _numel(self):
        # total number of scalar parameters, cached after the first call
        if self._numel_cache is None:
            self._numel_cache = reduce(lambda total, p: total + p.numel(), self._params, 0)
        return self._numel_cache

    def _gather_flat_grad(self):
        """Concatenate all parameter gradients into one flat tensor."""
        views = []
        for p in self._params:
            if p.grad is None:
                view = p.new(p.numel()).zero_()
            elif p.grad.is_sparse:
                view = p.grad.to_dense().view(-1)
            else:
                view = p.grad.view(-1)
            views.append(view)
        return torch.cat(views, 0)

    def _add_grad(self, step_size, update):
        """In-place p += step_size * update for all parameters (flat update)."""
        offset = 0
        for p in self._params:
            numel = p.numel()
            # use the keyword `alpha` overload: the positional
            # add_(Number, Tensor) form is deprecated/removed in recent torch
            p.data.add_(update[offset:offset + numel].view_as(p.data), alpha=step_size)
            offset += numel
        assert offset == self._numel()

    def _clone_param(self):
        return [p.clone() for p in self._params]

    def _set_param(self, params_data):
        for p, pdata in zip(self._params, params_data):
            p.data.copy_(pdata)

    def _directional_evaluate(self, closure, x, t, d):
        """Evaluate loss/grad at x + t*d, then restore the parameters to x."""
        self._add_grad(t, d)
        loss = float(closure())
        flat_grad = self._gather_flat_grad()
        self._set_param(x)
        return loss, flat_grad

    def step(self, closure):
        """Performs a single optimization step.
        Arguments:
            closure (callable): A closure that reevaluates the model
                and returns the loss.
        """
        assert len(self.param_groups) == 1
        group = self.param_groups[0]
        lr = group['lr']
        max_iter = group['max_iter']
        max_eval = group['max_eval']
        tolerance_grad = group['tolerance_grad']
        tolerance_change = group['tolerance_change']
        line_search_fn = group['line_search_fn']
        history_size = group['history_size']
        # NOTE: LBFGS has only global state, but we register it as state for
        # the first param, because this helps with casting in load_state_dict
        state = self.state[self._params[0]]
        state.setdefault('func_evals', 0)
        state.setdefault('n_iter', 0)
        # evaluate initial f(x) and df/dx
        orig_loss = closure()
        loss = float(orig_loss)
        current_evals = 1
        state['func_evals'] += 1
        flat_grad = self._gather_flat_grad()
        opt_cond = flat_grad.abs().max() <= tolerance_grad
        # optimal condition
        if opt_cond:
            return orig_loss
        # tensors cached in state (for tracing)
        d = state.get('d')
        t = state.get('t')
        old_dirs = state.get('old_dirs')
        old_stps = state.get('old_stps')
        ro = state.get('ro')
        H_diag = state.get('H_diag')
        prev_flat_grad = state.get('prev_flat_grad')
        prev_loss = state.get('prev_loss')
        n_iter = 0
        # optimize for a max of max_iter iterations
        while n_iter < max_iter:
            # keep track of nb of iterations
            n_iter += 1
            state['n_iter'] += 1
            ############################################################
            # compute gradient descent direction
            ############################################################
            if state['n_iter'] == 1:
                d = flat_grad.neg()
                old_dirs = []
                old_stps = []
                ro = []
                H_diag = 1
            else:
                # do lbfgs update (update memory)
                y = flat_grad.sub(prev_flat_grad)
                s = d.mul(t)
                ys = y.dot(s)  # y*s
                if ys > 1e-10:
                    # updating memory
                    if len(old_dirs) == history_size:
                        # shift history by one (limited-memory)
                        old_dirs.pop(0)
                        old_stps.pop(0)
                        ro.pop(0)
                    # store new direction/step
                    old_dirs.append(y)
                    old_stps.append(s)
                    ro.append(1. / ys)
                    # update scale of initial Hessian approximation
                    H_diag = ys / y.dot(y)  # (y*y)
                # compute the approximate (L-BFGS) inverse Hessian
                # multiplied by the gradient
                num_old = len(old_dirs)
                if 'al' not in state:
                    state['al'] = [None] * history_size
                al = state['al']
                # iteration in L-BFGS loop collapsed to use just one buffer
                q = flat_grad.neg()
                for i in range(num_old - 1, -1, -1):
                    al[i] = old_stps[i].dot(q) * ro[i]
                    # keyword `alpha` form (positional scalar-first is deprecated)
                    q.add_(old_dirs[i], alpha=-al[i])
                # multiply by initial Hessian
                # r/d is the final direction
                d = r = torch.mul(q, H_diag)
                for i in range(num_old):
                    be_i = old_dirs[i].dot(r) * ro[i]
                    r.add_(old_stps[i], alpha=al[i] - be_i)
            if prev_flat_grad is None:
                prev_flat_grad = flat_grad.clone()
            else:
                prev_flat_grad.copy_(flat_grad)
            prev_loss = loss
            ############################################################
            # compute step length
            ############################################################
            # reset initial guess for step size
            if state['n_iter'] == 1:
                t = min(1., 1. / flat_grad.abs().sum()) * lr
            else:
                t = lr
            # directional derivative
            gtd = flat_grad.dot(d)  # g * d
            # directional derivative is below tolerance
            if gtd > -tolerance_change:
                break
            # optional line search: user function
            ls_func_evals = 0
            if line_search_fn is not None:
                # perform line search, using user function
                if line_search_fn != "strong_wolfe":
                    raise RuntimeError("only 'strong_wolfe' is supported")
                else:
                    x_init = self._clone_param()

                    def obj_func(x, t, d):
                        return self._directional_evaluate(closure, x, t, d)
                    loss, flat_grad, t, ls_func_evals = _strong_wolfe(
                        obj_func, x_init, t, d, loss, flat_grad, gtd)
                self._add_grad(t, d)
                opt_cond = flat_grad.abs().max() <= tolerance_grad
            else:
                # no line search, simply move with fixed-step
                self._add_grad(t, d)
                if n_iter != max_iter:
                    # re-evaluate function only if not in last iteration
                    # the reason we do this: in a stochastic setting,
                    # no use to re-evaluate that function here
                    loss = float(closure())
                    flat_grad = self._gather_flat_grad()
                    opt_cond = flat_grad.abs().max() <= tolerance_grad
                    ls_func_evals = 1
            # update func eval
            current_evals += ls_func_evals
            state['func_evals'] += ls_func_evals
            ############################################################
            # check conditions
            ############################################################
            if n_iter == max_iter:
                break
            if current_evals >= max_eval:
                break
            # optimal condition
            if opt_cond:
                break
            # lack of progress
            if d.mul(t).abs().max() <= tolerance_change:
                break
            if abs(loss - prev_loss) < tolerance_change:
                break
        state['d'] = d
        state['t'] = t
        state['old_dirs'] = old_dirs
        state['old_stps'] = old_stps
        state['ro'] = ro
        state['H_diag'] = H_diag
        state['prev_flat_grad'] = prev_flat_grad
        state['prev_loss'] = prev_loss
        return orig_loss
class FittingMonitor:
    """Drives the optimizer loop for SMPL fitting and decides when to stop."""
    def __init__(self, ftol=1e-5, gtol=1e-6, maxiters=100, visualize=False, verbose=False, **kwargs):
        # ftol: relative loss-change threshold for early stopping
        # gtol: gradient threshold (only used by the disabled check below)
        self.maxiters = maxiters
        self.ftol = ftol
        self.gtol = gtol
        self.visualize = visualize
        self.verbose = verbose
        if self.visualize:
            from utils.mesh_viewer import MeshViewer
            self.mv = MeshViewer(width=1024, height=1024, bg_color=[1.0, 1.0, 1.0, 1.0],
                                 body_color=[0.65098039, 0.74117647, 0.85882353, 1.0],
                                 offscreen=False)
    def run_fitting(self, optimizer, closure, params, smpl_render=None, **kwargs):
        """Step `optimizer` with `closure` until maxiters is reached, the loss
        becomes NaN/Inf, or the relative loss change drops below ftol.

        Returns:
            float: the last finite loss value (None if the first step fails).
        """
        prev_loss = None
        grad_require(params, True)
        if self.verbose:
            # NOTE(review): tqdm is not imported in this module view — confirm
            # it is imported at file level.
            trange = tqdm(range(self.maxiters), desc='Fitting')
        else:
            trange = range(self.maxiters)
        for iter in trange:
            loss = optimizer.step(closure)
            if torch.isnan(loss).sum() > 0:
                print('NaN loss value, stopping!')
                break
            if torch.isinf(loss).sum() > 0:
                print('Infinite loss value, stopping!')
                break
            # if all([torch.abs(var.grad.view(-1).max()).item() < self.gtol
            #         for var in params if var.grad is not None]):
            #     print('Small grad, stopping!')
            #     break
            if iter > 0 and prev_loss is not None and self.ftol > 0:
                # rel_change is a module-level helper defined elsewhere in the file
                loss_rel_change = rel_change(prev_loss, loss.item())
                if loss_rel_change <= self.ftol:
                    break
            if self.visualize:
                vertices = smpl_render.GetVertices(**kwargs)
                self.mv.update_mesh(vertices[::10], smpl_render.faces)
            prev_loss = loss.item()
        # switch gradients back off before returning control to the caller
        grad_require(params, False)
        return prev_loss
    def close(self):
        if self.visualize:
            self.mv.close_viewer()
def grad_require(paras, flag=False):
    """Toggle gradient tracking on every tensor in a list or dict.

    Inputs of any other type are silently ignored, matching the original
    behavior.
    """
    if isinstance(paras, list):
        tensors = paras
    elif isinstance(paras, dict):
        tensors = paras.values()
    else:
        return
    for tensor in tensors:
        tensor.requires_grad = flag
The provided code snippet includes necessary dependencies for implementing the `optimizeShape` function. Write a Python function `def optimizeShape(body_model, body_params, keypoints3d, weight_loss, kintree, cfg=None)` to solve the following problem:
simple function for optimizing model shape given 3d keypoints Args: body_model (SMPL model) params_init (DictParam): poses(1, 72), shapes(1, 10), Rh(1, 3), Th(1, 3) keypoints (nFrames, nJoints, 3): 3D keypoints weight (Dict): string:float kintree ([[src, dst]]): list of list:int cfg (Config): Config Node controlling running mode
Here is the function:
def optimizeShape(body_model, body_params, keypoints3d,
                  weight_loss, kintree, cfg=None):
    """ simple function for optimizing model shape given 3d keypoints

    Args:
        body_model (SMPL model)
        body_params (DictParam): poses(1, 72), shapes(1, 10), Rh(1, 3), Th(1, 3)
        keypoints3d (nFrames, nJoints, 4): 3D keypoints with confidence in
            the last channel
        weight_loss (Dict): string:float
        kintree ([[src, dst]]): list of list:int
        cfg (Config): Config Node controlling running mode

    Returns:
        dict: body_params as numpy arrays with 'shapes' optimized so that the
        model limb lengths match the observed keypoint limb lengths.
    """
    device = body_model.device
    # measure the observed limb lengths from the 3D keypoints
    kintree = np.array(kintree, dtype=int)
    # limb_length: (nFrames, nLimbs, 1)
    limb_length = np.linalg.norm(keypoints3d[:, kintree[:, 1], :3] - keypoints3d[:, kintree[:, 0], :3], axis=2, keepdims=True)
    # limb confidence = the weaker endpoint's confidence: (nFrames, nLimbs, 1)
    limb_conf = np.minimum(keypoints3d[:, kintree[:, 1], 3:], keypoints3d[:, kintree[:, 0], 3:])
    limb_length = torch.Tensor(limb_length).to(device)
    limb_conf = torch.Tensor(limb_conf).to(device)
    body_params = {key: torch.Tensor(val).to(device) for key, val in body_params.items()}
    body_params_init = {key: val.clone() for key, val in body_params.items()}
    # only the shape coefficients are optimized
    opt_params = [body_params['shapes']]
    grad_require(opt_params, True)
    optimizer = LBFGS(
        opt_params, line_search_fn='strong_wolfe', max_iter=10)
    nFrames = keypoints3d.shape[0]
    verbose = False

    def closure(debug=False):
        optimizer.zero_grad()
        # NOTE: shadows the outer keypoints3d with the model's predictions
        keypoints3d = body_model(return_verts=False, return_tensor=True, only_shape=True, **body_params)
        src = keypoints3d[:, kintree[:, 0], :3]
        dst = keypoints3d[:, kintree[:, 1], :3]
        # keep the estimated limb direction fixed, penalize length mismatch
        direct_est = (dst - src).detach()
        direct_norm = torch.norm(direct_est, dim=2, keepdim=True)
        direct_normalized = direct_est/(direct_norm + 1e-4)
        err = dst - src - direct_normalized * limb_length
        loss_dict = {
            's3d': torch.sum(err**2*limb_conf)/nFrames,
            'reg_shapes': torch.sum(body_params['shapes']**2)}
        if 'init_shape' in weight_loss.keys():
            loss_dict['init_shape'] = torch.sum((body_params['shapes'] - body_params_init['shapes'])**2)
        if verbose:
            print(' '.join([key + ' %.3f'%(loss_dict[key].item()*weight_loss[key])
                for key in loss_dict.keys() if weight_loss[key]>0]))
        loss = sum([loss_dict[key]*weight_loss[key]
                    for key in loss_dict.keys()])
        if not debug:
            loss.backward()
            return loss
        else:
            return loss_dict
    fitting = FittingMonitor(ftol=1e-4)
    final_loss = fitting.run_fitting(optimizer, closure, opt_params)
    fitting.close()
    grad_require(opt_params, False)
    loss_dict = closure(debug=True)
    for key in loss_dict.keys():
        loss_dict[key] = loss_dict[key].item()
    # (a leftover, unused second LBFGS instance was constructed here in the
    # original code; it was never stepped, so it has been removed)
    body_params = {key: val.detach().cpu().numpy() for key, val in body_params.items()}
    return body_params
13,143 | import numpy as np
import torch
from .lbfgs import LBFGS
from .optimize import FittingMonitor, grad_require, FittingLog
from .lossfactory import LossSmoothBodyMean, LossRegPoses
from .lossfactory import LossKeypoints3D, LossKeypointsMV2D, LossSmoothBody, LossRegPosesZero, LossInit, LossSmoothPoses
def interp(left_value, right_value, weight, key='poses'):
    """Linearly interpolate between two parameter values.

    Args:
        left_value, right_value: parameter values (tensors/arrays/scalars).
        weight (float): weight of ``left_value``; ``right_value`` is weighted
            by ``1 - weight``.
        key (str): parameter name; 'Rh', 'Th' and 'poses' are supported.

    Returns:
        The interpolated value, or None for unsupported keys (matching the
        original implicit fall-through).
    """
    # The original dispatched on key through three identical branches;
    # collapsed here into one. Note that linear interpolation of axis-angle
    # values ('Rh'/'poses') is only an approximation for large rotations.
    if key in ('Rh', 'Th', 'poses'):
        return left_value * weight + right_value * (1 - weight)
    return None
def interp_by_k3d(conf, params):
    """Fill parameters of frames whose total keypoint confidence is (almost)
    zero by linear interpolation from the nearest neighbouring frames.

    Args:
        conf: torch tensor of per-frame, per-joint confidences (summed over
            joints below).
        params: dict with 'Rh', 'Th', 'poses' tensors indexed by frame.

    Returns:
        params with those keys cloned and the invalid frames overwritten.
    """
    for key in ['Rh', 'Th', 'poses']:
        params[key] = params[key].clone()
    # Totally invalid frames
    not_valid_frames = torch.nonzero(conf.sum(dim=1).squeeze() < 0.01)[:, 0].detach().cpu().numpy().tolist()
    # walk over the invalid frames, grouping consecutive ones into ranges
    ranges = []
    if len(not_valid_frames) > 0:
        start = not_valid_frames[0]
        for i in range(1, len(not_valid_frames)):
            if not_valid_frames[i] == not_valid_frames[i-1] + 1:
                pass
            else:  # a new gap starts here
                end = not_valid_frames[i-1]
                ranges.append((start, end))
                start = not_valid_frames[i]
        ranges.append((start, not_valid_frames[-1]))
    for start, end in ranges:
        # for each gap: interpolate from the closest frames on both sides
        # NOTE(review): left=-1 (wraps to last frame) or right=nFrames are
        # possible when a gap touches the sequence boundary — confirm callers
        # guarantee valid frames at both ends.
        left = start - 1
        right = end + 1
        for nf in range(start, end+1):
            weight = (nf - left)/(right - left)
            for key in ['Rh', 'Th', 'poses']:
                params[key][nf] = interp(params[key][left], params[key][right], 1-weight, key=key)
    return params
13,144 | import numpy as np
import torch
from .lbfgs import LBFGS
from .optimize import FittingMonitor, grad_require, FittingLog
from .lossfactory import LossSmoothBodyMean, LossRegPoses
from .lossfactory import LossKeypoints3D, LossKeypointsMV2D, LossSmoothBody, LossRegPosesZero, LossInit, LossSmoothPoses
def get_interp_by_keypoints(keypoints):
    """Build a function that fills parameters of frames with (almost) zero
    keypoint confidence by linear interpolation.

    Args:
        keypoints: numpy array, either (nFrames, nJoints, C) or
            (nViews, nFrames, nJoints, C); the last channel is confidence.

    Returns:
        interp_func(params) -> params: overwrites 'Rh', 'Th', 'poses' of the
        invalid frames with values interpolated from the nearest valid frames.
    """
    if len(keypoints.shape) == 3:  # (nFrames, nJoints, 3)
        conf = keypoints[..., -1]
    elif len(keypoints.shape) == 4:  # (nViews, nFrames, nJoints)
        conf = keypoints[..., -1].sum(axis=0)
    else:
        raise NotImplementedError
    # frames where every joint is (near) invisible
    not_valid_frames = np.where(conf.sum(axis=1) < 0.01)[0].tolist()
    # walk over the invalid frames, grouping consecutive ones into ranges
    ranges = []
    if len(not_valid_frames) > 0:
        start = not_valid_frames[0]
        for i in range(1, len(not_valid_frames)):
            if not_valid_frames[i] == not_valid_frames[i-1] + 1:
                pass
            else:  # a new gap starts here
                end = not_valid_frames[i-1]
                ranges.append((start, end))
                start = not_valid_frames[i]
        ranges.append((start, not_valid_frames[-1]))
    def interp_func(params):
        for start, end in ranges:
            # for each gap: interpolate from the closest frames on both sides
            # NOTE(review): left=-1 (wraps) or right=nFrames are possible when
            # a gap touches the sequence boundary — confirm callers guarantee
            # valid frames at both ends.
            left = start - 1
            right = end + 1
            for nf in range(start, end+1):
                weight = (nf - left)/(right - left)
                for key in ['Rh', 'Th', 'poses']:
                    params[key][nf] = interp(params[key][left], params[key][right], 1-weight, key=key)
        return params
    return interp_func
def deepcopy_tensor(body_params):
    """Replace every tensor in the dict with a clone (same dict object).

    Breaks aliasing with the caller's tensors so later in-place edits do not
    touch the originals.
    """
    for name, tensor in body_params.items():
        body_params[name] = tensor.clone()
    return body_params
def dict_of_tensor_to_numpy(body_params):
    """Return a new dict with every tensor detached, moved to CPU and
    converted to a numpy array."""
    return {name: tensor.detach().cpu().numpy()
            for name, tensor in body_params.items()}
def get_prepare_smplx(body_params, cfg, nFrames):
    """Return a `pack` closure that re-assembles the full pose vector,
    forcing the first 3 pose dims to zero and, when hands are not optimized,
    zeroing the hand (and beyond) pose block as well.

    Presumably the first 3 dims are zeroed because the global rotation is
    carried by `Rh` — confirm against the body model's convention.
    """
    zero_pose = torch.zeros((nFrames, 3), device=cfg.device)
    if not cfg.OPT_HAND and cfg.model in ['smplh', 'smplx']:
        zero_pose_hand = torch.zeros((nFrames, body_params['poses'].shape[1] - 66), device=cfg.device)
    elif cfg.OPT_HAND and not cfg.OPT_EXPR and cfg.model == 'smplx':
        # NOTE(review): zero_pose_face is computed here but never used in
        # pack() below — this branch looks incomplete; confirm whether the
        # face block was meant to be zeroed in this configuration.
        zero_pose_face = torch.zeros((nFrames, body_params['poses'].shape[1] - 78), device=cfg.device)
    def pack(new_params):
        if not cfg.OPT_HAND and cfg.model in ['smplh', 'smplx']:
            new_params['poses'] = torch.cat([zero_pose, new_params['poses'][:, 3:66], zero_pose_hand], dim=1)
        else:
            new_params['poses'] = torch.cat([zero_pose, new_params['poses'][:, 3:]], dim=1)
        return new_params
    return pack
def _optimizeSMPL(body_model, body_params, prepare_funcs, postprocess_funcs,
    loss_funcs, extra_params=None,
    weight_loss={}, cfg=None):
    """ A common interface for different optimization.
    Args:
        body_model (SMPL model)
        body_params (DictParam): poses(1, 72), shapes(1, 10), Rh(1, 3), Th(1, 3)
        prepare_funcs (List): functions for prepare
        postprocess_funcs (List): functions applied to body_params at the end
        loss_funcs (Dict): functions for loss
        extra_params (List|None): extra tensors optimized jointly
        weight_loss (Dict): weight
        cfg (Config): Config Node controlling running mode

    NOTE(review): weight_loss has a mutable default ({}) — harmless as it is
    never mutated here, but worth confirming at call sites.
    """
    # keep only the loss terms that have a positive weight
    loss_funcs = {key: val for key, val in loss_funcs.items() if key in weight_loss.keys() and weight_loss[key] > 0.}
    if cfg.verbose:
        print('Loss Functions: ')
        for key, func in loss_funcs.items():
            print('  -> {:15s}: {}'.format(key, func.__doc__))
    # get_optParams is defined elsewhere in this module: it selects which
    # tensors of body_params (plus extra_params) are optimized
    opt_params = get_optParams(body_params, cfg, extra_params)
    grad_require(opt_params, True)
    optimizer = LBFGS(opt_params,
        line_search_fn='strong_wolfe')
    PRINT_STEP = 100
    records = []
    def closure(debug=False):
        # 0. Prepare body parameters => new_params
        optimizer.zero_grad()
        new_params = body_params.copy()
        for func in prepare_funcs:
            new_params = func(new_params)
        # 1. Compute keypoints => kpts_est
        kpts_est = body_model(return_verts=False, return_tensor=True, **new_params)
        # 2. Compute loss => loss_dict
        loss_dict = {key:func(kpts_est=kpts_est, **new_params) for key, func in loss_funcs.items()}
        # 3. Summary and log
        cnt = len(records)
        if cfg.verbose and cnt % PRINT_STEP == 0:
            print('{:-6d}: '.format(cnt) + ' '.join([key + ' %f'%(loss_dict[key].item()*weight_loss[key])
                for key in loss_dict.keys() if weight_loss[key]>0]))
        loss = sum([loss_dict[key]*weight_loss[key]
                    for key in loss_dict.keys()])
        records.append(loss.item())
        if debug:
            return loss_dict
        loss.backward()
        return loss
    fitting = FittingMonitor(ftol=1e-4)
    final_loss = fitting.run_fitting(optimizer, closure, opt_params)
    fitting.close()
    grad_require(opt_params, False)
    # one extra pass in debug mode to report the per-term final losses
    loss_dict = closure(debug=True)
    if cfg.verbose:
        print('{:-6d}: '.format(len(records)) + ' '.join([key + ' %f'%(loss_dict[key].item()*weight_loss[key])
            for key in loss_dict.keys() if weight_loss[key]>0]))
    loss_dict = {key:val.item() for key, val in loss_dict.items()}
    # post-process the body_parameters
    for func in postprocess_funcs:
        body_params = func(body_params)
    return body_params
class LossKeypoints3D:
    """Distance losses between estimated and observed 3D keypoints.

    Assumes the model's keypoint layout is 25 body joints, then 42 hand
    joints, then the face block — TODO confirm against the body model.
    """
    def __init__(self, keypoints3d, cfg, norm='l2') -> None:
        # keypoints3d: (nFrames, nJoints, 4) with confidence last
        self.cfg = cfg
        keypoints3d = torch.Tensor(keypoints3d).to(cfg.device)
        self.nJoints = keypoints3d.shape[1]
        self.keypoints3d = keypoints3d[..., :3]
        self.conf = keypoints3d[..., 3:]
        self.nFrames = keypoints3d.shape[0]
        self.norm = norm
    def loss(self, diff_square):
        # diff_square is the confidence-weighted residual (not yet squared)
        if self.norm == 'l2':
            loss_3d = funcl2(diff_square)
        elif self.norm == 'l1':
            loss_3d = funcl1(diff_square)
        elif self.norm == 'gm':
            # robust Geman-McClure; threshold 0.2 m (squared: 0.04)
            loss_3d = torch.sum(gmof(diff_square**2, 0.04))
        else:
            raise NotImplementedError
        return loss_3d/self.nFrames
    def body(self, kpts_est, **kwargs):
        "distance of keypoints3d"
        nJoints = min([kpts_est.shape[1], self.keypoints3d.shape[1], 25])
        diff_square = (kpts_est[:, :nJoints, :3] - self.keypoints3d[:, :nJoints, :3])*self.conf[:, :nJoints]
        return self.loss(diff_square)
    def hand(self, kpts_est, **kwargs):
        "distance of 3d hand keypoints"
        diff_square = (kpts_est[:, 25:25+42, :3] - self.keypoints3d[:, 25:25+42, :3])*self.conf[:, 25:25+42]
        return self.loss(diff_square)
    def face(self, kpts_est, **kwargs):
        "distance of 3d face keypoints"
        diff_square = (kpts_est[:, 25+42:, :3] - self.keypoints3d[:, 25+42:, :3])*self.conf[:, 25+42:]
        return self.loss(diff_square)
    def __str__(self) -> str:
        return 'Loss function for keypoints3D, norm = {}'.format(self.norm)
class LossRegPoses:
    """L2 regularizers on slices of the pose/expression vector.

    Slice layout (body 0:66, hands 66:78, head 78:) follows the conventions
    used elsewhere in this module — TODO confirm against the body model.
    """
    def __init__(self, cfg) -> None:
        self.cfg = cfg
    def reg_hand(self, poses, **kwargs):
        "regulizer for hand pose"
        assert self.cfg.model in ['smplh', 'smplx']
        hand_poses = poses[:, 66:78]
        loss = funcl2(hand_poses)
        return loss/poses.shape[0]
    def reg_head(self, poses, **kwargs):
        "regulizer for head pose"
        assert self.cfg.model in ['smplx']
        poses = poses[:, 78:]
        loss = funcl2(poses)
        return loss/poses.shape[0]
    def reg_expr(self, expression, **kwargs):
        "regulizer for expression"
        # NOTE(review): the assert allows 'smplh', but expression coefficients
        # are an SMPL-X feature — confirm whether 'smplh' should be accepted.
        assert self.cfg.model in ['smplh', 'smplx']
        return torch.sum(expression**2)
    def reg_body(self, poses, **kwargs):
        "regulizer for body poses"
        if self.cfg.model in ['smplh', 'smplx']:
            poses = poses[:, :66]
        loss = funcl2(poses)
        return loss/poses.shape[0]
    def __str__(self) -> str:
        return 'Loss function for Regulizer of Poses'
class LossRegPosesZero:
    """L1 penalty pulling selected joints towards zero rotation.

    The joint list depends on the model type; feet/head joints are added
    when the corresponding keypoints carry (almost) no confidence.
    """
    def __init__(self, keypoints, cfg) -> None:
        model_type = cfg.model
        if keypoints.shape[-2] <= 15:
            use_feet = False
            use_head = False
        else:
            use_feet = keypoints[..., [19, 20, 21, 22, 23, 24], -1].sum() > 0.1
            use_head = keypoints[..., [15, 16, 17, 18], -1].sum() > 0.1
        if model_type == 'smpl':
            zero_joints = [3, 6, 9, 10, 11, 13, 14, 20, 21, 22, 23]
        elif model_type in ('smplh', 'smplx'):
            zero_joints = [3, 6, 9, 10, 11, 13, 14]
        else:
            raise NotImplementedError
        if not use_feet:
            zero_joints.extend([7, 8])
        if not use_head:
            zero_joints.extend([12, 15])
        # expand each joint index into its three axis-angle dimensions
        self.idx = [dim for joint in zero_joints
                    for dim in range(3 * joint, 3 * joint + 3)]
    def __call__(self, poses, **kwargs):
        "regulizer for zero joints"
        return torch.sum(torch.abs(poses[:, self.idx]))/poses.shape[0]
    def __str__(self) -> str:
        return 'Loss function for Regulizer of Poses'
class LossSmoothBodyMean:
    """Temporal smoothness: penalize each frame's distance to the average of
    its two neighbouring frames."""
    def __init__(self, cfg) -> None:
        self.cfg = cfg
    def smooth(self, kpts_est, **kwargs):
        "smooth body"
        neighbour_mean = kpts_est.clone().detach()
        neighbour_mean[1:-1] = (neighbour_mean[:-2] + neighbour_mean[2:])/2
        residual = kpts_est[1:-1] - neighbour_mean[1:-1]
        return funcl2(residual)/(kpts_est.shape[0] - 2)
    def body(self, kpts_est, **kwargs):
        "smooth body"
        return self.smooth(kpts_est[:, :25])
    def hand(self, kpts_est, **kwargs):
        "smooth body"
        return self.smooth(kpts_est[:, 25:25+42])
    def __str__(self) -> str:
        return 'Loss function for Smooth of Body'
class LossSmoothPoses:
    """Temporal smoothness on pose parameters, applied per view over
    contiguous nFrames-sized chunks of the stacked pose tensor."""
    def __init__(self, nViews, nFrames, cfg=None) -> None:
        self.nViews = nViews
        self.nFrames = nFrames
        self.norm = 'l2'
        self.cfg = cfg
    def _poses(self, poses):
        "smooth poses"
        loss = 0
        for nv in range(self.nViews):
            poses_ = poses[nv*self.nFrames:(nv+1)*self.nFrames, ]
            # neighbour-average of the poses (frame itself included)
            poses_interp = poses_.clone().detach()
            poses_interp[1:-1] = (poses_interp[1:-1] + poses_interp[:-2] + poses_interp[2:])/3
            loss += funcl2(poses_[1:-1] - poses_interp[1:-1])
        return loss/(self.nFrames-2)/self.nViews
    def poses(self, poses, **kwargs):
        "smooth body poses"
        if self.cfg.model in ['smplh', 'smplx']:
            poses = poses[:, :66]
        return self._poses(poses)
    def hands(self, poses, **kwargs):
        "smooth hand poses"
        if self.cfg.model in ['smplh', 'smplx']:
            poses = poses[:, 66:66+12]
        else:
            raise NotImplementedError
        return self._poses(poses)
    def head(self, poses, **kwargs):
        "smooth head poses"
        if self.cfg.model == 'smplx':
            poses = poses[:, 66+12:]
        else:
            raise NotImplementedError
        return self._poses(poses)
    def __str__(self) -> str:
        return 'Loss function for Smooth of Body'
class LossInit:
    """Penalize deviation of poses/shapes from their initial estimate."""
    def __init__(self, params, cfg) -> None:
        self.norm = 'l2'
        self.poses = torch.Tensor(params['poses']).to(cfg.device)
        self.shapes = torch.Tensor(params['shapes']).to(cfg.device)
    @staticmethod
    def _mean_sq_dist(value, reference):
        # squared distance averaged over the leading (frame) dimension
        return torch.sum((value - reference)**2)/value.shape[0]
    def init_poses(self, poses, **kwargs):
        "distance to poses_0"
        if self.norm == 'l2':
            return self._mean_sq_dist(poses, self.poses)
    def init_shapes(self, shapes, **kwargs):
        "distance to shapes_0"
        if self.norm == 'l2':
            return self._mean_sq_dist(shapes, self.shapes)
The provided code snippet includes necessary dependencies for implementing the `optimizePose3D` function. Write a Python function `def optimizePose3D(body_model, params, keypoints3d, weight, cfg)` to solve the following problem:
simple function for optimizing model pose given 3d keypoints Args: body_model (SMPL model) params (DictParam): poses(1, 72), shapes(1, 10), Rh(1, 3), Th(1, 3) keypoints3d (nFrames, nJoints, 4): 3D keypoints weight (Dict): string:float cfg (Config): Config Node controlling running mode
Here is the function:
def optimizePose3D(body_model, params, keypoints3d, weight, cfg):
    """
    simple function for optimizing model pose given 3d keypoints

    Args:
        body_model (SMPL model)
        params (DictParam): poses(1, 72), shapes(1, 10), Rh(1, 3), Th(1, 3)
        keypoints3d (nFrames, nJoints, 4): 3D keypoints
        weight (Dict): string:float
        cfg (Config): Config Node controlling running mode
    """
    nFrames = keypoints3d.shape[0]
    # before each loss evaluation: clone tensors, zero the untracked pose
    # blocks, and fill zero-confidence frames by interpolation
    prepare_funcs = [
        deepcopy_tensor,
        get_prepare_smplx(params, cfg, nFrames),
        get_interp_by_keypoints(keypoints3d)
    ]
    loss_funcs = {
        'k3d': LossKeypoints3D(keypoints3d, cfg).body,
        'smooth_body': LossSmoothBodyMean(cfg).body,
        'smooth_poses': LossSmoothPoses(1, nFrames, cfg).poses,
        'reg_poses': LossRegPoses(cfg).reg_body,
        'init_poses': LossInit(params, cfg).init_poses,
    }
    if body_model.model_type != 'mano':
        loss_funcs['reg_poses_zero'] = LossRegPosesZero(keypoints3d, cfg).__call__
    if cfg.OPT_HAND:
        loss_funcs['k3d_hand'] = LossKeypoints3D(keypoints3d, cfg, norm='l1').hand
        loss_funcs['reg_hand'] = LossRegPoses(cfg).reg_hand
        # loss_funcs['smooth_hand'] = LossSmoothPoses(1, nFrames, cfg).hands
        loss_funcs['smooth_hand'] = LossSmoothBodyMean(cfg).hand
    if cfg.OPT_EXPR:
        loss_funcs['k3d_face'] = LossKeypoints3D(keypoints3d, cfg, norm='l1').face
        loss_funcs['reg_head'] = LossRegPoses(cfg).reg_head
        loss_funcs['reg_expr'] = LossRegPoses(cfg).reg_expr
        loss_funcs['smooth_head'] = LossSmoothPoses(1, nFrames, cfg).head
    # after fitting: re-interpolate the invalid frames and convert to numpy
    postprocess_funcs = [
        get_interp_by_keypoints(keypoints3d),
        dict_of_tensor_to_numpy
    ]
    params = _optimizeSMPL(body_model, params, prepare_funcs, postprocess_funcs, loss_funcs, weight_loss=weight, cfg=cfg)
    return params
13,145 | import numpy as np
import torch
from .lbfgs import LBFGS
from .optimize import FittingMonitor, grad_require, FittingLog
from .lossfactory import LossSmoothBodyMean, LossRegPoses
from .lossfactory import LossKeypoints3D, LossKeypointsMV2D, LossSmoothBody, LossRegPosesZero, LossInit, LossSmoothPoses
def get_interp_by_keypoints(keypoints):
    """Build a function that fills parameters of frames with (almost) zero
    keypoint confidence by linear interpolation.

    Args:
        keypoints: numpy array, either (nFrames, nJoints, C) or
            (nViews, nFrames, nJoints, C); the last channel is confidence.

    Returns:
        interp_func(params) -> params: overwrites 'Rh', 'Th', 'poses' of the
        invalid frames with values interpolated from the nearest valid frames.
    """
    if len(keypoints.shape) == 3:  # (nFrames, nJoints, 3)
        conf = keypoints[..., -1]
    elif len(keypoints.shape) == 4:  # (nViews, nFrames, nJoints)
        conf = keypoints[..., -1].sum(axis=0)
    else:
        raise NotImplementedError
    # frames where every joint is (near) invisible
    not_valid_frames = np.where(conf.sum(axis=1) < 0.01)[0].tolist()
    # walk over the invalid frames, grouping consecutive ones into ranges
    ranges = []
    if len(not_valid_frames) > 0:
        start = not_valid_frames[0]
        for i in range(1, len(not_valid_frames)):
            if not_valid_frames[i] == not_valid_frames[i-1] + 1:
                pass
            else:  # a new gap starts here
                end = not_valid_frames[i-1]
                ranges.append((start, end))
                start = not_valid_frames[i]
        ranges.append((start, not_valid_frames[-1]))
    def interp_func(params):
        for start, end in ranges:
            # for each gap: interpolate from the closest frames on both sides
            # NOTE(review): left=-1 (wraps) or right=nFrames are possible when
            # a gap touches the sequence boundary — confirm callers guarantee
            # valid frames at both ends.
            left = start - 1
            right = end + 1
            for nf in range(start, end+1):
                weight = (nf - left)/(right - left)
                for key in ['Rh', 'Th', 'poses']:
                    params[key][nf] = interp(params[key][left], params[key][right], 1-weight, key=key)
        return params
    return interp_func
def deepcopy_tensor(body_params):
    """Replace every tensor in the dict with a clone (same dict object).

    Breaks aliasing with the caller's tensors so later in-place edits do not
    touch the originals.
    """
    for name, tensor in body_params.items():
        body_params[name] = tensor.clone()
    return body_params
def dict_of_tensor_to_numpy(body_params):
    """Return a new dict with every tensor detached, moved to CPU and
    converted to a numpy array."""
    return {name: tensor.detach().cpu().numpy()
            for name, tensor in body_params.items()}
def get_prepare_smplx(body_params, cfg, nFrames):
    """Build a `pack` function that zeros out the parts of the pose vector
    that are not being optimized.

    The returned closure replaces the global orientation (first 3 values)
    with zeros and, when hands are not optimized for smplh/smplx models,
    also zeros everything after the 66 body-pose values.

    Args:
        body_params: dict with 'poses' of shape (nFrames, nPoses)
        cfg: config providing .device, .model, .OPT_HAND
        nFrames (int): number of frames
    Returns:
        pack(new_params) -> new_params with 'poses' rewritten.
    """
    zero_pose = torch.zeros((nFrames, 3), device=cfg.device)
    if not cfg.OPT_HAND and cfg.model in ['smplh', 'smplx']:
        # zeros for the hand/face tail after the 66 body-pose values
        zero_pose_hand = torch.zeros((nFrames, body_params['poses'].shape[1] - 66), device=cfg.device)
    # NOTE(review): the original also built an unused `zero_pose_face` tensor
    # for the OPT_HAND-and-not-OPT_EXPR smplx case; `pack` never referenced
    # it, so that dead branch has been removed.
    def pack(new_params):
        if not cfg.OPT_HAND and cfg.model in ['smplh', 'smplx']:
            new_params['poses'] = torch.cat([zero_pose, new_params['poses'][:, 3:66], zero_pose_hand], dim=1)
        else:
            # keep all optimized pose values, only zero the global rotation
            new_params['poses'] = torch.cat([zero_pose, new_params['poses'][:, 3:]], dim=1)
        return new_params
    return pack
def _optimizeSMPL(body_model, body_params, prepare_funcs, postprocess_funcs,
    loss_funcs, extra_params=None,
    weight_loss={}, cfg=None):
    """ A common interface for different optimization.
    Runs an LBFGS fit of the body parameters against the weighted sum of the
    given loss terms, then applies the post-processing functions.

    Args:
        body_model (SMPL model)
        body_params (DictParam): poses(1, 72), shapes(1, 10), Rh(1, 3), Th(1, 3)
        prepare_funcs (List): functions for prepare
        postprocess_funcs (List): applied in order to body_params at the end
        loss_funcs (Dict): functions for loss
        extra_params: forwarded to get_optParams (selects what is optimized)
        weight_loss (Dict): weight per loss name; zero-weight terms are dropped
        cfg (Config): Config Node controlling running mode
    """
    # keep only loss terms that actually have a positive weight
    loss_funcs = {key: val for key, val in loss_funcs.items() if key in weight_loss.keys() and weight_loss[key] > 0.}
    if cfg.verbose:
        print('Loss Functions: ')
        for key, func in loss_funcs.items():
            print(' -> {:15s}: {}'.format(key, func.__doc__))
    opt_params = get_optParams(body_params, cfg, extra_params)
    grad_require(opt_params, True)
    optimizer = LBFGS(opt_params,
        line_search_fn='strong_wolfe')
    PRINT_STEP = 100
    records = []
    def closure(debug=False):
        # 0. Prepare body parameters => new_params
        optimizer.zero_grad()
        new_params = body_params.copy()
        for func in prepare_funcs:
            new_params = func(new_params)
        # 1. Compute keypoints => kpts_est
        kpts_est = body_model(return_verts=False, return_tensor=True, **new_params)
        # 2. Compute loss => loss_dict
        loss_dict = {key:func(kpts_est=kpts_est, **new_params) for key, func in loss_funcs.items()}
        # 3. Summary and log
        cnt = len(records)
        if cfg.verbose and cnt % PRINT_STEP == 0:
            print('{:-6d}: '.format(cnt) + ' '.join([key + ' %f'%(loss_dict[key].item()*weight_loss[key])
                for key in loss_dict.keys() if weight_loss[key]>0]))
        loss = sum([loss_dict[key]*weight_loss[key]
            for key in loss_dict.keys()])
        records.append(loss.item())
        if debug:
            return loss_dict
        loss.backward()
        return loss
    fitting = FittingMonitor(ftol=1e-4)
    final_loss = fitting.run_fitting(optimizer, closure, opt_params)
    fitting.close()
    grad_require(opt_params, False)
    # evaluate once more without backprop to report the final per-term losses
    loss_dict = closure(debug=True)
    if cfg.verbose:
        print('{:-6d}: '.format(len(records)) + ' '.join([key + ' %f'%(loss_dict[key].item()*weight_loss[key])
            for key in loss_dict.keys() if weight_loss[key]>0]))
    loss_dict = {key:val.item() for key, val in loss_dict.items()}
    # post-process the body_parameters
    for func in postprocess_funcs:
        body_params = func(body_params)
    return body_params
class LossRegPoses:
    # L2 regularizers on slices of the pose vector. Each method's short
    # docstring doubles as its printed description in the optimizer log,
    # so those strings are kept verbatim.
    def __init__(self, cfg) -> None:
        self.cfg = cfg

    def reg_hand(self, poses, **kwargs):
        "regulizer for hand pose"
        assert self.cfg.model in ['smplh', 'smplx']
        # hand pose occupies columns [66, 78); normalize by frame count
        return funcl2(poses[:, 66:78]) / poses.shape[0]

    def reg_head(self, poses, **kwargs):
        "regulizer for head pose"
        assert self.cfg.model in ['smplx']
        head_poses = poses[:, 78:]
        return funcl2(head_poses) / head_poses.shape[0]

    def reg_expr(self, expression, **kwargs):
        "regulizer for expression"
        assert self.cfg.model in ['smplh', 'smplx']
        return torch.sum(expression ** 2)

    def reg_body(self, poses, **kwargs):
        "regulizer for body poses"
        # smplh/smplx store hands/face after column 66; only penalize the body part
        body_poses = poses[:, :66] if self.cfg.model in ['smplh', 'smplx'] else poses
        return funcl2(body_poses) / body_poses.shape[0]

    def __str__(self) -> str:
        return 'Loss function for Regulizer of Poses'
class LossRegPosesZero:
    # Pushes a fixed set of "usually rigid" joint rotations towards zero;
    # the joint set depends on the model type and which keypoints were observed.
    def __init__(self, keypoints, cfg) -> None:
        model_type = cfg.model
        if keypoints.shape[-2] <= 15:
            # a 15-joint skeleton carries no feet/head detections at all
            use_feet, use_head = False, False
        else:
            use_feet = keypoints[..., [19, 20, 21, 22, 23, 24], -1].sum() > 0.1
            use_head = keypoints[..., [15, 16, 17, 18], -1].sum() > 0.1
        if model_type == 'smpl':
            zero_joints = [3, 6, 9, 10, 11, 13, 14, 20, 21, 22, 23]
        elif model_type in ('smplh', 'smplx'):
            zero_joints = [3, 6, 9, 10, 11, 13, 14]
        else:
            raise NotImplementedError
        if not use_feet:
            zero_joints.extend([7, 8])
        if not use_head:
            zero_joints.extend([12, 15])
        # expand each joint index into its three axis-angle components
        self.idx = [c for j in zero_joints for c in range(3 * j, 3 * j + 3)]

    def __call__(self, poses, **kwargs):
        "regulizer for zero joints"
        return torch.sum(torch.abs(poses[:, self.idx])) / poses.shape[0]

    def __str__(self) -> str:
        return 'Loss function for Regulizer of Poses'
class LossSmoothBodyMean:
    # Penalizes deviation of every interior frame from the average of its two
    # temporal neighbours (a simple second-order smoothness prior).
    def __init__(self, cfg) -> None:
        self.cfg = cfg

    def smooth(self, kpts_est, **kwargs):
        "smooth body"
        # detached neighbour mean acts as a constant target
        neighbour_mean = kpts_est.clone().detach()
        neighbour_mean[1:-1] = (neighbour_mean[:-2] + neighbour_mean[2:]) / 2
        residual = kpts_est[1:-1] - neighbour_mean[1:-1]
        return funcl2(residual) / (kpts_est.shape[0] - 2)

    def body(self, kpts_est, **kwargs):
        "smooth body"
        # first 25 joints are the body keypoints
        return self.smooth(kpts_est[:, :25])

    def hand(self, kpts_est, **kwargs):
        "smooth body"
        # the 42 hand joints follow the 25 body joints
        return self.smooth(kpts_est[:, 25:25+42])

    def __str__(self) -> str:
        return 'Loss function for Smooth of Body'
class LossSmoothPoses:
    # Temporal smoothness on pose vectors, evaluated independently per view
    # (frames of all views are stacked along the first axis).
    def __init__(self, nViews, nFrames, cfg=None) -> None:
        self.nViews = nViews
        self.nFrames = nFrames
        self.norm = 'l2'
        self.cfg = cfg

    def _poses(self, poses):
        "smooth poses"
        total = 0
        for view in range(self.nViews):
            chunk = poses[view*self.nFrames:(view+1)*self.nFrames, ]
            # target: detached 3-frame moving average (treated as a constant)
            target = chunk.clone().detach()
            target[1:-1] = (target[1:-1] + target[:-2] + target[2:]) / 3
            total += funcl2(chunk[1:-1] - target[1:-1])
        return total / (self.nFrames - 2) / self.nViews

    def poses(self, poses, **kwargs):
        "smooth body poses"
        if self.cfg.model in ['smplh', 'smplx']:
            poses = poses[:, :66]
        return self._poses(poses)

    def hands(self, poses, **kwargs):
        "smooth hand poses"
        if self.cfg.model not in ['smplh', 'smplx']:
            raise NotImplementedError
        return self._poses(poses[:, 66:66+12])

    def head(self, poses, **kwargs):
        "smooth head poses"
        if self.cfg.model != 'smplx':
            raise NotImplementedError
        return self._poses(poses[:, 66+12:])

    def __str__(self) -> str:
        return 'Loss function for Smooth of Body'
class LossInit:
    # Keeps the optimized parameters close to their initial values.
    def __init__(self, params, cfg) -> None:
        self.norm = 'l2'
        self.poses = torch.Tensor(params['poses']).to(cfg.device)
        self.shapes = torch.Tensor(params['shapes']).to(cfg.device)

    def init_poses(self, poses, **kwargs):
        "distance to poses_0"
        if self.norm == 'l2':
            diff = poses - self.poses
            return torch.sum(diff * diff) / poses.shape[0]

    def init_shapes(self, shapes, **kwargs):
        "distance to shapes_0"
        if self.norm == 'l2':
            diff = shapes - self.shapes
            return torch.sum(diff * diff) / shapes.shape[0]
class LossKeypointsMV2D(LossRepro):
    # Multi-view 2D reprojection loss: projects the estimated 3D joints with
    # each view's P matrix and delegates the image-space residual to the
    # LossRepro base class (not visible in this file).
    def __init__(self, keypoints2d, bboxes, Pall, cfg) -> None:
        """
        Args:
            keypoints2d (ndarray): (nViews, nFrames, nJoints, 3)
            bboxes (ndarray): (nViews, nFrames, 5)
            Pall (ndarray): (nViews, 3, 4) projection matrix per view
            cfg (Config): provides the torch device
        """
        super().__init__(bboxes, keypoints2d, cfg)
        assert Pall.shape[0] == keypoints2d.shape[0] and Pall.shape[0] == bboxes.shape[0], \
            'check you P shape: {} and keypoints2d shape: {}'.format(Pall.shape, keypoints2d.shape)
        device = cfg.device
        self.Pall = torch.Tensor(Pall).to(device)
        self.nViews, self.nFrames, self.nJoints = keypoints2d.shape[:3]
        # constant homogeneous coordinate appended to every estimated joint
        self.kpt_homo = torch.ones((self.nFrames, self.nJoints, 1), device=device)
    def __call__(self, kpts_est, **kwargs):
        "reprojection loss for multiple views"
        # kpts_est: (nFrames, nJoints, 3+1), P: (nViews, 3, 4)
        # => projection: (nViews, nFrames, nJoints, 3)
        kpts_homo = torch.cat([kpts_est[..., :self.nJoints, :], self.kpt_homo], dim=2)
        point_cam = torch.einsum('vab,fnb->vfna', self.Pall, kpts_homo)
        # perspective divide to pixel coordinates
        img_points = point_cam[..., :2]/point_cam[..., 2:]
        # normalize by view and frame counts so loss weights stay comparable
        return super().__call__(img_points)/self.nViews/self.nFrames
    def __str__(self) -> str:
        return 'Loss function for Reprojection error'
The provided code snippet includes necessary dependencies for implementing the `optimizePose2D` function. Write a Python function `def optimizePose2D(body_model, params, bboxes, keypoints2d, Pall, weight, cfg)` to solve the following problem:
simple function for optimizing model pose given 2D keypoints Args: body_model (SMPL model) params (DictParam): poses(1, 72), shapes(1, 10), Rh(1, 3), Th(1, 3) keypoints2d (nFrames, nViews, nJoints, 4): 2D keypoints of each view bboxes: (nFrames, nViews, 5) weight (Dict): string:float cfg (Config): Config Node controlling running mode
Here is the function:
def optimizePose2D(body_model, params, bboxes, keypoints2d, Pall, weight, cfg):
    """
    simple function for optimizing model pose given 2D keypoints
    Args:
        body_model (SMPL model)
        params (DictParam): poses(1, 72), shapes(1, 10), Rh(1, 3), Th(1, 3)
        keypoints2d (nFrames, nViews, nJoints, 4): 2D keypoints of each view
        bboxes: (nFrames, nViews, 5)
        Pall: (nViews, 3, 4) projection matrix of each view
        weight (Dict): string:float; only losses named here are used
        cfg (Config): Config Node controlling running mode
    """
    # transpose to (nViews, nFrames, 5)
    bboxes = bboxes.transpose(1, 0, 2)
    # transpose to => keypoints2d: (nViews, nFrames, nJoints, 3)
    keypoints2d = keypoints2d.transpose(1, 0, 2, 3)
    nViews, nFrames = keypoints2d.shape[:2]
    # prepare: clone tensors, zero out non-optimized pose entries, and
    # interpolate over frames without detections
    prepare_funcs = [
        deepcopy_tensor,
        get_prepare_smplx(params, cfg, nFrames),
        get_interp_by_keypoints(keypoints2d)
    ]
    loss_funcs = {
        'k2d': LossKeypointsMV2D(keypoints2d, bboxes, Pall, cfg).__call__,
        'smooth_body': LossSmoothBodyMean(cfg).body,
        'init_poses': LossInit(params, cfg).init_poses,
        'smooth_poses': LossSmoothPoses(nViews, nFrames, cfg).poses,
        'reg_poses': LossRegPoses(cfg).reg_body,
    }
    if body_model.model_type != 'mano':
        loss_funcs['reg_poses_zero'] = LossRegPosesZero(keypoints2d, cfg).__call__
    if cfg.OPT_SHAPE:
        loss_funcs['init_shapes'] = LossInit(params, cfg).init_shapes
    if cfg.OPT_HAND:
        loss_funcs['reg_hand'] = LossRegPoses(cfg).reg_hand
        # loss_funcs['smooth_hand'] = LossSmoothPoses(1, nFrames, cfg).hands
        loss_funcs['smooth_hand'] = LossSmoothBodyMean(cfg).hand
    if cfg.OPT_EXPR:
        loss_funcs['reg_head'] = LossRegPoses(cfg).reg_head
        loss_funcs['reg_expr'] = LossRegPoses(cfg).reg_expr
        loss_funcs['smooth_head'] = LossSmoothPoses(1, nFrames, cfg).head
    # only keep the loss terms the caller supplied a weight for
    loss_funcs = {key:val for key, val in loss_funcs.items() if key in weight.keys()}
    postprocess_funcs = [
        get_interp_by_keypoints(keypoints2d),
        dict_of_tensor_to_numpy
    ]
    params = _optimizeSMPL(body_model, params, prepare_funcs, postprocess_funcs, loss_funcs, weight_loss=weight, cfg=cfg)
    return params
13,146 | import numpy as np
import torch
from .operation import projection, batch_rodrigues
The provided code snippet includes necessary dependencies for implementing the `gmof` function. Write a Python function `def gmof(squared_res, sigma_squared)` to solve the following problem:
Geman-McClure error function
Here is the function:
def gmof(squared_res, sigma_squared):
    """
    Geman-McClure error function
    """
    # robust penalty: ~squared_res for small residuals, saturating at sigma_squared
    scaled = sigma_squared * squared_res
    return scaled / (sigma_squared + squared_res)
13,147 | import numpy as np
import torch
from .operation import projection, batch_rodrigues
def projection(points3d, camera_intri, R=None, T=None, distance=None):
    """ project the 3d points to camera coordinate
    Arguments:
        points3d {Tensor} -- (bn, N, 3)
        camera_intri {Tensor} -- (bn, 3, 3)
        distance {Tensor} -- (bn, 1, 1)
        R: bn, 3, 3
        T: bn, 3, 1
    Returns:
        points2d -- (bn, N, 2)
    """
    if R is not None:
        # world -> camera: x_cam = R x + T, written row-wise as x @ R^T + T^T
        rotated = torch.matmul(points3d, torch.transpose(R, 1, 2))
        if T.shape[-1] == 1:
            points3d = rotated + torch.transpose(T, 1, 2)
        else:
            points3d = rotated + T
    # perspective division by per-point depth, unless a fixed distance is given
    divisor = points3d[:, :, 2:3] if distance is None else distance
    normalized = points3d[:, :, :2] / divisor
    # apply the 2x2 focal block of the intrinsics and add the principal point
    focal = camera_intri[:, :2, :2]
    principal = torch.transpose(camera_intri[:, :2, 2:3], 1, 2)
    return torch.matmul(normalized, focal.transpose(1, 2)) + principal
def ReprojectionLoss(keypoints3d, keypoints2d, K, Rc, Tc, inv_bbox_sizes, norm='l2'):
    """Confidence-weighted reprojection loss.

    Args:
        keypoints3d: (nFrames, nJoints, 3) estimated 3D joints
        keypoints2d: (nFrames, nJoints, 3) target pixels with confidence last
        K, Rc, Tc: camera intrinsics / rotation / translation for `projection`
        inv_bbox_sizes: per-sample normalization factors
        norm: 'l2' (squared) or 'l1' (absolute) residual
    Returns:
        scalar tensor: sum of the normalized, confidence-weighted residuals
    Raises:
        ValueError: for an unknown `norm`.
    """
    img_points = projection(keypoints3d, K, Rc, Tc)
    # weight the pixel residual by the 2D detection confidence
    residual = (img_points - keypoints2d[:, :, :2]) * keypoints2d[:, :, -1:]
    # squared_res: (nFrames, nJoints, 2)
    if norm == 'l2':
        squared_res = (residual ** 2) * inv_bbox_sizes
    elif norm == 'l1':
        squared_res = torch.abs(residual) * inv_bbox_sizes
    else:
        # the original dropped into an interactive ipdb session here
        # (and then crashed with a NameError); fail loudly instead
        raise ValueError('Unknown norm for ReprojectionLoss: {}'.format(norm))
    return torch.sum(squared_res)
13,148 | import numpy as np
import torch
from .operation import projection, batch_rodrigues
def batch_rodrigues(rot_vecs, epsilon=1e-8, dtype=torch.float32):
    ''' Calculates the rotation matrices for a batch of rotation vectors
    Parameters
    ----------
    rot_vecs: torch.tensor Nx3
        array of N axis-angle vectors
    Returns
    -------
    R: torch.tensor Nx3x3
        The rotation matrices for the given axis-angle parameters
    '''
    batch_size = rot_vecs.shape[0]
    device = rot_vecs.device
    # the rotation angle is the vector norm; the tiny offset avoids div-by-zero
    angle = torch.norm(rot_vecs + 1e-8, dim=1, keepdim=True)
    axis = rot_vecs / angle
    cos = torch.cos(angle).unsqueeze(1)
    sin = torch.sin(angle).unsqueeze(1)
    # Bx1 components of the unit axis
    rx, ry, rz = torch.split(axis, 1, dim=1)
    zeros = torch.zeros((batch_size, 1), dtype=dtype, device=device)
    # K is the skew-symmetric cross-product matrix of the rotation axis
    K = torch.cat([zeros, -rz, ry, rz, zeros, -rx, -ry, rx, zeros], dim=1) \
        .view((batch_size, 3, 3))
    ident = torch.eye(3, dtype=dtype, device=device).unsqueeze(0)
    # Rodrigues formula: R = I + sin(a) K + (1 - cos(a)) K^2
    return ident + sin * K + (1 - cos) * torch.bmm(K, K)
def SmoothLoss(body_params, keys, weight_loss, span=4, model_type='smpl'):
    """Temporal smoothness losses: squared ('smooth_*') and absolute
    ('smooth_*_l1') differences between frames offset by 1..span-1, per
    parameter group, plus a term on the global rotation matrices ('smooth_Rh').

    Args:
        body_params (Dict): tensors with a leading frame dimension
        keys (List[str]): parameter groups to smooth
        weight_loss (Dict): only terms with weight > 0 are computed
        span (int): exclusive upper bound on the frame offset
        model_type (str): for 'smplh'/'smplx' only the first 66 pose dims are used
    Returns:
        Dict[str, Tensor]: one entry per active smoothness term
    """
    spans = [i for i in range(1, span)]
    span_weights = {i:1/i for i in range(1, span)}
    # NOTE(review): sum(span_weights) sums the dict KEYS (1+2+...+span-1),
    # not the 1/i values — confirm this normalization is intended
    span_weights = {key: i/sum(span_weights) for key, i in span_weights.items()}
    loss_dict = {}
    nFrames = body_params['poses'].shape[0]
    nPoses = body_params['poses'].shape[1]
    if model_type == 'smplh' or model_type == 'smplx':
        nPoses = 66
    for key in ['poses', 'Th', 'poses_hand', 'expression']:
        if key not in keys:
            continue
        k = 'smooth_' + key
        if k in weight_loss.keys() and weight_loss[k] > 0.:
            loss_dict[k] = 0.
            for span in spans:
                # 'poses_hand' addresses the tail of the 'poses' tensor (cols 66+)
                if key == 'poses_hand':
                    val = torch.sum((body_params['poses'][span:, 66:] - body_params['poses'][:nFrames-span, 66:])**2)
                else:
                    val = torch.sum((body_params[key][span:, :nPoses] - body_params[key][:nFrames-span, :nPoses])**2)
                loss_dict[k] += span_weights[span] * val
        k = 'smooth_' + key + '_l1'
        if k in weight_loss.keys() and weight_loss[k] > 0.:
            loss_dict[k] = 0.
            for span in spans:
                if key == 'poses_hand':
                    val = torch.sum((body_params['poses'][span:, 66:] - body_params['poses'][:nFrames-span, 66:]).abs())
                else:
                    val = torch.sum((body_params[key][span:, :nPoses] - body_params[key][:nFrames-span, :nPoses]).abs())
                loss_dict[k] += span_weights[span] * val
    # smooth rotation: compare rotation matrices, not raw axis-angle vectors
    rot = batch_rodrigues(body_params['Rh'])
    key, k = 'Rh', 'smooth_Rh'
    if key in keys and k in weight_loss.keys() and weight_loss[k] > 0.:
        loss_dict[k] = 0.
        for span in spans:
            val = torch.sum((rot[span:, :] - rot[:nFrames-span, :])**2)
            loss_dict[k] += span_weights[span] * val
    return loss_dict
13,149 | import numpy as np
import torch
from .operation import projection, batch_rodrigues
def RegularizationLoss(body_params, body_params_init, weight_loss):
    """L2 penalties keeping parameters close to their initial values
    ('init_*') and close to zero ('reg_*'); only terms with a positive
    weight are computed. 'hands' and 'head' address fixed column ranges
    of the full pose vector ([66:78] and [78:87] respectively).
    """
    def _l2(x):
        return torch.sum(x ** 2)

    loss_dict = {}
    for key in ['poses', 'shapes', 'Th', 'hands', 'head', 'expression']:
        name = 'init_' + key
        if weight_loss.get(name, 0.) > 0.:
            if key == 'poses':
                loss_dict[name] = _l2(body_params['poses'][:, :66] - body_params_init['poses'][:, :66])
            elif key == 'hands':
                loss_dict[name] = _l2(body_params['poses'][:, 66:78] - body_params_init['poses'][:, 66:78])
            elif key == 'head':
                loss_dict[name] = _l2(body_params['poses'][:, 78:87] - body_params_init['poses'][:, 78:87])
            elif key in body_params.keys():
                loss_dict[name] = _l2(body_params[key] - body_params_init[key])
    for key in ['poses', 'shapes', 'hands', 'head', 'expression']:
        name = 'reg_' + key
        if weight_loss.get(name, 0.) > 0.:
            if key == 'poses':
                loss_dict[name] = _l2(body_params['poses'][:, :66])
            elif key == 'hands':
                loss_dict[name] = _l2(body_params['poses'][:, 66:78])
            elif key == 'head':
                loss_dict[name] = _l2(body_params['poses'][:, 78:87])
            elif key in body_params.keys():
                loss_dict[name] = _l2(body_params[key])
    return loss_dict
13,150 | import numpy as np
import os
from tqdm import tqdm
import torch
import json
def rel_change(prev_val, curr_val):
    """Relative change between successive values, with the denominator
    clamped to at least 1 to guard against division by (near-)zero."""
    denom = max(np.abs(prev_val), np.abs(curr_val), 1)
    return (prev_val - curr_val) / denom
13,151 | import os
import numpy as np
import cv2
import pyrender
import trimesh
import copy
# Named color palette with components in [0, 1]: '_'-prefixed entries are
# muted, print-safe tones; single letters are saturated primaries.
colors_table = {
    # colorblind/print/copy safe:
    '_blue': [0.65098039, 0.74117647, 0.85882353],
    '_pink': [.9, .7, .7],
    '_mint': [ 166/255., 229/255., 204/255.],
    '_mint2': [ 202/255., 229/255., 223/255.],
    '_green': [ 153/255., 216/255., 201/255.],
    '_green2': [ 171/255., 221/255., 164/255.],
    '_red': [ 251/255., 128/255., 114/255.],
    '_orange': [ 253/255., 174/255., 97/255.],
    '_yellow': [ 250/255., 230/255., 154/255.],
    'r':[255/255,0,0],
    'g':[0,255/255,0],
    'b':[0,0,255/255],
    'k':[0,0,0],
    'y':[255/255,255/255,0],
    'purple':[128/255,0,128/255]
}
from pyrender import RenderFlags
def get_colors(pid):
    """Resolve a drawing color: an int cycles through the module-level
    `colors` palette, a str indexes `colors_table`."""
    # NOTE(review): relies on a module-level `colors` list not visible here
    if isinstance(pid, int):
        return colors[pid % len(colors)]
    if isinstance(pid, str):
        return colors_table[pid]
13,152 | import os
import numpy as np
import cv2
import pyrender
import trimesh
import copy
from pyrender import RenderFlags
class Renderer(object):
def __init__(self, focal_length=1000, height=512, width=512, faces=None,
bg_color=[1.0, 1.0, 1.0, 0.0], down_scale=1, # render 配置
extra_mesh=[]
):
def add_light(self, scene):
def render(self, render_data, cameras, images,
use_white=False, add_back=True,
ret_depth=False, ret_color=False):
def _render_multiview(self, vertices, K, R, T, imglist, trackId=0, return_depth=False, return_color=False,
bg_color=[0.0, 0.0, 0.0, 0.0], camera=None):
def render_results(img, render_data, cam_params, outname=None, rotate=False, degree=90, axis=[1.,0.,0],
    fix_center=None):
    """Render meshes over `img` with the given camera; optionally also render
    a rotated view on a white background stacked below, and write to disk.

    Args:
        img: background image
        render_data (Dict): per-person meshes with at least 'vertices'
        cam_params (Dict): camera with 'K', 'Rc', 'Tc'
        outname (str|None): when set, the result is written to this path
        rotate (bool): additionally render the geometry rotated by `degree`
            (in units of 90 deg) around `axis`
        fix_center (ndarray|None): rotation center; mean of all vertices when None
    Returns:
        the composited image (vertically stacked with the rotated view
        when rotate=True)
    """
    render_data = copy.deepcopy(render_data)
    render = Renderer(height=1024, width=1024, faces=None)
    Ks, Rs, Ts = [cam_params['K']], [cam_params['Rc']], [cam_params['Tc']]
    # NOTE(review): relies on Renderer.render_multiview, whose definition is
    # not visible here — confirm the expected signature
    imgsrender = render.render_multiview(render_data, Ks, Rs, Ts, [img], return_color=True)[0]
    render0 = cv2.addWeighted(cv2.bitwise_and(img, 255 - imgsrender[:, :, 3:4].repeat(3, 2)), 1, imgsrender[:, :, :3], 1, 0.0)
    if rotate:
        # simple rotate the vertices
        if fix_center is None:
            center = np.mean(np.vstack([v['vertices'] for i, v in render_data.items()]), axis=0, keepdims=True)
            new_center = center.copy()
            new_center[:, 0:2] = 0
        else:
            center = fix_center.copy()
            new_center = fix_center.copy()
            new_center[:, 2] *= 1.5
        direc = np.array(axis)
        rot, _ = cv2.Rodrigues(direc*degree/90*np.pi/2)
        for key in render_data.keys():
            vertices = render_data[key]['vertices']
            vert = (vertices - center) @ rot.T + new_center
            render_data[key]['vertices'] = vert
        # the 0-d array below is dead code: it is overwritten immediately
        blank = np.zeros(())
        blank = np.zeros((img.shape[0], img.shape[1], 3), dtype=img.dtype) + 255
        imgsrender = render.render_multiview(render_data, Ks, Rs, Ts, [blank], return_color=True)[0]
        render1 = cv2.addWeighted(cv2.bitwise_and(blank, 255- imgsrender[:, :, 3:4].repeat(3, 2)), 1, imgsrender[:, :, :3], 1, 0.0)
        render0 = np.vstack([render0, render1])
    if outname is not None:
        os.makedirs(os.path.dirname(outname), exist_ok=True)
        cv2.imwrite(outname, render0)
    return render0
13,153 | import numpy as np
import cv2
from os.path import join
import os
from ..dataset.config import CONFIG
The provided code snippet includes necessary dependencies for implementing the `calTransformation` function. Write a Python function `def calTransformation(v_i, v_j, r, adaptr=False, ratio=10)` to solve the following problem:
from two vertices to T Arguments: v_i {} -- [description] v_j {[type]} -- [description]
Here is the function:
def calTransformation(v_i, v_j, r, adaptr=False, ratio=10):
    """Build the 4x4 transform of an ellipsoid ("limb") spanning two vertices.

    Arguments:
        v_i, v_j: (3,) endpoints of the segment
        r: base radius scale along the segment axis
        ratio: controls the minimum thickness relative to the length
    Returns:
        (T, r, length): 4x4 transform, the radius passed in, segment length
    """
    x_axis = np.array([1, 0, 0])
    midpoint = (v_i + v_j) / 2
    segment = v_i - v_j
    length = np.linalg.norm(segment)
    direc = segment / length
    # rotation taking the x-axis onto the segment direction
    rotdir = np.cross(x_axis, direc)
    if np.linalg.norm(rotdir) > 1e-3:
        rotdir = rotdir / np.linalg.norm(rotdir)
        rotdir = rotdir * np.arccos(np.dot(direc, x_axis))
        rotmat, _ = cv2.Rodrigues(rotdir)
    else:
        # segment (anti-)parallel to the x-axis: no rotation applied
        rotmat = np.eye(3)
    # clamp thickness so fingers/face stay visible but limbs don't balloon
    shrink = min(max(length / ratio, 0.005), 0.05)
    eigval = np.array([[length / 2 / r, 0, 0], [0, shrink, 0], [0, 0, shrink]])
    T = np.eye(4)
    T[:3, :3] = rotmat @ eigval @ rotmat.T
    T[:3, 3] = midpoint
    return T, r, length
13,154 | import numpy as np
import cv2
import numpy as np
from tqdm import tqdm
from os.path import join
def load_sphere():
    """Load the template sphere mesh (vertices, faces) shipped next to this
    module; faces are integer indices into the vertex array."""
    # NOTE(review): relies on module-level `os`, `join` and `np` being in
    # scope (the surrounding imports are scrambled in this extract)
    cur_dir = os.path.dirname(__file__)
    faces = np.loadtxt(join(cur_dir, 'sphere_faces_20.txt'), dtype=int)
    vertices = np.loadtxt(join(cur_dir, 'sphere_vertices_20.txt'))
    return vertices, faces
import os
The provided code snippet includes necessary dependencies for implementing the `create_point` function. Write a Python function `def create_point(points, r=0.01)` to solve the following problem:
create sphere Args: points (array): (N, 3)/(N, 4) r (float, optional): radius. Defaults to 0.01.
Here is the function:
def create_point(points, r=0.01):
    """ create sphere
    Args:
        points (array): (N, 3)/(N, 4)
        r (float, optional): radius. Defaults to 0.01.
    """
    points = np.array(points)
    n_points = points.shape[0]
    template_verts, template_faces = load_sphere()
    template_verts = template_verts * r
    n_template = template_verts.shape[0]
    # replicate the template sphere once per point and translate each copy
    vert = np.repeat(template_verts[None, :, :], n_points, 0) + points[:, None, :3]
    verts = np.vstack(vert)
    # offset each copy's face indices past the vertices of earlier copies
    face = np.repeat(template_faces[None, :, :], n_points, 0)
    face = face + n_template * np.arange(n_points).reshape(n_points, 1, 1)
    faces = np.vstack(face)
    return {'vertices': verts, 'faces': faces, 'name': 'points'}
13,155 | import numpy as np
import cv2
import numpy as np
from tqdm import tqdm
from os.path import join
import os
def create_ground(
    center=[0, 0, 0], xdir=[1, 0, 0], ydir=[0, 1, 0], # placement
    step=1, xrange=10, yrange=10, # extent in tiles
    white=[1., 1., 1.], black=[0.,0.,0.], # tile colors
    two_sides=True
):
    """Build a checkerboard ground mesh spanned by xdir/ydir around center.

    Returns a dict with 'vertices', 'faces', per-vertex 'colors' and the
    name 'ground'. When two_sides is True the board also extends in the
    negative xdir/ydir directions.
    """
    if isinstance(center, list):
        center = np.array(center)
        xdir = np.array(xdir)
        ydir = np.array(ydir)
    print('[Vis Info] {}, x: {}, y: {}'.format(center, xdir, ydir))
    xdir = xdir * step
    ydir = ydir * step
    all_verts, all_faces, all_colors = [], [], []
    tile_count = 0
    min_x = -xrange if two_sides else 0
    min_y = -yrange if two_sides else 0
    for ix in range(min_x, xrange):
        for iy in range(min_y, yrange):
            # four corners of tile (ix, iy), counter-clockwise
            corners = np.stack([
                center + ix * xdir + iy * ydir,
                center + (ix + 1) * xdir + iy * ydir,
                center + (ix + 1) * xdir + (iy + 1) * ydir,
                center + ix * xdir + (iy + 1) * ydir,
            ])
            # checkerboard parity: same-parity indices get the white tile
            tile_color = white if (ix + iy) % 2 == 0 else black
            colors = np.stack([tile_color for _ in range(corners.shape[0])])
            tris = np.array([[2, 3, 0], [0, 1, 2]]) + corners.shape[0] * tile_count
            tile_count += 1
            all_verts.append(corners)
            all_faces.append(tris)
            all_colors.append(colors)
    return {'vertices': np.vstack(all_verts), 'faces': np.vstack(all_faces),
            'colors': np.vstack(all_colors), 'name': 'ground'}
13,156 | import numpy as np
import cv2
import numpy as np
from tqdm import tqdm
from os.path import join
def get_rotation_from_two_directions(direc0, direc1):
    """Rotation matrix mapping unit(direc0) onto unit(direc1); identity when
    the two directions are (nearly) parallel."""
    u = direc0 / np.linalg.norm(direc0)
    v = direc1 / np.linalg.norm(direc1)
    axis = np.cross(u, v)
    axis_norm = np.linalg.norm(axis)
    if axis_norm < 1e-2:
        # (anti-)parallel directions: fall back to the identity
        return np.eye(3)
    rotvec = axis / axis_norm * np.arccos(np.dot(u, v))
    rotmat, _ = cv2.Rodrigues(rotvec)
    return rotmat
# Corner coordinates of the unit cube; create_plane scales and re-centers these.
PLANE_VERTICES = np.array([
    [0., 0., 0.],
    [1., 0., 0.],
    [0., 0., 1.],
    [1., 0., 1.],
    [0., 1., 0.],
    [1., 1., 0.],
    [0., 1., 1.],
    [1., 1., 1.]])
# Triangulation of the cube above: 12 triangles, two per face.
PLANE_FACES = np.array([
    [4, 7, 5],
    [4, 6, 7],
    [0, 2, 4],
    [2, 6, 4],
    [0, 1, 2],
    [1, 3, 2],
    [1, 5, 7],
    [1, 7, 3],
    [2, 3, 7],
    [2, 7, 6],
    [0, 4, 1],
    [1, 4, 5]], dtype=np.int32)
import os
def create_plane(normal, center, dx=1, dy=1, dz=0.005, color=[0.8, 0.8, 0.8]):
    """Thin box mesh of size (dx, dy, dz), oriented so its z-axis matches
    `normal` and translated to `center`. Returns {'vertices','faces','name'}.
    NOTE: `color` is accepted for API compatibility but unused here.
    """
    verts = PLANE_VERTICES.copy()
    # scale the unit cube and center it at the origin
    verts[:, 0] = verts[:, 0] * dx - dx / 2
    verts[:, 1] = verts[:, 1] * dy - dy / 2
    verts[:, 2] = verts[:, 2] * dz - dz / 2
    # orient +z towards the requested normal
    rot = get_rotation_from_two_directions(
        np.array([0, 0, 1]), np.array(normal))
    verts = verts @ rot.T
    verts = verts + np.array(center).reshape(-1, 3)
    return {'vertices': verts, 'faces': PLANE_FACES.copy(), 'name': 'plane'}
13,157 | import numpy as np
import cv2
import numpy as np
from tqdm import tqdm
from os.path import join
import os
current_dir = os.path.dirname(os.path.realpath(__file__))
def create_cameras_texture(cameras, imgnames, scale=5e-3):
    """Create one textured pyrender mesh per camera, showing that camera's
    image on a billboard positioned via the camera's extrinsics.

    Args:
        cameras (Dict): per-view dicts providing 'K', 'R', 'T'
        imgnames (List[str]): one image path per view, in camera order
        scale (float): billboard size in world units per pixel
    Returns:
        List[pyrender.Mesh]: one textured mesh per camera
    """
    import trimesh
    import pyrender
    from PIL import Image
    from os.path import join
    cam_path = join(current_dir, 'objs', 'background.obj')
    meshes = []
    for nv, (key, camera) in enumerate(tqdm(cameras.items(), desc='loading images')):
        cam_trimesh = trimesh.load(cam_path, process=False)
        vert = np.asarray(cam_trimesh.vertices)
        K, R, T = camera['K'], camera['R'], camera['T']
        img = Image.open(imgnames[nv])
        height, width = img.height, img.width
        # scale the template to the image aspect, flatten z, center, flip y
        vert[:, 0] *= width
        vert[:, 1] *= height
        vert[:, 2] *= 0
        vert[:, 0] -= vert[:, 0]*0.5
        vert[:, 1] -= vert[:, 1]*0.5
        vert[:, 1] = - vert[:, 1]
        vert[:, :2] *= scale
        # vert[:, 2] = 1
        # camera -> world coordinates: (v - T) R
        cam_trimesh.vertices = (vert - T.T) @ R
        cam_trimesh.visual.material.image = img
        cam_mesh = pyrender.Mesh.from_trimesh(cam_trimesh, smooth=True)
        meshes.append(cam_mesh)
    return meshes
13,158 | import numpy as np
import cv2
import numpy as np
from tqdm import tqdm
from os.path import join
import os
def create_mesh_pyrender(vert, faces, col):
    """Wrap a (vertices, faces) mesh in a pyrender Mesh with a flat,
    non-metallic, opaque material of the given base color."""
    import trimesh
    import pyrender
    tri = trimesh.Trimesh(vert, faces, process=False)
    material = pyrender.MetallicRoughnessMaterial(
        metallicFactor=0.0,
        alphaMode='OPAQUE',
        baseColorFactor=col)
    return pyrender.Mesh.from_trimesh(tri, material=material)
13,159 | import pyrender
import numpy as np
import trimesh
import cv2
from .pyrender_flags import get_flags
from ..mytools.vis_base import get_rgb
def offscree_render(renderer, scene, img, flags):
    """Render `scene` offscreen and composite it over `img`.

    Args:
        renderer: offscreen renderer exposing .render(scene, flags=...)
        scene: the scene to render
        img: (H, W, 3) uint8 background image
        flags: render flags forwarded to the renderer
    Returns:
        (rend_rgba, rend_depth, rend_cat): render with channels reordered
        for OpenCV plus alpha, uint16 depth in millimeters, and the render
        composited over `img`.
    """
    rend_rgba, rend_depth = renderer.render(scene, flags=flags)
    # depth is stored as uint16 millimeters, so it must fit below 65.536 m
    assert rend_depth.max() < 65, 'depth should less than 65.536: {}'.format(rend_depth.max())
    rend_depth = (rend_depth * 1000).astype(np.uint16)
    if rend_rgba.shape[2] == 3: # fail to generate transparent channel
        # synthesize alpha from the depth mask
        valid_mask = (rend_depth > 0)[:, :, None]
        rend_rgba = np.dstack((rend_rgba, (valid_mask*255).astype(np.uint8)))
    # swap the first and third channels (and keep alpha) for OpenCV images
    rend_rgba = rend_rgba[..., [2, 1, 0, 3]]
    # composite: replace fully-opaque pixels of the background with the render
    # (a dead `if False:` cv2.addWeighted branch was removed here)
    rend_cat = img.copy()
    rend_cat[rend_rgba[:,:,-1]==255] = rend_rgba[:,:,:3][rend_rgba[:,:,-1]==255]
    return rend_rgba, rend_depth, rend_cat
13,160 | import pyrender
import numpy as np
import trimesh
import cv2
from .pyrender_flags import get_flags
from ..mytools.vis_base import get_rgb
class Renderer:
    """Offscreen pyrender-based renderer.

    Renders per-person meshes (plus optional extra trimesh geometry) into
    one image per camera and composites the result over the input images
    via `offscree_render`.
    """
    def __init__(self, bg_color=[1.0, 1.0, 1.0, 0.0], ambient_light=[0.5, 0.5, 0.5], flags={}) -> None:
        self.bg_color = bg_color
        self.ambient_light = ambient_light
        # viewport size is overwritten per image inside __call__
        self.renderer = pyrender.OffscreenRenderer(1024, 1024)
        self.flags = get_flags(flags)
    def add_light(self, scene, camera=None):
        # BUGFIX(review): `self` was missing from this method's signature;
        # the bound call self.add_light(scene, camera=cam) below then raised
        # TypeError ("got multiple values for argument 'camera'").
        # Use 3 directional lights
        # Create light source
        light = pyrender.DirectionalLight(color=[1.0, 1.0, 1.0], intensity=3)
        light_forward = np.eye(4)
        # here the location of the light is set to be the origin
        # and this location doesn't affect the render results
        scene.add(light, pose=light_forward)
        light_z = np.eye(4)
        light_z[:3, :3] = cv2.Rodrigues(np.array([np.pi, 0, 0]))[0]
        # if camera is not None:
        #     light_z[:3, :3] = camera['R'] @ light_z[:3, :3]
        scene.add(light, pose=light_z)
    def __call__(self, render_data, images, cameras, extra_mesh=[],
        ret_image=False, ret_depth=False, ret_color=False, ret_mask=False, ret_all=True):
        """Render every view; the ret_* flags select what is returned."""
        if isinstance(images, np.ndarray) and isinstance(cameras, dict):
            images, cameras = [images], [cameras]
        assert isinstance(cameras, list)
        # flip the scene 180 degrees around x to match pyrender's camera frame
        rot = trimesh.transformations.rotation_matrix(
            np.radians(180), [1, 0, 0])
        output_images, output_colors, output_depths = [], [], []
        for nv, img in enumerate(images):
            cam = cameras[nv]
            K, R, T = cam['K'], cam['R'], cam['T']
            self.renderer.viewport_height = img.shape[0]
            self.renderer.viewport_width = img.shape[1]
            scene = pyrender.Scene(bg_color=self.bg_color,
                ambient_light=self.ambient_light)
            for iextra, _mesh in enumerate(extra_mesh):
                mesh = _mesh.copy()
                trans_cam = np.eye(4)
                trans_cam[:3, :3] = R
                trans_cam[:3, 3:] = T
                mesh.apply_transform(trans_cam)
                mesh.apply_transform(rot)
                # mesh.vertices = np.asarray(mesh.vertices) @ R.T + T.T
                mesh_ = pyrender.Mesh.from_trimesh(mesh)
                scene.add(mesh_, 'extra{}'.format(iextra))
            for trackId, data in render_data.items():
                vert = data['vertices'].copy()
                faces = data['faces']
                # world -> camera coordinates
                vert = vert @ R.T + T.T
                if 'colors' not in data.keys():
                    # when a 'vid' key is present, color by that id instead
                    if False:
                        col = get_rgb(data.get('vid', trackId))
                    else:
                        col = get_colors(data.get('vid', trackId))
                    mesh = trimesh.Trimesh(vert, faces, process=False)
                    mesh.apply_transform(rot)
                    material = pyrender.MetallicRoughnessMaterial(
                        metallicFactor=0.0,
                        roughnessFactor=0.0,
                        alphaMode='OPAQUE',
                        baseColorFactor=col)
                    # material = pyrender.material.SpecularGlossinessMaterial(
                    #     diffuseFactor=1.0, glossinessFactor=0.0
                    # )
                    mesh = pyrender.Mesh.from_trimesh(mesh, material=material, smooth=data.get('smooth', True))
                else:
                    # per-vertex colors: no material, no smoothing
                    mesh = trimesh.Trimesh(vert, faces, vertex_colors=data['colors'], process=False)
                    mesh.apply_transform(rot)
                    mesh = pyrender.Mesh.from_trimesh(mesh, smooth=False)
                scene.add(mesh, data['name'])
            camera_pose = np.eye(4)
            camera = pyrender.camera.IntrinsicsCamera(fx=K[0, 0], fy=K[1, 1], cx=K[0, 2], cy=K[1, 2])
            scene.add(camera, pose=camera_pose)
            self.add_light(scene, camera=cam)
            # pyrender.Viewer(scene, use_raymond_lighting=True)
            rend_rgba, rend_depth, rend_cat = offscree_render(self.renderer, scene, img, self.flags)
            output_colors.append(rend_rgba)
            output_depths.append(rend_depth)
            output_images.append(rend_cat)
        res = None
        if ret_depth:
            res = output_depths
        elif ret_color:
            res = output_colors
        elif ret_mask:
            res = [val[:, :, 3] for val in output_colors]
        elif ret_image:
            res = output_images
        else:
            res = output_colors, output_depths, output_images
        return res
    def render_image(self, render_data, images, cameras, extra_mesh,
        **kwargs):
        """Convenience wrapper returning (colors, depths, composited images)."""
        return self.__call__(render_data, images, cameras, extra_mesh,
            ret_all=True, **kwargs)
def plot_meshes(img, meshes, K, R, T, mode='image'):
    """Render `meshes` into `img` with a single camera and return a view
    selected by `mode`: 'image' (composited over img), 'mask' (alpha
    channel), 'hstack' (input beside the raw render), or 'left' (render
    shifted right by 10% of the width, then composited).
    """
    renderer = Renderer()
    # out = (colors, depths, composited images), each a 1-element list
    out = renderer.render_image(meshes, img, {'K': K, 'R': R, 'T': T}, [])
    if mode == 'image':
        return out[2][0]
    elif mode == 'mask':
        return out[0][0][..., -1]
    elif mode == 'hstack':
        return np.hstack([img, out[0][0][:, :, :3]])
    elif mode == 'left':
        out = out[0][0]
        # shift the rendered layer horizontally, then paste opaque pixels
        rend_rgba = np.roll(out, out.shape[1]//10, axis=1)
        rend_cat = img.copy()
        rend_cat[rend_rgba[:,:,-1]==255] = rend_rgba[:,:,:3][rend_rgba[:,:,-1]==255]
        return rend_cat
13,161 | import pyrender
import numpy as np
import trimesh
import cv2
from .pyrender_flags import get_flags
from ..mytools.vis_base import get_rgb
# Named colors used by get_colors; the leading-underscore entries are a
# colorblind / print / copy safe palette.
colors_table = {
    '_blue': [0.65098039, 0.74117647, 0.85882353],
    '_pink': [.9, .7, .7],
    '_mint': [ 166/255., 229/255., 204/255.],
    '_mint2': [ 202/255., 229/255., 223/255.],
    '_green': [ 153/255., 216/255., 201/255.],
    '_green2': [ 171/255., 221/255., 164/255.],
    '_red': [ 251/255., 128/255., 114/255.],
    '_orange': [ 253/255., 174/255., 97/255.],
    '_yellow': [ 250/255., 230/255., 154/255.],
    'r':[255/255,0,0],
    'g':[0,255/255,0],
    'b':[0,0,255/255],
    'k':[0,0,0],
    'y':[255/255,255/255,0],
    'purple':[128/255,0,128/255]
}


def get_colors(pid):
    """Resolve a person/track identifier to an RGB(A) color.

    int   -> cycles through the module-level `colors` palette
             (NOTE(review): `colors` is defined elsewhere in this file — confirm)
    str   -> named entry of colors_table
    list/tuple -> used as-is; a 3-element RGB gets alpha 1. appended
    Other types fall through and return None.
    """
    if isinstance(pid, int):
        return colors[pid % len(colors)]
    if isinstance(pid, str):
        return colors_table[pid]
    if isinstance(pid, (list, tuple)):
        if len(pid) == 3:
            pid = (pid[0], pid[1], pid[2], 1.)
        assert len(pid) == 4
        return pid
from glob import glob
from os.path import join
import numpy as np
from ..mytools.file_utils import read_json
from ..mytools.debug_utils import log
from ..mytools.reader import read_keypoints3d, read_smpl
import os
from ..mytools.camera_utils import read_cameras, Undistort
import cv2
from ..mytools.vis_base import merge, plot_keypoints_auto
from ..config.baseconfig import load_object
from .geometry import load_sphere
def imwrite(imgname, img):
    """Write `img` to `imgname` with cv2, creating parent directories.

    Odd image dimensions are rounded down to even via a resize (some image
    and video encoders require even sizes).

    Fixes:
    - `os.makedirs(os.path.dirname(imgname))` crashed when imgname has no
      directory component (dirname is ''); now skipped.
    - exist_ok=True avoids a race when several processes write concurrently.
    """
    dirname = os.path.dirname(imgname)
    if dirname:
        os.makedirs(dirname, exist_ok=True)
    if img.shape[0] % 2 == 1 or img.shape[1] % 2 == 1:
        img = cv2.resize(img, (img.shape[1]//2*2, img.shape[0]//2*2))
    cv2.imwrite(imgname, img)
from glob import glob
from os.path import join
import numpy as np
from ..mytools.file_utils import read_json
from ..mytools.debug_utils import log
from ..mytools.reader import read_keypoints3d, read_smpl
import os
from ..mytools.camera_utils import read_cameras, Undistort
import cv2
from ..mytools.vis_base import merge, plot_keypoints_auto
from ..config.baseconfig import load_object
from .geometry import load_sphere
def compute_normals(vertices, faces):
    """Compute per-vertex normals as the (area-weighted) sum of incident
    face normals, normalized to unit length.

    Args:
        vertices: (V, 3) float array of positions.
        faces: (F, 3) int array of triangle vertex indices.
    Returns:
        (V, 3) array of unit normals. Vertices referenced by no face end up
        dividing zero by zero (NaN), same as the original.
    """
    edge_a = vertices[faces[:, 1]] - vertices[faces[:, 0]]
    edge_b = vertices[faces[:, 2]] - vertices[faces[:, 0]]
    face_normal = np.cross(edge_a, edge_b)
    vertex_normal = np.zeros_like(vertices)
    # scatter each face normal onto its three corners
    for corner in range(3):
        vertex_normal[faces[:, corner]] += face_normal
    return vertex_normal / np.linalg.norm(vertex_normal, axis=-1, keepdims=True)
def get_dilation_of_mesh(delta):
    """Return a callback that inflates a mesh by `delta` along vertex normals.

    The callback mutates info['vertices'] IN PLACE (matching the original
    in-place `+=`) and returns the same info dict.
    """
    def dilate(info):
        offsets = delta * compute_normals(info['vertices'], info['faces'])
        info['vertices'] += offsets
        return info
    return dilate
from pyrender import RenderFlags
# Default render options consumed by get_flags(); callers override any subset.
render_flags_default = {
    'flip_wireframe': False,
    'all_wireframe': False,
    'all_solid': True,
    'shadows': False, # TODO:bug exists in shadow mode
    'vertex_normals': False,
    'face_normals': False,
    'cull_faces': True, # set to False to also render back faces
    'point_size': 1.0,
    'rgba':True  # request an alpha channel in the render output
}
def get_flags(flags):
    """Translate a dict of render options into a pyrender RenderFlags bitmask.

    Options missing from `flags` fall back to render_flags_default.
    """
    opts = render_flags_default.copy()
    opts.update(flags)
    mask = RenderFlags.NONE
    # the three wireframe/solid modes are mutually exclusive; first match wins
    if opts['flip_wireframe']:
        mask |= RenderFlags.FLIP_WIREFRAME
    elif opts['all_wireframe']:
        mask |= RenderFlags.ALL_WIREFRAME
    elif opts['all_solid']:
        mask |= RenderFlags.ALL_SOLID
    if opts['shadows']:
        mask |= RenderFlags.SHADOWS_DIRECTIONAL | RenderFlags.SHADOWS_SPOT
    if opts['vertex_normals']:
        mask |= RenderFlags.VERTEX_NORMALS
    if opts['face_normals']:
        mask |= RenderFlags.FACE_NORMALS
    if not opts['cull_faces']:
        mask |= RenderFlags.SKIP_CULL_FACES
    if opts['rgba']:
        mask |= RenderFlags.RGBA
    return mask
import open3d as o3d
import numpy as np
from .geometry import create_ground as create_ground_
from .geometry import create_point as create_point_
from .geometry import create_line as create_line_
from os.path import join
def _create_cylinder():
    """Placeholder for cylinder-primitive creation (not implemented)."""
    # intended open3d call:
    # create_cylinder(radius=1.0, height=2.0, resolution=20, split=4, create_uv_map=False)
    pass
import open3d as o3d
import numpy as np
from .geometry import create_ground as create_ground_
from .geometry import create_point as create_point_
from .geometry import create_line as create_line_
from os.path import join
# short alias for the open3d mesh loader
load_mesh = o3d.io.read_triangle_mesh


def read_mesh(filename):
    """Load a triangle mesh from disk and compute its vertex normals."""
    tri_mesh = load_mesh(filename)
    tri_mesh.compute_vertex_normals()
    return tri_mesh
import open3d as o3d
import numpy as np
from .geometry import create_ground as create_ground_
from .geometry import create_point as create_point_
from .geometry import create_line as create_line_
from os.path import join
# short alias for open3d's float-vector container
Vector3dVector = o3d.utility.Vector3dVector


def create_pcd(xyz, color=None, colors=None):
    """Build an open3d PointCloud from an (N, >=3) array.

    color paints every point the same color; colors sets per-point colors.
    """
    cloud = o3d.geometry.PointCloud()
    cloud.points = Vector3dVector(xyz[:, :3])
    if color is not None:
        cloud.paint_uniform_color(color)
    if colors is not None:
        cloud.colors = Vector3dVector(colors)
    return cloud
import open3d as o3d
import numpy as np
from .geometry import create_ground as create_ground_
from .geometry import create_point as create_point_
from .geometry import create_line as create_line_
from os.path import join
def create_mesh(vertices, faces, colors=None, normal=True, **kwargs):
    """Build an open3d TriangleMesh from raw arrays.

    colors: per-vertex (N, 3) ndarray, a uniform RGB list, or None for the
    default skin-like pink. Extra kwargs are accepted and ignored.
    NOTE(review): relies on module-level TriangleMesh / Vector3dVector /
    Vector3iVector aliases defined elsewhere in this file.
    """
    mesh = TriangleMesh()
    mesh.vertices = Vector3dVector(vertices)
    mesh.triangles = Vector3iVector(faces)
    if isinstance(colors, np.ndarray):
        mesh.vertex_colors = Vector3dVector(colors)
    elif isinstance(colors, list):
        mesh.paint_uniform_color(colors)
    else:
        mesh.paint_uniform_color([1., 0.8, 0.8])
    if normal:
        mesh.compute_vertex_normals()
    return mesh
def create_point(**kwargs):
    """Sphere marker mesh: geometry from create_point_, converted by create_mesh."""
    geometry = create_point_(**kwargs)
    return create_mesh(**geometry)
import open3d as o3d
import numpy as np
from .geometry import create_ground as create_ground_
from .geometry import create_point as create_point_
from .geometry import create_line as create_line_
from os.path import join
def create_mesh(vertices, faces, colors=None, normal=True, **kwargs):
    """Build an open3d TriangleMesh.

    Args:
        vertices: (N, 3) array of positions.
        faces: (F, 3) array of triangle indices.
        colors: per-vertex (N, 3) ndarray, uniform RGB list, or None for the
            default pink.
        normal: when True, compute vertex normals for shading.
    NOTE(review): TriangleMesh / Vector3dVector / Vector3iVector are
    module-level aliases defined elsewhere in this file — confirm in scope.
    """
    mesh = TriangleMesh()
    mesh.vertices = Vector3dVector(vertices)
    mesh.triangles = Vector3iVector(faces)
    if colors is not None and isinstance(colors, np.ndarray):
        mesh.vertex_colors = Vector3dVector(colors)
    elif colors is not None and isinstance(colors, list):
        mesh.paint_uniform_color(colors)
    else:
        # default skin-like color
        mesh.paint_uniform_color([1., 0.8, 0.8])
    if normal:
        mesh.compute_vertex_normals()
    return mesh
def create_ground(**kwargs):
    """Checkerboard ground plane as an open3d mesh."""
    return create_mesh(**create_ground_(**kwargs))
import open3d as o3d
import numpy as np
from .geometry import create_ground as create_ground_
from .geometry import create_point as create_point_
from .geometry import create_line as create_line_
from os.path import join
# short alias for the open3d triangle-mesh class
TriangleMesh = o3d.geometry.TriangleMesh
def create_coord(camera = [0,0,0], radius=1, scale=1):
    # XYZ coordinate-frame mesh located at `camera`, with axis length `radius`.
    camera_frame = TriangleMesh.create_coordinate_frame(
        size=radius, origin=camera)
    if scale != 1:
        # NOTE(review): newer open3d versions require scale(factor, center);
        # confirm the installed version accepts the single-argument form.
        camera_frame.scale(scale)
    return camera_frame
import open3d as o3d
import numpy as np
from .geometry import create_ground as create_ground_
from .geometry import create_point as create_point_
from .geometry import create_line as create_line_
from os.path import join
def create_bbox(min_bound=(-3., -3., 0), max_bound=(3., 3., 2), flip=False):
    """Create a black axis-aligned open3d bounding box.

    Args:
        min_bound, max_bound: xyz extents of the box (tuple or list).
        flip: mirror the box through the y and z axes (negate and swap the
            y/z extents) for datasets with inverted axis conventions.

    Bug fix: the defaults are tuples and tuple has no .copy(); calling with
    flip=True and the default bounds raised AttributeError. Copy via list()
    so any sequence type works.
    """
    if flip:
        lo = list(min_bound)
        hi = list(max_bound)
        min_bound = [lo[0], -hi[1], -hi[2]]
        max_bound = [hi[0], -lo[1], -lo[2]]
    bbox = o3d.geometry.AxisAlignedBoundingBox(min_bound, max_bound)
    bbox.color = [0., 0., 0.]
    return bbox
import open3d as o3d
import numpy as np
from .geometry import create_ground as create_ground_
from .geometry import create_point as create_point_
from .geometry import create_line as create_line_
from os.path import join
def create_line(**kwargs):
    """Line-segment mesh: geometry from create_line_, converted by create_mesh."""
    return create_mesh(**create_line_(**kwargs))
def get_bound_corners(bounds):
    """Return the 8 corners of an axis-aligned box.

    Args:
        bounds: (2, 3) array-like; row 0 is the min xyz, row 1 the max xyz.
    Returns:
        (8, 3) array; corners enumerate min/max choices in binary order with
        x varying slowest (000, 001, ..., 111 over min/max per axis).
    """
    lo, hi = bounds[0], bounds[1]
    corners = [[x, y, z]
               for x in (lo[0], hi[0])
               for y in (lo[1], hi[1])
               for z in (lo[2], hi[2])]
    return np.array(corners)
def create_line(start, end, r=0.01, col=None):
    """Cylinder mesh data spanning the segment start -> end with radius r.

    Returns a dict holding 'vertices', 'faces', 'name' and, when `col` is
    given, per-vertex 'colors' repeated from the single RGB value.
    NOTE(review): depends on module-level load_cylinder / calRot helpers
    defined elsewhere in this file.
    """
    segment = end - start
    half_length = np.linalg.norm(segment[:3]) / 2
    vertices, faces = load_cylinder()
    # unit cylinder: scale the cross-section by r and the height to the segment
    vertices[:, :2] *= r
    vertices[:, 2] *= half_length
    # rotate +z onto the segment direction, then translate to the midpoint
    rotmat = calRot(np.array([0, 0, 1]), segment)
    vertices = vertices @ rotmat.T + (start + end) / 2
    out = {'vertices': vertices, 'faces': faces, 'name': 'line'}
    if col is not None:
        out['colors'] = col.reshape(-1, 3).repeat(vertices.shape[0], 0)
    return out
def create_rt_bbox(rtbbox):
    """Wireframe edges of an oriented bounding box (rtbbox exposes aabb, R, T).

    NOTE(review): the nearest `create_line` definition in this file returns a
    plain dict, which has no paint_uniform_color method — this function likely
    expects the open3d-mesh variant of create_line; confirm which one is in
    scope before use.
    """
    # box-frame corners, rotated/translated into world coordinates
    corners = get_bound_corners(rtbbox.aabb)
    corners = corners @ rtbbox.R.T + rtbbox.T
    lines = []
    # the 12 edges of a box as index pairs into the 8 corners
    for (i, j) in [(0, 1), (0, 2), (2, 3), (3, 1),
                   (4, 5), (4, 6), (6, 7), (5, 7),
                   (0, 4), (2, 6), (1, 5), (3, 7)]:
        line = create_line(start=corners[i], end=corners[j], r=0.001)
        line.paint_uniform_color([0., 0., 0.])
        lines.append(line)
    return lines
import open3d as o3d
import numpy as np
from .geometry import create_ground as create_ground_
from .geometry import create_point as create_point_
from .geometry import create_line as create_line_
from os.path import join
def create_my_bbox(min_bound=(-3., -3., 0), max_bound=(3., 3., 2)):
    """Axis-aligned open3d bounding box.

    (Original comment noted an intent to build the box from cylinder meshes;
    that was never implemented.)
    """
    return o3d.geometry.AxisAlignedBoundingBox(min_bound, max_bound)
import open3d as o3d
import numpy as np
from .geometry import create_ground as create_ground_
from .geometry import create_point as create_point_
from .geometry import create_line as create_line_
from os.path import join
def create_mesh(vertices, faces, colors=None, normal=True, **kwargs):
    """Build an open3d TriangleMesh (duplicate of the helper defined earlier
    in this file; kept byte-identical).

    colors: per-vertex (N, 3) ndarray, uniform RGB list, or None for the
    default pink. Extra kwargs are ignored.
    """
    mesh = TriangleMesh()
    mesh.vertices = Vector3dVector(vertices)
    mesh.triangles = Vector3iVector(faces)
    if colors is not None and isinstance(colors, np.ndarray):
        mesh.vertex_colors = Vector3dVector(colors)
    elif colors is not None and isinstance(colors, list):
        mesh.paint_uniform_color(colors)
    else:
        # default skin-like color
        mesh.paint_uniform_color([1., 0.8, 0.8])
    if normal:
        mesh.compute_vertex_normals()
    return mesh
def read_cameras(path, intri='intri.yml', extri='extri.yml', subs=[]):
    # Load EasyMocap-style camera calibration from intri/extri YAML files.
    # NOTE(review): relies on a module-level `read_camera` (singular) helper
    # that is not visible in this fragment — confirm it is imported/defined.
    # The mutable default `subs=[]` is safe only because it is never mutated.
    cameras = read_camera(join(path, intri), join(path, extri))
    cameras.pop('basenames')  # drop the bookkeeping key, keep per-view entries
    if len(subs) > 0:
        # keep only the requested views — presumably each entry supports
        # .astype (fails if entries are dicts); TODO confirm against read_camera
        cameras = {key:cameras[key].astype(np.float32) for key in subs}
    return cameras
def create_cameras(cameras):
    """Build one merged mesh with a small camera-body model at every camera pose.

    Args:
        cameras: dict name -> {'R': (3, 3), 'T': (3, 1)} world-to-camera
            extrinsics (presumably numpy arrays — confirm against caller).
    Returns:
        The dict produced by merge_meshes for all per-camera meshes.
    NOTE(review): `merge_meshes` is not defined in this fragment — confirm it
    exists at module level.
    """
    # hard-coded camera-body + lens-barrel vertex data, scaled to half size
    vertex = np.array([[0.203982,0.061435,0.00717595],[-0.116019,0.061435,0.00717595],[-0.116019,-0.178565,0.00717595],[0.203982,-0.178565,0.00717595],[0.203982,0.061435,-0.092824],[-0.116019,0.061435,-0.092824],[-0.116019,-0.178565,-0.092824],[0.203982,-0.178565,-0.092824],[0.131154,-0.0361827,0.00717595],[0.131154,-0.0361827,0.092176],[0.122849,-0.015207,0.00717595],[0.122849,-0.015207,0.092176],[0.109589,0.00304419,0.00717595],[0.109589,0.00304419,0.092176],[0.092206,0.0174247,0.00717595],[0.092206,0.0174247,0.092176],[0.071793,0.0270302,0.00717595],[0.071793,0.0270302,0.092176],[0.0496327,0.0312577,0.00717595],[0.0496327,0.0312577,0.092176],[0.0271172,0.0298412,0.00717595],[0.0271172,0.0298412,0.092176],[0.00566135,0.0228697,0.00717595],[0.00566135,0.0228697,0.092176],[-0.0133865,0.0107812,0.00717595],[-0.0133865,0.0107812,0.092176],[-0.02883,-0.0056643,0.00717595],[-0.02883,-0.0056643,0.092176],[-0.0396985,-0.0254336,0.00717595],[-0.0396985,-0.0254336,0.092176],[-0.045309,-0.0472848,0.00717595],[-0.045309,-0.0472848,0.092176],[-0.045309,-0.069845,0.00717595],[-0.045309,-0.069845,0.092176],[-0.0396985,-0.091696,0.00717595],[-0.0396985,-0.091696,0.092176],[-0.02883,-0.111466,0.00717595],[-0.02883,-0.111466,0.092176],[-0.0133865,-0.127911,0.00717595],[-0.0133865,-0.127911,0.092176],[0.00566135,-0.14,0.00717595],[0.00566135,-0.14,0.092176],[0.0271172,-0.146971,0.00717595],[0.0271172,-0.146971,0.092176],[0.0496327,-0.148388,0.00717595],[0.0496327,-0.148388,0.092176],[0.071793,-0.14416,0.00717595],[0.071793,-0.14416,0.092176],[0.092206,-0.134554,0.00717595],[0.092206,-0.134554,0.092176],[0.109589,-0.120174,0.00717595],[0.109589,-0.120174,0.092176],[0.122849,-0.101923,0.00717595],[0.122849,-0.101923,0.092176],[0.131154,-0.080947,0.00717595],[0.131154,-0.080947,0.092176],[0.133982,-0.058565,0.00717595],[0.133982,-0.058565,0.092176],[-0.0074325,0.061435,-0.0372285],[-0.0074325,0.074435,-0.0372285],[-0.0115845,0.061435,-0.0319846],[-0.0115845,0.074435,-0.0319846],[-0.018215,
0.061435,-0.0274218],[-0.018215,0.074435,-0.0274218],[-0.0269065,0.061435,-0.0238267],[-0.0269065,0.074435,-0.0238267],[-0.0371125,0.061435,-0.0214253],[-0.0371125,0.074435,-0.0214253],[-0.048193,0.061435,-0.0203685],[-0.048193,0.074435,-0.0203685],[-0.0594505,0.061435,-0.0207226],[-0.0594505,0.074435,-0.0207226],[-0.0701785,0.061435,-0.0224655],[-0.0701785,0.074435,-0.0224655],[-0.0797025,0.061435,-0.0254875],[-0.0797025,0.074435,-0.0254875],[-0.0874245,0.061435,-0.0295989],[-0.0874245,0.074435,-0.0295989],[-0.0928585,0.061435,-0.0345412],[-0.0928585,0.074435,-0.0345412],[-0.0956635,0.061435,-0.040004],[-0.0956635,0.074435,-0.040004],[-0.0956635,0.061435,-0.045644],[-0.0956635,0.074435,-0.045644],[-0.0928585,0.061435,-0.051107],[-0.0928585,0.074435,-0.051107],[-0.0874245,0.061435,-0.056049],[-0.0874245,0.074435,-0.056049],[-0.0797025,0.061435,-0.0601605],[-0.0797025,0.074435,-0.0601605],[-0.0701785,0.061435,-0.0631825],[-0.0701785,0.074435,-0.0631825],[-0.0594505,0.061435,-0.0649255],[-0.0594505,0.074435,-0.0649255],[-0.048193,0.061435,-0.0652795],[-0.048193,0.074435,-0.0652795],[-0.0371125,0.061435,-0.064223],[-0.0371125,0.074435,-0.064223],[-0.0269065,0.061435,-0.0618215],[-0.0269065,0.074435,-0.0618215],[-0.018215,0.061435,-0.0582265],[-0.018215,0.074435,-0.0582265],[-0.0115845,0.061435,-0.0536635],[-0.0115845,0.074435,-0.0536635],[-0.0074325,0.061435,-0.0484195],[-0.0074325,0.074435,-0.0484195],[-0.0060185,0.061435,-0.0428241],[-0.0060185,0.074435,-0.0428241]])*0.5
    # triangle indices (1-based in the raw data)
    tri = [[4,3,2],[1,4,2],[6,1,2],[6,5,1],[8,4,1],[5,8,1],[3,7,2],[7,6,2],[4,7,3],[8,7,4],[6,7,5],[7,8,5],[43,42,44],[42,43,41],[43,46,45],[46,43,44],[58,9,57],[9,58,10],[55,58,57],[56,58,55],[53,54,55],[54,56,55],[12,11,9],[12,9,10],[21,20,22],[20,21,19],[34,33,32],[32,33,31],[35,36,37],[37,36,38],[33,36,35],[36,33,34],[29,30,31],[30,32,31],[40,39,37],[40,37,38],[39,40,41],[40,42,41],[47,48,49],[49,48,50],[48,47,45],[46,48,45],[49,52,51],[52,49,50],[52,53,51],[52,54,53],[14,15,13],[15,14,16],[11,14,13],[12,14,11],[18,17,15],[18,15,16],[17,18,19],[18,20,19],[27,35,37],[17,27,15],[27,53,55],[27,49,51],[11,27,9],[27,47,49],[27,33,35],[23,27,21],[27,39,41],[27,55,57],[9,27,57],[15,27,13],[39,27,37],[47,27,45],[53,27,51],[27,11,13],[43,27,41],[27,29,31],[27,43,45],[27,17,19],[21,27,19],[33,27,31],[27,23,25],[23,24,25],[25,24,26],[24,21,22],[24,23,21],[28,36,34],[42,28,44],[28,58,56],[54,28,56],[52,28,54],[28,34,32],[28,46,44],[18,28,20],[20,28,22],[30,28,32],[40,28,42],[58,28,10],[28,48,46],[28,12,10],[28,14,12],[36,28,38],[28,24,22],[28,40,38],[48,28,50],[28,52,50],[14,28,16],[28,18,16],[24,28,26],[28,27,25],[28,25,26],[28,30,29],[27,28,29],[108,59,60],[59,108,107],[62,59,61],[59,62,60],[103,102,101],[102,103,104],[64,61,63],[64,62,61],[70,67,69],[67,70,68],[70,71,72],[71,70,69],[83,84,82],[83,82,81],[86,85,87],[86,87,88],[86,83,85],[83,86,84],[77,78,75],[75,78,76],[105,106,103],[103,106,104],[108,106,107],[106,105,107],[97,96,95],[96,97,98],[96,93,95],[93,96,94],[93,92,91],[92,93,94],[79,105,103],[59,79,61],[79,93,91],[83,79,85],[85,79,87],[61,79,63],[79,103,101],[65,79,67],[79,99,97],[89,79,91],[79,77,75],[79,59,107],[67,79,69],[79,89,87],[79,73,71],[105,79,107],[79,97,95],[79,71,69],[79,83,81],[99,79,101],[93,79,95],[79,65,63],[73,79,75],[99,100,97],[97,100,98],[102,100,101],[100,99,101],[89,90,87],[87,90,88],[90,89,91],[92,90,91],[66,67,68],[66,65,67],[66,64,63],[65,66,63],[74,75,76],[74,73,75],[71,74,72],[73,74,71],[80,106,108],[74,80,72],[86,80,84],[84,80,82],[64,80
,62],[80,108,60],[80,100,102],[62,80,60],[66,80,64],[80,70,72],[80,102,104],[96,80,94],[80,90,92],[70,80,68],[80,86,88],[78,80,76],[106,80,104],[80,96,98],[80,92,94],[100,80,98],[90,80,88],[80,66,68],[80,74,76],[82,80,81],[80,79,81],[80,78,77],[79,80,77]]
    # flip the winding order so the face normals point outward
    tri = [a[::-1] for a in tri]
    triangles = np.array(tri) - 1
    meshes = []
    for nv, (key, camera) in enumerate(cameras.items()):
        # camera-space model -> world coordinates: R^T (v - T)
        vertices = (camera['R'].T @ (vertex.T - camera['T'])).T
        meshes.append({
            'vertices': vertices, 'faces': triangles, 'name': 'camera_{}'.format(nv), 'vid': nv
        })
    meshes = merge_meshes(meshes)
    return meshes
def create_camera(path=None, cameras=None):
    """Open3d mesh visualizing all cameras.

    Reads calibration from `path` when `cameras` is not supplied.
    """
    if cameras is None:
        from ..mytools.camera_utils import read_cameras
        cameras = read_cameras(path)
    from .geometry import create_cameras
    return create_mesh(**create_cameras(cameras))
import open3d as o3d
import numpy as np
from .geometry import create_ground as create_ground_
from .geometry import create_point as create_point_
from .geometry import create_line as create_line_
from os.path import join
# short aliases for the open3d loader and viewer
load_mesh = o3d.io.read_triangle_mesh
vis = o3d.visualization.draw_geometries


def read_and_vis(filename):
    """Load a mesh, compute its vertex normals, and show it in a viewer window."""
    mesh = load_mesh(filename)
    mesh.compute_vertex_normals()
    vis([mesh])
def get_ext(mode):
    """Return the output file extension for a render mode.

    Depth/mask/instance modes are saved lossless as '.png'; everything else
    (image, color, blend, side, and unknown modes) as '.jpg'.
    """
    png_modes = {'depth', 'mask', 'instance', 'instance-mask',
                 'instance-depth', 'instance-depth-twoside'}
    return '.png' if mode in png_modes else '.jpg'
def get_render_func(mode, backend='pyrender'):
    """Build a renderer for `backend` and return the render function for `mode`.

    Only the 'pyrender' backend is implemented.
    NOTE(review): RenderFunc is expected at module level — confirm in scope.
    """
    if backend != 'pyrender':
        raise NotImplementedError
    from .pyrender_wrapper import Renderer
    return RenderFunc(Renderer()).factory(mode)
def get_ext(mode):
    """File extension for a render mode: '.png' for depth/mask/instance
    modes, '.jpg' otherwise (duplicate helper, kept byte-identical)."""
    ext = {'image': '.jpg', 'color':'.jpg', 'blend': '.jpg',
        'depth':'.png', 'mask':'.png', 'instance':'.png',
        'instance-mask': '.png', 'instance-depth': '.png', 'instance-depth-twoside': '.png',
        'side': '.jpg'
    }.get(mode, '.jpg')
    return ext
13,177 | None:
self.render = render
self.position = {}
def factory(self, mode):
if mode == 'image':
return self.render_image
elif mode == 'color':
return self.render_color
elif mode == 'depth':
return self.render_depth
elif mode == 'corner':
return self.render_corner
elif mode == 'mask':
return self.render_mask
elif mode == 'instance-mask':
return self.render_mask
elif mode.startswith('instance-depth'):
return self.render_depth
def get_ext(mode):
    """Map a render mode to its save extension ('.png' for lossless
    depth/mask/instance outputs, '.jpg' for everything else)."""
    ext = {'image': '.jpg', 'color':'.jpg', 'blend': '.jpg',
        'depth':'.png', 'mask':'.png', 'instance':'.png',
        'instance-mask': '.png', 'instance-depth': '.png', 'instance-depth-twoside': '.png',
        'side': '.jpg'
    }.get(mode, '.jpg')
    return ext
def get_render_func(mode, backend='pyrender'):
    """Construct a backend renderer and return the render function for `mode`.

    NOTE(review): RenderFunc is not defined in this fragment — confirm it is
    available at module level.
    """
    if backend == 'pyrender':
        from .pyrender_wrapper import Renderer
        render = Renderer()
    else:
        raise NotImplementedError
    renderer = RenderFunc(render)
    return renderer.factory(mode)
class Renderer:
    """Offscreen pyrender renderer.

    Draws per-person meshes (and optional extra trimesh objects) into
    calibrated camera views and composites them over the input images via
    the module-level `offscree_render` helper.
    """

    def __init__(self, bg_color=None, ambient_light=None, flags={}) -> None:
        """Args:
            bg_color: RGBA background, default transparent white.
            ambient_light: RGB ambient term, default mid gray.
            flags: overrides merged into render_flags_default by get_flags.

        Fix: list defaults replaced by None sentinels to avoid the shared
        mutable-default pitfall (values are unchanged).
        """
        self.bg_color = [1.0, 1.0, 1.0, 0.0] if bg_color is None else bg_color
        self.ambient_light = [0.5, 0.5, 0.5] if ambient_light is None else ambient_light
        # viewport size is adjusted per image inside __call__
        self.renderer = pyrender.OffscreenRenderer(1024, 1024)
        self.flags = get_flags(flags)

    def add_light(self, scene, camera=None):
        """Add two directional lights (forward and x-flipped) to the scene.

        Bug fix: the original signature was `add_light(scene, camera=None)`
        without `self`; calling `self.add_light(scene, camera=cam)` raised
        TypeError (multiple values for 'camera'). `camera` is accepted but
        currently unused (kept for the commented camera-aligned variant).
        """
        light = pyrender.DirectionalLight(color=[1.0, 1.0, 1.0], intensity=3)
        # the position of a directional light does not affect the result,
        # only its orientation
        scene.add(light, pose=np.eye(4))
        light_z = np.eye(4)
        light_z[:3, :3] = cv2.Rodrigues(np.array([np.pi, 0, 0]))[0]
        # if camera is not None:
        #     light_z[:3, :3] = camera['R'] @ light_z[:3, :3]
        scene.add(light, pose=light_z)

    def __call__(self, render_data, images, cameras, extra_mesh=[],
        ret_image=False, ret_depth=False, ret_color=False, ret_mask=False, ret_all=True):
        """Render every view.

        Args:
            render_data: dict id -> {'vertices', 'faces', 'name', and either
                'colors' (per-vertex) or an optional 'vid' used to pick a
                uniform color; optional 'smooth'}.
            images: one image or a list of per-view images.
            cameras: one dict or a list of dicts with 'K', 'R', 'T'.
            extra_mesh: trimesh objects already in world coordinates.
        Returns:
            Depending on the first ret_* flag set: depths, colors, alpha
            masks, or composited images; otherwise the full
            (colors, depths, images) tuple.
        """
        if isinstance(images, np.ndarray) and isinstance(cameras, dict):
            images, cameras = [images], [cameras]
        assert isinstance(cameras, list)
        # pyrender cameras look down -z with y up: flip 180 degrees around x
        rot = trimesh.transformations.rotation_matrix(
            np.radians(180), [1, 0, 0])
        output_images, output_colors, output_depths = [], [], []
        for nv, img in enumerate(images):
            cam = cameras[nv]
            K, R, T = cam['K'], cam['R'], cam['T']
            self.renderer.viewport_height = img.shape[0]
            self.renderer.viewport_width = img.shape[1]
            scene = pyrender.Scene(bg_color=self.bg_color,
                                   ambient_light=self.ambient_light)
            for iextra, _mesh in enumerate(extra_mesh):
                mesh = _mesh.copy()
                # world -> camera, then the pyrender axis flip
                trans_cam = np.eye(4)
                trans_cam[:3, :3] = R
                trans_cam[:3, 3:] = T
                mesh.apply_transform(trans_cam)
                mesh.apply_transform(rot)
                scene.add(pyrender.Mesh.from_trimesh(mesh), 'extra{}'.format(iextra))
            for trackId, data in render_data.items():
                vert = data['vertices'].copy()
                faces = data['faces']
                # world -> camera coordinates
                vert = vert @ R.T + T.T
                if 'colors' not in data.keys():
                    # use the 'vid' key for the color when present,
                    # otherwise fall back to the track id
                    col = get_colors(data.get('vid', trackId))
                    mesh = trimesh.Trimesh(vert, faces, process=False)
                    mesh.apply_transform(rot)
                    material = pyrender.MetallicRoughnessMaterial(
                        metallicFactor=0.0,
                        roughnessFactor=0.0,
                        alphaMode='OPAQUE',
                        baseColorFactor=col)
                    mesh = pyrender.Mesh.from_trimesh(mesh, material=material, smooth=data.get('smooth', True))
                else:
                    mesh = trimesh.Trimesh(vert, faces, vertex_colors=data['colors'], process=False)
                    mesh.apply_transform(rot)
                    mesh = pyrender.Mesh.from_trimesh(mesh, smooth=False)
                scene.add(mesh, data['name'])
            camera_pose = np.eye(4)
            camera = pyrender.camera.IntrinsicsCamera(fx=K[0, 0], fy=K[1, 1], cx=K[0, 2], cy=K[1, 2])
            scene.add(camera, pose=camera_pose)
            self.add_light(scene, camera=cam)
            rend_rgba, rend_depth, rend_cat = offscree_render(self.renderer, scene, img, self.flags)
            output_colors.append(rend_rgba)
            output_depths.append(rend_depth)
            output_images.append(rend_cat)
        if ret_depth:
            return output_depths
        if ret_color:
            return output_colors
        if ret_mask:
            # the alpha channel of the RGBA render acts as the foreground mask
            return [val[:, :, 3] for val in output_colors]
        if ret_image:
            return output_images
        return output_colors, output_depths, output_images

    def render_image(self, render_data, images, cameras, extra_mesh,
                     **kwargs):
        """Convenience wrapper: render with ret_all=True (full tuple)."""
        return self.__call__(render_data, images, cameras, extra_mesh,
                             ret_all=True, **kwargs)
55, 87/255), # yellow
(74/255., 189/255., 172/255.), # green
(8/255, 76/255, 97/255), # blue
(219/255, 58/255, 52/255), # red
(77/255, 40/255, 49/255), # brown
]
def get_render_func(mode, backend='pyrender'):
    """Construct a backend renderer and return the render function for `mode`
    (duplicate helper, kept byte-identical).

    NOTE(review): RenderFunc must be available at module level — confirm.
    """
    if backend == 'pyrender':
        from .pyrender_wrapper import Renderer
        render = Renderer()
    else:
        raise NotImplementedError
    renderer = RenderFunc(render)
    return renderer.factory(mode)
from easymocap.config.baseconfig import load_object
import torch
def make_data_sampler(cfg, dataset, shuffle, is_distributed, is_train):
    """Choose a torch data sampler for `dataset`.

    Evaluation with cfg.test.sampler == 'FrameSampler' uses the project
    FrameSampler; distributed runs use DistributedSampler; otherwise a plain
    Random/Sequential sampler depending on `shuffle`.
    """
    if not is_train and cfg.test.sampler == 'FrameSampler':
        from .samplers import FrameSampler
        return FrameSampler(dataset)
    if is_distributed:
        from .samplers import DistributedSampler
        return DistributedSampler(dataset, shuffle=shuffle)
    sampler_cls = (torch.utils.data.sampler.RandomSampler if shuffle
                   else torch.utils.data.sampler.SequentialSampler)
    return sampler_cls(dataset)
def make_batch_data_sampler(cfg, sampler, batch_size, drop_last, max_iter,
                            is_train):
    """Wrap `sampler` in a batch sampler chosen from the config.

    'default' builds a plain BatchSampler; 'image_size' is not implemented.
    When max_iter != -1 the batches are additionally capped by
    IterationBasedBatchSampler.
    """
    batch_sampler = cfg.train.batch_sampler if is_train else cfg.test.batch_sampler
    if batch_sampler == 'default':
        batch_sampler = torch.utils.data.sampler.BatchSampler(
            sampler, batch_size, drop_last)
    elif batch_sampler == 'image_size':
        raise NotImplementedError
    if max_iter != -1:
        from .samplers import IterationBasedBatchSampler
        batch_sampler = IterationBasedBatchSampler(batch_sampler, max_iter)
    return batch_sampler
def worker_init_fn(worker_id):
    """DataLoader worker-init hook; currently a deliberate no-op.

    The time-based numpy seeding below was disabled on purpose and is kept
    as documentation of the previous behavior.
    """
    import numpy as np
    import time
    # np.random.seed(worker_id + (int(round(time.time() * 1000) % (2**16))))
def make_collator(cfg, is_train):
    """Return the collate function named in the config.

    The custom-collator registry is currently empty, so every name falls
    back to torch's default_collate.
    """
    from torch.utils.data.dataloader import default_collate
    _collators = {
    }
    name = cfg.train.collator if is_train else cfg.test.collator
    return _collators.get(name, default_collate)
def load_object(module_name, module_args, **extra_args):
    """Instantiate `pkg.mod.Name` with the merged keyword arguments.

    Bug fix: the original used `importlib` without importing it, raising
    NameError unless some other module had leaked it into this namespace.

    Args:
        module_name: dotted path whose last component is the class/function.
        module_args: mapping of kwargs (applied after extra_args, as before).
        **extra_args: additional kwargs passed first.
    Returns:
        The constructed object.
    """
    import importlib
    module_path = '.'.join(module_name.split('.')[:-1])
    module = importlib.import_module(module_path)
    name = module_name.split('.')[-1]
    obj = getattr(module, name)(**extra_args, **module_args)
    return obj
def Dataloader(cfg, split='train', is_train=True, start=0):
    """Construct a torch DataLoader for the requested split.

    `split` selects which cfg.data_*_module / cfg.data_*_args pair to
    instantiate; `is_train` switches to training-time batching (shuffle and
    an ep_iter-bounded batch sampler). `start` is accepted but unused here —
    presumably kept for API compatibility; confirm with callers.
    """
    is_distributed = cfg.distributed
    if split == 'train' and is_train:
        batch_size = cfg.train.batch_size
        max_iter = cfg.train.ep_iter
        # shuffle = True
        shuffle = cfg.train.shuffle
        drop_last = False
    else:
        batch_size = cfg.test.batch_size
        # distributed evaluation shuffles so each rank sees a spread of data
        shuffle = True if is_distributed else False
        drop_last = False
        max_iter = -1
    if split == 'train' and is_train:
        dataset = load_object(cfg.data_train_module, cfg.data_train_args)
    elif split == 'train' and not is_train:
        # evaluating on the train set: flip the split flag
        # NOTE(review): this mutates cfg in place for subsequent callers
        cfg.data_train_args.split = 'test'
        dataset = load_object(cfg.data_train_module, cfg.data_train_args)
    elif split in ['test', 'val']:
        dataset = load_object(cfg.data_val_module, cfg.data_val_args)
    elif split == 'demo':
        dataset = load_object(cfg.data_demo_module, cfg.data_demo_args)
    elif split == 'mesh':
        dataset = load_object(cfg.data_mesh_module, cfg.data_mesh_args)
    else:
        raise NotImplementedError
    # from here on, "training mode" requires both the train split and is_train
    is_train = (split == 'train') and is_train
    sampler = make_data_sampler(cfg, dataset, shuffle, is_distributed, is_train)
    batch_sampler = make_batch_data_sampler(cfg, sampler, batch_size,
                                            drop_last, max_iter, is_train)
    num_workers = cfg.train.num_workers if is_train else cfg.test.num_workers
    collator = make_collator(cfg, is_train)
    data_loader = torch.utils.data.DataLoader(dataset,
                                              batch_sampler=batch_sampler,
                                              num_workers=num_workers,
                                              collate_fn=collator,
                                              worker_init_fn=worker_init_fn)
    return data_loader
import torch
from collections import Counter
from bisect import bisect_right
class MultiStepLR(torch.optim.lr_scheduler._LRScheduler):
    """Multiply each param group's lr by gamma at every milestone epoch.

    Repeated milestones apply gamma once per occurrence (Counter counts).
    """

    def __init__(self, optimizer, milestones, gamma=0.1, last_epoch=-1):
        self.milestones = Counter(milestones)
        self.gamma = gamma
        super().__init__(optimizer, last_epoch)

    def get_lr(self):
        hits = self.milestones.get(self.last_epoch, 0)
        if hits == 0:
            # not a milestone: keep the current rates
            return [group['lr'] for group in self.optimizer.param_groups]
        return [group['lr'] * self.gamma ** hits
                for group in self.optimizer.param_groups]
class ExponentialLR(torch.optim.lr_scheduler._LRScheduler):
    """Continuous exponential decay: lr = base_lr * gamma ** (epoch / decay_epochs)."""

    def __init__(self, optimizer, decay_epochs, gamma=0.1, last_epoch=-1):
        self.decay_epochs = decay_epochs
        self.gamma = gamma
        super().__init__(optimizer, last_epoch)

    def get_lr(self):
        factor = self.gamma ** (self.last_epoch / self.decay_epochs)
        return [base_lr * factor for base_lr in self.base_lrs]
def Scheduler(cfg_scheduler, optimizer):
    """Factory: build an lr scheduler ('multi_step' or 'exponential') from config."""
    stype = cfg_scheduler.type
    if stype == 'multi_step':
        return MultiStepLR(optimizer,
                           milestones=cfg_scheduler.milestones,
                           gamma=cfg_scheduler.gamma)
    if stype == 'exponential':
        return ExponentialLR(optimizer,
                             decay_epochs=cfg_scheduler.decay_epochs,
                             gamma=cfg_scheduler.gamma)
    raise NotImplementedError
import torch
from collections import Counter
from bisect import bisect_right
def set_lr_scheduler(cfg_scheduler, scheduler):
    """Overwrite an existing scheduler's hyper-parameters from config, in place.

    NOTE(review): the multi_step branch updates milestones but not gamma,
    while the exponential branch updates both — confirm the asymmetry is
    intended before relying on it.
    """
    stype = cfg_scheduler.type
    if stype == 'multi_step':
        scheduler.milestones = Counter(cfg_scheduler.milestones)
    elif stype == 'exponential':
        scheduler.decay_epochs = cfg_scheduler.decay_epochs
        scheduler.gamma = cfg_scheduler.gamma
import torch
# supported optimizers, selected by cfg.optim
_optimizer_factory = {
    'adam': torch.optim.Adam,
    'sgd': torch.optim.SGD
}


def Optimizer(net, cfg):
    """Build an optimizer over all trainable parameters of `net`.

    Each tensor gets its own param group sharing cfg.lr / cfg.weight_decay.
    Adam-family optimizers receive weight_decay; others get momentum=0.9.
    """
    lr = cfg.lr
    weight_decay = cfg.weight_decay
    params = [{"params": [value], "lr": lr, "weight_decay": weight_decay}
              for _, value in net.named_parameters() if value.requires_grad]
    opt_cls = _optimizer_factory[cfg.optim]
    if 'adam' in cfg.optim:
        return opt_cls(params, lr, weight_decay=weight_decay)
    return opt_cls(params, lr, momentum=0.9)
import os
from termcolor import colored
import torch
def load_model(net,
               optim,
               scheduler,
               recorder,
               model_dir,
               resume=True,
               epoch=-1):
    """Restore full training state from a checkpoint directory.

    Args:
        net, optim, scheduler, recorder: objects exposing load_state_dict.
        model_dir: directory with '<epoch>.pth' files and optionally 'latest.pth'.
        resume: when False, wipe model_dir and start fresh (returns 0).
        epoch: checkpoint to load; -1 picks 'latest.pth' if present, else the
            highest-numbered checkpoint.
    Returns:
        The epoch to resume from (saved epoch + 1), or 0 if nothing loaded.

    Fix: `os.system('rm -rf ...')` replaced by shutil.rmtree — shell-free,
    portable, and immune to paths with spaces/metacharacters.
    """
    if not resume:
        import shutil
        shutil.rmtree(model_dir, ignore_errors=True)
    if not os.path.exists(model_dir):
        return 0
    pths = [
        int(pth.split('.')[0]) for pth in os.listdir(model_dir)
        if pth != 'latest.pth'
    ]
    if len(pths) == 0 and 'latest.pth' not in os.listdir(model_dir):
        return 0
    if epoch == -1:
        if 'latest.pth' in os.listdir(model_dir):
            pth = 'latest'
        else:
            pth = max(pths)
    else:
        pth = epoch
    model_path = os.path.join(model_dir, '{}.pth'.format(pth))
    print('load model: {}'.format(model_path))
    # map to CPU so checkpoints saved on GPU load on any machine
    pretrained_model = torch.load(model_path, 'cpu')
    net.load_state_dict(pretrained_model['net'])
    optim.load_state_dict(pretrained_model['optim'])
    scheduler.load_state_dict(pretrained_model['scheduler'])
    recorder.load_state_dict(pretrained_model['recorder'])
    return pretrained_model['epoch'] + 1
import os
from termcolor import colored
import torch
def save_model(net, optim, scheduler, recorder, model_dir, epoch, last=False):
    """Save a full training checkpoint.

    last=True writes 'latest.pth'; otherwise '<epoch>.pth'. After epoch 20
    only every 10th epoch is kept (plus 'latest').

    Fixes:
    - `os.system('mkdir -p ...')` replaced by os.makedirs(exist_ok=True).
    - The original had an unconditional `return 0` before the pruning code,
      making it unreachable; pruning of the oldest checkpoint (when more
      than 20 numbered checkpoints exist) now actually runs, via os.remove
      instead of `os.system('rm ...')`.

    Returns 0 in all cases (kept from the original interface).
    """
    os.makedirs(model_dir, exist_ok=True)
    model = {
        'net': net.state_dict(),
        'optim': optim.state_dict(),
        'scheduler': scheduler.state_dict(),
        'recorder': recorder.state_dict(),
        'epoch': epoch
    }
    # skip intermediate epochs after 20 unless this is the final save
    if epoch > 20 and (epoch + 1) % 10 != 0 and not last:
        return 0
    if last:
        torch.save(model, os.path.join(model_dir, 'latest.pth'))
        return 0
    torch.save(model, os.path.join(model_dir, '{}.pth'.format(epoch)))
    # prune: drop the oldest numbered checkpoint once more than 20 exist
    pths = [
        int(pth.split('.')[0]) for pth in os.listdir(model_dir)
        if pth != 'latest.pth'
    ]
    if len(pths) <= 20:
        return 0
    os.remove(os.path.join(model_dir, '{}.pth'.format(min(pths))))
    return 0
import os
from termcolor import colored
import torch
def load_network(net, model_dir, resume=True, epoch=-1, strict=True):
    """Load only the network weights from a checkpoint file or directory.

    model_dir may be a directory ('<epoch>.pth'/'latest.pth' files) or a
    direct path to a .pth file. Returns the stored epoch + 1, or 0 when
    nothing was loaded.
    """
    if not resume:
        return 0
    if not os.path.exists(model_dir):
        print(colored('pretrained model does not exist', 'red'))
        return 0
    if os.path.isdir(model_dir):
        listing = os.listdir(model_dir)
        pths = [int(name.split('.')[0]) for name in listing
                if name != 'latest.pth']
        if not pths and 'latest.pth' not in listing:
            return 0
        if epoch == -1:
            pth = 'latest' if 'latest.pth' in listing else max(pths)
        else:
            pth = max(epoch, -1)
        model_path = os.path.join(model_dir, '{}.pth'.format(pth))
    else:
        model_path = model_dir
    print('load model: {}'.format(model_path))
    pretrained_model = torch.load(model_path)
    net.load_state_dict(pretrained_model['net'], strict=strict)
    return pretrained_model['epoch'] + 1
import numpy as np
import cv2
from termcolor import colored
import os
from os.path import join
from ..dataset.utils_reader import palette
# Per-id RGB palette (floats in [0, 1]); the sequence repeats so moderately
# large ids still resolve.
colors_rgb = [
    (1, 1, 1),
    (94/255, 124/255, 226/255),  # cyan-blue
    (255/255, 200/255, 87/255),  # yellow
    (74/255., 189/255., 172/255.),  # green
    (8/255, 76/255, 97/255),  # blue
    (219/255, 58/255, 52/255),  # red
    (77/255, 40/255, 49/255),  # brown
    (0, 0, 0),
    (0, 0, 0),
    (0, 0, 0),
    (0, 0, 0),
    (110/255, 211/255, 207/255),  # light green
    (1, 1, 1),
    (94/255, 124/255, 226/255),  # cyan-blue
    (255/255, 200/255, 87/255),  # yellow
    (74/255., 189/255., 172/255.),  # green
    (8/255, 76/255, 97/255),  # blue
    (219/255, 58/255, 52/255),  # red
    (77/255, 40/255, 49/255),  # brown
    (0, 0, 0),
    (0, 0, 0),
    (0, 0, 0),
    (0, 0, 0),
    (110/255, 211/255, 207/255),  # light green
    (1, 1, 1),
]


def get_rgb_01(pid):
    """Return the palette entry for `pid` with channels reversed (RGB -> BGR),
    as floats in [0, 1]."""
    r, g, b = colors_rgb[pid]
    return (b, g, r)
import cv2
import numpy as np
from ...mytools.file_utils import read_json
def img_to_numpy(img):
    """Convert a uint8 image to float32 in [0, 1].

    3-channel inputs are assumed BGR (OpenCV convention) and converted to
    RGB; 2-D (grayscale) inputs skip the channel swap.
    """
    if img.ndim == 3:
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    return img.astype(np.float32) / 255.
import cv2
import numpy as np
from ...mytools.file_utils import read_json
def numpy_to_img(img):
    """Convert an RGB float image in [0, 1] back to a BGR uint8 image."""
    bgr = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
    return (bgr * 255).astype(np.uint8)
import cv2
import numpy as np
from ...mytools.file_utils import read_json
def read_json(path):
    """Read a JSON file, returning [] (with a warning) when parsing fails.

    Args:
        path: path to an existing JSON file (AssertionError if missing).
    Returns:
        The decoded JSON value, or [] if the file is not valid JSON.
    """
    assert os.path.exists(path), path
    with open(path) as f:
        try:
            data = json.load(f)
        except (json.JSONDecodeError, UnicodeDecodeError):
            # The previous bare `except:` swallowed every exception, including
            # KeyboardInterrupt; only decoding problems should fall back to [].
            print('Reading error {}'.format(path))
            data = []
    return data
def read_json_with_cache(filename, file_cache):
    """Memoized read_json: results are stored in (and served from) file_cache."""
    if filename in file_cache:
        return file_cache[filename]
    file_cache[filename] = read_json(filename)
    return file_cache[filename]
13,189 | import cv2
import numpy as np
from ...mytools.file_utils import read_json
palette = get_schp_palette(semantic_dim)
The provided code snippet includes necessary dependencies for implementing the `get_schp_palette` function. Write a Python function `def get_schp_palette(num_cls=256)` to solve the following problem:
Returns the color map for visualizing the segmentation mask. Inputs: num_cls: Number of classes. Returns: The color map.
Here is the function:
def get_schp_palette(num_cls=256):
    # Copied from SCHP
    """Returns the color map for visualizing the segmentation mask.

    Inputs:
        num_cls: Number of classes.
    Returns:
        (num_cls, 3) uint8 color map, one RGB row per class.
    """
    flat = [0] * (num_cls * 3)
    for cls_id in range(num_cls):
        # Spread the bits of the class id over the three channels,
        # starting at the most significant bit of each byte.
        remaining = cls_id
        shift = 7
        while remaining:
            flat[cls_id * 3 + 0] |= ((remaining >> 0) & 1) << shift
            flat[cls_id * 3 + 1] |= ((remaining >> 1) & 1) << shift
            flat[cls_id * 3 + 2] |= ((remaining >> 2) & 1) << shift
            shift -= 1
            remaining >>= 3
    return np.array(flat, dtype=np.uint8).reshape(-1, 3)
13,190 | import cv2
import numpy as np
from ...mytools.file_utils import read_json
# Mapping from CIHP/SCHP part names to coarse label ids used downstream.
# NOTE(review): most parts collapse to label 1; only background (0),
# upper_cloth (2) and pant (3) keep distinct ids — confirm this is intended.
semantic_dict = {
    'background': 0,
    'hat': 1,
    'hair': 1,
    'glove': 1,
    'sunglasses': 1,
    'upper_cloth': 2,
    'dress': 1, #x
    'coat': 1,
    'sock': 1,
    'pant': 3,
    'jumpsuit': 1,
    'scarf': 1,
    'skirt': 1,
    'face': 1,
    'left_leg': 1,
    'right_leg': 1,
    'left_arm': 1,
    'right_arm': 1,
    'left_shoe': 1,
    'right_shoe': 1,
}
# Ordered part names; index i corresponds to palette row i in parse_semantic.
semantic_labels = list(semantic_dict.keys())
# NOTE(review): `semantic_dim` is not defined in this view — presumably the
# number of semantic classes; verify where it is provided from.
palette = get_schp_palette(semantic_dim)
def parse_semantic(semantic):
    """Convert an RGB semantic map (float, values in [0, 1]) to integer labels.

    Each pixel is matched against the SCHP palette colors (L1 distance < 4 in
    0-255 space) and mapped through `semantic_dict` to the coarse label set.

    Args:
        semantic: (H, W, 3) float array with colors in [0, 1].
    Returns:
        (H, W) int64 label map.
    """
    # `np.int` was removed in NumPy 1.24; the builtin `int` is the exact
    # replacement (np.int was an alias for it).
    msk_cihp = (semantic * 255).astype(int)  # H, W, 3
    sem_msk = np.zeros(msk_cihp.shape[:2], dtype=np.int64)
    for i, rgb in enumerate(palette):
        if i == 0:  # skip background color
            continue
        belong = np.abs(msk_cihp - rgb).sum(axis=-1) < 4
        sem_msk[belong] = semantic_dict[semantic_labels[i]]
    return sem_msk
13,191 | from os.path import join
import numpy as np
import cv2
from tqdm import trange
import copy
from .mvbase import BaseDataset, read_json, get_bounds
from ...multistage.mirror import calc_mirror_transform
import torch
from .utils_sample import AABBwMask
def calc_mirror_transform(m_):
    """Build 3x4 reflection matrices from mirror-plane vectors.

    Args:
        m_ (bn, 4): plane coefficients (a, b, c, d) of a*x + b*y + c*z + d = 0.
    Returns:
        (bn, 3, 4) Householder reflection matrices [I - 2nn^T | -2dn], where
        n is the normalized plane normal (d is used as given, unnormalized).
    """
    normal = m_[:, :3] / torch.norm(m_[:, :3], dim=1, keepdim=True)
    offset = m_[:, 3]
    refl = torch.zeros((normal.shape[0], 3, 4), device=normal.device)
    # Rotation part: Householder reflection I - 2 n n^T.
    refl[:, :, :3] = torch.eye(3, device=normal.device) \
        - 2.0 * normal[:, :, None] * normal[:, None, :]
    # Translation part: -2 d n.
    refl[:, :, 3] = -2.0 * offset[:, None] * normal
    return refl
def mirror_params(params, mirror, T0=np.eye(4, dtype=np.float32)):
    """Reflect pose parameters (Rh, Th, vertices) across a mirror plane.

    Args:
        params: dict with 'Rh' (1, 3) axis-angle, 'Th' (1, 3) translation and
            'vertices' (N, 3) points; a shallow copy is modified and returned.
        mirror: (1, 4) plane coefficients fed to calc_mirror_transform.
        T0: optional (4, 4) pre-transform applied before mirroring.
            NOTE(review): mutable default argument — never mutated here, so
            harmless, but worth confirming.
    Returns:
        A copy of `params` with mirrored rotation, translation and vertices.
    """
    params = params.copy()
    M = np.eye(4, dtype=np.float32)
    M[:3] = calc_mirror_transform(torch.Tensor(mirror))[0].numpy()
    T1 = M @ T0
    # Flip the x-row of the combined transform; presumably this restores a
    # det=+1 rotation after the reflection (det=-1) — TODO confirm intent.
    T1[0, :] *= -1
    Rh = cv2.Rodrigues(params['Rh'])[0]
    Th = params['Th'].T
    Rnew = T1[:3, :3] @ Rh
    Tnew = T1[:3, :3] @ Th + T1[:3, 3:]
    params['Rh'] = cv2.Rodrigues(Rnew)[0].reshape(1, 3)
    params['Th'] = Tnew.T
    params['vertices'] = params['vertices'] @ T1[:3, :3].T + T1[:3, 3:].T
    return params
13,192 | import numpy as np
import cv2
import math
from collections import namedtuple
def get_rays(H, W, K, R, T):
    """Per-pixel ray origins and (unnormalized) directions in world space.

    Args:
        H, W: image height and width.
        K: (3, 3) intrinsics; R: (3, 3), T: (3, 1) world-to-camera extrinsics.
    Returns:
        rays_o, rays_d: two (H, W, 3) float32 arrays.
    """
    # Camera center in world coordinates: C = -R^T T.
    cam_center = -np.dot(R.T, T).ravel()
    xs, ys = np.meshgrid(np.arange(W, dtype=np.float32),
                         np.arange(H, dtype=np.float32),
                         indexing='xy')
    pix_homog = np.stack([xs, ys, np.ones_like(xs)], axis=2)
    # Back-project pixels to the z=1 plane in camera space, then to world.
    cam_pts = np.dot(pix_homog, np.linalg.inv(K).T)
    world_pts = np.dot(cam_pts - T.ravel(), R)
    dirs = world_pts - cam_center[None, None]
    # ATTN: directions are deliberately NOT normalized here.
    origins = np.broadcast_to(cam_center, dirs.shape)
    return origins.astype(np.float32), dirs.astype(np.float32)
13,193 | import numpy as np
import cv2
import math
from collections import namedtuple
The provided code snippet includes necessary dependencies for implementing the `project` function. Write a Python function `def project(xyz, K, R, T)` to solve the following problem:
xyz: [N, 3] K: [3, 3] RT: [3, 4]
Here is the function:
def project(xyz, K, R, T):
    """Project world points into pixel coordinates.

    xyz: [N, 3] world points
    K: [3, 3] intrinsics
    R: [3, 3], T: [3, 1] world-to-camera extrinsics
    Returns: [N, 2] pixel coordinates.
    """
    cam_pts = np.dot(xyz, R.T) + T.T
    img_pts = np.dot(cam_pts, K.T)
    # Perspective divide by depth.
    return img_pts[:, :2] / img_pts[:, 2:]
13,194 | import numpy as np
import cv2
import math
from collections import namedtuple
def get_bound_corners(bounds):
    """Return the 8 corners of an axis-aligned box given as (2, 3) min/max."""
    lo, hi = bounds[0], bounds[1]
    # Enumerate corners with x varying slowest and z fastest, matching the
    # conventional binary ordering (000, 001, ..., 111).
    corners = [[x, y, z]
               for x in (lo[0], hi[0])
               for y in (lo[1], hi[1])
               for z in (lo[2], hi[2])]
    return np.array(corners)
13,195 | import numpy as np
import cv2
import math
from collections import namedtuple
def get_bounds(xyz, delta=0.05):
    """Axis-aligned bounds of a point cloud, padded by `delta`.

    Args:
        xyz: (N, 3) points.
        delta: scalar padding, or a list of 3 per-axis paddings.
    Returns:
        (2, 3) float32 array [min_xyz, max_xyz].
    """
    min_xyz = np.min(xyz, axis=0)
    max_xyz = np.max(xyz, axis=0)
    if isinstance(delta, list):
        # Reshape to (3,) so the in-place -=/+= below broadcasts against the
        # (3,) min/max vectors. The previous (1, 3) shape made
        # `min_xyz -= delta` raise (non-broadcastable output operand).
        delta = np.array(delta, dtype=np.float32).reshape(3)
    min_xyz -= delta
    max_xyz += delta
    can_bounds = np.stack([min_xyz, max_xyz], axis=0)
    return can_bounds.astype(np.float32)
13,196 | import numpy as np
import cv2
import math
from collections import namedtuple
def sample_rays(bound_sum, mask_back, split, nrays=1024, **kwargs):
    """Pixel coordinates where both masks are positive.

    In 'train' mode a random subset of `nrays` pixels (with replacement) is
    returned; otherwise all candidate pixels are kept.
    """
    candidates = np.argwhere(bound_sum * mask_back > 0)
    if split == 'train':
        picks = np.random.randint(0, len(candidates), nrays)
        candidates = candidates[picks]
    return candidates
13,197 | import numpy as np
import cv2
import math
from collections import namedtuple
def generate_weight_coords(bounds, rates, back_mask):
    """Collect pixel coordinates per region, replicated/subsampled by rate.

    Args:
        bounds: dict name -> binary region mask (H, W).
        rates: dict name -> sampling rate. 1 keeps every pixel once; r > 1
            repeats each pixel floor(r) times plus a random draw for the
            fractional part; r < 1 randomly subsamples a fraction r.
        back_mask: binary mask multiplied into every region mask.
    Returns:
        (N, 2) int array of (row, col) coordinates, regions concatenated.
    """
    coords = []
    for key in bounds.keys():
        coord_ = np.argwhere(bounds[key]*back_mask > 0)
        if rates[key] == 1.:
            # BUG FIX: the original appended here AND again after the if/else
            # chain, so a rate of 1 duplicated every pixel.
            coords.append(coord_)
            continue
        if rates[key] >= 1.:
            # repeat the integer part of the rate
            coord_r = np.vstack([coord_ for _ in range(math.floor(rates[key]))])
            if not isinstance(rates[key], int):
                # draw the fractional part as a random sample
                nsample2 = int(len(coord_)*(rates[key] - math.floor(rates[key])))
                coord_f = coord_[np.random.randint(0, len(coord_), nsample2)]
                coord_ = np.vstack([coord_r, coord_f])
            else:
                # BUG FIX: the original assigned `coord_ = coord_`, which
                # discarded the repetition for integer rates (rate 2 behaved
                # like rate 1).
                coord_ = coord_r
        else:
            # subsample a fraction `rate` of the pixels
            coord_ = coord_[np.random.randint(0, len(coord_), int(len(coord_)*rates[key]))]
        coords.append(coord_)
    return np.vstack(coords)
def sample_rays_rate(bounds, rates, back_mask, nrays=1024, **kwargs):
    """Sample ray pixel coordinates with per-region rates.

    Args:
        bounds: dict name -> binary region mask (H, W).
        rates: dict name -> sampling rate (see generate_weight_coords).
        back_mask: (H, W) binary mask of valid pixels.
        nrays: number of rays drawn when not in patch mode.
        kwargs: if 'method' == 'patch', 'patch_size' and 'num_patch' select
            full square patches around randomly drawn centers instead of
            independent pixels.
    Returns:
        (N, 2) array of (row, col) pixel coordinates.
    """
    if 'method' in kwargs and kwargs['method'] == 'patch':
        # NOTE(review): debug dump; assumes a 'debug/' directory exists.
        cv2.imwrite('debug/back.jpg', (back_mask*255).astype(np.uint8))
        mask_valid = back_mask
        # Zero the border and erode so patches stay fully inside the image.
        # NOTE(review): `mask_valid` aliases `back_mask`, so the caller's
        # array is mutated in place here — confirm that is intended.
        mask_valid[:, 0] = 0
        mask_valid[:, -1] = 0
        mask_valid[0, :] = 0
        mask_valid[-1, :] = 0
        # inp = mask_valid.astype(np.uint8) * 255
        patch_size = kwargs['patch_size']
        kernel = np.ones((2*patch_size//2+1, 2*patch_size//2+1), np.uint8)
        back_mask = cv2.erode(mask_valid, kernel, iterations=1)
        # TODO: the per-object masks in `bounds` are not eroded here,
        # so pixels on object boundaries can still be selected.
        coords = generate_weight_coords(bounds, rates, back_mask)
    if 'method' in kwargs and kwargs['method'] == 'patch':
        patch_size = kwargs['patch_size']
        if False:
            # Disabled debug visualization of the sampling density.
            canvas = np.zeros_like(back_mask)
            for (i, j) in coords:
                canvas[i, j] += 1
            canvas /= canvas.max()
            cv2.imwrite('debug.jpg', (canvas*255).astype(np.uint8))
        # Draw patch centers, then expand each into a patch_size^2 pixel grid.
        center = coords[np.random.randint(0, len(coords), kwargs['num_patch'])]
        coords_list = []
        for n_patch in range(center.shape[0]):
            cx, cy = center[n_patch]
            x_min = cx - patch_size//2
            x_max = x_min + patch_size
            y_min = cy - patch_size//2
            y_max = y_min + patch_size
            i, j = np.meshgrid(np.arange(x_min, x_max, dtype=coords.dtype),
                               np.arange(y_min, y_max, dtype=coords.dtype),
                               indexing='xy')
            coord = np.stack([i.reshape(-1), j.reshape(-1)], axis=1)
            coords_list.append(coord)
        coords = np.vstack(coords_list)
    else:
        coords = coords[np.random.randint(0, len(coords), nrays)]
    return coords
13,198 | import numpy as np
import cv2
import math
from collections import namedtuple
def create_cameras_mean(cameras, camera_args):
    """Create a ring of virtual cameras around the mean center of the inputs.

    Args:
        cameras: list of dicts with 'K', 'R', 'T' (world-to-camera; T = -R C).
        camera_args: dict; 'allstep' is the number of views on the circle.
    Returns:
        (K, Rnew, Tnew): intrinsics (mean of inputs) and extrinsics for
        `allstep` cameras on a circle with the inputs' mean radius and tilt.
    """
    Told = np.stack([d['T'] for d in cameras])
    Rold = np.stack([d['R'] for d in cameras])
    Kold = np.stack([d['K'] for d in cameras])
    # Camera centers in world coordinates: C = -R^T T.
    Cold = - np.einsum('bmn,bnp->bmp', Rold.transpose(0, 2, 1), Told)
    center = Cold.mean(axis=0, keepdims=True)
    radius = np.linalg.norm(Cold - center, axis=1).mean()
    # Mean z-component of the viewing axis: controls the ring's tilt.
    zmean = Rold[:, 2, 2].mean()
    xynorm = np.sqrt(1. - zmean**2)
    thetas = np.linspace(0., 2*np.pi, camera_args['allstep'])
    # Compute the azimuth of the first input camera so the generated ring
    # starts at the same angular position.
    dir0 = Cold[0] - center[0]
    dir0[2, 0] = 0.
    dir0 = dir0 / np.linalg.norm(dir0)
    theta0 = np.arctan2(dir0[1,0], dir0[0,0]) + np.pi/2
    thetas += theta0
    sint = np.sin(thetas)
    cost = np.cos(thetas)
    # Rotation rows: presumably R1 = camera x-axis, R3 = tilted viewing
    # direction, R2 completes a right-handed frame — TODO confirm convention.
    R1 = np.stack([cost, sint, np.zeros_like(sint)]).T
    R3 = xynorm * np.stack([-sint, cost, np.zeros_like(sint)]).T
    R3[:, 2] = zmean
    R2 = - np.cross(R1, R3)
    Rnew = np.stack([R1, R2, R3], axis=1)
    # set locations
    loc = np.stack([radius * sint, -radius * cost, np.zeros_like(sint)], axis=1)[..., None] + center
    print('[sample] camera centers: ', center[0].T[0])
    print('[sample] camera radius: ', radius)
    print('[sample] camera start theta: ', theta0)
    Tnew = -np.einsum('bmn,bnp->bmp', Rnew, loc)
    K = Kold.mean(axis=0, keepdims=True).repeat(Tnew.shape[0], 0)
    return K, Rnew, Tnew
13,199 | import numpy as np
import cv2
import math
from collections import namedtuple
def create_center_radius(center, radius=5., up='y', ranges=[0, 360, 36], angle_x=0, **kwargs):
    """World-to-camera [R|T] matrices for a ring of cameras around `center`.

    Args:
        center: look-at point (3 values).
        radius: distance of each camera from `center`.
        up: world up axis, 'z' or 'y' (any other value raises NameError on R).
        ranges: (start_deg, stop_deg, num) arguments for np.linspace.
            NOTE(review): mutable default list — not mutated here, but a
            tuple would be safer.
        angle_x: extra rotation about the camera x-axis, in degrees.
    Returns:
        (num, 3, 4) stacked [R|T] extrinsic matrices.
    """
    center = np.array(center).reshape(1, 3)
    thetas = np.deg2rad(np.linspace(*ranges))
    st = np.sin(thetas)
    ct = np.cos(thetas)
    zero = np.zeros_like(st)
    # Extra tilt applied to every camera rotation below.
    Rotx = cv2.Rodrigues(np.deg2rad(angle_x) * np.array([1., 0., 0.]))[0]
    if up == 'z':
        center = np.stack([radius*ct, radius*st, zero], axis=1) + center
        R = np.stack([-st, ct, zero, zero, zero, zero-1, -ct, -st, zero], axis=-1)
    elif up == 'y':
        center = np.stack([radius*ct, zero, radius*st, ], axis=1) + center
        R = np.stack([
            +st, zero, -ct,
            zero, zero-1, zero,
            -ct, zero, -st], axis=-1)
    R = R.reshape(-1, 3, 3)
    R = np.einsum('ab,fbc->fac', Rotx, R)
    center = center.reshape(-1, 3, 1)
    # T = -R C converts camera centers to world-to-camera translations.
    T = - R @ center
    RT = np.dstack([R, T])
    return RT
13,200 | import numpy as np
import cv2
import torch.nn as nn
import torch
import time
import json
from ..model.base import augment_z_vals, concat
# Module-level timestamp shared by tic()/toc().
_time_ = 0
def tic():
    """Start (or reset) the global wall-clock timer; pair with toc()."""
    global _time_
    _time_ = time.time()
13,201 | import numpy as np
import cv2
import torch.nn as nn
import torch
import time
import json
from ..model.base import augment_z_vals, concat
# Module-level timestamp shared by tic()/toc().
_time_ = 0
def toc(name):
    """Print the milliseconds elapsed since the last tic()/toc(), then reset.

    Args:
        name: label printed alongside the elapsed time.
    """
    global _time_
    print('{:15s}: {:.1f}'.format(name, 1000*(time.time() - _time_)))
    _time_ = time.time()
13,202 | import numpy as np
import cv2
import torch.nn as nn
import torch
import time
import json
from ..model.base import augment_z_vals, concat
def raw2acc(raw):
    """Accumulated opacity per ray from per-sample alpha values.

    Args:
        raw: (N_rays, N_samples, C) tensor with alpha in the last channel.
    Returns:
        (N_rays,) accumulated alpha (sum of compositing weights).
    """
    alpha = raw[..., -1]
    # Transmittance before each sample: prod_{j<i} (1 - alpha_j).
    leading_ones = torch.ones((alpha.shape[0], 1)).to(alpha)
    transmittance = torch.cumprod(
        torch.cat([leading_ones, 1. - alpha + 1e-10], -1), -1)[:, :-1]
    weights = alpha * transmittance
    return torch.sum(weights, -1)
13,203 | import numpy as np
import cv2
import torch.nn as nn
import torch
import time
import json
from ..model.base import augment_z_vals, concat
The provided code snippet includes necessary dependencies for implementing the `raw2outputs` function. Write a Python function `def raw2outputs(outputs, z_vals, rays_d, bkgd=None)` to solve the following problem:
Transforms model's predictions to semantically meaningful values. Args: acc: [num_rays, num_samples along ray, 1]. Prediction from model. feature: [num_rays, num_samples along ray, N]. Prediction from model. z_vals: [num_rays, num_samples along ray]. Integration time. rays_d: [num_rays, 3]. Direction of each ray. Returns: feat_map: [num_rays, 3]. Estimated RGB color of a ray. disp_map: [num_rays]. Disparity map. Inverse of depth map. acc_map: [num_rays]. Sum of weights along each ray. weights: [num_rays, num_samples]. Weights assigned to each sampled color. depth_map: [num_rays]. Estimated distance to object.
Here is the function:
def raw2outputs(outputs, z_vals, rays_d, bkgd=None):
    """Composite per-sample network outputs into per-ray maps (volume rendering).

    Args:
        outputs: dict of per-sample tensors [num_rays, num_samples, C]; must
            contain either 'occupancy' (alpha in [0, 1]) or 'density' (sigma).
            Every other entry (e.g. 'rgb') is alpha-composited into '<key>_map'.
        z_vals: [num_rays, num_samples] sample depths along each ray.
        rays_d: ray directions, shaped so torch.norm(rays_d, dim=-1) broadcasts
            against [num_rays, num_samples] — presumably [num_rays, 1, 3];
            TODO confirm against callers.
        bkgd: optional background color blended behind 'rgb_map'.
    Returns:
        dict with 'acc_map' [num_rays], 'depth_map' [num_rays], and one
        '<key>_map' [num_rays, C] per remaining entry of `outputs`.
    """
    if 'occupancy' in outputs.keys():
        # The network predicts per-sample alpha directly.
        alpha = outputs['occupancy'][..., 0]
    elif 'density' in outputs.keys():
        # Convert density to alpha: alpha = 1 - exp(-sigma * dist).
        dists = z_vals[..., 1:] - z_vals[..., :-1]
        # Pad the last interval with a huge value so the final sample absorbs
        # all remaining transmittance.
        dists = torch.cat(
            [dists,
             torch.Tensor([1e10]).expand(dists[..., :1].shape).to(dists)],
            -1)  # [N_rays, N_samples]
        # Scale intervals by ||rays_d|| so distances are in world units.
        dists = dists * torch.norm(rays_d, dim=-1)
        noise = 0.
        # alpha = raw2alpha(raw[..., -1] + noise, dists) # [N_rays, N_samples]
        alpha = 1 - torch.exp(-dists*torch.relu(outputs['density'][..., 0] + noise)) # (N_rays, N_samples_)
    else:
        raise NotImplementedError
    # weights[i] = alpha[i] * prod_{j<i} (1 - alpha[j])  (alpha compositing).
    weights = alpha * torch.cumprod(
        torch.cat(
            [torch.ones((alpha.shape[0], 1)).to(alpha), 1. - alpha + 1e-10],
            -1), -1)[:, :-1]
    acc_map = torch.sum(weights, -1)
    # ATTN: here depth must /||ray_d||
    depth_map = torch.sum(weights * z_vals, -1)/(1e-10 + acc_map)/torch.norm(rays_d, dim=-1).squeeze()
    results = {
        'acc_map': acc_map,
        'depth_map': depth_map,
    }
    # NOTE(review): only 'occupancy' is skipped here; when 'density' is used,
    # a 'density_map' is also produced — confirm that is intended.
    for key, val in outputs.items():
        if key == 'occupancy':
            continue
        results[key+'_map'] = torch.sum(weights[..., None] * val, -2) # [N_rays, 3]
    if bkgd is not None:
        # Blend the background behind the accumulated color.
        results['rgb_map'] = results['rgb_map'] + bkgd[0] * (1 - acc_map[..., None])
    return results
13,204 | import torch
class Embedder:
    """Frequency positional encoding: x -> [x, sin(f1 x), cos(f1 x), ...]."""

    def __init__(self, **kwargs):
        self.kwargs = kwargs
        self.create_embedding_fn()

    def create_embedding_fn(self):
        """Build the list of encoding functions and the total output dimension."""
        dims = self.kwargs['input_dims']
        fns = []
        total_dim = 0
        if self.kwargs['include_input']:
            # Identity term keeps the raw input in the encoding.
            fns.append(lambda x: x)
            total_dim += dims
        top_freq = self.kwargs['max_freq_log2']
        n_freqs = self.kwargs['num_freqs']
        if self.kwargs['log_sampling']:
            bands = 2.**torch.linspace(0., top_freq, steps=n_freqs)
        else:
            bands = torch.linspace(2.**0., 2.**top_freq, steps=n_freqs)
        for band in bands:
            for periodic in self.kwargs['periodic_fns']:
                # Bind band/periodic as defaults to avoid late-binding capture.
                fns.append(lambda x, p_fn=periodic, freq=band: p_fn(x * freq))
                total_dim += dims
        self.embed_fns = fns
        self.out_dim = total_dim

    def embed(self, inputs):
        """Apply every encoding function and concatenate along the last dim."""
        return torch.cat([fn(inputs) for fn in self.embed_fns], -1)


def get_embedder(multires, input_dims=3):
    """Return (embed_fn, out_dim) for a log-sampled frequency encoding.

    Args:
        multires: number of frequency bands (max frequency is 2**(multires-1)).
        input_dims: dimensionality of the input being encoded.
    """
    embedder = Embedder(
        include_input=True,
        input_dims=input_dims,
        max_freq_log2=multires - 1,
        num_freqs=multires,
        log_sampling=True,
        periodic_fns=[torch.sin, torch.cos],
    )
    return embedder.embed, embedder.out_dim
13,205 | import torch
import torch.nn as nn
from torch import searchsorted
def augment_z_vals(z_vals, perturb=1):
    """Jitter depth samples within their local intervals (stratified sampling).

    Args:
        z_vals: (..., N_samples) sorted sample depths.
        perturb: jitter scale; 0 places every sample at its interval's lower
            bound (deterministic, though the RNG is still consumed).
    Returns:
        Tensor of the same shape with perturbed depths.
    """
    midpoints = .5 * (z_vals[..., 1:] + z_vals[..., :-1])
    upper_bound = torch.cat([midpoints, z_vals[..., -1:]], -1)
    lower_bound = torch.cat([z_vals[..., :1], midpoints], -1)
    # Uniform draw inside each [lower, upper] interval, scaled by `perturb`.
    jitter = perturb * torch.rand(z_vals.shape, device=z_vals.device)
    return lower_bound + (upper_bound - lower_bound) * jitter
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.