code stringlengths 17 6.64M |
|---|
class ComputeMetricsBest(ComputeMetrics):
    """ComputeMetrics variant that keeps, for every sequence, the metrics of
    the best trial (the trial with the lowest APE_root for that sequence)."""

    @staticmethod
    def _seq_metrics(jts_t, poses_t, root_t, traj_t, jts_r, poses_r, root_r, traj_r, length):
        """Return the 8 APE/AVE metrics for one (prediction, reference) sequence."""
        APE_root = l2_norm(root_t, root_r, dim=1).sum()
        APE_pose = l2_norm(poses_t, poses_r, dim=2).sum(0)
        APE_traj = l2_norm(traj_t, traj_r, dim=1).sum()
        APE_joints = l2_norm(jts_t, jts_r, dim=2).sum(0)
        AVE_root = l2_norm(variance(root_t, length, dim=0), variance(root_r, length, dim=0), dim=0)
        AVE_traj = l2_norm(variance(traj_t, length, dim=0), variance(traj_r, length, dim=0), dim=0)
        AVE_pose = l2_norm(variance(poses_t, length, dim=0), variance(poses_r, length, dim=0), dim=1)
        AVE_joints = l2_norm(variance(jts_t, length, dim=0), variance(jts_r, length, dim=0), dim=1)
        return [APE_root, APE_pose, APE_traj, APE_joints, AVE_root, AVE_pose, AVE_traj, AVE_joints]

    def update(self, jts_text_: List[Tensor], jts_ref_: List[Tensor], lengths: List[List[int]]):
        """Accumulate metrics over ``len(jts_text_)`` trials.

        Args:
            jts_text_: one predicted-joints tensor per trial.
            jts_ref_: one reference-joints tensor per trial.
            lengths: per-trial list of sequence lengths; assumed identical
                across trials (only ``lengths[0]`` is used for counting).
        """
        self.count += sum(lengths[0])
        self.count_seq += len(lengths[0])
        ntrials = len(jts_text_)
        # metrics[trial][seq] -> list of the 8 APE/AVE values.
        metrics = []
        for index in range(ntrials):
            (jts_text, poses_text, root_text, traj_text) = self.transform(jts_text_[index], lengths[index])
            (jts_ref, poses_ref, root_ref, traj_ref) = self.transform(jts_ref_[index], lengths[index])
            metrics.append([
                self._seq_metrics(jts_text[i], poses_text[i], root_text[i], traj_text[i],
                                  jts_ref[i], poses_ref[i], root_ref[i], traj_ref[i],
                                  lengths[index][i])
                for i in range(len(lengths[index]))
            ])
        # BUGFIX: the original selected one trial by the APE_root of the FIRST
        # sequence only and accumulated only that first sequence's metrics,
        # while count/count_seq counted every sequence — biasing the averages.
        # Select the best trial per sequence and accumulate all sequences.
        for i in range(len(lengths[0])):
            best = min(range(ntrials), key=lambda t, s=i: metrics[t][s][0])
            (APE_root, APE_pose, APE_traj, APE_joints,
             AVE_root, AVE_pose, AVE_traj, AVE_joints) = metrics[best][i]
            self.APE_root += APE_root
            self.APE_pose += APE_pose
            self.APE_traj += APE_traj
            self.APE_joints += APE_joints
            self.AVE_root += AVE_root
            self.AVE_pose += AVE_pose
            self.AVE_traj += AVE_traj
            self.AVE_joints += AVE_joints
|
class ComputeMetricsWorst(ComputeMetrics):
    """ComputeMetrics variant that keeps, for every sequence, the metrics of
    the worst trial (the trial with the highest APE_root for that sequence)."""

    @staticmethod
    def _seq_metrics(jts_t, poses_t, root_t, traj_t, jts_r, poses_r, root_r, traj_r, length):
        """Return the 8 APE/AVE metrics for one (prediction, reference) sequence."""
        APE_root = l2_norm(root_t, root_r, dim=1).sum()
        APE_pose = l2_norm(poses_t, poses_r, dim=2).sum(0)
        APE_traj = l2_norm(traj_t, traj_r, dim=1).sum()
        APE_joints = l2_norm(jts_t, jts_r, dim=2).sum(0)
        AVE_root = l2_norm(variance(root_t, length, dim=0), variance(root_r, length, dim=0), dim=0)
        AVE_traj = l2_norm(variance(traj_t, length, dim=0), variance(traj_r, length, dim=0), dim=0)
        AVE_pose = l2_norm(variance(poses_t, length, dim=0), variance(poses_r, length, dim=0), dim=1)
        AVE_joints = l2_norm(variance(jts_t, length, dim=0), variance(jts_r, length, dim=0), dim=1)
        return [APE_root, APE_pose, APE_traj, APE_joints, AVE_root, AVE_pose, AVE_traj, AVE_joints]

    def update(self, jts_text_: List[Tensor], jts_ref_: List[Tensor], lengths: List[List[int]]):
        """Accumulate metrics over ``len(jts_text_)`` trials.

        Args:
            jts_text_: one predicted-joints tensor per trial.
            jts_ref_: one reference-joints tensor per trial.
            lengths: per-trial list of sequence lengths; assumed identical
                across trials (only ``lengths[0]`` is used for counting).
        """
        self.count += sum(lengths[0])
        self.count_seq += len(lengths[0])
        ntrials = len(jts_text_)
        # metrics[trial][seq] -> list of the 8 APE/AVE values.
        metrics = []
        for index in range(ntrials):
            (jts_text, poses_text, root_text, traj_text) = self.transform(jts_text_[index], lengths[index])
            (jts_ref, poses_ref, root_ref, traj_ref) = self.transform(jts_ref_[index], lengths[index])
            metrics.append([
                self._seq_metrics(jts_text[i], poses_text[i], root_text[i], traj_text[i],
                                  jts_ref[i], poses_ref[i], root_ref[i], traj_ref[i],
                                  lengths[index][i])
                for i in range(len(lengths[index]))
            ])
        # BUGFIX: the original selected one trial by the APE_root of the FIRST
        # sequence only and accumulated only that first sequence's metrics,
        # while count/count_seq counted every sequence — biasing the averages.
        # Select the worst trial per sequence and accumulate all sequences.
        for i in range(len(lengths[0])):
            worst = max(range(ntrials), key=lambda t, s=i: metrics[t][s][0])
            (APE_root, APE_pose, APE_traj, APE_joints,
             AVE_root, AVE_pose, AVE_traj, AVE_joints) = metrics[worst][i]
            self.APE_root += APE_root
            self.APE_pose += APE_pose
            self.APE_traj += APE_traj
            self.APE_joints += APE_joints
            self.AVE_root += AVE_root
            self.AVE_pose += AVE_pose
            self.AVE_traj += AVE_traj
            self.AVE_joints += AVE_joints
|
class MMMetrics(Metric):
    """TorchMetrics wrapper computing the MultiModality score of generated
    motion embeddings, optionally namespaced per training stage (s1/s2/s3)."""
    full_state_update = True

    def __init__(self, mm_num_times=10, dist_sync_on_step=True, stage=0, **kwargs):
        super().__init__(dist_sync_on_step=dist_sync_on_step)
        self.name = 'MultiModality scores'
        self.mm_num_times = mm_num_times
        self.add_state('count', default=torch.tensor(0), dist_reduce_fx='sum')
        self.add_state('count_seq', default=torch.tensor(0), dist_reduce_fx='sum')
        self.stage = stage
        # Stage-specific metric name for stages 1-3, plain name otherwise.
        if self.stage in [1, 2, 3]:
            self.metrics = [f's{self.stage}_MultiModality']
        else:
            self.metrics = ['MultiModality']
        self.add_state(self.metrics[0], default=torch.tensor(0.0), dist_reduce_fx='sum')
        # Embeddings cached by update() and concatenated at compute() time.
        self.add_state('mm_motion_embeddings', default=[], dist_reduce_fx=None)

    def compute(self, sanity_flag):
        """Return {metric_name: value}; during sanity checks return the raw
        metric states without computing anything."""
        metrics = {metric: getattr(self, metric) for metric in self.metrics}
        if sanity_flag:
            return metrics
        all_mm_motions = torch.cat(self.mm_motion_embeddings, dim=0).cpu().numpy()
        metrics[self.metrics[0]] = calculate_multimodality_np(all_mm_motions, self.mm_num_times)
        return {**metrics}

    def update(self, mm_motion_embeddings: Tensor, lengths: List[int]):
        """Cache one batch of multi-modal motion embeddings."""
        self.count += sum(lengths)
        self.count_seq += len(lengths)
        self.mm_motion_embeddings.append(mm_motion_embeddings)
|
class MRMetrics(Metric):
    """Motion-reconstruction metrics: MPJPE, PA-MPJPE and acceleration error."""

    def __init__(self, njoints, jointstype: str='mmm', force_in_meter: bool=True, align_root: bool=True, dist_sync_on_step=True, **kwargs):
        super().__init__(dist_sync_on_step=dist_sync_on_step)
        if jointstype not in ['mmm', 'humanml3d']:
            raise NotImplementedError('This jointstype is not implemented.')
        self.name = 'Motion Reconstructions'
        self.jointstype = jointstype
        self.align_root = align_root
        self.force_in_meter = force_in_meter
        self.add_state('count', default=torch.tensor(0), dist_reduce_fx='sum')
        self.add_state('count_seq', default=torch.tensor(0), dist_reduce_fx='sum')
        for metric_name in ('MPJPE', 'PAMPJPE', 'ACCEL'):
            self.add_state(metric_name, default=torch.tensor([0.0]), dist_reduce_fx='sum')
        self.MR_metrics = ['MPJPE', 'PAMPJPE', 'ACCEL']
        self.metrics = self.MR_metrics

    def compute(self, sanity_flag):
        """Return averaged metrics, converted to millimetres when inputs are metres."""
        factor = 1000.0 if self.force_in_meter else 1.0
        # Acceleration is undefined for the first/last frame of each sequence,
        # hence the count is reduced by two per sequence.
        return {
            'MPJPE': self.MPJPE / self.count * factor,
            'PAMPJPE': self.PAMPJPE / self.count * factor,
            'ACCEL': self.ACCEL / (self.count - 2 * self.count_seq) * factor,
        }

    def update(self, joints_rst: Tensor, joints_ref: Tensor, lengths: List[int]):
        """Accumulate per-sequence error sums for one batch of joint tensors."""
        assert joints_rst.shape == joints_ref.shape
        assert joints_rst.dim() == 4
        self.count += sum(lengths)
        self.count_seq += len(lengths)
        rst = joints_rst.detach().cpu()
        ref = joints_ref.detach().cpu()
        # Align at the root joint for the supported skeleton types.
        align_inds = [0] if (self.align_root and self.jointstype in ['mmm', 'humanml3d']) else None
        for idx in range(len(lengths)):
            self.MPJPE += calc_mpjpe(rst[idx], ref[idx], align_inds=align_inds).sum()
            self.PAMPJPE += calc_pampjpe(rst[idx], ref[idx]).sum()
            self.ACCEL += calc_accel(rst[idx], ref[idx]).sum()
|
class UncondMetrics(Metric):
    """FID, KID and Diversity metrics for unconditional motion generation."""
    full_state_update = True

    def __init__(self, top_k=3, R_size=32, diversity_times=300, dist_sync_on_step=True, **kwargs):
        super().__init__(dist_sync_on_step=dist_sync_on_step)
        self.name = 'fid, kid, and diversity scores'
        self.top_k = top_k
        self.R_size = R_size
        # BUGFIX: the parameter was previously ignored (hard-coded 300).
        self.diversity_times = diversity_times
        self.add_state('count', default=torch.tensor(0), dist_reduce_fx='sum')
        self.add_state('count_seq', default=torch.tensor(0), dist_reduce_fx='sum')
        self.metrics = []
        # KID
        self.add_state('KID_mean', default=torch.tensor(0.0), dist_reduce_fx='mean')
        self.add_state('KID_std', default=torch.tensor(0.0), dist_reduce_fx='mean')
        self.metrics.extend(['KID_mean', 'KID_std'])
        # FID
        self.add_state('FID', default=torch.tensor(0.0), dist_reduce_fx='mean')
        self.metrics.append('FID')
        # Diversity
        self.add_state('Diversity', default=torch.tensor(0.0), dist_reduce_fx='sum')
        self.add_state('gt_Diversity', default=torch.tensor(0.0), dist_reduce_fx='sum')
        self.metrics.extend(['Diversity', 'gt_Diversity'])
        # Embeddings cached by update() and concatenated at compute() time.
        self.add_state('recmotion_embeddings', default=[], dist_reduce_fx=None)
        self.add_state('gtmotion_embeddings', default=[], dist_reduce_fx=None)

    def compute(self, sanity_flag):
        """Compute all metrics from the cached embeddings; during sanity
        checks return the raw metric states without computing anything."""
        count_seq = self.count_seq.item()
        metrics = {metric: getattr(self, metric) for metric in self.metrics}
        if sanity_flag:
            return metrics
        all_gtmotions = torch.cat(self.gtmotion_embeddings, axis=0).cpu()
        all_genmotions = torch.cat(self.recmotion_embeddings, axis=0).cpu()
        (KID_mean, KID_std) = calculate_kid(all_gtmotions, all_genmotions)
        metrics['KID_mean'] = KID_mean
        metrics['KID_std'] = KID_std
        all_genmotions = all_genmotions.numpy()
        all_gtmotions = all_gtmotions.numpy()
        (mu, cov) = calculate_activation_statistics_np(all_genmotions)
        (gt_mu, gt_cov) = calculate_activation_statistics_np(all_gtmotions)
        metrics['FID'] = calculate_frechet_distance_np(gt_mu, gt_cov, mu, cov)
        # Diversity sampling needs more sequences than sampled pairs.
        assert count_seq > self.diversity_times
        metrics['Diversity'] = calculate_diversity_np(all_genmotions, self.diversity_times)
        metrics['gt_Diversity'] = calculate_diversity_np(all_gtmotions, self.diversity_times)
        return {**metrics}

    def update(self, gtmotion_embeddings: Tensor, lengths: List[int], recmotion_embeddings=None):
        """Cache flattened ground-truth (and optionally generated) embeddings."""
        self.count += sum(lengths)
        self.count_seq += len(lengths)
        if recmotion_embeddings is not None:
            self.recmotion_embeddings.append(torch.flatten(recmotion_embeddings, start_dim=1).detach())
        self.gtmotion_embeddings.append(torch.flatten(gtmotion_embeddings, start_dim=1).detach())
|
class MLP(nn.Module):
    """MLP head assembled from LinearBlock stages, sized by
    cfg.MODEL.MOTION_DECODER.MLP_DIM, ending in a plain linear projection."""

    def __init__(self, cfg, out_dim, is_init):
        super(MLP, self).__init__()
        dims = cfg.MODEL.MOTION_DECODER.MLP_DIM
        layers = []
        # Hidden stages: LeakyReLU activation, no normalization.
        for in_dim, hid_dim in zip(dims[:-1], dims[1:]):
            layers += LinearBlock(in_dim, hid_dim, norm='none', acti='lrelu')
        # Output stage: linear only.
        layers += LinearBlock(dims[-1], out_dim, norm='none', acti='none')
        self.model = nn.Sequential(*layers)
        if is_init:
            for m in self.modules():
                if isinstance(m, nn.Linear):
                    # NOTE(review): constant-one init for Linear weights is
                    # unusual — confirm this is intentional.
                    nn.init.constant_(m.weight, 1)
                elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                    nn.init.constant_(m.weight, 1)
                    nn.init.constant_(m.bias, 0)

    def forward(self, x):
        # Flatten everything but the batch dimension before the MLP.
        return self.model(x.view(x.size(0), -1))
|
def ZeroPad1d(sizes):
    """Zero-constant 1D padding layer; `sizes` is (pad_left, pad_right)."""
    return nn.ConstantPad1d(sizes, value=0)
|
def get_acti_layer(acti='relu', inplace=True):
    """Return a (possibly empty) list holding the requested activation layer.

    Args:
        acti: one of 'relu', 'lrelu', 'tanh', 'none'.
        inplace: forwarded to ReLU/LeakyReLU.

    Raises:
        ValueError: for an unsupported activation name.
    """
    if acti == 'relu':
        return [nn.ReLU(inplace=inplace)]
    if acti == 'lrelu':
        return [nn.LeakyReLU(0.2, inplace=inplace)]
    if acti == 'tanh':
        return [nn.Tanh()]
    if acti == 'none':
        return []
    # BUGFIX: `assert 0, ...` disappears under `python -O`; raise instead.
    raise ValueError('Unsupported activation: {}'.format(acti))
|
def get_norm_layer(norm='none', norm_dim=None):
    """Return a (possibly empty) list holding the requested 1D normalization layer.

    Args:
        norm: one of 'bn', 'in', 'adain', 'none'.
        norm_dim: feature dimension passed to the normalization layer.

    Raises:
        ValueError: for an unsupported normalization name.
    """
    if norm == 'bn':
        return [nn.BatchNorm1d(norm_dim)]
    if norm == 'in':
        return [nn.InstanceNorm1d(norm_dim, affine=True)]
    if norm == 'adain':
        return [AdaptiveInstanceNorm1d(norm_dim)]
    if norm == 'none':
        return []
    # BUGFIX: `assert 0, ...` disappears under `python -O`; raise instead.
    raise ValueError('Unsupported normalization: {}'.format(norm))
|
def get_dropout_layer(dropout=None):
    """Return [nn.Dropout(p=dropout)], or [] when `dropout` is None."""
    return [] if dropout is None else [nn.Dropout(p=dropout)]
|
def ConvLayers(kernel_size, in_channels, out_channels, stride=1, pad_type='reflect', use_bias=True):
    """Return a [pad, conv] list for a 'same'-padded 1D convolution.

    The list is meant to be `+=`-ed into a layer list and wrapped in
    nn.Sequential. Padding is split left/right so even kernels pad one extra
    element on the right.

    Args:
        pad_type: 'reflect', 'replicate' or 'zero'.

    Raises:
        ValueError: for an unsupported `pad_type`.
    """
    if pad_type == 'reflect':
        pad = nn.ReflectionPad1d
    elif pad_type == 'replicate':
        pad = nn.ReplicationPad1d
    elif pad_type == 'zero':
        pad = ZeroPad1d
    else:
        # BUGFIX: `assert 0, ...` disappears under `python -O`; raise instead.
        raise ValueError('Unsupported padding type: {}'.format(pad_type))
    pad_l = (kernel_size - 1) // 2
    pad_r = (kernel_size - 1) - pad_l
    return [pad((pad_l, pad_r)), nn.Conv1d(in_channels, out_channels, kernel_size=kernel_size, stride=stride, bias=use_bias)]
|
def ConvBlock(kernel_size, in_channels, out_channels, stride=1, pad_type='reflect', dropout=None, norm='none', acti='lrelu', acti_first=False, use_bias=True, inplace=True):
    """Return [pad, conv, (dropout), (norm), acti] — or, when `acti_first`,
    the activation placed before the rest ([acti, pad, conv, ...])."""
    core = ConvLayers(kernel_size, in_channels, out_channels, stride=stride, pad_type=pad_type, use_bias=use_bias)
    core += get_dropout_layer(dropout)
    core += get_norm_layer(norm, norm_dim=out_channels)
    activation = get_acti_layer(acti, inplace=inplace)
    return (activation + core) if acti_first else (core + activation)
|
def LinearBlock(in_dim, out_dim, dropout=None, norm='none', acti='relu'):
    """Return [linear, (dropout), (norm), acti] layers for one MLP stage."""
    block = [nn.Linear(in_dim, out_dim, bias=True)]
    block += get_dropout_layer(dropout)
    block += get_norm_layer(norm, norm_dim=out_dim)
    block += get_acti_layer(acti)
    return block
|
@contextlib.contextmanager
def no_weight_gradients():
    """Context manager that disables weight gradients in conv2d_gradfix.

    BUGFIX: the previous flag value is now restored in a `finally` block, so
    an exception raised inside the `with` body cannot leave weight gradients
    permanently disabled.
    """
    global weight_gradients_disabled
    old = weight_gradients_disabled
    weight_gradients_disabled = True
    try:
        yield
    finally:
        weight_gradients_disabled = old
|
def conv2d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1):
    """Drop-in replacement for F.conv2d that routes through the custom
    conv2d_gradfix op when could_use_op() allows it."""
    if not could_use_op(input):
        return F.conv2d(input=input, weight=weight, bias=bias, stride=stride, padding=padding, dilation=dilation, groups=groups)
    op = conv2d_gradfix(transpose=False, weight_shape=weight.shape, stride=stride, padding=padding, output_padding=0, dilation=dilation, groups=groups)
    return op.apply(input, weight, bias)
|
def conv_transpose2d(input, weight, bias=None, stride=1, padding=0, output_padding=0, groups=1, dilation=1):
    """Drop-in replacement for F.conv_transpose2d that routes through the
    custom conv2d_gradfix op when could_use_op() allows it."""
    if not could_use_op(input):
        return F.conv_transpose2d(input=input, weight=weight, bias=bias, stride=stride, padding=padding, output_padding=output_padding, dilation=dilation, groups=groups)
    op = conv2d_gradfix(transpose=True, weight_shape=weight.shape, stride=stride, padding=padding, output_padding=output_padding, groups=groups, dilation=dilation)
    return op.apply(input, weight, bias)
|
def could_use_op(input):
    """Whether the custom conv2d_gradfix op is usable for this input.

    Requires the module-level `enabled` flag, cuDNN being enabled, a CUDA
    tensor, and a supported PyTorch version (1.7.x / 1.8.x); otherwise warns
    once and falls back.
    """
    if not enabled or not torch.backends.cudnn.enabled:
        return False
    if input.device.type != 'cuda':
        return False
    # str.startswith accepts a tuple of accepted prefixes.
    if torch.__version__.startswith(('1.7.', '1.8.')):
        return True
    warnings.warn(f'conv2d_gradfix not supported on PyTorch {torch.__version__}. Falling back to torch.nn.functional.conv2d().')
    return False
|
def ensure_tuple(xs, ndim):
    """Return `xs` as a tuple: sequences pass through, scalars are repeated `ndim` times."""
    if isinstance(xs, (tuple, list)):
        return tuple(xs)
    return (xs,) * ndim
|
def conv2d_gradfix(transpose, weight_shape, stride, padding, output_padding, dilation, groups):
    """Return a cached custom autograd Function replicating F.conv2d (or
    F.conv_transpose2d when `transpose`) whose backward pass skips weight
    gradients while the module-level `weight_gradients_disabled` flag is set
    (see `no_weight_gradients`)."""
    ndim = 2
    weight_shape = tuple(weight_shape)
    # Normalize per-dimension hyper-parameters to tuples so they are hashable
    # and can participate in the cache key below.
    stride = ensure_tuple(stride, ndim)
    padding = ensure_tuple(padding, ndim)
    output_padding = ensure_tuple(output_padding, ndim)
    dilation = ensure_tuple(dilation, ndim)
    # One Function class per distinct configuration, cached for reuse.
    key = (transpose, weight_shape, stride, padding, output_padding, dilation, groups)
    if (key in conv2d_gradfix_cache):
        return conv2d_gradfix_cache[key]
    common_kwargs = dict(stride=stride, padding=padding, dilation=dilation, groups=groups)
    def calc_output_padding(input_shape, output_shape):
        # Output padding needed so the opposite convolution used in the
        # backward pass reconstructs the forward input shape exactly.
        if transpose:
            return [0, 0]
        return [(((input_shape[(i + 2)] - ((output_shape[(i + 2)] - 1) * stride[i])) - (1 - (2 * padding[i]))) - (dilation[i] * (weight_shape[(i + 2)] - 1))) for i in range(ndim)]
    class Conv2d(autograd.Function):
        # Forward/backward replicating F.conv2d / F.conv_transpose2d with a
        # controllable weight-gradient path.
        @staticmethod
        def forward(ctx, input, weight, bias):
            if (not transpose):
                out = F.conv2d(input=input, weight=weight, bias=bias, **common_kwargs)
            else:
                out = F.conv_transpose2d(input=input, weight=weight, bias=bias, output_padding=output_padding, **common_kwargs)
            ctx.save_for_backward(input, weight)
            return out
        @staticmethod
        def backward(ctx, grad_output):
            (input, weight) = ctx.saved_tensors
            (grad_input, grad_weight, grad_bias) = (None, None, None)
            if ctx.needs_input_grad[0]:
                # Input gradient = the opposite (transposed) convolution of
                # grad_output with the same weights.
                p = calc_output_padding(input_shape=input.shape, output_shape=grad_output.shape)
                grad_input = conv2d_gradfix(transpose=(not transpose), weight_shape=weight_shape, output_padding=p, **common_kwargs).apply(grad_output, weight, None)
            # Weight gradients are skipped while no_weight_gradients() is active.
            if (ctx.needs_input_grad[1] and (not weight_gradients_disabled)):
                grad_weight = Conv2dGradWeight.apply(grad_output, input)
            if ctx.needs_input_grad[2]:
                grad_bias = grad_output.sum((0, 2, 3))
            return (grad_input, grad_weight, grad_bias)
    class Conv2dGradWeight(autograd.Function):
        # Weight-gradient computation via cuDNN kernels, itself differentiable
        # (supports double backward through Conv2d).
        @staticmethod
        def forward(ctx, grad_output, input):
            # NOTE(review): these private aten ops exist only on the PyTorch
            # versions accepted by could_use_op (1.7.x/1.8.x) — confirm before
            # porting to newer releases.
            op = torch._C._jit_get_operation(('aten::cudnn_convolution_backward_weight' if (not transpose) else 'aten::cudnn_convolution_transpose_backward_weight'))
            flags = [torch.backends.cudnn.benchmark, torch.backends.cudnn.deterministic, torch.backends.cudnn.allow_tf32]
            grad_weight = op(weight_shape, grad_output, input, padding, stride, dilation, groups, *flags)
            ctx.save_for_backward(grad_output, input)
            return grad_weight
        @staticmethod
        def backward(ctx, grad_grad_weight):
            (grad_output, input) = ctx.saved_tensors
            (grad_grad_output, grad_grad_input) = (None, None)
            if ctx.needs_input_grad[0]:
                grad_grad_output = Conv2d.apply(input, grad_grad_weight, None)
            if ctx.needs_input_grad[1]:
                p = calc_output_padding(input_shape=input.shape, output_shape=grad_output.shape)
                grad_grad_input = conv2d_gradfix(transpose=(not transpose), weight_shape=weight_shape, output_padding=p, **common_kwargs).apply(grad_output, grad_grad_weight, None)
            return (grad_grad_output, grad_grad_input)
    conv2d_gradfix_cache[key] = Conv2d
    return Conv2d
|
class SkipTransformerEncoder(nn.Module):
    """Transformer encoder with U-Net-style skip connections: the first half
    of the layers stores activations that the second half consumes through
    concatenation followed by a linear projection."""

    def __init__(self, encoder_layer, num_layers, norm=None):
        super().__init__()
        self.d_model = encoder_layer.d_model
        self.num_layers = num_layers
        self.norm = norm
        # Symmetric halves around one middle layer => odd layer count.
        assert num_layers % 2 == 1
        num_block = (num_layers - 1) // 2
        self.input_blocks = _get_clones(encoder_layer, num_block)
        self.middle_block = _get_clone(encoder_layer)
        self.output_blocks = _get_clones(encoder_layer, num_block)
        self.linear_blocks = _get_clones(nn.Linear(2 * self.d_model, self.d_model), num_block)
        self._reset_parameters()

    def _reset_parameters(self):
        # Xavier init for every weight matrix (1-D params left untouched).
        for param in self.parameters():
            if param.dim() > 1:
                nn.init.xavier_uniform_(param)

    def forward(self, src, mask: Optional[Tensor]=None, src_key_padding_mask: Optional[Tensor]=None, pos: Optional[Tensor]=None):
        skips = []
        out = src
        for block in self.input_blocks:
            out = block(out, src_mask=mask, src_key_padding_mask=src_key_padding_mask, pos=pos)
            skips.append(out)
        out = self.middle_block(out, src_mask=mask, src_key_padding_mask=src_key_padding_mask, pos=pos)
        for block, fuse in zip(self.output_blocks, self.linear_blocks):
            # Fuse with the matching skip connection (LIFO order).
            out = fuse(torch.cat([out, skips.pop()], dim=-1))
            out = block(out, src_mask=mask, src_key_padding_mask=src_key_padding_mask, pos=pos)
        return out if self.norm is None else self.norm(out)
|
class SkipTransformerDecoder(nn.Module):
    """Transformer decoder with U-Net-style skip connections between the
    first and second halves of the layer stack."""

    def __init__(self, decoder_layer, num_layers, norm=None):
        super().__init__()
        self.d_model = decoder_layer.d_model
        self.num_layers = num_layers
        self.norm = norm
        # Symmetric halves around one middle layer => odd layer count.
        assert num_layers % 2 == 1
        num_block = (num_layers - 1) // 2
        self.input_blocks = _get_clones(decoder_layer, num_block)
        self.middle_block = _get_clone(decoder_layer)
        self.output_blocks = _get_clones(decoder_layer, num_block)
        self.linear_blocks = _get_clones(nn.Linear(2 * self.d_model, self.d_model), num_block)
        self._reset_parameters()

    def _reset_parameters(self):
        # Xavier init for every weight matrix (1-D params left untouched).
        for param in self.parameters():
            if param.dim() > 1:
                nn.init.xavier_uniform_(param)

    def forward(self, tgt, memory, tgt_mask: Optional[Tensor]=None, memory_mask: Optional[Tensor]=None, tgt_key_padding_mask: Optional[Tensor]=None, memory_key_padding_mask: Optional[Tensor]=None, pos: Optional[Tensor]=None, query_pos: Optional[Tensor]=None):
        layer_kwargs = dict(tgt_mask=tgt_mask, memory_mask=memory_mask, tgt_key_padding_mask=tgt_key_padding_mask, memory_key_padding_mask=memory_key_padding_mask, pos=pos, query_pos=query_pos)
        skips = []
        out = tgt
        for block in self.input_blocks:
            out = block(out, memory, **layer_kwargs)
            skips.append(out)
        out = self.middle_block(out, memory, **layer_kwargs)
        for block, fuse in zip(self.output_blocks, self.linear_blocks):
            # Fuse with the matching skip connection (LIFO order).
            out = fuse(torch.cat([out, skips.pop()], dim=-1))
            out = block(out, memory, **layer_kwargs)
        return out if self.norm is None else self.norm(out)
|
class Transformer(nn.Module):
    """DETR-style encoder/decoder transformer operating on flattened 2D
    feature maps with learned object queries."""

    def __init__(self, d_model=512, nhead=8, num_encoder_layers=6, num_decoder_layers=6, dim_feedforward=2048, dropout=0.1, activation='relu', normalize_before=False, return_intermediate_dec=False):
        super().__init__()
        enc_layer = TransformerEncoderLayer(d_model, nhead, dim_feedforward, dropout, activation, normalize_before)
        # Pre-norm encoders need a final LayerNorm; post-norm ones do not.
        enc_norm = nn.LayerNorm(d_model) if normalize_before else None
        self.encoder = TransformerEncoder(enc_layer, num_encoder_layers, enc_norm)
        dec_layer = TransformerDecoderLayer(d_model, nhead, dim_feedforward, dropout, activation, normalize_before)
        self.decoder = TransformerDecoder(dec_layer, num_decoder_layers, nn.LayerNorm(d_model), return_intermediate=return_intermediate_dec)
        self._reset_parameters()
        self.d_model = d_model
        self.nhead = nhead

    def _reset_parameters(self):
        # Xavier init for every weight matrix (1-D params left untouched).
        for param in self.parameters():
            if param.dim() > 1:
                nn.init.xavier_uniform_(param)

    def forward(self, src, mask, query_embed, pos_embed):
        bs, c, h, w = src.shape
        # (B, C, H, W) -> (HW, B, C) sequence-first layout.
        src = src.flatten(2).permute(2, 0, 1)
        pos_embed = pos_embed.flatten(2).permute(2, 0, 1)
        # One copy of the query embeddings per batch element.
        query_embed = query_embed.unsqueeze(1).repeat(1, bs, 1)
        mask = mask.flatten(1)
        tgt = torch.zeros_like(query_embed)
        memory = self.encoder(src, src_key_padding_mask=mask, pos=pos_embed)
        hs = self.decoder(tgt, memory, memory_key_padding_mask=mask, pos=pos_embed, query_pos=query_embed)
        # Return decoder outputs plus the memory restored to (B, C, H, W).
        return hs.transpose(1, 2), memory.permute(1, 2, 0).view(bs, c, h, w)
|
class TransformerEncoder(nn.Module):
    """Plain stack of `num_layers` encoder layers with an optional final norm."""

    def __init__(self, encoder_layer, num_layers, norm=None):
        super().__init__()
        self.layers = _get_clones(encoder_layer, num_layers)
        self.num_layers = num_layers
        self.norm = norm

    def forward(self, src, mask: Optional[Tensor]=None, src_key_padding_mask: Optional[Tensor]=None, pos: Optional[Tensor]=None):
        out = src
        for layer in self.layers:
            out = layer(out, src_mask=mask, src_key_padding_mask=src_key_padding_mask, pos=pos)
        return out if self.norm is None else self.norm(out)
|
class TransformerDecoder(nn.Module):
    """Stack of decoder layers; with `return_intermediate` the (normed)
    output of every layer is returned stacked along a leading dimension."""

    def __init__(self, decoder_layer, num_layers, norm=None, return_intermediate=False):
        super().__init__()
        self.layers = _get_clones(decoder_layer, num_layers)
        self.num_layers = num_layers
        self.norm = norm
        self.return_intermediate = return_intermediate

    def forward(self, tgt, memory, tgt_mask: Optional[Tensor]=None, memory_mask: Optional[Tensor]=None, tgt_key_padding_mask: Optional[Tensor]=None, memory_key_padding_mask: Optional[Tensor]=None, pos: Optional[Tensor]=None, query_pos: Optional[Tensor]=None):
        out = tgt
        intermediate = []
        for layer in self.layers:
            out = layer(out, memory, tgt_mask=tgt_mask, memory_mask=memory_mask, tgt_key_padding_mask=tgt_key_padding_mask, memory_key_padding_mask=memory_key_padding_mask, pos=pos, query_pos=query_pos)
            if self.return_intermediate:
                intermediate.append(self.norm(out))
        if self.norm is not None:
            out = self.norm(out)
        if self.return_intermediate:
            # Replace the last entry so it reflects the final output.
            intermediate[-1] = out
            return torch.stack(intermediate)
        return out.unsqueeze(0)
|
class TransformerEncoderLayer(nn.Module):
    """DETR-style transformer encoder layer.

    Self-attention followed by a two-layer feed-forward block, each wrapped
    in a residual connection; `normalize_before` selects pre- vs
    post-normalization.
    """

    def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation='relu', normalize_before=False):
        super().__init__()
        self.d_model = d_model
        self.nhead = nhead
        self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
        # Feed-forward network.
        self.linear1 = nn.Linear(d_model, dim_feedforward)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, d_model)
        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)
        self.activation = _get_activation_fn(activation)
        self.normalize_before = normalize_before

    def with_pos_embed(self, tensor, pos: Optional[Tensor]):
        # Additive positional embedding; no-op when pos is None.
        return (tensor if (pos is None) else (tensor + pos))

    def forward_post(self, src, src_mask: Optional[Tensor]=None, src_key_padding_mask: Optional[Tensor]=None, pos: Optional[Tensor]=None):
        """Post-norm path: attn -> add -> norm -> FFN -> add -> norm."""
        q = k = self.with_pos_embed(src, pos)
        if (src_mask is not None):
            # Repeat the attention mask once per head (MultiheadAttention
            # expects (bsz*num_heads, L, S) for 3-D masks).
            # NOTE(review): forward_pre passes src_mask through unexpanded —
            # confirm whether this asymmetry is intentional.
            src_mask = torch.repeat_interleave(src_mask, self.nhead, dim=0)
        src2 = self.self_attn(q, k, value=src, attn_mask=src_mask, key_padding_mask=src_key_padding_mask)[0]
        src = (src + self.dropout1(src2))
        src = self.norm1(src)
        src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))
        src = (src + self.dropout2(src2))
        src = self.norm2(src)
        return src

    def forward_pre(self, src, src_mask: Optional[Tensor]=None, src_key_padding_mask: Optional[Tensor]=None, pos: Optional[Tensor]=None):
        """Pre-norm path: norm -> attn -> add -> norm -> FFN -> add."""
        src2 = self.norm1(src)
        q = k = self.with_pos_embed(src2, pos)
        src2 = self.self_attn(q, k, value=src2, attn_mask=src_mask, key_padding_mask=src_key_padding_mask)[0]
        src = (src + self.dropout1(src2))
        src2 = self.norm2(src)
        src2 = self.linear2(self.dropout(self.activation(self.linear1(src2))))
        src = (src + self.dropout2(src2))
        return src

    def forward(self, src, src_mask: Optional[Tensor]=None, src_key_padding_mask: Optional[Tensor]=None, pos: Optional[Tensor]=None):
        """Dispatch to the pre- or post-normalization variant."""
        if self.normalize_before:
            return self.forward_pre(src, src_mask, src_key_padding_mask, pos)
        return self.forward_post(src, src_mask, src_key_padding_mask, pos)
|
class TransformerDecoderLayer(nn.Module):
    """DETR-style transformer decoder layer.

    Self-attention over the targets, cross-attention into the encoder
    memory, then a two-layer feed-forward block — each with a residual
    connection; `normalize_before` selects pre- vs post-normalization.
    """

    def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation='relu', normalize_before=False):
        super().__init__()
        self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
        self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
        self.d_model = d_model
        # Feed-forward network.
        self.linear1 = nn.Linear(d_model, dim_feedforward)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, d_model)
        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.norm3 = nn.LayerNorm(d_model)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)
        self.dropout3 = nn.Dropout(dropout)
        self.activation = _get_activation_fn(activation)
        self.normalize_before = normalize_before

    def with_pos_embed(self, tensor, pos: Optional[Tensor]):
        # Additive positional embedding; no-op when pos is None.
        return (tensor if (pos is None) else (tensor + pos))

    def forward_post(self, tgt, memory, tgt_mask: Optional[Tensor]=None, memory_mask: Optional[Tensor]=None, tgt_key_padding_mask: Optional[Tensor]=None, memory_key_padding_mask: Optional[Tensor]=None, pos: Optional[Tensor]=None, query_pos: Optional[Tensor]=None):
        """Post-norm path: self-attn -> cross-attn -> FFN, norm after each add."""
        q = k = self.with_pos_embed(tgt, query_pos)
        tgt2 = self.self_attn(q, k, value=tgt, attn_mask=tgt_mask, key_padding_mask=tgt_key_padding_mask)[0]
        tgt = (tgt + self.dropout1(tgt2))
        tgt = self.norm1(tgt)
        # Cross-attention: queries carry query_pos, keys carry pos.
        tgt2 = self.multihead_attn(query=self.with_pos_embed(tgt, query_pos), key=self.with_pos_embed(memory, pos), value=memory, attn_mask=memory_mask, key_padding_mask=memory_key_padding_mask)[0]
        tgt = (tgt + self.dropout2(tgt2))
        tgt = self.norm2(tgt)
        tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt))))
        tgt = (tgt + self.dropout3(tgt2))
        tgt = self.norm3(tgt)
        return tgt

    def forward_pre(self, tgt, memory, tgt_mask: Optional[Tensor]=None, memory_mask: Optional[Tensor]=None, tgt_key_padding_mask: Optional[Tensor]=None, memory_key_padding_mask: Optional[Tensor]=None, pos: Optional[Tensor]=None, query_pos: Optional[Tensor]=None):
        """Pre-norm path: norm before each of self-attn, cross-attn and FFN."""
        tgt2 = self.norm1(tgt)
        q = k = self.with_pos_embed(tgt2, query_pos)
        tgt2 = self.self_attn(q, k, value=tgt2, attn_mask=tgt_mask, key_padding_mask=tgt_key_padding_mask)[0]
        tgt = (tgt + self.dropout1(tgt2))
        tgt2 = self.norm2(tgt)
        # Cross-attention: queries carry query_pos, keys carry pos.
        tgt2 = self.multihead_attn(query=self.with_pos_embed(tgt2, query_pos), key=self.with_pos_embed(memory, pos), value=memory, attn_mask=memory_mask, key_padding_mask=memory_key_padding_mask)[0]
        tgt = (tgt + self.dropout2(tgt2))
        tgt2 = self.norm3(tgt)
        tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2))))
        tgt = (tgt + self.dropout3(tgt2))
        return tgt

    def forward(self, tgt, memory, tgt_mask: Optional[Tensor]=None, memory_mask: Optional[Tensor]=None, tgt_key_padding_mask: Optional[Tensor]=None, memory_key_padding_mask: Optional[Tensor]=None, pos: Optional[Tensor]=None, query_pos: Optional[Tensor]=None):
        """Dispatch to the pre- or post-normalization variant."""
        if self.normalize_before:
            return self.forward_pre(tgt, memory, tgt_mask, memory_mask, tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos)
        return self.forward_post(tgt, memory, tgt_mask, memory_mask, tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos)
|
def _get_clone(module):
return copy.deepcopy(module)
|
def _get_clones(module, N):
return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
|
def build_transformer(args):
    """Construct a Transformer from an argparse-style config object,
    always returning intermediate decoder outputs."""
    return Transformer(
        d_model=args.hidden_dim,
        dropout=args.dropout,
        nhead=args.nheads,
        dim_feedforward=args.dim_feedforward,
        num_encoder_layers=args.enc_layers,
        num_decoder_layers=args.dec_layers,
        normalize_before=args.pre_norm,
        return_intermediate_dec=True,
    )
|
def _get_activation_fn(activation):
'Return an activation function given a string'
if (activation == 'relu'):
return F.relu
if (activation == 'gelu'):
return F.gelu
if (activation == 'glu'):
return F.glu
raise RuntimeError(f'activation should be relu/gelu, not {activation}.')
|
def remove_padding(tensors, lengths):
    """Trim each tensor along its first dimension to its true length."""
    return [t[:n] for t, n in zip(tensors, lengths)]
|
class AutoParams(nn.Module):
    """Mixin that populates attributes from constructor kwargs.

    Subclasses may declare:
      - ``needed_params``: iterable of required kwarg names — a missing one
        raises ValueError;
      - ``optional_params``: dict of name -> default, used when the kwarg is
        absent or None.
    Either declaration may be omitted entirely.
    """

    def __init__(self, **kargs):
        # BUGFIX: the original wrapped both loops in bare `except: pass`,
        # which also swallowed the ValueError raised for a missing required
        # parameter — making the check dead code. Only the absence of the
        # declaration attributes themselves is tolerated now.
        try:
            needed = self.needed_params
        except AttributeError:
            needed = []
        for param in needed:
            if param in kargs:
                setattr(self, param, kargs[param])
            else:
                raise ValueError(f'{param} is needed.')
        try:
            optional = list(self.optional_params.items())
        except AttributeError:
            optional = []
        for param, default in optional:
            if param in kargs and kargs[param] is not None:
                setattr(self, param, kargs[param])
            else:
                setattr(self, param, default)
        super().__init__()
|
def freeze_params(module: nn.Module) -> None:
    """Freeze all parameters of `module` so training does not update them.

    :param module: freeze parameters of this module
    """
    for param in module.parameters():
        param.requires_grad = False
|
class Camera():
    """Blender camera helper that follows the character root in the XY plane.

    The focal length depends on the render mode ('sequence', 'frame',
    'video') and on whether a mesh (vs. a skeleton) is rendered.
    """

    def __init__(self, *, first_root, mode, is_mesh):
        camera = bpy.data.objects['Camera']
        # Fixed viewpoint; camera height differs for mesh rendering.
        camera.location.x = 7.36
        camera.location.y = -6.93
        camera.location.z = 5.6 if is_mesh else 5.2
        # Per-mode lens: (mesh lens, skeleton lens). Unknown modes keep the
        # scene default.
        lens_by_mode = {'sequence': (65, 85), 'frame': (130, 85), 'video': (110, 85)}
        if mode in lens_by_mode:
            mesh_lens, skeleton_lens = lens_by_mode[mode]
            camera.data.lens = mesh_lens if is_mesh else skeleton_lens
        self.mode = mode
        self.camera = camera
        # Start centred on the first root position.
        self.camera.location.x += first_root[0]
        self.camera.location.y += first_root[1]
        self._root = first_root

    def update(self, newroot):
        """Pan the camera by the root displacement since the last update."""
        delta = newroot - self._root
        self.camera.location.x += delta[0]
        self.camera.location.y += delta[1]
        self._root = newroot
|
class Data():
    """Minimal sized-container base: subclasses set `N` to define `len()`."""

    def __len__(self):
        # Subclasses are expected to assign self.N.
        return self.N
|
def clear_material(material):
    """Remove every link and node from a material's node tree, if it has one."""
    tree = material.node_tree
    if tree:
        tree.links.clear()
        tree.nodes.clear()
|
def colored_material_diffuse_BSDF(r, g, b, a=1, roughness=0.127451):
    """Create a fresh diffuse-BSDF material named 'body' with the given RGBA
    color and roughness."""
    material = bpy.data.materials.new(name='body')
    material.use_nodes = True
    clear_material(material)
    nodes = material.node_tree.nodes
    links = material.node_tree.links
    out_node = nodes.new(type='ShaderNodeOutputMaterial')
    shader = nodes.new(type='ShaderNodeBsdfDiffuse')
    shader.inputs['Color'].default_value = (r, g, b, a)
    shader.inputs['Roughness'].default_value = roughness
    links.new(shader.outputs['BSDF'], out_node.inputs['Surface'])
    return material
|
def colored_material_relection_BSDF(r, g, b, a=1, roughness=0.127451, saturation_factor=1):
    """Create a material based on the default Principled BSDF node, with the
    base color scaled by ``saturation_factor``.

    NOTE(review): unlike colored_material_diffuse_BSDF this does not call
    clear_material first, so it relies on the stock 'Principled BSDF' node of
    a freshly created node material -- confirm this is intentional. The name
    typo ('relection') is kept for caller compatibility.
    """
    materials = bpy.data.materials
    material = materials.new(name='body')
    material.use_nodes = True
    nodes = material.node_tree.nodes
    links = material.node_tree.links
    output = nodes.new(type='ShaderNodeOutputMaterial')
    diffuse = nodes['Principled BSDF']
    diffuse.inputs['Base Color'].default_value = ((r * saturation_factor), (g * saturation_factor), (b * saturation_factor), a)
    diffuse.inputs['Roughness'].default_value = roughness
    links.new(diffuse.outputs['BSDF'], output.inputs['Surface'])
    return material
|
def body_material(r, g, b, a=1, name='body', oldrender=True):
    """Material for the rendered body: a plain diffuse BSDF for the legacy
    render path, otherwise a Principled BSDF configured from
    DEFAULT_BSDF_SETTINGS with the requested base/subsurface color."""
    if oldrender:
        material = colored_material_diffuse_BSDF(r, g, b, a=a)
    else:
        materials = bpy.data.materials
        material = materials.new(name=name)
        material.use_nodes = True
        nodes = material.node_tree.nodes
        diffuse = nodes['Principled BSDF']
        inputs = diffuse.inputs
        settings = DEFAULT_BSDF_SETTINGS.copy()
        settings['Base Color'] = (r, g, b, a)
        settings['Subsurface Color'] = (r, g, b, a)
        settings['Subsurface'] = 0.0
        # NOTE(review): 'Subsurface' socket names are Blender-version dependent
        # (renamed in 4.x) -- confirm the targeted Blender version.
        for (setting, val) in settings.items():
            inputs[setting].default_value = val
    return material
|
def colored_material_bsdf(name, **kwargs):
    """Create a Principled-BSDF material named ``name``; ``kwargs`` override
    entries of DEFAULT_BSDF_SETTINGS (keys must be BSDF input socket names)."""
    materials = bpy.data.materials
    material = materials.new(name=name)
    material.use_nodes = True
    nodes = material.node_tree.nodes
    diffuse = nodes['Principled BSDF']
    inputs = diffuse.inputs
    settings = DEFAULT_BSDF_SETTINGS.copy()
    for (key, val) in kwargs.items():
        settings[key] = val
    for (setting, val) in settings.items():
        inputs[setting].default_value = val
    return material
|
def floor_mat(name='floor_mat', color=(0.1, 0.1, 0.1, 1), roughness=0.127451):
    # Diffuse floor material. NOTE(review): ``name`` is accepted but never
    # used -- the underlying helper always creates the material as 'body'.
    return colored_material_diffuse_BSDF(color[0], color[1], color[2], a=color[3], roughness=roughness)
|
def plane_mat():
    """Checkerboard material for the ground plane (1024x tiling, grey tones)."""
    materials = bpy.data.materials
    material = materials.new(name='plane')
    material.use_nodes = True
    clear_material(material)
    nodes = material.node_tree.nodes
    links = material.node_tree.links
    output = nodes.new(type='ShaderNodeOutputMaterial')
    diffuse = nodes.new(type='ShaderNodeBsdfDiffuse')
    checker = nodes.new(type='ShaderNodeTexChecker')
    checker.inputs['Scale'].default_value = 1024
    checker.inputs['Color1'].default_value = (0.8, 0.8, 0.8, 1)
    checker.inputs['Color2'].default_value = (0.3, 0.3, 0.3, 1)
    # checker texture drives the diffuse color; diffuse drives the surface.
    links.new(checker.outputs['Color'], diffuse.inputs['Color'])
    links.new(diffuse.outputs['BSDF'], output.inputs['Surface'])
    diffuse.inputs['Roughness'].default_value = 0.127451
    return material
|
def plane_mat_uni():
    """Uniform light-grey diffuse material for the ground plane."""
    materials = bpy.data.materials
    material = materials.new(name='plane_uni')
    material.use_nodes = True
    clear_material(material)
    nodes = material.node_tree.nodes
    links = material.node_tree.links
    output = nodes.new(type='ShaderNodeOutputMaterial')
    diffuse = nodes.new(type='ShaderNodeBsdfDiffuse')
    diffuse.inputs['Color'].default_value = (0.8, 0.8, 0.8, 1)
    diffuse.inputs['Roughness'].default_value = 0.127451
    links.new(diffuse.outputs['BSDF'], output.inputs['Surface'])
    return material
|
def prune_begin_end(data, perc):
    """Drop a fraction ``perc`` of frames from both ends of ``data``."""
    margin = int(len(data) * perc)
    if not margin:
        return data
    return data[margin:-margin]
|
def render_current_frame(path):
    # Render the active scene through the camera and write the still to ``path``.
    bpy.context.scene.render.filepath = path
    bpy.ops.render.render(use_viewport=True, write_still=True)
|
def render(npydata, frames_folder, *, mode, faces_path, gt=False, exact_frame=None, num=8, downsample=True, canonicalize=True, always_on_floor=False, denoising=True, oldrender=True, jointstype='mmm', res='high', init=True, accelerator='gpu', device=[0]):
    """Render a motion (joints or mesh vertices) with Blender.

    mode is one of 'video' (one PNG per frame into ``frames_folder``),
    'sequence' (``num`` frames composited into one image) or 'frame'
    (single frame selected by the fraction ``exact_frame``).
    Returns the frames folder ('video') or the output image path.

    NOTE(review): the mutable default ``device=[0]`` is shared between calls;
    it is only read here, but confirm no caller mutates it.
    """
    if init:
        setup_scene(res=res, denoising=denoising, oldrender=oldrender, accelerator=accelerator, device=device)
    is_mesh = mesh_detect(npydata)
    if (mode == 'video'):
        if always_on_floor:
            frames_folder += '_of'
        os.makedirs(frames_folder, exist_ok=True)
        # Joints are subsampled 8x for video rendering; meshes keep all frames.
        if (downsample and (not is_mesh)):
            npydata = npydata[::8]
    elif (mode == 'sequence'):
        (img_name, ext) = os.path.splitext(frames_folder)
        if always_on_floor:
            img_name += '_of'
        img_path = f'{img_name}{ext}'
    elif (mode == 'frame'):
        (img_name, ext) = os.path.splitext(frames_folder)
        if always_on_floor:
            img_name += '_of'
        img_path = f'{img_name}_{exact_frame}{ext}'
    if (mode == 'sequence'):
        # Trim 20% from both ends so the strip shows the core of the motion.
        perc = 0.2
        npydata = prune_begin_end(npydata, perc)
    if is_mesh:
        from .meshes import Meshes
        data = Meshes(npydata, gt=gt, mode=mode, faces_path=faces_path, canonicalize=canonicalize, always_on_floor=always_on_floor)
    else:
        from .joints import Joints
        data = Joints(npydata, gt=gt, mode=mode, canonicalize=canonicalize, always_on_floor=always_on_floor, jointstype=jointstype)
    nframes = len(data)
    show_traj(data.trajectory)
    plot_floor(data.data, big_plane=False)
    camera = Camera(first_root=data.get_root(0), mode=mode, is_mesh=is_mesh)
    frameidx = get_frameidx(mode=mode, nframes=nframes, exact_frame=exact_frame, frames_to_keep=num)
    nframes_to_render = len(frameidx)
    if (mode == 'sequence'):
        # In sequence mode the camera stays at the mean root position.
        camera.update(data.get_mean_root())
    imported_obj_names = []
    # NOTE(review): the loop variable shadows the ``frameidx`` list above;
    # harmless as written but easy to trip over when editing.
    for (index, frameidx) in enumerate(frameidx):
        if (mode == 'sequence'):
            # NOTE(review): divides by zero when nframes_to_render == 1 (num=1).
            frac = (index / (nframes_to_render - 1))
            mat = data.get_sequence_mat(frac)
        else:
            mat = data.mat
            camera.update(data.get_root(frameidx))
        islast = (index == (nframes_to_render - 1))
        objname = data.load_in_blender(frameidx, mat)
        name = f'{str(index).zfill(4)}'
        if (mode == 'video'):
            path = os.path.join(frames_folder, f'frame_{name}.png')
        else:
            path = img_path
        if (mode == 'sequence'):
            # Keep every imported object until the composite shot is rendered.
            imported_obj_names.extend(objname)
        elif (mode == 'frame'):
            camera.update(data.get_root(frameidx))
        if ((mode != 'sequence') or islast):
            render_current_frame(path)
            delete_objs(objname)
    # Clean up everything created for this render.
    delete_objs(imported_obj_names)
    delete_objs(['Plane', 'myCurve', 'Cylinder'])
    if (mode == 'video'):
        return frames_folder
    else:
        return img_path
|
def get_frameidx(*, mode, nframes, exact_frame, frames_to_keep):
    """Select which frame indices to render for the given mode.

    'sequence': ``frames_to_keep`` evenly spaced indices over the clip;
    'frame': single index at fraction ``exact_frame`` of the clip;
    'video': every frame.
    """
    if (mode == 'sequence'):
        # Evenly spaced positions over [0, nframes-1], rounded to integers.
        spaced = np.round(np.linspace(0, (nframes - 1), frames_to_keep))
        return list(spaced.astype(int))
    if (mode == 'frame'):
        return [int((exact_frame * nframes))]
    if (mode == 'video'):
        return range(0, nframes)
    raise ValueError(f'Not support {mode} render mode')
|
def setup_renderer(denoising=True, oldrender=True, accelerator='gpu', device=[0]):
    """Configure Cycles: compute device selection, denoising, tiles, samples,
    and (for the new render path) color management."""
    bpy.context.scene.render.engine = 'CYCLES'
    bpy.data.scenes[0].render.engine = 'CYCLES'
    if (accelerator.lower() == 'gpu'):
        bpy.context.preferences.addons['cycles'].preferences.compute_device_type = 'CUDA'
        bpy.context.scene.cycles.device = 'GPU'
        i = 0
        bpy.context.preferences.addons['cycles'].preferences.get_devices()
        # Enable only the CUDA devices whose index appears in ``device``.
        for d in bpy.context.preferences.addons['cycles'].preferences.devices:
            if (i in device):
                d['use'] = 1
                print(d['name'], ''.join((str(i) for i in device)))
            else:
                d['use'] = 0
            i += 1
    if denoising:
        bpy.context.scene.cycles.use_denoising = True
    bpy.context.scene.render.tile_x = 256
    bpy.context.scene.render.tile_y = 256
    bpy.context.scene.cycles.samples = 64
    if (not oldrender):
        # Neutral color management with transparent background for compositing.
        bpy.context.scene.view_settings.view_transform = 'Standard'
        bpy.context.scene.render.film_transparent = True
        bpy.context.scene.display_settings.display_device = 'sRGB'
        bpy.context.scene.view_settings.gamma = 1.2
        bpy.context.scene.view_settings.exposure = (- 0.75)
|
def setup_scene(res='high', denoising=True, oldrender=True, accelerator='gpu', device=[0]):
    """Prepare the Blender scene for motion rendering: resolution, white world
    background, sun light, and renderer configuration. Returns the scene."""
    scene = bpy.data.scenes['Scene']
    assert (res in ['ultra', 'high', 'med', 'low'])
    if (res == 'high'):
        scene.render.resolution_x = 1280
        scene.render.resolution_y = 1024
    elif (res == 'med'):
        scene.render.resolution_x = (1280 // 2)
        scene.render.resolution_y = (1024 // 2)
    elif (res == 'low'):
        scene.render.resolution_x = (1280 // 4)
        scene.render.resolution_y = (1024 // 4)
    elif (res == 'ultra'):
        scene.render.resolution_x = (1280 * 2)
        scene.render.resolution_y = (1024 * 2)
    scene.render.film_transparent = True
    world = bpy.data.worlds['World']
    world.use_nodes = True
    bg = world.node_tree.nodes['Background']
    # Plain white environment light at full strength.
    bg.inputs[0].default_value[:3] = (1.0, 1.0, 1.0)
    bg.inputs[1].default_value = 1.0
    # Remove Blender's default cube if it is still present.
    if ('Cube' in bpy.data.objects):
        bpy.data.objects['Cube'].select_set(True)
        bpy.ops.object.delete()
    bpy.ops.object.light_add(type='SUN', align='WORLD', location=(0, 0, 0), scale=(1, 1, 1))
    bpy.data.objects['Sun'].data.energy = 1.5
    bpy.ops.object.empty_add(type='PLAIN_AXES', align='WORLD', location=(0, 0, 0), scale=(1, 1, 1))
    bpy.ops.transform.resize(value=(10, 10, 10), orient_type='GLOBAL', orient_matrix=((1, 0, 0), (0, 1, 0), (0, 0, 1)), orient_matrix_type='GLOBAL', mirror=True, use_proportional_edit=False, proportional_edit_falloff='SMOOTH', proportional_size=1, use_proportional_connected=False, use_proportional_projected=False)
    bpy.ops.object.select_all(action='DESELECT')
    setup_renderer(denoising=denoising, oldrender=oldrender, accelerator=accelerator, device=device)
    return scene
|
def mesh_detect(data):
    """Heuristic: joint data has a few points per frame, meshes have thousands."""
    return data.shape[1] > 1000
|
class ndarray_pydata(np.ndarray):
    """ndarray view whose truthiness means 'non-empty along the first axis',
    matching what Blender's from_pydata expects."""
    def __bool__(self) -> bool:
        return len(self) != 0
|
def load_numpy_vertices_into_blender(vertices, faces, name, mat):
    """Create a smooth-shaded Blender object ``name`` from raw vertex/face
    arrays, assign material ``mat``, and link it into the scene."""
    mesh = bpy.data.meshes.new(name)
    # The ndarray_pydata view gives the faces array the truthiness semantics
    # from_pydata expects.
    mesh.from_pydata(vertices, [], faces.view(ndarray_pydata))
    mesh.validate()
    obj = bpy.data.objects.new(name, mesh)
    bpy.context.scene.collection.objects.link(obj)
    bpy.ops.object.select_all(action='DESELECT')
    obj.select_set(True)
    obj.active_material = mat
    bpy.context.view_layer.objects.active = obj
    bpy.ops.object.shade_smooth()
    bpy.ops.object.select_all(action='DESELECT')
    return True
|
def delete_objs(names):
    """Delete every scene object whose name starts or ends with any entry of
    ``names`` (a string or list of strings)."""
    if (not isinstance(names, list)):
        names = [names]
    bpy.ops.object.select_all(action='DESELECT')
    for obj in bpy.context.scene.objects:
        for name in names:
            if (obj.name.startswith(name) or obj.name.endswith(name)):
                obj.select_set(True)
    bpy.ops.object.delete()
    bpy.ops.object.select_all(action='DESELECT')
|
class LevelsFilter(logging.Filter):
    """Logging filter that passes only records whose level is in ``levels``.

    :param levels: iterable of level names, e.g. ``['INFO', 'WARNING']``
    """
    def __init__(self, levels):
        # Fixed: call the base initializer so logging.Filter's ``name``
        # attribute is set up as the logging machinery expects.
        super().__init__()
        self.levels = [getattr(logging, level) for level in levels]
    def filter(self, record):
        return (record.levelno in self.levels)
|
class StreamToLogger(object):
    """File-like shim that forwards written text to a logger, line by line."""
    def __init__(self, logger, level):
        self.logger = logger
        self.level = level
        self.linebuf = ''
    def write(self, buf):
        # One log record per non-empty line, trailing whitespace stripped.
        for entry in buf.rstrip().splitlines():
            self.logger.log(self.level, entry.rstrip())
    def flush(self):
        # Nothing is buffered beyond what write() already forwarded.
        pass
|
class TqdmLoggingHandler(logging.Handler):
    """Logging handler that routes records through ``tqdm.write`` so log lines
    do not corrupt an active tqdm progress bar."""
    def __init__(self, level=logging.NOTSET):
        super().__init__(level)
    def emit(self, record):
        try:
            tqdm.tqdm.write(self.format(record))
            self.flush()
        except Exception:
            self.handleError(record)
|
def generate_id() -> str:
    """Return a random 8-character lowercase alphanumeric run id."""
    alphabet = '0123456789abcdefghijklmnopqrstuvwxyz'
    generator = shortuuid.ShortUUID(alphabet=list(alphabet))
    return generator.random(8)
|
class Transform():
    """Base class for feature transforms; provides batch collation of datastructs."""
    def collate(self, lst_datastruct):
        """Pad and stack a list of datastructs into a single batched datastruct."""
        from mld.datasets.utils import collate_tensor_with_padding
        example = lst_datastruct[0]
        def stack_key(key):
            # Keys that are None in the example stay None in the batch.
            if example[key] is None:
                return None
            return collate_tensor_with_padding([d[key] for d in lst_datastruct])
        kwargs = {key: stack_key(key) for key in example.datakeys}
        return self.Datastruct(**kwargs)
|
@dataclass
class Datastruct():
    """Dict-like dataclass base for batched motion data.

    Subclasses declare tensor fields and set ``self.datakeys`` (the subset of
    fields that hold data tensors) in ``__post_init__``.
    """
    def __getitem__(self, key):
        return getattr(self, key)
    def __setitem__(self, key, value):
        self.__dict__[key] = value
    def get(self, key, default=None):
        return getattr(self, key, default)
    def __iter__(self):
        return self.keys()
    def keys(self):
        return iter([f.name for f in fields(self)])
    def values(self):
        return iter([getattr(self, f.name) for f in fields(self)])
    def items(self):
        return iter([(f.name, getattr(self, f.name)) for f in fields(self)])
    def to(self, *args, **kwargs):
        # Move every non-None data tensor; mirrors torch.Tensor.to semantics.
        for key in self.datakeys:
            if self[key] is not None:
                self[key] = self[key].to(*args, **kwargs)
        return self
    @property
    def device(self):
        return self[self.datakeys[0]].device
    def detach(self):
        def detach_or_none(tensor):
            return tensor.detach() if tensor is not None else None
        kwargs = {key: detach_or_none(self[key]) for key in self.datakeys}
        return self.transforms.Datastruct(**kwargs)
|
def main():
    """Demo entry point: load HumanML3D features, de-normalize with the dataset
    mean/std, recover 22-joint xyz positions, and save a stick-figure video."""
    data_root = '../datasets/humanml3d'
    # (sic) 'feastures' typo kept -- local variable only.
    feastures_path = 'in.npy'
    animation_save_path = 'in.mp4'
    fps = 20
    mean = np.load(pjoin(data_root, 'Mean.npy'))
    std = np.load(pjoin(data_root, 'Std.npy'))
    motion = np.load(feastures_path)
    # Undo the dataset normalization before recovering joint positions.
    motion = ((motion * std) + mean)
    motion_rec = recover_from_ric(torch.tensor(motion), 22).cpu().numpy()
    # assumes a fixed visual scale factor for plotting -- TODO confirm.
    motion_rec = (motion_rec * 1.3)
    plot_3d_motion(animation_save_path, motion_rec, title='input', fps=fps)
|
class IdentityTransform(Transform):
    """Transform that leaves features untouched."""
    def __init__(self, **kwargs):
        # No configuration required; extra kwargs are intentionally ignored.
        pass
    def Datastruct(self, **kwargs):
        return IdentityDatastruct(**kwargs)
    def __repr__(self):
        return 'IdentityTransform()'
|
@dataclass
class IdentityDatastruct(Datastruct):
    """Datastruct with a single 'features' tensor passed through unchanged."""
    transforms: IdentityTransform
    features: Optional[Tensor] = None
    def __post_init__(self):
        self.datakeys = ['features']
    def __len__(self):
        # Fixed: previously returned len(self.rfeats), but this class has no
        # ``rfeats`` attribute -- __len__ always raised AttributeError.
        return len(self.features)
|
class Joints2Jfeats(nn.Module):
    """Base module converting joint positions to joint features, with optional
    mean/std normalization whose statistics are loaded from ``path``."""
    def __init__(self, path: Optional[str] = None, normalization: bool = False,
                 eps: float = 1e-12, **kwargs) -> None:
        if normalization and path is None:
            raise TypeError('You should provide a path if normalization is on.')
        super().__init__()
        self.normalization = normalization
        self.eps = eps
        if normalization:
            stats_dir = Path(path)
            # Buffers follow the module across devices and live in state_dict.
            self.register_buffer('mean', torch.load(stats_dir / 'jfeats_mean.pt'))
            self.register_buffer('std', torch.load(stats_dir / 'jfeats_std.pt'))
    def normalize(self, features: Tensor) -> Tensor:
        if not self.normalization:
            return features
        return (features - self.mean) / (self.std + self.eps)
    def unnormalize(self, features: Tensor) -> Tensor:
        if not self.normalization:
            return features
        return features * self.std + self.mean
|
class Rots2Joints(nn.Module):
    """Base module mapping rotations to joint positions, with optional mean/std
    normalization whose statistics are loaded from ``path``."""
    def __init__(self, path: Optional[str] = None, normalization: bool = False,
                 eps: float = 1e-12, **kwargs) -> None:
        if normalization and path is None:
            raise TypeError('You should provide a path if normalization is on.')
        super().__init__()
        self.normalization = normalization
        self.eps = eps
        if normalization:
            stats_dir = Path(path)
            # Registered as buffers so they move with the module across devices.
            self.register_buffer('mean', torch.load(stats_dir / 'mean.pt'))
            self.register_buffer('std', torch.load(stats_dir / 'std.pt'))
    def normalize(self, features: Tensor) -> Tensor:
        if not self.normalization:
            return features
        return (features - self.mean) / (self.std + self.eps)
    def unnormalize(self, features: Tensor) -> Tensor:
        if not self.normalization:
            return features
        return features * self.std + self.mean
|
class Rots2Rfeats(nn.Module):
    """Base module converting rotations to rotation features, with optional
    mean/std normalization whose statistics are loaded from ``path``."""
    def __init__(self, path: Optional[str] = None, normalization: bool = False,
                 eps: float = 1e-12, **kwargs) -> None:
        if normalization and path is None:
            raise TypeError('You should provide a path if normalization is on.')
        super().__init__()
        self.normalization = normalization
        self.eps = eps
        if normalization:
            stats_dir = Path(path)
            # Buffers follow the module across devices and live in state_dict.
            self.register_buffer('mean', torch.load(stats_dir / 'rfeats_mean.pt'))
            self.register_buffer('std', torch.load(stats_dir / 'rfeats_std.pt'))
    def normalize(self, features: Tensor) -> Tensor:
        if not self.normalization:
            return features
        return (features - self.mean) / (self.std + self.eps)
    def unnormalize(self, features: Tensor) -> Tensor:
        if not self.normalization:
            return features
        return features * self.std + self.mean
|
class XYZTransform(Transform):
    """Transform between xyz joint positions and joint features."""
    def __init__(self, joints2jfeats: Joints2Jfeats, **kwargs):
        # Conversion module shared by every datastruct this transform creates.
        self.joints2jfeats = joints2jfeats
    def Datastruct(self, **kwargs):
        return XYZDatastruct(_joints2jfeats=self.joints2jfeats, transforms=self, **kwargs)
    def __repr__(self):
        return 'XYZTransform()'
|
@dataclass
class XYZDatastruct(Datastruct):
    """Datastruct holding xyz joints and/or joint features, converting lazily
    between the two via ``_joints2jfeats``."""
    transforms: XYZTransform
    _joints2jfeats: Joints2Jfeats
    features: Optional[Tensor] = None
    joints_: Optional[Tensor] = None
    jfeats_: Optional[Tensor] = None
    def __post_init__(self):
        self.datakeys = ['features', 'joints_', 'jfeats_']
        # 'features' is an alias for jfeats: seed the cache from it when given.
        if ((self.features is not None) and (self.jfeats_ is None)):
            self.jfeats_ = self.features
    @property
    def joints(self):
        # Lazily computed and cached from jfeats via the inverse transform.
        if (self.joints_ is not None):
            return self.joints_
        assert (self.jfeats_ is not None)
        self._joints2jfeats.to(self.jfeats.device)
        self.joints_ = self._joints2jfeats.inverse(self.jfeats)
        return self.joints_
    @property
    def jfeats(self):
        # Lazily computed and cached from joints via the forward transform.
        if (self.jfeats_ is not None):
            return self.jfeats_
        assert (self.joints_ is not None)
        self._joints2jfeats.to(self.joints.device)
        self.jfeats_ = self._joints2jfeats(self.joints)
        return self.jfeats_
    def __len__(self):
        return len(self.jfeats)
|
def load_example_input(txt_path):
    """Read prompts from a text file where each line is '<length> <text>'.

    :param txt_path: path to the prompt file
    :return: tuple (texts, lens) of caption strings and their target lengths
    """
    texts, lens = [], []
    # Fixed: use a context manager so the file handle is always closed
    # (the original opened the file and never closed it). The unused
    # ``count`` accumulator was removed.
    with open(txt_path, 'r') as file:
        for line in file.readlines():
            s = line.strip()
            s_l = s.split(' ')[0]
            s_t = s[(len(s_l) + 1):]
            lens.append(int(s_l))
            texts.append(s_t)
            print('Length-{}: {}'.format(s_l, s_t))
    return (texts, lens)
|
def render_batch(npy_dir, execute_python='./scripts/visualize_motion.sh', mode='sequence'):
    # Delegate batch rendering to an external shell script.
    # NOTE(review): npy_dir is interpolated unquoted into a shell command --
    # paths with spaces or shell metacharacters will break or be executed.
    # Consider subprocess.run([...], shell=False) if inputs are untrusted.
    os.system(f'{execute_python} {npy_dir} {mode}')
|
def render(execute_python, npy_path, jointtype, cfg_path):
    """Render one .npy motion through headless Blender and return the expected
    output figure path (the .npy path with a .png suffix)."""
    export_scripts = 'render.py'
    # NOTE(review): arguments are interpolated unquoted into a shell command;
    # paths containing spaces/metacharacters will break -- consider subprocess.
    os.system(f'{execute_python} --background --python {export_scripts} -- --cfg={cfg_path} --npy={npy_path} --joint_type={jointtype}')
    fig_path = Path(str(npy_path).replace('.npy', '.png'))
    return fig_path
|
def export_fbx_hand(pkl_path):
    """Export a SMPL-X (with hands) .pkl result to .fbx via headless Blender."""
    src = pkl_path
    dst = pkl_path.replace('.pkl', '.fbx')
    blender = '/apdcephfs/share_1227775/shingxchen/libs/blender_bpy/blender-2.93.2-linux-x64/blender'
    script = './scripts/fbx_output_smplx.py'
    os.system(f'{blender} -noaudio --background --python {script} --input {src} --output {dst}')
|
def export_fbx(pkl_path):
    """Export a SMPL .pkl result to .fbx via headless Blender."""
    src = pkl_path
    dst = pkl_path.replace('.pkl', '.fbx')
    blender = '/apdcephfs/share_1227775/shingxchen/libs/blender_bpy/blender-2.93.2-linux-x64/blender'
    script = './scripts/fbx_output.py'
    os.system(f'{blender} -noaudio --background --python {script} --input {src} --output {dst}')
|
def nfeats_of(rottype):
    """Return the number of features used by the given rotation encoding.

    :raises TypeError: if ``rottype`` is not a known rotation encoding
    """
    if (rottype in ['rotvec', 'axisangle']):
        return 3
    elif (rottype in ['rotquat', 'quaternion']):
        return 4
    elif (rottype in ['rot6d', '6drot', 'rotation6d']):
        return 6
    elif (rottype in ['rotmat']):
        return 9
    else:
        # Fixed: the TypeError was previously *returned* instead of raised,
        # silently handing callers an exception instance as the feature count.
        raise TypeError("This rotation type doesn't have features.")
|
def axis_angle_to(newtype, rotations):
    """Convert axis-angle rotations to the representation named by ``newtype``."""
    if newtype in ['matrix']:
        return geometry.axis_angle_to_matrix(rotations)
    if newtype in ['rotmat']:
        return matrix_to('rotmat', geometry.axis_angle_to_matrix(rotations))
    if newtype in ['rot6d', '6drot', 'rotation6d']:
        return matrix_to('rot6d', geometry.axis_angle_to_matrix(rotations))
    if newtype in ['rotquat', 'quaternion']:
        return geometry.axis_angle_to_quaternion(rotations)
    if newtype in ['rotvec', 'axisangle']:
        # Already in axis-angle form: nothing to do.
        return rotations
    raise NotImplementedError
|
def matrix_to(newtype, rotations):
    """Convert rotation matrices (..., 3, 3) to the representation ``newtype``."""
    if newtype in ['matrix']:
        return rotations
    if newtype in ['rotmat']:
        # Flatten the trailing 3x3 matrix into 9 features.
        return rotations.reshape((*rotations.shape[:-2], 9))
    if newtype in ['rot6d', '6drot', 'rotation6d']:
        return geometry.matrix_to_rotation_6d(rotations)
    if newtype in ['rotquat', 'quaternion']:
        return geometry.matrix_to_quaternion(rotations)
    if newtype in ['rotvec', 'axisangle']:
        return geometry.matrix_to_axis_angle(rotations)
    raise NotImplementedError
|
def to_matrix(oldtype, rotations):
    """Convert rotations in representation ``oldtype`` to (..., 3, 3) matrices."""
    if (oldtype in ['matrix']):
        return rotations
    if (oldtype in ['rotmat']):
        # Fixed: unflatten the trailing 9 features into a 3x3 matrix. The
        # previous code dropped TWO trailing dims (shape[:-2]) even though the
        # flat 'rotmat' layout has only one feature dim, which broke the
        # round-trip with matrix_to('rotmat', ...) for batched input.
        rotations = rotations.reshape((*rotations.shape[:(- 1)], 3, 3))
        return rotations
    elif (oldtype in ['rot6d', '6drot', 'rotation6d']):
        rotations = geometry.rotation_6d_to_matrix(rotations)
        return rotations
    elif (oldtype in ['rotquat', 'quaternion']):
        rotations = geometry.quaternion_to_matrix(rotations)
        return rotations
    elif (oldtype in ['rotvec', 'axisangle']):
        rotations = geometry.axis_angle_to_matrix(rotations)
        return rotations
    else:
        raise NotImplementedError
|
def fixseed(seed):
    """Make runs reproducible by seeding all RNG sources with ``seed``."""
    torch.manual_seed(seed)
    np.random.seed(seed)
    random.seed(seed)
|
def get_root_idx(joinstype):
    # Look up the root joint index for this joint layout. ``root_joints`` is a
    # module-level mapping defined elsewhere in the file.
    return root_joints[joinstype]
|
def create_logger(cfg, phase='train'):
    """Create the experiment output folder and a console+file logger.

    Sets cfg.FOLDER_EXP to the resolved experiment directory. Non-rank-zero
    processes (where config_logger returns None) get a CRITICAL-level
    near-silent logger instead.
    """
    root_output_dir = Path(cfg.FOLDER)
    if (not root_output_dir.exists()):
        print('=> creating {}'.format(root_output_dir))
        root_output_dir.mkdir()
    cfg_name = cfg.NAME
    model = cfg.model.model_type
    # Strip any directory/extension from the config name.
    cfg_name = os.path.basename(cfg_name).split('.')[0]
    final_output_dir = ((root_output_dir / model) / cfg_name)
    cfg.FOLDER_EXP = str(final_output_dir)
    time_str = time.strftime('%Y-%m-%d-%H-%M-%S')
    new_dir(cfg, phase, time_str, final_output_dir)
    head = '%(asctime)-15s %(message)s'
    logger = config_logger(final_output_dir, time_str, phase, head)
    if (logger is None):
        # rank_zero_only makes config_logger return None off the main process.
        logger = logging.getLogger()
        logger.setLevel(logging.CRITICAL)
        logging.basicConfig(format=head)
    return logger
|
@rank_zero_only
def config_logger(final_output_dir, time_str, phase, head):
    """Configure the root logger to log to the console and a timestamped file.

    Runs on the rank-zero process only (decorator returns None elsewhere).
    """
    log_file = '{}_{}_{}.log'.format('log', time_str, phase)
    final_log_file = (final_output_dir / log_file)
    logging.basicConfig(filename=str(final_log_file))
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    console = logging.StreamHandler()
    formatter = logging.Formatter(head)
    console.setFormatter(formatter)
    logging.getLogger('').addHandler(console)
    # NOTE(review): basicConfig above already attached a FileHandler for the
    # same file; adding a second one here appears to write every record twice
    # -- confirm whether that is intended.
    file_handler = logging.FileHandler(final_log_file, 'w')
    file_handler.setFormatter(logging.Formatter(head))
    file_handler.setLevel(logging.INFO)
    logging.getLogger('').addHandler(file_handler)
    return logger
|
@rank_zero_only
def new_dir(cfg, phase, time_str, final_output_dir):
    """Archive a stale experiment folder (unless resuming or debugging),
    recreate it, and save the resolved config inside. Rank-zero only."""
    cfg.TIME = str(time_str)
    if (os.path.exists(final_output_dir) and (cfg.TRAIN.RESUME is None) and (not cfg.DEBUG)):
        file_list = sorted(os.listdir(final_output_dir), reverse=True)
        for item in file_list:
            if item.endswith('.log'):
                # A previous run logged here: rename the folder with the new
                # timestamp suffix to keep its results.
                os.rename(str(final_output_dir), ((str(final_output_dir) + '_') + cfg.TIME))
                break
    final_output_dir.mkdir(parents=True, exist_ok=True)
    config_file = '{}_{}_{}.yaml'.format('config', time_str, phase)
    final_config_file = (final_output_dir / config_file)
    OmegaConf.save(config=cfg, f=final_config_file)
|
def to_numpy(tensor):
    """Convert a torch tensor to numpy; pass numpy arrays through unchanged.

    :raises ValueError: for any other input type
    """
    if torch.is_tensor(tensor):
        return tensor.cpu().numpy()
    if type(tensor).__module__ == 'numpy':
        return tensor
    raise ValueError('Cannot convert {} to numpy array'.format(type(tensor)))
|
def to_torch(ndarray):
    """Convert a numpy array to a torch tensor; pass tensors through unchanged.

    :raises ValueError: for any other input type
    """
    if type(ndarray).__module__ == 'numpy':
        return torch.from_numpy(ndarray)
    if torch.is_tensor(ndarray):
        return ndarray
    raise ValueError('Cannot convert {} to torch tensor'.format(type(ndarray)))
|
def cleanexit():
    """Terminate the process, bypassing handlers that might swallow SystemExit."""
    import sys
    import os
    try:
        sys.exit(0)
    except SystemExit:
        # sys.exit raises SystemExit; fall back to the hard exit that skips
        # interpreter cleanup and cannot be intercepted.
        os._exit(0)
|
def cfg_mean_nsamples_resolution(cfg):
    """Force a single sample when mean-sampling is requested; return whether
    exactly one sample will be drawn."""
    if cfg.mean and cfg.number_of_samples > 1:
        logger.error('All the samples will be the mean.. cfg.number_of_samples=1 will be forced.')
        cfg.number_of_samples = 1
    return cfg.number_of_samples == 1
|
def get_path(sample_path: Path, is_amass: bool, gender: str, split: str, onesample: bool, mean: bool, fact: float):
    """Build the output folder path encoding fact/gender/split/sampling options."""
    if onesample:
        extra = '_mean' if mean else ''
    else:
        extra = '_multi'
    fact_prefix = '' if fact == 1 else f'{fact}_'
    gender_prefix = f'{gender}_' if is_amass else ''
    return sample_path / f'{fact_prefix}{gender_prefix}{split}{extra}'
|
def lengths_to_mask(lengths: List[int], device: torch.device, max_len: int=None) -> Tensor:
    """Build a boolean padding mask of shape (len(lengths), max_len).

    :param lengths: valid length of each sequence in the batch
    :param device: device to allocate the mask on
    :param max_len: pad width; defaults to max(lengths)
    :return: mask where mask[i, t] is True for t < lengths[i]
    """
    lengths = torch.tensor(lengths, device=device)
    # Fixed: test against None explicitly so an explicit (if degenerate)
    # max_len=0 is honored instead of silently falling back to max(lengths).
    max_len = (max_len if max_len is not None else max(lengths))
    mask = (torch.arange(max_len, device=device).expand(len(lengths), max_len) < lengths.unsqueeze(1))
    return mask
|
def detach_to_numpy(tensor):
    """Detach ``tensor`` from autograd and return it as a CPU numpy array."""
    detached = tensor.detach()
    return detached.cpu().numpy()
|
def remove_padding(tensors, lengths):
    """Trim each padded sequence back to its true length."""
    return [seq[:length] for seq, length in zip(tensors, lengths)]
|
def nfeats_of(rottype):
    """Return the number of features used by the given rotation encoding.

    NOTE(review): this duplicates the earlier nfeats_of in this file --
    consider consolidating.

    :raises TypeError: if ``rottype`` is not a known rotation encoding
    """
    if (rottype in ['rotvec', 'axisangle']):
        return 3
    elif (rottype in ['rotquat', 'quaternion']):
        return 4
    elif (rottype in ['rot6d', '6drot', 'rotation6d']):
        return 6
    elif (rottype in ['rotmat']):
        return 9
    else:
        # Fixed: the TypeError was previously *returned* instead of raised.
        raise TypeError("This rotation type doesn't have features.")
|
def axis_angle_to(newtype, rotations):
    """Convert axis-angle rotations to the representation named by ``newtype``.

    NOTE: duplicates the earlier axis_angle_to in this file.
    """
    if newtype in ['matrix']:
        return geometry.axis_angle_to_matrix(rotations)
    if newtype in ['rotmat']:
        return matrix_to('rotmat', geometry.axis_angle_to_matrix(rotations))
    if newtype in ['rot6d', '6drot', 'rotation6d']:
        return matrix_to('rot6d', geometry.axis_angle_to_matrix(rotations))
    if newtype in ['rotquat', 'quaternion']:
        return geometry.axis_angle_to_quaternion(rotations)
    if newtype in ['rotvec', 'axisangle']:
        # Already in axis-angle form: nothing to do.
        return rotations
    raise NotImplementedError
|
def matrix_to(newtype, rotations):
    """Convert rotation matrices (..., 3, 3) to the representation ``newtype``.

    NOTE: duplicates the earlier matrix_to in this file.
    """
    if newtype in ['matrix']:
        return rotations
    if newtype in ['rotmat']:
        # Flatten the trailing 3x3 matrix into 9 features.
        return rotations.reshape((*rotations.shape[:-2], 9))
    if newtype in ['rot6d', '6drot', 'rotation6d']:
        return geometry.matrix_to_rotation_6d(rotations)
    if newtype in ['rotquat', 'quaternion']:
        return geometry.matrix_to_quaternion(rotations)
    if newtype in ['rotvec', 'axisangle']:
        return geometry.matrix_to_axis_angle(rotations)
    raise NotImplementedError
|
def to_matrix(oldtype, rotations):
    """Convert rotations in representation ``oldtype`` to (..., 3, 3) matrices.

    NOTE(review): this duplicates the earlier to_matrix in this file --
    consider consolidating.
    """
    if (oldtype in ['matrix']):
        return rotations
    if (oldtype in ['rotmat']):
        # Fixed: unflatten the trailing 9 features into a 3x3 matrix. The
        # previous code dropped TWO trailing dims (shape[:-2]) even though the
        # flat 'rotmat' layout has only one feature dim, which broke the
        # round-trip with matrix_to('rotmat', ...) for batched input.
        rotations = rotations.reshape((*rotations.shape[:(- 1)], 3, 3))
        return rotations
    elif (oldtype in ['rot6d', '6drot', 'rotation6d']):
        rotations = geometry.rotation_6d_to_matrix(rotations)
        return rotations
    elif (oldtype in ['rotquat', 'quaternion']):
        rotations = geometry.quaternion_to_matrix(rotations)
        return rotations
    elif (oldtype in ['rotvec', 'axisangle']):
        rotations = geometry.axis_angle_to_matrix(rotations)
        return rotations
    else:
        raise NotImplementedError
|
def subsample(num_frames, last_framerate, new_framerate):
    """Return the frame indices that downsample from ``last_framerate`` to
    ``new_framerate`` (which must not exceed the original rate)."""
    step = int(last_framerate / new_framerate)
    assert (step >= 1)
    return np.arange(0, num_frames, step)
|
def upsample(motion, last_framerate, new_framerate):
    """Linearly interpolate ``motion`` frames from ``last_framerate`` up to
    ``new_framerate`` (an integer multiple of the original rate)."""
    step = int(new_framerate / last_framerate)
    assert (step >= 1)
    # Interpolation weights for the sub-frames between consecutive keyframes.
    alpha = np.linspace(0, 1, step + 1)
    start = np.einsum('l,...->l...', 1 - alpha, motion[:-1])
    stop = np.einsum('l,...->l...', alpha, motion[1:])
    # Drop the weight-1 endpoint of each segment (it is the next segment's start).
    blended = (start + stop)[:-1]
    output = np.concatenate(blended.swapaxes(1, 0))
    # Re-append the final keyframe, which the segment loop excluded.
    return np.concatenate((output, motion[[-1]]))
|
def lengths_to_mask(lengths):
    """Boolean mask (batch, max_len) from a 1-D tensor of sequence lengths:
    True where the position index is below the sequence's length."""
    max_len = max(lengths)
    positions = torch.arange(max_len, device=lengths.device)
    return positions.expand(len(lengths), max_len) < lengths.unsqueeze(1)
|
def collate_tensors(batch):
    """Zero-pad a list of tensors to a common shape and stack along dim 0."""
    dims = batch[0].dim()
    max_size = [max(b.size(d) for b in batch) for d in range(dims)]
    canvas = batch[0].new_zeros(size=(len(batch),) + tuple(max_size))
    for i, b in enumerate(batch):
        # Narrow down to this sample's extent in every dim, then copy it in.
        view = canvas[i]
        for d in range(dims):
            view = view.narrow(d, 0, b.size(d))
        view.add_(b)
    return canvas
|
def collate(batch):
    """Collate (data, label) pairs into padded tensors plus mask and lengths."""
    data_tensor = collate_tensors([b[0] for b in batch])
    label_tensor = torch.as_tensor([b[1] for b in batch])
    # Sequence length taken from the last axis of the first channel/joint.
    length_tensor = torch.as_tensor([len(b[0][0][0]) for b in batch])
    mask_tensor = lengths_to_mask(length_tensor)
    return {'x': data_tensor, 'y': label_tensor, 'mask': mask_tensor, 'lengths': length_tensor}
|
def collate_data3d_slow(batch):
    """Collate a list of dicts by padding/stacking every key with collate_tensors."""
    out = {}
    for key in batch[0].keys():
        out[key] = collate_tensors([b[key] for b in batch])
    return out
|
def collate_data3d(batch):
    """Collate a list of dicts: stack tensor values along a new batch dim,
    but keep the 'paths' entries as a plain list."""
    out = {}
    for key in batch[0].keys():
        values = [b[key] for b in batch]
        if key == 'paths':
            out[key] = values
        else:
            out[key] = torch.stack(values, axis=0)
    return out
|
# NOTE: non-code scrape residue (dataset-viewer UI text), commented out so it
# does not break the module:
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.