code stringlengths 17 6.64M |
|---|
def plane_mat():
    """Build the checkerboard diffuse floor material named 'plane'."""
    mat = bpy.data.materials.new(name='plane')
    mat.use_nodes = True
    clear_material(mat)
    tree = mat.node_tree
    out_node = tree.nodes.new(type='ShaderNodeOutputMaterial')
    bsdf = tree.nodes.new(type='ShaderNodeBsdfDiffuse')
    tex = tree.nodes.new(type='ShaderNodeTexChecker')
    # fine checker pattern with light/dark gray tiles
    tex.inputs['Scale'].default_value = 1024
    tex.inputs['Color1'].default_value = (0.8, 0.8, 0.8, 1)
    tex.inputs['Color2'].default_value = (0.3, 0.3, 0.3, 1)
    tree.links.new(tex.outputs['Color'], bsdf.inputs['Color'])
    tree.links.new(bsdf.outputs['BSDF'], out_node.inputs['Surface'])
    bsdf.inputs['Roughness'].default_value = 0.127451
    return mat
|
def plane_mat_uni():
    """Build a uniform (plain gray) diffuse floor material named 'plane_uni'."""
    mat = bpy.data.materials.new(name='plane_uni')
    mat.use_nodes = True
    clear_material(mat)
    tree = mat.node_tree
    out_node = tree.nodes.new(type='ShaderNodeOutputMaterial')
    bsdf = tree.nodes.new(type='ShaderNodeBsdfDiffuse')
    bsdf.inputs['Color'].default_value = (0.8, 0.8, 0.8, 1)
    bsdf.inputs['Roughness'].default_value = 0.127451
    tree.links.new(bsdf.outputs['BSDF'], out_node.inputs['Surface'])
    return mat
|
def prune_begin_end(data, perc):
    """Drop a fraction *perc* of elements from both ends of *data*.

    Returns *data* unchanged when the margin rounds down to zero.
    """
    margin = int(len(data) * perc)
    return data if margin == 0 else data[margin:-margin]
|
def render_current_frame(path):
    """Render the active scene and write the still image to *path*."""
    render_settings = bpy.context.scene.render
    render_settings.filepath = path
    bpy.ops.render.render(use_viewport=True, write_still=True)
|
def render(npydata, frames_folder, *, mode, faces_path, gt=False, exact_frame=None, num=8, downsample=True, canonicalize=True, always_on_floor=False, denoising=True, oldrender=True, jointstype='mmm', res='high', init=True, accelerator='gpu', device=[0]):
    """Render motion data (meshes or joints) with Blender.

    mode: 'video' writes one png per frame into *frames_folder*;
    'sequence' overlays *num* keyframes into a single image;
    'frame' renders the single frame selected by *exact_frame*.
    Returns the frames folder for 'video', otherwise the image path.
    """
    # NOTE(review): mutable default `device=[0]` is shared across calls;
    # it is only read here, but consider a None default.
    if init:
        setup_scene(res=res, denoising=denoising, oldrender=oldrender, accelerator=accelerator, device=device)
    is_mesh = mesh_detect(npydata)
    if (mode == 'video'):
        if always_on_floor:
            frames_folder += '_of'
        os.makedirs(frames_folder, exist_ok=True)
        # joints are decimated 8x for video; meshes are used as-is
        if (downsample and (not is_mesh)):
            npydata = npydata[::8]
    elif (mode == 'sequence'):
        (img_name, ext) = os.path.splitext(frames_folder)
        if always_on_floor:
            img_name += '_of'
        img_path = f'{img_name}{ext}'
    elif (mode == 'frame'):
        (img_name, ext) = os.path.splitext(frames_folder)
        if always_on_floor:
            img_name += '_of'
        img_path = f'{img_name}_{exact_frame}{ext}'
    if (mode == 'sequence'):
        # trim 20% of frames at each end so the overlay stays readable
        perc = 0.2
        npydata = prune_begin_end(npydata, perc)
    if is_mesh:
        from .meshes import Meshes
        data = Meshes(npydata, gt=gt, mode=mode, faces_path=faces_path, canonicalize=canonicalize, always_on_floor=always_on_floor)
    else:
        from .joints import Joints
        data = Joints(npydata, gt=gt, mode=mode, canonicalize=canonicalize, always_on_floor=always_on_floor, jointstype=jointstype)
    nframes = len(data)
    show_traj(data.trajectory)
    plot_floor(data.data, big_plane=False)
    camera = Camera(first_root=data.get_root(0), mode=mode, is_mesh=is_mesh)
    frameidx = get_frameidx(mode=mode, nframes=nframes, exact_frame=exact_frame, frames_to_keep=num)
    nframes_to_render = len(frameidx)
    if (mode == 'sequence'):
        camera.update(data.get_mean_root())
    imported_obj_names = []
    # NOTE(review): the loop variable shadows the `frameidx` list above.
    for (index, frameidx) in enumerate(frameidx):
        if (mode == 'sequence'):
            # fade the material along the sequence: frac goes 0 -> 1
            frac = (index / (nframes_to_render - 1))
            mat = data.get_sequence_mat(frac)
        else:
            mat = data.mat
            camera.update(data.get_root(frameidx))
        islast = (index == (nframes_to_render - 1))
        objname = data.load_in_blender(frameidx, mat)
        name = f'{str(index).zfill(4)}'
        if (mode == 'video'):
            path = os.path.join(frames_folder, f'frame_{name}.png')
        else:
            path = img_path
        if (mode == 'sequence'):
            # sequence objects stay in the scene until the final composite render
            imported_obj_names.extend(objname)
        elif (mode == 'frame'):
            camera.update(data.get_root(frameidx))
        if ((mode != 'sequence') or islast):
            render_current_frame(path)
            delete_objs(objname)
    delete_objs(imported_obj_names)
    delete_objs(['Plane', 'myCurve', 'Cylinder'])
    if (mode == 'video'):
        return frames_folder
    else:
        return img_path
|
def get_frameidx(*, mode, nframes, exact_frame, frames_to_keep):
    """Return the frame indices to render for the given *mode*.

    'sequence': *frames_to_keep* evenly spaced indices; 'frame': the single
    index at fraction *exact_frame*; 'video': every frame.
    """
    if mode == 'sequence':
        spaced = np.round(np.linspace(0, nframes - 1, frames_to_keep)).astype(int)
        return list(spaced)
    if mode == 'frame':
        return [int(exact_frame * nframes)]
    if mode == 'video':
        return range(0, nframes)
    raise ValueError(f'Not support {mode} render mode')
|
def setup_renderer(denoising=True, oldrender=True, accelerator='gpu', device=None):
    """Configure the Cycles render engine.

    Args:
        denoising: enable the Cycles denoiser.
        oldrender: if False, switch to the 'Standard' view transform with
            transparent film and tweaked gamma/exposure.
        accelerator: 'gpu' enables CUDA compute devices; anything else
            leaves the CPU device selection untouched.
        device: indices of the compute devices to enable (defaults to [0]).
    """
    # fix: avoid the mutable-default-argument pitfall of `device=[0]`
    if device is None:
        device = [0]
    bpy.context.scene.render.engine = 'CYCLES'
    bpy.data.scenes[0].render.engine = 'CYCLES'
    if accelerator.lower() == 'gpu':
        prefs = bpy.context.preferences.addons['cycles'].preferences
        prefs.compute_device_type = 'CUDA'
        bpy.context.scene.cycles.device = 'GPU'
        prefs.get_devices()
        # enable exactly the requested device indices (enumerate replaces
        # the original hand-maintained counter)
        for i, d in enumerate(prefs.devices):
            if i in device:
                d['use'] = 1
                print(d['name'], ''.join(str(i) for i in device))
            else:
                d['use'] = 0
    if denoising:
        bpy.context.scene.cycles.use_denoising = True
    # NOTE(review): tile_x/tile_y were removed in Blender 3.0+; this assumes
    # a 2.9x-era API -- confirm against the Blender version in use.
    bpy.context.scene.render.tile_x = 256
    bpy.context.scene.render.tile_y = 256
    bpy.context.scene.cycles.samples = 64
    if not oldrender:
        bpy.context.scene.view_settings.view_transform = 'Standard'
        bpy.context.scene.render.film_transparent = True
        bpy.context.scene.display_settings.display_device = 'sRGB'
        bpy.context.scene.view_settings.gamma = 1.2
        bpy.context.scene.view_settings.exposure = -0.75
|
def setup_scene(res='high', denoising=True, oldrender=True, accelerator='gpu', device=[0]):
    """Prepare the Blender scene: resolution, white background, sun light,
    a scaled empty, and the Cycles renderer. Returns the configured scene."""
    scene = bpy.data.scenes['Scene']
    assert (res in ['ultra', 'high', 'med', 'low'])
    # base resolution is 1280x1024, scaled per quality preset
    if (res == 'high'):
        scene.render.resolution_x = 1280
        scene.render.resolution_y = 1024
    elif (res == 'med'):
        scene.render.resolution_x = (1280 // 2)
        scene.render.resolution_y = (1024 // 2)
    elif (res == 'low'):
        scene.render.resolution_x = (1280 // 4)
        scene.render.resolution_y = (1024 // 4)
    elif (res == 'ultra'):
        scene.render.resolution_x = (1280 * 2)
        scene.render.resolution_y = (1024 * 2)
    scene.render.film_transparent = True
    world = bpy.data.worlds['World']
    world.use_nodes = True
    bg = world.node_tree.nodes['Background']
    # plain white environment light at full strength
    bg.inputs[0].default_value[:3] = (1.0, 1.0, 1.0)
    bg.inputs[1].default_value = 1.0
    # remove Blender's default cube if it is still in the scene
    if ('Cube' in bpy.data.objects):
        bpy.data.objects['Cube'].select_set(True)
        bpy.ops.object.delete()
    bpy.ops.object.light_add(type='SUN', align='WORLD', location=(0, 0, 0), scale=(1, 1, 1))
    bpy.data.objects['Sun'].data.energy = 1.5
    # add an empty and scale it 10x (resize applies to the newly added, active empty)
    bpy.ops.object.empty_add(type='PLAIN_AXES', align='WORLD', location=(0, 0, 0), scale=(1, 1, 1))
    bpy.ops.transform.resize(value=(10, 10, 10), orient_type='GLOBAL', orient_matrix=((1, 0, 0), (0, 1, 0), (0, 0, 1)), orient_matrix_type='GLOBAL', mirror=True, use_proportional_edit=False, proportional_edit_falloff='SMOOTH', proportional_size=1, use_proportional_connected=False, use_proportional_projected=False)
    bpy.ops.object.select_all(action='DESELECT')
    setup_renderer(denoising=denoising, oldrender=oldrender, accelerator=accelerator, device=device)
    return scene
|
def mesh_detect(data):
    """Heuristic: arrays with more than 1000 entries along axis 1 are treated
    as mesh vertices rather than skeleton joints."""
    return data.shape[1] > 1000
|
class ndarray_pydata(np.ndarray):
    """ndarray view whose truthiness means "has at least one row".

    Needed because plain multi-element arrays raise on bool().
    """

    def __bool__(self) -> bool:
        return self.shape[0] > 0
|
def load_numpy_vertices_into_blender(vertices, faces, name, mat):
    """Create a smooth-shaded Blender mesh object from raw vertex/face arrays."""
    mesh = bpy.data.meshes.new(name)
    # the ndarray_pydata view gives `faces` len()-based truthiness, which
    # from_pydata needs when handed a numpy array
    mesh.from_pydata(vertices, [], faces.view(ndarray_pydata))
    mesh.validate()
    obj = bpy.data.objects.new(name, mesh)
    bpy.context.scene.collection.objects.link(obj)
    bpy.ops.object.select_all(action='DESELECT')
    obj.select_set(True)
    obj.active_material = mat
    # make the new object active so shade_smooth applies to it
    bpy.context.view_layer.objects.active = obj
    bpy.ops.object.shade_smooth()
    bpy.ops.object.select_all(action='DESELECT')
    return True
|
def delete_objs(names):
    """Delete every scene object whose name starts or ends with any of *names*.

    Accepts a single name or a list of names.
    """
    if (not isinstance(names, list)):
        names = [names]
    bpy.ops.object.select_all(action='DESELECT')
    # select all matching objects first, then delete them in one operation
    for obj in bpy.context.scene.objects:
        for name in names:
            if (obj.name.startswith(name) or obj.name.endswith(name)):
                obj.select_set(True)
    bpy.ops.object.delete()
    bpy.ops.object.select_all(action='DESELECT')
|
class LevelsFilter(logging.Filter):
    """Logging filter that keeps only records whose level is in *levels*.

    Args:
        levels: iterable of level names, e.g. ['INFO', 'WARNING'].
    """

    def __init__(self, levels):
        # fix: initialize the base Filter (sets self.name used by logging)
        super().__init__()
        self.levels = [getattr(logging, level) for level in levels]

    def filter(self, record):
        return record.levelno in self.levels
|
class StreamToLogger(object):
    """Fake file-like stream object that redirects writes to a logger instance."""

    def __init__(self, logger, level):
        self.logger = logger
        self.level = level
        self.linebuf = ''

    def write(self, buf):
        # forward each line of the (right-stripped) buffer to the logger
        stripped = buf.rstrip()
        for line in stripped.splitlines():
            self.logger.log(self.level, line.rstrip())

    def flush(self):
        # present for file-like API compatibility; nothing is buffered
        pass
|
class TqdmLoggingHandler(logging.Handler):
    """Handler that emits records through tqdm.write so log lines do not
    break active progress bars."""

    def __init__(self, level=logging.NOTSET):
        super().__init__(level)

    def emit(self, record):
        try:
            tqdm.tqdm.write(self.format(record))
            self.flush()
        except Exception:
            # defer to logging's standard error handling
            self.handleError(record)
|
def generate_id() -> str:
    """Return a random 8-character run id over lowercase letters and digits."""
    alphabet = list('0123456789abcdefghijklmnopqrstuvwxyz')
    return shortuuid.ShortUUID(alphabet=alphabet).random(8)
|
class Transform():
    """Base class for feature transforms; provides batch collation of datastructs."""

    def collate(self, lst_datastruct):
        """Pad-and-stack a list of datastructs into one batched Datastruct."""
        from GraphMotion.datasets.utils import collate_tensor_with_padding
        example = lst_datastruct[0]

        def collate_or_none(key):
            # keys that are None in the first sample are assumed None everywhere
            if (example[key] is None):
                return None
            key_lst = [x[key] for x in lst_datastruct]
            return collate_tensor_with_padding(key_lst)
        kwargs = {key: collate_or_none(key) for key in example.datakeys}
        return self.Datastruct(**kwargs)
|
@dataclass
class Datastruct():
    """Dict-like dataclass base for motion data containers.

    Subclasses set ``self.datakeys`` in ``__post_init__``; those keys name the
    tensor fields that ``to``/``detach``/``device`` operate on.
    """

    def __getitem__(self, key):
        return getattr(self, key)

    def __setitem__(self, key, value):
        self.__dict__[key] = value

    def get(self, key, default=None):
        return getattr(self, key, default)

    def __iter__(self):
        return self.keys()

    def keys(self):
        keys = [t.name for t in fields(self)]
        return iter(keys)

    def values(self):
        values = [getattr(self, t.name) for t in fields(self)]
        return iter(values)

    def items(self):
        data = [(t.name, getattr(self, t.name)) for t in fields(self)]
        return iter(data)

    def to(self, *args, **kwargs):
        # move every non-None data tensor; mirrors torch.Tensor.to semantics
        for key in self.datakeys:
            if (self[key] is not None):
                self[key] = self[key].to(*args, **kwargs)
        return self

    @property
    def device(self):
        # device of the first data tensor
        return self[self.datakeys[0]].device

    def detach(self):
        """Return a new datastruct with every data tensor detached from autograd."""
        def detach_or_none(tensor):
            if (tensor is not None):
                return tensor.detach()
            return None
        kwargs = {key: detach_or_none(self[key]) for key in self.datakeys}
        return self.transforms.Datastruct(**kwargs)
|
def main():
    """Denormalize a saved HumanML3D feature file, recover 3D joints and
    render the result to an mp4 animation."""
    data_root = '../datasets/humanml3d'
    # NOTE(review): 'feastures_path' looks like a typo for 'features_path'
    feastures_path = 'in.npy'
    animation_save_path = 'in.mp4'
    fps = 20
    mean = np.load(pjoin(data_root, 'Mean.npy'))
    std = np.load(pjoin(data_root, 'Std.npy'))
    motion = np.load(feastures_path)
    # undo dataset normalization
    motion = ((motion * std) + mean)
    # 22 joints -> HumanML3D body skeleton
    motion_rec = recover_from_ric(torch.tensor(motion), 22).cpu().numpy()
    # empirical visualization scale factor -- TODO confirm its origin
    motion_rec = (motion_rec * 1.3)
    plot_3d_motion(animation_save_path, motion_rec, title='input', fps=fps)
|
class IdentityTransform(Transform):
    """Transform that performs no feature conversion at all."""

    def __init__(self, **kwargs):
        # nothing to configure
        pass

    def Datastruct(self, **kwargs):
        # factory for the matching datastruct type
        return IdentityDatastruct(**kwargs)

    def __repr__(self):
        return 'IdentityTransform()'
|
@dataclass
class IdentityDatastruct(Datastruct):
    """Datastruct with a single 'features' key and no conversion logic."""
    transforms: IdentityTransform
    features: Optional[Tensor] = None

    def __post_init__(self):
        self.datakeys = ['features']

    def __len__(self):
        # fix: the original returned len(self.rfeats), but this datastruct
        # has no 'rfeats' attribute -- 'features' is its only data key
        return len(self.features)
|
class Joints2Jfeats(nn.Module):
    """Base module converting joint positions to joint features, with optional
    mean/std normalization statistics loaded from *path*."""

    def __init__(self, path: Optional[str] = None,
                 normalization: bool = False,
                 eps: float = 1e-12, **kwargs) -> None:
        if normalization and path is None:
            raise TypeError('You should provide a path if normalization is on.')
        super().__init__()
        self.normalization = normalization
        self.eps = eps
        if normalization:
            base = Path(path)
            self.register_buffer('mean', torch.load(base / 'jfeats_mean.pt'))
            self.register_buffer('std', torch.load(base / 'jfeats_std.pt'))

    def normalize(self, features: Tensor) -> Tensor:
        if not self.normalization:
            return features
        return (features - self.mean) / (self.std + self.eps)

    def unnormalize(self, features: Tensor) -> Tensor:
        if not self.normalization:
            return features
        return features * self.std + self.mean
|
class Rots2Joints(nn.Module):
    """Base module converting rotations to joints, with optional mean/std
    normalization statistics loaded from *path*."""

    def __init__(self, path: Optional[str] = None,
                 normalization: bool = False,
                 eps: float = 1e-12, **kwargs) -> None:
        if normalization and path is None:
            raise TypeError('You should provide a path if normalization is on.')
        super().__init__()
        self.normalization = normalization
        self.eps = eps
        if normalization:
            base = Path(path)
            self.register_buffer('mean', torch.load(base / 'mean.pt'))
            self.register_buffer('std', torch.load(base / 'std.pt'))

    def normalize(self, features: Tensor) -> Tensor:
        if not self.normalization:
            return features
        return (features - self.mean) / (self.std + self.eps)

    def unnormalize(self, features: Tensor) -> Tensor:
        if not self.normalization:
            return features
        return features * self.std + self.mean
|
class Rots2Rfeats(nn.Module):
    """Base module converting rotations to rotation features, with optional
    mean/std normalization statistics loaded from *path*."""

    def __init__(self, path: Optional[str] = None,
                 normalization: bool = False,
                 eps: float = 1e-12, **kwargs) -> None:
        if normalization and path is None:
            raise TypeError('You should provide a path if normalization is on.')
        super().__init__()
        self.normalization = normalization
        self.eps = eps
        if normalization:
            base = Path(path)
            self.register_buffer('mean', torch.load(base / 'rfeats_mean.pt'))
            self.register_buffer('std', torch.load(base / 'rfeats_std.pt'))

    def normalize(self, features: Tensor) -> Tensor:
        if not self.normalization:
            return features
        return (features - self.mean) / (self.std + self.eps)

    def unnormalize(self, features: Tensor) -> Tensor:
        if not self.normalization:
            return features
        return features * self.std + self.mean
|
class XYZTransform(Transform):
    """Transform operating on xyz joints through a Joints2Jfeats converter."""

    def __init__(self, joints2jfeats: Joints2Jfeats, **kwargs):
        self.joints2jfeats = joints2jfeats

    def Datastruct(self, **kwargs):
        # inject the converter and a back-reference to this transform
        return XYZDatastruct(_joints2jfeats=self.joints2jfeats,
                             transforms=self, **kwargs)

    def __repr__(self):
        return 'XYZTransform()'
|
@dataclass
class XYZDatastruct(Datastruct):
    """Datastruct holding xyz joints and their lazily-derived joint features."""
    transforms: XYZTransform
    _joints2jfeats: Joints2Jfeats
    features: Optional[Tensor] = None
    joints_: Optional[Tensor] = None
    jfeats_: Optional[Tensor] = None

    def __post_init__(self):
        self.datakeys = ['features', 'joints_', 'jfeats_']
        # 'features' is an alias for the joint features
        if ((self.features is not None) and (self.jfeats_ is None)):
            self.jfeats_ = self.features

    @property
    def joints(self):
        # lazily invert joint features back to joints, caching the result
        if (self.joints_ is not None):
            return self.joints_
        assert (self.jfeats_ is not None)
        self._joints2jfeats.to(self.jfeats.device)
        self.joints_ = self._joints2jfeats.inverse(self.jfeats)
        return self.joints_

    @property
    def jfeats(self):
        # lazily compute joint features from joints, caching the result
        if (self.jfeats_ is not None):
            return self.jfeats_
        assert (self.joints_ is not None)
        self._joints2jfeats.to(self.joints.device)
        self.jfeats_ = self._joints2jfeats(self.joints)
        return self.jfeats_

    def __len__(self):
        return len(self.jfeats)
|
def load_example_input(txt_path):
    """Read demo prompts from *txt_path*.

    Each line has the form '<length> <text...>'.

    Returns:
        (texts, lens): the prompt strings and their integer target lengths.
    """
    texts, lens = [], []
    # fix: use a context manager so the file handle is closed
    # (the original opened the file and never closed it)
    with open(txt_path, 'r') as file:
        for line in file:
            s = line.strip()
            s_l = s.split(' ')[0]
            # everything after the leading length token is the prompt text
            s_t = s[len(s_l) + 1:]
            lens.append(int(s_l))
            texts.append(s_t)
            print('Length-{}: {}'.format(s_l, s_t))
    return (texts, lens)
|
def render_batch(npy_dir, execute_python='./scripts/visualize_motion.sh', mode='sequence'):
    """Invoke the external visualization script on every npy file in *npy_dir*."""
    command = f'{execute_python} {npy_dir} {mode}'
    os.system(command)
|
def render(execute_python, npy_path, jointtype, cfg_path):
    """Run the Blender render script on *npy_path*; return the expected png path."""
    # NOTE(review): this redefines an earlier `render` function in this file;
    # only the later definition is visible at runtime.
    export_scripts = 'render.py'
    os.system(f'{execute_python} --background --python {export_scripts} -- --cfg={cfg_path} --npy={npy_path} --joint_type={jointtype}')
    # the render script is expected to write a png next to the npy file
    fig_path = Path(str(npy_path).replace('.npy', '.png'))
    return fig_path
|
def export_fbx_hand(pkl_path):
    """Convert a SMPL-X pkl result to fbx by running Blender in background mode."""
    src = pkl_path
    dst = pkl_path.replace('.pkl', '.fbx')
    # renamed locals: the original shadowed the builtins `input` and `output`
    blender_bin = '/apdcephfs/share_1227775/shingxchen/libs/blender_bpy/blender-2.93.2-linux-x64/blender'
    script = './scripts/fbx_output_smplx.py'
    os.system(f'{blender_bin} -noaudio --background --python {script} --input {src} --output {dst}')
|
def export_fbx(pkl_path):
    """Convert a SMPL pkl result to fbx by running Blender in background mode."""
    src = pkl_path
    dst = pkl_path.replace('.pkl', '.fbx')
    # renamed locals: the original shadowed the builtin `input`
    blender_bin = '/apdcephfs/share_1227775/shingxchen/libs/blender_bpy/blender-2.93.2-linux-x64/blender'
    script = './scripts/fbx_output.py'
    os.system(f'{blender_bin} -noaudio --background --python {script} --input {src} --output {dst}')
|
def nfeats_of(rottype):
    """Return the feature dimensionality of rotation representation *rottype*.

    Raises:
        TypeError: when *rottype* has no fixed feature size.
    """
    if rottype in ['rotvec', 'axisangle']:
        return 3
    elif rottype in ['rotquat', 'quaternion']:
        return 4
    elif rottype in ['rot6d', '6drot', 'rotation6d']:
        return 6
    elif rottype in ['rotmat']:
        return 9
    # fix: the original *returned* the TypeError instance instead of raising it
    raise TypeError("This rotation type doesn't have features.")
|
def axis_angle_to(newtype, rotations):
    """Convert axis-angle rotations to the representation *newtype*.

    Supported targets: 'matrix', 'rotmat' (flattened 3x3), 'rot6d',
    'rotquat'/'quaternion', and 'rotvec'/'axisangle' (identity).
    Raises NotImplementedError for anything else.
    """
    if (newtype in ['matrix']):
        rotations = geometry.axis_angle_to_matrix(rotations)
        return rotations
    elif (newtype in ['rotmat']):
        # go through matrices, then flatten via matrix_to
        rotations = geometry.axis_angle_to_matrix(rotations)
        rotations = matrix_to('rotmat', rotations)
        return rotations
    elif (newtype in ['rot6d', '6drot', 'rotation6d']):
        rotations = geometry.axis_angle_to_matrix(rotations)
        rotations = matrix_to('rot6d', rotations)
        return rotations
    elif (newtype in ['rotquat', 'quaternion']):
        rotations = geometry.axis_angle_to_quaternion(rotations)
        return rotations
    elif (newtype in ['rotvec', 'axisangle']):
        # already in the requested representation
        return rotations
    else:
        raise NotImplementedError
|
def matrix_to(newtype, rotations):
    """Convert rotation matrices (..., 3, 3) into representation *newtype*."""
    if newtype in ['matrix']:
        return rotations
    if newtype in ['rotmat']:
        # flatten the trailing 3x3 block into 9 features
        flat_shape = (*rotations.shape[:-2], 9)
        return rotations.reshape(flat_shape)
    if newtype in ['rot6d', '6drot', 'rotation6d']:
        return geometry.matrix_to_rotation_6d(rotations)
    if newtype in ['rotquat', 'quaternion']:
        return geometry.matrix_to_quaternion(rotations)
    if newtype in ['rotvec', 'axisangle']:
        return geometry.matrix_to_axis_angle(rotations)
    raise NotImplementedError
|
def to_matrix(oldtype, rotations):
    """Convert rotations in representation *oldtype* back to matrices (..., 3, 3)."""
    if (oldtype in ['matrix']):
        return rotations
    if (oldtype in ['rotmat']):
        # NOTE(review): shape[:(-2)] drops the feature axis AND the axis before
        # it; for (..., 9) input one would expect shape[:-1] + (3, 3).
        # Verify intended input shape with callers before relying on this.
        rotations = rotations.reshape((*rotations.shape[:(- 2)], 3, 3))
        return rotations
    elif (oldtype in ['rot6d', '6drot', 'rotation6d']):
        rotations = geometry.rotation_6d_to_matrix(rotations)
        return rotations
    elif (oldtype in ['rotquat', 'quaternion']):
        rotations = geometry.quaternion_to_matrix(rotations)
        return rotations
    elif (oldtype in ['rotvec', 'axisangle']):
        rotations = geometry.axis_angle_to_matrix(rotations)
        return rotations
    else:
        raise NotImplementedError
|
def fixseed(seed):
    """Seed the python, numpy and torch RNGs for reproducible runs."""
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(seed)
|
def get_root_idx(joinstype):
    """Look up the root joint index for the given skeleton type in the
    module-level `root_joints` table."""
    return root_joints[joinstype]
|
def create_logger(cfg, phase='train'):
    """Create the experiment output directory tree and a configured logger.

    Only rank zero configures handlers (config_logger is @rank_zero_only);
    other ranks fall back to a CRITICAL-level logger.
    """
    root_output_dir = Path(cfg.FOLDER)
    if (not root_output_dir.exists()):
        print('=> creating {}'.format(root_output_dir))
        root_output_dir.mkdir()
    cfg_name = cfg.NAME
    model = cfg.model.model_type
    # strip directory and extension from the config name
    cfg_name = os.path.basename(cfg_name).split('.')[0]
    final_output_dir = ((root_output_dir / model) / cfg_name)
    cfg.FOLDER_EXP = str(final_output_dir)
    time_str = time.strftime('%Y-%m-%d-%H-%M-%S')
    new_dir(cfg, phase, time_str, final_output_dir)
    head = '%(asctime)-15s %(message)s'
    logger = config_logger(final_output_dir, time_str, phase, head)
    if (logger is None):
        # non-zero ranks: rank_zero_only returned None
        logger = logging.getLogger()
        logger.setLevel(logging.CRITICAL)
        logging.basicConfig(format=head)
    return logger
|
@rank_zero_only
def config_logger(final_output_dir, time_str, phase, head):
    """Configure the root logger with a console handler and a timestamped
    log file under *final_output_dir* (rank zero only)."""
    log_file = '{}_{}_{}.log'.format('log', time_str, phase)
    final_log_file = (final_output_dir / log_file)
    logging.basicConfig(filename=str(final_log_file))
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    # mirror log records to the console with the same format
    console = logging.StreamHandler()
    formatter = logging.Formatter(head)
    console.setFormatter(formatter)
    logging.getLogger('').addHandler(console)
    # dedicated file handler opened in write mode ('w' truncates)
    file_handler = logging.FileHandler(final_log_file, 'w')
    file_handler.setFormatter(logging.Formatter(head))
    file_handler.setLevel(logging.INFO)
    logging.getLogger('').addHandler(file_handler)
    return logger
|
@rank_zero_only
def new_dir(cfg, phase, time_str, final_output_dir):
    """Create the experiment directory (archiving a previous run by renaming
    it with a timestamp suffix) and save the resolved config as yaml."""
    cfg.TIME = str(time_str)
    # archive an existing non-resumed, non-debug run dir that already has logs
    if (os.path.exists(final_output_dir) and (cfg.TRAIN.RESUME is None) and (not cfg.DEBUG)):
        file_list = sorted(os.listdir(final_output_dir), reverse=True)
        for item in file_list:
            if item.endswith('.log'):
                os.rename(str(final_output_dir), ((str(final_output_dir) + '_') + cfg.TIME))
                break
    final_output_dir.mkdir(parents=True, exist_ok=True)
    config_file = '{}_{}_{}.yaml'.format('config', time_str, phase)
    final_config_file = (final_output_dir / config_file)
    OmegaConf.save(config=cfg, f=final_config_file)
|
def to_numpy(tensor):
    """Convert a torch tensor to numpy; pass numpy arrays through unchanged.

    Raises ValueError for anything else.
    """
    if torch.is_tensor(tensor):
        return tensor.cpu().numpy()
    if type(tensor).__module__ == 'numpy':
        return tensor
    raise ValueError('Cannot convert {} to numpy array'.format(type(tensor)))
|
def to_torch(ndarray):
    """Convert a numpy array to a torch tensor; pass tensors through unchanged.

    Raises ValueError for anything else.
    """
    if type(ndarray).__module__ == 'numpy':
        return torch.from_numpy(ndarray)
    if torch.is_tensor(ndarray):
        return ndarray
    raise ValueError('Cannot convert {} to torch tensor'.format(type(ndarray)))
|
def cleanexit():
    """Terminate the process with status 0, forcing exit via os._exit if
    SystemExit gets intercepted (skips atexit handlers and buffers)."""
    import os
    import sys
    try:
        sys.exit(0)
    except SystemExit:
        os._exit(0)
|
def cfg_mean_nsamples_resolution(cfg):
    """Force a single sample when mean-sampling is requested.

    Returns True when exactly one sample will be produced.
    """
    wants_many = cfg.number_of_samples > 1
    if cfg.mean and wants_many:
        logger.error('All the samples will be the mean.. cfg.number_of_samples=1 will be forced.')
        cfg.number_of_samples = 1
    return cfg.number_of_samples == 1
|
def get_path(sample_path: Path, is_amass: bool, gender: str, split: str, onesample: bool, mean: bool, fact: float):
    """Build the sample output path encoding the sampling options in its name."""
    if onesample:
        extra_str = '_mean' if mean else ''
    else:
        extra_str = '_multi'
    fact_str = f'{fact}_' if fact != 1 else ''
    gender_str = f'{gender}_' if is_amass else ''
    return sample_path / f'{fact_str}{gender_str}{split}{extra_str}'
|
def lengths_to_mask(lengths: List[int], device: torch.device, max_len: int = None) -> Tensor:
    """Build a boolean padding mask of shape (batch, max_len) from lengths."""
    lengths = torch.tensor(lengths, device=device)
    if not max_len:
        max_len = max(lengths)
    positions = torch.arange(max_len, device=device).expand(len(lengths), max_len)
    return positions < lengths.unsqueeze(1)
|
def detach_to_numpy(tensor):
    """Detach *tensor* from autograd and return it as a CPU numpy array."""
    detached = tensor.detach()
    return detached.cpu().numpy()
|
def remove_padding(tensors, lengths):
    """Crop each padded sequence back to its true length."""
    return [seq[:n] for seq, n in zip(tensors, lengths)]
|
def nfeats_of(rottype):
    """Return the feature dimensionality of rotation representation *rottype*.

    Raises:
        TypeError: when *rottype* has no fixed feature size.
    """
    if rottype in ['rotvec', 'axisangle']:
        return 3
    elif rottype in ['rotquat', 'quaternion']:
        return 4
    elif rottype in ['rot6d', '6drot', 'rotation6d']:
        return 6
    elif rottype in ['rotmat']:
        return 9
    # fix: the original *returned* the TypeError instance instead of raising it
    raise TypeError("This rotation type doesn't have features.")
|
def axis_angle_to(newtype, rotations):
    """Convert axis-angle rotations to the representation *newtype*.

    Duplicate of the earlier axis_angle_to in this file. Supported targets:
    'matrix', 'rotmat' (flattened 3x3), 'rot6d', 'rotquat'/'quaternion', and
    'rotvec'/'axisangle' (identity). Raises NotImplementedError otherwise.
    """
    if (newtype in ['matrix']):
        rotations = geometry.axis_angle_to_matrix(rotations)
        return rotations
    elif (newtype in ['rotmat']):
        # go through matrices, then flatten via matrix_to
        rotations = geometry.axis_angle_to_matrix(rotations)
        rotations = matrix_to('rotmat', rotations)
        return rotations
    elif (newtype in ['rot6d', '6drot', 'rotation6d']):
        rotations = geometry.axis_angle_to_matrix(rotations)
        rotations = matrix_to('rot6d', rotations)
        return rotations
    elif (newtype in ['rotquat', 'quaternion']):
        rotations = geometry.axis_angle_to_quaternion(rotations)
        return rotations
    elif (newtype in ['rotvec', 'axisangle']):
        # already in the requested representation
        return rotations
    else:
        raise NotImplementedError
|
def matrix_to(newtype, rotations):
    """Convert rotation matrices (..., 3, 3) into representation *newtype*.

    Duplicate of the earlier matrix_to in this file.
    """
    if newtype in ['matrix']:
        return rotations
    if newtype in ['rotmat']:
        # flatten the trailing 3x3 block into 9 features
        flat_shape = (*rotations.shape[:-2], 9)
        return rotations.reshape(flat_shape)
    if newtype in ['rot6d', '6drot', 'rotation6d']:
        return geometry.matrix_to_rotation_6d(rotations)
    if newtype in ['rotquat', 'quaternion']:
        return geometry.matrix_to_quaternion(rotations)
    if newtype in ['rotvec', 'axisangle']:
        return geometry.matrix_to_axis_angle(rotations)
    raise NotImplementedError
|
def to_matrix(oldtype, rotations):
    """Convert rotations in representation *oldtype* back to matrices (..., 3, 3).

    Duplicate of the earlier to_matrix in this file.
    """
    if (oldtype in ['matrix']):
        return rotations
    if (oldtype in ['rotmat']):
        # NOTE(review): shape[:(-2)] drops the feature axis AND the axis before
        # it; for (..., 9) input one would expect shape[:-1] + (3, 3).
        # Verify intended input shape with callers before relying on this.
        rotations = rotations.reshape((*rotations.shape[:(- 2)], 3, 3))
        return rotations
    elif (oldtype in ['rot6d', '6drot', 'rotation6d']):
        rotations = geometry.rotation_6d_to_matrix(rotations)
        return rotations
    elif (oldtype in ['rotquat', 'quaternion']):
        rotations = geometry.quaternion_to_matrix(rotations)
        return rotations
    elif (oldtype in ['rotvec', 'axisangle']):
        rotations = geometry.axis_angle_to_matrix(rotations)
        return rotations
    else:
        raise NotImplementedError
|
def subsample(num_frames, last_framerate, new_framerate):
    """Return the frame indices that downsample a clip from *last_framerate*
    to *new_framerate* (integer stride)."""
    step = int(last_framerate / new_framerate)
    assert step >= 1
    return np.arange(0, num_frames, step)
|
def upsample(motion, last_framerate, new_framerate):
    """Linearly interpolate *motion* from last_framerate up to new_framerate.

    Returns an array with (len(motion) - 1) * step + 1 frames, where
    step = new_framerate // last_framerate.
    """
    step = int((new_framerate / last_framerate))
    assert (step >= 1)
    # interpolation weights 0..1, step+1 points per source interval
    alpha = np.linspace(0, 1, (step + 1))
    # blend of each frame with its successor, one chunk per interval
    last = np.einsum('l,...->l...', (1 - alpha), motion[:(- 1)])
    new = np.einsum('l,...->l...', alpha, motion[1:])
    # drop the duplicated endpoint of each interval
    chuncks = (last + new)[:(- 1)]
    output = np.concatenate(chuncks.swapaxes(1, 0))
    # append the final original frame
    output = np.concatenate((output, motion[[(- 1)]]))
    return output
|
def lengths_to_mask(lengths):
    """Boolean mask (batch, max_len) from a 1-D tensor of sequence lengths."""
    max_len = max(lengths)
    positions = torch.arange(max_len, device=lengths.device)
    return positions.expand(len(lengths), max_len) < lengths.unsqueeze(1)
|
def collate_tensors(batch):
    """Zero-pad a list of tensors to a common shape and stack them along a
    new leading batch dimension."""
    dims = batch[0].dim()
    max_size = [max(b.size(d) for b in batch) for d in range(dims)]
    canvas = batch[0].new_zeros((len(batch),) + tuple(max_size))
    for i, b in enumerate(batch):
        # narrow down to this sample's region, then copy it in via add_
        view = canvas[i]
        for d in range(dims):
            view = view.narrow(d, 0, b.size(d))
        view.add_(b)
    return canvas
|
def collate(batch):
    """Collate (data, label) samples into a padded batch dict.

    Sequence length is taken from the innermost axis of each sample's data.
    """
    data_tensors = [sample[0] for sample in batch]
    labels = [sample[1] for sample in batch]
    lengths = torch.as_tensor([len(sample[0][0][0]) for sample in batch])
    return {
        'x': collate_tensors(data_tensors),
        'y': torch.as_tensor(labels),
        'mask': lengths_to_mask(lengths),
        'lengths': lengths,
    }
|
def collate_data3d_slow(batch):
    """Collate a list of dicts by padding/stacking every key with collate_tensors."""
    keys = batch[0].keys()
    return {key: collate_tensors([sample[key] for sample in batch]) for key in keys}
|
def collate_data3d(batch):
    """Collate dict samples: stack tensors per key, but keep 'paths' as a list."""
    collated = {}
    for key in batch[0]:
        values = [sample[key] for sample in batch]
        collated[key] = values if key == 'paths' else torch.stack(values, dim=0)
    return collated
|
def main():
    """Demo entry point: generate motions from text or run a predefined task.

    Tasks: 'Example'/keyboard input (text-to-motion, optionally motion
    transfer), 'random_sampling', 'reconstrucion', 'text_motion'. Writes .npy
    motions (plus .txt prompts) under the sample folder and can render them
    with Blender afterwards.

    TODO (from original): unify the export helpers; fit SMPL and export fbx
    directly from here.
    """
    cfg = parse_args(phase='demo')
    cfg.FOLDER = cfg.TEST.FOLDER
    cfg.Name = ('demo--' + cfg.NAME)
    logger = create_logger(cfg, phase='demo')
    if cfg.DEMO.EXAMPLE:
        # load text prompts and target lengths from an example file
        from GraphMotion.utils.demo_utils import load_example_input
        (text, length) = load_example_input(cfg.DEMO.EXAMPLE)
        task = 'Example'
    elif cfg.DEMO.TASK:
        task = cfg.DEMO.TASK
        text = None
    else:
        # interactive keyboard input
        task = 'Keyborad_input'
        text = input('Please enter texts, none for random latent sampling:')
        length = input('Please enter length, range 16~196, e.g. 50, none for random latent sampling:')
        if text:
            motion_path = input('Please enter npy_path for motion transfer, none for skip:')
        if (text and (not motion_path)):
            cfg.DEMO.MOTION_TRANSFER = False
        elif (text and motion_path):
            joints = np.load(motion_path)
            frames = subsample(len(joints), last_framerate=cfg.DEMO.FRAME_RATE, new_framerate=cfg.DATASET.KIT.FRAME_RATE)
            joints_sample = torch.from_numpy(joints[frames]).float()
            # NOTE(review): `model` is not defined until later in this function
            # and `xx` is undefined -- this motion-transfer branch cannot run
            # as written; verify before use.
            features = model.transforms.joints2jfeats(joints_sample[None])
            motion = xx
            cfg.DEMO.MOTION_TRANSFER = True
        # default length 200 when the user entered nothing
        length = (200 if (not length) else length)
        length = [int(length)]
        text = [text]
    output_dir = Path(os.path.join(cfg.FOLDER, str(cfg.model.model_type), str(cfg.NAME), ('samples_' + cfg.TIME)))
    output_dir.mkdir(parents=True, exist_ok=True)
    if (cfg.ACCELERATOR == 'gpu'):
        os.environ['CUDA_VISIBLE_DEVICES'] = ','.join((str(x) for x in cfg.DEVICE))
        device = torch.device('cuda')
    dataset = get_datasets(cfg, logger=logger, phase='test')[0]
    total_time = time.time()
    model = get_model(cfg, dataset)
    if (not text):
        logger.info(f'Begin specific task{task}')
    logger.info('Loading checkpoints from {}'.format(cfg.TEST.CHECKPOINTS))
    state_dict = torch.load(cfg.TEST.CHECKPOINTS, map_location='cpu')['state_dict']
    model.load_state_dict(state_dict, strict=True)
    logger.info('model {} loaded'.format(cfg.model.model_type))
    model.sample_mean = cfg.TEST.MEAN
    model.fact = cfg.TEST.FACT
    model.to(device)
    model.eval()
    mld_time = time.time()
    with torch.no_grad():
        rep_lst = []
        rep_ref_lst = []
        texts_lst = []
        if text:
            # text-to-motion (or motion transfer) for the given prompts
            batch = {'length': length, 'text': text}
            for rep in range(cfg.DEMO.REPLICATION):
                if cfg.DEMO.MOTION_TRANSFER:
                    joints = model.forward_motion_style_transfer(batch)
                else:
                    joints = model(batch)
                infer_time = (time.time() - mld_time)
                num_batch = 1
                num_all_frame = sum(batch['length'])
                num_ave_frame = (sum(batch['length']) / len(batch['length']))
                nsample = len(joints)
                id = 0
                for i in range(nsample):
                    npypath = str((output_dir / f'{task}_{length[i]}_batch{id}_{i}.npy'))
                    with open(npypath.replace('.npy', '.txt'), 'w') as text_file:
                        text_file.write(batch['text'][i])
                    np.save(npypath, joints[i].detach().cpu().numpy())
                    logger.info(f'''Motions are generated here:
{npypath}''')
                if cfg.DEMO.OUTALL:
                    rep_lst.append(joints)
                    texts_lst.append(batch['text'])
            if cfg.DEMO.OUTALL:
                # stack all replications into one (nsample, replication, ...) file
                grouped_lst = []
                for n in range(nsample):
                    grouped_lst.append(torch.cat([r[n][None] for r in rep_lst], dim=0)[None])
                combinedOut = torch.cat(grouped_lst, dim=0)
                try:
                    npypath = str((output_dir / f'{task}_{length[i]}_all.npy'))
                    np.save(npypath, combinedOut.detach().cpu().numpy())
                    with open(npypath.replace('npy', 'txt'), 'w') as text_file:
                        for texts in texts_lst:
                            for text in texts:
                                text_file.write(text)
                                text_file.write('\n')
                    logger.info(f'''All reconstructed motions are generated here:
{npypath}''')
                except:
                    raise ValueError('Lengths of motions are different, so we cannot save all motions in one file.')
        if (not text):
            if (task == 'random_sampling'):
                # sample motions directly from random latents
                text = 'random sampling'
                length = 196
                (nsample, latent_dim) = (500, 256)
                batch = {'latent': torch.randn(1, nsample, latent_dim, device=model.device), 'length': ([int(length)] * nsample)}
                joints = model.gen_from_latent(batch)
                (num_batch, num_all_frame, num_ave_frame) = (100, (100 * 196), 196)
                infer_time = (time.time() - mld_time)
                for i in range(nsample):
                    npypath = (output_dir / f"{text.split(' ')[0]}_{length}_{i}.npy")
                    np.save(npypath, joints[i].detach().cpu().numpy())
                    logger.info(f'''Motions are generated here:
{npypath}''')
            elif (task in ['reconstrucion', 'text_motion']):
                # run the model over the test dataloader, saving outputs and refs
                for rep in range(cfg.DEMO.REPLICATION):
                    logger.info(f'Replication {rep}')
                    joints_lst = []
                    ref_lst = []
                    for (id, batch) in enumerate(dataset.test_dataloader()):
                        if (task == 'reconstrucion'):
                            batch['motion'] = batch['motion'].to(device)
                            length = batch['length']
                            (joints, joints_ref) = model.recon_from_motion(batch)
                        elif (task == 'text_motion'):
                            batch['motion'] = batch['motion'].to(device)
                            (joints, joints_ref) = model(batch, return_ref=True)
                        nsample = len(joints)
                        length = batch['length']
                        for i in range(nsample):
                            npypath = str((output_dir / f'{task}_{length[i]}_batch{id}_{i}_{rep}.npy'))
                            np.save(npypath, joints[i].detach().cpu().numpy())
                            np.save(npypath.replace('.npy', '_ref.npy'), joints_ref[i].detach().cpu().numpy())
                            with open(npypath.replace('.npy', '.txt'), 'w') as text_file:
                                text_file.write(batch['text'][i])
                        logger.info(f'''Reconstructed motions are generated here:
{npypath}''')
            else:
                raise ValueError(f'Not support task {task}, only support random_sampling, reconstrucion, text_motion')
    total_time = (time.time() - total_time)
    # NOTE(review): several of these timing lines are duplicated verbatim
    print(f'MLD Infer time - This/Ave batch: {(infer_time / num_batch):.2f}')
    print(f'MLD Infer FPS - Total batch: {(num_all_frame / infer_time):.2f}')
    print(f'MLD Infer time - This/Ave batch: {(infer_time / num_batch):.2f}')
    print(f'MLD Infer FPS - Total batch: {(num_all_frame / infer_time):.2f}')
    print(f'MLD Infer FPS - Running Poses Per Second: {((num_ave_frame * infer_time) / num_batch):.2f}')
    print(f'MLD Infer FPS - {(num_all_frame / infer_time):.2f}s')
    print(f'MLD Infer FPS - Running Poses Per Second: {((num_ave_frame * infer_time) / num_batch):.2f}')
    print(f'MLD Infer FPS - time for 100 Poses: {((infer_time / (num_batch * num_ave_frame)) * 100):.2f}')
    print(f'Total time spent: {total_time:.2f} seconds (including model loading time and exporting time).')
    if cfg.DEMO.RENDER:
        # render the generated npy files with Blender
        from GraphMotion.utils.demo_utils import render_batch
        blenderpath = cfg.RENDER.BLENDER_PATH
        render_batch(os.path.dirname(npypath), execute_python=blenderpath, mode='sequence')
        logger.info(f'''Motions are rendered here:
{os.path.dirname(npypath)}''')
|
class ProgressLogger(Callback):
    """Lightning callback that logs training progress and monitored metrics.

    At the end of every training epoch, the metrics listed in
    ``metric_monitor`` (display name -> key in ``trainer.callback_metrics``)
    are formatted in scientific notation and written to the module logger,
    together with the current virtual-memory usage.
    """

    def __init__(self, metric_monitor: dict, precision: int = 3):
        # Mapping of human-readable names to callback-metric keys.
        self.metric_monitor = metric_monitor
        # Significant digits used when formatting metric values.
        self.precision = precision

    def on_train_start(self, trainer: Trainer, pl_module: LightningModule, **kwargs) -> None:
        logger.info('Training started')

    def on_train_end(self, trainer: Trainer, pl_module: LightningModule, **kwargs) -> None:
        logger.info('Training done')

    def on_validation_epoch_end(self, trainer: Trainer, pl_module: LightningModule, **kwargs) -> None:
        if trainer.sanity_checking:
            logger.info('Sanity checking ok.')

    def on_train_epoch_end(self, trainer: Trainer, pl_module: LightningModule, padding=False, **kwargs) -> None:
        value_fmt = f'{{:.{self.precision}e}}'
        line = f'Epoch {trainer.current_epoch}'
        if padding:
            # Right-align so consecutive epoch lines stay column-aligned.
            line = f"{line:>{len('Epoch xxxx')}}"
        available = trainer.callback_metrics
        rendered = []
        for display_name, metric_key in self.metric_monitor.items():
            if metric_key not in available:
                continue
            formatted = value_fmt.format(available[metric_key].item())
            rendered.append(f'{display_name} {formatted}')
        if not rendered:
            return
        memory = f'Memory {psutil.virtual_memory().percent}%'
        logger.info(line + ': ' + ' '.join(rendered) + ' ' + memory)
|
def get_module_config(cfg_model, path='modules'):
    """Merge every ``*.yaml`` file under ``./configs/<path>/`` into ``cfg_model``.

    Args:
        cfg_model: OmegaConf node, updated in place and returned.
        path: sub-directory of ``./configs`` to scan for YAML files.

    Returns:
        The merged configuration node.
    """
    config_dir = f'./configs/{path}/'
    for filename in os.listdir(config_dir):
        if not filename.endswith('.yaml'):
            continue
        with open(config_dir + filename, 'r') as stream:
            cfg_model.merge_with(OmegaConf.load(stream))
    return cfg_model
|
def get_obj_from_str(string, reload=False):
    """Resolve a dotted path like ``'pkg.mod.Class'`` to the named object.

    Args:
        string: fully-qualified name, e.g. ``'collections.OrderedDict'``.
        reload: when True, re-import the module first so code changes made
            since the initial import are picked up.
    """
    module_path, attr_name = string.rsplit('.', 1)
    if reload:
        importlib.reload(importlib.import_module(module_path))
    return getattr(importlib.import_module(module_path, package=None), attr_name)
|
def instantiate_from_config(config):
    """Build the object described by ``config``.

    ``config`` must contain a ``'target'`` dotted path and may carry a
    ``'params'`` dict of constructor keyword arguments.  The two special
    sentinel strings used by staged configs map to ``None``.

    Raises:
        KeyError: when ``config`` has no ``'target'`` entry.
    """
    if 'target' not in config:
        if config in ('__is_first_stage__', '__is_unconditional__'):
            return None
        raise KeyError('Expected key `target` to instantiate.')
    return get_obj_from_str(config['target'])(**config.get('params', dict()))
|
def parse_args(phase='train'):
    """Build the merged OmegaConf configuration from command-line options.

    Args:
        phase: one of 'train', 'test', 'demo' or 'render'; selects which CLI
            options exist and which config sections they override.

    Returns:
        The merged configuration (base + experiment + per-module configs +
        assets, with CLI overrides applied).
    """
    parser = ArgumentParser()
    group = parser.add_argument_group('Training options')
    if phase in ['train', 'test', 'demo']:
        group.add_argument('--cfg', type=str, required=False, default='./configs/config.yaml', help='config file')
        group.add_argument('--cfg_assets', type=str, required=False, default='./configs/assets.yaml', help='config file for asset paths')
        group.add_argument('--batch_size', type=int, required=False, help='training batch size')
        group.add_argument('--device', type=int, nargs='+', required=False, help='training device')
        group.add_argument('--nodebug', action='store_true', required=False, help='debug or not')
        group.add_argument('--dir', type=str, required=False, help='evaluate existing npys')
    if phase == 'demo':
        group.add_argument('--render', action='store_true', help='Render visulizaed figures')
        group.add_argument('--render_mode', type=str, help='video or sequence')
        group.add_argument('--frame_rate', type=float, default=12.5, help='the frame rate for the input/output motion')
        group.add_argument('--replication', type=int, default=1, help='the frame rate for the input/output motion')
        group.add_argument('--example', type=str, required=False, help='input text and lengths with txt format')
        group.add_argument('--task', type=str, required=False, help='random_sampling, reconstrucion or text_motion')
        group.add_argument('--out_dir', type=str, required=False, help='output dir')
        group.add_argument('--allinone', action='store_true', required=False, help='output seperate or combined npy file')
    if phase == 'render':
        group.add_argument('--cfg', type=str, required=False, default='./configs/render.yaml', help='config file')
        group.add_argument('--cfg_assets', type=str, required=False, default='./configs/assets.yaml', help='config file for asset paths')
        group.add_argument('--npy', type=str, required=False, default=None, help='npy motion files')
        group.add_argument('--dir', type=str, required=False, default=None, help='npy motion folder')
        group.add_argument('--mode', type=str, required=False, default='sequence', help='render target: video, sequence, frame')
        group.add_argument('--joint_type', type=str, required=False, default=None, help='mmm or vertices for skeleton')
    params = parser.parse_args()
    # Layered configuration: base <- experiment <- per-module configs <- assets.
    cfg_base = OmegaConf.load('./configs/base.yaml')
    cfg_exp = OmegaConf.merge(cfg_base, OmegaConf.load(params.cfg))
    cfg_model = get_module_config(cfg_exp.model, cfg_exp.model.target)
    cfg_assets = OmegaConf.load(params.cfg_assets)
    cfg = OmegaConf.merge(cfg_exp, cfg_model, cfg_assets)
    if phase in ['train', 'test']:
        cfg.TRAIN.BATCH_SIZE = params.batch_size if params.batch_size else cfg.TRAIN.BATCH_SIZE
        cfg.DEVICE = params.device if params.device else cfg.DEVICE
        # NOTE(review): `nodebug` is a store_true flag and is therefore never
        # None, so this condition is always true and cfg.DEBUG is always
        # overwritten with `not params.nodebug` — kept as-is to preserve the
        # established behavior; confirm whether that is intended.
        cfg.DEBUG = (not params.nodebug) if (params.nodebug is not None) else cfg.DEBUG
        cfg.DEBUG = False if phase == 'test' else cfg.DEBUG
        if phase == 'test':
            cfg.DEBUG = False
            cfg.DEVICE = [0]
            print('Force no debugging and one gpu when testing')
        cfg.TEST.TEST_DIR = params.dir if params.dir else cfg.TEST.TEST_DIR
    if phase == 'demo':
        cfg.DEMO.RENDER = params.render
        cfg.DEMO.FRAME_RATE = params.frame_rate
        cfg.DEMO.EXAMPLE = params.example
        cfg.DEMO.TASK = params.task
        # BUG FIX: this was gated on `params.dir` (a typo), so --out_dir was
        # silently ignored unless --dir was also given.
        cfg.TEST.FOLDER = params.out_dir if params.out_dir else cfg.TEST.FOLDER
        cfg.DEMO.REPLICATION = params.replication
        cfg.DEMO.OUTALL = params.allinone
    if phase == 'render':
        if params.npy:
            cfg.RENDER.NPY = params.npy
            cfg.RENDER.INPUT_MODE = 'npy'
        if params.dir:
            cfg.RENDER.DIR = params.dir
            cfg.RENDER.INPUT_MODE = 'dir'
        cfg.RENDER.JOINT_TYPE = params.joint_type
        cfg.RENDER.MODE = params.mode
    if cfg.DEBUG:
        # Debug runs get a marked name, offline W&B logging and frequent eval.
        cfg.NAME = 'debug--' + cfg.NAME
        cfg.LOGGER.WANDB.OFFLINE = True
        cfg.LOGGER.VAL_EVERY_STEPS = 1
    return cfg
|
class HumanML3DDataModule(BASEDataModule):
    """Lightning data module for the HumanML3D text-to-motion dataset."""

    def __init__(self, cfg, batch_size, num_workers, collate_fn=None, phase='train', **kwargs):
        super().__init__(batch_size=batch_size, num_workers=num_workers, collate_fn=collate_fn)
        # Capture init kwargs (mean/std, dirs, ...) into self.hparams; they
        # are reused by BASEDataModule's lazy dataset construction.
        self.save_hyperparameters(logger=False)
        self.name = 'humanml3d'
        self.njoints = 22  # HumanML3D skeleton size
        if (phase == 'text_only'):
            self.Dataset = TextOnlyDataset
        else:
            self.Dataset = Text2MotionDatasetV2
        self.cfg = cfg
        # Tiny val subset loaded only to discover the feature dimensionality.
        sample_overrides = {'split': 'val', 'tiny': True, 'progress_bar': False}
        self._sample_set = self.get_sample_set(overrides=sample_overrides)
        self.nfeats = self._sample_set.nfeats

    def feats2joints(self, features):
        """De-normalize features and recover 3D joints via RIC."""
        mean = torch.tensor(self.hparams.mean).to(features)
        std = torch.tensor(self.hparams.std).to(features)
        features = ((features * std) + mean)
        return recover_from_ric(features, self.njoints)

    def joints2feats(self, features):
        """Convert raw joints to the (un-normalized) feature representation."""
        features = process_file(features, self.njoints)[0]
        return features

    def renorm4t2m(self, features):
        """Re-normalize features from this module's stats to the t2m
        evaluator's stats (de-normalize, then normalize with eval stats)."""
        ori_mean = torch.tensor(self.hparams.mean).to(features)
        ori_std = torch.tensor(self.hparams.std).to(features)
        eval_mean = torch.tensor(self.hparams.mean_eval).to(features)
        eval_std = torch.tensor(self.hparams.std_eval).to(features)
        features = ((features * ori_std) + ori_mean)
        features = ((features - eval_mean) / eval_std)
        return features

    def mm_mode(self, mm_on=True):
        """Toggle multimodality evaluation: restrict the test set to a random
        subset of MM_NUM_SAMPLES names; restore the full list when off."""
        if mm_on:
            self.is_mm = True
            self.name_list = self.test_dataset.name_list
            self.mm_list = np.random.choice(self.name_list, self.cfg.TEST.MM_NUM_SAMPLES, replace=False)
            self.test_dataset.name_list = self.mm_list
        else:
            self.is_mm = False
            self.test_dataset.name_list = self.name_list
|
class Humanact12DataModule(BASEDataModule):
    """Lightning data module for the HumanAct12 action-to-motion dataset."""

    def __init__(self, cfg, batch_size, num_workers, collate_fn=None, phase='train', **kwargs):
        super().__init__(batch_size=batch_size, num_workers=num_workers, collate_fn=collate_fn)
        # Capture init kwargs into self.hparams for lazy dataset construction.
        self.save_hyperparameters(logger=False)
        self.name = 'HumanAct12'
        self.Dataset = HumanAct12Poses
        self.cfg = cfg
        # NOTE(review): assigned but never used — sibling modules pass this to
        # get_sample_set(); presumably dead code here. Confirm before removal.
        sample_overrides = {'num_seq_max': 2, 'split': 'test', 'tiny': True, 'progress_bar': False}
        # Fixed dataset dimensions: 150 features, 25 joints, 12 action classes.
        self.nfeats = 150
        self.njoints = 25
        self.nclasses = 12
|
class KitDataModule(BASEDataModule):
    """Lightning data module for the KIT-ML text-to-motion dataset."""

    def __init__(self, cfg, phase='train', collate_fn=all_collate, batch_size: int=32, num_workers: int=16, **kwargs):
        super().__init__(batch_size=batch_size, num_workers=num_workers, collate_fn=collate_fn)
        # Capture init kwargs (mean/std, dirs, ...) into self.hparams; they
        # are reused by BASEDataModule's lazy dataset construction.
        self.save_hyperparameters(logger=False)
        self.name = 'kit'
        self.njoints = 21  # KIT-ML skeleton size
        if (phase == 'text_only'):
            self.Dataset = TextOnlyDataset
        else:
            self.Dataset = Text2MotionDatasetV2
        self.cfg = cfg
        # Tiny val subset loaded only to discover the feature dimensionality.
        sample_overrides = {'split': 'val', 'tiny': True, 'progress_bar': False}
        self._sample_set = self.get_sample_set(overrides=sample_overrides)
        self.nfeats = self._sample_set.nfeats

    def feats2joints(self, features):
        """De-normalize features and recover 3D joints via RIC."""
        mean = torch.tensor(self.hparams.mean).to(features)
        std = torch.tensor(self.hparams.std).to(features)
        features = ((features * std) + mean)
        return recover_from_ric(features, self.njoints)

    def renorm4t2m(self, features):
        """Re-normalize features from this module's stats to the t2m
        evaluator's stats (de-normalize, then normalize with eval stats)."""
        ori_mean = torch.tensor(self.hparams.mean).to(features)
        ori_std = torch.tensor(self.hparams.std).to(features)
        eval_mean = torch.tensor(self.hparams.mean_eval).to(features)
        eval_std = torch.tensor(self.hparams.std_eval).to(features)
        features = ((features * ori_std) + ori_mean)
        features = ((features - eval_mean) / eval_std)
        return features

    def mm_mode(self, mm_on=True):
        """Toggle multimodality evaluation: restrict the test set to a random
        subset of MM_NUM_SAMPLES names; restore the full list when off."""
        if mm_on:
            self.is_mm = True
            self.name_list = self.test_dataset.name_list
            self.mm_list = np.random.choice(self.name_list, self.cfg.TEST.MM_NUM_SAMPLES, replace=False)
            self.test_dataset.name_list = self.mm_list
        else:
            self.is_mm = False
            self.test_dataset.name_list = self.name_list
|
class UestcDataModule(BASEDataModule):
    """Lightning data module for the UESTC action-to-motion dataset."""

    def __init__(self, cfg, batch_size, num_workers, collate_fn=None, method_name='vibe', phase='train', **kwargs):
        super().__init__(batch_size=batch_size, num_workers=num_workers, collate_fn=collate_fn)
        # Capture init kwargs into self.hparams for lazy dataset construction.
        self.save_hyperparameters(logger=False)
        self.name = 'Uestc'
        self.Dataset = UESTC
        self.cfg = cfg
        # Fixed dataset dimensions: 150 features, 25 joints, 40 action classes.
        self.nfeats = 150
        self.njoints = 25
        self.nclasses = 40
|
class HumanAct12Poses(Dataset):
    """Dataset of HumanAct12 action-labelled pose sequences.

    Loads ``humanact12poses.pkl`` from ``datapath`` and exposes per-sequence
    axis-angle poses, 3D joints and action labels.
    """

    dataname = 'humanact12'

    def __init__(self, datapath='data/HumanAct12Poses', **kargs):
        self.datapath = datapath
        super().__init__(**kargs)
        pkldatafilepath = os.path.join(datapath, 'humanact12poses.pkl')
        with rich.progress.open(pkldatafilepath, 'rb', description='loading humanact12 pkl') as f:
            data = pkl.load(f)
        # Per-sequence pose parameters, frame counts, joints and labels.
        self._pose = list(data['poses'])
        self._num_frames_in_video = [p.shape[0] for p in self._pose]
        self._joints = list(data['joints3D'])
        self._actions = list(data['y'])
        total_num_actions = 12
        self.num_classes = total_num_actions
        # Every sequence belongs to the training split.
        self._train = list(range(len(self._pose)))
        keep_actions = np.arange(0, total_num_actions)
        # Identity mappings between raw action ids and training labels.
        self._action_to_label = {raw: label for label, raw in enumerate(keep_actions)}
        self._label_to_action = {label: raw for label, raw in enumerate(keep_actions)}
        self._action_classes = humanact12_coarse_action_enumerator

    def _load_joints3D(self, ind, frame_ix):
        """Return the 3D joints of sequence ``ind`` at frames ``frame_ix``."""
        return self._joints[ind][frame_ix]

    def _load_rotvec(self, ind, frame_ix):
        """Return axis-angle poses of sequence ``ind`` reshaped to (frames, 24, 3)."""
        return self._pose[ind][frame_ix].reshape(-1, 24, 3)
|
def parse_info_name(path):
    """Split a file stem like ``'a12b34'`` into ``{'a': '12', 'b': '34'}``.

    Each ASCII letter in the stem starts a new key; the non-letter
    characters that follow are accumulated as that key's value.
    """
    stem = os.path.splitext(os.path.basename(path))[0]
    collected = {}
    current = None
    for ch in stem:
        if ch in string.ascii_letters:
            current = ch
            collected[current] = []
        else:
            collected[current].append(ch)
    return {key: ''.join(chars) for key, chars in collected.items()}
|
def to_numpy(tensor):
    """Convert a torch tensor to a numpy array; pass numpy inputs through.

    Raises:
        ValueError: for inputs that are neither torch tensors nor numpy.
    """
    if torch.is_tensor(tensor):
        return tensor.cpu().numpy()
    if type(tensor).__module__ == 'numpy':
        return tensor
    raise ValueError('Cannot convert {} to numpy array'.format(type(tensor)))
|
def to_torch(ndarray):
    """Convert a numpy array to a torch tensor; pass torch tensors through.

    Raises:
        ValueError: for inputs that are neither numpy nor torch tensors.
    """
    if type(ndarray).__module__ == 'numpy':
        return torch.from_numpy(ndarray)
    if not torch.is_tensor(ndarray):
        raise ValueError('Cannot convert {} to torch tensor'.format(type(ndarray)))
    return ndarray
|
def cleanexit():
    """Terminate the current process immediately with exit code 0.

    ``sys.exit`` raises ``SystemExit``, which embedding hosts (e.g. Blender)
    may intercept; catching it and calling ``os._exit`` forces the
    interpreter to stop without running further cleanup handlers.
    """
    import sys
    import os
    try:
        sys.exit(0)
    except SystemExit:
        os._exit(0)
|
def lengths_to_mask(lengths, max_len=None):
    """Build a boolean padding mask from a 1-D tensor of sequence lengths.

    Args:
        lengths: 1-D integer tensor of per-sample sequence lengths.
        max_len: number of mask columns; defaults to ``max(lengths)``.
            (Generalization: passing it explicitly supports callers that
            mask against a fixed padded size, e.g. ``all_collate``.)

    Returns:
        Bool tensor of shape ``(len(lengths), max_len)`` where
        ``mask[i, j]`` is True exactly when ``j < lengths[i]``.
    """
    if max_len is None:
        max_len = max(lengths)
    mask = torch.arange(max_len, device=lengths.device).expand(len(lengths), max_len) < lengths.unsqueeze(1)
    return mask
|
def collate_tensors(batch):
    """Zero-pad a list of tensors to a common shape and stack them.

    All tensors must share the same number of dimensions; the result has
    shape ``(len(batch), *per-dimension maxima)``, with shorter entries
    zero-padded at the end of each dimension.
    """
    ndim = batch[0].dim()
    padded_shape = tuple(max(t.size(d) for t in batch) for d in range(ndim))
    canvas = batch[0].new_zeros((len(batch),) + padded_shape)
    for slot, tensor in zip(canvas, batch):
        view = slot
        for d in range(ndim):
            view = view.narrow(d, 0, tensor.size(d))
        view.add_(tensor)
    return canvas
|
def collate(batch):
    """Collate ``(motion, label, ...)`` samples into a padded batch dict.

    Each sample is indexable as ``b[0]`` (motion tensor) and ``b[1]``
    (label); the sequence length is taken from ``len(b[0][0][0])``.
    Returns keys 'x', 'y', 'mask' and 'lengths'.
    """
    motions = [sample[0] for sample in batch]
    labels = [sample[1] for sample in batch]
    lengths = [len(sample[0][0][0]) for sample in batch]
    lengths_t = torch.as_tensor(lengths)
    return {
        'x': collate_tensors(motions),
        'y': torch.as_tensor(labels),
        'mask': lengths_to_mask(lengths_t),
        'lengths': lengths_t,
    }
|
class BASEDataModule(pl.LightningDataModule):
    """Base Lightning data module with lazily-built train/val/test datasets.

    Subclasses must set ``self.name`` (config key of the dataset),
    ``self.Dataset`` (dataset class) and ``self.cfg`` before any of the
    ``train_dataset`` / ``val_dataset`` / ``test_dataset`` attributes is
    accessed; those attributes are created on demand in ``__getattr__``
    and cached.
    """

    def __init__(self, collate_fn, batch_size: int, num_workers: int):
        super().__init__()
        # Options shared by every dataloader created from this module.
        self.dataloader_options = {'batch_size': batch_size, 'num_workers': num_workers, 'collate_fn': collate_fn}
        self.persistent_workers = True
        # Multimodality-evaluation flag; toggled by subclasses' mm_mode().
        self.is_mm = False

    def get_sample_set(self, overrides={}):
        """Build a dataset on the EVAL split using hparams plus ``overrides``.

        NOTE(review): ``overrides`` is a mutable default argument; harmless
        here because it is never mutated, but fragile.
        """
        sample_params = self.hparams.copy()
        sample_params.update(overrides)
        # eval() resolves the per-dataset config section from self.name;
        # values come from trusted config files, not user input.
        split_file = pjoin(eval(f'self.cfg.DATASET.{self.name.upper()}.SPLIT_ROOT'), (self.cfg.EVAL.SPLIT + '.txt'))
        return self.Dataset(split_file=split_file, **sample_params)

    def __getattr__(self, item):
        # Lazily create and cache `<split>_dataset` attributes; the cached
        # instance is stored under `_<split>_dataset` in __dict__ so this
        # hook only fires on the first access.
        if (item.endswith('_dataset') and (not item.startswith('_'))):
            subset = item[:(- len('_dataset'))]
            item_c = ('_' + item)
            if (item_c not in self.__dict__):
                # The 'val' split is configured under the EVAL config section.
                subset = (subset.upper() if (subset != 'val') else 'EVAL')
                split = eval(f'self.cfg.{subset}.SPLIT')
                split_file = pjoin(eval(f'self.cfg.DATASET.{self.name.upper()}.SPLIT_ROOT'), (eval(f'self.cfg.{subset}.SPLIT') + '.txt'))
                self.__dict__[item_c] = self.Dataset(split_file=split_file, split=split, **self.hparams)
            return getattr(self, item_c)
        classname = self.__class__.__name__
        raise AttributeError(f"'{classname}' object has no attribute '{item}'")

    def setup(self, stage=None):
        # Touch the lazy attributes so the datasets are materialized up front.
        self.stage = stage
        if (stage in (None, 'fit')):
            _ = self.train_dataset
            _ = self.val_dataset
        if (stage in (None, 'test')):
            _ = self.test_dataset

    def train_dataloader(self):
        return DataLoader(self.train_dataset, shuffle=True, persistent_workers=True, **self.dataloader_options)

    def predict_dataloader(self):
        # Batch size 1 in multimodality mode so repeated samples line up.
        dataloader_options = self.dataloader_options.copy()
        dataloader_options['batch_size'] = (1 if self.is_mm else self.cfg.TEST.BATCH_SIZE)
        dataloader_options['num_workers'] = self.cfg.TEST.NUM_WORKERS
        dataloader_options['shuffle'] = False
        return DataLoader(self.test_dataset, persistent_workers=True, **dataloader_options)

    def val_dataloader(self):
        dataloader_options = self.dataloader_options.copy()
        dataloader_options['batch_size'] = self.cfg.EVAL.BATCH_SIZE
        dataloader_options['num_workers'] = self.cfg.EVAL.NUM_WORKERS
        dataloader_options['shuffle'] = False
        return DataLoader(self.val_dataset, persistent_workers=True, **dataloader_options)

    def test_dataloader(self):
        # Same sizing rule as predict_dataloader (batch 1 in MM mode).
        dataloader_options = self.dataloader_options.copy()
        dataloader_options['batch_size'] = (1 if self.is_mm else self.cfg.TEST.BATCH_SIZE)
        dataloader_options['num_workers'] = self.cfg.TEST.NUM_WORKERS
        dataloader_options['shuffle'] = False
        return DataLoader(self.test_dataset, persistent_workers=True, **dataloader_options)
|
def get_mean_std(phase, cfg, dataset_name):
    """Load the (mean, std) normalization stats for a dataset.

    For the 'val' phase, the pretrained evaluator's stats are read from its
    meta folder; for every other phase the dataset's own ``Mean.npy`` /
    ``Std.npy`` are used.  Only 'humanml3d' (alias 't2m') and 'kit' are
    supported.
    """
    name = 't2m' if dataset_name == 'humanml3d' else dataset_name
    assert name in ['t2m', 'kit']
    if phase in ['val']:
        meta_dirs = {'t2m': 'Comp_v6_KLD01', 'kit': 'Comp_v6_KLD005'}
        if name not in meta_dirs:
            raise ValueError('Only support t2m and kit')
        data_root = pjoin(cfg.model.t2m_path, name, meta_dirs[name], 'meta')
        mean = np.load(pjoin(data_root, 'mean.npy'))
        std = np.load(pjoin(data_root, 'std.npy'))
    else:
        # eval() resolves the dataset-specific config section; values come
        # from trusted config files.
        data_root = eval(f'cfg.DATASET.{dataset_name.upper()}.ROOT')
        mean = np.load(pjoin(data_root, 'Mean.npy'))
        std = np.load(pjoin(data_root, 'Std.npy'))
    return (mean, std)
|
def get_WordVectorizer(cfg, phase, dataset_name):
    """Return a WordVectorizer for text-conditioned datasets.

    Returns None for the 'text_only' phase (no vectorizer needed there).

    Raises:
        ValueError: for datasets other than HumanML3D / KIT.
    """
    if phase in ['text_only']:
        return None
    if dataset_name.lower() in ['humanml3d', 'kit']:
        return WordVectorizer(cfg.DATASET.WORD_VERTILIZER_PATH, 'our_vab')
    raise ValueError('Only support WordVectorizer for HumanML3D')
|
def get_collate_fn(name, phase='train'):
    """Pick the collate function matching a dataset family.

    Text-to-motion datasets use ``mld_collate``; action-to-motion datasets
    use ``a2m_collate``.  Unknown names fall through and return None
    (preserved behavior).
    """
    key = name.lower()
    if key in ['humanml3d', 'kit']:
        return mld_collate
    if key in ['humanact12', 'uestc']:
        return a2m_collate
|
def get_datasets(cfg, logger=None, phase='train'):
    """Instantiate one data module per name in ``cfg.<PHASE>.DATASETS``.

    Text-to-motion datasets (humanml3d / kit) are built with normalization
    stats, the evaluator's stats, a word vectorizer and the text collate
    function; action-to-motion datasets (humanact12 / uestc) use the action
    collate function.  NFEATS/NJOINTS of the first dataset are written back
    into ``cfg.DATASET``.

    Raises:
        NotImplementedError: for 'amass' and any unrecognized dataset name.
    """
    # eval() resolves phase/dataset-specific config sections; values come
    # from trusted config files, not user input.
    dataset_names = eval(f'cfg.{phase.upper()}.DATASETS')
    datasets = []
    for dataset_name in dataset_names:
        if (dataset_name.lower() in ['humanml3d', 'kit']):
            data_root = eval(f'cfg.DATASET.{dataset_name.upper()}.ROOT')
            # Training-time stats plus the fixed evaluator ('val') stats.
            (mean, std) = get_mean_std(phase, cfg, dataset_name)
            (mean_eval, std_eval) = get_mean_std('val', cfg, dataset_name)
            wordVectorizer = get_WordVectorizer(cfg, phase, dataset_name)
            collate_fn = get_collate_fn(dataset_name, phase)
            if (dataset_name.lower() in ['kit']):
                # NOTE(review): KIT hard-codes min_motion_length=24 while the
                # branch below uses cfg.DATASET.SAMPLER.MIN_LEN — confirm
                # this asymmetry is intentional.
                dataset = dataset_module_map[dataset_name.lower()](cfg=cfg, batch_size=cfg.TRAIN.BATCH_SIZE, num_workers=cfg.TRAIN.NUM_WORKERS, debug=cfg.DEBUG, collate_fn=collate_fn, mean=mean, std=std, mean_eval=mean_eval, std_eval=std_eval, w_vectorizer=wordVectorizer, text_dir=pjoin(data_root, 'texts'), motion_dir=pjoin(data_root, motion_subdir[dataset_name]), max_motion_length=cfg.DATASET.SAMPLER.MAX_LEN, min_motion_length=24, max_text_len=cfg.DATASET.SAMPLER.MAX_TEXT_LEN, unit_length=eval(f'cfg.DATASET.{dataset_name.upper()}.UNIT_LEN'))
                datasets.append(dataset)
            else:
                dataset = dataset_module_map[dataset_name.lower()](cfg=cfg, batch_size=cfg.TRAIN.BATCH_SIZE, num_workers=cfg.TRAIN.NUM_WORKERS, debug=cfg.DEBUG, collate_fn=collate_fn, mean=mean, std=std, mean_eval=mean_eval, std_eval=std_eval, w_vectorizer=wordVectorizer, text_dir=pjoin(data_root, 'texts'), motion_dir=pjoin(data_root, motion_subdir[dataset_name]), max_motion_length=cfg.DATASET.SAMPLER.MAX_LEN, min_motion_length=cfg.DATASET.SAMPLER.MIN_LEN, max_text_len=cfg.DATASET.SAMPLER.MAX_TEXT_LEN, unit_length=eval(f'cfg.DATASET.{dataset_name.upper()}.UNIT_LEN'))
                datasets.append(dataset)
        elif (dataset_name.lower() in ['humanact12', 'uestc']):
            collate_fn = get_collate_fn(dataset_name, phase)
            # Debug runs cap the number of sequences at 100.
            dataset = dataset_module_map[dataset_name.lower()](datapath=eval(f'cfg.DATASET.{dataset_name.upper()}.ROOT'), cfg=cfg, batch_size=cfg.TRAIN.BATCH_SIZE, num_workers=cfg.TRAIN.NUM_WORKERS, debug=cfg.DEBUG, collate_fn=collate_fn, num_frames=cfg.DATASET.HUMANACT12.NUM_FRAMES, sampling=cfg.DATASET.SAMPLER.SAMPLING, sampling_step=cfg.DATASET.SAMPLER.SAMPLING_STEP, pose_rep=cfg.DATASET.HUMANACT12.POSE_REP, max_len=cfg.DATASET.SAMPLER.MAX_LEN, min_len=cfg.DATASET.SAMPLER.MIN_LEN, num_seq_max=(cfg.DATASET.SAMPLER.MAX_SQE if (not cfg.DEBUG) else 100), glob=cfg.DATASET.HUMANACT12.GLOB, translation=cfg.DATASET.HUMANACT12.TRANSLATION)
            cfg.DATASET.NCLASSES = dataset.nclasses
            datasets.append(dataset)
        elif (dataset_name.lower() in ['amass']):
            raise NotImplementedError
        else:
            raise NotImplementedError
    # Propagate feature/joint dimensions of the first dataset to the config.
    cfg.DATASET.NFEATS = datasets[0].nfeats
    cfg.DATASET.NJOINTS = datasets[0].njoints
    return datasets
|
def is_float(numStr):
    """Return True when the string looks like a plain decimal float.

    Surrounding whitespace and one leading sign are ignored; only the
    ``digits.digits`` form matches (no exponent notation).
    """
    flag = False
    candidate = str(numStr).strip().lstrip('-').lstrip('+')
    try:
        if re.match('^[-+]?[0-9]+\\.[0-9]+$', candidate):
            flag = True
    except Exception as ex:
        print('is_float() - error: ' + str(ex))
    return flag
|
def is_number(numStr):
    """Return True when the string is an (optionally signed) integer literal."""
    candidate = str(numStr).strip().lstrip('-').lstrip('+')
    return candidate.isdigit()
|
def get_opt(opt_path, device):
    """Parse a saved text2motion-evaluator ``opt.txt`` into a Namespace.

    Each non-header line is a ``key: value`` pair; values are coerced to
    bool / float / int when they look like one, and kept as strings
    otherwise.  Dataset-specific paths and dimensions are then derived.

    Args:
        opt_path: path to the saved options file.
        device: device handle stored on the returned options.

    Returns:
        argparse.Namespace with all parsed and derived options.

    Raises:
        KeyError: when ``opt.dataset_name`` is neither 't2m' nor 'kit'.
    """
    opt = Namespace()
    opt_dict = vars(opt)
    # Header/footer lines of the saved options file to skip.
    skip = ('-------------- End ----------------', '------------ Options -------------', '\n')
    print('Reading', opt_path)
    with open(opt_path) as f:
        for line in f:
            if line.strip() not in skip:
                (key, value) = line.strip().split(': ')
                if value in ('True', 'False'):
                    # BUG FIX: bool('False') is True (any non-empty string is
                    # truthy), so boolean options were always parsed as True.
                    # Compare against the literal instead.
                    opt_dict[key] = (value == 'True')
                elif is_float(value):
                    opt_dict[key] = float(value)
                elif is_number(value):
                    opt_dict[key] = int(value)
                else:
                    opt_dict[key] = str(value)
    opt_dict['which_epoch'] = 'latest'
    opt.save_root = pjoin(opt.checkpoints_dir, opt.dataset_name, opt.name)
    opt.model_dir = pjoin(opt.save_root, 'model')
    opt.meta_dir = pjoin(opt.save_root, 'meta')
    if opt.dataset_name == 't2m':
        opt.data_root = './dataset/HumanML3D'
        opt.motion_dir = pjoin(opt.data_root, 'new_joint_vecs')
        opt.text_dir = pjoin(opt.data_root, 'texts')
        opt.joints_num = 22
        opt.dim_pose = 263
        opt.max_motion_length = 196
    elif opt.dataset_name == 'kit':
        opt.data_root = './dataset/KIT-ML'
        opt.motion_dir = pjoin(opt.data_root, 'new_joint_vecs')
        opt.text_dir = pjoin(opt.data_root, 'texts')
        opt.joints_num = 21
        opt.dim_pose = 251
        opt.max_motion_length = 196
    else:
        raise KeyError('Dataset not recognized')
    opt.dim_word = 300
    opt.num_classes = (200 // opt.unit_length)
    opt.dim_pos_ohot = len(POS_enumerator)
    opt.is_train = False
    opt.is_continue = False
    opt.device = device
    return opt
|
def save_json(save_path, data):
    """Serialize ``data`` as JSON into the file at ``save_path``."""
    with open(save_path, 'w') as fp:
        json.dump(data, fp)
|
def load_json(file_path):
    """Read and return the JSON document stored at ``file_path``."""
    with open(file_path, 'r') as fp:
        return json.load(fp)
|
def process(graph):
    """Convert an SRL (semantic-role-labeling) parse into entities/relations.

    ``graph['verbs']`` holds bracketed descriptions like
    ``"[ARG0: the man] [V: walks] forward"``.  Each bracketed span becomes
    an entity ``{'role', 'spans', 'words'}``; relations link a verb entity
    to its argument entities as ``(verb, entity, role)`` tuples.
    """
    (entities, relations) = ({}, [])
    for i in graph['verbs']:
        description = i['description']
        pos = 0  # running word position over non-opening tokens
        flag = 0  # 1 while inside an open "[ROLE: ..." span
        (_words, _spans) = ([], [])
        # NOTE(review): the inner loop variable `i` shadows the outer `i`.
        for i in description.split():
            # NOTE(review): `tags` and `verb` are re-initialized for every
            # token, so only the entity closed by the *last* token of the
            # description survives into the relation loop below — this
            # looks unintended; confirm against the callers.
            (tags, verb) = ({}, 0)
            if ('[' in i):
                # Opening token such as "[ARG0:" — strip brackets/colon.
                _role = i[1:(- 1)]
                flag = 1
                _spans = [pos]
                _words = []
            elif (']' in i):
                # Closing token "word]" — finalize the entity.
                _words.append(i[:(- 1)])
                entities[len(entities)] = {'role': _role, 'spans': _spans, 'words': _words}
                pos += 1
                flag = 0
                # NOTE(review): len(entities) here is the just-inserted
                # index + 1 — possible off-by-one; verify how the ids are
                # consumed downstream.
                if (_role != 'V'):
                    tags[len(entities)] = _role
                else:
                    verb = len(entities)
            else:
                pos += 1
                if flag:
                    _words.append(i)
                    _spans.append(pos)
        for i in tags:
            relations.append((verb, i, tags[i]))
    output = {'entities': entities, 'relations': relations}
    return output
|
class WordVectorizer(object):
    """Word-embedding lookup with part-of-speech one-hot encoding.

    Loads ``<prefix>_data.npy`` (vectors), ``<prefix>_words.pkl`` (vocab)
    and ``<prefix>_idx.pkl`` (word -> row index) from ``meta_root`` and
    maps ``'word/POS'`` items to ``(embedding, pos one-hot)`` pairs.
    """

    def __init__(self, meta_root, prefix):
        vectors = np.load(pjoin(meta_root, '%s_data.npy' % prefix))
        words = pickle.load(open(pjoin(meta_root, '%s_words.pkl' % prefix), 'rb'))
        word2idx = pickle.load(open(pjoin(meta_root, '%s_idx.pkl' % prefix), 'rb'))
        # Materialize word -> vector once, for O(1) lookups.
        self.word2vec = {w: vectors[word2idx[w]] for w in words}

    def _get_pos_ohot(self, pos):
        """One-hot encode a POS tag, falling back to the OTHER bucket."""
        pos_vec = np.zeros(len(POS_enumerator))
        bucket = pos if pos in POS_enumerator else 'OTHER'
        pos_vec[POS_enumerator[bucket]] = 1
        return pos_vec

    def __len__(self):
        return len(self.word2vec)

    def __getitem__(self, item):
        """Map 'word/POS' to (embedding, pos one-hot); unknown words map to
        the 'unk' vector with the OTHER tag.  VIP words override the given
        POS with their category from VIP_dict."""
        word, pos = item.split('/')
        if word not in self.word2vec:
            return (self.word2vec['unk'], self._get_pos_ohot('OTHER'))
        word_vec = self.word2vec[word]
        vip_pos = next((key for key, values in VIP_dict.items() if word in values), None)
        pos_vec = self._get_pos_ohot(vip_pos if vip_pos is not None else pos)
        return (word_vec, pos_vec)
|
class FrameSampler():
    """Chooses which frames of a motion sequence to keep.

    ``__call__`` delegates to ``get_frameix_from_data_index``; ``accept``
    rejects sequences whose duration is unusable for the configured
    sampling mode.
    """

    def __init__(self, sampling='conseq', sampling_step=1, request_frames=None, threshold_reject=0.75, max_len=1000, min_len=10):
        self.sampling = sampling
        self.sampling_step = sampling_step
        self.request_frames = request_frames
        self.threshold_reject = threshold_reject
        self.max_len = max_len
        self.min_len = min_len

    def __call__(self, num_frames):
        return get_frameix_from_data_index(num_frames, self.request_frames, self.sampling, self.sampling_step)

    def accept(self, duration):
        """Return False for sequences too short or too long to sample."""
        if self.request_frames is None:
            # Unconstrained sampling: enforce the [min_len, max_len] window.
            return self.min_len <= duration <= self.max_len
        # Fixed-length requests tolerate sequences down to a fraction of
        # the requested length.
        return duration >= self.threshold_reject * self.request_frames

    def get(self, key, default=None):
        """Dict-style attribute access with a default."""
        return getattr(self, key, default)

    def __getitem__(self, key):
        return getattr(self, key)
|
def subsample(num_frames, last_framerate, new_framerate):
    """Frame indices that downsample from one framerate to a lower one.

    The stride is the integer ratio of the two rates; the source framerate
    must be at least the target framerate.
    """
    step = int(last_framerate / new_framerate)
    assert step >= 1
    return np.arange(0, num_frames, step)
|
def upsample(motion, last_framerate, new_framerate):
    """Linearly interpolate ``motion`` to an integer-multiple framerate.

    Between every pair of consecutive frames, ``step - 1`` intermediate
    frames are inserted by linear blending; the final frame is appended
    unchanged, so the output has ``(len(motion) - 1) * step + 1`` frames.
    """
    step = int(new_framerate / last_framerate)
    assert step >= 1
    # Blend weights from 0 to 1 inclusive, one row per interpolated frame.
    alpha = np.linspace(0, 1, step + 1)
    left = np.einsum('l,...->l...', 1 - alpha, motion[:-1])
    right = np.einsum('l,...->l...', alpha, motion[1:])
    # Drop the last weight row — it duplicates the next segment's start.
    blended = (left + right)[:-1]
    flattened = np.concatenate(blended.swapaxes(1, 0))
    return np.concatenate((flattened, motion[[-1]]))
|
def get_frameix_from_data_index(num_frames: int, request_frames: Optional[int], sampling: str='conseq', sampling_step: int=1) -> Array:
    """Choose which frame indices to read from a sequence.

    Args:
        num_frames: total frames available in the sequence.
        request_frames: desired number of frames, or None to keep them all.
        sampling: 'conseq' (fixed stride), 'random_conseq' (random stride)
            or 'random' (sorted random subset without replacement).
        sampling_step: stride for 'conseq'; -1 means "largest stride that
            still fits the sequence".

    Returns:
        Frame indices of length ``request_frames`` (``num_frames`` when no
        request is made).

    Raises:
        ValueError: for an unrecognized ``sampling`` mode.
    """
    nframes = num_frames
    if request_frames is None:
        return np.arange(nframes)
    if request_frames > nframes:
        # Sequence too short: keep every frame and pad by repeating the last.
        fair = False  # alternative: uniform resampling with replacement
        if fair:
            return sorted(np.random.choice(range(nframes), request_frames, replace=True))
        ntoadd = max(0, request_frames - nframes)
        padding = (nframes - 1) * np.ones(ntoadd, dtype=int)
        return np.concatenate((np.arange(0, nframes), padding))
    if sampling in ['conseq', 'random_conseq']:
        step_max = (nframes - 1) // (request_frames - 1)
        if sampling == 'conseq':
            if sampling_step == -1 or sampling_step * (request_frames - 1) >= nframes:
                step = step_max
            else:
                step = sampling_step
        else:  # 'random_conseq'
            step = random.randint(1, step_max)
        lastone = step * (request_frames - 1)
        # Randomly place the strided window inside the sequence.
        shift_max = nframes - lastone - 1
        shift = random.randint(0, max(0, shift_max - 1))
        return shift + np.arange(0, lastone + 1, step)
    if sampling == 'random':
        choices = np.random.choice(range(nframes), request_frames, replace=False)
        return sorted(choices)
    raise ValueError('Sampling not recognized.')
|
def lengths_to_mask(lengths, max_len=None):
    """Build a boolean padding mask from a 1-D tensor of sequence lengths.

    Args:
        lengths: 1-D integer tensor of per-sample sequence lengths.
        max_len: number of mask columns; defaults to ``max(lengths)``.
            (Generalization: ``all_collate`` below calls this with an
            explicit padded size, which the one-argument form rejected.)

    Returns:
        Bool tensor of shape ``(len(lengths), max_len)`` where
        ``mask[i, j]`` is True exactly when ``j < lengths[i]``.
    """
    if max_len is None:
        max_len = max(lengths)
    mask = torch.arange(max_len, device=lengths.device).expand(len(lengths), max_len) < lengths.unsqueeze(1)
    return mask
|
def collate_tensors(batch):
    """Zero-pad a list of same-rank tensors to a common shape and stack them.

    The result has shape ``(len(batch), *per-dimension maxima)``; shorter
    entries are zero-padded at the end of each dimension.
    """
    ndim = batch[0].dim()
    padded_shape = tuple(max(t.size(d) for t in batch) for d in range(ndim))
    canvas = batch[0].new_zeros((len(batch),) + padded_shape)
    for slot, tensor in zip(canvas, batch):
        view = slot
        for d in range(ndim):
            view = view.narrow(d, 0, tensor.size(d))
        view.add_(tensor)
    return canvas
|
def all_collate(batch):
    """Collate dict samples into ``(motion, cond)`` for action/text models.

    Args:
        batch: list of dicts (None entries are dropped) with a 'motion'
            tensor and either explicit 'lengths' or an 'inp' entry the
            length is derived from; optional 'text' / 'action_text' lists
            are forwarded inside ``cond``.

    Returns:
        Tuple of the padded motion tensor and a conditioning dict of the
        form ``{'y': {'mask', 'lengths', ...}}``.
    """
    notnone_batches = [b for b in batch if b is not None]
    databatch = [b['motion'] for b in notnone_batches]
    if 'lengths' in notnone_batches[0]:
        lenbatch = [b['lengths'] for b in notnone_batches]
    else:
        lenbatch = [len(b['inp'][0][0]) for b in notnone_batches]
    databatchTensor = collate_tensors(databatch)
    lenbatchTensor = torch.as_tensor(lenbatch)
    # BUG FIX: the module-level lengths_to_mask() takes a single argument,
    # so the original two-argument call raised TypeError.  Build the mask
    # against the padded frame dimension directly instead.
    max_frames = databatchTensor.shape[-1]
    maskbatchTensor = (torch.arange(max_frames, device=lenbatchTensor.device).expand(len(lenbatchTensor), max_frames) < lenbatchTensor.unsqueeze(1)).unsqueeze(1).unsqueeze(1)
    motion = databatchTensor
    cond = {'y': {'mask': maskbatchTensor, 'lengths': lenbatchTensor}}
    if 'text' in notnone_batches[0]:
        cond['y'].update({'text': [b['text'] for b in notnone_batches]})
    if 'action_text' in notnone_batches[0]:
        cond['y'].update({'action_text': [b['action_text'] for b in notnone_batches]})
    return (motion, cond)
|
def mld_collate(batch):
    """Collate text-to-motion samples into a padded batch dict.

    Each sample is a tuple: (word_embs, pos_ohot, caption, text_len,
    motion, length, tokens, V, entities, relations) — assumed from the
    positional indexing below.  Samples are sorted by descending text
    length before padding.
    """
    samples = [b for b in batch if b is not None]
    samples.sort(key=lambda s: s[3], reverse=True)

    def _stack_float(idx):
        # Pad-and-stack one tuple position as a float tensor.
        return collate_tensors([torch.tensor(s[idx]).float() for s in samples])

    return {
        'motion': _stack_float(4),
        'text': [s[2] for s in samples],
        'length': [s[5] for s in samples],
        'word_embs': _stack_float(0),
        'pos_ohot': _stack_float(1),
        'text_len': collate_tensors([torch.tensor(s[3]) for s in samples]),
        'tokens': [s[6] for s in samples],
        'V': [s[7] for s in samples],
        'entities': [s[8] for s in samples],
        'relations': [s[9] for s in samples],
    }
|
def a2m_collate(batch):
    """Collate action-to-motion samples into a batch dict.

    Each sample is indexable as (motion, label, _, label_text); the length
    is taken from ``len(b[0][0][0])``.  The padded motion tensor is
    permuted with (0, 3, 2, 1) and flattened from dim 2 — presumably
    (batch, joints, feats, frames) -> (batch, frames, feats*joints);
    confirm against the dataset layout.
    """
    motions = [sample[0] for sample in batch]
    labels = [sample[1] for sample in batch]
    lengths = [len(sample[0][0][0]) for sample in batch]
    label_texts = [sample[3] for sample in batch]
    lengths_t = torch.as_tensor(lengths)
    return {
        'motion': collate_tensors(motions).permute(0, 3, 2, 1).flatten(start_dim=2),
        'action': torch.as_tensor(labels).unsqueeze(1),
        'action_text': label_texts,
        'mask': lengths_to_mask(lengths_t),
        'length': lengths_t,
    }
|
def parse_args(self, args=None, namespace=None):
    """Drop-in replacement for ``ArgumentParser.parse_args`` under Blender.

    Blender forwards script arguments after a ``'--'`` separator in
    ``sys.argv``; when no explicit ``args`` are given, only the tokens
    after that separator are passed to the backed-up original parser
    (``parse_args_bak``).  Without a separator, no arguments are forwarded.
    """
    if args is not None:
        return self.parse_args_bak(args=args, namespace=namespace)
    if '--' in sys.argv:
        forwarded = sys.argv[sys.argv.index('--') + 1:]
    else:
        forwarded = []
    return self.parse_args_bak(args=forwarded, namespace=namespace)
|
def code_path(path=''):
    """Resolve ``path`` relative to the original launch directory.

    Hydra changes the process cwd per run; this maps back to the directory
    the job was launched from so repo-relative assets resolve correctly.
    """
    launch_dir = Path(hydra.utils.get_original_cwd())
    return str(launch_dir / path)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.