code
stringlengths
17
6.64M
class TqdmLoggingHandler(logging.Handler):
    """Logging handler that routes records through ``tqdm.write``.

    Using tqdm's writer keeps active progress bars intact while logging.
    """

    def __init__(self, level=logging.NOTSET):
        super().__init__(level)

    def emit(self, record):
        try:
            tqdm.tqdm.write(self.format(record))
            self.flush()
        except Exception:
            # Delegate to logging's standard error-handling hook.
            self.handleError(record)
def generate_id() -> str:
    """Return a random 8-character run id over the alphabet [0-9a-z]."""
    alphabet = '0123456789abcdefghijklmnopqrstuvwxyz'
    generator = shortuuid.ShortUUID(alphabet=list(alphabet))
    return generator.random(8)
class Transform():
    """Base transform: collates a list of datastructs into one padded batch."""

    def collate(self, lst_datastruct):
        from mld.datasets.utils import collate_tensor_with_padding
        first = lst_datastruct[0]

        def _collate_key(key):
            # Keep None entries as None; otherwise pad-and-stack across the batch.
            if first[key] is None:
                return None
            return collate_tensor_with_padding([d[key] for d in lst_datastruct])

        kwargs = {key: _collate_key(key) for key in first.datakeys}
        return self.Datastruct(**kwargs)
@dataclass
class Datastruct():
    """Dict-like dataclass base for batched data with tensor fields.

    Subclasses set ``datakeys`` (typically in ``__post_init__``) listing the
    tensor-bearing fields; ``to``/``detach``/``device`` act on those keys only.
    """

    def __getitem__(self, key):
        return getattr(self, key)

    def __setitem__(self, key, value):
        self.__dict__[key] = value

    def get(self, key, default=None):
        return getattr(self, key, default)

    def __iter__(self):
        return self.keys()

    def keys(self):
        # Iterate the names of all dataclass fields.
        keys = [t.name for t in fields(self)]
        return iter(keys)

    def values(self):
        values = [getattr(self, t.name) for t in fields(self)]
        return iter(values)

    def items(self):
        data = [(t.name, getattr(self, t.name)) for t in fields(self)]
        return iter(data)

    def to(self, *args, **kwargs):
        # Move every non-None data tensor (device/dtype) in place; returns self.
        for key in self.datakeys:
            if (self[key] is not None):
                self[key] = self[key].to(*args, **kwargs)
        return self

    @property
    def device(self):
        # Device of the first data tensor; assumes that entry is not None.
        return self[self.datakeys[0]].device

    def detach(self):
        """Return a new datastruct with all data tensors detached from autograd."""
        def detach_or_none(tensor):
            if (tensor is not None):
                return tensor.detach()
            return None
        kwargs = {key: detach_or_none(self[key]) for key in self.datakeys}
        return self.transforms.Datastruct(**kwargs)
def main():
    """Denormalize HumanML3D features, recover joints and render an animation."""
    data_root = '../datasets/humanml3d'
    feastures_path = 'in.npy'
    animation_save_path = 'in.mp4'
    fps = 20
    # Dataset statistics used to undo feature normalization.
    mean = np.load(pjoin(data_root, 'Mean.npy'))
    std = np.load(pjoin(data_root, 'Std.npy'))
    features = np.load(feastures_path) * std + mean
    # 22 = number of HumanML3D body joints; 1.3 is a display scaling factor.
    joints = recover_from_ric(torch.tensor(features), 22).cpu().numpy() * 1.3
    plot_3d_motion(animation_save_path, joints, title='input', fps=fps)
class IdentityTransform(Transform):
    """No-op transform whose datastruct simply stores raw features."""

    def __init__(self, **kwargs):
        pass

    def Datastruct(self, **kwargs):
        return IdentityDatastruct(**kwargs)

    def __repr__(self):
        return 'IdentityTransform()'
@dataclass
class IdentityDatastruct(Datastruct):
    """Datastruct holding a single `features` tensor, unchanged."""
    transforms: IdentityTransform
    features: Optional[Tensor] = None

    def __post_init__(self):
        self.datakeys = ['features']

    def __len__(self):
        # Bug fix: the original returned len(self.rfeats), but no `rfeats`
        # attribute exists on this class — the only data field is `features`.
        return len(self.features)
class Joints2Jfeats(nn.Module):
    """Base module mapping joints to joint-features with optional normalization.

    When `normalization` is enabled, mean/std statistics are loaded from
    `path`/jfeats_mean.pt and `path`/jfeats_std.pt.
    """

    def __init__(self,
                 path: Optional[str] = None,
                 normalization: bool = False,
                 eps: float = 1e-12,
                 **kwargs) -> None:
        if normalization and path is None:
            raise TypeError('You should provide a path if normalization is on.')
        super().__init__()
        self.normalization = normalization
        self.eps = eps
        if normalization:
            # Buffers so statistics follow .to(device) and appear in state_dict.
            self.register_buffer('mean', torch.load(Path(path) / 'jfeats_mean.pt'))
            self.register_buffer('std', torch.load(Path(path) / 'jfeats_std.pt'))

    def normalize(self, features: Tensor) -> Tensor:
        if self.normalization:
            features = (features - self.mean) / (self.std + self.eps)
        return features

    def unnormalize(self, features: Tensor) -> Tensor:
        if self.normalization:
            features = features * self.std + self.mean
        return features
class Rots2Joints(nn.Module):
    """Base module mapping rotations to joints with optional normalization.

    When `normalization` is enabled, statistics are loaded from
    `path`/mean.pt and `path`/std.pt.
    """

    def __init__(self,
                 path: Optional[str] = None,
                 normalization: bool = False,
                 eps: float = 1e-12,
                 **kwargs) -> None:
        if normalization and path is None:
            raise TypeError('You should provide a path if normalization is on.')
        super().__init__()
        self.normalization = normalization
        self.eps = eps
        if normalization:
            # Buffers so statistics follow .to(device) and appear in state_dict.
            self.register_buffer('mean', torch.load(Path(path) / 'mean.pt'))
            self.register_buffer('std', torch.load(Path(path) / 'std.pt'))

    def normalize(self, features: Tensor) -> Tensor:
        if self.normalization:
            features = (features - self.mean) / (self.std + self.eps)
        return features

    def unnormalize(self, features: Tensor) -> Tensor:
        if self.normalization:
            features = features * self.std + self.mean
        return features
class Rots2Rfeats(nn.Module):
    """Base module mapping rotations to rotation-features with optional normalization.

    When `normalization` is enabled, statistics are loaded from
    `path`/rfeats_mean.pt and `path`/rfeats_std.pt.
    """

    def __init__(self,
                 path: Optional[str] = None,
                 normalization: bool = False,
                 eps: float = 1e-12,
                 **kwargs) -> None:
        if normalization and path is None:
            raise TypeError('You should provide a path if normalization is on.')
        super().__init__()
        self.normalization = normalization
        self.eps = eps
        if normalization:
            # Buffers so statistics follow .to(device) and appear in state_dict.
            self.register_buffer('mean', torch.load(Path(path) / 'rfeats_mean.pt'))
            self.register_buffer('std', torch.load(Path(path) / 'rfeats_std.pt'))

    def normalize(self, features: Tensor) -> Tensor:
        if self.normalization:
            features = (features - self.mean) / (self.std + self.eps)
        return features

    def unnormalize(self, features: Tensor) -> Tensor:
        if self.normalization:
            features = features * self.std + self.mean
        return features
class XYZTransform(Transform):
    """Transform producing XYZ datastructs backed by a Joints2Jfeats converter."""

    def __init__(self, joints2jfeats: Joints2Jfeats, **kwargs):
        self.joints2jfeats = joints2jfeats

    def Datastruct(self, **kwargs):
        return XYZDatastruct(_joints2jfeats=self.joints2jfeats,
                             transforms=self,
                             **kwargs)

    def __repr__(self):
        return 'XYZTransform()'
@dataclass
class XYZDatastruct(Datastruct):
    """Datastruct with lazily inter-convertible joints and joint-features.

    At most one of ``joints_``/``jfeats_`` needs to be supplied; the other is
    computed on demand via ``_joints2jfeats`` and cached.
    """
    transforms: XYZTransform
    _joints2jfeats: Joints2Jfeats
    features: Optional[Tensor] = None
    joints_: Optional[Tensor] = None
    jfeats_: Optional[Tensor] = None

    def __post_init__(self):
        self.datakeys = ['features', 'joints_', 'jfeats_']
        # `features` are joint-features by convention; alias them into jfeats_.
        if ((self.features is not None) and (self.jfeats_ is None)):
            self.jfeats_ = self.features

    @property
    def joints(self):
        # Cached; otherwise computed from jfeats via the inverse mapping.
        if (self.joints_ is not None):
            return self.joints_
        assert (self.jfeats_ is not None)
        # Move the converter onto the data's device before converting.
        self._joints2jfeats.to(self.jfeats.device)
        self.joints_ = self._joints2jfeats.inverse(self.jfeats)
        return self.joints_

    @property
    def jfeats(self):
        # Cached; otherwise computed from joints via the forward mapping.
        if (self.jfeats_ is not None):
            return self.jfeats_
        assert (self.joints_ is not None)
        self._joints2jfeats.to(self.joints.device)
        self.jfeats_ = self._joints2jfeats(self.joints)
        return self.jfeats_

    def __len__(self):
        return len(self.jfeats)
def load_example_input(txt_path):
    """Read "<length> <text>" lines from *txt_path*.

    Args:
        txt_path: path to a text file, one prompt per line, formatted as an
            integer length followed by a space and the prompt text.

    Returns:
        (texts, lens): list of prompt strings and list of int lengths.
    """
    texts, lens = [], []
    # Fix: the original leaked the file handle (bare open(), never closed);
    # a context manager guarantees it is closed.
    with open(txt_path, 'r') as file:
        for line in file:
            s = line.strip()
            if not s:
                # Robustness: skip blank lines instead of crashing on int('').
                continue
            s_l = s.split(' ')[0]
            s_t = s[len(s_l) + 1:]
            lens.append(int(s_l))
            texts.append(s_t)
            print('Length-{}: {}'.format(s_l, s_t))
    return texts, lens
def render_batch(npy_dir, execute_python='./scripts/visualize_motion.sh', mode='sequence'):
    """Invoke the batch visualization script on a directory of npy files."""
    command = f'{execute_python} {npy_dir} {mode}'
    os.system(command)
def render(execute_python, npy_path, jointtype, cfg_path):
    """Run the blender render script on one npy file; return the expected png path."""
    export_scripts = 'render.py'
    cmd = (f'{execute_python} --background --python {export_scripts} -- '
           f'--cfg={cfg_path} --npy={npy_path} --joint_type={jointtype}')
    os.system(cmd)
    # The render script writes its figure next to the input npy.
    return Path(str(npy_path).replace('.npy', '.png'))
def export_fbx_hand(pkl_path):
    """Export a SMPL-X (body + hands) pkl animation to FBX via a blender script.

    Idiom fix: the original bound the local variable `input`, shadowing the
    builtin; renamed to `in_path`/`out_path`.
    """
    in_path = pkl_path
    out_path = pkl_path.replace('.pkl', '.fbx')
    execute_python = '/apdcephfs/share_1227775/shingxchen/libs/blender_bpy/blender-2.93.2-linux-x64/blender'
    export_scripts = './scripts/fbx_output_smplx.py'
    os.system(f'{execute_python} -noaudio --background --python {export_scripts} --input {in_path} --output {out_path}')
def export_fbx(pkl_path):
    """Export a SMPL body pkl animation to FBX via a blender script.

    Idiom fix: the original bound the local variable `input`, shadowing the
    builtin; renamed to `in_path`/`out_path`.
    """
    in_path = pkl_path
    out_path = pkl_path.replace('.pkl', '.fbx')
    execute_python = '/apdcephfs/share_1227775/shingxchen/libs/blender_bpy/blender-2.93.2-linux-x64/blender'
    export_scripts = './scripts/fbx_output.py'
    os.system(f'{execute_python} -noaudio --background --python {export_scripts} --input {in_path} --output {out_path}')
def nfeats_of(rottype):
    """Number of features per joint for a rotation parameterization.

    Raises:
        TypeError: for unknown rotation type names. (Bug fix: the original
        *returned* the TypeError instance instead of raising it.)
    """
    if rottype in ['rotvec', 'axisangle']:
        return 3
    elif rottype in ['rotquat', 'quaternion']:
        return 4
    elif rottype in ['rot6d', '6drot', 'rotation6d']:
        return 6
    elif rottype in ['rotmat']:
        return 9
    raise TypeError("This rotation type doesn't have features.")
def axis_angle_to(newtype, rotations):
    """Convert axis-angle rotations to the representation named by *newtype*."""
    if newtype in ['matrix']:
        return geometry.axis_angle_to_matrix(rotations)
    if newtype in ['rotmat']:
        # Go through matrices, then flatten to 9 features.
        return matrix_to('rotmat', geometry.axis_angle_to_matrix(rotations))
    if newtype in ['rot6d', '6drot', 'rotation6d']:
        return matrix_to('rot6d', geometry.axis_angle_to_matrix(rotations))
    if newtype in ['rotquat', 'quaternion']:
        return geometry.axis_angle_to_quaternion(rotations)
    if newtype in ['rotvec', 'axisangle']:
        return rotations
    raise NotImplementedError
def matrix_to(newtype, rotations):
    """Convert rotation matrices (..., 3, 3) to the representation *newtype*."""
    if newtype in ['matrix']:
        return rotations
    if newtype in ['rotmat']:
        # Flatten the trailing 3x3 into 9 features.
        return rotations.reshape((*rotations.shape[:-2], 9))
    if newtype in ['rot6d', '6drot', 'rotation6d']:
        return geometry.matrix_to_rotation_6d(rotations)
    if newtype in ['rotquat', 'quaternion']:
        return geometry.matrix_to_quaternion(rotations)
    if newtype in ['rotvec', 'axisangle']:
        return geometry.matrix_to_axis_angle(rotations)
    raise NotImplementedError
def to_matrix(oldtype, rotations):
    """Convert rotations in representation *oldtype* back to matrices."""
    if oldtype in ['matrix']:
        return rotations
    if oldtype in ['rotmat']:
        # NOTE(review): drops the last TWO dims before appending (3, 3) —
        # assumes a trailing singleton layout like (..., 1, 9); confirm callers.
        return rotations.reshape((*rotations.shape[:-2], 3, 3))
    if oldtype in ['rot6d', '6drot', 'rotation6d']:
        return geometry.rotation_6d_to_matrix(rotations)
    if oldtype in ['rotquat', 'quaternion']:
        return geometry.quaternion_to_matrix(rotations)
    if oldtype in ['rotvec', 'axisangle']:
        return geometry.axis_angle_to_matrix(rotations)
    raise NotImplementedError
def fixseed(seed):
    """Seed the python, numpy and torch RNGs for reproducible runs."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
def get_root_idx(joinstype):
    """Return the root joint index for *joinstype* from the module-level `root_joints` table."""
    return root_joints[joinstype]
def create_logger(cfg, phase='train'):
    """Create the experiment output folder and return a configured logger.

    Side effects: sets cfg.FOLDER_EXP to <FOLDER>/<model_type>/<NAME> and
    delegates directory/file setup to `new_dir`/`config_logger` (rank-zero only).
    """
    root_output_dir = Path(cfg.FOLDER)
    if (not root_output_dir.exists()):
        print('=> creating {}'.format(root_output_dir))
        root_output_dir.mkdir()
    cfg_name = cfg.NAME
    model = cfg.model.model_type
    # Strip any extension from the configured experiment name.
    cfg_name = os.path.basename(cfg_name).split('.')[0]
    final_output_dir = ((root_output_dir / model) / cfg_name)
    cfg.FOLDER_EXP = str(final_output_dir)
    time_str = time.strftime('%Y-%m-%d-%H-%M-%S')
    new_dir(cfg, phase, time_str, final_output_dir)
    head = '%(asctime)-15s %(message)s'
    logger = config_logger(final_output_dir, time_str, phase, head)
    # config_logger is @rank_zero_only and returns None on other ranks;
    # give those ranks a quiet (CRITICAL-only) logger instead.
    if (logger is None):
        logger = logging.getLogger()
        logger.setLevel(logging.CRITICAL)
        logging.basicConfig(format=head)
    return logger
@rank_zero_only
def config_logger(final_output_dir, time_str, phase, head):
    """Configure root logging to a timestamped file plus console (rank 0 only).

    Returns the root logger; on non-zero ranks the decorator makes this a no-op
    returning None.
    """
    log_file = '{}_{}_{}.log'.format('log', time_str, phase)
    final_log_file = (final_output_dir / log_file)
    logging.basicConfig(filename=str(final_log_file))
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    # Mirror log records to the console with the same format.
    console = logging.StreamHandler()
    formatter = logging.Formatter(head)
    console.setFormatter(formatter)
    logging.getLogger('').addHandler(console)
    # Explicit file handler (mode 'w' truncates any existing file).
    file_handler = logging.FileHandler(final_log_file, 'w')
    file_handler.setFormatter(logging.Formatter(head))
    file_handler.setLevel(logging.INFO)
    logging.getLogger('').addHandler(file_handler)
    return logger
@rank_zero_only
def new_dir(cfg, phase, time_str, final_output_dir):
    """Archive a previous run dir (if any), create a fresh one, save the config.

    Rank-zero only. Side effect: sets cfg.TIME to the run timestamp.
    """
    cfg.TIME = str(time_str)
    # If an earlier experiment folder already holds logs, rename it with the
    # timestamp instead of overwriting — unless resuming or debugging.
    if (os.path.exists(final_output_dir) and (cfg.TRAIN.RESUME is None) and (not cfg.DEBUG)):
        file_list = sorted(os.listdir(final_output_dir), reverse=True)
        for item in file_list:
            if item.endswith('.log'):
                os.rename(str(final_output_dir), ((str(final_output_dir) + '_') + cfg.TIME))
                break
    final_output_dir.mkdir(parents=True, exist_ok=True)
    # Snapshot the resolved config next to the logs.
    config_file = '{}_{}_{}.yaml'.format('config', time_str, phase)
    final_config_file = (final_output_dir / config_file)
    OmegaConf.save(config=cfg, f=final_config_file)
def to_numpy(tensor):
    """Convert a torch tensor to a numpy array; pass numpy arrays through.

    Raises:
        ValueError: if the input is neither a torch tensor nor a numpy object.
    """
    if torch.is_tensor(tensor):
        return tensor.cpu().numpy()
    if type(tensor).__module__ != 'numpy':
        raise ValueError('Cannot convert {} to numpy array'.format(type(tensor)))
    return tensor
def to_torch(ndarray):
    """Convert a numpy array to a torch tensor; pass torch tensors through.

    Raises:
        ValueError: if the input is neither a numpy object nor a torch tensor.
    """
    if type(ndarray).__module__ == 'numpy':
        return torch.from_numpy(ndarray)
    if not torch.is_tensor(ndarray):
        raise ValueError('Cannot convert {} to torch tensor'.format(type(ndarray)))
    return ndarray
def cleanexit():
    """Terminate the process immediately with status 0.

    sys.exit raises SystemExit; catching it and calling os._exit(0) forces a
    hard exit that bypasses atexit/finally handlers — useful when lingering
    background threads would otherwise block shutdown.
    """
    import sys
    import os
    try:
        sys.exit(0)
    except SystemExit:
        os._exit(0)
def cfg_mean_nsamples_resolution(cfg):
    """Force number_of_samples to 1 when the mean is requested.

    Returns True when exactly one sample will be produced.
    """
    if cfg.mean and cfg.number_of_samples > 1:
        logger.error('All the samples will be the mean.. cfg.number_of_samples=1 will be forced.')
        cfg.number_of_samples = 1
    return cfg.number_of_samples == 1
def get_path(sample_path: Path, is_amass: bool, gender: str, split: str,
             onesample: bool, mean: bool, fact: float):
    """Build the sample output folder path encoding fact/gender/split options."""
    if onesample:
        extra_str = '_mean' if mean else ''
    else:
        extra_str = '_multi'
    fact_str = '' if fact == 1 else f'{fact}_'
    gender_str = f'{gender}_' if is_amass else ''
    return sample_path / f'{fact_str}{gender_str}{split}{extra_str}'
def lengths_to_mask(lengths: List[int], device: torch.device, max_len: int = None) -> Tensor:
    """Boolean mask of shape (len(lengths), max_len): True where pos < length."""
    lengths = torch.tensor(lengths, device=device)
    max_len = max_len if max_len else max(lengths)
    positions = torch.arange(max_len, device=device)
    return positions.expand(len(lengths), max_len) < lengths.unsqueeze(1)
def detach_to_numpy(tensor):
    """Detach *tensor* from the autograd graph, move it to CPU and return a numpy array."""
    return tensor.detach().cpu().numpy()
def remove_padding(tensors, lengths):
    """Trim each padded sequence in *tensors* back to its true length."""
    return [tensor[:length] for tensor, length in zip(tensors, lengths)]
def nfeats_of(rottype):
    """Number of features per joint for a rotation parameterization.

    Raises:
        TypeError: for unknown rotation type names. (Bug fix: the original
        *returned* the TypeError instance instead of raising it.)
    """
    if rottype in ['rotvec', 'axisangle']:
        return 3
    elif rottype in ['rotquat', 'quaternion']:
        return 4
    elif rottype in ['rot6d', '6drot', 'rotation6d']:
        return 6
    elif rottype in ['rotmat']:
        return 9
    raise TypeError("This rotation type doesn't have features.")
def axis_angle_to(newtype, rotations):
    """Convert axis-angle rotations to the representation named by *newtype*."""
    if newtype in ['matrix']:
        return geometry.axis_angle_to_matrix(rotations)
    if newtype in ['rotmat']:
        # Go through matrices, then flatten to 9 features.
        return matrix_to('rotmat', geometry.axis_angle_to_matrix(rotations))
    if newtype in ['rot6d', '6drot', 'rotation6d']:
        return matrix_to('rot6d', geometry.axis_angle_to_matrix(rotations))
    if newtype in ['rotquat', 'quaternion']:
        return geometry.axis_angle_to_quaternion(rotations)
    if newtype in ['rotvec', 'axisangle']:
        return rotations
    raise NotImplementedError
def matrix_to(newtype, rotations):
    """Convert rotation matrices (..., 3, 3) to the representation *newtype*."""
    if newtype in ['matrix']:
        return rotations
    if newtype in ['rotmat']:
        # Flatten the trailing 3x3 into 9 features.
        return rotations.reshape((*rotations.shape[:-2], 9))
    if newtype in ['rot6d', '6drot', 'rotation6d']:
        return geometry.matrix_to_rotation_6d(rotations)
    if newtype in ['rotquat', 'quaternion']:
        return geometry.matrix_to_quaternion(rotations)
    if newtype in ['rotvec', 'axisangle']:
        return geometry.matrix_to_axis_angle(rotations)
    raise NotImplementedError
def to_matrix(oldtype, rotations):
    """Convert rotations in representation *oldtype* back to matrices."""
    if oldtype in ['matrix']:
        return rotations
    if oldtype in ['rotmat']:
        # NOTE(review): drops the last TWO dims before appending (3, 3) —
        # assumes a trailing singleton layout like (..., 1, 9); confirm callers.
        return rotations.reshape((*rotations.shape[:-2], 3, 3))
    if oldtype in ['rot6d', '6drot', 'rotation6d']:
        return geometry.rotation_6d_to_matrix(rotations)
    if oldtype in ['rotquat', 'quaternion']:
        return geometry.quaternion_to_matrix(rotations)
    if oldtype in ['rotvec', 'axisangle']:
        return geometry.axis_angle_to_matrix(rotations)
    raise NotImplementedError
def subsample(num_frames, last_framerate, new_framerate):
    """Frame indices that downsample from last_framerate to new_framerate."""
    step = int(last_framerate / new_framerate)
    assert step >= 1
    return np.arange(0, num_frames, step)
def upsample(motion, last_framerate, new_framerate):
    """Linearly interpolate *motion* from last_framerate up to new_framerate.

    For integer step = new/last, inserts step-1 interpolated frames between
    each pair of consecutive source frames and re-appends the final frame.
    """
    step = int((new_framerate / last_framerate))
    assert (step >= 1)
    # Interpolation weights 0..1 across one source interval (step+1 samples).
    alpha = np.linspace(0, 1, (step + 1))
    last = np.einsum('l,...->l...', (1 - alpha), motion[:(- 1)])
    new = np.einsum('l,...->l...', alpha, motion[1:])
    # Drop each interval's duplicated endpoint, then flatten time-major.
    chuncks = (last + new)[:(- 1)]
    output = np.concatenate(chuncks.swapaxes(1, 0))
    # Re-append the final source frame, excluded by the interval construction.
    output = np.concatenate((output, motion[[(- 1)]]))
    return output
def lengths_to_mask(lengths):
    """Boolean mask from a 1-D tensor of lengths (built on the lengths' device)."""
    max_len = max(lengths)
    positions = torch.arange(max_len, device=lengths.device)
    return positions.expand(len(lengths), max_len) < lengths.unsqueeze(1)
def collate_tensors(batch):
    """Stack variable-sized tensors into one zero-padded tensor.

    Output shape is (len(batch), *per-dimension maxima); each sample occupies
    the top-left corner of its slice, the rest stays zero.
    """
    dims = batch[0].dim()
    max_size = [max(b.size(i) for b in batch) for i in range(dims)]
    canvas = batch[0].new_zeros(size=(len(batch),) + tuple(max_size))
    for i, b in enumerate(batch):
        # Narrow a view down to this sample's extent and add it in place.
        view = canvas[i]
        for d in range(dims):
            view = view.narrow(d, 0, b.size(d))
        view.add_(b)
    return canvas
def collate(batch):
    """Collate (data, label) pairs into padded tensors with masks and lengths."""
    databatch = [item[0] for item in batch]
    labelbatch = [item[1] for item in batch]
    # Sequence length read off the last axis of each sample's data.
    lenbatch = [len(item[0][0][0]) for item in batch]
    databatchTensor = collate_tensors(databatch)
    labelbatchTensor = torch.as_tensor(labelbatch)
    lenbatchTensor = torch.as_tensor(lenbatch)
    maskbatchTensor = lengths_to_mask(lenbatchTensor)
    return {'x': databatchTensor,
            'y': labelbatchTensor,
            'mask': maskbatchTensor,
            'lengths': lenbatchTensor}
def collate_data3d_slow(batch):
    """Collate a list of dicts by padding each key's tensors (slow path)."""
    return {key: collate_tensors([b[key] for b in batch])
            for key in batch[0].keys()}
def collate_data3d(batch):
    """Collate a list of dicts by stacking tensors; 'paths' stays a plain list."""
    collated = {}
    for key in batch[0].keys():
        values = [b[key] for b in batch]
        collated[key] = values if key == 'paths' else torch.stack(values, axis=0)
    return collated
def remove_chumpy_dep(dico):
    """Return a copy of *dico* with chumpy-typed values converted to numpy arrays."""
    return {key: (np.array(val) if 'chumpy' in str(type(val)) else val)
            for key, val in dico.items()}
def load_and_remove_chumpy_dep(path):
    """Load a latin-1 pickled model file and strip its chumpy dependencies."""
    with open(path, 'rb') as pkl_file:
        import warnings
        # Old SMPL pickles trigger deprecation noise on load.
        warnings.filterwarnings('ignore', category=DeprecationWarning)
        data = pickle.load(pkl_file, encoding='latin1')
        data = remove_chumpy_dep(data)
    return data
def load_npz_into_dict(path):
    """Load an .npz archive into a plain dict, stripping chumpy objects.

    Bug fix: the original loaded the module-level `smplh_fn` instead of the
    *path* argument, silently ignoring the parameter.
    """
    with np.load(path) as archive:
        data = {key: val for key, val in archive.items()}
    return remove_chumpy_dep(data)
def load_and_clean_data(path):
    """Dispatch loading of a model file by extension (.npz or .pkl).

    Raises:
        TypeError: for any other extension.
    """
    ext = os.path.splitext(path)[-1]
    if ext == '.npz':
        return load_npz_into_dict(path)
    if ext == '.pkl':
        return load_and_remove_chumpy_dep(path)
    raise TypeError('The format should be pkl or npz')
def merge_models(smplh_fn, mano_left_fn, mano_right_fn, output_folder='output'):
    """Merge a SMPL-H body model with left/right MANO hand models into one .npz.

    The output filename is inferred from gender keywords found in the input
    path (parent folder + file name); falls back to the input's name.
    """
    body_data = load_and_clean_data(smplh_fn)
    lhand_data = load_and_clean_data(mano_left_fn)
    rhand_data = load_and_clean_data(mano_right_fn)
    modelname = osp.split(smplh_fn)[1]
    parent_folder = osp.split(osp.split(smplh_fn)[0])[1]
    # Pick the canonical output name by gender keyword. Note 'female' is
    # checked first because 'male' is a substring of 'female'.
    if ('female' in ((parent_folder + '_') + modelname.lower())):
        out_fn = 'SMPLH_FEMALE.npz'
    elif ('male' in ((parent_folder + '_') + modelname.lower())):
        out_fn = 'SMPLH_MALE.npz'
    elif ('neutral' in ((parent_folder + '_') + modelname.lower())):
        out_fn = 'SMPLH_NEUTRAL.npz'
    else:
        out_fn = modelname
    output_data = body_data.copy()
    # Graft the MANO PCA components, coefficients and mean poses per hand.
    output_data['hands_componentsl'] = lhand_data['hands_components']
    output_data['hands_componentsr'] = rhand_data['hands_components']
    output_data['hands_coeffsl'] = lhand_data['hands_coeffs']
    output_data['hands_coeffsr'] = rhand_data['hands_coeffs']
    output_data['hands_meanl'] = lhand_data['hands_mean']
    output_data['hands_meanr'] = rhand_data['hands_mean']
    output_data = remove_chumpy_dep(output_data)
    out_path = osp.join(output_folder, out_fn)
    print('Saving to {}'.format(out_path))
    np.savez_compressed(out_path, **output_data)
def save_json(save_path, data):
    """Serialize *data* as JSON to *save_path*."""
    with open(save_path, 'w') as fp:
        json.dump(data, fp)
def load_json(file_path):
    """Read and return the JSON content of *file_path*."""
    with open(file_path, 'r') as fp:
        return json.load(fp)
def process(graph):
    """Parse SRL-style verb descriptions (e.g. "[ARG0: the man] [V: runs]")
    into verb nodes, entity nodes and (verb, entity, role) relations.

    Returns:
        dict with keys 'V' (verbs), 'entities' and 'relations'.
    """
    (V, entities, relations) = ({}, {}, [])
    for i in graph['verbs']:
        description = i['description']
        pos = 0          # word position, counting only non-bracket tokens
        flag = 0         # 1 while inside a [...] span
        (_words, _spans) = ([], [])
        (tags, verb) = ({}, 0)
        # NOTE(review): the inner loop reuses `i`, shadowing the outer verb
        # dict; safe only because the outer value is not used after this point.
        for i in description.split():
            if ('[' in i):
                # Opening token like "[ARG0:" — start collecting a new span.
                _role = i[1:(- 1)]
                flag = 1
                _spans = [pos]
                _words = []
            elif (']' in i):
                # Closing token "word]" — finish the span and file it by role.
                _words.append(i[:(- 1)])
                pos += 1
                flag = 0
                if (_role == 'V'):
                    V[len(V)] = {'role': _role, 'spans': _spans, 'words': _words}
                    verb = (len(V) - 1)
                else:
                    entities[len(entities)] = {'role': _role, 'spans': _spans, 'words': _words}
                    tags[(len(entities) - 1)] = _role
            else:
                pos += 1
                if flag:
                    _words.append(i)
                    _spans.append(pos)
        # Link every entity found in this description to its verb.
        for i in tags:
            relations.append((verb, i, tags[i]))
    output = {'V': V, 'entities': entities, 'relations': relations}
    return output
def extend_paths(path, keyids, *, onesample=True, number_of_samples=1):
    """Expand an output dir into one npy path per keyid (and per sample index).

    Bug fix: the multi-sample branch referenced an undefined name `index`
    (NameError at runtime); it now uses the loop variable, producing
    KEYID_0.npy .. KEYID_{n-1}.npy. Also stopped shadowing the `path` param.
    """
    if not onesample:
        template_path = str(path / 'KEYID_INDEX.npy')
        templates = [template_path.replace('INDEX', str(i))
                     for i in range(number_of_samples)]
    else:
        templates = [str(path / 'KEYID.npy')]
    all_paths = []
    for template in templates:
        all_paths.extend(template.replace('KEYID', keyid) for keyid in keyids)
    return all_paths
def render_cli() -> None:
    """CLI entry: render one npy file or a whole directory with blender.

    INPUT_MODE 'npy' renders a single file; 'dir' renders every npy in a
    folder (meshes first), skipping files that already have output.
    """
    cfg = parse_args(phase='render')
    cfg.FOLDER = cfg.RENDER.FOLDER
    if (cfg.RENDER.INPUT_MODE.lower() == 'npy'):
        output_dir = Path(os.path.dirname(cfg.RENDER.NPY))
        paths = [cfg.RENDER.NPY]
    elif (cfg.RENDER.INPUT_MODE.lower() == 'dir'):
        output_dir = Path(cfg.RENDER.DIR)
        paths = []
        file_list = natsort.natsorted(os.listdir(cfg.RENDER.DIR))
        # Rotate from a random start so parallel jobs pick different files.
        begin_id = random.randrange(0, len(file_list))
        file_list = (file_list[begin_id:] + file_list[:begin_id])
        # Queue mesh npys first, then joint npys.
        for item in file_list:
            if item.endswith('_mesh.npy'):
                paths.append(os.path.join(cfg.RENDER.DIR, item))
        for item in file_list:
            if (item.endswith('.npy') and (not item.endswith('_mesh.npy'))):
                paths.append(os.path.join(cfg.RENDER.DIR, item))
    print(f'begin to render for {paths[0]}')
    # Deferred imports: blender-side modules are only needed when rendering.
    import numpy as np
    from GraphMotion.render.blender import render
    from GraphMotion.render.blender.tools import mesh_detect
    from GraphMotion.render.video import Video
    init = True
    for path in paths:
        # Skip files already rendered (or being rendered by another job).
        if (cfg.RENDER.MODE == 'video'):
            if (os.path.exists(path.replace('.npy', '.mp4')) or os.path.exists(path.replace('.npy', '_frames'))):
                print(f'npy is rendered or under rendering {path}')
                continue
        elif os.path.exists(path.replace('.npy', '.png')):
            print(f'npy is rendered or under rendering {path}')
            continue
        if (cfg.RENDER.MODE == 'video'):
            frames_folder = os.path.join(output_dir, path.replace('.npy', '_frames').split('/')[(- 1)])
            os.makedirs(frames_folder, exist_ok=True)
        else:
            frames_folder = os.path.join(output_dir, path.replace('.npy', '.png').split('/')[(- 1)])
        try:
            data = np.load(path)
            if (cfg.RENDER.JOINT_TYPE.lower() == 'humanml3d'):
                is_mesh = mesh_detect(data)
                if (not is_mesh):
                    # Joint data needs rescaling from MMM to SMPL-H space.
                    data = (data * smplh_to_mmm_scaling_factor)
        except FileNotFoundError:
            print(f'{path} not found')
            continue
        # Recomputed; same value as the pre-load assignment above.
        if (cfg.RENDER.MODE == 'video'):
            frames_folder = os.path.join(output_dir, path.replace('.npy', '_frames').split('/')[(- 1)])
        else:
            frames_folder = os.path.join(output_dir, path.replace('.npy', '.png').split('/')[(- 1)])
        out = render(data, frames_folder,
                     canonicalize=cfg.RENDER.CANONICALIZE,
                     exact_frame=cfg.RENDER.EXACT_FRAME,
                     num=cfg.RENDER.NUM,
                     mode=cfg.RENDER.MODE,
                     faces_path=cfg.RENDER.FACES_PATH,
                     downsample=cfg.RENDER.DOWNSAMPLE,
                     always_on_floor=cfg.RENDER.ALWAYS_ON_FLOOR,
                     oldrender=cfg.RENDER.OLDRENDER,
                     jointstype=cfg.RENDER.JOINT_TYPE.lower(),
                     res=cfg.RENDER.RES,
                     init=init,
                     gt=cfg.RENDER.GT,
                     accelerator=cfg.ACCELERATOR,
                     device=cfg.DEVICE)
        # Only the first render needs full scene initialization.
        init = False
        if (cfg.RENDER.MODE == 'video'):
            # NOTE(review): both branches are identical — DOWNSAMPLE currently
            # has no effect on video assembly.
            if cfg.RENDER.DOWNSAMPLE:
                video = Video(frames_folder, fps=cfg.RENDER.FPS)
            else:
                video = Video(frames_folder, fps=cfg.RENDER.FPS)
            vid_path = frames_folder.replace('_frames', '.mp4')
            video.save(out_path=vid_path)
            # Frames are temporary; keep only the assembled video.
            shutil.rmtree(frames_folder)
            print(f'remove tmp fig folder and save video in {vid_path}')
        else:
            print(f'Frame generated at: {out}')
def Rodrigues(rotvec):
    """Rodrigues' formula: axis-angle vector (3,) -> rotation matrix."""
    theta = np.linalg.norm(rotvec)
    # Unit axis as a column; for theta == 0 keep the zero vector unchanged
    # (the result then degenerates to the identity).
    r = (rotvec / theta).reshape(3, 1) if theta > 0.0 else rotvec
    cost = np.cos(theta)
    skew = np.asarray([[0, -r[2], r[1]],
                       [r[2], 0, -r[0]],
                       [-r[1], r[0], 0]])
    return cost * np.eye(3) + (1 - cost) * r.dot(r.T) + np.sin(theta) * skew
def setup_scene(model_path, fps_target):
    """Set the scene fps, remove the default cube, import the SMPL fbx model."""
    scene = bpy.data.scenes['Scene']
    scene.render.fps = fps_target
    # Blender's startup file contains a default cube; delete it if present.
    if 'Cube' in bpy.data.objects:
        bpy.data.objects['Cube'].select_set(True)
        bpy.ops.object.delete()
    bpy.ops.import_scene.fbx(filepath=model_path)
def process_pose(current_frame, pose, trans, pelvis_position):
    """Key one frame of SMPL pose/translation onto the imported armature.

    Args:
        current_frame: blender frame number to keyframe.
        pose: flat axis-angle pose, 72 (24 joints) or 78 (26 joints) values.
        trans: root translation for this frame.
        pelvis_position: rest-pose pelvis head used to offset the root.
    """
    if (pose.shape[0] == 72):
        rod_rots = pose.reshape(24, 3)
    else:
        rod_rots = pose.reshape(26, 3)
    # Convert each axis-angle joint rotation to a rotation matrix.
    mat_rots = [Rodrigues(rod_rot) for rod_rot in rod_rots]
    armature = bpy.data.objects['Armature']
    bones = armature.pose.bones
    # Root location: axes reordered (y, z, x) and scaled by 100 (m -> cm),
    # relative to the rest-pose pelvis.
    bones[bone_name_from_index[0]].location = (Vector(((100 * trans[1]), (100 * trans[2]), (100 * trans[0]))) - pelvis_position)
    bones[bone_name_from_index[0]].keyframe_insert('location', frame=current_frame)
    for (index, mat_rot) in enumerate(mat_rots, 0):
        # Only the first 24 joints map onto the SMPL armature.
        if (index >= 24):
            continue
        bone = bones[bone_name_from_index[index]]
        bone_rotation = Matrix(mat_rot).to_quaternion()
        quat_x_90_cw = Quaternion((1.0, 0.0, 0.0), radians((- 90)))
        quat_z_90_cw = Quaternion((0.0, 0.0, 1.0), radians((- 90)))
        if (index == 0):
            # Re-orient the root from SMPL's frame into blender's frame.
            bone.rotation_quaternion = ((quat_x_90_cw @ quat_z_90_cw) @ bone_rotation)
        else:
            bone.rotation_quaternion = bone_rotation
        bone.keyframe_insert('rotation_quaternion', frame=current_frame)
    return
def process_poses(input_path, gender, fps_source, fps_target, start_origin, person_id=1):
    """Load a joblib pose file and keyframe every (subsampled) pose in blender.

    Returns the next free frame number after the animation.
    """
    print(('Processing: ' + input_path))
    data = joblib.load(input_path)
    # The `person_id` parameter is overwritten: the first person in the file
    # is always used.
    person_id = list(data.keys())[0]
    poses = data[person_id]['pose']
    if ('trans' not in data[person_id].keys()):
        trans = np.zeros((poses.shape[0], 3))
    else:
        trans = data[person_id]['trans']
    # Pick the gendered model and prefix its bone names accordingly.
    if (gender == 'female'):
        model_path = female_model_path
        for (k, v) in bone_name_from_index.items():
            bone_name_from_index[k] = ('f_avg_' + v)
    elif (gender == 'male'):
        model_path = male_model_path
        for (k, v) in bone_name_from_index.items():
            bone_name_from_index[k] = ('m_avg_' + v)
    else:
        print(('ERROR: Unsupported gender: ' + gender))
        sys.exit(1)
    # Never upsample: cap the target fps at the source fps.
    if (fps_target > fps_source):
        fps_target = fps_source
    print(f'Gender: {gender}')
    print(f'Number of source poses: {str(poses.shape[0])}')
    print(f'Source frames-per-second: {str(fps_source)}')
    print(f'Target frames-per-second: {str(fps_target)}')
    print('--------------------------------------------------')
    setup_scene(model_path, fps_target)
    scene = bpy.data.scenes['Scene']
    sample_rate = int((fps_source / fps_target))
    scene.frame_end = int((poses.shape[0] / sample_rate))
    # Read the rest-pose pelvis position in edit mode.
    bpy.ops.object.mode_set(mode='EDIT')
    pelvis_position = Vector(bpy.data.armatures[0].edit_bones[bone_name_from_index[0]].head)
    bpy.ops.object.mode_set(mode='OBJECT')
    source_index = 0
    frame = 1
    offset = np.array([0.0, 0.0, 0.0])
    while (source_index < poses.shape[0]):
        print(('Adding pose: ' + str(source_index)))
        if start_origin:
            # Anchor the animation at the origin using the first frame's xy.
            if (source_index == 0):
                offset = np.array([trans[source_index][0], trans[source_index][1], 0])
        scene.frame_set(frame)
        process_pose(frame, poses[source_index], (trans[source_index] - offset), pelvis_position)
        source_index += sample_rate
        frame += 1
    return frame
def export_animated_mesh(output_path):
    """Select the armature plus its first child mesh and export to .glb or .fbx.

    Exits the process with status 1 on an unsupported extension.
    """
    output_dir = os.path.dirname(output_path)
    if (not os.path.isdir(output_dir)):
        os.makedirs(output_dir, exist_ok=True)
    # Export only the armature and its (single) mesh child.
    bpy.ops.object.select_all(action='DESELECT')
    bpy.data.objects['Armature'].select_set(True)
    bpy.data.objects['Armature'].children[0].select_set(True)
    if output_path.endswith('.glb'):
        print('Exporting to glTF binary (.glb)')
        bpy.ops.export_scene.gltf(filepath=output_path, export_format='GLB', export_selected=True, export_morph=False)
    elif output_path.endswith('.fbx'):
        print('Exporting to FBX binary (.fbx)')
        bpy.ops.export_scene.fbx(filepath=output_path, use_selection=True, add_leaf_bones=False)
    else:
        print(('ERROR: Unsupported export format: ' + output_path))
        sys.exit(1)
    return
def Rodrigues(rotvec):
    """Rodrigues' formula: axis-angle vector (3,) -> rotation matrix."""
    theta = np.linalg.norm(rotvec)
    # Unit axis as a column; for theta == 0 keep the zero vector unchanged
    # (the result then degenerates to the identity).
    r = (rotvec / theta).reshape(3, 1) if theta > 0.0 else rotvec
    cost = np.cos(theta)
    skew = np.asarray([[0, -r[2], r[1]],
                       [r[2], 0, -r[0]],
                       [-r[1], r[0], 0]])
    return cost * np.eye(3) + (1 - cost) * r.dot(r.T) + np.sin(theta) * skew
def setup_scene(model_path, fps_target):
    """Set the scene fps, remove the default cube, import the fbx model."""
    scene = bpy.data.scenes['Scene']
    scene.render.fps = fps_target
    # Blender's startup file contains a default cube; delete it if present.
    if 'Cube' in bpy.data.objects:
        bpy.data.objects['Cube'].select_set(True)
        bpy.ops.object.delete()
    bpy.ops.import_scene.fbx(filepath=model_path)
def process_pose(current_frame, pose, lhandpose, rhandpose, trans, pelvis_position):
    """Key one frame of SMPL-X body + hand quaternions onto the armature.

    Args:
        current_frame: blender frame number to keyframe.
        pose: 24 body joint quaternions, flat (24*4,).
        lhandpose / rhandpose: 15 hand joint quaternions each, flat (15*4,).
        trans: root translation for this frame.
        pelvis_position: rest-pose pelvis head used to offset the root.
    """
    rod_rots = pose.reshape(24, 4)
    lhrod_rots = lhandpose.reshape(15, 4)
    rhrod_rots = rhandpose.reshape(15, 4)
    armature = bpy.data.objects[ROOT_NAME]
    bones = armature.pose.bones
    # Root location: axes reordered (y, z, x) and scaled by 100 (m -> cm),
    # relative to the rest-pose pelvis.
    bones[BODY_JOINT_NAMES[0]].location = (Vector(((100 * trans[1]), (100 * trans[2]), (100 * trans[0]))) - pelvis_position)
    bones[BODY_JOINT_NAMES[0]].keyframe_insert('location', frame=current_frame)
    for (index, mat_rot) in enumerate(rod_rots, 0):
        if (index >= 24):
            continue
        bone = bones[BODY_JOINT_NAMES[index]]
        bone_rotation = Quaternion(mat_rot)
        quat_x_90_cw = Quaternion((1.0, 0.0, 0.0), radians((- 90)))
        quat_z_90_cw = Quaternion((0.0, 0.0, 1.0), radians((- 90)))
        if (index == 0):
            # Re-orient the root from the source frame into blender's frame.
            bone.rotation_quaternion = ((quat_x_90_cw @ quat_z_90_cw) @ bone_rotation)
        else:
            bone.rotation_quaternion = bone_rotation
        bone.keyframe_insert('rotation_quaternion', frame=current_frame)
    # Left hand joints.
    for (index, mat_rot) in enumerate(lhrod_rots, 0):
        if (index >= 15):
            continue
        bone = bones[LHAND_JOINT_NAMES[index]]
        bone_rotation = Quaternion(mat_rot)
        bone.rotation_quaternion = bone_rotation
        bone.keyframe_insert('rotation_quaternion', frame=current_frame)
    # Right hand joints.
    for (index, mat_rot) in enumerate(rhrod_rots, 0):
        if (index >= 15):
            continue
        bone = bones[RHAND_JOINT_NAMES[index]]
        bone_rotation = Quaternion(mat_rot)
        bone.rotation_quaternion = bone_rotation
        bone.keyframe_insert('rotation_quaternion', frame=current_frame)
    return
def process_poses(input_path, gender, fps_source, fps_target, start_origin, person_id=1):
    """Load per-frame SMPL-X params from joblib and keyframe them in blender.

    The file maps frame keys to dicts containing 'rot' (body quaternions) and
    'hand_quaternions' (both hands flattened). Returns the next free frame.
    """
    print(('Processing: ' + input_path))
    smpl_params = joblib.load(input_path)
    (poses, lhposes, rhposes) = ([], [], [])
    for iframe in smpl_params.keys():
        poses.append(smpl_params[iframe]['rot'])
        # Slice each hand's 15 quaternions out of the packed hand vector
        # (indices per the upstream data layout).
        lhposes.append(smpl_params[iframe]['hand_quaternions'][4:64].copy().reshape((- 1), 4))
        rhposes.append(smpl_params[iframe]['hand_quaternions'][68:128].copy().reshape((- 1), 4))
    poses = np.vstack(poses)
    lhposes = np.stack(lhposes)
    rhposes = np.stack(rhposes)
    # This data carries no root translation.
    trans = np.zeros((poses.shape[0], 3))
    model_path = neural_smplx_path
    # Never upsample: cap the target fps at the source fps.
    if (fps_target > fps_source):
        fps_target = fps_source
    print(f'Gender: {gender}')
    print(f'Number of source poses: {str(poses.shape[0])}')
    print(f'Source frames-per-second: {str(fps_source)}')
    print(f'Target frames-per-second: {str(fps_target)}')
    print('--------------------------------------------------')
    setup_scene(model_path, fps_target)
    scene = bpy.data.scenes['Scene']
    sample_rate = int((fps_source / fps_target))
    scene.frame_end = int((poses.shape[0] / sample_rate))
    # Read the rest-pose pelvis position in edit mode.
    bpy.ops.object.mode_set(mode='EDIT')
    pelvis_position = Vector(bpy.data.armatures[0].edit_bones[BODY_JOINT_NAMES[0]].head)
    bpy.ops.object.mode_set(mode='OBJECT')
    source_index = 0
    frame = 1
    offset = np.array([0.0, 0.0, 0.0])
    while (source_index < poses.shape[0]):
        if start_origin:
            # Anchor the animation at the origin using the first frame's xy.
            if (source_index == 0):
                offset = np.array([trans[source_index][0], trans[source_index][1], 0])
        scene.frame_set(frame)
        process_pose(frame, poses[source_index], lhposes[source_index], rhposes[source_index], (trans[source_index] - offset), pelvis_position)
        source_index += sample_rate
        frame += 1
    return frame
def export_animated_mesh(output_path):
    """Select the root object plus its first child and export to .glb or .fbx.

    Exits the process with status 1 on an unsupported extension.
    """
    output_dir = os.path.dirname(output_path)
    if (not os.path.isdir(output_dir)):
        os.makedirs(output_dir, exist_ok=True)
    # Export only the root armature object and its (single) child.
    bpy.ops.object.select_all(action='DESELECT')
    bpy.data.objects[ROOT_NAME].select_set(True)
    bpy.data.objects[ROOT_NAME].children[0].select_set(True)
    if output_path.endswith('.glb'):
        print('Exporting to glTF binary (.glb)')
        bpy.ops.export_scene.gltf(filepath=output_path, export_format='GLB', export_selected=True, export_morph=False)
    elif output_path.endswith('.fbx'):
        print('Exporting to FBX binary (.fbx)')
        bpy.ops.export_scene.fbx(filepath=output_path, use_selection=True, add_leaf_bones=False)
    else:
        print(('ERROR: Unsupported export format: ' + output_path))
        sys.exit(1)
    return
def print_table(title, metrics):
    """Pretty-print a metrics dict as a centered rich table."""
    table = Table(title=title)
    table.add_column('Metrics', style='cyan', no_wrap=True)
    table.add_column('Value', style='magenta')
    for key, value in metrics.items():
        table.add_row(key, str(value))
    get_console().print(table, justify='center')
def get_metric_statistics(values, replication_times):
    """Return (mean, 95% confidence half-width) over axis 0 of *values*.

    The half-width uses the normal approximation 1.96 * std / sqrt(R).
    """
    mean = np.mean(values, axis=0)
    half_width = 1.96 * np.std(values, axis=0) / np.sqrt(replication_times)
    return mean, half_width
def main():
    """Profile the model's MACs and parameter count on the test dataloader.

    Loads config + checkpoint, runs `thop.profile` on one batch, and prints
    mean MACs (G) and params (M).
    """
    cfg = parse_args(phase='test')
    cfg.FOLDER = cfg.TEST.FOLDER
    logger = create_logger(cfg, phase='test')
    output_dir = Path(os.path.join(cfg.FOLDER, str(cfg.model.model_type), str(cfg.NAME), ('samples_' + cfg.TIME)))
    output_dir.mkdir(parents=True, exist_ok=True)
    logger.info(OmegaConf.to_yaml(cfg))
    pl.seed_everything(cfg.SEED_VALUE)
    if (cfg.ACCELERATOR == 'gpu'):
        os.environ['PYTHONWARNINGS'] = 'ignore'
        os.environ['TOKENIZERS_PARALLELISM'] = 'false'
    datasets = get_datasets(cfg, logger=logger, phase='test')[0]
    logger.info('datasets module {} initialized'.format(''.join(cfg.TRAIN.DATASETS)))
    model = get_model(cfg, datasets)
    logger.info('model {} loaded'.format(cfg.model.model_type))
    # Progress-bar label -> logged metric key.
    metric_monitor = {'Train_jf': 'recons/text2jfeats/train', 'Val_jf': 'recons/text2jfeats/val', 'Train_rf': 'recons/text2rfeats/train', 'Val_rf': 'recons/text2rfeats/val', 'APE root': 'Metrics/APE_root', 'APE mean pose': 'Metrics/APE_mean_pose', 'AVE root': 'Metrics/AVE_root', 'AVE mean pose': 'Metrics/AVE_mean_pose'}
    callbacks = [pl.callbacks.RichProgressBar(), ProgressLogger(metric_monitor=metric_monitor)]
    logger.info('Callbacks initialized')
    trainer = pl.Trainer(benchmark=False, max_epochs=cfg.TRAIN.END_EPOCH, accelerator=cfg.ACCELERATOR, devices=list(range(len(cfg.DEVICE))), default_root_dir=cfg.FOLDER_EXP, reload_dataloaders_every_n_epochs=1, log_every_n_steps=cfg.LOGGER.LOG_EVERY_STEPS, deterministic=False, detect_anomaly=False, enable_progress_bar=True, logger=None, callbacks=callbacks)
    logger.info('Loading checkpoints from {}'.format(cfg.TEST.CHECKPOINTS))
    state_dict = torch.load(cfg.TEST.CHECKPOINTS, map_location='cpu')['state_dict']
    model.load_state_dict(state_dict)
    macs_lst = []
    params_lst = []
    for (i, batch) in enumerate(datasets.test_dataloader()):
        print('batch size', len(batch['text']))
        (macs, params) = profile(model, (batch,))
        print('macs', (macs / 1000000000.0), 'G')
        # BUG FIX: a stray `return` here previously made the accumulation and
        # the summary prints below unreachable.
        macs_lst.append(macs)
        params_lst.append(params)
        # BUG FIX: the break previously tested `flops_lst`, which was never
        # appended to, so it could not fire; profile a single batch as intended.
        if (len(macs_lst) == 1):
            break
    print(macs_lst)
    print(params_lst)
    print((np.mean(macs_lst) / 1000000000.0))
    print((np.mean(params_lst) / 1000000.0))
def main():
    """CLI entry point: convert a folder of .ply meshes into one .npy file."""
    parser = ArgumentParser()
    opts = parser.add_argument_group('Params')
    opts.add_argument('--ply_dir', type=str, required=True, help='ply set')
    opts.add_argument('--out_dir', type=str, required=True, help='output folder')
    args = parser.parse_args()
    plys2npy(args.ply_dir, args.out_dir)
def plys2npy(ply_dir, out_dir):
    """Stack every non-ground-truth .ply mesh in *ply_dir* into an
    (N, 6890, 3) array saved as ``<basename>_mesh.npy`` under *out_dir*.

    Files are taken in natural sort order (frame_2 before frame_10); a
    leading ``SMPLFit_`` prefix is stripped from the output basename.
    Raises AssertionError if any mesh is not SMPL-sized (6890 vertices).
    """
    ply_dir = Path(ply_dir)
    paths = []
    file_list = natsort.natsorted(os.listdir(ply_dir))
    for item in file_list:
        if (item.endswith('.ply') and (not item.endswith('_gt.ply'))):
            paths.append(os.path.join(ply_dir, item))
    meshs = np.zeros((len(paths), 6890, 3))
    for (i, path) in enumerate(paths):
        mesh = trimesh.load_mesh(path, process=False)
        vs = mesh.vertices
        assert (vs.shape == (6890, 3))
        meshs[i] = vs
    basename = os.path.basename(ply_dir)
    if basename.startswith('SMPLFit_'):
        basename = basename[len('SMPLFit_'):]
    # ROBUSTNESS FIX: create the output folder instead of letting np.save
    # fail when it does not exist yet.
    os.makedirs(out_dir, exist_ok=True)
    file_name = os.path.join(out_dir, (basename + '_mesh.npy'))
    np.save(file_name, meshs)
def print_table(title, metrics):
    """Print *metrics* as a centered rich table with columns Metrics/Value."""
    table = Table(title=title)
    table.add_column('Metrics', style='cyan', no_wrap=True)
    table.add_column('Value', style='magenta')
    rows = [(k, str(v)) for (k, v) in metrics.items()]
    for (k, v) in rows:
        table.add_row(k, v)
    console = get_console()
    console.print(table, justify='center')
def get_metric_statistics(values, replication_times):
    """Aggregate replicated metric values.

    Returns a (mean, conf_interval) pair where conf_interval is the 95%
    normal-approximation half-width: 1.96 * std / sqrt(replication_times).
    """
    std = np.std(values, axis=0)
    conf_interval = (1.96 * std) / np.sqrt(replication_times)
    return (np.mean(values, axis=0), conf_interval)
def main():
    """Evaluation entry point.

    Runs the configured metrics over the test set, replicated
    cfg.TEST.REPLICATION_TIMES times, aggregates mean / 95% CI per metric,
    prints a summary table and dumps everything to a JSON file next to the
    samples folder.
    """
    cfg = parse_args(phase='test')
    cfg.FOLDER = cfg.TEST.FOLDER
    logger = create_logger(cfg, phase='test')
    output_dir = Path(os.path.join(cfg.FOLDER, str(cfg.model.model_type), str(cfg.NAME), ('samples_' + cfg.TIME)))
    output_dir.mkdir(parents=True, exist_ok=True)
    logger.info(OmegaConf.to_yaml(cfg))
    pl.seed_everything(cfg.SEED_VALUE)
    if (cfg.ACCELERATOR == 'gpu'):
        os.environ['PYTHONWARNINGS'] = 'ignore'
        os.environ['TOKENIZERS_PARALLELISM'] = 'false'
    datasets = get_datasets(cfg, logger=logger, phase='test')[0]
    logger.info('datasets module {} initialized'.format(''.join(cfg.TRAIN.DATASETS)))
    model = get_model(cfg, datasets)
    logger.info('model {} loaded'.format(cfg.model.model_type))
    # Progress-bar label -> logged metric key.
    metric_monitor = {'Train_jf': 'recons/text2jfeats/train', 'Val_jf': 'recons/text2jfeats/val', 'Train_rf': 'recons/text2rfeats/train', 'Val_rf': 'recons/text2rfeats/val', 'APE root': 'Metrics/APE_root', 'APE mean pose': 'Metrics/APE_mean_pose', 'AVE root': 'Metrics/AVE_root', 'AVE mean pose': 'Metrics/AVE_mean_pose'}
    callbacks = [pl.callbacks.RichProgressBar(), ProgressLogger(metric_monitor=metric_monitor)]
    logger.info('Callbacks initialized')
    trainer = pl.Trainer(benchmark=False, max_epochs=cfg.TRAIN.END_EPOCH, accelerator=cfg.ACCELERATOR, devices=list(range(len(cfg.DEVICE))), default_root_dir=cfg.FOLDER_EXP, reload_dataloaders_every_n_epochs=1, log_every_n_steps=cfg.LOGGER.LOG_EVERY_STEPS, deterministic=False, detect_anomaly=False, enable_progress_bar=True, logger=None, callbacks=callbacks)
    logger.info('Loading checkpoints from {}'.format(cfg.TEST.CHECKPOINTS))
    state_dict = torch.load(cfg.TEST.CHECKPOINTS, map_location='cpu')['state_dict']
    model.load_state_dict(state_dict)
    all_metrics = {}
    replication_times = cfg.TEST.REPLICATION_TIMES
    for i in range(replication_times):
        metrics_type = ', '.join(cfg.METRIC.TYPE)
        logger.info(f'Evaluating {metrics_type} - Replication {i}')
        metrics = trainer.test(model, datamodule=datasets)[0]
        if ('TM2TMetrics' in metrics_type):
            # TM2T metrics need an extra pass with the dataset in
            # multi-modality mode; merge those results in.
            logger.info(f'Evaluating MultiModality - Replication {i}')
            datasets.mm_mode(True)
            mm_metrics = trainer.test(model, datamodule=datasets)[0]
            metrics.update(mm_metrics)
            datasets.mm_mode(False)
        # Accumulate each metric's value across replications.
        for (key, item) in metrics.items():
            if (key not in all_metrics):
                all_metrics[key] = [item]
            else:
                all_metrics[key] += [item]
    # Aggregate replications into mean and 95% confidence interval.
    all_metrics_new = {}
    for (key, item) in all_metrics.items():
        (mean, conf_interval) = get_metric_statistics(np.array(item), replication_times)
        all_metrics_new[(key + '/mean')] = mean
        all_metrics_new[(key + '/conf_interval')] = conf_interval
    print_table(f'Mean Metrics', all_metrics_new)
    all_metrics_new.update(all_metrics)
    metric_file = (output_dir.parent / f'metrics_{cfg.TIME}.json')
    with open(metric_file, 'w', encoding='utf-8') as f:
        json.dump(all_metrics_new, f, indent=4)
    logger.info(f'Testing done, the metrics are saved to {str(metric_file)}')
def main():
    """Training entry point.

    Optionally resumes from cfg.TRAIN.RESUME (restoring config, latest
    checkpoint and wandb run id), sets up loggers/datasets/model/callbacks,
    optionally loads a pretrained VAE and/or full model, then runs
    trainer.fit.
    """
    cfg = parse_args()
    logger = create_logger(cfg, phase='train')
    if cfg.TRAIN.RESUME:
        resume = cfg.TRAIN.RESUME
        # Keep the current TRAIN section; the rest of the config is restored
        # from the resumed experiment's yaml.
        backcfg = cfg.TRAIN.copy()
        if os.path.exists(resume):
            file_list = sorted(os.listdir(resume), reverse=True)
            for item in file_list:
                if item.endswith('.yaml'):
                    cfg = OmegaConf.load(os.path.join(resume, item))
                    cfg.TRAIN = backcfg
                    break
            # Checkpoint files look like 'epoch=NNN.ckpt'; sort numerically on
            # the epoch number and take the latest.
            checkpoints = sorted(os.listdir(os.path.join(resume, 'checkpoints')), key=(lambda x: int(x[6:(- 5)])), reverse=True)
            for checkpoint in checkpoints:
                if ('epoch=' in checkpoint):
                    cfg.TRAIN.PRETRAINED = os.path.join(resume, 'checkpoints', checkpoint)
                    break
            # NOTE(review): no break here, so the LAST matching 'run-*' entry
            # wins — confirm this picks the intended wandb run.
            wandb_list = sorted(os.listdir(os.path.join(resume, 'wandb')), reverse=True)
            for item in wandb_list:
                if ('run-' in item):
                    cfg.LOGGER.WANDB.RESUME_ID = item.split('-')[(- 1)]
        else:
            raise ValueError('Resume path is not right.')
    pl.seed_everything(cfg.SEED_VALUE)
    if (cfg.ACCELERATOR == 'gpu'):
        os.environ['PYTHONWARNINGS'] = 'ignore'
        os.environ['TOKENIZERS_PARALLELISM'] = 'false'
    loggers = []
    if cfg.LOGGER.WANDB.PROJECT:
        wandb_logger = pl_loggers.WandbLogger(project=cfg.LOGGER.WANDB.PROJECT, offline=cfg.LOGGER.WANDB.OFFLINE, id=cfg.LOGGER.WANDB.RESUME_ID, save_dir=cfg.FOLDER_EXP, version='', name=cfg.NAME, anonymous=False, log_model=False)
        loggers.append(wandb_logger)
    if cfg.LOGGER.TENSORBOARD:
        tb_logger = pl_loggers.TensorBoardLogger(save_dir=cfg.FOLDER_EXP, sub_dir='tensorboard', version='', name='')
        loggers.append(tb_logger)
    logger.info(OmegaConf.to_yaml(cfg))
    datasets = get_datasets(cfg, logger=logger)
    logger.info('datasets module {} initialized'.format(''.join(cfg.TRAIN.DATASETS)))
    model = get_model(cfg, datasets[0])
    logger.info('model {} loaded'.format(cfg.model.model_type))
    # Progress-bar label -> logged metric key.
    metric_monitor = {'Train_jf': 'recons/text2jfeats/train', 'Val_jf': 'recons/text2jfeats/val', 'Train_rf': 'recons/text2rfeats/train', 'Val_rf': 'recons/text2rfeats/val', 'APE root': 'Metrics/APE_root', 'APE mean pose': 'Metrics/APE_mean_pose', 'AVE root': 'Metrics/AVE_root', 'AVE mean pose': 'Metrics/AVE_mean_pose', 'R_TOP_1': 'Metrics/R_precision_top_1', 'R_TOP_2': 'Metrics/R_precision_top_2', 'R_TOP_3': 'Metrics/R_precision_top_3', '1_R_TOP_1': 'Metrics/s1_R_precision_top_1', '1_R_TOP_2': 'Metrics/s1_R_precision_top_2', '1_R_TOP_3': 'Metrics/s1_R_precision_top_3', '2_R_TOP_1': 'Metrics/s2_R_precision_top_1', '2_R_TOP_2': 'Metrics/s2_R_precision_top_2', '2_R_TOP_3': 'Metrics/s2_R_precision_top_3', '3_R_TOP_1': 'Metrics/s3_R_precision_top_1', '3_R_TOP_2': 'Metrics/s3_R_precision_top_2', '3_R_TOP_3': 'Metrics/s3_R_precision_top_3', 'gt_R_TOP_1': 'Metrics/gt_R_precision_top_1', 'gt_R_TOP_2': 'Metrics/gt_R_precision_top_2', 'gt_R_TOP_3': 'Metrics/gt_R_precision_top_3', 'FID': 'Metrics/FID', '1_FID': 'Metrics/1_FID', '2_FID': 'Metrics/2_FID', '3_FID': 'Metrics/3_FID', 'gt_FID': 'Metrics/gt_FID', 'Diversity': 'Metrics/Diversity', '1_Diversity': 'Metrics/1_Diversity', '2_Diversity': 'Metrics/2_Diversity', '3_Diversity': 'Metrics/3_Diversity', 'gt_Diversity': 'Metrics/gt_Diversity', 'MM dist': 'Metrics/Matching_score', '1_MM dist': 'Metrics/s1_Matching_score', '2_MM dist': 'Metrics/s2_Matching_score', '3_MM dist': 'Metrics/s3_Matching_score', 'MultiModality': 'Metrics/MultiModality', '1_MultiModality': 'Metrics/s1_MultiModality', '2_MultiModality': 'Metrics/s2_MultiModality', '3_MultiModality': 'Metrics/s3_MultiModality', 'Accuracy': 'Metrics/accuracy', 'gt_Accuracy': 'Metrics/gt_accuracy'}
    # NOTE(review): 'SACE_CHECKPOINT_EPOCH' looks like a typo for
    # 'SAVE_...' but matches the config key used project-wide; do not rename
    # here alone.
    callbacks = [pl.callbacks.RichProgressBar(), ProgressLogger(metric_monitor=metric_monitor), ModelCheckpoint(dirpath=os.path.join(cfg.FOLDER_EXP, 'checkpoints'), filename='{epoch}', monitor='step', mode='max', every_n_epochs=cfg.LOGGER.SACE_CHECKPOINT_EPOCH, save_top_k=(- 1), save_last=False, save_on_train_epoch_end=True)]
    logger.info('Callbacks initialized')
    if (len(cfg.DEVICE) > 1):
        ddp_strategy = DDPStrategy(find_unused_parameters=True)
    else:
        ddp_strategy = 'auto'
    trainer = pl.Trainer(benchmark=False, max_epochs=cfg.TRAIN.END_EPOCH, accelerator=cfg.ACCELERATOR, devices=cfg.DEVICE, strategy=ddp_strategy, default_root_dir=cfg.FOLDER_EXP, log_every_n_steps=cfg.LOGGER.VAL_EVERY_STEPS, deterministic=False, detect_anomaly=False, enable_progress_bar=True, logger=loggers, callbacks=callbacks, check_val_every_n_epoch=cfg.LOGGER.VAL_EVERY_STEPS)
    logger.info('Trainer initialized')
    # e.g. 'mld.models.architectures.GraphMotionVAE' -> 'graphmotion'
    vae_type = cfg.model.motion_vae.target.split('.')[(- 1)].lower().replace('vae', '')
    if cfg.TRAIN.PRETRAINED_VAE:
        logger.info('Loading pretrain vae from {}'.format(cfg.TRAIN.PRETRAINED_VAE))
        state_dict = torch.load(cfg.TRAIN.PRETRAINED_VAE, map_location='cpu')['state_dict']
        from collections import OrderedDict
        if (vae_type in ['actor']):
            # ACTOR-style checkpoints store encoder and decoder separately.
            (encoder_dict, decoder_dict) = (OrderedDict(), OrderedDict())
            for (k, v) in state_dict.items():
                if (k.split('.')[0] == 'motion_encoder'):
                    name = k.replace('motion_encoder.', '')
                    encoder_dict[name] = v
                elif (k.split('.')[0] == 'motion_decoder'):
                    name = k.replace('motion_decoder.', '')
                    decoder_dict[name] = v
            model.motion_encoder.load_state_dict(encoder_dict, strict=True)
            model.motion_decoder.load_state_dict(decoder_dict, strict=True)
        # BUG FIX: vae_type is lowercased above, so comparing against
        # 'GraphMotion' (mixed case) could never match and the pretrained
        # VAE was silently skipped.
        elif (vae_type in ['graphmotion']):
            vae_dict = OrderedDict()
            for (k, v) in state_dict.items():
                if (k.split('.')[0] == 'vae'):
                    name = k.replace('vae.', '')
                    vae_dict[name] = v
            model.vae.load_state_dict(vae_dict, strict=True)
    if cfg.TRAIN.PRETRAINED:
        logger.info('Loading pretrain mode from {}'.format(cfg.TRAIN.PRETRAINED))
        logger.info('Attention! VAE will be recovered')
        state_dict = torch.load(cfg.TRAIN.PRETRAINED, map_location='cpu')['state_dict']
        from collections import OrderedDict
        new_state_dict = OrderedDict()
        for (k, v) in state_dict.items():
            # Positional-encoding buffers are size-dependent; skip them.
            if (k not in ['denoiser.sequence_pos_encoding.pe']):
                new_state_dict[k] = v
        model.load_state_dict(new_state_dict, strict=False)
    if cfg.TRAIN.RESUME:
        trainer.fit(model, datamodule=datasets[0], ckpt_path=cfg.TRAIN.PRETRAINED)
    else:
        trainer.fit(model, datamodule=datasets[0])
    checkpoint_folder = trainer.checkpoint_callback.dirpath
    logger.info(f'The checkpoints are stored in {checkpoint_folder}')
    logger.info(f'The outputs of this experiment are stored in {cfg.FOLDER_EXP}')
    logger.info('Training ends!')
def dataloader_msrvtt_train(args, tokenizer):
    """Build the MSR-VTT training dataloader.

    Returns (dataloader, dataset length, sampler-or-None). Uses a
    DistributedSampler when torch.distributed is initialized, otherwise
    falls back to plain shuffling.
    """
    msrvtt_dataset = MSRVTTDataset(subset='train', anno_path=args.anno_path, video_path=args.video_path, max_words=args.max_words, tokenizer=tokenizer, max_frames=args.max_frames, video_framerate=args.video_framerate, config=args)
    try:
        train_sampler = torch.utils.data.distributed.DistributedSampler(msrvtt_dataset)
    except Exception:
        # FIX: was a bare `except:` (also swallowed KeyboardInterrupt /
        # SystemExit); keep the best-effort fallback but narrow it.
        train_sampler = None
    dataloader = DataLoader(msrvtt_dataset, batch_size=(args.batch_size // args.world_size), num_workers=args.workers, pin_memory=False, shuffle=(train_sampler is None), sampler=train_sampler, drop_last=True)
    return (dataloader, len(msrvtt_dataset), train_sampler)
def dataloader_msrvtt_test(args, tokenizer, subset='test'):
    """Build the MSR-VTT evaluation dataloader for *subset* ('test'/'val').

    Returns (dataloader, dataset length). No shuffling, no dropped batches.
    """
    msrvtt_testset = MSRVTTDataset(subset=subset, anno_path=args.anno_path, video_path=args.video_path, max_words=args.max_words, tokenizer=tokenizer, max_frames=args.max_frames, video_framerate=args.video_framerate, config=args)
    try:
        test_sampler = torch.utils.data.distributed.DistributedSampler(msrvtt_testset)
    except Exception:
        # FIX: narrowed from a bare `except:`; non-distributed runs simply
        # use no sampler.
        test_sampler = None
    dataloader_msrvtt = DataLoader(msrvtt_testset, batch_size=(args.batch_size_val // args.world_size), num_workers=args.workers, shuffle=False, sampler=test_sampler, drop_last=False)
    return (dataloader_msrvtt, len(msrvtt_testset))
def dataloader_activity_train(args, tokenizer):
    """Build the ActivityNet training dataloader.

    Returns (dataloader, dataset length, sampler-or-None).
    """
    activity_dataset = ActivityNetDataset(subset='train', data_path=args.anno_path, features_path=args.video_path, max_words=args.max_words, feature_framerate=args.video_framerate, tokenizer=tokenizer, max_frames=args.max_frames)
    # CONSISTENCY/ROBUSTNESS FIX: the sibling loaders in this file tolerate
    # running without torch.distributed; this one previously crashed.
    try:
        train_sampler = torch.utils.data.distributed.DistributedSampler(activity_dataset)
    except Exception:
        train_sampler = None
    dataloader = DataLoader(activity_dataset, batch_size=(args.batch_size // args.world_size), num_workers=args.workers, pin_memory=False, shuffle=(train_sampler is None), sampler=train_sampler, drop_last=True)
    return (dataloader, len(activity_dataset), train_sampler)
def dataloader_activity_test(args, tokenizer, subset='test'):
    """Build the ActivityNet evaluation dataloader for *subset*.

    Returns (dataloader, dataset length). No shuffling, no dropped batches.
    """
    activity_testset = ActivityNetDataset(subset=subset, data_path=args.anno_path, features_path=args.video_path, max_words=args.max_words, feature_framerate=args.video_framerate, tokenizer=tokenizer, max_frames=args.max_frames)
    try:
        test_sampler = torch.utils.data.distributed.DistributedSampler(activity_testset)
    except Exception:
        # FIX: narrowed from a bare `except:`.
        test_sampler = None
    dataloader_activity = DataLoader(activity_testset, batch_size=(args.batch_size_val // args.world_size), num_workers=args.workers, shuffle=False, sampler=test_sampler, drop_last=False)
    return (dataloader_activity, len(activity_testset))
def dataloader_didemo_train(args, tokenizer):
    """Build the DiDeMo training dataloader.

    Returns (dataloader, dataset length, sampler-or-None).
    """
    didemo_dataset = DiDeMoDataset(subset='train', data_path=args.anno_path, features_path=args.video_path, max_words=args.max_words, feature_framerate=args.video_framerate, tokenizer=tokenizer, max_frames=args.max_frames)
    # CONSISTENCY/ROBUSTNESS FIX: match the other loaders in this file and
    # tolerate non-distributed runs instead of crashing.
    try:
        train_sampler = torch.utils.data.distributed.DistributedSampler(didemo_dataset)
    except Exception:
        train_sampler = None
    dataloader = DataLoader(didemo_dataset, batch_size=(args.batch_size // args.world_size), num_workers=args.workers, pin_memory=False, shuffle=(train_sampler is None), sampler=train_sampler, drop_last=True)
    return (dataloader, len(didemo_dataset), train_sampler)
def dataloader_didemo_test(args, tokenizer, subset='test'):
    """Build the DiDeMo evaluation dataloader for *subset*.

    Returns (dataloader, dataset length). No shuffling, no dropped batches.
    """
    didemo_testset = DiDeMoDataset(subset=subset, data_path=args.anno_path, features_path=args.video_path, max_words=args.max_words, feature_framerate=args.video_framerate, tokenizer=tokenizer, max_frames=args.max_frames)
    try:
        test_sampler = torch.utils.data.distributed.DistributedSampler(didemo_testset)
    except Exception:
        # FIX: narrowed from a bare `except:`.
        test_sampler = None
    dataloader_didemo = DataLoader(didemo_testset, batch_size=(args.batch_size_val // args.world_size), num_workers=args.workers, shuffle=False, sampler=test_sampler, drop_last=False)
    return (dataloader_didemo, len(didemo_testset))
class MSRVTTDataset(RetrievalDataset):
    """MSR-VTT retrieval dataset (9k train split, JSFUSION val/test split)."""

    def __init__(self, subset, anno_path, video_path, tokenizer, max_words=32, max_frames=12, video_framerate=1, image_resolution=224, mode='all', config=None):
        super(MSRVTTDataset, self).__init__(subset, anno_path, video_path, tokenizer, max_words, max_frames, video_framerate, image_resolution, mode, config=config)

    def _get_anns(self, subset='train'):
        """Collect annotations for *subset*.

        Returns:
            video_dict: OrderedDict video_id -> video file path.
            sentences_dict: OrderedDict index -> (video_id, (caption, start, end));
                start/end are always None for MSR-VTT.
        """
        csv_path = {'train': join(self.anno_path, 'MSRVTT_train.9k.csv'), 'val': join(self.anno_path, 'MSRVTT_JSFUSION_test.csv'), 'test': join(self.anno_path, 'MSRVTT_JSFUSION_test.csv')}[subset]
        if not exists(csv_path):
            # FIX: include the offending path (was raised with no message).
            raise FileNotFoundError(csv_path)
        csv = pd.read_csv(csv_path)
        video_id_list = list(csv['video_id'].values)
        video_dict = OrderedDict()
        sentences_dict = OrderedDict()
        if (subset == 'train'):
            anno_path = join(self.anno_path, 'MSRVTT_data.json')
            # BUG FIX: close the annotation file (previously leaked via
            # json.load(open(...))).
            with open(anno_path, 'r') as f:
                data = json.load(f)
            # PERF FIX: set membership instead of O(n) list scans per sentence.
            video_id_set = set(video_id_list)
            for itm in data['sentences']:
                if (itm['video_id'] in video_id_set):
                    sentences_dict[len(sentences_dict)] = (itm['video_id'], (itm['caption'], None, None))
                    video_dict[itm['video_id']] = join(self.video_path, '{}.mp4'.format(itm['video_id']))
        else:
            for (_, itm) in csv.iterrows():
                sentences_dict[len(sentences_dict)] = (itm['video_id'], (itm['sentence'], None, None))
                video_dict[itm['video_id']] = join(self.video_path, '{}.mp4'.format(itm['video_id']))
        unique_sentence = set([v[1][0] for v in sentences_dict.values()])
        print('[{}] Unique sentence is {} , all num is {}'.format(subset, len(unique_sentence), len(sentences_dict)))
        return (video_dict, sentences_dict)
def _interpolation(kwargs):
    """Pop 'resample' from *kwargs*; choose randomly when several are given.

    Defaults to Image.BILINEAR when the key is absent.
    """
    method = kwargs.pop('resample', Image.BILINEAR)
    if isinstance(method, (list, tuple)):
        method = random.choice(method)
    return method
def _check_args_tf(kwargs):
    """Normalize affine-transform kwargs for the installed Pillow version.

    Drops 'fillcolor' on Pillow < 5.0 (unsupported there) and resolves
    'resample' to a single interpolation mode in place.
    """
    if ('fillcolor' in kwargs) and (_PIL_VER < (5, 0)):
        del kwargs['fillcolor']
    kwargs['resample'] = _interpolation(kwargs)
def shear_x(img, factor, **kwargs):
    """Shear *img* horizontally by *factor*."""
    _check_args_tf(kwargs)
    matrix = (1, factor, 0, 0, 1, 0)
    return img.transform(img.size, Image.AFFINE, matrix, **kwargs)
def shear_y(img, factor, **kwargs):
    """Shear *img* vertically by *factor*."""
    _check_args_tf(kwargs)
    matrix = (1, 0, 0, factor, 1, 0)
    return img.transform(img.size, Image.AFFINE, matrix, **kwargs)
def translate_x_rel(img, pct, **kwargs):
    """Translate *img* horizontally by *pct* of its width."""
    _check_args_tf(kwargs)
    pixels = pct * img.size[0]
    matrix = (1, 0, pixels, 0, 1, 0)
    return img.transform(img.size, Image.AFFINE, matrix, **kwargs)
def translate_y_rel(img, pct, **kwargs):
    """Translate *img* vertically by *pct* of its height."""
    _check_args_tf(kwargs)
    pixels = pct * img.size[1]
    matrix = (1, 0, 0, 0, 1, pixels)
    return img.transform(img.size, Image.AFFINE, matrix, **kwargs)
def translate_x_abs(img, pixels, **kwargs):
    """Translate *img* horizontally by an absolute number of *pixels*."""
    _check_args_tf(kwargs)
    matrix = (1, 0, pixels, 0, 1, 0)
    return img.transform(img.size, Image.AFFINE, matrix, **kwargs)
def translate_y_abs(img, pixels, **kwargs):
    """Translate *img* vertically by an absolute number of *pixels*."""
    _check_args_tf(kwargs)
    matrix = (1, 0, 0, 0, 1, pixels)
    return img.transform(img.size, Image.AFFINE, matrix, **kwargs)
def rotate(img, degrees, **kwargs):
    """Rotate *img* by *degrees*, dispatching on the installed Pillow version."""
    _check_args_tf(kwargs)
    if (_PIL_VER >= (5, 2)):
        # Pillow >= 5.2 accepts the extra kwargs (e.g. fillcolor) directly.
        return img.rotate(degrees, **kwargs)
    elif (_PIL_VER >= (5, 0)):
        # Pillow 5.0/5.1: emulate rotation about the image center with an
        # affine transform. transform() expects the INVERSE mapping, hence
        # the negated angle below.
        (w, h) = img.size
        post_trans = (0, 0)
        rotn_center = ((w / 2.0), (h / 2.0))
        angle = (- math.radians(degrees))
        matrix = [round(math.cos(angle), 15), round(math.sin(angle), 15), 0.0, round((- math.sin(angle)), 15), round(math.cos(angle), 15), 0.0]

        def transform(x, y, matrix):
            # Apply the affine matrix (a, b, c, d, e, f) to point (x, y).
            (a, b, c, d, e, f) = matrix
            return ((((a * x) + (b * y)) + c), (((d * x) + (e * y)) + f))
        # Shift the rotation center to the origin, rotate, then shift back so
        # the rotation pivots around the image center.
        (matrix[2], matrix[5]) = transform(((- rotn_center[0]) - post_trans[0]), ((- rotn_center[1]) - post_trans[1]), matrix)
        matrix[2] += rotn_center[0]
        matrix[5] += rotn_center[1]
        return img.transform(img.size, Image.AFFINE, matrix, **kwargs)
    else:
        # Very old Pillow: only the resample argument is supported.
        return img.rotate(degrees, resample=kwargs['resample'])
def auto_contrast(img, **__):
    """Maximize image contrast via ImageOps.autocontrast (extra kwargs ignored)."""
    return ImageOps.autocontrast(img)
def invert(img, **__):
    """Invert (negate) the image (extra kwargs ignored)."""
    return ImageOps.invert(img)
def equalize(img, **__):
    """Equalize the image histogram (extra kwargs ignored)."""
    return ImageOps.equalize(img)
def solarize(img, thresh, **__):
    """Invert all pixel values above *thresh* (extra kwargs ignored)."""
    return ImageOps.solarize(img, thresh)
def solarize_add(img, add, thresh=128, **__):
    """Solarize-with-offset: add *add* (clamped to 255) to every pixel value
    below *thresh*; pixels at or above *thresh* are unchanged.

    Only applied to 'L' and 'RGB' images; any other mode is returned as-is.
    """
    lut = [min(255, i + add) if i < thresh else i for i in range(256)]
    if img.mode in ('L', 'RGB'):
        if img.mode == 'RGB' and len(lut) == 256:
            # point() on RGB expects one 256-entry table per band.
            lut = lut * 3
        return img.point(lut)
    return img
def posterize(img, bits_to_keep, **__):
    """Reduce the image to *bits_to_keep* bits per channel.

    Keeping 8 or more bits is a no-op and returns the image unchanged.
    """
    if bits_to_keep >= 8:
        return img
    return ImageOps.posterize(img, bits_to_keep)
def contrast(img, factor, **__):
    """Adjust contrast by *factor* (1.0 = unchanged); extra kwargs ignored."""
    return ImageEnhance.Contrast(img).enhance(factor)
def color(img, factor, **__):
    """Adjust color saturation by *factor* (1.0 = unchanged); extra kwargs ignored."""
    return ImageEnhance.Color(img).enhance(factor)
def brightness(img, factor, **__):
    """Adjust brightness by *factor* (1.0 = unchanged); extra kwargs ignored."""
    return ImageEnhance.Brightness(img).enhance(factor)
def sharpness(img, factor, **__):
    """Adjust sharpness by *factor* (1.0 = unchanged); extra kwargs ignored."""
    return ImageEnhance.Sharpness(img).enhance(factor)