# NOTE: extraction artifact (dataset table header "code stringlengths 17 6.64M | / |---|"); not part of the source code.
def lengths_to_mask(lengths: List[int], device: torch.device, max_len: int=None) -> Tensor:
    """Build a boolean padding mask from per-sequence lengths.

    Args:
        lengths: length of each sequence in the batch.
        device: device on which to allocate the mask.
        max_len: number of mask columns; defaults to ``max(lengths)``.

    Returns:
        Bool tensor of shape ``(len(lengths), max_len)``; True marks valid frames.
    """
    lengths = torch.tensor(lengths, device=device)
    # BUGFIX: the original used `if max_len`, which silently ignored an
    # explicit caller-provided max_len of 0; test against None instead.
    max_len = max_len if max_len is not None else int(lengths.max())
    mask = torch.arange(max_len, device=device).expand(len(lengths), max_len) < lengths.unsqueeze(1)
    return mask
|
def detach_to_numpy(tensor):
    """Detach *tensor* from the autograd graph and return it as a CPU NumPy array."""
    detached = tensor.detach()
    return detached.cpu().numpy()
|
def remove_padding(tensors, lengths):
    """Trim each padded sequence back to its true length."""
    trimmed = []
    for seq, seq_len in zip(tensors, lengths):
        trimmed.append(seq[:seq_len])
    return trimmed
|
def nfeats_of(rottype):
    """Return the number of scalar features for a rotation representation.

    Args:
        rottype: name of the rotation parametrization.

    Returns:
        3 for axis-angle, 4 for quaternion, 6 for rot6d, 9 for flat matrices.

    Raises:
        TypeError: if the rotation type is unknown.
    """
    if rottype in ['rotvec', 'axisangle']:
        return 3
    elif rottype in ['rotquat', 'quaternion']:
        return 4
    elif rottype in ['rot6d', '6drot', 'rotation6d']:
        return 6
    elif rottype in ['rotmat']:
        return 9
    else:
        # BUGFIX: the original *returned* the TypeError instance instead of
        # raising it, so callers silently received an exception object.
        raise TypeError("This rotation type doesn't have features.")
|
def axis_angle_to(newtype, rotations):
    """Convert axis-angle rotations to the representation named by *newtype*."""
    if newtype in ['matrix']:
        return geometry.axis_angle_to_matrix(rotations)
    if newtype in ['rotmat']:
        # Go through 3x3 matrices, then flatten to 9 features.
        return matrix_to('rotmat', geometry.axis_angle_to_matrix(rotations))
    if newtype in ['rot6d', '6drot', 'rotation6d']:
        return matrix_to('rot6d', geometry.axis_angle_to_matrix(rotations))
    if newtype in ['rotquat', 'quaternion']:
        return geometry.axis_angle_to_quaternion(rotations)
    if newtype in ['rotvec', 'axisangle']:
        # Already axis-angle: nothing to do.
        return rotations
    raise NotImplementedError
|
def matrix_to(newtype, rotations):
    """Convert rotation matrices (..., 3, 3) to the representation *newtype*."""
    if newtype in ['matrix']:
        return rotations
    if newtype in ['rotmat']:
        # Flatten the trailing 3x3 block into 9 features.
        return rotations.reshape((*rotations.shape[:-2], 9))
    if newtype in ['rot6d', '6drot', 'rotation6d']:
        return geometry.matrix_to_rotation_6d(rotations)
    if newtype in ['rotquat', 'quaternion']:
        return geometry.matrix_to_quaternion(rotations)
    if newtype in ['rotvec', 'axisangle']:
        return geometry.matrix_to_axis_angle(rotations)
    raise NotImplementedError
|
def to_matrix(oldtype, rotations):
    """Convert rotations given in *oldtype* representation to 3x3 matrices.

    Args:
        oldtype: name of the input rotation parametrization.
        rotations: rotation tensor in that parametrization.

    Returns:
        Rotation matrices of shape (..., 3, 3).

    Raises:
        NotImplementedError: for unknown representations.
    """
    if oldtype in ['matrix']:
        return rotations
    if oldtype in ['rotmat']:
        # BUGFIX: flat 'rotmat' input has shape (..., 9), so only the LAST
        # axis must be unflattened. The original used shape[:-2], dropping
        # one leading dimension too many (it broke the round trip with
        # matrix_to('rotmat', ...)).
        return rotations.reshape((*rotations.shape[:-1], 3, 3))
    elif oldtype in ['rot6d', '6drot', 'rotation6d']:
        return geometry.rotation_6d_to_matrix(rotations)
    elif oldtype in ['rotquat', 'quaternion']:
        return geometry.quaternion_to_matrix(rotations)
    elif oldtype in ['rotvec', 'axisangle']:
        return geometry.axis_angle_to_matrix(rotations)
    else:
        raise NotImplementedError
|
def subsample(num_frames, last_framerate, new_framerate):
    """Return frame indices that downsample from *last_framerate* to *new_framerate*."""
    stride = int(last_framerate / new_framerate)
    assert stride >= 1
    return np.arange(0, num_frames, stride)
|
def upsample(motion, last_framerate, new_framerate):
    """Linearly interpolate *motion* from *last_framerate* up to *new_framerate*.

    Inserts (step - 1) interpolated frames between every pair of consecutive
    frames, then re-appends the final original frame.
    """
    step = int(new_framerate / last_framerate)
    assert step >= 1
    # Interpolation weights 0, 1/step, ..., 1 between consecutive frames.
    alpha = np.linspace(0, 1, step + 1)
    # Blend each frame pair: (1 - a) * frame[i] + a * frame[i + 1].
    starts = np.einsum('l,...->l...', 1 - alpha, motion[:-1])
    ends = np.einsum('l,...->l...', alpha, motion[1:])
    blended = (starts + ends)[:-1]  # drop weight 1: it duplicates the next start
    output = np.concatenate(blended.swapaxes(1, 0))
    # Re-attach the very last original frame.
    return np.concatenate((output, motion[[-1]]))
|
def lengths_to_mask(lengths):
    """Boolean mask (batch, max_len) marking valid positions for each length."""
    longest = max(lengths)
    positions = torch.arange(longest, device=lengths.device)
    return positions.expand(len(lengths), longest) < lengths.unsqueeze(1)
|
def collate_tensors(batch):
    """Zero-pad a list of tensors to a common shape and stack them."""
    ndim = batch[0].dim()
    target = [max(b.size(d) for b in batch) for d in range(ndim)]
    canvas = batch[0].new_zeros((len(batch), *target))
    for idx, item in enumerate(batch):
        view = canvas[idx]
        for d in range(ndim):
            view = view.narrow(d, 0, item.size(d))
        view.add_(item)  # write the tensor into its top-left corner
    return canvas
|
def collate(batch):
    """Collate (data, label) pairs into padded tensors plus masks and lengths."""
    data = [item[0] for item in batch]
    labels = [item[1] for item in batch]
    # Sequence length is the size of the innermost axis of each sample.
    lengths = torch.as_tensor([len(item[0][0][0]) for item in batch])
    return {
        'x': collate_tensors(data),
        'y': torch.as_tensor(labels),
        'mask': lengths_to_mask(lengths),
        'lengths': lengths,
    }
|
def collate_data3d_slow(batch):
    """Collate a list of dicts by zero-padding every tensor field."""
    collated = {}
    for key in batch[0]:
        collated[key] = collate_tensors([sample[key] for sample in batch])
    return collated
|
def collate_data3d(batch):
    """Collate a list of dicts by stacking tensor fields; 'paths' stays a list."""
    collated = {}
    for key in batch[0]:
        values = [sample[key] for sample in batch]
        if key == 'paths':
            # Non-tensor metadata: keep as a plain Python list.
            collated[key] = values
        else:
            collated[key] = torch.stack(values, axis=0)
    return collated
|
def main():
    """Demo entry point: generate motions and save them as .npy files.

    Supported tasks (selected via cfg.DEMO): text-to-motion, motion style
    transfer, random latent sampling, and reconstruction.

    TODO (carried over from the original author notes): skip the prompt when
    text is supplied on the command line; unify all export paths into one
    function; fit SMPL and export FBX from this file.
    """
    cfg = parse_args(phase='demo')
    cfg.FOLDER = cfg.TEST.FOLDER
    cfg.Name = ('demo--' + cfg.NAME)
    logger = create_logger(cfg, phase='demo')
    # --- Resolve the demo task and its text/length inputs -------------------
    if cfg.DEMO.EXAMPLE:
        # Text prompts and lengths come from an example file.
        from GraphMotion.utils.demo_utils import load_example_input
        (text, length) = load_example_input(cfg.DEMO.EXAMPLE)
        task = 'Example'
    elif cfg.DEMO.TASK:
        task = cfg.DEMO.TASK
        text = None
    else:
        # Interactive mode: read text and length from stdin.
        task = 'Keyborad_input'
        text = input('Please enter texts, none for random latent sampling:')
        length = input('Please enter length, range 16~196, e.g. 50, none for random latent sampling:')
        if text:
            motion_path = input('Please enter npy_path for motion transfer, none for skip:')
        # NOTE(review): when `text` is empty, `motion_path` is never assigned
        # yet is read by the conditions below — short-circuiting on `text`
        # avoids the NameError, but this is fragile; confirm intent.
        if (text and (not motion_path)):
            cfg.DEMO.MOTION_TRANSFER = False
        elif (text and motion_path):
            joints = np.load(motion_path)
            frames = subsample(len(joints), last_framerate=cfg.DEMO.FRAME_RATE, new_framerate=cfg.DATASET.KIT.FRAME_RATE)
            joints_sample = torch.from_numpy(joints[frames]).float()
            # NOTE(review): `model` is not defined until get_model() below —
            # this branch raises NameError as written.
            features = model.transforms.joints2jfeats(joints_sample[None])
            # NOTE(review): `xx` is undefined; motion transfer looks unfinished.
            motion = xx
            cfg.DEMO.MOTION_TRANSFER = True
        # Default to 200 frames when the user gives no length.
        length = (200 if (not length) else length)
        length = [int(length)]
        text = [text]
    # --- Prepare output folder and device ------------------------------------
    output_dir = Path(os.path.join(cfg.FOLDER, str(cfg.model.model_type), str(cfg.NAME), ('samples_' + cfg.TIME)))
    output_dir.mkdir(parents=True, exist_ok=True)
    if (cfg.ACCELERATOR == 'gpu'):
        os.environ['CUDA_VISIBLE_DEVICES'] = ','.join((str(x) for x in cfg.DEVICE))
        device = torch.device('cuda')
    # NOTE(review): `device` is only assigned on the GPU path; model.to(device)
    # below will NameError on a CPU-only configuration.
    dataset = get_datasets(cfg, logger=logger, phase='test')[0]
    total_time = time.time()
    model = get_model(cfg, dataset)
    if (not text):
        logger.info(f'Begin specific task{task}')
    logger.info('Loading checkpoints from {}'.format(cfg.TEST.CHECKPOINTS))
    state_dict = torch.load(cfg.TEST.CHECKPOINTS, map_location='cpu')['state_dict']
    model.load_state_dict(state_dict, strict=True)
    logger.info('model {} loaded'.format(cfg.model.model_type))
    model.sample_mean = cfg.TEST.MEAN
    model.fact = cfg.TEST.FACT
    model.to(device)
    model.eval()
    mld_time = time.time()
    with torch.no_grad():
        rep_lst = []
        rep_ref_lst = []
        texts_lst = []
        # --- Text-conditioned generation (optionally motion transfer) --------
        if text:
            batch = {'length': length, 'text': text}
            for rep in range(cfg.DEMO.REPLICATION):
                if cfg.DEMO.MOTION_TRANSFER:
                    joints = model.forward_motion_style_transfer(batch)
                else:
                    joints = model(batch)
                infer_time = (time.time() - mld_time)
                num_batch = 1
                num_all_frame = sum(batch['length'])
                num_ave_frame = (sum(batch['length']) / len(batch['length']))
                nsample = len(joints)
                # NOTE(review): `id` shadows the builtin and is always 0 here.
                id = 0
                for i in range(nsample):
                    npypath = str((output_dir / f'{task}_{length[i]}_batch{id}_{i}.npy'))
                    with open(npypath.replace('.npy', '.txt'), 'w') as text_file:
                        text_file.write(batch['text'][i])
                    np.save(npypath, joints[i].detach().cpu().numpy())
                    logger.info(f'''Motions are generated here:
{npypath}''')
                if cfg.DEMO.OUTALL:
                    rep_lst.append(joints)
                    texts_lst.append(batch['text'])
            # Optionally merge all replications into a single npy file.
            if cfg.DEMO.OUTALL:
                grouped_lst = []
                for n in range(nsample):
                    grouped_lst.append(torch.cat([r[n][None] for r in rep_lst], dim=0)[None])
                combinedOut = torch.cat(grouped_lst, dim=0)
                try:
                    # NOTE(review): reuses loop variable `i` from the loop above.
                    npypath = str((output_dir / f'{task}_{length[i]}_all.npy'))
                    np.save(npypath, combinedOut.detach().cpu().numpy())
                    with open(npypath.replace('npy', 'txt'), 'w') as text_file:
                        for texts in texts_lst:
                            for text in texts:
                                text_file.write(text)
                                text_file.write('\n')
                    logger.info(f'''All reconstructed motions are generated here:
{npypath}''')
                except:
                    # NOTE(review): bare except masks the real failure; the
                    # torch.cat above already raised before reaching here.
                    raise ValueError('Lengths of motions are different, so we cannot save all motions in one file.')
        # --- Tasks without text: latent sampling / reconstruction ------------
        if (not text):
            if (task == 'random_sampling'):
                text = 'random sampling'
                length = 196
                (nsample, latent_dim) = (500, 256)
                batch = {'latent': torch.randn(1, nsample, latent_dim, device=model.device), 'length': ([int(length)] * nsample)}
                joints = model.gen_from_latent(batch)
                # Hard-coded stats used only for the timing printout below.
                (num_batch, num_all_frame, num_ave_frame) = (100, (100 * 196), 196)
                infer_time = (time.time() - mld_time)
                for i in range(nsample):
                    npypath = (output_dir / f"{text.split(' ')[0]}_{length}_{i}.npy")
                    np.save(npypath, joints[i].detach().cpu().numpy())
                    logger.info(f'''Motions are generated here:
{npypath}''')
            elif (task in ['reconstrucion', 'text_motion']):
                for rep in range(cfg.DEMO.REPLICATION):
                    logger.info(f'Replication {rep}')
                    joints_lst = []
                    ref_lst = []
                    for (id, batch) in enumerate(dataset.test_dataloader()):
                        if (task == 'reconstrucion'):
                            batch['motion'] = batch['motion'].to(device)
                            length = batch['length']
                            (joints, joints_ref) = model.recon_from_motion(batch)
                        elif (task == 'text_motion'):
                            batch['motion'] = batch['motion'].to(device)
                            (joints, joints_ref) = model(batch, return_ref=True)
                        nsample = len(joints)
                        length = batch['length']
                        for i in range(nsample):
                            npypath = str((output_dir / f'{task}_{length[i]}_batch{id}_{i}_{rep}.npy'))
                            np.save(npypath, joints[i].detach().cpu().numpy())
                            np.save(npypath.replace('.npy', '_ref.npy'), joints_ref[i].detach().cpu().numpy())
                            with open(npypath.replace('.npy', '.txt'), 'w') as text_file:
                                text_file.write(batch['text'][i])
                        logger.info(f'''Reconstructed motions are generated here:
{npypath}''')
            else:
                raise ValueError(f'Not support task {task}, only support random_sampling, reconstrucion, text_motion')
    # --- Timing summary -------------------------------------------------------
    total_time = (time.time() - total_time)
    # NOTE(review): `infer_time`/`num_batch`/... are only set on some branches;
    # the prints below can NameError when no generation branch ran. Several of
    # the lines are also duplicates.
    print(f'MLD Infer time - This/Ave batch: {(infer_time / num_batch):.2f}')
    print(f'MLD Infer FPS - Total batch: {(num_all_frame / infer_time):.2f}')
    print(f'MLD Infer time - This/Ave batch: {(infer_time / num_batch):.2f}')
    print(f'MLD Infer FPS - Total batch: {(num_all_frame / infer_time):.2f}')
    print(f'MLD Infer FPS - Running Poses Per Second: {((num_ave_frame * infer_time) / num_batch):.2f}')
    print(f'MLD Infer FPS - {(num_all_frame / infer_time):.2f}s')
    print(f'MLD Infer FPS - Running Poses Per Second: {((num_ave_frame * infer_time) / num_batch):.2f}')
    print(f'MLD Infer FPS - time for 100 Poses: {((infer_time / (num_batch * num_ave_frame)) * 100):.2f}')
    print(f'Total time spent: {total_time:.2f} seconds (including model loading time and exporting time).')
    if cfg.DEMO.RENDER:
        # Optionally render the exported npy files with Blender.
        from GraphMotion.utils.demo_utils import render_batch
        blenderpath = cfg.RENDER.BLENDER_PATH
        render_batch(os.path.dirname(npypath), execute_python=blenderpath, mode='sequence')
        logger.info(f'''Motions are rendered here:
{os.path.dirname(npypath)}''')
|
class ProgressLogger(Callback):
    """Lightning callback that logs monitored metrics at the end of each epoch."""

    def __init__(self, metric_monitor: dict, precision: int=3):
        # Mapping of display-name -> key inside trainer.callback_metrics.
        self.metric_monitor = metric_monitor
        # Significant digits used when formatting metric values.
        self.precision = precision

    def on_train_start(self, trainer: Trainer, pl_module: LightningModule, **kwargs) -> None:
        logger.info('Training started')

    def on_train_end(self, trainer: Trainer, pl_module: LightningModule, **kwargs) -> None:
        logger.info('Training done')

    def on_validation_epoch_end(self, trainer: Trainer, pl_module: LightningModule, **kwargs) -> None:
        if trainer.sanity_checking:
            logger.info('Sanity checking ok.')

    def on_train_epoch_end(self, trainer: Trainer, pl_module: LightningModule, padding=False, **kwargs) -> None:
        """Emit one log line with every monitored metric present this epoch."""
        value_format = f'{{:.{self.precision}e}}'
        header = f'Epoch {trainer.current_epoch}'
        if padding:
            header = f"{header:>{len('Epoch xxxx')}}"
        available = trainer.callback_metrics
        pieces = []
        for display_name, metric_key in self.metric_monitor.items():
            if metric_key in available:
                formatted = value_format.format(available[metric_key].item())
                pieces.append(f'{display_name} {formatted}')
        if not pieces:
            return  # nothing tracked this epoch; stay quiet
        memory = f'Memory {psutil.virtual_memory().percent}%'
        logger.info(header + ': ' + ' '.join(pieces) + ' ' + memory)
|
def get_module_config(cfg_model, path='modules'):
    """Merge every YAML file under ./configs/<path>/ into *cfg_model*."""
    config_dir = f'./configs/{path}/'
    for filename in os.listdir(config_dir):
        if not filename.endswith('.yaml'):
            continue
        with open(config_dir + filename, 'r') as f:
            cfg_model.merge_with(OmegaConf.load(f))
    return cfg_model
|
def get_obj_from_str(string, reload=False):
    """Resolve a dotted path like 'pkg.mod.Class' to the named attribute."""
    module_name, attr_name = string.rsplit('.', 1)
    if reload:
        # Force a re-import so on-disk code changes are picked up.
        importlib.reload(importlib.import_module(module_name))
    return getattr(importlib.import_module(module_name, package=None), attr_name)
|
def instantiate_from_config(config):
    """Instantiate config['target'] with config['params'] as keyword arguments."""
    if 'target' not in config:
        # Sentinel configs that deliberately stand for "no object".
        if config in ('__is_first_stage__', '__is_unconditional__'):
            return None
        raise KeyError('Expected key `target` to instantiate.')
    target_cls = get_obj_from_str(config['target'])
    return target_cls(**config.get('params', dict()))
|
def parse_args(phase='train'):
    """Parse CLI arguments for *phase* and merge them into the OmegaConf config.

    Config layering: ./configs/base.yaml <- --cfg <- per-module configs
    <- --cfg_assets, then phase-specific command-line overrides on top.

    Args:
        phase: one of 'train', 'test', 'demo', 'render'.

    Returns:
        The merged OmegaConf configuration.
    """
    parser = ArgumentParser()
    group = parser.add_argument_group('Training options')
    if phase in ['train', 'test', 'demo']:
        group.add_argument('--cfg', type=str, required=False, default='./configs/config.yaml', help='config file')
        group.add_argument('--cfg_assets', type=str, required=False, default='./configs/assets.yaml', help='config file for asset paths')
        group.add_argument('--batch_size', type=int, required=False, help='training batch size')
        group.add_argument('--device', type=int, nargs='+', required=False, help='training device')
        group.add_argument('--nodebug', action='store_true', required=False, help='debug or not')
        group.add_argument('--dir', type=str, required=False, help='evaluate existing npys')
    if phase == 'demo':
        group.add_argument('--render', action='store_true', help='Render visulizaed figures')
        group.add_argument('--render_mode', type=str, help='video or sequence')
        group.add_argument('--frame_rate', type=float, default=12.5, help='the frame rate for the input/output motion')
        group.add_argument('--replication', type=int, default=1, help='the frame rate for the input/output motion')
        group.add_argument('--example', type=str, required=False, help='input text and lengths with txt format')
        group.add_argument('--task', type=str, required=False, help='random_sampling, reconstrucion or text_motion')
        group.add_argument('--out_dir', type=str, required=False, help='output dir')
        group.add_argument('--allinone', action='store_true', required=False, help='output seperate or combined npy file')
    if phase == 'render':
        group.add_argument('--cfg', type=str, required=False, default='./configs/render.yaml', help='config file')
        group.add_argument('--cfg_assets', type=str, required=False, default='./configs/assets.yaml', help='config file for asset paths')
        group.add_argument('--npy', type=str, required=False, default=None, help='npy motion files')
        group.add_argument('--dir', type=str, required=False, default=None, help='npy motion folder')
        group.add_argument('--mode', type=str, required=False, default='sequence', help='render target: video, sequence, frame')
        group.add_argument('--joint_type', type=str, required=False, default=None, help='mmm or vertices for skeleton')
    params = parser.parse_args()
    # Layered config: base <- experiment cfg <- per-module cfgs <- assets.
    cfg_base = OmegaConf.load('./configs/base.yaml')
    cfg_exp = OmegaConf.merge(cfg_base, OmegaConf.load(params.cfg))
    cfg_model = get_module_config(cfg_exp.model, cfg_exp.model.target)
    cfg_assets = OmegaConf.load(params.cfg_assets)
    cfg = OmegaConf.merge(cfg_exp, cfg_model, cfg_assets)
    if phase in ['train', 'test']:
        cfg.TRAIN.BATCH_SIZE = (params.batch_size if params.batch_size else cfg.TRAIN.BATCH_SIZE)
        cfg.DEVICE = (params.device if params.device else cfg.DEVICE)
        # BUGFIX: --nodebug is a store_true flag, so `params.nodebug is not
        # None` was always true and DEBUG was force-set to True whenever the
        # flag was omitted. Only override DEBUG when the flag is passed.
        cfg.DEBUG = (False if params.nodebug else cfg.DEBUG)
        cfg.DEBUG = (False if (phase == 'test') else cfg.DEBUG)
        if phase == 'test':
            cfg.DEBUG = False
            cfg.DEVICE = [0]
            print('Force no debugging and one gpu when testing')
        cfg.TEST.TEST_DIR = (params.dir if params.dir else cfg.TEST.TEST_DIR)
    if phase == 'demo':
        cfg.DEMO.RENDER = params.render
        cfg.DEMO.FRAME_RATE = params.frame_rate
        cfg.DEMO.EXAMPLE = params.example
        cfg.DEMO.TASK = params.task
        # BUGFIX: the guard tested params.dir instead of params.out_dir, so
        # --out_dir was silently ignored unless --dir was also given.
        cfg.TEST.FOLDER = (params.out_dir if params.out_dir else cfg.TEST.FOLDER)
        cfg.DEMO.REPLICATION = params.replication
        cfg.DEMO.OUTALL = params.allinone
    if phase == 'render':
        if params.npy:
            cfg.RENDER.NPY = params.npy
            cfg.RENDER.INPUT_MODE = 'npy'
        if params.dir:
            cfg.RENDER.DIR = params.dir
            cfg.RENDER.INPUT_MODE = 'dir'
        cfg.RENDER.JOINT_TYPE = params.joint_type
        cfg.RENDER.MODE = params.mode
    if cfg.DEBUG:
        # Debug runs: tag the run name, keep WANDB offline, validate every step.
        cfg.NAME = ('debug--' + cfg.NAME)
        cfg.LOGGER.WANDB.OFFLINE = True
        cfg.LOGGER.VAL_EVERY_STEPS = 1
    return cfg
|
class HumanML3DDataModule(BASEDataModule):
    """Lightning data module for the HumanML3D text-to-motion dataset."""

    def __init__(self, cfg, batch_size, num_workers, collate_fn=None, phase='train', **kwargs):
        super().__init__(batch_size=batch_size, num_workers=num_workers, collate_fn=collate_fn)
        self.save_hyperparameters(logger=False)
        self.name = 'humanml3d'
        self.njoints = 22
        # Text-only phase needs no motion data.
        self.Dataset = TextOnlyDataset if phase == 'text_only' else Text2MotionDatasetV2
        self.cfg = cfg
        overrides = {'split': 'val', 'tiny': True, 'progress_bar': False}
        # Build a tiny sample set once, just to discover the feature dimension.
        self._sample_set = self.get_sample_set(overrides=overrides)
        self.nfeats = self._sample_set.nfeats

    def feats2joints(self, features):
        """Denormalize features and recover joint positions from them."""
        mean = torch.tensor(self.hparams.mean).to(features)
        std = torch.tensor(self.hparams.std).to(features)
        return recover_from_ric(features * std + mean, self.njoints)

    def joints2feats(self, features):
        """Convert raw joints into the dataset's feature representation."""
        return process_file(features, self.njoints)[0]

    def renorm4t2m(self, features):
        """Re-normalize features from training stats to evaluator stats."""
        ori_mean = torch.tensor(self.hparams.mean).to(features)
        ori_std = torch.tensor(self.hparams.std).to(features)
        eval_mean = torch.tensor(self.hparams.mean_eval).to(features)
        eval_std = torch.tensor(self.hparams.std_eval).to(features)
        denormed = features * ori_std + ori_mean
        return (denormed - eval_mean) / eval_std

    def mm_mode(self, mm_on=True):
        """Toggle multimodality evaluation: restrict the test set to sampled names."""
        if mm_on:
            self.is_mm = True
            self.name_list = self.test_dataset.name_list
            self.mm_list = np.random.choice(self.name_list, self.cfg.TEST.MM_NUM_SAMPLES, replace=False)
            self.test_dataset.name_list = self.mm_list
        else:
            self.is_mm = False
            self.test_dataset.name_list = self.name_list
|
class Humanact12DataModule(BASEDataModule):
    """Lightning data module for the HumanAct12 action-to-motion dataset."""

    def __init__(self, cfg, batch_size, num_workers, collate_fn=None, phase='train', **kwargs):
        super().__init__(batch_size=batch_size, num_workers=num_workers, collate_fn=collate_fn)
        self.save_hyperparameters(logger=False)
        self.name = 'HumanAct12'
        self.Dataset = HumanAct12Poses
        self.cfg = cfg
        # NOTE: kept for parity with the other data modules; currently unused here.
        sample_overrides = {'num_seq_max': 2, 'split': 'test', 'tiny': True, 'progress_bar': False}
        # Fixed dataset dimensions.
        self.nfeats = 150
        self.njoints = 25
        self.nclasses = 12
|
class KitDataModule(BASEDataModule):
    """Lightning data module for the KIT-ML text-to-motion dataset."""

    def __init__(self, cfg, phase='train', collate_fn=all_collate, batch_size: int=32, num_workers: int=16, **kwargs):
        super().__init__(batch_size=batch_size, num_workers=num_workers, collate_fn=collate_fn)
        self.save_hyperparameters(logger=False)
        self.name = 'kit'
        self.njoints = 21
        # Text-only phase needs no motion data.
        self.Dataset = TextOnlyDataset if phase == 'text_only' else Text2MotionDatasetV2
        self.cfg = cfg
        overrides = {'split': 'val', 'tiny': True, 'progress_bar': False}
        # Build a tiny sample set once, just to discover the feature dimension.
        self._sample_set = self.get_sample_set(overrides=overrides)
        self.nfeats = self._sample_set.nfeats

    def feats2joints(self, features):
        """Denormalize features and recover joint positions from them."""
        mean = torch.tensor(self.hparams.mean).to(features)
        std = torch.tensor(self.hparams.std).to(features)
        return recover_from_ric(features * std + mean, self.njoints)

    def renorm4t2m(self, features):
        """Re-normalize features from training stats to evaluator stats."""
        ori_mean = torch.tensor(self.hparams.mean).to(features)
        ori_std = torch.tensor(self.hparams.std).to(features)
        eval_mean = torch.tensor(self.hparams.mean_eval).to(features)
        eval_std = torch.tensor(self.hparams.std_eval).to(features)
        denormed = features * ori_std + ori_mean
        return (denormed - eval_mean) / eval_std

    def mm_mode(self, mm_on=True):
        """Toggle multimodality evaluation on the test split."""
        if mm_on:
            self.is_mm = True
            self.name_list = self.test_dataset.name_list
            self.mm_list = np.random.choice(self.name_list, self.cfg.TEST.MM_NUM_SAMPLES, replace=False)
            self.test_dataset.name_list = self.mm_list
        else:
            self.is_mm = False
            self.test_dataset.name_list = self.name_list
|
class UestcDataModule(BASEDataModule):
    """Lightning data module for the UESTC action-to-motion dataset."""

    def __init__(self, cfg, batch_size, num_workers, collate_fn=None, method_name='vibe', phase='train', **kwargs):
        super().__init__(batch_size=batch_size, num_workers=num_workers, collate_fn=collate_fn)
        self.save_hyperparameters(logger=False)
        self.name = 'Uestc'
        self.Dataset = UESTC
        self.cfg = cfg
        # Fixed dataset dimensions.
        self.nfeats = 150
        self.njoints = 25
        self.nclasses = 40
|
class HumanAct12Poses(Dataset):
    """HumanAct12 pose dataset loaded from a single pickle file."""

    dataname = 'humanact12'

    def __init__(self, datapath='data/HumanAct12Poses', **kargs):
        self.datapath = datapath
        super().__init__(**kargs)
        pkldatafilepath = os.path.join(datapath, 'humanact12poses.pkl')
        with rich.progress.open(pkldatafilepath, 'rb', description='loading humanact12 pkl') as f:
            data = pkl.load(f)
        self._pose = list(data['poses'])
        self._num_frames_in_video = [p.shape[0] for p in self._pose]
        self._joints = list(data['joints3D'])
        self._actions = list(data['y'])
        total_num_actions = 12
        self.num_classes = total_num_actions
        # Every sequence is part of the training split.
        self._train = list(range(len(self._pose)))
        keep_actions = np.arange(0, total_num_actions)
        self._action_to_label = {action: label for (label, action) in enumerate(keep_actions)}
        self._label_to_action = dict(enumerate(keep_actions))
        self._action_classes = humanact12_coarse_action_enumerator

    def _load_joints3D(self, ind, frame_ix):
        """Return 3D joints for sequence *ind* at the given frame indices."""
        return self._joints[ind][frame_ix]

    def _load_rotvec(self, ind, frame_ix):
        """Return per-joint axis-angle poses reshaped to (frames, 24, 3)."""
        return self._pose[ind][frame_ix].reshape(-1, 24, 3)
|
def parse_info_name(path):
    """Parse a filename like 'A12B34.ext' into {'A': '12', 'B': '34'}.

    Each ASCII letter in the stem starts a new field; subsequent non-letter
    characters are accumulated as that field's value.
    """
    stem = os.path.splitext(os.path.split(path)[-1])[0]
    fields = {}
    current = None
    for ch in stem:
        if ch in string.ascii_letters:
            fields[ch] = []
            current = ch
        else:
            fields[current].append(ch)
    return {key: ''.join(chars) for key, chars in fields.items()}
|
def to_numpy(tensor):
    """Return *tensor* as a NumPy array; numpy inputs pass through unchanged."""
    if torch.is_tensor(tensor):
        return tensor.cpu().numpy()
    if type(tensor).__module__ != 'numpy':
        raise ValueError('Cannot convert {} to numpy array'.format(type(tensor)))
    return tensor
|
def to_torch(ndarray):
    """Return *ndarray* as a torch tensor; tensors pass through unchanged."""
    if type(ndarray).__module__ == 'numpy':
        return torch.from_numpy(ndarray)
    if not torch.is_tensor(ndarray):
        raise ValueError('Cannot convert {} to torch tensor'.format(type(ndarray)))
    return ndarray
|
def cleanexit():
    """Exit the process immediately, even if SystemExit is being intercepted."""
    import os
    import sys
    try:
        sys.exit(0)
    except SystemExit:
        # Hard-exit, bypassing further handlers (useful in worker processes).
        os._exit(0)
|
def lengths_to_mask(lengths):
    """Build a (batch, max_len) boolean validity mask from a tensor of lengths."""
    width = max(lengths)
    steps = torch.arange(width, device=lengths.device)
    return steps.expand(len(lengths), width) < lengths.unsqueeze(1)
|
def collate_tensors(batch):
    """Stack tensors of unequal shapes into one zero-padded tensor."""
    rank = batch[0].dim()
    padded_shape = tuple(max(t.size(d) for t in batch) for d in range(rank))
    out = batch[0].new_zeros((len(batch),) + padded_shape)
    for row, tensor in enumerate(batch):
        slot = out[row]
        for axis in range(rank):
            slot = slot.narrow(axis, 0, tensor.size(axis))
        slot.add_(tensor)  # copy the tensor into its top-left corner
    return out
|
def collate(batch):
    """Build a padded training batch from (data, label) samples."""
    xs = [sample[0] for sample in batch]
    ys = [sample[1] for sample in batch]
    # Sequence length comes from the innermost axis of each sample.
    seq_lens = torch.as_tensor([len(sample[0][0][0]) for sample in batch])
    return {
        'x': collate_tensors(xs),
        'y': torch.as_tensor(ys),
        'mask': lengths_to_mask(seq_lens),
        'lengths': seq_lens,
    }
|
class BASEDataModule(pl.LightningDataModule):
    """Shared Lightning data-module base: lazy per-split datasets + dataloaders."""
    def __init__(self, collate_fn, batch_size: int, num_workers: int):
        super().__init__()
        # Common kwargs reused by every dataloader built below.
        self.dataloader_options = {'batch_size': batch_size, 'num_workers': num_workers, 'collate_fn': collate_fn}
        self.persistent_workers = True
        # Multimodality-evaluation flag; toggled by subclasses' mm_mode().
        self.is_mm = False
    def get_sample_set(self, overrides={}):
        """Build a small dataset on the EVAL split with hparams overridden.

        NOTE(review): `overrides` is a mutable default argument; it is only
        read here, never mutated, so this is safe but fragile.
        """
        sample_params = self.hparams.copy()
        sample_params.update(overrides)
        # NOTE(review): eval() on cfg paths works but getattr/cfg[...] would be safer.
        split_file = pjoin(eval(f'self.cfg.DATASET.{self.name.upper()}.SPLIT_ROOT'), (self.cfg.EVAL.SPLIT + '.txt'))
        return self.Dataset(split_file=split_file, **sample_params)
    def __getattr__(self, item):
        """Lazily create `<split>_dataset` attributes (train/val/test) on first access."""
        if (item.endswith('_dataset') and (not item.startswith('_'))):
            subset = item[:(- len('_dataset'))]
            # Cache under a leading-underscore name so __getattr__ is not re-entered.
            item_c = ('_' + item)
            if (item_c not in self.__dict__):
                # 'val' reads its split from cfg.EVAL; other splits from cfg.<SPLIT>.
                subset = (subset.upper() if (subset != 'val') else 'EVAL')
                split = eval(f'self.cfg.{subset}.SPLIT')
                split_file = pjoin(eval(f'self.cfg.DATASET.{self.name.upper()}.SPLIT_ROOT'), (eval(f'self.cfg.{subset}.SPLIT') + '.txt'))
                self.__dict__[item_c] = self.Dataset(split_file=split_file, split=split, **self.hparams)
            return getattr(self, item_c)
        classname = self.__class__.__name__
        raise AttributeError(f"'{classname}' object has no attribute '{item}'")
    def setup(self, stage=None):
        """Materialize the datasets needed for the given Lightning stage."""
        self.stage = stage
        if (stage in (None, 'fit')):
            # Touching the attributes triggers lazy construction in __getattr__.
            _ = self.train_dataset
            _ = self.val_dataset
        if (stage in (None, 'test')):
            _ = self.test_dataset
    def train_dataloader(self):
        """Shuffled loader over the training split."""
        return DataLoader(self.train_dataset, shuffle=True, persistent_workers=True, **self.dataloader_options)
    def predict_dataloader(self):
        """Unshuffled test-split loader; batch size 1 in multimodality mode."""
        dataloader_options = self.dataloader_options.copy()
        dataloader_options['batch_size'] = (1 if self.is_mm else self.cfg.TEST.BATCH_SIZE)
        dataloader_options['num_workers'] = self.cfg.TEST.NUM_WORKERS
        dataloader_options['shuffle'] = False
        return DataLoader(self.test_dataset, persistent_workers=True, **dataloader_options)
    def val_dataloader(self):
        """Unshuffled loader over the validation split using EVAL settings."""
        dataloader_options = self.dataloader_options.copy()
        dataloader_options['batch_size'] = self.cfg.EVAL.BATCH_SIZE
        dataloader_options['num_workers'] = self.cfg.EVAL.NUM_WORKERS
        dataloader_options['shuffle'] = False
        return DataLoader(self.val_dataset, persistent_workers=True, **dataloader_options)
    def test_dataloader(self):
        """Unshuffled test loader; batch size 1 in multimodality mode."""
        dataloader_options = self.dataloader_options.copy()
        dataloader_options['batch_size'] = (1 if self.is_mm else self.cfg.TEST.BATCH_SIZE)
        dataloader_options['num_workers'] = self.cfg.TEST.NUM_WORKERS
        dataloader_options['shuffle'] = False
        return DataLoader(self.test_dataset, persistent_workers=True, **dataloader_options)
|
def get_mean_std(phase, cfg, dataset_name):
    """Load (mean, std) normalization arrays for the given dataset and phase."""
    name = 't2m' if dataset_name == 'humanml3d' else dataset_name
    assert name in ['t2m', 'kit']
    if phase in ['val']:
        # Evaluation statistics come from the pretrained t2m evaluator checkpoints.
        if name == 't2m':
            data_root = pjoin(cfg.model.t2m_path, name, 'Comp_v6_KLD01', 'meta')
        elif name == 'kit':
            data_root = pjoin(cfg.model.t2m_path, name, 'Comp_v6_KLD005', 'meta')
        else:
            raise ValueError('Only support t2m and kit')
        mean = np.load(pjoin(data_root, 'mean.npy'))
        std = np.load(pjoin(data_root, 'std.npy'))
        return (mean, std)
    # Training statistics live in the dataset root (capitalized filenames).
    data_root = eval(f'cfg.DATASET.{dataset_name.upper()}.ROOT')
    mean = np.load(pjoin(data_root, 'Mean.npy'))
    std = np.load(pjoin(data_root, 'Std.npy'))
    return (mean, std)
|
def get_WordVectorizer(cfg, phase, dataset_name):
    """Return a WordVectorizer for text datasets, or None in text-only phases."""
    if phase in ['text_only']:
        return None
    if dataset_name.lower() in ['humanml3d', 'kit']:
        return WordVectorizer(cfg.DATASET.WORD_VERTILIZER_PATH, 'our_vab')
    raise ValueError('Only support WordVectorizer for HumanML3D')
|
def get_collate_fn(name, phase='train'):
    """Pick the collate function matching the dataset family.

    Args:
        name: dataset name (case-insensitive).
        phase: unused, kept for interface compatibility with callers.

    Returns:
        The collate callable for the dataset family.

    Raises:
        NotImplementedError: for dataset names with no registered collate fn.
            (The original silently returned None here, which only failed much
            later inside the DataLoader, far from the cause.)
    """
    if name.lower() in ['humanml3d', 'kit']:
        return mld_collate
    elif name.lower() in ['humanact12', 'uestc']:
        return a2m_collate
    else:
        raise NotImplementedError(f'No collate function registered for dataset {name}')
|
def get_datasets(cfg, logger=None, phase='train'):
    """Instantiate the data modules listed in cfg.<PHASE>.DATASETS.

    Also writes NFEATS/NJOINTS (and NCLASSES for action datasets) of the
    first constructed dataset back into cfg.DATASET.

    Raises:
        NotImplementedError: for 'amass' and any unknown dataset name.
    """
    # NOTE(review): eval() is used throughout to index cfg by dataset name;
    # getattr/cfg[...] lookups would be safer.
    dataset_names = eval(f'cfg.{phase.upper()}.DATASETS')
    datasets = []
    for dataset_name in dataset_names:
        if (dataset_name.lower() in ['humanml3d', 'kit']):
            data_root = eval(f'cfg.DATASET.{dataset_name.upper()}.ROOT')
            # Training stats for normalization; 'val' stats for the t2m evaluator.
            (mean, std) = get_mean_std(phase, cfg, dataset_name)
            (mean_eval, std_eval) = get_mean_std('val', cfg, dataset_name)
            wordVectorizer = get_WordVectorizer(cfg, phase, dataset_name)
            collate_fn = get_collate_fn(dataset_name, phase)
            if (dataset_name.lower() in ['kit']):
                # KIT uses a hard-coded minimum motion length of 24 frames.
                dataset = dataset_module_map[dataset_name.lower()](cfg=cfg, batch_size=cfg.TRAIN.BATCH_SIZE, num_workers=cfg.TRAIN.NUM_WORKERS, debug=cfg.DEBUG, collate_fn=collate_fn, mean=mean, std=std, mean_eval=mean_eval, std_eval=std_eval, w_vectorizer=wordVectorizer, text_dir=pjoin(data_root, 'texts'), motion_dir=pjoin(data_root, motion_subdir[dataset_name]), max_motion_length=cfg.DATASET.SAMPLER.MAX_LEN, min_motion_length=24, max_text_len=cfg.DATASET.SAMPLER.MAX_TEXT_LEN, unit_length=eval(f'cfg.DATASET.{dataset_name.upper()}.UNIT_LEN'))
                datasets.append(dataset)
            else:
                dataset = dataset_module_map[dataset_name.lower()](cfg=cfg, batch_size=cfg.TRAIN.BATCH_SIZE, num_workers=cfg.TRAIN.NUM_WORKERS, debug=cfg.DEBUG, collate_fn=collate_fn, mean=mean, std=std, mean_eval=mean_eval, std_eval=std_eval, w_vectorizer=wordVectorizer, text_dir=pjoin(data_root, 'texts'), motion_dir=pjoin(data_root, motion_subdir[dataset_name]), max_motion_length=cfg.DATASET.SAMPLER.MAX_LEN, min_motion_length=cfg.DATASET.SAMPLER.MIN_LEN, max_text_len=cfg.DATASET.SAMPLER.MAX_TEXT_LEN, unit_length=eval(f'cfg.DATASET.{dataset_name.upper()}.UNIT_LEN'))
                datasets.append(dataset)
        elif (dataset_name.lower() in ['humanact12', 'uestc']):
            collate_fn = get_collate_fn(dataset_name, phase)
            # Debug runs cap the number of sequences at 100.
            dataset = dataset_module_map[dataset_name.lower()](datapath=eval(f'cfg.DATASET.{dataset_name.upper()}.ROOT'), cfg=cfg, batch_size=cfg.TRAIN.BATCH_SIZE, num_workers=cfg.TRAIN.NUM_WORKERS, debug=cfg.DEBUG, collate_fn=collate_fn, num_frames=cfg.DATASET.HUMANACT12.NUM_FRAMES, sampling=cfg.DATASET.SAMPLER.SAMPLING, sampling_step=cfg.DATASET.SAMPLER.SAMPLING_STEP, pose_rep=cfg.DATASET.HUMANACT12.POSE_REP, max_len=cfg.DATASET.SAMPLER.MAX_LEN, min_len=cfg.DATASET.SAMPLER.MIN_LEN, num_seq_max=(cfg.DATASET.SAMPLER.MAX_SQE if (not cfg.DEBUG) else 100), glob=cfg.DATASET.HUMANACT12.GLOB, translation=cfg.DATASET.HUMANACT12.TRANSLATION)
            cfg.DATASET.NCLASSES = dataset.nclasses
            datasets.append(dataset)
        elif (dataset_name.lower() in ['amass']):
            raise NotImplementedError
        else:
            raise NotImplementedError
    # Propagate feature/joint dims of the first dataset into the global cfg.
    cfg.DATASET.NFEATS = datasets[0].nfeats
    cfg.DATASET.NJOINTS = datasets[0].njoints
    return datasets
|
def is_float(numStr):
    """Return True if *numStr* looks like a decimal number with a fractional part.

    Leading/trailing whitespace and leading sign characters are stripped
    before matching; plain integers (no '.') return False.
    """
    stripped = str(numStr).strip().lstrip('-').lstrip('+')
    # The original wrapped this in try/except with a print, but re.match on a
    # str cannot raise here — the handler was dead code, so it is removed.
    return bool(re.match(r'^[-+]?[0-9]+\.[0-9]+$', stripped))
|
def is_number(numStr):
    """Return True if *numStr* is all digits after stripping whitespace and sign."""
    cleaned = str(numStr).strip().lstrip('-').lstrip('+')
    return cleaned.isdigit()
|
def get_opt(opt_path, device):
    """Parse a text-format options file ('key: value' lines) into a Namespace.

    Values are coerced to bool/float/int/str, dataset-dependent paths and
    dimensions are attached, and evaluation-mode flags are forced.

    Args:
        opt_path: path to the saved options file.
        device: device object stored on the returned namespace.

    Returns:
        argparse.Namespace with all parsed and derived options.

    Raises:
        KeyError: if opt.dataset_name is neither 't2m' nor 'kit'.
    """
    opt = Namespace()
    opt_dict = vars(opt)
    # Banner/separator lines in the options file that carry no data.
    skip = ('-------------- End ----------------', '------------ Options -------------', '\n')
    print('Reading', opt_path)
    with open(opt_path) as f:
        for line in f:
            if line.strip() not in skip:
                (key, value) = line.strip().split(': ')
                if value in ('True', 'False'):
                    # BUGFIX: bool('False') is True (any non-empty string is
                    # truthy); compare against the literal instead.
                    opt_dict[key] = (value == 'True')
                elif is_float(value):
                    opt_dict[key] = float(value)
                elif is_number(value):
                    opt_dict[key] = int(value)
                else:
                    opt_dict[key] = str(value)
    opt_dict['which_epoch'] = 'latest'
    opt.save_root = pjoin(opt.checkpoints_dir, opt.dataset_name, opt.name)
    opt.model_dir = pjoin(opt.save_root, 'model')
    opt.meta_dir = pjoin(opt.save_root, 'meta')
    if opt.dataset_name == 't2m':
        opt.data_root = './dataset/HumanML3D'
        opt.motion_dir = pjoin(opt.data_root, 'new_joint_vecs')
        opt.text_dir = pjoin(opt.data_root, 'texts')
        opt.joints_num = 22
        opt.dim_pose = 263
        opt.max_motion_length = 196
    elif opt.dataset_name == 'kit':
        opt.data_root = './dataset/KIT-ML'
        opt.motion_dir = pjoin(opt.data_root, 'new_joint_vecs')
        opt.text_dir = pjoin(opt.data_root, 'texts')
        opt.joints_num = 21
        opt.dim_pose = 251
        opt.max_motion_length = 196
    else:
        raise KeyError('Dataset not recognized')
    opt.dim_word = 300
    opt.num_classes = (200 // opt.unit_length)
    opt.dim_pos_ohot = len(POS_enumerator)
    # Loaded options are always used in inference mode.
    opt.is_train = False
    opt.is_continue = False
    opt.device = device
    return opt
|
def save_json(save_path, data):
    """Serialize *data* as JSON to *save_path*."""
    with open(save_path, 'w') as fp:
        json.dump(data, fp)
|
def load_json(file_path):
    """Read and return the JSON content of *file_path*."""
    with open(file_path, 'r') as fp:
        return json.load(fp)
|
def process(graph):
    """Flatten one SRL-style parse (a dict with a 'verbs' list whose
    'description' strings use bracketed role markup such as
    '[ARG0: the man] [V: walks]') into numbered entity spans plus
    (verb_id, arg_id, role) relations.

    NOTE(review): `tags` and `verb` are re-initialized for every token
    (inside the word loop), so only a tag recorded on the description's
    final token survives to the relation loop below — looks suspicious;
    confirm against the expected SRL markup.
    """
    (entities, relations) = ({}, [])
    for i in graph['verbs']:
        description = i['description']
        pos = 0
        flag = 0  # 1 while inside a bracketed [ROLE: ...] span
        (_words, _spans) = ([], [])
        for i in description.split():  # NOTE(review): shadows the outer loop variable `i`
            (tags, verb) = ({}, 0)
            if ('[' in i):
                # Opening token '[ROLE:' — start a new span.
                _role = i[1:(- 1)]
                flag = 1
                _spans = [pos]
                _words = []
            elif (']' in i):
                # Closing token 'word]' — finish the span, register the entity.
                _words.append(i[:(- 1)])
                entities[len(entities)] = {'role': _role, 'spans': _spans, 'words': _words}
                pos += 1
                flag = 0
                if (_role != 'V'):
                    tags[len(entities)] = _role
                else:
                    verb = len(entities)
            else:
                pos += 1
                if flag:
                    _words.append(i)
                    _spans.append(pos)
        for i in tags:
            relations.append((verb, i, tags[i]))
    output = {'entities': entities, 'relations': relations}
    return output
|
class WordVectorizer(object):
    """Word-vector lookup plus POS one-hot helpers for 'word/POS' tokens.

    Expects three files under meta_root: '<prefix>_data.npy' (vector matrix),
    '<prefix>_words.pkl' (vocabulary list) and '<prefix>_idx.pkl'
    (word -> row index map).
    """

    def __init__(self, meta_root, prefix):
        vectors = np.load(pjoin(meta_root, ('%s_data.npy' % prefix)))
        # BUG FIX: open the pickle files via context managers so the file
        # handles are closed deterministically (the originals leaked them).
        with open(pjoin(meta_root, ('%s_words.pkl' % prefix)), 'rb') as f:
            words = pickle.load(f)
        with open(pjoin(meta_root, ('%s_idx.pkl' % prefix)), 'rb') as f:
            word2idx = pickle.load(f)
        self.word2vec = {w: vectors[word2idx[w]] for w in words}

    def _get_pos_ohot(self, pos):
        """One-hot encode a POS tag, falling back to the 'OTHER' slot."""
        pos_vec = np.zeros(len(POS_enumerator))
        if (pos in POS_enumerator):
            pos_vec[POS_enumerator[pos]] = 1
        else:
            pos_vec[POS_enumerator['OTHER']] = 1
        return pos_vec

    def __len__(self):
        return len(self.word2vec)

    def __getitem__(self, item):
        """Map a 'word/POS' token to (word_vector, pos_one_hot).

        OOV words map to the 'unk' vector with the 'OTHER' POS. Words listed in
        VIP_dict get their VIP category's one-hot instead of the raw POS tag.
        """
        (word, pos) = item.split('/')
        if (word in self.word2vec):
            word_vec = self.word2vec[word]
            vip_pos = None
            for (key, values) in VIP_dict.items():
                if (word in values):
                    vip_pos = key
                    break
            if (vip_pos is not None):
                pos_vec = self._get_pos_ohot(vip_pos)
            else:
                pos_vec = self._get_pos_ohot(pos)
        else:
            word_vec = self.word2vec['unk']
            pos_vec = self._get_pos_ohot('OTHER')
        return (word_vec, pos_vec)
|
class FrameSampler():
    """Samples frame indices from a motion clip and filters clips by duration."""

    def __init__(self, sampling='conseq', sampling_step=1, request_frames=None, threshold_reject=0.75, max_len=1000, min_len=10):
        self.sampling = sampling
        self.sampling_step = sampling_step
        self.request_frames = request_frames
        self.threshold_reject = threshold_reject
        self.max_len = max_len
        self.min_len = min_len

    def __call__(self, num_frames):
        """Delegate index selection to get_frameix_from_data_index."""
        return get_frameix_from_data_index(num_frames, self.request_frames, self.sampling, self.sampling_step)

    def accept(self, duration):
        """True when a clip of `duration` frames passes the length filter."""
        if self.request_frames is None:
            # No fixed request: keep anything within [min_len, max_len].
            return self.min_len <= duration <= self.max_len
        # Fixed request: reject clips shorter than a fraction of it.
        return duration >= (self.threshold_reject * self.request_frames)

    def get(self, key, default=None):
        """Dict-like attribute access with a default."""
        return getattr(self, key, default)

    def __getitem__(self, key):
        return getattr(self, key)
|
def subsample(num_frames, last_framerate, new_framerate):
    """Frame indices that downsample num_frames from last_framerate to new_framerate."""
    stride = int(last_framerate / new_framerate)
    # Only downsampling is supported (integer stride >= 1).
    assert stride >= 1
    return np.arange(0, num_frames, stride)
|
def upsample(motion, last_framerate, new_framerate):
    """Linearly interpolate a frames-first motion array up to a higher framerate."""
    step = int(new_framerate / last_framerate)
    # Only upsampling is supported (integer factor >= 1).
    assert step >= 1
    # Interpolation weights between consecutive frames, endpoints included.
    weights = np.linspace(0, 1, (step + 1))
    left = np.einsum('l,...->l...', (1 - weights), motion[:(- 1)])
    right = np.einsum('l,...->l...', weights, motion[1:])
    # Drop each segment's final sample to avoid duplicating interior frames.
    segments = (left + right)[:(- 1)]
    interleaved = np.concatenate(segments.swapaxes(1, 0))
    # The very last original frame is appended back once.
    return np.concatenate((interleaved, motion[[(- 1)]]))
|
def get_frameix_from_data_index(num_frames: int, request_frames: Optional[int], sampling: str='conseq', sampling_step: int=1) -> Array:
    """Choose frame indices for a clip of num_frames frames.

    Args:
        num_frames: total frames available.
        request_frames: desired number of frames; None keeps every frame.
        sampling: 'conseq' (fixed stride), 'random_conseq' (random stride),
            or 'random' (sorted random subset without replacement).
        sampling_step: stride for 'conseq'; -1 means use the largest stride
            that still fits request_frames frames.

    Returns:
        Index array of length request_frames (num_frames when request_frames
        is None). When the clip is too short, the last frame is repeated.

    Raises:
        ValueError: if sampling is not one of the supported modes.
    """
    nframes = num_frames
    if (request_frames is None):
        frame_ix = np.arange(nframes)
    elif (request_frames > nframes):
        # Too few frames: pad by repeating the last frame up to the request.
        # (Removed a dead `if fair:` branch that was hard-coded to False.)
        ntoadd = (request_frames - nframes)
        padding = ((nframes - 1) * np.ones(ntoadd, dtype=int))
        frame_ix = np.concatenate((np.arange(0, nframes), padding))
    elif (sampling in ['conseq', 'random_conseq']):
        # Largest stride keeping request_frames indices inside the clip;
        # guard against division by zero when a single frame is requested.
        step_max = ((nframes - 1) // max((request_frames - 1), 1))
        if (sampling == 'conseq'):
            if ((sampling_step == (- 1)) or ((sampling_step * (request_frames - 1)) >= nframes)):
                step = step_max
            else:
                step = sampling_step
        elif (sampling == 'random_conseq'):
            step = random.randint(1, step_max)
        lastone = (step * (request_frames - 1))
        # Random temporal offset for the evenly-strided window.
        shift_max = ((nframes - lastone) - 1)
        shift = random.randint(0, max(0, (shift_max - 1)))
        frame_ix = (shift + np.arange(0, (lastone + 1), step))
    elif (sampling == 'random'):
        choices = np.random.choice(range(nframes), request_frames, replace=False)
        frame_ix = sorted(choices)
    else:
        raise ValueError('Sampling not recognized.')
    return frame_ix
|
def lengths_to_mask(lengths, max_len=None):
    """Boolean mask of shape (len(lengths), max_len): True where frame < length.

    Args:
        lengths: 1-D integer tensor of sequence lengths.
        max_len: padded width; defaults to max(lengths). BUG FIX: added as an
            optional second positional argument — all_collate below calls this
            with two positional arguments (lengths, padded_width), which the
            one-argument signature rejected with a TypeError.

    Returns:
        Bool tensor on the same device as `lengths`.
    """
    max_len = max_len if max_len is not None else max(lengths)
    mask = (torch.arange(max_len, device=lengths.device).expand(len(lengths), max_len) < lengths.unsqueeze(1))
    return mask
|
def collate_tensors(batch):
    """Zero-pad a list of tensors to their elementwise-max shape and stack them."""
    ndims = batch[0].dim()
    padded_shape = tuple(max(t.size(d) for t in batch) for d in range(ndims))
    out = batch[0].new_zeros(size=((len(batch),) + padded_shape))
    for (idx, t) in enumerate(batch):
        # Narrow a view down to this tensor's extent in every dim, then copy in.
        view = out[idx]
        for d in range(ndims):
            view = view.narrow(d, 0, t.size(d))
        view.add_(t)
    return out
|
def all_collate(batch):
    """Collate sample dicts into (motion, cond) for diffusion-style training.

    Pads motions to a common size, builds a broadcastable boolean frame mask
    under cond['y'], and passes through optional 'text'/'action_text' lists.

    NOTE(review): lengths_to_mask is called with two positional arguments, so
    this expects the (lengths, max_len) variant — confirm the right definition
    is in scope at this point in the module.
    """
    notnone_batches = [b for b in batch if (b is not None)]
    databatch = [b['motion'] for b in notnone_batches]
    if ('lengths' in notnone_batches[0]):
        lenbatch = [b['lengths'] for b in notnone_batches]
    else:
        # Fall back to the temporal extent of the raw input tensor.
        lenbatch = [len(b['inp'][0][0]) for b in notnone_batches]
    databatchTensor = collate_tensors(databatch)
    lenbatchTensor = torch.as_tensor(lenbatch)
    # Shape (bs, 1, 1, max_len) so the mask broadcasts over feature dims.
    maskbatchTensor = lengths_to_mask(lenbatchTensor, databatchTensor.shape[(- 1)]).unsqueeze(1).unsqueeze(1)
    motion = databatchTensor
    cond = {'y': {'mask': maskbatchTensor, 'lengths': lenbatchTensor}}
    if ('text' in notnone_batches[0]):
        textbatch = [b['text'] for b in notnone_batches]
        cond['y'].update({'text': textbatch})
    if ('action_text' in notnone_batches[0]):
        action_text = [b['action_text'] for b in notnone_batches]
        cond['y'].update({'action_text': action_text})
    return (motion, cond)
|
def mld_collate(batch):
    """Collate raw dataset tuples into a batch dict, sorted by caption token
    length (longest first).

    Tuple layout per sample: (word_embs, pos_ohot, text, text_len, motion,
    length, tokens, V, entities, relations).
    """
    samples = [b for b in batch if (b is not None)]
    samples.sort(key=(lambda s: s[3]), reverse=True)

    def as_float(values):
        # Pad-and-stack a field after converting each item to a float tensor.
        return collate_tensors([torch.tensor(v).float() for v in values])

    adapted_batch = {
        'motion': as_float([s[4] for s in samples]),
        'text': [s[2] for s in samples],
        'length': [s[5] for s in samples],
        'word_embs': as_float([s[0] for s in samples]),
        'pos_ohot': as_float([s[1] for s in samples]),
        'text_len': collate_tensors([torch.tensor(s[3]) for s in samples]),
        'tokens': [s[6] for s in samples],
        'V': [s[7] for s in samples],
        'entities': [s[8] for s in samples],
        'relations': [s[9] for s in samples],
    }
    return adapted_batch
|
def a2m_collate(batch):
    """Collate (motion, label, _, label_text) tuples into an action-to-motion batch."""
    motions = [item[0] for item in batch]
    labels = [item[1] for item in batch]
    # Sequence length read from the last axis of each motion tensor.
    lengths = [len(item[0][0][0]) for item in batch]
    label_texts = [item[3] for item in batch]
    motion_tensor = collate_tensors(motions)
    label_tensor = torch.as_tensor(labels).unsqueeze(1)
    length_tensor = torch.as_tensor(lengths)
    mask_tensor = lengths_to_mask(length_tensor)
    return {
        'motion': motion_tensor.permute(0, 3, 2, 1).flatten(start_dim=2),
        'action': label_tensor,
        'action_text': label_texts,
        'mask': mask_tensor,
        'length': length_tensor,
    }
|
def parse_args(self, args=None, namespace=None):
    """Replacement for ArgumentParser.parse_args that, when no explicit args
    are given, parses only the argv tokens after a literal '--' separator
    (so host-tool flags before '--' are ignored)."""
    if (args is not None):
        return self.parse_args_bak(args=args, namespace=namespace)
    if '--' in sys.argv:
        cut = sys.argv.index('--')
        args = sys.argv[cut + 1:]
    else:
        # No separator present: parse nothing rather than the host's argv.
        args = []
    return self.parse_args_bak(args=args, namespace=namespace)
|
def code_path(path=''):
    """Absolute path of `path` relative to the original (pre-hydra) working dir."""
    base = Path(hydra.utils.get_original_cwd())
    return str(base / path)
|
def working_path(path):
    """Absolute path of `path` under the current working directory."""
    base = Path(os.getcwd())
    return str(base / path)
|
def generate_id():
    # Return the module-level ID constant (assigned elsewhere in this module —
    # presumably a fixed per-run identifier; confirm where ID is generated).
    return ID
|
def get_last_checkpoint(path, ckpt_name='last.ckpt'):
    """Absolute path to <path>/checkpoints/<ckpt_name> (resolved via hydra)."""
    run_dir = Path(hydra.utils.to_absolute_path(path))
    return str(run_dir / 'checkpoints' / ckpt_name)
|
def get_kitname(load_amass_data: bool, load_with_rot: bool):
    """KIT dataset variant name: mmm-xyz, amass-xyz, or amass-rot."""
    if not load_amass_data:
        return 'kit-mmm-xyz'
    # AMASS data: rotation features decide the variant.
    return 'kit-amass-rot' if load_with_rot else 'kit-amass-xyz'
|
def resolve_cfg_path(cfg: DictConfig):
    """Record the current working directory on the config object (in place)."""
    cfg.working_dir = os.getcwd()
|
class ActorVae(nn.Module):
    """Actor-style motion VAE: a transformer encoder maps padded motion features
    to a latent (or a posterior in VAE mode) and a transformer decoder maps the
    latent back to features."""

    def __init__(self, ablation, nfeats: int, latent_dim: list=[1, 256], ff_size: int=1024, num_layers: int=9, num_heads: int=4, dropout: float=0.1, is_vae: bool=True, activation: str='gelu', position_embedding: str='learned', **kwargs) -> None:
        super().__init__()
        self.latent_size = latent_dim[0]
        self.latent_dim = latent_dim[(- 1)]
        self.is_vae = is_vae
        self.encoder = ActorAgnosticEncoder(nfeats=nfeats, vae=True, latent_dim=self.latent_dim, ff_size=ff_size, num_layers=num_layers, num_heads=num_heads, dropout=dropout, activation=activation, **kwargs)
        self.decoder = ActorAgnosticDecoder(nfeats=nfeats, vae=True, latent_dim=self.latent_dim, ff_size=ff_size, num_layers=num_layers, num_heads=num_heads, dropout=dropout, activation=activation, **kwargs)

    def forward(self, features: Tensor, lengths: Optional[List[int]]=None):
        # Debug guard: pipelines are expected to call encode/decode directly.
        print('Should Not enter here')
        (z, dist) = self.encode(features, lengths)
        feats_rst = self.decode(z, lengths)
        return (feats_rst, z, dist)

    def encode(self, features: Tensor, lengths: Optional[List[int]]=None) -> Union[(Tensor, Distribution)]:
        """Encode padded features into (latent, posterior distribution)."""
        dist = self.encoder(features, lengths)
        if self.is_vae:
            latent = sample_from_distribution(dist)
        else:
            # Non-VAE: the encoder output itself is the latent (add batch dim).
            latent = dist.unsqueeze(0)
        return (latent, dist)

    def decode(self, z: Tensor, lengths: List[int]):
        """Decode a latent z back into padded motion features."""
        return self.decoder(z, lengths)
|
class ActorAgnosticEncoder(nn.Module):
    """Transformer encoder over a padded motion sequence; in VAE mode, learnable
    mu/logvar tokens are prepended and the posterior is read from their output
    positions."""
    def __init__(self, nfeats: int, vae: bool, latent_dim: int=256, ff_size: int=1024, num_layers: int=4, num_heads: int=4, dropout: float=0.1, activation: str='gelu', **kwargs) -> None:
        super().__init__()
        input_feats = nfeats
        self.vae = vae
        # Project per-frame skeleton features into the transformer width.
        self.skel_embedding = nn.Linear(input_feats, latent_dim)
        if vae:
            # Learnable distribution tokens prepended to every sequence.
            self.mu_token = nn.Parameter(torch.randn(latent_dim))
            self.logvar_token = nn.Parameter(torch.randn(latent_dim))
        else:
            self.emb_token = nn.Parameter(torch.randn(latent_dim))
        self.sequence_pos_encoding = PositionalEncoding(latent_dim, dropout)
        seq_trans_encoder_layer = nn.TransformerEncoderLayer(d_model=latent_dim, nhead=num_heads, dim_feedforward=ff_size, dropout=dropout, activation=activation)
        self.seqTransEncoder = nn.TransformerEncoder(seq_trans_encoder_layer, num_layers=num_layers)
    def forward(self, features: Tensor, lengths: Optional[List[int]]=None) -> Union[(Tensor, Distribution)]:
        """Encode padded (bs, nframes, nfeats) features.

        Returns a Normal posterior in VAE mode, otherwise the embedding token's
        transformer output.
        """
        if (lengths is None):
            lengths = [len(feature) for feature in features]
        device = features.device
        (bs, nframes, nfeats) = features.shape
        mask = lengths_to_mask(lengths, device)
        x = features
        x = self.skel_embedding(x)
        # (bs, nframes, d) -> (nframes, bs, d): the transformer is time-major.
        x = x.permute(1, 0, 2)
        if self.vae:
            mu_token = torch.tile(self.mu_token, (bs,)).reshape(bs, (- 1))
            logvar_token = torch.tile(self.logvar_token, (bs,)).reshape(bs, (- 1))
            xseq = torch.cat((mu_token[None], logvar_token[None], x), 0)
            # Distribution tokens are always attended to (never padding).
            token_mask = torch.ones((bs, 2), dtype=bool, device=x.device)
            aug_mask = torch.cat((token_mask, mask), 1)
        else:
            emb_token = torch.tile(self.emb_token, (bs,)).reshape(bs, (- 1))
            xseq = torch.cat((emb_token[None], x), 0)
            token_mask = torch.ones((bs, 1), dtype=bool, device=x.device)
            aug_mask = torch.cat((token_mask, mask), 1)
        xseq = self.sequence_pos_encoding(xseq)
        final = self.seqTransEncoder(xseq, src_key_padding_mask=(~ aug_mask))
        if self.vae:
            # Posterior parameters come from the two prepended token positions.
            (mu, logvar) = (final[0], final[1])
            # std = exp(logvar)^0.5 = exp(logvar / 2)
            std = logvar.exp().pow(0.5)
            dist = torch.distributions.Normal(mu, std)
            return dist
        else:
            return final[0]
|
class ActorAgnosticDecoder(nn.Module):
    """Transformer decoder that cross-attends positional time queries to a
    latent memory z, producing padded motion features of the requested lengths."""
    def __init__(self, nfeats: int, latent_dim: int=256, ff_size: int=1024, num_layers: int=4, num_heads: int=4, dropout: float=0.1, activation: str='gelu', **kwargs) -> None:
        super().__init__()
        output_feats = nfeats
        self.latent_dim = latent_dim
        self.nfeats = nfeats
        self.sequence_pos_encoding = PositionalEncoding(latent_dim, dropout)
        seq_trans_decoder_layer = nn.TransformerDecoderLayer(d_model=latent_dim, nhead=num_heads, dim_feedforward=ff_size, dropout=dropout, activation=activation)
        self.seqTransDecoder = nn.TransformerDecoder(seq_trans_decoder_layer, num_layers=num_layers)
        self.final_layer = nn.Linear(latent_dim, output_feats)
    def forward(self, z: Tensor, lengths: List[int]):
        """Decode latent memory z into (bs, nframes, nfeats) features; frames
        beyond each sequence's length are zeroed out."""
        mask = lengths_to_mask(lengths, z.device)
        (bs, nframes) = mask.shape
        nfeats = self.nfeats
        # Queries carry positional information only (content starts at zero).
        time_queries = torch.zeros(nframes, bs, self.latent_dim, device=z.device)
        time_queries = self.sequence_pos_encoding(time_queries)
        output = self.seqTransDecoder(tgt=time_queries, memory=z, tgt_key_padding_mask=(~ mask))
        output = self.final_layer(output)
        # Zero padded frames (output is still time-major here, hence mask.T).
        output[(~ mask.T)] = 0
        feats = output.permute(1, 0, 2)
        return feats
|
def sample_from_distribution(dist, *, fact=1.0, sample_mean=False) -> Tensor:
    """Draw one latent from `dist`, adding a leading dimension of size 1.

    `sample_mean` returns the mean itself; otherwise `fact` scales the
    reparameterized deviation from the mean (None means a plain rsample).
    """
    if sample_mean:
        return dist.loc.unsqueeze(0)
    if fact is None:
        return dist.rsample().unsqueeze(0)
    # Scale only the stochastic deviation, keeping the mean fixed.
    deviation = dist.rsample() - dist.loc
    return (dist.loc + (fact * deviation)).unsqueeze(0)
|
class Encoder_FC(nn.Module):
    """Fully-connected encoder: flattened motion clip + class one-hot -> latent
    (or mu/logvar pair when modeltype == 'cvae')."""

    def __init__(self, modeltype, njoints, nfeats, num_frames, num_classes, translation, pose_rep, glob, glob_rot, latent_dim=256, **kargs):
        super().__init__()
        self.modeltype = modeltype
        self.njoints = njoints
        self.nfeats = nfeats
        self.num_frames = num_frames
        self.num_classes = num_classes
        self.translation = translation
        self.pose_rep = pose_rep
        self.glob = glob
        self.glob_rot = glob_rot
        self.latent_dim = latent_dim
        self.activation = nn.GELU()
        # Flattened motion (joints * feats * frames) plus the class one-hot.
        self.input_dim = self.njoints * self.nfeats * self.num_frames + self.num_classes
        self.fully_connected = nn.Sequential(nn.Linear(self.input_dim, 512), nn.GELU(), nn.Linear(512, 256), nn.GELU())
        if self.modeltype == 'cvae':
            # Separate heads for the posterior mean and log-variance.
            self.mu = nn.Linear(256, self.latent_dim)
            self.var = nn.Linear(256, self.latent_dim)
        else:
            self.final = nn.Linear(256, self.latent_dim)

    def forward(self, batch):
        """batch['x']: (bs, njoints, nfeats, nframes); batch['y']: labels.

        Raises ValueError when the flattened input size does not match the
        dimensions this model was built for.
        """
        x, y = batch['x'], batch['y']
        bs, njoints, feats, nframes = x.size()
        if njoints * feats * nframes != self.njoints * self.nfeats * self.num_frames:
            raise ValueError('This model is not adapted with this input')
        if y.dim() == 1:
            # Integer labels -> one-hot.
            y = F.one_hot(y, self.num_classes)
        y = y.to(dtype=x.dtype)
        flat = torch.cat((x.reshape(bs, njoints * feats * nframes), y), 1)
        hidden = self.fully_connected(flat)
        if self.modeltype == 'cvae':
            return {'mu': self.mu(hidden), 'logvar': self.var(hidden)}
        return {'z': self.final(hidden)}
|
class Decoder_FC(nn.Module):
    """Fully-connected decoder: (latent, class one-hot) -> motion clip tensor of
    shape (bs, njoints, nfeats, num_frames)."""

    def __init__(self, modeltype, njoints, nfeats, num_frames, num_classes, translation, pose_rep, glob, glob_rot, latent_dim=256, **kargs):
        super().__init__()
        self.modeltype = modeltype
        self.njoints = njoints
        self.nfeats = nfeats
        self.num_frames = num_frames
        self.num_classes = num_classes
        self.translation = translation
        self.pose_rep = pose_rep
        self.glob = glob
        self.glob_rot = glob_rot
        self.latent_dim = latent_dim
        # Latent concatenated with the class one-hot.
        self.input_dim = self.latent_dim + self.num_classes
        self.output_dim = self.njoints * self.nfeats * self.num_frames
        self.fully_connected = nn.Sequential(nn.Linear(self.input_dim, 256), nn.GELU(), nn.Linear(256, 512), nn.GELU(), nn.Linear(512, self.output_dim), nn.GELU())

    def forward(self, batch):
        """Reads batch['z'] and batch['y']; writes batch['output'] and returns batch."""
        z, y = batch['z'], batch['y']
        if y.dim() == 1:
            # Integer labels -> one-hot.
            y = F.one_hot(y, self.num_classes)
        y = y.to(dtype=z.dtype)
        decoded = self.fully_connected(torch.cat((z, y), dim=1))
        bs = decoded.size(0)
        batch['output'] = decoded.reshape(bs, self.njoints, self.nfeats, self.num_frames)
        return batch
|
class GATLayer(nn.Module):
def __init__(self, in_features=768, out_features=768, dropout=0.1, alpha=0.2, concat=True):
super(GATLayer, self).__init__()
self.dropout = dropout
self.in_features = in_features
self.out_features = out_features
self.alpha = alpha
self.concat = concat
self.W = nn.Parameter(torch.empty(size=(in_features, out_features)))
self.leakyrelu = nn.LeakyReLU(self.alpha)
self.a = nn.Parameter(torch.empty(size=((2 * out_features), 1)))
self.ARG0 = nn.Parameter(torch.empty(size=((2 * out_features), 1)))
self.ARG1 = nn.Parameter(torch.empty(size=((2 * out_features), 1)))
self.ARG2 = nn.Parameter(torch.empty(size=((2 * out_features), 1)))
self.ARG3 = nn.Parameter(torch.empty(size=((2 * out_features), 1)))
self.ARG4 = nn.Parameter(torch.empty(size=((2 * out_features), 1)))
self.ARGM_LOC = nn.Parameter(torch.empty(size=((2 * out_features), 1)))
self.ARGM_MNR = nn.Parameter(torch.empty(size=((2 * out_features), 1)))
self.ARGM_TMP = nn.Parameter(torch.empty(size=((2 * out_features), 1)))
self.ARGM_DIR = nn.Parameter(torch.empty(size=((2 * out_features), 1)))
self.ARGM_ADV = nn.Parameter(torch.empty(size=((2 * out_features), 1)))
self.MA = nn.Parameter(torch.empty(size=((2 * out_features), 1)))
self.OTHERS = nn.Parameter(torch.empty(size=((2 * out_features), 1)))
nn.init.xavier_uniform_(self.W.data, gain=1.414)
nn.init.xavier_uniform_(self.a, gain=1.414)
nn.init.xavier_uniform_(self.ARG0.data, gain=1.414)
nn.init.xavier_uniform_(self.ARG1.data, gain=1.414)
nn.init.xavier_uniform_(self.ARG2.data, gain=1.414)
nn.init.xavier_uniform_(self.ARG3.data, gain=1.414)
nn.init.xavier_uniform_(self.ARG4.data, gain=1.414)
nn.init.xavier_uniform_(self.ARGM_LOC.data, gain=1.414)
nn.init.xavier_uniform_(self.ARGM_MNR.data, gain=1.414)
nn.init.xavier_uniform_(self.ARGM_TMP.data, gain=1.414)
nn.init.xavier_uniform_(self.ARGM_DIR.data, gain=1.414)
nn.init.xavier_uniform_(self.ARGM_ADV.data, gain=1.414)
nn.init.xavier_uniform_(self.MA.data, gain=1.414)
nn.init.xavier_uniform_(self.OTHERS.data, gain=1.414)
def forward(self, h0, h1, multi_adj, adj):
Wh0 = torch.einsum('bnd,de->bne', [h0, self.W])
Wh1 = torch.einsum('bnd,de->bne', [h1, self.W])
a_input = self._prepare_attentional_mechanism_input(Wh0, Wh1)
e = self.leakyrelu(torch.matmul(a_input, self.a).squeeze(3))
e_ARG0 = self.leakyrelu(torch.matmul(a_input, self.ARG0).squeeze(3))
e_ARG1 = self.leakyrelu(torch.matmul(a_input, self.ARG1).squeeze(3))
e_ARG2 = self.leakyrelu(torch.matmul(a_input, self.ARG2).squeeze(3))
e_ARG3 = self.leakyrelu(torch.matmul(a_input, self.ARG3).squeeze(3))
e_ARG4 = self.leakyrelu(torch.matmul(a_input, self.ARG4).squeeze(3))
e_ARGM_LOC = self.leakyrelu(torch.matmul(a_input, self.ARGM_LOC).squeeze(3))
e_ARGM_MNR = self.leakyrelu(torch.matmul(a_input, self.ARGM_MNR).squeeze(3))
e_ARGM_TMP = self.leakyrelu(torch.matmul(a_input, self.ARGM_TMP).squeeze(3))
e_ARGM_DIR = self.leakyrelu(torch.matmul(a_input, self.ARGM_DIR).squeeze(3))
e_ARGM_ADV = self.leakyrelu(torch.matmul(a_input, self.ARGM_ADV).squeeze(3))
e_MA = self.leakyrelu(torch.matmul(a_input, self.MA).squeeze(3))
e_OTHERS = self.leakyrelu(torch.matmul(a_input, self.OTHERS).squeeze(3))
zero_vec = ((- 9000000000000000.0) * torch.ones_like(e))
attention = torch.where((adj > 0), e, zero_vec)
zero_vec = torch.zeros_like(e_ARG0)
attention_ARG0 = torch.where((multi_adj['ARG0'] > 0), e_ARG0, zero_vec)
attention_ARG1 = torch.where((multi_adj['ARG1'] > 0), e_ARG1, zero_vec)
attention_ARG2 = torch.where((multi_adj['ARG2'] > 0), e_ARG2, zero_vec)
attention_ARG3 = torch.where((multi_adj['ARG3'] > 0), e_ARG3, zero_vec)
attention_ARG4 = torch.where((multi_adj['ARG4'] > 0), e_ARG4, zero_vec)
attention_ARGM_LOC = torch.where((multi_adj['ARGM-LOC'] > 0), e_ARGM_LOC, zero_vec)
attention_ARGM_MNR = torch.where((multi_adj['ARGM-MNR'] > 0), e_ARGM_MNR, zero_vec)
attention_ARGM_TMP = torch.where((multi_adj['ARGM-TMP'] > 0), e_ARGM_TMP, zero_vec)
attention_ARGM_DIR = torch.where((multi_adj['ARGM-DIR'] > 0), e_ARGM_DIR, zero_vec)
attention_ARGM_ADV = torch.where((multi_adj['ARGM-ADV'] > 0), e_ARGM_ADV, zero_vec)
attention_OTHERS = torch.where((multi_adj['OTHERS'] > 0), e_OTHERS, zero_vec)
attention_MA = torch.where((multi_adj['MA'] > 0), e_MA, zero_vec)
attention = F.softmax((attention + (0.01 * (((((((((((attention_ARG0 + attention_ARG1) + attention_ARG2) + attention_ARG3) + attention_ARG4) + attention_ARGM_LOC) + attention_ARGM_MNR) + attention_ARGM_TMP) + attention_ARGM_DIR) + attention_ARGM_ADV) + attention_OTHERS) + attention_MA))), dim=1)
attention = F.dropout(attention, self.dropout, training=self.training)
h_prime = torch.matmul(attention, Wh1)
if self.concat:
return F.elu(h_prime)
else:
return h_prime
def _prepare_attentional_mechanism_input(self, Wh0, Wh1):
(N0, N1) = (Wh0.size()[1], Wh1.size()[1])
Wh0_repeated_in_chunks = Wh0.repeat_interleave(N1, dim=1)
Wh1_repeated_alternating = Wh1.repeat(1, N0, 1)
all_combinations_matrix = torch.cat([Wh0_repeated_in_chunks, Wh1_repeated_alternating], dim=(- 1))
return all_combinations_matrix.view((- 1), N0, N1, (2 * self.out_features))
|
class MotionDiscriminator(nn.Module):
def __init__(self, input_size, hidden_size, hidden_layer, output_size=12, use_noise=None):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.hidden_layer = hidden_layer
self.use_noise = use_noise
self.recurrent = nn.GRU(input_size, hidden_size, hidden_layer)
self.linear1 = nn.Linear(hidden_size, 30)
self.linear2 = nn.Linear(30, output_size)
def forward(self, motion_sequence, lengths=None, hidden_unit=None):
(bs, njoints, nfeats, num_frames) = motion_sequence.shape
motion_sequence = motion_sequence.reshape(bs, (njoints * nfeats), num_frames)
motion_sequence = motion_sequence.permute(2, 0, 1)
if (hidden_unit is None):
hidden_unit = self.initHidden(motion_sequence.size(1), self.hidden_layer).to(motion_sequence.device)
(gru_o, _) = self.recurrent(motion_sequence.float(), hidden_unit)
out = gru_o[tuple(torch.stack(((lengths - 1), torch.arange(bs, device=motion_sequence.device))))]
lin1 = self.linear1(out)
lin1 = torch.tanh(lin1)
lin2 = self.linear2(lin1)
return lin2
def initHidden(self, num_samples, layer):
return torch.randn(layer, num_samples, self.hidden_size, requires_grad=False)
|
class MotionDiscriminatorForFID(MotionDiscriminator):
    """MotionDiscriminator variant returning the 30-d feature before the final
    classification layer (used as the feature extractor for FID)."""

    def forward(self, motion_sequence, lengths=None, hidden_unit=None):
        bs, njoints, nfeats, num_frames = motion_sequence.shape
        # Same time-major preparation as the parent class.
        seq = motion_sequence.reshape(bs, njoints * nfeats, num_frames).permute(2, 0, 1)
        if hidden_unit is None:
            hidden_unit = self.initHidden(seq.size(1), self.hidden_layer).to(seq.device)
        gru_o, _ = self.recurrent(seq.float(), hidden_unit)
        out = gru_o[tuple(torch.stack((lengths - 1, torch.arange(bs, device=seq.device))))]
        # Stop before linear2: return the intermediate feature.
        return torch.tanh(self.linear1(out))
|
class MLDTextEncoder(nn.Module):
    """Pretrained HuggingFace text encoder (frozen unless finetune=True) whose
    hidden states are projected to latent_dim, with transformer heads that
    produce global text tokens and (optionally) action tokens."""
    def __init__(self, cfg, modelpath: str, finetune: bool=False, vae: bool=True, latent_dim: int=256, ff_size: int=1024, num_layers: int=6, num_heads: int=4, dropout: float=0.1, activation: str='gelu', **kwargs) -> None:
        super().__init__()
        from transformers import AutoTokenizer, AutoModel
        from transformers import logging
        logging.set_verbosity_error()
        os.environ['TOKENIZERS_PARALLELISM'] = 'false'
        self.tokenizer = AutoTokenizer.from_pretrained(modelpath)
        self.text_model = AutoModel.from_pretrained(modelpath)
        if (not finetune):
            # Freeze the pretrained language model.
            self.text_model.training = False
            for p in self.text_model.parameters():
                p.requires_grad = False
        self.text_encoded_dim = self.text_model.config.hidden_size
        # NOTE(review): the assignment above is immediately overwritten — the
        # hidden_size value is discarded. Confirm which one is intended.
        self.text_encoded_dim = latent_dim
        encoded_dim = self.text_model.config.hidden_size
        self.projection = nn.Sequential(nn.ReLU(), nn.Linear(encoded_dim, latent_dim))
        # NOTE(review): hard-codes vae=False, overriding the constructor
        # argument — the mu/logvar-token branch below is unreachable.
        vae = False
        if vae:
            self.mu_token = nn.Parameter(torch.randn(latent_dim))
            self.logvar_token = nn.Parameter(torch.randn(latent_dim))
        else:
            self.global_text_token = nn.Parameter(torch.randn(latent_dim))
        self.sequence_pos_encoding = PositionalEncoding(latent_dim, dropout)
        seq_trans_encoder_layer = nn.TransformerEncoderLayer(d_model=latent_dim, nhead=num_heads, dim_feedforward=ff_size, dropout=dropout, activation=activation)
        self.seqTransEncoder = nn.TransformerEncoder(seq_trans_encoder_layer, num_layers=num_layers)
        # NOTE(review): self.is_action_branch is not assigned anywhere in this
        # __init__; unless a subclass sets it before calling super().__init__()
        # this line raises AttributeError — confirm.
        if self.is_action_branch:
            action_trans_encoder_layer = nn.TransformerEncoderLayer(d_model=latent_dim, nhead=num_heads, dim_feedforward=ff_size, dropout=dropout, activation=activation)
            self.actionTransEncoder = nn.TransformerEncoder(action_trans_encoder_layer, num_layers=num_layers)
            self.mean_token = nn.Parameter(torch.randn(latent_dim))
            self.std_token = nn.Parameter(torch.randn(latent_dim))
    def global_branch(self, x, mask):
        """Prepend global (and optionally mean/std) tokens to the projected text
        sequence and run the text transformer over the augmented sequence."""
        bs = x.shape[0]
        x = x.permute(1, 0, 2)
        global_tokens = torch.tile(self.global_text_token, (bs,)).reshape(bs, (- 1))
        # NOTE(review): self.is_cross_token is not set in __init__ either —
        # confirm where it is defined.
        if self.is_cross_token:
            mean_tokens = torch.tile(self.mean_token, (bs,)).reshape(bs, (- 1))
            std_tokens = torch.tile(self.std_token, (bs,)).reshape(bs, (- 1))
            xseq = torch.cat((mean_tokens[None], std_tokens[None], global_tokens[None], x), 0)
            # Prepended tokens are always attended to (never treated as padding).
            token_mask = torch.ones((bs, 3), dtype=bool, device=x.device)
            aug_mask = torch.cat((token_mask, mask), 1)
        else:
            xseq = torch.cat((global_tokens[None], x), 0)
            token_mask = torch.ones((bs, 1), dtype=bool, device=x.device)
            aug_mask = torch.cat((token_mask, mask), 1)
        xseq = self.sequence_pos_encoding(xseq)
        text_tokens = self.seqTransEncoder(xseq, src_key_padding_mask=(~ aug_mask))
        return text_tokens
    def action_branch(self, x, mask):
        """Run the action transformer with mean/std tokens prepended; returns the
        outputs at those two token positions."""
        bs = x.shape[0]
        mean_tokens = torch.tile(self.mean_token, (bs,)).reshape(bs, (- 1))
        std_tokens = torch.tile(self.std_token, (bs,)).reshape(bs, (- 1))
        actionSeq = torch.cat((mean_tokens[None], std_tokens[None], x), 0)
        token_mask = torch.ones((bs, 2), dtype=bool, device=x.device)
        aug_mask = torch.cat((token_mask, mask), 1)
        actionSeq = self.sequence_pos_encoding(actionSeq)
        action_tokens = self.actionTransEncoder(actionSeq, src_key_padding_mask=(~ aug_mask))
        return action_tokens[0:2]
    def forward(self, texts: List[str]):
        """Encode raw strings to per-token embeddings projected to latent_dim."""
        (text_encoded, mask) = self.get_last_hidden_state(texts, return_mask=True)
        text_emb = self.projection(text_encoded)
        return text_emb
    def get_last_hidden_state(self, texts: List[str], return_mask: bool=False):
        """Tokenize and run the language model; optionally also return the
        boolean attention mask."""
        encoded_inputs = self.tokenizer(texts, return_tensors='pt', padding=True)
        output = self.text_model(**encoded_inputs.to(self.text_model.device))
        if (not return_mask):
            return output.last_hidden_state
        return (output.last_hidden_state, encoded_inputs.attention_mask.to(dtype=bool))
|
class MovementConvEncoder(nn.Module):
    """Two strided Conv1d blocks (4x temporal downsampling overall) followed by
    a linear output head; operates on (B, T, C) inputs."""

    def __init__(self, input_size, hidden_size, output_size):
        super(MovementConvEncoder, self).__init__()
        self.main = nn.Sequential(
            nn.Conv1d(input_size, hidden_size, 4, 2, 1),
            nn.Dropout(0.2, inplace=True),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv1d(hidden_size, output_size, 4, 2, 1),
            nn.Dropout(0.2, inplace=True),
            nn.LeakyReLU(0.2, inplace=True),
        )
        self.out_net = nn.Linear(output_size, output_size)

    def forward(self, inputs):
        # (B, T, C) -> (B, C, T) for Conv1d, then back for the linear head.
        conv_out = self.main(inputs.permute(0, 2, 1)).permute(0, 2, 1)
        return self.out_net(conv_out)
|
class MotionEncoderBiGRUCo(nn.Module):
    """Bidirectional GRU motion encoder; concatenates both directions' final
    hidden states and projects them to a single embedding."""

    def __init__(self, input_size, hidden_size, output_size):
        super(MotionEncoderBiGRUCo, self).__init__()
        self.input_emb = nn.Linear(input_size, hidden_size)
        self.gru = nn.GRU(hidden_size, hidden_size, batch_first=True, bidirectional=True)
        self.output_net = nn.Sequential(nn.Linear((hidden_size * 2), hidden_size), nn.LayerNorm(hidden_size), nn.LeakyReLU(0.2, inplace=True), nn.Linear(hidden_size, output_size))
        self.hidden_size = hidden_size
        # Learnable initial hidden state, tiled across the batch at forward time.
        self.hidden = nn.Parameter(torch.randn((2, 1, self.hidden_size), requires_grad=True))

    def forward(self, inputs, m_lens):
        """inputs: (B, T, input_size) padded motions; m_lens: valid lengths (sorted desc)."""
        batch = inputs.shape[0]
        embedded = self.input_emb(inputs)
        init_hidden = self.hidden.repeat(1, batch, 1)
        packed = pack_padded_sequence(embedded, m_lens.data.tolist(), batch_first=True)
        _, last_hidden = self.gru(packed, init_hidden)
        # Concatenate forward and backward final hidden states.
        fused = torch.cat([last_hidden[0], last_hidden[1]], dim=(- 1))
        return self.output_net(fused)
|
class TextEncoderBiGRUCo(nn.Module):
    """Bidirectional GRU text encoder over word embeddings additively fused with
    projected POS one-hots; both directions' final hidden states are
    concatenated and projected to a sentence embedding."""

    def __init__(self, word_size, pos_size, hidden_size, output_size):
        super(TextEncoderBiGRUCo, self).__init__()
        self.pos_emb = nn.Linear(pos_size, word_size)
        self.input_emb = nn.Linear(word_size, hidden_size)
        self.gru = nn.GRU(hidden_size, hidden_size, batch_first=True, bidirectional=True)
        self.output_net = nn.Sequential(nn.Linear((hidden_size * 2), hidden_size), nn.LayerNorm(hidden_size), nn.LeakyReLU(0.2, inplace=True), nn.Linear(hidden_size, output_size))
        self.hidden_size = hidden_size
        # Learnable initial hidden state, tiled across the batch at forward time.
        self.hidden = nn.Parameter(torch.randn((2, 1, self.hidden_size), requires_grad=True))

    def forward(self, word_embs, pos_onehot, cap_lens):
        """word_embs: (B, T, word_size); pos_onehot: (B, T, pos_size); cap_lens sorted desc."""
        batch = word_embs.shape[0]
        # Word vectors augmented additively with projected POS one-hots.
        fused = word_embs + self.pos_emb(pos_onehot)
        embedded = self.input_emb(fused)
        init_hidden = self.hidden.repeat(1, batch, 1)
        packed = pack_padded_sequence(embedded, cap_lens.data.tolist(), batch_first=True)
        _, last_hidden = self.gru(packed, init_hidden)
        sentence = torch.cat([last_hidden[0], last_hidden[1]], dim=(- 1))
        return self.output_net(sentence)
|
class STGCN(nn.Module):
    """Spatial temporal graph convolutional network for skeleton classification.

    Args:
        in_channels (int): number of channels in the input data.
        num_class (int): number of classes for the classification task.
        kintree_path: path to the skeleton kinematic-tree definition.
        graph_args (dict): arguments for building the graph.
        edge_importance_weighting (bool): if True, adds a learnable importance
            weighting to the edges of the graph.
        **kwargs: other parameters for the graph convolution units.

    Shape:
        Input: (N, in_channels, T_in, V_in, M_in); Output: (N, num_class),
        where N is the batch size, T_in the sequence length, V_in the number
        of graph nodes and M_in the number of instances per frame.
    """
    def __init__(self, in_channels, num_class, kintree_path, graph_args, edge_importance_weighting, **kwargs):
        super().__init__()
        self.num_class = num_class
        self.losses = ['accuracy', 'cross_entropy', 'mixed']
        self.criterion = torch.nn.CrossEntropyLoss(reduction='mean')
        # Fixed (non-trainable) adjacency stack from the skeleton graph.
        self.graph = Graph(kintree_path=kintree_path, **graph_args)
        A = torch.tensor(self.graph.A, dtype=torch.float32, requires_grad=False)
        self.register_buffer('A', A)
        spatial_kernel_size = A.size(0)
        temporal_kernel_size = 9
        kernel_size = (temporal_kernel_size, spatial_kernel_size)
        self.data_bn = nn.BatchNorm1d((in_channels * A.size(1)))
        # The first st_gcn block takes no dropout.
        kwargs0 = {k: v for (k, v) in kwargs.items() if (k != 'dropout')}
        self.st_gcn_networks = nn.ModuleList((st_gcn(in_channels, 64, kernel_size, 1, residual=False, **kwargs0), st_gcn(64, 64, kernel_size, 1, **kwargs), st_gcn(64, 64, kernel_size, 1, **kwargs), st_gcn(64, 64, kernel_size, 1, **kwargs), st_gcn(64, 128, kernel_size, 2, **kwargs), st_gcn(128, 128, kernel_size, 1, **kwargs), st_gcn(128, 128, kernel_size, 1, **kwargs), st_gcn(128, 256, kernel_size, 2, **kwargs), st_gcn(256, 256, kernel_size, 1, **kwargs), st_gcn(256, 256, kernel_size, 1, **kwargs)))
        if edge_importance_weighting:
            # One learnable edge-importance mask per st_gcn block.
            self.edge_importance = nn.ParameterList([nn.Parameter(torch.ones(self.A.size())) for i in self.st_gcn_networks])
        else:
            self.edge_importance = ([1] * len(self.st_gcn_networks))
        self.fcn = nn.Conv2d(256, num_class, kernel_size=1)
    def forward(self, motion):
        """Classify a motion tensor; returns a dict with 'output' (the input),
        'features' (pooled embedding) and 'yhat' (class logits)."""
        batch = {'output': motion}
        # Rearrange to (N, C, T, V) plus an instance dim M=1 — input layout
        # assumed (N, V, C, T) given the permutation; confirm with callers.
        x = batch['output'].permute(0, 2, 3, 1).unsqueeze(4).contiguous()
        (N, C, T, V, M) = x.size()
        # Batch-normalize per (joint, channel) across time.
        x = x.permute(0, 4, 3, 1, 2).contiguous()
        x = x.view((N * M), (V * C), T)
        x = self.data_bn(x)
        x = x.view(N, M, V, C, T)
        x = x.permute(0, 1, 3, 4, 2).contiguous()
        x = x.view((N * M), C, T, V)
        for (gcn, importance) in zip(self.st_gcn_networks, self.edge_importance):
            (x, _) = gcn(x, (self.A * importance))
        # Global average pooling over the remaining time/joint grid.
        x = F.avg_pool2d(x, x.size()[2:])
        x = x.view(N, M, (- 1), 1, 1).mean(dim=1)
        batch['features'] = x.squeeze()
        x = self.fcn(x)
        x = x.view(x.size(0), (- 1))
        batch['yhat'] = x
        return batch
    def compute_accuracy(self, batch):
        """Classification accuracy of batch['yhat'] logits vs batch['y'] labels,
        via the trace of a confusion matrix."""
        confusion = torch.zeros(self.num_class, self.num_class, dtype=int)
        yhat = batch['yhat'].max(dim=1).indices
        ygt = batch['y']
        for (label, pred) in zip(ygt, yhat):
            confusion[label][pred] += 1
        accuracy = (torch.trace(confusion) / torch.sum(confusion))
        return accuracy
    def compute_loss(self, batch):
        """Cross-entropy loss plus a dict of scalar metrics; 'mixed' currently
        equals 'cross_entropy'."""
        cross_entropy = self.criterion(batch['yhat'], batch['y'])
        mixed_loss = cross_entropy
        acc = self.compute_accuracy(batch)
        losses = {'cross_entropy': cross_entropy.item(), 'mixed': mixed_loss.item(), 'accuracy': acc.item()}
        return (mixed_loss, losses)
|
class st_gcn(nn.Module):
    """Spatial-temporal graph convolution block: graph conv (spatial), then
    BN/ReLU/temporal conv, with a residual connection.

    Args:
        in_channels: channels of the input sequence.
        out_channels: channels produced by the block.
        kernel_size: (temporal_kernel, spatial_kernel); temporal kernel must be odd.
        stride: temporal stride.
        dropout: dropout rate applied to the block output.
        residual: whether to add a residual/skip connection.

    Shape:
        Input: x (N, in_channels, T, V) and A (K, V, V);
        Output: ((N, out_channels, T', V), A).
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, dropout=0, residual=True):
        super().__init__()
        assert len(kernel_size) == 2
        assert kernel_size[0] % 2 == 1
        # 'Same' padding in time, none across joints.
        pad = ((kernel_size[0] - 1) // 2, 0)
        self.gcn = ConvTemporalGraphical(in_channels, out_channels, kernel_size[1])
        self.tcn = nn.Sequential(
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, out_channels, (kernel_size[0], 1), (stride, 1), pad),
            nn.BatchNorm2d(out_channels),
            nn.Dropout(dropout, inplace=True),
        )
        if not residual:
            self.residual = (lambda x: 0)
        elif (in_channels == out_channels) and (stride == 1):
            # Identity skip when shapes already match.
            self.residual = (lambda x: x)
        else:
            # 1x1 conv to match channels/stride on the skip path.
            self.residual = nn.Sequential(nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=(stride, 1)), nn.BatchNorm2d(out_channels))
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x, A):
        skip = self.residual(x)
        (x, A) = self.gcn(x, A)
        return (self.relu(self.tcn(x) + skip), A)
|
class Graph():
    """Skeleton graph used to model joints for ST-GCN.

    Builds the adjacency tensor ``self.A`` of shape (K, V, V) for the chosen
    layout and partition strategy. For strategy details see 'Partition
    Strategies' in https://arxiv.org/abs/1801.07455.

    Args:
        kintree_path: path to a pickled SMPL kinematic tree; only read by the
            'smpl' / 'smpl_noglobal' layouts.
        layout (str): 'openpose' (18 joints), 'smpl' (24), 'smpl_noglobal'
            (23, global rotation dropped), 'ntu-rgb+d' (25) or 'ntu_edge' (24).
        strategy (str): 'uniform', 'distance' or 'spatial'.
        max_hop (int): maximal distance between two connected nodes.
        dilation (int): spacing between the kernel points.
    """

    def __init__(self, kintree_path, layout='openpose', strategy='uniform', max_hop=1, dilation=1):
        self.max_hop = max_hop
        self.dilation = dilation
        self.kintree_path = kintree_path
        self.get_edge(layout)
        self.hop_dis = get_hop_distance(self.num_node, self.edge, max_hop=max_hop)
        self.get_adjacency(strategy)

    def __str__(self):
        # BUGFIX: __str__ must return a str; it previously returned the numpy
        # array itself, which makes str(graph)/print(graph) raise TypeError.
        return str(self.A)

    def get_edge(self, layout):
        """Populate self.num_node, self.edge (self-links + bones) and self.center."""
        if layout == 'openpose':
            self.num_node = 18
            self_link = [(i, i) for i in range(self.num_node)]
            neighbor_link = [(4, 3), (3, 2), (7, 6), (6, 5), (13, 12), (12, 11), (10, 9), (9, 8), (11, 5), (8, 2), (5, 1), (2, 1), (0, 1), (15, 0), (14, 0), (17, 15), (16, 14)]
            self.edge = (self_link + neighbor_link)
            self.center = 1
        elif layout == 'smpl':
            self.num_node = 24
            self_link = [(i, i) for i in range(self.num_node)]
            # BUGFIX: close the kintree file instead of leaking the handle.
            with open(self.kintree_path, 'rb') as fh:
                kt = pkl.load(fh)
            # kt[0][1:] are child joints, kt[1][1:] their parents.
            neighbor_link = [(k, kt[1][(i + 1)]) for (i, k) in enumerate(kt[0][1:])]
            self.edge = (self_link + neighbor_link)
            self.center = 0
        elif layout == 'smpl_noglobal':
            self.num_node = 23
            self_link = [(i, i) for i in range(self.num_node)]
            with open(self.kintree_path, 'rb') as fh:
                kt = pkl.load(fh)
            neighbor_link = [(k, kt[1][(i + 1)]) for (i, k) in enumerate(kt[0][1:])]
            # Drop edges touching the global root (joint 0), then shift indices down.
            neighbor_1base = [n for n in neighbor_link if ((n[0] != 0) and (n[1] != 0))]
            neighbor_link = [((i - 1), (j - 1)) for (i, j) in neighbor_1base]
            self.edge = (self_link + neighbor_link)
            self.center = 0
        elif layout == 'ntu-rgb+d':
            self.num_node = 25
            self_link = [(i, i) for i in range(self.num_node)]
            neighbor_1base = [(1, 2), (2, 21), (3, 21), (4, 3), (5, 21), (6, 5), (7, 6), (8, 7), (9, 21), (10, 9), (11, 10), (12, 11), (13, 1), (14, 13), (15, 14), (16, 15), (17, 1), (18, 17), (19, 18), (20, 19), (22, 23), (23, 8), (24, 25), (25, 12)]
            neighbor_link = [((i - 1), (j - 1)) for (i, j) in neighbor_1base]
            self.edge = (self_link + neighbor_link)
            self.center = (21 - 1)
        elif layout == 'ntu_edge':
            self.num_node = 24
            self_link = [(i, i) for i in range(self.num_node)]
            neighbor_1base = [(1, 2), (3, 2), (4, 3), (5, 2), (6, 5), (7, 6), (8, 7), (9, 2), (10, 9), (11, 10), (12, 11), (13, 1), (14, 13), (15, 14), (16, 15), (17, 1), (18, 17), (19, 18), (20, 19), (21, 22), (22, 8), (23, 24), (24, 12)]
            neighbor_link = [((i - 1), (j - 1)) for (i, j) in neighbor_1base]
            self.edge = (self_link + neighbor_link)
            self.center = 2
        else:
            raise NotImplementedError('This Layout is not supported')

    def get_adjacency(self, strategy):
        """Build self.A, a stack of (V, V) normalized adjacency partitions."""
        valid_hop = range(0, (self.max_hop + 1), self.dilation)
        adjacency = np.zeros((self.num_node, self.num_node))
        for hop in valid_hop:
            adjacency[(self.hop_dis == hop)] = 1
        normalize_adjacency = normalize_digraph(adjacency)
        if strategy == 'uniform':
            # Single partition: all reachable hops share one kernel.
            A = np.zeros((1, self.num_node, self.num_node))
            A[0] = normalize_adjacency
            self.A = A
        elif strategy == 'distance':
            # One partition per hop distance.
            A = np.zeros((len(valid_hop), self.num_node, self.num_node))
            for (i, hop) in enumerate(valid_hop):
                A[i][(self.hop_dis == hop)] = normalize_adjacency[(self.hop_dis == hop)]
            self.A = A
        elif strategy == 'spatial':
            # Partition each hop into root / centripetal / centrifugal sets
            # by comparing distance-to-center of the two endpoints.
            A = []
            for hop in valid_hop:
                a_root = np.zeros((self.num_node, self.num_node))
                a_close = np.zeros((self.num_node, self.num_node))
                a_further = np.zeros((self.num_node, self.num_node))
                for i in range(self.num_node):
                    for j in range(self.num_node):
                        if self.hop_dis[(j, i)] == hop:
                            if self.hop_dis[(j, self.center)] == self.hop_dis[(i, self.center)]:
                                a_root[(j, i)] = normalize_adjacency[(j, i)]
                            elif self.hop_dis[(j, self.center)] > self.hop_dis[(i, self.center)]:
                                a_close[(j, i)] = normalize_adjacency[(j, i)]
                            else:
                                a_further[(j, i)] = normalize_adjacency[(j, i)]
                if hop == 0:
                    A.append(a_root)
                else:
                    A.append((a_root + a_close))
                    A.append(a_further)
            A = np.stack(A)
            self.A = A
        else:
            raise NotImplementedError('This Strategy is not supported')
|
class ConvTemporalGraphical(nn.Module):
    """Basic graph-convolution module.

    A 2D convolution expands the channels by the graph kernel size K, then an
    einsum contracts the joint dimension against the (K, V, V) adjacency.

    Args:
        in_channels (int): channels of the input sequence.
        out_channels (int): channels produced by the convolution.
        kernel_size (int): size of the graph convolving kernel (K).
        t_kernel_size (int): temporal kernel size. Default: 1.
        t_stride (int, optional): temporal stride. Default: 1.
        t_padding (int, optional): temporal zero-padding. Default: 0.
        t_dilation (int, optional): temporal dilation. Default: 1.
        bias (bool, optional): learnable bias. Default: True.

    Shape:
        - Input[0]: (N, in_channels, T_in, V); Input[1]: (K, V, V).
        - Output[0]: (N, out_channels, T_out, V); Output[1]: (K, V, V).
    """

    def __init__(self, in_channels, out_channels, kernel_size, t_kernel_size=1, t_stride=1, t_padding=0, t_dilation=1, bias=True):
        super().__init__()
        self.kernel_size = kernel_size
        self.conv = nn.Conv2d(
            in_channels,
            out_channels * kernel_size,
            kernel_size=(t_kernel_size, 1),
            padding=(t_padding, 0),
            stride=(t_stride, 1),
            dilation=(t_dilation, 1),
            bias=bias,
        )

    def forward(self, x, A):
        assert A.size(0) == self.kernel_size
        feat = self.conv(x)
        batch, kc, frames, nodes = feat.size()
        # Split channels into (K, C) and contract joints against A.
        feat = feat.view(batch, self.kernel_size, kc // self.kernel_size, frames, nodes)
        out = torch.einsum('nkctv,kvw->nctw', (feat, A))
        return out.contiguous(), A
|
def get_hop_distance(num_node, edge, max_hop=1):
    """Hop distance (capped at max_hop) between all node pairs; inf if unreachable.

    Reachability at exactly d hops is read off the d-th power of the
    (symmetrized) adjacency matrix; assigning from max_hop down to 0 keeps
    the smallest hop count per pair.
    """
    adj = np.zeros((num_node, num_node))
    for i, j in edge:
        adj[j, i] = 1
        adj[i, j] = 1
    reach = np.stack([np.linalg.matrix_power(adj, d) for d in range(max_hop + 1)]) > 0
    hop_dis = np.full((num_node, num_node), np.inf)
    for d in reversed(range(max_hop + 1)):
        hop_dis[reach[d]] = d
    return hop_dis
|
def normalize_digraph(A):
    """Right-normalize an adjacency matrix: A @ D^{-1}.

    Columns with zero in-degree are left as zero (no division by zero).
    """
    degree = np.sum(A, 0)
    n = A.shape[0]
    inv_degree = np.zeros((n, n))
    for node in range(n):
        if degree[node] > 0:
            inv_degree[node, node] = degree[node] ** (-1)
    return np.dot(A, inv_degree)
|
def normalize_undigraph(A):
    """Symmetric normalization of an adjacency matrix: D^{-1/2} A D^{-1/2}.

    Nodes with zero degree contribute zero rows/columns (no division by zero).
    """
    degree = np.sum(A, 0)
    n = A.shape[0]
    inv_sqrt = np.zeros((n, n))
    for node in range(n):
        if degree[node] > 0:
            inv_sqrt[node, node] = degree[node] ** (-0.5)
    return np.dot(np.dot(inv_sqrt, A), inv_sqrt)
|
class VPosert(nn.Module):
    """VPoser-style VAE over flattened motion features (196 frames x 263 dims).

    encode() maps features to a latent sample plus its diagonal-Gaussian
    posterior; decode() maps the latent sample back to feature space.
    """

    def __init__(self, cfg, **kwargs) -> None:
        super(VPosert, self).__init__()
        num_neurons = 512
        self.latentD = 256
        # Flattened input size: 196 frames x 263 features per frame.
        n_features = (196 * 263)
        self.encoder_net = nn.Sequential(
            BatchFlatten(),
            nn.BatchNorm1d(n_features),
            nn.Linear(n_features, num_neurons),
            nn.LeakyReLU(),
            nn.BatchNorm1d(num_neurons),
            nn.Dropout(0.1),
            nn.Linear(num_neurons, num_neurons),
            nn.Linear(num_neurons, num_neurons),
            NormalDistDecoder(num_neurons, self.latentD),
        )
        self.decoder_net = nn.Sequential(
            nn.Linear(self.latentD, num_neurons),
            nn.LeakyReLU(),
            nn.Dropout(0.1),
            nn.Linear(num_neurons, num_neurons),
            nn.LeakyReLU(),
            nn.Linear(num_neurons, n_features),
            ContinousRotReprDecoder(),
        )

    def forward(self, features: Tensor, lengths: Optional[List[int]] = None):
        """Encode then reconstruct. Returns (reconstruction, posterior Normal)."""
        # BUGFIX: encode() returns (sample, distribution); the original passed
        # the whole tuple to decode(), which immediately accessed Zin.shape on
        # a tuple and raised AttributeError. decode() expects the sample.
        q_z_sample, q_z = self.encode(features)
        feats_rst = self.decode(q_z_sample)
        return (feats_rst, q_z)

    def encode(self, pose_body, lengths: Optional[List[int]] = None):
        """Return (latent sample with a leading singleton dim, posterior Normal).

        :param pose_body: batch of flattened features, shape (N, 196*263)
            after BatchFlatten — TODO confirm against callers.
        """
        q_z = self.encoder_net(pose_body)
        q_z_sample = q_z.rsample()
        return (q_z_sample.unsqueeze(0), q_z)

    def decode(self, Zin, lengths: Optional[List[int]] = None):
        """Decode a latent sample shaped (1, N, latentD) back to features."""
        # Drop the singleton dimension added by encode().
        Zin = Zin[0]
        return self.decoder_net(Zin)
|
class BatchFlatten(nn.Module):
    """Flatten every dimension of the input except the batch dimension."""

    def __init__(self):
        super(BatchFlatten, self).__init__()
        self._name = 'batch_flatten'

    def forward(self, x):
        batch = x.shape[0]
        return x.view(batch, -1)
|
class ContinousRotReprDecoder(nn.Module):
    """Reshape a flat feature vector into (batch, 196, 263) motion features."""

    def __init__(self):
        super(ContinousRotReprDecoder, self).__init__()

    def forward(self, module_input):
        # Infer the batch size from the remaining elements.
        return module_input.view(-1, 196, 263)
|
class NormalDistDecoder(nn.Module):
    """Map features to a diagonal Normal via two linear heads.

    The scale head goes through softplus so the standard deviation is
    strictly positive.
    """

    def __init__(self, num_feat_in, latentD):
        super(NormalDistDecoder, self).__init__()
        self.mu = nn.Linear(num_feat_in, latentD)
        self.logvar = nn.Linear(num_feat_in, latentD)

    def forward(self, Xout):
        loc = self.mu(Xout)
        scale = F.softplus(self.logvar(Xout))
        return torch.distributions.normal.Normal(loc, scale)
|
def get_model(cfg, datamodule, phase='train'):
    """Instantiate the model named by cfg.model.model_type ('mld' only).

    Raises:
        ValueError: for any model type other than 'mld'.
    """
    modeltype = cfg.model.model_type
    # Guard clause: only 'mld' is supported.
    if modeltype != 'mld':
        raise ValueError(f'Invalid model type {modeltype}.')
    return get_module(cfg, datamodule)
|
def get_module(cfg, datamodule):
    """Import mld.models.modeltype.<model_type> and instantiate its model class.

    The class is expected to be named as the upper-cased model type
    (e.g. model_type 'mld' -> class 'MLD').
    """
    modeltype = cfg.model.model_type
    model_module = importlib.import_module(
        f'.modeltype.{modeltype}', package='mld.models')
    # getattr is the idiomatic spelling of __getattribute__ for dynamic lookup.
    Model = getattr(model_module, modeltype.upper())
    return Model(cfg=cfg, datamodule=datamodule)
|
class ACTORLosses(Metric):
    """Loss collection for the ACTOR training stage.

    Each named loss keeps a running sum in a registered buffer plus an update
    count, so compute() can return per-batch averages. update() returns the
    weighted total for backpropagation.
    """

    def __init__(self, vae, mode, cfg):
        super().__init__(dist_sync_on_step=cfg.LOSS.DIST_SYNC_ON_STEP)
        self.vae = vae
        self.mode = mode
        losses = [
            'recons_feature',
            'recons_verts',
            'recons_joints',
            'recons_limb',
            'latent_st2sm',
            'kl_motion',
            'total',
        ]
        for loss in losses:
            self.register_buffer(loss, torch.tensor(0.0))
        self.register_buffer('count', torch.tensor(0))
        self.losses = losses

        # Per-loss function and weight, keyed by the loss name's prefix.
        self._losses_func = {}
        self._params = {}
        for loss in losses:
            if loss == 'total':
                continue  # 'total' is only an accumulator, not a weighted term
            prefix = loss.split('_')[0]
            if prefix == 'kl':
                self._losses_func[loss] = KLLoss()
                self._params[loss] = cfg.LOSS.LAMBDA_KL
            elif prefix == 'recons':
                self._losses_func[loss] = torch.nn.SmoothL1Loss(reduction='mean')
                self._params[loss] = cfg.LOSS.LAMBDA_REC
            elif prefix == 'cross':
                self._losses_func[loss] = torch.nn.SmoothL1Loss(reduction='mean')
                self._params[loss] = cfg.LOSS.LAMBDA_CROSS
            elif prefix == 'latent':
                self._losses_func[loss] = torch.nn.SmoothL1Loss(reduction='mean')
                self._params[loss] = cfg.LOSS.LAMBDA_LATENT
            elif prefix == 'cycle':
                self._losses_func[loss] = torch.nn.SmoothL1Loss(reduction='mean')
                self._params[loss] = cfg.LOSS.LAMBDA_CYCLE
            else:
                # BUGFIX: the ValueError was previously constructed but never
                # raised, silently skipping unknown loss names.
                raise ValueError('This loss is not recognized.')

    def update(self, rs_set, dist_ref):
        """Accumulate the weighted losses for one batch; returns the total."""
        total: float = 0.0
        total += self._update_loss('recons_feature', rs_set['m_rst'], rs_set['m_ref'])
        total += self._update_loss('kl_motion', rs_set['dist_m'], dist_ref)
        self.total += total.detach()
        self.count += 1
        return total

    def compute(self, split):
        """Return the running average of every tracked loss."""
        count = getattr(self, 'count')
        return {loss: (getattr(self, loss) / count) for loss in self.losses}

    def _update_loss(self, loss: str, outputs, inputs):
        """Accumulate one loss term into its buffer; return the weighted value."""
        val = self._losses_func[loss](outputs, inputs)
        # In-place add into the registered buffer of the same name.
        getattr(self, loss).__iadd__(val.detach())
        weighted_loss = (self._params[loss] * val)
        return weighted_loss

    def loss2logname(self, loss: str, split: str):
        """Map a loss key to its log name, e.g. 'kl_motion' -> 'kl/motion/train'."""
        if loss == 'total':
            log_name = f'{loss}/{split}'
        else:
            (loss_type, name) = loss.split('_')
            log_name = f'{loss_type}/{name}/{split}'
        return log_name
|
class KLLoss():
    """KL divergence between two torch distributions, averaged over elements."""

    def __init__(self):
        pass

    def __call__(self, q, p):
        return torch.distributions.kl_divergence(q, p).mean()

    def __repr__(self):
        return 'KLLoss()'
|
class KLLossMulti():
    """Sum of pairwise KL divergences over two aligned lists of distributions."""

    def __init__(self):
        self.klloss = KLLoss()

    def __call__(self, qlist, plist):
        total = 0
        for q, p in zip(qlist, plist):
            total += self.klloss(q, p)
        return total

    def __repr__(self):
        return 'KLLossMulti()'
|
class KLLoss():
    """Mean KL divergence between distribution q and distribution p."""

    def __init__(self):
        pass

    def __call__(self, q, p):
        divergence = torch.distributions.kl_divergence(q, p)
        return divergence.mean()

    def __repr__(self):
        return 'KLLoss()'
|
class KLLossMulti():
    """Aggregate KL loss: sums KLLoss over paired (q, p) distributions."""

    def __init__(self):
        self.klloss = KLLoss()

    def __call__(self, qlist, plist):
        return sum(self.klloss(q, p) for q, p in zip(qlist, plist))

    def __repr__(self):
        return 'KLLossMulti()'
|
class MLDLosses(Metric):
    """MLD losses for the 'vae', 'diffusion' and 'vae_diffusion' stages.

    Each named loss keeps a running sum as torchmetrics state; compute()
    returns per-batch averages and update() returns the weighted total.
    """

    def __init__(self, vae, mode, cfg):
        super().__init__(dist_sync_on_step=cfg.LOSS.DIST_SYNC_ON_STEP)
        self.vae_type = cfg.TRAIN.ABLATION.VAE_TYPE
        self.mode = mode
        self.cfg = cfg
        self.predict_epsilon = cfg.TRAIN.ABLATION.PREDICT_EPSILON
        self.stage = cfg.TRAIN.STAGE
        if self.stage not in ['vae', 'diffusion', 'vae_diffusion']:
            raise ValueError(f'Stage {self.stage} not supported')

        losses = []
        if self.stage in ['diffusion', 'vae_diffusion']:
            losses.append('inst_loss')
            losses.append('x_loss')
            if self.cfg.LOSS.LAMBDA_PRIOR != 0.0:
                losses.append('prior_loss')
        if self.stage in ['vae', 'vae_diffusion']:
            losses.extend([
                'recons_feature',
                'recons_verts',
                'recons_joints',
                'recons_limb',
                'gen_feature',
                'gen_joints',
                'kl_motion',
            ])
        losses.append('total')

        for loss in losses:
            self.add_state(loss, default=torch.tensor(0.0), dist_reduce_fx='sum')
        self.add_state('count', torch.tensor(0), dist_reduce_fx='sum')
        self.losses = losses

        # BUGFIX: the original had the dispatch broken into two separate
        # if/elif chains (the 'prior' elif was followed by a fresh `if` for
        # 'kl'), so 'inst'/'x'/'prior'/'total' also fell into the second
        # chain's else, which built a ValueError it never raised. Merged into
        # a single chain with an explicit raise for unknown names.
        self._losses_func = {}
        self._params = {}
        for loss in losses:
            if loss == 'total':
                continue  # accumulator only, no function/weight
            prefix = loss.split('_')[0]
            if prefix in ('inst', 'x'):
                self._losses_func[loss] = nn.MSELoss(reduction='mean')
                self._params[loss] = 1
            elif prefix == 'prior':
                self._losses_func[loss] = nn.MSELoss(reduction='mean')
                self._params[loss] = cfg.LOSS.LAMBDA_PRIOR
            elif prefix == 'kl':
                # NOTE: when LAMBDA_KL == 0 no KL function is registered
                # (matches the original behaviour); update() would then fail
                # with a KeyError if it tried to use 'kl_motion'.
                if cfg.LOSS.LAMBDA_KL != 0.0:
                    self._losses_func[loss] = KLLoss()
                    self._params[loss] = cfg.LOSS.LAMBDA_KL
            elif prefix == 'recons':
                self._losses_func[loss] = torch.nn.SmoothL1Loss(reduction='mean')
                self._params[loss] = cfg.LOSS.LAMBDA_REC
            elif prefix == 'gen':
                self._losses_func[loss] = torch.nn.SmoothL1Loss(reduction='mean')
                self._params[loss] = cfg.LOSS.LAMBDA_GEN
            elif prefix == 'latent':
                self._losses_func[loss] = torch.nn.SmoothL1Loss(reduction='mean')
                self._params[loss] = cfg.LOSS.LAMBDA_LATENT
            else:
                raise ValueError('This loss is not recognized.')
            # Joint-space losses share a dedicated weight regardless of prefix.
            if loss.split('_')[-1] == 'joints':
                self._params[loss] = cfg.LOSS.LAMBDA_JOINT

    def update(self, rs_set):
        """Accumulate the stage-appropriate weighted losses; returns the total."""
        total: float = 0.0
        if self.stage in ['vae', 'vae_diffusion']:
            total += self._update_loss('recons_feature', rs_set['m_rst'], rs_set['m_ref'])
            total += self._update_loss('recons_joints', rs_set['joints_rst'], rs_set['joints_ref'])
            total += self._update_loss('kl_motion', rs_set['dist_m'], rs_set['dist_ref'])
        if self.stage in ['diffusion', 'vae_diffusion']:
            if self.predict_epsilon:
                # Noise-prediction objective at three sampled timesteps.
                total += self._update_loss('inst_loss', rs_set['noise_pred_1'], rs_set['noise_1'])
                total += self._update_loss('inst_loss', rs_set['noise_pred_2'], rs_set['noise_2'])
                total += self._update_loss('inst_loss', rs_set['noise_pred_3'], rs_set['noise_3'])
            else:
                # Predict the latent (x0) directly.
                total += self._update_loss('x_loss', rs_set['pred'], rs_set['latent'])
            if self.cfg.LOSS.LAMBDA_PRIOR != 0.0:
                total += self._update_loss('prior_loss', rs_set['noise_prior'], rs_set['dist_m1'])
        if self.stage in ['vae_diffusion']:
            total += self._update_loss('gen_feature', rs_set['gen_m_rst'], rs_set['m_ref'])
            total += self._update_loss('gen_joints', rs_set['gen_joints_rst'], rs_set['joints_ref'])
        self.total += total.detach()
        self.count += 1
        return total

    def compute(self, split):
        """Return the running average of every tracked loss."""
        count = getattr(self, 'count')
        return {loss: (getattr(self, loss) / count) for loss in self.losses}

    def _update_loss(self, loss: str, outputs, inputs):
        """Accumulate one loss term into its state; return the weighted value."""
        val = self._losses_func[loss](outputs, inputs)
        getattr(self, loss).__iadd__(val.detach())
        weighted_loss = (self._params[loss] * val)
        return weighted_loss

    def loss2logname(self, loss: str, split: str):
        """Map a loss key to its log name, e.g. 'kl_motion' -> 'kl/motion/train'."""
        if loss == 'total':
            log_name = f'{loss}/{split}'
        else:
            (loss_type, name) = loss.split('_')
            log_name = f'{loss_type}/{name}/{split}'
        return log_name
|
class KLLoss():
    """Callable computing the mean KL divergence KL(q || p)."""

    def __init__(self):
        pass

    def __call__(self, q, p):
        kl = torch.distributions.kl_divergence(q, p)
        return kl.mean()

    def __repr__(self):
        return 'KLLoss()'
|
class KLLossMulti():
    """Sum KLLoss over two aligned sequences of distributions."""

    def __init__(self):
        self.klloss = KLLoss()

    def __call__(self, qlist, plist):
        pair_losses = [self.klloss(q, p) for q, p in zip(qlist, plist)]
        return sum(pair_losses)

    def __repr__(self):
        return 'KLLossMulti()'
|
class TemosLosses(Metric):
    """TEMOS-style loss collection: reconstruction + KL + latent-manifold terms.

    Each named loss keeps a running sum in a registered buffer plus an update
    count, so compute() can return per-batch averages. update() returns the
    weighted total for backpropagation.
    """

    def __init__(self, vae, mode, cfg):
        super().__init__(dist_sync_on_step=cfg.LOSS.DIST_SYNC_ON_STEP)
        self.vae = vae
        self.mode = mode
        # Ablation switches (hard-coded, matching the original configuration).
        loss_on_both = False
        force_loss_on_jfeats = True
        ablation_no_kl_combine = False
        ablation_no_kl_gaussian = False
        ablation_no_motionencoder = False
        self.loss_on_both = loss_on_both
        # BUGFIX: update() reads self.force_loss_on_jfeats, but the flag was
        # only ever a local variable here, so the first update() call raised
        # AttributeError.
        self.force_loss_on_jfeats = force_loss_on_jfeats
        self.ablation_no_kl_combine = ablation_no_kl_combine
        self.ablation_no_kl_gaussian = ablation_no_kl_gaussian
        self.ablation_no_motionencoder = ablation_no_motionencoder

        losses = []
        if (mode == 'xyz') or force_loss_on_jfeats:
            if not ablation_no_motionencoder:
                losses.append('recons_jfeats2jfeats')
            losses.append('recons_text2jfeats')
        if mode == 'smpl':
            if not ablation_no_motionencoder:
                losses.append('recons_rfeats2rfeats')
            losses.append('recons_text2rfeats')
        elif mode != 'xyz':
            # BUGFIX: previously the ValueError was constructed but never
            # raised; the raise is guarded so the valid 'xyz' mode still works.
            raise ValueError('This mode is not recognized.')
        if vae or loss_on_both:
            kl_losses = []
            if (not ablation_no_kl_combine) and (not ablation_no_motionencoder):
                kl_losses.extend(['kl_text2motion', 'kl_motion2text'])
            if not ablation_no_kl_gaussian:
                if ablation_no_motionencoder:
                    kl_losses.extend(['kl_text'])
                else:
                    kl_losses.extend(['kl_text', 'kl_motion'])
            losses.extend(kl_losses)
        if (not self.vae) or loss_on_both:
            if not ablation_no_motionencoder:
                losses.append('latent_manifold')
        losses.append('total')

        for loss in losses:
            self.register_buffer(loss, torch.tensor(0.0))
        self.register_buffer('count', torch.tensor(0))
        self.losses = losses

        self._losses_func = {}
        self._params = {}
        for loss in losses:
            if loss == 'total':
                continue  # accumulator only, no function/weight
            prefix = loss.split('_')[0]
            if prefix == 'kl':
                self._losses_func[loss] = KLLoss()
                self._params[loss] = cfg.LOSS.LAMBDA_KL
            elif prefix == 'recons':
                self._losses_func[loss] = torch.nn.SmoothL1Loss(reduction='mean')
                self._params[loss] = cfg.LOSS.LAMBDA_REC
            elif prefix == 'latent':
                self._losses_func[loss] = torch.nn.SmoothL1Loss(reduction='mean')
                self._params[loss] = cfg.LOSS.LAMBDA_LATENT
            elif prefix == 'cycle':
                self._losses_func[loss] = torch.nn.SmoothL1Loss(reduction='mean')
                self._params[loss] = cfg.LOSS.LAMBDA_CYCLE
            else:
                # BUGFIX: previously constructed but never raised.
                raise ValueError('This loss is not recognized.')

    def update(self, f_text=None, f_motion=None, f_ref=None, lat_text=None, lat_motion=None, dis_text=None, dis_motion=None, dis_ref=None):
        """Accumulate the active weighted loss terms; returns the total."""
        total: float = 0.0
        if (self.mode == 'xyz') or self.force_loss_on_jfeats:
            if not self.ablation_no_motionencoder:
                total += self._update_loss('recons_jfeats2jfeats', f_motion, f_ref)
            total += self._update_loss('recons_text2jfeats', f_text, f_ref)
        if self.mode == 'smpl':
            if not self.ablation_no_motionencoder:
                total += self._update_loss('recons_rfeats2rfeats', f_motion.rfeats, f_ref.rfeats)
            total += self._update_loss('recons_text2rfeats', f_text.rfeats, f_ref.rfeats)
        if self.vae or self.loss_on_both:
            if (not self.ablation_no_kl_combine) and (not self.ablation_no_motionencoder):
                total += self._update_loss('kl_text2motion', dis_text, dis_motion)
                total += self._update_loss('kl_motion2text', dis_motion, dis_text)
            if not self.ablation_no_kl_gaussian:
                total += self._update_loss('kl_text', dis_text, dis_ref)
                if not self.ablation_no_motionencoder:
                    total += self._update_loss('kl_motion', dis_motion, dis_ref)
        if (not self.vae) or self.loss_on_both:
            if not self.ablation_no_motionencoder:
                total += self._update_loss('latent_manifold', lat_text, lat_motion)
        self.total += total.detach()
        self.count += 1
        return total

    def compute(self, split):
        """Return the running average of every tracked loss."""
        count = getattr(self, 'count')
        return {loss: (getattr(self, loss) / count) for loss in self.losses}

    def _update_loss(self, loss: str, outputs, inputs):
        """Accumulate one loss term into its buffer; return the weighted value."""
        val = self._losses_func[loss](outputs, inputs)
        getattr(self, loss).__iadd__(val.detach())
        weighted_loss = (self._params[loss] * val)
        return weighted_loss

    def loss2logname(self, loss: str, split: str):
        """Map a loss key to its log name, e.g. 'kl_motion' -> 'kl/motion/train'."""
        if loss == 'total':
            log_name = f'{loss}/{split}'
        else:
            (loss_type, name) = loss.split('_')
            log_name = f'{loss_type}/{name}/{split}'
        return log_name
|
class KLLoss():
    """Stateless callable: mean KL divergence between q and p."""

    def __init__(self):
        pass

    def __call__(self, q, p):
        return torch.distributions.kl_divergence(q, p).mean()

    def __repr__(self):
        return 'KLLoss()'
|
class KLLossMulti():
    """Total KL over paired lists of distributions, one KLLoss per pair."""

    def __init__(self):
        self.klloss = KLLoss()

    def __call__(self, qlist, plist):
        acc = 0
        for pair in zip(qlist, plist):
            acc += self.klloss(*pair)
        return acc

    def __repr__(self):
        return 'KLLossMulti()'
|
class TmostLosses(Metric):
    """TMOST loss collection: reconstruction, cross-modal, cycle, latent and KL terms.

    Naming convention in update(): c* = content code, s* = style code,
    m = motion, t = text (e.g. 'rs_cm1st1' = reconstruction from motion
    content 1 + text style 1).
    """

    def __init__(self, vae, mode, cfg):
        super().__init__(dist_sync_on_step=cfg.LOSS.DIST_SYNC_ON_STEP)
        self.vae = vae
        self.mode = mode
        losses = [
            'recons_mm2m',
            'recons_t2m',
            'cross_mt2m',
            'cross_tm2m',
            'cycle_cmsm2mContent',
            'cycle_cmsm2mStyle',
            'latent_ct2cm',
            'latent_st2sm',
            'kl_motion',
            'kl_text',
            'kl_ct2cm',
            'kl_cm2ct',
            'total',
        ]
        for loss in losses:
            self.register_buffer(loss, torch.tensor(0.0))
        self.register_buffer('count', torch.tensor(0))
        self.losses = losses
        self.ablation_cycle = cfg.TRAIN.ABLATION.CYCLE

        self._losses_func = {}
        self._params = {}
        for loss in losses:
            if loss == 'total':
                continue  # accumulator only, no function/weight
            prefix = loss.split('_')[0]
            if prefix == 'kl':
                self._losses_func[loss] = KLLoss()
                self._params[loss] = cfg.LOSS.LAMBDA_KL
            elif prefix == 'recons':
                self._losses_func[loss] = torch.nn.SmoothL1Loss(reduction='mean')
                self._params[loss] = cfg.LOSS.LAMBDA_REC
            elif prefix == 'cross':
                self._losses_func[loss] = torch.nn.SmoothL1Loss(reduction='mean')
                self._params[loss] = cfg.LOSS.LAMBDA_CROSS
            elif prefix == 'latent':
                self._losses_func[loss] = torch.nn.SmoothL1Loss(reduction='mean')
                self._params[loss] = cfg.LOSS.LAMBDA_LATENT
            elif prefix == 'cycle':
                self._losses_func[loss] = torch.nn.SmoothL1Loss(reduction='mean')
                self._params[loss] = cfg.LOSS.LAMBDA_CYCLE
            else:
                # BUGFIX: previously constructed but never raised.
                raise ValueError('This loss is not recognized.')

    def update(self, rs_set, dist_ref):
        """Accumulate all weighted loss terms for one batch; returns the total."""
        total: float = 0.0
        # Reconstruction: same-modality and text-to-motion.
        total += self._update_loss('recons_mm2m', rs_set['rs_cm1sm1'], rs_set['m1'])
        total += self._update_loss('recons_t2m', rs_set['rs_ct1st1'], rs_set['m1'])
        # Cross-modal content/style swaps.
        total += self._update_loss('cross_mt2m', rs_set['rs_cm1st1'], rs_set['m1'])
        total += self._update_loss('cross_tm2m', rs_set['rs_ct1sm1'], rs_set['m1'])
        if self.ablation_cycle:
            # Cycle consistency for content and style.
            total += self._update_loss('cycle_cmsm2mContent', rs_set['cyc_rs_cm1sm1'], rs_set['m1'])
            total += self._update_loss('cycle_cmsm2mStyle', rs_set['cyc_rs_cm2sm2'], rs_set['m2'])
        # Latent alignment between text and motion codes.
        total += self._update_loss('latent_ct2cm', rs_set['lat_ct1'], rs_set['lat_cm1'])
        total += self._update_loss('latent_st2sm', rs_set['lat_st1'], rs_set['lat_sm1'])
        # KL to the reference prior and between the two content posteriors.
        total += self._update_loss('kl_motion', rs_set['dist_cm1'], dist_ref)
        total += self._update_loss('kl_text', rs_set['dist_ct1'], dist_ref)
        total += self._update_loss('kl_ct2cm', rs_set['dist_ct1'], rs_set['dist_cm1'])
        total += self._update_loss('kl_cm2ct', rs_set['dist_cm1'], rs_set['dist_ct1'])
        self.total += total.detach()
        self.count += 1
        return total

    def compute(self, split):
        """Return the running average of every tracked loss."""
        count = getattr(self, 'count')
        return {loss: (getattr(self, loss) / count) for loss in self.losses}

    def _update_loss(self, loss: str, outputs, inputs):
        """Accumulate one loss term into its buffer; return the weighted value."""
        val = self._losses_func[loss](outputs, inputs)
        getattr(self, loss).__iadd__(val.detach())
        weighted_loss = (self._params[loss] * val)
        return weighted_loss

    def loss2logname(self, loss: str, split: str):
        """Map a loss key to its log name, e.g. 'kl_motion' -> 'kl/motion/train'."""
        if loss == 'total':
            log_name = f'{loss}/{split}'
        else:
            (loss_type, name) = loss.split('_')
            log_name = f'{loss_type}/{name}/{split}'
        return log_name
|
class KLLoss():
    """Mean KL divergence KL(q || p) between two torch distributions."""

    def __init__(self):
        pass

    def __call__(self, q, p):
        divergence = torch.distributions.kl_divergence(q, p)
        return divergence.mean()

    def __repr__(self):
        return 'KLLoss()'
|
class KLLossMulti():
    """Sum of elementwise KL losses over two aligned distribution lists."""

    def __init__(self):
        self.klloss = KLLoss()

    def __call__(self, qlist, plist):
        return sum(self.klloss(q, p) for q, p in zip(qlist, plist))

    def __repr__(self):
        return 'KLLossMulti()'
|
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.