code
stringlengths
17
6.64M
def load_json(file_path):
    """Read the JSON document at *file_path* and return the parsed object."""
    with open(file_path, 'r') as fh:
        return json.load(fh)
def process(graph):
    """Convert one SRL (semantic-role-labelling) parse into entities/relations.

    Each entry of ``graph['verbs']`` has a bracketed ``description`` such as
    ``"[ARG0: the man] [V: walks]"``.  Every bracketed span becomes an entity
    ``{'role', 'spans', 'words'}``; each non-verb role of a verb yields a
    relation ``(verb_id, entity_id, role)``.

    Bug fixed: ``tags`` and ``verb`` were re-initialised for every *word*, so
    every entity except the last one of each verb was silently dropped from
    ``relations``.  They are now reset once per verb entry.
    """
    entities, relations = {}, []
    for verb_entry in graph['verbs']:
        description = verb_entry['description']
        pos = 0            # running word position inside the description
        flag = 0           # 1 while we are inside a bracketed [ROLE: ...] span
        _words, _spans = [], []
        tags = {}          # entity id -> role, accumulated for this verb
        verb = 0           # entity id of this verb's [V: ...] span
        for token in description.split():
            if '[' in token:
                # Opening token, e.g. "[ARG0:" -> role "ARG0".
                _role = token[1:-1]
                flag = 1
                _spans = [pos]
                _words = []
            elif ']' in token:
                # Closing token finishes the current entity.
                _words.append(token[:-1])
                entities[len(entities)] = {'role': _role, 'spans': _spans, 'words': _words}
                pos += 1
                flag = 0
                # NOTE: these ids use len(entities) *after* insertion
                # (entity index + 1); kept as-is for compatibility.
                if _role != 'V':
                    tags[len(entities)] = _role
                else:
                    verb = len(entities)
            else:
                pos += 1
                if flag:
                    _words.append(token)
                    _spans.append(pos)
        for ent_id in tags:
            relations.append((verb, ent_id, tags[ent_id]))
    return {'entities': entities, 'relations': relations}
class WordVectorizer(object):
    """Maps ``'word/POS'`` items to ``(word embedding, POS one-hot)`` pairs.

    Embeddings come from ``<prefix>_data.npy`` indexed through
    ``<prefix>_idx.pkl``; the vocabulary comes from ``<prefix>_words.pkl``.
    Unknown words fall back to the 'unk' vector and the 'OTHER' POS tag.

    Fix: the pickle files were opened with ``pickle.load(open(...))`` and
    never closed; they are now read inside ``with`` blocks.
    """

    def __init__(self, meta_root, prefix):
        vectors = np.load(pjoin(meta_root, '%s_data.npy' % prefix))
        with open(pjoin(meta_root, '%s_words.pkl' % prefix), 'rb') as fh:
            words = pickle.load(fh)
        with open(pjoin(meta_root, '%s_idx.pkl' % prefix), 'rb') as fh:
            word2idx = pickle.load(fh)
        self.word2vec = {w: vectors[word2idx[w]] for w in words}

    def _get_pos_ohot(self, pos):
        """One-hot encode *pos*; unknown tags map to the 'OTHER' slot."""
        pos_vec = np.zeros(len(POS_enumerator))
        if pos in POS_enumerator:
            pos_vec[POS_enumerator[pos]] = 1
        else:
            pos_vec[POS_enumerator['OTHER']] = 1
        return pos_vec

    def __len__(self):
        return len(self.word2vec)

    def __getitem__(self, item):
        # *item* is formatted as 'word/POS'.
        word, pos = item.split('/')
        if word in self.word2vec:
            word_vec = self.word2vec[word]
            # VIP words get a privileged POS category regardless of the tagger.
            vip_pos = None
            for key, values in VIP_dict.items():
                if word in values:
                    vip_pos = key
                    break
            pos_vec = self._get_pos_ohot(vip_pos if vip_pos is not None else pos)
        else:
            word_vec = self.word2vec['unk']
            pos_vec = self._get_pos_ohot('OTHER')
        return word_vec, pos_vec
class FrameSampler():
    """Chooses frame indices from a motion clip and filters clips by duration."""

    def __init__(self, sampling='conseq', sampling_step=1, request_frames=None,
                 threshold_reject=0.75, max_len=1000, min_len=10):
        self.sampling = sampling
        self.sampling_step = sampling_step
        self.request_frames = request_frames
        self.threshold_reject = threshold_reject
        self.max_len = max_len
        self.min_len = min_len

    def __call__(self, num_frames):
        """Return the frame indices selected for a clip of *num_frames* frames."""
        return get_frameix_from_data_index(num_frames, self.request_frames,
                                           self.sampling, self.sampling_step)

    def accept(self, duration):
        """Decide whether a clip of *duration* frames should be kept."""
        if self.request_frames is None:
            # No fixed length requested: keep clips within [min_len, max_len].
            return self.min_len <= duration <= self.max_len
        # Fixed length requested: reject clips shorter than the threshold share.
        return duration >= self.threshold_reject * self.request_frames

    def get(self, key, default=None):
        """Dict-style attribute lookup with a default."""
        return getattr(self, key, default)

    def __getitem__(self, key):
        return getattr(self, key)
def subsample(num_frames, last_framerate, new_framerate):
    """Frame indices that downsample a clip from *last_framerate* to *new_framerate*."""
    step = int(last_framerate / new_framerate)
    assert step >= 1
    return np.arange(0, num_frames, step)
def upsample(motion, last_framerate, new_framerate):
    """Linearly interpolate *motion* (frames along axis 0) to a higher framerate."""
    step = int(new_framerate / last_framerate)
    assert step >= 1
    # Interpolation weights 0, 1/step, ..., 1 between consecutive frame pairs.
    alpha = np.linspace(0, 1, step + 1)
    # Blend each consecutive pair: (1 - a) * frame[i] + a * frame[i + 1].
    weighted_prev = np.einsum('l,...->l...', 1 - alpha, motion[:-1])
    weighted_next = np.einsum('l,...->l...', alpha, motion[1:])
    # Drop each segment's duplicated endpoint, then flatten segments in order.
    segments = (weighted_prev + weighted_next)[:-1]
    upsampled = np.concatenate(segments.swapaxes(1, 0))
    # Re-append the final original frame that the segment loop leaves out.
    return np.concatenate((upsampled, motion[[-1]]))
def get_frameix_from_data_index(num_frames: int,
                                request_frames: Optional[int],
                                sampling: str = 'conseq',
                                sampling_step: int = 1) -> 'Array':
    """Choose frame indices for a clip of *num_frames* frames.

    Args:
        num_frames: total frames available in the clip.
        request_frames: desired number of frames, or None for all frames.
        sampling: 'conseq' (fixed stride), 'random_conseq' (random stride),
            or 'random' (sample without replacement).
        sampling_step: stride for 'conseq'; -1 means "largest stride that fits".

    Returns:
        Array/sequence of frame indices of length ``request_frames`` (or
        ``num_frames`` when ``request_frames`` is None).

    Raises:
        ValueError: for an unrecognized *sampling* mode.

    Cleanup: an unreachable debug branch (``fair = False; if fair: ...``) that
    shadowed the padding path was removed; behavior is unchanged.  The return
    annotation is a string so the ``Array`` alias is not needed at def time.
    """
    nframes = num_frames
    if request_frames is None:
        frame_ix = np.arange(nframes)
    elif request_frames > nframes:
        # Clip too short: pad by repeating the last frame.
        ntoadd = max(0, request_frames - nframes)
        lastframe = nframes - 1
        padding = lastframe * np.ones(ntoadd, dtype=int)
        frame_ix = np.concatenate((np.arange(0, nframes), padding))
    elif sampling in ['conseq', 'random_conseq']:
        # Largest stride such that request_frames indices still fit.
        step_max = (nframes - 1) // (request_frames - 1)
        if sampling == 'conseq':
            if sampling_step == -1 or sampling_step * (request_frames - 1) >= nframes:
                step = step_max
            else:
                step = sampling_step
        elif sampling == 'random_conseq':
            step = random.randint(1, step_max)
        lastone = step * (request_frames - 1)
        # Random start offset within the slack left by the chosen stride.
        shift_max = nframes - lastone - 1
        shift = random.randint(0, max(0, shift_max - 1))
        frame_ix = shift + np.arange(0, lastone + 1, step)
    elif sampling == 'random':
        choices = np.random.choice(range(nframes), request_frames, replace=False)
        frame_ix = sorted(choices)
    else:
        raise ValueError('Sampling not recognized.')
    return frame_ix
def lengths_to_mask(lengths, max_len=None):
    """Build a boolean padding mask from a 1-D tensor of sequence lengths.

    Args:
        lengths: 1-D integer tensor of per-sample lengths.
        max_len: width of the mask; defaults to ``max(lengths)``.  Added with
            a default so existing single-argument callers are unaffected,
            while callers that need a fixed width can pass it explicitly.

    Returns:
        Bool tensor of shape ``(len(lengths), max_len)`` where ``[i, j]`` is
        True iff ``j < lengths[i]``.
    """
    if max_len is None:
        max_len = max(lengths)
    mask = (torch.arange(max_len, device=lengths.device)
            .expand(len(lengths), max_len) < lengths.unsqueeze(1))
    return mask
def collate_tensors(batch):
    """Zero-pad a list of same-rank tensors into one stacked tensor."""
    ndims = batch[0].dim()
    padded_shape = tuple(max(t.size(d) for t in batch) for d in range(ndims))
    canvas = batch[0].new_zeros((len(batch),) + padded_shape)
    for idx, tensor in enumerate(batch):
        # Narrow the canvas slot down to this tensor's extents, then copy in.
        slot = canvas[idx]
        for d in range(ndims):
            slot = slot.narrow(d, 0, tensor.size(d))
        slot.add_(tensor)
    return canvas
def all_collate(batch):
    """Collate dataset samples into ``(motion, cond)`` for training.

    Drops ``None`` entries, pads motions to a common length, and packs the
    padding mask and lengths (plus optional 'text' / 'action_text' fields)
    into ``cond['y']``.
    """
    samples = [s for s in batch if s is not None]
    motions = [s['motion'] for s in samples]
    if 'lengths' in samples[0]:
        lengths = [s['lengths'] for s in samples]
    else:
        lengths = [len(s['inp'][0][0]) for s in samples]
    motion = collate_tensors(motions)
    lengths_t = torch.as_tensor(lengths)
    # Broadcast the mask to (bs, 1, 1, max_len) for attention-style consumers.
    mask = lengths_to_mask(lengths_t, motion.shape[-1]).unsqueeze(1).unsqueeze(1)
    cond = {'y': {'mask': mask, 'lengths': lengths_t}}
    if 'text' in samples[0]:
        cond['y']['text'] = [s['text'] for s in samples]
    if 'action_text' in samples[0]:
        cond['y']['action_text'] = [s['action_text'] for s in samples]
    return (motion, cond)
def mld_collate(batch):
    """Collate HumanML3D-style sample tuples, sorted by caption length (desc).

    Tuple layout per sample:
      0 word embeddings, 1 POS one-hots, 2 caption, 3 caption length,
      4 motion, 5 motion length, 6 tokens, 7 V, 8 entities, 9 relations.
    """
    samples = [s for s in batch if s is not None]
    # Descending caption lengths, as packed-RNN consumers expect.
    samples.sort(key=lambda s: s[3], reverse=True)

    def _stack(index, as_float=True):
        # Pad-and-stack one tuple field as (optionally float) tensors.
        if as_float:
            tensors = [torch.tensor(s[index]).float() for s in samples]
        else:
            tensors = [torch.tensor(s[index]) for s in samples]
        return collate_tensors(tensors)

    return {
        'motion': _stack(4),
        'text': [s[2] for s in samples],
        'length': [s[5] for s in samples],
        'word_embs': _stack(0),
        'pos_ohot': _stack(1),
        'text_len': _stack(3, as_float=False),
        'tokens': [s[6] for s in samples],
        'V': [s[7] for s in samples],
        'entities': [s[8] for s in samples],
        'relations': [s[9] for s in samples],
    }
def a2m_collate(batch):
    """Collate action-to-motion ``(motion, label, _, label_text)`` samples."""
    motions = [item[0] for item in batch]
    labels = [item[1] for item in batch]
    lengths = [len(item[0][0][0]) for item in batch]
    label_texts = [item[3] for item in batch]
    motion_t = collate_tensors(motions)
    lengths_t = torch.as_tensor(lengths)
    return {
        # (bs, d1, d2, d3) -> (bs, d3, d2, d1) then flattened past dim 1.
        'motion': motion_t.permute(0, 3, 2, 1).flatten(start_dim=2),
        'action': torch.as_tensor(labels).unsqueeze(1),
        'action_text': label_texts,
        'mask': lengths_to_mask(lengths_t),
        'length': lengths_t,
    }
def parse_args(self, args=None, namespace=None):
    """argparse override: with no explicit args, parse only argv after '--'.

    Lets the script run under a host program (e.g. ``host ... -- <script
    args>``) where everything before '--' belongs to the host.
    """
    if args is not None:
        return self.parse_args_bak(args=args, namespace=namespace)
    if '--' in sys.argv:
        args = sys.argv[sys.argv.index('--') + 1:]
    else:
        # No separator present: parse nothing rather than the host's argv.
        args = []
    return self.parse_args_bak(args=args, namespace=namespace)
def code_path(path=''):
    """Resolve *path* relative to the original (pre-Hydra-chdir) code directory."""
    base = Path(hydra.utils.get_original_cwd())
    return str(base / path)
def working_path(path):
    """Return *path* joined onto the current working directory, as a string."""
    cwd = Path(os.getcwd())
    return str(cwd / path)
def generate_id():
    # Returns the module-level ID constant — presumably a fixed run/experiment
    # identifier shared by all callers; TODO confirm where ID is defined.
    return ID
def get_last_checkpoint(path, ckpt_name='last.ckpt'):
    """Absolute path of ``<path>/checkpoints/<ckpt_name>`` (Hydra-resolved)."""
    run_dir = Path(hydra.utils.to_absolute_path(path))
    return str(run_dir / 'checkpoints' / ckpt_name)
def get_kitname(load_amass_data: bool, load_with_rot: bool):
    """Pick the KIT dataset variant name from the two loading flags.

    Returns 'kit-mmm-xyz' when not loading AMASS data, 'kit-amass-xyz' for
    AMASS positions only, and 'kit-amass-rot' for AMASS with rotations.
    """
    if not load_amass_data:
        return 'kit-mmm-xyz'
    # AMASS requested: the variant depends only on whether rotations are kept.
    return 'kit-amass-rot' if load_with_rot else 'kit-amass-xyz'
def resolve_cfg_path(cfg: DictConfig):
    # Record the current (Hydra-managed) working directory on the config.
    # Mutates cfg in place; returns None.
    working_dir = os.getcwd()
    cfg.working_dir = working_dir
class ActorVae(nn.Module):
    """ACTOR-style transformer VAE wrapping an agnostic encoder and decoder."""

    def __init__(self, ablation, nfeats: int, latent_dim: list = [1, 256],
                 ff_size: int = 1024, num_layers: int = 9, num_heads: int = 4,
                 dropout: float = 0.1, is_vae: bool = True,
                 activation: str = 'gelu', position_embedding: str = 'learned',
                 **kwargs) -> None:
        super().__init__()
        # latent_dim is [num_latent_tokens, token_width].
        self.latent_size = latent_dim[0]
        self.latent_dim = latent_dim[-1]
        self.is_vae = is_vae
        common = dict(latent_dim=self.latent_dim, ff_size=ff_size,
                      num_layers=num_layers, num_heads=num_heads,
                      dropout=dropout, activation=activation)
        self.encoder = ActorAgnosticEncoder(nfeats=nfeats, vae=True, **common, **kwargs)
        self.decoder = ActorAgnosticDecoder(nfeats=nfeats, vae=True, **common, **kwargs)

    def forward(self, features: Tensor, lengths: Optional[List[int]] = None):
        # Debug guard: training code is expected to call encode/decode directly.
        print('Should Not enter here')
        z, dist = self.encode(features, lengths)
        return (self.decode(z, lengths), z, dist)

    def encode(self, features: Tensor,
               lengths: Optional[List[int]] = None) -> Union[Tensor, Distribution]:
        """Encode to a latent; samples in VAE mode, else uses the token directly."""
        dist = self.encoder(features, lengths)
        if self.is_vae:
            return (sample_from_distribution(dist), dist)
        return (dist.unsqueeze(0), dist)

    def decode(self, z: Tensor, lengths: List[int]):
        """Decode latent *z* into a motion sequence padded to *lengths*."""
        return self.decoder(z, lengths)
class ActorAgnosticEncoder(nn.Module):
    """Transformer encoder pooling a motion sequence into one latent.

    In VAE mode two learned tokens (mu, logvar) are prepended and their final
    states parameterize a Normal distribution; otherwise a single learned
    embedding token is prepended and its final state is returned.
    """

    def __init__(self, nfeats: int, vae: bool, latent_dim: int = 256,
                 ff_size: int = 1024, num_layers: int = 4, num_heads: int = 4,
                 dropout: float = 0.1, activation: str = 'gelu', **kwargs) -> None:
        super().__init__()
        self.vae = vae
        self.skel_embedding = nn.Linear(nfeats, latent_dim)
        # Learned summary tokens prepended to every sequence.
        if vae:
            self.mu_token = nn.Parameter(torch.randn(latent_dim))
            self.logvar_token = nn.Parameter(torch.randn(latent_dim))
        else:
            self.emb_token = nn.Parameter(torch.randn(latent_dim))
        self.sequence_pos_encoding = PositionalEncoding(latent_dim, dropout)
        layer = nn.TransformerEncoderLayer(d_model=latent_dim, nhead=num_heads,
                                           dim_feedforward=ff_size,
                                           dropout=dropout, activation=activation)
        self.seqTransEncoder = nn.TransformerEncoder(layer, num_layers=num_layers)

    def forward(self, features: Tensor,
                lengths: Optional[List[int]] = None) -> Union[Tensor, Distribution]:
        if lengths is None:
            lengths = [len(feature) for feature in features]
        bs = features.shape[0]
        mask = lengths_to_mask(lengths, features.device)
        # Project to model width, then switch to (seq, batch, dim).
        x = self.skel_embedding(features).permute(1, 0, 2)
        if self.vae:
            mu_tok = torch.tile(self.mu_token, (bs,)).reshape(bs, -1)
            logvar_tok = torch.tile(self.logvar_token, (bs,)).reshape(bs, -1)
            xseq = torch.cat((mu_tok[None], logvar_tok[None], x), 0)
            token_mask = torch.ones((bs, 2), dtype=bool, device=x.device)
        else:
            emb_tok = torch.tile(self.emb_token, (bs,)).reshape(bs, -1)
            xseq = torch.cat((emb_tok[None], x), 0)
            token_mask = torch.ones((bs, 1), dtype=bool, device=x.device)
        aug_mask = torch.cat((token_mask, mask), 1)
        xseq = self.sequence_pos_encoding(xseq)
        final = self.seqTransEncoder(xseq, src_key_padding_mask=~aug_mask)
        if self.vae:
            mu, logvar = final[0], final[1]
            # std = exp(logvar) ** 0.5 — NOTE(review): this treats the token
            # output as log-variance; confirm against the training objective.
            std = logvar.exp().pow(0.5)
            return torch.distributions.Normal(mu, std)
        return final[0]
class ActorAgnosticDecoder(nn.Module):
    """Transformer decoder expanding latent *z* into a padded motion sequence."""

    def __init__(self, nfeats: int, latent_dim: int = 256, ff_size: int = 1024,
                 num_layers: int = 4, num_heads: int = 4, dropout: float = 0.1,
                 activation: str = 'gelu', **kwargs) -> None:
        super().__init__()
        self.latent_dim = latent_dim
        self.nfeats = nfeats
        self.sequence_pos_encoding = PositionalEncoding(latent_dim, dropout)
        layer = nn.TransformerDecoderLayer(d_model=latent_dim, nhead=num_heads,
                                           dim_feedforward=ff_size,
                                           dropout=dropout, activation=activation)
        self.seqTransDecoder = nn.TransformerDecoder(layer, num_layers=num_layers)
        self.final_layer = nn.Linear(latent_dim, nfeats)

    def forward(self, z: Tensor, lengths: List[int]):
        mask = lengths_to_mask(lengths, z.device)
        bs, nframes = mask.shape
        # Positional-encoded zero queries, one per output frame: (T, bs, D).
        queries = torch.zeros(nframes, bs, self.latent_dim, device=z.device)
        queries = self.sequence_pos_encoding(queries)
        output = self.seqTransDecoder(tgt=queries, memory=z,
                                      tgt_key_padding_mask=~mask)
        output = self.final_layer(output)
        # Zero out padded frames, then return batch-first (bs, T, nfeats).
        output[~mask.T] = 0
        return output.permute(1, 0, 2)
def sample_from_distribution(dist, *, fact=1.0, sample_mean=False) -> Tensor:
    """Draw one latent from *dist*, gaining a leading singleton dimension.

    With ``sample_mean`` the mean is returned; with ``fact=None`` a plain
    reparameterized sample is drawn; otherwise the stochastic deviation from
    the mean is rescaled by ``fact``.
    """
    if sample_mean:
        return dist.loc.unsqueeze(0)
    if fact is None:
        return dist.rsample().unsqueeze(0)
    # Scale only the stochastic part around the mean.
    deviation = dist.rsample() - dist.loc
    return (dist.loc + fact * deviation).unsqueeze(0)
class MLDTextEncoder(nn.Module):
    """Frozen HuggingFace text backbone plus transformer pooling heads.

    `forward` returns projected per-token embeddings; `global_branch` /
    `action_branch` build pooled representations using learned tokens.
    """

    def __init__(self, cfg, modelpath: str, finetune: bool=False, vae: bool=True,
                 latent_dim: int=256, ff_size: int=1024, num_layers: int=6,
                 num_heads: int=4, dropout: float=0.1, activation: str='gelu',
                 **kwargs) -> None:
        super().__init__()
        # Imported lazily so this module can be imported without transformers.
        from transformers import AutoTokenizer, AutoModel
        from transformers import logging
        logging.set_verbosity_error()
        os.environ['TOKENIZERS_PARALLELISM'] = 'false'
        self.tokenizer = AutoTokenizer.from_pretrained(modelpath)
        self.text_model = AutoModel.from_pretrained(modelpath)
        if (not finetune):
            # Freeze the language-model backbone.
            self.text_model.training = False
            for p in self.text_model.parameters():
                p.requires_grad = False
        self.text_encoded_dim = self.text_model.config.hidden_size
        # NOTE(review): the line above is immediately overwritten —
        # text_encoded_dim ends up being latent_dim, not the LM hidden size.
        self.text_encoded_dim = latent_dim
        encoded_dim = self.text_model.config.hidden_size
        self.projection = nn.Sequential(nn.ReLU(), nn.Linear(encoded_dim, latent_dim))
        # NOTE(review): hard-coded override — the `vae` argument is ignored
        # and the single-global-token branch below is always taken.
        vae = False
        if vae:
            self.mu_token = nn.Parameter(torch.randn(latent_dim))
            self.logvar_token = nn.Parameter(torch.randn(latent_dim))
        else:
            self.global_text_token = nn.Parameter(torch.randn(latent_dim))
        self.sequence_pos_encoding = PositionalEncoding(latent_dim, dropout)
        seq_trans_encoder_layer = nn.TransformerEncoderLayer(d_model=latent_dim, nhead=num_heads, dim_feedforward=ff_size, dropout=dropout, activation=activation)
        self.seqTransEncoder = nn.TransformerEncoder(seq_trans_encoder_layer, num_layers=num_layers)
        # NOTE(review): self.is_action_branch is never assigned in this class;
        # as written this attribute access raises AttributeError unless a
        # subclass/config sets it beforehand — verify before use.
        if self.is_action_branch:
            action_trans_encoder_layer = nn.TransformerEncoderLayer(d_model=latent_dim, nhead=num_heads, dim_feedforward=ff_size, dropout=dropout, activation=activation)
            self.actionTransEncoder = nn.TransformerEncoder(action_trans_encoder_layer, num_layers=num_layers)
            self.mean_token = nn.Parameter(torch.randn(latent_dim))
            self.std_token = nn.Parameter(torch.randn(latent_dim))

    def global_branch(self, x, mask):
        # Prepend learned token(s) to the (batch-first) token sequence, build
        # the augmented padding mask, and run the shared transformer encoder.
        bs = x.shape[0]
        x = x.permute(1, 0, 2)
        global_tokens = torch.tile(self.global_text_token, (bs,)).reshape(bs, (- 1))
        # NOTE(review): self.is_cross_token is likewise never set here — verify.
        if self.is_cross_token:
            mean_tokens = torch.tile(self.mean_token, (bs,)).reshape(bs, (- 1))
            std_tokens = torch.tile(self.std_token, (bs,)).reshape(bs, (- 1))
            xseq = torch.cat((mean_tokens[None], std_tokens[None], global_tokens[None], x), 0)
            token_mask = torch.ones((bs, 3), dtype=bool, device=x.device)
            aug_mask = torch.cat((token_mask, mask), 1)
        else:
            xseq = torch.cat((global_tokens[None], x), 0)
            token_mask = torch.ones((bs, 1), dtype=bool, device=x.device)
            aug_mask = torch.cat((token_mask, mask), 1)
        xseq = self.sequence_pos_encoding(xseq)
        text_tokens = self.seqTransEncoder(xseq, src_key_padding_mask=(~ aug_mask))
        return text_tokens

    def action_branch(self, x, mask):
        # Encode with mean/std tokens prepended; return those two outputs.
        bs = x.shape[0]
        mean_tokens = torch.tile(self.mean_token, (bs,)).reshape(bs, (- 1))
        std_tokens = torch.tile(self.std_token, (bs,)).reshape(bs, (- 1))
        actionSeq = torch.cat((mean_tokens[None], std_tokens[None], x), 0)
        token_mask = torch.ones((bs, 2), dtype=bool, device=x.device)
        aug_mask = torch.cat((token_mask, mask), 1)
        actionSeq = self.sequence_pos_encoding(actionSeq)
        action_tokens = self.actionTransEncoder(actionSeq, src_key_padding_mask=(~ aug_mask))
        return action_tokens[0:2]

    def forward(self, texts: List[str]):
        # Tokenize, run the (frozen) LM, and project tokens to latent_dim.
        (text_encoded, mask) = self.get_last_hidden_state(texts, return_mask=True)
        text_emb = self.projection(text_encoded)
        return text_emb

    def get_last_hidden_state(self, texts: List[str], return_mask: bool=False):
        # Batch-tokenize with padding and run the backbone on its own device.
        encoded_inputs = self.tokenizer(texts, return_tensors='pt', padding=True)
        output = self.text_model(**encoded_inputs.to(self.text_model.device))
        if (not return_mask):
            return output.last_hidden_state
        return (output.last_hidden_state, encoded_inputs.attention_mask.to(dtype=bool))
class Encoder_FC(nn.Module):
    """Fully-connected (C)VAE encoder over flattened motion + class one-hot."""

    def __init__(self, modeltype, njoints, nfeats, num_frames, num_classes,
                 translation, pose_rep, glob, glob_rot, latent_dim=256, **kargs):
        super().__init__()
        self.modeltype = modeltype
        self.njoints = njoints
        self.nfeats = nfeats
        self.num_frames = num_frames
        self.num_classes = num_classes
        self.translation = translation
        self.pose_rep = pose_rep
        self.glob = glob
        self.glob_rot = glob_rot
        self.latent_dim = latent_dim
        self.activation = nn.GELU()
        # Input = flattened motion concatenated with the class one-hot.
        self.input_dim = self.njoints * self.nfeats * self.num_frames + self.num_classes
        self.fully_connected = nn.Sequential(
            nn.Linear(self.input_dim, 512), nn.GELU(),
            nn.Linear(512, 256), nn.GELU())
        if self.modeltype == 'cvae':
            # Separate heads for the Gaussian parameters.
            self.mu = nn.Linear(256, self.latent_dim)
            self.var = nn.Linear(256, self.latent_dim)
        else:
            self.final = nn.Linear(256, self.latent_dim)

    def forward(self, batch):
        x, y = batch['x'], batch['y']
        bs, njoints, feats, nframes = x.size()
        if njoints * feats * nframes != self.njoints * self.nfeats * self.num_frames:
            raise ValueError('This model is not adapted with this input')
        if len(y.shape) == 1:
            y = F.one_hot(y, self.num_classes)
        y = y.to(dtype=x.dtype)
        flat = torch.cat((x.reshape(bs, njoints * feats * nframes), y), 1)
        hidden = self.fully_connected(flat)
        if self.modeltype == 'cvae':
            return {'mu': self.mu(hidden), 'logvar': self.var(hidden)}
        return {'z': self.final(hidden)}
class Decoder_FC(nn.Module):
    """Fully-connected decoder: latent + class one-hot -> motion tensor."""

    def __init__(self, modeltype, njoints, nfeats, num_frames, num_classes,
                 translation, pose_rep, glob, glob_rot, latent_dim=256, **kargs):
        super().__init__()
        self.modeltype = modeltype
        self.njoints = njoints
        self.nfeats = nfeats
        self.num_frames = num_frames
        self.num_classes = num_classes
        self.translation = translation
        self.pose_rep = pose_rep
        self.glob = glob
        self.glob_rot = glob_rot
        self.latent_dim = latent_dim
        self.input_dim = self.latent_dim + self.num_classes
        self.output_dim = self.njoints * self.nfeats * self.num_frames
        self.fully_connected = nn.Sequential(
            nn.Linear(self.input_dim, 256), nn.GELU(),
            nn.Linear(256, 512), nn.GELU(),
            nn.Linear(512, self.output_dim), nn.GELU())

    def forward(self, batch):
        z, y = batch['z'], batch['y']
        if len(y.shape) == 1:
            y = F.one_hot(y, self.num_classes)
        y = y.to(dtype=z.dtype)
        decoded = self.fully_connected(torch.cat((z, y), dim=1))
        bs = decoded.size(0)
        # Unflatten back to (bs, njoints, nfeats, nframes); stored on the batch.
        batch['output'] = decoded.reshape(bs, self.njoints, self.nfeats, self.num_frames)
        return batch
class GATLayer(nn.Module):
    """Graph-attention layer with an extra attention head per semantic role.

    Improvement: the original repeated the parameter declaration, xavier
    init, score computation, and masked-where thirteen times (once per role);
    those stanzas are now driven by a role-name table.  Parameter names,
    their creation/initialization order (and hence RNG consumption), and the
    floating-point summation order are all preserved exactly.
    """

    # Role parameter names; '_' maps to '-' in the multi_adj keys
    # (e.g. attribute ARGM_LOC <-> key 'ARGM-LOC').
    ROLE_NAMES = ('ARG0', 'ARG1', 'ARG2', 'ARG3', 'ARG4',
                  'ARGM_LOC', 'ARGM_MNR', 'ARGM_TMP', 'ARGM_DIR', 'ARGM_ADV',
                  'MA', 'OTHERS')
    # Accumulation order kept from the original code (OTHERS before MA) so
    # float addition is bit-identical.
    _SUM_ORDER = ('ARG0', 'ARG1', 'ARG2', 'ARG3', 'ARG4',
                  'ARGM_LOC', 'ARGM_MNR', 'ARGM_TMP', 'ARGM_DIR', 'ARGM_ADV',
                  'OTHERS', 'MA')

    def __init__(self, in_features=768, out_features=768, dropout=0.1, alpha=0.2, concat=True):
        super(GATLayer, self).__init__()
        self.dropout = dropout
        self.in_features = in_features
        self.out_features = out_features
        self.alpha = alpha          # LeakyReLU negative slope
        self.concat = concat        # apply ELU to the output when True
        self.W = nn.Parameter(torch.empty(size=(in_features, out_features)))
        self.leakyrelu = nn.LeakyReLU(self.alpha)
        # One attention vector for the plain graph plus one per role.
        self.a = nn.Parameter(torch.empty(size=(2 * out_features, 1)))
        for name in self.ROLE_NAMES:
            setattr(self, name, nn.Parameter(torch.empty(size=(2 * out_features, 1))))
        nn.init.xavier_uniform_(self.W.data, gain=1.414)
        nn.init.xavier_uniform_(self.a, gain=1.414)
        for name in self.ROLE_NAMES:
            nn.init.xavier_uniform_(getattr(self, name).data, gain=1.414)

    def forward(self, h0, h1, multi_adj, adj):
        """Attend h0 over h1 using adj plus role-specific adjacencies.

        Args:
            h0, h1: node features, (B, N0, in_features) / (B, N1, in_features).
            multi_adj: dict of role-keyed adjacency tensors (B, N0, N1).
            adj: base adjacency tensor (B, N0, N1).
        """
        Wh0 = torch.einsum('bnd,de->bne', [h0, self.W])
        Wh1 = torch.einsum('bnd,de->bne', [h1, self.W])
        a_input = self._prepare_attentional_mechanism_input(Wh0, Wh1)

        def score(vec):
            # Raw pairwise attention logits for one attention vector.
            return self.leakyrelu(torch.matmul(a_input, vec).squeeze(3))

        e = score(self.a)
        role_e = {name: score(getattr(self, name)) for name in self.ROLE_NAMES}
        # Base attention: disconnected pairs get a huge negative logit so the
        # softmax zeroes them out.
        neg_fill = (-9000000000000000.0) * torch.ones_like(e)
        attention = torch.where(adj > 0, e, neg_fill)
        # Role attentions: disconnected pairs simply contribute zero.
        zeros = torch.zeros_like(e)
        role_att = {name: torch.where(multi_adj[name.replace('_', '-')] > 0,
                                      role_e[name], zeros)
                    for name in self.ROLE_NAMES}
        role_sum = zeros
        for name in self._SUM_ORDER:
            role_sum = role_sum + role_att[name]
        # Role scores act as a small additive bias on the base attention.
        attention = F.softmax(attention + 0.01 * role_sum, dim=1)
        attention = F.dropout(attention, self.dropout, training=self.training)
        h_prime = torch.matmul(attention, Wh1)
        return F.elu(h_prime) if self.concat else h_prime

    def _prepare_attentional_mechanism_input(self, Wh0, Wh1):
        """All pairwise (i, j) feature concatenations: (B, N0, N1, 2*out)."""
        N0, N1 = Wh0.size()[1], Wh1.size()[1]
        repeated = Wh0.repeat_interleave(N1, dim=1)
        tiled = Wh1.repeat(1, N0, 1)
        combos = torch.cat([repeated, tiled], dim=-1)
        return combos.view(-1, N0, N1, 2 * self.out_features)
class MotionDiscriminator(nn.Module):
    """GRU discriminator over motion sequences; scores the last valid step."""

    def __init__(self, input_size, hidden_size, hidden_layer, output_size=12, use_noise=None):
        super().__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.hidden_layer = hidden_layer
        self.use_noise = use_noise
        self.recurrent = nn.GRU(input_size, hidden_size, hidden_layer)
        self.linear1 = nn.Linear(hidden_size, 30)
        self.linear2 = nn.Linear(30, output_size)

    def forward(self, motion_sequence, lengths=None, hidden_unit=None):
        bs, njoints, nfeats, num_frames = motion_sequence.shape
        # (bs, J, F, T) -> (T, bs, J*F): time-major input for the GRU.
        seq = motion_sequence.reshape(bs, njoints * nfeats, num_frames).permute(2, 0, 1)
        if hidden_unit is None:
            hidden_unit = self.initHidden(seq.size(1), self.hidden_layer).to(seq.device)
        gru_o, _ = self.recurrent(seq.float(), hidden_unit)
        # Gather each sample's GRU output at its last valid timestep.
        last = gru_o[tuple(torch.stack((lengths - 1,
                                        torch.arange(bs, device=seq.device))))]
        return self.linear2(torch.tanh(self.linear1(last)))

    def initHidden(self, num_samples, layer):
        """Random (non-trainable) initial hidden state for the GRU."""
        return torch.randn(layer, num_samples, self.hidden_size, requires_grad=False)
class MotionDiscriminatorForFID(MotionDiscriminator):
    """Variant returning the 30-d pre-logit features (for FID) instead of scores."""

    def forward(self, motion_sequence, lengths=None, hidden_unit=None):
        bs, njoints, nfeats, num_frames = motion_sequence.shape
        # Same time-major reshaping as the parent discriminator.
        seq = motion_sequence.reshape(bs, njoints * nfeats, num_frames).permute(2, 0, 1)
        if hidden_unit is None:
            hidden_unit = self.initHidden(seq.size(1), self.hidden_layer).to(seq.device)
        gru_o, _ = self.recurrent(seq.float(), hidden_unit)
        last = gru_o[tuple(torch.stack((lengths - 1,
                                        torch.arange(bs, device=seq.device))))]
        # Stop before the classification head: return the tanh features.
        return torch.tanh(self.linear1(last))
class MovementConvEncoder(nn.Module):
    """Two strided Conv1d blocks over time, then a per-step linear head."""

    def __init__(self, input_size, hidden_size, output_size):
        super(MovementConvEncoder, self).__init__()
        self.main = nn.Sequential(
            nn.Conv1d(input_size, hidden_size, 4, 2, 1),
            nn.Dropout(0.2, inplace=True),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv1d(hidden_size, output_size, 4, 2, 1),
            nn.Dropout(0.2, inplace=True),
            nn.LeakyReLU(0.2, inplace=True),
        )
        self.out_net = nn.Linear(output_size, output_size)

    def forward(self, inputs):
        # (bs, T, C) -> (bs, C, T) for the convolutions, then back for the head.
        feats = self.main(inputs.permute(0, 2, 1)).permute(0, 2, 1)
        return self.out_net(feats)
class MotionEncoderBiGRUCo(nn.Module):
    """Bidirectional-GRU motion encoder: packed sequence in, one embedding out."""

    def __init__(self, input_size, hidden_size, output_size):
        super(MotionEncoderBiGRUCo, self).__init__()
        self.input_emb = nn.Linear(input_size, hidden_size)
        self.gru = nn.GRU(hidden_size, hidden_size, batch_first=True, bidirectional=True)
        self.output_net = nn.Sequential(
            nn.Linear(hidden_size * 2, hidden_size),
            nn.LayerNorm(hidden_size),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Linear(hidden_size, output_size),
        )
        self.hidden_size = hidden_size
        # Learnable initial hidden state, tiled across the batch in forward().
        self.hidden = nn.Parameter(torch.randn((2, 1, self.hidden_size), requires_grad=True))

    def forward(self, inputs, m_lens):
        num_samples = inputs.shape[0]
        embedded = self.input_emb(inputs)
        h0 = self.hidden.repeat(1, num_samples, 1)
        packed = pack_padded_sequence(embedded, m_lens.data.tolist(), batch_first=True)
        _, gru_last = self.gru(packed, h0)
        # Concatenate the final forward and backward direction states.
        final = torch.cat([gru_last[0], gru_last[1]], dim=-1)
        return self.output_net(final)
class TextEncoderBiGRUCo(nn.Module):
    """Bi-GRU text encoder over word + POS embeddings; returns one embedding."""

    def __init__(self, word_size, pos_size, hidden_size, output_size):
        super(TextEncoderBiGRUCo, self).__init__()
        self.pos_emb = nn.Linear(pos_size, word_size)
        self.input_emb = nn.Linear(word_size, hidden_size)
        self.gru = nn.GRU(hidden_size, hidden_size, batch_first=True, bidirectional=True)
        self.output_net = nn.Sequential(
            nn.Linear(hidden_size * 2, hidden_size),
            nn.LayerNorm(hidden_size),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Linear(hidden_size, output_size),
        )
        self.hidden_size = hidden_size
        # Learnable initial hidden state, tiled across the batch in forward().
        self.hidden = nn.Parameter(torch.randn((2, 1, self.hidden_size), requires_grad=True))

    def forward(self, word_embs, pos_onehot, cap_lens):
        num_samples = word_embs.shape[0]
        # POS one-hots are projected into the word space and added in.
        embedded = self.input_emb(word_embs + self.pos_emb(pos_onehot))
        h0 = self.hidden.repeat(1, num_samples, 1)
        packed = pack_padded_sequence(embedded, cap_lens.data.tolist(), batch_first=True)
        _, gru_last = self.gru(packed, h0)
        final = torch.cat([gru_last[0], gru_last[1]], dim=-1)
        return self.output_net(final)
class STGCN(nn.Module):
    """Spatial temporal graph convolutional networks.

    Args:
        in_channels (int): Number of channels in the input data
        num_class (int): Number of classes for the classification task
        kintree_path: Path to the kinematic tree used to build the graph
        graph_args (dict): The arguments for building the graph
        edge_importance_weighting (bool): If ``True``, adds a learnable
            importance weighting to the edges of the graph
        **kwargs (optional): Other parameters for graph convolution units

    Shape:
        - Input: :math:`(N, in_channels, T_{in}, V_{in}, M_{in})`
        - Output: :math:`(N, num_class)` where
          :math:`N` is a batch size,
          :math:`T_{in}` is a length of input sequence,
          :math:`V_{in}` is the number of graph nodes,
          :math:`M_{in}` is the number of instance in a frame.
    """

    def __init__(self, in_channels, num_class, kintree_path, graph_args, edge_importance_weighting, **kwargs):
        super().__init__()
        self.num_class = num_class
        self.losses = ['accuracy', 'cross_entropy', 'mixed']
        self.criterion = torch.nn.CrossEntropyLoss(reduction='mean')
        # Skeleton graph; A is registered as a buffer so it moves with .to().
        self.graph = Graph(kintree_path=kintree_path, **graph_args)
        A = torch.tensor(self.graph.A, dtype=torch.float32, requires_grad=False)
        self.register_buffer('A', A)
        spatial_kernel_size = A.size(0)
        temporal_kernel_size = 9
        kernel_size = (temporal_kernel_size, spatial_kernel_size)
        self.data_bn = nn.BatchNorm1d((in_channels * A.size(1)))
        # First block takes kwargs without dropout and no residual connection.
        kwargs0 = {k: v for (k, v) in kwargs.items() if (k != 'dropout')}
        self.st_gcn_networks = nn.ModuleList((st_gcn(in_channels, 64, kernel_size, 1, residual=False, **kwargs0), st_gcn(64, 64, kernel_size, 1, **kwargs), st_gcn(64, 64, kernel_size, 1, **kwargs), st_gcn(64, 64, kernel_size, 1, **kwargs), st_gcn(64, 128, kernel_size, 2, **kwargs), st_gcn(128, 128, kernel_size, 1, **kwargs), st_gcn(128, 128, kernel_size, 1, **kwargs), st_gcn(128, 256, kernel_size, 2, **kwargs), st_gcn(256, 256, kernel_size, 1, **kwargs), st_gcn(256, 256, kernel_size, 1, **kwargs)))
        if edge_importance_weighting:
            # One learnable edge-importance mask per st-gcn block.
            self.edge_importance = nn.ParameterList([nn.Parameter(torch.ones(self.A.size())) for i in self.st_gcn_networks])
        else:
            self.edge_importance = ([1] * len(self.st_gcn_networks))
        self.fcn = nn.Conv2d(256, num_class, kernel_size=1)

    def forward(self, motion):
        """Classify *motion*; returns a batch dict with 'features' and 'yhat'."""
        batch = {'output': motion}
        # Given the permute, *motion* appears to be laid out as
        # (N, V, C, T); it is rearranged to (N, C, T, V, M=1) — TODO confirm
        # against the upstream data pipeline.
        x = batch['output'].permute(0, 2, 3, 1).unsqueeze(4).contiguous()
        (N, C, T, V, M) = x.size()
        # Batch-normalize per (joint, channel) across time.
        x = x.permute(0, 4, 3, 1, 2).contiguous()
        x = x.view((N * M), (V * C), T)
        x = self.data_bn(x)
        x = x.view(N, M, V, C, T)
        x = x.permute(0, 1, 3, 4, 2).contiguous()
        x = x.view((N * M), C, T, V)
        # Stacked st-gcn blocks, each modulated by its edge-importance mask.
        for (gcn, importance) in zip(self.st_gcn_networks, self.edge_importance):
            (x, _) = gcn(x, (self.A * importance))
        # Global average pooling over (T, V), then average over instances M.
        x = F.avg_pool2d(x, x.size()[2:])
        x = x.view(N, M, (- 1), 1, 1).mean(dim=1)
        batch['features'] = x.squeeze()
        x = self.fcn(x)
        x = x.view(x.size(0), (- 1))
        batch['yhat'] = x
        return batch

    def compute_accuracy(self, batch):
        """Accuracy from the confusion matrix of argmax predictions vs labels."""
        confusion = torch.zeros(self.num_class, self.num_class, dtype=int)
        yhat = batch['yhat'].max(dim=1).indices
        ygt = batch['y']
        for (label, pred) in zip(ygt, yhat):
            confusion[label][pred] += 1
        accuracy = (torch.trace(confusion) / torch.sum(confusion))
        return accuracy

    def compute_loss(self, batch):
        """Cross-entropy loss plus a dict of scalar logs (losses + accuracy)."""
        cross_entropy = self.criterion(batch['yhat'], batch['y'])
        # 'mixed' currently equals plain cross-entropy.
        mixed_loss = cross_entropy
        acc = self.compute_accuracy(batch)
        losses = {'cross_entropy': cross_entropy.item(), 'mixed': mixed_loss.item(), 'accuracy': acc.item()}
        return (mixed_loss, losses)
class st_gcn(nn.Module):
    """Applies a spatial temporal graph convolution over an input graph sequence.

    Args:
        in_channels (int): Number of channels in the input sequence data
        out_channels (int): Number of channels produced by the convolution
        kernel_size (tuple): Size of the temporal convolving kernel and graph convolving kernel
        stride (int, optional): Stride of the temporal convolution. Default: 1
        dropout (int, optional): Dropout rate of the final output. Default: 0
        residual (bool, optional): If ``True``, applies a residual mechanism. Default: ``True``

    Shape:
        - Input[0]: Input graph sequence in :math:`(N, in_channels, T_{in}, V)` format
        - Input[1]: Input graph adjacency matrix in :math:`(K, V, V)` format
        - Output[0]: Output graph sequence in :math:`(N, out_channels, T_{out}, V)` format
        - Output[1]: Graph adjacency matrix for output data in :math:`(K, V, V)` format
        where
        :math:`N` is a batch size,
        :math:`K` is the spatial kernel size, as :math:`K == kernel_size[1]`,
        :math:`T_{in}/T_{out}` is a length of input/output sequence,
        :math:`V` is the number of graph nodes.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, dropout=0, residual=True):
        super().__init__()
        assert (len(kernel_size) == 2)
        # The temporal kernel must be odd so "same" padding keeps T unchanged.
        assert ((kernel_size[0] % 2) == 1)
        padding = (((kernel_size[0] - 1) // 2), 0)
        # Spatial graph convolution followed by a temporal convolution block.
        self.gcn = ConvTemporalGraphical(in_channels, out_channels, kernel_size[1])
        self.tcn = nn.Sequential(
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, out_channels, (kernel_size[0], 1), (stride, 1), padding),
            nn.BatchNorm2d(out_channels),
            nn.Dropout(dropout, inplace=True))
        if (not residual):
            self.residual = (lambda x: 0)
        elif ((in_channels == out_channels) and (stride == 1)):
            # Identity skip connection when shapes already match.
            self.residual = (lambda x: x)
        else:
            # Project the skip path with a 1x1 conv when channels/stride differ.
            self.residual = nn.Sequential(
                nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=(stride, 1)),
                nn.BatchNorm2d(out_channels))
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x, A):
        """Apply gcn -> tcn with a residual connection; A is passed through unchanged."""
        res = self.residual(x)
        (x, A) = self.gcn(x, A)
        x = (self.tcn(x) + res)
        return (self.relu(x), A)
class Graph():
    """The Graph to model the skeletons extracted by the openpose.

    Args:
        kintree_path: path to a pickled SMPL kinematic tree; only read by the
            'smpl' and 'smpl_noglobal' layouts.
        layout (string): must be one of the follow candidates
            - openpose: consists of 18 joints
              (https://github.com/CMU-Perceptual-Computing-Lab/openpose#output)
            - ntu-rgb+d: consists of 25 joints
              (https://github.com/shahroudy/NTURGB-D)
            - ntu_edge: 24-node edge variant of NTU RGB+D
            - smpl / smpl_noglobal: 24 / 23 joints with / without global rotation
        strategy (string): must be one of the follow candidates
            - uniform: Uniform Labeling
            - distance: Distance Partitioning
            - spatial: Spatial Configuration
            (see 'Partition Strategies' in https://arxiv.org/abs/1801.07455)
        max_hop (int): the maximal distance between two connected nodes
        dilation (int): controls the spacing between the kernel points

    Attributes set by __init__:
        num_node, edge, center (via get_edge), hop_dis (shortest hop matrix)
        and A, the (K, V, V) normalized adjacency stack (via get_adjacency).
    """

    def __init__(self, kintree_path, layout='openpose', strategy='uniform', max_hop=1, dilation=1):
        self.max_hop = max_hop
        self.dilation = dilation
        self.kintree_path = kintree_path
        self.get_edge(layout)
        self.hop_dis = get_hop_distance(self.num_node, self.edge, max_hop=max_hop)
        self.get_adjacency(strategy)

    def __str__(self):
        # Bug fix: __str__ must return a str. The previous code returned the raw
        # ndarray self.A, which made print(graph)/str(graph) raise TypeError.
        return str(self.A)

    def get_edge(self, layout):
        """Set self.num_node, self.edge (self-links + bones) and self.center for `layout`."""
        if layout == 'openpose':
            self.num_node = 18
            self_link = [(i, i) for i in range(self.num_node)]
            neighbor_link = [(4, 3), (3, 2), (7, 6), (6, 5), (13, 12), (12, 11), (10, 9), (9, 8), (11, 5), (8, 2), (5, 1), (2, 1), (0, 1), (15, 0), (14, 0), (17, 15), (16, 14)]
            self.edge = self_link + neighbor_link
            self.center = 1
        elif layout == 'smpl':
            self.num_node = 24
            self_link = [(i, i) for i in range(self.num_node)]
            # Kinematic tree: kt[0] are child joints, kt[1] their parents.
            kt = pkl.load(open(self.kintree_path, 'rb'))
            neighbor_link = [(k, kt[1][i + 1]) for i, k in enumerate(kt[0][1:])]
            self.edge = self_link + neighbor_link
            self.center = 0
        elif layout == 'smpl_noglobal':
            self.num_node = 23
            self_link = [(i, i) for i in range(self.num_node)]
            kt = pkl.load(open(self.kintree_path, 'rb'))
            neighbor_link = [(k, kt[1][i + 1]) for i, k in enumerate(kt[0][1:])]
            # Drop bones touching the global root joint, then shift indices down.
            neighbor_1base = [n for n in neighbor_link if n[0] != 0 and n[1] != 0]
            neighbor_link = [(i - 1, j - 1) for i, j in neighbor_1base]
            self.edge = self_link + neighbor_link
            self.center = 0
        elif layout == 'ntu-rgb+d':
            self.num_node = 25
            self_link = [(i, i) for i in range(self.num_node)]
            neighbor_1base = [(1, 2), (2, 21), (3, 21), (4, 3), (5, 21), (6, 5), (7, 6), (8, 7), (9, 21), (10, 9), (11, 10), (12, 11), (13, 1), (14, 13), (15, 14), (16, 15), (17, 1), (18, 17), (19, 18), (20, 19), (22, 23), (23, 8), (24, 25), (25, 12)]
            # NTU joint indices are 1-based in the paper; convert to 0-based.
            neighbor_link = [(i - 1, j - 1) for i, j in neighbor_1base]
            self.edge = self_link + neighbor_link
            self.center = 21 - 1
        elif layout == 'ntu_edge':
            self.num_node = 24
            self_link = [(i, i) for i in range(self.num_node)]
            neighbor_1base = [(1, 2), (3, 2), (4, 3), (5, 2), (6, 5), (7, 6), (8, 7), (9, 2), (10, 9), (11, 10), (12, 11), (13, 1), (14, 13), (15, 14), (16, 15), (17, 1), (18, 17), (19, 18), (20, 19), (21, 22), (22, 8), (23, 24), (24, 12)]
            neighbor_link = [(i - 1, j - 1) for i, j in neighbor_1base]
            self.edge = self_link + neighbor_link
            self.center = 2
        else:
            raise NotImplementedError('This Layout is not supported')

    def get_adjacency(self, strategy):
        """Build the (K, V, V) normalized adjacency stack self.A for `strategy`."""
        valid_hop = range(0, self.max_hop + 1, self.dilation)
        adjacency = np.zeros((self.num_node, self.num_node))
        for hop in valid_hop:
            adjacency[self.hop_dis == hop] = 1
        normalize_adjacency = normalize_digraph(adjacency)
        if strategy == 'uniform':
            # One partition containing every valid hop.
            A = np.zeros((1, self.num_node, self.num_node))
            A[0] = normalize_adjacency
            self.A = A
        elif strategy == 'distance':
            # One partition per hop distance.
            A = np.zeros((len(valid_hop), self.num_node, self.num_node))
            for i, hop in enumerate(valid_hop):
                A[i][self.hop_dis == hop] = normalize_adjacency[self.hop_dis == hop]
            self.A = A
        elif strategy == 'spatial':
            # Partition each hop by distance-to-center: root / closer / further.
            A = []
            for hop in valid_hop:
                a_root = np.zeros((self.num_node, self.num_node))
                a_close = np.zeros((self.num_node, self.num_node))
                a_further = np.zeros((self.num_node, self.num_node))
                for i in range(self.num_node):
                    for j in range(self.num_node):
                        if self.hop_dis[j, i] == hop:
                            if self.hop_dis[j, self.center] == self.hop_dis[i, self.center]:
                                a_root[j, i] = normalize_adjacency[j, i]
                            elif self.hop_dis[j, self.center] > self.hop_dis[i, self.center]:
                                a_close[j, i] = normalize_adjacency[j, i]
                            else:
                                a_further[j, i] = normalize_adjacency[j, i]
                if hop == 0:
                    A.append(a_root)
                else:
                    A.append(a_root + a_close)
                    A.append(a_further)
            A = np.stack(A)
            self.A = A
        else:
            raise NotImplementedError('This Strategy is not supported')
class ConvTemporalGraphical(nn.Module): 'The basic module for applying a graph convolution.\n Args:\n in_channels (int): Number of channels in the input sequence data\n out_channels (int): Number of channels produced by the convolution\n kernel_size (int): Size of the graph convolving kernel\n t_kernel_size (int): Size of the temporal convolving kernel\n t_stride (int, optional): Stride of the temporal convolution. Default: 1\n t_padding (int, optional): Temporal zero-padding added to both sides of\n the input. Default: 0\n t_dilation (int, optional): Spacing between temporal kernel elements.\n Default: 1\n bias (bool, optional): If ``True``, adds a learnable bias to the output.\n Default: ``True``\n Shape:\n - Input[0]: Input graph sequence in :math:`(N, in_channels, T_{in}, V)` format\n - Input[1]: Input graph adjacency matrix in :math:`(K, V, V)` format\n - Output[0]: Outpu graph sequence in :math:`(N, out_channels, T_{out}, V)` format\n - Output[1]: Graph adjacency matrix for output data in :math:`(K, V, V)` format\n where\n :math:`N` is a batch size,\n :math:`K` is the spatial kernel size, as :math:`K == kernel_size[1]`,\n :math:`T_{in}/T_{out}` is a length of input/output sequence,\n :math:`V` is the number of graph nodes.\n ' def __init__(self, in_channels, out_channels, kernel_size, t_kernel_size=1, t_stride=1, t_padding=0, t_dilation=1, bias=True): super().__init__() self.kernel_size = kernel_size self.conv = nn.Conv2d(in_channels, (out_channels * kernel_size), kernel_size=(t_kernel_size, 1), padding=(t_padding, 0), stride=(t_stride, 1), dilation=(t_dilation, 1), bias=bias) def forward(self, x, A): assert (A.size(0) == self.kernel_size) x = self.conv(x) (n, kc, t, v) = x.size() x = x.view(n, self.kernel_size, (kc // self.kernel_size), t, v) x = torch.einsum('nkctv,kvw->nctw', (x, A)) return (x.contiguous(), A)
def get_hop_distance(num_node, edge, max_hop=1): A = np.zeros((num_node, num_node)) for (i, j) in edge: A[(j, i)] = 1 A[(i, j)] = 1 hop_dis = (np.zeros((num_node, num_node)) + np.inf) transfer_mat = [np.linalg.matrix_power(A, d) for d in range((max_hop + 1))] arrive_mat = (np.stack(transfer_mat) > 0) for d in range(max_hop, (- 1), (- 1)): hop_dis[arrive_mat[d]] = d return hop_dis
def normalize_digraph(A): Dl = np.sum(A, 0) num_node = A.shape[0] Dn = np.zeros((num_node, num_node)) for i in range(num_node): if (Dl[i] > 0): Dn[(i, i)] = (Dl[i] ** (- 1)) AD = np.dot(A, Dn) return AD
def normalize_undigraph(A): Dl = np.sum(A, 0) num_node = A.shape[0] Dn = np.zeros((num_node, num_node)) for i in range(num_node): if (Dl[i] > 0): Dn[(i, i)] = (Dl[i] ** (- 0.5)) DAD = np.dot(np.dot(Dn, A), Dn) return DAD
class VPosert(nn.Module): def __init__(self, cfg, **kwargs) -> None: super(VPosert, self).__init__() num_neurons = 512 self.latentD = 256 n_features = (196 * 263) self.encoder_net = nn.Sequential(BatchFlatten(), nn.BatchNorm1d(n_features), nn.Linear(n_features, num_neurons), nn.LeakyReLU(), nn.BatchNorm1d(num_neurons), nn.Dropout(0.1), nn.Linear(num_neurons, num_neurons), nn.Linear(num_neurons, num_neurons), NormalDistDecoder(num_neurons, self.latentD)) self.decoder_net = nn.Sequential(nn.Linear(self.latentD, num_neurons), nn.LeakyReLU(), nn.Dropout(0.1), nn.Linear(num_neurons, num_neurons), nn.LeakyReLU(), nn.Linear(num_neurons, n_features), ContinousRotReprDecoder()) def forward(self, features: Tensor, lengths: Optional[List[int]]=None): q_z = self.encode(features) feats_rst = self.decode(q_z) return (feats_rst, q_z) def encode(self, pose_body, lengths: Optional[List[int]]=None): "\n :param Pin: Nx(numjoints*3)\n :param rep_type: 'matrot'/'aa' for matrix rotations or axis-angle\n :return:\n " q_z = self.encoder_net(pose_body) q_z_sample = q_z.rsample() return (q_z_sample.unsqueeze(0), q_z) def decode(self, Zin, lengths: Optional[List[int]]=None): bs = Zin.shape[0] Zin = Zin[0] prec = self.decoder_net(Zin) return prec
class BatchFlatten(nn.Module): def __init__(self): super(BatchFlatten, self).__init__() self._name = 'batch_flatten' def forward(self, x): return x.view(x.shape[0], (- 1))
class ContinousRotReprDecoder(nn.Module): def __init__(self): super(ContinousRotReprDecoder, self).__init__() def forward(self, module_input): reshaped_input = module_input.view((- 1), 196, 263) return reshaped_input
class NormalDistDecoder(nn.Module): def __init__(self, num_feat_in, latentD): super(NormalDistDecoder, self).__init__() self.mu = nn.Linear(num_feat_in, latentD) self.logvar = nn.Linear(num_feat_in, latentD) def forward(self, Xout): return torch.distributions.normal.Normal(self.mu(Xout), F.softplus(self.logvar(Xout)))
def get_model(cfg, datamodule, phase='train'): modeltype = cfg.model.model_type if (modeltype == 'GraphMotion'): return get_module(cfg, datamodule) else: raise ValueError(f'Invalid model type {modeltype}.')
def get_module(cfg, datamodule): modeltype = cfg.model.model_type model_module = importlib.import_module(f'.modeltype.{cfg.model.model_type}', package='GraphMotion.models') Model = model_module.__getattribute__(f'{modeltype}') return Model(cfg=cfg, datamodule=datamodule)
class GraphMotionLosses(Metric):
    """Stage-aware training loss for GraphMotion.

    Depending on cfg.TRAIN.STAGE ('vae', 'diffusion' or 'vae_diffusion'),
    registers the relevant loss terms as torchmetrics states and accumulates
    them in update(); compute() returns per-loss running means.
    """

    def __init__(self, vae, mode, cfg):
        super().__init__(dist_sync_on_step=cfg.LOSS.DIST_SYNC_ON_STEP)
        self.vae_type = cfg.TRAIN.ABLATION.VAE_TYPE
        self.mode = mode
        self.cfg = cfg
        self.predict_epsilon = cfg.TRAIN.ABLATION.PREDICT_EPSILON
        self.stage = cfg.TRAIN.STAGE
        # Select which loss terms are tracked for the current stage.
        losses = []
        if (self.stage in ['diffusion', 'vae_diffusion']):
            losses.append('inst_loss')
            losses.append('x_loss')
            if (self.cfg.LOSS.LAMBDA_PRIOR != 0.0):
                losses.append('prior_loss')
        if (self.stage in ['vae', 'vae_diffusion']):
            losses.append('recons_feature')
            losses.append('recons_verts')
            losses.append('recons_joints')
            losses.append('recons_limb')
            losses.append('gen_feature')
            losses.append('gen_joints')
            losses.append('kl_motion')
        if (self.stage not in ['vae', 'diffusion', 'vae_diffusion']):
            raise ValueError(f'Stage {self.stage} not supported')
        losses.append('total')
        # Each loss is a summed metric state; 'count' tracks update() calls.
        for loss in losses:
            self.add_state(loss, default=torch.tensor(0.0), dist_reduce_fx='sum')
        self.add_state('count', torch.tensor(0), dist_reduce_fx='sum')
        self.losses = losses
        self._losses_func = {}
        self._params = {}
        # Assign a criterion and weight per loss, keyed by its name prefix.
        for loss in losses:
            if (loss.split('_')[0] == 'inst'):
                self._losses_func[loss] = nn.MSELoss(reduction='mean')
                self._params[loss] = 1
            elif (loss.split('_')[0] == 'x'):
                self._losses_func[loss] = nn.MSELoss(reduction='mean')
                self._params[loss] = 1
            elif (loss.split('_')[0] == 'prior'):
                self._losses_func[loss] = nn.MSELoss(reduction='mean')
                self._params[loss] = cfg.LOSS.LAMBDA_PRIOR
            # NOTE(review): this starts a NEW if-chain (not elif), so the
            # 'recons'/'gen'/'latent' branches below hang off the 'kl' test.
            # Also, when LAMBDA_KL == 0 a 'kl_motion' entry stays in `losses`
            # without a criterion — update() would then KeyError; confirm.
            if (loss.split('_')[0] == 'kl'):
                if (cfg.LOSS.LAMBDA_KL != 0.0):
                    self._losses_func[loss] = KLLoss()
                    self._params[loss] = cfg.LOSS.LAMBDA_KL
            elif (loss.split('_')[0] == 'recons'):
                self._losses_func[loss] = torch.nn.SmoothL1Loss(reduction='mean')
                self._params[loss] = cfg.LOSS.LAMBDA_REC
            elif (loss.split('_')[0] == 'gen'):
                self._losses_func[loss] = torch.nn.SmoothL1Loss(reduction='mean')
                self._params[loss] = cfg.LOSS.LAMBDA_GEN
            elif (loss.split('_')[0] == 'latent'):
                self._losses_func[loss] = torch.nn.SmoothL1Loss(reduction='mean')
                self._params[loss] = cfg.LOSS.LAMBDA_LATENT
            else:
                # NOTE(review): exception is constructed but never raised.
                ValueError('This loss is not recognized.')
            if (loss.split('_')[(- 1)] == 'joints'):
                # Joint losses get their own weight, overriding the prefix weight.
                self._params[loss] = cfg.LOSS.LAMBDA_JOINT

    def update(self, rs_set):
        """Accumulate all stage-relevant losses from `rs_set`; returns the weighted total."""
        total: float = 0.0
        if (self.stage in ['vae', 'vae_diffusion']):
            total += self._update_loss('recons_feature', rs_set['m_rst'], rs_set['m_ref'])
            total += self._update_loss('recons_joints', rs_set['joints_rst'], rs_set['joints_ref'])
            total += self._update_loss('kl_motion', rs_set['dist_m'], rs_set['dist_ref'])
        if (self.stage in ['diffusion', 'vae_diffusion']):
            if self.predict_epsilon:
                # Three denoising levels share the single 'inst_loss' state.
                total += self._update_loss('inst_loss', rs_set['noise_pred_1'], rs_set['noise_1'])
                total += self._update_loss('inst_loss', rs_set['noise_pred_2'], rs_set['noise_2'])
                total += self._update_loss('inst_loss', rs_set['noise_pred_3'], rs_set['noise_3'])
            else:
                # Predict the clean latent directly instead of the noise.
                total += self._update_loss('x_loss', rs_set['pred'], rs_set['latent'])
            if (self.cfg.LOSS.LAMBDA_PRIOR != 0.0):
                total += self._update_loss('prior_loss', rs_set['noise_prior'], rs_set['dist_m1'])
        if (self.stage in ['vae_diffusion']):
            total += self._update_loss('gen_feature', rs_set['gen_m_rst'], rs_set['m_ref'])
            total += self._update_loss('gen_joints', rs_set['gen_joints_rst'], rs_set['joints_ref'])
        self.total += total.detach()
        self.count += 1
        return total

    def compute(self, split):
        """Return the running mean of every tracked loss."""
        count = getattr(self, 'count')
        return {loss: (getattr(self, loss) / count) for loss in self.losses}

    def _update_loss(self, loss: str, outputs, inputs):
        """Apply the criterion, accumulate its detached value, return the weighted loss."""
        val = self._losses_func[loss](outputs, inputs)
        getattr(self, loss).__iadd__(val.detach())
        weighted_loss = (self._params[loss] * val)
        return weighted_loss

    def loss2logname(self, loss: str, split: str):
        """Map a loss name like 'recons_feature' to a log tag 'recons/feature/<split>'."""
        if (loss == 'total'):
            log_name = f'{loss}/{split}'
        else:
            (loss_type, name) = loss.split('_')
            log_name = f'{loss_type}/{name}/{split}'
        return log_name
class KLLoss(): def __init__(self): pass def __call__(self, q, p): div = torch.distributions.kl_divergence(q, p) return div.mean() def __repr__(self): return 'KLLoss()'
class KLLossMulti(): def __init__(self): self.klloss = KLLoss() def __call__(self, qlist, plist): return sum([self.klloss(q, p) for (q, p) in zip(qlist, plist)]) def __repr__(self): return 'KLLossMulti()'
class ACTORLosses(Metric):
    """ACTOR-style VAE training loss.

    Registers a fixed set of loss terms as buffers and, in update(), only
    accumulates reconstruction ('recons_feature') and KL ('kl_motion') —
    the remaining registered terms stay at zero.
    """

    def __init__(self, vae, mode, cfg):
        super().__init__(dist_sync_on_step=cfg.LOSS.DIST_SYNC_ON_STEP)
        self.vae = vae
        self.mode = mode
        losses = []
        losses.append('recons_feature')
        losses.append('recons_verts')
        losses.append('recons_joints')
        losses.append('recons_limb')
        losses.append('latent_st2sm')
        losses.append('kl_motion')
        losses.append('total')
        # Accumulators live as (non-persistent-state) module buffers.
        for loss in losses:
            self.register_buffer(loss, torch.tensor(0.0))
        self.register_buffer('count', torch.tensor(0))
        self.losses = losses
        self._losses_func = {}
        self._params = {}
        # Criterion and weight are chosen by the loss-name prefix.
        for loss in losses:
            if (loss != 'total'):
                if (loss.split('_')[0] == 'kl'):
                    self._losses_func[loss] = KLLoss()
                    self._params[loss] = cfg.LOSS.LAMBDA_KL
                elif (loss.split('_')[0] == 'recons'):
                    self._losses_func[loss] = torch.nn.SmoothL1Loss(reduction='mean')
                    self._params[loss] = cfg.LOSS.LAMBDA_REC
                elif (loss.split('_')[0] == 'cross'):
                    self._losses_func[loss] = torch.nn.SmoothL1Loss(reduction='mean')
                    self._params[loss] = cfg.LOSS.LAMBDA_CROSS
                elif (loss.split('_')[0] == 'latent'):
                    self._losses_func[loss] = torch.nn.SmoothL1Loss(reduction='mean')
                    self._params[loss] = cfg.LOSS.LAMBDA_LATENT
                elif (loss.split('_')[0] == 'cycle'):
                    self._losses_func[loss] = torch.nn.SmoothL1Loss(reduction='mean')
                    self._params[loss] = cfg.LOSS.LAMBDA_CYCLE
                else:
                    # NOTE(review): exception is constructed but never raised.
                    ValueError('This loss is not recognized.')

    def update(self, rs_set, dist_ref):
        """Accumulate reconstruction + KL losses; returns the weighted total."""
        total: float = 0.0
        total += self._update_loss('recons_feature', rs_set['m_rst'], rs_set['m_ref'])
        total += self._update_loss('kl_motion', rs_set['dist_m'], dist_ref)
        self.total += total.detach()
        self.count += 1
        return total

    def compute(self, split):
        """Return the running mean of every tracked loss."""
        count = getattr(self, 'count')
        return {loss: (getattr(self, loss) / count) for loss in self.losses}

    def _update_loss(self, loss: str, outputs, inputs):
        """Apply the criterion, accumulate its detached value, return the weighted loss."""
        val = self._losses_func[loss](outputs, inputs)
        getattr(self, loss).__iadd__(val.detach())
        weighted_loss = (self._params[loss] * val)
        return weighted_loss

    def loss2logname(self, loss: str, split: str):
        """Map a loss name like 'recons_feature' to a log tag 'recons/feature/<split>'."""
        if (loss == 'total'):
            log_name = f'{loss}/{split}'
        else:
            (loss_type, name) = loss.split('_')
            log_name = f'{loss_type}/{name}/{split}'
        return log_name
class KLLoss(): def __init__(self): pass def __call__(self, q, p): div = torch.distributions.kl_divergence(q, p) return div.mean() def __repr__(self): return 'KLLoss()'
class KLLossMulti(): def __init__(self): self.klloss = KLLoss() def __call__(self, qlist, plist): return sum([self.klloss(q, p) for (q, p) in zip(qlist, plist)]) def __repr__(self): return 'KLLossMulti()'
class KLLoss(): def __init__(self): pass def __call__(self, q, p): div = torch.distributions.kl_divergence(q, p) return div.mean() def __repr__(self): return 'KLLoss()'
class KLLossMulti(): def __init__(self): self.klloss = KLLoss() def __call__(self, qlist, plist): return sum([self.klloss(q, p) for (q, p) in zip(qlist, plist)]) def __repr__(self): return 'KLLossMulti()'
class TemosLosses(Metric):
    """TEMOS-style training loss (reconstruction + KL + latent-manifold terms).

    The ablation switches below are hard-coded; depending on them and on
    ``mode`` ('xyz' or 'smpl'), the relevant loss terms are registered as
    buffers and accumulated in update().
    """

    def __init__(self, vae, mode, cfg):
        super().__init__(dist_sync_on_step=cfg.LOSS.DIST_SYNC_ON_STEP)
        self.vae = vae
        self.mode = mode
        # Hard-coded ablation configuration.
        loss_on_both = False
        force_loss_on_jfeats = True
        ablation_no_kl_combine = False
        ablation_no_kl_gaussian = False
        ablation_no_motionencoder = False
        self.loss_on_both = loss_on_both
        # Bug fix: update() reads self.force_loss_on_jfeats, but the attribute
        # was never assigned (only the local variable existed), so the first
        # update() call raised AttributeError.
        self.force_loss_on_jfeats = force_loss_on_jfeats
        self.ablation_no_kl_combine = ablation_no_kl_combine
        self.ablation_no_kl_gaussian = ablation_no_kl_gaussian
        self.ablation_no_motionencoder = ablation_no_motionencoder
        # Select active loss terms.
        losses = []
        if (mode == 'xyz') or force_loss_on_jfeats:
            if not ablation_no_motionencoder:
                losses.append('recons_jfeats2jfeats')
            losses.append('recons_text2jfeats')
        if mode == 'smpl':
            if not ablation_no_motionencoder:
                losses.append('recons_rfeats2rfeats')
            losses.append('recons_text2rfeats')
        else:
            # NOTE(review): exception is constructed but never raised (kept as-is).
            ValueError('This mode is not recognized.')
        if vae or loss_on_both:
            kl_losses = []
            if (not ablation_no_kl_combine) and (not ablation_no_motionencoder):
                kl_losses.extend(['kl_text2motion', 'kl_motion2text'])
            if not ablation_no_kl_gaussian:
                if ablation_no_motionencoder:
                    kl_losses.extend(['kl_text'])
                else:
                    kl_losses.extend(['kl_text', 'kl_motion'])
            losses.extend(kl_losses)
        if (not self.vae) or loss_on_both:
            if not ablation_no_motionencoder:
                losses.append('latent_manifold')
        losses.append('total')
        # Accumulators live as module buffers.
        for loss in losses:
            self.register_buffer(loss, torch.tensor(0.0))
        self.register_buffer('count', torch.tensor(0))
        self.losses = losses
        self._losses_func = {}
        self._params = {}
        # Criterion and weight are chosen by the loss-name prefix.
        for loss in losses:
            if loss != 'total':
                if loss.split('_')[0] == 'kl':
                    self._losses_func[loss] = KLLoss()
                    self._params[loss] = cfg.LOSS.LAMBDA_KL
                elif loss.split('_')[0] == 'recons':
                    self._losses_func[loss] = torch.nn.SmoothL1Loss(reduction='mean')
                    self._params[loss] = cfg.LOSS.LAMBDA_REC
                elif loss.split('_')[0] == 'latent':
                    self._losses_func[loss] = torch.nn.SmoothL1Loss(reduction='mean')
                    self._params[loss] = cfg.LOSS.LAMBDA_LATENT
                elif loss.split('_')[0] == 'cycle':
                    self._losses_func[loss] = torch.nn.SmoothL1Loss(reduction='mean')
                    self._params[loss] = cfg.LOSS.LAMBDA_CYCLE
                else:
                    ValueError('This loss is not recognized.')

    def update(self, f_text=None, f_motion=None, f_ref=None, lat_text=None, lat_motion=None, dis_text=None, dis_motion=None, dis_ref=None):
        """Accumulate all active losses for one batch; returns the weighted total."""
        total: float = 0.0
        if (self.mode == 'xyz') or self.force_loss_on_jfeats:
            if not self.ablation_no_motionencoder:
                total += self._update_loss('recons_jfeats2jfeats', f_motion, f_ref)
            total += self._update_loss('recons_text2jfeats', f_text, f_ref)
        if self.mode == 'smpl':
            if not self.ablation_no_motionencoder:
                total += self._update_loss('recons_rfeats2rfeats', f_motion.rfeats, f_ref.rfeats)
            total += self._update_loss('recons_text2rfeats', f_text.rfeats, f_ref.rfeats)
        if self.vae or self.loss_on_both:
            if (not self.ablation_no_kl_combine) and (not self.ablation_no_motionencoder):
                total += self._update_loss('kl_text2motion', dis_text, dis_motion)
                total += self._update_loss('kl_motion2text', dis_motion, dis_text)
            if not self.ablation_no_kl_gaussian:
                total += self._update_loss('kl_text', dis_text, dis_ref)
                if not self.ablation_no_motionencoder:
                    total += self._update_loss('kl_motion', dis_motion, dis_ref)
        if (not self.vae) or self.loss_on_both:
            if not self.ablation_no_motionencoder:
                total += self._update_loss('latent_manifold', lat_text, lat_motion)
        self.total += total.detach()
        self.count += 1
        return total

    def compute(self, split):
        """Return the running mean of every tracked loss."""
        count = getattr(self, 'count')
        return {loss: (getattr(self, loss) / count) for loss in self.losses}

    def _update_loss(self, loss: str, outputs, inputs):
        """Apply the criterion, accumulate its detached value, return the weighted loss."""
        val = self._losses_func[loss](outputs, inputs)
        getattr(self, loss).__iadd__(val.detach())
        weighted_loss = (self._params[loss] * val)
        return weighted_loss

    def loss2logname(self, loss: str, split: str):
        """Map a loss name like 'recons_text2jfeats' to 'recons/text2jfeats/<split>'."""
        if loss == 'total':
            log_name = f'{loss}/{split}'
        else:
            (loss_type, name) = loss.split('_')
            log_name = f'{loss_type}/{name}/{split}'
        return log_name
class KLLoss(): def __init__(self): pass def __call__(self, q, p): div = torch.distributions.kl_divergence(q, p) return div.mean() def __repr__(self): return 'KLLoss()'
class KLLossMulti(): def __init__(self): self.klloss = KLLoss() def __call__(self, qlist, plist): return sum([self.klloss(q, p) for (q, p) in zip(qlist, plist)]) def __repr__(self): return 'KLLossMulti()'
class TmostLosses(Metric):
    """TMOST-style content/style disentanglement loss.

    Tracks reconstruction, cross-reconstruction, optional cycle, latent
    alignment and KL terms between text (t) and motion (m) content (c) /
    style (s) codes. Accumulators are module buffers; compute() returns
    running means.
    """

    def __init__(self, vae, mode, cfg):
        super().__init__(dist_sync_on_step=cfg.LOSS.DIST_SYNC_ON_STEP)
        self.vae = vae
        self.mode = mode
        losses = []
        losses.append('recons_mm2m')
        losses.append('recons_t2m')
        losses.append('cross_mt2m')
        losses.append('cross_tm2m')
        losses.append('cycle_cmsm2mContent')
        losses.append('cycle_cmsm2mStyle')
        losses.append('latent_ct2cm')
        losses.append('latent_st2sm')
        losses.append('kl_motion')
        losses.append('kl_text')
        losses.append('kl_ct2cm')
        losses.append('kl_cm2ct')
        losses.append('total')
        for loss in losses:
            self.register_buffer(loss, torch.tensor(0.0))
        self.register_buffer('count', torch.tensor(0))
        self.losses = losses
        # Whether the cycle-consistency terms are active.
        self.ablation_cycle = cfg.TRAIN.ABLATION.CYCLE
        self._losses_func = {}
        self._params = {}
        # Criterion and weight are chosen by the loss-name prefix.
        for loss in losses:
            if (loss != 'total'):
                if (loss.split('_')[0] == 'kl'):
                    self._losses_func[loss] = KLLoss()
                    self._params[loss] = cfg.LOSS.LAMBDA_KL
                elif (loss.split('_')[0] == 'recons'):
                    self._losses_func[loss] = torch.nn.SmoothL1Loss(reduction='mean')
                    self._params[loss] = cfg.LOSS.LAMBDA_REC
                elif (loss.split('_')[0] == 'cross'):
                    self._losses_func[loss] = torch.nn.SmoothL1Loss(reduction='mean')
                    self._params[loss] = cfg.LOSS.LAMBDA_CROSS
                elif (loss.split('_')[0] == 'latent'):
                    self._losses_func[loss] = torch.nn.SmoothL1Loss(reduction='mean')
                    self._params[loss] = cfg.LOSS.LAMBDA_LATENT
                elif (loss.split('_')[0] == 'cycle'):
                    self._losses_func[loss] = torch.nn.SmoothL1Loss(reduction='mean')
                    self._params[loss] = cfg.LOSS.LAMBDA_CYCLE
                else:
                    # NOTE(review): exception is constructed but never raised.
                    ValueError('This loss is not recognized.')

    def update(self, rs_set, dist_ref):
        """Accumulate all reconstruction/cross/cycle/latent/KL terms from `rs_set`."""
        total: float = 0.0
        '\n        loss list\n        - triplet loss\n        - anchor style1\n        - pos style2\n        - neg diff_style\n        anchor = s_xa\n        pos = s_xpos\n        neg = self.gen.enc_style(co_data[diff_style], diff_style[-2:])\n        l_triplet = self.triplet_loss(anchor, pos, neg)\n        - \n        '
        # Self- and text-reconstruction of motion m1.
        total += self._update_loss('recons_mm2m', rs_set['rs_cm1sm1'], rs_set['m1'])
        total += self._update_loss('recons_t2m', rs_set['rs_ct1st1'], rs_set['m1'])
        # Cross reconstructions: mixed content/style sources.
        total += self._update_loss('cross_mt2m', rs_set['rs_cm1st1'], rs_set['m1'])
        total += self._update_loss('cross_tm2m', rs_set['rs_ct1sm1'], rs_set['m1'])
        if self.ablation_cycle:
            total += self._update_loss('cycle_cmsm2mContent', rs_set['cyc_rs_cm1sm1'], rs_set['m1'])
            total += self._update_loss('cycle_cmsm2mStyle', rs_set['cyc_rs_cm2sm2'], rs_set['m2'])
        # Align text latents with motion latents (content and style).
        total += self._update_loss('latent_ct2cm', rs_set['lat_ct1'], rs_set['lat_cm1'])
        total += self._update_loss('latent_st2sm', rs_set['lat_st1'], rs_set['lat_sm1'])
        # KL regularization towards the reference prior and between modalities.
        total += self._update_loss('kl_motion', rs_set['dist_cm1'], dist_ref)
        total += self._update_loss('kl_text', rs_set['dist_ct1'], dist_ref)
        total += self._update_loss('kl_ct2cm', rs_set['dist_ct1'], rs_set['dist_cm1'])
        total += self._update_loss('kl_cm2ct', rs_set['dist_cm1'], rs_set['dist_ct1'])
        self.total += total.detach()
        self.count += 1
        return total

    def compute(self, split):
        """Return the running mean of every tracked loss."""
        count = getattr(self, 'count')
        return {loss: (getattr(self, loss) / count) for loss in self.losses}

    def _update_loss(self, loss: str, outputs, inputs):
        """Apply the criterion, accumulate its detached value, return the weighted loss."""
        val = self._losses_func[loss](outputs, inputs)
        getattr(self, loss).__iadd__(val.detach())
        weighted_loss = (self._params[loss] * val)
        return weighted_loss

    def loss2logname(self, loss: str, split: str):
        """Map a loss name like 'cross_mt2m' to a log tag 'cross/mt2m/<split>'."""
        if (loss == 'total'):
            log_name = f'{loss}/{split}'
        else:
            (loss_type, name) = loss.split('_')
            log_name = f'{loss_type}/{name}/{split}'
        return log_name
class KLLoss(): def __init__(self): pass def __call__(self, q, p): div = torch.distributions.kl_divergence(q, p) return div.mean() def __repr__(self): return 'KLLoss()'
class KLLossMulti(): def __init__(self): self.klloss = KLLoss() def __call__(self, qlist, plist): return sum([self.klloss(q, p) for (q, p) in zip(qlist, plist)]) def __repr__(self): return 'KLLossMulti()'
class ComputeMetricsBest(ComputeMetrics): def update(self, jts_text_: List[Tensor], jts_ref_: List[Tensor], lengths: List[List[int]]): self.count += sum(lengths[0]) self.count_seq += len(lengths[0]) ntrials = len(jts_text_) metrics = [] for index in range(ntrials): (jts_text, poses_text, root_text, traj_text) = self.transform(jts_text_[index], lengths[index]) (jts_ref, poses_ref, root_ref, traj_ref) = self.transform(jts_ref_[index], lengths[index]) mets = [] for i in range(len(lengths[index])): APE_root = l2_norm(root_text[i], root_ref[i], dim=1).sum() APE_pose = l2_norm(poses_text[i], poses_ref[i], dim=2).sum(0) APE_traj = l2_norm(traj_text[i], traj_ref[i], dim=1).sum() APE_joints = l2_norm(jts_text[i], jts_ref[i], dim=2).sum(0) root_sigma_text = variance(root_text[i], lengths[index][i], dim=0) root_sigma_ref = variance(root_ref[i], lengths[index][i], dim=0) AVE_root = l2_norm(root_sigma_text, root_sigma_ref, dim=0) traj_sigma_text = variance(traj_text[i], lengths[index][i], dim=0) traj_sigma_ref = variance(traj_ref[i], lengths[index][i], dim=0) AVE_traj = l2_norm(traj_sigma_text, traj_sigma_ref, dim=0) poses_sigma_text = variance(poses_text[i], lengths[index][i], dim=0) poses_sigma_ref = variance(poses_ref[i], lengths[index][i], dim=0) AVE_pose = l2_norm(poses_sigma_text, poses_sigma_ref, dim=1) jts_sigma_text = variance(jts_text[i], lengths[index][i], dim=0) jts_sigma_ref = variance(jts_ref[i], lengths[index][i], dim=0) AVE_joints = l2_norm(jts_sigma_text, jts_sigma_ref, dim=1) met = [APE_root, APE_pose, APE_traj, APE_joints, AVE_root, AVE_pose, AVE_traj, AVE_joints] mets.append(met) metrics.append(mets) mmm = metrics[np.argmin([x[0][0] for x in metrics])] (APE_root, APE_pose, APE_traj, APE_joints, AVE_root, AVE_pose, AVE_traj, AVE_joints) = mmm[0] self.APE_root += APE_root self.APE_pose += APE_pose self.APE_traj += APE_traj self.APE_joints += APE_joints self.AVE_root += AVE_root self.AVE_pose += AVE_pose self.AVE_traj += AVE_traj self.AVE_joints += AVE_joints
class ComputeMetricsWorst(ComputeMetrics): def update(self, jts_text_: List[Tensor], jts_ref_: List[Tensor], lengths: List[List[int]]): self.count += sum(lengths[0]) self.count_seq += len(lengths[0]) ntrials = len(jts_text_) metrics = [] for index in range(ntrials): (jts_text, poses_text, root_text, traj_text) = self.transform(jts_text_[index], lengths[index]) (jts_ref, poses_ref, root_ref, traj_ref) = self.transform(jts_ref_[index], lengths[index]) mets = [] for i in range(len(lengths[index])): APE_root = l2_norm(root_text[i], root_ref[i], dim=1).sum() APE_pose = l2_norm(poses_text[i], poses_ref[i], dim=2).sum(0) APE_traj = l2_norm(traj_text[i], traj_ref[i], dim=1).sum() APE_joints = l2_norm(jts_text[i], jts_ref[i], dim=2).sum(0) root_sigma_text = variance(root_text[i], lengths[index][i], dim=0) root_sigma_ref = variance(root_ref[i], lengths[index][i], dim=0) AVE_root = l2_norm(root_sigma_text, root_sigma_ref, dim=0) traj_sigma_text = variance(traj_text[i], lengths[index][i], dim=0) traj_sigma_ref = variance(traj_ref[i], lengths[index][i], dim=0) AVE_traj = l2_norm(traj_sigma_text, traj_sigma_ref, dim=0) poses_sigma_text = variance(poses_text[i], lengths[index][i], dim=0) poses_sigma_ref = variance(poses_ref[i], lengths[index][i], dim=0) AVE_pose = l2_norm(poses_sigma_text, poses_sigma_ref, dim=1) jts_sigma_text = variance(jts_text[i], lengths[index][i], dim=0) jts_sigma_ref = variance(jts_ref[i], lengths[index][i], dim=0) AVE_joints = l2_norm(jts_sigma_text, jts_sigma_ref, dim=1) met = [APE_root, APE_pose, APE_traj, APE_joints, AVE_root, AVE_pose, AVE_traj, AVE_joints] mets.append(met) metrics.append(mets) mmm = metrics[np.argmax([x[0][0] for x in metrics])] (APE_root, APE_pose, APE_traj, APE_joints, AVE_root, AVE_pose, AVE_traj, AVE_joints) = mmm[0] self.APE_root += APE_root self.APE_pose += APE_pose self.APE_traj += APE_traj self.APE_joints += APE_joints self.AVE_root += AVE_root self.AVE_pose += AVE_pose self.AVE_traj += AVE_traj self.AVE_joints += AVE_joints
class MMMetrics(Metric): full_state_update = True def __init__(self, mm_num_times=10, dist_sync_on_step=True, stage=0, **kwargs): super().__init__(dist_sync_on_step=dist_sync_on_step) self.name = 'MultiModality scores' self.mm_num_times = mm_num_times self.add_state('count', default=torch.tensor(0), dist_reduce_fx='sum') self.add_state('count_seq', default=torch.tensor(0), dist_reduce_fx='sum') self.stage = stage if (self.stage in [1, 2, 3]): self.metrics = [f's{str(self.stage)}_MultiModality'] self.add_state(f's{str(self.stage)}_MultiModality', default=torch.tensor(0.0), dist_reduce_fx='sum') else: self.metrics = ['MultiModality'] self.add_state('MultiModality', default=torch.tensor(0.0), dist_reduce_fx='sum') self.add_state('mm_motion_embeddings', default=[], dist_reduce_fx=None) def compute(self, sanity_flag): count = self.count.item() count_seq = self.count_seq.item() metrics = {metric: getattr(self, metric) for metric in self.metrics} if sanity_flag: return metrics all_mm_motions = torch.cat(self.mm_motion_embeddings, axis=0).cpu().numpy() if (self.stage in [1, 2, 3]): metrics[f's{str(self.stage)}_MultiModality'] = calculate_multimodality_np(all_mm_motions, self.mm_num_times) else: metrics['MultiModality'] = calculate_multimodality_np(all_mm_motions, self.mm_num_times) return {**metrics} def update(self, mm_motion_embeddings: Tensor, lengths: List[int]): self.count += sum(lengths) self.count_seq += len(lengths) self.mm_motion_embeddings.append(mm_motion_embeddings)
class MRMetrics(Metric):
    """Motion reconstruction metrics (MPJPE, PA-MPJPE, acceleration error)
    between reconstructed and reference joint sequences."""

    def __init__(self, njoints, jointstype: str='mmm', force_in_meter: bool=True, align_root: bool=True, dist_sync_on_step=True, **kwargs):
        super().__init__(dist_sync_on_step=dist_sync_on_step)
        if (jointstype not in ['mmm', 'humanml3d']):
            raise NotImplementedError('This jointstype is not implemented.')
        self.name = 'Motion Reconstructions'
        self.jointstype = jointstype
        self.align_root = align_root
        self.force_in_meter = force_in_meter
        self.add_state('count', default=torch.tensor(0), dist_reduce_fx='sum')
        self.add_state('count_seq', default=torch.tensor(0), dist_reduce_fx='sum')
        self.add_state('MPJPE', default=torch.tensor([0.0]), dist_reduce_fx='sum')
        self.add_state('PAMPJPE', default=torch.tensor([0.0]), dist_reduce_fx='sum')
        self.add_state('ACCEL', default=torch.tensor([0.0]), dist_reduce_fx='sum')
        self.MR_metrics = ['MPJPE', 'PAMPJPE', 'ACCEL']
        self.metrics = self.MR_metrics

    def compute(self, sanity_flag):
        # factor=1000 scales meter-valued errors to millimeters for reporting
        # -- presumably; confirm units against the joint data pipeline.
        if self.force_in_meter:
            factor = 1000.0
        else:
            factor = 1.0
        count = self.count
        count_seq = self.count_seq
        mr_metrics = {}
        mr_metrics['MPJPE'] = ((self.MPJPE / count) * factor)
        mr_metrics['PAMPJPE'] = ((self.PAMPJPE / count) * factor)
        # Acceleration uses second differences, so each sequence contributes
        # two fewer frames than its length.
        mr_metrics['ACCEL'] = ((self.ACCEL / (count - (2 * count_seq))) * factor)
        return mr_metrics

    def update(self, joints_rst: Tensor, joints_ref: Tensor, lengths: List[int]):
        assert (joints_rst.shape == joints_ref.shape)
        assert (joints_rst.dim() == 4)
        self.count += sum(lengths)
        self.count_seq += len(lengths)
        rst = joints_rst.detach().cpu()
        ref = joints_ref.detach().cpu()
        # Align at joint index 0 (root) for these skeleton layouts.
        if (self.align_root and (self.jointstype in ['mmm', 'humanml3d'])):
            align_inds = [0]
        else:
            align_inds = None
        for i in range(len(lengths)):
            self.MPJPE += torch.sum(calc_mpjpe(rst[i], ref[i], align_inds=align_inds))
            self.PAMPJPE += torch.sum(calc_pampjpe(rst[i], ref[i]))
            self.ACCEL += torch.sum(calc_accel(rst[i], ref[i]))
class UncondMetrics(Metric):
    """torchmetrics Metric for unconditional generation quality: FID, KID and
    Diversity of generated motion embeddings against ground-truth embeddings.

    Args:
        top_k, R_size: accepted for interface compatibility (unused here).
        diversity_times: number of random pairs sampled by the diversity score.
        dist_sync_on_step: forwarded to torchmetrics.Metric.
    """
    full_state_update = True

    def __init__(self, top_k=3, R_size=32, diversity_times=300, dist_sync_on_step=True, **kwargs):
        super().__init__(dist_sync_on_step=dist_sync_on_step)
        self.name = 'fid, kid, and diversity scores'
        self.top_k = top_k
        self.R_size = R_size
        # BUG FIX: was hard-coded to 300, silently ignoring the argument.
        self.diversity_times = diversity_times
        self.add_state('count', default=torch.tensor(0), dist_reduce_fx='sum')
        self.add_state('count_seq', default=torch.tensor(0), dist_reduce_fx='sum')
        self.metrics = []
        self.add_state('KID_mean', default=torch.tensor(0.0), dist_reduce_fx='mean')
        self.add_state('KID_std', default=torch.tensor(0.0), dist_reduce_fx='mean')
        self.metrics.extend(['KID_mean', 'KID_std'])
        self.add_state('FID', default=torch.tensor(0.0), dist_reduce_fx='mean')
        self.metrics.append('FID')
        self.add_state('Diversity', default=torch.tensor(0.0), dist_reduce_fx='sum')
        self.add_state('gt_Diversity', default=torch.tensor(0.0), dist_reduce_fx='sum')
        self.metrics.extend(['Diversity', 'gt_Diversity'])
        # Embeddings are accumulated in update() and reduced in compute().
        self.add_state('recmotion_embeddings', default=[], dist_reduce_fx=None)
        self.add_state('gtmotion_embeddings', default=[], dist_reduce_fx=None)

    def compute(self, sanity_flag):
        count = self.count.item()
        count_seq = self.count_seq.item()
        metrics = {metric: getattr(self, metric) for metric in self.metrics}
        # During the sanity-check pass just return the zero-initialized states.
        if sanity_flag:
            return metrics
        all_gtmotions = torch.cat(self.gtmotion_embeddings, axis=0).cpu()
        all_genmotions = torch.cat(self.recmotion_embeddings, axis=0).cpu()
        # KID works on torch tensors; FID/Diversity on numpy arrays.
        (KID_mean, KID_std) = calculate_kid(all_gtmotions, all_genmotions)
        metrics['KID_mean'] = KID_mean
        metrics['KID_std'] = KID_std
        all_genmotions = all_genmotions.numpy()
        all_gtmotions = all_gtmotions.numpy()
        (mu, cov) = calculate_activation_statistics_np(all_genmotions)
        (gt_mu, gt_cov) = calculate_activation_statistics_np(all_gtmotions)
        metrics['FID'] = calculate_frechet_distance_np(gt_mu, gt_cov, mu, cov)
        # Diversity samples pairs of sequences, so it needs more sequences
        # than diversity_times. (Debug prints removed.)
        assert (count_seq > self.diversity_times)
        metrics['Diversity'] = calculate_diversity_np(all_genmotions, self.diversity_times)
        metrics['gt_Diversity'] = calculate_diversity_np(all_gtmotions, self.diversity_times)
        return {**metrics}

    def update(self, gtmotion_embeddings: Tensor, lengths: List[int], recmotion_embeddings=None):
        self.count += sum(lengths)
        self.count_seq += len(lengths)
        if (recmotion_embeddings is not None):
            recmotion_embeddings = torch.flatten(recmotion_embeddings, start_dim=1).detach()
            self.recmotion_embeddings.append(recmotion_embeddings)
        gtmotion_embeddings = torch.flatten(gtmotion_embeddings, start_dim=1).detach()
        self.gtmotion_embeddings.append(gtmotion_embeddings)
class MLP(nn.Module):
    """MLP built from LinearBlock layers; hidden dims come from
    cfg.MODEL.MOTION_DECODER.MLP_DIM and the final layer projects to out_dim
    with no normalization or activation."""

    def __init__(self, cfg, out_dim, is_init):
        super(MLP, self).__init__()
        dims = cfg.MODEL.MOTION_DECODER.MLP_DIM
        n_blk = len(dims)
        norm = 'none'
        acti = 'lrelu'
        layers = []
        for i in range((n_blk - 1)):
            layers += LinearBlock(dims[i], dims[(i + 1)], norm=norm, acti=acti)
        # Output projection: plain linear (no norm, no activation).
        layers += LinearBlock(dims[(- 1)], out_dim, norm='none', acti='none')
        self.model = nn.Sequential(*layers)
        if is_init:
            for m in self.modules():
                if isinstance(m, nn.Linear):
                    # NOTE(review): constant-1 init for Linear weights (bias
                    # untouched) is unusual -- confirm this is intentional.
                    nn.init.constant_(m.weight, 1)
                elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                    nn.init.constant_(m.weight, 1)
                    nn.init.constant_(m.bias, 0)

    def forward(self, x):
        # Flatten everything except the batch dimension before the MLP.
        return self.model(x.view(x.size(0), (- 1)))
def ZeroPad1d(sizes):
    """Zero-valued 1-D constant padding (thin wrapper over nn.ConstantPad1d)."""
    zero_value = 0
    return nn.ConstantPad1d(sizes, zero_value)
def get_acti_layer(acti='relu', inplace=True):
    """Return the requested activation wrapped in a list ('none' -> [])."""
    factories = {
        'relu': lambda: [nn.ReLU(inplace=inplace)],
        'lrelu': lambda: [nn.LeakyReLU(0.2, inplace=inplace)],
        'tanh': lambda: [nn.Tanh()],
        'none': lambda: [],
    }
    if acti not in factories:
        assert 0, 'Unsupported activation: {}'.format(acti)
    return factories[acti]()
def get_norm_layer(norm='none', norm_dim=None):
    """Return the requested 1-D normalization layer in a list ('none' -> [])."""
    if norm == 'none':
        return []
    if norm == 'bn':
        return [nn.BatchNorm1d(norm_dim)]
    if norm == 'in':
        return [nn.InstanceNorm1d(norm_dim, affine=True)]
    if norm == 'adain':
        return [AdaptiveInstanceNorm1d(norm_dim)]
    assert 0, 'Unsupported normalization: {}'.format(norm)
def get_dropout_layer(dropout=None):
    """Return [nn.Dropout(p=dropout)], or [] when dropout is None."""
    return [] if dropout is None else [nn.Dropout(p=dropout)]
def ConvLayers(kernel_size, in_channels, out_channels, stride=1, pad_type='reflect', use_bias=True): '\n returns a list of [pad, conv] => should be += to some list, then apply sequential\n ' if (pad_type == 'reflect'): pad = nn.ReflectionPad1d elif (pad_type == 'replicate'): pad = nn.ReplicationPad1d elif (pad_type == 'zero'): pad = ZeroPad1d else: assert 0, 'Unsupported padding type: {}'.format(pad_type) pad_l = ((kernel_size - 1) // 2) pad_r = ((kernel_size - 1) - pad_l) return [pad((pad_l, pad_r)), nn.Conv1d(in_channels, out_channels, kernel_size=kernel_size, stride=stride, bias=use_bias)]
def ConvBlock(kernel_size, in_channels, out_channels, stride=1, pad_type='reflect', dropout=None, norm='none', acti='lrelu', acti_first=False, use_bias=True, inplace=True):
    """Return [pad, conv, dropout?, norm?, acti?] module list, or with the
    activation first when acti_first is set."""
    layers = ConvLayers(kernel_size, in_channels, out_channels, stride=stride, pad_type=pad_type, use_bias=use_bias)
    layers.extend(get_dropout_layer(dropout))
    layers.extend(get_norm_layer(norm, norm_dim=out_channels))
    acti_layers = get_acti_layer(acti, inplace=inplace)
    return acti_layers + layers if acti_first else layers + acti_layers
def LinearBlock(in_dim, out_dim, dropout=None, norm='none', acti='relu'):
    """Return [linear, dropout?, norm?, acti?] module list."""
    layers = [nn.Linear(in_dim, out_dim, bias=True)]
    layers.extend(get_dropout_layer(dropout))
    layers.extend(get_norm_layer(norm, norm_dim=out_dim))
    layers.extend(get_acti_layer(acti))
    return layers
@contextlib.contextmanager
def no_weight_gradients():
    """Temporarily disable conv weight gradients by flipping the module-level
    `weight_gradients_disabled` flag.

    Uses try/finally so the flag is restored even when the managed body
    raises (the previous version left it stuck at True on exception).
    """
    global weight_gradients_disabled
    old = weight_gradients_disabled
    weight_gradients_disabled = True
    try:
        yield
    finally:
        weight_gradients_disabled = old
def conv2d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1):
    """conv2d wrapper: routes through the gradfix implementation when the
    current setup supports it, otherwise falls back to F.conv2d."""
    if not could_use_op(input):
        return F.conv2d(input=input, weight=weight, bias=bias, stride=stride, padding=padding, dilation=dilation, groups=groups)
    op = conv2d_gradfix(transpose=False, weight_shape=weight.shape, stride=stride, padding=padding, output_padding=0, dilation=dilation, groups=groups)
    return op.apply(input, weight, bias)
def conv_transpose2d(input, weight, bias=None, stride=1, padding=0, output_padding=0, groups=1, dilation=1):
    """conv_transpose2d wrapper: routes through the gradfix implementation
    when supported, otherwise falls back to F.conv_transpose2d."""
    if not could_use_op(input):
        return F.conv_transpose2d(input=input, weight=weight, bias=bias, stride=stride, padding=padding, output_padding=output_padding, dilation=dilation, groups=groups)
    op = conv2d_gradfix(transpose=True, weight_shape=weight.shape, stride=stride, padding=padding, output_padding=output_padding, groups=groups, dilation=dilation)
    return op.apply(input, weight, bias)
def could_use_op(input):
    """Return True when the custom gradfix conv ops are usable: feature
    enabled, cudnn enabled, CUDA input, and PyTorch 1.7/1.8 (whose private
    cudnn backward ops the gradfix path relies on)."""
    if not (enabled and torch.backends.cudnn.enabled):
        return False
    if input.device.type != 'cuda':
        return False
    if torch.__version__.startswith(('1.7.', '1.8.')):
        return True
    warnings.warn(f'conv2d_gradfix not supported on PyTorch {torch.__version__}. Falling back to torch.nn.functional.conv2d().')
    return False
def ensure_tuple(xs, ndim):
    """Coerce xs to a tuple: sequences are converted as-is, a scalar is
    repeated ndim times."""
    if isinstance(xs, (tuple, list)):
        return tuple(xs)
    return (xs,) * ndim
def conv2d_gradfix(transpose, weight_shape, stride, padding, output_padding, dilation, groups):
    """Build (and cache) a custom autograd conv2d / conv_transpose2d Function
    whose weight gradient can be skipped via the module-level
    `weight_gradients_disabled` flag (see no_weight_gradients)."""
    ndim = 2
    weight_shape = tuple(weight_shape)
    # Normalize hyper-parameters to per-dimension tuples so they can be part
    # of the (hashable) cache key.
    stride = ensure_tuple(stride, ndim)
    padding = ensure_tuple(padding, ndim)
    output_padding = ensure_tuple(output_padding, ndim)
    dilation = ensure_tuple(dilation, ndim)
    # One autograd.Function class is generated per unique configuration.
    key = (transpose, weight_shape, stride, padding, output_padding, dilation, groups)
    if (key in conv2d_gradfix_cache):
        return conv2d_gradfix_cache[key]
    common_kwargs = dict(stride=stride, padding=padding, dilation=dilation, groups=groups)

    def calc_output_padding(input_shape, output_shape):
        # Output padding needed so the transposed conv used in backward()
        # maps grad_output back to exactly input_shape.
        if transpose:
            return [0, 0]
        return [(((input_shape[(i + 2)] - ((output_shape[(i + 2)] - 1) * stride[i])) - (1 - (2 * padding[i]))) - (dilation[i] * (weight_shape[(i + 2)] - 1))) for i in range(ndim)]

    class Conv2d(autograd.Function):

        @staticmethod
        def forward(ctx, input, weight, bias):
            if (not transpose):
                out = F.conv2d(input=input, weight=weight, bias=bias, **common_kwargs)
            else:
                out = F.conv_transpose2d(input=input, weight=weight, bias=bias, output_padding=output_padding, **common_kwargs)
            ctx.save_for_backward(input, weight)
            return out

        @staticmethod
        def backward(ctx, grad_output):
            (input, weight) = ctx.saved_tensors
            (grad_input, grad_weight, grad_bias) = (None, None, None)
            if ctx.needs_input_grad[0]:
                # Input gradient is the "opposite" convolution of grad_output.
                p = calc_output_padding(input_shape=input.shape, output_shape=grad_output.shape)
                grad_input = conv2d_gradfix(transpose=(not transpose), weight_shape=weight_shape, output_padding=p, **common_kwargs).apply(grad_output, weight, None)
            # Weight gradient is skipped entirely while the module-level flag
            # is set (inside a no_weight_gradients() block).
            if (ctx.needs_input_grad[1] and (not weight_gradients_disabled)):
                grad_weight = Conv2dGradWeight.apply(grad_output, input)
            if ctx.needs_input_grad[2]:
                grad_bias = grad_output.sum((0, 2, 3))
            return (grad_input, grad_weight, grad_bias)

    class Conv2dGradWeight(autograd.Function):

        @staticmethod
        def forward(ctx, grad_output, input):
            # NOTE(review): these private cudnn ops only exist on PyTorch
            # 1.7/1.8 (see could_use_op) -- verify before porting to newer
            # torch versions.
            op = torch._C._jit_get_operation(('aten::cudnn_convolution_backward_weight' if (not transpose) else 'aten::cudnn_convolution_transpose_backward_weight'))
            flags = [torch.backends.cudnn.benchmark, torch.backends.cudnn.deterministic, torch.backends.cudnn.allow_tf32]
            grad_weight = op(weight_shape, grad_output, input, padding, stride, dilation, groups, *flags)
            ctx.save_for_backward(grad_output, input)
            return grad_weight

        @staticmethod
        def backward(ctx, grad_grad_weight):
            # Second-order gradients, needed e.g. for gradient-penalty losses.
            (grad_output, input) = ctx.saved_tensors
            (grad_grad_output, grad_grad_input) = (None, None)
            if ctx.needs_input_grad[0]:
                grad_grad_output = Conv2d.apply(input, grad_grad_weight, None)
            if ctx.needs_input_grad[1]:
                p = calc_output_padding(input_shape=input.shape, output_shape=grad_output.shape)
                grad_grad_input = conv2d_gradfix(transpose=(not transpose), weight_shape=weight_shape, output_padding=p, **common_kwargs).apply(grad_output, grad_grad_weight, None)
            return (grad_grad_output, grad_grad_input)
    conv2d_gradfix_cache[key] = Conv2d
    return Conv2d
class SkipTransformerEncoder(nn.Module):
    """Transformer encoder with U-Net-style skip connections:
    (num_layers-1)/2 input blocks, one middle block, and matching output
    blocks; each output block consumes [current, skipped] fused by a Linear."""

    def __init__(self, encoder_layer, num_layers, norm=None):
        super().__init__()
        self.d_model = encoder_layer.d_model
        self.num_layers = num_layers
        self.norm = norm
        # The symmetric in/middle/out structure requires an odd layer count.
        assert ((num_layers % 2) == 1)
        num_block = ((num_layers - 1) // 2)
        self.input_blocks = _get_clones(encoder_layer, num_block)
        self.middle_block = _get_clone(encoder_layer)
        self.output_blocks = _get_clones(encoder_layer, num_block)
        # Projects the concatenated skip (2*d_model) back down to d_model.
        self.linear_blocks = _get_clones(nn.Linear((2 * self.d_model), self.d_model), num_block)
        self._reset_parameters()

    def _reset_parameters(self):
        # Xavier init for all weight matrices; 1-D params (biases) untouched.
        for p in self.parameters():
            if (p.dim() > 1):
                nn.init.xavier_uniform_(p)

    def forward(self, src, mask: Optional[Tensor]=None, src_key_padding_mask: Optional[Tensor]=None, pos: Optional[Tensor]=None):
        x = src
        xs = []  # stack of skip activations (consumed LIFO below)
        for module in self.input_blocks:
            x = module(x, src_mask=mask, src_key_padding_mask=src_key_padding_mask, pos=pos)
            xs.append(x)
        x = self.middle_block(x, src_mask=mask, src_key_padding_mask=src_key_padding_mask, pos=pos)
        for (module, linear) in zip(self.output_blocks, self.linear_blocks):
            # Concatenate with the matching skip and project back to d_model.
            x = torch.cat([x, xs.pop()], dim=(- 1))
            x = linear(x)
            x = module(x, src_mask=mask, src_key_padding_mask=src_key_padding_mask, pos=pos)
        if (self.norm is not None):
            x = self.norm(x)
        return x
class SkipTransformerDecoder(nn.Module):
    """Transformer decoder with U-Net-style skip connections, mirroring
    SkipTransformerEncoder: input blocks, a middle block, and output blocks
    that fuse [current, skipped] through a Linear before each layer."""

    def __init__(self, decoder_layer, num_layers, norm=None):
        super().__init__()
        self.d_model = decoder_layer.d_model
        self.num_layers = num_layers
        self.norm = norm
        # The symmetric in/middle/out structure requires an odd layer count.
        assert ((num_layers % 2) == 1)
        num_block = ((num_layers - 1) // 2)
        self.input_blocks = _get_clones(decoder_layer, num_block)
        self.middle_block = _get_clone(decoder_layer)
        self.output_blocks = _get_clones(decoder_layer, num_block)
        # Projects the concatenated skip (2*d_model) back down to d_model.
        self.linear_blocks = _get_clones(nn.Linear((2 * self.d_model), self.d_model), num_block)
        self._reset_parameters()

    def _reset_parameters(self):
        # Xavier init for all weight matrices; 1-D params (biases) untouched.
        for p in self.parameters():
            if (p.dim() > 1):
                nn.init.xavier_uniform_(p)

    def forward(self, tgt, memory, tgt_mask: Optional[Tensor]=None, memory_mask: Optional[Tensor]=None, tgt_key_padding_mask: Optional[Tensor]=None, memory_key_padding_mask: Optional[Tensor]=None, pos: Optional[Tensor]=None, query_pos: Optional[Tensor]=None):
        x = tgt
        xs = []  # stack of skip activations (consumed LIFO below)
        for module in self.input_blocks:
            x = module(x, memory, tgt_mask=tgt_mask, memory_mask=memory_mask, tgt_key_padding_mask=tgt_key_padding_mask, memory_key_padding_mask=memory_key_padding_mask, pos=pos, query_pos=query_pos)
            xs.append(x)
        x = self.middle_block(x, memory, tgt_mask=tgt_mask, memory_mask=memory_mask, tgt_key_padding_mask=tgt_key_padding_mask, memory_key_padding_mask=memory_key_padding_mask, pos=pos, query_pos=query_pos)
        for (module, linear) in zip(self.output_blocks, self.linear_blocks):
            # Concatenate with the matching skip and project back to d_model.
            x = torch.cat([x, xs.pop()], dim=(- 1))
            x = linear(x)
            x = module(x, memory, tgt_mask=tgt_mask, memory_mask=memory_mask, tgt_key_padding_mask=tgt_key_padding_mask, memory_key_padding_mask=memory_key_padding_mask, pos=pos, query_pos=query_pos)
        if (self.norm is not None):
            x = self.norm(x)
        return x
class Transformer(nn.Module):
    """DETR-style transformer: flattens a CNN feature map into a sequence,
    encodes it, then decodes a fixed set of learned queries against the
    encoder memory."""

    def __init__(self, d_model=512, nhead=8, num_encoder_layers=6, num_decoder_layers=6, dim_feedforward=2048, dropout=0.1, activation='relu', normalize_before=False, return_intermediate_dec=False):
        super().__init__()
        encoder_layer = TransformerEncoderLayer(d_model, nhead, dim_feedforward, dropout, activation, normalize_before)
        # Pre-norm variants need a final LayerNorm on the encoder output.
        encoder_norm = (nn.LayerNorm(d_model) if normalize_before else None)
        self.encoder = TransformerEncoder(encoder_layer, num_encoder_layers, encoder_norm)
        decoder_layer = TransformerDecoderLayer(d_model, nhead, dim_feedforward, dropout, activation, normalize_before)
        decoder_norm = nn.LayerNorm(d_model)
        self.decoder = TransformerDecoder(decoder_layer, num_decoder_layers, decoder_norm, return_intermediate=return_intermediate_dec)
        self._reset_parameters()
        self.d_model = d_model
        self.nhead = nhead

    def _reset_parameters(self):
        # Xavier init for all weight matrices; 1-D params (biases) untouched.
        for p in self.parameters():
            if (p.dim() > 1):
                nn.init.xavier_uniform_(p)

    def forward(self, src, mask, query_embed, pos_embed):
        # src: (bs, c, h, w) feature map -> (h*w, bs, c) token sequence.
        (bs, c, h, w) = src.shape
        src = src.flatten(2).permute(2, 0, 1)
        pos_embed = pos_embed.flatten(2).permute(2, 0, 1)
        # One copy of the learned query embeddings per batch element.
        query_embed = query_embed.unsqueeze(1).repeat(1, bs, 1)
        mask = mask.flatten(1)
        # Decoder input starts as zeros; query content enters via query_pos.
        tgt = torch.zeros_like(query_embed)
        memory = self.encoder(src, src_key_padding_mask=mask, pos=pos_embed)
        hs = self.decoder(tgt, memory, memory_key_padding_mask=mask, pos=pos_embed, query_pos=query_embed)
        # Return decoder states (batch-first) and memory restored to (bs, c, h, w).
        return (hs.transpose(1, 2), memory.permute(1, 2, 0).view(bs, c, h, w))
class TransformerEncoder(nn.Module):
    """Stack of N cloned encoder layers with an optional final normalization."""

    def __init__(self, encoder_layer, num_layers, norm=None):
        super().__init__()
        self.layers = _get_clones(encoder_layer, num_layers)
        self.num_layers = num_layers
        self.norm = norm

    def forward(self, src, mask: Optional[Tensor]=None, src_key_padding_mask: Optional[Tensor]=None, pos: Optional[Tensor]=None):
        out = src
        for blk in self.layers:
            out = blk(out, src_mask=mask, src_key_padding_mask=src_key_padding_mask, pos=pos)
        return out if self.norm is None else self.norm(out)
class TransformerDecoder(nn.Module):
    """Stack of N cloned decoder layers. When return_intermediate is set, the
    normed output of every layer is stacked along dim 0 (used for DETR-style
    auxiliary losses)."""

    def __init__(self, decoder_layer, num_layers, norm=None, return_intermediate=False):
        super().__init__()
        self.layers = _get_clones(decoder_layer, num_layers)
        self.num_layers = num_layers
        self.norm = norm
        self.return_intermediate = return_intermediate

    def forward(self, tgt, memory, tgt_mask: Optional[Tensor]=None, memory_mask: Optional[Tensor]=None, tgt_key_padding_mask: Optional[Tensor]=None, memory_key_padding_mask: Optional[Tensor]=None, pos: Optional[Tensor]=None, query_pos: Optional[Tensor]=None):
        output = tgt
        intermediate = []
        for layer in self.layers:
            output = layer(output, memory, tgt_mask=tgt_mask, memory_mask=memory_mask, tgt_key_padding_mask=tgt_key_padding_mask, memory_key_padding_mask=memory_key_padding_mask, pos=pos, query_pos=query_pos)
            if self.return_intermediate:
                # NOTE: this branch assumes self.norm is not None when
                # return_intermediate is enabled.
                intermediate.append(self.norm(output))
        if (self.norm is not None):
            output = self.norm(output)
            # Replace the last stored entry with the final normed output so
            # it is not normalized twice.
            if self.return_intermediate:
                intermediate.pop()
                intermediate.append(output)
        if self.return_intermediate:
            return torch.stack(intermediate)
        # Keep a leading "layer" dim for parity with the intermediate mode.
        return output.unsqueeze(0)
class TransformerEncoderLayer(nn.Module):
    """Single transformer encoder layer (self-attention + FFN) with selectable
    pre-norm / post-norm ordering."""

    def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation='relu', normalize_before=False):
        super().__init__()
        self.d_model = d_model
        self.nhead = nhead
        self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
        self.linear1 = nn.Linear(d_model, dim_feedforward)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, d_model)
        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)
        self.activation = _get_activation_fn(activation)
        self.normalize_before = normalize_before

    def with_pos_embed(self, tensor, pos: Optional[Tensor]):
        # Positional encodings are (re-)added at every layer, not just once.
        return (tensor if (pos is None) else (tensor + pos))

    def forward_post(self, src, src_mask: Optional[Tensor]=None, src_key_padding_mask: Optional[Tensor]=None, pos: Optional[Tensor]=None):
        q = k = self.with_pos_embed(src, pos)
        # Expand a per-batch 3-D attention mask to one copy per head, matching
        # nn.MultiheadAttention's expected (bs*nhead, L, S) layout.
        if (src_mask is not None):
            src_mask = torch.repeat_interleave(src_mask, self.nhead, dim=0)
        src2 = self.self_attn(q, k, value=src, attn_mask=src_mask, key_padding_mask=src_key_padding_mask)[0]
        src = (src + self.dropout1(src2))
        src = self.norm1(src)
        src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))
        src = (src + self.dropout2(src2))
        src = self.norm2(src)
        return src

    def forward_pre(self, src, src_mask: Optional[Tensor]=None, src_key_padding_mask: Optional[Tensor]=None, pos: Optional[Tensor]=None):
        # Pre-norm variant: LayerNorm before each sub-block. NOTE(review):
        # the per-head src_mask expansion done in forward_post is NOT applied
        # here -- confirm whether that asymmetry is intended.
        src2 = self.norm1(src)
        q = k = self.with_pos_embed(src2, pos)
        src2 = self.self_attn(q, k, value=src2, attn_mask=src_mask, key_padding_mask=src_key_padding_mask)[0]
        src = (src + self.dropout1(src2))
        src2 = self.norm2(src)
        src2 = self.linear2(self.dropout(self.activation(self.linear1(src2))))
        src = (src + self.dropout2(src2))
        return src

    def forward(self, src, src_mask: Optional[Tensor]=None, src_key_padding_mask: Optional[Tensor]=None, pos: Optional[Tensor]=None):
        if self.normalize_before:
            return self.forward_pre(src, src_mask, src_key_padding_mask, pos)
        return self.forward_post(src, src_mask, src_key_padding_mask, pos)
class TransformerDecoderLayer(nn.Module):
    """Single transformer decoder layer (self-attention, cross-attention over
    the encoder memory, then FFN) with selectable pre-/post-norm ordering."""

    def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation='relu', normalize_before=False):
        super().__init__()
        self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
        self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
        self.d_model = d_model
        self.linear1 = nn.Linear(d_model, dim_feedforward)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, d_model)
        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.norm3 = nn.LayerNorm(d_model)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)
        self.dropout3 = nn.Dropout(dropout)
        self.activation = _get_activation_fn(activation)
        self.normalize_before = normalize_before

    def with_pos_embed(self, tensor, pos: Optional[Tensor]):
        # Positional encodings are (re-)added at every layer, not just once.
        return (tensor if (pos is None) else (tensor + pos))

    def forward_post(self, tgt, memory, tgt_mask: Optional[Tensor]=None, memory_mask: Optional[Tensor]=None, tgt_key_padding_mask: Optional[Tensor]=None, memory_key_padding_mask: Optional[Tensor]=None, pos: Optional[Tensor]=None, query_pos: Optional[Tensor]=None):
        # 1) self-attention over the queries (query_pos added to q and k).
        q = k = self.with_pos_embed(tgt, query_pos)
        tgt2 = self.self_attn(q, k, value=tgt, attn_mask=tgt_mask, key_padding_mask=tgt_key_padding_mask)[0]
        tgt = (tgt + self.dropout1(tgt2))
        tgt = self.norm1(tgt)
        # 2) cross-attention: queries attend to the encoder memory.
        tgt2 = self.multihead_attn(query=self.with_pos_embed(tgt, query_pos), key=self.with_pos_embed(memory, pos), value=memory, attn_mask=memory_mask, key_padding_mask=memory_key_padding_mask)[0]
        tgt = (tgt + self.dropout2(tgt2))
        tgt = self.norm2(tgt)
        # 3) position-wise feed-forward network.
        tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt))))
        tgt = (tgt + self.dropout3(tgt2))
        tgt = self.norm3(tgt)
        return tgt

    def forward_pre(self, tgt, memory, tgt_mask: Optional[Tensor]=None, memory_mask: Optional[Tensor]=None, tgt_key_padding_mask: Optional[Tensor]=None, memory_key_padding_mask: Optional[Tensor]=None, pos: Optional[Tensor]=None, query_pos: Optional[Tensor]=None):
        # Pre-norm variant: LayerNorm is applied before each sub-block.
        tgt2 = self.norm1(tgt)
        q = k = self.with_pos_embed(tgt2, query_pos)
        tgt2 = self.self_attn(q, k, value=tgt2, attn_mask=tgt_mask, key_padding_mask=tgt_key_padding_mask)[0]
        tgt = (tgt + self.dropout1(tgt2))
        tgt2 = self.norm2(tgt)
        tgt2 = self.multihead_attn(query=self.with_pos_embed(tgt2, query_pos), key=self.with_pos_embed(memory, pos), value=memory, attn_mask=memory_mask, key_padding_mask=memory_key_padding_mask)[0]
        tgt = (tgt + self.dropout2(tgt2))
        tgt2 = self.norm3(tgt)
        tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2))))
        tgt = (tgt + self.dropout3(tgt2))
        return tgt

    def forward(self, tgt, memory, tgt_mask: Optional[Tensor]=None, memory_mask: Optional[Tensor]=None, tgt_key_padding_mask: Optional[Tensor]=None, memory_key_padding_mask: Optional[Tensor]=None, pos: Optional[Tensor]=None, query_pos: Optional[Tensor]=None):
        if self.normalize_before:
            return self.forward_pre(tgt, memory, tgt_mask, memory_mask, tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos)
        return self.forward_post(tgt, memory, tgt_mask, memory_mask, tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos)
def _get_clone(module): return copy.deepcopy(module)
def _get_clones(module, N): return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
def build_transformer(args):
    """Instantiate a Transformer from an argparse-style config namespace."""
    kwargs = dict(
        d_model=args.hidden_dim,
        dropout=args.dropout,
        nhead=args.nheads,
        dim_feedforward=args.dim_feedforward,
        num_encoder_layers=args.enc_layers,
        num_decoder_layers=args.dec_layers,
        normalize_before=args.pre_norm,
        return_intermediate_dec=True,
    )
    return Transformer(**kwargs)
def _get_activation_fn(activation): 'Return an activation function given a string' if (activation == 'relu'): return F.relu if (activation == 'gelu'): return F.gelu if (activation == 'glu'): return F.glu raise RuntimeError(f'activation should be relu/gelu, not {activation}.')
def remove_padding(tensors, lengths):
    """Trim each padded sequence to its true length (lockstep with lengths)."""
    trimmed = []
    for seq, true_len in zip(tensors, lengths):
        trimmed.append(seq[:true_len])
    return trimmed
class AutoParams(nn.Module):
    """nn.Module base that auto-assigns constructor kwargs.

    Subclasses may declare:
      - needed_params: iterable of required kwarg names; a missing one raises
        ValueError.
      - optional_params: dict of name -> default; a kwarg that is absent or
        None falls back to the default.

    A subclass declaring neither behaves like a plain nn.Module.

    BUG FIX: the old bare ``try/except: pass`` (meant to tolerate subclasses
    without needed_params/optional_params) also swallowed the ValueError
    raised for a missing required parameter. getattr with a default keeps the
    tolerance while letting the error propagate.
    """

    def __init__(self, **kargs):
        for param in getattr(self, 'needed_params', []):
            if param in kargs:
                setattr(self, param, kargs[param])
            else:
                raise ValueError(f'{param} is needed.')
        for param, default in getattr(self, 'optional_params', {}).items():
            if param in kargs and kargs[param] is not None:
                setattr(self, param, kargs[param])
            else:
                setattr(self, param, default)
        super().__init__()
def freeze_params(module: nn.Module) -> None:
    """Freeze the parameters of *module* so they are not updated during
    training."""
    for param in module.parameters():
        param.requires_grad = False
class Camera():
    """Wraps the Blender scene camera: positions it relative to the first
    root location, chooses a focal length per render mode, and follows the
    root in x/y on update()."""

    def __init__(self, *, first_root, mode, is_mesh):
        camera = bpy.data.objects['Camera']
        camera.location.x = 7.36
        camera.location.y = -6.93
        # Meshes sit slightly higher than stick figures.
        camera.location.z = 5.6 if is_mesh else 5.2
        # Focal length per render mode: (mesh lens, non-mesh lens).
        lens_table = {
            'sequence': (65, 85),
            'frame': (130, 85),
            'video': (110, 85),
        }
        if mode in lens_table:
            mesh_lens, other_lens = lens_table[mode]
            camera.data.lens = mesh_lens if is_mesh else other_lens
        self.mode = mode
        self.camera = camera
        # Offset by the initial root so the subject starts centered.
        self.camera.location.x += first_root[0]
        self.camera.location.y += first_root[1]
        self._root = first_root

    def update(self, newroot):
        """Pan the camera by the root displacement since the last update."""
        offset = newroot - self._root
        self.camera.location.x += offset[0]
        self.camera.location.y += offset[1]
        self._root = newroot
class Data():
    """Minimal dataset-like base class: len() reports self.N, which must be
    provided by the subclass or instance (not defined here)."""

    def __len__(self):
        # N is expected to be set elsewhere (subclass/instance attribute).
        return self.N
def clear_material(material):
    """Remove all nodes and links from *material*'s node tree; no-op when the
    tree is absent/falsy."""
    tree = material.node_tree
    if not tree:
        return
    tree.links.clear()
    tree.nodes.clear()
def colored_material_diffuse_BSDF(r, g, b, a=1, roughness=0.127451):
    """Create a new Blender material named 'body' with a single diffuse BSDF
    node of the given RGBA color and roughness, wired to the material output."""
    materials = bpy.data.materials
    material = materials.new(name='body')
    material.use_nodes = True
    # Start from an empty node tree (drops the default Principled BSDF setup).
    clear_material(material)
    nodes = material.node_tree.nodes
    links = material.node_tree.links
    output = nodes.new(type='ShaderNodeOutputMaterial')
    diffuse = nodes.new(type='ShaderNodeBsdfDiffuse')
    diffuse.inputs['Color'].default_value = (r, g, b, a)
    diffuse.inputs['Roughness'].default_value = roughness
    links.new(diffuse.outputs['BSDF'], output.inputs['Surface'])
    return material
def colored_material_relection_BSDF(r, g, b, a=1, roughness=0.127451, saturation_factor=1):
    """Create a material using the default Principled BSDF node, with the base
    color channels scaled by saturation_factor.

    NOTE(review): the name is misspelled ('relection' for 'reflection') but is
    kept for caller compatibility. Unlike colored_material_diffuse_BSDF, the
    default node tree is NOT cleared here.
    """
    materials = bpy.data.materials
    material = materials.new(name='body')
    material.use_nodes = True
    nodes = material.node_tree.nodes
    links = material.node_tree.links
    output = nodes.new(type='ShaderNodeOutputMaterial')
    # Reuse the Principled BSDF node that use_nodes creates by default.
    diffuse = nodes['Principled BSDF']
    diffuse.inputs['Base Color'].default_value = ((r * saturation_factor), (g * saturation_factor), (b * saturation_factor), a)
    diffuse.inputs['Roughness'].default_value = roughness
    links.new(diffuse.outputs['BSDF'], output.inputs['Surface'])
    return material
def body_material(r, g, b, a=1, name='body', oldrender=True):
    """Return a body material: a plain diffuse BSDF when oldrender is True,
    otherwise a Principled BSDF configured from DEFAULT_BSDF_SETTINGS with the
    given base/subsurface color."""
    if oldrender:
        material = colored_material_diffuse_BSDF(r, g, b, a=a)
    else:
        materials = bpy.data.materials
        material = materials.new(name=name)
        material.use_nodes = True
        nodes = material.node_tree.nodes
        # Reuse the Principled BSDF node that use_nodes creates by default.
        diffuse = nodes['Principled BSDF']
        inputs = diffuse.inputs
        settings = DEFAULT_BSDF_SETTINGS.copy()
        settings['Base Color'] = (r, g, b, a)
        settings['Subsurface Color'] = (r, g, b, a)
        settings['Subsurface'] = 0.0
        for (setting, val) in settings.items():
            inputs[setting].default_value = val
    return material
def colored_material_bsdf(name, **kwargs):
    """Create a Principled-BSDF material: DEFAULT_BSDF_SETTINGS overridden by
    any keyword arguments, all applied to the node's inputs."""
    material = bpy.data.materials.new(name=name)
    material.use_nodes = True
    bsdf_inputs = material.node_tree.nodes['Principled BSDF'].inputs
    settings = {**DEFAULT_BSDF_SETTINGS, **kwargs}
    for setting, val in settings.items():
        bsdf_inputs[setting].default_value = val
    return material
def floor_mat(name='floor_mat', color=(0.1, 0.1, 0.1, 1), roughness=0.127451):
    """Return a diffuse floor material of the given RGBA color and roughness.

    NOTE: the name parameter is accepted but not forwarded (the helper names
    the material 'body'), matching the original behavior.
    """
    r, g, b, a = color
    return colored_material_diffuse_BSDF(r, g, b, a=a, roughness=roughness)