import torch
import numpy as np
import torch.nn as nn
import math
from einops import rearrange
import sys

sys.path.append('')

from fish_speech.models.v2s_unit.modules.llama_nar import DiffLlama
from fish_speech.models.v2s_unit.load_pretrain_model import build_avhubert_encoder
from fish_speech.models.v2s_unit.mask import make_pad_mask
from fish_speech.models.v2s_unit.length_regulator import InterpolateRegulator


def top_k(logits, thres=0.9):
    # Keep only the top (1 - thres) fraction of the vocabulary at each position;
    # all other logits are set to -inf so they can never be sampled.
    k = math.ceil((1 - thres) * logits.shape[-1])
    val, ind = logits.topk(k, dim=-1)
    probs = torch.full_like(logits, float("-inf"))
    probs.scatter_(2, ind, val)
    return probs


def log(t, eps=1e-10):
    return torch.log(t + eps)


def gumbel_noise(t):
    noise = torch.zeros_like(t).uniform_(0, 1)
    return -log(-log(noise))


def gumbel_sample(t, temperature=1.0, dim=-1):
    # Gumbel-max trick: adding Gumbel noise to (logits / temperature) and taking
    # the argmax draws a sample from the corresponding softmax distribution.
    return ((t / max(temperature, 1e-10)) + gumbel_noise(t)).argmax(dim=dim)
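
# Worked example: with the default codebook (8192 units + 1 pad label) and
# filter_thres=0.98 as used in `reverse_diffusion`, top_k keeps
# ceil(0.02 * 8193) = 164 candidate units per position before Gumbel sampling.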


class ConformerUnit(nn.Module):
    def __init__(
        self,
        hidden_size=512,
        token_codebook_size=8192,
        ssl_dim=1024,
    ):
        super().__init__()

        self.hidden_size = hidden_size
        self.token_codebook_size = token_codebook_size
        self.ssl_dim = ssl_dim

        # Index `token_codebook_size` is reserved as the padding / ignore label.
        self.to_logit = nn.Linear(self.hidden_size, token_codebook_size + 1)

        self.reset_parameters()

        # NOTE: LabelSmoothingLoss is not imported in this file; it is assumed to
        # be provided elsewhere in the project. The same holds for `self.cond_emb`,
        # `self.lr`, `self.encoder` and `self.extract_video_feats` used in
        # `forward`/`inference` below.
        self.criterion_ce = LabelSmoothingLoss(
            size=token_codebook_size + 1,
            padding_idx=token_codebook_size,
            smoothing=0,
            normalize_length=True,
        )

    def reset_parameters(self):
        def _reset_parameters(m):
            if isinstance(m, nn.MultiheadAttention):
                if m._qkv_same_embed_dim:
                    nn.init.normal_(m.in_proj_weight, std=0.02)
                else:
                    nn.init.normal_(m.q_proj_weight, std=0.02)
                    nn.init.normal_(m.k_proj_weight, std=0.02)
                    nn.init.normal_(m.v_proj_weight, std=0.02)

                if m.in_proj_bias is not None:
                    nn.init.constant_(m.in_proj_bias, 0.0)
                    nn.init.constant_(m.out_proj.bias, 0.0)
                if m.bias_k is not None:
                    nn.init.xavier_normal_(m.bias_k)
                if m.bias_v is not None:
                    nn.init.xavier_normal_(m.bias_v)

            elif isinstance(
                m, (nn.Conv1d, nn.ConvTranspose1d, nn.Conv2d, nn.ConvTranspose2d)
            ):
                m.weight.data.normal_(0.0, 0.02)

            elif isinstance(m, nn.Linear):
                m.weight.data.normal_(mean=0.0, std=0.02)
                if m.bias is not None:
                    m.bias.data.zero_()

            elif isinstance(m, nn.Embedding):
                m.weight.data.normal_(mean=0.0, std=0.02)
                if m.padding_idx is not None:
                    m.weight.data[m.padding_idx].zero_()

        self.apply(_reset_parameters)

    def forward(self, codes, code_lengths, video_features=None, video_feature_lengths=None):
        mouth_embedding = self.extract_video_feats(video_features, video_feature_lengths)

        cond = self.cond_emb(mouth_embedding)
        cond, _ = self.lr(cond, code_lengths)
        h, h_lengths = self.encoder(cond, code_lengths)
        logits = self.to_logit(h)
        loss = self.criterion_ce(logits, codes)

        return loss

    def inference(self, codes, code_lengths, video_features=None, video_feature_lengths=None):
        mouth_embedding = self.extract_video_feats(video_features, video_feature_lengths)

        cond = self.cond_emb(mouth_embedding)
        cond, _ = self.lr(cond, code_lengths)
        h, h_lengths = self.encoder(cond, code_lengths)
        logits = self.to_logit(h)
        # `th_accuracy` is likewise assumed to be imported/provided elsewhere.
        acc = th_accuracy(
            logits.view(-1, self.token_codebook_size + 1),
            codes,
            ignore_label=self.token_codebook_size,
        )

        return acc
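
# Sketch of the intended ConformerUnit call pattern (the encoder/condition
# modules referenced above are assumed to be defined elsewhere):
#   loss = conformer_unit(codes, code_lengths, video_features, video_feature_lengths)
#   acc = conformer_unit.inference(codes, code_lengths, video_features, video_feature_lengths)
# `forward` returns the CE loss over unit ids (pad id = token_codebook_size);
# `inference` returns token accuracy with that pad id ignored.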


class MaskTransformerUnit(nn.Module):
    def __init__(
        self,
        hidden_size=512,
        num_layers=8,
        num_heads=8,
        cfg_scale=0.15,
        token_codebook_size=8192,
        cond_dim=512,
        ssl_dim=1024,
        avhubert_ckpt_path=None,
        avhubert_output_layer=None,
    ):
        super().__init__()

        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.cfg_scale = cfg_scale
        self.token_codebook_size = token_codebook_size
        self.cond_dim = cond_dim
        self.ssl_dim = ssl_dim

        # Project AV-HuBERT visual features to the model width.
        self.cond_emb = nn.Linear(self.ssl_dim, self.hidden_size)

        # Single learned [MASK] embedding.
        self.mask_emb = nn.Embedding(1, self.hidden_size)

        # Index `token_codebook_size` is reserved as the padding / ignore label.
        self.to_logit = nn.Linear(self.hidden_size, token_codebook_size + 1)

        self.token_emb = nn.Embedding(token_codebook_size + 1, self.hidden_size, padding_idx=token_codebook_size)

        self.reset_parameters()

        self.diff_estimator = DiffLlama(
            hidden_size=hidden_size,
            num_heads=num_heads,
            num_layers=num_layers,
        )

        self.lr = InterpolateRegulator(self.hidden_size, sampling_ratios=[1, 1, 1, 1])

        # Frozen AV-HuBERT encoder, used only for visual feature extraction.
        self.av_hubert_encoder = build_avhubert_encoder(avhubert_ckpt_path)
        self.avhubert_output_layer = avhubert_output_layer
        for p in self.av_hubert_encoder.parameters():
            p.requires_grad = False

    def extract_video_feats(self, video_features, video_feature_lengths):
        padding_mask = make_pad_mask(video_feature_lengths).to(video_features)
        # (B, T, H, W, C) -> (B, C, T, H, W), the layout the AV-HuBERT encoder consumes.
        source = {'audio': None, 'video': video_features.permute(0, 4, 1, 2, 3)}
        w2v_args = {
            "source": source,
            "padding_mask": padding_mask,
            "output_layer": self.avhubert_output_layer,
        }
        with torch.no_grad():
            x, padding_mask = self.av_hubert_encoder.w2v_model.extract_finetune(**w2v_args)
        return x

    def mask_prob(self, t):
        # Sine masking schedule: fraction of positions to mask at diffusion time t
        # (t = 1 -> everything masked, t = 0 -> nothing masked).
        return torch.sin(t * np.pi / 2).to(t.device)
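
    # Worked example of the schedule (values follow directly from the formula):
    #   t = 1.0 -> sin(pi/2)  = 1.00  (everything masked)
    #   t = 0.5 -> sin(pi/4) ~= 0.71
    #   t = 0.1 -> sin(pi/20) ~= 0.16, floored to 0.2 in `forward_diffusion`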

    def forward_diffusion(self, x0, t):
        new_t = t
        mask_prob = self.mask_prob(new_t)
        # Floor the masking ratio so at least 20% of positions can be masked.
        mask_prob = torch.where(
            mask_prob < 0.2, torch.ones_like(mask_prob) * 0.2, mask_prob
        )
        mask_token = self.mask_emb(
            torch.LongTensor([0]).to(x0.device)
        )

        xt = torch.zeros(x0.shape[0], x0.shape[1], self.hidden_size).to(x0.device)

        cfg_scale = self.cfg_scale

        # With probability (1 - cfg_scale), keep a random-length unmasked prompt
        # prefix; otherwise use no prompt at all.
        if torch.rand(1) > cfg_scale:
            prompt_len = torch.randint(
                min(x0.shape[1] // 4, 5), int(x0.shape[1] * 0.4), (x0.shape[0],)
            ).to(
                x0.device
            )
        else:
            prompt_len = torch.zeros(x0.shape[0]).to(x0)

        # Mark the prompt prefix positions so they are never masked.
        is_prompt = torch.zeros_like(x0)
        col_indices = (
            torch.arange(is_prompt.shape[1])
            .repeat(is_prompt.shape[0], 1)
            .to(prompt_len)
        )
        is_prompt[col_indices < prompt_len.unsqueeze(1)] = 1

        # Sample the Bernoulli mask and make sure every row masks at least one token.
        mask = torch.bernoulli(torch.ones_like(x0) * mask_prob[..., None])
        mask[is_prompt.bool()] = 0
        mask_num = mask.sum(dim=1)
        all_zero_mask = (mask_num == 0).bool()
        row_indices_to_modify = torch.nonzero(all_zero_mask)
        mask[row_indices_to_modify, prompt_len[row_indices_to_modify]] = 1
        mask = mask[..., None]
        # Masked positions receive the [MASK] embedding; the rest keep their token embeddings.
        xt = (
            xt + mask * mask_token[:, None, :] + (1 - mask) * self.token_emb(x0)
        )

        return xt, new_t, mask, prompt_len, mask_prob

    def loss_t(self, x0, x_mask, t, mouth_embedding=None):
        xt, new_t, mask, prompt_len, mask_prob = self.forward_diffusion(x0, t)

        # Project visual features and interpolate them to the target unit length.
        cond = self.cond_emb(mouth_embedding)
        cond, _ = self.lr(cond, x0.shape[1])

        embeds = self.diff_estimator(
            xt, new_t, x_mask, cond=cond
        )
        logits = self.to_logit(embeds)

        # Only masked, non-padded positions should contribute to the loss.
        final_mask = mask * x_mask[..., None]

        return logits, final_mask, x0, prompt_len, mask_prob

    def compute_loss(self, x0, x_mask, mouth_embedding=None):
        # Sample a random diffusion time per example.
        t = torch.rand(x0.shape[0], device=x0.device, requires_grad=False)
        t = torch.clamp(t, 1e-5, 1.0)
        return self.loss_t(x0, x_mask, t, mouth_embedding)
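
    # Note (assumption, not part of this file): the training loop is expected to
    # reduce these outputs to a masked cross-entropy, e.g. something like
    #   ce = torch.nn.functional.cross_entropy(logits.transpose(1, 2), x0, reduction="none")
    #   loss = (ce * final_mask.squeeze(-1)).sum() / final_mask.sum().clamp(min=1)
    # so that only masked, valid positions are supervised.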

    def reset_parameters(self):
        def _reset_parameters(m):
            if isinstance(m, nn.MultiheadAttention):
                if m._qkv_same_embed_dim:
                    nn.init.normal_(m.in_proj_weight, std=0.02)
                else:
                    nn.init.normal_(m.q_proj_weight, std=0.02)
                    nn.init.normal_(m.k_proj_weight, std=0.02)
                    nn.init.normal_(m.v_proj_weight, std=0.02)

                if m.in_proj_bias is not None:
                    nn.init.constant_(m.in_proj_bias, 0.0)
                    nn.init.constant_(m.out_proj.bias, 0.0)
                if m.bias_k is not None:
                    nn.init.xavier_normal_(m.bias_k)
                if m.bias_v is not None:
                    nn.init.xavier_normal_(m.bias_v)

            elif isinstance(
                m, (nn.Conv1d, nn.ConvTranspose1d, nn.Conv2d, nn.ConvTranspose2d)
            ):
                m.weight.data.normal_(0.0, 0.02)

            elif isinstance(m, nn.Linear):
                m.weight.data.normal_(mean=0.0, std=0.02)
                if m.bias is not None:
                    m.bias.data.zero_()

            elif isinstance(m, nn.Embedding):
                m.weight.data.normal_(mean=0.0, std=0.02)
                if m.padding_idx is not None:
                    m.weight.data[m.padding_idx].zero_()

        self.apply(_reset_parameters)

    @torch.no_grad()
    def reverse_diffusion(
        self,
        target_len,
        video_features,
        video_feature_lengths=None,
        prompt=None,
        temp=0.9,
        filter_thres=0.98,
        n_timesteps=40,
        cfg=1.0,
        rescale_cfg=1.0,
    ):
        mouth_embedding = self.extract_video_feats(video_features, video_feature_lengths)
        cond = self.cond_emb(mouth_embedding)
        cond, _ = self.lr(cond, target_len)

        x_mask = torch.ones(video_features.shape[0], target_len).to(
            video_features.device
        )

        cum = torch.zeros(x_mask.shape[0], x_mask.shape[1], self.hidden_size).to(
            x_mask.device
        )

        bsz, seq_len, _ = cum.shape

        choice_temp = 1.0
        start_temp = temp
        start_choice_temp = choice_temp

        xt = torch.LongTensor(bsz, seq_len).to(x_mask.device)

        steps = n_timesteps
        to_logit = self.to_logit
        token_emb = self.token_emb

        mask_token = self.mask_emb(torch.LongTensor([0]).to(xt.device))
        # Start fully masked with a dummy all-zero token sequence.
        mask = torch.full((bsz, seq_len, 1), True).to(x_mask.device)
        seq = torch.full((bsz, seq_len), 0).to(x_mask.device)
        h = 1.0 / steps

        # Iterate diffusion time from t = 1 down to t = 0 in `steps` uniform steps.
        t_list = [1.0 - i * h for i in range(steps)]
        t_list.append(0.0)
        for i in range(steps):
            t = t_list[i] * torch.ones(bsz).to(x_mask.device)
            token = token_emb(seq)
            # Masked positions get the [MASK] embedding, the rest their current token embedding.
            cur = cum + mask * mask_token[:, None, :] + (~mask) * token

            xt_input = cur
            xt_mask = x_mask

            embeds = self.diff_estimator(
                xt_input,
                t,
                xt_mask,
                cond=cond
            )

            # Classifier-free guidance: contrast against a pass with zeroed condition,
            # then rescale to keep the output statistics close to the conditional pass.
            if cfg > 0:
                cfg_cond = torch.zeros_like(cur)
                mask_embeds = self.diff_estimator(
                    cur,
                    t,
                    x_mask,
                    cond=cfg_cond
                )
                pos_emb_std = embeds.std()
                embeds = embeds + cfg * (embeds - mask_embeds)
                rescale_embeds = embeds * pos_emb_std / embeds.std()
                embeds = rescale_cfg * rescale_embeds + (1 - rescale_cfg) * embeds

            logits = to_logit(embeds)
            annealing_scale = t_list[i]

            # Anneal both the sampling and the re-masking temperature towards 0.
            choice_temp = start_choice_temp * annealing_scale
            temp = start_temp * annealing_scale
            logits = top_k(logits, filter_thres)

            if i == steps - 1:
                # Final step: decode greedily (or with a small temperature when single-step).
                if steps == 1:
                    temp = 0.2
                    sampled_ids = gumbel_sample(logits, temperature=max(temp, 1e-3))
                else:
                    sampled_ids = logits.argmax(dim=-1)
            else:
                sampled_ids = gumbel_sample(logits, temperature=max(temp, 1e-3))

            # Commit samples only at currently masked positions.
            seq = torch.where(mask.squeeze(-1), sampled_ids, seq)
            # Keep the latest committed sequence so it is what gets returned even when
            # the loop exits early below (otherwise the final step's samples are lost).
            xt = seq

            scores = logits.softmax(dim=-1)
            scores = scores.gather(2, rearrange(sampled_ids, "b n -> b n 1"))
            scores = rearrange(scores, "b n 1 -> b n")

            # Lower confidence -> higher score -> more likely to be re-masked.
            scores = choice_temp * gumbel_noise(scores) + scores
            scores = 1 - scores

            next_t = t_list[i + 1] * torch.ones(bsz).to(x_mask.device)

            next_mask_num = (self.mask_prob(next_t) * seq_len).long()[0].item()

            if next_mask_num == 0:
                break
            # Never re-mask positions that are already fixed.
            scores = scores.masked_fill(
                ~mask.squeeze(-1), -torch.finfo(scores.dtype).max
            )

            mask_indices = scores.topk(next_mask_num, dim=-1).indices
            mask = torch.zeros_like(scores, dtype=torch.bool).scatter(
                1, mask_indices, True
            )
            seq = seq.masked_fill(mask, 0)

            mask = mask.unsqueeze(-1)

            cum = cum + token_emb(seq)
            xt = seq

        return xt
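
    # Illustrative inference call (shapes are assumptions based on
    # `extract_video_feats`, which expects video of layout (B, T, H, W, C)):
    #   units = model.reverse_diffusion(
    #       target_len=desired_unit_length,
    #       video_features=video,                      # (B, T, H, W, C)
    #       video_feature_lengths=video_lens,          # (B,)
    #       n_timesteps=40, cfg=1.0, rescale_cfg=1.0,
    #   )                                              # -> (B, target_len) unit ids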

    def forward(self, x0, x_mask, video_features=None, video_feature_lengths=None):
        mouth_embedding = self.extract_video_feats(video_features, video_feature_lengths)

        logits, final_mask, x0, prompt_len, mask_prob = self.compute_loss(
            x0, x_mask, mouth_embedding
        )
        return logits, final_mask, x0, prompt_len, mask_prob
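

if __name__ == "__main__":
    # Lightweight smoke test of the model-free sampling helpers. This block is a
    # sketch for illustration only: the shapes and hyper-parameters below are
    # arbitrary and not tied to any configuration used elsewhere in the project.
    dummy_logits = torch.randn(2, 10, 8193)
    filtered = top_k(dummy_logits, thres=0.98)
    unit_ids = gumbel_sample(filtered, temperature=0.9)
    assert unit_ids.shape == (2, 10)
    # The masking schedule used by MaskTransformerUnit: sin(t * pi / 2).
    t = torch.tensor([1.0, 0.5, 0.1])
    print("sampled unit ids:", unit_ids[0])
    print("mask ratios:", torch.sin(t * np.pi / 2))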