import queue
import threading

import torch
import torch.nn.functional as F

from matcha.models.components.flow_matching import BASECFM


class EstimatorWrapper:
    """Thread-safe pool of TensorRT execution contexts created from a single engine."""

    def __init__(self, estimator_engine, estimator_count=2):
        self.estimators = queue.Queue()
        self.estimator_engine = estimator_engine
        for _ in range(estimator_count):
            estimator = estimator_engine.create_execution_context()
            if estimator is not None:
                self.estimators.put(estimator)

        if self.estimators.empty():
            raise Exception("No available estimator")

    def acquire_estimator(self):
        # Blocks until a free execution context is returned to the pool.
        return self.estimators.get(), self.estimator_engine

    def release_estimator(self, estimator):
        self.estimators.put(estimator)

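# Example (hedged sketch): wrapping a deserialized TensorRT engine. The plan path
# "estimator.plan" is hypothetical; only the standard TensorRT Python API is assumed.
#
#   import tensorrt as trt
#   logger = trt.Logger(trt.Logger.WARNING)
#   with open("estimator.plan", "rb") as f, trt.Runtime(logger) as runtime:
#       engine = runtime.deserialize_cuda_engine(f.read())
#   estimator = EstimatorWrapper(engine, estimator_count=2)
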

class ConditionalCFM(BASECFM):
    def __init__(self, in_channels, cfm_params, n_spks=1, spk_emb_dim=64, estimator: torch.nn.Module = None):
        super().__init__(
            n_feats=in_channels,
            cfm_params=cfm_params,
            n_spks=n_spks,
            spk_emb_dim=spk_emb_dim,
        )
        self.t_scheduler = cfm_params.t_scheduler
        self.training_cfg_rate = cfm_params.training_cfg_rate
        self.inference_cfg_rate = cfm_params.inference_cfg_rate
        in_channels = in_channels + (spk_emb_dim if n_spks > 0 else 0)
        # The estimator predicts the flow-matching vector field; it can be a plain
        # torch.nn.Module, a single TensorRT execution context, or an EstimatorWrapper pool.
        self.estimator = estimator
        self.lock = threading.Lock()

    @torch.inference_mode()
    def forward(self, mu, mask, n_timesteps, temperature=1.0, spks=None, cond=None, prompt_len=0, flow_cache=torch.zeros(1, 80, 0, 2)):
        """Forward diffusion

        Args:
            mu (torch.Tensor): output of encoder
                shape: (batch_size, n_feats, mel_timesteps)
            mask (torch.Tensor): output_mask
                shape: (batch_size, 1, mel_timesteps)
            n_timesteps (int): number of diffusion steps
            temperature (float, optional): temperature for scaling noise. Defaults to 1.0.
            spks (torch.Tensor, optional): speaker embedding. Defaults to None.
                shape: (batch_size, spk_emb_dim)
            cond: Not used but kept for future purposes

        Returns:
            sample: generated mel-spectrogram
                shape: (batch_size, n_feats, mel_timesteps)
        """
        z = torch.randn_like(mu).to(mu.device).to(mu.dtype) * temperature
        cache_size = flow_cache.shape[2]
        # Reuse the cached noise and encoder output for the overlapping prefix so that
        # consecutive streaming chunks stay consistent.
        if cache_size != 0:
            z[:, :, :cache_size] = flow_cache[:, :, :, 0]
            mu[:, :, :cache_size] = flow_cache[:, :, :, 1]
        z_cache = torch.concat([z[:, :, :prompt_len], z[:, :, -34:]], dim=2)
        mu_cache = torch.concat([mu[:, :, :prompt_len], mu[:, :, -34:]], dim=2)
        flow_cache = torch.stack([z_cache, mu_cache], dim=-1)

        t_span = torch.linspace(0, 1, n_timesteps + 1, device=mu.device, dtype=mu.dtype)
        if self.t_scheduler == 'cosine':
            t_span = 1 - torch.cos(t_span * 0.5 * torch.pi)
        return self.solve_euler(z, t_span=t_span, mu=mu, mask=mask, spks=spks, cond=cond), flow_cache

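    # Example (sketch, variable names hypothetical): streaming use of the flow cache,
    # carrying the cached noise/encoder prefix from one chunk to the next.
    #
    #   mel, cache = cfm(mu_chunk0, mask0, n_timesteps=10, prompt_len=p)
    #   mel, cache = cfm(mu_chunk1, mask1, n_timesteps=10, prompt_len=p, flow_cache=cache)
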
    def solve_euler(self, x, t_span, mu, mask, spks, cond):
        """
        Fixed euler solver for ODEs.
        Args:
            x (torch.Tensor): random noise
            t_span (torch.Tensor): n_timesteps interpolated
                shape: (n_timesteps + 1,)
            mu (torch.Tensor): output of encoder
                shape: (batch_size, n_feats, mel_timesteps)
            mask (torch.Tensor): output_mask
                shape: (batch_size, 1, mel_timesteps)
            spks (torch.Tensor, optional): speaker embedding. Defaults to None.
                shape: (batch_size, spk_emb_dim)
            cond: Not used but kept for future purposes
        """
        t, _, dt = t_span[0], t_span[-1], t_span[1] - t_span[0]
        t = t.unsqueeze(dim=0)

        sol = []

        # Pre-allocate a batch of two: row 0 is the conditional pass, row 1 the
        # unconditional pass for classifier-free guidance. Static buffers keep the
        # memory layout stable for the TensorRT path.
        x_in = torch.zeros([2, 80, x.size(2)], device=x.device, dtype=x.dtype)
        mask_in = torch.zeros([2, 1, x.size(2)], device=x.device, dtype=x.dtype)
        mu_in = torch.zeros([2, 80, x.size(2)], device=x.device, dtype=x.dtype)
        t_in = torch.zeros([2], device=x.device, dtype=x.dtype)
        spks_in = torch.zeros([2, 80], device=x.device, dtype=x.dtype)
        cond_in = torch.zeros([2, 80, x.size(2)], device=x.device, dtype=x.dtype)
        for step in range(1, len(t_span)):
            # Run the conditional and unconditional estimates in a single batch;
            # row 1 keeps mu, spks and cond zeroed.
            x_in[:] = x
            mask_in[:] = mask
            mu_in[0] = mu
            t_in[:] = t.unsqueeze(0)
            spks_in[0] = spks
            cond_in[0] = cond
            dphi_dt = self.forward_estimator(
                x_in, mask_in,
                mu_in, t_in,
                spks_in,
                cond_in
            )
            dphi_dt, cfg_dphi_dt = torch.split(dphi_dt, [x.size(0), x.size(0)], dim=0)
            dphi_dt = ((1.0 + self.inference_cfg_rate) * dphi_dt - self.inference_cfg_rate * cfg_dphi_dt)
            x = x + dt * dphi_dt
            t = t + dt
            sol.append(x)
            if step < len(t_span) - 1:
                dt = t_span[step + 1] - t

        return sol[-1].float()

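    # The guided vector field above follows the usual classifier-free guidance form
    # with guidance weight w = inference_cfg_rate:
    #
    #   v(x_t)     = (1 + w) * v_cond(x_t) - w * v_uncond(x_t)
    #   x_{t + dt} = x_t + dt * v(x_t)
    #
    # where v_uncond is evaluated with mu, spks and cond zeroed out (row 1 of the batch).
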
    def forward_estimator(self, x, mask, mu, t, spks, cond):
        if isinstance(self.estimator, torch.nn.Module):
            # Plain PyTorch estimator.
            return self.estimator.forward(x, mask, mu, t, spks, cond)
        elif isinstance(self.estimator, EstimatorWrapper):
            # TensorRT with a pool of execution contexts (execute_async_v3 API).
            estimator, engine = self.estimator.acquire_estimator()

            estimator.set_input_shape('x', (2, 80, x.size(2)))
            estimator.set_input_shape('mask', (2, 1, x.size(2)))
            estimator.set_input_shape('mu', (2, 80, x.size(2)))
            estimator.set_input_shape('t', (2,))
            estimator.set_input_shape('spks', (2, 80))
            estimator.set_input_shape('cond', (2, 80, x.size(2)))

            data_ptrs = [x.contiguous().data_ptr(),
                         mask.contiguous().data_ptr(),
                         mu.contiguous().data_ptr(),
                         t.contiguous().data_ptr(),
                         spks.contiguous().data_ptr(),
                         cond.contiguous().data_ptr(),
                         x.data_ptr()]  # the output is written in place over x

            for idx, data_ptr in enumerate(data_ptrs):
                estimator.set_tensor_address(engine.get_tensor_name(idx), data_ptr)

            estimator.execute_async_v3(torch.cuda.current_stream().cuda_stream)

            torch.cuda.current_stream().synchronize()
            self.estimator.release_estimator(estimator)
            return x
        else:
            # Single TensorRT execution context shared across threads (execute_v2 API).
            with self.lock:
                self.estimator.set_input_shape('x', (2, 80, x.size(2)))
                self.estimator.set_input_shape('mask', (2, 1, x.size(2)))
                self.estimator.set_input_shape('mu', (2, 80, x.size(2)))
                self.estimator.set_input_shape('t', (2,))
                self.estimator.set_input_shape('spks', (2, 80))
                self.estimator.set_input_shape('cond', (2, 80, x.size(2)))
                self.estimator.execute_v2([x.contiguous().data_ptr(),
                                           mask.contiguous().data_ptr(),
                                           mu.contiguous().data_ptr(),
                                           t.contiguous().data_ptr(),
                                           spks.contiguous().data_ptr(),
                                           cond.contiguous().data_ptr(),
                                           x.data_ptr()])
            return x

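    # Example (hedged sketch): the TensorRT branches above assume an engine whose I/O
    # tensors are ordered (x, mask, mu, t, spks, cond, output). One plausible way to
    # build such a plan from an ONNX export of the estimator (file names and shape
    # bounds are hypothetical):
    #
    #   trtexec --onnx=estimator.onnx --saveEngine=estimator.plan --fp16 \
    #           --minShapes=x:2x80x4,mask:2x1x4,mu:2x80x4,cond:2x80x4 \
    #           --optShapes=x:2x80x200,mask:2x1x200,mu:2x80x200,cond:2x80x200 \
    #           --maxShapes=x:2x80x3000,mask:2x1x3000,mu:2x80x3000,cond:2x80x3000
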
    def compute_loss(self, x1, mask, mu, spks=None, cond=None):
        """Computes diffusion loss

        Args:
            x1 (torch.Tensor): Target
                shape: (batch_size, n_feats, mel_timesteps)
            mask (torch.Tensor): target mask
                shape: (batch_size, 1, mel_timesteps)
            mu (torch.Tensor): output of encoder
                shape: (batch_size, n_feats, mel_timesteps)
            spks (torch.Tensor, optional): speaker embedding. Defaults to None.
                shape: (batch_size, spk_emb_dim)

        Returns:
            loss: conditional flow matching loss
            y: conditional flow
                shape: (batch_size, n_feats, mel_timesteps)
        """
        b, _, t = mu.shape

        # random timestep
        t = torch.rand([b, 1, 1], device=mu.device, dtype=mu.dtype)
        if self.t_scheduler == 'cosine':
            t = 1 - torch.cos(t * 0.5 * torch.pi)
        # sample noise p(x_0)
        z = torch.randn_like(x1)

        y = (1 - (1 - self.sigma_min) * t) * z + t * x1
        u = x1 - (1 - self.sigma_min) * z

        # during training, randomly drop the conditioning to trade off mode coverage
        # and sample fidelity (classifier-free guidance training)
        if self.training_cfg_rate > 0:
            cfg_mask = torch.rand(b, device=x1.device) > self.training_cfg_rate
            mu = mu * cfg_mask.view(-1, 1, 1)
            spks = spks * cfg_mask.view(-1, 1)
            cond = cond * cfg_mask.view(-1, 1, 1)

        pred = self.estimator(y, mask, mu, t.squeeze(), spks, cond)
        loss = F.mse_loss(pred * mask, u * mask, reduction="sum") / (torch.sum(mask) * u.shape[1])
        return loss, y

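    # The interpolation and target above follow the optimal-transport CFM objective,
    # with sigma_min taken from cfm_params:
    #
    #   y_t  = (1 - (1 - sigma_min) * t) * z + t * x1      # noisy sample at time t
    #   u    = x1 - (1 - sigma_min) * z                    # target vector field dy_t/dt
    #   loss = || estimator(y_t, t) - u ||^2               # masked MSE, normalized by
    #                                                      # valid frames and mel bins
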

class CausalConditionalCFM(ConditionalCFM):
    def __init__(self, in_channels, cfm_params, n_spks=1, spk_emb_dim=64, estimator: torch.nn.Module = None):
        super().__init__(in_channels, cfm_params, n_spks, spk_emb_dim, estimator)
        # Fixed noise buffer so repeated/streamed calls draw from the same noise sequence.
        self.rand_noise = torch.randn([1, 80, 50 * 300])

    @torch.inference_mode()
    def forward(self, mu, mask, n_timesteps, temperature=1.0, spks=None, cond=None):
        """Forward diffusion

        Args:
            mu (torch.Tensor): output of encoder
                shape: (batch_size, n_feats, mel_timesteps)
            mask (torch.Tensor): output_mask
                shape: (batch_size, 1, mel_timesteps)
            n_timesteps (int): number of diffusion steps
            temperature (float, optional): temperature for scaling noise. Defaults to 1.0.
            spks (torch.Tensor, optional): speaker embedding. Defaults to None.
                shape: (batch_size, spk_emb_dim)
            cond: Not used but kept for future purposes

        Returns:
            sample: generated mel-spectrogram
                shape: (batch_size, n_feats, mel_timesteps)
        """
        z = self.rand_noise[:, :, :mu.size(2)].to(mu.device).to(mu.dtype) * temperature

        t_span = torch.linspace(0, 1, n_timesteps + 1, device=mu.device, dtype=mu.dtype)
        if self.t_scheduler == 'cosine':
            t_span = 1 - torch.cos(t_span * 0.5 * torch.pi)
        return self.solve_euler(z, t_span=t_span, mu=mu, mask=mask, spks=spks, cond=cond), None
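
# Example (hedged sketch): constructing and sampling from the causal CFM decoder.
# The hyper-parameter values, `in_channels`, and `my_decoder_module` are assumptions
# for illustration, not the project's defaults.
#
#   from types import SimpleNamespace
#   cfm_params = SimpleNamespace(sigma_min=1e-6, solver='euler', t_scheduler='cosine',
#                                training_cfg_rate=0.2, inference_cfg_rate=0.7)
#   cfm = CausalConditionalCFM(in_channels=240, cfm_params=cfm_params,
#                              estimator=my_decoder_module)
#   mel, _ = cfm(mu, mask, n_timesteps=10, spks=spk_emb, cond=cond)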