import math
from typing import List, Optional, Tuple, Union

import numpy as np
import torch

from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_dpmsolver_sde import BrownianTreeNoiseSampler
from .scheduling_utils import SchedulerMixin, SchedulerOutput


class CosineDPMSolverMultistepScheduler(SchedulerMixin, ConfigMixin):
    """
    Implements a variant of `DPMSolverMultistepScheduler` with a cosine schedule, proposed by Nichol and Dhariwal
    (2021). This scheduler was used in Stable Audio Open [1].

    [1] Evans, Parker, et al. "Stable Audio Open" https://arxiv.org/abs/2407.14358

    This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the
    generic methods the library implements for all schedulers such as loading and saving.

    Args:
        sigma_min (`float`, *optional*, defaults to 0.3):
            Minimum noise magnitude in the sigma schedule. This was set to 0.3 in Stable Audio Open [1].
        sigma_max (`float`, *optional*, defaults to 500):
            Maximum noise magnitude in the sigma schedule. This was set to 500 in Stable Audio Open [1].
        sigma_data (`float`, *optional*, defaults to 1.0):
            The standard deviation of the data distribution. This is set to 1.0 in Stable Audio Open [1].
        sigma_schedule (`str`, *optional*, defaults to `"exponential"`):
            Sigma schedule used to compute the `sigmas`. Accepted values are `"karras"` (the schedule introduced in
            the EDM paper, https://arxiv.org/abs/2206.00364) and `"exponential"`. The exponential schedule was
            incorporated in this model: https://huggingface.co/stabilityai/cosxl.
        num_train_timesteps (`int`, defaults to 1000):
            The number of diffusion steps to train the model.
        solver_order (`int`, defaults to 2):
            The DPMSolver order which can be `1` or `2`. It is recommended to use `solver_order=2`.
        prediction_type (`str`, defaults to `v_prediction`, *optional*):
            Prediction type of the scheduler function; can be `epsilon` (predicts the noise of the diffusion
            process), `sample` (directly predicts the noisy sample) or `v_prediction` (see section 2.4 of [Imagen
            Video](https://imagen.research.google/video/paper.pdf) paper).
        rho (`float`, *optional*, defaults to 7.0):
            The rho parameter used for calculating the Karras sigma schedule, set to 7.0 in the EDM paper
            (https://arxiv.org/abs/2206.00364). Only relevant when `sigma_schedule="karras"`.
        solver_type (`str`, defaults to `midpoint`):
            Solver type for the second-order solver; can be `midpoint` or `heun`. The solver type slightly affects
            the sample quality, especially for a small number of steps. It is recommended to use `midpoint` solvers.
        lower_order_final (`bool`, defaults to `True`):
            Whether to use lower-order solvers in the final steps. Only valid for < 15 inference steps. This can
            stabilize the sampling of DPMSolver for steps < 15, especially for steps <= 10.
        euler_at_final (`bool`, defaults to `False`):
            Whether to use Euler's method in the final step. It is a trade-off between numerical stability and detail
            richness. This can stabilize the sampling of the SDE variant of DPMSolver for a small number of inference
            steps, but sometimes may result in blurring.
        final_sigmas_type (`str`, defaults to `"zero"`):
            The final `sigma` value for the noise schedule during the sampling process. If `"sigma_min"`, the final
            sigma is the same as the last sigma in the training schedule. If `"zero"`, the final sigma is set to 0.
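
    Example:

    A minimal standalone sketch of the sampling loop (assumes the scheduler is exported from the top-level
    `diffusers` package and that `torchsde` is installed for the Brownian-tree noise sampler; the zero
    `model_output` below is a placeholder for a real denoiser's prediction):

    ```python
    >>> import torch
    >>> from diffusers import CosineDPMSolverMultistepScheduler

    >>> scheduler = CosineDPMSolverMultistepScheduler()
    >>> scheduler.set_timesteps(num_inference_steps=50)

    >>> sample = torch.randn(1, 64, 1024) * scheduler.init_noise_sigma
    >>> for t in scheduler.timesteps:
    ...     model_input = scheduler.scale_model_input(sample, t)
    ...     model_output = torch.zeros_like(model_input)  # placeholder: call the denoiser on `model_input` here
    ...     sample = scheduler.step(model_output, t, sample).prev_sample
    ```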
| | """ |
| |
|
| | _compatibles = [] |
| | order = 1 |
| |
|
| | @register_to_config |
| | def __init__( |
| | self, |
| | sigma_min: float = 0.3, |
| | sigma_max: float = 500, |
| | sigma_data: float = 1.0, |
| | sigma_schedule: str = "exponential", |
| | num_train_timesteps: int = 1000, |
| | solver_order: int = 2, |
| | prediction_type: str = "v_prediction", |
| | rho: float = 7.0, |
| | solver_type: str = "midpoint", |
| | lower_order_final: bool = True, |
| | euler_at_final: bool = False, |
| | final_sigmas_type: Optional[str] = "zero", |
| | ): |
| | if solver_type not in ["midpoint", "heun"]: |
| | if solver_type in ["logrho", "bh1", "bh2"]: |
| | self.register_to_config(solver_type="midpoint") |
| | else: |
| | raise NotImplementedError(f"{solver_type} is not implemented for {self.__class__}") |
| |
|
| | ramp = torch.linspace(0, 1, num_train_timesteps) |
| | if sigma_schedule == "karras": |
| | sigmas = self._compute_karras_sigmas(ramp) |
| | elif sigma_schedule == "exponential": |
| | sigmas = self._compute_exponential_sigmas(ramp) |
| |
|
| | self.timesteps = self.precondition_noise(sigmas) |
| |
|
| | self.sigmas = torch.cat([sigmas, torch.zeros(1, device=sigmas.device)]) |
| |
|
| | |
| | self.num_inference_steps = None |
| | self.model_outputs = [None] * solver_order |
| | self.lower_order_nums = 0 |
| | self._step_index = None |
| | self._begin_index = None |
| | self.sigmas = self.sigmas.to("cpu") |
| |
|
    @property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        return (self.config.sigma_max**2 + 1) ** 0.5

    @property
    def step_index(self):
        """
        The index counter for the current timestep. It will increase by 1 after each scheduler step.
        """
        return self._step_index

    @property
    def begin_index(self):
        """
        The index for the first timestep. It should be set from the pipeline with the `set_begin_index` method.
        """
        return self._begin_index

    def set_begin_index(self, begin_index: int = 0):
        """
        Sets the begin index for the scheduler. This function should be run from the pipeline before inference.

        Args:
            begin_index (`int`):
                The begin index for the scheduler.
        """
        self._begin_index = begin_index

    def precondition_inputs(self, sample, sigma):
        c_in = 1 / ((sigma**2 + self.config.sigma_data**2) ** 0.5)
        scaled_sample = sample * c_in
        return scaled_sample

    def precondition_noise(self, sigma):
        if not isinstance(sigma, torch.Tensor):
            sigma = torch.tensor([sigma])

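        # Map sigma to a bounded, continuous "timestep" in [0, 1): atan(sigma) / (pi / 2). These values are
        # stored in `self.timesteps` and are what a pipeline iterates over instead of discrete integer timesteps.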
        return sigma.atan() / math.pi * 2

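    # EDM-style output preconditioning: the denoised sample is reconstructed as
    # c_skip * sample + c_out * model_output, where the sign of c_out flips for v-prediction.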
    def precondition_outputs(self, sample, model_output, sigma):
        sigma_data = self.config.sigma_data
        c_skip = sigma_data**2 / (sigma**2 + sigma_data**2)

        if self.config.prediction_type == "epsilon":
            c_out = sigma * sigma_data / (sigma**2 + sigma_data**2) ** 0.5
        elif self.config.prediction_type == "v_prediction":
            c_out = -sigma * sigma_data / (sigma**2 + sigma_data**2) ** 0.5
        else:
            raise ValueError(f"Prediction type {self.config.prediction_type} is not supported.")

        denoised = c_skip * sample + c_out * model_output

        return denoised

    def scale_model_input(self, sample: torch.Tensor, timestep: Union[float, torch.Tensor]) -> torch.Tensor:
        """
        Ensures interchangeability with schedulers that need to scale the denoising model input depending on the
        current timestep. Scales the denoising model input by `1 / ((sigma**2 + sigma_data**2) ** 0.5)` (EDM input
        preconditioning).

        Args:
            sample (`torch.Tensor`):
                The input sample.
            timestep (`int`, *optional*):
                The current timestep in the diffusion chain.

        Returns:
            `torch.Tensor`:
                A scaled input sample.
        """
        if self.step_index is None:
            self._init_step_index(timestep)

        sigma = self.sigmas[self.step_index]
        sample = self.precondition_inputs(sample, sigma)

        self.is_scale_input_called = True
        return sample

    def set_timesteps(self, num_inference_steps: int = None, device: Union[str, torch.device] = None):
        """
        Sets the discrete timesteps used for the diffusion chain (to be run before inference).

        Args:
            num_inference_steps (`int`):
                The number of diffusion steps used when generating samples with a pre-trained model.
            device (`str` or `torch.device`, *optional*):
                The device to which the timesteps should be moved. If `None`, the timesteps are not moved.
        """
        self.num_inference_steps = num_inference_steps

        ramp = torch.linspace(0, 1, self.num_inference_steps)
        if self.config.sigma_schedule == "karras":
            sigmas = self._compute_karras_sigmas(ramp)
        elif self.config.sigma_schedule == "exponential":
            sigmas = self._compute_exponential_sigmas(ramp)

        sigmas = sigmas.to(dtype=torch.float32, device=device)
        self.timesteps = self.precondition_noise(sigmas)

        if self.config.final_sigmas_type == "sigma_min":
            sigma_last = self.config.sigma_min
        elif self.config.final_sigmas_type == "zero":
            sigma_last = 0
        else:
            raise ValueError(
                f"`final_sigmas_type` must be one of 'zero', or 'sigma_min', but got {self.config.final_sigmas_type}"
            )

        self.sigmas = torch.cat([sigmas, torch.tensor([sigma_last], dtype=torch.float32, device=device)])

        self.model_outputs = [
            None,
        ] * self.config.solver_order
        self.lower_order_nums = 0

        # add an index counter for schedulers that allow duplicated timesteps
        self._step_index = None
        self._begin_index = None
        self.sigmas = self.sigmas.to("cpu")  # to avoid too much CPU/GPU communication

        # if a noise sampler is used, reinitialise it
        self.noise_sampler = None

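    # Karras et al. (2022) schedule: sigma_i = (sigma_max^(1/rho) + ramp_i * (sigma_min^(1/rho) - sigma_max^(1/rho)))^rho,
    # i.e. sigmas are spaced uniformly in sigma^(1/rho) and decrease from sigma_max to sigma_min.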
    def _compute_karras_sigmas(self, ramp, sigma_min=None, sigma_max=None) -> torch.Tensor:
        """Constructs the noise schedule of Karras et al. (2022)."""
        sigma_min = sigma_min or self.config.sigma_min
        sigma_max = sigma_max or self.config.sigma_max

        rho = self.config.rho
        min_inv_rho = sigma_min ** (1 / rho)
        max_inv_rho = sigma_max ** (1 / rho)
        sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
        return sigmas

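    # Exponential schedule: sigmas are spaced uniformly in log-sigma between log(sigma_min) and log(sigma_max),
    # then flipped so the schedule decreases from sigma_max to sigma_min.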
    def _compute_exponential_sigmas(self, ramp, sigma_min=None, sigma_max=None) -> torch.Tensor:
        """Implementation closely follows k-diffusion.

        https://github.com/crowsonkb/k-diffusion/blob/6ab5146d4a5ef63901326489f31f1d8e7dd36b48/k_diffusion/sampling.py#L26
        """
        sigma_min = sigma_min or self.config.sigma_min
        sigma_max = sigma_max or self.config.sigma_max
        sigmas = torch.linspace(math.log(sigma_min), math.log(sigma_max), len(ramp)).exp().flip(0)
        return sigmas

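    # Convert a sigma value to a fractional timestep index by piecewise-linear interpolation of log(sigma)
    # against the training-schedule `log_sigmas`; not called elsewhere in this file.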
    def _sigma_to_t(self, sigma, log_sigmas):
        # get log sigma
        log_sigma = np.log(np.maximum(sigma, 1e-10))

        # get distribution
        dists = log_sigma - log_sigmas[:, np.newaxis]

        # get sigmas range
        low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1

        low = log_sigmas[low_idx]
        high = log_sigmas[high_idx]

        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = np.clip(w, 0, 1)

        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.reshape(sigma.shape)
        return t

    def _sigma_to_alpha_sigma_t(self, sigma):
        alpha_t = torch.tensor(1)  # inputs are pre-scaled before going into the model, so alpha_t = 1
        sigma_t = sigma

        return alpha_t, sigma_t

    def convert_model_output(
        self,
        model_output: torch.Tensor,
        sample: torch.Tensor = None,
    ) -> torch.Tensor:
        """
        Convert the model output to the corresponding type the DPMSolver/DPMSolver++ algorithm needs. DPM-Solver is
        designed to discretize an integral of the noise prediction model, and DPM-Solver++ is designed to discretize
        an integral of the data prediction model.

        <Tip>

        The algorithm and model type are decoupled. You can use either DPMSolver or DPMSolver++ for both noise
        prediction and data prediction models.

        </Tip>

        Args:
            model_output (`torch.Tensor`):
                The direct output from the learned diffusion model.
            sample (`torch.Tensor`):
                A current instance of a sample created by the diffusion process.

        Returns:
            `torch.Tensor`:
                The converted model output.
        """
        sigma = self.sigmas[self.step_index]
        x0_pred = self.precondition_outputs(sample, model_output, sigma)

        return x0_pred

    def dpm_solver_first_order_update(
        self,
        model_output: torch.Tensor,
        sample: torch.Tensor = None,
        noise: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        """
        One step for the first-order DPMSolver (equivalent to DDIM).

        Args:
            model_output (`torch.Tensor`):
                The direct output from the learned diffusion model.
            sample (`torch.Tensor`):
                A current instance of a sample created by the diffusion process.

        Returns:
            `torch.Tensor`:
                The sample tensor at the previous timestep.
        """
        sigma_t, sigma_s = self.sigmas[self.step_index + 1], self.sigmas[self.step_index]
        alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma_t)
        alpha_s, sigma_s = self._sigma_to_alpha_sigma_t(sigma_s)
        lambda_t = torch.log(alpha_t) - torch.log(sigma_t)
        lambda_s = torch.log(alpha_s) - torch.log(sigma_s)

        h = lambda_t - lambda_s
        assert noise is not None
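        # First-order update of the SDE variant of DPM-Solver++ (with h = lambda_t - lambda_s):
        #   x_t = (sigma_t / sigma_s) * exp(-h) * x_s + alpha_t * (1 - exp(-2h)) * x0_pred
        #         + sigma_t * sqrt(1 - exp(-2h)) * noise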
        x_t = (
            (sigma_t / sigma_s * torch.exp(-h)) * sample
            + (alpha_t * (1 - torch.exp(-2.0 * h))) * model_output
            + sigma_t * torch.sqrt(1.0 - torch.exp(-2 * h)) * noise
        )

        return x_t

    def multistep_dpm_solver_second_order_update(
        self,
        model_output_list: List[torch.Tensor],
        sample: torch.Tensor = None,
        noise: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        """
        One step for the second-order multistep DPMSolver.

        Args:
            model_output_list (`List[torch.Tensor]`):
                The direct outputs from the learned diffusion model at the current and latter timesteps.
            sample (`torch.Tensor`):
                A current instance of a sample created by the diffusion process.

        Returns:
            `torch.Tensor`:
                The sample tensor at the previous timestep.
        """
        sigma_t, sigma_s0, sigma_s1 = (
            self.sigmas[self.step_index + 1],
            self.sigmas[self.step_index],
            self.sigmas[self.step_index - 1],
        )

        alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma_t)
        alpha_s0, sigma_s0 = self._sigma_to_alpha_sigma_t(sigma_s0)
        alpha_s1, sigma_s1 = self._sigma_to_alpha_sigma_t(sigma_s1)

        lambda_t = torch.log(alpha_t) - torch.log(sigma_t)
        lambda_s0 = torch.log(alpha_s0) - torch.log(sigma_s0)
        lambda_s1 = torch.log(alpha_s1) - torch.log(sigma_s1)

        m0, m1 = model_output_list[-1], model_output_list[-2]

        h, h_0 = lambda_t - lambda_s0, lambda_s0 - lambda_s1
        r0 = h_0 / h
        D0, D1 = m0, (1.0 / r0) * (m0 - m1)

        # sde-dpmsolver++
        assert noise is not None
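        # Second-order correction on top of the first-order update: D1 estimates the local slope of the data
        # prediction. "midpoint" weights it by 0.5 * (1 - exp(-2h)); "heun" uses the weight (1 - exp(-2h)) / (-2h) + 1.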
        if self.config.solver_type == "midpoint":
            x_t = (
                (sigma_t / sigma_s0 * torch.exp(-h)) * sample
                + (alpha_t * (1 - torch.exp(-2.0 * h))) * D0
                + 0.5 * (alpha_t * (1 - torch.exp(-2.0 * h))) * D1
                + sigma_t * torch.sqrt(1.0 - torch.exp(-2 * h)) * noise
            )
        elif self.config.solver_type == "heun":
            x_t = (
                (sigma_t / sigma_s0 * torch.exp(-h)) * sample
                + (alpha_t * (1 - torch.exp(-2.0 * h))) * D0
                + (alpha_t * ((1.0 - torch.exp(-2.0 * h)) / (-2.0 * h) + 1.0)) * D1
                + sigma_t * torch.sqrt(1.0 - torch.exp(-2 * h)) * noise
            )

        return x_t

    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps

        index_candidates = (schedule_timesteps == timestep).nonzero()

        if len(index_candidates) == 0:
            step_index = len(self.timesteps) - 1
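        # The sigma index taken for the **very** first `step` is always the second matching index (or the last
        # index if there is only one match). This avoids accidentally skipping a sigma when starting in the
        # middle of the denoising schedule (e.g. for image-to-image).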
        elif len(index_candidates) > 1:
            step_index = index_candidates[1].item()
        else:
            step_index = index_candidates[0].item()

        return step_index

    def _init_step_index(self, timestep):
        """
        Initialize the step_index counter for the scheduler.
        """

        if self.begin_index is None:
            if isinstance(timestep, torch.Tensor):
                timestep = timestep.to(self.timesteps.device)
            self._step_index = self.index_for_timestep(timestep)
        else:
            self._step_index = self._begin_index

    def step(
        self,
        model_output: torch.Tensor,
        timestep: Union[int, torch.Tensor],
        sample: torch.Tensor,
        generator=None,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        """
        Predict the sample from the previous timestep by reversing the SDE. This function propagates the sample with
        the multistep DPMSolver.

        Args:
            model_output (`torch.Tensor`):
                The direct output from the learned diffusion model.
            timestep (`int`):
                The current discrete timestep in the diffusion chain.
            sample (`torch.Tensor`):
                A current instance of a sample created by the diffusion process.
            generator (`torch.Generator`, *optional*):
                A random number generator.
            return_dict (`bool`):
                Whether or not to return a [`~schedulers.scheduling_utils.SchedulerOutput`] or `tuple`.

        Returns:
            [`~schedulers.scheduling_utils.SchedulerOutput`] or `tuple`:
                If `return_dict` is `True`, [`~schedulers.scheduling_utils.SchedulerOutput`] is returned, otherwise a
                tuple is returned where the first element is the sample tensor.
        """
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
            )

        if self.step_index is None:
            self._init_step_index(timestep)

        # Improve numerical stability for a small number of inference steps
        lower_order_final = (self.step_index == len(self.timesteps) - 1) and (
            self.config.euler_at_final
            or (self.config.lower_order_final and len(self.timesteps) < 15)
            or self.config.final_sigmas_type == "zero"
        )
        lower_order_second = (
            (self.step_index == len(self.timesteps) - 2) and self.config.lower_order_final and len(self.timesteps) < 15
        )

        model_output = self.convert_model_output(model_output, sample=sample)
        for i in range(self.config.solver_order - 1):
            self.model_outputs[i] = self.model_outputs[i + 1]
        self.model_outputs[-1] = model_output

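        # The SDE variant injects noise at every step. The BrownianTreeNoiseSampler is created lazily on the first
        # step so the per-step noise is drawn from a single underlying Brownian motion (seeded from `generator`
        # when provided).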
        if self.noise_sampler is None:
            seed = None
            if generator is not None:
                seed = (
                    [g.initial_seed() for g in generator] if isinstance(generator, list) else generator.initial_seed()
                )
            self.noise_sampler = BrownianTreeNoiseSampler(
                model_output, sigma_min=self.config.sigma_min, sigma_max=self.config.sigma_max, seed=seed
            )
        noise = self.noise_sampler(self.sigmas[self.step_index], self.sigmas[self.step_index + 1]).to(
            model_output.device
        )

        if self.config.solver_order == 1 or self.lower_order_nums < 1 or lower_order_final:
            prev_sample = self.dpm_solver_first_order_update(model_output, sample=sample, noise=noise)
        elif self.config.solver_order == 2 or self.lower_order_nums < 2 or lower_order_second:
            prev_sample = self.multistep_dpm_solver_second_order_update(self.model_outputs, sample=sample, noise=noise)

        if self.lower_order_nums < self.config.solver_order:
            self.lower_order_nums += 1

        # upon completion increase step index by one
        self._step_index += 1

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

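    # Forward noising for this scheduler is variance-exploding: noisy = original + sigma * noise
    # (no alpha scaling, consistent with `_sigma_to_alpha_sigma_t` returning alpha_t = 1).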
    def add_noise(
        self,
        original_samples: torch.Tensor,
        noise: torch.Tensor,
        timesteps: torch.Tensor,
    ) -> torch.Tensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)

        # self.begin_index is None when the scheduler is used for training or the pipeline does not call set_begin_index
        if self.begin_index is None:
            step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]
        elif self.step_index is not None:
            # add_noise is called after the first denoising step (for inpainting)
            step_indices = [self.step_index] * timesteps.shape[0]
        else:
            # add_noise is called before the first denoising step to create the initial latent (img2img)
            step_indices = [self.begin_index] * timesteps.shape[0]

        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)

        noisy_samples = original_samples + noise * sigma
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps