| x_t = ( | |
| expand_dims(sigma_t / sigma_s, dims) * x | |
| - expand_dims(alpha_t * phi_1, dims) * model_s | |
| ) | |
| if return_intermediate: | |
| return x_t, {'model_s': model_s} | |
| else: | |
| return x_t | |
| else: | |
| phi_1 = torch.expm1(h) | |
| if model_s is None: | |
| model_s = self.model_fn(x, s) | |
| x_t = ( | |
| expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x | |
| - expand_dims(sigma_t * phi_1, dims) * model_s | |
| ) | |
| if return_intermediate: | |
| return x_t, {'model_s': model_s} | |
| else: | |
| return x_t | |
def singlestep_dpm_solver_second_update(self, x, s, t, r1=0.5, model_s=None, return_intermediate=False,
                                        solver_type='dpm_solver'):
    """
    Single-step second-order DPM-Solver update from time `s` to time `t`.

    One intermediate model evaluation is taken at time `s1`, placed a fraction
    `r1` of the way from `lambda_s` to `lambda_t` in log-SNR (lambda) space.

    Args:
        x: A pytorch tensor. The initial value at time `s`.
        s: A pytorch tensor. The starting time, with the shape (x.shape[0],).
        t: A pytorch tensor. The ending time, with the shape (x.shape[0],).
        r1: A `float`. The hyperparameter of the second-order solver.
        model_s: A pytorch tensor. The model function evaluated at time `s`.
            If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it.
        return_intermediate: A `bool`. If true, also return the model value at time `s` and `s1` (the intermediate time).
        solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.
            The type slightly impacts the performance. We recommend to use 'dpm_solver' type.
    Returns:
        x_t: A pytorch tensor. The approximated solution at time `t`.
    """
    if solver_type not in ['dpm_solver', 'taylor']:
        raise ValueError("'solver_type' must be either 'dpm_solver' or 'taylor', got {}".format(solver_type))
    if r1 is None:
        r1 = 0.5
    schedule = self.noise_schedule
    nd = x.dim()
    lam_s = schedule.marginal_lambda(s)
    lam_t = schedule.marginal_lambda(t)
    h = lam_t - lam_s
    # Intermediate time s1 sits at fraction r1 of the step in lambda space.
    s1 = schedule.inverse_lambda(lam_s + r1 * h)
    log_a_s = schedule.marginal_log_mean_coeff(s)
    log_a_s1 = schedule.marginal_log_mean_coeff(s1)
    log_a_t = schedule.marginal_log_mean_coeff(t)
    sig_s = schedule.marginal_std(s)
    sig_s1 = schedule.marginal_std(s1)
    sig_t = schedule.marginal_std(t)
    alpha_s1 = torch.exp(log_a_s1)
    alpha_t = torch.exp(log_a_t)
    if self.predict_x0:
        # Data-prediction parameterization: phi terms use expm1 of the
        # negated lambda increments.
        phi_11 = torch.expm1(-r1 * h)
        phi_1 = torch.expm1(-h)
        if model_s is None:
            model_s = self.model_fn(x, s)
        x_s1 = (
            expand_dims(sig_s1 / sig_s, nd) * x
            - expand_dims(alpha_s1 * phi_11, nd) * model_s
        )
        model_s1 = self.model_fn(x_s1, s1)
        delta = model_s1 - model_s
        if solver_type == 'dpm_solver':
            x_t = (
                expand_dims(sig_t / sig_s, nd) * x
                - expand_dims(alpha_t * phi_1, nd) * model_s
                - (0.5 / r1) * expand_dims(alpha_t * phi_1, nd) * delta
            )
        elif solver_type == 'taylor':
            x_t = (
                expand_dims(sig_t / sig_s, nd) * x
                - expand_dims(alpha_t * phi_1, nd) * model_s
                + (1. / r1) * expand_dims(alpha_t * ((torch.exp(-h) - 1.) / h + 1.), nd) * delta
            )
    else:
        # Noise-prediction parameterization: phi terms use expm1 of the
        # (positive) lambda increments.
        phi_11 = torch.expm1(r1 * h)
        phi_1 = torch.expm1(h)
        if model_s is None:
            model_s = self.model_fn(x, s)
        x_s1 = (
            expand_dims(torch.exp(log_a_s1 - log_a_s), nd) * x
            - expand_dims(sig_s1 * phi_11, nd) * model_s
        )
        model_s1 = self.model_fn(x_s1, s1)
        delta = model_s1 - model_s
        if solver_type == 'dpm_solver':
            x_t = (
                expand_dims(torch.exp(log_a_t - log_a_s), nd) * x
                - expand_dims(sig_t * phi_1, nd) * model_s
                - (0.5 / r1) * expand_dims(sig_t * phi_1, nd) * delta
            )
        elif solver_type == 'taylor':
            x_t = (
                expand_dims(torch.exp(log_a_t - log_a_s), nd) * x
                - expand_dims(sig_t * phi_1, nd) * model_s
                - (1. / r1) * expand_dims(sig_t * ((torch.exp(h) - 1.) / h - 1.), nd) * delta
            )
    if return_intermediate:
        return x_t, {'model_s': model_s, 'model_s1': model_s1}
    else:
        return x_t
| def singlestep_dpm_solver_third_update(self, x, s, t, r1=1. / 3., r2=2. / 3., model_s=None, model_s1=None, | |
| return_intermediate=False, solver_type='dpm_solver'): | |
| """ | |
| Singlestep solver DPM-Solver-3 from time `s` to time `t`. | |
| Args: | |
| x: A pytorch tensor. The initial value at time `s`. | |
| s: A pytorch tensor. The starting time, with the shape (x.shape[0],). | |
| t: A pytorch tensor. The ending time, with the shape (x.shape[0],). | |
| r1: A `float`. The hyperparameter of the third-order solver. | |
| r2: A `float`. The hyperparameter of the third-order solver. | |
| model_s: A pytorch tensor. The model function evaluated at time `s`. | |
| If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it. | |
| model_s1: A pytorch tensor. The model function evaluated at time `s1` (the intermediate time given by `r1`). | |
| If `model_s1` is None, we evaluate the model at `s1`; otherwise we directly use it. | |
| return_intermediate: A `bool`. If true, also return the model value at time `s`, `s1` and `s2` (the intermediate times). | |
| solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers. | |
| The type slightly impacts the performance. We recommend to use 'dpm_solver' type. | |
| Returns: | |
| x_t: A pytorch tensor. The approximated solution at time `t`. | |
| """ | |
| if solver_type not in ['dpm_solver', 'taylor']: | |
| raise ValueError("'solver_type' must be either 'dpm_solver' or 'taylor', got {}".format(solver_type)) | |
| if r1 is None: | |
| r1 = 1. / 3. | |
| if r2 is None: | |
| r2 = 2. / 3. | |
| ns = self.noise_schedule | |
| dims = x.dim() | |
| lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t) | |
| h = lambda_t - lambda_s | |
| lambda_s1 = lambda_s + r1 * h | |
| lambda_s2 = lambda_s + r2 * h | |
| s1 = ns.inverse_lambda(lambda_s1) | |
| s2 = ns.inverse_lambda(lambda_s2) | |
| log_alpha_s, log_alpha_s1, log_alpha_s2, log_alpha_t = ns.marginal_log_mean_coeff( | |
| s), ns.marginal_log_mean_coeff(s1), ns.marginal_log_mean_coeff(s2), ns.marginal_log_mean_coeff(t) | |
| sigma_s, sigma_s1, sigma_s2, sigma_t = ns.marginal_std(s), ns.marginal_std(s1), ns.marginal_std( | |
| s2), ns.marginal_std(t) | |
| alpha_s1, alpha_s2, alpha_t = torch.exp(log_alpha_s1), torch.exp(log_alpha_s2), torch.exp(log_alpha_t) | |
| if self.predict_x0: | |
| phi_11 = torch.expm1(-r1 * h) | |
| phi_12 = torch.expm1(-r2 * h) | |
| phi_1 = torch.expm1(-h) | |
| phi_22 = torch.expm1(-r2 * h) / (r2 * h) + 1. | |
| phi_2 = phi_1 / h + 1. | |
| phi_3 = phi_2 / h - 0.5 | |
| if model_s is None: | |
| model_s = self.mode |