| |
| """ |
| This code started out as a PyTorch port of Ho et al.'s diffusion models and was |
| subsequently adapted from OpenAI's guided-diffusion implementation: |
| https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/diffusion_utils_2.py |
| https://github.com/openai/guided-diffusion/blob/main/guided_diffusion/gaussian_diffusion.py |
| Docstrings have been added, as well as DDIM sampling and a new collection of beta schedules. |
| """ |
|
|
| import enum |
| import math |
| import random |
| from copy import deepcopy |
|
|
| import numpy as np |
| import torch |
| import torch as th |
|
|
| |
| from torch.autograd import Variable |
|
|
| from genmo.diffusion_utils.losses import discretized_gaussian_log_likelihood, normal_kl |
| from genmo.diffusion_utils.nn import mean_flat, sum_flat |
| from genmo.utils.rotation_conversions import ( |
| matrix_to_rotation_6d, |
| rotation_6d_to_matrix, |
| ) |
|
|
|
|
| def gmof(res, sigma): |
| """ |
| Geman-McClure error function |
| - residual |
| - sigma scaling factor |
| """ |
| x_squared = res**2 |
| sigma_squared = sigma**2 |
| return (sigma_squared * x_squared) / (sigma_squared + x_squared) |
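| # Worked example (illustrative numbers): with sigma = 100, a residual of 10 |
| # gives gmof(10, 100) = (100**2 * 10**2) / (100**2 + 10**2) ~= 99.0, close to |
| # the raw squared error of 100, while an outlier residual of 1000 gives |
| # gmof(1000, 100) ~= 9901, far below its raw squared error of 1e6. Large |
| # residuals saturate toward sigma**2, which is what makes the penalty robust. |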
|
|
|
|
| def get_named_beta_schedule(schedule_name, num_diffusion_timesteps, scale_betas=1.0): |
| """ |
| Get a pre-defined beta schedule for the given name. |
| |
| The beta schedule library consists of beta schedules which remain similar |
| in the limit of large num_diffusion_timesteps. |
| Beta schedules may be added, but should not be removed or changed once |
| they are committed to maintain backwards compatibility. |
| """ |
| if schedule_name == "linear": |
| |
| |
| scale = scale_betas * 1000 / num_diffusion_timesteps |
| beta_start = scale * 0.0001 |
| beta_end = scale * 0.02 |
| return np.linspace( |
| beta_start, beta_end, num_diffusion_timesteps, dtype=np.float64 |
| ) |
| elif schedule_name == "cosine": |
| return betas_for_alpha_bar( |
| num_diffusion_timesteps, |
| lambda t: math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2, |
| ) |
| else: |
| raise NotImplementedError(f"unknown beta schedule: {schedule_name}") |
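| # Usage sketch (the endpoint values follow directly from the code above): |
| #   betas = get_named_beta_schedule("linear", num_diffusion_timesteps=1000) |
| #   # betas[0] is 1e-4 and betas[-1] is 0.02 with the default scale_betas=1.0 |
| #   betas = get_named_beta_schedule("cosine", num_diffusion_timesteps=1000) |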
|
|
|
|
| def betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.999): |
| """ |
| Create a beta schedule that discretizes the given alpha_t_bar function, |
| which defines the cumulative product of (1-beta) over time from t = [0,1]. |
| |
| :param num_diffusion_timesteps: the number of betas to produce. |
| :param alpha_bar: a lambda that takes an argument t from 0 to 1 and |
| produces the cumulative product of (1-beta) up to that |
| part of the diffusion process. |
| :param max_beta: the maximum beta to use; use values lower than 1 to |
| prevent singularities. |
| """ |
| betas = [] |
| for i in range(num_diffusion_timesteps): |
| t1 = i / num_diffusion_timesteps |
| t2 = (i + 1) / num_diffusion_timesteps |
| betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta)) |
| return np.array(betas) |
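| # Derivation note: discretizing alpha_bar as abar_i = alpha_bar(i / T) and |
| # using abar_i = abar_{i-1} * (1 - beta_i) gives beta_i = 1 - abar_i / abar_{i-1}, |
| # which is the expression above; clipping at max_beta avoids a near-singular |
| # final step when alpha_bar approaches zero. |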
|
|
|
|
| class ModelMeanType(enum.Enum): |
| """ |
| Which type of output the model predicts. |
| """ |
|
|
| PREVIOUS_X = enum.auto() |
| START_X = enum.auto() |
| EPSILON = enum.auto() |
|
|
|
|
| class ModelVarType(enum.Enum): |
| """ |
| What is used as the model's output variance. |
| |
| The LEARNED_RANGE option has been added to allow the model to predict |
| values between FIXED_SMALL and FIXED_LARGE, making its job easier. |
| """ |
|
|
| LEARNED = enum.auto() |
| FIXED_SMALL = enum.auto() |
| FIXED_LARGE = enum.auto() |
| LEARNED_RANGE = enum.auto() |
|
|
|
|
| class LossType(enum.Enum): |
| MSE = enum.auto()  # use raw MSE loss (and KL when learning variances) |
| RESCALED_MSE = enum.auto()  # MSE, with a rescaled VLB term when learning variances |
| KL = enum.auto()  # use the variational lower-bound |
| RESCALED_KL = enum.auto()  # like KL, but rescaled to estimate the full VLB |
|
|
| def is_vb(self): |
| return self == LossType.KL or self == LossType.RESCALED_KL |
|
|
|
|
| class GaussianDiffusion: |
| """ |
| Utilities for training and sampling diffusion models. |
| |
| Ported directly from here, and then adapted over time to further experimentation. |
| https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/diffusion_utils_2.py#L42 |
| |
| :param betas: a 1-D numpy array of betas for each diffusion timestep, |
| starting at T and going to 1. |
| :param model_mean_type: a ModelMeanType determining what the model outputs. |
| :param model_var_type: a ModelVarType determining how variance is output. |
| :param loss_type: a LossType determining the loss function to use. |
| :param rescale_timesteps: if True, pass floating point timesteps into the |
| model so that they are always scaled like in the |
| original paper (0 to 1000). |
| """ |
|
|
| def __init__( |
| self, |
| *, |
| betas, |
| model_mean_type, |
| model_var_type, |
| loss_type, |
| rescale_timesteps=False, |
| ): |
| self.model_mean_type = model_mean_type |
| self.model_var_type = model_var_type |
| self.loss_type = loss_type |
| self.rescale_timesteps = rescale_timesteps |
|
|
| |
| betas = np.array(betas, dtype=np.float64) |
| self.betas = betas |
| assert len(betas.shape) == 1, "betas must be 1-D" |
| assert (betas > 0).all() and (betas <= 1).all() |
|
|
| self.num_timesteps = int(betas.shape[0]) |
|
|
| alphas = 1.0 - betas |
| self.alphas_cumprod = np.cumprod(alphas, axis=0) |
| self.alphas_cumprod_prev = np.append(1.0, self.alphas_cumprod[:-1]) |
| self.alphas_cumprod_next = np.append(self.alphas_cumprod[1:], 0.0) |
| assert self.alphas_cumprod_prev.shape == (self.num_timesteps,) |
|
|
| |
| self.sqrt_alphas_cumprod = np.sqrt(self.alphas_cumprod) |
| self.sqrt_one_minus_alphas_cumprod = np.sqrt(1.0 - self.alphas_cumprod) |
| self.log_one_minus_alphas_cumprod = np.log(1.0 - self.alphas_cumprod) |
| self.sqrt_recip_alphas_cumprod = np.sqrt(1.0 / self.alphas_cumprod) |
| self.sqrt_recipm1_alphas_cumprod = np.sqrt(1.0 / self.alphas_cumprod - 1) |
|
|
| |
| self.posterior_variance = ( |
| betas * (1.0 - self.alphas_cumprod_prev) / (1.0 - self.alphas_cumprod) |
| ) |
| |
| |
| if len(self.posterior_variance) == 1: |
| self.posterior_log_variance_clipped = np.array([0.0]) |
| else: |
| self.posterior_log_variance_clipped = np.log( |
| np.append(self.posterior_variance[1], self.posterior_variance[1:]) |
| ) |
| self.posterior_mean_coef1 = ( |
| betas * np.sqrt(self.alphas_cumprod_prev) / (1.0 - self.alphas_cumprod) |
| ) |
| self.posterior_mean_coef2 = ( |
| (1.0 - self.alphas_cumprod_prev) |
| * np.sqrt(alphas) |
| / (1.0 - self.alphas_cumprod) |
| ) |
|
|
| # Elementwise squared error with no reduction; masking and normalization |
| # are handled by masked_l2 below. |
| self.l2_loss = lambda a, b: (a - b) ** 2 |
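| # Construction sketch (a minimal, hypothetical setup; the denoising model |
| # itself lives outside this module): |
| #   diffusion = GaussianDiffusion( |
| #       betas=get_named_beta_schedule("cosine", 1000), |
| #       model_mean_type=ModelMeanType.START_X, |
| #       model_var_type=ModelVarType.FIXED_SMALL, |
| #       loss_type=LossType.MSE, |
| #   ) |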
|
|
| def masked_l2(self, a, b, mask): |
| # a and b are expected to share a shape of the form [bs, njoints, nfeats, nframes]; |
| # mask broadcasts against that shape and selects the valid entries. |
| loss = self.l2_loss(a, b) |
| # Sum the squared error over all non-batch dimensions, counting only |
| # unmasked entries. |
| loss = sum_flat(loss * mask.float()) |
| n_entries = a.shape[1] * a.shape[2] |
| non_zero_elements = sum_flat(mask) * n_entries |
| # Normalize by the number of unmasked entries to get a per-element MSE. |
| mse_loss_val = loss / non_zero_elements |
| return mse_loss_val |
|
|
| def q_mean_variance(self, x_start, t): |
| """ |
| Get the distribution q(x_t | x_0). |
| |
| :param x_start: the [N x C x ...] tensor of noiseless inputs. |
| :param t: the number of diffusion steps (minus 1). Here, 0 means one step. |
| :return: A tuple (mean, variance, log_variance), all of x_start's shape. |
| """ |
| mean = ( |
| _extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start |
| ) |
| variance = _extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) |
| log_variance = _extract_into_tensor( |
| self.log_one_minus_alphas_cumprod, t, x_start.shape |
| ) |
| return mean, variance, log_variance |
|
|
| def q_sample(self, x_start, t, noise=None): |
| """ |
| Diffuse the dataset for a given number of diffusion steps. |
| |
| In other words, sample from q(x_t | x_0). |
| |
| :param x_start: the initial dataset batch. |
| :param t: the number of diffusion steps (minus 1). Here, 0 means one step. |
| :param noise: if specified, the split-out normal noise. |
| :return: A noisy version of x_start. |
| """ |
| if noise is None: |
| noise = th.randn_like(x_start) |
| assert noise.shape == x_start.shape |
| return ( |
| _extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start |
| + _extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) |
| * noise |
| ) |
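| # In closed form this is x_t = sqrt(alphas_cumprod[t]) * x_0 |
| # + sqrt(1 - alphas_cumprod[t]) * noise. Usage sketch (shapes are hypothetical): |
| #   x_start = th.randn(4, 263, 1, 120) |
| #   t = th.randint(0, diffusion.num_timesteps, (4,)) |
| #   x_t = diffusion.q_sample(x_start, t) |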
|
|
| def q_posterior_mean_variance(self, x_start, x_t, t): |
| """ |
| Compute the mean and variance of the diffusion posterior: |
| |
| q(x_{t-1} | x_t, x_0) |
| |
| """ |
| assert x_start.shape == x_t.shape |
| posterior_mean = ( |
| _extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start |
| + _extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t |
| ) |
| posterior_variance = _extract_into_tensor(self.posterior_variance, t, x_t.shape) |
| posterior_log_variance_clipped = _extract_into_tensor( |
| self.posterior_log_variance_clipped, t, x_t.shape |
| ) |
| assert ( |
| posterior_mean.shape[0] |
| == posterior_variance.shape[0] |
| == posterior_log_variance_clipped.shape[0] |
| == x_start.shape[0] |
| ) |
| return posterior_mean, posterior_variance, posterior_log_variance_clipped |
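| # These are the standard DDPM posterior expressions, using the coefficients |
| # precomputed in __init__: |
| #   mean = posterior_mean_coef1[t] * x_0 + posterior_mean_coef2[t] * x_t |
| #   variance = betas[t] * (1 - alphas_cumprod_prev[t]) / (1 - alphas_cumprod[t]) |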
|
|
| def p_mean_variance_guided( |
| self, |
| model, |
| x, |
| t, |
| clip_denoised=True, |
| denoised_fn=None, |
| model_kwargs=None, |
| guide=None, |
| target_motion=None, |
| ): |
| """ |
| Apply the model to get p(x_{t-1} | x_t), as well as a prediction of |
| the initial x, x_0. |
| |
| :param model: the model, which takes a signal and a batch of timesteps |
| as input. |
| :param x: the [N x C x ...] tensor at time t. |
| :param t: a 1-D Tensor of timesteps. |
| :param clip_denoised: if True, clip the denoised signal into [-1, 1]. |
| :param denoised_fn: if not None, a function which applies to the |
| x_start prediction before it is used to sample. Applies before |
| clip_denoised. |
| :param model_kwargs: if not None, a dict of extra keyword arguments to |
| pass to the model. This can be used for conditioning. |
| :param guide: if not None, an object whose guide() method adjusts the model |
| output toward target_motion before the mean is computed. |
| :param target_motion: the guidance target passed to guide.guide(). |
| :return: a dict with the following keys: |
| - 'mean': the model mean output. |
| - 'variance': the model variance output. |
| - 'log_variance': the log of 'variance'. |
| - 'pred_xstart': the prediction for x_0. |
| """ |
| if model_kwargs is None: |
| model_kwargs = {} |
|
|
| B, C = x.shape[:2] |
| assert t.shape == (B,) |
| |
| with th.enable_grad(): |
| x = x.detach().requires_grad_() |
| model_output = model(x, self._scale_timesteps(t), **model_kwargs) |
|
|
| if self.model_var_type in [ModelVarType.LEARNED, ModelVarType.LEARNED_RANGE]: |
| assert model_output.shape == (B, C * 2, *x.shape[2:]) |
| model_output, model_var_values = th.split(model_output, C, dim=1) |
| if self.model_var_type == ModelVarType.LEARNED: |
| model_log_variance = model_var_values |
| model_variance = th.exp(model_log_variance) |
| else: |
| min_log = _extract_into_tensor( |
| self.posterior_log_variance_clipped, t, x.shape |
| ) |
| max_log = _extract_into_tensor(np.log(self.betas), t, x.shape) |
| # model_var_values is in [-1, 1] for [min_var, max_var]. |
| frac = (model_var_values + 1) / 2 |
| model_log_variance = frac * max_log + (1 - frac) * min_log |
| model_variance = th.exp(model_log_variance) |
| else: |
| model_variance, model_log_variance = { |
| # For FIXED_LARGE, we set the initial (log-)variance like so |
| # to get a better decoder log likelihood. |
| ModelVarType.FIXED_LARGE: ( |
| np.append(self.posterior_variance[1], self.betas[1:]), |
| np.log(np.append(self.posterior_variance[1], self.betas[1:])), |
| ), |
| ModelVarType.FIXED_SMALL: ( |
| self.posterior_variance, |
| self.posterior_log_variance_clipped, |
| ), |
| }[self.model_var_type] |
| model_variance = _extract_into_tensor(model_variance, t, x.shape) |
| model_log_variance = _extract_into_tensor(model_log_variance, t, x.shape) |
|
|
| if guide is not None: |
| model_output = guide.guide( |
| x, model_output, target_motion, model_variance, t |
| ) |
|
|
| def process_xstart(x): |
| if denoised_fn is not None: |
| x = denoised_fn(x, t) |
| if clip_denoised: |
| return x.clamp(-1, 1) |
| return x |
|
|
| if self.model_mean_type == ModelMeanType.PREVIOUS_X: |
| pred_xstart = process_xstart( |
| self._predict_xstart_from_xprev(x_t=x, t=t, xprev=model_output) |
| ) |
| model_mean = model_output |
| elif self.model_mean_type in [ModelMeanType.START_X, ModelMeanType.EPSILON]: |
| if self.model_mean_type == ModelMeanType.START_X: |
| pred_xstart = process_xstart(model_output) |
| else: |
| pred_xstart = process_xstart( |
| self._predict_xstart_from_eps(x_t=x, t=t, eps=model_output) |
| ) |
| model_mean, _, _ = self.q_posterior_mean_variance( |
| x_start=pred_xstart, x_t=x, t=t |
| ) |
| else: |
| raise NotImplementedError(self.model_mean_type) |
|
|
| assert ( |
| model_mean.shape == model_log_variance.shape == pred_xstart.shape == x.shape |
| ) |
| return { |
| "mean": model_mean, |
| "variance": model_variance, |
| "log_variance": model_log_variance, |
| "pred_xstart": pred_xstart, |
| } |
|
|
| def p_mean_variance( |
| self, |
| model, |
| x, |
| t, |
| clip_denoised=True, |
| denoised_fn=None, |
| model_kwargs=None, |
| model_output=None, |
| ): |
| """ |
| Apply the model to get p(x_{t-1} | x_t), as well as a prediction of |
| the initial x, x_0. |
| |
| :param model: the model, which takes a signal and a batch of timesteps |
| as input. |
| :param x: the [N x C x ...] tensor at time t. |
| :param t: a 1-D Tensor of timesteps. |
| :param clip_denoised: if True, clip the denoised signal into [-1, 1]. |
| :param denoised_fn: if not None, a function which applies to the |
| x_start prediction before it is used to sample. Applies before |
| clip_denoised. |
| :param model_kwargs: if not None, a dict of extra keyword arguments to |
| pass to the model. This can be used for conditioning. |
| :return: a dict with the following keys: |
| - 'mean': the model mean output. |
| - 'variance': the model variance output. |
| - 'log_variance': the log of 'variance'. |
| - 'pred_xstart': the prediction for x_0. |
| """ |
| if model_kwargs is None: |
| model_kwargs = {} |
|
|
| B, C = x.shape[:2] |
| assert t.shape == (B,), (t.shape, B, x.shape) |
|
|
| if model_output is None: |
| model_output = model(x, self._scale_timesteps(t), **model_kwargs) |
| aux_output = {} |
| if isinstance(model_output, dict): |
| for k, v in model_output.items(): |
| if k != "pred_x_start": |
| aux_output[k] = v |
| model_output = model_output["pred_x_start"] |
| if self.model_var_type in [ModelVarType.LEARNED, ModelVarType.LEARNED_RANGE]: |
| assert model_output.shape == (B, C * 2, *x.shape[2:]) |
| model_output, model_var_values = th.split(model_output, C, dim=1) |
| if self.model_var_type == ModelVarType.LEARNED: |
| model_log_variance = model_var_values |
| model_variance = th.exp(model_log_variance) |
| else: |
| min_log = _extract_into_tensor( |
| self.posterior_log_variance_clipped, t, x.shape |
| ) |
| max_log = _extract_into_tensor(np.log(self.betas), t, x.shape) |
| # model_var_values is in [-1, 1] for [min_var, max_var]. |
| frac = (model_var_values + 1) / 2 |
| model_log_variance = frac * max_log + (1 - frac) * min_log |
| model_variance = th.exp(model_log_variance) |
| else: |
| model_variance, model_log_variance = { |
| # For FIXED_LARGE, we set the initial (log-)variance like so |
| # to get a better decoder log likelihood. |
| ModelVarType.FIXED_LARGE: ( |
| np.append(self.posterior_variance[1], self.betas[1:]) |
| if len(self.posterior_variance) > 1 |
| else self.betas, |
| np.log(np.append(self.posterior_variance[1], self.betas[1:])) |
| if len(self.posterior_variance) > 1 |
| else np.log(self.betas), |
| ), |
| ModelVarType.FIXED_SMALL: ( |
| self.posterior_variance, |
| self.posterior_log_variance_clipped, |
| ), |
| }[self.model_var_type] |
| model_variance = _extract_into_tensor(model_variance, t, x.shape) |
| model_log_variance = _extract_into_tensor(model_log_variance, t, x.shape) |
|
|
| |
| |
| |
|
|
| def process_xstart(x): |
| if denoised_fn is not None: |
| x = denoised_fn(x, t) |
| if clip_denoised: |
| return x.clamp(-1, 1) |
| return x |
|
|
| if self.model_mean_type == ModelMeanType.PREVIOUS_X: |
| pred_xstart = process_xstart( |
| self._predict_xstart_from_xprev(x_t=x, t=t, xprev=model_output) |
| ) |
| model_mean = model_output |
| elif self.model_mean_type in [ModelMeanType.START_X, ModelMeanType.EPSILON]: |
| if self.model_mean_type == ModelMeanType.START_X: |
| pred_xstart = process_xstart(model_output) |
| else: |
| pred_xstart = process_xstart( |
| self._predict_xstart_from_eps(x_t=x, t=t, eps=model_output) |
| ) |
| model_mean, _, _ = self.q_posterior_mean_variance( |
| x_start=pred_xstart, x_t=x, t=t |
| ) |
| else: |
| raise NotImplementedError(self.model_mean_type) |
|
|
| assert ( |
| model_mean.shape == model_log_variance.shape == pred_xstart.shape == x.shape |
| ) |
| return { |
| "mean": model_mean, |
| "variance": model_variance, |
| "log_variance": model_log_variance, |
| "pred_xstart": pred_xstart, |
| **aux_output, |
| } |
|
|
| def _predict_xstart_from_eps(self, x_t, t, eps): |
| assert x_t.shape == eps.shape |
| return ( |
| _extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t |
| - _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * eps |
| ) |
|
|
| def _predict_xstart_from_xprev(self, x_t, t, xprev): |
| assert x_t.shape == xprev.shape |
| return ( |
| _extract_into_tensor(1.0 / self.posterior_mean_coef1, t, x_t.shape) * xprev |
| - _extract_into_tensor( |
| self.posterior_mean_coef2 / self.posterior_mean_coef1, t, x_t.shape |
| ) |
| * x_t |
| ) |
|
|
| def _predict_eps_from_xstart(self, x_t, t, pred_xstart): |
| return ( |
| _extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t |
| - pred_xstart |
| ) / _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) |
|
|
| def _scale_timesteps(self, t): |
| if self.rescale_timesteps: |
| return t.float() * (1000.0 / self.num_timesteps) |
| return t |
|
|
| def condition_mean(self, cond_fn, p_mean_var, x, t, model_kwargs=None): |
| """ |
| Compute the mean for the previous step, given a function cond_fn that |
| computes the gradient of a conditional log probability with respect to |
| x. In particular, cond_fn computes grad(log(p(y|x))), and we want to |
| condition on y. |
| |
| This uses the conditioning strategy from Sohl-Dickstein et al. (2015). |
| """ |
| gradient = cond_fn(x, self._scale_timesteps(t), **model_kwargs) |
| new_mean = ( |
| p_mean_var["mean"].float() + p_mean_var["variance"] * gradient.float() |
| ) |
| return new_mean |
|
|
| def condition_mean_with_grad(self, cond_fn, p_mean_var, x, t, model_kwargs=None): |
| """ |
| Compute the mean for the previous step, given a function cond_fn that |
| computes the gradient of a conditional log probability with respect to |
| x. In particular, cond_fn computes grad(log(p(y|x))), and we want to |
| condition on y. |
| |
| This uses the conditioning strategy from Sohl-Dickstein et al. (2015). |
| """ |
| gradient = cond_fn(x, t, p_mean_var, **model_kwargs) |
| new_mean = ( |
| p_mean_var["mean"].float() + p_mean_var["variance"] * gradient.float() |
| ) |
| return new_mean |
|
|
| def condition_score(self, cond_fn, p_mean_var, x, t, model_kwargs=None): |
| """ |
| Compute what the p_mean_variance output would have been, should the |
| model's score function be conditioned by cond_fn. |
| |
| See condition_mean() for details on cond_fn. |
| |
| Unlike condition_mean(), this instead uses the conditioning strategy |
| from Song et al (2020). |
| """ |
| alpha_bar = _extract_into_tensor(self.alphas_cumprod, t, x.shape) |
|
|
| eps = self._predict_eps_from_xstart(x, t, p_mean_var["pred_xstart"]) |
| eps = eps - (1 - alpha_bar).sqrt() * cond_fn( |
| x, self._scale_timesteps(t), **model_kwargs |
| ) |
|
|
| out = p_mean_var.copy() |
| out["pred_xstart"] = self._predict_xstart_from_eps(x, t, eps) |
| out["mean"], _, _ = self.q_posterior_mean_variance( |
| x_start=out["pred_xstart"], x_t=x, t=t |
| ) |
| return out |
|
|
| def condition_score_with_grad(self, cond_fn, p_mean_var, x, t, model_kwargs=None): |
| """ |
| Compute what the p_mean_variance output would have been, should the |
| model's score function be conditioned by cond_fn. |
| |
| See condition_mean() for details on cond_fn. |
| |
| Unlike condition_mean(), this instead uses the conditioning strategy |
| from Song et al (2020). |
| """ |
| alpha_bar = _extract_into_tensor(self.alphas_cumprod, t, x.shape) |
|
|
| eps = self._predict_eps_from_xstart(x, t, p_mean_var["pred_xstart"]) |
| eps = eps - (1 - alpha_bar).sqrt() * cond_fn(x, t, p_mean_var, **model_kwargs) |
|
|
| out = p_mean_var.copy() |
| out["pred_xstart"] = self._predict_xstart_from_eps(x, t, eps) |
| out["mean"], _, _ = self.q_posterior_mean_variance( |
| x_start=out["pred_xstart"], x_t=x, t=t |
| ) |
| return out |
|
|
| def p_sample( |
| self, |
| model, |
| x, |
| t, |
| clip_denoised=True, |
| denoised_fn=None, |
| cond_fn=None, |
| model_kwargs=None, |
| const_noise=False, |
| ): |
| """ |
| Sample x_{t-1} from the model at the given timestep. |
| |
| :param model: the model to sample from. |
| :param x: the current tensor at x_{t-1}. |
| :param t: the value of t, starting at 0 for the first diffusion step. |
| :param clip_denoised: if True, clip the x_start prediction to [-1, 1]. |
| :param denoised_fn: if not None, a function which applies to the |
| x_start prediction before it is used to sample. |
| :param cond_fn: if not None, this is a gradient function that acts |
| similarly to the model. |
| :param model_kwargs: if not None, a dict of extra keyword arguments to |
| pass to the model. This can be used for conditioning. |
| :return: a dict containing the following keys: |
| - 'sample': a random sample from the model. |
| - 'pred_xstart': a prediction of x_0. |
| """ |
| out = self.p_mean_variance( |
| model, |
| x, |
| t, |
| clip_denoised=clip_denoised, |
| denoised_fn=denoised_fn, |
| model_kwargs=model_kwargs, |
| ) |
| noise = th.randn_like(x) |
| |
| if const_noise: |
| noise = noise[[0]].repeat(x.shape[0], 1, 1, 1) |
|
|
| nonzero_mask = ( |
| (t != 0).float().view(-1, *([1] * (len(x.shape) - 1))) |
| ) |
| if cond_fn is not None: |
| out["mean"] = self.condition_mean( |
| cond_fn, out, x, t, model_kwargs=model_kwargs |
| ) |
| sample = out["mean"] + nonzero_mask * th.exp(0.5 * out["log_variance"]) * noise |
| return {"sample": sample, "pred_xstart": out["pred_xstart"]} |
|
|
| def p_sample_with_grad( |
| self, |
| model, |
| x, |
| t, |
| clip_denoised=True, |
| denoised_fn=None, |
| cond_fn=None, |
| model_kwargs=None, |
| const_noise=False, |
| ): |
| """ |
| Sample x_{t-1} from the model at the given timestep. |
| |
| :param model: the model to sample from. |
| :param x: the current tensor at x_{t-1}. |
| :param t: the value of t, starting at 0 for the first diffusion step. |
| :param clip_denoised: if True, clip the x_start prediction to [-1, 1]. |
| :param denoised_fn: if not None, a function which applies to the |
| x_start prediction before it is used to sample. |
| :param cond_fn: if not None, this is a gradient function that acts |
| similarly to the model. |
| :param model_kwargs: if not None, a dict of extra keyword arguments to |
| pass to the model. This can be used for conditioning. |
| :return: a dict containing the following keys: |
| - 'sample': a random sample from the model. |
| - 'pred_xstart': a prediction of x_0. |
| """ |
| with th.enable_grad(): |
| x = x.detach().requires_grad_() |
| out = self.p_mean_variance( |
| model, |
| x, |
| t, |
| clip_denoised=clip_denoised, |
| denoised_fn=denoised_fn, |
| model_kwargs=model_kwargs, |
| ) |
| noise = th.randn_like(x) |
| if const_noise: |
| noise = noise[[0]].repeat(x.shape[0], 1, 1, 1) |
| nonzero_mask = ( |
| (t != 0).float().view(-1, *([1] * (len(x.shape) - 1))) |
| ) |
| if cond_fn is not None: |
| out["mean"] = self.condition_mean_with_grad( |
| cond_fn, out, x, t, model_kwargs=model_kwargs |
| ) |
| sample = out["mean"] + nonzero_mask * th.exp(0.5 * out["log_variance"]) * noise |
| return {"sample": sample, "pred_xstart": out["pred_xstart"].detach()} |
|
|
| def p_sample_loop( |
| self, |
| model, |
| shape, |
| noise=None, |
| clip_denoised=True, |
| denoised_fn=None, |
| cond_fn=None, |
| model_kwargs=None, |
| device=None, |
| progress=False, |
| skip_timesteps=0, |
| init_image=None, |
| randomize_class=False, |
| cond_fn_with_grad=False, |
| dump_steps=None, |
| const_noise=False, |
| ): |
| """ |
| Generate samples from the model. |
| |
| :param model: the model module. |
| :param shape: the shape of the samples, (N, C, H, W). |
| :param noise: if specified, the noise from the encoder to sample. |
| Should be of the same shape as `shape`. |
| :param clip_denoised: if True, clip x_start predictions to [-1, 1]. |
| :param denoised_fn: if not None, a function which applies to the |
| x_start prediction before it is used to sample. |
| :param cond_fn: if not None, this is a gradient function that acts |
| similarly to the model. |
| :param model_kwargs: if not None, a dict of extra keyword arguments to |
| pass to the model. This can be used for conditioning. |
| :param device: if specified, the device to create the samples on. |
| If not specified, use a model parameter's device. |
| :param progress: if True, show a tqdm progress bar. |
| :param const_noise: if True, use the same noise for every sample in the batch |
| (the noise of the first batch element is repeated at each step). |
| :return: a non-differentiable batch of samples. |
| """ |
| final = None |
| if dump_steps is not None: |
| dump = [] |
|
|
| for i, sample in enumerate( |
| self.p_sample_loop_progressive( |
| model, |
| shape, |
| noise=noise, |
| clip_denoised=clip_denoised, |
| denoised_fn=denoised_fn, |
| cond_fn=cond_fn, |
| model_kwargs=model_kwargs, |
| device=device, |
| progress=progress, |
| skip_timesteps=skip_timesteps, |
| init_image=init_image, |
| randomize_class=randomize_class, |
| cond_fn_with_grad=cond_fn_with_grad, |
| const_noise=const_noise, |
| ) |
| ): |
| if dump_steps is not None and i in dump_steps: |
| dump.append(deepcopy(sample["sample"])) |
| final = sample |
| if dump_steps is not None: |
| return dump |
| return final["sample"] |
|
|
| def p_sample_loop_progressive( |
| self, |
| model, |
| shape, |
| noise=None, |
| clip_denoised=True, |
| denoised_fn=None, |
| cond_fn=None, |
| model_kwargs=None, |
| device=None, |
| progress=False, |
| skip_timesteps=0, |
| init_image=None, |
| randomize_class=False, |
| cond_fn_with_grad=False, |
| const_noise=False, |
| ): |
| """ |
| Generate samples from the model and yield intermediate samples from |
| each timestep of diffusion. |
| |
| Arguments are the same as p_sample_loop(). |
| Returns a generator over dicts, where each dict is the return value of |
| p_sample(). |
| """ |
| if device is None: |
| device = next(model.parameters()).device |
| assert isinstance(shape, (tuple, list)) |
| if noise is not None: |
| img = noise |
| else: |
| img = th.randn(*shape, device=device) |
|
|
| if skip_timesteps and init_image is None: |
| init_image = th.zeros_like(img) |
|
|
| indices = list(range(self.num_timesteps - skip_timesteps))[::-1] |
|
|
| if init_image is not None: |
| my_t = th.ones([shape[0]], device=device, dtype=th.long) * indices[0] |
| img = self.q_sample(init_image, my_t, img) |
|
|
| if progress: |
| # Lazy import so that we don't depend on tqdm. |
| from tqdm.auto import tqdm |
|
|
| indices = tqdm(indices) |
|
|
| for i in indices: |
| t = th.tensor([i] * shape[0], device=device) |
| if randomize_class and "y" in model_kwargs: |
| model_kwargs["y"] = th.randint( |
| low=0, |
| high=model.num_classes, |
| size=model_kwargs["y"].shape, |
| device=model_kwargs["y"].device, |
| ) |
| with th.no_grad(): |
| sample_fn = ( |
| self.p_sample_with_grad if cond_fn_with_grad else self.p_sample |
| ) |
| out = sample_fn( |
| model, |
| img, |
| t, |
| clip_denoised=clip_denoised, |
| denoised_fn=denoised_fn, |
| cond_fn=cond_fn, |
| model_kwargs=model_kwargs, |
| const_noise=const_noise, |
| ) |
| yield out |
| img = out["sample"] |
|
|
| def ddim_sample( |
| self, |
| model, |
| x, |
| t, |
| clip_denoised=True, |
| denoised_fn=None, |
| cond_fn=None, |
| model_kwargs=None, |
| eta=0.0, |
| target_motion=None, |
| guide=None, |
| guide_2d=None, |
| overwrite_2d=False, |
| overwrite_data=None, |
| model_output=None, |
| ): |
| """ |
| Sample x_{t-1} from the model using DDIM. |
| |
| Same usage as p_sample(). |
| """ |
| out_orig = self.p_mean_variance( |
| model, |
| x, |
| t, |
| clip_denoised=clip_denoised, |
| denoised_fn=denoised_fn, |
| model_kwargs=model_kwargs, |
| model_output=model_output, |
| ) |
| if cond_fn is not None: |
| out = self.condition_score( |
| cond_fn, out_orig, x, t, model_kwargs=model_kwargs |
| ) |
| else: |
| out = out_orig |
|
|
| alpha_bar = _extract_into_tensor(self.alphas_cumprod, t, x.shape) |
| alpha_bar_prev = _extract_into_tensor(self.alphas_cumprod_prev, t, x.shape) |
|
|
| # Usually our model outputs epsilon, but we re-derive it |
| # in case we used x_start or x_prev prediction. |
| eps = self._predict_eps_from_xstart(x, t, out["pred_xstart"]) |
|
|
| sigma = ( |
| eta |
| * th.sqrt((1 - alpha_bar_prev) / (1 - alpha_bar)) |
| * th.sqrt(1 - alpha_bar / alpha_bar_prev) |
| ) |
| # Equation 12 of the DDIM paper. |
| noise = th.randn_like(x) |
| mean_pred = ( |
| out["pred_xstart"] * th.sqrt(alpha_bar_prev) |
| + th.sqrt(1 - alpha_bar_prev - sigma**2) * eps |
| ) |
| nonzero_mask = ( |
| (t != 0).float().view(-1, *([1] * (len(x.shape) - 1))) |
| ) |
| sample = mean_pred + nonzero_mask * sigma * noise |
| return {"sample": sample, "pred_xstart": out_orig["pred_xstart"], **out_orig} |
|
|
| def ddim_get_xt(self, x, t, pred_xstart, eta): |
| eps = self._predict_eps_from_xstart(x, t, pred_xstart) |
|
|
| alpha_bar = _extract_into_tensor(self.alphas_cumprod, t, x.shape) |
| alpha_bar_prev = _extract_into_tensor(self.alphas_cumprod_prev, t, x.shape) |
| sigma = ( |
| eta |
| * th.sqrt((1 - alpha_bar_prev) / (1 - alpha_bar)) |
| * th.sqrt(1 - alpha_bar / alpha_bar_prev) |
| ) |
| |
| mean_pred = ( |
| pred_xstart * th.sqrt(alpha_bar_prev) |
| + th.sqrt(1 - alpha_bar_prev - sigma**2) * eps |
| ) |
| nonzero_mask = ( |
| (t != 0).float().view(-1, *([1] * (len(x.shape) - 1))) |
| ) |
| std = nonzero_mask * sigma |
| noise = th.randn_like(x) |
| x_t_1 = mean_pred + std * noise |
| return {"x_t-1": x_t_1, "x_t-1_mean": mean_pred, "std": std} |
|
|
| def ddim_sample_with_grad( |
| self, |
| model, |
| x, |
| t, |
| clip_denoised=True, |
| denoised_fn=None, |
| cond_fn=None, |
| model_kwargs=None, |
| eta=0.0, |
| guide=None, |
| # The remaining arguments are accepted for interface parity with ddim_sample |
| # and are unused here. |
| target_motion=None, |
| guide_2d=None, |
| overwrite_2d=False, |
| overwrite_data=None, |
| ): |
| """ |
| Sample x_{t-1} from the model using DDIM. |
| |
| Same usage as p_sample(). |
| """ |
| with th.enable_grad(): |
| x = x.detach().requires_grad_() |
| out_orig = self.p_mean_variance( |
| model, |
| x, |
| t, |
| clip_denoised=clip_denoised, |
| denoised_fn=denoised_fn, |
| model_kwargs=model_kwargs, |
| ) |
| if cond_fn is not None: |
| out = self.condition_score_with_grad( |
| cond_fn, out_orig, x, t, model_kwargs=model_kwargs |
| ) |
| else: |
| out = out_orig |
|
|
| out["pred_xstart"] = out["pred_xstart"].detach() |
|
|
| # Usually our model outputs epsilon, but we re-derive it |
| # in case we used x_start or x_prev prediction. |
| eps = self._predict_eps_from_xstart(x, t, out["pred_xstart"]) |
|
|
| alpha_bar = _extract_into_tensor(self.alphas_cumprod, t, x.shape) |
| alpha_bar_prev = _extract_into_tensor(self.alphas_cumprod_prev, t, x.shape) |
| sigma = ( |
| eta |
| * th.sqrt((1 - alpha_bar_prev) / (1 - alpha_bar)) |
| * th.sqrt(1 - alpha_bar / alpha_bar_prev) |
| ) |
| |
| noise = th.randn_like(x) |
| mean_pred = ( |
| out["pred_xstart"] * th.sqrt(alpha_bar_prev) |
| + th.sqrt(1 - alpha_bar_prev - sigma**2) * eps |
| ) |
| nonzero_mask = ( |
| (t != 0).float().view(-1, *([1] * (len(x.shape) - 1))) |
| ) |
| sample = mean_pred + nonzero_mask * sigma * noise |
| return {"sample": sample, "pred_xstart": out_orig["pred_xstart"].detach()} |
|
|
| def ddim_reverse_sample( |
| self, |
| model, |
| x, |
| t, |
| clip_denoised=True, |
| denoised_fn=None, |
| model_kwargs=None, |
| eta=0.0, |
| ): |
| """ |
| Sample x_{t+1} from the model using DDIM reverse ODE. |
| """ |
| assert eta == 0.0, "Reverse ODE only for deterministic path" |
| out = self.p_mean_variance( |
| model, |
| x, |
| t, |
| clip_denoised=clip_denoised, |
| denoised_fn=denoised_fn, |
| model_kwargs=model_kwargs, |
| ) |
| # Usually our model outputs epsilon, but we re-derive it |
| # in case we used x_start or x_prev prediction. |
| eps = ( |
| _extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x.shape) * x |
| - out["pred_xstart"] |
| ) / _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x.shape) |
| alpha_bar_next = _extract_into_tensor(self.alphas_cumprod_next, t, x.shape) |
|
|
| # Equation 12 of the DDIM paper, reversed. |
| mean_pred = ( |
| out["pred_xstart"] * th.sqrt(alpha_bar_next) |
| + th.sqrt(1 - alpha_bar_next) * eps |
| ) |
|
|
| return {"sample": mean_pred, "pred_xstart": out["pred_xstart"]} |
|
|
| def ddim_sample_loop( |
| self, |
| model, |
| shape, |
| noise=None, |
| clip_denoised=True, |
| denoised_fn=None, |
| cond_fn=None, |
| model_kwargs=None, |
| model_kwargs_modify_fn=None, |
| device=None, |
| progress=False, |
| eta=0.0, |
| skip_timesteps=0, |
| repeat_final_timesteps=None, |
| init_image=None, |
| randomize_class=False, |
| cond_fn_with_grad=False, |
| dump_steps=None, |
| const_noise=False, |
| target_motion=None, |
| guide=None, |
| update_sample_fn=None, |
| guide_2d=None, |
| overwrite_2d=False, |
| overwrite_data=None, |
| ): |
| """ |
| Generate samples from the model using DDIM. |
| |
| Same usage as p_sample_loop(). |
| """ |
| |
| if dump_steps is not None: |
| raise NotImplementedError() |
| if const_noise: |
| raise NotImplementedError() |
|
|
| final = None |
| for sample in self.ddim_sample_loop_progressive( |
| model, |
| shape, |
| noise=noise, |
| clip_denoised=clip_denoised, |
| denoised_fn=denoised_fn, |
| cond_fn=cond_fn, |
| model_kwargs=model_kwargs, |
| model_kwargs_modify_fn=model_kwargs_modify_fn, |
| device=device, |
| progress=progress, |
| eta=eta, |
| skip_timesteps=skip_timesteps, |
| repeat_final_timesteps=repeat_final_timesteps, |
| init_image=init_image, |
| randomize_class=randomize_class, |
| cond_fn_with_grad=cond_fn_with_grad, |
| target_motion=target_motion, |
| guide=guide, |
| update_sample_fn=update_sample_fn, |
| guide_2d=guide_2d, |
| overwrite_2d=overwrite_2d, |
| overwrite_data=overwrite_data, |
| ): |
| final = sample |
| return final["sample"] |
|
|
| def ddim_sample_loop_with_aux( |
| self, |
| model, |
| shape, |
| noise=None, |
| clip_denoised=True, |
| denoised_fn=None, |
| cond_fn=None, |
| model_kwargs=None, |
| model_kwargs_modify_fn=None, |
| device=None, |
| progress=False, |
| eta=0.0, |
| skip_timesteps=0, |
| repeat_final_timesteps=None, |
| init_image=None, |
| randomize_class=False, |
| cond_fn_with_grad=False, |
| dump_steps=None, |
| const_noise=False, |
| target_motion=None, |
| guide=None, |
| update_sample_fn=None, |
| guide_2d=None, |
| overwrite_2d=False, |
| overwrite_data=None, |
| return_mid=False, |
| ): |
| """ |
| Generate samples from the model using DDIM. |
| |
| Same usage as p_sample_loop(). |
| """ |
| |
| if dump_steps is not None: |
| raise NotImplementedError() |
| if const_noise: |
| raise NotImplementedError() |
|
|
| final = None |
| intermediates = [] |
| for sample in self.ddim_sample_loop_progressive( |
| model, |
| shape, |
| noise=noise, |
| clip_denoised=clip_denoised, |
| denoised_fn=denoised_fn, |
| cond_fn=cond_fn, |
| model_kwargs=model_kwargs, |
| model_kwargs_modify_fn=model_kwargs_modify_fn, |
| device=device, |
| progress=progress, |
| eta=eta, |
| skip_timesteps=skip_timesteps, |
| repeat_final_timesteps=repeat_final_timesteps, |
| init_image=init_image, |
| randomize_class=randomize_class, |
| cond_fn_with_grad=cond_fn_with_grad, |
| target_motion=target_motion, |
| guide=guide, |
| update_sample_fn=update_sample_fn, |
| guide_2d=guide_2d, |
| overwrite_2d=overwrite_2d, |
| overwrite_data=overwrite_data, |
| ): |
| intermediates.append(sample) |
| final = sample |
| if return_mid: |
| final["intermediates"] = intermediates |
| return final |
|
|
| def ddim_sample_loop_progressive( |
| self, |
| model, |
| shape, |
| noise=None, |
| clip_denoised=True, |
| denoised_fn=None, |
| cond_fn=None, |
| model_kwargs=None, |
| model_kwargs_modify_fn=None, |
| device=None, |
| progress=False, |
| eta=0.0, |
| skip_timesteps=0, |
| repeat_final_timesteps=None, |
| init_image=None, |
| randomize_class=False, |
| cond_fn_with_grad=False, |
| target_motion=False, |
| guide=None, |
| update_sample_fn=None, |
| guide_2d=None, |
| overwrite_2d=False, |
| overwrite_data=None, |
| ): |
| """ |
| Use DDIM to sample from the model and yield intermediate samples from |
| each timestep of DDIM. |
| |
| Same usage as p_sample_loop_progressive(). |
| """ |
| if device is None: |
| device = next(model.parameters()).device |
| assert isinstance(shape, (tuple, list)) |
| if noise is not None: |
| img = noise |
| else: |
| img = th.randn(*shape, device=device) |
| img_start = img.clone() |
|
|
| if skip_timesteps and init_image is None: |
| init_image = th.zeros_like(img) |
|
|
| indices = list(range(self.num_timesteps - skip_timesteps))[::-1] |
| num_repeat_steps = 0  # default when the final timesteps are not repeated |
| if repeat_final_timesteps is not None: |
| if "%" in repeat_final_timesteps: |
| num_repeat_steps = int( |
| int(repeat_final_timesteps.replace("%", "")) / 100 * len(indices) |
| ) |
| else: |
| num_repeat_steps = int(repeat_final_timesteps) |
| indices = indices + indices[-num_repeat_steps:] |
|
|
| if init_image is not None: |
| my_t = th.ones([shape[0]], device=device, dtype=th.long) * indices[0] |
| img = self.q_sample(init_image, my_t, img) |
|
|
| if progress: |
| |
| from tqdm.auto import tqdm |
|
|
| indices = tqdm(indices) |
|
|
| for k, i in enumerate(indices): |
| t = th.tensor([i] * shape[0], device=device) |
| if randomize_class and "y" in model_kwargs: |
| model_kwargs["y"] = th.randint( |
| low=0, |
| high=model.num_classes, |
| size=model_kwargs["y"].shape, |
| device=model_kwargs["y"].device, |
| ) |
| # Computed unconditionally: it is also needed by update_sample_fn below. |
| is_final_repeat_timestep = k >= len(indices) - num_repeat_steps |
| if model_kwargs_modify_fn is not None: |
| cur_model_kwargs = model_kwargs_modify_fn( |
| model_kwargs, img, i, is_final_repeat_timestep |
| ) |
| else: |
| cur_model_kwargs = model_kwargs |
|
|
| with th.no_grad(): |
| sample_fn = ( |
| self.ddim_sample_with_grad |
| if cond_fn_with_grad |
| else self.ddim_sample |
| ) |
| out = sample_fn( |
| model, |
| img, |
| t, |
| clip_denoised=clip_denoised, |
| denoised_fn=denoised_fn, |
| cond_fn=cond_fn, |
| model_kwargs=cur_model_kwargs, |
| eta=eta, |
| target_motion=target_motion, |
| guide=guide, |
| guide_2d=guide_2d, |
| overwrite_2d=overwrite_2d, |
| overwrite_data=overwrite_data, |
| ) |
| yield out |
| if update_sample_fn is not None: |
| before_repeat_timesteps = k == len(indices) - num_repeat_steps - 1 |
| img = update_sample_fn( |
| img, |
| out, |
| i, |
| is_final_repeat_timestep, |
| before_repeat_timesteps, |
| img_start, |
| ) |
| else: |
| img = out["sample"] |
|
|
| def ddim_sds_loop( |
| self, |
| model, |
| x0, |
| shape, |
| sds_weight_type="alphas", |
| noise=None, |
| clip_denoised=True, |
| denoised_fn=None, |
| cond_fn=None, |
| model_kwargs=None, |
| model_kwargs_modify_fn=None, |
| device=None, |
| progress=False, |
| eta=0.0, |
| skip_timesteps=0, |
| repeat_final_timesteps=None, |
| init_image=None, |
| randomize_class=False, |
| cond_fn_with_grad=False, |
| dump_steps=None, |
| opt_steps=500, |
| const_noise=False, |
| target_motion=None, |
| guide=None, |
| update_sample_fn=None, |
| guide_2d=None, |
| overwrite_2d=False, |
| overwrite_data=None, |
| ): |
| """ |
| Generate samples from the model using DDIM. |
| |
| Same usage as p_sample_loop(). |
| """ |
| |
| if dump_steps is not None: |
| raise NotImplementedError() |
| if const_noise: |
| raise NotImplementedError() |
|
|
| final = None |
| for sample in self.ddim_sds_loop_progressive( |
| model, |
| x0, |
| shape, |
| sds_weight_type=sds_weight_type, |
| noise=noise, |
| clip_denoised=clip_denoised, |
| denoised_fn=denoised_fn, |
| cond_fn=cond_fn, |
| model_kwargs=model_kwargs, |
| model_kwargs_modify_fn=model_kwargs_modify_fn, |
| device=device, |
| progress=progress, |
| eta=eta, |
| opt_steps=opt_steps, |
| skip_timesteps=skip_timesteps, |
| repeat_final_timesteps=repeat_final_timesteps, |
| init_image=init_image, |
| randomize_class=randomize_class, |
| cond_fn_with_grad=cond_fn_with_grad, |
| target_motion=target_motion, |
| guide=guide, |
| update_sample_fn=update_sample_fn, |
| guide_2d=guide_2d, |
| overwrite_2d=overwrite_2d, |
| overwrite_data=overwrite_data, |
| ): |
| final = sample |
| return final |
|
|
| def ddim_sds_loop_progressive( |
| self, |
| model, |
| x0, |
| shape, |
| sds_weight_type="alphas", |
| noise=None, |
| clip_denoised=True, |
| denoised_fn=None, |
| cond_fn=None, |
| model_kwargs=None, |
| model_kwargs_modify_fn=None, |
| device=None, |
| progress=False, |
| eta=0.0, |
| opt_steps=500, |
| skip_timesteps=0, |
| repeat_final_timesteps=None, |
| init_image=None, |
| randomize_class=False, |
| cond_fn_with_grad=False, |
| target_motion=False, |
| guide=None, |
| update_sample_fn=None, |
| guide_2d=None, |
| overwrite_2d=False, |
| overwrite_data=None, |
| ): |
| """ |
| Use DDIM to sample from the model and yield intermediate samples from |
| each timestep of DDIM. |
| |
| Same usage as p_sample_loop_progressive(). |
| """ |
| if device is None: |
| device = next(model.parameters()).device |
| assert isinstance(shape, (tuple, list)) |
| # motion2global, motion_mean, motion_std, smpl and smpl2motion are expected to |
| # be attached to this object elsewhere; they are not defined in this class. |
| init_smpl_pose, init_smpl_transl = self.motion2global( |
| x0, |
| mean=self.motion_mean, |
| std=self.motion_std, |
| smpl=self.smpl, |
| return_jts=False, |
| ) |
| init_smpl_pose_6d = matrix_to_rotation_6d(init_smpl_pose) |
| init_smpl_pose_6d = Variable( |
| init_smpl_pose_6d.clone().contiguous().detach(), requires_grad=True |
| ) |
| init_smpl_transl = Variable( |
| init_smpl_transl.clone().contiguous().detach(), requires_grad=True |
| ) |
| |
| |
| optim_type = "SGD" |
| bs, seqlen = init_smpl_pose_6d.shape[:2] |
| if optim_type == "LBFGS": |
| opt_steps = opt_steps // 10 |
| |
| optimizer = torch.optim.LBFGS( |
| [init_smpl_pose_6d, init_smpl_transl], |
| max_iter=4, |
| history_size=10, |
| line_search_fn="strong_wolfe", |
| ) |
| elif optim_type == "Adam": |
| |
| optimizer = torch.optim.Adam([init_smpl_pose_6d, init_smpl_transl], lr=1e-2) |
| elif optim_type == "SGD": |
| |
| optimizer = torch.optim.SGD([init_smpl_pose_6d, init_smpl_transl], lr=1e-3) |
| else: |
| raise ValueError(f"Unknown optimizer type: {optim_type}") |
|
|
| indices = list(range(self.num_timesteps - skip_timesteps))[::-1] |
|
|
| pbar = range(opt_steps) |
| if progress: |
| from tqdm.auto import tqdm |
|
|
| pbar = tqdm(pbar) |
|
|
| for _ in pbar: |
| ind = random.randint(0, len(indices) - 1) |
| t = th.tensor([indices[ind]] * shape[0], device=device) |
| cur_model_kwargs = model_kwargs |
| if sds_weight_type == "alphas": |
| w_t = _extract_into_tensor(self.alphas_cumprod, t, x0.shape) |
| elif sds_weight_type == "sqrt_alphas": |
| w_t = _extract_into_tensor(self.sqrt_alphas_cumprod, t, x0.shape) |
| elif sds_weight_type == "1minus_alphas": |
| w_t = _extract_into_tensor(1 - self.alphas_cumprod, t, x0.shape) |
| elif sds_weight_type == "sqrt_1minus_alphas": |
| w_t = _extract_into_tensor( |
| self.sqrt_one_minus_alphas_cumprod, t, x0.shape |
| ) |
| elif sds_weight_type == "log_one_minus_alphas_cumprod": |
| w_t = _extract_into_tensor( |
| self.log_one_minus_alphas_cumprod, t, x0.shape |
| ) |
| elif sds_weight_type == "constant": |
| w_t = th.ones_like(x0) |
| else: |
| raise ValueError(f"Unknown sds weight type: {sds_weight_type}") |
|
|
| w_t = w_t[:, :1, :1, :1] |
|
|
| init_smpl_pose = rotation_6d_to_matrix(init_smpl_pose_6d) |
| with th.no_grad(): |
| orient_mat = init_smpl_pose[:, :, 0] |
| pose_feat = matrix_to_rotation_6d(init_smpl_pose[:, :, 1:]).reshape( |
| bs, seqlen, 23 * 6 |
| ) |
| x_start = self.smpl2motion( |
| orient_mat, init_smpl_transl, pose_feat, None, None |
| ) |
| x_start = (x_start - self.motion_mean) / self.motion_std |
| x_start = x_start.transpose(1, 2)[:, :, None, :] |
| xt = self.q_sample(x_start, t) |
| sample_fn = self.ddim_sample |
| out = sample_fn( |
| model, |
| xt, |
| t, |
| clip_denoised=clip_denoised, |
| denoised_fn=denoised_fn, |
| cond_fn=cond_fn, |
| model_kwargs=cur_model_kwargs, |
| eta=eta, |
| target_motion=target_motion, |
| guide=guide, |
| guide_2d=guide_2d, |
| overwrite_2d=overwrite_2d, |
| overwrite_data=overwrite_data, |
| ) |
| sampled_x0 = out["pred_xstart"] |
|
|
| def closure(): |
| optimizer.zero_grad() |
| cam2world = cur_model_kwargs["y"]["cam2world"] |
| local_kpt2d = cur_model_kwargs["y"]["local_kpt2d"] |
| intrinsics = cur_model_kwargs["y"]["cam_intrinsics"] |
| cam_orient = rotation_6d_to_matrix(cam2world[:, :, :6]) |
| cam_pos = cam2world[:, :, 6:] |
| local_orient = cur_model_kwargs["y"]["local_orient"] |
| local_transl = cur_model_kwargs["y"]["local_transl"] |
| betas = cur_model_kwargs["y"]["betas"] |
| kpt2d_score = cur_model_kwargs["y"]["kpt2d_score"] |
| R0 = local_orient[:, :1] |
| t0 = local_transl[:, :1] |
| cx, cy = intrinsics[:, :, 0, 2], intrinsics[:, :, 1, 2] |
| focal = intrinsics[:, :, 0, 0] |
| scale = focal |
| bs = local_kpt2d.size(0) |
|
|
| |
| |
| |
| |
| |
| |
| |
| smpl_pose = init_smpl_pose |
| smpl_transl = init_smpl_transl |
| global_orient = smpl_pose[:, :, 0] |
| global_orient = R0 @ global_orient |
| smpl_transl = (R0 @ smpl_transl[..., None])[..., 0] + t0 |
| local_pose = smpl_pose[:, :, 1:] |
| bs, seqlen = smpl_pose.shape[:2] |
| smpl_pose = torch.cat([global_orient[:, :, None], local_pose], dim=2) |
|
|
| |
| |
| select_idx = list(range(seqlen)) |
|
|
| select_global_orient = global_orient[:, select_idx] |
| select_smpl_transl = smpl_transl[:, select_idx] |
| select_local_pose = local_pose[:, select_idx] |
| select_betas = betas[:, select_idx] |
| select_kpt2d_score = kpt2d_score[:, select_idx] |
| select_local_kpt2d = local_kpt2d[:, select_idx] |
| new_len = select_global_orient.shape[1] |
| sout = self.smpl( |
| global_orient=select_global_orient.reshape(bs * new_len, 1, 3, 3), |
| body_pose=select_local_pose.reshape(bs * new_len, 23, 3, 3), |
| betas=select_betas.reshape(bs * new_len, 10), |
| root_trans=select_smpl_transl.reshape(bs * new_len, 3), |
| orig_joints=True, |
| pose2rot=False, |
| ) |
| joints17 = torch.einsum( |
| "bik,ji->bjk", [sout.vertices, self.smpl.J_regressor_wham] |
| )[:, :17] |
| joints17 = joints17.reshape(bs, new_len, 17, 3) |
|
|
| select_cam_orient = cam_orient[:, select_idx] |
| select_cam_pos = cam_pos[:, select_idx] |
| |
| joints = ( |
| select_cam_orient.transpose(2, 3) |
| @ (joints17 - select_cam_pos[:, :, None]).transpose(2, 3) |
| ).transpose(2, 3) |
| joints = joints / joints[..., 2:3] |
| joints = (intrinsics @ joints.transpose(2, 3)).transpose(2, 3) |
| joints = joints[..., :2] |
| diff = joints - select_local_kpt2d |
| robust_sqr_dist = gmof(diff, sigma=scale[..., None, None]) |
| robust_sqr_dist = ( |
| select_kpt2d_score**2 |
| ) * robust_sqr_dist |
| |
| |
| |
| |
| |
|
|
| # Robust 2D reprojection term; computed here but not included in the total |
| # loss below. |
| loss_reproj = robust_sqr_dist.reshape(bs, -1).mean(dim=-1).sum() |
|
|
| |
| loss_sds = ((sampled_x0 - x_start) ** 2) * w_t |
| |
| loss_sds = loss_sds.reshape(bs, -1).mean(dim=-1).sum() |
|
|
| sampled_x0_pose, sampled_x0_transl = self.motion2global( |
| sampled_x0, |
| mean=self.motion_mean, |
| std=self.motion_std, |
| smpl=self.smpl, |
| return_jts=False, |
| ) |
| loss_sds_pose = ( |
| (sampled_x0_pose - smpl_pose) ** 2 * w_t.reshape(bs, 1, 1, 1, 1) |
| )[:, :, 1:] |
| loss_sds_transl = (sampled_x0_transl - smpl_transl) ** 2 * w_t.reshape( |
| bs, 1, 1 |
| ) |
| |
| |
|
|
| loss_sds_pose = loss_sds_pose.reshape(bs, -1).mean(dim=-1).sum() |
| loss_sds_transl = loss_sds_transl.reshape(bs, -1).mean(dim=-1).sum() |
| |
| loss = ( |
| loss_sds * 0.1 + loss_sds_pose * 0.0001 + loss_sds_transl * 0.0001 |
| ) |
| |
| |
|
|
| loss.backward() |
| |
| optimizer.step() |
| return loss_sds |
|
|
| optimizer.step(closure) |
| yield x_start.detach() |
|
|
| def plms_sample( |
| self, |
| model, |
| x, |
| t, |
| clip_denoised=True, |
| denoised_fn=None, |
| cond_fn=None, |
| model_kwargs=None, |
| cond_fn_with_grad=False, |
| order=2, |
| old_out=None, |
| ): |
| """ |
| Sample x_{t-1} from the model using Pseudo Linear Multistep. |
| |
| Same usage as p_sample(). |
| """ |
| if not int(order) or not 1 <= order <= 4: |
| raise ValueError("order is invalid (should be int from 1-4).") |
|
|
| def get_model_output(x, t): |
| with th.set_grad_enabled(cond_fn_with_grad and cond_fn is not None): |
| x = x.detach().requires_grad_() if cond_fn_with_grad else x |
| out_orig = self.p_mean_variance( |
| model, |
| x, |
| t, |
| clip_denoised=clip_denoised, |
| denoised_fn=denoised_fn, |
| model_kwargs=model_kwargs, |
| ) |
| if cond_fn is not None: |
| if cond_fn_with_grad: |
| out = self.condition_score_with_grad( |
| cond_fn, out_orig, x, t, model_kwargs=model_kwargs |
| ) |
| x = x.detach() |
| else: |
| out = self.condition_score( |
| cond_fn, out_orig, x, t, model_kwargs=model_kwargs |
| ) |
| else: |
| out = out_orig |
|
|
| |
| |
| eps = self._predict_eps_from_xstart(x, t, out["pred_xstart"]) |
| return eps, out, out_orig |
|
|
| alpha_bar = _extract_into_tensor(self.alphas_cumprod, t, x.shape) |
| alpha_bar_prev = _extract_into_tensor(self.alphas_cumprod_prev, t, x.shape) |
| eps, out, out_orig = get_model_output(x, t) |
|
|
| if order > 1 and old_out is None: |
| |
| old_eps = [eps] |
| mean_pred = ( |
| out["pred_xstart"] * th.sqrt(alpha_bar_prev) |
| + th.sqrt(1 - alpha_bar_prev) * eps |
| ) |
| eps_2, _, _ = get_model_output(mean_pred, t - 1) |
| eps_prime = (eps + eps_2) / 2 |
| pred_prime = self._predict_xstart_from_eps(x, t, eps_prime) |
| mean_pred = ( |
| pred_prime * th.sqrt(alpha_bar_prev) |
| + th.sqrt(1 - alpha_bar_prev) * eps_prime |
| ) |
| else: |
| |
| old_eps = old_out["old_eps"] |
| old_eps.append(eps) |
| cur_order = min(order, len(old_eps)) |
| if cur_order == 1: |
| eps_prime = old_eps[-1] |
| elif cur_order == 2: |
| eps_prime = (3 * old_eps[-1] - old_eps[-2]) / 2 |
| elif cur_order == 3: |
| eps_prime = (23 * old_eps[-1] - 16 * old_eps[-2] + 5 * old_eps[-3]) / 12 |
| elif cur_order == 4: |
| eps_prime = ( |
| 55 * old_eps[-1] |
| - 59 * old_eps[-2] |
| + 37 * old_eps[-3] |
| - 9 * old_eps[-4] |
| ) / 24 |
| else: |
| raise RuntimeError("cur_order is invalid.") |
| pred_prime = self._predict_xstart_from_eps(x, t, eps_prime) |
| mean_pred = ( |
| pred_prime * th.sqrt(alpha_bar_prev) |
| + th.sqrt(1 - alpha_bar_prev) * eps_prime |
| ) |
|
|
| if len(old_eps) >= order: |
| old_eps.pop(0) |
|
|
| nonzero_mask = (t != 0).float().view(-1, *([1] * (len(x.shape) - 1))) |
| sample = mean_pred * nonzero_mask + out["pred_xstart"] * (1 - nonzero_mask) |
|
|
| return { |
| "sample": sample, |
| "pred_xstart": out_orig["pred_xstart"], |
| "old_eps": old_eps, |
| } |
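| # Note: the eps_prime combinations above, e.g. (3*e[-1] - e[-2]) / 2 and |
| # (23*e[-1] - 16*e[-2] + 5*e[-3]) / 12, are the classical Adams-Bashforth |
| # multistep coefficients applied to the epsilon predictions, which is what |
| # makes this a pseudo linear multistep (PLMS) update rather than plain DDIM. |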
|
|
| def plms_sample_loop( |
| self, |
| model, |
| shape, |
| noise=None, |
| clip_denoised=True, |
| denoised_fn=None, |
| cond_fn=None, |
| model_kwargs=None, |
| device=None, |
| progress=False, |
| skip_timesteps=0, |
| init_image=None, |
| randomize_class=False, |
| cond_fn_with_grad=False, |
| order=2, |
| ): |
| """ |
| Generate samples from the model using Pseudo Linear Multistep. |
| |
| Same usage as p_sample_loop(). |
| """ |
| final = None |
| for sample in self.plms_sample_loop_progressive( |
| model, |
| shape, |
| noise=noise, |
| clip_denoised=clip_denoised, |
| denoised_fn=denoised_fn, |
| cond_fn=cond_fn, |
| model_kwargs=model_kwargs, |
| device=device, |
| progress=progress, |
| skip_timesteps=skip_timesteps, |
| init_image=init_image, |
| randomize_class=randomize_class, |
| cond_fn_with_grad=cond_fn_with_grad, |
| order=order, |
| ): |
| final = sample |
| return final["sample"] |
|
|
| def plms_sample_loop_progressive( |
| self, |
| model, |
| shape, |
| noise=None, |
| clip_denoised=True, |
| denoised_fn=None, |
| cond_fn=None, |
| model_kwargs=None, |
| device=None, |
| progress=False, |
| skip_timesteps=0, |
| init_image=None, |
| randomize_class=False, |
| cond_fn_with_grad=False, |
| order=2, |
| ): |
| """ |
| Use PLMS to sample from the model and yield intermediate samples from each |
| timestep of PLMS. |
| |
| Same usage as p_sample_loop_progressive(). |
| """ |
| if device is None: |
| device = next(model.parameters()).device |
| assert isinstance(shape, (tuple, list)) |
| if noise is not None: |
| img = noise |
| else: |
| img = th.randn(*shape, device=device) |
|
|
| if skip_timesteps and init_image is None: |
| init_image = th.zeros_like(img) |
|
|
| indices = list(range(self.num_timesteps - skip_timesteps))[::-1] |
|
|
| if init_image is not None: |
| my_t = th.ones([shape[0]], device=device, dtype=th.long) * indices[0] |
| img = self.q_sample(init_image, my_t, img) |
|
|
| if progress: |
| |
| from tqdm.auto import tqdm |
|
|
| indices = tqdm(indices) |
|
|
| old_out = None |
|
|
| for i in indices: |
| t = th.tensor([i] * shape[0], device=device) |
| if randomize_class and "y" in model_kwargs: |
| model_kwargs["y"] = th.randint( |
| low=0, |
| high=model.num_classes, |
| size=model_kwargs["y"].shape, |
| device=model_kwargs["y"].device, |
| ) |
| with th.no_grad(): |
| out = self.plms_sample( |
| model, |
| img, |
| t, |
| clip_denoised=clip_denoised, |
| denoised_fn=denoised_fn, |
| cond_fn=cond_fn, |
| model_kwargs=model_kwargs, |
| cond_fn_with_grad=cond_fn_with_grad, |
| order=order, |
| old_out=old_out, |
| ) |
| yield out |
| old_out = out |
| img = out["sample"] |
|
|
| def _vb_terms_bpd( |
| self, model, x_start, x_t, t, clip_denoised=True, model_kwargs=None |
| ): |
| """ |
| Get a term for the variational lower-bound. |
| |
| The resulting units are bits (rather than nats, as one might expect). |
| This allows for comparison to other papers. |
| |
| :return: a dict with the following keys: |
| - 'output': a shape [N] tensor of NLLs or KLs. |
| - 'pred_xstart': the x_0 predictions. |
| """ |
| true_mean, _, true_log_variance_clipped = self.q_posterior_mean_variance( |
| x_start=x_start, x_t=x_t, t=t |
| ) |
| out = self.p_mean_variance( |
| model, x_t, t, clip_denoised=clip_denoised, model_kwargs=model_kwargs |
| ) |
| kl = normal_kl( |
| true_mean, true_log_variance_clipped, out["mean"], out["log_variance"] |
| ) |
| kl = mean_flat(kl) / np.log(2.0) |
|
|
| decoder_nll = -discretized_gaussian_log_likelihood( |
| x_start, means=out["mean"], log_scales=0.5 * out["log_variance"] |
| ) |
| assert decoder_nll.shape == x_start.shape |
| decoder_nll = mean_flat(decoder_nll) / np.log(2.0) |
|
|
| # At the first timestep return the decoder NLL, |
| # otherwise return KL(q(x_{t-1} | x_t, x_0) || p(x_{t-1} | x_t)). |
| output = th.where((t == 0), decoder_nll, kl) |
| return {"output": output, "pred_xstart": out["pred_xstart"]} |
|
|
| def training_losses( |
| self, model, x_start, t, model_kwargs=None, noise=None, dataset=None |
| ): |
| """ |
| Compute training losses for a single timestep. |
| |
| :param model: the model to evaluate loss on. |
| :param x_start: the [N x C x ...] tensor of inputs. |
| :param t: a batch of timestep indices. |
| :param model_kwargs: if not None, a dict of extra keyword arguments to |
| pass to the model. This can be used for conditioning. |
| :param noise: if specified, the specific Gaussian noise to try to remove. |
| :return: a dict with the key "loss" containing a tensor of shape [N]. |
| Some mean or variance settings may also have other keys. |
| """ |
| if model_kwargs is None: |
| model_kwargs = {} |
| if noise is None: |
| noise = th.randn_like(x_start) |
| x_t = self.q_sample(x_start, t, noise=noise) |
|
|
| terms = {} |
|
|
| if self.loss_type == LossType.KL or self.loss_type == LossType.RESCALED_KL: |
| terms["loss"] = self._vb_terms_bpd( |
| model=model, |
| x_start=x_start, |
| x_t=x_t, |
| t=t, |
| clip_denoised=False, |
| model_kwargs=model_kwargs, |
| )["output"] |
| if self.loss_type == LossType.RESCALED_KL: |
| terms["loss"] *= self.num_timesteps |
| elif self.loss_type == LossType.MSE or self.loss_type == LossType.RESCALED_MSE: |
| model_output = model(x_t, self._scale_timesteps(t), **model_kwargs) |
|
|
| if self.model_var_type in [ |
| ModelVarType.LEARNED, |
| ModelVarType.LEARNED_RANGE, |
| ]: |
| B, C = x_t.shape[:2] |
| assert model_output.shape == (B, C * 2, *x_t.shape[2:]) |
| model_output, model_var_values = th.split(model_output, C, dim=1) |
| # Learn the variance using the variational bound, but don't let |
| # it affect our mean prediction. |
| frozen_out = th.cat([model_output.detach(), model_var_values], dim=1) |
| terms["vb"] = self._vb_terms_bpd( |
| model=lambda *args, r=frozen_out: r, |
| x_start=x_start, |
| x_t=x_t, |
| t=t, |
| clip_denoised=False, |
| )["output"] |
| if self.loss_type == LossType.RESCALED_MSE: |
| # Divide by 1000 for equivalence with the initial implementation. |
| # Without a factor of 1/1000, the VB term hurts the MSE term. |
| terms["vb"] *= self.num_timesteps / 1000.0 |
|
|
| target = { |
| ModelMeanType.PREVIOUS_X: self.q_posterior_mean_variance( |
| x_start=x_start, x_t=x_t, t=t |
| )[0], |
| ModelMeanType.START_X: x_start, |
| ModelMeanType.EPSILON: noise, |
| }[self.model_mean_type] |
| assert ( |
| model_output.shape == target.shape == x_start.shape |
| ) |
|
|
| mask = model_kwargs["y"]["mask"] |
| terms["rot_mse"] = self.masked_l2( |
| target, model_output, mask |
| ) |
|
|
| terms["loss"] = terms["rot_mse"] + terms.get("vb", 0.0) |
|
|
| else: |
| raise NotImplementedError(self.loss_type) |
|
|
| return terms |
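| # Training sketch (hypothetical names; the MSE branch above expects |
| # model_kwargs["y"]["mask"] to be present): |
| #   t = th.randint(0, diffusion.num_timesteps, (x_start.shape[0],), device=x_start.device) |
| #   losses = diffusion.training_losses(model, x_start, t, model_kwargs={"y": y_cond}) |
| #   loss = losses["loss"].mean() |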
|
|
| def get_vb_term(self, x_t, x_start, t, model_output): |
| vb = None |
| if self.model_var_type in [ |
| ModelVarType.LEARNED, |
| ModelVarType.LEARNED_RANGE, |
| ]: |
| B, C = x_t.shape[:2] |
| assert model_output.shape == (B, C * 2, *x_t.shape[2:]) |
| model_output, model_var_values = th.split(model_output, C, dim=1) |
| |
| |
| frozen_out = th.cat([model_output.detach(), model_var_values], dim=1) |
| vb = self._vb_terms_bpd( |
| model=lambda *args, r=frozen_out: r, |
| x_start=x_start, |
| x_t=x_t, |
| t=t, |
| clip_denoised=False, |
| )["output"] |
| if self.loss_type == LossType.RESCALED_MSE: |
| |
| |
| vb *= self.num_timesteps / 1000.0 |
| return vb |
|
|
| def _prior_bpd(self, x_start): |
| """ |
| Get the prior KL term for the variational lower-bound, measured in |
| bits-per-dim. |
| |
| This term can't be optimized, as it only depends on the encoder. |
| |
| :param x_start: the [N x C x ...] tensor of inputs. |
| :return: a batch of [N] KL values (in bits), one per batch element. |
| """ |
| batch_size = x_start.shape[0] |
| t = th.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device) |
| qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t) |
| kl_prior = normal_kl( |
| mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0 |
| ) |
| return mean_flat(kl_prior) / np.log(2.0) |
|
|
| def calc_bpd_loop(self, model, x_start, clip_denoised=True, model_kwargs=None): |
| """ |
| Compute the entire variational lower-bound, measured in bits-per-dim, |
| as well as other related quantities. |
| |
| :param model: the model to evaluate loss on. |
| :param x_start: the [N x C x ...] tensor of inputs. |
| :param clip_denoised: if True, clip denoised samples. |
| :param model_kwargs: if not None, a dict of extra keyword arguments to |
| pass to the model. This can be used for conditioning. |
| |
| :return: a dict containing the following keys: |
| - total_bpd: the total variational lower-bound, per batch element. |
| - prior_bpd: the prior term in the lower-bound. |
| - vb: an [N x T] tensor of terms in the lower-bound. |
| - xstart_mse: an [N x T] tensor of x_0 MSEs for each timestep. |
| - mse: an [N x T] tensor of epsilon MSEs for each timestep. |
| """ |
| device = x_start.device |
| batch_size = x_start.shape[0] |
|
|
| vb = [] |
| xstart_mse = [] |
| mse = [] |
| for t in list(range(self.num_timesteps))[::-1]: |
| t_batch = th.tensor([t] * batch_size, device=device) |
| noise = th.randn_like(x_start) |
| x_t = self.q_sample(x_start=x_start, t=t_batch, noise=noise) |
| # Calculate the VLB term at the current timestep. |
| with th.no_grad(): |
| out = self._vb_terms_bpd( |
| model, |
| x_start=x_start, |
| x_t=x_t, |
| t=t_batch, |
| clip_denoised=clip_denoised, |
| model_kwargs=model_kwargs, |
| ) |
| vb.append(out["output"]) |
| xstart_mse.append(mean_flat((out["pred_xstart"] - x_start) ** 2)) |
| eps = self._predict_eps_from_xstart(x_t, t_batch, out["pred_xstart"]) |
| mse.append(mean_flat((eps - noise) ** 2)) |
|
|
| vb = th.stack(vb, dim=1) |
| xstart_mse = th.stack(xstart_mse, dim=1) |
| mse = th.stack(mse, dim=1) |
|
|
| prior_bpd = self._prior_bpd(x_start) |
| total_bpd = vb.sum(dim=1) + prior_bpd |
| return { |
| "total_bpd": total_bpd, |
| "prior_bpd": prior_bpd, |
| "vb": vb, |
| "xstart_mse": xstart_mse, |
| "mse": mse, |
| } |
|
|
|
|
| def _extract_into_tensor(arr, timesteps, broadcast_shape): |
| """ |
| Extract values from a 1-D numpy array for a batch of indices. |
| |
| :param arr: the 1-D numpy array. |
| :param timesteps: a tensor of indices into the array to extract. |
| :param broadcast_shape: a larger shape of K dimensions with the batch |
| dimension equal to the length of timesteps. |
| :return: a tensor of the gathered values expanded to broadcast_shape (K dims). |
| """ |
| res = th.from_numpy(arr).to(device=timesteps.device)[timesteps].float() |
| while len(res.shape) < len(broadcast_shape): |
| res = res[..., None] |
| return res.expand(broadcast_shape) |
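| # Broadcasting note: for arr of shape [T] and timesteps of shape [N], the values |
| # are gathered per batch element and expanded to broadcast_shape (e.g. [N, C, 1, L]) |
| # so they can be multiplied elementwise with a batch of tensors. |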
|
|