import torch
import torch.nn as nn
import numpy as np
from tqdm import tqdm
class GaussianFourierProjection(nn.Module):
    """Gaussian random features for encoding time steps."""

    def __init__(self, embed_dim, scale=30.):
        super().__init__()
        # Randomly sample weights (frequencies) during initialization.
        # These weights (frequencies) are fixed during optimization and are not trainable.
        self.W = nn.Parameter(torch.randn(embed_dim // 2) * scale, requires_grad=False)

    def forward(self, x):
        # Cosine(2 pi freq x), Sine(2 pi freq x)
        x_proj = x[:, None] * self.W[None, :] * 2 * np.pi
        return torch.cat([torch.sin(x_proj), torch.cos(x_proj)], dim=-1)
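
# Usage sketch (illustrative addition, not part of the original module): the projection
# maps a batch of scalar time steps (B,) to an embedding of shape (B, embed_dim),
# half sine and half cosine features at fixed random frequencies, e.g.
#
#   t = torch.rand(8)                           # (8,)
#   emb = GaussianFourierProjection(256)(t)     # (8, 256)
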
class Dense(nn.Module):
    """
    Maps an embedding vector to a bias/scale tensor that can be broadcast over a
    2-D feature map (B, C, H, W) – output shape is (B, C, 1, 1).
    """

    def __init__(self, input_dim: int, output_dim: int):
        super().__init__()
        self.dense = nn.Linear(input_dim, output_dim)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        B = x.size(0)
        x = x.view(B, -1)                        # (B, input_dim)
        return self.dense(x).view(B, -1, 1, 1)   # (B, C, 1, 1)
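
# Broadcasting sketch (illustrative addition, not part of the original module): Dense turns
# a (B, input_dim) embedding into a per-channel (B, C, 1, 1) tensor, so it can be added to
# or multiplied with a (B, C, H, W) feature map, e.g.
#
#   emb = torch.randn(8, 256)
#   mod = Dense(256, 64)(emb)            # (8, 64, 1, 1)
#   fmap = torch.randn(8, 64, 32, 32)
#   fmap = fmap + mod                    # broadcasts over H and W
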
class UNet(nn.Module):
    """A time-dependent score-based model built upon the U-Net architecture."""

    def __init__(self, marginal_prob_std, channels=[32, 64, 128, 256, 512], embed_dim=256,
                 embed_dim_mask=256, input_dim_mask=4*256*256):
        """Initialize a time-dependent score-based network.

        Args:
            marginal_prob_std: A function that takes time t and gives the standard
                deviation of the perturbation kernel p_{0t}(x(t) | x(0)).
            channels: The number of channels for feature maps of each resolution.
            embed_dim: The dimensionality of Gaussian random feature embeddings.
            embed_dim_mask: The dimensionality of the conditioning-mask embedding.
            input_dim_mask: The number of elements in the flattened conditioning mask
                (channels * height * width).
        """
        super().__init__()
        # Gaussian random feature embedding layer for time
        self.time_embed = nn.Sequential(
            GaussianFourierProjection(embed_dim=embed_dim),
            nn.Linear(embed_dim, embed_dim)
        )
        # Flatten the conditioning mask and apply a linear layer
        self.cond_embed = nn.Sequential(
            nn.Flatten(),
            nn.Linear(input_dim_mask, embed_dim_mask)
        )
        # Encoding layers where the resolution decreases.
        # t_mod* layers consume the time embedding; y_mod* layers consume the mask embedding.
        self.conv1 = nn.Conv2d(4, channels[0], 3, stride=2, bias=False, padding=1)
        self.t_mod1 = Dense(embed_dim, channels[0])
        self.gnorm1 = nn.GroupNorm(4, num_channels=channels[0])
        self.conv1a = nn.Conv2d(channels[0], channels[0], 3, stride=1, bias=False, padding=1)
        self.t_mod1a = Dense(embed_dim, channels[0])
        self.gnorm1a = nn.GroupNorm(4, num_channels=channels[0])
        self.conv2 = nn.Conv2d(channels[0], channels[1], 3, stride=2, bias=False, padding=1)
        self.t_mod2 = Dense(embed_dim, channels[1])
        self.y_mod2 = Dense(embed_dim_mask, channels[1])
        self.gnorm2 = nn.GroupNorm(32, num_channels=channels[1])
        self.conv2a = nn.Conv2d(channels[1], channels[1], 3, stride=1, bias=False, padding=1)
        self.t_mod2a = Dense(embed_dim, channels[1])
        self.y_mod2a = Dense(embed_dim_mask, channels[1])
        self.gnorm2a = nn.GroupNorm(32, num_channels=channels[1])
        self.conv3 = nn.Conv2d(channels[1], channels[2], 3, stride=2, bias=False, padding=1)
        self.t_mod3 = Dense(embed_dim, channels[2])
        self.y_mod3 = Dense(embed_dim_mask, channels[2])
        self.gnorm3 = nn.GroupNorm(32, num_channels=channels[2])
        self.conv3a = nn.Conv2d(channels[2], channels[2], 3, stride=1, bias=False, padding=1)
        self.t_mod3a = Dense(embed_dim, channels[2])
        self.y_mod3a = Dense(embed_dim_mask, channels[2])
        self.gnorm3a = nn.GroupNorm(32, num_channels=channels[2])
        self.conv4 = nn.Conv2d(channels[2], channels[3], 3, stride=2, bias=False, padding=1)
        self.t_mod4 = Dense(embed_dim, channels[3])
        self.y_mod4 = Dense(embed_dim_mask, channels[3])
        self.gnorm4 = nn.GroupNorm(32, num_channels=channels[3])
        self.conv4a = nn.Conv2d(channels[3], channels[3], 3, stride=1, bias=False, padding=1)
        self.t_mod4a = Dense(embed_dim, channels[3])
        self.y_mod4a = Dense(embed_dim_mask, channels[3])
        self.gnorm4a = nn.GroupNorm(32, num_channels=channels[3])
        self.conv5 = nn.Conv2d(channels[3], channels[4], 3, stride=2, bias=False, padding=1)
        self.t_mod5 = Dense(embed_dim, channels[4])
        self.y_mod5 = Dense(embed_dim_mask, channels[4])
        self.gnorm5 = nn.GroupNorm(32, num_channels=channels[4])
        self.conv5a = nn.Conv2d(channels[4], channels[4], 3, stride=1, bias=False, padding=1)
        self.t_mod5a = Dense(embed_dim, channels[4])
        self.y_mod5a = Dense(embed_dim_mask, channels[4])
        self.gnorm5a = nn.GroupNorm(32, num_channels=channels[4])
        # Decoding layers where the resolution increases
        self.tconv5b = nn.Conv2d(channels[4], channels[4], 3, stride=1, bias=False, padding=1)
        self.t_mod6b = Dense(embed_dim, channels[4])
        self.y_mod6b = Dense(embed_dim_mask, channels[4])
        self.tgnorm5b = nn.GroupNorm(32, num_channels=channels[4])
        self.tconv5 = nn.ConvTranspose2d(2*channels[4], channels[3], 3, stride=2, bias=False, padding=1, output_padding=1)
        self.t_mod6 = Dense(embed_dim, channels[3])
        self.y_mod6 = Dense(embed_dim_mask, channels[3])
        self.tgnorm5 = nn.GroupNorm(32, num_channels=channels[3])
        self.tconv4b = nn.Conv2d(2*channels[3], channels[3], 3, stride=1, bias=False, padding=1)
        self.t_mod7b = Dense(embed_dim, channels[3])
        self.y_mod7b = Dense(embed_dim_mask, channels[3])
        self.tgnorm4b = nn.GroupNorm(32, num_channels=channels[3])
        self.tconv4 = nn.ConvTranspose2d(2*channels[3], channels[2], 3, stride=2, bias=False, padding=1, output_padding=1)
        self.t_mod7 = Dense(embed_dim, channels[2])
        self.y_mod7 = Dense(embed_dim_mask, channels[2])
        self.tgnorm4 = nn.GroupNorm(32, num_channels=channels[2])
        self.tconv3b = nn.Conv2d(2*channels[2], channels[2], 3, stride=1, bias=False, padding=1)
        self.t_mod8b = Dense(embed_dim, channels[2])
        self.y_mod8b = Dense(embed_dim_mask, channels[2])
        self.tgnorm3b = nn.GroupNorm(32, num_channels=channels[2])
        self.tconv3 = nn.ConvTranspose2d(2*channels[2], channels[1], 3, stride=2, bias=False, padding=1, output_padding=1)
        self.t_mod8 = Dense(embed_dim, channels[1])
        self.y_mod8 = Dense(embed_dim_mask, channels[1])
        self.tgnorm3 = nn.GroupNorm(32, num_channels=channels[1])
        self.tconv2b = nn.Conv2d(2*channels[1], channels[1], 3, stride=1, bias=False, padding=1)
        self.t_mod9b = Dense(embed_dim, channels[1])
        self.y_mod9b = Dense(embed_dim_mask, channels[1])
        self.tgnorm2b = nn.GroupNorm(32, num_channels=channels[1])
        self.tconv2 = nn.ConvTranspose2d(2*channels[1], channels[0], 3, stride=2, bias=False, padding=1, output_padding=1)
        self.t_mod9 = Dense(embed_dim, channels[0])
        self.y_mod9 = Dense(embed_dim_mask, channels[0])
        self.tgnorm2 = nn.GroupNorm(32, num_channels=channels[0])
        self.tconv1b = nn.Conv2d(2*channels[0], channels[0], 3, stride=1, bias=False, padding=1)
        self.t_mod10b = Dense(embed_dim, channels[0])
        self.y_mod10b = Dense(embed_dim_mask, channels[0])
        self.tgnorm1b = nn.GroupNorm(32, num_channels=channels[0])
        self.tconv1 = nn.ConvTranspose2d(2*channels[0], channels[0], 3, stride=2, bias=False, padding=1, output_padding=1)
        self.t_mod10 = Dense(embed_dim, channels[0])
        self.y_mod10 = Dense(embed_dim_mask, channels[0])
        self.tgnorm1 = nn.GroupNorm(32, num_channels=channels[0])
        self.tconv0 = nn.ConvTranspose2d(channels[0], 4, 3, stride=1, padding=1, output_padding=0)
        # The swish activation function
        self.act = nn.SiLU()
        # The `marginal_prob_std` function with Lambda already bound (e.g. via functools.partial).
        self.marginal_prob_std = marginal_prob_std
    def forward(self, x, t, y=None):
        # Obtain the Gaussian random feature embedding for t
        embed = self.act(self.time_embed(t))
        # Embed the conditioning mask (y is required here despite the None default).
        y_embed = self.cond_embed(y)
        # Encoding path, downsampling
        h1 = self.conv1(x) + self.t_mod1(embed)
        h1 = self.act(self.gnorm1(h1))
        h1a = self.conv1a(h1) + self.t_mod1a(embed)
        h1a = self.act(self.gnorm1a(h1a))
        # 2nd conv
        h2 = self.conv2(h1a) + self.t_mod2(embed)
        h2 = h2 * self.y_mod2(y_embed)
        h2 = self.act(self.gnorm2(h2))
        h2a = self.conv2a(h2) + self.t_mod2a(embed)
        h2a = h2a * self.y_mod2a(y_embed)
        h2a = self.act(self.gnorm2a(h2a))
        # 3rd conv
        h3 = self.conv3(h2a) + self.t_mod3(embed)
        h3 = h3 * self.y_mod3(y_embed)
        h3 = self.act(self.gnorm3(h3))
        h3a = self.conv3a(h3) + self.t_mod3a(embed)
        h3a = h3a * self.y_mod3a(y_embed)
        h3a = self.act(self.gnorm3a(h3a))
        # 4th conv
        h4 = self.conv4(h3a) + self.t_mod4(embed)
        h4 = h4 * self.y_mod4(y_embed)
        h4 = self.act(self.gnorm4(h4))
        h4a = self.conv4a(h4) + self.t_mod4a(embed)
        h4a = h4a * self.y_mod4a(y_embed)
        h4a = self.act(self.gnorm4a(h4a))
        # 5th conv
        h5 = self.conv5(h4a) + self.t_mod5(embed)
        h5 = h5 * self.y_mod5(y_embed)
        h5 = self.act(self.gnorm5(h5))
        h5a = self.conv5a(h5) + self.t_mod5a(embed)
        h5a = h5a * self.y_mod5a(y_embed)
        h5a = self.act(self.gnorm5a(h5a))
        # Decoding path, upsampling
        h = self.tconv5b(h5a) + self.t_mod6b(embed)
        h = h * self.y_mod6b(y_embed)
        h = self.act(self.tgnorm5b(h))
        # Skip connection from the encoding path
        h = self.tconv5(torch.cat([h, h5], dim=1)) + self.t_mod6(embed)
        h = h * self.y_mod6(y_embed)
        h = self.act(self.tgnorm5(h))
        h = self.tconv4b(torch.cat([h, h4a], dim=1)) + self.t_mod7b(embed)
        h = h * self.y_mod7b(y_embed)
        h = self.act(self.tgnorm4b(h))
        h = self.tconv4(torch.cat([h, h4], dim=1)) + self.t_mod7(embed)
        h = h * self.y_mod7(y_embed)
        h = self.act(self.tgnorm4(h))
        h = self.tconv3b(torch.cat([h, h3a], dim=1)) + self.t_mod8b(embed)
        h = h * self.y_mod8b(y_embed)
        h = self.act(self.tgnorm3b(h))
        h = self.tconv3(torch.cat([h, h3], dim=1)) + self.t_mod8(embed)
        h = h * self.y_mod8(y_embed)
        h = self.act(self.tgnorm3(h))
        h = self.tconv2b(torch.cat([h, h2a], dim=1)) + self.t_mod9b(embed)
        h = h * self.y_mod9b(y_embed)
        h = self.act(self.tgnorm2b(h))
        h = self.tconv2(torch.cat([h, h2], dim=1)) + self.t_mod9(embed)
        h = h * self.y_mod9(y_embed)
        h = self.act(self.tgnorm2(h))
        h = self.tconv1b(torch.cat([h, h1a], dim=1)) + self.t_mod10b(embed)
        h = h * self.y_mod10b(y_embed)
        h = self.act(self.tgnorm1b(h))
        h = self.tconv1(torch.cat([h, h1], dim=1)) + self.t_mod10(embed)
        h = h * self.y_mod10(y_embed)
        h = self.act(self.tgnorm1(h))
        h = self.tconv0(h)
        # Normalize output
        h = h / self.marginal_prob_std(t)[:, None, None, None]
        return h
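
# Shape sanity check (illustrative sketch, not part of the original file). With the default
# channels, a 256x256 input passes through five stride-2 convolutions
# (256 -> 128 -> 64 -> 32 -> 16 -> 8) and is upsampled back to 256x256 with 4 output channels.
# The placeholder std function below is an assumption used only for this check:
#
#   std_fn = lambda t: torch.ones_like(t)    # placeholder for the real marginal_prob_std
#   net = UNet(marginal_prob_std=std_fn)
#   x = torch.randn(2, 4, 256, 256)
#   t = torch.rand(2)
#   y = torch.randn(2, 4, 256, 256)           # conditioning mask, flattened by cond_embed
#   out = net(x, t, y=y)                      # (2, 4, 256, 256)
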
def marginal_prob_std(t, Lambda, device='cpu'):
    r"""Compute the standard deviation of $p_{0t}(x(t) | x(0))$.

    Args:
        t: A vector of time steps.
        Lambda: The $\lambda$ in our SDE.

    Returns:
        std: The standard deviation.
    """
    t = t.to(device)
    # For the SDE dx = Lambda^t dw, Var[x(t) | x(0)] = \int_0^t Lambda^{2s} ds
    #                                                = (Lambda^{2t} - 1) / (2 log Lambda).
    std = torch.sqrt((Lambda**(2 * t) - 1.) / 2. / np.log(Lambda))
    return std
def diffusion_coeff(t, Lambda, device='cpu'):
    r"""Compute the diffusion coefficient of our SDE.

    Args:
        t: A vector of time steps.
        Lambda: The $\lambda$ in our SDE.

    Returns:
        diff_coeff: The vector of diffusion coefficients.
    """
    diff_coeff = Lambda**t
    return diff_coeff.to(device)
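
# Binding Lambda (illustrative sketch): `UNet.__init__` expects `marginal_prob_std` with
# Lambda already fixed. One common way to do that (assumed here, not shown in the original
# file; the Lambda value is also an assumption) is `functools.partial`:
#
#   from functools import partial
#   std_fn = partial(marginal_prob_std, Lambda=25.0, device='cuda')
#   diff_fn = partial(diffusion_coeff, Lambda=25.0, device='cuda')
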
def Euler_Maruyama_sampler(score_model,
                           marginal_prob_std,
                           diffusion_coeff,
                           batch_size=1,
                           x_shape=(4, 256, 256),
                           num_steps=250,
                           device='cuda',
                           eps=1e-3,
                           y=None):
    """Generate samples from score-based models with the Euler-Maruyama solver.

    Args:
        score_model: A PyTorch model that represents the time-dependent score-based model.
        marginal_prob_std: A function that gives the standard deviation of
            the perturbation kernel.
        diffusion_coeff: A function that gives the diffusion coefficient of the SDE.
        batch_size: The number of samples to generate by calling this function once.
        x_shape: The shape of a single sample, (channels, height, width).
        num_steps: The number of sampling steps.
            Equivalent to the number of discretized time steps.
        device: 'cuda' for running on GPUs, and 'cpu' for running on CPUs.
        eps: The smallest time step for numerical stability.
        y: The conditioning mask passed to the score model.

    Returns:
        Samples.
    """
    t = torch.ones(batch_size).to(device)
    r = torch.randn(batch_size, *x_shape).to(device)
    init_x = r * marginal_prob_std(t)[:, None, None, None]
    init_x = init_x.to(device)
    time_steps = torch.linspace(1., eps, num_steps).to(device)
    step_size = time_steps[0] - time_steps[1]
    x = init_x
    with torch.no_grad():
        for time_step in tqdm(time_steps):
            batch_time_step = torch.ones(batch_size, device=device) * time_step
            g = diffusion_coeff(batch_time_step)
            mean_x = x + (g**2)[:, None, None, None] * score_model(x, batch_time_step, y=y) * step_size
            x = mean_x + torch.sqrt(step_size) * g[:, None, None, None] * torch.randn_like(x)
    # Do not include any noise in the last sampling step.
    return mean_x
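
# End-to-end usage sketch (illustrative, not part of the original file). The Lambda value,
# the zero conditioning mask, and the absence of a trained checkpoint are all assumptions;
# in practice the model would be trained or loaded from saved weights before sampling.
if __name__ == "__main__":
    from functools import partial

    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    Lambda = 25.0                                            # assumed SDE parameter
    std_fn = partial(marginal_prob_std, Lambda=Lambda, device=device)
    diff_fn = partial(diffusion_coeff, Lambda=Lambda, device=device)

    model = UNet(marginal_prob_std=std_fn).to(device)
    model.eval()

    cond_mask = torch.zeros(1, 4, 256, 256, device=device)  # placeholder conditioning mask
    samples = Euler_Maruyama_sampler(model, std_fn, diff_fn,
                                     batch_size=1,
                                     num_steps=50,           # fewer steps than default, for a quick check
                                     device=device,
                                     y=cond_mask)
    print(samples.shape)                                     # expected: torch.Size([1, 4, 256, 256])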