# pytorch_diffusion + derived encoder decoder
import math

import numpy as np
import torch
import torch.nn as nn


def get_timestep_embedding(timesteps, embedding_dim):
    """
    Build sinusoidal timestep embeddings, as in Denoising Diffusion
    Probabilistic Models (adapted from Fairseq). This matches the
    implementation in tensor2tensor, but differs slightly from the
    description in Section 3.5 of "Attention Is All You Need".
    """
    assert len(timesteps.shape) == 1

    half_dim = embedding_dim // 2
    emb = math.log(10000) / (half_dim - 1)
    emb = torch.exp(torch.arange(half_dim, dtype=torch.float32) * -emb)
    emb = emb.to(device=timesteps.device)
    emb = timesteps.float()[:, None] * emb[None, :]
    emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
    if embedding_dim % 2 == 1:  # zero pad
        emb = torch.nn.functional.pad(emb, (0, 1, 0, 0))
    return emb
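
# Usage sketch (not part of the original file): for integer `timesteps` of
# shape (B,) and even `embedding_dim` D, the result has shape (B, D), with the
# sin components in the first half and the cos components in the second half:
#   t = torch.arange(4)
#   emb = get_timestep_embedding(t, 64)  # emb.shape == (4, 64)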


def nonlinearity(x):
    # swish / SiLU: x * sigmoid(x), equivalent to torch.nn.functional.silu(x)
    return x * torch.sigmoid(x)


class SpatialNorm(nn.Module):
    def __init__(
        self,
        f_channels,
        zq_channels,
        norm_layer=nn.GroupNorm,
        freeze_norm_layer=False,
        add_conv=False,
        **norm_layer_params,
    ):
        super().__init__()
        self.norm_layer = norm_layer(num_channels=f_channels, **norm_layer_params)
        if freeze_norm_layer:
            # note: parameters() is a method; iterating the bound method itself fails
            for p in self.norm_layer.parameters():
                p.requires_grad = False
        self.add_conv = add_conv
        if self.add_conv:
            self.conv = nn.Conv2d(zq_channels, zq_channels, kernel_size=3, stride=1, padding=1)
        self.conv_y = nn.Conv2d(zq_channels, f_channels, kernel_size=1, stride=1, padding=0)
        self.conv_b = nn.Conv2d(zq_channels, f_channels, kernel_size=1, stride=1, padding=0)

    def forward(self, f, zq):
        f_size = f.shape[-2:]
        zq = torch.nn.functional.interpolate(zq, size=f_size, mode="nearest")
        if self.add_conv:
            zq = self.conv(zq)
        norm_f = self.norm_layer(f)
        new_f = norm_f * self.conv_y(zq) + self.conv_b(zq)
        return new_f
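
# Usage sketch (assumed shapes, not from the original file): modulate a
# 64-channel feature map with a lower-resolution 4-channel quantized code;
# the code is upsampled to the feature resolution inside forward():
#   norm = SpatialNorm(f_channels=64, zq_channels=4,
#                      num_groups=32, eps=1e-6, affine=True)
#   f = torch.randn(1, 64, 32, 32)
#   zq = torch.randn(1, 4, 8, 8)
#   out = norm(f, zq)  # same shape as f: (1, 64, 32, 32)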


def Normalize(in_channels, zq_ch, add_conv):
    return SpatialNorm(
        in_channels,
        zq_ch,
        norm_layer=nn.GroupNorm,
        freeze_norm_layer=False,
        add_conv=add_conv,
        num_groups=32,
        eps=1e-6,
        affine=True,
    )


class Upsample(nn.Module):
    def __init__(self, in_channels, with_conv):
        super().__init__()
        self.with_conv = with_conv
        if self.with_conv:
            self.conv = torch.nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1)

    def forward(self, x):
        x = torch.nn.functional.interpolate(x, scale_factor=2.0, mode="nearest")
        if self.with_conv:
            x = self.conv(x)
        return x


class Downsample(nn.Module):
    def __init__(self, in_channels, with_conv):
        super().__init__()
        self.with_conv = with_conv
        if self.with_conv:
            # no asymmetric padding in torch conv, must do it ourselves
            self.conv = torch.nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=2, padding=0)

    def forward(self, x):
        if self.with_conv:
            pad = (0, 1, 0, 1)
            x = torch.nn.functional.pad(x, pad, mode="constant", value=0)
            x = self.conv(x)
        else:
            x = torch.nn.functional.avg_pool2d(x, kernel_size=2, stride=2)
        return x
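
# Padding-arithmetic sketch: Conv2d only supports symmetric padding, so the
# (0, 1, 0, 1) pad adds one column on the right and one row at the bottom
# only. E.g. an 8x8 input becomes 9x9 after padding, and the stride-2 3x3
# conv then yields floor((9 - 3) / 2) + 1 = 4, i.e. a 4x4 output.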


class ResnetBlock(nn.Module):
    def __init__(
        self,
        *,
        in_channels,
        out_channels=None,
        conv_shortcut=False,
        dropout,
        temb_channels=512,
        zq_ch=None,
        add_conv=False,
    ):
        super().__init__()
        self.in_channels = in_channels
        out_channels = in_channels if out_channels is None else out_channels
        self.out_channels = out_channels
        self.use_conv_shortcut = conv_shortcut

        self.norm1 = Normalize(in_channels, zq_ch, add_conv=add_conv)
        self.conv1 = torch.nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1)
        if temb_channels > 0:
            self.temb_proj = torch.nn.Linear(temb_channels, out_channels)
        self.norm2 = Normalize(out_channels, zq_ch, add_conv=add_conv)
        self.dropout = torch.nn.Dropout(dropout)
        self.conv2 = torch.nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1)
        if self.in_channels != self.out_channels:
            if self.use_conv_shortcut:
                self.conv_shortcut = torch.nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1)
            else:
                self.nin_shortcut = torch.nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0)

    def forward(self, x, temb, zq):
        h = x
        h = self.norm1(h, zq)
        h = nonlinearity(h)
        h = self.conv1(h)

        if temb is not None:
            h = h + self.temb_proj(nonlinearity(temb))[:, :, None, None]

        h = self.norm2(h, zq)
        h = nonlinearity(h)
        h = self.dropout(h)
        h = self.conv2(h)

        if self.in_channels != self.out_channels:
            if self.use_conv_shortcut:
                x = self.conv_shortcut(x)
            else:
                x = self.nin_shortcut(x)

        return x + h
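
# Usage sketch (assumed shapes, not from the original file): with
# temb_channels=0 the block takes temb=None, and zq drives both SpatialNorm
# layers; a 1x1 nin_shortcut matches the channel counts on the residual path:
#   block = ResnetBlock(in_channels=64, out_channels=128, dropout=0.0,
#                       temb_channels=0, zq_ch=4)
#   h = block(torch.randn(1, 64, 32, 32), None, torch.randn(1, 4, 8, 8))
#   # h.shape == (1, 128, 32, 32)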


class AttnBlock(nn.Module):
    def __init__(self, in_channels, zq_ch=None, add_conv=False):
        super().__init__()
        self.in_channels = in_channels

        self.norm = Normalize(in_channels, zq_ch, add_conv=add_conv)
        self.q = torch.nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0)
        self.k = torch.nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0)
        self.v = torch.nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0)
        self.proj_out = torch.nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0)

    def forward(self, x, zq):
        h_ = x
        h_ = self.norm(h_, zq)
        q = self.q(h_)
        k = self.k(h_)
        v = self.v(h_)

        # compute attention
        b, c, h, w = q.shape
        q = q.reshape(b, c, h * w)
        q = q.permute(0, 2, 1)  # b,hw,c
        k = k.reshape(b, c, h * w)  # b,c,hw
        w_ = torch.bmm(q, k)  # b,hw,hw    w_[b,i,j] = sum_c q[b,i,c] k[b,c,j]
        w_ = w_ * (int(c) ** (-0.5))
        w_ = torch.nn.functional.softmax(w_, dim=2)

        # attend to values
        v = v.reshape(b, c, h * w)
        w_ = w_.permute(0, 2, 1)  # b,hw,hw (first hw of k, second of q)
        h_ = torch.bmm(v, w_)  # b,c,hw (hw of q)    h_[b,c,j] = sum_i v[b,c,i] w_[b,i,j]
        h_ = h_.reshape(b, c, h, w)

        h_ = self.proj_out(h_)

        return x + h_
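
# Equivalence sketch (an observation, not part of the original file): up to
# the reshapes, this is single-head scaled dot-product attention over all
# h*w spatial positions, i.e. softmax(Q K^T / sqrt(c)) V. For flattened
# q_, k_, v_ of shape (b, h*w, c) it matches
#   out = torch.nn.functional.scaled_dot_product_attention(q_, k_, v_)
# followed by reshaping back to (b, c, h, w).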


class MOVQDecoder(nn.Module):
    def __init__(
        self,
        *,
        ch,
        out_ch,
        ch_mult=(1, 2, 4, 8),
        num_res_blocks,
        attn_resolutions,
        dropout=0.0,
        resamp_with_conv=True,
        in_channels,
        resolution,
        z_channels,
        give_pre_end=False,
        zq_ch=None,
        add_conv=False,
        **ignorekwargs,
    ):
        super().__init__()
        self.ch = ch
        self.temb_ch = 0
        self.num_resolutions = len(ch_mult)
        self.num_res_blocks = num_res_blocks
        self.resolution = resolution
        self.in_channels = in_channels
        self.give_pre_end = give_pre_end

        # compute in_ch_mult, block_in and curr_res at lowest res
        in_ch_mult = (1,) + tuple(ch_mult)  # unused below, kept from the reference code
        block_in = ch * ch_mult[self.num_resolutions - 1]
        curr_res = resolution // 2 ** (self.num_resolutions - 1)
        self.z_shape = (1, z_channels, curr_res, curr_res)
        print("Working with z of shape {} = {} dimensions.".format(self.z_shape, np.prod(self.z_shape)))

        # z to block_in
        self.conv_in = torch.nn.Conv2d(z_channels, block_in, kernel_size=3, stride=1, padding=1)

        # middle
        self.mid = nn.Module()
        self.mid.block_1 = ResnetBlock(
            in_channels=block_in,
            out_channels=block_in,
            temb_channels=self.temb_ch,
            dropout=dropout,
            zq_ch=zq_ch,
            add_conv=add_conv,
        )
        self.mid.attn_1 = AttnBlock(block_in, zq_ch, add_conv=add_conv)
        self.mid.block_2 = ResnetBlock(
            in_channels=block_in,
            out_channels=block_in,
            temb_channels=self.temb_ch,
            dropout=dropout,
            zq_ch=zq_ch,
            add_conv=add_conv,
        )

        # upsampling
        self.up = nn.ModuleList()
        for i_level in reversed(range(self.num_resolutions)):
            block = nn.ModuleList()
            attn = nn.ModuleList()
            block_out = ch * ch_mult[i_level]
            for i_block in range(self.num_res_blocks + 1):
                block.append(
                    ResnetBlock(
                        in_channels=block_in,
                        out_channels=block_out,
                        temb_channels=self.temb_ch,
                        dropout=dropout,
                        zq_ch=zq_ch,
                        add_conv=add_conv,
                    )
                )
                block_in = block_out
                if curr_res in attn_resolutions:
                    attn.append(AttnBlock(block_in, zq_ch, add_conv=add_conv))
            up = nn.Module()
            up.block = block
            up.attn = attn
            if i_level != 0:
                up.upsample = Upsample(block_in, resamp_with_conv)
                curr_res = curr_res * 2
            self.up.insert(0, up)  # prepend to get consistent order

        # end
        self.norm_out = Normalize(block_in, zq_ch, add_conv=add_conv)
        self.conv_out = torch.nn.Conv2d(block_in, out_ch, kernel_size=3, stride=1, padding=1)

    def forward(self, z, zq):
        # assert z.shape[1:] == self.z_shape[1:]
        self.last_z_shape = z.shape

        # timestep embedding
        temb = None

        # z to block_in
        h = self.conv_in(z)

        # middle
        h = self.mid.block_1(h, temb, zq)
        h = self.mid.attn_1(h, zq)
        h = self.mid.block_2(h, temb, zq)

        # upsampling
        for i_level in reversed(range(self.num_resolutions)):
            for i_block in range(self.num_res_blocks + 1):
                h = self.up[i_level].block[i_block](h, temb, zq)
                if len(self.up[i_level].attn) > 0:
                    h = self.up[i_level].attn[i_block](h, zq)
            if i_level != 0:
                h = self.up[i_level].upsample(h)

        # end
        if self.give_pre_end:
            return h

        h = self.norm_out(h, zq)
        h = nonlinearity(h)
        h = self.conv_out(h)
        return h

    def forward_with_features_output(self, z, zq):
        # assert z.shape[1:] == self.z_shape[1:]
        self.last_z_shape = z.shape

        # timestep embedding
        temb = None
        output_features = {}

        # z to block_in
        h = self.conv_in(z)
        output_features["conv_in"] = h

        # middle
        h = self.mid.block_1(h, temb, zq)
        output_features["mid_block_1"] = h
        h = self.mid.attn_1(h, zq)
        output_features["mid_attn_1"] = h
        h = self.mid.block_2(h, temb, zq)
        output_features["mid_block_2"] = h

        # upsampling
        for i_level in reversed(range(self.num_resolutions)):
            for i_block in range(self.num_res_blocks + 1):
                h = self.up[i_level].block[i_block](h, temb, zq)
                output_features[f"up_{i_level}_block_{i_block}"] = h
                if len(self.up[i_level].attn) > 0:
                    h = self.up[i_level].attn[i_block](h, zq)
                    output_features[f"up_{i_level}_attn_{i_block}"] = h
            if i_level != 0:
                h = self.up[i_level].upsample(h)
                output_features[f"up_{i_level}_upsample"] = h

        # end
        if self.give_pre_end:
            # also return the collected features so the return type is consistent
            return h, output_features

        h = self.norm_out(h, zq)
        output_features["norm_out"] = h
        h = nonlinearity(h)
        output_features["nonlinearity"] = h
        h = self.conv_out(h)
        output_features["conv_out"] = h
        return h, output_features
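

# A minimal usage sketch (the hyperparameters below are illustrative
# assumptions, not values from the original repo): the decoder maps a latent
# z back to an image while every normalization layer is spatially modulated
# by the quantized code zq.
if __name__ == "__main__":
    decoder = MOVQDecoder(
        ch=64,
        out_ch=3,
        ch_mult=(1, 2, 4),
        num_res_blocks=2,
        attn_resolutions=(16,),
        in_channels=3,
        resolution=64,
        z_channels=4,
        zq_ch=4,
    )
    z = torch.randn(1, 4, 16, 16)   # latent at resolution // 2**(len(ch_mult)-1)
    zq = torch.randn(1, 4, 16, 16)  # quantized code with zq_ch channels
    img = decoder(z, zq)            # -> (1, 3, 64, 64)
    print(img.shape)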