| | """HSIGene diffusion modules - UNet, ResBlock, etc. From openaimodel.""" |
| |
|
| | from abc import abstractmethod |
| | import math |
| |
|
| | import numpy as np |
| | import torch |
| | import torch.nn as nn |
| | import torch.nn.functional as F |
| |
|
| | from .utils import ( |
| | checkpoint, |
| | conv_nd, |
| | linear, |
| | zero_module, |
| | normalization, |
| | timestep_embedding, |
| | exists, |
| | ) |
| | from .attention import SpatialTransformer |
| |
|
| |
|
def avg_pool_nd(dims, *args, **kwargs):
    """Create a 1D, 2D, or 3D average pooling module."""
    if dims == 1:
        return nn.AvgPool1d(*args, **kwargs)
    elif dims == 2:
        return nn.AvgPool2d(*args, **kwargs)
    elif dims == 3:
        return nn.AvgPool3d(*args, **kwargs)
    raise ValueError(f"unsupported dimensions: {dims}")


def convert_module_to_f16(x):
    """No-op placeholder kept for interface parity with openaimodel; precision is handled via `self.dtype` in `UNetModel.forward`."""


def convert_module_to_f32(x):
    """No-op placeholder kept for interface parity with openaimodel; precision is handled via `self.dtype` in `UNetModel.forward`."""


class TimestepBlock(nn.Module):
    """Any module where forward() takes timestep embeddings as a second argument."""

    @abstractmethod
    def forward(self, x, emb):
        """Apply the module to `x` given `emb` timestep embeddings."""


class TimestepEmbedSequential(nn.Sequential, TimestepBlock):
    """Sequential module that passes timestep embeddings to children that support it."""

    def forward(self, x, emb, context=None):
        for layer in self:
            if isinstance(layer, TimestepBlock):
                x = layer(x, emb)
            elif isinstance(layer, SpatialTransformer):
                x = layer(x, context)
            else:
                x = layer(x)
        return x
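
# Dispatch sketch (illustrative; the layers below are assumptions, not an HSIGene
# config): a TimestepEmbedSequential forwards `emb` only to TimestepBlock children and
# `context` only to SpatialTransformer children, e.g.
#   block = TimestepEmbedSequential(conv_nd(2, 64, 64, 3, padding=1), nn.SiLU())
#   h = block(x, emb, context)  # both layers here ignore emb and context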


class Upsample(nn.Module):
    """Upsampling layer with optional convolution."""

    def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1):
        super().__init__()
        self.channels = channels
        self.out_channels = out_channels or channels
        self.use_conv = use_conv
        self.dims = dims
        if use_conv:
            self.conv = conv_nd(dims, self.channels, self.out_channels, 3, padding=padding)

    def forward(self, x):
        assert x.shape[1] == self.channels
        if self.dims == 3:
            x = F.interpolate(
                x, (x.shape[2], x.shape[3] * 2, x.shape[4] * 2), mode="nearest"
            )
        else:
            x = F.interpolate(x, scale_factor=2, mode="nearest")
        if self.use_conv:
            x = self.conv(x)
        return x


class Downsample(nn.Module):
    """Downsampling layer with optional convolution."""

    def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1):
        super().__init__()
        self.channels = channels
        self.out_channels = out_channels or channels
        self.use_conv = use_conv
        self.dims = dims
        # For 3D inputs, stride only over the two spatial axes and leave the first
        # (e.g. spectral/depth) axis unchanged, mirroring Upsample above.
        stride = 2 if dims != 3 else (1, 2, 2)
        if use_conv:
            self.op = conv_nd(
                dims, self.channels, self.out_channels, 3, stride=stride, padding=padding
            )
        else:
            assert self.channels == self.out_channels
            self.op = avg_pool_nd(dims, kernel_size=stride, stride=stride)

    def forward(self, x):
        assert x.shape[1] == self.channels
        return self.op(x)


class ResBlock(TimestepBlock):
    """Residual block with timestep conditioning."""

    def __init__(
        self,
        channels,
        emb_channels,
        dropout,
        out_channels=None,
        use_conv=False,
        use_scale_shift_norm=False,
        dims=2,
        use_checkpoint=False,
        up=False,
        down=False,
    ):
        super().__init__()
        self.channels = channels
        self.emb_channels = emb_channels
        self.dropout = dropout
        self.out_channels = out_channels or channels
        self.use_conv = use_conv
        self.use_checkpoint = use_checkpoint
        self.use_scale_shift_norm = use_scale_shift_norm

        self.in_layers = nn.Sequential(
            normalization(channels),
            nn.SiLU(),
            conv_nd(dims, channels, self.out_channels, 3, padding=1),
        )

        self.updown = up or down
        if up:
            self.h_upd = Upsample(channels, False, dims)
            self.x_upd = Upsample(channels, False, dims)
        elif down:
            self.h_upd = Downsample(channels, False, dims)
            self.x_upd = Downsample(channels, False, dims)
        else:
            self.h_upd = self.x_upd = nn.Identity()

        self.emb_layers = nn.Sequential(
            nn.SiLU(),
            linear(
                emb_channels,
                2 * self.out_channels if use_scale_shift_norm else self.out_channels,
            ),
        )
        self.out_layers = nn.Sequential(
            normalization(self.out_channels),
            nn.SiLU(),
            nn.Dropout(p=dropout),
            zero_module(conv_nd(dims, self.out_channels, self.out_channels, 3, padding=1)),
        )

        if self.out_channels == channels:
            self.skip_connection = nn.Identity()
        elif use_conv:
            self.skip_connection = conv_nd(dims, channels, self.out_channels, 3, padding=1)
        else:
            self.skip_connection = conv_nd(dims, channels, self.out_channels, 1)

    def forward(self, x, emb):
        return checkpoint(self._forward, (x, emb), self.parameters(), self.use_checkpoint)

    def _forward(self, x, emb):
        if self.updown:
            in_rest, in_conv = self.in_layers[:-1], self.in_layers[-1]
            h = in_rest(x)
            h = self.h_upd(h)
            x = self.x_upd(x)
            h = in_conv(h)
        else:
            h = self.in_layers(x)
        emb_out = self.emb_layers(emb).type(h.dtype)
        while len(emb_out.shape) < len(h.shape):
            emb_out = emb_out[..., None]
        if self.use_scale_shift_norm:
            out_norm, out_rest = self.out_layers[0], self.out_layers[1:]
            scale, shift = emb_out.chunk(2, dim=1)
            h = out_norm(h) * (1 + scale) + shift
            h = out_rest(h)
        else:
            h = h + emb_out
            h = self.out_layers(h)
        return self.skip_connection(x) + h
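
# Usage sketch (illustrative shapes, not an HSIGene configuration). With
# use_scale_shift_norm the projected embedding is split into (scale, shift) and applied
# as h = norm(h) * (1 + scale) + shift; otherwise it is simply added to h.
#   block = ResBlock(channels=64, emb_channels=256, dropout=0.0, out_channels=128)
#   out = block(torch.randn(2, 64, 32, 32), torch.randn(2, 256))  # -> (2, 128, 32, 32)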


class AttentionBlock(nn.Module):
    """Spatial self-attention block."""

    def __init__(
        self,
        channels,
        num_heads=1,
        num_head_channels=-1,
        use_checkpoint=False,
        use_new_attention_order=False,
    ):
        super().__init__()
        self.channels = channels
        if num_head_channels == -1:
            self.num_heads = num_heads
        else:
            assert channels % num_head_channels == 0
            self.num_heads = channels // num_head_channels
        self.use_checkpoint = use_checkpoint
        self.norm = normalization(channels)
        self.qkv = conv_nd(1, channels, channels * 3, 1)
        self.attention = (
            QKVAttention(self.num_heads)
            if use_new_attention_order
            else QKVAttentionLegacy(self.num_heads)
        )
        self.proj_out = zero_module(conv_nd(1, channels, channels, 1))

    def forward(self, x):
        return checkpoint(self._forward, (x,), self.parameters(), True)

    def _forward(self, x):
        b, c, *spatial = x.shape
        x = x.reshape(b, c, -1)
        qkv = self.qkv(self.norm(x))
        h = self.attention(qkv)
        h = self.proj_out(h)
        return (x + h).reshape(b, c, *spatial)
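
# Shape note: the block flattens every non-channel dimension into a single token axis,
# i.e. (B, C, *spatial) -> (B, C, prod(spatial)), attends over that axis, and restores
# the original shape on the residual output.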


class QKVAttentionLegacy(nn.Module):
    """QKV attention that splits heads before splitting qkv."""

    def __init__(self, n_heads):
        super().__init__()
        self.n_heads = n_heads

    def forward(self, qkv):
        bs, width, length = qkv.shape
        assert width % (3 * self.n_heads) == 0
        ch = width // (3 * self.n_heads)
        q, k, v = qkv.reshape(bs * self.n_heads, ch * 3, length).split(ch, dim=1)
        scale = 1 / math.sqrt(math.sqrt(ch))
        weight = torch.einsum("bct,bcs->bts", q * scale, k * scale)
        weight = torch.softmax(weight.float(), dim=-1).type(weight.dtype)
        a = torch.einsum("bts,bcs->bct", weight, v)
        return a.reshape(bs, -1, length)
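
# Shape walkthrough (hedged summary of the code above): qkv is (B, 3*H*C, T); it is
# reshaped to (B*H, 3*C, T) and split into q, k, v of shape (B*H, C, T). The 1/sqrt(C)
# attention scaling is applied as 1/C**0.25 to q and k separately for numerical stability.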


class QKVAttention(nn.Module):
    """QKV attention that splits qkv before splitting heads."""

    def __init__(self, n_heads):
        super().__init__()
        self.n_heads = n_heads

    def forward(self, qkv):
        bs, width, length = qkv.shape
        assert width % (3 * self.n_heads) == 0
        ch = width // (3 * self.n_heads)
        q, k, v = qkv.chunk(3, dim=1)
        scale = 1 / math.sqrt(math.sqrt(ch))
        weight = torch.einsum(
            "bct,bcs->bts",
            (q * scale).view(bs * self.n_heads, ch, length),
            (k * scale).view(bs * self.n_heads, ch, length),
        )
        weight = torch.softmax(weight.float(), dim=-1).type(weight.dtype)
        a = torch.einsum("bts,bcs->bct", weight, v.reshape(bs * self.n_heads, ch, length))
        return a.reshape(bs, -1, length)
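
# Note: this variant chunks qkv into q, k, v before splitting heads, so channels are
# assigned to heads in a different order than in QKVAttentionLegacy; the two orderings
# are therefore not interchangeable for a given trained checkpoint.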


class UNetModel(nn.Module):
    """Full UNet with attention and timestep embedding."""

    def __init__(
        self,
        image_size,
        in_channels,
        model_channels,
        out_channels,
        num_res_blocks,
        attention_resolutions,
        dropout=0,
        channel_mult=(1, 2, 4, 8),
        conv_resample=True,
        dims=2,
        num_classes=None,
        use_checkpoint=False,
        use_fp16=False,
        num_heads=-1,
        num_head_channels=-1,
        num_heads_upsample=-1,
        use_scale_shift_norm=False,
        resblock_updown=False,
        use_new_attention_order=False,
        use_spatial_transformer=False,
        transformer_depth=1,
        context_dim=None,
        n_embed=None,
        legacy=True,
        disable_self_attentions=None,
        num_attention_blocks=None,
        disable_middle_self_attn=False,
        use_linear_in_transformer=False,
    ):
        super().__init__()
        if use_spatial_transformer:
            assert context_dim is not None
        if context_dim is not None:
            assert use_spatial_transformer
        if hasattr(context_dim, "__iter__") and not isinstance(context_dim, (list, tuple)):
            context_dim = list(context_dim)

        if num_heads_upsample == -1:
            num_heads_upsample = num_heads
        if num_heads == -1:
            assert num_head_channels != -1, "either num_heads or num_head_channels must be set"
        if num_head_channels == -1:
            assert num_heads != -1, "either num_heads or num_head_channels must be set"

        self.image_size = image_size
        self.in_channels = in_channels
        self.model_channels = model_channels
        self.out_channels = out_channels
        if isinstance(num_res_blocks, int):
            self.num_res_blocks = len(channel_mult) * [num_res_blocks]
        else:
            assert len(num_res_blocks) == len(channel_mult)
            self.num_res_blocks = num_res_blocks

        self.attention_resolutions = attention_resolutions
        self.dropout = dropout
        self.channel_mult = channel_mult
        self.conv_resample = conv_resample
        self.num_classes = num_classes
        self.use_checkpoint = use_checkpoint
        self.dtype = torch.float16 if use_fp16 else torch.float32
        self.num_heads = num_heads
        self.num_head_channels = num_head_channels
        self.num_heads_upsample = num_heads_upsample
        self.predict_codebook_ids = n_embed is not None

        time_embed_dim = model_channels * 4
        self.time_embed = nn.Sequential(
            linear(model_channels, time_embed_dim),
            nn.SiLU(),
            linear(time_embed_dim, time_embed_dim),
        )

        if num_classes is not None:
            if isinstance(num_classes, int):
                self.label_emb = nn.Embedding(num_classes, time_embed_dim)
            elif num_classes == "continuous":
                self.label_emb = nn.Linear(1, time_embed_dim)
            else:
                raise ValueError(f"unsupported num_classes: {num_classes!r}")

        self.input_blocks = nn.ModuleList(
            [TimestepEmbedSequential(conv_nd(dims, in_channels, model_channels, 3, padding=1))]
        )
        self._feature_size = model_channels
        input_block_chans = [model_channels]
        ch = model_channels
        ds = 1

        for level, mult in enumerate(channel_mult):
            for nr in range(self.num_res_blocks[level]):
                layers = [
                    ResBlock(
                        ch,
                        time_embed_dim,
                        dropout,
                        out_channels=mult * model_channels,
                        dims=dims,
                        use_checkpoint=use_checkpoint,
                        use_scale_shift_norm=use_scale_shift_norm,
                    )
                ]
                ch = mult * model_channels
                if ds in attention_resolutions:
                    # Resolve the head count for this resolution: either a fixed number
                    # of heads or a fixed channel width per head.
                    if num_head_channels == -1:
                        n_heads = num_heads
                        dim_head = ch // num_heads
                    else:
                        n_heads = ch // num_head_channels
                        dim_head = num_head_channels
                    if legacy:
                        dim_head = ch // n_heads if use_spatial_transformer else num_head_channels
                    disabled_sa = (
                        disable_self_attentions[level]
                        if exists(disable_self_attentions)
                        else False
                    )
                    if not exists(num_attention_blocks) or nr < num_attention_blocks[level]:
                        attn_block = (
                            AttentionBlock(
                                ch,
                                use_checkpoint=use_checkpoint,
                                num_heads=n_heads,
                                num_head_channels=dim_head,
                                use_new_attention_order=use_new_attention_order,
                            )
                            if not use_spatial_transformer
                            else SpatialTransformer(
                                ch,
                                n_heads,
                                dim_head,
                                depth=transformer_depth,
                                context_dim=context_dim,
                                disable_self_attn=disabled_sa,
                                use_linear=use_linear_in_transformer,
                                use_checkpoint=use_checkpoint,
                            )
                        )
                        layers.append(attn_block)
                self.input_blocks.append(TimestepEmbedSequential(*layers))
                self._feature_size += ch
                input_block_chans.append(ch)
            if level != len(channel_mult) - 1:
                out_ch = ch
                down_block = (
                    ResBlock(
                        ch,
                        time_embed_dim,
                        dropout,
                        out_channels=out_ch,
                        dims=dims,
                        use_checkpoint=use_checkpoint,
                        use_scale_shift_norm=use_scale_shift_norm,
                        down=True,
                    )
                    if resblock_updown
                    else Downsample(ch, conv_resample, dims=dims, out_channels=out_ch)
                )
                self.input_blocks.append(TimestepEmbedSequential(down_block))
                ch = out_ch
                input_block_chans.append(ch)
                ds *= 2
                self._feature_size += ch

        # Same head-count resolution as in the encoder path above.
        if num_head_channels == -1:
            n_heads = num_heads
            dim_head = ch // num_heads
        else:
            n_heads = ch // num_head_channels
            dim_head = num_head_channels
        if legacy:
            dim_head = ch // n_heads if use_spatial_transformer else num_head_channels
        mid_attn = (
            AttentionBlock(
                ch,
                use_checkpoint=use_checkpoint,
                num_heads=n_heads,
                num_head_channels=dim_head,
                use_new_attention_order=use_new_attention_order,
            )
            if not use_spatial_transformer
            else SpatialTransformer(
                ch,
                n_heads,
                dim_head,
                depth=transformer_depth,
                context_dim=context_dim,
                disable_self_attn=disable_middle_self_attn,
                use_linear=use_linear_in_transformer,
                use_checkpoint=use_checkpoint,
            )
        )
        self.middle_block = TimestepEmbedSequential(
            ResBlock(
                ch,
                time_embed_dim,
                dropout,
                dims=dims,
                use_checkpoint=use_checkpoint,
                use_scale_shift_norm=use_scale_shift_norm,
            ),
            mid_attn,
            ResBlock(
                ch,
                time_embed_dim,
                dropout,
                dims=dims,
                use_checkpoint=use_checkpoint,
                use_scale_shift_norm=use_scale_shift_norm,
            ),
        )
        self._feature_size += ch

        self.output_blocks = nn.ModuleList([])
        for level, mult in list(enumerate(channel_mult))[::-1]:
            for i in range(self.num_res_blocks[level] + 1):
                ich = input_block_chans.pop()
                layers = [
                    ResBlock(
                        ch + ich,
                        time_embed_dim,
                        dropout,
                        out_channels=model_channels * mult,
                        dims=dims,
                        use_checkpoint=use_checkpoint,
                        use_scale_shift_norm=use_scale_shift_norm,
                    )
                ]
                ch = model_channels * mult
                if ds in attention_resolutions:
                    if num_head_channels == -1:
                        n_heads = num_heads
                        dim_head = ch // num_heads
                    else:
                        n_heads = ch // num_head_channels
                        dim_head = num_head_channels
                    if legacy:
                        dim_head = (
                            ch // n_heads if use_spatial_transformer else num_head_channels
                        )
                    disabled_sa = (
                        disable_self_attentions[level]
                        if exists(disable_self_attentions)
                        else False
                    )
                    if not exists(num_attention_blocks) or i < num_attention_blocks[level]:
                        attn_block = (
                            AttentionBlock(
                                ch,
                                use_checkpoint=use_checkpoint,
                                num_heads=num_heads_upsample,
                                num_head_channels=dim_head,
                                use_new_attention_order=use_new_attention_order,
                            )
                            if not use_spatial_transformer
                            else SpatialTransformer(
                                ch,
                                n_heads,
                                dim_head,
                                depth=transformer_depth,
                                context_dim=context_dim,
                                disable_self_attn=disabled_sa,
                                use_linear=use_linear_in_transformer,
                                use_checkpoint=use_checkpoint,
                            )
                        )
                        layers.append(attn_block)
                if level and i == self.num_res_blocks[level]:
                    out_ch = ch
                    up_block = (
                        ResBlock(
                            ch,
                            time_embed_dim,
                            dropout,
                            out_channels=out_ch,
                            dims=dims,
                            use_checkpoint=use_checkpoint,
                            use_scale_shift_norm=use_scale_shift_norm,
                            up=True,
                        )
                        if resblock_updown
                        else Upsample(ch, conv_resample, dims=dims, out_channels=out_ch)
                    )
                    layers.append(up_block)
                    ds //= 2
                self.output_blocks.append(TimestepEmbedSequential(*layers))
                self._feature_size += ch

        self.out = nn.Sequential(
            normalization(ch),
            nn.SiLU(),
            zero_module(conv_nd(dims, model_channels, out_channels, 3, padding=1)),
        )
        if self.predict_codebook_ids:
            self.id_predictor = nn.Sequential(
                normalization(ch),
                conv_nd(dims, model_channels, n_embed, 1),
            )

    def forward(self, x, timesteps=None, metadata=None, context=None, y=None, **kwargs):
        assert (y is not None) == (
            self.num_classes is not None
        ), "must specify y if and only if the model is class-conditional"
        hs = []
        t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False)
        emb = self.time_embed(t_emb)
        if metadata is not None:
            # Metadata conditioning: a single-element list/tuple is unwrapped before
            # being added to the timestep embedding.
            if isinstance(metadata, (list, tuple)) and len(metadata) == 1:
                metadata = metadata[0]
            emb = emb + metadata

        if self.num_classes is not None:
            assert y.shape[0] == x.shape[0]
            emb = emb + self.label_emb(y)

        h = x.type(self.dtype)
        for module in self.input_blocks:
            h = module(h, emb, context)
            hs.append(h)
        h = self.middle_block(h, emb, context)
        for module in self.output_blocks:
            h = torch.cat([h, hs.pop()], dim=1)
            h = module(h, emb, context)
        h = h.type(x.dtype)
        if self.predict_codebook_ids:
            return self.id_predictor(h)
        return self.out(h)
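

if __name__ == "__main__":
    # Minimal smoke test (a sketch; the hyperparameters below are illustrative
    # assumptions, not the configuration HSIGene actually trains with).
    model = UNetModel(
        image_size=32,
        in_channels=4,
        model_channels=64,
        out_channels=4,
        num_res_blocks=1,
        attention_resolutions=(2,),
        channel_mult=(1, 2),
        num_heads=4,
    )
    x = torch.randn(2, 4, 32, 32)
    t = torch.randint(0, 1000, (2,))
    print(model(x, timesteps=t).shape)  # expected: torch.Size([2, 4, 32, 32])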