"""Residual block components for diffusion models."""
from abc import abstractmethod
import torch
import torch.nn as nn
import torch.nn.functional as F
from src.NeuralNetwork import transformer
from src.Attention import Attention
from src.cond import cast
from src.sample import sampling_util

ops = cast.disable_weight_init


class TimestepBlock1(nn.Module):
    """Abstract timestep block."""
    @abstractmethod
    def forward(self, x: torch.Tensor, emb: torch.Tensor) -> torch.Tensor:
        pass


def forward_timestep_embed1(ts, x, emb, context=None, transformer_options=None, output_shape=None,
                            time_context=None, num_video_frames=None, image_only_indicator=None):
    """Apply each layer in ``ts`` in order, routing the timestep embedding, cross-attention
    context, and output shape to the layer types that accept them."""
    if transformer_options is None:  # avoid a shared mutable default; the dict is mutated below
        transformer_options = {}
    for layer in ts:
        if isinstance(layer, TimestepBlock1):
            x = layer(x, emb)
        elif isinstance(layer, transformer.SpatialTransformer):
            x = layer(x, context, transformer_options)
            if "transformer_index" in transformer_options:
                transformer_options["transformer_index"] += 1
        elif isinstance(layer, Upsample1):
            x = layer(x, output_shape=output_shape)
        else:
            x = layer(x)
    return x
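
# Usage sketch (hypothetical layer list, not taken from this module): ResBlock1 layers
# receive the timestep embedding, SpatialTransformer layers the cross-attention context,
# and anything else is called as a plain module:
#
#   layers = [ResBlock1(320, 1280, 0.0), transformer.SpatialTransformer(...), Upsample1(320, True)]
#   h = forward_timestep_embed1(layers, x, t_emb, context=cond, output_shape=skip.shape)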


class Upsample1(nn.Module):
    """Upsample layer with optional conv."""
    def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1, 
                 dtype=None, device=None, operations=ops):
        super().__init__()
        self.channels = channels
        self.out_channels = out_channels or channels
        self.use_conv = use_conv
        self.dims = dims
        if use_conv:
            self.conv = operations.conv_nd(dims, channels, self.out_channels, 3, 
                                           padding=padding, dtype=dtype, device=device)

    def forward(self, x, output_shape=None):
        if self.dims == 3:  # (N, C, T, H, W): upsample H and W only, keep T
            shape = [x.shape[2], x.shape[3] * 2, x.shape[4] * 2] if output_shape is None else [x.shape[2], output_shape[3], output_shape[4]]
        else:
            shape = [x.shape[2] * 2, x.shape[3] * 2] if output_shape is None else [output_shape[2], output_shape[3]]
        x = F.interpolate(x, size=shape, mode="nearest")
        return self.conv(x) if self.use_conv else x
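
# Shape sketch (illustrative sizes): nearest-neighbor upsampling doubles the spatial dims,
# and `output_shape` lets a UNet decoder match an encoder skip whose size is not exactly 2x:
#
#   up = Upsample1(64, use_conv=True)
#   up(torch.randn(1, 64, 16, 16)).shape                                # (1, 64, 32, 32)
#   up(torch.randn(1, 64, 16, 16), output_shape=(1, 64, 33, 33)).shape  # (1, 64, 33, 33)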


class Downsample1(nn.Module):
    """Downsample layer."""
    def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1,
                 dtype=None, device=None, operations=ops):
        super().__init__()
        self.channels = channels
        self.out_channels = out_channels or channels
        stride = 2 if dims != 3 else (1, 2, 2)
        self.op = operations.conv_nd(dims, channels, self.out_channels, 3, stride=stride, 
                                     padding=padding, dtype=dtype, device=device)

    def forward(self, x):
        return self.op(x)
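
# Shape sketch: the stride-2 conv halves H and W (for dims == 3 the (1, 2, 2) stride
# leaves the leading temporal dim untouched):
#
#   down = Downsample1(64, use_conv=True)
#   down(torch.randn(1, 64, 32, 32)).shape   # torch.Size([1, 64, 16, 16])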


class ResBlock1(TimestepBlock1):
    """Residual block with timestep embedding."""
    def __init__(self, channels, emb_channels, dropout, out_channels=None, use_conv=False,
                 use_scale_shift_norm=False, dims=2, use_checkpoint=False, up=False, down=False,
                 kernel_size=3, exchange_temb_dims=False, skip_t_emb=False, 
                 dtype=None, device=None, operations=ops):
        super().__init__()
        self.out_channels = out_channels or channels
        self.use_checkpoint = use_checkpoint
        self.use_scale_shift_norm = use_scale_shift_norm
        self.skip_t_emb = skip_t_emb
        padding = kernel_size // 2

        self.in_layers = nn.Sequential(
            operations.GroupNorm(32, channels, dtype=dtype, device=device),
            nn.SiLU(),
            operations.conv_nd(dims, channels, self.out_channels, kernel_size, padding=padding, dtype=dtype, device=device))
        
        self.emb_layers = nn.Sequential(
            nn.SiLU(),
            operations.Linear(emb_channels, 2 * self.out_channels if use_scale_shift_norm else self.out_channels, 
                            dtype=dtype, device=device))
        
        self.out_layers = nn.Sequential(
            operations.GroupNorm(32, self.out_channels, dtype=dtype, device=device),
            nn.SiLU(),
            nn.Dropout(p=dropout),
            operations.conv_nd(dims, self.out_channels, self.out_channels, kernel_size, padding=padding, dtype=dtype, device=device))
        
        self.skip_connection = (nn.Identity() if self.out_channels == channels 
                               else operations.conv_nd(dims, channels, self.out_channels, 1, dtype=dtype, device=device))

    def forward(self, x, emb):
        return sampling_util.checkpoint(self._forward, (x, emb), self.parameters(), self.use_checkpoint)

    def _forward(self, x, emb):
        h = self.in_layers(x)
        if self.skip_t_emb:
            return self.skip_connection(x) + self.out_layers(h)
        emb_out = self.emb_layers(emb).type(h.dtype)
        while len(emb_out.shape) < len(h.shape):
            emb_out = emb_out[..., None]
        if self.use_scale_shift_norm:
            # emb_layers produced 2 * out_channels: FiLM-style scale/shift around the out-norm.
            scale, shift = torch.chunk(emb_out, 2, dim=1)
            h = self.out_layers[0](h) * (1 + scale) + shift
            h = self.out_layers[1:](h)
        else:
            h = self.out_layers(h + emb_out)
        return self.skip_connection(x) + h
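
# Usage sketch (illustrative sizes): `emb` must have `emb_channels` features; spatial
# dims are preserved and channels map to `out_channels` (here left at `channels`):
#
#   block = ResBlock1(channels=64, emb_channels=256, dropout=0.1)
#   block(torch.randn(2, 64, 32, 32), torch.randn(2, 256)).shape   # (2, 64, 32, 32)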


class ResnetBlock(nn.Module):
    """VAE-style ResNet block."""
    def __init__(self, *, in_channels, out_channels=None, conv_shortcut=False, dropout, temb_channels=512):
        super().__init__()
        out_channels = out_channels or in_channels
        self.in_channels, self.out_channels = in_channels, out_channels
        self.use_conv_shortcut = conv_shortcut

        self.norm1 = Attention.Normalize(in_channels)
        self.conv1 = ops.Conv2d(in_channels, out_channels, 3, 1, 1)
        self.temb_proj = ops.Linear(temb_channels, out_channels) if temb_channels > 0 else None
        self.norm2 = Attention.Normalize(out_channels)
        self.dropout = nn.Dropout(dropout, inplace=True)
        self.conv2 = ops.Conv2d(out_channels, out_channels, 3, 1, 1)
        if in_channels != out_channels:
            if conv_shortcut:
                self.conv_shortcut = ops.Conv2d(in_channels, out_channels, 3, 1, 1)
            else:
                self.nin_shortcut = ops.Conv2d(in_channels, out_channels, 1)
        self.swish = nn.SiLU(inplace=True)

    def forward(self, x, temb):
        h = self.swish(self.norm1(x))
        h = self.conv1(h)
        if temb is not None and self.temb_proj is not None:
            # Inject the timestep embedding as a per-channel bias (standard VAE/LDM ResnetBlock).
            h = h + self.temb_proj(self.swish(temb))[:, :, None, None]
        h = self.dropout(self.swish(self.norm2(h)))
        h = self.conv2(h)
        if self.in_channels != self.out_channels:
            x = self.conv_shortcut(x) if self.use_conv_shortcut else self.nin_shortcut(x)
        return x + h
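
if __name__ == "__main__":
    # Minimal shape smoke test, assuming the project-internal imports above resolve;
    # channel and resolution values are illustrative, not taken from any real model config.
    x = torch.randn(1, 64, 32, 32)
    res = ResBlock1(channels=64, emb_channels=128, dropout=0.0)
    assert res(x, torch.randn(1, 128)).shape == x.shape
    vae = ResnetBlock(in_channels=64, out_channels=128, dropout=0.0, temb_channels=0)
    assert vae(x, None).shape == (1, 128, 32, 32)
    print("residual block smoke test passed")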