import torch
import torch.nn as nn
 
class ResidualBlock(nn.Module):
    """3x3 conv -> IN -> ReLU -> 3x3 conv -> IN, wrapped in an identity skip.

    Reflection padding keeps the convs size-preserving without the border
    artifacts that zero padding tends to introduce in image-to-image models.
    """
    def __init__(self, channels):
        super().__init__()
        self.block = nn.Sequential(
            nn.ReflectionPad2d(1),
            nn.Conv2d(channels, channels, 3),
            nn.InstanceNorm2d(channels),
            nn.ReLU(inplace=True),
            nn.ReflectionPad2d(1),
            nn.Conv2d(channels, channels, 3),
            nn.InstanceNorm2d(channels),
        )

    def forward(self, x):
        return x + self.block(x)

class SelfAttention(nn.Module):
    """SAGAN-style self-attention over spatial positions.

    gamma is initialised to zero, so the layer starts as an identity and the
    attention contribution is learned gradually during training.
    """
    def __init__(self, channels):
        super().__init__()
        self.query = nn.Conv2d(channels, channels // 8, 1)
        self.key   = nn.Conv2d(channels, channels // 8, 1)
        self.value = nn.Conv2d(channels, channels, 1)
        self.gamma = nn.Parameter(torch.zeros(1))

    def forward(self, x):
        B, C, H, W = x.shape
        q = self.query(x).flatten(2)   # (B, C//8, H*W)
        k = self.key(x).flatten(2)     # (B, C//8, H*W)
        v = self.value(x).flatten(2)   # (B, C, H*W)
        # Attention weights over all spatial positions: (B, H*W, H*W).
        attn = torch.softmax(torch.bmm(q.transpose(1, 2), k), dim=-1)
        out = torch.bmm(v, attn.transpose(1, 2)).view(B, C, H, W)
        return x + self.gamma * out
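
# Note: the attention map is (H*W) x (H*W) per batch item. At the bottleneck
# of a 256x256 input (a 64x64 feature map) that is a 4096x4096 matrix (~64 MB
# in float32), so this layer dominates memory at higher resolutions.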

class ResNetGenerator(nn.Module):
    """CycleGAN-style ResNet generator with a single self-attention layer."""
    def __init__(self, in_channels=3, out_channels=3, n_filters=64, n_res_blocks=9):
        super().__init__()
        model = [
            # Stem: size-preserving 7x7 conv at full resolution.
            nn.ReflectionPad2d(3),
            nn.Conv2d(in_channels, n_filters, 7),
            nn.InstanceNorm2d(n_filters),
            nn.ReLU(inplace=True),

            # Two stride-2 convs downsample 4x overall and widen to n_filters*4.
            nn.Conv2d(n_filters, n_filters*2, 3, stride=2, padding=1),
            nn.InstanceNorm2d(n_filters*2),
            nn.ReLU(inplace=True),

            nn.Conv2d(n_filters*2, n_filters*4, 3, stride=2, padding=1),
            nn.InstanceNorm2d(n_filters*4),
            nn.ReLU(inplace=True),
        ]

        # Residual blocks and self-attention operate at the bottleneck resolution.
        for _ in range(n_res_blocks):
            model.append(ResidualBlock(n_filters*4))

        model.append(SelfAttention(n_filters*4))

        model += [
            # Upsample-then-conv rather than transposed conv, which avoids
            # checkerboard artifacts in the output.
            nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False),
            nn.Conv2d(n_filters*4, n_filters*2, 3, padding=1),
            nn.InstanceNorm2d(n_filters*2),
            nn.ReLU(inplace=True),

            nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False),
            nn.Conv2d(n_filters*2, n_filters, 3, padding=1),
            nn.InstanceNorm2d(n_filters),
            nn.ReLU(inplace=True),

            # Output head: 7x7 conv back to image channels, tanh -> [-1, 1].
            nn.ReflectionPad2d(3),
            nn.Conv2d(n_filters, out_channels, 7),
            nn.Tanh()
        ]

        self.model = nn.Sequential(*model)

    def forward(self, x):
        return self.model(x)
 
@torch.no_grad()
def load_generator(path, device="cpu"):
    """Build a ResNetGenerator, load weights from `path`, and return it in eval mode."""
    gen = ResNetGenerator()
    state_dict = torch.load(path, map_location="cpu")
    # Cast to float32 in case the checkpoint was saved in half precision.
    state_dict = {k: v.float() for k, v in state_dict.items()}
    gen.load_state_dict(state_dict)
    gen.to(device).eval()
    return gen
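
# A minimal smoke test, assuming no trained checkpoint is at hand: build the
# generator with random weights and check that spatial dimensions survive the
# round trip. The checkpoint path in the commented-out line is hypothetical.
if __name__ == "__main__":
    gen = ResNetGenerator()
    x = torch.randn(1, 3, 256, 256)  # H and W must be divisible by 4 (two stride-2 convs)
    with torch.no_grad():
        y = gen(x)
    assert y.shape == x.shape, f"expected {tuple(x.shape)}, got {tuple(y.shape)}"
    print("output shape:", tuple(y.shape))

    # gen = load_generator("weights/generator.pth", device="cuda")  # hypothetical path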