import torch
import torch.nn as nn

def conv(n_in, n_out, **kwargs):
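    # 3x3 convolution with padding=1 so spatial resolution is preserved.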
    return nn.Conv2d(n_in, n_out, 3, padding=1, **kwargs)

class Clamp(nn.Module):
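    """Soft clamp: smoothly limits activations to (-3, 3) via a scaled tanh."""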
    def forward(self, x):
        return torch.tanh(x / 3) * 3

class Block(nn.Module):
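    """Residual block: three 3x3 convs with ReLUs plus an identity (or 1x1 conv) skip, fused by a final ReLU."""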
    def __init__(self, n_in, n_out):
        super().__init__()
        self.conv = nn.Sequential(conv(n_in, n_out), nn.ReLU(), conv(n_out, n_out), nn.ReLU(), conv(n_out, n_out))
        self.skip = nn.Conv2d(n_in, n_out, 1, bias=False) if n_in != n_out else nn.Identity()
        self.fuse = nn.ReLU()
    def forward(self, x):
        return self.fuse(self.conv(x) + self.skip(x))

def Encoder(latent_channels=4):
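    """Encoder: three stride-2 stages downsample a 3-channel image by 8x into `latent_channels` channels."""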
    return nn.Sequential(
        conv(3, 64), Block(64, 64),
        conv(64, 64, stride=2, bias=False), Block(64, 64), Block(64, 64), Block(64, 64),
        conv(64, 64, stride=2, bias=False), Block(64, 64), Block(64, 64), Block(64, 64),
        conv(64, 64, stride=2, bias=False), Block(64, 64), Block(64, 64), Block(64, 64),
        conv(64, latent_channels),
    )

def Decoder(latent_channels=16):  # Adjusted to match expected input channels
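    """Decoder: soft-clamps the latent, then upsamples by 8x over three stages back to a 3-channel image."""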
    return nn.Sequential(
        Clamp(), 
        conv(latent_channels, 48),  # Reduced from 64 to 48 channels
        nn.ReLU(),
        Block(48, 48), Block(48, 48),  # Reduced number of blocks
        nn.Upsample(scale_factor=2), conv(48, 48, bias=False),
        Block(48, 48), Block(48, 48),  # Reduced number of blocks
        nn.Upsample(scale_factor=2), conv(48, 48, bias=False),
        Block(48, 48),  # Further reduction in blocks
        nn.Upsample(scale_factor=2), conv(48, 48, bias=False),
        Block(48, 48), 
        conv(48, 3),  # Final convolution to output channels
    )


class Model(nn.Module):
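    """Frozen encoder/decoder pair; weights are optionally loaded from separate checkpoint files."""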
    latent_magnitude = 3
    latent_shift = 0.5

    def __init__(self, encoder_path="encoder.pth", decoder_path="decoder.pth", latent_channels=None):
        super().__init__()
        if latent_channels is None:
            latent_channels = self.guess_latent_channels(str(encoder_path))
        self.encoder = Encoder(latent_channels)
        self.decoder = Decoder(latent_channels)
        
        if encoder_path is not None:
            encoder_state_dict = torch.load(encoder_path, map_location="cpu", weights_only=True)
            target = self.encoder.state_dict()
            # Drop the "encoder." prefix and keep only keys whose shapes match this architecture.
            filtered_state_dict = {
                k.removeprefix("encoder."): v
                for k, v in encoder_state_dict.items()
                if k.removeprefix("encoder.") in target and v.size() == target[k.removeprefix("encoder.")].size()
            }
            print(f"encoder: loaded {len(filtered_state_dict)} of {len(target)} keys")
            self.encoder.load_state_dict(filtered_state_dict, strict=False)

        if decoder_path is not None:
            decoder_state_dict = torch.load(decoder_path, map_location="cpu", weights_only=True)
            target = self.decoder.state_dict()
            # Drop the "decoder." prefix and keep only keys whose shapes match this architecture.
            filtered_state_dict = {
                k.removeprefix("decoder."): v
                for k, v in decoder_state_dict.items()
                if k.removeprefix("decoder.") in target and v.size() == target[k.removeprefix("decoder.")].size()
            }
            print(f"decoder: loaded {len(filtered_state_dict)} of {len(target)} keys")
            self.decoder.load_state_dict(filtered_state_dict, strict=False)

        # The loaded weights are used for inference only; freeze both halves.
        self.encoder.requires_grad_(False)
        self.decoder.requires_grad_(False)

    def guess_latent_channels(self, encoder_path):
        if "taef1" in encoder_path:
            return 16
        if "taesd3" in encoder_path:
            return 16
        return 4

    @staticmethod
    def scale_latents(x):
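        # Map raw latents from roughly [-latent_magnitude, latent_magnitude] into [0, 1] (e.g. for storing as an image).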
        return x.div(2 * Model.latent_magnitude).add(Model.latent_shift).clamp(0, 1)

    @staticmethod
    def unscale_latents(x):
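        # Inverse of scale_latents: map [0, 1] values back to the raw latent range.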
        return x.sub(Model.latent_shift).mul(2 * Model.latent_magnitude)

    def forward(self, x, return_latent=False):
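        # Encode, decode, and clamp the reconstruction to the valid [0, 1] image range.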
        latent = self.encoder(x)
        out = self.decoder(latent)
        if return_latent:
            return out.clamp(0, 1), latent
        return out.clamp(0, 1)
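

if __name__ == "__main__":
    # Minimal usage sketch (illustrative addition, not part of the pretrained pipeline): build the
    # model without loading any weights and round-trip a random image. Pass real checkpoint paths
    # as encoder_path / decoder_path to load pretrained weights instead.
    model = Model(encoder_path=None, decoder_path=None, latent_channels=4)
    model.eval()
    with torch.no_grad():
        image = torch.rand(1, 3, 256, 256)  # dummy RGB batch in [0, 1]
        recon, latent = model(image, return_latent=True)
    print("latent shape:", tuple(latent.shape))          # (1, 4, 32, 32): 8x spatial downsampling
    print("reconstruction shape:", tuple(recon.shape))   # (1, 3, 256, 256)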