import torch
import torch.nn as nn


def conv(n_in, n_out, **kwargs):
    """3x3 convolution with padding that preserves spatial size."""
    return nn.Conv2d(n_in, n_out, 3, padding=1, **kwargs)


class Clamp(nn.Module):
    """Smoothly clamp values to roughly [-3, 3] with a scaled tanh."""

    def forward(self, x):
        return torch.tanh(x / 3) * 3


class Block(nn.Module):
    """Residual block: three 3x3 convs, with a 1x1 skip when channel counts differ."""

    def __init__(self, n_in, n_out):
        super().__init__()
        self.conv = nn.Sequential(
            conv(n_in, n_out), nn.ReLU(),
            conv(n_out, n_out), nn.ReLU(),
            conv(n_out, n_out),
        )
        self.skip = nn.Conv2d(n_in, n_out, 1, bias=False) if n_in != n_out else nn.Identity()
        self.fuse = nn.ReLU()

    def forward(self, x):
        return self.fuse(self.conv(x) + self.skip(x))


def Encoder(latent_channels=4):
    """Encode a 3-channel image into latents, downsampling 8x spatially."""
    return nn.Sequential(
        conv(3, 64), Block(64, 64),
        conv(64, 64, stride=2, bias=False), Block(64, 64), Block(64, 64), Block(64, 64),
        conv(64, 64, stride=2, bias=False), Block(64, 64), Block(64, 64), Block(64, 64),
        conv(64, 64, stride=2, bias=False), Block(64, 64), Block(64, 64), Block(64, 64),
        conv(64, latent_channels),
    )


def Decoder(latent_channels=4):
    """Decode latents back into a 3-channel image, upsampling 8x spatially."""
    return nn.Sequential(
        Clamp(), conv(latent_channels, 64), nn.ReLU(),
        Block(64, 64), nn.Upsample(scale_factor=2), conv(64, 64, bias=False), nn.ReLU(),
        Block(64, 64), nn.Upsample(scale_factor=2), conv(64, 64, bias=False), nn.ReLU(),
        Block(64, 64), nn.Upsample(scale_factor=2), conv(64, 64, bias=False), nn.ReLU(),
        Block(64, 64), conv(64, 3),
    )


class Model(nn.Module):
    """Tiny autoencoder wrapping a frozen encoder/decoder pair."""

    latent_magnitude = 3
    latent_shift = 0.5

    def __init__(self, encoder_path="encoder.pth", decoder_path="decoder.pth", latent_channels=None):
        super().__init__()
        if latent_channels is None:
            latent_channels = self.guess_latent_channels(str(encoder_path))
        self.encoder = Encoder(latent_channels)
        self.decoder = Decoder(latent_channels)
        if encoder_path is not None:
            self._load_checkpoint(self.encoder, encoder_path, "encoder")
        if decoder_path is not None:
            self._load_checkpoint(self.decoder, decoder_path, "decoder")
        # The autoencoder is used as a frozen component; no gradients needed.
        self.encoder.requires_grad_(False)
        self.decoder.requires_grad_(False)

    @staticmethod
    def _load_checkpoint(module, path, prefix):
        """Partially load a checkpoint into `module`, skipping mismatched keys."""
        state_dict = torch.load(path, map_location="cpu", weights_only=True)
        target = module.state_dict()
        # Keep only entries whose name (minus the "<prefix>." namespace) and
        # shape match this module's state dict. Note str.removeprefix, not
        # str.strip: strip removes any of the given *characters* from both
        # ends rather than a leading prefix.
        filtered = {
            k.removeprefix(f"{prefix}."): v
            for k, v in state_dict.items()
            if k.removeprefix(f"{prefix}.") in target
            and v.size() == target[k.removeprefix(f"{prefix}.")].size()
        }
        print(f"loaded {len(filtered)} of {len(target)} {prefix} keys")
        module.load_state_dict(filtered, strict=False)

    def guess_latent_channels(self, encoder_path):
        """Infer the latent channel count from the checkpoint filename."""
        if "taef1" in encoder_path:
            return 16
        if "taesd3" in encoder_path:
            return 16
        return 4

    @staticmethod
    def scale_latents(x):
        """Map raw latents from roughly [-latent_magnitude, latent_magnitude] to [0, 1]."""
        return x.div(2 * Model.latent_magnitude).add(Model.latent_shift).clamp(0, 1)

    @staticmethod
    def unscale_latents(x):
        """Invert scale_latents: map [0, 1] values back to raw latent range."""
        return x.sub(Model.latent_shift).mul(2 * Model.latent_magnitude)

    def forward(self, x, return_latent=False):
        """Round-trip x through the autoencoder; optionally also return the latent."""
        latent = self.encoder(x)
        out = self.decoder(latent).clamp(0, 1)
        if return_latent:
            return out, latent
        return out
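
# --- Usage sketch (illustrative, not part of the module's API) ---
# A minimal example of round-tripping an image batch through the autoencoder.
# Running without checkpoint paths is an assumption for illustration (the
# weights stay randomly initialized); the 256x256 input size is also assumed.
# Any height/width divisible by 8 works, since the encoder downsamples 8x.
if __name__ == "__main__":
    model = Model(encoder_path=None, decoder_path=None)
    x = torch.rand(1, 3, 256, 256)  # batch of RGB images in [0, 1]
    with torch.no_grad():
        recon, latent = model(x, return_latent=True)
    print(latent.shape)  # torch.Size([1, 4, 32, 32]) with the default 4 latent channels
    print(recon.shape)   # torch.Size([1, 3, 256, 256])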