Create model.py
src/model.py (ADDED)
import torch as t
import torch.nn as nn


def C(n_in, n_out, **kwargs):
    """3x3 convolution with padding=1, so spatial size is preserved at stride 1."""
    return nn.Conv2d(n_in, n_out, 3, padding=1, **kwargs)


class Clamp(nn.Module):
    """Soft-clamps activations to roughly [-3, 3] with a scaled tanh."""
    def forward(self, x):
        return t.tanh(x / 3) * 3


class B(nn.Module):
    """Residual block: three 3x3 convs, with a 1x1 conv (or identity) skip path."""
    def __init__(self, n_in, n_out):
        super().__init__()
        self.conv = nn.Sequential(C(n_in, n_out), nn.ReLU(), C(n_out, n_out), nn.ReLU(), C(n_out, n_out))
        self.skip = nn.Conv2d(n_in, n_out, 1, bias=False) if n_in != n_out else nn.Identity()
        self.fuse = nn.ReLU()

    def forward(self, x):
        return self.fuse(self.conv(x) + self.skip(x))


def E(latent_channels=4):
    """Encoder: three stride-2 convs give an 8x spatial downsampling."""
    return nn.Sequential(
        C(3, 64), B(64, 64),
        C(64, 64, stride=2, bias=False), B(64, 64), B(64, 64), B(64, 64),
        C(64, 64, stride=2, bias=False), B(64, 64), B(64, 64), B(64, 64),
        C(64, 64, stride=2, bias=False), B(64, 64), B(64, 64), B(64, 64),
        C(64, latent_channels),
    )


def D(latent_channels=4):
    """Decoder: three 2x upsamples undo the encoder's 8x downsampling."""
    return nn.Sequential(
        Clamp(),
        C(latent_channels, 48), nn.ReLU(), B(48, 48), B(48, 48),
        nn.Upsample(scale_factor=2), C(48, 48, bias=False), B(48, 48), B(48, 48),
        nn.Upsample(scale_factor=2), C(48, 48, bias=False), B(48, 48),
        nn.Upsample(scale_factor=2), C(48, 48, bias=False), B(48, 48),
        C(48, 3),
    )


class M(nn.Module):
    """Frozen tiny autoencoder: encoder E and decoder D with pretrained weights."""
    lm, ls = 3, 0.5  # latent magnitude and shift, used by sl()/ul()

    def __init__(s, ep="encoder.pth", dp="decoder.pth", lc=None):
        super().__init__()
        if lc is None:
            lc = s.glc(str(ep))
        s.e, s.d = E(lc), D(lc)

        def f(sd, mod, pfx):
            # Strip the prefix from checkpoint keys (removeprefix, so only a
            # true prefix is removed) and keep only entries whose names and
            # shapes match the target module's own state dict.
            msd = mod.state_dict()
            f_sd = {k.removeprefix(pfx): v for k, v in sd.items()
                    if k.removeprefix(pfx) in msd and v.size() == msd[k.removeprefix(pfx)].size()}
            mod.load_state_dict(f_sd, strict=False)

        if ep: f(t.load(ep, map_location="cpu", weights_only=True), s.e, "encoder.")
        if dp: f(t.load(dp, map_location="cpu", weights_only=True), s.d, "decoder.")
        s.e.requires_grad_(False)  # inference-only: freeze both halves
        s.d.requires_grad_(False)

    def glc(s, ep):
        # Guess the latent channel count from the checkpoint filename.
        return 16 if "taef1" in ep or "taesd3" in ep else 4

    @staticmethod
    def sl(x):
        # Scale raw latents from roughly [-lm, lm] into [0, 1].
        return x.div(2 * M.lm).add(M.ls).clamp(0, 1)

    @staticmethod
    def ul(x):
        # Inverse of sl(): map [0, 1] latents back to the raw range.
        return x.sub(M.ls).mul(2 * M.lm)

    def forward(s, x, rl=False):
        l = s.e(x)  # encode once and reuse the latent for decoding
        o = s.d(l).clamp(0, 1)
        return (o, l) if rl else o
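
For reference, a minimal usage sketch, assuming src/model.py is importable as model and that matching encoder.pth / decoder.pth checkpoints exist on disk (the paths and shapes below are illustrative, not part of the commit):

import torch as t
from model import M  # assumes src/ is on the import path

ae = M(ep="encoder.pth", dp="decoder.pth")  # illustrative checkpoint paths
img = t.rand(1, 3, 256, 256)                # batch of RGB images in [0, 1]

with t.no_grad():
    recon, lat = ae(img, rl=True)  # rl=True also returns the latent

print(lat.shape)    # torch.Size([1, 4, 32, 32]): 8x downsampling from E
print(recon.shape)  # torch.Size([1, 3, 256, 256])

# sl()/ul() pack raw latents into [0, 1] and back, e.g. for storing them
# as images; M.ul(M.sl(x)) matches x except where clamping kicked in.
packed = M.sl(lat)
restored = M.ul(packed)

Since both halves are frozen in __init__, the no_grad context is only a belt-and-braces measure for inference.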