nambn0321 committed on
Commit
b68f83f
·
verified ·
1 Parent(s): 5473e0a

Delete models.py

Browse files
Files changed (1) hide show
  1. models.py +0 -88
models.py DELETED
@@ -1,88 +0,0 @@
1
- import torch
2
- import torch.nn as nn
3
- import torch.nn.functional as F
4
-
5
-
6
class ResBlock1(nn.Module):
    """Residual block of paired 1-D convolutions (HiFi-GAN style).

    Each residual step applies leaky-ReLU -> dilated conv -> leaky-ReLU ->
    dilation-1 conv, then adds the input back. All convs are weight-normalized
    and padded so the time dimension is preserved (stride 1, odd kernel).

    Generalized: accepts any number of dilations. The original hard-coded
    exactly three (`dilation[0..2]`), which raised IndexError for shorter
    tuples and silently ignored extra entries; for a 3-tuple the behavior
    is unchanged.

    Args:
        channels: number of input/output channels (unchanged by the block).
        kernel_size: conv kernel size; should be odd so padding keeps length.
        dilation: sequence of dilation factors, one per residual step.
    """

    def __init__(self, channels, kernel_size, dilation):
        super(ResBlock1, self).__init__()
        # One dilated conv per entry in `dilation` ...
        self.convs1 = nn.ModuleList([
            nn.utils.weight_norm(
                nn.Conv1d(channels, channels, kernel_size, 1, dilation=d,
                          padding=self.get_padding(kernel_size, d))
            )
            for d in dilation
        ])
        # ... each paired with a dilation-1 conv of the same kernel size.
        self.convs2 = nn.ModuleList([
            nn.utils.weight_norm(
                nn.Conv1d(channels, channels, kernel_size, 1, dilation=1,
                          padding=self.get_padding(kernel_size, 1))
            )
            for _ in dilation
        ])

    def get_padding(self, kernel_size, dilation):
        """Return 'same' padding for a stride-1 dilated conv with odd kernel."""
        # Equivalent to dilation * (kernel_size - 1) // 2.
        return (kernel_size * dilation - dilation) // 2

    def forward(self, x):
        """Apply all residual steps to x of shape (batch, channels, time)."""
        for c1, c2 in zip(self.convs1, self.convs2):
            xt = F.leaky_relu(x, 0.1)
            xt = c1(xt)
            xt = F.leaky_relu(xt, 0.1)
            xt = c2(xt)
            x = xt + x  # residual connection
        return x
37
-
38
-
39
class Generator(nn.Module):
    """HiFi-GAN-style waveform generator.

    A pre-conv projects the input features up to ``upsample_initial_channel``
    channels; a stack of transposed convs then upsamples in time, halving the
    channel count at each stage, with a bank of ``ResBlock1`` modules (whose
    outputs are averaged) after every stage; a post-conv collapses to a single
    channel and ``tanh`` bounds the waveform to [-1, 1].

    Args:
        config: mapping with keys "resblock_kernel_sizes",
            "resblock_dilation_sizes", "upsample_rates",
            "upsample_kernel_sizes", "model_in_dim",
            "upsample_initial_channel".
    """

    def __init__(self, config):
        super(Generator, self).__init__()
        kernel_sizes = config["resblock_kernel_sizes"]
        dilation_sizes = config["resblock_dilation_sizes"]
        rates = config["upsample_rates"]
        up_kernels = config["upsample_kernel_sizes"]
        initial = config["upsample_initial_channel"]

        self.num_kernels = len(kernel_sizes)
        self.num_upsamples = len(rates)

        self.conv_pre = nn.utils.weight_norm(
            nn.Conv1d(config["model_in_dim"], initial, 7, 1, padding=3)
        )

        self.ups = nn.ModuleList()
        self.resblocks = nn.ModuleList()
        for stage, (rate, kernel) in enumerate(zip(rates, up_kernels)):
            # Channels halve at every upsampling stage.
            in_ch = initial // (2 ** stage)
            out_ch = initial // (2 ** (stage + 1))
            self.ups.append(
                nn.utils.weight_norm(
                    nn.ConvTranspose1d(
                        in_ch, out_ch, kernel, rate, padding=(kernel - rate) // 2
                    )
                )
            )
            # One resblock per kernel size, laid out stage-major so forward
            # can index with stage * num_kernels + j.
            for j in range(self.num_kernels):
                self.resblocks.append(
                    ResBlock1(out_ch, kernel_sizes[j], dilation_sizes[j])
                )

        self.conv_post = nn.utils.weight_norm(
            nn.Conv1d(initial // (2 ** self.num_upsamples), 1, 7, 1, padding=3)
        )

    def forward(self, x):
        """Map features (batch, model_in_dim, T) to a waveform in [-1, 1]."""
        x = self.conv_pre(x)
        for stage in range(self.num_upsamples):
            x = self.ups[stage](F.leaky_relu(x, 0.1))
            # Average the outputs of this stage's resblock bank.
            base = stage * self.num_kernels
            acc = self.resblocks[base](x)
            for offset in range(1, self.num_kernels):
                acc = acc + self.resblocks[base + offset](x)
            x = acc / self.num_kernels
        x = F.leaky_relu(x)  # default slope here, matching the reference net
        x = self.conv_post(x)
        return torch.tanh(x)