Update model.py
Browse files
model.py
CHANGED
|
@@ -1,49 +1,42 @@
|
|
| 1 |
import torch
|
| 2 |
import torch.nn as nn
|
| 3 |
-
from .positional_encoding import
|
| 4 |
-
|
| 5 |
|
| 6 |
class RealtimeTTS(nn.Module):
|
| 7 |
-
def __init__(self, config
|
| 8 |
super().__init__()
|
| 9 |
|
| 10 |
-
self.embedding = nn.Embedding(
|
| 11 |
-
|
| 12 |
-
|
| 13 |
-
nn.TransformerEncoderLayer(
|
| 14 |
-
d_model=config.d_model,
|
| 15 |
-
nhead=config.n_heads,
|
| 16 |
-
batch_first=True
|
| 17 |
-
),
|
| 18 |
-
num_layers=config.num_encoder_layers
|
| 19 |
)
|
| 20 |
|
| 21 |
-
self.
|
| 22 |
-
|
| 23 |
-
|
| 24 |
-
nn.TransformerDecoderLayer(
|
| 25 |
-
d_model=config.d_model,
|
| 26 |
-
nhead=config.n_heads,
|
| 27 |
-
batch_first=True
|
| 28 |
-
),
|
| 29 |
-
num_layers=config.num_decoder_layers
|
| 30 |
)
|
| 31 |
|
| 32 |
-
|
| 33 |
-
|
| 34 |
-
|
| 35 |
-
|
| 36 |
-
|
| 37 |
-
|
| 38 |
-
# Text encoding
|
| 39 |
-
memory = self.encoder(x)
|
| 40 |
-
|
| 41 |
-
# Frame positional encoding
|
| 42 |
-
mel_inputs = self.frame_pe(mel_inputs)
|
| 43 |
|
| 44 |
-
|
| 45 |
-
|
|
|
|
|
|
|
| 46 |
|
| 47 |
-
|
|
|
|
|
|
|
|
|
|
| 48 |
|
| 49 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
import torch
|
| 2 |
import torch.nn as nn
|
| 3 |
+
from .positional_encoding import PositionalEncoding
|
| 4 |
+
|
| 5 |
|
| 6 |
class RealtimeTTS(nn.Module):
    """Text-to-mel-spectrogram model.

    Pipeline: token ids -> embedding -> positional encoding -> Transformer
    encoder -> linear projection to mel bins.

    Expected ``config`` attributes: ``vocab_size``, ``d_model``,
    ``max_seq_len``, ``nhead``, ``dim_feedforward``, ``num_layers``, and
    optionally ``n_mels`` (defaults to 80, the previous hard-coded value).
    """

    def __init__(self, config):
        super().__init__()

        # Token embedding: vocabulary ids -> d_model-dimensional vectors.
        self.embedding = nn.Embedding(
            config.vocab_size,
            config.d_model,
        )

        # Positional encoding over the token axis (project-local module;
        # presumably sinusoidal — confirm in positional_encoding.py).
        self.positional_encoding = PositionalEncoding(
            config.d_model,
            config.max_seq_len,
        )

        # Encoder-only Transformer stack. batch_first=True means all tensors
        # are (batch, seq, d_model).
        encoder_layer = nn.TransformerEncoderLayer(
            d_model=config.d_model,
            nhead=config.nhead,
            dim_feedforward=config.dim_feedforward,
            batch_first=True,
        )
        self.transformer = nn.TransformerEncoder(
            encoder_layer,
            num_layers=config.num_layers,
        )

        # Project encoder states to mel bins. Previously hard-coded to 80;
        # now configurable via config.n_mels with the same default, so
        # existing configs keep working unchanged.
        self.output_linear = nn.Linear(
            config.d_model,
            getattr(config, "n_mels", 80),
        )

    def forward(self, tokens, mel_input=None):
        """Predict a mel spectrogram from token ids.

        Args:
            tokens: LongTensor of token ids, shape (batch, seq).
            mel_input: Unused. Retained (now with a default) for backward
                compatibility with callers written against the earlier
                encoder-decoder version of this model that consumed
                teacher-forced mel frames. New callers may omit it.

        Returns:
            Tensor of shape (batch, seq, n_mels) — one mel frame per token
            position (no length regulation is performed here).
        """
        x = self.embedding(tokens)
        x = self.positional_encoding(x)
        x = self.transformer(x)
        mel = self.output_linear(x)
        return mel