Upload 11 files
Browse files

- .gitattributes +5 -0
- configs.zip +3 -0
- inferences.py +202 -0
- models.zip +3 -0
- test.py +58 -0
- train (1).py +171 -0
- transformer_transducer_epoch_1 +3 -0
- transformer_transducer_epoch_2 +3 -0
- transformer_transducer_epoch_3 +3 -0
- transformer_transducer_epoch_4 +3 -0
- transformer_transducer_epoch_5 +3 -0
- utils.zip +3 -0
.gitattributes
CHANGED
@@ -57,3 +57,8 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 # Video files - compressed
 *.mp4 filter=lfs diff=lfs merge=lfs -text
 *.webm filter=lfs diff=lfs merge=lfs -text
+transformer_transducer_epoch_1 filter=lfs diff=lfs merge=lfs -text
+transformer_transducer_epoch_2 filter=lfs diff=lfs merge=lfs -text
+transformer_transducer_epoch_3 filter=lfs diff=lfs merge=lfs -text
+transformer_transducer_epoch_4 filter=lfs diff=lfs merge=lfs -text
+transformer_transducer_epoch_5 filter=lfs diff=lfs merge=lfs -text
configs.zip
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:71c80cb87c6609745efdb44813307a3773ed31375a02f16f24eff67f91631040
+size 973
inferences.py
ADDED
@@ -0,0 +1,202 @@
+from typing import List, Optional, Tuple, Union
+
+import torch
+import torch.nn as nn
+from torch import Tensor
+from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
+
+class MultiHeadAtt(nn.Module):
+    """A module that implements the multi-head attention mechanism described in
+    https://arxiv.org/abs/1706.03762.
+
+    Args:
+        d_model (int): The dimensionality of the model.
+
+        h (int): The number of heads to use in the attention mechanism.
+
+        masking_value (float, optional): The value used for masking. Defaults
+        to -1e15.
+    """
+
+    def __init__(self, d_model: int, h: int, masking_value: float = -1e15) -> None:
+        super().__init__()
+        self.h = h
+        self.dk = d_model // h
+        self.d_model = d_model
+        self.masking_value = masking_value
+        assert d_model % h == 0, "d_model must be divisible by h"
+        self.query_fc = nn.Linear(in_features=d_model, out_features=d_model)
+        self.key_fc = nn.Linear(in_features=d_model, out_features=d_model)
+        self.value_fc = nn.Linear(in_features=d_model, out_features=d_model)
+        self.softmax = nn.Softmax(dim=-1)
+
+    def _reshape(self, x: Tensor) -> Tensor:
+        batch_size, max_len, _ = x.shape
+        x = x.view(batch_size, max_len, self.h, self.dk)
+        return x
+
+    def _mask(self, att: Tensor, key_mask: Tensor, query_mask: Tensor) -> Tensor:
+        key_max_len = key_mask.shape[-1]
+        query_max_len = query_mask.shape[-1]
+        key_mask = key_mask.repeat(1, query_max_len)
+        key_mask = key_mask.view(-1, query_max_len, key_max_len)
+        if query_mask.dim() != key_mask.dim():
+            query_mask = query_mask.unsqueeze(dim=-1)
+        mask = key_mask & query_mask
+        mask = mask.unsqueeze(dim=1)
+        return att.masked_fill(~mask, self.masking_value)
+
+    def perform_attention(
+        self,
+        key: Tensor,
+        query: Tensor,
+        value: Tensor,
+        key_mask: Optional[Tensor] = None,
+        query_mask: Optional[Tensor] = None,
+    ) -> Tensor:
+        """Performs multi-head attention by computing a weighted sum of the
+        values using queries and keys. The weights are computed as a softmax
+        over the dot products of the queries and keys for each attention head.
+        Optionally, attention can be masked using key and query masks.
+
+        Args:
+            key (Tensor): The key input tensor of shape [B, M, d]
+
+            query (Tensor): The query tensor of shape [B, M, d]
+
+            value (Tensor): The value tensor of shape [B, M, d]
+
+            key_mask (Tensor, optional): A boolean tensor of shape
+            [B, M] where True indicates that the corresponding key position
+            contains data, not padding, and should not be masked
+
+            query_mask (Tensor, optional): A boolean tensor of shape
+            [B, M] where True indicates that the corresponding query position
+            contains data, not padding, and should not be masked
+
+        Returns:
+            Tensor: The tensor of shape [B, M, d] resulting from the multi-head
+            attention computation.
+        """
+        key = self._reshape(key)  # B, M, h, dk
+        query = self._reshape(query)  # B, M, h, dk
+        value = self._reshape(value)  # B, M, h, dk
+        key = key.permute(0, 2, 3, 1)  # B, h, dk, M
+        query = query.permute(0, 2, 1, 3)  # B, h, M, dk
+        value = value.permute(0, 2, 1, 3)  # B, h, M, dk
+        att = torch.matmul(query, key)
+        if key_mask is not None and query_mask is not None:
+            att = self._mask(att=att, key_mask=key_mask, query_mask=query_mask)
+        att = self.softmax(att / self.d_model)  # NOTE: scales by d_model; the cited paper scales by sqrt(dk)
+        out = torch.matmul(att, value)
+        out = out.permute(0, 2, 1, 3)
+        out = out.contiguous()
+        out = out.view(out.shape[0], out.shape[1], -1)
+        return out
+
+    def forward(
+        self,
+        key: Tensor,
+        query: Tensor,
+        value: Tensor,
+        key_mask: Union[Tensor, None] = None,
+        query_mask: Union[Tensor, None] = None,
+    ) -> Tensor:
+        """Passes the input to the multi-head attention by computing a weighted
+        sum of the values using queries and keys. The weights are computed as a softmax
+        over the dot products of the queries and keys for each attention head.
+        Optionally, attention can be masked using key and query masks.
+
+        Args:
+            key (Tensor): The key input tensor of shape [B, M, d]
+
+            query (Tensor): The query tensor of shape [B, M, d]
+
+            value (Tensor): The value tensor of shape [B, M, d]
+
+            key_mask (Tensor, optional): A boolean tensor of shape
+            [B, M] where True indicates that the corresponding key position
+            contains data, not padding, and should not be masked
+
+            query_mask (Tensor, optional): A boolean tensor of shape
+            [B, M] where True indicates that the corresponding query position
+            contains data, not padding, and should not be masked
+
+        Returns:
+            Tensor: The tensor of shape [B, M, d] resulting from the multi-head
+            attention computation.
+        """
+        key = self.key_fc(key)
+        query = self.query_fc(query)
+        value = self.value_fc(value)
+        return self.perform_attention(
+            key=key, query=query, value=value, key_mask=key_mask, query_mask=query_mask
+        )
+
+
+class TruncatedSelfAttention(MultiHeadAtt):
+    """Builds the truncated self attention module used
+    in https://arxiv.org/abs/1910.12977
+
+    Args:
+
+        d_model (int): The model dimension.
+
+        h (int): The number of attention heads.
+
+        left_size (int): The size of the left window that each time step is
+        allowed to look at.
+
+        right_size (int): The size of the right window that each time step is
+        allowed to look at.
+
+        masking_value (float): The attention masking value.
+    """
+
+    def __init__(
+        self,
+        d_model: int,
+        h: int,
+        left_size: int,
+        right_size: int,
+        masking_value: float = -1e15,
+    ) -> None:
+        super().__init__(d_model=d_model, h=h, masking_value=masking_value)
+        self.left_size = left_size
+        self.right_size = right_size
+
+    def get_looking_ahead_mask(self, mask: Tensor) -> Tensor:
+        # truncate_attention_mask is expected to be provided elsewhere in the package (not defined in this file)
+        truncated_mask = truncate_attention_mask(mask, self.right_size, self.left_size)
+        return truncated_mask
+
+    def _mask(self, att: Tensor, query_mask: Tensor, *args, **kwargs) -> Tensor:
+        query_mask = query_mask.unsqueeze(dim=1)
+        return att.masked_fill(~query_mask, self.masking_value)
+
+    def forward(
+        self,
+        x: Tensor,
+        mask: Union[Tensor, None],
+    ) -> Tensor:
+        """Applies truncated masked multi-head self attention to the input.
+
+        Args:
+
+            x (Tensor): The input tensor of shape [B, M, d].
+
+            mask (Union[Tensor, None]): The mask tensor of the input of shape
+            [B, M] where True indicates that the corresponding input position
+            contains data, not padding, and therefore should not be masked.
+            If None, the function will act as a normal multi-head self attention.
+
+        Returns:
+
+            Tensor: The attention result tensor of shape [B, M, d].
+
+        """
+        query_mask = None
+        if mask is not None:
+            query_mask = self.get_looking_ahead_mask(mask=mask)
+        return super().forward(
+            key=x, query=x, value=x, key_mask=mask, query_mask=query_mask
+        )
models.zip
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5ca0e6aa7a827f32547647ec81b7d31b01887c7228b295be5babbc16843d5450
+size 29336
test.py
ADDED
@@ -0,0 +1,58 @@
+import torch
+from utils.dataset import Speech2Text, speech_collate_fn
+from models.model import TransformerTransducer
+
+# ==== Load Dataset ====
+train_dataset = Speech2Text(
+    json_path="/home/anhkhoa/transformer_transducer/data/train.json",
+    vocab_path="/home/anhkhoa/transformer_transducer/data/vocab.json"
+)
+
+train_loader = torch.utils.data.DataLoader(
+    train_dataset,
+    batch_size=2,
+    shuffle=True,
+    collate_fn=speech_collate_fn
+)
+
+# ==== Inspect one batch ====
+batch = next(iter(train_loader))
+
+# print("✅ Batch loaded!")
+# print("Fbank shape   :", batch['fbank'].shape)  # [B, T, 80]
+# print("Fbank lengths :", batch['fbank_len'])    # [B]
+# print("Text shape    :", batch['text'].shape)   # [B, U]
+# print("Text lengths  :", batch['text_len'])     # [B]
+
+# ==== Load model (assuming you have a config) ====
+model = TransformerTransducer(
+    in_features=80,
+    n_classes=len(train_dataset.vocab),
+    n_layers=4,
+    n_dec_layers=2,
+    d_model=256,
+    ff_size=1024,
+    h=4,
+    joint_size=512,
+    enc_left_size=2,
+    enc_right_size=2,
+    dec_left_size=1,
+    dec_right_size=1,
+    p_dropout=0.1
+)
+
+def calculate_mask(lengths, max_len):
+    """Create a boolean mask for sequences of varying lengths."""
+    mask = torch.arange(max_len, device=lengths.device)[None, :] < lengths[:, None]
+    return mask
+
+
+with torch.no_grad():
+    output, fbank_len, text_len = model(
+        speech=batch["fbank"],            # [B, T, 80]
+        speech_mask=batch["fbank_mask"],  # [B, T]
+        text=batch["text"],               # [B, U]
+        text_mask=batch["text_mask"]      # [B, U]
+    )
+
+print("✅ Model output shape:", output.shape)  # [B, T, U, vocab_size]
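
The calculate_mask helper above builds the boolean [B, T] masks the model consumes; for instance (a standalone sketch of the same logic):

import torch

lengths = torch.tensor([5, 3])
mask = torch.arange(5)[None, :] < lengths[:, None]  # same as calculate_mask(lengths, max_len=5)
print(mask)
# tensor([[ True,  True,  True,  True,  True],
#         [ True,  True,  True, False, False]])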
train (1).py
ADDED
@@ -0,0 +1,171 @@
+import torch
+from utils.dataset import Speech2Text, speech_collate_fn
+from models.model import TransformerTransducer
+from tqdm import tqdm
+from models.loss import RNNTLoss
+import argparse
+import yaml
+import os
+
+def train_one_epoch(model, dataloader, optimizer, criterion, device):
+    model.train()
+    total_loss = 0.0
+
+    progress_bar = tqdm(dataloader, desc="🔁 Training", leave=False)
+
+    for batch_idx, batch in enumerate(progress_bar):
+        speech = batch["fbank"].to(device)
+        text = batch["text"].to(device)
+        speech_mask = batch["fbank_mask"].to(device)
+        text_mask = batch["text_mask"].to(device)
+        fbank_len = batch["fbank_len"].to(device)
+        text_len = batch["text_len"].to(device)
+
+        optimizer.zero_grad()
+
+        output, _, _ = model(
+            speech=speech,
+            speech_mask=speech_mask,
+            text=text,
+            text_mask=text_mask,
+        )
+
+        # Drop the leading <s> token if present
+        loss = criterion(output, text, fbank_len, text_len)
+        loss.backward()
+        optimizer.step()
+
+        total_loss += loss.item()
+
+        # === Report the per-batch loss ===
+        progress_bar.set_postfix(batch_loss=loss.item())
+
+    avg_loss = total_loss / len(dataloader)
+    print(f"✅ Average training loss: {avg_loss:.4f}")
+    return avg_loss
+
+
+from torchaudio.functional import rnnt_loss  # unused here; RNNTLoss from models.loss is used instead
+
+def evaluate(model, dataloader, criterion, device):
+    model.eval()
+    total_loss = 0.0
+
+    progress_bar = tqdm(dataloader, desc="🧪 Evaluating", leave=False)
+
+    with torch.no_grad():
+        for batch in progress_bar:
+            speech = batch["fbank"].to(device)
+            text = batch["text"].to(device)
+            speech_mask = batch["fbank_mask"].to(device)
+            text_mask = batch["text_mask"].to(device)
+            fbank_len = batch["fbank_len"].to(device)
+            text_len = batch["text_len"].to(device)
+
+            output, _, _ = model(
+                speech=speech,
+                speech_mask=speech_mask,
+                text=text,
+                text_mask=text_mask,
+            )
+
+            loss = criterion(output, text, fbank_len, text_len)
+            total_loss += loss.item()
+            progress_bar.set_postfix(batch_loss=loss.item())
+
+    avg_loss = total_loss / len(dataloader)
+    print(f"✅ Average validation loss: {avg_loss:.4f}")
+    return avg_loss
+
+def load_config(config_path):
+    with open(config_path, 'r') as f:
+        return yaml.safe_load(f)
+
+def main():
+    from torch.optim import Adam
+
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--config", type=str, required=True, help="Path to YAML config file")
+    args = parser.parse_args()
+
+    config = load_config(args.config)
+    training_cfg = config['training']
+    optimizer_cfg = config['optimizer']
+
+
+    # ==== Load Dataset ====
+    train_dataset = Speech2Text(
+        json_path=training_cfg['train_path'],
+        vocab_path=training_cfg['vocab_path'],
+    )
+
+    train_loader = torch.utils.data.DataLoader(
+        train_dataset,
+        batch_size=training_cfg['batch_size'],
+        shuffle=True,
+        collate_fn=speech_collate_fn
+    )
+
+    dev_dataset = Speech2Text(
+        json_path=training_cfg['dev_path'],
+        vocab_path=training_cfg['vocab_path']
+    )
+
+    dev_loader = torch.utils.data.DataLoader(
+        dev_dataset,
+        batch_size=training_cfg['batch_size'],
+        shuffle=True,
+        collate_fn=speech_collate_fn
+    )
+
+    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+    model = TransformerTransducer(
+        in_features=config['model']['in_features'],
+        n_classes=len(train_dataset.vocab),
+        n_layers=config['model']['n_layers'],
+        n_dec_layers=config['model']['n_dec_layers'],
+        d_model=config['model']['d_model'],
+        ff_size=config['model']['ff_size'],
+        h=config['model']['h'],
+        joint_size=config['model']['joint_size'],
+        enc_left_size=config['model']['enc_left_size'],
+        enc_right_size=config['model']['enc_right_size'],
+        dec_left_size=config['model']['dec_left_size'],
+        dec_right_size=config['model']['dec_right_size'],
+        p_dropout=config['model']['p_dropout']
+    ).to(device)
+
+
+
+
+    # === Initialize the loss ===
+    # Assumes <blank> = 0 and that reduction='mean' is not applied over the whole batch
+    criterion = RNNTLoss(config["rnnt_loss"]["blank"], config["rnnt_loss"]["reduction"])  # or "sum" if you do custom averaging
+
+    # === Optimizer ===
+    optimizer = Adam(model.parameters(), lr=optimizer_cfg['lr'])
+
+    # === Training ===
+    num_epochs = config["training"]["epochs"]
+
+    for epoch in range(1, num_epochs + 1):
+        train_loss = train_one_epoch(model, train_loader, optimizer, criterion, device)
+        val_loss = evaluate(model, dev_loader, criterion, device)
+
+        print(f"📘 Epoch {epoch}: Train Loss = {train_loss:.4f}, Val Loss = {val_loss:.4f}")
+        # Save model checkpoint
+
+        model_filename = os.path.join(
+            config['training']['save_path'],
+            f"transformer_transducer_epoch_{epoch}"
+        )
+
+        torch.save({
+            'epoch': epoch,
+            'model_state_dict': model.state_dict(),
+            'optimizer_state_dict': optimizer.state_dict(),
+        }, model_filename)
+
+
+if __name__ == "__main__":
+    main()
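
To restore one of the uploaded checkpoints, the save format above suggests something like the following (a minimal sketch; model and optimizer must first be constructed exactly as in train (1).py):

import torch

checkpoint = torch.load("transformer_transducer_epoch_5", map_location="cpu")
model.load_state_dict(checkpoint["model_state_dict"])          # model: assumed built as in main() above
optimizer.load_state_dict(checkpoint["optimizer_state_dict"])  # only needed when resuming training
start_epoch = checkpoint["epoch"] + 1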
transformer_transducer_epoch_1
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:514df935f43f03013403f14e094bbd9203fb81193fdf84147df88296e63473ea
+size 760557182
transformer_transducer_epoch_2
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3a52a3614e4adab1b676f1d24a71bd0ca36c1b30fa7d70949f0ab6bc745f0485
+size 760557182
transformer_transducer_epoch_3
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:afa7a15cc9cd2b973b6b08a50fb762b8a138e633121da27138588643a2773452
+size 760557182
transformer_transducer_epoch_4
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:287ed73b2caccbfb91618e85d028795b09a7dd72484b89837bc7632c1f55b7d3
+size 760557182
transformer_transducer_epoch_5
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:52c12064cccc1fcdb944eb26d5f3839989d3dd22357c628781542c19e2c34651
+size 760557182
utils.zip
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3f1c016a65820bb3ae71241f84643343cc2e2bfb1f2876bccb13b85dbfbbd79c
+size 5552