import torch

from utils.dataset import Speech2Text, speech_collate_fn
from models.model import TransformerTransducer


# Build the training dataset from the manifest and vocabulary files.
train_dataset = Speech2Text(
    json_path="/home/anhkhoa/transformer_transducer/data/train.json",
    vocab_path="/home/anhkhoa/transformer_transducer/data/vocab.json"
)

# DataLoader with the custom collate_fn, which pads variable-length
# utterances and targets and builds the corresponding masks.
train_loader = torch.utils.data.DataLoader(
    train_dataset,
    batch_size=2,
    shuffle=True,
    collate_fn=speech_collate_fn
)

# Grab one batch to smoke-test the model's forward pass.
batch = next(iter(train_loader))
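
# Quick shape check (a sketch; assumes speech_collate_fn returns a dict with
# the keys used in the forward call below: "fbank", "fbank_mask", "text",
# "text_mask").
for name, tensor in batch.items():
    print(f"{name}: shape={tuple(tensor.shape)}, dtype={tensor.dtype}")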

# Instantiate the Transformer-Transducer: 80-dim filterbank input,
# 4 encoder layers (n_layers) and 2 decoder layers (n_dec_layers),
# with a joint network of size 512.
model = TransformerTransducer(
    in_features=80,
    n_classes=len(train_dataset.vocab),
    n_layers=4,
    n_dec_layers=2,
    d_model=256,
    ff_size=1024,
    h=4,
    joint_size=512,
    enc_left_size=2,
    enc_right_size=2,
    dec_left_size=1,
    dec_right_size=1,
    p_dropout=0.1
)
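
# Optional sanity check (assumes TransformerTransducer subclasses torch.nn.Module).
n_params = sum(p.numel() for p in model.parameters())
print(f"Parameters: {n_params:,}")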


def calculate_mask(lengths, max_len):
    """Build a boolean padding mask for tensors of different lengths.

    Returns a (batch, max_len) tensor that is True at valid positions
    and False at padded ones.
    """
    mask = torch.arange(max_len, device=lengths.device)[None, :] < lengths[:, None]
    return mask
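
# Example: three sequences of lengths 3, 5, and 2, padded to max_len=5;
# calculate_mask marks the valid (non-padded) positions with True.
example_lengths = torch.tensor([3, 5, 2])
print(calculate_mask(example_lengths, max_len=5))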

# Run a single forward pass with gradients disabled; the model returns the
# joint output together with the speech and text lengths.
with torch.no_grad():
    output, fbank_len, text_len = model(
        speech=batch["fbank"],
        speech_mask=batch["fbank_mask"],
        text=batch["text"],
        text_mask=batch["text_mask"]
    )

print("✅ Model output shape:", output.shape)