repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
speechbrain | speechbrain-main/recipes/LJSpeech/TTS/tacotron2/train.py | # -*- coding: utf-8 -*-
"""
Recipe for training the Tacotron Text-To-Speech model, an end-to-end
neural text-to-speech (TTS) system
To run this recipe, do the following:
# python train.py --device=cuda:0 --max_grad_norm=1.0 --data_folder=/your_folder/LJSpeech-1.1 hparams/train.yaml
to infer simply load saved model and do
savemodel.infer(text_Sequence,len(textsequence))
where text_Sequence is the output of the text_to_sequence function from
textToSequence.py (from textToSequence import text_to_sequence)
Authors
* Georges Abous-Rjeili 2021
* Artem Ploujnikov 2021
* Yingzhi Wang 2022
"""
import torch
import speechbrain as sb
import sys
import logging
from hyperpyyaml import load_hyperpyyaml
from speechbrain.utils.text_to_sequence import text_to_sequence
from speechbrain.utils.data_utils import scalarize
logger = logging.getLogger(__name__)
class Tacotron2Brain(sb.Brain):
    """The Brain implementation for Tacotron2"""
    def on_fit_start(self):
        """Gets called at the beginning of ``fit()``, on multiple processes
        if ``distributed_count > 0`` and backend is ddp and initializes statistics"""
        self.hparams.progress_sample_logger.reset()
        # Remember the last epoch/batch/loss stats so on_stage_end can log
        # and run an inference sample without re-reading the data loader.
        self.last_epoch = 0
        self.last_batch = None
        self.last_loss_stats = {}
        return super().on_fit_start()
    def compute_forward(self, batch, stage):
        """Computes the forward pass
        Arguments
        ---------
        batch: tuple
            a single raw batch (as produced by the collate function)
        stage: speechbrain.Stage
            the training stage
        Returns
        -------
        the model output (mel outputs, gate outputs and alignments)
        """
        effective_batch = self.batch_to_device(batch)
        inputs, y, num_items, _, _ = effective_batch
        _, input_lengths, _, _, _ = inputs
        # Alignments are padded up to the longest text sequence in the batch
        max_input_length = input_lengths.max().item()
        return self.modules.model(inputs, alignments_dim=max_input_length)
    def fit_batch(self, batch):
        """Fits a single batch and applies annealing
        Arguments
        ---------
        batch: tuple
            a training batch
        Returns
        -------
        loss: torch.Tensor
            detached loss
        """
        result = super().fit_batch(batch)
        # Step the learning-rate schedule once per batch
        self.hparams.lr_annealing(self.optimizer)
        return result
    def compute_objectives(self, predictions, batch, stage):
        """Computes the loss given the predicted and targeted outputs.
        Arguments
        ---------
        predictions : torch.Tensor
            The model generated spectrograms and other metrics from `compute_forward`.
        batch : PaddedBatch
            This batch object contains all the relevant tensors for computation.
        stage : sb.Stage
            One of sb.Stage.TRAIN, sb.Stage.VALID, or sb.Stage.TEST.
        Returns
        -------
        loss : torch.Tensor
            A one-element tensor used for backpropagating the gradient.
        """
        effective_batch = self.batch_to_device(batch)
        # Hold on to the batch for the inference sample. This is needed because
        # the inference sample is run from on_stage_end only, where
        # batch information is not available
        self.last_batch = effective_batch
        # Hold on to a sample (for logging)
        self._remember_sample(effective_batch, predictions)
        # Compute the loss
        loss = self._compute_loss(predictions, effective_batch, stage)
        return loss
    def _compute_loss(self, predictions, batch, stage):
        """Computes the value of the loss function and updates stats
        Arguments
        ---------
        predictions: tuple
            model predictions
        batch: tuple
            the batch (on-device), containing the ground-truth targets
        stage: sb.Stage
            the current stage, used as the key for the loss-stat cache
        Returns
        -------
        loss: torch.Tensor
            the loss value
        """
        inputs, targets, num_items, labels, wavs = batch
        text_padded, input_lengths, _, max_len, output_lengths = inputs
        # The criterion returns a stats structure; only its scalarized form
        # is kept for logging, while `.loss` is returned for backprop.
        loss_stats = self.hparams.criterion(
            predictions, targets, input_lengths, output_lengths, self.last_epoch
        )
        self.last_loss_stats[stage] = scalarize(loss_stats)
        return loss_stats.loss
    def _remember_sample(self, batch, predictions):
        """Remembers samples of spectrograms and the batch for logging purposes
        Arguments
        ---------
        batch: tuple
            a training batch
        predictions: tuple
            predictions (raw output of the Tacotron model)
        """
        inputs, targets, num_items, labels, wavs = batch
        text_padded, input_lengths, _, max_len, output_lengths = inputs
        mel_target, _ = targets
        mel_out, mel_out_postnet, gate_out, alignments = predictions
        # Per-sample max over both alignment axes, used to normalize the
        # alignment image of the first batch element to [0, 1]
        alignments_max = (
            alignments[0]
            .max(dim=-1)
            .values.max(dim=-1)
            .values.unsqueeze(-1)
            .unsqueeze(-1)
        )
        # Transpose and flip so the alignment plot has the usual orientation
        alignments_output = alignments[0].T.flip(dims=(1,)) / alignments_max
        self.hparams.progress_sample_logger.remember(
            target=self._get_spectrogram_sample(mel_target),
            output=self._get_spectrogram_sample(mel_out),
            output_postnet=self._get_spectrogram_sample(mel_out_postnet),
            alignments=alignments_output,
            raw_batch=self.hparams.progress_sample_logger.get_batch_sample(
                {
                    "text_padded": text_padded,
                    "input_lengths": input_lengths,
                    "mel_target": mel_target,
                    "mel_out": mel_out,
                    "mel_out_postnet": mel_out_postnet,
                    "max_len": max_len,
                    "output_lengths": output_lengths,
                    "gate_out": gate_out,
                    "alignments": alignments,
                    "labels": labels,
                    "wavs": wavs,
                }
            ),
        )
    def batch_to_device(self, batch):
        """Transfers the batch to the target device
        Arguments
        ---------
        batch: tuple
            the batch to use
        Returns
        -------
        batch: tuple
            the batch on the correct device
        """
        (
            text_padded,
            input_lengths,
            mel_padded,
            gate_padded,
            output_lengths,
            len_x,
            labels,
            wavs,
        ) = batch
        text_padded = text_padded.to(self.device, non_blocking=True).long()
        input_lengths = input_lengths.to(self.device, non_blocking=True).long()
        max_len = torch.max(input_lengths.data).item()
        mel_padded = mel_padded.to(self.device, non_blocking=True).float()
        gate_padded = gate_padded.to(self.device, non_blocking=True).float()
        output_lengths = output_lengths.to(
            self.device, non_blocking=True
        ).long()
        # Regroup into (inputs, targets, total_length, labels, wavs)
        x = (text_padded, input_lengths, mel_padded, max_len, output_lengths)
        y = (mel_padded, gate_padded)
        len_x = torch.sum(output_lengths)
        return (x, y, len_x, labels, wavs)
    def _get_spectrogram_sample(self, raw):
        """Converts a raw spectrogram to one that can be saved as an image
        sample = sqrt(exp(raw))
        Arguments
        ---------
        raw: torch.Tensor
            the raw spectrogram (as used in the model)
        Returns
        -------
        sample: torch.Tensor
            the spectrogram, for image saving purposes
        """
        sample = raw[0]
        return torch.sqrt(torch.exp(sample))
    def on_stage_end(self, stage, stage_loss, epoch):
        """Gets called at the end of an epoch.
        Arguments
        ---------
        stage : sb.Stage
            One of sb.Stage.TRAIN, sb.Stage.VALID, sb.Stage.TEST
        stage_loss : float
            The average loss for all of the data processed in this stage.
        epoch : int
            The currently-starting epoch. This is passed
            `None` during the test stage.
        """
        # Store the train loss until the validation stage.
        # At the end of validation, we can write
        if stage == sb.Stage.VALID:
            # Update learning rate
            lr = self.optimizer.param_groups[-1]["lr"]
            self.last_epoch = epoch
            # The train_logger writes a summary to stdout and to the logfile.
            self.hparams.train_logger.log_stats(
                stats_meta={"Epoch": epoch, "lr": lr},
                train_stats=self.last_loss_stats[sb.Stage.TRAIN],
                valid_stats=self.last_loss_stats[sb.Stage.VALID],
            )
            # Save the current checkpoint and delete previous checkpoints.
            epoch_metadata = {
                **{"epoch": epoch},
                **self.last_loss_stats[sb.Stage.VALID],
            }
            # Checkpoints at multiples of keep_checkpoint_interval are kept
            # permanently; the predicate marks all others as deletable.
            self.checkpointer.save_and_keep_only(
                meta=epoch_metadata,
                min_keys=["loss"],
                ckpt_predicate=(
                    lambda ckpt: (
                        ckpt.meta["epoch"]
                        % self.hparams.keep_checkpoint_interval
                        != 0
                    )
                )
                if self.hparams.keep_checkpoint_interval is not None
                else None,
            )
            # Periodically produce and save progress samples (spectrograms
            # and an inference sample) for visual inspection.
            output_progress_sample = (
                self.hparams.progress_samples
                and epoch % self.hparams.progress_samples_interval == 0
            )
            if output_progress_sample:
                self.run_inference_sample()
                self.hparams.progress_sample_logger.save(epoch)
        # We also write statistics about test data to stdout and to the logfile.
        if stage == sb.Stage.TEST:
            self.hparams.train_logger.log_stats(
                {"Epoch loaded": self.hparams.epoch_counter.current},
                test_stats=self.last_loss_stats[sb.Stage.TEST],
            )
            if self.hparams.progress_samples:
                self.run_inference_sample()
                self.hparams.progress_sample_logger.save("test")
    def run_inference_sample(self):
        """Produces a sample in inference mode. This is called when producing
        samples and can be useful because"""
        if self.last_batch is None:
            return
        inputs, _, _, _, _ = self.last_batch
        text_padded, input_lengths, _, _, _ = inputs
        # Run inference on the first utterance of the last seen batch
        mel_out, _, _ = self.hparams.model.infer(
            text_padded[:1], input_lengths[:1]
        )
        self.hparams.progress_sample_logger.remember(
            inference_mel_out=self._get_spectrogram_sample(mel_out)
        )
def dataio_prepare(hparams):
    """Builds the dynamic-item datasets (train/valid/test) for Tacotron2.

    Arguments
    ---------
    hparams: dict
        the loaded hyperparameters

    Returns
    -------
    datasets: dict
        a DynamicItemDataset per split listed in ``hparams["splits"]``
    """
    # Pipeline: raw wav + label -> (encoded text, mel spectrogram, text length)
    @sb.utils.data_pipeline.takes("wav", "label")
    @sb.utils.data_pipeline.provides("mel_text_pair")
    def audio_pipeline(wav, label):
        encoded_text = torch.IntTensor(
            text_to_sequence(label, hparams["text_cleaners"])
        )
        signal = sb.dataio.dataio.read_audio(wav)
        mel_spec = hparams["mel_spectogram"](audio=signal)
        return encoded_text, mel_spec, len(encoded_text)

    split_paths = {
        "train": hparams["train_json"],
        "valid": hparams["valid_json"],
        "test": hparams["test_json"],
    }
    datasets = {}
    for split in hparams["splits"]:
        datasets[split] = sb.dataio.dataset.DynamicItemDataset.from_json(
            json_path=split_paths[split],
            replacements={"data_root": hparams["data_folder"]},
            dynamic_items=[audio_pipeline],
            output_keys=["mel_text_pair", "wav", "label"],
        )
    return datasets
if __name__ == "__main__":
    # Load hyperparameters file with command-line overrides
    hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])
    with open(hparams_file) as fin:
        hparams = load_hyperpyyaml(fin, overrides)
    # If --distributed_launch then
    # create ddp_group with the right communication protocol
    sb.utils.distributed.ddp_init_group(run_opts)
    # Create experiment directory
    sb.create_experiment_directory(
        experiment_directory=hparams["output_folder"],
        hyperparams_to_save=hparams_file,
        overrides=overrides,
    )
    # The LJSpeech preparation module lives one directory above this recipe
    sys.path.append("../")
    from ljspeech_prepare import prepare_ljspeech
    # Data preparation runs only on the main process (safe under DDP)
    sb.utils.distributed.run_on_main(
        prepare_ljspeech,
        kwargs={
            "data_folder": hparams["data_folder"],
            "save_folder": hparams["save_folder"],
            "splits": hparams["splits"],
            "split_ratio": hparams["split_ratio"],
            "seed": hparams["seed"],
            "skip_prep": hparams["skip_prep"],
        },
    )
    datasets = dataio_prepare(hparams)
    # Brain class initialization
    tacotron2_brain = Tacotron2Brain(
        modules=hparams["modules"],
        opt_class=hparams["opt_class"],
        hparams=hparams,
        run_opts=run_opts,
        checkpointer=hparams["checkpointer"],
    )
    # Training
    tacotron2_brain.fit(
        tacotron2_brain.hparams.epoch_counter,
        train_set=datasets["train"],
        valid_set=datasets["valid"],
        train_loader_kwargs=hparams["train_dataloader_opts"],
        valid_loader_kwargs=hparams["valid_dataloader_opts"],
    )
    # Test (only when a test split was prepared)
    if "test" in datasets:
        tacotron2_brain.evaluate(
            datasets["test"],
            test_loader_kwargs=hparams["test_dataloader_opts"],
        )
| 13,447 | 32.53616 | 113 | py |
speechbrain | speechbrain-main/recipes/LJSpeech/TTS/vocoder/hifi_gan/train.py | #!/usr/bin/env python3
"""Recipe for training a hifi-gan vocoder.
For more details about hifi-gan: https://arxiv.org/pdf/2010.05646.pdf
To run this recipe, do the following:
> python train.py hparams/train.yaml --data_folder /path/to/LJspeech
Authors
* Duret Jarod 2021
* Yingzhi WANG 2022
"""
import sys
import torch
import copy
from hyperpyyaml import load_hyperpyyaml
import speechbrain as sb
from speechbrain.utils.data_utils import scalarize
import torchaudio
import os
class HifiGanBrain(sb.Brain):
    """The Brain implementation for adversarial HiFi-GAN vocoder training."""

    def compute_forward(self, batch, stage):
        """The forward function, generates synthesized waveforms,
        calculates the scores and the features of the discriminator
        for synthesized waveforms and real waveforms.

        Arguments
        ---------
        batch: str
            a single batch
        stage: speechbrain.Stage
            the training stage

        Returns
        -------
        a tuple (y_g_hat, scores_fake, feats_fake, scores_real, feats_real)
        """
        batch = batch.to(self.device)
        x, _ = batch.mel
        y, _ = batch.sig
        # generate sythesized waveforms (trim to the target length)
        y_g_hat = self.modules.generator(x)[:, :, : y.size(2)]
        # get scores and features from discriminator for real and synthesized
        # waveforms; detach so discriminator grads don't flow to the generator
        scores_fake, feats_fake = self.modules.discriminator(y_g_hat.detach())
        scores_real, feats_real = self.modules.discriminator(y)
        return (y_g_hat, scores_fake, feats_fake, scores_real, feats_real)

    def compute_objectives(self, predictions, batch, stage):
        """Computes and combines generator and discriminator losses.

        Returns
        -------
        loss: dict
            merged generator + discriminator loss stats (includes the
            "G_loss" and "D_loss" entries used by fit/evaluate)
        """
        batch = batch.to(self.device)
        x, _ = batch.mel
        y, _ = batch.sig
        # Hold on to the batch for the inference sample. This is needed because
        # the inference sample is run from on_stage_end only, where
        # batch information is not available
        self.last_batch = (x, y)
        # Hold on to a sample (for logging)
        self._remember_sample(self.last_batch, predictions)
        y_hat, scores_fake, feats_fake, scores_real, feats_real = predictions
        loss_g = self.hparams.generator_loss(
            y_hat, y, scores_fake, feats_fake, feats_real
        )
        loss_d = self.hparams.discriminator_loss(scores_fake, scores_real)
        loss = {**loss_g, **loss_d}
        self.last_loss_stats[stage] = scalarize(loss)
        return loss

    def fit_batch(self, batch):
        """Train discriminator and generator adversarially.

        The discriminator is updated first (against a detached generator
        output); the discriminator is then re-run on the non-detached output
        to compute the generator loss.
        """
        batch = batch.to(self.device)
        y, _ = batch.sig
        outputs = self.compute_forward(batch, sb.core.Stage.TRAIN)
        (y_g_hat, scores_fake, feats_fake, scores_real, feats_real) = outputs
        # calculate discriminator loss with the latest updated generator
        loss_d = self.compute_objectives(outputs, batch, sb.core.Stage.TRAIN)[
            "D_loss"
        ]
        # First train the discriminator
        self.optimizer_d.zero_grad()
        loss_d.backward()
        self.optimizer_d.step()
        # calculate generator loss with the latest updated discriminator
        scores_fake, feats_fake = self.modules.discriminator(y_g_hat)
        scores_real, feats_real = self.modules.discriminator(y)
        outputs = (y_g_hat, scores_fake, feats_fake, scores_real, feats_real)
        loss_g = self.compute_objectives(outputs, batch, sb.core.Stage.TRAIN)[
            "G_loss"
        ]
        # Then train the generator
        self.optimizer_g.zero_grad()
        loss_g.backward()
        self.optimizer_g.step()
        return loss_g.detach().cpu()

    def evaluate_batch(self, batch, stage):
        """Evaluate one batch; returns the detached generator loss."""
        out = self.compute_forward(batch, stage=stage)
        loss = self.compute_objectives(out, batch, stage=stage)
        loss_g = loss["G_loss"]
        return loss_g.detach().cpu()

    def on_fit_start(self):
        """Gets called at the beginning of ``fit()``, on multiple processes
        if ``distributed_count > 0`` and backend is ddp and initializes statistics
        """
        self.last_epoch = 0
        self.last_batch = None
        self.last_loss_stats = {}
        return super().on_fit_start()

    def init_optimizers(self):
        """Called during ``on_fit_start()``, initialize optimizers
        after parameters are fully configured (e.g. DDP, jit).

        ``self.opt_class`` is expected to be a sequence of four callables:
        (generator optimizer, discriminator optimizer,
        generator scheduler, discriminator scheduler).
        """
        if self.opt_class is not None:
            (
                opt_g_class,
                opt_d_class,
                sch_g_class,
                sch_d_class,
            ) = self.opt_class

            self.optimizer_g = opt_g_class(self.modules.generator.parameters())
            self.optimizer_d = opt_d_class(
                self.modules.discriminator.parameters()
            )
            self.scheduler_g = sch_g_class(self.optimizer_g)
            self.scheduler_d = sch_d_class(self.optimizer_d)

            if self.checkpointer is not None:
                self.checkpointer.add_recoverable(
                    "optimizer_g", self.optimizer_g
                )
                self.checkpointer.add_recoverable(
                    "optimizer_d", self.optimizer_d
                )
                # BUGFIX: the generator scheduler was previously registered
                # as self.scheduler_d, so scheduler_g state was never
                # checkpointed or recovered.
                self.checkpointer.add_recoverable(
                    "scheduler_g", self.scheduler_g
                )
                self.checkpointer.add_recoverable(
                    "scheduler_d", self.scheduler_d
                )

    def zero_grad(self, set_to_none=False):
        """Zeroes the gradients of both optimizers (if initialized)."""
        if self.opt_class is not None:
            self.optimizer_g.zero_grad(set_to_none)
            self.optimizer_d.zero_grad(set_to_none)

    def _remember_sample(self, batch, predictions):
        """Remembers samples of spectrograms and the batch for logging purposes

        Arguments
        ---------
        batch: tuple
            a training batch
        predictions: tuple
            predictions (raw output of the HiFi-GAN model)
        """
        # NOTE: currently a no-op placeholder; the unpacking documents the
        # expected structure of batch and predictions.
        mel, sig = batch
        y_hat, scores_fake, feats_fake, scores_real, feats_real = predictions

    def on_stage_end(self, stage, stage_loss, epoch):
        """Gets called at the end of a stage (TRAIN, VALID, Or TEST)."""
        if stage == sb.Stage.VALID:
            # Update learning rate
            self.scheduler_g.step()
            self.scheduler_d.step()
            lr_g = self.optimizer_g.param_groups[-1]["lr"]
            lr_d = self.optimizer_d.param_groups[-1]["lr"]

            self.hparams.train_logger.log_stats(
                stats_meta={"Epoch": epoch, "lr_g": lr_g, "lr_d": lr_d},
                train_stats=self.last_loss_stats[sb.Stage.TRAIN],
                valid_stats=self.last_loss_stats[sb.Stage.VALID],
            )
            # The tensorboard_logger writes a summary to stdout and to the logfile.
            if self.hparams.use_tensorboard:
                self.tensorboard_logger.log_stats(
                    stats_meta={"Epoch": epoch, "lr_g": lr_g, "lr_d": lr_d},
                    train_stats=self.last_loss_stats[sb.Stage.TRAIN],
                    valid_stats=self.last_loss_stats[sb.Stage.VALID],
                )
            # Save the current checkpoint and delete previous checkpoints.
            # Checkpoints whose epoch is a multiple of
            # keep_checkpoint_interval are kept permanently.
            epoch_metadata = {
                **{"epoch": epoch},
                **self.last_loss_stats[sb.Stage.VALID],
            }
            self.checkpointer.save_and_keep_only(
                meta=epoch_metadata,
                end_of_epoch=True,
                min_keys=["loss"],
                ckpt_predicate=(
                    lambda ckpt: (
                        ckpt.meta["epoch"]
                        % self.hparams.keep_checkpoint_interval
                        != 0
                    )
                )
                if self.hparams.keep_checkpoint_interval is not None
                else None,
            )
            self.run_inference_sample("Valid")

        # We also write statistics about test data to stdout and to the TensorboardLogger.
        if stage == sb.Stage.TEST:
            self.hparams.train_logger.log_stats(
                {"Epoch loaded": self.hparams.epoch_counter.current},
                test_stats=self.last_loss_stats[sb.Stage.TEST],
            )
            if self.hparams.use_tensorboard:
                self.tensorboard_logger.log_stats(
                    {"Epoch loaded": self.hparams.epoch_counter.current},
                    test_stats=self.last_loss_stats[sb.Stage.TEST],
                )
            self.run_inference_sample("Test")

    def run_inference_sample(self, name):
        """Produces a sample in inference mode. This is called when producing
        samples.
        """
        with torch.no_grad():
            if self.last_batch is None:
                return
            x, y = self.last_batch

            # Preparing model for inference by removing weight norm
            # (done on a deep copy so the training model is untouched)
            inference_generator = copy.deepcopy(self.hparams.generator)
            inference_generator.remove_weight_norm()
            sig_out = inference_generator.inference(x)
            spec_out = self.hparams.mel_spectogram(
                audio=sig_out.squeeze(0).cpu()
            )
        if self.hparams.use_tensorboard:
            self.tensorboard_logger.log_audio(
                f"{name}/audio_target", y.squeeze(0), self.hparams.sample_rate
            )
            self.tensorboard_logger.log_audio(
                f"{name}/audio_pred",
                sig_out.squeeze(0),
                self.hparams.sample_rate,
            )
            self.tensorboard_logger.log_figure(f"{name}/mel_target", x)
            self.tensorboard_logger.log_figure(f"{name}/mel_pred", spec_out)
        else:
            # folder name is the current epoch for validation and "test" for test
            folder = (
                self.hparams.epoch_counter.current
                if name == "Valid"
                else "test"
            )
            self.save_audio("target", y.squeeze(0), folder)
            self.save_audio("synthesized", sig_out.squeeze(0), folder)

    def save_audio(self, name, data, epoch):
        """Saves a single wav

        Arguments
        ---------
        name: str
            the name of the saved audio
        data: torch.Tensor
            the wave data to save
        epoch: int or str
            the epoch number (used in file path calculations)
            or "test" for test stage
        """
        target_path = os.path.join(
            self.hparams.progress_sample_path, str(epoch)
        )
        if not os.path.exists(target_path):
            os.makedirs(target_path)
        file_name = f"{name}.wav"
        effective_file_name = os.path.join(target_path, file_name)
        torchaudio.save(effective_file_name, data.cpu(), 22050)
def dataio_prepare(hparams):
    """This function prepares the datasets to be used in the brain class.
    It also defines the data processing pipeline through user-defined functions.

    Arguments
    ---------
    hparams: dict
        the loaded hyperparameters

    Returns
    -------
    datasets: dict
        a DynamicItemDataset per split listed in ``hparams["splits"]``
    """
    segment_size = hparams["segment_size"]

    # Define audio pipeline:
    @sb.utils.data_pipeline.takes("wav", "segment")
    @sb.utils.data_pipeline.provides("mel", "sig")
    def audio_pipeline(wav, segment):
        audio = sb.dataio.dataio.read_audio(wav)
        audio = torch.FloatTensor(audio)
        audio = audio.unsqueeze(0)

        if segment:
            if audio.size(1) >= segment_size:
                max_audio_start = audio.size(1) - segment_size
                # torch.randint's upper bound is exclusive, so use
                # max_audio_start + 1: this makes the last valid start
                # position reachable and, crucially, avoids a RuntimeError
                # when the audio length equals segment_size
                # (max_audio_start == 0).
                audio_start = torch.randint(0, max_audio_start + 1, (1,))
                audio = audio[:, audio_start : audio_start + segment_size]
            else:
                # Audio shorter than a segment: right-pad with zeros
                audio = torch.nn.functional.pad(
                    audio, (0, segment_size - audio.size(1)), "constant"
                )

        mel = hparams["mel_spectogram"](audio=audio.squeeze(0))
        return mel, audio

    datasets = {}
    data_info = {
        "train": hparams["train_json"],
        "valid": hparams["valid_json"],
        "test": hparams["test_json"],
    }
    for dataset in hparams["splits"]:
        datasets[dataset] = sb.dataio.dataset.DynamicItemDataset.from_json(
            json_path=data_info[dataset],
            replacements={"data_root": hparams["data_folder"]},
            dynamic_items=[audio_pipeline],
            output_keys=["id", "mel", "sig"],
        )
    return datasets
if __name__ == "__main__":
    # Load hyperparameters file with command-line overrides
    hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])
    with open(hparams_file) as fin:
        hparams = load_hyperpyyaml(fin, overrides)
    # If --distributed_launch then
    # create ddp_group with the right communication protocol
    sb.utils.distributed.ddp_init_group(run_opts)
    # Create experiment directory
    sb.create_experiment_directory(
        experiment_directory=hparams["output_folder"],
        hyperparams_to_save=hparams_file,
        overrides=overrides,
    )
    # The LJSpeech preparation module lives two directories above this recipe
    sys.path.append("../../")
    from ljspeech_prepare import prepare_ljspeech
    # Data preparation runs only on the main process (safe under DDP)
    sb.utils.distributed.run_on_main(
        prepare_ljspeech,
        kwargs={
            "data_folder": hparams["data_folder"],
            "save_folder": hparams["save_folder"],
            "splits": hparams["splits"],
            "split_ratio": hparams["split_ratio"],
            "seed": hparams["seed"],
            "skip_prep": hparams["skip_prep"],
        },
    )
    datasets = dataio_prepare(hparams)
    # Brain class initialization; opt_class bundles the two optimizers and
    # the two schedulers consumed by HifiGanBrain.init_optimizers
    hifi_gan_brain = HifiGanBrain(
        modules=hparams["modules"],
        opt_class=[
            hparams["opt_class_generator"],
            hparams["opt_class_discriminator"],
            hparams["sch_class_generator"],
            hparams["sch_class_discriminator"],
        ],
        hparams=hparams,
        run_opts=run_opts,
        checkpointer=hparams["checkpointer"],
    )
    # Optional TensorBoard logging
    if hparams["use_tensorboard"]:
        hifi_gan_brain.tensorboard_logger = sb.utils.train_logger.TensorboardLogger(
            save_dir=hparams["output_folder"] + "/tensorboard"
        )
    # Training
    hifi_gan_brain.fit(
        hifi_gan_brain.hparams.epoch_counter,
        train_set=datasets["train"],
        valid_set=datasets["valid"],
        train_loader_kwargs=hparams["train_dataloader_opts"],
        valid_loader_kwargs=hparams["valid_dataloader_opts"],
    )
    # Test (only when a test split was prepared)
    if "test" in datasets:
        hifi_gan_brain.evaluate(
            datasets["test"],
            test_loader_kwargs=hparams["test_dataloader_opts"],
        )
| 14,460 | 34.618227 | 90 | py |
speechbrain | speechbrain-main/recipes/Fisher-Callhome-Spanish/fisher_callhome_prepare.py | """
Data preparation
Author
-----
YAO-FEI, CHENG 2021
"""
import os
import re
import json
import string
import logging
import subprocess
from typing import List
from dataclasses import dataclass, field
import torch
import torchaudio
from tqdm import tqdm
from speechbrain.utils.data_utils import get_all_files
from speechbrain.utils.torch_audio_backend import check_torchaudio_backend
from speechbrain.processing.speech_augmentation import Resample
try:
from sacremoses import MosesPunctNormalizer, MosesTokenizer
except ImportError:
err_msg = "The optional dependency sacremoses must be installed to run this recipe.\n"
err_msg += "Install using `pip install sacremoses`.\n"
raise ImportError(err_msg)
logger = logging.getLogger(__name__)
# Ensure a usable torchaudio backend is configured before any audio I/O
check_torchaudio_backend()
# Moses punctuation normalizers (Spanish transcripts, English translations)
# and an English tokenizer, shared across the preparation functions below
es_normalizer = MosesPunctNormalizer(lang="es")
en_normalizer = MosesPunctNormalizer(lang="en")
en_tokenizer = MosesTokenizer(lang="en")
# Target sample rate of the extracted segments (source audio is 8 kHz)
SAMPLE_RATE = 16000
@dataclass
class TDF:
    """A single transcription line parsed from an LDC .tdf file.

    channel: int
        channel of utterance
    start: int
        start time of utterance (source value scaled by 100)
    end: int
        end time of utterance (source value scaled by 100)
    transcript: str
        transcript of utterance
    """
    channel: int
    start: int
    end: int
    transcript: str
@dataclass
class Data:
    """A single prepared utterance.

    Each entry contains a transcription and one translation for the train
    set, and four translations for the dev, dev2 and test sets.
    """

    uid: str = ""  # utterance id, e.g. "<file>-<channel>-<start>-<end>"
    wav: str = ""  # "<sph path> <channel> <start> <end>" segment descriptor
    transcription: str = ""
    duration: float = 0  # duration in seconds
    # idiomatic default_factory (no lambda needed for an empty list)
    translations: List[str] = field(default_factory=list)
def prepare_fisher_callhome_spanish(
    data_folder: str, save_folder: str, device: str = "cpu"
):
    """
    Prepares the json files for the Mini Fisher-Callhome-Spanish dataset.
    Arguments
    ---------
    data_folder : str
        Path to the folder where the Fisher-Callhome-Spanish dataset is stored.
    save_folder : str
        Path where the train/valid/test specification files will be saved.
    device : str
        Torch device used for resampling the audio segments.
    Example
    -------
    >>> data_folder = '/path/to/fisher-callhome'
    >>> save_folder = 'data'
    >>> prepare_fisher_callhome_spanish(data_folder, save_folder)
    """
    if not os.path.exists(save_folder):
        os.makedirs(save_folder)
    # If the dataset doesn't exist yet, terminate the whole program
    speech_folder = os.path.join(f"{data_folder}/LDC2010S01/data/speech")
    transcription_folder = os.path.join(
        f"{data_folder}/LDC2010T04/fisher_spa_tr/data/transcripts"
    )
    if check_folders(speech_folder, transcription_folder) is not True:
        logger.error(
            "Speech or transcription directories are missing or not properly organised within the speech data dir"
            "Typical format is LDC2010S01/data/speech and LDC2010T04/fisher_spa_tr/data/transcripts"
        )
        return
    datasets = ["dev", "dev2", "test", "train"]
    corpus_path = f"{save_folder}/fisher-callhome-corpus"
    # Clone the translation corpus (if absent) and build the split lists
    download_translations(path=corpus_path)
    make_data_splits(
        f"{corpus_path}/mapping"
    )  # make splitted data list from mapping files
    for dataset in datasets:
        if not os.path.exists(f"{save_folder}/{dataset}/wav"):
            os.makedirs(f"{save_folder}/{dataset}/wav")
        if skip(save_folder, dataset):
            logger.info(
                f"Skipping preparation of {dataset}, completed in previous run."
            )
            continue
        # get file lists
        transcription_files = get_transcription_files_by_dataset(
            dataset, transcription_folder=transcription_folder
        )
        # extract all transcriptions from files, keyed by file basename
        extracted_transcriptions = {}
        for transcription_file in transcription_files:
            filename = transcription_file.split("/")[-1].split(".")[0]
            extracted_transcriptions[filename] = extract_transcription(
                transcription_file
            )
        # concatenate short utterances via the mapping file
        concated_data = concate_transcriptions_by_mapping_file(
            speech_folder=speech_folder,
            mapping_file_path=f"{corpus_path}/mapping/fisher_{dataset}",
            extracted_transcriptions=extracted_transcriptions,
        )
        # get translation through fisher-callhome-corpus
        if dataset != "train":
            # dev, dev2, test got four translations
            for number in range(4):
                translation_path = (
                    f"{corpus_path}/corpus/ldc/fisher_{dataset}.en.{number}"
                )
                translations = get_translations_from_path(translation_path)
                concated_data = insert_translation_into_existing_dataset(
                    data=concated_data, translations=translations
                )
        else:
            translation_path = f"{corpus_path}/corpus/ldc/fisher_{dataset}.en"
            translations = get_translations_from_path(translation_path)
            concated_data = insert_translation_into_existing_dataset(
                data=concated_data, translations=translations
            )
        # filter out empty or long transcription/translation (> 400 chars)
        concated_data = list(
            filter(
                lambda data: 0 < len(data.transcription) < 400, concated_data
            )
        )
        if dataset == "train":
            concated_data = list(
                filter(
                    lambda data: 0 < len(data.translations[0]) < 400,
                    concated_data,
                )
            )
        else:
            for number in range(4):
                concated_data = list(
                    filter(
                        lambda data: 0 < len(data.translations[number]) < 400,
                        concated_data,
                    )
                )
        # ignore empty or long utterances (>= 30 seconds)
        concated_data = list(
            filter(lambda data: 0 < data.duration < 30, concated_data)
        )
        # sort by utterance id
        concated_data = sorted(concated_data, key=lambda data: data.uid)
        # store transcription/translation/wav files
        data_dict = {}
        for data in tqdm(concated_data, desc=f"pre-processing [{dataset}]"):
            wav_save_path = f"{save_folder}/{dataset}/wav/{data.uid}.wav"
            # prepare audio files: data.wav is "<path> <channel> <start> <end>"
            wav_information = data.wav.split(" ")
            segment_audio(
                audio_path=wav_information[0],
                channel=int(wav_information[1]),
                start=int(wav_information[2]),
                end=int(wav_information[3]),
                save_path=wav_save_path,
                sample_rate=SAMPLE_RATE,
                device=device,
            )
            # prepare json entry ({data_root} is substituted at load time)
            if dataset != "train":
                data_dict[data.uid] = {
                    "wav": "{data_root}" + f"/{dataset}/wav/{data.uid}.wav",
                    "duration": data.duration,
                    "transcription": data.transcription,
                }
                for number in range(4):
                    translation_dict = {
                        f"translation_{number}": data.translations[number]
                    }
                    data_dict[data.uid].update(translation_dict)
            else:
                data_dict[data.uid] = {
                    "wav": "{data_root}" + f"/{dataset}/wav/{data.uid}.wav",
                    "duration": data.duration,
                    "transcription": data.transcription,
                    "translation_0": data.translations[0],
                    "transcription_and_translation": f"{data.transcription}\n{data.translations[0]}",
                }
        # save json
        json_path = f"{save_folder}/{dataset}/data.json"
        with open(json_path, "w", encoding="utf-8") as data_json:
            json.dump(data_dict, data_json, indent=2, ensure_ascii=False)
        logger.info(f"{json_path} successfully created!")
def skip(save_folder: str, dataset: str) -> bool:
    """Detect when fisher-callhome data preparation can be skipped.

    Preparation is considered done once the split's data.json exists.
    """
    return os.path.isfile(f"{save_folder}/{dataset}/data.json")
def check_folders(*folders) -> bool:
    """Returns False if any passed folder does not exist."""
    return all(os.path.exists(folder) for folder in folders)
def get_data_list(path: str) -> List[str]:
    """Return all lines (newline characters preserved) of a UTF-8 text file.

    The previous return annotation (``-> str``) was incorrect:
    ``readlines()`` returns a list of strings.
    """
    with open(path, "r", encoding="utf-8") as data_file:
        return data_file.readlines()
def extract_transcription(transcription_path: str) -> List[TDF]:
    """Extract transcriptions from given file.

    Parses a tab-separated .tdf file into a list of TDF records; the start
    and end times are scaled by 100 and truncated to integers.
    """
    results = []
    with open(transcription_path) as tdf_file:
        # get rid of the first three useless headers
        for line in tdf_file.readlines()[3:]:
            fields = line.split("\t")
            results.append(
                TDF(
                    channel=int(fields[1]),
                    start=int(float(fields[2]) * 100),
                    end=int(float(fields[3]) * 100),
                    transcript=clean_transcription(fields[7]),
                )
            )
    return results
def concate_transcriptions_by_mapping_file(
    speech_folder: str,
    mapping_file_path: str,
    extracted_transcriptions: List[TDF],
) -> List[Data]:
    """return concated transcriptions from the given mapping file

    Each mapping line has the form ``<uid> <i>_<j>_...`` where the numbers
    are 1-based line indices into the transcription list of file ``uid``;
    consecutive lines from i to the last index are merged into one utterance.
    """
    with open(mapping_file_path, "r", encoding="utf-8") as fisher_mapping_file:
        fisher_mapping = fisher_mapping_file.readlines()
        utterances = []
        for fisher_mapping_line in fisher_mapping:
            fisher_mapping_line = fisher_mapping_line.strip()
            fisher_mapping_line = fisher_mapping_line.split(" ")
            uid = fisher_mapping_line[0]
            need_to_be_concate_lines = fisher_mapping_line[1].split("_")
            need_to_be_concate_lines = list(map(int, need_to_be_concate_lines))
            selected_transcription = extracted_transcriptions[uid]
            # concate multiple transcripts
            if len(need_to_be_concate_lines) > 1:
                # index shift one is because id is count from 1 in file however, list start from 0
                concated_transcripts = selected_transcription[
                    need_to_be_concate_lines[0]
                    - 1 : need_to_be_concate_lines[-1]
                ]
                concated_transcripts = list(
                    map(lambda tdf: tdf.transcript, concated_transcripts)
                )
                concated_transcripts = " ".join(concated_transcripts)
                # start of the first merged line, end of the last one
                start = selected_transcription[
                    need_to_be_concate_lines[0] - 1
                ].start
                end = selected_transcription[
                    need_to_be_concate_lines[-1] - 1
                ].end
            else:
                concated_transcripts = selected_transcription[
                    need_to_be_concate_lines[-1] - 1
                ].transcript
                start = selected_transcription[
                    need_to_be_concate_lines[-1] - 1
                ].start
                end = selected_transcription[
                    need_to_be_concate_lines[-1] - 1
                ].end
            # clean up punctuation, then run Moses normalization (Spanish)
            concated_transcripts = normalize_punctuation(concated_transcripts)
            concated_transcripts = es_normalizer.normalize(concated_transcripts)
            channel = selected_transcription[
                need_to_be_concate_lines[0] - 1
            ].channel
            channel_symbol = "B" if channel == 1 else "A"
            uttrance_id = f"{uid}-{channel_symbol}-{start:06d}-{end:06d}"
            utterances.append(
                Data(
                    uid=uttrance_id,
                    transcription=concated_transcripts,
                    # segment descriptor consumed later by segment_audio
                    wav=f"{speech_folder}/{uid}.sph {channel} {start} {end}",
                    # start/end are scaled by 100, so /100 gives seconds
                    duration=(end - start) / 100,
                )
            )
        return utterances
def segment_audio(
    audio_path: str,
    channel: int,
    start: int,
    end: int,
    save_path: str,
    sample_rate: int = 16000,
    device: str = "cpu",
):
    """Segment one channel of an audio file and resample it.

    ``start``/``end`` are timestamps scaled by 100; the source audio is
    assumed to be 8 kHz, hence the conversion to 8000 Hz sample offsets.
    """
    first_frame = int(start / 100 * 8000)
    last_frame = int(end / 100 * 8000)
    data, _ = torchaudio.load(
        audio_path,
        frame_offset=first_frame,
        num_frames=last_frame - first_frame,
    )
    resampler = Resample(orig_freq=8000, new_freq=sample_rate).to(device=device)
    resampled = resampler(data)
    # Keep only the requested channel, as a (1, time) tensor
    mono = torch.unsqueeze(resampled[channel], 0)
    torchaudio.save(save_path, src=mono, sample_rate=sample_rate)
def get_transcription_files_by_dataset(
    dataset: str, transcription_folder: str
) -> List[str]:
    """Return the .tdf transcription paths belonging to the given split.

    The split file under ``splits/<dataset>`` lists the recording names;
    each name is mapped to ``<name>.tdf`` and matched against the files
    found inside ``transcription_folder``.
    """
    split_entries = get_data_list(f"splits/{dataset}")
    wanted_transcriptions = [
        f"{entry.split('.')[0].strip()}.tdf" for entry in split_entries
    ]
    return get_all_files(transcription_folder, match_or=wanted_transcriptions)
def get_translations_from_path(translation_path: str) -> List[str]:
    """Return the cleaned, normalized English translations read from the given path."""
    extracted_translations = []
    # Read in binary so stray carriage returns can be stripped before decoding.
    with open(translation_path, "rb") as translations_file:
        original_translations = translations_file.readlines()
        for translation in original_translations:
            translation = translation.replace(b"\r", b"")
            translation = translation.decode("utf-8")
            # Normalization pipeline: basic cleanup -> punctuation
            # normalization -> text normalization -> punctuation removal
            # -> tokenization, then re-join tokens with single spaces.
            translation = clean_translation(translation)
            translation = normalize_punctuation(translation)
            translation = en_normalizer.normalize(translation)
            translation = remove_punctuation(translation)
            translation = en_tokenizer.tokenize(translation)
            translation = " ".join(translation)
            extracted_translations.append(translation)
    return extracted_translations
def insert_translation_into_existing_dataset(
    data: List[Data], translations: List[str]
) -> List[Data]:
    """Append, in place, the translation matching each record in ``data``.

    The i-th translation is appended to the ``translations`` list of the
    i-th record; the mutated input list is returned for convenience.
    """
    for index, record in enumerate(data):
        record.translations.append(translations[index])
    return data
def download_translations(path: str):
    """Clone the fisher-callhome translation corpus into ``path`` if missing.

    Arguments
    ---------
    path : str
        Destination folder; nothing is done when it already exists.

    Raises
    ------
    subprocess.CalledProcessError
        If the clone or the move command fails.
    """
    repo = "https://github.com/joshua-decoder/fisher-callhome-corpus.git"
    if not os.path.isdir(path):
        logger.info(f"Translation file not found. Downloading from {repo}.")
        # check=True surfaces clone/move failures instead of silently
        # continuing with a missing corpus.
        subprocess.run(["git", "clone", repo], check=True)
        subprocess.run(["mv", "fisher-callhome-corpus", f"{path}"], check=True)
def make_data_splits(
    mapping_folder: str = "../data/fisher-callhome-corpus/mapping",
):
    """Derive the dev/dev2/test/train id lists from the corpus mapping files.

    For every fisher split, the unique recording ids found in
    ``{mapping_folder}/fisher_<split>`` (first space-separated field of each
    line) are sorted and appended, one per line, to ``./splits/<split>``.
    """
    if not os.path.exists("splits"):
        os.mkdir("splits")
    for split_name in ("dev", "dev2", "test", "train"):
        with open(
            f"{mapping_folder}/fisher_{split_name}", "r", encoding="utf-8"
        ) as mapping_file, open(
            f"./splits/{split_name}", "a+", encoding="utf-8"
        ) as split_file:
            # Each mapping line starts with the recording id; deduplicate.
            unique_ids = {
                line.strip().split(" ")[0] for line in mapping_file
            }
            split_file.writelines(
                f"{file_id}\n" for file_id in sorted(unique_ids)
            )
def remove_punctuation(text: str) -> str:
    """Strip punctuation from ``text`` while keeping apostrophes and <space> tags."""
    # Shield the two tokens that must survive punctuation removal.
    text = text.replace("<space>", "spacemark").replace("'", "apostrophe")
    # Delete every character in string.punctuation (the [[:punct:]] set).
    text = text.translate(str.maketrans("", "", string.punctuation))
    # Unshield the protected tokens.
    text = text.replace("spacemark", "<space>").replace("apostrophe", "'")
    # Collapse runs of whitespace and trim both ends.
    return re.sub(r"\s+", " ", text).strip()
def normalize_punctuation(text: str) -> str:
    """Normalize punctuation and strip annotator noise from a transcript line.

    The substitutions below reproduce the original cleanup recipe and are
    applied strictly in order: bracketed asides are removed first, then
    punctuation variants are unified, noise markers and corpus-specific
    annotator artifacts (fisher train/dev/dev2/test) are patched, and
    finally stray delimiter characters and redundant whitespace go away.
    """
    ordered_substitutions = (
        # remove brackets and whatever they contain
        (r"\([^)]*\)", " "),
        (r"\[[^]]+\]", " "),
        # normalize punctuation
        (r"_", ""),
        (r"`", "'"),  # for En
        (r"´", "'"),  # for En
        (r"\¨", "'"),  # I¨m -> I'm etc.
        # remove noisy parts
        (r"noise", ""),
        (r"laughter", ""),
        (r"background noise", ""),
        (r"background speech", ""),
        # fisher_train
        (r"i\/he", "i"),
        (r"i\/she", "i"),
        (r" \/\?", "\\?"),
        (r" \/ ", " "),
        (r"a\/c", ""),
        (r"stay\/", "stay"),
        (r"boys\/", "boys"),
        (r"right\/", "right"),
        (r"follow\/", "follow"),
        (r"Jose\/Josefina", "Jose"),
        (r"welfare\/foreign", "welfare"),
        (r"\<foreign lang=\"English\"", ""),
        (r"\/foreign/", ""),
        (r"\<plural\>", ""),
        (r"\<barely makes any sense\>", ""),
        (r"\<kind of a weird phrase\>", ""),
        (r"\<last word does not fit there\>", ""),
        (r"\<players with the meaning of singers\>", ""),
        (r"\<this phrase barely made any sense whatsoever\>", ""),
        (
            r"\<colorcito does not exist as a word so I have no ideea what he means about that\>",
            "",
        ),
        (r"\<foreign", ""),
        (r"foreign\>", ""),
        # fisher_dev
        (r"her\/his", "her"),
        (r"o\/", "o"),
        (r"co\/", "co"),
        (r"L \/ ", ""),
        (r"\<\?\?\?\>", ""),
        (r"\<from Texas\>", ""),
        (r"\<weird phrase\>", ""),
        (r"\<weird phrase\>", ""),
        (r"\<this makes no sense\>", ""),
        (r"Salvador\>", "Salvador"),
        # fisher_dev 2
        (r"A\/C", ""),
        (r"She\/he", "She"),
        (r"you\/he", "you"),
        (r"you\/she", "you"),
        (r"Um\/", "Um"),
        (r"name\/", "name"),
        (r"American\/", "American"),
        (r"\<\?\>", ""),
        (r"\<metaphoric meaning\>", ""),
        (r"\<missing text \? \>", ""),
        (
            r"\<broken phrase but I tried to guess what would it mean if it was complete\>",
            "",
        ),
        # fisher_test
        (r"she\/he", "she"),
        (r"her\/him", "her"),
        (r"is\/", "is"),
        (r"and\/or", "and"),
        (r"Then\/Well", "Then"),
        (r"fine\/well", "fine"),
        (r"Likewise\/Equally", "Likewise"),
        (r"boyfriend\/girlfriend", "boyfriend"),
        (r"living room \/ dining room", "living room"),
        (r"\<very bad phrase\>", ""),
        (r"\<poorly written phrase\>", ""),
        (r"\<this phrase barely even made sense\>", ""),
        (
            r"\<very poorly written phrase but I think this is what was supposed to mean\>",
            "",
        ),
        (r"what\)\)", "what"),
        # remove noisy punctuation
        (r"\(", " "),
        (r"\)", " "),
        (r"\<", " "),
        (r"\>", " "),
        (r"\[", " "),
        (r"\]", " "),
        (r"\{", " "),
        (r"\}", " "),
        (r"\\", " "),
        (r"\/", " "),
        (r"\;", " "),
        (r"~", " "),
        (r"=", " "),
        (r"\·", " "),
        (r"^\.\s*$", ""),  # only period sentence
        (r"^\?\s*$", ""),  # only question mark sentence
        (r"\s+", " "),  # remove consecutive whitespaces
        # remove the first and last whitespaces
        (r"^\s+", ""),
        (r"\s+$", ""),
    )
    for pattern, replacement in ordered_substitutions:
        text = re.sub(pattern, replacement, text)
    return text.lstrip()
def clean_transcription(transcription: str) -> str:
    """
    original: https://github.com/jamfly/AlloST/blob/main/egs/fisher_callhome_spanish/st1/local/fsp_data_prep.sh
    Clean a raw Fisher transcription line and return the cleaned text.
    """
    # Shield the markup delimiters so punctuation stripping keeps them.
    for delimiter, shield in (
        ("</", "lendarrow"),
        ("<", "larrow"),
        (">", "rarrow"),
    ):
        transcription = transcription.replace(delimiter, shield)
    # Drop everything in the [[:punct:]] set.
    transcription = re.sub(
        r"[{}]".format(string.punctuation), "", transcription
    )
    # Unshield the markup delimiters in the same order as the original.
    for shield, delimiter in (
        ("larrow", "<"),
        ("rarrow", ">"),
        ("lendarrow", "</"),
    ):
        transcription = transcription.replace(shield, delimiter)
    # Lowercase accented capitals explicitly, then drop stray marks.
    # NOTE(review): the "N" -> "n" entry is redundant with lower() below and
    # may be a mis-encoded "Ñ" in the original source -- kept for parity.
    for before, after in (
        ("Á", "á"),
        ("Í", "í"),
        ("Ó", "ó"),
        ("Ú", "ú"),
        ("¨", ""),
        ("·", ""),
        ("´", ""),
        ("¿", ""),
        ("¡", ""),
        ("N", "n"),
    ):
        transcription = transcription.replace(before, after)
    transcription = transcription.lower()
    return remove_labels(transcription)
def clean_translation(translation: str) -> str:
    """Return a lowercased, stripped translation without inverted punctuation marks."""
    cleaned = translation.strip().lower()
    for inverted_mark in ("¿", "¡"):
        cleaned = cleaned.replace(inverted_mark, "")
    return cleaned
def remove_labels(transcription: str):
    """remove label such as <laugh> from transcript"""
    # Strip <foreign ...> tags; the character classes cover the several
    # misspellings ("foreing", "foreigh", ...) present in the corpus.
    transcription = re.sub(
        r"<\s*[/]*\s*\s*for[ei][ei]g[nh]\s*\w*>", "", transcription
    )
    # If an <lname>(...)</lname> span exists, keep only its first occurrence.
    transcriptions = re.findall(r"<lname>\([^<]*\)<\/lname>", transcription)
    if len(transcriptions) > 0:
        transcription = transcriptions[0]
    transcription = re.sub(r"<lname[\/]*>", "", transcription)
    transcription = re.sub(r"<laugh>", "", transcription)
    transcription = re.sub(r"<\/laugh>", "", transcription)
    # Map non-speech events to a generic [noise] marker.
    transcription = re.sub(r"<\s*cough[\/]*>", "[noise]", transcription)
    transcription = re.sub(r"<sneeze[\/]*>", "[noise]", transcription)
    transcription = re.sub(r"<breath[\/]*>", "[noise]", transcription)
    transcription = re.sub(r"<lipsmack[\/]*>", "[noise]", transcription)
    transcription = re.sub(r"<background>", "", transcription)
    transcription = re.sub(r"<\/background>", "", transcription)
    # NOTE(review): the two substitutions above already removed the exact
    # <background> tags, so this catch-all mostly affects variants like
    # <background/>.
    transcription = re.sub(r"<[/]?background[/]?>", "[noise]", transcription)
    # NOTE(review): duplicate <laugh> removals -- by this point the plain
    # tags are gone, so the catch-all below only hits variants like <laugh/>.
    transcription = re.sub(r"<laugh>", "", transcription)
    transcription = re.sub(r"<\/laugh>", "", transcription)
    transcription = re.sub(r"<[/]?laugh[/]?>", "[laughter]", transcription)
    transcription = re.sub(r"<foreign langenglishhip hop", "", transcription)
    transcription = re.sub(r"<foreign langenglishonline", "", transcription)
    transcription = re.sub(r"<foreign langenglish", "", transcription)
    transcription = re.sub(r"</foreign", "", transcription)
    transcription = re.sub(r"<[/]?foreing\s*\w*>", "", transcription)
    transcription = re.sub(r"</b", "", transcription)
    transcription = re.sub(r"<foreign langengullís>", "", transcription)
    transcription = re.sub(r"foreign>", "", transcription)
    transcription = re.sub(r">", "", transcription)
    is_match = re.search(r"\(\)", transcription)
    # NOTE(review): re.search returns a Match object or None, never True, so
    # "is_match is not True" always holds and the two removals below always
    # run. The intent was probably "if is_match is None:" (keep the markers
    # when an empty "()" is present); confirm before changing, because the
    # fix would alter existing prepared outputs.
    if is_match is not True:
        transcription = re.sub(r"\[noise\]", "", transcription)
        transcription = re.sub(r"\[laughter\]", "", transcription)
        transcription = re.sub(r"^\s\s*|\s\s*$", "", transcription)
        transcription = re.sub(r"^\s\s*", " ", transcription)
    return transcription
if __name__ == "__main__":
    # Local paths and device used when the preparation script is run
    # directly; adjust these for your own setup.
    data_folder = "/mnt/md0/user_jamfly/CORPUS"
    save_folder = "data"
    device = "cuda:0"
    prepare_fisher_callhome_spanish(data_folder, save_folder, device=device)
| 25,525 | 33.682065 | 114 | py |
speechbrain | speechbrain-main/recipes/Fisher-Callhome-Spanish/ST/transformer/train.py | #!/usr/bin/env/python3
"""Recipe for training a Transformer based ST system with Fisher-Callhome.
The system employs an encoder, a decoder, and an attention mechanism
between them. Decoding is performed with beam search coupled with a neural
language model.
To run this recipe, do the following:
> python train.py hparams/conformer.yaml
Authors
* YAO-FEI, CHENG 2021
"""
import sys
import torch
import logging
import speechbrain as sb
from sacremoses import MosesDetokenizer
from hyperpyyaml import load_hyperpyyaml
from speechbrain.utils.distributed import run_on_main
logger = logging.getLogger(__name__)
en_detoeknizer = MosesDetokenizer(lang="en")
class ST(sb.core.Brain):
    """Speech-translation Brain.

    Trains an ST model with optional auxiliary objectives: an ASR branch
    (CTC and/or attention, weighted by ``ctc_weight``/``asr_weight``) and an
    MT branch (weighted by ``mt_weight``). Training uses Adam with Noam
    annealing for ``stage_one_epochs`` epochs, then switches to SGD.
    """

    def compute_forward(self, batch, stage):
        """Run wav -> features -> CNN -> Transformer and return all task outputs."""
        batch = batch.to(self.device)
        wavs, wav_lens = batch.sig
        tokens_bos, _ = batch.tokens_bos  # for translation task
        transcription_bos, _ = batch.transcription_bos  # for asr task
        transcription_tokens, _ = batch.transcription_tokens  # for mt task
        # compute features
        feats = self.hparams.compute_features(wavs)
        current_epoch = self.hparams.epoch_counter.current
        feats = self.hparams.normalize(feats, wav_lens, epoch=current_epoch)
        # forward modules
        src = self.modules.CNN(feats)
        enc_out, pred = self.modules.Transformer(
            src, tokens_bos, wav_lens, pad_idx=self.hparams.pad_index
        )
        asr_p_seq = None
        # asr output layer for seq2seq log-probabilities
        if self.hparams.asr_weight > 0 and self.hparams.ctc_weight < 1:
            asr_pred = self.modules.Transformer.forward_asr(
                enc_out,
                src,
                transcription_bos,
                wav_lens,
                pad_idx=self.hparams.pad_index,
            )
            asr_pred = self.modules.asr_seq_lin(asr_pred)
            asr_p_seq = self.hparams.log_softmax(asr_pred)
        # st output layer for seq2seq log-probabilities
        pred = self.modules.seq_lin(pred)
        p_seq = self.hparams.log_softmax(pred)
        # asr ctc
        p_ctc = None
        if self.hparams.ctc_weight > 0:
            logits = self.modules.ctc_lin(enc_out)
            p_ctc = self.hparams.log_softmax(logits)
        # mt task
        mt_p_seq = None
        if self.hparams.mt_weight > 0:
            _, mt_pred = self.modules.Transformer.forward_mt(
                transcription_tokens,
                tokens_bos,
                pad_idx=self.hparams.pad_index,
            )
            # mt output layer for seq2seq log-probabilities
            mt_pred = self.modules.seq_lin(mt_pred)
            mt_p_seq = self.hparams.log_softmax(mt_pred)
        # compute outputs
        hyps = None
        if stage == sb.Stage.TRAIN:
            hyps = None
        elif stage == sb.Stage.VALID:
            hyps = None
            current_epoch = self.hparams.epoch_counter.current
            # Beam search is expensive; only run it every
            # valid_search_interval epochs during validation.
            if current_epoch % self.hparams.valid_search_interval == 0:
                hyps, _ = self.hparams.valid_search(enc_out.detach(), wav_lens)
        elif stage == sb.Stage.TEST:
            hyps, _ = self.hparams.test_search(enc_out.detach(), wav_lens)
        return p_ctc, p_seq, asr_p_seq, mt_p_seq, wav_lens, hyps

    def compute_objectives(self, predictions, batch, stage):
        """Computes the loss given predictions and targets."""
        (p_ctc, p_seq, asr_p_seq, mt_p_seq, wav_lens, hyps,) = predictions
        ids = batch.id
        tokens_eos, tokens_eos_lens = batch.tokens_eos
        transcription_eos, transcription_eos_lens = batch.transcription_eos
        transcription_tokens, transcription_lens = batch.transcription_tokens
        # loss for different tasks
        # asr loss = ctc_weight * ctc loss + (1 - ctc_weight) * asr attention loss
        # mt loss = mt attention loss
        # st loss =
        #   (1 - asr_weight - mt_weight) * st attention loss +
        #   asr_weight * asr loss +
        #   mt_weight * mt loss
        attention_loss = 0
        asr_ctc_loss = 0
        asr_attention_loss = 0
        mt_loss = 0
        # st attention loss
        attention_loss = self.hparams.seq_cost(
            p_seq, tokens_eos, length=tokens_eos_lens,
        )
        # asr attention loss
        if self.hparams.ctc_weight < 1 and self.hparams.asr_weight > 0:
            asr_attention_loss = self.hparams.seq_cost(
                asr_p_seq, transcription_eos, length=transcription_eos_lens,
            )
        # asr ctc loss
        if self.hparams.ctc_weight > 0 and self.hparams.asr_weight > 0:
            asr_ctc_loss = self.hparams.ctc_cost(
                p_ctc, transcription_tokens, wav_lens, transcription_lens,
            )
        # mt attention loss
        if self.hparams.mt_weight > 0:
            mt_loss = self.hparams.seq_cost(
                mt_p_seq, tokens_eos, length=tokens_eos_lens,
            )
        asr_loss = (self.hparams.ctc_weight * asr_ctc_loss) + (
            1 - self.hparams.ctc_weight
        ) * asr_attention_loss
        loss = (
            (1 - self.hparams.asr_weight - self.hparams.mt_weight)
            * attention_loss
            + self.hparams.asr_weight * asr_loss
            + self.hparams.mt_weight * mt_loss
        )
        if stage != sb.Stage.TRAIN:
            current_epoch = self.hparams.epoch_counter.current
            valid_search_interval = self.hparams.valid_search_interval
            if stage == sb.Stage.TEST:
                # 4 references bleu score
                # "hparams" below is the module-level dict loaded in __main__.
                predictions = [
                    en_detoeknizer.detokenize(
                        hparams["tokenizer"].decode_ids(utt_seq).split(" ")
                    )
                    for utt_seq in hyps
                ]
                four_references = [
                    batch.translation_0,
                    batch.translation_1,
                    batch.translation_2,
                    batch.translation_3,
                ]
                targets = []
                for reference in four_references:
                    detokenized_translation = [
                        en_detoeknizer.detokenize(translation.split(" "))
                        for translation in reference
                    ]
                    targets.append(detokenized_translation)
                self.bleu_metric.append(ids, predictions, targets)
            elif (
                current_epoch % valid_search_interval == 0
                and stage == sb.Stage.VALID
            ):
                # Single-reference BLEU during validation epochs that decode.
                predictions = [
                    en_detoeknizer.detokenize(
                        hparams["tokenizer"].decode_ids(utt_seq).split(" ")
                    )
                    for utt_seq in hyps
                ]
                targets = [
                    en_detoeknizer.detokenize(translation.split(" "))
                    for translation in batch.translation_0
                ]
                self.bleu_metric.append(ids, predictions, [targets])
            # compute the accuracy of the one-step-forward prediction
            self.acc_metric.append(p_seq, tokens_eos, tokens_eos_lens)
        return loss

    def fit_batch(self, batch):
        """Train the parameters given a single batch in input"""
        # check if we need to switch optimizer
        # if so change the optimizer from Adam to SGD
        self.check_and_reset_optimizer()
        predictions = self.compute_forward(batch, sb.Stage.TRAIN)
        loss = self.compute_objectives(predictions, batch, sb.Stage.TRAIN)
        # normalize the loss by gradient_accumulation step
        (loss / self.hparams.gradient_accumulation).backward()
        if self.step % self.hparams.gradient_accumulation == 0:
            # gradient clipping & early stop if loss is not finite
            self.check_gradients(loss)
            self.optimizer.step()
            self.optimizer.zero_grad()
            # anneal lr every update
            self.hparams.noam_annealing(self.optimizer)
        return loss.detach()

    def on_stage_start(self, stage, epoch):
        """Gets called at the beginning of each epoch"""
        if stage != sb.Stage.TRAIN:
            self.acc_metric = self.hparams.acc_computer()
            self.bleu_metric = self.hparams.bleu_computer()

    def on_stage_end(self, stage, stage_loss, epoch):
        """Gets called at the end of a epoch."""
        # Compute/store important stats
        stage_stats = {"loss": stage_loss}
        if stage == sb.Stage.TRAIN:
            self.train_stats = stage_stats
        else:
            stage_stats["ACC"] = self.acc_metric.summarize()
            current_epoch = self.hparams.epoch_counter.current
            valid_search_interval = self.hparams.valid_search_interval
            if stage == sb.Stage.TEST:
                stage_stats["BLEU"] = self.bleu_metric.summarize("BLEU")
            elif (
                current_epoch % valid_search_interval == 0
                and stage == sb.Stage.VALID
            ):
                stage_stats["BLEU"] = self.bleu_metric.summarize("BLEU")
        # log stats and save checkpoint at end-of-epoch
        if stage == sb.Stage.VALID and sb.utils.distributed.if_main_process():
            current_epoch = self.hparams.epoch_counter.current
            # report different epoch stages according current stage
            current_epoch = self.hparams.epoch_counter.current
            if current_epoch <= self.hparams.stage_one_epochs:
                lr = self.hparams.noam_annealing.current_lr
                steps = self.hparams.noam_annealing.n_steps
                optimizer = self.optimizer.__class__.__name__
            else:
                lr = self.hparams.lr_sgd
                steps = -1
                optimizer = self.optimizer.__class__.__name__
            epoch_stats = {
                "epoch": epoch,
                "lr": lr,
                "steps": steps,
                "optimizer": optimizer,
            }
            self.hparams.train_logger.log_stats(
                stats_meta=epoch_stats,
                train_stats=self.train_stats,
                valid_stats=stage_stats,
            )
            self.checkpointer.save_and_keep_only(
                meta={"ACC": stage_stats["ACC"], "epoch": epoch},
                max_keys=["ACC"],
                num_to_keep=5,
            )
        elif stage == sb.Stage.TEST:
            self.hparams.train_logger.log_stats(
                stats_meta={"Epoch loaded": self.hparams.epoch_counter.current},
                test_stats=stage_stats,
            )
            with open(self.hparams.bleu_file, "a+", encoding="utf-8") as w:
                self.bleu_metric.write_stats(w)
            # save the averaged checkpoint at the end of the evaluation stage
            # delete the rest of the intermediate checkpoints
            # ACC is set to 1.1 so checkpointer only keeps the averaged checkpoint
            self.checkpointer.save_and_keep_only(
                meta={"ACC": 1.1, "epoch": epoch},
                max_keys=["ACC"],
                num_to_keep=1,
            )

    def check_and_reset_optimizer(self):
        """reset the optimizer if training enters stage 2"""
        current_epoch = self.hparams.epoch_counter.current
        if not hasattr(self, "switched"):
            self.switched = False
            if isinstance(self.optimizer, torch.optim.SGD):
                self.switched = True
        if self.switched is True:
            return
        if current_epoch > self.hparams.stage_one_epochs:
            self.optimizer = self.hparams.SGD(self.modules.parameters())
            if self.checkpointer is not None:
                self.checkpointer.add_recoverable("optimizer", self.optimizer)
            self.switched = True

    def on_fit_start(self):
        """Initialize the right optimizer on the training start"""
        super().on_fit_start()
        # if the model is resumed from stage two, reinitialize the optimizer
        current_epoch = self.hparams.epoch_counter.current
        current_optimizer = self.optimizer
        if current_epoch > self.hparams.stage_one_epochs:
            del self.optimizer
            self.optimizer = self.hparams.SGD(self.modules.parameters())
            # Load latest checkpoint to resume training if interrupted
            if self.checkpointer is not None:
                # do not reload the weights if training is interrupted right before stage 2
                group = current_optimizer.param_groups[0]
                if "momentum" not in group:
                    return
                self.checkpointer.recover_if_possible(
                    device=torch.device(self.device)
                )

    def on_evaluate_start(self, max_key=None, min_key=None):
        """perform checkpoint averge if needed"""
        super().on_evaluate_start()
        ckpts = self.checkpointer.find_checkpoints(
            max_key=max_key, min_key=min_key
        )
        ckpt = sb.utils.checkpoints.average_checkpoints(
            ckpts, recoverable_name="model", device=self.device
        )
        self.hparams.model.load_state_dict(ckpt, strict=True)
        self.hparams.model.eval()
def dataio_prepare(hparams):
    """This function prepares the datasets to be used in the brain class.
    It also defines the data processing pipeline through user-defined functions."""

    # Define audio pipeline. In this case, we simply read the path contained
    # in the variable wav with the audio reader.
    @sb.utils.data_pipeline.takes("wav")
    @sb.utils.data_pipeline.provides("sig")
    def audio_pipeline(wav):
        """Load the audio signal. This is done on the CPU in the `collate_fn`."""
        sig = sb.dataio.dataio.read_audio(wav)
        return sig

    @sb.utils.data_pipeline.takes("wav")
    @sb.utils.data_pipeline.provides("sig")
    def sp_audio_pipeline(wav):
        """Load the audio signal and apply speed perturbation (training only)."""
        sig = sb.dataio.dataio.read_audio(wav)
        sig = sig.unsqueeze(0)
        sig = hparams["speed_perturb"](sig)
        sig = sig.squeeze(0)
        return sig

    # Define text processing pipeline. We start from the raw text and then
    # encode it using the tokenizer. The tokens with BOS are used for feeding
    # decoder during training, the tokens with EOS for computing the cost function.
    # The tokens without BOS or EOS is for computing CTC loss.
    @sb.utils.data_pipeline.takes("translation_0")
    @sb.utils.data_pipeline.provides(
        "translation_0", "tokens_list", "tokens_bos", "tokens_eos", "tokens",
    )
    def one_reference_text_pipeline(translation):
        """Processes the transcriptions to generate proper labels"""
        yield translation
        tokens_list = hparams["tokenizer"].encode_as_ids(translation)
        yield tokens_list
        tokens_bos = torch.LongTensor([hparams["bos_index"]] + (tokens_list))
        yield tokens_bos
        tokens_eos = torch.LongTensor(tokens_list + [hparams["eos_index"]])
        yield tokens_eos
        tokens = torch.LongTensor(tokens_list)
        yield tokens

    @sb.utils.data_pipeline.takes(
        "translation_0", "translation_1", "translation_2", "translation_3",
    )
    @sb.utils.data_pipeline.provides(
        "translation_0",
        "translation_1",
        "translation_2",
        "translation_3",
        "tokens_list",
        "tokens_bos",
        "tokens_eos",
        "tokens",
    )
    def four_reference_text_pipeline(*translations):
        """Processes the transcriptions to generate proper labels"""
        # Only the first reference is tokenized for training targets; the
        # other three are kept verbatim for multi-reference BLEU scoring.
        yield translations[0]
        yield translations[1]
        yield translations[2]
        yield translations[3]
        tokens_list = hparams["tokenizer"].encode_as_ids(translations[0])
        yield tokens_list
        tokens_bos = torch.LongTensor([hparams["bos_index"]] + (tokens_list))
        yield tokens_bos
        tokens_eos = torch.LongTensor(tokens_list + [hparams["eos_index"]])
        yield tokens_eos
        tokens = torch.LongTensor(tokens_list)
        yield tokens

    @sb.utils.data_pipeline.takes("transcription")
    @sb.utils.data_pipeline.provides(
        "transcription",
        "transcription_list",
        "transcription_bos",
        "transcription_eos",
        "transcription_tokens",
    )
    def transcription_text_pipeline(transcription):
        """Processes the source-language transcriptions for the ASR/MT tasks."""
        yield transcription
        tokens_list = hparams["tokenizer"].encode_as_ids(transcription)
        yield tokens_list
        tokens_bos = torch.LongTensor([hparams["bos_index"]] + (tokens_list))
        yield tokens_bos
        tokens_eos = torch.LongTensor(tokens_list + [hparams["eos_index"]])
        yield tokens_eos
        tokens = torch.LongTensor(tokens_list)
        yield tokens

    datasets = {}
    data_folder = hparams["data_folder"]
    for dataset in ["train", "dev"]:
        json_path = f"{data_folder}/{dataset}/data.json"
        # The "dev" manifest is registered under the key "valid"; note that
        # the loop below re-registers "dev" (with 4 references) for testing.
        dataset = dataset if dataset == "train" else "valid"
        is_use_sp = dataset == "train" and "speed_perturb" in hparams
        audio_pipeline_func = sp_audio_pipeline if is_use_sp else audio_pipeline
        datasets[dataset] = sb.dataio.dataset.DynamicItemDataset.from_json(
            json_path=json_path,
            replacements={"data_root": data_folder},
            dynamic_items=[
                audio_pipeline_func,
                one_reference_text_pipeline,
                transcription_text_pipeline,
            ],
            output_keys=[
                "id",
                "sig",
                "duration",
                "translation_0",
                "tokens_bos",
                "tokens_eos",
                "tokens",
                "transcription",
                "transcription_list",
                "transcription_bos",
                "transcription_eos",
                "transcription_tokens",
            ],
        )
    for dataset in ["dev", "dev2", "test"]:
        json_path = f"{data_folder}/{dataset}/data.json"
        datasets[dataset] = sb.dataio.dataset.DynamicItemDataset.from_json(
            json_path=json_path,
            replacements={"data_root": data_folder},
            dynamic_items=[
                audio_pipeline,
                four_reference_text_pipeline,
                transcription_text_pipeline,
            ],
            output_keys=[
                "id",
                "sig",
                "duration",
                "translation_0",
                "translation_1",
                "translation_2",
                "translation_3",
                "tokens_bos",
                "tokens_eos",
                "tokens",
                "transcription",
                "transcription_list",
                "transcription_bos",
                "transcription_eos",
                "transcription_tokens",
            ],
        )
    # Sorting training data with ascending order makes the code much
    # faster because we minimize zero-padding. In most of the cases, this
    # does not harm the performance.
    if hparams["sorting"] == "ascending":
        # use smaller dataset to debug the model
        if hparams["debug"]:
            datasets["train"] = datasets["train"].filtered_sorted(
                key_min_value={"duration": 1},
                key_max_value={"duration": 5},
                sort_key="duration",
                reverse=True,
            )
            datasets["valid"] = datasets["valid"].filtered_sorted(
                key_min_value={"duration": 1},
                key_max_value={"duration": 5},
                sort_key="duration",
                reverse=True,
            )
        else:
            datasets["train"] = datasets["train"].filtered_sorted(
                sort_key="duration"
            )
            datasets["valid"] = datasets["valid"].filtered_sorted(
                sort_key="duration"
            )
        # When data are sorted, the loader must not reshuffle them.
        hparams["train_dataloader_opts"]["shuffle"] = False
        hparams["valid_dataloader_opts"]["shuffle"] = False
    elif hparams["sorting"] == "descending":
        # use smaller dataset to debug the model
        if hparams["debug"]:
            datasets["train"] = datasets["train"].filtered_sorted(
                key_min_value={"duration": 1},
                key_max_value={"duration": 5},
                sort_key="duration",
                reverse=True,
            )
            datasets["valid"] = datasets["valid"].filtered_sorted(
                key_min_value={"duration": 1},
                key_max_value={"duration": 5},
                sort_key="duration",
                reverse=True,
            )
        else:
            datasets["train"] = datasets["train"].filtered_sorted(
                sort_key="duration", reverse=True
            )
            datasets["valid"] = datasets["valid"].filtered_sorted(
                sort_key="duration", reverse=True
            )
        hparams["train_dataloader_opts"]["shuffle"] = False
        hparams["valid_dataloader_opts"]["shuffle"] = False
    elif hparams["sorting"] == "random":
        # use smaller dataset to debug the model
        if hparams["debug"]:
            datasets["train"] = datasets["train"].filtered_sorted(
                key_min_value={"duration": 3},
                key_max_value={"duration": 5},
                sort_key="duration",
            )
            datasets["valid"] = datasets["valid"].filtered_sorted(
                key_min_value={"duration": 1}, key_max_value={"duration": 5},
            )
        hparams["train_dataloader_opts"]["shuffle"] = True
    else:
        raise NotImplementedError(
            "sorting must be random, ascending or descending"
        )
    return datasets
if __name__ == "__main__":
    # Reading command line arguments
    hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])
    # Initialize ddp (useful only for multi-GPU DDP training)
    sb.utils.distributed.ddp_init_group(run_opts)
    # Load hyperparameters file with command-line overrides
    with open(hparams_file) as fin:
        hparams = load_hyperpyyaml(fin, overrides)
    # Create experiment directory
    sb.create_experiment_directory(
        experiment_directory=hparams["output_folder"],
        hyperparams_to_save=hparams_file,
        overrides=overrides,
    )
    # transcription/translation tokenizer
    run_on_main(hparams["pretrainer"].collect_files)
    hparams["pretrainer"].load_collected(device=run_opts["device"])
    # We can now directly create the datasets for training, valid, and test
    datasets = dataio_prepare(hparams)
    st_brain = ST(
        modules=hparams["modules"],
        opt_class=hparams["Adam"],
        hparams=hparams,
        run_opts=run_opts,
        checkpointer=hparams["checkpointer"],
    )
    st_brain.fit(
        st_brain.hparams.epoch_counter,
        datasets["train"],
        datasets["valid"],
        train_loader_kwargs=hparams["train_dataloader_opts"],
        valid_loader_kwargs=hparams["valid_dataloader_opts"],
    )
    # Evaluate on all held-out splits (dev here carries 4 references).
    for dataset in ["dev", "dev2", "test"]:
        st_brain.evaluate(
            datasets[dataset],
            test_loader_kwargs=hparams["test_dataloader_opts"],
        )
| 23,243 | 35.778481 | 91 | py |
speechbrain | speechbrain-main/recipes/UrbanSound8k/urbansound8k_prepare.py | """
Creates data manifest files from UrbanSound8k, suitable for use in SpeechBrain.
https://urbansounddataset.weebly.com/urbansound8k.html
From the authors of UrbanSound8k:
1. Don't reshuffle the data! Use the predefined 10 folds and perform 10-fold (not 5-fold) cross validation
The experiments conducted by vast majority of publications using UrbanSound8K (by ourselves and others)
evaluate classification models via 10-fold cross validation using the predefined splits*.
We strongly recommend following this procedure.
Why?
If you reshuffle the data (e.g. combine the data from all folds and generate a random train/test split)
you will be incorrectly placing related samples in both the train and test sets, leading to inflated
scores that don't represent your model's performance on unseen data. Put simply, your results will be wrong.
Your results will NOT be comparable to previous results in the literature, meaning any claims to an
improvement on previous research will be invalid. Even if you don't reshuffle the data, evaluating using
different splits (e.g. 5-fold cross validation) will mean your results are not comparable to previous research.
2. Don't evaluate just on one split! Use 10-fold (not 5-fold) cross validation and average the scores
We have seen reports that only provide results for a single train/test split, e.g. train on folds 1-9,
test on fold 10 and report a single accuracy score. We strongly advise against this. Instead, perform
10-fold cross validation using the provided folds and report the average score.
Why?
Not all the splits are as "easy". That is, models tend to obtain much higher scores when trained on folds
1-9 and tested on fold 10, compared to (e.g.) training on folds 2-10 and testing on fold 1. For this reason,
it is important to evaluate your model on each of the 10 splits and report the average accuracy.
Again, your results will NOT be comparable to previous results in the literature.
* 10-fold cross validation using the predefined folds: train on data from 9 of the 10 predefined folds and
test on data from the remaining fold. Repeat this process 10 times (each time using a different set of
9 out of the 10 folds for training and the remaining fold for testing). Finally report the average classification
accuracy over all 10 experiments (as an average score + standard deviation, or, even better, as a boxplot).
Authors:
* David Whipps, 2021
"""
import os
import json
import logging
import ntpath
import torchaudio
from speechbrain.dataio.dataio import read_audio
from speechbrain.dataio.dataio import load_data_csv
logger = logging.getLogger(__name__)
URBAN_SOUND_8K_DOWNLOAD_FORM_URL = (
"https://urbansounddataset.weebly.com/download-urbansound8k.html"
)
MODIFIED_METADATA_FILE_NAME = "UrbanSound8k_speechbrain.csv"
ACCEPTABLE_FOLD_NUMS = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
def _echo(msg):
    """Send *msg* both to stdout and to the module logger.

    The original code duplicated every user-facing message as a ``print``
    plus a ``logger.info`` call; this helper keeps that behavior in one place.
    """
    print(msg)
    logger.info(msg)


def _as_fold_list(folds):
    """Return *folds* as a list, wrapping a bare int in a one-element list."""
    if type(folds) is int:
        return [folds]
    return folds


def _has_invalid_fold(fold_nums):
    """Return True when any entry of *fold_nums* is outside folds 1-10."""
    return any(fold_num not in ACCEPTABLE_FOLD_NUMS for fold_num in fold_nums)


def prepare_urban_sound_8k(
    data_folder,
    audio_data_folder,
    save_json_train,
    save_json_valid,
    save_json_test,
    train_fold_nums=[1, 2, 3, 4, 5, 6, 7, 8],
    valid_fold_nums=[9],
    test_fold_nums=[10],
    skip_manifest_creation=False,
):
    """
    Prepares the json files for the UrbanSound8k dataset.
    Prompts to download the dataset if it is not found in the `data_folder`.

    Arguments
    ---------
    data_folder : str
        Path to the folder where the UrbanSound8k dataset metadata is stored.
    audio_data_folder: str
        Path to the folder where the UrbanSound8k dataset audio files are stored.
    save_json_train : str
        Path where the train data specification file will be saved.
    save_json_valid : str
        Path where the validation data specification file will be saved.
    save_json_test : str
        Path where the test data specification file will be saved.
    train_fold_nums: list or int (integers [1,10])
        A list of integers defining which pre-defined "folds" to use for training. Must be
        exclusive of valid_fold_nums and test_fold_nums.
    valid_fold_nums: list or int (integers [1,10])
        A list of integers defining which pre-defined "folds" to use for validation. Must be
        exclusive of train_fold_nums and test_fold_nums.
    test_fold_nums: list or int (integers [1,10])
        A list of integers defining which pre-defined "folds" to use for test. Must be
        exclusive of train_fold_nums and valid_fold_nums.
    skip_manifest_creation : bool
        If True, return immediately without (re)creating the json manifests.

    Example
    -------
    >>> data_folder = '/path/to/UrbanSound8k'
    >>> prepare_urban_sound_8k(data_folder, data_folder + '/audio', 'train.json', 'valid.json', 'test.json', [1,2,3,4,5,6,7,8], [9], [10])
    """
    # Tease params to correct type if necessary
    train_fold_nums = _as_fold_list(train_fold_nums)
    valid_fold_nums = _as_fold_list(valid_fold_nums)
    test_fold_nums = _as_fold_list(test_fold_nums)

    # Validate passed fold params: every fold number must be in [1, 10].
    if _has_invalid_fold(train_fold_nums):
        _echo(
            f"Train fold numbers {train_fold_nums}, contains an invalid value. Must be in {ACCEPTABLE_FOLD_NUMS}"
        )
        return
    if _has_invalid_fold(valid_fold_nums):
        _echo(
            f"Validation fold numbers {valid_fold_nums}, contains an invalid value. Must be in {ACCEPTABLE_FOLD_NUMS}"
        )
        return
    if _has_invalid_fold(test_fold_nums):
        _echo(
            f"Test fold numbers {test_fold_nums}, contains an invalid value. Must be in {ACCEPTABLE_FOLD_NUMS}"
        )
        return

    # Check that train/valid and train/test folds are mutually exclusive
    if folds_overlap(train_fold_nums, valid_fold_nums):
        _echo(
            f"Train {train_fold_nums}, and Valid {valid_fold_nums} folds must be mutually exclusive!"
        )
        return
    if folds_overlap(train_fold_nums, test_fold_nums):
        _echo(
            f"Train {train_fold_nums} and Test {test_fold_nums} folds must be mutually exclusive!"
        )
        return

    # If the dataset doesn't exist yet, prompt the user to set or download it
    if not check_folders(audio_data_folder):
        prompt_download_urban_sound_8k(audio_data_folder)
        return

    # Don't need to do this every single time
    if skip_manifest_creation is True:
        return

    # If our modified metadata file does not exist, create it
    urban_sound_8k_speechbrain_metadata_csv_path = os.path.join(
        os.path.abspath(data_folder), "metadata/", MODIFIED_METADATA_FILE_NAME
    )
    if not os.path.exists(urban_sound_8k_speechbrain_metadata_csv_path):
        urban_sound_8k_speechbrain_metadata_csv_path = create_metadata_speechbrain_file(
            data_folder
        )
        # NOTE(review): create_metadata_speechbrain_file returns None when the
        # source metadata csv is missing, in which case load_data_csv below
        # will fail — confirm whether an explicit error is preferable.

    # Read the metadata into a dictionary.
    # Every key of this dictionary is one of the sound filenames, without the
    # ".wav" suffix.
    metadata = load_data_csv(urban_sound_8k_speechbrain_metadata_csv_path)

    # List files and create manifest from list
    logger.info(
        f"Creating {save_json_train}, {save_json_valid}, and {save_json_test}"
    )

    # Creating json files
    create_json(metadata, audio_data_folder, train_fold_nums, save_json_train)
    create_json(metadata, audio_data_folder, valid_fold_nums, save_json_valid)
    create_json(metadata, audio_data_folder, test_fold_nums, save_json_test)
def create_json(metadata, audio_data_folder, folds_list, json_file):
    """
    Creates the json file given the dataset metadata.

    Arguments
    ---------
    metadata: dict
        A dictionary containing the UrbanSound8k metadata file modified for
        SpeechBrain, such that keys are IDs (which are the .wav file names
        without the file extension).
    audio_data_folder : str
        Folder containing the ``fold1`` ... ``fold10`` audio subfolders.
    folds_list : list of int
        The list of folds [1,10] to include in this batch
    json_file : str
        The path of the output json file
    """
    # Processing all the wav files in the list
    json_dict = {}
    for ID, sample_metadata in metadata.items():
        fold_num = int(sample_metadata["fold"])
        if fold_num in folds_list:
            # Reading the signal (to retrieve duration in seconds)
            wav_file = os.path.join(
                os.path.abspath(audio_data_folder),
                "fold" + str(fold_num) + "/",
                sample_metadata["slice_file_name"],
            )
            try:
                signal = read_audio(wav_file)
                file_info = torchaudio.info(wav_file)

                # If we're using sox/soundfile backend, file_info will have the old type
                if isinstance(
                    file_info, torchaudio.backend.common.AudioMetaData
                ):
                    duration = signal.shape[0] / file_info.sample_rate
                else:
                    duration = signal.shape[0] / file_info[0].rate

                # Create entry for this sample ONLY if we have successfully
                # read-in the file using SpeechBrain/torchaudio
                json_dict[ID] = {
                    "wav": sample_metadata["slice_file_name"],
                    "classID": int(sample_metadata["classID"]),
                    "class_string": sample_metadata["class_string"],
                    "salience": int(sample_metadata["salience"]),
                    "fold": sample_metadata["fold"],
                    "duration": duration,
                }
            except Exception:
                print(
                    f"There was a problem reading the file:{wav_file}. Skipping duration field for it."
                )
                logger.exception(
                    f"There was a problem reading the file:{wav_file}. Skipping it."
                )

    # Writing the dictionary to the json file.
    # Make sure the output directory exists first. os.makedirs (instead of the
    # original exists()+mkdir()) also creates intermediate directories and
    # avoids a race between the check and the creation; the guard on
    # parent_dir handles json_file paths with no directory component, for
    # which os.mkdir("") would raise.
    parent_dir = os.path.dirname(json_file)
    if parent_dir:
        os.makedirs(parent_dir, exist_ok=True)
    with open(json_file, mode="w") as json_f:
        json.dump(json_dict, json_f, indent=2)

    logger.info(f"{json_file} successfully created!")
def folds_overlap(list1, list2):
    """Returns True if either argument is not a list OR the lists share items.

    Uses isinstance (the idiomatic type check) instead of comparing
    ``type(x) != list``; a non-list argument is treated as an error and
    reported as an overlap, matching the original contract.

    Arguments
    ---------
    list1 : list
        First list of fold numbers.
    list2 : list
        Second list of fold numbers.
    """
    if not isinstance(list1, list) or not isinstance(list2, list):
        return True
    return any(item in list1 for item in list2)
def check_folders(*folders):
    """Returns True only when every passed folder path exists."""
    return all(os.path.exists(folder) for folder in folders)
def full_path_to_audio_file(data_folder, slice_file_name, fold_num):
    """Get path to file given slice file name and fold number

    Arguments
    ---------
    data_folder : str
        UrbanSound8k data folder.
    slice_file_name : str
        Filename.
    fold_num : int
        Fold number.

    Returns
    ------
    string containing absolute path to corresponding file
    """
    fold_dir = "fold" + str(fold_num) + "/"
    root = os.path.abspath(data_folder)
    return os.path.join(root, "audio/", fold_dir, slice_file_name)
def create_metadata_speechbrain_file(data_folder):
    """Build the SpeechBrain-adapted copy of the UrbanSound8K metadata csv.

    Arguments
    ---------
    data_folder : str
        UrbanSound8k data folder.

    Returns
    ------
    string containing absolute path to metadata csv file modified for
    SpeechBrain, or None if the source file was not found
    """
    import pandas as pd

    source_csv_path = os.path.join(
        os.path.abspath(data_folder), "metadata/UrbanSound8K.csv"
    )
    if not os.path.exists(source_csv_path):
        return None

    metadata_df = pd.read_csv(source_csv_path)

    # SpeechBrain wants an ID column: the wav filename minus its extension.
    metadata_df["ID"] = metadata_df.apply(
        lambda row: removesuffix(row["slice_file_name"], ".wav"), axis=1
    )
    # "class" is a Python keyword; rename to something safe to use as a key.
    metadata_df = metadata_df.rename(columns={"class": "class_string"})

    target_csv_path = os.path.join(
        os.path.abspath(data_folder), "metadata/", MODIFIED_METADATA_FILE_NAME
    )
    metadata_df.to_csv(target_csv_path, index=False)
    return target_csv_path
def path_leaf(path):
    """Return the final component of *path*, tolerating a trailing separator."""
    directory, leaf = ntpath.split(path)
    if leaf:
        return leaf
    # Path ended in a separator: fall back to the last directory name.
    return ntpath.basename(directory)
def removesuffix(somestring, suffix):
    """Removes a suffix from a string

    Arguments
    ---------
    somestring : str
        Any string.
    suffix : str
        Suffix to be removed from somestring.

    Returns
    ------
    string resulting from suffix removed from somestring, if found, unchanged otherwise
    """
    # Guard against an empty suffix: the original `somestring[:-len("")]`
    # evaluated to somestring[:0] and wrongly returned "".
    if suffix and somestring.endswith(suffix):
        return somestring[: -len(suffix)]
    return somestring
def prompt_download_urban_sound_8k(destination):
    """Prompt the user to obtain the dataset manually.

    Arguments
    ---------
    destination : str
        Place to put dataset.
    """
    message = (
        f"UrbanSound8k data is missing from {destination}!"
        f"\nRequest it from here: {URBAN_SOUND_8K_DOWNLOAD_FORM_URL}"
    )
    print(message)
# Testing
if __name__ == "__main__":
    import speechbrain

    # Data preparation, to be run on only one process.
    # NOTE(review): the paths below are hard-coded to the original author's
    # machine; adjust them before running this module stand-alone.
    speechbrain.utils.distributed.run_on_main(
        prepare_urban_sound_8k,
        kwargs={
            "data_folder": "/Volumes/BigMule/BigDevDocuments/UrbanSound8K",
            "audio_data_folder": "/Volumes/BigMule/BigDevDocuments/UrbanSound8K/audio",
            "save_json_train": "./UrbanSound8k/manifest/train.json",
            "save_json_valid": "./UrbanSound8k/manifest/valid.json",
            "save_json_test": "./UrbanSound8k/manifest/test.json",
            "train_fold_nums": [1, 2, 3, 4, 5, 6, 7, 8],
            "valid_fold_nums": [9],
            "test_fold_nums": [10],
        },
    )
| 14,774 | 37.476563 | 122 | py |
speechbrain | speechbrain-main/recipes/UrbanSound8k/SoundClassification/custom_model.py | """
This file contains a very simple TDNN module to use for sound class identification.
To replace this model, change the `!new:` tag in the hyperparameter file
to refer to a built-in SpeechBrain model or another file containing
a custom PyTorch module.
Authors
* David Whipps 2021
* Ala Eddine Limame 2021
Adapted From:
* Nauman Dawalatabad 2020
* Mirco Ravanelli 2020
"""
import torch # noqa: F401
import torch.nn as nn
import speechbrain as sb
from speechbrain.nnet.pooling import StatisticsPooling
from speechbrain.nnet.CNN import Conv1d
from speechbrain.nnet.linear import Linear
from speechbrain.nnet.normalization import BatchNorm1d
class Xvector(torch.nn.Module):
    """X-vector embedding extractor for sound class recognition.

    A stack of TDNN blocks (dilated 1-D convolutions), followed by statistics
    pooling and a final linear projection to the embedding size.

    Arguments
    ---------
    device : str
        Kept for interface compatibility; not referenced inside this class.
    activation : torch class
        A class for constructing the activation layers.
    tdnn_blocks : int
        Number of time-delay neural (TDNN) layers.
    tdnn_channels : list of ints
        Output channels for TDNN layer.
    tdnn_kernel_sizes : list of ints
        List of kernel sizes for each TDNN layer.
    tdnn_dilations : list of ints
        List of dilations for kernels in each TDNN layer.
    lin_neurons : int
        Number of neurons in linear layers.
    in_channels : int
        Number of input feature channels.

    Example
    -------
    >>> compute_xvect = Xvector()
    >>> input_feats = torch.rand([5, 10, 40])
    >>> outputs = compute_xvect(input_feats)
    >>> outputs.shape
    torch.Size([5, 1, 512])
    """

    def __init__(
        self,
        device="cpu",
        activation=torch.nn.LeakyReLU,
        tdnn_blocks=5,
        tdnn_channels=[512, 512, 512, 512, 1500],
        tdnn_kernel_sizes=[5, 3, 3, 1, 1],
        tdnn_dilations=[1, 2, 3, 1, 1],
        lin_neurons=512,
        in_channels=40,
    ):
        super().__init__()
        self.blocks = nn.ModuleList()

        # Each TDNN block is conv -> activation -> batch norm. Note that the
        # normalization is applied AFTER the activation here; this improves
        # sound classification performance a bit.
        for idx in range(tdnn_blocks):
            out_channels = tdnn_channels[idx]
            self.blocks.append(
                Conv1d(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    kernel_size=tdnn_kernel_sizes[idx],
                    dilation=tdnn_dilations[idx],
                )
            )
            self.blocks.append(activation())
            self.blocks.append(BatchNorm1d(input_size=out_channels))
            in_channels = out_channels

        # Statistical pooling converts a variable-length sequence into a
        # fixed-length vector holding the per-channel mean and std deviation.
        self.blocks.append(StatisticsPooling())

        # Final linear transformation on top of the pooled statistics.
        self.blocks.append(
            Linear(
                input_size=out_channels * 2,  # mean + std
                n_neurons=lin_neurons,
                bias=True,
                combine_dims=False,
            )
        )

    def forward(self, x, lens=None):
        """Returns the x-vectors.

        Arguments
        ---------
        x : torch.Tensor
            Input feature tensor.
        lens : torch.Tensor, optional
            Relative lengths, forwarded only to layers that accept them.
        """
        # Some layers take a `lengths` kwarg and some do not; try the richer
        # call first and fall back on TypeError (original behavior).
        for layer in self.blocks:
            try:
                x = layer(x, lengths=lens)
            except TypeError:
                x = layer(x)
        return x
class Classifier(sb.nnet.containers.Sequential):
    """This class implements the last MLP on the top of xvector features.

    Arguments
    ---------
    input_shape : tuple
        Expected shape of an example input.
    activation : torch class
        A class for constructing the activation layers.
    lin_blocks : int
        Number of linear layers.
    lin_neurons : int
        Number of neurons in linear layers.
    out_neurons : int
        Number of output neurons.

    Example
    -------
    >>> input_feats = torch.rand([5, 10, 40])
    >>> compute_xvect = Xvector()
    >>> xvects = compute_xvect(input_feats)
    >>> classify = Classifier(input_shape=xvects.shape)
    >>> output = classify(xvects)
    >>> output.shape
    torch.Size([5, 1, 1211])
    """

    def __init__(
        self,
        input_shape,
        activation=torch.nn.LeakyReLU,
        lin_blocks=1,
        lin_neurons=512,
        out_neurons=1211,
    ):
        super().__init__(input_shape=input_shape)

        # Activation + normalization applied directly to the x-vector input.
        self.append(activation(), layer_name="act")
        self.append(sb.nnet.normalization.BatchNorm1d, layer_name="norm")

        # Container for the fully-connected blocks (only created when at
        # least one block is requested; the loop below is a no-op otherwise).
        if lin_blocks > 0:
            self.append(sb.nnet.containers.Sequential, layer_name="DNN")

        # Adding fully-connected layers: each block is linear -> act -> norm.
        for block_index in range(lin_blocks):
            block_name = f"block_{block_index}"
            self.DNN.append(
                sb.nnet.containers.Sequential, layer_name=block_name
            )
            self.DNN[block_name].append(
                sb.nnet.linear.Linear,
                n_neurons=lin_neurons,
                bias=True,
                layer_name="linear",
            )
            self.DNN[block_name].append(activation(), layer_name="act")
            self.DNN[block_name].append(
                sb.nnet.normalization.BatchNorm1d, layer_name="norm"
            )

        # Final Softmax classifier (log-probabilities over the classes).
        self.append(
            sb.nnet.linear.Linear, n_neurons=out_neurons, layer_name="out"
        )
        self.append(
            sb.nnet.activations.Softmax(apply_log=True), layer_name="softmax"
        )
| 5,731 | 29.489362 | 83 | py |
speechbrain | speechbrain-main/recipes/UrbanSound8k/SoundClassification/train.py | #!/usr/bin/python3
"""Recipe for training sound class embeddings (e.g, xvectors) using the UrbanSound8k.
We employ an encoder followed by a sound classifier.
To run this recipe, use the following command:
> python train_class_embeddings.py {hyperparameter_file}
Using your own hyperparameter file or one of the following:
hparams/train_x_vectors.yaml (for standard xvectors)
hparams/train_ecapa_tdnn.yaml (for the ecapa+tdnn system)
Authors
* David Whipps 2021
* Ala Eddine Limame 2021
Based on VoxCeleb By:
* Mirco Ravanelli 2020
* Hwidong Na 2020
* Nauman Dawalatabad 2020
"""
import os
import sys
import torch
import torchaudio
import speechbrain as sb
from hyperpyyaml import load_hyperpyyaml
from speechbrain.utils.distributed import run_on_main
from urbansound8k_prepare import prepare_urban_sound_8k
from sklearn.metrics import confusion_matrix
import numpy as np
from confusion_matrix_fig import create_cm_fig
class UrbanSound8kBrain(sb.core.Brain):
    """Brain subclass for sound class embedding training.

    Wires together train-time waveform augmentation, feature extraction,
    the embedding model and the classifier, plus loss/accuracy and
    confusion-matrix bookkeeping for the validation and test stages.
    """

    def compute_forward(self, batch, stage):
        """Computation pipeline based on a encoder + sound classifier.
        Data augmentation and environmental corruption are applied to the
        input sound.
        """
        batch = batch.to(self.device)
        wavs, lens = batch.sig

        if stage == sb.Stage.TRAIN:

            # Applying the augmentation pipeline
            wavs_aug_tot = []
            wavs_aug_tot.append(wavs)
            for count, augment in enumerate(self.hparams.augment_pipeline):

                # Apply augment
                wavs_aug = augment(wavs, lens)

                # Managing speed change: crop or zero-pad the augmented
                # waveforms back to the original length so they can be
                # concatenated with the clean batch.
                if wavs_aug.shape[1] > wavs.shape[1]:
                    wavs_aug = wavs_aug[:, 0 : wavs.shape[1]]
                else:
                    zero_sig = torch.zeros_like(wavs)
                    zero_sig[:, 0 : wavs_aug.shape[1]] = wavs_aug
                    wavs_aug = zero_sig

                # Either stack the augmented copies alongside the clean batch
                # (concat_augment) or replace the batch with the augmented one.
                if self.hparams.concat_augment:
                    wavs_aug_tot.append(wavs_aug)
                else:
                    wavs = wavs_aug
                    wavs_aug_tot[0] = wavs

            wavs = torch.cat(wavs_aug_tot, dim=0)
            # n_augment is read later by compute_objectives to replicate labels
            self.n_augment = len(wavs_aug_tot)
            lens = torch.cat([lens] * self.n_augment)

        # Feature extraction and normalization
        feats = self.modules.compute_features(wavs)

        if self.hparams.amp_to_db:
            Amp2db = torchaudio.transforms.AmplitudeToDB(
                stype="power", top_db=80
            )  # try "magnitude" Vs "power"? db= 80, 50...
            feats = Amp2db(feats)

        # Normalization
        if self.hparams.normalize:
            feats = self.modules.mean_var_norm(feats, lens)

        # Embeddings + sound classifier
        embeddings = self.modules.embedding_model(feats)
        outputs = self.modules.classifier(embeddings)

        return outputs, lens

    def compute_objectives(self, predictions, batch, stage):
        """Computes the loss using class-id as label.
        """
        predictions, lens = predictions
        uttid = batch.id
        classid, _ = batch.class_string_encoded

        # Concatenate labels (due to data augmentation)
        if stage == sb.Stage.TRAIN:
            classid = torch.cat([classid] * self.n_augment, dim=0)

        loss = self.hparams.compute_cost(predictions, classid, lens)

        # Some schedulers (e.g. Noam) want a tick on every batch.
        if hasattr(self.hparams.lr_annealing, "on_batch_end"):
            self.hparams.lr_annealing.on_batch_end(self.optimizer)

        # Append this batch of losses to the loss metric for easy
        self.loss_metric.append(
            uttid, predictions, classid, lens, reduction="batch"
        )

        # Confusion matrices (accumulated across batches; reset per stage
        # in on_stage_start)
        if stage != sb.Stage.TRAIN:
            y_true = classid.cpu().detach().numpy().squeeze(-1)
            y_pred = predictions.cpu().detach().numpy().argmax(-1).squeeze(-1)

            if stage == sb.Stage.VALID:
                confusion_matix = confusion_matrix(
                    y_true,
                    y_pred,
                    labels=sorted(self.hparams.label_encoder.ind2lab.keys()),
                )
                self.valid_confusion_matrix += confusion_matix
            if stage == sb.Stage.TEST:
                confusion_matix = confusion_matrix(
                    y_true,
                    y_pred,
                    labels=sorted(self.hparams.label_encoder.ind2lab.keys()),
                )
                self.test_confusion_matrix += confusion_matix

        # Compute Accuracy using MetricStats
        self.acc_metric.append(
            uttid, predict=predictions, target=classid, lengths=lens
        )

        if stage != sb.Stage.TRAIN:
            self.error_metrics.append(uttid, predictions, classid, lens)

        return loss

    def on_stage_start(self, stage, epoch=None):
        """Gets called at the beginning of each epoch.

        Arguments
        ---------
        stage : sb.Stage
            One of sb.Stage.TRAIN, sb.Stage.VALID, or sb.Stage.TEST.
        epoch : int
            The currently-starting epoch. This is passed
            `None` during the test stage.
        """
        # Set up statistics trackers for this stage
        self.loss_metric = sb.utils.metric_stats.MetricStats(
            metric=sb.nnet.losses.nll_loss
        )

        # Compute Accuracy using MetricStats
        # Define function taking (prediction, target, length) for eval
        def accuracy_value(predict, target, lengths):
            """Computes Accuracy"""
            nbr_correct, nbr_total = sb.utils.Accuracy.Accuracy(
                predict, target, lengths
            )
            acc = torch.tensor([nbr_correct / nbr_total])
            return acc

        self.acc_metric = sb.utils.metric_stats.MetricStats(
            metric=accuracy_value, n_jobs=1
        )

        # Confusion matrices: zeroed at stage start, filled batch-by-batch
        # in compute_objectives.
        if stage == sb.Stage.VALID:
            self.valid_confusion_matrix = np.zeros(
                shape=(self.hparams.out_n_neurons, self.hparams.out_n_neurons),
                dtype=int,
            )
        if stage == sb.Stage.TEST:
            self.test_confusion_matrix = np.zeros(
                shape=(self.hparams.out_n_neurons, self.hparams.out_n_neurons),
                dtype=int,
            )

        # Set up evaluation-only statistics trackers
        if stage != sb.Stage.TRAIN:
            self.error_metrics = self.hparams.error_stats()

    def on_stage_end(self, stage, stage_loss, epoch=None):
        """Gets called at the end of an epoch.

        Arguments
        ---------
        stage : sb.Stage
            One of sb.Stage.TRAIN, sb.Stage.VALID, sb.Stage.TEST
        stage_loss : float
            The average loss for all of the data processed in this stage.
        epoch : int
            The currently-starting epoch. This is passed
            `None` during the test stage.
        """
        # Compute/store important stats
        if stage == sb.Stage.TRAIN:
            self.train_loss = stage_loss
            self.train_stats = {
                "loss": self.train_loss,
                "acc": self.acc_metric.summarize(
                    "average"
                ),  # "acc": self.train_acc_metric.summarize(),
            }
        # Summarize Valid statistics from the stage for record-keeping.
        elif stage == sb.Stage.VALID:
            valid_stats = {
                "loss": stage_loss,
                "acc": self.acc_metric.summarize(
                    "average"
                ),  # "acc": self.valid_acc_metric.summarize(),
                "error": self.error_metrics.summarize("average"),
            }
        # Summarize Test statistics from the stage for record-keeping.
        else:
            test_stats = {
                "loss": stage_loss,
                "acc": self.acc_metric.summarize(
                    "average"
                ),  # "acc": self.test_acc_metric.summarize(),
                "error": self.error_metrics.summarize("average"),
            }

        # Perform end-of-iteration things, like annealing, logging, etc.
        if stage == sb.Stage.VALID:
            old_lr, new_lr = self.hparams.lr_annealing(epoch)
            sb.nnet.schedulers.update_learning_rate(self.optimizer, new_lr)

            # Tensorboard logging
            if self.hparams.use_tensorboard:
                self.hparams.tensorboard_train_logger.log_stats(
                    stats_meta={"Epoch": epoch},
                    train_stats=self.train_stats,
                    valid_stats=valid_stats,
                )
                # Log confusion matrix fig to tensorboard
                cm_fig = create_cm_fig(
                    self.valid_confusion_matrix,
                    display_labels=list(
                        self.hparams.label_encoder.ind2lab.values()
                    ),
                )
                self.hparams.tensorboard_train_logger.writer.add_figure(
                    "Validation Confusion Matrix", cm_fig, epoch
                )  # TODO use global_step from writer

            # Per class accuracy from Validation confusion matrix
            # NOTE(review): per_class_acc_arr_str below is computed but not
            # passed to any logger in this branch — confirm whether it should
            # be logged like in the TEST branch.
            per_class_acc_arr = np.diag(self.valid_confusion_matrix) / np.sum(
                self.valid_confusion_matrix, axis=1
            )
            per_class_acc_arr_str = "\n" + "\n".join(
                "{:}: {:.3f}".format(
                    self.hparams.label_encoder.decode_ndim(class_id), class_acc
                )
                for class_id, class_acc in enumerate(per_class_acc_arr)
            )

            # The train_logger writes a summary to stdout and to the logfile.
            self.hparams.train_logger.log_stats(
                stats_meta={"epoch": epoch, "lr": old_lr},
                train_stats=self.train_stats,
                valid_stats=valid_stats,
            )

            # Save the current checkpoint and delete previous checkpoints,
            self.checkpointer.save_and_keep_only(
                meta=valid_stats, min_keys=["error"]
            )

        # We also write statistics about test data to stdout and to the logfile.
        if stage == sb.Stage.TEST:
            # Per class accuracy from Test confusion matrix
            per_class_acc_arr = np.diag(self.test_confusion_matrix) / np.sum(
                self.test_confusion_matrix, axis=1
            )
            per_class_acc_arr_str = "\n" + "\n".join(
                "{:}: {:.3f}".format(class_id, class_acc)
                for class_id, class_acc in enumerate(per_class_acc_arr)
            )

            self.hparams.train_logger.log_stats(
                {
                    "Epoch loaded": self.hparams.epoch_counter.current,
                    "\n Per Class Accuracy": per_class_acc_arr_str,
                    "\n Confusion Matrix": "\n{:}\n".format(
                        self.test_confusion_matrix
                    ),
                },
                test_stats=test_stats,
            )
def dataio_prep(hparams):
    """Creates the datasets and their data processing pipelines.

    Arguments
    ---------
    hparams : dict
        Loaded hyperparameters; must provide "audio_data_folder",
        "sample_rate", "data_folder", "save_folder" and the three
        "*_annotation" json manifest paths.

    Returns
    -------
    tuple
        (dict of "train"/"valid"/"test" DynamicItemDatasets,
        the CategoricalEncoder fitted on the class labels).
    """
    data_audio_folder = hparams["audio_data_folder"]
    config_sample_rate = hparams["sample_rate"]
    label_encoder = sb.dataio.encoder.CategoricalEncoder()
    # TODO use SB implementation but need to make sure it give the same results as PyTorch
    # resampler = sb.processing.speech_augmentation.Resample(orig_freq=latest_file_sr, new_freq=config_sample_rate)
    # The resampler is stored in hparams so audio_pipeline can rebuild it
    # lazily whenever the source sample rate changes between files.
    hparams["resampler"] = torchaudio.transforms.Resample(
        new_freq=config_sample_rate
    )

    # 2. Define audio pipeline:
    @sb.utils.data_pipeline.takes("wav", "fold")
    @sb.utils.data_pipeline.provides("sig")
    def audio_pipeline(wav, fold):
        """Load the signal, and pass it and its length to the corruption class.
        This is done on the CPU in the `collate_fn`."""
        wave_file = data_audio_folder + "/fold{:}/{:}".format(fold, wav)

        sig, read_sr = torchaudio.load(wave_file)

        # If multi-channels, downmix it to a mono channel
        sig = torch.squeeze(sig)
        if len(sig.shape) > 1:
            sig = torch.mean(sig, dim=0)

        # Convert sample rate to required config_sample_rate
        if read_sr != config_sample_rate:
            # Re-initialize sampler if source file sample rate changed compared to last file
            if read_sr != hparams["resampler"].orig_freq:
                hparams["resampler"] = torchaudio.transforms.Resample(
                    orig_freq=read_sr, new_freq=config_sample_rate
                )
            # Resample audio
            sig = hparams["resampler"].forward(sig)

        return sig

    # 3. Define label pipeline:
    @sb.utils.data_pipeline.takes("class_string")
    @sb.utils.data_pipeline.provides("class_string", "class_string_encoded")
    def label_pipeline(class_string):
        yield class_string
        class_string_encoded = label_encoder.encode_label_torch(class_string)
        yield class_string_encoded

    # Define datasets. We also connect the dataset with the data processing
    # functions defined above.
    datasets = {}
    data_info = {
        "train": hparams["train_annotation"],
        "valid": hparams["valid_annotation"],
        "test": hparams["test_annotation"],
    }

    for dataset in data_info:
        datasets[dataset] = sb.dataio.dataset.DynamicItemDataset.from_json(
            json_path=data_info[dataset],
            replacements={"data_root": hparams["data_folder"]},
            dynamic_items=[audio_pipeline, label_pipeline],
            output_keys=["id", "sig", "class_string_encoded"],
        )

    # Load or compute the label encoder (with multi-GPU DDP support)
    # Please, take a look into the lab_enc_file to see the label to index
    # mapping.
    lab_enc_file = os.path.join(hparams["save_folder"], "label_encoder.txt")
    label_encoder.load_or_create(
        path=lab_enc_file,
        from_didatasets=[datasets["train"]],
        output_key="class_string",
    )

    return datasets, label_encoder
if __name__ == "__main__":

    # This flag enables the inbuilt cudnn auto-tuner
    torch.backends.cudnn.benchmark = True

    # CLI:
    hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])

    # Initialize ddp (useful only for multi-GPU DDP training)
    sb.utils.distributed.ddp_init_group(run_opts)

    # Load hyperparameters file with command-line overrides
    with open(hparams_file) as fin:
        hparams = load_hyperpyyaml(fin, overrides)

    # Create experiment directory
    sb.create_experiment_directory(
        experiment_directory=hparams["output_folder"],
        hyperparams_to_save=hparams_file,
        overrides=overrides,
    )

    # Tensorboard logging (optional; the logger import is deferred so the
    # dependency is only needed when the feature is enabled)
    if hparams["use_tensorboard"]:
        from speechbrain.utils.train_logger import TensorboardLogger

        hparams["tensorboard_train_logger"] = TensorboardLogger(
            hparams["tensorboard_logs_folder"]
        )

    # Data preparation (json manifests); run on the main process only.
    run_on_main(
        prepare_urban_sound_8k,
        kwargs={
            "data_folder": hparams["data_folder"],
            "audio_data_folder": hparams["audio_data_folder"],
            "save_json_train": hparams["train_annotation"],
            "save_json_valid": hparams["valid_annotation"],
            "save_json_test": hparams["test_annotation"],
            "train_fold_nums": hparams["train_fold_nums"],
            "valid_fold_nums": hparams["valid_fold_nums"],
            "test_fold_nums": hparams["test_fold_nums"],
            "skip_manifest_creation": hparams["skip_manifest_creation"],
        },
    )

    # Dataset IO prep: creating Dataset objects and proper encodings for phones
    datasets, label_encoder = dataio_prep(hparams)
    hparams["label_encoder"] = label_encoder

    class_labels = list(label_encoder.ind2lab.values())
    print("Class Labels:", class_labels)

    urban_sound_8k_brain = UrbanSound8kBrain(
        modules=hparams["modules"],
        opt_class=hparams["opt_class"],
        hparams=hparams,
        run_opts=run_opts,
        checkpointer=hparams["checkpointer"],
    )

    # The `fit()` method iterates the training loop, calling the methods
    # necessary to update the parameters of the model. Since all objects
    # with changing state are managed by the Checkpointer, training can be
    # stopped at any point, and will be resumed on next call.
    urban_sound_8k_brain.fit(
        epoch_counter=urban_sound_8k_brain.hparams.epoch_counter,
        train_set=datasets["train"],
        valid_set=datasets["valid"],
        train_loader_kwargs=hparams["dataloader_options"],
        valid_loader_kwargs=hparams["dataloader_options"],
    )

    # Load the best checkpoint for evaluation
    test_stats = urban_sound_8k_brain.evaluate(
        test_set=datasets["test"],
        min_key="error",
        progressbar=True,
        test_loader_kwargs=hparams["dataloader_options"],
    )
| 16,914 | 36.175824 | 115 | py |
speechbrain | speechbrain-main/recipes/CommonLanguage/common_language_prepare.py | """
Data preparation of CommonLangauge dataset for LID.
Download: https://zenodo.org/record/5036977#.YNo1mHVKg5k
Author
------
Pavlo Ruban 2021
"""
import os
import csv
import logging
import torchaudio
from tqdm.contrib import tzip
from speechbrain.utils.data_utils import get_all_files
logger = logging.getLogger(__name__)
# Language labels present in the CommonLanguage dataset.
# NOTE(review): spellings such as "Mangolian", "Ukranian" and
# "Romansh_Sursilvan" presumably mirror the dataset's folder names — do not
# "correct" them here without checking against the released archive.
# NOTE(review): this constant is not referenced elsewhere in this module —
# confirm it is consumed by downstream recipes before removing.
LANGUAGES = [
    "Arabic",
    "Basque",
    "Breton",
    "Catalan",
    "Chinese_China",
    "Chinese_Hongkong",
    "Chinese_Taiwan",
    "Chuvash",
    "Czech",
    "Dhivehi",
    "Dutch",
    "English",
    "Esperanto",
    "Estonian",
    "French",
    "Frisian",
    "Georgian",
    "German",
    "Greek",
    "Hakha_Chin",
    "Indonesian",
    "Interlingua",
    "Italian",
    "Japanese",
    "Kabyle",
    "Kinyarwanda",
    "Kyrgyz",
    "Latvian",
    "Maltese",
    "Mangolian",
    "Persian",
    "Polish",
    "Portuguese",
    "Romanian",
    "Romansh_Sursilvan",
    "Russian",
    "Sakha",
    "Slovenian",
    "Spanish",
    "Swedish",
    "Tamil",
    "Tatar",
    "Turkish",
    "Ukranian",
    "Welsh",
]
def prepare_common_language(data_folder, save_folder, skip_prep=False):
    """
    Prepares the csv files for the CommonLanguage dataset for LID.
    Download: https://drive.google.com/uc?id=1Vzgod6NEYO1oZoz_EcgpZkUO9ohQcO1F

    Arguments
    ---------
    data_folder : str
        Path to the folder where the CommonLanguage dataset for LID is stored.
        This path should include the multi: /datasets/CommonLanguage
    save_folder : str
        The directory where to store the csv files.
    skip_prep: bool
        If True, skip data preparation.

    Example
    -------
    >>> from recipes.CommonLanguage.common_language_prepare import prepare_common_language
    >>> data_folder = '/datasets/CommonLanguage'
    >>> save_folder = 'exp/CommonLanguage_exp'
    >>> prepare_common_language(data_folder, save_folder, skip_prep=False)
    """
    if skip_prep:
        return

    # Setting the save folder
    os.makedirs(save_folder, exist_ok=True)

    # Output csv paths
    save_csv_train = os.path.join(save_folder, "train.csv")
    save_csv_dev = os.path.join(save_folder, "dev.csv")
    save_csv_test = os.path.join(save_folder, "test.csv")

    # If all csv files already exist, skip the data preparation entirely
    if skip(save_csv_train, save_csv_dev, save_csv_test):
        csv_exists = " already exists, skipping data preparation!"
        for existing_csv in (save_csv_train, save_csv_dev, save_csv_test):
            logger.info(existing_csv + csv_exists)
        return

    # Additional checks to make sure the data folder contains Common Language
    check_common_language_folder(data_folder)

    # Audio files extensions
    extension = [".wav"]

    # Create the signal list of train, dev, and test sets.
    data_split = create_sets(data_folder, extension)

    # Creating csv files for training, dev and test data
    create_csv(wav_list=data_split["train"], csv_file=save_csv_train)
    create_csv(wav_list=data_split["dev"], csv_file=save_csv_dev)
    create_csv(wav_list=data_split["test"], csv_file=save_csv_test)
def skip(save_csv_train, save_csv_dev, save_csv_test):
    """
    Detects if the CommonLanguage data preparation for LID has been already done.
    If the preparation has been done, we can skip it.

    Returns
    -------
    bool
        if True, the preparation phase can be skipped.
        if False, it must be done.
    """
    return all(
        os.path.isfile(csv_path)
        for csv_path in (save_csv_train, save_csv_dev, save_csv_test)
    )
def create_sets(data_folder, extension):
    """
    Creates lists for train, dev and test sets with data from the data_folder

    Arguments
    ---------
    data_folder : str
        Path of the CommonLanguage dataset.
    extension: list of file extentions
        List of strings with file extentions that correspond to the audio files
        in the CommonLanguage dataset

    Returns
    -------
    dictionary containing train, dev, and test splits.
    """
    # Datasets initialization
    datasets = {"train", "dev", "test"}
    data_split = {split_name: [] for split_name in datasets}

    # A language folder qualifies only if it contains all three split subdirs
    languages = []
    for entry in os.listdir(data_folder):
        entry_path = os.path.join(data_folder, entry)
        if os.path.isdir(entry_path) and datasets.issubset(
            os.listdir(entry_path)
        ):
            languages.append(entry)

    logger.info(f"{len(languages)} languages detected!")

    # Fill the train, dev and test datasets with audio filenames
    for language in languages:
        for dataset in datasets:
            split_folder = os.path.join(data_folder, language, dataset)
            data_split[dataset].extend(
                get_all_files(split_folder, match_and=extension)
            )

    logger.info("Data successfully split!")
    return data_split
def create_csv(wav_list, csv_file):
    """
    Creates the csv file given a list of wav files.

    Arguments
    ---------
    wav_list : list of str
        The list of wav files.
    csv_file : str
        The path of the output csv file.
    """
    msg = f"Creating csv lists in {csv_file} ..."
    logger.info(msg)

    csv_lines = []
    total_duration = 0.0
    # Row index; only incremented for files that could actually be read
    idx = 0

    # tzip is used purely for its progress bar; it yields 1-element tuples
    for (wav_file,) in tzip(wav_list):
        path_parts = wav_file.split(os.path.sep)
        file_name, wav_format = os.path.splitext(path_parts[-1])

        # Peeking at the signal (to retrieve duration in seconds)
        if os.path.isfile(wav_file):
            info = torchaudio.info(wav_file)
        else:
            # Bug fix: report the missing file path itself; the previous
            # message logged only the *length* of the file name.
            logger.info("\tError loading: %s", wav_file)
            continue

        audio_duration = info.num_frames / info.sample_rate
        total_duration += audio_duration

        # Actual name of the language: a fixed ancestor folder of the wav
        # (<data>/<language>/<split>/.../file.wav) — assumes that layout.
        language = path_parts[-4]

        # Create a row with the whole utterance
        csv_lines.append(
            [
                idx,  # ID
                wav_file,  # File name
                wav_format,  # File format (extension, leading dot included)
                str(audio_duration),  # Duration (sec)
                language,  # Language
            ]
        )
        idx += 1

    # CSV column titles go in the first row
    csv_header = ["ID", "wav", "wav_format", "duration", "language"]
    csv_lines.insert(0, csv_header)

    # Writing the csv lines
    with open(csv_file, mode="w", encoding="utf-8") as csv_f:
        csv_writer = csv.writer(
            csv_f, delimiter=",", quotechar='"', quoting=csv.QUOTE_MINIMAL
        )
        for line in csv_lines:
            csv_writer.writerow(line)

    # Final prints
    logger.info(f"{csv_file} successfully created!")
    logger.info(f"Number of samples: {len(wav_list)}.")
    logger.info(f"Total duration: {round(total_duration / 3600, 2)} hours.")
def check_common_language_folder(data_folder):
    """
    Check if the data folder actually contains the CommonLanguage dataset.
    If not, raises an error.

    Returns
    -------
    None

    Raises
    ------
    FileNotFoundError
        If data folder doesn't contain at least two languages.
    """
    # A language-ID setup needs at least two known languages to be present
    found_languages = set(os.listdir(data_folder)) & set(LANGUAGES)
    if len(found_languages) < 2:
        raise FileNotFoundError(
            f"{data_folder} must have at least two languages "
            "from CommonLanguage in it."
        )
| 7,969 | 24.876623 | 94 | py |
speechbrain | speechbrain-main/recipes/CommonLanguage/lang_id/train.py | #!/usr/bin/env python3
import os
import sys
import torch
import logging
import torchaudio
import speechbrain as sb
from hyperpyyaml import load_hyperpyyaml
from common_language_prepare import prepare_common_language
"""Recipe for training a LID system with CommonLanguage.
To run this recipe, do the following:
> python train.py hparams/train_ecapa_tdnn.yaml
Author
------
* Mirco Ravanelli 2021
* Pavlo Ruban 2021
"""
logger = logging.getLogger(__name__)
# Brain class for Language ID training
# Brain class for Language ID training
class LID(sb.Brain):
    """SpeechBrain Brain subclass implementing the training/eval loop for
    a language-identification classifier (features -> embedding -> classifier)."""

    def prepare_features(self, wavs, stage):
        """Prepare the features for computation, including augmentation.

        Arguments
        ---------
        wavs : tuple
            Input signals (tensor) and their relative lengths (tensor).
        stage : sb.Stage
            The current stage of training.
        """
        wavs, lens = wavs

        # Add augmentation if specified. In this version of augmentation, we
        # concatenate the original and the augment batches in a single bigger
        # batch. This is more memory-demanding, but helps to improve the
        # performance. Change it if you run OOM.
        if stage == sb.Stage.TRAIN:
            wavs_noise = self.modules.env_corrupt(wavs, lens)
            # Doubles the batch: clean half + corrupted half
            wavs = torch.cat([wavs, wavs_noise], dim=0)
            lens = torch.cat([lens, lens], dim=0)
            wavs = self.hparams.augmentation(wavs, lens)

        # Feature extraction and normalization
        feats = self.modules.compute_features(wavs)
        feats = self.modules.mean_var_norm_input(feats, lens)

        return feats, lens

    def compute_forward(self, batch, stage):
        """Runs all the computation of that transforms the input into the
        output probabilities over the N classes.

        Arguments
        ---------
        batch : PaddedBatch
            This batch object contains all the relevant tensors for computation.
        stage : sb.Stage
            One of sb.Stage.TRAIN, sb.Stage.VALID, or sb.Stage.TEST.

        Returns
        -------
        predictions : Tensor
            Tensor that contains the posterior probabilities over the N classes.
        """
        # We first move the batch to the appropriate device.
        batch = batch.to(self.device)

        # Compute features, embeddings and output
        feats, lens = self.prepare_features(batch.sig, stage)
        embeddings = self.modules.embedding_model(feats)
        outputs = self.modules.classifier(embeddings)

        return outputs, lens

    def compute_objectives(self, inputs, batch, stage):
        """Computes the loss given the predicted and targeted outputs.

        Arguments
        ---------
        inputs : tensors
            The output tensors from `compute_forward`.
        batch : PaddedBatch
            This batch object contains all the relevant tensors for computation.
        stage : sb.Stage
            One of sb.Stage.TRAIN, sb.Stage.VALID, or sb.Stage.TEST.

        Returns
        -------
        loss : torch.Tensor
            A one-element tensor used for backpropagating the gradient.
        """
        predictions, lens = inputs
        targets = batch.language_encoded.data

        # Concatenate labels (due to data augmentation): prepare_features
        # doubled the batch during training, so labels must be doubled too.
        if stage == sb.Stage.TRAIN:
            targets = torch.cat([targets, targets], dim=0)
            lens = torch.cat([lens, lens], dim=0)

            # Some schedulers (e.g. cyclic) step on every batch
            if hasattr(self.hparams.lr_annealing, "on_batch_end"):
                self.hparams.lr_annealing.on_batch_end(self.optimizer)

        loss = self.hparams.compute_cost(predictions, targets)

        if stage != sb.Stage.TRAIN:
            self.error_metrics.append(batch.id, predictions, targets, lens)

        return loss

    def on_stage_start(self, stage, epoch=None):
        """Gets called at the beginning of each epoch.

        Arguments
        ---------
        stage : sb.Stage
            One of sb.Stage.TRAIN, sb.Stage.VALID, or sb.Stage.TEST.
        epoch : int
            The currently-starting epoch. This is passed
            `None` during the test stage.
        """
        # Set up evaluation-only statistics trackers
        if stage != sb.Stage.TRAIN:
            self.error_metrics = self.hparams.error_stats()

    def on_stage_end(self, stage, stage_loss, epoch=None):
        """Gets called at the end of an epoch.

        Arguments
        ---------
        stage : sb.Stage
            One of sb.Stage.TRAIN, sb.Stage.VALID, sb.Stage.TEST
        stage_loss : float
            The average loss for all of the data processed in this stage.
        epoch : int
            The currently-starting epoch. This is passed
            `None` during the test stage.
        """
        # Store the train loss until the validation stage.
        if stage == sb.Stage.TRAIN:
            self.train_loss = stage_loss

        # Summarize the statistics from the stage for record-keeping.
        else:
            stats = {
                "loss": stage_loss,
                "error": self.error_metrics.summarize("average"),
            }

        # At the end of validation...
        if stage == sb.Stage.VALID:
            old_lr, new_lr = self.hparams.lr_annealing(epoch)
            sb.nnet.schedulers.update_learning_rate(self.optimizer, new_lr)

            # The train_logger writes a summary to stdout and to the logfile.
            self.hparams.train_logger.log_stats(
                {"Epoch": epoch, "lr": old_lr},
                train_stats={"loss": self.train_loss},
                valid_stats=stats,
            )

            # Save the current checkpoint and delete previous checkpoints,
            # keeping only the best one according to the error rate.
            self.checkpointer.save_and_keep_only(meta=stats, min_keys=["error"])

        # We also write statistics about test data to stdout and to the logfile.
        if stage == sb.Stage.TEST:
            self.hparams.train_logger.log_stats(
                {"Epoch loaded": self.hparams.epoch_counter.current},
                test_stats=stats,
            )
def dataio_prep(hparams):
    """This function prepares the datasets to be used in the brain class.
    It also defines the data processing pipeline through user-defined functions.
    We expect `prepare_common_language` to have been called before this,
    so that the `train.csv`, `dev.csv`, and `test.csv` manifest files
    are available.

    Arguments
    ---------
    hparams : dict
        This dictionary is loaded from the `train.yaml` file, and it includes
        all the hyperparameters needed for dataset construction and loading.

    Returns
    -------
    datasets : dict
        Contains the keys "train", "dev" and "test", each mapped to the
        corresponding DynamicItemDataset object.
    language_encoder : sb.dataio.encoder.CategoricalEncoder
        The fitted label encoder mapping language names to indices.
    """
    # Initialization of the label encoder. The label encoder assigns to each
    # of the observed labels a unique index (e.g, 'lang01': 0, 'lang02': 1, ..)
    language_encoder = sb.dataio.encoder.CategoricalEncoder()

    # Define audio pipeline
    @sb.utils.data_pipeline.takes("wav")
    @sb.utils.data_pipeline.provides("sig")
    def audio_pipeline(wav):
        """Load the signal, and pass it and its length to the corruption class.
        This is done on the CPU in the `collate_fn`."""
        sig, _ = torchaudio.load(wav)
        # (channels, time) -> (time,) mono signal
        sig = sig.transpose(0, 1).squeeze(1)
        return sig

    # Define label pipeline:
    @sb.utils.data_pipeline.takes("language")
    @sb.utils.data_pipeline.provides("language", "language_encoded")
    def label_pipeline(language):
        yield language
        language_encoded = language_encoder.encode_label_torch(language)
        yield language_encoded

    # Define datasets. We also connect the dataset with the data processing
    # functions defined above.
    datasets = {}
    for dataset in ["train", "dev", "test"]:
        datasets[dataset] = sb.dataio.dataset.DynamicItemDataset.from_csv(
            csv_path=hparams[f"{dataset}_csv"],
            replacements={"data_root": hparams["data_folder"]},
            dynamic_items=[audio_pipeline, label_pipeline],
            output_keys=["id", "sig", "language_encoded"],
        )

    # Load or compute the label encoder (with multi-GPU DDP support)
    # Please, take a look into the lab_enc_file to see the label to index
    # mapping.
    language_encoder_file = os.path.join(
        hparams["save_folder"], "language_encoder.txt"
    )
    language_encoder.load_or_create(
        path=language_encoder_file,
        from_didatasets=[datasets["train"]],
        output_key="language",
    )

    return datasets, language_encoder
# Recipe begins!
if __name__ == "__main__":

    # Reading command line arguments.
    hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])

    # Initialize ddp (useful only for multi-GPU DDP training).
    sb.utils.distributed.ddp_init_group(run_opts)

    # Load hyperparameters file with command-line overrides.
    with open(hparams_file) as fin:
        hparams = load_hyperpyyaml(fin, overrides)

    # Create experiment directory
    sb.create_experiment_directory(
        experiment_directory=hparams["output_folder"],
        hyperparams_to_save=hparams_file,
        overrides=overrides,
    )

    # Data preparation, to be run on only one process.
    sb.utils.distributed.run_on_main(
        prepare_common_language,
        kwargs={
            "data_folder": hparams["data_folder"],
            "save_folder": hparams["save_folder"],
            "skip_prep": hparams["skip_prep"],
        },
    )

    # Create dataset objects "train", "dev", and "test" and language_encoder
    datasets, language_encoder = dataio_prep(hparams)

    # Fetch and load pretrained modules
    sb.utils.distributed.run_on_main(hparams["pretrainer"].collect_files)
    hparams["pretrainer"].load_collected(device=run_opts["device"])

    # Initialize the Brain object to prepare for mask training.
    lid_brain = LID(
        modules=hparams["modules"],
        opt_class=hparams["opt_class"],
        hparams=hparams,
        run_opts=run_opts,
        checkpointer=hparams["checkpointer"],
    )

    # The `fit()` method iterates the training loop, calling the methods
    # necessary to update the parameters of the model. Since all objects
    # with changing state are managed by the Checkpointer, training can be
    # stopped at any point, and will be resumed on next call.
    lid_brain.fit(
        epoch_counter=lid_brain.hparams.epoch_counter,
        train_set=datasets["train"],
        valid_set=datasets["dev"],
        train_loader_kwargs=hparams["train_dataloader_options"],
        valid_loader_kwargs=hparams["test_dataloader_options"],
    )

    # Load the best checkpoint for evaluation
    test_stats = lid_brain.evaluate(
        test_set=datasets["test"],
        min_key="error",
        test_loader_kwargs=hparams["test_dataloader_options"],
    )
| 10,881 | 33.328076 | 80 | py |
speechbrain | speechbrain-main/recipes/Aishell1Mix/separation/dynamic_mixing.py | import speechbrain as sb
import numpy as np
import torch
import torchaudio
import glob
import os
from speechbrain.dataio.batch import PaddedBatch
from tqdm import tqdm
import warnings
import pyloudnorm
import random
"""
The functions to implement Dynamic Mixing For SpeechSeparation
Authors
* Samuele Cornell 2021
* Cem Subakan 2021
"""
def build_spk_hashtable_aishell1mix(hparams):
    """
    Build a dictionary of speaker-utterance pairs to be used in dynamic mixing.

    Arguments
    ---------
    hparams : dict
        Hyperparameters; uses "base_folder_dm" (root folder containing one
        sub-folder per speaker) and "sample_rate".

    Returns
    -------
    spk_hashtable : dict
        Maps each speaker id to the list of its utterance wav paths.
    spk_weights : list of int
        Number of utterances per speaker (aligned with the dict's key
        order), used later as sampling weights.
    """
    aishell1_utterances = glob.glob(
        os.path.join(hparams["base_folder_dm"], "**/*.wav"), recursive=True
    )

    # just for one file check if the sample rate is correct
    # (the corpus is assumed homogeneous in sampling rate)
    assert (
        torchaudio.info(aishell1_utterances[0]).sample_rate
        == hparams["sample_rate"]
    )

    spk_hashtable = {}
    for utt in tqdm(aishell1_utterances):
        # The parent folder of each wav file is the speaker id
        path = os.path.normpath(utt)
        spk_id = path.split(os.sep)[-2]
        # setdefault replaces the manual "if key not in dict" dance
        spk_hashtable.setdefault(spk_id, []).append(utt)

    # calculate weights for each speaker (len of list of utterances)
    spk_weights = [len(utts) for utts in spk_hashtable.values()]

    return spk_hashtable, spk_weights
def get_wham_noise_filenames(hparams):
    "This function lists the WHAM! noise files to be used in dynamic mixing"
    # Data folder should point to Aishell1Mix2 folder, whose training split
    # is called "train"; the non-Aishell1 layout uses "tr" instead.
    split = "train" if "Aishell1" in hparams["data_folder"] else "tr"

    sample_rate = hparams["sample_rate"]
    if sample_rate == 8000:
        band = "wav8k"
    elif sample_rate == 16000:
        band = "wav16k"
    else:
        raise ValueError("Unsupported Sampling Rate")

    noise_path = "{}/min/{}/noise/".format(band, split)
    return glob.glob(os.path.join(hparams["data_folder"], noise_path, "*.wav"))
def dynamic_mix_data_prep_aishell1mix(hparams):
    """
    Dynamic mixing for Aishell1Mix.

    Builds a train DynamicItemDataset whose mixtures are synthesized on the
    fly (random speakers + optional WHAM! noise) and wraps it in a DataLoader.
    """
    # 1. Define datasets
    train_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["train_data"],
        replacements={"data_root": hparams["data_folder"]},
    )

    # we build a dictionary where keys are speaker ids and entries are lists
    # of utterance files of that speaker
    print("Building the speaker hashtable for dynamic mixing")
    spk_hashtable, spk_weights = build_spk_hashtable_aishell1mix(hparams)

    spk_list = [x for x in spk_hashtable.keys()]
    # Normalize utterance counts into sampling probabilities
    spk_weights = [x / sum(spk_weights) for x in spk_weights]

    if hparams["use_wham_noise"]:
        noise_files = get_wham_noise_filenames(hparams)

    @sb.utils.data_pipeline.takes("mix_wav")
    @sb.utils.data_pipeline.provides(
        "mix_sig", "s1_sig", "s2_sig", "s3_sig", "noise_sig"
    )
    def audio_pipeline(
        mix_wav,
    ):  # this is dummy --> it means one epoch will be same as without dynamic mixing
        """
        This audio pipeline defines the compute graph for dynamic mixing
        """
        # Sample num_spks distinct speakers, weighted by utterance count
        speakers = np.random.choice(
            spk_list, hparams["num_spks"], replace=False, p=spk_weights
        )

        if hparams["use_wham_noise"]:
            noise_file = np.random.choice(noise_files, 1, replace=False)
            noise, fs_read = torchaudio.load(noise_file[0])
            noise = noise.squeeze()

        # select one random utterance per selected speaker
        sources = []
        spk_files = [
            np.random.choice(spk_hashtable[spk], 1, False)[0]
            for spk in speakers
        ]

        # Common mixture length: shortest utterance, capped by the
        # configured training signal length
        minlen = min(
            *[torchaudio.info(x).num_frames for x in spk_files],
            hparams["training_signal_len"],
        )

        meter = pyloudnorm.Meter(hparams["sample_rate"])

        MAX_AMP = 0.9
        MIN_LOUDNESS = -33
        MAX_LOUDNESS = -25

        def normalize(signal, is_noise=False):
            """
            This function normalizes the audio signals for loudness
            """
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                c_loudness = meter.integrated_loudness(signal)
                # Noise is mixed 5 LUFS quieter than the speech sources
                if is_noise:
                    target_loudness = random.uniform(
                        MIN_LOUDNESS - 5, MAX_LOUDNESS - 5
                    )
                else:
                    target_loudness = random.uniform(MIN_LOUDNESS, MAX_LOUDNESS)
                signal = pyloudnorm.normalize.loudness(
                    signal, c_loudness, target_loudness
                )

                # check for clipping
                if np.max(np.abs(signal)) >= 1:
                    signal = signal * MAX_AMP / np.max(np.abs(signal))
            return torch.from_numpy(signal)

        for i, spk_file in enumerate(spk_files):
            # select random offset
            length = torchaudio.info(spk_file).num_frames
            start = 0
            stop = length
            if length > minlen:  # take a random window
                start = np.random.randint(0, length - minlen)
                stop = start + minlen

            tmp, fs_read = torchaudio.load(
                spk_file, frame_offset=start, num_frames=stop - start,
            )

            tmp = tmp[0].numpy()
            tmp = normalize(tmp)
            sources.append(tmp)

        sources = torch.stack(sources)
        mixture = torch.sum(sources, 0)

        if hparams["use_wham_noise"]:
            len_noise = len(noise)
            len_mix = len(mixture)
            min_len = min(len_noise, len_mix)
            noise = normalize(noise.numpy(), is_noise=True)
            mixture = mixture[:min_len] + noise[:min_len]

        # check for clipping; rescale mixture AND sources by the same
        # factor so the targets stay consistent with the mixture
        max_amp_insig = mixture.abs().max().item()
        if max_amp_insig > MAX_AMP:
            weight = MAX_AMP / max_amp_insig
        else:
            weight = 1

        sources = weight * sources
        mixture = weight * mixture

        yield mixture
        for i in range(hparams["num_spks"]):
            yield sources[i]

        # If the number of speakers is 2, yield None for the 3rd speaker
        if hparams["num_spks"] == 2:
            yield None

        if hparams["use_wham_noise"]:
            noise = noise * weight
            yield noise
        else:
            yield None

    sb.dataio.dataset.add_dynamic_item([train_data], audio_pipeline)
    sb.dataio.dataset.set_output_keys(
        [train_data],
        ["id", "mix_sig", "s1_sig", "s2_sig", "s3_sig", "noise_sig"],
    )

    # Re-seed each worker from OS entropy so workers draw different mixes
    train_data = torch.utils.data.DataLoader(
        train_data,
        batch_size=hparams["dataloader_opts"]["batch_size"],
        num_workers=hparams["dataloader_opts"]["num_workers"],
        collate_fn=PaddedBatch,
        worker_init_fn=lambda x: np.random.seed(
            int.from_bytes(os.urandom(4), "little") + x
        ),
    )
    return train_data
| 7,142 | 30.46696 | 93 | py |
speechbrain | speechbrain-main/recipes/Aishell1Mix/separation/train.py | #!/usr/bin/env/python3
"""Recipe for training a neural speech separation system on Aishell1Mix2/3 datasets.
The system employs an encoder, a decoder, and a masking network.
To run this recipe, do the following:
> python train.py hparams/sepformer-aishell1mix2.yaml
> python train.py hparams/sepformer-aishell1mix3.yaml
The experiment file is flexible enough to support different neural
networks. By properly changing the parameter files, you can try
different architectures. The script supports both aishell1mix2 and
aishell1mix3.
Authors
* Cem Subakan 2020
* Mirco Ravanelli 2020
* Samuele Cornell 2020
* Mirko Bronzi 2020
* Jianyuan Zhong 2020
"""
import os
import csv
import sys
import torch
import torchaudio
import numpy as np
from tqdm import tqdm
import speechbrain as sb
import torch.nn.functional as F
from torch.cuda.amp import autocast
import speechbrain.nnet.schedulers as schedulers
from speechbrain.utils.distributed import run_on_main
from hyperpyyaml import load_hyperpyyaml
import logging
# from: recipes/LibriMix/separation/train.py
# from: recipes/LibriMix/separation/train.py
class Separation(sb.Brain):
    """Brain subclass implementing training/evaluation of the
    encoder / masking-network / decoder source-separation model."""

    def compute_forward(self, mix, targets, stage, noise=None):
        """Forward computations from the mixture to the separated signals."""
        # Unpack lists and put tensors in the right device
        mix, mix_lens = mix
        mix, mix_lens = mix.to(self.device), mix_lens.to(self.device)

        # Convert targets to tensor (stacked along a trailing speaker dim)
        targets = torch.cat(
            [targets[i][0].unsqueeze(-1) for i in range(self.hparams.num_spks)],
            dim=-1,
        ).to(self.device)

        # Add speech distortions (train only, no gradients through them)
        if stage == sb.Stage.TRAIN:
            with torch.no_grad():
                if self.hparams.use_speedperturb or self.hparams.use_rand_shift:
                    mix, targets = self.add_speed_perturb(targets, mix_lens)

                    # the mixture is rebuilt as the sum of perturbed sources
                    mix = targets.sum(-1)

                if self.hparams.use_wham_noise:
                    noise = noise.to(self.device)
                    len_noise = noise.shape[1]
                    len_mix = mix.shape[1]
                    min_len = min(len_noise, len_mix)

                    # add the noise
                    mix = mix[:, :min_len] + noise[:, :min_len]

                    # fix the length of targets also
                    targets = targets[:, :min_len, :]

                if self.hparams.use_wavedrop:
                    mix = self.hparams.wavedrop(mix, mix_lens)

                if self.hparams.limit_training_signal_len:
                    mix, targets = self.cut_signals(mix, targets)

        # Separation
        mix_w = self.hparams.Encoder(mix)
        est_mask = self.hparams.MaskNet(mix_w)
        mix_w = torch.stack([mix_w] * self.hparams.num_spks)
        sep_h = mix_w * est_mask

        # Decoding
        est_source = torch.cat(
            [
                self.hparams.Decoder(sep_h[i]).unsqueeze(-1)
                for i in range(self.hparams.num_spks)
            ],
            dim=-1,
        )

        # T changed after conv1d in encoder, fix it here
        T_origin = mix.size(1)
        T_est = est_source.size(1)
        if T_origin > T_est:
            est_source = F.pad(est_source, (0, 0, 0, T_origin - T_est))
        else:
            est_source = est_source[:, :T_origin, :]

        return est_source, targets

    def compute_objectives(self, predictions, targets):
        """Computes the si-snr loss"""
        return self.hparams.loss(targets, predictions)

    def fit_batch(self, batch):
        """Trains one batch"""
        # Unpacking batch list
        mixture = batch.mix_sig
        targets = [batch.s1_sig, batch.s2_sig]
        if self.hparams.use_wham_noise:
            noise = batch.noise_sig[0]
        else:
            noise = None
        if self.hparams.num_spks == 3:
            targets.append(batch.s3_sig)

        if self.auto_mix_prec:
            # Mixed-precision path (uses the GradScaler)
            with autocast():
                predictions, targets = self.compute_forward(
                    mixture, targets, sb.Stage.TRAIN, noise
                )
                loss = self.compute_objectives(predictions, targets)

                # hard threshold the easy dataitems: keep only the
                # per-item losses above the threshold
                if self.hparams.threshold_byloss:
                    th = self.hparams.threshold
                    loss_to_keep = loss[loss > th]
                    if loss_to_keep.nelement() > 0:
                        loss = loss_to_keep.mean()
                else:
                    loss = loss.mean()

            if (
                loss < self.hparams.loss_upper_lim and loss.nelement() > 0
            ):  # the fix for computational problems
                self.scaler.scale(loss).backward()
                if self.hparams.clip_grad_norm >= 0:
                    self.scaler.unscale_(self.optimizer)
                    torch.nn.utils.clip_grad_norm_(
                        self.modules.parameters(), self.hparams.clip_grad_norm,
                    )
                self.scaler.step(self.optimizer)
                self.scaler.update()
            else:
                # Loss exploded or no item survived thresholding: skip batch
                self.nonfinite_count += 1
                logger.info(
                    "infinite loss or empty loss! it happened {} times so far - skipping this batch".format(
                        self.nonfinite_count
                    )
                )
                loss.data = torch.tensor(0).to(self.device)
        else:
            predictions, targets = self.compute_forward(
                mixture, targets, sb.Stage.TRAIN, noise
            )
            loss = self.compute_objectives(predictions, targets)

            if self.hparams.threshold_byloss:
                th = self.hparams.threshold
                loss_to_keep = loss[loss > th]
                if loss_to_keep.nelement() > 0:
                    loss = loss_to_keep.mean()
            else:
                loss = loss.mean()

            if (
                loss < self.hparams.loss_upper_lim and loss.nelement() > 0
            ):  # the fix for computational problems
                loss.backward()
                if self.hparams.clip_grad_norm >= 0:
                    torch.nn.utils.clip_grad_norm_(
                        self.modules.parameters(), self.hparams.clip_grad_norm
                    )
                self.optimizer.step()
            else:
                self.nonfinite_count += 1
                logger.info(
                    "infinite loss or empty loss! it happened {} times so far - skipping this batch".format(
                        self.nonfinite_count
                    )
                )
                loss.data = torch.tensor(0).to(self.device)

        self.optimizer.zero_grad()

        return loss.detach().cpu()

    def evaluate_batch(self, batch, stage):
        """Computations needed for validation/test batches"""
        snt_id = batch.id
        mixture = batch.mix_sig
        targets = [batch.s1_sig, batch.s2_sig]
        if self.hparams.num_spks == 3:
            targets.append(batch.s3_sig)

        with torch.no_grad():
            predictions, targets = self.compute_forward(mixture, targets, stage)
            loss = self.compute_objectives(predictions, targets)

        # Manage audio file saving
        if stage == sb.Stage.TEST and self.hparams.save_audio:
            if hasattr(self.hparams, "n_audio_to_save"):
                # Save only the first n_audio_to_save test items
                if self.hparams.n_audio_to_save > 0:
                    self.save_audio(snt_id[0], mixture, targets, predictions)
                    self.hparams.n_audio_to_save += -1
            else:
                self.save_audio(snt_id[0], mixture, targets, predictions)

        return loss.mean().detach()

    def on_stage_end(self, stage, stage_loss, epoch):
        """Gets called at the end of a epoch."""
        # Compute/store important stats
        stage_stats = {"si-snr": stage_loss}
        if stage == sb.Stage.TRAIN:
            self.train_stats = stage_stats

        # Perform end-of-iteration things, like annealing, logging, etc.
        if stage == sb.Stage.VALID:

            # Learning rate annealing
            if isinstance(
                self.hparams.lr_scheduler, schedulers.ReduceLROnPlateau
            ):
                current_lr, next_lr = self.hparams.lr_scheduler(
                    [self.optimizer], epoch, stage_loss
                )
                schedulers.update_learning_rate(self.optimizer, next_lr)
            else:
                # if we do not use the reducelronplateau, we do not change the lr
                current_lr = self.hparams.optimizer.optim.param_groups[0]["lr"]

            self.hparams.train_logger.log_stats(
                stats_meta={"epoch": epoch, "lr": current_lr},
                train_stats=self.train_stats,
                valid_stats=stage_stats,
            )
            # Keep only the checkpoint with the best (lowest) si-snr
            self.checkpointer.save_and_keep_only(
                meta={"si-snr": stage_stats["si-snr"]}, min_keys=["si-snr"],
            )
        elif stage == sb.Stage.TEST:
            self.hparams.train_logger.log_stats(
                stats_meta={"Epoch loaded": self.hparams.epoch_counter.current},
                test_stats=stage_stats,
            )

    def add_speed_perturb(self, targets, targ_lens):
        """Adds speed perturbation and random_shift to the input signals"""

        min_len = -1
        recombine = False

        if self.hparams.use_speedperturb:
            # Performing speed change (independently on each source)
            new_targets = []
            recombine = True

            for i in range(targets.shape[-1]):
                new_target = self.hparams.speedperturb(
                    targets[:, :, i], targ_lens
                )
                new_targets.append(new_target)
                # Track the shortest perturbed source for recombination
                if i == 0:
                    min_len = new_target.shape[-1]
                else:
                    if new_target.shape[-1] < min_len:
                        min_len = new_target.shape[-1]

        if self.hparams.use_rand_shift:
            # Performing random_shift (independently on each source)
            recombine = True
            for i in range(targets.shape[-1]):
                rand_shift = torch.randint(
                    self.hparams.min_shift, self.hparams.max_shift, (1,)
                )
                new_targets[i] = new_targets[i].to(self.device)
                new_targets[i] = torch.roll(
                    new_targets[i], shifts=(rand_shift[0],), dims=1
                )

        # Re-combination: truncate all sources to min_len and re-stack
        if recombine:
            if self.hparams.use_speedperturb:
                targets = torch.zeros(
                    targets.shape[0],
                    min_len,
                    targets.shape[-1],
                    device=targets.device,
                    dtype=torch.float,
                )
            for i, new_target in enumerate(new_targets):
                targets[:, :, i] = new_targets[i][:, 0:min_len]

        mix = targets.sum(-1)
        return mix, targets

    def cut_signals(self, mixture, targets):
        """This function selects a random segment of a given length withing the mixture.
        The corresponding targets are selected accordingly"""
        randstart = torch.randint(
            0,
            1 + max(0, mixture.shape[1] - self.hparams.training_signal_len),
            (1,),
        ).item()
        targets = targets[
            :, randstart : randstart + self.hparams.training_signal_len, :
        ]
        mixture = mixture[
            :, randstart : randstart + self.hparams.training_signal_len
        ]
        return mixture, targets

    def reset_layer_recursively(self, layer):
        """Reinitializes the parameters of the neural networks"""
        if hasattr(layer, "reset_parameters"):
            layer.reset_parameters()
        for child_layer in layer.modules():
            if layer != child_layer:
                self.reset_layer_recursively(child_layer)

    def save_results(self, test_data):
        """This script computes the SDR and SI-SNR metrics and saves
        them into a csv file"""

        # This package is required for SDR computation
        from mir_eval.separation import bss_eval_sources

        # Create folders where to store audio
        save_file = os.path.join(self.hparams.output_folder, "test_results.csv")

        # Variable init
        all_sdrs = []
        all_sdrs_i = []
        all_sisnrs = []
        all_sisnrs_i = []
        csv_columns = ["snt_id", "sdr", "sdr_i", "si-snr", "si-snr_i"]

        test_loader = sb.dataio.dataloader.make_dataloader(
            test_data, **self.hparams.dataloader_opts
        )

        with open(save_file, "w") as results_csv:
            writer = csv.DictWriter(results_csv, fieldnames=csv_columns)
            writer.writeheader()

            # Loop over all test sentence
            with tqdm(test_loader, dynamic_ncols=True) as t:
                for i, batch in enumerate(t):

                    # Apply Separation
                    mixture, mix_len = batch.mix_sig
                    snt_id = batch.id
                    targets = [batch.s1_sig, batch.s2_sig]
                    if self.hparams.num_spks == 3:
                        targets.append(batch.s3_sig)

                    with torch.no_grad():
                        predictions, targets = self.compute_forward(
                            batch.mix_sig, targets, sb.Stage.TEST
                        )

                    # Compute SI-SNR
                    sisnr = self.compute_objectives(predictions, targets)

                    # Compute SI-SNR improvement: baseline is the raw
                    # (unseparated) mixture repeated for every speaker
                    mixture_signal = torch.stack(
                        [mixture] * self.hparams.num_spks, dim=-1
                    )
                    mixture_signal = mixture_signal.to(targets.device)
                    sisnr_baseline = self.compute_objectives(
                        mixture_signal, targets
                    )
                    sisnr_i = sisnr - sisnr_baseline

                    # Compute SDR
                    sdr, _, _, _ = bss_eval_sources(
                        targets[0].t().cpu().numpy(),
                        predictions[0].t().detach().cpu().numpy(),
                    )

                    sdr_baseline, _, _, _ = bss_eval_sources(
                        targets[0].t().cpu().numpy(),
                        mixture_signal[0].t().detach().cpu().numpy(),
                    )

                    sdr_i = sdr.mean() - sdr_baseline.mean()

                    # Saving on a csv file
                    # (the loss returns negated SI-SNR, hence the minus signs)
                    row = {
                        "snt_id": snt_id[0],
                        "sdr": sdr.mean(),
                        "sdr_i": sdr_i,
                        "si-snr": -sisnr.item(),
                        "si-snr_i": -sisnr_i.item(),
                    }
                    writer.writerow(row)

                    # Metric Accumulation
                    all_sdrs.append(sdr.mean())
                    all_sdrs_i.append(sdr_i.mean())
                    all_sisnrs.append(-sisnr.item())
                    all_sisnrs_i.append(-sisnr_i.item())

                row = {
                    "snt_id": "avg",
                    "sdr": np.array(all_sdrs).mean(),
                    "sdr_i": np.array(all_sdrs_i).mean(),
                    "si-snr": np.array(all_sisnrs).mean(),
                    "si-snr_i": np.array(all_sisnrs_i).mean(),
                }
                writer.writerow(row)

        logger.info("Mean SISNR is {}".format(np.array(all_sisnrs).mean()))
        logger.info("Mean SISNRi is {}".format(np.array(all_sisnrs_i).mean()))
        logger.info("Mean SDR is {}".format(np.array(all_sdrs).mean()))
        logger.info("Mean SDRi is {}".format(np.array(all_sdrs_i).mean()))

    def save_audio(self, snt_id, mixture, targets, predictions):
        "saves the test audio (mixture, targets, and estimated sources) on disk"

        # Create output folder
        save_path = os.path.join(self.hparams.save_folder, "audio_results")
        if not os.path.exists(save_path):
            os.mkdir(save_path)

        for ns in range(self.hparams.num_spks):

            # Estimated source (peak-normalized before saving)
            signal = predictions[0, :, ns]
            signal = signal / signal.abs().max()
            save_file = os.path.join(
                save_path, "item{}_source{}hat.wav".format(snt_id, ns + 1)
            )
            torchaudio.save(
                save_file, signal.unsqueeze(0).cpu(), self.hparams.sample_rate
            )

            # Original source
            signal = targets[0, :, ns]
            signal = signal / signal.abs().max()
            save_file = os.path.join(
                save_path, "item{}_source{}.wav".format(snt_id, ns + 1)
            )
            torchaudio.save(
                save_file, signal.unsqueeze(0).cpu(), self.hparams.sample_rate
            )

        # Mixture
        signal = mixture[0][0, :]
        signal = signal / signal.abs().max()
        save_file = os.path.join(save_path, "item{}_mix.wav".format(snt_id))
        torchaudio.save(
            save_file, signal.unsqueeze(0).cpu(), self.hparams.sample_rate
        )
# from: recipes/LibriMix/separation/train.py
# from: recipes/LibriMix/separation/train.py
def dataio_prep(hparams):
    """Creates data processing pipeline.

    Builds the train/valid/test DynamicItemDatasets from their csv
    manifests and attaches the audio-loading pipelines. The set of output
    keys depends on the number of speakers and on whether WHAM! noise
    signals are present in the manifests.
    """

    # 1. Define datasets
    train_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["train_data"],
        replacements={"data_root": hparams["data_folder_nspks"]},
    )

    valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["valid_data"],
        replacements={"data_root": hparams["data_folder_nspks"]},
    )

    test_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["test_data"],
        replacements={"data_root": hparams["data_folder_nspks"]},
    )

    datasets = [train_data, valid_data, test_data]

    # 2. Provide audio pipelines
    @sb.utils.data_pipeline.takes("mix_wav")
    @sb.utils.data_pipeline.provides("mix_sig")
    def audio_pipeline_mix(mix_wav):
        mix_sig = sb.dataio.dataio.read_audio(mix_wav)
        return mix_sig

    @sb.utils.data_pipeline.takes("s1_wav")
    @sb.utils.data_pipeline.provides("s1_sig")
    def audio_pipeline_s1(s1_wav):
        s1_sig = sb.dataio.dataio.read_audio(s1_wav)
        return s1_sig

    @sb.utils.data_pipeline.takes("s2_wav")
    @sb.utils.data_pipeline.provides("s2_sig")
    def audio_pipeline_s2(s2_wav):
        s2_sig = sb.dataio.dataio.read_audio(s2_wav)
        return s2_sig

    # The third-speaker pipeline only exists in the 3-speaker configuration
    if hparams["num_spks"] == 3:

        @sb.utils.data_pipeline.takes("s3_wav")
        @sb.utils.data_pipeline.provides("s3_sig")
        def audio_pipeline_s3(s3_wav):
            s3_sig = sb.dataio.dataio.read_audio(s3_wav)
            return s3_sig

    if hparams["use_wham_noise"]:

        @sb.utils.data_pipeline.takes("noise_wav")
        @sb.utils.data_pipeline.provides("noise_sig")
        def audio_pipeline_noise(noise_wav):
            noise_sig = sb.dataio.dataio.read_audio(noise_wav)
            return noise_sig

    sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline_mix)
    sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline_s1)
    sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline_s2)
    if hparams["num_spks"] == 3:
        sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline_s3)

    if hparams["use_wham_noise"]:
        print("Using the WHAM! noise in the data pipeline")
        sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline_noise)

    # Output keys match the configured number of speakers / noise usage
    if (hparams["num_spks"] == 2) and hparams["use_wham_noise"]:
        sb.dataio.dataset.set_output_keys(
            datasets, ["id", "mix_sig", "s1_sig", "s2_sig", "noise_sig"]
        )
    elif (hparams["num_spks"] == 3) and hparams["use_wham_noise"]:
        sb.dataio.dataset.set_output_keys(
            datasets,
            ["id", "mix_sig", "s1_sig", "s2_sig", "s3_sig", "noise_sig"],
        )
    elif (hparams["num_spks"] == 2) and not hparams["use_wham_noise"]:
        sb.dataio.dataset.set_output_keys(
            datasets, ["id", "mix_sig", "s1_sig", "s2_sig"]
        )
    else:
        sb.dataio.dataset.set_output_keys(
            datasets, ["id", "mix_sig", "s1_sig", "s2_sig", "s3_sig"]
        )

    return train_data, valid_data, test_data
if __name__ == "__main__":
    # Load hyperparameters file with command-line overrides
    hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])
    with open(hparams_file) as fin:
        hparams = load_hyperpyyaml(fin, overrides)
    # The YAML setting takes precedence over any command-line value
    run_opts["auto_mix_prec"] = hparams["auto_mix_prec"]
    # Initialize ddp (useful only for multi-GPU DDP training)
    sb.utils.distributed.ddp_init_group(run_opts)
    # Logger info
    logger = logging.getLogger(__name__)
    # Create experiment directory
    sb.create_experiment_directory(
        experiment_directory=hparams["output_folder"],
        hyperparams_to_save=hparams_file,
        overrides=overrides,
    )
    # Check if storage folder for dataset exists
    if not hparams["data_folder"]:
        print("Please, specify a valid data_folder for dataset storage")
        sys.exit(1)
    # Data preparation: run on the main process only to avoid DDP races
    from prepare_data import prepare_aishell1mix
    run_on_main(
        prepare_aishell1mix,
        kwargs={
            "datapath": hparams["data_folder"],
            "savepath": hparams["save_folder"],
            "n_spks": hparams["num_spks"],
            "skip_prep": hparams["skip_prep"],
            "aishell1mix_addnoise": hparams["use_wham_noise"],
            "fs": hparams["sample_rate"],
            "datafreqs": hparams["data_freqs"],
            "datamodes": hparams["data_modes"],
        },
    )
    # Create dataset objects
    if hparams["dynamic_mixing"]:
        from dynamic_mixing import (
            dynamic_mix_data_prep_aishell1mix as dynamic_mix_data_prep,
        )
        # if the base_folder for dm is not processed, preprocess them
        if "processed" not in hparams["base_folder_dm"]:
            # if the processed folder already exists we just use it otherwise we do the preprocessing
            if not os.path.exists(
                os.path.normpath(hparams["base_folder_dm"]) + "_processed"
            ):
                from recipes.Aishell1Mix.meta.preprocess_dynamic_mixing import (
                    resample_folder,
                )
                print("Resampling the base folder")
                run_on_main(
                    resample_folder,
                    kwargs={
                        "input_folder": hparams["base_folder_dm"],
                        "output_folder": os.path.normpath(
                            hparams["base_folder_dm"]
                        )
                        + "_processed",
                        "fs": hparams["sample_rate"],
                        "regex": "**/*.wav",
                    },
                )
                # adjust the base_folder_dm path
                hparams["base_folder_dm"] = (
                    os.path.normpath(hparams["base_folder_dm"]) + "_processed"
                )
            else:
                print(
                    "Using the existing processed folder on the same directory as base_folder_dm"
                )
                hparams["base_folder_dm"] = (
                    os.path.normpath(hparams["base_folder_dm"]) + "_processed"
                )
        # Colleting the hparams for dynamic batching
        dm_hparams = {
            "train_data": hparams["train_data"],
            "data_folder": hparams["data_folder_nspks"],
            "base_folder_dm": hparams["base_folder_dm"],
            "sample_rate": hparams["sample_rate"],
            "num_spks": hparams["num_spks"],
            "training_signal_len": hparams["training_signal_len"],
            "dataloader_opts": hparams["dataloader_opts"],
        }
        # Dynamic mixing replaces only the training set; valid/test stay static
        train_data = dynamic_mix_data_prep(dm_hparams)
        _, valid_data, test_data = dataio_prep(hparams)
    else:
        train_data, valid_data, test_data = dataio_prep(hparams)
    # Load pretrained model if pretrained_separator is present in the yaml
    if "pretrained_separator" in hparams:
        run_on_main(hparams["pretrained_separator"].collect_files)
        hparams["pretrained_separator"].load_collected()
    # Brain class initialization
    separator = Separation(
        modules=hparams["modules"],
        opt_class=hparams["optimizer"],
        hparams=hparams,
        run_opts=run_opts,
        checkpointer=hparams["checkpointer"],
    )
    # re-initialize the parameters if we don't use a pretrained model
    if "pretrained_separator" not in hparams:
        for module in separator.modules.values():
            separator.reset_layer_recursively(module)
    if not hparams["test_only"]:
        # Training
        separator.fit(
            separator.hparams.epoch_counter,
            train_data,
            valid_data,
            train_loader_kwargs=hparams["dataloader_opts"],
            valid_loader_kwargs=hparams["dataloader_opts"],
        )
    # Eval: evaluate the checkpoint with the best (lowest) si-snr
    separator.evaluate(test_data, min_key="si-snr")
    separator.save_results(test_data)
| 25,295 | 36.090909 | 108 | py |
speechbrain | speechbrain-main/recipes/Aishell1Mix/meta/preprocess_dynamic_mixing.py | """
This script allows to resample a folder which contains audio files.
The files are parsed recursively. An exact copy of the folder is created,
with same structure but contained resampled audio files.
Resampling is performed by using sox through torchaudio.
Author
------
Samuele Cornell, 2020
"""
import os
import argparse
from pathlib import Path
import tqdm
import torchaudio
import glob
# from oct2py import octave
from scipy import signal
import numpy as np
import torch
# Command-line interface for stand-alone use of this script.
# NOTE: the text below is passed as ``description`` -- the original code passed
# it as the first positional argument, which argparse interprets as ``prog``
# (the program name), so it never showed up as a description in --help.
parser = argparse.ArgumentParser(
    description=(
        "utility for resampling all audio files in a folder recursively. "
        "It copies --input_folder to --output_folder and "
        "resamples all audio files with specified format to --fs."
    )
)
parser.add_argument("--input_folder", type=str, required=True)
parser.add_argument("--output_folder", type=str, required=True)
parser.add_argument(
    "--fs",
    # Parse directly to int: the default was already an int and the call
    # site converted the string manually with int(args.fs).
    type=int,
    default=8000,
    help="this is the target sampling frequency",
)
parser.add_argument("--regex", type=str, default="**/*.wav")
def resample_folder(input_folder, output_folder, fs, regex):
    """Resamples the wav files within an input folder.

    Every matching file is peak-normalized, resampled to ``fs`` and written
    under ``output_folder`` with the original folder structure preserved.
    The original peak value is embedded in the output file name
    (``*_peak_<value>.wav``) so it can be restored later.

    Arguments
    ---------
    input_folder : path
        Path of the folder to resample.
    output_folder : path
        Path of the output folder with the resampled data.
    fs : int
        Target sampling frequency.
    regex : str
        Glob pattern used to select the files (e.g. ``**/*.wav``).
    """
    files = glob.glob(os.path.join(input_folder, regex), recursive=True)
    for f in tqdm.tqdm(files):
        audio, fs_read = torchaudio.load(f)
        audio = audio[0].numpy()
        audio = signal.resample_poly(audio, fs, fs_read)
        if len(audio) == 0:
            # Nothing left after resampling; skip the file
            continue
        peak = np.max(np.abs(audio))
        # Guard against all-zero (silent) files: dividing by a zero peak
        # would fill the saved audio with NaNs.
        if peak > 0:
            audio = audio / peak
        audio = torch.from_numpy(audio).float()
        rel = Path(f).relative_to(Path(input_folder))
        # Keep the original peak in the file name for later de-normalization
        relative_path = os.path.join(
            rel.parent, rel.stem + "_peak_{}.wav".format(peak),
        )
        os.makedirs(
            Path(os.path.join(output_folder, rel)).parent, exist_ok=True,
        )
        torchaudio.save(
            os.path.join(output_folder, relative_path),
            audio.reshape(1, -1),
            fs,
        )
if __name__ == "__main__":
    # Stand-alone entry point: parse the CLI and resample the folder.
    cli = parser.parse_args()
    resample_folder(
        input_folder=cli.input_folder,
        output_folder=cli.output_folder,
        fs=int(cli.fs),
        regex=cli.regex,
    )
| 2,781 | 27.10101 | 80 | py |
speechbrain | speechbrain-main/recipes/LibriSpeech/librispeech_prepare.py | """
Data preparation.
Download: http://www.openslr.org/12
Author
------
Mirco Ravanelli, Ju-Chieh Chou, Loren Lugosch 2020
"""
import os
import csv
import random
from collections import Counter
import logging
import torchaudio
from tqdm.contrib import tzip
from speechbrain.utils.data_utils import download_file, get_all_files
from speechbrain.dataio.dataio import (
load_pkl,
save_pkl,
merge_csvs,
)
logger = logging.getLogger(__name__)
OPT_FILE = "opt_librispeech_prepare.pkl"
SAMPLERATE = 16000
def prepare_librispeech(
    data_folder,
    save_folder,
    tr_splits=None,
    dev_splits=None,
    te_splits=None,
    select_n_sentences=None,
    merge_lst=None,
    merge_name=None,
    create_lexicon=False,
    skip_prep=False,
):
    """
    This function prepares the csv files for the LibriSpeech dataset.
    Download link: http://www.openslr.org/12

    Arguments
    ---------
    data_folder : str
        Path to the folder where the original LibriSpeech dataset is stored.
    save_folder : str
        The directory where to store the csv files.
    tr_splits : list, optional
        List of train splits to prepare from ['test-others','train-clean-100',
        'train-clean-360','train-other-500']. Defaults to no splits.
    dev_splits : list, optional
        List of dev splits to prepare from ['dev-clean','dev-others'].
    te_splits : list, optional
        List of test splits to prepare from ['test-clean','test-others'].
    select_n_sentences : list, optional
        Default : None
        If not None, only pick this many sentences (one count per split).
    merge_lst : list, optional
        List of librispeech splits (e.g, train-clean, train-clean-360,..) to
        merge in a single csv file.
    merge_name : str
        Name of the merged csv file.
    create_lexicon : bool
        If True, it outputs csv files containing mapping between grapheme
        to phonemes. Use it for training a G2P system.
    skip_prep : bool
        If True, data preparation is skipped.

    Example
    -------
    >>> data_folder = 'datasets/LibriSpeech'
    >>> tr_splits = ['train-clean-100']
    >>> dev_splits = ['dev-clean']
    >>> te_splits = ['test-clean']
    >>> save_folder = 'librispeech_prepared'
    >>> prepare_librispeech(data_folder, save_folder, tr_splits, dev_splits, te_splits)
    """
    if skip_prep:
        return
    # Normalize optional list arguments. Using None defaults avoids the
    # mutable-default-argument pitfall of the previous `=[]` signature.
    tr_splits = tr_splits or []
    dev_splits = dev_splits or []
    te_splits = te_splits or []
    merge_lst = merge_lst or []
    splits = tr_splits + dev_splits + te_splits
    # Configuration saved alongside the csvs; used to decide whether a
    # previous run can be reused (see skip()).
    conf = {
        "select_n_sentences": select_n_sentences,
    }
    # Saving folder
    os.makedirs(save_folder, exist_ok=True)
    save_opt = os.path.join(save_folder, OPT_FILE)
    # Check if this phase is already done (if so, skip it)
    if skip(splits, save_folder, conf):
        logger.info("Skipping preparation, completed in previous run.")
        return
    logger.info("Data_preparation...")
    # Additional checks to make sure the data folder contains Librispeech
    check_librispeech_folders(data_folder, splits)
    # create csv files for each split
    all_texts = {}
    for split_index, split in enumerate(splits):
        wav_lst = get_all_files(
            os.path.join(data_folder, split), match_and=[".flac"]
        )
        text_lst = get_all_files(
            os.path.join(data_folder, split), match_and=["trans.txt"]
        )
        text_dict = text_to_dict(text_lst)
        all_texts.update(text_dict)
        if select_n_sentences is not None:
            n_sentences = select_n_sentences[split_index]
        else:
            n_sentences = len(wav_lst)
        create_csv(
            save_folder, wav_lst, text_dict, split, n_sentences,
        )
    # Merging csv file if needed
    if merge_lst and merge_name is not None:
        merge_files = [split_libri + ".csv" for split_libri in merge_lst]
        merge_csvs(
            data_folder=save_folder, csv_lst=merge_files, merged_csv=merge_name,
        )
    # Create lexicon.csv and oov.csv
    if create_lexicon:
        create_lexicon_and_oov_csv(all_texts, data_folder, save_folder)
    # saving options
    save_pkl(conf, save_opt)
def create_lexicon_and_oov_csv(all_texts, data_folder, save_folder):
    """
    Creates lexicon csv files useful for training and testing a
    grapheme-to-phoneme (G2P) model.

    Arguments
    ---------
    all_texts : dict
        Dictionary containing text from the librispeech transcriptions
    data_folder : str
        Path to the folder where the original LibriSpeech dataset is stored.
    save_folder : str
        The directory where to store the csv files.

    Returns
    -------
    None
    """
    # If the lexicon file does not exist, download it
    lexicon_url = "http://www.openslr.org/resources/11/librispeech-lexicon.txt"
    lexicon_path = os.path.join(save_folder, "librispeech-lexicon.txt")
    if not os.path.isfile(lexicon_path):
        logger.info(
            "Lexicon file not found. Downloading from %s." % lexicon_url
        )
        download_file(lexicon_url, lexicon_path)
    # Get list of all words in the transcripts
    # NOTE(review): transcript_words is computed but never used below --
    # presumably intended for the oov.csv mentioned in the function name.
    transcript_words = Counter()
    for key in all_texts:
        transcript_words.update(all_texts[key].split("_"))
    # Get list of all words in the lexicon
    lexicon_words = []
    lexicon_pronunciations = []
    with open(lexicon_path, "r") as f:
        for line in f:
            parts = line.split()
            # Skip blank/malformed lines: indexing parts[0] on an empty
            # line would raise IndexError.
            if not parts:
                continue
            lexicon_words.append(parts[0])
            lexicon_pronunciations.append(parts[1:])
    # Create lexicon.csv: one row per word, graphemes and phonemes
    # space-separated, stress digits stripped from the phonemes.
    header = "ID,duration,char,phn\n"
    lexicon_csv_path = os.path.join(save_folder, "lexicon.csv")
    with open(lexicon_csv_path, "w") as f:
        f.write(header)
        for idx, word in enumerate(lexicon_words):
            separated_graphemes = list(word)
            duration = len(separated_graphemes)
            graphemes = " ".join(separated_graphemes)
            pronunciation_no_numbers = [
                p.strip("0123456789") for p in lexicon_pronunciations[idx]
            ]
            phonemes = " ".join(pronunciation_no_numbers)
            line = (
                ",".join([str(idx), str(duration), graphemes, phonemes]) + "\n"
            )
            f.write(line)
    logger.info("Lexicon written to %s." % lexicon_csv_path)
    # Split lexicon.csv in train, validation, and test splits
    split_lexicon(save_folder, [98, 1, 1])
def split_lexicon(data_folder, split_ratio):
    """
    Splits the lexicon.csv file into train, validation, and test csv files

    Arguments
    ---------
    data_folder : str
        Path to the folder containing the lexicon.csv file to split.
    split_ratio : list
        List containing the training, validation, and test split ratio. Set it
        to [80, 10, 10] for having 80% of material for training, 10% for valid,
        and 10 for test.

    Returns
    -------
    None
    """
    # Read all the entries, dropping the header line
    lexicon_csv_path = os.path.join(data_folder, "lexicon.csv")
    with open(lexicon_csv_path, "r") as f:
        entries = f.readlines()[1:]
    # Shuffle entries in place before slicing out the splits
    random.shuffle(entries)
    header = "ID,duration,char,phn\n"
    n_train = int(0.01 * split_ratio[0] * len(entries))
    n_valid = int(0.01 * split_ratio[1] * len(entries))
    # Train / valid are the first two slices; test takes the remainder
    splits = {
        "lexicon_tr.csv": entries[:n_train],
        "lexicon_dev.csv": entries[n_train : n_train + n_valid],
        "lexicon_test.csv": entries[n_train + n_valid :],
    }
    for filename, lines in splits.items():
        with open(os.path.join(data_folder, filename), "w") as f:
            f.writelines([header] + lines)
def create_csv(
    save_folder, wav_lst, text_dict, split, select_n_sentences,
):
    """
    Create the dataset csv file given a list of wav files.

    Arguments
    ---------
    save_folder : str
        Location of the folder for storing the csv.
    wav_lst : list
        The list of wav files of a given data split.
    text_dict : dict
        The dictionary containing the text of each sentence.
    split : str
        The name of the current data split.
    select_n_sentences : int, optional
        The number of sentences to select.

    Returns
    -------
    None
    """
    # Setting path for the csv file
    csv_file = os.path.join(save_folder, split + ".csv")
    if os.path.exists(csv_file):
        logger.info("Csv file %s already exists, not recreating." % csv_file)
        return
    # Preliminary prints
    msg = "Creating csv lists in %s..." % (csv_file)
    logger.info(msg)
    csv_lines = [["ID", "duration", "wav", "spk_id", "wrd"]]
    snt_cnt = 0
    # Processing all the wav files in wav_lst
    for wav_file in tzip(wav_lst):
        wav_file = wav_file[0]
        # os.path.basename instead of split("/") so the id is extracted
        # correctly regardless of the OS path separator.
        snt_id = os.path.basename(wav_file).replace(".flac", "")
        # LibriSpeech ids are <spk>-<chapter>-<utt>; the speaker id is the
        # first two dash-separated fields.
        spk_id = "-".join(snt_id.split("-")[0:2])
        wrds = text_dict[snt_id]
        signal, fs = torchaudio.load(wav_file)
        signal = signal.squeeze(0)
        duration = signal.shape[0] / SAMPLERATE
        csv_line = [
            snt_id,
            str(duration),
            wav_file,
            spk_id,
            str(" ".join(wrds.split("_"))),
        ]
        # Appending current file to the csv_lines list
        csv_lines.append(csv_line)
        snt_cnt = snt_cnt + 1
        if snt_cnt == select_n_sentences:
            break
    # Writing the csv_lines (newline="" is the documented requirement for
    # the csv module; prevents blank lines on Windows)
    with open(csv_file, mode="w", newline="") as csv_f:
        csv_writer = csv.writer(
            csv_f, delimiter=",", quotechar='"', quoting=csv.QUOTE_MINIMAL
        )
        for line in csv_lines:
            csv_writer.writerow(line)
    # Final print
    msg = "%s successfully created!" % (csv_file)
    logger.info(msg)
def skip(splits, save_folder, conf):
    """
    Detect when the librispeech data prep can be skipped.

    Arguments
    ---------
    splits : list
        A list of the splits expected in the preparation.
    save_folder : str
        The location of the save directory
    conf : dict
        The configuration options to ensure they haven't changed.

    Returns
    -------
    bool
        if True, the preparation phase can be skipped.
        if False, it must be done.
    """
    # Every split must already have its csv file
    csvs_present = all(
        os.path.isfile(os.path.join(save_folder, split + ".csv"))
        for split in splits
    )
    if not csvs_present:
        return False
    # The saved options must exist and match the current configuration
    save_opt = os.path.join(save_folder, OPT_FILE)
    if not os.path.isfile(save_opt):
        return False
    return load_pkl(save_opt) == conf
def text_to_dict(text_lst):
    """
    This converts lines of text into a dictionary-

    Arguments
    ---------
    text_lst : list
        Paths to the files containing the librispeech text transcription.

    Returns
    -------
    dict
        The dictionary containing the text transcriptions for each sentence.
    """
    entries = {}
    # Each transcription file holds one "<utt_id> <word> <word> ..." per line
    for transcript_path in text_lst:
        with open(transcript_path, "r") as transcript_file:
            for raw_line in transcript_file:
                utt_id, *words = raw_line.strip().split(" ")
                # Words are joined with "_" so each utterance maps to a
                # single underscore-separated string
                entries[utt_id] = "_".join(words)
    return entries
def check_librispeech_folders(data_folder, splits):
    """
    Check if the data folder actually contains the LibriSpeech dataset.
    If it does not, an error is raised.

    Returns
    -------
    None

    Raises
    ------
    OSError
        If LibriSpeech is not found at the specified path.
    """
    # Each requested split must exist as a subfolder of data_folder;
    # fail fast on the first missing one.
    for split in splits:
        split_folder = os.path.join(data_folder, split)
        if os.path.exists(split_folder):
            continue
        err_msg = (
            "the folder %s does not exist (it is expected in the "
            "Librispeech dataset)" % split_folder
        )
        raise OSError(err_msg)
| 12,515 | 27.905312 | 87 | py |
speechbrain | speechbrain-main/recipes/LibriSpeech/LM/train.py | #!/usr/bin/env python3
"""Recipe for training a Language Model with librispeech train-960 transcript and lm_corpus.
To run this recipe, do the following:
> pip install datasets
> python train.py hparams/<hparam_file>.yaml --data_folder <local_path_to_librispeech_dataset>
Authors
* Jianyuan Zhong 2021
* Ju-Chieh Chou 2020
"""
import os
import sys
import logging
import glob
import torch
from datasets import load_dataset
from hyperpyyaml import load_hyperpyyaml
import speechbrain as sb
from speechbrain.utils.distributed import run_on_main
logger = logging.getLogger(__name__)
# Define training procedure
class LM(sb.core.Brain):
    """Brain subclass implementing the language-model training procedure
    (forward pass, loss, gradient accumulation and LR scheduling)."""

    def compute_forward(self, batch, stage):
        """Forward computations from the sentence batches to the output probabilities."""
        batch = batch.to(self.device)
        tokens_bos, _ = batch.tokens_bos
        logits = self.hparams.model(tokens_bos)
        pred = self.hparams.log_softmax(logits)
        return pred

    def compute_objectives(self, predictions, batch, stage):
        """Computes the loss given predictions and targets."""
        batch = batch.to(self.device)
        tokens_eos, tokens_len = batch.tokens_eos
        loss = self.hparams.compute_cost(
            predictions, tokens_eos, length=tokens_len
        )
        return loss

    def fit_batch(self, batch):
        """Train the parameters given a single batch in input"""
        predictions = self.compute_forward(batch, sb.Stage.TRAIN)
        loss = self.compute_objectives(predictions, batch, sb.Stage.TRAIN)
        # Scale the loss so that gradients accumulated over accu_steps
        # batches average over the virtual (larger) batch
        (loss / self.hparams.accu_steps).backward()
        # Only step the optimizer every accu_steps batches
        if self.step % self.hparams.accu_steps == 0:
            # gradient clipping & early stop if loss is not finite
            self.check_gradients(loss)
            self.optimizer.step()
            self.optimizer.zero_grad()
            # Noam / CyclicCosine schedulers are stepped once per
            # optimizer update (not per validation epoch)
            if isinstance(
                self.hparams.lr_annealing, sb.nnet.schedulers.NoamScheduler
            ) or isinstance(
                self.hparams.lr_annealing,
                sb.nnet.schedulers.CyclicCosineScheduler,
            ):
                self.hparams.lr_annealing(self.optimizer)
        return loss

    def on_stage_end(self, stage, stage_loss, epoch):
        """Gets called at the end of a epoch."""
        stage_stats = {"loss": stage_loss}
        if stage == sb.Stage.TRAIN:
            self.train_stats = stage_stats
        # Only the main DDP process logs stats and writes checkpoints
        if stage == sb.Stage.VALID and sb.utils.distributed.if_main_process():
            if not (
                isinstance(
                    self.hparams.lr_annealing, sb.nnet.schedulers.NoamScheduler
                )
                or isinstance(
                    self.hparams.lr_annealing,
                    sb.nnet.schedulers.CyclicCosineScheduler,
                )
            ):
                # Loss-driven schedulers update on validation loss here
                old_lr, new_lr = self.hparams.lr_annealing(stage_loss)
                sb.nnet.schedulers.update_learning_rate(self.optimizer, new_lr)
            else:
                # Batch-level schedulers were already stepped in fit_batch
                old_lr = self.hparams.lr_annealing.current_lr
            self.hparams.train_logger.log_stats(
                stats_meta={"epoch": epoch, "lr": old_lr},
                train_stats=self.train_stats,
                valid_stats=stage_stats,
            )
            # Keep only the best checkpoint according to validation loss
            self.checkpointer.save_and_keep_only(
                meta=stage_stats, min_keys=["loss"],
            )
def dataio_prepare(hparams):
    """Prepare the LM datasets (train/dev/test) from transcript files.

    Grabs all the transcript files matching the configured glob patterns,
    builds HuggingFace datasets from them, converts those into
    DynamicItemDatasets and attaches the tokenization pipeline.

    Arguments
    ---------
    hparams : dict
        Loaded hyperparameters. Must contain "data_folder", the three
        "*_transcripts_pattern" entries, "lm_corpus_path", "tokenizer",
        "bos_index" and "eos_index".

    Returns
    -------
    train_data, valid_data, test_data
        Datasets yielding "id", "text", "tokens_bos" and "tokens_eos".
    """
    # Use the module-level logger (the original called logging.info directly,
    # bypassing the configured `logger = logging.getLogger(__name__)`)
    logger.info("generating datasets...")
    data_folder = hparams["data_folder"]
    train_transcripts = glob.glob(
        os.path.join(data_folder, hparams["train_transcripts_pattern"]),
        recursive=True,
    )
    dev_transcripts = glob.glob(
        os.path.join(data_folder, hparams["dev_transcripts_pattern"]),
        recursive=True,
    )
    test_transcripts = glob.glob(
        os.path.join(data_folder, hparams["test_transcripts_pattern"]),
        recursive=True,
    )
    # Prepare data and generate datasets (HuggingFace `datasets`)
    datasets = load_dataset(
        "dataset.py",
        lm_corpus_path=hparams["lm_corpus_path"],
        data_files={
            "train": train_transcripts,
            "dev": dev_transcripts,
            "test": test_transcripts,
        },
    )
    train_data, valid_data, test_data = (
        datasets["train"],
        datasets["dev"],
        datasets["test"],
    )
    # Convert huggingface's dataset to DynamicItemDataset
    train_data = sb.dataio.dataset.DynamicItemDataset.from_arrow_dataset(
        train_data
    )
    valid_data = sb.dataio.dataset.DynamicItemDataset.from_arrow_dataset(
        valid_data
    )
    test_data = sb.dataio.dataset.DynamicItemDataset.from_arrow_dataset(
        test_data
    )
    datasets = [train_data, valid_data, test_data]
    tokenizer = hparams["tokenizer"]

    # Define text pipeline
    # TODO: implement text augmentations pipelines
    @sb.utils.data_pipeline.takes("text")
    @sb.utils.data_pipeline.provides("text", "tokens_bos", "tokens_eos")
    def text_pipeline(text):
        yield text
        tokens_list = tokenizer.encode_as_ids(text)
        # Prepend <bos>: these are the decoder inputs
        tokens_bos = torch.LongTensor([hparams["bos_index"]] + tokens_list)
        yield tokens_bos
        # Append <eos>: these are the prediction targets
        tokens_eos = torch.LongTensor(tokens_list + [hparams["eos_index"]])
        yield tokens_eos

    sb.dataio.dataset.add_dynamic_item(datasets, text_pipeline)
    # 4. Set output:
    sb.dataio.dataset.set_output_keys(
        datasets, ["id", "text", "tokens_bos", "tokens_eos"],
    )
    return train_data, valid_data, test_data
if __name__ == "__main__":
    # CLI:
    hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])
    with open(hparams_file) as fin:
        hparams = load_hyperpyyaml(fin, overrides)
    # If --distributed_launch then
    # create ddp_group with the right communication protocol
    sb.utils.distributed.ddp_init_group(run_opts)
    # Create experiment directory
    sb.create_experiment_directory(
        experiment_directory=hparams["output_folder"],
        hyperparams_to_save=hparams_file,
        overrides=overrides,
    )
    # here we create the dataloader objects as well as tokenization and encoding
    train_data, valid_data, test_data = dataio_prepare(hparams)
    # We download the tokenizer from HuggingFace (or elsewhere depending on
    # the path given in the YAML file).
    run_on_main(hparams["pretrainer"].collect_files)
    hparams["pretrainer"].load_collected(device=run_opts["device"])
    # Brain class initialization with the modules/optimizer from the YAML
    lm_brain = LM(
        modules=hparams["modules"],
        opt_class=hparams["optimizer"],
        hparams=hparams,
        run_opts=run_opts,
        checkpointer=hparams["checkpointer"],
    )
    # Training loop over the configured number of epochs
    lm_brain.fit(
        lm_brain.hparams.epoch_counter,
        train_data,
        valid_data,
        train_loader_kwargs=hparams["train_dataloader_opts"],
        valid_loader_kwargs=hparams["valid_dataloader_opts"],
    )
    # evaluation: uses the checkpoint with the lowest validation loss
    test_stats = lm_brain.evaluate(
        test_data,
        min_key="loss",
        test_loader_kwargs=hparams["test_dataloader_opts"],
    )
| 7,083 | 31.645161 | 94 | py |
speechbrain | speechbrain-main/recipes/LibriSpeech/G2P/evaluate.py | """Recipe for evaluating a grapheme-to-phoneme system with librispeech lexicon.
The script may be use in isolation or in combination with Orion to fit
hyperparameters that do not require model retraining (e.g. Beam Search)
"""
from hyperpyyaml import load_hyperpyyaml
from speechbrain.dataio.batch import PaddedBatch
from speechbrain.lobes.models.g2p.dataio import get_sequence_key
from speechbrain.utils import hpopt as hp
from speechbrain.wordemb.util import expand_to_chars
from train import dataio_prep, load_dependencies
from types import SimpleNamespace
from tqdm.auto import tqdm
import math
import itertools
import speechbrain as sb
import torch
import sys
import logging
logger = logging.getLogger(__name__)
class G2PEvaluator:
"""The G2P model evaluation wrapper
Arguments
---------
hparams: dict
the dictionary from a parsed hyperparameter file
device: str
the device identifier
model_state: dict
a pre-loaded model state for a "warm start" if applicable
- could be useful if hyperparameters have changed, but
the same model can be reused from one run to the next
"""
def __init__(self, hparams, device, model_state=None):
self.hparams = SimpleNamespace(**hparams)
self.overrides = overrides
self.device = device
self.modules = torch.nn.ModuleDict(self.hparams.modules).to(self.device)
beam_searcher = (
self.hparams.beam_searcher_lm
if self.hparams.use_language_model
else self.hparams.beam_searcher
)
self.beam_searcher = beam_searcher.to(self.device)
if model_state:
self.hparams.model.load_state_dict(model_state)
else:
self.load()
self.grapheme_sequence_mode = getattr(
self.hparams, "grapheme_sequence_mode", "bos"
)
self.grapheme_key = get_sequence_key(
key="grapheme_encoded", mode=self.grapheme_sequence_mode
)
self.modules["model"].eval()
self._word_separator = None
self._bos = torch.tensor(
self.hparams.bos_index, device=device
).unsqueeze(-1)
self._eos = torch.tensor(
self.hparams.eos_index, device=device
).unsqueeze(-1)
# When reconstructing sentences word-wise, the process depends
# on whether spaces are preserved or omitted, as controlled by
# the phonemes_enable_space hyperparameter
self._flatten_results = (
self._flatten_results_separated
if getattr(self.hparams, "phonemes_enable_space", None)
else self._flatten_results_jumbled
)
self._grapheme_word_separator_idx = None
if self.hparams.use_word_emb:
self.modules.word_emb = self.hparams.word_emb().to(self.device)
def load(self):
"""Loads a model from a checkpoint"""
checkpointer = self.hparams.checkpointer
ckpt = checkpointer.recover_if_possible(
device=torch.device(self.device),
importance_key=lambda ckpt: -ckpt.meta.get("PER", -100.0),
ckpt_predicate=lambda ckpt: ckpt.meta["step"]
== self.hparams.eval_ckpt_step,
)
if ckpt:
logger.info("Loaded checkpoint with metadata %s", ckpt.meta)
else:
raise ValueError(
f"Checkpoint not found for training step {self.hparams.eval_train_step}"
)
return ckpt
def evaluate_batch(self, batch):
"""
Evaluates the G2P model
Arguments
---------
batch: PaddedBatch
A single batch of data, same as the kind of batch used
for G2P training
"""
batch = batch.to(self.device)
grapheme_encoded = getattr(batch, self.grapheme_key)
if self.hparams.eval_mode == "sentence":
hyps, scores = self._get_phonemes(grapheme_encoded, char=batch.char)
elif self.hparams.eval_mode == "word":
hyps, scores = self._get_phonemes_wordwise(batch.grapheme_encoded)
else:
raise ValueError(f"unsupported eval_mode {self.hparams.eval_mode}")
ids = batch.sample_id
phns, phn_lens = batch.phn_encoded
self.per_metrics.append(
ids, hyps, phns, None, phn_lens, self.hparams.out_phoneme_decoder,
)
def _get_phonemes(self, grapheme_encoded, phn_encoded=None, char=None):
"""Runs the model and the beam search to retrieve the phoneme sequence
corresponding to the provided grapheme sequence
Arguments
---------
grapheme_encoded: speechbrain.dataio.batch.PaddedData
An encoded grapheme sequence
phn_encoded_bos: speechbrain.dataio.batch.PaddedData
An encoded phoneme sequence (optional)
char: str
Raw character input (needed for word embeddings)
Returns
-------
hyps: list
the hypotheses (the beam search result)
scores: list
the scores corresponding to the hypotheses
"""
_, char_word_emb = None, None
if self._grapheme_word_separator_idx is None:
self._grapheme_word_separator_idx = self.hparams.grapheme_encoder.lab2ind[
" "
]
if not phn_encoded:
grapheme_encoded_data, grapheme_lens = grapheme_encoded
phn_encoded = (
torch.ones(len(grapheme_encoded_data), 1).to(
grapheme_encoded_data.device
)
* self.hparams.bos_index,
torch.ones(len(grapheme_encoded_data)).to(
grapheme_encoded_data.device
),
)
char_word_emb = self._apply_word_embeddings(grapheme_encoded, char)
p_seq, char_lens, encoder_out, _ = self.modules.model(
grapheme_encoded=grapheme_encoded,
phn_encoded=phn_encoded,
word_emb=char_word_emb,
)
return self.beam_searcher(encoder_out, char_lens)
def _apply_word_embeddings(self, grapheme_encoded, char):
char_word_emb = None
if self.hparams.use_word_emb:
grapheme_encoded_data, grapheme_lens = grapheme_encoded
word_emb = self.modules.word_emb.batch_embeddings(char)
char_word_emb = expand_to_chars(
emb=word_emb,
seq=grapheme_encoded_data,
seq_len=grapheme_lens,
word_separator=self._grapheme_word_separator_idx,
)
return char_word_emb
def _get_phonemes_wordwise(self, grapheme_encoded):
"""Retrieves the phoneme sequence corresponding to the provided grapheme
sequence in a word-wise manner (running the evaluator for each word separately)
Arguments
---------
grapheme_encoded: speechbrain.dataio.batch.PaddedData
An encoded grapheme sequence
Returns
-------
hyps: list
the hypotheses (the beam search result)
scores: list
the scores corresponding to the hypotheses
"""
if self.hparams.use_word_emb:
raise NotImplementedError(
"Wordwise evaluation is not supported with word embeddings"
)
if self._word_separator is None:
self._word_separator = self.hparams.phoneme_encoder.lab2ind[" "]
hyps, scores = [], []
for grapheme_item, grapheme_len in zip(
grapheme_encoded.data, grapheme_encoded.lengths
):
words_batch = self._split_words_batch(grapheme_item, grapheme_len)
item_hyps, item_scores = self._get_phonemes(
words_batch.grapheme_encoded
)
hyps.append(self._flatten_results(item_hyps))
scores.append(self._flatten_scores(item_hyps, item_scores))
return hyps, scores
def _flatten_results_jumbled(self, results):
"""Flattens a sequence of results into a single sequence of tokens -
used when spaces are preserved in the phoneme space
Arguments
---------
results: iterable
a two-dimensional result
Returns
-------
result: list
the concatenated reuslt
"""
return [token for item_result in results for token in item_result]
def _flatten_results_separated(self, results):
"""Flattens a sequence of words, inserting word separators between them -
used when word separators are preserved in the phoneme space
Arguments
---------
results: iterable
a two-dimensional result
Returns
-------
result: list
the concatenated reuslt
"""
result = []
for item_result in results:
for token in item_result:
result.append(token)
if item_result and item_result[-1] != self._word_separator:
result.append(self._word_separator)
del result[-1]
return result
def _flatten_scores(self, hyps, scores):
"""Flattens an array of scores, using a weighted average of the scores of
individual words, by word length
Arguments
---------
hyps: list
the hypotheses (the beam search result)
scores: list
the scores corresponding to the hypotheses
Results
-------
scores: list
the scores corresponding to the hypotheses,
merged
"""
seq_len = sum(len(word_hyp) for word_hyp in hyps)
return (
sum(
word_score * len(word_hyp)
for word_hyp, word_score in zip(hyps, scores)
)
/ seq_len
)
def _split_words_batch(self, graphemes, length):
return PaddedBatch(
[
{"grapheme_encoded": word}
for word in self._split_words_seq(graphemes, length)
]
).to(self.device)
def _split_words_seq(self, graphemes, length):
"""Splits the provided grapheme sequence into words
Arguments
---------
graphemes: torch.Tensor
an encoded sequence of phonemes
Returns
-------
graphemes: generator
a generator representing a sequence of words
"""
space_index = self.hparams.graphemes.index(" ")
(word_boundaries,) = torch.where(graphemes == space_index)
last_word_boundary = 0
for word_boundary in word_boundaries:
yield self._add_delimiters(
graphemes[last_word_boundary + 1 : word_boundary]
)
last_word_boundary = word_boundary
char_length = math.ceil(len(graphemes) * length)
if last_word_boundary < char_length:
yield self._add_delimiters(
graphemes[last_word_boundary + 1 : char_length]
)
def _add_delimiters(self, word):
"""Adds the required delimiter characters to a word
Arguments
---------
word: torch.Tensor
a tensor representing a word
"""
if self.grapheme_sequence_mode == "bos":
word = torch.cat([self._bos, word])
elif self.grapheme_sequence_mode == "eos":
word = torch.cat([word, self._eos])
return word
def evaluate_epoch(self, dataset, dataloader_opts=None):
"""
Evaluates a single epoch
Arguments
---------
dataset: DynamicItemDataset
a G2P dataset (same as the ones used for training)
Returns
-------
metrics: dict
Raw PER metrics
"""
logger.info("Beginning evaluation")
with torch.no_grad():
self.per_metrics = self.hparams.per_stats()
dataloader = sb.dataio.dataloader.make_dataloader(
dataset,
**dict(
dataloader_opts or {},
shuffle=True,
batch_size=self.hparams.eval_batch_size,
),
)
dataloader_it = iter(dataloader)
if self.hparams.eval_batch_count is not None:
dataloader_it = itertools.islice(
dataloader_it, 0, self.hparams.eval_batch_count
)
batch_count = self.hparams.eval_batch_count
else:
batch_count = math.ceil(
len(dataset) / self.hparams.eval_batch_size
)
for batch in tqdm(dataloader_it, total=batch_count):
self.evaluate_batch(batch)
if self.hparams.eval_output_wer_file:
self._output_wer_file()
return self.per_metrics.summarize()
def _output_wer_file(self):
    """Writes the accumulated PER statistics to the configured file."""
    wer_file = self.hparams.eval_wer_file
    with open(wer_file, "w") as report:
        report.write("\nPER stats:\n")
        self.per_metrics.write_stats(report)
    print(
        "seq2seq, and PER stats written to file", wer_file,
    )
if __name__ == "__main__":
    # CLI:
    # Run the evaluation inside a hyperparameter-optimization context so
    # results can be reported to an orchestrator via hp.report_result
    with hp.hyperparameter_optimization(objective_key="error_rate") as hp_ctx:
        # Parse the hyperparameter file
        # NOTE(review): `search_hparam_file` is assigned but never used —
        # presumably left over from a refactoring; confirm before removing
        search_hparam_file = sys.argv[0]
        hparams_file, run_opts, overrides = hp_ctx.parse_arguments(sys.argv[1:])
        device = run_opts.get("device", "cpu")
        with open(hparams_file) as fin:
            hparams = load_hyperpyyaml(fin, overrides)
        # Load dependencies (e.g. a pretrained language model), if configured
        if hparams.get("use_language_model"):
            load_dependencies(hparams, run_opts)
        # Run the evaluation
        evaluator = G2PEvaluator(hparams, device)
        # Some configurations involve curriculum training on
        # multiple steps. Load the dataset configuration for the
        # step specified in the eval_train_step hyperparameter
        # (or command-line argument)
        train_step = next(
            train_step
            for train_step in hparams["train_steps"]
            if train_step["name"] == hparams["eval_train_step"]
        )
        train, valid, test, _ = dataio_prep(hparams, train_step)
        datasets = {"train": train, "valid": valid, "test": test}
        dataset = datasets[hparams["eval_dataset"]]
        # Per-step dataloader options take precedence over global ones
        dataloader_opts = train_step.get(
            "dataloader_opts", hparams.get("dataloader_opts", {})
        )
        result = evaluator.evaluate_epoch(dataset, dataloader_opts)
        # Report the evaluation result to the hyperparameter optimizer
        hp.report_result(result)
| 14,699 | 34.083532 | 88 | py |
speechbrain | speechbrain-main/recipes/LibriSpeech/G2P/train_lm.py | #!/usr/bin/env python3
"""Recipe for training a language model with a phoneme model.
> python train.py RNNLM.yaml
To run this recipe, you need to first install the Huggingface dataset:
> pip install datasets
Authors
* Ju-Chieh Chou 2020
* Jianyuan Zhong 2021
* Mirco Ravanelli 2021
* Artem Ploujnikov 2021
"""
import sys
import logging
import os
import speechbrain as sb
from hyperpyyaml import load_hyperpyyaml
from speechbrain.utils.distributed import run_on_main
from train import dataio_prep
# The following hyperparameters are used in dataio_prep, shared with the
# main G2P training script:
# - hparams["phoneme_encoder"]
# - hparams["origins"]
# - hparams["phn_token_wordwise"]
# - hparams["graphemes"]
# - hparams["sorting"]
# - hparams["test_data"]
# - hparams["data_load"]
# - hparams["token_space_index"]
# - hparams["valid_data"]
# - hparams["tokenizer_valid_data"]
# - hparams["grapheme_encoder"]
# - hparams["train_data"]
# - hparams["eos_index"]
# - hparams["bos_index"]
# - hparams["tokenizer_file"]
# - hparams["output_neurons"]
# - hparams["tokenizer"]
# - hparams["blank_index"]
logger = logging.getLogger(__name__)
# Brain class for language model training
class LM(sb.core.Brain):
    """Brain subclass implementing phoneme-level language model training."""

    def compute_forward(self, batch, stage):
        """Predicts the next token given the previous ones.

        Arguments
        ---------
        batch : PaddedBatch
            This batch object contains all the relevant tensors for computation.
        stage : sb.Stage
            One of sb.Stage.TRAIN, sb.Stage.VALID, or sb.Stage.TEST.

        Returns
        -------
        predictions : torch.Tensor
            A tensor containing the posterior probabilities (predictions).
        """
        batch = batch.to(self.device)
        # BOS-prefixed phoneme tokens are the model inputs
        tokens_bos, _ = batch.phn_encoded_bos
        pred = self.hparams.model(tokens_bos)
        return self.hparams.log_softmax(pred)

    def compute_objectives(self, predictions, batch, stage):
        """Computes the loss given the predicted and targeted outputs.

        Arguments
        ---------
        predictions : torch.Tensor
            The posterior probabilities from `compute_forward`.
        batch : PaddedBatch
            This batch object contains all the relevant tensors for computation.
        stage : sb.Stage
            One of sb.Stage.TRAIN, sb.Stage.VALID, or sb.Stage.TEST.

        Returns
        -------
        loss : torch.Tensor
            A one-element tensor used for backpropagating the gradient.
        """
        batch = batch.to(self.device)
        # EOS-suffixed tokens are the prediction targets
        tokens_eos, tokens_len = batch.phn_encoded_eos
        loss = self.hparams.compute_cost(
            predictions, tokens_eos, length=tokens_len
        )
        return loss

    def fit_batch(self, batch):
        """Runs all the steps needed to train the model on a single batch.

        Arguments
        ---------
        batch : PaddedBatch
            This batch object contains all the relevant tensors for computation.

        Returns
        -------
        loss : torch.Tensor
            A tensor containing the loss (single real number).
        """
        predictions = self.compute_forward(batch, sb.Stage.TRAIN)
        loss = self.compute_objectives(predictions, batch, sb.Stage.TRAIN)
        # Loss backpropagation (gradient computation); the loss is scaled
        # so that accumulated gradients average over accu_steps batches
        (loss / self.hparams.accu_steps).backward()
        # Manage gradient accumulation: only update every accu_steps batches
        if self.step % self.hparams.accu_steps == 0:
            # Gradient clipping & early stop if loss is not finite
            self.check_gradients(loss)
            # Update the parameters
            self.optimizer.step()
            # Reset the gradient
            self.optimizer.zero_grad()
            # Step-wise schedulers are advanced once per optimizer update
            if isinstance(
                self.hparams.lr_annealing, sb.nnet.schedulers.NoamScheduler
            ) or isinstance(
                self.hparams.lr_annealing,
                sb.nnet.schedulers.CyclicCosineScheduler,
            ):
                self.hparams.lr_annealing(self.optimizer)
        return loss

    def on_stage_end(self, stage, stage_loss, epoch):
        """Gets called at the end of an epoch.

        Arguments
        ---------
        stage : sb.Stage
            One of sb.Stage.TRAIN, sb.Stage.VALID, sb.Stage.TEST
        stage_loss : float
            The average loss for all of the data processed in this stage.
        epoch : int
            The currently-starting epoch. This is passed
            `None` during the test stage.
        """
        # Store the train loss until the validation stage.
        if stage == sb.Stage.TRAIN:
            self.train_loss = stage_loss
        # Summarize the statistics from the stage for record-keeping.
        else:
            stats = {
                "loss": stage_loss,
            }
        # At the end of validation, we can write logs and checkpoints
        if stage == sb.Stage.VALID:
            # Update learning rate
            old_lr, new_lr = self.hparams.lr_annealing(stage_loss)
            sb.nnet.schedulers.update_learning_rate(self.optimizer, new_lr)
            # The train_logger writes a summary to stdout and to the logfile.
            self.hparams.train_logger.log_stats(
                {"Epoch": epoch},
                train_stats={"loss": self.train_loss},
                valid_stats=stats,
            )
            # Save the current checkpoint and delete previous checkpoints.
            self.checkpointer.save_and_keep_only(meta=stats, min_keys=["loss"])
        # We also write statistics about test data to stdout and to the logfile.
        if stage == sb.Stage.TEST:
            self.hparams.train_logger.log_stats(
                {"Epoch loaded": self.hparams.epoch_counter.current},
                test_stats=stats,
            )
# Recipe begins!
if __name__ == "__main__":
    # Reading command line arguments
    hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])
    # Initialize ddp (useful only for multi-GPU DDP training)
    sb.utils.distributed.ddp_init_group(run_opts)
    from tokenizer_prepare import prepare_tokenizer  # noqa
    # Load hyperparameters file with command-line overrides
    with open(hparams_file) as fin:
        hparams = load_hyperpyyaml(fin, overrides)
    # If phoneme tokenization is enabled, prepare the tokenizer first
    # (on the main process only, to avoid duplicated work under DDP)
    if hparams.get("phn_tokenize"):
        path = hparams["phoneme_tokenizer_output_folder"]
        if not os.path.exists(path):
            os.makedirs(path)
        run_on_main(
            prepare_tokenizer,
            kwargs={
                "dataset_name": hparams["dataset"],
                "data_folder": hparams["data_folder"],
                "save_folder": hparams["save_folder"],
                "phonemes": hparams["phonemes"],
            },
        )
    # Create experiment directory
    sb.create_experiment_directory(
        experiment_directory=hparams["output_folder"],
        hyperparams_to_save=hparams_file,
        overrides=overrides,
    )
    # Create dataset objects "train", "valid", and "test"
    train_data, valid_data, test_data, _ = dataio_prep(hparams)
    # Initialize the Brain object to prepare for LM training.
    lm_brain = LM(
        modules=hparams["modules"],
        opt_class=hparams["optimizer"],
        hparams=hparams,
        run_opts=run_opts,
        checkpointer=hparams["checkpointer"],
    )
    # The `fit()` method iterates the training loop, calling the methods
    # necessary to update the parameters of the model. Since all objects
    # with changing state are managed by the Checkpointer, training can be
    # stopped at any point, and will be resumed on next call.
    lm_brain.fit(
        lm_brain.hparams.epoch_counter,
        train_data,
        valid_data,
        train_loader_kwargs=hparams["train_dataloader_opts"],
        valid_loader_kwargs=hparams["valid_dataloader_opts"],
    )
    # Load best checkpoint for evaluation
    test_stats = lm_brain.evaluate(
        test_data,
        min_key="loss",
        test_loader_kwargs=hparams["test_dataloader_opts"],
    )
| 7,998 | 30.996 | 80 | py |
speechbrain | speechbrain-main/recipes/LibriSpeech/G2P/train.py | #!/usr/bin/env/python3
"""Recipe for training a grapheme-to-phoneme system with one of the available datasets.
See README.md for more details
Authors
* Loren Lugosch 2020
* Mirco Ravanelli 2020
* Artem Ploujnikov 2021
"""
from speechbrain.dataio.dataset import (
FilteredSortedDynamicItemDataset,
DynamicItemDataset,
)
from speechbrain.dataio.sampler import BalancingDataSampler
from speechbrain.utils.data_utils import undo_padding
import datasets
import logging
import os
import random
import torch
import speechbrain as sb
import sys
from enum import Enum
from collections import namedtuple
from hyperpyyaml import load_hyperpyyaml
from functools import partial
from speechbrain.utils.distributed import run_on_main
from speechbrain.pretrained.training import save_for_pretrained
from speechbrain.lobes.models.g2p.dataio import (
enable_eos_bos,
grapheme_pipeline,
phoneme_pipeline,
tokenizer_encode_pipeline,
add_bos_eos,
get_sequence_key,
phonemes_to_label,
)
from speechbrain.dataio.wer import print_alignments
from speechbrain.wordemb.util import expand_to_chars
from io import StringIO
from speechbrain.utils import hpopt as hp
import numpy as np
logger = logging.getLogger(__name__)
# Lightweight container for the outputs of G2PBrain.compute_forward;
# every field except the one(s) explicitly passed defaults to None
G2PPredictions = namedtuple(
    "G2PPredictions",
    ["p_seq", "char_lens", "hyps", "ctc_logprobs", "attn"],
    defaults=(None, None, None, None),
)
class TrainMode(Enum):
    """Enumeration of the supported training modes.

    NORMAL: trains the sequence-to-sequence model
    HOMOGRAPH: fine-tunes a trained model on homographs
    """

    NORMAL = "normal"
    HOMOGRAPH = "homograph"
# Define training procedure
class G2PBrain(sb.Brain):
def __init__(self, train_step_name, *args, **kwargs):
    """Class constructor

    Arguments
    ---------
    train_step_name: str
        the name of the training step, for curriculum learning
    *args
    **kwargs
        forwarded to ``sb.Brain.__init__``
    """
    super().__init__(*args, **kwargs)
    self.train_step_name = train_step_name
    # Locate the hyperparameter definition of the requested training step
    # (raises StopIteration if the name is unknown)
    self.train_step = next(
        step
        for step in self.hparams.train_steps
        if step["name"] == train_step_name
    )
    self.epoch_counter = self.train_step["epoch_counter"]
    # BUGFIX: the original referenced an undefined local `train_step`
    # (the parameter is `train_step_name`), raising a NameError here
    self.mode = TrainMode(self.train_step.get("mode", TrainMode.NORMAL))
    self.last_attn = None
    # BUGFIX: `self.train_step` is a dict, so `getattr` always returned
    # the default; use `.get` so a per-step lr_annealing override (if
    # present in the step definition) is actually honored
    self.lr_annealing = self.train_step.get(
        "lr_annealing", self.hparams.lr_annealing
    )
    self.phn_key = get_sequence_key(
        key="phn_encoded",
        mode=getattr(self.hparams, "phoneme_sequence_mode", "bos"),
    )
    self.grapheme_key = get_sequence_key(
        key="grapheme_encoded",
        mode=getattr(self.hparams, "grapheme_sequence_mode", "bos"),
    )
    # Use the LM-fused beam searcher when a language model is configured
    self.beam_searcher = (
        self.hparams.beam_searcher_lm
        if self.hparams.use_language_model
        else self.hparams.beam_searcher
    )
    # A cheaper searcher may be configured for validation
    self.beam_searcher_valid = getattr(
        self.hparams, "beam_searcher_valid", self.beam_searcher
    )
    self.start_epoch = None
def on_fit_start(self):
    """Gets called at the beginning of ``fit()``, on multiple processes
    if ``distributed_count > 0`` and backend is ddp.

    Default implementation compiles the jit modules, initializes
    optimizers, and loads the latest checkpoint to resume training.
    """
    # Run this *after* starting all processes since jit modules cannot be
    # pickled.
    self._compile_jit()
    # Wrap modules with parallel backend after jit
    self._wrap_distributed()
    # Initialize optimizers after parameters are configured
    self.init_optimizers()
    # Load latest checkpoint to resume training if interrupted
    self._recover_checkpoint()
def on_evaluate_start(self, max_key=None, min_key=None):
    """Gets called at the beginning of ``evaluate()``

    Default implementation loads the best-performing checkpoint for
    evaluation, based on stored metrics.

    Arguments
    ---------
    max_key : str
        Key to use for finding best checkpoint (higher is better).
        By default, passed to ``self.checkpointer.recover_if_possible()``.
    min_key : str
        Key to use for finding best checkpoint (lower is better).
        By default, passed to ``self.checkpointer.recover_if_possible()``.
    """
    # Delegate to the shared recovery helper (also used by on_fit_start)
    self._recover_checkpoint(min_key, max_key)
def _recover_checkpoint(self, min_key=None, max_key=None):
    """Loads the best-performing checkpoint, based on stored metrics.

    Checkpoints saved for the current training step (matching the "step"
    entry in checkpoint metadata) are preferred; if none exists, any
    available checkpoint is recovered instead.

    Arguments
    ---------
    max_key : str
        Key to use for finding best checkpoint (higher is better).
        By default, passed to ``self.checkpointer.recover_if_possible()``.
    min_key : str
        Key to use for finding best checkpoint (lower is better).
        By default, passed to ``self.checkpointer.recover_if_possible()``.
    """
    # Guard clause: nothing to recover without a checkpointer
    if self.checkpointer is None:
        return
    step = self.train_step["name"]
    logger.info(f"Attempting to restore checkpoint for step {step}")
    # First, try to recover a checkpoint belonging to this training step
    result = self.checkpointer.recover_if_possible(
        device=torch.device(self.device),
        min_key=min_key,
        max_key=max_key,
        ckpt_predicate=(lambda ckpt: ckpt.meta.get("step") == step),
    )
    if result is None:
        # BUGFIX: corrected the "fount" typo in the log message
        logger.info(
            "No checkpoint found for step %s, "
            "attempting to recover any checkpoint",
            step,
        )
        # Fall back to the best checkpoint from any step
        result = self.checkpointer.recover_if_possible(
            device=torch.device(self.device),
            min_key=min_key,
            max_key=max_key,
        )
    if result:
        logger.info(
            "Recovered checkpoint with metadata %s", result.meta
        )
    else:
        logger.info("No checkpoint found")
def compute_forward(self, batch, stage):
    """Forward computations from the char batches to the output probabilities.

    Arguments
    ---------
    batch: PaddedBatch
        a raw G2P data batch
    stage: speechbrain.Stage
        the training stage

    Returns
    -------
    predictions: G2PPredictions
        sequence log-probabilities, encoder lengths, beam-search
        hypotheses (eval stages only), CTC log-probabilities (train only,
        when CTC is active) and attention weights
    """
    batch = batch.to(self.device)
    # Get graphemes or phonemes
    graphemes, grapheme_lens = getattr(batch, self.grapheme_key)
    phn_encoded = getattr(batch, self.phn_key)
    word_emb = None
    # Use word embeddings (if applicable)
    if self.hparams.use_word_emb:
        word_emb = self.modules.word_emb.batch_embeddings(batch.char)
        # Broadcast each word embedding across that word's characters
        char_word_emb = expand_to_chars(
            emb=word_emb,
            seq=graphemes,
            seq_len=grapheme_lens,
            word_separator=self.grapheme_word_separator_idx,
        )
    else:
        word_emb, char_word_emb = None, None
    # Forward pass through the model
    p_seq, char_lens, encoder_out, attn = self.modules["model"](
        grapheme_encoded=(graphemes.detach(), grapheme_lens),
        phn_encoded=phn_encoded,
        word_emb=char_word_emb,
    )
    # Keep the last attention map for progress-sample visualization
    self.last_attn = attn
    hyps = None
    # Apply CTC, if applicable
    ctc_logprobs = None
    if stage == sb.Stage.TRAIN and self.is_ctc_active(stage):
        # Output layer for ctc log-probabilities
        ctc_logits = self.modules.ctc_lin(encoder_out)
        ctc_logprobs = self.hparams.log_softmax(ctc_logits)
    # Run beam search only during evaluation, and only if metrics are on
    if stage != sb.Stage.TRAIN and self.hparams.enable_metrics:
        beam_searcher = (
            self.beam_searcher_valid
            if stage == sb.Stage.VALID
            else self.beam_searcher
        )
        hyps, scores = beam_searcher(encoder_out, char_lens)
    return G2PPredictions(p_seq, char_lens, hyps, ctc_logprobs, attn)
def compute_objectives(self, predictions, batch, stage):
    """Computes the loss (CTC+NLL) given predictions and targets.

    Arguments
    ---------
    predictions: G2PPredictions
        the predictions (as computed by compute_forward)
    batch: PaddedBatch
        a raw G2P data batch
    stage: speechbrain.Stage
        the training stage

    Returns
    -------
    loss: torch.Tensor
        the total loss (sequence NLL, optionally blended with CTC and
        augmented with a homograph-focused loss)
    """
    phns_eos, phn_lens_eos = batch.phn_encoded_eos
    phns, phn_lens = batch.phn_encoded
    loss_seq = self.hparams.seq_cost(
        predictions.p_seq, phns_eos, phn_lens_eos
    )
    if self.is_ctc_active(stage):
        # Interpolate NLL and CTC losses using ctc_weight
        seq_weight = 1 - self.hparams.ctc_weight
        loss_ctc = self.hparams.ctc_cost(
            predictions.ctc_logprobs,
            phns_eos,
            predictions.char_lens,
            phn_lens_eos,
        )
        loss = seq_weight * loss_seq + self.hparams.ctc_weight * loss_ctc
    else:
        loss = loss_seq
    if self.mode == TrainMode.HOMOGRAPH:
        # When tokenization is used, the length of the words
        # in the original phoneme space is not equal to the tokenized
        # words but the raw data only supplies non-tokenized information
        phns_base, phn_lens_base = (
            batch.phn_raw_encoded
            if self.hparams.phn_tokenize
            else (None, None)
        )
        # Extra weighted loss restricted to the homograph subsequence
        homograph_loss = (
            self.hparams.homograph_loss_weight
            * self.hparams.homograph_cost(
                phns=phns,
                phn_lens=phn_lens,
                p_seq=predictions.p_seq,
                subsequence_phn_start=batch.homograph_phn_start,
                subsequence_phn_end=batch.homograph_phn_end,
                phns_base=phns_base,
                phn_lens_base=phn_lens_base,
            )
        )
        loss += homograph_loss
    # Record losses for posterity
    if self.hparams.enable_metrics and stage != sb.Stage.TRAIN:
        self._add_sequence_metrics(predictions, batch)
        if self.mode == TrainMode.HOMOGRAPH:
            self._add_homograph_metrics(predictions, batch)
    return loss
def _add_sequence_metrics(self, predictions, batch):
    """Computes sequence-loss and PER metrics for the whole batch and
    registers them with the corresponding metric trackers.

    Arguments
    ---------
    predictions: G2PPredictions
        the predictions (as computed by compute_forward)
    batch: PaddedBatch
        a raw G2P data batch
    """
    phns_eos, _ = batch.phn_encoded_eos
    phns, phn_lens = batch.phn_encoded
    # Sequence (NLL) loss per sample
    self.seq_metrics.append(
        batch.sample_id, predictions.p_seq, phns_eos, phn_lens
    )
    # Phoneme error rate, decoding hypotheses back to phoneme labels
    self.per_metrics.append(
        batch.sample_id,
        predictions.hyps,
        phns,
        None,
        phn_lens,
        self.hparams.out_phoneme_decoder,
    )
def _add_homograph_metrics(self, predictions, batch):
    """Extracts the homograph from the sequence, computes metrics for it
    and registers them.

    Arguments
    ---------
    predictions: G2PPredictions
        the predictions (as computed by compute_forward)
    batch: PaddedBatch
        a raw G2P data batch
    """
    phns, phn_lens = batch.phn_encoded
    # Non-tokenized (raw) phonemes are needed to locate word boundaries
    # when tokenization is enabled
    phns_base, phn_base_lens = (
        batch.phn_raw_encoded if self.hparams.phn_tokenize else (None, None)
    )
    # Slice out the homograph subsequence from targets and predictions
    (
        p_seq_homograph,
        phns_homograph,
        phn_lens_homograph,
    ) = self.hparams.homograph_extractor(
        phns,
        phn_lens,
        predictions.p_seq,
        subsequence_phn_start=batch.homograph_phn_start,
        subsequence_phn_end=batch.homograph_phn_end,
        phns_base=phns_base,
        phn_base_lens=phn_base_lens,
    )
    # Extract the corresponding slice from the beam-search hypotheses
    hyps_homograph = self.hparams.homograph_extractor.extract_hyps(
        phns_base if phns_base is not None else phns,
        predictions.hyps,
        batch.homograph_phn_start,
        use_base=True,
    )
    self.seq_metrics_homograph.append(
        batch.sample_id, p_seq_homograph, phns_homograph, phn_lens_homograph
    )
    self.per_metrics_homograph.append(
        batch.sample_id,
        hyps_homograph,
        phns_homograph,
        None,
        phn_lens_homograph,
        self.hparams.out_phoneme_decoder,
    )
    # Convert predictions and targets to labels for the per-homograph
    # classification report
    prediction_labels = phonemes_to_label(
        phns=hyps_homograph, decoder=self.hparams.out_phoneme_decoder
    )
    phns_homograph_list = undo_padding(phns_homograph, phn_lens_homograph)
    target_labels = phonemes_to_label(
        phns_homograph_list, decoder=self.hparams.out_phoneme_decoder
    )
    self.classification_metrics_homograph.append(
        batch.sample_id,
        predictions=prediction_labels,
        targets=target_labels,
        categories=batch.homograph_wordid,
    )
def is_ctc_active(self, stage):
    """Determines whether or not the CTC loss should be enabled.

    It is enabled only during training and only for the number of
    epochs given by the ctc_epochs setting of the current training step.

    Arguments
    ---------
    stage: speechbrain.Stage
        the training stage

    Returns
    -------
    result: bool
        whether CTC is active
    """
    in_training = stage == sb.Stage.TRAIN
    return (
        in_training
        and self.epoch_counter.current <= self.train_step["ctc_epochs"]
    )
def fit_batch(self, batch):
    """Train the parameters given a single batch in input

    Arguments
    ---------
    batch: PaddedBatch
        a raw G2P data batch

    Returns
    -------
    loss: torch.Tensor
        the detached loss for logging
    """
    predictions = self.compute_forward(batch, sb.Stage.TRAIN)
    loss = self.compute_objectives(predictions, batch, sb.Stage.TRAIN)
    loss.backward()
    # Only step the optimizer if the gradients passed the check
    if self.check_gradients(loss):
        self.optimizer.step()
    self.optimizer.zero_grad()
    return loss.detach()
def evaluate_batch(self, batch, stage):
    """Computations needed for validation/test batches"""
    out = self.compute_forward(batch, stage=stage)
    batch_loss = self.compute_objectives(out, batch, stage=stage)
    return batch_loss.detach()
def on_stage_start(self, stage, epoch):
    """Gets called at the beginning of each epoch: (re)initializes the
    metric trackers and mode-dependent helpers."""
    self.seq_metrics = self.hparams.seq_stats()
    if self.start_epoch is None:
        self.start_epoch = epoch
    if self.hparams.enable_metrics:
        # PER is only tracked during evaluation stages
        if stage != sb.Stage.TRAIN:
            self.per_metrics = self.hparams.per_stats()
        if self.mode == TrainMode.HOMOGRAPH:
            self.seq_metrics_homograph = self.hparams.seq_stats_homograph()
            self.classification_metrics_homograph = (
                self.hparams.classification_stats_homograph()
            )
            if stage != sb.Stage.TRAIN:
                self.per_metrics_homograph = (
                    self.hparams.per_stats_homograph()
                )
    if self.mode == TrainMode.HOMOGRAPH:
        # Homograph extraction needs to know the word separator indices
        self._set_word_separator()
    self.grapheme_word_separator_idx = self.hparams.grapheme_encoder.lab2ind[
        " "
    ]
    if self.hparams.use_word_emb:
        self.modules.word_emb = self.hparams.word_emb().to(self.device)
def _set_word_separator(self):
    """Determines the word separators to be used and propagates them to
    the homograph cost and the homograph extractor."""
    if self.hparams.phn_tokenize:
        # With tokenization, the separator in token space differs from
        # the separator in the base (raw phoneme) space
        word_separator_idx = self.hparams.token_space_index
        word_separator_base_idx = self.phoneme_encoder.lab2ind[" "]
    else:
        # Without tokenization, both separators are the same space index
        word_separator_base_idx = (
            word_separator_idx
        ) = self.phoneme_encoder.lab2ind[" "]
    self.hparams.homograph_cost.word_separator = word_separator_idx
    self.hparams.homograph_cost.word_separator_base = (
        word_separator_base_idx
    )
    self.hparams.homograph_extractor.word_separator = word_separator_idx
    self.hparams.homograph_extractor.word_separator_base = (
        word_separator_base_idx
    )
def on_stage_end(self, stage, stage_loss, epoch):
    """Gets called at the end of an epoch: updates the learning rate,
    logs statistics, saves checkpoints and writes reports."""
    if stage == sb.Stage.TRAIN:
        # Keep the training loss until validation logging
        self.train_loss = stage_loss
    elif self.hparams.enable_metrics:
        per = self.per_metrics.summarize("error_rate")
    ckpt_predicate, ckpt_meta, min_keys = None, {}, None
    if stage == sb.Stage.VALID:
        # Learning-rate annealing: the call signature depends on the
        # scheduler type configured in the hyperparameters
        if (
            isinstance(
                self.hparams.lr_annealing,
                sb.nnet.schedulers.NewBobScheduler,
            )
            and self.hparams.enable_metrics
        ):
            # NewBob anneals on the validation PER
            old_lr, new_lr = self.hparams.lr_annealing(per)
        elif isinstance(
            self.hparams.lr_annealing, sb.nnet.schedulers.ReduceLROnPlateau,
        ):
            old_lr, new_lr = self.hparams.lr_annealing(
                optim_list=[self.optimizer],
                current_epoch=epoch,
                current_loss=self.train_loss,
            )
        else:
            old_lr, new_lr = self.hparams.lr_annealing(epoch)
        sb.nnet.schedulers.update_learning_rate(self.optimizer, new_lr)
        stats = {
            "stats_meta": {"epoch": epoch, "lr": old_lr},
            "train_stats": {"loss": self.train_loss},
            "valid_stats": {"loss": stage_loss},
        }
        if self.hparams.enable_metrics:
            stats["valid_stats"].update(
                {
                    "seq_loss": self.seq_metrics.summarize("average"),
                    "PER": per,
                }
            )
            if self.mode == TrainMode.HOMOGRAPH:
                # In homograph mode, checkpoints are selected on the
                # homograph-specific PER
                per_homograph = self.per_metrics_homograph.summarize(
                    "error_rate"
                )
                stats["valid_stats"].update(
                    {
                        "seq_loss_homograph": self.seq_metrics_homograph.summarize(
                            "average"
                        ),
                        "PER_homograph": per_homograph,
                    }
                )
                ckpt_meta = {"PER_homograph": per_homograph, "PER": per}
                min_keys = ["PER_homograph"]
                ckpt_predicate = self._has_homograph_per
            else:
                ckpt_meta = {"PER": per}
                min_keys = ["PER"]
        # Report validation stats to the hyperparameter optimizer
        hp.report_result(stats["valid_stats"])
        stats = self._add_stats_prefix(stats)
        self.hparams.train_logger.log_stats(**stats)
        if self.hparams.use_tensorboard:
            self.hparams.tensorboard_train_logger.log_stats(**stats)
            # Attention/text samples go to the Tensorboard writer
            self.save_samples()
        if (
            self.hparams.ckpt_enable
            and epoch % self.hparams.ckpt_frequency == 0
        ):
            # Tag the checkpoint with the current curriculum step
            ckpt_meta["step"] = self.train_step["name"]
            self.checkpointer.save_and_keep_only(
                meta=ckpt_meta,
                min_keys=min_keys,
                ckpt_predicate=ckpt_predicate,
            )
        if self.hparams.enable_interim_reports:
            if self.hparams.enable_metrics:
                self._write_reports(epoch, final=False)
        # Early stopping: fast-forward the counter to its limit
        if self.epoch_counter.should_stop(
            current=epoch, current_metric=per,
        ):
            self.epoch_counter.current = self.epoch_counter.limit
    if stage == sb.Stage.TEST:
        test_stats = {"loss": stage_loss}
        if self.hparams.enable_metrics:
            test_stats["PER"] = per
        self.hparams.train_logger.log_stats(
            stats_meta={"Epoch loaded": self.epoch_counter.current},
            test_stats=test_stats,
        )
        if self.hparams.enable_metrics:
            self._write_reports(epoch)
def _has_homograph_per(self, ckpt):
"""Determines if the provided checkpoint has a homograph PER. Used
when selecting the best epochs for the homograph loss.
Arguments
---------
ckpt: speechbrain.utils.checkpoints.Checkpoint
a checkpoint
Returns
-------
result: bool
whether it contains a homograph PER"""
return "PER_homograph" in ckpt.meta
def _get_interim_report_path(self, epoch, file_path):
"""Determines the path to the interim, per-epoch report
Arguments
---------
epoch: int
the epoch number
file_path: str
the raw report path
"""
output_path = os.path.join(
self.hparams.output_folder,
"reports",
self.train_step["name"],
str(epoch),
)
if not os.path.exists(output_path):
os.makedirs(output_path)
return os.path.join(output_path, os.path.basename(file_path))
def _get_report_path(self, epoch, key, final):
"""Determines the path in which to save a given report
Arguments
---------
epoch: int
the epoch number
key: str
the key within the training step definition in the
hyperparameter file (e.g. "wer_file")
final: bool
whether or not this si the final report. If
final is false, an epoch number will be inserted into the path
Arguments
---------
file_name: str
the report file name
"""
file_name = self.train_step[key]
if not final:
file_name = self._get_interim_report_path(epoch, file_name)
return file_name
def _write_reports(self, epoch, final=True):
    """Outputs all reports for a given epoch: the WER/PER report, plus
    the homograph report when training in homograph mode.

    Arguments
    ---------
    epoch: int
        the epoch number
    final: bool
        whether or not the reports are final (i.e.
        after the final epoch); interim reports go into a
        per-epoch subdirectory
    """
    wer_file_name = self._get_report_path(epoch, "wer_file", final)
    self._write_wer_file(wer_file_name)
    if self.mode == TrainMode.HOMOGRAPH:
        homograph_stats_file_name = self._get_report_path(
            epoch, "homograph_stats_file", final
        )
        self._write_homograph_file(homograph_stats_file_name)
def _write_wer_file(self, file_name):
    """Outputs the Word Error Rate (WER) file, containing both the
    sequence-loss statistics and the PER statistics.

    Arguments
    ---------
    file_name: str
        the report file name
    """
    with open(file_name, "w") as w:
        w.write("\nseq2seq loss stats:\n")
        self.seq_metrics.write_stats(w)
        w.write("\nPER stats:\n")
        self.per_metrics.write_stats(w)
        logger.info("seq2seq, and PER stats written to file: %s", file_name)
def _write_homograph_file(self, file_name):
    """Outputs the detailed homograph report, detailing the accuracy
    percentage for each homograph, as well as the relative frequencies
    of particular output sequences output by the model.

    Arguments
    ---------
    file_name: str
        the report file name
    """
    with open(file_name, "w") as w:
        self.classification_metrics_homograph.write_stats(w)
def _add_stats_prefix(self, stats):
"""
Adds a training step prefix to every key in the provided statistics
dictionary
Arguments
---------
stats: dict
a statistics dictionary
Returns
---------
stats: dict
a prefixed statistics dictionary
"""
prefix = self.train_step["name"]
return {
stage: {
f"{prefix}_{key}": value for key, value in stage_stats.items()
}
for stage, stage_stats in stats.items()
}
@property
def tb_writer(self):
    """Returns the raw TensorBoard logger writer"""
    return self.hparams.tensorboard_train_logger.writer
@property
def tb_global_step(self):
    """Returns the global step number in the Tensorboard writer,
    derived from the prefixed validation loss counter"""
    global_step = self.hparams.tensorboard_train_logger.global_step
    prefix = self.train_step["name"]
    return global_step["valid"][f"{prefix}_loss"]
def save_samples(self):
    """Saves attention alignment and text samples to the Tensorboard
    writer"""
    self._save_attention_alignment()
    self._save_text_alignments()
def _save_text_alignments(self):
    """Saves text predictions aligned with labels (a sample, for progress
    tracking): the last batch, the worst-scoring samples and a random
    selection."""
    if not self.hparams.enable_metrics:
        return
    # The most recent scores in the PER tracker belong to the last batch
    last_batch_sample = self.per_metrics.scores[
        -self.hparams.eval_prediction_sample_size :
    ]
    # Worst samples: highest WER first
    metrics_by_wer = sorted(
        self.per_metrics.scores, key=lambda item: item["WER"], reverse=True
    )
    worst_sample = metrics_by_wer[
        : self.hparams.eval_prediction_sample_size
    ]
    sample_size = min(
        self.hparams.eval_prediction_sample_size,
        len(self.per_metrics.scores),
    )
    random_sample = np.random.choice(
        self.per_metrics.scores, sample_size, replace=False
    )
    text_alignment_samples = {
        "last_batch": last_batch_sample,
        "worst": worst_sample,
        "random": random_sample,
    }
    prefix = self.train_step["name"]
    for key, sample in text_alignment_samples.items():
        self._save_text_alignment(
            tag=f"valid/{prefix}_{key}", metrics_sample=sample
        )
def _save_attention_alignment(self):
    """Saves the attention alignment of the first sample of the last
    batch to the Tensorboard writer as an image."""
    attention = self.last_attn[0]
    # Multi-head attention: take the first head
    # NOTE(review): presumably the leading dim is the head dim — confirm
    if attention.dim() > 2:
        attention = attention[0]
    # Normalize by the per-map maximum so the image uses the full range
    alignments_max = (
        attention.max(dim=-1)
        .values.max(dim=-1)
        .values.unsqueeze(-1)
        .unsqueeze(-1)
    )
    # Transpose and flip so the alignment is rendered in the usual
    # orientation, then add a channel dimension for add_image
    alignments_output = (
        attention.T.flip(dims=(1,)) / alignments_max
    ).unsqueeze(0)
    prefix = self.train_step["name"]
    self.tb_writer.add_image(
        f"valid/{prefix}_attention_alignments",
        alignments_output,
        self.tb_global_step,
    )
def _save_text_alignment(self, tag, metrics_sample):
    """Saves a single text sample to the Tensorboard writer.

    Arguments
    ---------
    tag: str
        the tag - for Tensorboard
    metrics_sample: list
        List of wer details by utterance,
        see ``speechbrain.utils.edit_distance.wer_details_by_utterance``
        for format. Has to have alignments included.
    """
    # Render the alignments to text in memory, then wrap in a Markdown
    # code fence so Tensorboard preserves the monospaced layout
    with StringIO() as text_alignments_io:
        print_alignments(
            metrics_sample,
            file=text_alignments_io,
            print_header=False,
            sample_separator="\n --- \n",
        )
        text_alignments_io.seek(0)
        alignments_sample = text_alignments_io.read()
        alignments_sample_md = f"```\n{alignments_sample}\n```"
    self.tb_writer.add_text(tag, alignments_sample_md, self.tb_global_step)
def sort_data(data, hparams, train_step):
    """Sorts the dataset according to hyperparameter values, and
    optionally restricts it to a (possibly shuffled) sample.

    Arguments
    ---------
    data: speechbrain.dataio.dataset.DynamicItemDataset
        the dataset to be sorted
    hparams: dict
        raw hyperparameters
    train_step: dict
        the hyperparameters of the training step

    Returns
    -------
    data: speechbrain.dataio.dataset.DynamicItemDataset
        sorted data
    """
    if hparams["sorting"] == "ascending":
        # we sort training data to speed up training and get better results.
        data = data.filtered_sorted(sort_key="duration")
    elif hparams["sorting"] == "descending":
        data = data.filtered_sorted(sort_key="duration", reverse=True)
    elif hparams["sorting"] == "random":
        pass
    else:
        raise NotImplementedError(
            "sorting must be random, ascending or descending"
        )
    # Optionally subsample the dataset for faster experimentation
    sample = train_step.get("sample")
    if sample:
        sample_ids = list(data.data_ids)
        if train_step.get("sample_random"):
            random.shuffle(sample_ids)
        sample_ids = sample_ids[:sample]
        data = FilteredSortedDynamicItemDataset(data, sample_ids)
    return data
def filter_origins(data, hparams):
    """Filters a dataset using a specified list of origins,
    as indicated by the "origins" key in the hyperparameters
    provided (a comma-separated list; "*" means no filtering).

    Arguments
    ---------
    data: speechbrain.dataio.dataset.DynamicItemDataset
        the data to be filtered
    hparams: dict
        the hyperparameters data

    Returns
    -------
    data: speechbrain.dataio.dataset.DynamicItemDataset
        the filtered data
    """
    origins = hparams.get("origins")
    if origins and origins != "*":
        # Membership test against a set for efficient filtering
        origins = set(origins.split(","))
        data = data.filtered_sorted(
            key_test={"origin": lambda origin: origin in origins}
        )
    return data
def filter_homograph_positions(dataset):
    """Removes any defective homograph samples, i.e. samples for which
    the homograph end positions were not found (non-positive).

    Arguments
    ---------
    dataset: speechbrain.dataio.dataset.DynamicItemDataset
        the data to be filtered

    Returns
    -------
    data: speechbrain.dataio.dataset.DynamicItemDataset
        the filtered data
    """
    return dataset.filtered_sorted(
        key_test={
            "homograph_char_end": lambda value: value > 0,
            "homograph_phn_end": lambda value: value > 0,
        }
    )
def validate_hparams(hparams):
    """Validates the hyperparameter combination.

    Homograph disambiguation training is incompatible with non-wordwise
    tokenization of either graphemes or phonemes; an error is logged in
    that case.

    Arguments
    ---------
    hparams: dict
        the raw hyperparameters

    Returns
    -------
    result: bool
        True if the hyperparameters are valid, False otherwise
    """
    char_nonwordwise = hparams.get("char_tokenize") and not hparams.get(
        "char_token_wordwise"
    )
    phn_nonwordwise = hparams.get("phn_tokenize") and not hparams.get(
        "phn_token_wordwise"
    )
    supports_homograph = not (char_nonwordwise or phn_nonwordwise)
    if not supports_homograph and hparams.get("homograph_epochs", 0) > 0:
        logger.error(
            "ERROR: Non-wordwise tokenization is not supported with "
            "homograph disambiguation training"
        )
        return False
    return True
# Split names expected in the arrow dataset, in loading order
DATASET_SPLITS = ["train", "valid", "test"]


def load_datasets(hparams, train_step):
    """Loads the configured dataset from the Huggingface ``datasets``
    cache and wraps each split in a DynamicItemDataset.

    NOTE(review): the original docstring mentioned a custom-loader path
    ("If a custom loader is provided, it will be used"), but no such
    path exists in this function — confirm whether it was removed.

    Arguments
    ---------
    hparams: dict
        the raw hyperparameters ("dataset" and "data_folder" are used)
    train_step: dict
        the training step definition; its "name" selects the split
        prefix (defaults to "sentence")

    Returns
    -------
    results: list
        DynamicItemDataset instances for train, valid and test
    """
    data_folder = hparams["data_folder"]
    dataset = datasets.load_dataset(
        hparams["dataset"], cache_dir=hparams["data_folder"]
    )
    train_step_name = train_step.get("name", "sentence")
    results = [
        DynamicItemDataset.from_arrow_dataset(
            dataset[f"{train_step_name}_{key}"],
            replacements={"data_root": data_folder},
        )
        for key in DATASET_SPLITS
    ]
    return results
# TODO: Split this up into smaller functions
def dataio_prep(hparams, train_step=None):
    """This function prepares the datasets to be used in the brain class.
    It also defines the data processing pipeline through user-defined functions.

    Arguments
    ---------
    hparams: dict
        the hyperparameters dictionary
    train_step: dict
        the hyperparameters for the training step being executed
        (defaults to the top-level hyperparameters if omitted)

    Returns
    -------
    train_data: speechbrain.dataio.dataset.DynamicItemDataset
        the training dataset
    valid_data: speechbrain.dataio.dataset.DynamicItemDataset
        the validation dataset
    test_data: speechbrain.dataio.dataset.DynamicItemDataset
        the test dataset
    phoneme_encoder: speechbrain.dataio.encoder.TextEncoder
        the phoneme encoder
    """
    # If no step-specific hyperparameters are given, the global
    # hyperparameters dictionary doubles as the training step
    if not train_step:
        train_step = hparams
    # 1. Load the datasets:
    train_data, valid_data, test_data = load_datasets(hparams, train_step)
    if hparams["sorting"] == "ascending":
        # when sorting do not shuffle in dataloader ! otherwise is pointless
        hparams["dataloader_opts"]["shuffle"] = False
    elif hparams["sorting"] == "descending":
        hparams["dataloader_opts"]["shuffle"] = False
    elif hparams["sorting"] == "random":
        pass
    else:
        raise NotImplementedError(
            "sorting must be random, ascending or descending"
        )
    # Determine whether this step performs homograph disambiguation
    is_homograph = (
        TrainMode(train_step.get("mode", TrainMode.NORMAL))
        == TrainMode.HOMOGRAPH
    )
    train_data = sort_data(train_data, hparams, train_step)
    valid_data = sort_data(valid_data, hparams, train_step)
    test_data = sort_data(test_data, hparams, train_step)
    datasets = [train_data, valid_data, test_data]
    phoneme_encoder = hparams["phoneme_encoder"]
    grapheme_encoder = hparams["grapheme_encoder"]
    # 2. Define grapheme and phoneme pipelines:
    # Register BOS/EOS markers on both encoders using the configured indices
    enable_eos_bos(
        tokens=hparams["phonemes"],
        encoder=phoneme_encoder,
        bos_index=hparams["bos_index"],
        eos_index=hparams["eos_index"],
    )
    enable_eos_bos(
        tokens=hparams["graphemes"],
        encoder=grapheme_encoder,
        bos_index=hparams["bos_index"],
        eos_index=hparams["eos_index"],
    )
    # Graphemes: either a trained tokenizer or plain character encoding
    if hparams.get("char_tokenize"):
        grapheme_pipeline_item = partial(
            tokenizer_encode_pipeline,
            tokenizer=hparams["grapheme_tokenizer"],
            tokens=hparams["graphemes"],
            wordwise=hparams["char_token_wordwise"],
            token_space_index=hparams["token_space_index"],
        )
    else:
        grapheme_pipeline_item = partial(
            grapheme_pipeline, grapheme_encoder=grapheme_encoder
        )
    # Phonemes: either a trained tokenizer or plain phoneme encoding
    if hparams.get("phn_tokenize"):
        phoneme_pipeline_item = partial(
            tokenizer_encode_pipeline,
            tokenizer=hparams["phoneme_tokenizer"],
            tokens=hparams["phonemes"],
            char_map=hparams["phn_char_map"],
            wordwise=hparams["phn_token_wordwise"],
            token_space_index=hparams["token_space_index"],
        )
        # Ensure the tokenizers are trained
        if "grapheme_tokenizer" in hparams:
            hparams["grapheme_tokenizer"]()
        if "phoneme_tokenizer" in hparams:
            hparams["phoneme_tokenizer"]()
        enable_eos_bos(
            tokens=hparams["phonemes"],
            encoder=phoneme_encoder,
            bos_index=hparams["bos_index"],
            eos_index=hparams["eos_index"],
        )
    else:
        phoneme_pipeline_item = partial(
            phoneme_pipeline, phoneme_encoder=phoneme_encoder,
        )
    phn_bos_eos_pipeline_item = partial(add_bos_eos, encoder=phoneme_encoder)
    grapheme_bos_eos_pipeline_item = partial(
        add_bos_eos,
        # TODO: Use the grapheme encoder here (this will break some models)
        encoder=phoneme_encoder,
    )
    # Pipeline registrations: each entry maps input keys to provided keys
    dynamic_items = [
        {
            "func": grapheme_pipeline_item,
            "takes": ["char"],
            "provides": [
                "grapheme_list",
                # NOTE(review): "grpaheme" looks like a typo for "grapheme";
                # confirm nothing references the misspelled key before renaming
                "grpaheme_encoded_list",
                "grapheme_encoded",
            ],
        },
        {
            "func": phoneme_pipeline_item,
            "takes": ["phn"],
            "provides": ["phn_list", "phn_encoded_list", "phn_encoded"],
        },
        {
            "func": phn_bos_eos_pipeline_item,
            "takes": ["phn_encoded"],
            "provides": [
                "phn_encoded_bos",
                "phn_len_bos",
                "phn_encoded_eos",
                "phn_len_eos",
            ],
        },
        {
            "func": grapheme_bos_eos_pipeline_item,
            "takes": ["grapheme_encoded"],
            "provides": [
                "grapheme_encoded_bos",
                "grapheme_len_bos",
                "grapheme_encoded_eos",
                "grapheme_len_eos",
            ],
        },
    ]
    if hparams.get("phn_tokenize"):
        # A raw tokenizer is needed to determine the correct
        # word boundaries from data
        phoneme_raw_pipeline = partial(
            phoneme_pipeline, phoneme_encoder=phoneme_encoder,
        )
        dynamic_items.append(
            {
                "func": phoneme_raw_pipeline,
                "takes": ["phn"],
                "provides": [
                    "phn_raw_list",
                    "phn_raw_encoded_list",
                    "phn_raw_encoded",
                ],
            }
        )
    for dynamic_item in dynamic_items:
        sb.dataio.dataset.add_dynamic_item(datasets, **dynamic_item)
    # 3. Set output:
    output_keys = [
        "sample_id",
        "grapheme_encoded",
        "grapheme_encoded_bos",
        "grapheme_encoded_eos",
        "phn_encoded",
        "phn_encoded_eos",
        "phn_encoded_bos",
    ]
    # Homograph steps additionally need the homograph position markers
    if is_homograph:
        output_keys += [
            "homograph_wordid",
            "homograph_phn_start",
            "homograph_phn_end",
        ]
    if hparams.get("use_word_emb", False):
        output_keys.append("char")
    if (
        hparams.get("phn_tokenize", False)
        and "phn_raw_encoded" not in output_keys
    ):
        output_keys.append("phn_raw_encoded")
    sb.dataio.dataset.set_output_keys(
        datasets, output_keys,
    )
    if "origins" in hparams:
        datasets = [filter_origins(dataset, hparams) for dataset in datasets]
    # NOTE(review): this compares the raw string rather than using the
    # TrainMode enum as is_homograph does above - presumably equivalent
    if train_step.get("mode") == "homograph":
        datasets = [filter_homograph_positions(dataset) for dataset in datasets]
    train_data, valid_data, test_data = datasets
    return train_data, valid_data, test_data, phoneme_encoder
def load_dependencies(hparams, run_opts):
    """Loads any pre-trained dependencies (e.g. language models)

    Arguments
    ---------
    hparams: dict
        the hyperparameters dictionary
    run_opts: dict
        run options (only the "device" key is used here)
    """
    deps_pretrainer = hparams.get("deps_pretrainer")
    # The pretrainer is optional - do nothing when it is not configured
    if deps_pretrainer:
        # File collection runs on the main process only (DDP-safe);
        # loading the collected files happens on every process
        run_on_main(deps_pretrainer.collect_files)
        deps_pretrainer.load_collected(device=run_opts["device"])
def check_tensorboard(hparams):
    """Checks whether Tensorboard is enabled and initializes the logger if it is

    Arguments
    ---------
    hparams: dict
        the hyperparameter dictionary
    """
    # Nothing to do when TensorBoard logging is disabled
    if not hparams["use_tensorboard"]:
        return
    try:
        from speechbrain.utils.train_logger import TensorboardLogger

        hparams["tensorboard_train_logger"] = TensorboardLogger(
            hparams["tensorboard_logs"]
        )
    except ImportError:
        # Fall back gracefully: warn and turn the feature off
        logger.warning(
            "Could not enable TensorBoard logging - TensorBoard is not available"
        )
        hparams["use_tensorboard"] = False
if __name__ == "__main__":
    # CLI:
    # The whole run is wrapped in a hyperparameter-optimization context;
    # the objective reported to the optimizer is the PER
    with hp.hyperparameter_optimization(objective_key="PER") as hp_ctx:
        # Set a default PER
        hp.report_result({"PER": 0.0})
        hparams_file, run_opts, overrides = hp_ctx.parse_arguments(sys.argv[1:])
        # Load hyperparameters file with command-line overrides
        with open(hparams_file) as fin:
            hparams = load_hyperpyyaml(fin, overrides)
        # Validate hyperparameters
        if not validate_hparams(hparams):
            sys.exit(1)
        # Initialize ddp (useful only for multi-GPU DDP training)
        sb.utils.distributed.ddp_init_group(run_opts)
        if hparams.get("use_language_model"):
            load_dependencies(hparams, run_opts)
        check_tensorboard(hparams)
        from tokenizer_prepare import prepare_tokenizer # noqa
        # Create experiment directory
        sb.create_experiment_directory(
            experiment_directory=hparams["output_folder"],
            hyperparams_to_save=hparams_file,
            overrides=overrides,
        )
        # Tokenizer training needs its output folders to exist beforehand
        if hparams.get("char_tokenize") or hparams.get("phn_tokenize"):
            path_keys = [
                "grapheme_tokenizer_output_folder",
                "phoneme_tokenizer_output_folder",
            ]
            paths = [hparams[key] for key in path_keys]
            for path in paths:
                if not os.path.exists(path):
                    os.makedirs(path)
            # Data preparation runs on the main process only (DDP-safe)
            run_on_main(
                prepare_tokenizer,
                kwargs={
                    "dataset_name": hparams.get("dataset"),
                    "data_folder": hparams["data_folder"],
                    "save_folder": hparams["save_folder"],
                    "phonemes": hparams["phonemes"],
                },
            )
        # Run each configured training step (e.g. sentence, homograph)
        # in sequence, each with its own epoch counter
        for train_step in hparams["train_steps"]:
            epochs = train_step["epoch_counter"].limit
            if epochs < 1:
                logger.info("Skipping training step: %s", train_step["name"])
                continue
            logger.info("Running training step: %s", train_step["name"])
            # Dataset IO prep: creating Dataset objects and proper encodings for phones
            train_data, valid_data, test_data, phoneme_encoder = dataio_prep(
                hparams, train_step
            )
            # Trainer initialization
            g2p_brain = G2PBrain(
                train_step_name=train_step["name"],
                modules=hparams["modules"],
                opt_class=hparams["opt_class"],
                hparams=hparams,
                run_opts=run_opts,
                checkpointer=hparams["checkpointer"],
            )
            g2p_brain.phoneme_encoder = phoneme_encoder
            # NOTE: This gets modified after the first run and causes a double
            # argument issue
            dataloader_opts = train_step.get(
                "dataloader_opts", hparams.get("dataloader_opts", {})
            )
            if (
                "ckpt_prefix" in dataloader_opts
                and dataloader_opts["ckpt_prefix"] is None
            ):
                del dataloader_opts["ckpt_prefix"]
            train_dataloader_opts = dataloader_opts
            # Optional class balancing of the training sampler
            if train_step.get("balance"):
                sampler = BalancingDataSampler(
                    train_data, train_step["balance_on"]
                )
                train_dataloader_opts = dict(dataloader_opts, sampler=sampler)
            # NOTE(review): start_epoch appears unused in this scope - confirm
            start_epoch = train_step["epoch_counter"].current
            # Training/validation loop
            g2p_brain.fit(
                train_step["epoch_counter"],
                train_data,
                valid_data,
                train_loader_kwargs=train_dataloader_opts,
                valid_loader_kwargs=dataloader_opts,
            )
            # Test
            # skip_test may be a boolean or a comma-separated list of
            # step names for which evaluation should be skipped
            skip_test = hparams.get("skip_test", False)
            if isinstance(skip_test, str):
                skip_test = train_step["name"] in skip_test.split(",")
            if not skip_test:
                g2p_brain.evaluate(
                    test_data,
                    min_key=train_step.get("performance_key"),
                    test_loader_kwargs=dataloader_opts,
                )
                if hparams.get("save_for_pretrained"):
                    save_for_pretrained(
                        hparams, min_key=train_step.get("performance_key")
                    )
| 43,545 | 32.887938 | 87 | py |
speechbrain | speechbrain-main/recipes/LibriSpeech/self-supervised-learning/wav2vec2/train_sb_wav2vec2.py | #!/usr/bin/env python3
"""Recipe for pretraining wav2vec2 (https://arxiv.org/abs/2006.11477).
See config file for model definition.
See the readme of the recipe for advices on the pretraining that may appear
a bit challenging depending on your available resources.
To run this recipe call python train.py hparams/train_wav2vec.yaml --find_unused_parameters --max_grad_norm 0.0
Authors
* Rudolf Braun 2022
* Guillermo Cámbara 2022
* Titouan Parcollet 2022
"""
import logging
import sys
import time
from functools import partial
import speechbrain as sb
import torch
import torch.nn.functional as F
from torch.nn.parallel import DistributedDataParallel
from hyperpyyaml import load_hyperpyyaml
from speechbrain import Stage
from speechbrain.utils.distributed import run_on_main
from speechbrain.dataio.dataloader import SaveableDataLoader
from speechbrain.dataio.sampler import DynamicBatchSampler
from speechbrain.lobes.models.wav2vec import w2v_mask_collate_fn
from speechbrain.lobes.models.wav2vec import sample_negatives
logger = logging.getLogger(__name__)
class W2V2Brain(sb.core.Brain):
    """Brain subclass implementing self-supervised wav2vec 2.0 pretraining
    with a contrastive objective over quantised targets."""

    def compute_forward(self, batch, stage):
        """Computes forward pass through wav2vec model and returns encoded and
        target embeddings as well as other metrics of interest.
        """
        wavs, wav_lens, mask = batch
        wavs, wav_lens, mask = (
            wavs.to(self.device),
            wav_lens.to(self.device),
            mask.to(self.device),
        )
        batch_size = wavs.size(0)
        # Normalisation already done in dataloader
        # 1. Go through features extractor
        latents = self.modules.latent_extractor(wavs, normalize_signal=False)
        # 2. Go through latent (Transformer).
        results = self.modules.latent_encoder(
            latents, mask=mask, wav_lens=wav_lens,
        )
        embeddings = results["embeddings"]
        # 3. Mask some of the latent and projection
        # Keep only the masked positions, then restore the batch dimension
        embeddings = embeddings[mask]
        embeddings = self.modules.feat_proj(embeddings)
        results["embeddings"] = embeddings.view(
            batch_size, -1, embeddings.size(1)
        )
        latents = latents[mask].view(batch_size, -1, latents.size(2))
        # 4. Apply the quantiser as well
        # The quantised latents at masked positions become the targets
        targets, meta = self.modules.target_quantiser(latents)
        results.update(meta)
        results["targets"] = targets
        return results

    def compute_objectives(self, forward_outputs, batch, stage):
        """Samples negatives, computes contrastive loss and accuracy.
        """
        embeddings = forward_outputs["embeddings"]
        targets = forward_outputs["targets"]
        negs = sample_negatives(targets, self.hparams.num_negatives)
        loss, accuracy = self.hparams.loss(embeddings, targets, negs)
        # This is only used for logging purpose
        if stage != sb.Stage.TRAIN and sb.utils.distributed.if_main_process():
            self.acc_metric.append(accuracy)
        objectives = {
            "loss": loss,
            "accuracy": accuracy,
            "num_masked": forward_outputs["num_masked"],
            "ratio_masked": forward_outputs["ratio_masked"],
        }
        if (
            "diversity_loss" in forward_outputs
        ): # only quantised model has these
            objectives.update(
                {
                    "diversity_loss": forward_outputs["diversity_loss"],
                    "prob_perplex": forward_outputs["prob_perplex"],
                    "code_perplex": forward_outputs["code_perplex"],
                    "num_vars": forward_outputs["num_vars"],
                    "temp": forward_outputs["temp"],
                }
            )
        # Compute the loss given the original equation from the paper
        loss = objectives["loss"]
        if self.hparams.diversity_loss_weight == 0.0:
            objectives["backprop_loss"] = loss
        else:
            objectives["backprop_loss"] = (
                loss
                + objectives["diversity_loss"]
                * self.hparams.diversity_loss_weight
                * objectives["num_masked"]
            )
        return objectives

    def fit_batch(self, batch):
        """Runs one training step, with optional automatic mixed precision
        and gradient accumulation (the optimizer steps only every
        ``grad_accumulation_factor`` batches)."""
        should_step = self.step % self.grad_accumulation_factor == 0
        # Managing automatic mixed precision
        if self.auto_mix_prec:
            # no_sync skips DDP gradient sync on non-stepping batches
            with self.no_sync(not should_step):
                with torch.cuda.amp.autocast():
                    outputs = self.compute_forward(batch, Stage.TRAIN)
                    objectives = self.compute_objectives(
                        outputs, batch, Stage.TRAIN
                    )
                self.scaler.scale(
                    objectives["backprop_loss"] / self.grad_accumulation_factor
                ).backward()
                objectives["total_loss"] = objectives["backprop_loss"].detach()
            if should_step:
                # Unscale before the gradient check so thresholds apply
                # to the true gradient magnitudes
                self.scaler.unscale_(self.optimizer)
                if self.check_gradients(objectives["backprop_loss"]):
                    self.scaler.step(self.optimizer)
                self.optimizer.zero_grad()
                self.optimizer_step += 1
                self.scaler.update()
        else:
            with self.no_sync(not should_step):
                outputs = self.compute_forward(batch, Stage.TRAIN)
                objectives = self.compute_objectives(
                    outputs, batch, Stage.TRAIN
                )
                (
                    objectives["backprop_loss"] / self.grad_accumulation_factor
                ).backward()
                objectives["total_loss"] = objectives["backprop_loss"].detach()
            if should_step:
                if self.check_gradients(objectives["backprop_loss"]):
                    self.optimizer.step()
                self.optimizer.zero_grad()
                self.optimizer_step += 1
        if should_step:
            self.on_fit_batch_end(objectives)
        return objectives["backprop_loss"].detach()

    def on_fit_batch_end(self, objectives):
        """ Called after fit_batch(), updates learning rate and does per-step logging. """
        # Unwrap the DDP module to reach the underlying quantiser
        if isinstance(self.modules.target_quantiser, DistributedDataParallel):
            w2v_model = self.modules.target_quantiser.module
        else:
            w2v_model = self.modules.target_quantiser
        w2v_model.quantiser.update_temp(self.optimizer_step)
        self.hparams.lr_scheduler(self.optimizer, self.optimizer_step)
        # Perform step-wise logging
        if (
            hasattr(self.hparams, "log_interval")
            and self.optimizer_step % self.hparams.log_interval == 0
        ):
            # Create a dictionary and fill it with everything we
            # want to log such as contrastive loss, diversity loss,
            # learning rate etc.
            log_dct = {
                k: (v.item() if isinstance(v, torch.Tensor) else v)
                for k, v in objectives.items()
            }
            current_lr = self.optimizer.param_groups[0]["lr"]
            log_dct["steps"] = self.optimizer_step
            log_dct["lr"] = current_lr
            log_dct["avg_loss"] = self.avg_train_loss
            # Wall-clock time since the previous log entry (absent on
            # the very first log of a run)
            if hasattr(self, "time_last_log"):
                run_time_since_last_log = time.time() - self.time_last_log
                log_dct["run_time"] = run_time_since_last_log
            self.time_last_log = time.time()
            if sb.utils.distributed.if_main_process():
                self.hparams.train_steps_logger.log_stats(stats_meta=log_dct,)

    def evaluate_batch(self, batch, stage):
        """ Returns accuracy on contrastive objective. """
        out = self.compute_forward(batch, stage=stage)
        objectives = self.compute_objectives(out, batch, stage=stage)
        return objectives["backprop_loss"].detach().cpu()

    def on_stage_start(self, stage, epoch):
        """Gets called at the beginning of each epoch"""
        if stage != sb.Stage.TRAIN:
            self.acc_metric = []

    def on_stage_end(self, stage, stage_loss, epoch=None):
        """Logs epoch-level statistics and checkpoints at the end of a stage."""
        stage_stats = {"loss": stage_loss}
        if stage == sb.Stage.TRAIN:
            self.train_stats = stage_stats
        if stage == sb.Stage.VALID:
            # NOTE(review): leftover debug print - consider removing
            print(self.acc_metric)
            stage_stats["accuracy"] = sum(self.acc_metric) / len(
                self.acc_metric
            )
            self.hparams.train_stage_logger.log_stats(
                stats_meta={
                    "epoch": epoch,
                    "steps": self.optimizer_step,
                    "lr": self.optimizer.param_groups[0]["lr"],
                },
                train_stats=self.train_stats,
                valid_stats=stage_stats,
            )
            self.checkpointer.save_and_keep_only(
                end_of_epoch=True,
                num_to_keep=5,
                meta={"valid_loss": stage_loss},
            )
def dataio_prepare(hparams):
    """Prepares the train/valid datasets, the dynamic batching sampler and
    the wav2vec 2.0 masking collate function.

    Arguments
    ---------
    hparams: dict
        the hyperparameters dictionary

    Returns
    -------
    train_data: speechbrain.dataio.dataset.DynamicItemDataset
        the training dataset
    valid_loader: speechbrain.dataio.dataloader.SaveableDataLoader
        a ready-made validation dataloader
    train_loader_kwargs: dict
        keyword arguments for building the training dataloader
    """
    data_folder = hparams["data_folder"]
    train_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["train_csv"], replacements={"data_root": data_folder},
    )
    # We remove longer and shorter files from the train.
    train_data = train_data.filtered_sorted(
        sort_key="duration",
        key_max_value={"duration": hparams["avoid_if_longer_than"]},
        key_min_value={"duration": hparams["avoid_if_shorter_than"]},
    )
    valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["valid_csv"], replacements={"data_root": data_folder},
    )
    datasets = [train_data, valid_data]

    def get_output_lengths(input_lengths):
        """ Function to get the output length of the feature extractor this is
        necessary to compute the masks of wav2vec2.
        """

        def _conv_out_length(input_length, kernel_size, stride):
            # Standard (non-padded) convolution output-length formula
            return torch.floor((input_length - kernel_size) / stride + 1)

        # Apply the formula once per convolutional layer of the extractor
        for kernel_size, stride in zip(
            hparams["latentextractor_kernels"],
            hparams["latentextractor_strides"],
        ):
            input_lengths = _conv_out_length(input_lengths, kernel_size, stride)
        return input_lengths.to(torch.long)

    @sb.utils.data_pipeline.takes("wav")
    @sb.utils.data_pipeline.provides("sig")
    def audio_pipeline(wav):
        sig = sb.dataio.dataio.read_audio(wav)
        assert sig.dim() == 1, sig.dim()
        # Audio normalization
        with torch.no_grad():
            sig = F.layer_norm(sig, sig.shape)
        return sig

    sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline)
    sb.dataio.dataset.set_output_keys(datasets, ["id", "sig"])
    # We create the DynamicBatch Sampler
    train_sampler = DynamicBatchSampler(
        train_data,
        hparams["seconds_per_batch"],
        num_buckets=hparams["train_num_buckets"],
        length_func=lambda x: x["duration"],
        batch_ordering="random",
        shuffle=True,
    )
    # We define the custom collation function that is necessary for w2v2 to
    # generate masks.
    w2v_mask_collate_fn_partial = partial(
        w2v_mask_collate_fn,
        get_out_len_fn=get_output_lengths,
        mask_prob=hparams["mask_prob"],
        mask_length=hparams["mask_length"],
    )
    train_loader_kwargs = {
        "batch_sampler": train_sampler,
        "collate_fn": w2v_mask_collate_fn_partial,
        "num_workers": hparams["train_dataloader_options"]["num_workers"],
        "pin_memory": True,
    }
    valid_loader = SaveableDataLoader(
        valid_data,
        collate_fn=w2v_mask_collate_fn_partial,
        num_workers=hparams["test_dataloader_options"]["num_workers"],
        batch_size=hparams["test_dataloader_options"]["batch_size"],
        pin_memory=True,
    )
    return train_data, valid_loader, train_loader_kwargs
def main():
    """Entry point: parses arguments, prepares LibriSpeech data and runs
    wav2vec 2.0 pretraining."""
    logger.setLevel(logging.INFO)
    hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])
    sb.utils.distributed.ddp_init_group(run_opts)
    with open(hparams_file) as fin:
        hparams = load_hyperpyyaml(fin, overrides)
    hparams.update(run_opts)
    sb.create_experiment_directory(
        experiment_directory=hparams["output_folder"],
        hyperparams_to_save=hparams_file,
        overrides=overrides,
    )
    from librispeech_prepare import prepare_librispeech

    # Data preparation runs on the main process only (DDP-safe)
    run_on_main(
        prepare_librispeech,
        kwargs={
            "data_folder": hparams["data_folder"],
            "tr_splits": hparams["train_splits"],
            "dev_splits": hparams["dev_splits"],
            "te_splits": hparams["test_splits"],
            "save_folder": hparams["output_folder"],
            "merge_lst": hparams["train_splits"],
            "merge_name": "train.csv",
            "skip_prep": hparams["skip_prep"],
        },
    )
    # Part that matters starts here.
    train_dataset, valid_loader, train_loader_kwargs = dataio_prepare(hparams)
    brain = W2V2Brain(
        modules=hparams["modules"],
        opt_class=hparams["optimizer"],
        hparams=hparams,
        run_opts=run_opts,
        checkpointer=hparams["checkpointer"],
    )
    brain.fit(
        brain.hparams.epoch_counter,
        train_dataset,
        valid_loader,
        train_loader_kwargs=train_loader_kwargs,
        progressbar=False,
    )


if __name__ == "__main__":
    main()
| 13,411 | 33.836364 | 111 | py |
speechbrain | speechbrain-main/recipes/LibriSpeech/ASR/transducer/train.py | #!/usr/bin/env/python3
"""Recipe for training a Transducer ASR system with librispeech.
The system employs an encoder, a decoder, and an joint network
between them. Decoding is performed with beamsearch coupled with a neural
language model.
To run this recipe, do the following:
> python train.py hparams/train.yaml
With the default hyperparameters, the system employs a CRDNN encoder.
The decoder is based on a standard GRU. Beamsearch coupled with a RNN
language model is used on the top of decoder probabilities.
The neural network is trained on both CTC and negative-log likelihood
targets and sub-word units estimated with Byte Pairwise Encoding (BPE)
are used as basic recognition tokens. Training is performed on the full
LibriSpeech dataset (960 h).
The experiment file is flexible enough to support a large variety of
different systems. By properly changing the parameter files, you can try
different encoders, decoders, tokens (e.g, characters instead of BPE),
training split (e.g, train-clean 100 rather than the full one), and many
other possible variations.
Authors
* Abdel Heba 2020
* Mirco Ravanelli 2020
* Ju-Chieh Chou 2020
* Peter Plantinga 2020
"""
import os
import sys
import torch
import logging
import speechbrain as sb
from speechbrain.utils.distributed import run_on_main
from hyperpyyaml import load_hyperpyyaml
from pathlib import Path
logger = logging.getLogger(__name__)
# Define training procedure
class ASR(sb.Brain):
    """Brain subclass implementing Transducer ASR training with optional
    auxiliary CTC and CE losses during the early epochs."""

    def compute_forward(self, batch, stage):
        """Forward computations from the waveform batches to the output probabilities."""
        batch = batch.to(self.device)
        wavs, wav_lens = batch.sig
        tokens_with_bos, token_with_bos_lens = batch.tokens_bos
        # wavs, wav_lens = wavs.to(self.device), wav_lens.to(self.device)
        # Add augmentation if specified
        if stage == sb.Stage.TRAIN:
            # Environmental corruption doubles the batch: clean + noisy
            # copies, with targets duplicated to match
            if hasattr(self.modules, "env_corrupt"):
                wavs_noise = self.modules.env_corrupt(wavs, wav_lens)
                wavs = torch.cat([wavs, wavs_noise], dim=0)
                wav_lens = torch.cat([wav_lens, wav_lens])
                batch.sig = wavs, wav_lens
                tokens_with_bos = torch.cat(
                    [tokens_with_bos, tokens_with_bos], dim=0
                )
                token_with_bos_lens = torch.cat(
                    [token_with_bos_lens, token_with_bos_lens]
                )
                batch.tokens_bos = tokens_with_bos, token_with_bos_lens
            if hasattr(self.modules, "augmentation"):
                wavs = self.modules.augmentation(wavs, wav_lens)
        # Forward pass
        feats = self.hparams.compute_features(wavs)
        feats = self.modules.normalize(feats, wav_lens)
        x = self.modules.enc(feats.detach())
        e_in = self.modules.emb(tokens_with_bos)
        h, _ = self.modules.dec(e_in)
        # Joint network
        # add labelseq_dim to the encoder tensor: [B,T,H_enc] => [B,T,1,H_enc]
        # add timeseq_dim to the decoder tensor: [B,U,H_dec] => [B,1,U,H_dec]
        joint = self.modules.Tjoint(x.unsqueeze(2), h.unsqueeze(1))
        # Output layer for transducer log-probabilities
        logits_transducer = self.modules.transducer_lin(joint)
        # Compute outputs
        if stage == sb.Stage.TRAIN:
            return_CTC = False
            return_CE = False
            current_epoch = self.hparams.epoch_counter.current
            # Auxiliary CTC head, active only during the first
            # number_of_ctc_epochs epochs
            if (
                hasattr(self.hparams, "ctc_cost")
                and current_epoch <= self.hparams.number_of_ctc_epochs
            ):
                return_CTC = True
                # Output layer for ctc log-probabilities
                out_ctc = self.modules.enc_lin(x)
                p_ctc = self.hparams.log_softmax(out_ctc)
            # Auxiliary CE head, active only during the first
            # number_of_ce_epochs epochs
            if (
                hasattr(self.hparams, "ce_cost")
                and current_epoch <= self.hparams.number_of_ce_epochs
            ):
                return_CE = True
                # Output layer for CE log-probabilities
                p_ce = self.modules.dec_lin(h)
                p_ce = self.hparams.log_softmax(p_ce)
            # The tuple length encodes which auxiliary heads are active;
            # compute_objectives relies on this
            if return_CE and return_CTC:
                return p_ctc, p_ce, logits_transducer, wav_lens
            elif return_CTC:
                return p_ctc, logits_transducer, wav_lens
            elif return_CE:
                return p_ce, logits_transducer, wav_lens
            else:
                return logits_transducer, wav_lens
        elif stage == sb.Stage.VALID:
            # Greedy decoding for fast validation
            best_hyps, scores, _, _ = self.hparams.Greedysearcher(x)
            return logits_transducer, wav_lens, best_hyps
        else:
            # Full beam search (optionally with LM) at test time
            (
                best_hyps,
                best_scores,
                nbest_hyps,
                nbest_scores,
            ) = self.hparams.Beamsearcher(x)
            return logits_transducer, wav_lens, best_hyps

    def compute_objectives(self, predictions, batch, stage):
        """Computes the loss (Transducer+(CTC+NLL)) given predictions and targets."""
        ids = batch.id
        current_epoch = self.hparams.epoch_counter.current
        tokens, token_lens = batch.tokens
        tokens_eos, token_eos_lens = batch.tokens_eos
        # Mirror the batch duplication done by env_corrupt in compute_forward
        if hasattr(self.modules, "env_corrupt") and stage == sb.Stage.TRAIN:
            tokens_eos = torch.cat([tokens_eos, tokens_eos], dim=0)
            token_eos_lens = torch.cat([token_eos_lens, token_eos_lens], dim=0)
            tokens = torch.cat([tokens, tokens], dim=0)
            token_lens = torch.cat([token_lens, token_lens], dim=0)
        if stage == sb.Stage.TRAIN:
            # Both auxiliary heads active: weighted CTC + CE + transducer
            if len(predictions) == 4:
                p_ctc, p_ce, logits_transducer, wav_lens = predictions
                CTC_loss = self.hparams.ctc_cost(
                    p_ctc, tokens, wav_lens, token_lens
                )
                CE_loss = self.hparams.ce_cost(
                    p_ce, tokens_eos, length=token_eos_lens
                )
                loss_transducer = self.hparams.transducer_cost(
                    logits_transducer, tokens, wav_lens, token_lens
                )
                loss = (
                    self.hparams.ctc_weight * CTC_loss
                    + self.hparams.ce_weight * CE_loss
                    + (1 - (self.hparams.ctc_weight + self.hparams.ce_weight))
                    * loss_transducer
                )
            elif len(predictions) == 3:
                # one of the 2 heads (CTC or CE) is still computed
                # CTC alive
                if current_epoch <= self.hparams.number_of_ctc_epochs:
                    p_ctc, logits_transducer, wav_lens = predictions
                    CTC_loss = self.hparams.ctc_cost(
                        p_ctc, tokens, wav_lens, token_lens
                    )
                    loss_transducer = self.hparams.transducer_cost(
                        logits_transducer, tokens, wav_lens, token_lens
                    )
                    loss = (
                        self.hparams.ctc_weight * CTC_loss
                        + (1 - self.hparams.ctc_weight) * loss_transducer
                    )
                # CE for decoder alive
                else:
                    p_ce, logits_transducer, wav_lens = predictions
                    CE_loss = self.hparams.ce_cost(
                        p_ce, tokens_eos, length=token_eos_lens
                    )
                    loss_transducer = self.hparams.transducer_cost(
                        logits_transducer, tokens, wav_lens, token_lens
                    )
                    # NOTE(review): this weights the transducer loss with
                    # (1 - ctc_weight) although the auxiliary term here is
                    # CE - (1 - ce_weight) may have been intended; confirm
                    loss = (
                        self.hparams.ce_weight * CE_loss
                        + (1 - self.hparams.ctc_weight) * loss_transducer
                    )
            else:
                # No auxiliary heads: pure transducer loss
                logits_transducer, wav_lens = predictions
                loss = self.hparams.transducer_cost(
                    logits_transducer, tokens, wav_lens, token_lens
                )
        else:
            logits_transducer, wav_lens, predicted_tokens = predictions
            loss = self.hparams.transducer_cost(
                logits_transducer, tokens, wav_lens, token_lens
            )
        if stage != sb.Stage.TRAIN:
            # Decode token terms to words
            predicted_words = [
                self.tokenizer.decode_ids(utt_seq).split(" ")
                for utt_seq in predicted_tokens
            ]
            target_words = [wrd.split(" ") for wrd in batch.wrd]
            self.wer_metric.append(ids, predicted_words, target_words)
            self.cer_metric.append(ids, predicted_words, target_words)
        return loss

    def fit_batch(self, batch):
        """Train the parameters given a single batch in input"""
        predictions = self.compute_forward(batch, sb.Stage.TRAIN)
        loss = self.compute_objectives(predictions, batch, sb.Stage.TRAIN)
        loss.backward()
        # Step only when the gradient check passes (e.g. finite norms)
        if self.check_gradients(loss):
            self.optimizer.step()
        self.optimizer.zero_grad()
        return loss.detach()

    def evaluate_batch(self, batch, stage):
        """Computations needed for validation/test batches"""
        predictions = self.compute_forward(batch, stage=stage)
        with torch.no_grad():
            loss = self.compute_objectives(predictions, batch, stage=stage)
        return loss.detach()

    def on_stage_start(self, stage, epoch):
        """Gets called at the beginning of each epoch"""
        if stage != sb.Stage.TRAIN:
            self.cer_metric = self.hparams.cer_computer()
            self.wer_metric = self.hparams.error_rate_computer()

    def on_stage_end(self, stage, stage_loss, epoch):
        """Gets called at the end of a epoch."""
        # Compute/store important stats
        stage_stats = {"loss": stage_loss}
        if stage == sb.Stage.TRAIN:
            self.train_stats = stage_stats
        else:
            stage_stats["CER"] = self.cer_metric.summarize("error_rate")
            stage_stats["WER"] = self.wer_metric.summarize("error_rate")
        # Perform end-of-iteration things, like annealing, logging, etc.
        if stage == sb.Stage.VALID:
            # Anneal the learning rate based on validation WER
            old_lr, new_lr = self.hparams.lr_annealing(stage_stats["WER"])
            sb.nnet.schedulers.update_learning_rate(self.optimizer, new_lr)
            self.hparams.train_logger.log_stats(
                stats_meta={"epoch": epoch, "lr": old_lr},
                train_stats=self.train_stats,
                valid_stats=stage_stats,
            )
            # Keep only the best checkpoint according to WER
            self.checkpointer.save_and_keep_only(
                meta={"WER": stage_stats["WER"]}, min_keys=["WER"],
            )
        elif stage == sb.Stage.TEST:
            self.hparams.train_logger.log_stats(
                stats_meta={"Epoch loaded": self.hparams.epoch_counter.current},
                test_stats=stage_stats,
            )
            with open(self.hparams.wer_file, "w") as w:
                self.wer_metric.write_stats(w)
def dataio_prepare(hparams):
    """This function prepares the datasets to be used in the brain class.
    It also defines the data processing pipeline through user-defined functions.

    Arguments
    ---------
    hparams: dict
        the hyperparameters dictionary

    Returns
    -------
    train_data: speechbrain.dataio.dataset.DynamicItemDataset
        the training dataset
    valid_data: speechbrain.dataio.dataset.DynamicItemDataset
        the validation dataset
    test_datasets: dict
        one DynamicItemDataset per test CSV, keyed by file stem
    """
    data_folder = hparams["data_folder"]
    train_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["train_csv"], replacements={"data_root": data_folder},
    )
    if hparams["sorting"] == "ascending":
        # we sort training data to speed up training and get better results.
        train_data = train_data.filtered_sorted(sort_key="duration")
        # when sorting do not shuffle in dataloader ! otherwise is pointless
        hparams["train_dataloader_opts"]["shuffle"] = False
    elif hparams["sorting"] == "descending":
        train_data = train_data.filtered_sorted(
            sort_key="duration", reverse=True
        )
        # when sorting do not shuffle in dataloader ! otherwise is pointless
        hparams["train_dataloader_opts"]["shuffle"] = False
    elif hparams["sorting"] == "random":
        pass
    else:
        raise NotImplementedError(
            "sorting must be random, ascending or descending"
        )
    valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["valid_csv"], replacements={"data_root": data_folder},
    )
    valid_data = valid_data.filtered_sorted(sort_key="duration")
    # test is separate
    test_datasets = {}
    for csv_file in hparams["test_csv"]:
        name = Path(csv_file).stem
        test_datasets[name] = sb.dataio.dataset.DynamicItemDataset.from_csv(
            csv_path=csv_file, replacements={"data_root": data_folder}
        )
        test_datasets[name] = test_datasets[name].filtered_sorted(
            sort_key="duration"
        )
    datasets = [train_data, valid_data] + [i for k, i in test_datasets.items()]
    # Defining tokenizer and loading it
    # To avoid mismatch, we have to use the same tokenizer used for LM training
    tokenizer = hparams["tokenizer"]
    # 2. Define audio pipeline:
    @sb.utils.data_pipeline.takes("wav")
    @sb.utils.data_pipeline.provides("sig")
    def audio_pipeline(wav):
        sig = sb.dataio.dataio.read_audio(wav)
        return sig

    sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline)
    # 3. Define text pipeline:
    @sb.utils.data_pipeline.takes("wrd")
    @sb.utils.data_pipeline.provides(
        "wrd", "tokens_list", "tokens_bos", "tokens_eos", "tokens"
    )
    def text_pipeline(wrd):
        yield wrd
        tokens_list = tokenizer.encode_as_ids(wrd)
        yield tokens_list
        # The transducer blank index doubles as the BOS/EOS marker here
        tokens_bos = torch.LongTensor([hparams["blank_index"]] + (tokens_list))
        yield tokens_bos
        tokens_eos = torch.LongTensor(tokens_list + [hparams["blank_index"]])
        yield tokens_eos
        tokens = torch.LongTensor(tokens_list)
        yield tokens

    sb.dataio.dataset.add_dynamic_item(datasets, text_pipeline)
    # 4. Set output:
    sb.dataio.dataset.set_output_keys(
        datasets, ["id", "sig", "wrd", "tokens_bos", "tokens_eos", "tokens"],
    )
    return train_data, valid_data, test_datasets
if __name__ == "__main__":
    # CLI:
    hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])
    # If --distributed_launch then
    # create ddp_group with the right communication protocol
    sb.utils.distributed.ddp_init_group(run_opts)
    with open(hparams_file) as fin:
        hparams = load_hyperpyyaml(fin, overrides)
    # Create experiment directory
    sb.create_experiment_directory(
        experiment_directory=hparams["output_folder"],
        hyperparams_to_save=hparams_file,
        overrides=overrides,
    )
    # 1. # Dataset prep (parsing Librispeech)
    from librispeech_prepare import prepare_librispeech # noqa

    # multi-gpu (ddp) save data preparation
    run_on_main(
        prepare_librispeech,
        kwargs={
            "data_folder": hparams["data_folder"],
            "tr_splits": hparams["train_splits"],
            "dev_splits": hparams["dev_splits"],
            "te_splits": hparams["test_splits"],
            "save_folder": hparams["output_folder"],
            "merge_lst": hparams["train_splits"],
            "merge_name": "train.csv",
            "skip_prep": hparams["skip_prep"],
        },
    )
    # here we create the datasets objects as well as tokenization and encoding
    train_data, valid_data, test_datasets = dataio_prepare(hparams)
    # We download the pretrained LM and the tokenizer from HuggingFace (or elsewhere
    # depending on the path given in the YAML file). The tokenizer is loaded at
    # the same time.
    run_on_main(hparams["pretrainer"].collect_files)
    hparams["pretrainer"].load_collected(device=run_opts["device"])
    # Trainer initialization
    asr_brain = ASR(
        modules=hparams["modules"],
        opt_class=hparams["opt_class"],
        hparams=hparams,
        run_opts=run_opts,
        checkpointer=hparams["checkpointer"],
    )
    # We dynamically add the tokenizer to our brain class.
    # NB: This tokenizer corresponds to the one used for the LM!!
    asr_brain.tokenizer = hparams["tokenizer"]
    # Training
    asr_brain.fit(
        asr_brain.hparams.epoch_counter,
        train_data,
        valid_data,
        train_loader_kwargs=hparams["train_dataloader_opts"],
        valid_loader_kwargs=hparams["valid_dataloader_opts"],
    )
    # Testing
    # One evaluation pass (and one WER report file) per test split
    for k in test_datasets.keys(): # keys are test_clean, test_other etc
        asr_brain.hparams.wer_file = os.path.join(
            hparams["output_folder"], "wer_{}.txt".format(k)
        )
        asr_brain.evaluate(
            test_datasets[k], test_loader_kwargs=hparams["test_dataloader_opts"]
        )
speechbrain | speechbrain-main/recipes/LibriSpeech/ASR/seq2seq/train.py | #!/usr/bin/env/python3
"""Recipe for training a sequence-to-sequence ASR system with librispeech.
The system employs an encoder, a decoder, and an attention mechanism
between them. Decoding is performed with beamsearch coupled with a neural
language model.
To run this recipe, do the following:
> python train.py hparams/train_BPE1000.yaml
With the default hyperparameters, the system employs a CRDNN encoder.
The decoder is based on a standard GRU. Beamsearch coupled with a RNN
language model is used on the top of decoder probabilities.
The neural network is trained on both CTC and negative-log likelihood
targets and sub-word units estimated with Byte Pairwise Encoding (BPE)
are used as basic recognition tokens. Training is performed on the full
LibriSpeech dataset (960 h).
The experiment file is flexible enough to support a large variety of
different systems. By properly changing the parameter files, you can try
different encoders, decoders, tokens (e.g, characters instead of BPE),
training split (e.g, train-clean 100 rather than the full one), and many
other possible variations.
This recipe assumes that the tokenizer and the LM are already trained.
To avoid token mismatches, the tokenizer used for the acoustic model is
the same use for the LM. The recipe downloads the pre-trained tokenizer
and LM.
If you would like to train a full system from scratch do the following:
1- Train a tokenizer (see ../../Tokenizer)
2- Train a language model (see ../../LM)
3- Train the acoustic model (with this code).
Authors
* Ju-Chieh Chou 2020
* Mirco Ravanelli 2020
* Abdel Heba 2020
* Peter Plantinga 2020
* Samuele Cornell 2020
* Andreas Nautsch 2021
"""
import os
import sys
import torch
import logging
import speechbrain as sb
from speechbrain.utils.distributed import run_on_main
from hyperpyyaml import load_hyperpyyaml
from pathlib import Path
logger = logging.getLogger(__name__)
# Define training procedure
class ASR(sb.Brain):
    """Seq2seq ASR trainer (encoder + attentional decoder).

    Trains with a weighted CTC + NLL loss for the first
    ``number_of_ctc_epochs`` epochs, then NLL only. Beam search
    (``valid_search`` / ``test_search``) is used for decoding outside
    training.
    """

    def compute_forward(self, batch, stage):
        """Forward computations from the waveform batches to the output probabilities.

        Returns ``(p_ctc, p_seq, wav_lens)`` during CTC-epochs of training,
        ``(p_seq, wav_lens)`` afterwards, and ``(p_seq, wav_lens, p_tokens)``
        for validation/test (with decoded hypotheses).
        """
        batch = batch.to(self.device)
        wavs, wav_lens = batch.sig
        tokens_bos, _ = batch.tokens_bos
        # NOTE(review): batch.to(self.device) above should already have moved
        # these tensors; the explicit .to() below is redundant but harmless.
        wavs, wav_lens = wavs.to(self.device), wav_lens.to(self.device)

        # Add augmentation if specified
        if stage == sb.Stage.TRAIN:
            if hasattr(self.modules, "env_corrupt"):
                # Environmental corruption doubles the batch: clean + noisy
                # copies are concatenated, so lengths/targets are doubled too.
                wavs_noise = self.modules.env_corrupt(wavs, wav_lens)
                wavs = torch.cat([wavs, wavs_noise], dim=0)
                wav_lens = torch.cat([wav_lens, wav_lens])
                tokens_bos = torch.cat([tokens_bos, tokens_bos], dim=0)
            if hasattr(self.hparams, "augmentation"):
                wavs = self.hparams.augmentation(wavs, wav_lens)

        # Forward pass
        feats = self.hparams.compute_features(wavs)
        feats = self.modules.normalize(feats, wav_lens)
        # detach() blocks gradients from flowing back into feature
        # extraction/normalization.
        x = self.modules.enc(feats.detach())
        e_in = self.modules.emb(tokens_bos)  # y_in bos + tokens
        h, _ = self.modules.dec(e_in, x, wav_lens)

        # Output layer for seq2seq log-probabilities
        logits = self.modules.seq_lin(h)
        p_seq = self.hparams.log_softmax(logits)

        # Compute outputs
        if stage == sb.Stage.TRAIN:
            current_epoch = self.hparams.epoch_counter.current
            if current_epoch <= self.hparams.number_of_ctc_epochs:
                # Output layer for ctc log-probabilities
                logits = self.modules.ctc_lin(x)
                p_ctc = self.hparams.log_softmax(logits)
                return p_ctc, p_seq, wav_lens
            else:
                return p_seq, wav_lens
        else:
            # Beam-search decoding for validation/test.
            if stage == sb.Stage.VALID:
                p_tokens, scores = self.hparams.valid_search(x, wav_lens)
            else:
                p_tokens, scores = self.hparams.test_search(x, wav_lens)
            return p_seq, wav_lens, p_tokens

    def compute_objectives(self, predictions, batch, stage):
        """Computes the loss (CTC+NLL) given predictions and targets."""
        current_epoch = self.hparams.epoch_counter.current
        # Unpack according to what compute_forward returned for this stage.
        if stage == sb.Stage.TRAIN:
            if current_epoch <= self.hparams.number_of_ctc_epochs:
                p_ctc, p_seq, wav_lens = predictions
            else:
                p_seq, wav_lens = predictions
        else:
            p_seq, wav_lens, predicted_tokens = predictions

        ids = batch.id
        tokens_eos, tokens_eos_lens = batch.tokens_eos
        tokens, tokens_lens = batch.tokens

        # Targets must be doubled to match the doubled batch produced by
        # env_corrupt in compute_forward.
        if hasattr(self.modules, "env_corrupt") and stage == sb.Stage.TRAIN:
            tokens_eos = torch.cat([tokens_eos, tokens_eos], dim=0)
            tokens_eos_lens = torch.cat(
                [tokens_eos_lens, tokens_eos_lens], dim=0
            )
            tokens = torch.cat([tokens, tokens], dim=0)
            tokens_lens = torch.cat([tokens_lens, tokens_lens], dim=0)

        loss_seq = self.hparams.seq_cost(
            p_seq, tokens_eos, length=tokens_eos_lens
        )

        # Add ctc loss if necessary (only during the CTC warm-up epochs).
        if (
            stage == sb.Stage.TRAIN
            and current_epoch <= self.hparams.number_of_ctc_epochs
        ):
            loss_ctc = self.hparams.ctc_cost(
                p_ctc, tokens, wav_lens, tokens_lens
            )
            loss = self.hparams.ctc_weight * loss_ctc
            loss += (1 - self.hparams.ctc_weight) * loss_seq
        else:
            loss = loss_seq

        if stage != sb.Stage.TRAIN:
            # Decode token terms to words
            predicted_words = [
                self.tokenizer.decode_ids(utt_seq).split(" ")
                for utt_seq in predicted_tokens
            ]
            target_words = [wrd.split(" ") for wrd in batch.wrd]
            self.wer_metric.append(ids, predicted_words, target_words)
            self.cer_metric.append(ids, predicted_words, target_words)

        return loss

    def fit_batch(self, batch):
        """Train the parameters given a single batch in input"""
        predictions = self.compute_forward(batch, sb.Stage.TRAIN)
        loss = self.compute_objectives(predictions, batch, sb.Stage.TRAIN)
        loss.backward()
        # Step only when gradients are finite (check_gradients also clips).
        if self.check_gradients(loss):
            self.optimizer.step()
        self.optimizer.zero_grad()
        return loss.detach()

    def evaluate_batch(self, batch, stage):
        """Computations needed for validation/test batches"""
        # NOTE(review): the forward pass runs outside torch.no_grad(); only the
        # objectives are guarded. Correct, but the forward could be wrapped too.
        predictions = self.compute_forward(batch, stage=stage)
        with torch.no_grad():
            loss = self.compute_objectives(predictions, batch, stage=stage)
        return loss.detach()

    def on_stage_start(self, stage, epoch):
        """Gets called at the beginning of each epoch"""
        if stage != sb.Stage.TRAIN:
            self.cer_metric = self.hparams.cer_computer()
            self.wer_metric = self.hparams.error_rate_computer()

    def on_stage_end(self, stage, stage_loss, epoch):
        """Gets called at the end of a epoch."""
        # Compute/store important stats
        stage_stats = {"loss": stage_loss}
        if stage == sb.Stage.TRAIN:
            self.train_stats = stage_stats
        else:
            stage_stats["CER"] = self.cer_metric.summarize("error_rate")
            stage_stats["WER"] = self.wer_metric.summarize("error_rate")

        # Perform end-of-iteration things, like annealing, logging, etc.
        if stage == sb.Stage.VALID:
            # Anneal LR on validation WER, log, and checkpoint (keep best WER).
            old_lr, new_lr = self.hparams.lr_annealing(stage_stats["WER"])
            sb.nnet.schedulers.update_learning_rate(self.optimizer, new_lr)
            self.hparams.train_logger.log_stats(
                stats_meta={"epoch": epoch, "lr": old_lr},
                train_stats=self.train_stats,
                valid_stats=stage_stats,
            )
            self.checkpointer.save_and_keep_only(
                meta={"WER": stage_stats["WER"]}, min_keys=["WER"],
            )
        elif stage == sb.Stage.TEST:
            self.hparams.train_logger.log_stats(
                stats_meta={"Epoch loaded": self.hparams.epoch_counter.current},
                test_stats=stage_stats,
            )
            # Write the detailed per-utterance WER report.
            with open(self.hparams.wer_file, "w") as w:
                self.wer_metric.write_stats(w)
def dataio_prepare(hparams):
    """Prepare the datasets used by the Brain class.

    Builds train/valid/test ``DynamicItemDataset`` objects from the CSV
    manifests, attaches the audio and text processing pipelines, and
    (optionally) builds dynamic batch samplers.

    Arguments
    ---------
    hparams : dict
        Loaded hyperparameters; must contain the CSV paths, sorting mode,
        tokenizer, bos/eos indices, and dataloader options.

    Returns
    -------
    tuple
        ``(train_data, valid_data, test_datasets, train_batch_sampler,
        valid_batch_sampler)`` — the samplers are ``None`` unless
        ``hparams["dynamic_batching"]`` is enabled.
    """
    data_folder = hparams["data_folder"]

    train_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["train_csv"], replacements={"data_root": data_folder},
    )

    if hparams["sorting"] == "ascending":
        # we sort training data to speed up training and get better results.
        train_data = train_data.filtered_sorted(sort_key="duration")
        # when sorting do not shuffle in dataloader ! otherwise is pointless
        hparams["train_dataloader_opts"]["shuffle"] = False
    elif hparams["sorting"] == "descending":
        train_data = train_data.filtered_sorted(
            sort_key="duration", reverse=True
        )
        # when sorting do not shuffle in dataloader ! otherwise is pointless
        hparams["train_dataloader_opts"]["shuffle"] = False
    elif hparams["sorting"] == "random":
        pass
    else:
        raise NotImplementedError(
            "sorting must be random, ascending or descending"
        )

    valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["valid_csv"], replacements={"data_root": data_folder},
    )
    valid_data = valid_data.filtered_sorted(sort_key="duration")

    # test is separate: one dataset per test CSV, keyed by file stem.
    test_datasets = {}
    for csv_file in hparams["test_csv"]:
        name = Path(csv_file).stem
        test_datasets[name] = sb.dataio.dataset.DynamicItemDataset.from_csv(
            csv_path=csv_file, replacements={"data_root": data_folder}
        )
        test_datasets[name] = test_datasets[name].filtered_sorted(
            sort_key="duration"
        )

    # Idiom fix: iterate values directly instead of discarding keys from
    # .items() ([i for k, i in ...]).
    datasets = [train_data, valid_data] + list(test_datasets.values())

    # We get the tokenizer as we need it to encode the labels when creating
    # mini-batches.
    tokenizer = hparams["tokenizer"]

    # 2. Define audio pipeline:
    @sb.utils.data_pipeline.takes("wav")
    @sb.utils.data_pipeline.provides("sig")
    def audio_pipeline(wav):
        sig = sb.dataio.dataio.read_audio(wav)
        return sig

    sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline)

    # 3. Define text pipeline: yields the raw words plus the BPE token ids
    # with and without bos/eos markers.
    @sb.utils.data_pipeline.takes("wrd")
    @sb.utils.data_pipeline.provides(
        "wrd", "tokens_list", "tokens_bos", "tokens_eos", "tokens"
    )
    def text_pipeline(wrd):
        yield wrd
        tokens_list = tokenizer.encode_as_ids(wrd)
        yield tokens_list
        tokens_bos = torch.LongTensor([hparams["bos_index"]] + (tokens_list))
        yield tokens_bos
        tokens_eos = torch.LongTensor(tokens_list + [hparams["eos_index"]])
        yield tokens_eos
        tokens = torch.LongTensor(tokens_list)
        yield tokens

    sb.dataio.dataset.add_dynamic_item(datasets, text_pipeline)

    # 4. Set output:
    sb.dataio.dataset.set_output_keys(
        datasets, ["id", "sig", "wrd", "tokens_bos", "tokens_eos", "tokens"],
    )

    train_batch_sampler = None
    valid_batch_sampler = None
    if hparams["dynamic_batching"]:
        from speechbrain.dataio.sampler import DynamicBatchSampler  # noqa
        from speechbrain.dataio.dataloader import SaveableDataLoader  # noqa
        from speechbrain.dataio.batch import PaddedBatch  # noqa

        dynamic_hparams = hparams["dynamic_batch_sampler"]
        hop_size = dynamic_hparams["feats_hop_size"]

        num_buckets = dynamic_hparams["num_buckets"]

        # Length is expressed in feature frames: duration / hop_size.
        train_batch_sampler = DynamicBatchSampler(
            train_data,
            dynamic_hparams["max_batch_len"],
            num_buckets=num_buckets,
            length_func=lambda x: x["duration"] * (1 / hop_size),
            shuffle=dynamic_hparams["shuffle_ex"],
            batch_ordering=dynamic_hparams["batch_ordering"],
        )

        valid_batch_sampler = DynamicBatchSampler(
            valid_data,
            dynamic_hparams["max_batch_len"],
            num_buckets=num_buckets,
            length_func=lambda x: x["duration"] * (1 / hop_size),
            shuffle=dynamic_hparams["shuffle_ex"],
            batch_ordering=dynamic_hparams["batch_ordering"],
        )

    return (
        train_data,
        valid_data,
        test_datasets,
        train_batch_sampler,
        valid_batch_sampler,
    )
if __name__ == "__main__":
    # CLI: parse hyperparameter file path plus run options / YAML overrides.
    hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])

    # If --distributed_launch then
    # create ddp_group with the right communication protocol
    sb.utils.distributed.ddp_init_group(run_opts)

    # Load hyperparameters (returns a plain dict), applying CLI overrides.
    with open(hparams_file) as fin:
        hparams = load_hyperpyyaml(fin, overrides)

    # Create experiment directory
    sb.create_experiment_directory(
        experiment_directory=hparams["output_folder"],
        hyperparams_to_save=hparams_file,
        overrides=overrides,
    )

    # Dataset prep (parsing Librispeech)
    from librispeech_prepare import prepare_librispeech  # noqa

    # multi-gpu (ddp) save data preparation: run only on the main process so
    # the CSV manifests are written exactly once.
    run_on_main(
        prepare_librispeech,
        kwargs={
            "data_folder": hparams["data_folder"],
            "tr_splits": hparams["train_splits"],
            "dev_splits": hparams["dev_splits"],
            "te_splits": hparams["test_splits"],
            "save_folder": hparams["output_folder"],
            "merge_lst": hparams["train_splits"],
            "merge_name": "train.csv",
            "skip_prep": hparams["skip_prep"],
        },
    )

    # here we create the datasets objects as well as tokenization and encoding
    (
        train_data,
        valid_data,
        test_datasets,
        train_bsampler,
        valid_bsampler,
    ) = dataio_prepare(hparams)

    # We download the pretrained LM from HuggingFace (or elsewhere depending on
    # the path given in the YAML file). The tokenizer is loaded at the same time.
    run_on_main(hparams["pretrainer"].collect_files)
    hparams["pretrainer"].load_collected(device=run_opts["device"])

    # Trainer initialization
    asr_brain = ASR(
        modules=hparams["modules"],
        opt_class=hparams["opt_class"],
        hparams=hparams,
        run_opts=run_opts,
        checkpointer=hparams["checkpointer"],
    )

    # We dynamicaly add the tokenizer to our brain class.
    # NB: This tokenizer corresponds to the one used for the LM!!
    asr_brain.tokenizer = hparams["tokenizer"]

    # When dynamic batching is enabled, the batch samplers replace the plain
    # dataloader options entirely.
    train_dataloader_opts = hparams["train_dataloader_opts"]
    valid_dataloader_opts = hparams["valid_dataloader_opts"]

    if train_bsampler is not None:
        train_dataloader_opts = {"batch_sampler": train_bsampler}
    if valid_bsampler is not None:
        valid_dataloader_opts = {"batch_sampler": valid_bsampler}

    # Training
    asr_brain.fit(
        asr_brain.hparams.epoch_counter,
        train_data,
        valid_data,
        train_loader_kwargs=train_dataloader_opts,
        valid_loader_kwargs=valid_dataloader_opts,
    )

    # Testing: one evaluation (and one WER report file) per test split.
    for k in test_datasets.keys():  # keys are test_clean, test_other etc
        asr_brain.hparams.wer_file = os.path.join(
            hparams["output_folder"], "wer_{}.txt".format(k)
        )
        asr_brain.evaluate(
            test_datasets[k], test_loader_kwargs=hparams["test_dataloader_opts"]
        )
| 15,618 | 35.92435 | 89 | py |
speechbrain | speechbrain-main/recipes/LibriSpeech/ASR/CTC/train_with_wav2vec.py | #!/usr/bin/env/python3
"""Recipe for training a wav2vec-based ctc ASR system with librispeech.
The system employs wav2vec as its encoder. Decoding is performed with
ctc greedy decoder.
To run this recipe, do the following:
> python train_with_wav2vec.py hparams/train_{hf,sb}_wav2vec.yaml
The neural network is trained on CTC likelihood target and character units
are used as basic recognition tokens.
Authors
* Rudolf A Braun 2022
* Titouan Parcollet 2022
* Sung-Lin Yeh 2021
* Ju-Chieh Chou 2020
* Mirco Ravanelli 2020
* Abdel Heba 2020
* Peter Plantinga 2020
* Samuele Cornell 2020
"""
import os
import sys
import torch
import logging
import speechbrain as sb
from speechbrain.utils.distributed import run_on_main
from hyperpyyaml import load_hyperpyyaml
from pathlib import Path
logger = logging.getLogger(__name__)
# Define training procedure
class ASR(sb.Brain):
    """wav2vec2-based CTC ASR trainer.

    Uses a pretrained wav2vec2 encoder (SpeechBrain or HuggingFace flavor)
    followed by a DNN and a CTC output layer, with separate optimizers for
    the wav2vec2 encoder and the downstream model.
    """

    def compute_forward(self, batch, stage):
        """Forward computations from the waveform batches to the output probabilities."""
        batch = batch.to(self.device)
        wavs, wav_lens = batch.sig
        # NOTE(review): batch.to(self.device) above should already have moved
        # these tensors; the explicit .to() below is redundant but harmless.
        wavs, wav_lens = wavs.to(self.device), wav_lens.to(self.device)

        # Downsample the inputs if specified
        if hasattr(self.modules, "downsampler"):
            wavs = self.modules.downsampler(wavs)

        # Add augmentation if specified
        if stage == sb.Stage.TRAIN:
            if hasattr(self.modules, "env_corrupt"):
                # Environmental corruption doubles the batch: clean + noisy
                # copies are concatenated (targets are doubled in
                # compute_objectives).
                wavs_noise = self.modules.env_corrupt(wavs, wav_lens)
                wavs = torch.cat([wavs, wavs_noise], dim=0)
                wav_lens = torch.cat([wav_lens, wav_lens])

            if hasattr(self.hparams, "augmentation"):
                wavs = self.hparams.augmentation(wavs, wav_lens)

        # Forward pass

        # Handling SpeechBrain vs HuggingFace pretrained models
        if hasattr(self.modules, "extractor"):  # SpeechBrain pretrained model
            latents = self.modules.extractor(wavs)
            feats = self.modules.encoder_wrapper(latents, wav_lens=wav_lens)[
                "embeddings"
            ]
        else:  # HuggingFace pretrained model
            feats = self.modules.wav2vec2(wavs, wav_lens)

        x = self.modules.enc(feats)

        # Compute outputs
        p_tokens = None
        logits = self.modules.ctc_lin(x)

        # Upsample the inputs if they have been highly downsampled
        if hasattr(self.hparams, "upsampling") and self.hparams.upsampling:
            logits = logits.view(
                logits.shape[0], -1, self.hparams.output_neurons
            )

        p_ctc = self.hparams.log_softmax(logits)

        if stage != sb.Stage.TRAIN:
            # Greedy CTC decoding for validation/test hypotheses.
            p_tokens = sb.decoders.ctc_greedy_decode(
                p_ctc, wav_lens, blank_id=self.hparams.blank_index
            )
        return p_ctc, wav_lens, p_tokens

    def compute_objectives(self, predictions, batch, stage):
        """Computes the loss (CTC+NLL) given predictions and targets."""
        p_ctc, wav_lens, predicted_tokens = predictions

        ids = batch.id
        tokens, tokens_lens = batch.tokens

        # Targets must be doubled to match the doubled batch produced by
        # env_corrupt in compute_forward.
        if hasattr(self.modules, "env_corrupt") and stage == sb.Stage.TRAIN:
            tokens = torch.cat([tokens, tokens], dim=0)
            tokens_lens = torch.cat([tokens_lens, tokens_lens], dim=0)

        loss_ctc = self.hparams.ctc_cost(p_ctc, tokens, wav_lens, tokens_lens)
        loss = loss_ctc

        if stage == sb.Stage.VALID:
            # Decode token terms to words
            predicted_words = [
                "".join(self.tokenizer.decode_ndim(utt_seq)).split(" ")
                for utt_seq in predicted_tokens
            ]
            target_words = [wrd.split(" ") for wrd in batch.wrd]
            self.wer_metric.append(ids, predicted_words, target_words)
            self.cer_metric.append(ids, predicted_words, target_words)
        if stage == sb.Stage.TEST:  # Language model decoding only used for test
            if self.hparams.use_language_modelling:
                # `decoder` is the module-level pyctcdecode decoder built in
                # __main__ when use_language_modelling is enabled.
                predicted_words = []
                for logs in p_ctc:
                    text = decoder.decode(logs.detach().cpu().numpy())
                    predicted_words.append(text.split(" "))
            else:
                predicted_words = [
                    "".join(self.tokenizer.decode_ndim(utt_seq)).split(" ")
                    for utt_seq in predicted_tokens
                ]
            target_words = [wrd.split(" ") for wrd in batch.wrd]
            self.wer_metric.append(ids, predicted_words, target_words)
            self.cer_metric.append(ids, predicted_words, target_words)

        return loss

    def fit_batch(self, batch):
        """Train on one batch with gradient accumulation and optional AMP.

        Both optimizers (wav2vec2 and downstream model) are stepped only on
        accumulation boundaries and only when gradients are finite.
        """
        should_step = self.step % self.grad_accumulation_factor == 0
        # Managing automatic mixed precision
        if self.auto_mix_prec:
            self.wav2vec_optimizer.zero_grad()
            self.model_optimizer.zero_grad()

            with torch.cuda.amp.autocast():
                with self.no_sync():
                    outputs = self.compute_forward(batch, sb.Stage.TRAIN)
                loss = self.compute_objectives(outputs, batch, sb.Stage.TRAIN)

            # no_sync(True) skips DDP gradient sync on non-step iterations.
            with self.no_sync(not should_step):
                self.scaler.scale(
                    loss / self.grad_accumulation_factor
                ).backward()
            if should_step:

                if not self.hparams.freeze_wav2vec:
                    self.scaler.unscale_(self.wav2vec_optimizer)
                self.scaler.unscale_(self.model_optimizer)
                if self.check_gradients(loss):
                    self.scaler.step(self.wav2vec_optimizer)
                    self.scaler.step(self.model_optimizer)
                self.scaler.update()
                self.optimizer_step += 1
        else:
            # This speeds up training by skipping DDP gradient sync until the
            # accumulation boundary.
            with self.no_sync():
                outputs = self.compute_forward(batch, sb.Stage.TRAIN)

            loss = self.compute_objectives(outputs, batch, sb.Stage.TRAIN)
            (loss / self.grad_accumulation_factor).backward()
            if should_step:
                if self.check_gradients(loss):
                    self.wav2vec_optimizer.step()
                    self.model_optimizer.step()
                self.wav2vec_optimizer.zero_grad()
                self.model_optimizer.zero_grad()
                self.optimizer_step += 1

        return loss.detach().cpu()

    def on_stage_start(self, stage, epoch):
        """Gets called at the beginning of each epoch"""
        if stage != sb.Stage.TRAIN:
            self.cer_metric = self.hparams.cer_computer()
            self.wer_metric = self.hparams.error_rate_computer()

    def on_stage_end(self, stage, stage_loss, epoch):
        """Gets called at the end of an epoch."""
        # Compute/store important stats
        stage_stats = {"loss": stage_loss}
        if stage == sb.Stage.TRAIN:
            self.train_stats = stage_stats
        else:
            stage_stats["CER"] = self.cer_metric.summarize("error_rate")
            stage_stats["WER"] = self.wer_metric.summarize("error_rate")

        # Perform end-of-iteration things, like annealing, logging, etc.
        if stage == sb.Stage.VALID:
            # Both schedulers anneal on validation loss; log and checkpoint
            # (keep best WER).
            old_lr_model, new_lr_model = self.hparams.lr_annealing_model(
                stage_stats["loss"]
            )
            old_lr_wav2vec, new_lr_wav2vec = self.hparams.lr_annealing_wav2vec(
                stage_stats["loss"]
            )
            sb.nnet.schedulers.update_learning_rate(
                self.model_optimizer, new_lr_model
            )
            sb.nnet.schedulers.update_learning_rate(
                self.wav2vec_optimizer, new_lr_wav2vec
            )
            self.hparams.train_logger.log_stats(
                stats_meta={
                    "epoch": epoch,
                    "lr_model": old_lr_model,
                    "lr_wav2vec": old_lr_wav2vec,
                },
                train_stats=self.train_stats,
                valid_stats=stage_stats,
            )
            self.checkpointer.save_and_keep_only(
                meta={"WER": stage_stats["WER"]}, min_keys=["WER"],
            )
        elif stage == sb.Stage.TEST:
            self.hparams.train_logger.log_stats(
                stats_meta={"Epoch loaded": self.hparams.epoch_counter.current},
                test_stats=stage_stats,
            )
            # Write the detailed per-utterance WER report.
            with open(self.hparams.wer_file, "w") as w:
                self.wer_metric.write_stats(w)

    def init_optimizers(self):
        "Initializes the wav2vec2 optimizer and model optimizer"

        # Handling SpeechBrain vs HuggingFace pretrained models
        if hasattr(self.modules, "extractor"):  # SpeechBrain pretrained model
            self.wav2vec_optimizer = self.hparams.wav2vec_opt_class(
                self.modules.encoder_wrapper.parameters()
            )

        else:  # HuggingFace pretrained model
            self.wav2vec_optimizer = self.hparams.wav2vec_opt_class(
                self.modules.wav2vec2.parameters()
            )

        self.model_optimizer = self.hparams.model_opt_class(
            self.hparams.model.parameters()
        )

        # Register both optimizers so their state is checkpointed/restored.
        if self.checkpointer is not None:
            self.checkpointer.add_recoverable(
                "wav2vec_opt", self.wav2vec_optimizer
            )
            self.checkpointer.add_recoverable("modelopt", self.model_optimizer)

    def zero_grad(self, set_to_none=False):
        """Zero the gradients of both optimizers."""
        self.wav2vec_optimizer.zero_grad(set_to_none)
        self.model_optimizer.zero_grad(set_to_none)
def dataio_prepare(hparams):
    """Prepare the datasets used by the Brain class.

    Builds train/valid/test ``DynamicItemDataset`` objects from the CSV
    manifests, attaches the audio and character-level text pipelines, and
    creates (or loads) the CTC label encoder.

    Arguments
    ---------
    hparams : dict
        Loaded hyperparameters; must contain the CSV paths, sorting mode,
        ``save_folder`` and ``blank_index``.

    Returns
    -------
    tuple
        ``(train_data, valid_data, test_datasets, label_encoder)``.
    """
    data_folder = hparams["data_folder"]

    train_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["train_csv"], replacements={"data_root": data_folder},
    )

    if hparams["sorting"] == "ascending":
        # we sort training data to speed up training and get better results.
        train_data = train_data.filtered_sorted(sort_key="duration")
        # when sorting do not shuffle in dataloader ! otherwise is pointless
        hparams["train_dataloader_opts"]["shuffle"] = False
    elif hparams["sorting"] == "descending":
        train_data = train_data.filtered_sorted(
            sort_key="duration", reverse=True
        )
        # when sorting do not shuffle in dataloader ! otherwise is pointless
        hparams["train_dataloader_opts"]["shuffle"] = False
    elif hparams["sorting"] == "random":
        pass
    else:
        raise NotImplementedError(
            "sorting must be random, ascending or descending"
        )

    valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["valid_csv"], replacements={"data_root": data_folder},
    )
    valid_data = valid_data.filtered_sorted(sort_key="duration")

    # test is separate: one dataset per test CSV, keyed by file stem.
    test_datasets = {}
    for csv_file in hparams["test_csv"]:
        name = Path(csv_file).stem
        test_datasets[name] = sb.dataio.dataset.DynamicItemDataset.from_csv(
            csv_path=csv_file, replacements={"data_root": data_folder}
        )
        test_datasets[name] = test_datasets[name].filtered_sorted(
            sort_key="duration"
        )

    # Idiom fix: iterate values directly instead of discarding keys from
    # .items() ([i for k, i in ...]).
    datasets = [train_data, valid_data] + list(test_datasets.values())

    # 2. Define audio pipeline:
    @sb.utils.data_pipeline.takes("wav")
    @sb.utils.data_pipeline.provides("sig")
    def audio_pipeline(wav):
        sig = sb.dataio.dataio.read_audio(wav)
        return sig

    sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline)
    label_encoder = sb.dataio.encoder.CTCTextEncoder()

    # 3. Define text pipeline: character-level tokens for CTC training.
    @sb.utils.data_pipeline.takes("wrd")
    @sb.utils.data_pipeline.provides(
        "wrd", "char_list", "tokens_list", "tokens"
    )
    def text_pipeline(wrd):
        yield wrd
        char_list = list(wrd)
        yield char_list
        tokens_list = label_encoder.encode_sequence(char_list)
        yield tokens_list
        tokens = torch.LongTensor(tokens_list)
        yield tokens

    sb.dataio.dataset.add_dynamic_item(datasets, text_pipeline)

    # Build (or reload) the character label encoder from the training set so
    # the vocabulary is stable across runs.
    lab_enc_file = os.path.join(hparams["save_folder"], "label_encoder.txt")
    special_labels = {
        "blank_label": hparams["blank_index"],
    }
    label_encoder.load_or_create(
        path=lab_enc_file,
        from_didatasets=[train_data],
        output_key="char_list",
        special_labels=special_labels,
        sequence_input=True,
    )

    # 4. Set output:
    sb.dataio.dataset.set_output_keys(
        datasets, ["id", "sig", "wrd", "char_list", "tokens"],
    )
    return train_data, valid_data, test_datasets, label_encoder
if __name__ == "__main__":
    # CLI: parse hyperparameter file path plus run options / YAML overrides.
    hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])

    # If distributed_launch=True then
    # create ddp_group with the right communication protocol
    sb.utils.distributed.ddp_init_group(run_opts)

    # Load hyperparameters (returns a plain dict), applying CLI overrides.
    with open(hparams_file) as fin:
        hparams = load_hyperpyyaml(fin, overrides)

    # Create experiment directory
    sb.create_experiment_directory(
        experiment_directory=hparams["output_folder"],
        hyperparams_to_save=hparams_file,
        overrides=overrides,
    )

    # Dataset prep (parsing Librispeech)
    from librispeech_prepare import prepare_librispeech  # noqa

    # multi-gpu (ddp) save data preparation: run only on the main process so
    # the CSV manifests are written exactly once.
    run_on_main(
        prepare_librispeech,
        kwargs={
            "data_folder": hparams["data_folder"],
            "tr_splits": hparams["train_splits"],
            "dev_splits": hparams["dev_splits"],
            "te_splits": hparams["test_splits"],
            "save_folder": hparams["output_folder"],
            "merge_lst": hparams["train_splits"],
            "merge_name": "train.csv",
            "skip_prep": hparams["skip_prep"],
        },
    )

    # here we create the datasets objects as well as tokenization and encoding
    train_data, valid_data, test_datasets, label_encoder = dataio_prepare(
        hparams
    )

    # Loading the labels for the LM decoding and the CTC decoder.
    # BUG FIX: `hparams` is a plain dict returned by load_hyperpyyaml, so the
    # original `hasattr(hparams, "use_language_modelling")` was always False,
    # which silently forced use_language_modelling to False even when the YAML
    # enabled it. A membership test is the correct check.
    if "use_language_modelling" in hparams:
        if hparams["use_language_modelling"]:
            try:
                from pyctcdecode import build_ctcdecoder
            except ImportError:
                err_msg = "Optional dependencies must be installed to use pyctcdecode.\n"
                err_msg += "Install using `pip install kenlm pyctcdecode`.\n"
                raise ImportError(err_msg)

            ind2lab = label_encoder.ind2lab
            labels = [ind2lab[x] for x in range(len(ind2lab))]
            labels = [""] + labels[
                1:
            ]  # Replace the <blank> token with a blank character, needed for PyCTCdecode
            # `decoder` is read as a module-level name by ASR.compute_objectives.
            decoder = build_ctcdecoder(
                labels,
                kenlm_model_path=hparams[
                    "ngram_lm_path"
                ],  # either .arpa or .bin file
                alpha=0.5,  # Default by KenLM
                beta=1.0,  # Default by KenLM
            )
    else:
        hparams["use_language_modelling"] = False

    # Trainer initialization
    asr_brain = ASR(
        modules=hparams["modules"],
        hparams=hparams,
        run_opts=run_opts,
        checkpointer=hparams["checkpointer"],
    )

    # We load the pretrained wav2vec2 model
    if "pretrainer" in hparams.keys():
        run_on_main(hparams["pretrainer"].collect_files)
        hparams["pretrainer"].load_collected(asr_brain.device)

    # We dynamicaly add the tokenizer to our brain class.
    # NB: This tokenizer corresponds to the one used for the LM!!
    asr_brain.tokenizer = label_encoder

    # Training
    asr_brain.fit(
        asr_brain.hparams.epoch_counter,
        train_data,
        valid_data,
        train_loader_kwargs=hparams["train_dataloader_opts"],
        valid_loader_kwargs=hparams["valid_dataloader_opts"],
    )

    # Testing: one evaluation (and one WER report file) per test split.
    for k in test_datasets.keys():  # keys are test_clean, test_other etc
        asr_brain.hparams.wer_file = os.path.join(
            hparams["output_folder"], "wer_{}.txt".format(k)
        )
        asr_brain.evaluate(
            test_datasets[k], test_loader_kwargs=hparams["test_dataloader_opts"]
        )
| 16,312 | 36.415138 | 89 | py |
speechbrain | speechbrain-main/recipes/LibriSpeech/ASR/CTC/train_with_whisper.py | #!/usr/bin/env/python3
"""Recipe for training a whisper-based ctc ASR system with librispeech.
The system employs whisper from OpenAI (https://cdn.openai.com/papers/whisper.pdf).
This recipe take only the whisper encoder and add a DNN + CTC to fine-tune.
If you want to use the full whisper system, please refer to the recipe
speechbrain/recipes/LibriSpeech/ASR/transformer/train_with_whisper.py
To run this recipe, do the following:
> python train_with_whisper.py hparams/train_hf_whisper_encoder.yaml
Authors
* Titouan Parcollet 2022
* Rudolf A Braun 2022
* Sung-Lin Yeh 2021
* Ju-Chieh Chou 2020
* Mirco Ravanelli 2020
* Abdel Heba 2020
* Peter Plantinga 2020
* Samuele Cornell 2020
"""
import os
import sys
import torch
import logging
import speechbrain as sb
from speechbrain.utils.distributed import run_on_main
from speechbrain.tokenizers.SentencePiece import SentencePiece
from speechbrain.utils.data_utils import undo_padding
from hyperpyyaml import load_hyperpyyaml
from pathlib import Path
logger = logging.getLogger(__name__)
# Define training procedure
class ASR(sb.Brain):
def compute_forward(self, batch, stage):
"""Forward computations from the waveform batches to the output probabilities."""
batch = batch.to(self.device)
wavs, wav_lens = batch.sig
wavs, wav_lens = wavs.to(self.device), wav_lens.to(self.device)
# Add augmentation if specified
if stage == sb.Stage.TRAIN:
if hasattr(self.hparams, "augmentation"):
wavs = self.hparams.augmentation(wavs, wav_lens)
# Forward pass
# Encode with Whisper and then DNN
feats = self.modules.whisper(wavs)
x = self.modules.enc(feats)
# Compute outputs
p_tokens = None
logits = self.modules.ctc_lin(x)
p_ctc = self.hparams.log_softmax(logits)
if stage != sb.Stage.TRAIN:
p_tokens = sb.decoders.ctc_greedy_decode(
p_ctc, wav_lens, blank_id=self.hparams.blank_index
)
return p_ctc, wav_lens, p_tokens
def compute_objectives(self, predictions, batch, stage):
"""Computes the loss (CTC) given predictions and targets."""
p_ctc, wav_lens, predicted_tokens = predictions
ids = batch.id
tokens, tokens_lens = batch.tokens
loss_ctc = self.hparams.ctc_cost(p_ctc, tokens, wav_lens, tokens_lens)
loss = loss_ctc
if stage != sb.Stage.TRAIN:
# Decode token terms to words
predicted_words = self.tokenizer(
predicted_tokens, task="decode_from_list"
)
# Convert indices to words
target_words = undo_padding(tokens, tokens_lens)
target_words = self.tokenizer(target_words, task="decode_from_list")
self.wer_metric.append(ids, predicted_words, target_words)
self.cer_metric.append(ids, predicted_words, target_words)
return loss
def fit_batch(self, batch):
should_step = self.step % self.grad_accumulation_factor == 0
# Managing automatic mixed precision
if self.auto_mix_prec:
self.whisper_optimizer.zero_grad()
self.model_optimizer.zero_grad()
with torch.cuda.amp.autocast():
outputs = self.compute_forward(batch, sb.Stage.TRAIN)
loss = self.compute_objectives(outputs, batch, sb.Stage.TRAIN)
self.scaler.scale(loss / self.grad_accumulation_factor).backward()
if should_step:
self.scaler.unscale_(self.whisper_optimizer)
self.scaler.unscale_(self.model_optimizer)
if self.check_gradients(loss):
if self.optimizer_step > self.hparams.warmup_steps:
# Here we added a warmup to the CTC encoder to make sure that
# it does not screw the whisper with too large gradients.
self.scaler.step(self.whisper_optimizer)
self.scaler.step(self.model_optimizer)
self.scaler.update()
self.optimizer_step += 1
else:
outputs = self.compute_forward(batch, sb.Stage.TRAIN)
loss = self.compute_objectives(outputs, batch, sb.Stage.TRAIN)
(loss / self.grad_accumulation_factor).backward()
if should_step:
if self.check_gradients(loss):
# Here we added a warmup to the CTC encoder to make sure that
# it does not screw the whisper with too large gradients.
if self.optimizer_step > self.hparams.warmup_steps:
self.whisper_optimizer.step()
self.model_optimizer.step()
self.whisper_optimizer.zero_grad()
self.model_optimizer.zero_grad()
self.optimizer_step += 1
return loss.detach().cpu()
def on_stage_start(self, stage, epoch):
"""Gets called at the beginning of each epoch"""
if stage != sb.Stage.TRAIN:
self.cer_metric = self.hparams.cer_computer()
self.wer_metric = self.hparams.error_rate_computer()
def on_stage_end(self, stage, stage_loss, epoch):
"""Gets called at the end of an epoch."""
# Compute/store important stats
stage_stats = {"loss": stage_loss}
if stage == sb.Stage.TRAIN:
self.train_stats = stage_stats
else:
stage_stats["CER"] = self.cer_metric.summarize("error_rate")
stage_stats["WER"] = self.wer_metric.summarize("error_rate")
# Perform end-of-iteration things, like annealing, logging, etc.
if stage == sb.Stage.VALID:
old_lr_model, new_lr_model = self.hparams.lr_annealing_model(
stage_stats["loss"]
)
old_lr_whisper, new_lr_whisper = self.hparams.lr_annealing_whisper(
stage_stats["loss"]
)
sb.nnet.schedulers.update_learning_rate(
self.model_optimizer, new_lr_model
)
sb.nnet.schedulers.update_learning_rate(
self.whisper_optimizer, new_lr_whisper
)
self.hparams.train_logger.log_stats(
stats_meta={
"epoch": epoch,
"lr_model": old_lr_model,
"lr_whisperc": old_lr_whisper,
},
train_stats=self.train_stats,
valid_stats=stage_stats,
)
self.checkpointer.save_and_keep_only(
meta={"WER": stage_stats["WER"]}, min_keys=["WER"],
)
elif stage == sb.Stage.TEST:
self.hparams.train_logger.log_stats(
stats_meta={"Epoch loaded": self.hparams.epoch_counter.current},
test_stats=stage_stats,
)
with open(self.hparams.wer_file, "w") as w:
self.wer_metric.write_stats(w)
    def init_optimizers(self):
        "Initializes the whisper optimizer and model optimizer"
        # Two separate optimizers so the pretrained whisper encoder and the
        # downstream model can use different learning rates / schedules.
        self.whisper_optimizer = self.hparams.whisper_opt_class(
            self.modules.whisper.parameters()
        )
        self.model_optimizer = self.hparams.model_opt_class(
            self.hparams.model.parameters()
        )
        # Register both optimizers so their state is saved/restored with
        # checkpoints. NOTE(review): the key "modelopt" (no underscore) is
        # inconsistent with "whisper_opt" but must stay as-is to remain
        # compatible with already-saved checkpoints.
        if self.checkpointer is not None:
            self.checkpointer.add_recoverable(
                "whisper_opt", self.whisper_optimizer
            )
            self.checkpointer.add_recoverable("modelopt", self.model_optimizer)
def dataio_prepare(hparams, tokenizer):
    """This function prepares the datasets to be used in the brain class.
    It also defines the data processing pipeline through user-defined functions.

    Arguments
    ---------
    hparams : dict
        Loaded hyperparameters; must provide the CSV paths, sorting mode,
        and dataloader options used below.
    tokenizer : SentencePiece
        Tokenizer wrapper whose ``sp`` processor encodes words into ids.

    Returns
    -------
    tuple
        (train_data, valid_data, test_datasets) DynamicItemDataset objects.
    """
    data_folder = hparams["data_folder"]
    train_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["train_csv"], replacements={"data_root": data_folder},
    )
    if hparams["sorting"] == "ascending":
        # we sort training data to speed up training and get better results.
        train_data = train_data.filtered_sorted(sort_key="duration")
        # when sorting do not shuffle in dataloader ! otherwise is pointless
        hparams["train_dataloader_opts"]["shuffle"] = False
    elif hparams["sorting"] == "descending":
        train_data = train_data.filtered_sorted(
            sort_key="duration", reverse=True
        )
        # when sorting do not shuffle in dataloader ! otherwise is pointless
        hparams["train_dataloader_opts"]["shuffle"] = False
    elif hparams["sorting"] == "random":
        pass
    else:
        raise NotImplementedError(
            "sorting must be random, ascending or descending"
        )
    valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["valid_csv"], replacements={"data_root": data_folder},
    )
    valid_data = valid_data.filtered_sorted(sort_key="duration")
    # test is separate: one dataset per CSV, keyed by file stem.
    test_datasets = {}
    for csv_file in hparams["test_csv"]:
        name = Path(csv_file).stem
        test_datasets[name] = sb.dataio.dataset.DynamicItemDataset.from_csv(
            csv_path=csv_file, replacements={"data_root": data_folder}
        )
        test_datasets[name] = test_datasets[name].filtered_sorted(
            sort_key="duration"
        )
    datasets = [train_data, valid_data] + [i for k, i in test_datasets.items()]
    # 2. Define audio pipeline:
    @sb.utils.data_pipeline.takes("wav")
    @sb.utils.data_pipeline.provides("sig")
    def audio_pipeline(wav):
        # Reads the waveform lazily at batching time.
        sig = sb.dataio.dataio.read_audio(wav)
        return sig
    sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline)
    # 3. Define text pipeline:
    @sb.utils.data_pipeline.takes("wrd")
    @sb.utils.data_pipeline.provides(
        "wrd", "char_list", "tokens_list", "tokens"
    )
    def text_pipeline(wrd):
        # NOTE: the yield order must match the "provides" declaration.
        yield wrd
        char_list = list(wrd)
        yield char_list
        tokens_list = tokenizer.sp.encode_as_ids(wrd)
        yield tokens_list
        tokens = torch.LongTensor(tokens_list)
        yield tokens
    sb.dataio.dataset.add_dynamic_item(datasets, text_pipeline)
    # 4. Set output:
    sb.dataio.dataset.set_output_keys(
        datasets, ["id", "sig", "wrd", "char_list", "tokens"],
    )
    return train_data, valid_data, test_datasets
if __name__ == "__main__":
    # CLI:
    hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])
    # If distributed_launch=True then
    # create ddp_group with the right communication protocol
    sb.utils.distributed.ddp_init_group(run_opts)
    with open(hparams_file) as fin:
        hparams = load_hyperpyyaml(fin, overrides)
    # Create experiment directory
    sb.create_experiment_directory(
        experiment_directory=hparams["output_folder"],
        hyperparams_to_save=hparams_file,
        overrides=overrides,
    )
    # Dataset prep (parsing Librispeech)
    from librispeech_prepare import prepare_librispeech  # noqa
    # multi-gpu (ddp) save data preparation: run once on the main process.
    run_on_main(
        prepare_librispeech,
        kwargs={
            "data_folder": hparams["data_folder"],
            "tr_splits": hparams["train_splits"],
            "dev_splits": hparams["dev_splits"],
            "te_splits": hparams["test_splits"],
            "save_folder": hparams["output_folder"],
            "merge_lst": hparams["train_splits"],
            "merge_name": "train.csv",
            "skip_prep": hparams["skip_prep"],
        },
    )
    # Defining tokenizer and loading it (trains a SentencePiece model on
    # the training transcripts if not already present in save_folder).
    tokenizer = SentencePiece(
        model_dir=hparams["save_folder"],
        vocab_size=hparams["output_neurons"],
        annotation_train=hparams["train_csv"],
        annotation_read="wrd",
        model_type=hparams["token_type"],
        character_coverage=hparams["character_coverage"],
    )
    # here we create the datasets objects as well as tokenization and encoding
    train_data, valid_data, test_datasets = dataio_prepare(hparams, tokenizer)
    # Trainer initialization
    asr_brain = ASR(
        modules=hparams["modules"],
        hparams=hparams,
        run_opts=run_opts,
        checkpointer=hparams["checkpointer"],
    )
    # We load the pretrained whisper model
    if "pretrainer" in hparams.keys():
        run_on_main(hparams["pretrainer"].collect_files)
        hparams["pretrainer"].load_collected(asr_brain.device)
    # We dynamically add the tokenizer to our brain class.
    # NB: This tokenizer corresponds to the one used for the LM!!
    asr_brain.tokenizer = tokenizer
    # Training
    asr_brain.fit(
        asr_brain.hparams.epoch_counter,
        train_data,
        valid_data,
        train_loader_kwargs=hparams["train_dataloader_opts"],
        valid_loader_kwargs=hparams["valid_dataloader_opts"],
    )
    # Testing
    for k in test_datasets.keys():  # keys are test_clean, test_other etc
        asr_brain.hparams.wer_file = os.path.join(
            hparams["output_folder"], "wer_{}.txt".format(k)
        )
        asr_brain.evaluate(
            test_datasets[k], test_loader_kwargs=hparams["test_dataloader_opts"]
        )
| 13,226 | 35.238356 | 89 | py |
speechbrain | speechbrain-main/recipes/LibriSpeech/ASR/transformer/train.py | #!/usr/bin/env python3
"""Recipe for training a Transformer ASR system with librispeech.
The system employs an encoder, a decoder, and an attention mechanism
between them. Decoding is performed with (CTC/Att joint) beamsearch coupled with a neural
language model.
To run this recipe, do the following:
> python train.py hparams/transformer.yaml
> python train.py hparams/conformer.yaml
With the default hyperparameters, the system employs a convolutional frontend and a transformer.
The decoder is based on a Transformer decoder. Beamsearch coupled with a Transformer
language model is used on the top of decoder probabilities.
The neural network is trained on both CTC and negative-log likelihood
targets and sub-word units estimated with Byte Pairwise Encoding (BPE)
are used as basic recognition tokens. Training is performed on the full
LibriSpeech dataset (960 h).
The best model is the average of the checkpoints from last 5 epochs.
The experiment file is flexible enough to support a large variety of
different systems. By properly changing the parameter files, you can try
different encoders, decoders, tokens (e.g, characters instead of BPE),
training split (e.g, train-clean 100 rather than the full one), and many
other possible variations.
Authors
* Jianyuan Zhong 2020
* Mirco Ravanelli 2020
* Peter Plantinga 2020
* Samuele Cornell 2020, 2021, 2022
* Titouan Parcollet 2021, 2022
"""
import os
import sys
import torch
import logging
from pathlib import Path
import speechbrain as sb
from hyperpyyaml import load_hyperpyyaml
from speechbrain.utils.distributed import run_on_main
logger = logging.getLogger(__name__)
# Define training procedure
class ASR(sb.core.Brain):
    def compute_forward(self, batch, stage):
        """Forward computations from the waveform batches to the output probabilities.

        Arguments
        ---------
        batch : PaddedBatch
            Batch providing ``sig`` and ``tokens_bos``.
        stage : sb.Stage
            TRAIN, VALID or TEST; controls augmentation and beam search.

        Returns
        -------
        tuple
            (p_ctc, p_seq, wav_lens, hyps) — CTC log-probs, seq2seq
            log-probs, relative wave lengths, and decoded hypotheses
            (None during training and on non-search valid epochs).
        """
        batch = batch.to(self.device)
        wavs, wav_lens = batch.sig
        tokens_bos, _ = batch.tokens_bos
        # Add augmentation if specified
        if stage == sb.Stage.TRAIN:
            if hasattr(self.modules, "env_corrupt"):
                # Double the batch: clean + environment-corrupted copies.
                wavs_noise = self.modules.env_corrupt(wavs, wav_lens)
                wavs = torch.cat([wavs, wavs_noise], dim=0)
                wav_lens = torch.cat([wav_lens, wav_lens])
                tokens_bos = torch.cat([tokens_bos, tokens_bos], dim=0)
        # compute features
        feats = self.hparams.compute_features(wavs)
        current_epoch = self.hparams.epoch_counter.current
        feats = self.modules.normalize(feats, wav_lens, epoch=current_epoch)
        if stage == sb.Stage.TRAIN:
            if hasattr(self.hparams, "augmentation"):
                # Feature-level augmentation (e.g. SpecAugment).
                feats = self.hparams.augmentation(feats)
        # forward modules
        src = self.modules.CNN(feats)
        enc_out, pred = self.modules.Transformer(
            src, tokens_bos, wav_lens, pad_idx=self.hparams.pad_index,
        )
        # output layer for ctc log-probabilities
        logits = self.modules.ctc_lin(enc_out)
        p_ctc = self.hparams.log_softmax(logits)
        # output layer for seq2seq log-probabilities
        pred = self.modules.seq_lin(pred)
        p_seq = self.hparams.log_softmax(pred)
        # Compute outputs
        hyps = None
        if stage == sb.Stage.TRAIN:
            hyps = None
        elif stage == sb.Stage.VALID:
            hyps = None
            current_epoch = self.hparams.epoch_counter.current
            if current_epoch % self.hparams.valid_search_interval == 0:
                # for the sake of efficiency, we only perform beamsearch with limited capacity
                # and no LM to give user some idea of how the AM is doing
                hyps, _ = self.hparams.valid_search(enc_out.detach(), wav_lens)
        elif stage == sb.Stage.TEST:
            hyps, _ = self.hparams.test_search(enc_out.detach(), wav_lens)
        return p_ctc, p_seq, wav_lens, hyps
def compute_objectives(self, predictions, batch, stage):
"""Computes the loss (CTC+NLL) given predictions and targets."""
(p_ctc, p_seq, wav_lens, hyps,) = predictions
ids = batch.id
tokens_eos, tokens_eos_lens = batch.tokens_eos
tokens, tokens_lens = batch.tokens
if hasattr(self.modules, "env_corrupt") and stage == sb.Stage.TRAIN:
tokens_eos = torch.cat([tokens_eos, tokens_eos], dim=0)
tokens_eos_lens = torch.cat(
[tokens_eos_lens, tokens_eos_lens], dim=0
)
tokens = torch.cat([tokens, tokens], dim=0)
tokens_lens = torch.cat([tokens_lens, tokens_lens], dim=0)
loss_seq = self.hparams.seq_cost(
p_seq, tokens_eos, length=tokens_eos_lens
).sum()
# now as training progresses we use real prediction from the prev step instead of teacher forcing
loss_ctc = self.hparams.ctc_cost(
p_ctc, tokens, wav_lens, tokens_lens
).sum()
loss = (
self.hparams.ctc_weight * loss_ctc
+ (1 - self.hparams.ctc_weight) * loss_seq
)
if stage != sb.Stage.TRAIN:
current_epoch = self.hparams.epoch_counter.current
valid_search_interval = self.hparams.valid_search_interval
if current_epoch % valid_search_interval == 0 or (
stage == sb.Stage.TEST
):
# Decode token terms to words
predicted_words = [
tokenizer.decode_ids(utt_seq).split(" ") for utt_seq in hyps
]
target_words = [wrd.split(" ") for wrd in batch.wrd]
self.wer_metric.append(ids, predicted_words, target_words)
# compute the accuracy of the one-step-forward prediction
self.acc_metric.append(p_seq, tokens_eos, tokens_eos_lens)
return loss
    def on_evaluate_start(self, max_key=None, min_key=None):
        """Perform checkpoint averaging if needed.

        Finds the checkpoints selected by ``max_key``/``min_key``, averages
        their model parameters, and loads the averaged weights into the
        model (set to eval mode) before evaluation begins.
        """
        super().on_evaluate_start()
        ckpts = self.checkpointer.find_checkpoints(
            max_key=max_key, min_key=min_key
        )
        ckpt = sb.utils.checkpoints.average_checkpoints(
            ckpts, recoverable_name="model", device=self.device
        )
        self.hparams.model.load_state_dict(ckpt, strict=True)
        self.hparams.model.eval()
        print("Loaded the average")
def evaluate_batch(self, batch, stage):
"""Computations needed for validation/test batches"""
with torch.no_grad():
predictions = self.compute_forward(batch, stage=stage)
loss = self.compute_objectives(predictions, batch, stage=stage)
return loss.detach()
def on_stage_start(self, stage, epoch):
"""Gets called at the beginning of each epoch"""
if stage != sb.Stage.TRAIN:
self.acc_metric = self.hparams.acc_computer()
self.wer_metric = self.hparams.error_rate_computer()
    def on_stage_end(self, stage, stage_loss, epoch):
        """Gets called at the end of an epoch.

        TRAIN: store stats for later logging. VALID: log stats and keep the
        5 best checkpoints by accuracy. TEST: log final stats, write WER
        details to disk, then save a single averaged checkpoint.
        """
        # Compute/store important stats
        stage_stats = {"loss": stage_loss}
        if stage == sb.Stage.TRAIN:
            self.train_stats = stage_stats
        else:
            stage_stats["ACC"] = self.acc_metric.summarize()
            current_epoch = self.hparams.epoch_counter.current
            valid_search_interval = self.hparams.valid_search_interval
            if (
                current_epoch % valid_search_interval == 0
                or stage == sb.Stage.TEST
            ):
                # WER only exists on epochs where a beam search was run.
                stage_stats["WER"] = self.wer_metric.summarize("error_rate")
        # log stats and save checkpoint at end-of-epoch
        if stage == sb.Stage.VALID and sb.utils.distributed.if_main_process():
            lr = self.hparams.noam_annealing.current_lr
            steps = self.optimizer_step
            optimizer = self.optimizer.__class__.__name__
            epoch_stats = {
                "epoch": epoch,
                "lr": lr,
                "steps": steps,
                "optimizer": optimizer,
            }
            self.hparams.train_logger.log_stats(
                stats_meta=epoch_stats,
                train_stats=self.train_stats,
                valid_stats=stage_stats,
            )
            self.checkpointer.save_and_keep_only(
                meta={"ACC": stage_stats["ACC"], "epoch": epoch},
                max_keys=["ACC"],
                num_to_keep=5,
            )
        elif stage == sb.Stage.TEST:
            self.hparams.train_logger.log_stats(
                stats_meta={"Epoch loaded": self.hparams.epoch_counter.current},
                test_stats=stage_stats,
            )
            with open(self.hparams.wer_file, "w") as w:
                self.wer_metric.write_stats(w)
            # save the averaged checkpoint at the end of the evaluation stage
            # delete the rest of the intermediate checkpoints
            # ACC is set to 1.1 so checkpointer only keeps the averaged checkpoint
            self.checkpointer.save_and_keep_only(
                meta={"ACC": 1.1, "epoch": epoch},
                max_keys=["ACC"],
                num_to_keep=1,
            )
def fit_batch(self, batch):
should_step = self.step % self.grad_accumulation_factor == 0
# Managing automatic mixed precision
if self.auto_mix_prec:
with torch.autocast(torch.device(self.device).type):
outputs = self.compute_forward(batch, sb.Stage.TRAIN)
# Losses are excluded from mixed precision to avoid instabilities
loss = self.compute_objectives(outputs, batch, sb.Stage.TRAIN)
with self.no_sync(not should_step):
self.scaler.scale(
loss / self.grad_accumulation_factor
).backward()
if should_step:
self.scaler.unscale_(self.optimizer)
if self.check_gradients(loss):
self.scaler.step(self.optimizer)
self.scaler.update()
self.zero_grad()
self.optimizer_step += 1
self.hparams.noam_annealing(self.optimizer)
else:
if self.bfloat16_mix_prec:
with torch.autocast(
device_type=torch.device(self.device).type,
dtype=torch.bfloat16,
):
outputs = self.compute_forward(batch, sb.TRAIN)
loss = self.compute_objectives(outputs, batch, sb.TRAIN)
else:
outputs = self.compute_forward(batch, sb.TRAIN)
loss = self.compute_objectives(outputs, batch, sb.TRAIN)
with self.no_sync(not should_step):
(loss / self.grad_accumulation_factor).backward()
if should_step:
if self.check_gradients(loss):
self.optimizer.step()
self.zero_grad()
self.optimizer_step += 1
self.hparams.noam_annealing(self.optimizer)
self.on_fit_batch_end(batch, outputs, loss, should_step)
return loss.detach().cpu()
def dataio_prepare(hparams):
    """This function prepares the datasets to be used in the brain class.
    It also defines the data processing pipeline through user-defined functions.

    Arguments
    ---------
    hparams : dict
        Loaded hyperparameters (CSV paths, sorting mode, tokenizer,
        dynamic-batching options, etc.).

    Returns
    -------
    tuple
        (train_data, valid_data, test_datasets, tokenizer,
        train_batch_sampler, valid_batch_sampler); the samplers are None
        when dynamic batching is disabled.
    """
    data_folder = hparams["data_folder"]
    train_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["train_csv"], replacements={"data_root": data_folder},
    )
    if hparams["sorting"] == "ascending":
        # we sort training data to speed up training and get better results.
        train_data = train_data.filtered_sorted(sort_key="duration")
        # when sorting do not shuffle in dataloader ! otherwise is pointless
        hparams["train_dataloader_opts"]["shuffle"] = False
    elif hparams["sorting"] == "descending":
        train_data = train_data.filtered_sorted(
            sort_key="duration", reverse=True
        )
        # when sorting do not shuffle in dataloader ! otherwise is pointless
        hparams["train_dataloader_opts"]["shuffle"] = False
    elif hparams["sorting"] == "random":
        pass
    else:
        raise NotImplementedError(
            "sorting must be random, ascending or descending"
        )
    valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["valid_csv"], replacements={"data_root": data_folder},
    )
    valid_data = valid_data.filtered_sorted(sort_key="duration")
    # test is separate: one dataset per CSV, keyed by file stem.
    test_datasets = {}
    for csv_file in hparams["test_csv"]:
        name = Path(csv_file).stem
        test_datasets[name] = sb.dataio.dataset.DynamicItemDataset.from_csv(
            csv_path=csv_file, replacements={"data_root": data_folder}
        )
        test_datasets[name] = test_datasets[name].filtered_sorted(
            sort_key="duration"
        )
    datasets = [train_data, valid_data] + [i for k, i in test_datasets.items()]
    valtest_datasets = [valid_data] + [i for k, i in test_datasets.items()]
    # We get the tokenizer as we need it to encode the labels when creating
    # mini-batches.
    tokenizer = hparams["tokenizer"]
    # 2. Define audio pipeline:
    @sb.utils.data_pipeline.takes("wav")
    @sb.utils.data_pipeline.provides("sig")
    def audio_pipeline(wav):
        sig = sb.dataio.dataio.read_audio(wav)
        return sig
    # Valid/test use the plain reader (no perturbation).
    sb.dataio.dataset.add_dynamic_item(valtest_datasets, audio_pipeline)
    @sb.utils.data_pipeline.takes("wav")
    @sb.utils.data_pipeline.provides("sig")
    def audio_pipeline_train(wav):
        # Speed Perturb is done here so it is multi-threaded with the
        # workers of the dataloader (faster).
        if hparams["speed_perturb"]:
            sig = sb.dataio.dataio.read_audio(wav)
            sig = hparams["speed_perturb"](sig.unsqueeze(0)).squeeze(0)
        else:
            sig = sb.dataio.dataio.read_audio(wav)
        return sig
    sb.dataio.dataset.add_dynamic_item([train_data], audio_pipeline_train)
    # 3. Define text pipeline:
    @sb.utils.data_pipeline.takes("wrd")
    @sb.utils.data_pipeline.provides(
        "wrd", "tokens_list", "tokens_bos", "tokens_eos", "tokens"
    )
    def text_pipeline(wrd):
        # NOTE: the yield order must match the "provides" declaration.
        yield wrd
        tokens_list = tokenizer.encode_as_ids(wrd)
        yield tokens_list
        tokens_bos = torch.LongTensor([hparams["bos_index"]] + (tokens_list))
        yield tokens_bos
        tokens_eos = torch.LongTensor(tokens_list + [hparams["eos_index"]])
        yield tokens_eos
        tokens = torch.LongTensor(tokens_list)
        yield tokens
    sb.dataio.dataset.add_dynamic_item(datasets, text_pipeline)
    # 4. Set output:
    sb.dataio.dataset.set_output_keys(
        datasets, ["id", "sig", "wrd", "tokens_bos", "tokens_eos", "tokens"],
    )
    # 5. If Dynamic Batching is used, we instantiate the needed samplers.
    train_batch_sampler = None
    valid_batch_sampler = None
    if hparams["dynamic_batching"]:
        from speechbrain.dataio.sampler import DynamicBatchSampler  # noqa
        dynamic_hparams = hparams["dynamic_batch_sampler"]
        num_buckets = dynamic_hparams["num_buckets"]
        train_batch_sampler = DynamicBatchSampler(
            train_data,
            dynamic_hparams["max_batch_len"],
            num_buckets=num_buckets,
            length_func=lambda x: x["duration"],
            shuffle=dynamic_hparams["shuffle_ex"],
            batch_ordering=dynamic_hparams["batch_ordering"],
            max_batch_ex=dynamic_hparams["max_batch_ex"],
        )
        valid_batch_sampler = DynamicBatchSampler(
            valid_data,
            dynamic_hparams["max_batch_len_val"],
            num_buckets=num_buckets,
            length_func=lambda x: x["duration"],
            shuffle=dynamic_hparams["shuffle_ex"],
            batch_ordering=dynamic_hparams["batch_ordering"],
        )
    return (
        train_data,
        valid_data,
        test_datasets,
        tokenizer,
        train_batch_sampler,
        valid_batch_sampler,
    )
if __name__ == "__main__":
    # CLI:
    hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])
    with open(hparams_file) as fin:
        hparams = load_hyperpyyaml(fin, overrides)
    # If --distributed_launch then
    # create ddp_group with the right communication protocol
    sb.utils.distributed.ddp_init_group(run_opts)
    # 1. # Dataset prep (parsing Librispeech)
    from librispeech_prepare import prepare_librispeech  # noqa
    # Create experiment directory
    sb.create_experiment_directory(
        experiment_directory=hparams["output_folder"],
        hyperparams_to_save=hparams_file,
        overrides=overrides,
    )
    # multi-gpu (ddp) save data preparation: run once on the main process.
    run_on_main(
        prepare_librispeech,
        kwargs={
            "data_folder": hparams["data_folder"],
            "tr_splits": hparams["train_splits"],
            "dev_splits": hparams["dev_splits"],
            "te_splits": hparams["test_splits"],
            "save_folder": hparams["output_folder"],
            "merge_lst": hparams["train_splits"],
            "merge_name": "train.csv",
            "skip_prep": hparams["skip_prep"],
        },
    )
    # here we create the datasets objects as well as tokenization and encoding
    (
        train_data,
        valid_data,
        test_datasets,
        tokenizer,
        train_bsampler,
        valid_bsampler,
    ) = dataio_prepare(hparams)
    # We download the pretrained LM from HuggingFace (or elsewhere depending on
    # the path given in the YAML file). The tokenizer is loaded at the same time.
    run_on_main(hparams["pretrainer"].collect_files)
    hparams["pretrainer"].load_collected(device=run_opts["device"])
    # Trainer initialization
    asr_brain = ASR(
        modules=hparams["modules"],
        opt_class=hparams["Adam"],
        hparams=hparams,
        run_opts=run_opts,
        checkpointer=hparams["checkpointer"],
    )
    # adding objects to trainer:
    asr_brain.tokenizer = hparams["tokenizer"]
    train_dataloader_opts = hparams["train_dataloader_opts"]
    valid_dataloader_opts = hparams["valid_dataloader_opts"]
    # When dynamic batching is on, the samplers replace the plain
    # batch-size-driven dataloader options.
    if train_bsampler is not None:
        train_dataloader_opts = {
            "batch_sampler": train_bsampler,
            "num_workers": hparams["num_workers"],
        }
    if valid_bsampler is not None:
        valid_dataloader_opts = {"batch_sampler": valid_bsampler}
    # Training
    asr_brain.fit(
        asr_brain.hparams.epoch_counter,
        train_data,
        valid_data,
        train_loader_kwargs=train_dataloader_opts,
        valid_loader_kwargs=valid_dataloader_opts,
    )
    # Testing
    for k in test_datasets.keys():  # keys are test_clean, test_other etc
        asr_brain.hparams.wer_file = os.path.join(
            hparams["output_folder"], "wer_{}.txt".format(k)
        )
        asr_brain.evaluate(
            test_datasets[k],
            max_key="ACC",
            test_loader_kwargs=hparams["test_dataloader_opts"],
        )
| 19,105 | 36.171206 | 105 | py |
speechbrain | speechbrain-main/recipes/LibriSpeech/ASR/transformer/train_with_whisper.py | #!/usr/bin/env python3
"""Recipe for training a whisper-based ASR system with librispeech.
The system employs whisper from OpenAI (https://cdn.openai.com/papers/whisper.pdf).
This recipe takes the whisper encoder-decoder and fine-tunes it on the NLL loss.
If you want to only use the whisper encoder system, please refer to the recipe
speechbrain/recipes/LibriSpeech/ASR/CTC/train_with_whisper.py
To run this recipe, do the following:
> python train_with_whisper.py hparams/train_hf_whisper.yaml
Authors
* Adel Moumen 2022
* Titouan Parcollet 2022
"""
import os
import sys
import torch
import logging
import speechbrain as sb
from speechbrain.utils.distributed import run_on_main
from speechbrain.utils.data_utils import undo_padding
from hyperpyyaml import load_hyperpyyaml
from pathlib import Path
logger = logging.getLogger(__name__)
# Define training procedure
class ASR(sb.Brain):
    def compute_forward(self, batch, stage):
        """Forward computations from the waveform batches to the output probabilities.

        Runs the full Whisper encoder-decoder; during VALID/TEST a greedy /
        beam search over the encoder output produces hypotheses.

        Returns
        -------
        tuple
            (log_probs, hyps, wav_lens); ``hyps`` is None during training.
        """
        batch = batch.to(self.device)
        wavs, wav_lens = batch.sig
        bos_tokens, bos_tokens_lens = batch.tokens_bos
        # Add augmentation if specified
        if stage == sb.Stage.TRAIN:
            if hasattr(self.hparams, "augmentation"):
                wavs = self.hparams.augmentation(wavs, wav_lens)
        # We compute the padding mask and replace the values with the pad_token_id
        # that the Whisper decoder expect to see.
        abs_tokens_lens = (bos_tokens_lens * bos_tokens.shape[1]).long()
        pad_mask = (
            torch.arange(abs_tokens_lens.max(), device=self.device)[None, :]
            < abs_tokens_lens[:, None]
        )
        bos_tokens[~pad_mask] = self.tokenizer.pad_token_id
        # Forward encoder + decoder
        enc_out, logits, _ = self.modules.whisper(wavs, bos_tokens)
        log_probs = self.hparams.log_softmax(logits)
        hyps = None
        if stage == sb.Stage.VALID:
            hyps, _ = self.hparams.valid_greedy_searcher(enc_out, wav_lens)
        elif stage == sb.Stage.TEST:
            hyps, _ = self.hparams.test_beam_searcher(enc_out, wav_lens)
        return log_probs, hyps, wav_lens
    def compute_objectives(self, predictions, batch, stage):
        """Computes the loss NLL given predictions and targets.

        During VALID/TEST, hypotheses are decoded back to words (optionally
        passed through Whisper's text normalizer) and accumulated into the
        WER/CER metrics.
        """
        log_probs, hyps, wav_lens, = predictions
        batch = batch.to(self.device)
        ids = batch.id
        tokens_eos, tokens_eos_lens = batch.tokens_eos
        loss = self.hparams.nll_loss(
            log_probs, tokens_eos, length=tokens_eos_lens,
        )
        if stage != sb.Stage.TRAIN:
            tokens, tokens_lens = batch.tokens
            # Decode token terms to words
            predicted_words = self.tokenizer.batch_decode(
                hyps, skip_special_tokens=True
            )
            # Convert indices to words
            target_words = undo_padding(tokens, tokens_lens)
            target_words = self.tokenizer.batch_decode(
                target_words, skip_special_tokens=True
            )
            if hasattr(self.hparams, "normalized_transcripts"):
                # Apply Whisper's text normalization before scoring so the
                # metric is not dominated by casing/punctuation differences.
                predicted_words = [
                    self.tokenizer._normalize(text).split(" ")
                    for text in predicted_words
                ]
                target_words = [
                    self.tokenizer._normalize(text).split(" ")
                    for text in target_words
                ]
            else:
                predicted_words = [text.split(" ") for text in predicted_words]
                target_words = [text.split(" ") for text in target_words]
            self.wer_metric.append(ids, predicted_words, target_words)
            self.cer_metric.append(ids, predicted_words, target_words)
        return loss
def on_stage_start(self, stage, epoch):
"""Gets called at the beginning of each epoch"""
if stage != sb.Stage.TRAIN:
self.cer_metric = self.hparams.cer_computer()
self.wer_metric = self.hparams.error_rate_computer()
    def on_stage_end(self, stage, stage_loss, epoch):
        """Gets called at the end of an epoch.

        VALID: anneals the whisper learning rate on the validation loss,
        logs stats, and keeps the best checkpoint by WER. TEST: logs final
        stats and writes detailed WER output to ``wer_file``.
        """
        # Compute/store important stats
        stage_stats = {"loss": stage_loss}
        if stage == sb.Stage.TRAIN:
            self.train_stats = stage_stats
        else:
            stage_stats["CER"] = self.cer_metric.summarize("error_rate")
            stage_stats["WER"] = self.wer_metric.summarize("error_rate")
        # Perform end-of-iteration things, like annealing, logging, etc.
        if stage == sb.Stage.VALID:
            old_lr_whisper, new_lr_whisper = self.hparams.lr_annealing_whisper(
                stage_stats["loss"]
            )
            sb.nnet.schedulers.update_learning_rate(
                self.optimizer, new_lr_whisper
            )
            self.hparams.train_logger.log_stats(
                stats_meta={"epoch": epoch, "lr_whisper": old_lr_whisper},
                train_stats=self.train_stats,
                valid_stats=stage_stats,
            )
            self.checkpointer.save_and_keep_only(
                meta={"WER": stage_stats["WER"]}, min_keys=["WER"],
            )
        elif stage == sb.Stage.TEST:
            self.hparams.train_logger.log_stats(
                stats_meta={"Epoch loaded": self.hparams.epoch_counter.current},
                test_stats=stage_stats,
            )
            with open(self.hparams.wer_file, "w") as w:
                self.wer_metric.write_stats(w)
def dataio_prepare(hparams, tokenizer):
    """This function prepares the datasets to be used in the brain class.
    It also defines the data processing pipeline through user-defined functions.

    Arguments
    ---------
    hparams : dict
        Loaded hyperparameters (CSV paths, sorting mode, bos/eos indices).
    tokenizer : transformers tokenizer
        Whisper tokenizer used to encode the transcripts.

    Returns
    -------
    tuple
        (train_data, valid_data, test_datasets) DynamicItemDataset objects.
    """
    data_folder = hparams["data_folder"]
    train_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["train_csv"], replacements={"data_root": data_folder},
    )
    if hparams["sorting"] == "ascending":
        # we sort training data to speed up training and get better results.
        train_data = train_data.filtered_sorted(sort_key="duration")
        # when sorting do not shuffle in dataloader ! otherwise is pointless
        hparams["train_loader_kwargs"]["shuffle"] = False
    elif hparams["sorting"] == "descending":
        train_data = train_data.filtered_sorted(
            sort_key="duration", reverse=True
        )
        # when sorting do not shuffle in dataloader ! otherwise is pointless
        hparams["train_loader_kwargs"]["shuffle"] = False
    elif hparams["sorting"] == "random":
        pass
    else:
        raise NotImplementedError(
            "sorting must be random, ascending or descending"
        )
    valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["valid_csv"], replacements={"data_root": data_folder},
    )
    valid_data = valid_data.filtered_sorted(sort_key="duration")
    # test is separate: one dataset per CSV, keyed by file stem.
    test_datasets = {}
    for csv_file in hparams["test_csv"]:
        name = Path(csv_file).stem
        test_datasets[name] = sb.dataio.dataset.DynamicItemDataset.from_csv(
            csv_path=csv_file, replacements={"data_root": data_folder}
        )
        test_datasets[name] = test_datasets[name].filtered_sorted(
            sort_key="duration"
        )
    datasets = [train_data, valid_data] + [i for k, i in test_datasets.items()]
    # 2. Define audio pipeline:
    @sb.utils.data_pipeline.takes("wav")
    @sb.utils.data_pipeline.provides("sig")
    def audio_pipeline(wav):
        sig = sb.dataio.dataio.read_audio(wav)
        return sig
    sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline)
    # 3. Define text pipeline:
    @sb.utils.data_pipeline.takes("wrd")
    @sb.utils.data_pipeline.provides(
        "wrd", "tokens_list", "tokens_bos", "tokens_eos", "tokens"
    )
    def text_pipeline(wrd):
        # NOTE: the yield order must match the "provides" declaration.
        yield wrd
        tokens_list = tokenizer.encode(wrd)
        # avoid bos and eos tokens.
        tokens_list = tokens_list[1:-1]
        yield tokens_list
        tokens_bos = torch.LongTensor([hparams["bos_index"]] + tokens_list)
        yield tokens_bos
        tokens_eos = torch.LongTensor(tokens_list + [hparams["eos_index"]])
        yield tokens_eos
        tokens = torch.LongTensor(tokens_list)
        yield tokens
    sb.dataio.dataset.add_dynamic_item(datasets, text_pipeline)
    # 4. Set output:
    sb.dataio.dataset.set_output_keys(
        datasets,
        ["id", "sig", "tokens_list", "tokens_bos", "tokens_eos", "tokens"],
    )
    return train_data, valid_data, test_datasets
if __name__ == "__main__":
    # CLI:
    hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])
    # If distributed_launch=True then
    # create ddp_group with the right communication protocol
    sb.utils.distributed.ddp_init_group(run_opts)
    with open(hparams_file) as fin:
        hparams = load_hyperpyyaml(fin, overrides)
    # Create experiment directory
    sb.create_experiment_directory(
        experiment_directory=hparams["output_folder"],
        hyperparams_to_save=hparams_file,
        overrides=overrides,
    )
    # Dataset prep (parsing Librispeech)
    from librispeech_prepare import prepare_librispeech  # noqa
    # multi-gpu (ddp) save data preparation: run once on the main process.
    run_on_main(
        prepare_librispeech,
        kwargs={
            "data_folder": hparams["data_folder"],
            "tr_splits": hparams["train_splits"],
            "dev_splits": hparams["dev_splits"],
            "te_splits": hparams["test_splits"],
            "save_folder": hparams["output_folder"],
            "merge_lst": hparams["train_splits"],
            "merge_name": "train.csv",
            "skip_prep": hparams["skip_prep"],
        },
    )
    # Defining tokenizer and loading it
    tokenizer = hparams["whisper"].tokenizer
    tokenizer.set_prefix_tokens(hparams["language"], "transcribe", False)
    # we need to prepare the tokens for searchers
    hparams["valid_greedy_searcher"].set_decoder_input_tokens(
        tokenizer.prefix_tokens
    )
    hparams["valid_greedy_searcher"].set_language_token(
        tokenizer.prefix_tokens[1]
    )
    hparams["test_beam_searcher"].set_decoder_input_tokens(
        tokenizer.prefix_tokens
    )
    hparams["test_beam_searcher"].set_language_token(tokenizer.prefix_tokens[1])
    # here we create the datasets objects as well as tokenization and encoding
    train_data, valid_data, test_datasets = dataio_prepare(hparams, tokenizer)
    # Trainer initialization
    asr_brain = ASR(
        modules=hparams["modules"],
        hparams=hparams,
        run_opts=run_opts,
        checkpointer=hparams["checkpointer"],
        opt_class=hparams["whisper_opt_class"],
    )
    # We load the pretrained whisper model
    if "pretrainer" in hparams.keys():
        run_on_main(hparams["pretrainer"].collect_files)
        hparams["pretrainer"].load_collected(asr_brain.device)
    # We dynamically add the tokenizer to our brain class.
    # NB: This tokenizer corresponds to the one used for Whisper.
    asr_brain.tokenizer = tokenizer
    # Training
    if hparams["test_only"] is False:
        # Training
        asr_brain.fit(
            asr_brain.hparams.epoch_counter,
            train_data,
            valid_data,
            train_loader_kwargs=hparams["train_loader_kwargs"],
            valid_loader_kwargs=hparams["valid_loader_kwargs"],
        )
    # Testing
    for k in test_datasets.keys():  # keys are test_clean, test_other etc
        asr_brain.hparams.wer_file = os.path.join(
            hparams["output_folder"], "wer_{}.txt".format(k)
        )
        asr_brain.evaluate(
            test_datasets[k], test_loader_kwargs=hparams["test_loader_kwargs"]
        )
| 11,771 | 34.457831 | 89 | py |
speechbrain | speechbrain-main/recipes/MEDIA/SLU/CTC/train_hf_wav2vec.py | #!/usr/bin/env python3
"""
Recipe for training a CTC based SLU system with Media.
The system employs a wav2vec2 model and a decoder.
To run this recipe, do the following:
> python train_with_wav2vec.py hparams/train_with_wav2vec.yaml
With the default hyperparameters, the system employs a VanillaNN encoder.
The neural network is trained on greedy CTC.
The experiment file is flexible enough to support a large variety of
different systems. By properly changing the parameter files, you can try
different encoders, decoders, training tasks (Media , PortMedia),
and many other possible variations.
Authors
* Gaelle Laperriere 2023
"""
import sys
import torch
import logging
import speechbrain as sb
from hyperpyyaml import load_hyperpyyaml
from speechbrain.dataio.batch import PaddedBatch
from speechbrain.utils.distributed import run_on_main
from media_prepare import prepare_media
logger = logging.getLogger(__name__)
# Define training procedure.
class SLU(sb.core.Brain):
    """Brain class for CTC-based Spoken Language Understanding on MEDIA.

    The model is a wav2vec2 encoder followed by a small encoder
    (``modules.enc``) and a linear output layer trained with the CTC
    loss over SLU character/concept labels. Two optimizers are used:
    one for the wav2vec2 parameters and one for the rest of the model.
    """

    def compute_forward(self, batch, stage):
        """Forward computations from waveform to output probabilities.

        Arguments
        ---------
        batch : PaddedBatch
            Batch providing ``sig`` (waveforms and relative lengths).
        stage : sb.Stage
            Current stage (TRAIN, VALID or TEST).

        Returns
        -------
        p_ctc : torch.Tensor
            Per-frame log-probabilities over the label set.
        wav_lens : torch.Tensor
            Relative lengths of the input waveforms.
        """
        # Get data.
        batch = batch.to(self.device)
        wavs, wav_lens = batch.sig
        # Forward pass.
        feats = self.modules.wav2vec2(wavs, wav_lens)
        x = self.modules.enc(feats)
        # Output layer for seq2seq log-probabilities.
        logits = self.modules.output_lin(x)
        p_ctc = self.hparams.log_softmax(logits)
        return p_ctc, wav_lens

    def compute_objectives(self, predictions, batch, stage):
        """Computes the loss (CTC) given predictions and targets."""
        # Get data.
        batch = batch.to(self.device)
        chars, char_lens = batch.char_encoded
        ids = batch.id
        # Get predictions & loss.
        p_ctc, wav_lens = predictions
        loss = self.hparams.ctc_cost(p_ctc, chars, wav_lens, char_lens)
        # Get metrics (evaluation stages only).
        if stage != sb.Stage.TRAIN:
            # Generate sequences with CTC greedy decoder.
            sequence = sb.decoders.ctc_greedy_decode(
                p_ctc, wav_lens, self.hparams.blank_index
            )
            # Update CER, concept-error and concept-value-error metrics.
            self.cer_metric.append(
                ids=ids,
                predict=sequence,
                target=chars,
                target_len=char_lens,
                ind2lab=self.tokenizer.decode_ndim,
            )
            self.coer_metric.append(
                ids=ids,
                predict=sequence,
                target=chars,
                target_len=char_lens,
                ind2lab=self.tokenizer.decode_ndim,
            )
            self.cver_metric.append(
                ids=ids,
                predict=sequence,
                target=chars,
                target_len=char_lens,
                ind2lab=self.tokenizer.decode_ndim,
            )
            self.ctc_metric.append(ids, p_ctc, chars, wav_lens, char_lens)
        return loss

    def fit_batch(self, batch):
        """Train the parameters given a single batch in input"""
        stage = sb.Stage.TRAIN
        # Train.
        predictions = self.compute_forward(batch, stage)
        loss = self.compute_objectives(predictions, batch, stage)
        # Propagate loss.
        loss.backward()
        # Only step the optimizers when the gradients are finite.
        if self.check_gradients(loss):
            self.optimizer_wav2vec.step()
            self.optimizer.step()
        self.optimizer_wav2vec.zero_grad()
        self.optimizer.zero_grad()
        return loss.detach()

    def evaluate_batch(self, batch, stage):
        """Computations needed for validation/test batches"""
        # Evaluate. The forward pass is wrapped in no_grad as well, so
        # no autograd graph is built during evaluation.
        with torch.no_grad():
            predictions = self.compute_forward(batch, stage=stage)
            loss = self.compute_objectives(predictions, batch, stage)
        return loss.detach()

    def init_optimizers(self):
        """Initializes the wav2vec2 optimizer and model optimizer"""
        # Separate optimizers allow distinct learning rates/schedules
        # for the pretrained wav2vec2 and the randomly initialized head.
        self.optimizer_wav2vec = self.hparams.opt_class_wav2vec(
            self.hparams.model_wav2vec2.parameters()
        )
        self.optimizer = self.hparams.opt_class(self.hparams.model.parameters())
        # Add optimizers to checkpoint recoverables.
        if self.checkpointer is not None:
            self.checkpointer.add_recoverable(
                "optimizer_wav2vec", self.optimizer_wav2vec
            )
            self.checkpointer.add_recoverable("optimizer", self.optimizer)

    def on_stage_start(self, stage, epoch):
        """Gets called at the beginning of each epoch"""
        # Re-initialize metrics.
        if stage != sb.Stage.TRAIN:
            self.cer_metric = self.hparams.cer_computer()
            self.ctc_metric = self.hparams.ctc_computer()
            self.coer_metric = self.hparams.coer_computer()
            self.cver_metric = self.hparams.cver_computer()

    def on_stage_end(self, stage, stage_loss, epoch):
        """Gets called at the end of an epoch."""
        # Save loss and metrics.
        stage_stats = {"loss": stage_loss}
        if stage == sb.Stage.TRAIN:
            self.train_stats = stage_stats
        else:
            stage_stats["CER"] = self.cer_metric.summarize("error_rate")
            stage_stats["COER"] = self.coer_metric.summarize("error_rate")
            stage_stats["CVER"] = self.cver_metric.summarize("error_rate")
        # Perform end-of-iteration things, like annealing, logging, etc.
        if stage == sb.Stage.VALID:
            old_lr, new_lr = self.hparams.lr_annealing(stage_stats["loss"])
            old_lr_wav2vec, new_lr_wav2vec = self.hparams.lr_annealing_wav2vec(
                stage_stats["loss"]
            )
            sb.nnet.schedulers.update_learning_rate(self.optimizer, new_lr)
            sb.nnet.schedulers.update_learning_rate(
                self.optimizer_wav2vec, new_lr_wav2vec
            )
            self.hparams.train_logger.log_stats(
                stats_meta={
                    "epoch": epoch,
                    "lr": old_lr,
                    "lr_wav2vec": old_lr_wav2vec,
                },
                train_stats=self.train_stats,
                valid_stats=stage_stats,
            )
            self.checkpointer.save_and_keep_only(
                meta={"CER": stage_stats["CER"]}, min_keys=["CER"],
            )
        # Same plus write results in txt files.
        elif stage == sb.Stage.TEST:
            self.hparams.train_logger.log_stats(
                stats_meta={"Epoch loaded": self.hparams.epoch_counter.current},
                test_stats=stage_stats,
            )
            # BUGFIX: read the output paths from self.hparams rather than
            # the module-level `hparams` global (which exists only when the
            # file is run as a script), so the class also works on import.
            with open(self.hparams.cer_file_test, "w") as w:
                self.cer_metric.write_stats(w)
            with open(self.hparams.ctc_file_test, "w") as w:
                self.ctc_metric.write_stats(w)
            with open(self.hparams.coer_file_test, "w") as w:
                self.coer_metric.write_stats(w)
            with open(self.hparams.cver_file_test, "w") as w:
                self.cver_metric.write_stats(w)
# Define custom data procedure.
def dataio_prepare(hparams):
    """This function prepares the datasets to be used in the brain class.
    It also defines the data processing pipeline through user-defined functions."""

    # 1. Define datasets:
    csv_folder = hparams["save_folder"] + "/csv"
    train_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["csv_train"], replacements={"data_root": csv_folder},
    )

    # Duration-based filtering shared by every sorting strategy.
    duration_limits = {
        "key_max_value": {"duration": hparams["avoid_if_longer_than"]},
        "key_min_value": {"duration": hparams["avoid_if_smaller_than"]},
    }

    # We sort training data to speed up training and get better results.
    # When the data is sorted, the dataloader must not shuffle it.
    sorting = hparams["sorting"]
    if sorting in ("ascending", "descending"):
        train_data = train_data.filtered_sorted(
            sort_key="duration",
            reverse=(sorting == "descending"),
            **duration_limits,
        )
        hparams["dataloader_options"]["shuffle"] = False
    elif sorting == "random":
        train_data = train_data.filtered_sorted(**duration_limits)
    else:
        raise NotImplementedError(
            "sorting must be random, ascending or descending"
        )

    # Validation and test sets are sorted (longest first) so that
    # evaluation runs faster.
    valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["csv_valid"], replacements={"data_root": csv_folder}
    )
    valid_data = valid_data.filtered_sorted(sort_key="duration", reverse=True)

    test_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["csv_test"], replacements={"data_root": csv_folder}
    )
    test_data = test_data.filtered_sorted(sort_key="duration", reverse=True)

    datasets = [train_data, valid_data, test_data]
    label_encoder = sb.dataio.encoder.CTCTextEncoder()

    # 2. Define audio pipeline:
    @sb.utils.data_pipeline.takes("wav", "start", "stop")
    @sb.utils.data_pipeline.provides("sig")
    def audio_pipeline(wav, start, stop):
        sample_rate = hparams["sample_rate"]
        segment = {
            "file": wav,
            "start": int(float(start) * sample_rate),
            "stop": int(float(stop) * sample_rate),
        }
        return sb.dataio.dataio.read_audio(segment)

    sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline)

    # 3. Define text pipeline:
    @sb.utils.data_pipeline.takes("char")
    @sb.utils.data_pipeline.provides("char_list", "char_encoded")
    def text_pipeline(char):
        char_list = char.strip().split()
        yield char_list
        yield label_encoder.encode_sequence_torch(char_list)

    sb.dataio.dataset.add_dynamic_item(datasets, text_pipeline)

    # 4. Create a label encoder instead of a tokenizer for our tag list:
    label_encoder.load_or_create(
        path=hparams["save_folder"] + "/labelencoder.txt",
        from_didatasets=[train_data],
        output_key="char_list",
        special_labels={"blank_label": hparams["blank_index"]},
        sequence_input=True,
    )

    # 5. Set output:
    sb.dataio.dataset.set_output_keys(
        datasets, ["id", "sig", "char_encoded"],
    )

    # 6. Make DataLoaders (shuffling only where requested):
    train_opts = hparams["dataloader_options"]
    dataloader_train = torch.utils.data.DataLoader(
        train_data,
        batch_size=hparams["batch_size"],
        num_workers=train_opts["num_workers"],
        collate_fn=PaddedBatch,
        shuffle=train_opts["shuffle"],
    )
    dataloader_valid = torch.utils.data.DataLoader(
        valid_data,
        batch_size=hparams["batch_size"],
        num_workers=train_opts["num_workers"],
        collate_fn=PaddedBatch,
        shuffle=train_opts["shuffle"],
    )
    dataloader_test = torch.utils.data.DataLoader(
        test_data,
        batch_size=hparams["test_batch_size"],
        num_workers=hparams["test_dataloader_options"]["num_workers"],
        collate_fn=PaddedBatch,
    )

    return dataloader_train, dataloader_valid, dataloader_test, label_encoder
if __name__ == "__main__":
    # Load hyperparameters file with command-line overrides.
    hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])
    with open(hparams_file) as fin:
        hparams = load_hyperpyyaml(fin, overrides)
    # If distributed_launch=True then
    # create ddp_group with the right communication protocol.
    sb.utils.distributed.ddp_init_group(run_opts)
    # Create experiment directory.
    sb.create_experiment_directory(
        experiment_directory=hparams["output_folder"],
        hyperparams_to_save=hparams_file,
        overrides=overrides,
    )
    # Due to DDP, we do the preparation ONLY on the main python process
    run_on_main(
        prepare_media,
        kwargs={
            "data_folder": hparams["data_folder"],
            "save_folder": hparams["save_folder"],
            "channels_path": hparams["channels_path"],
            "concepts_path": hparams["concepts_path"],
            "skip_wav": hparams["skip_wav"],
            "method": hparams["method"],
            "task": hparams["task"],
            "skip_prep": hparams["skip_prep"],
            "process_test2": hparams["process_test2"],
        },
    )
    # Create the datasets objects as well as tokenization and encoding.
    # Note: dataio_prepare returns ready-made DataLoaders, not datasets.
    train_data, valid_data, test_data, label_encoder = dataio_prepare(hparams)
    # Trainer initialization.
    slu_brain = SLU(
        modules=hparams["modules"],
        hparams=hparams,
        run_opts=run_opts,
        checkpointer=hparams["checkpointer"],
    )
    # Adding objects to trainer.
    slu_brain.tokenizer = label_encoder
    slu_brain.tokenizer.add_unk()  # handle unknown SLU labels
    # Check for stopped training: resume from the latest checkpoint if any.
    slu_brain.checkpointer.recover_if_possible()
    # Train.
    slu_brain.fit(
        slu_brain.hparams.epoch_counter,
        train_data,
        valid_data,
        progressbar=True,
        train_loader_kwargs=hparams["dataloader_options"],
        valid_loader_kwargs=hparams["test_dataloader_options"],
    )
    # Test with the checkpoint that achieved the lowest CER.
    slu_brain.evaluate(
        test_data,
        min_key="CER",
        progressbar=True,
        test_loader_kwargs=hparams["test_dataloader_options"],
    )
| 13,866 | 34.284987 | 83 | py |
speechbrain | speechbrain-main/recipes/MEDIA/ASR/CTC/train_hf_wav2vec.py | #!/usr/bin/env python3
"""
Recipe for training a CTC based ASR system with Media.
The system employs a wav2vec2 model and a decoder.
To run this recipe, do the following:
> python train_with_wav2vec.py hparams/train_with_wav2vec.yaml
With the default hyperparameters, the system employs a VanillaNN encoder.
The neural network is trained on greedy CTC.
The experiment file is flexible enough to support a large variety of
different systems. By properly changing the parameter files, you can try
different encoders, decoders, training tasks (Media , PortMedia),
and many other possible variations.
Authors
* Gaelle Laperriere 2023
"""
import sys
import torch
import logging
import speechbrain as sb
from hyperpyyaml import load_hyperpyyaml
from speechbrain.dataio.batch import PaddedBatch
from speechbrain.utils.distributed import run_on_main
from media_prepare import prepare_media
logger = logging.getLogger(__name__)
# Define training procedure.
class ASR(sb.core.Brain):
    """Brain class for CTC-based ASR training on the MEDIA dataset.

    The model is a wav2vec2 encoder followed by a small encoder
    (``modules.enc``) and a linear output layer trained with the CTC
    loss at the character level. Two optimizers are used: one for the
    wav2vec2 parameters and one for the rest of the model.
    """

    def compute_forward(self, batch, stage):
        """Forward computations from waveform to output probabilities.

        Arguments
        ---------
        batch : PaddedBatch
            Batch providing ``sig`` (waveforms and relative lengths).
        stage : sb.Stage
            Current stage (TRAIN, VALID or TEST).

        Returns
        -------
        p_ctc : torch.Tensor
            Per-frame log-probabilities over the label set.
        wav_lens : torch.Tensor
            Relative lengths of the input waveforms.
        """
        # Get data.
        batch = batch.to(self.device)
        wavs, wav_lens = batch.sig
        # Forward pass.
        feats = self.modules.wav2vec2(wavs, wav_lens)
        x = self.modules.enc(feats)
        # Output layer for seq2seq log-probabilities.
        logits = self.modules.output_lin(x)
        p_ctc = self.hparams.log_softmax(logits)
        return p_ctc, wav_lens

    def compute_objectives(self, predictions, batch, stage):
        """Computes the loss (CTC) given predictions and targets."""
        # Get data.
        batch = batch.to(self.device)
        chars, char_lens = batch.char_encoded
        ids = batch.id
        # Get predictions & loss.
        p_ctc, wav_lens = predictions
        loss = self.hparams.ctc_cost(p_ctc, chars, wav_lens, char_lens)
        # Get metrics (evaluation stages only).
        if stage != sb.Stage.TRAIN:
            # Generate sequences with CTC greedy decoder.
            sequence = sb.decoders.ctc_greedy_decode(
                p_ctc, wav_lens, self.hparams.blank_index
            )
            # Update metrics.
            self.cer_metric.append(
                ids=ids,
                predict=sequence,
                target=chars,
                target_len=char_lens,
                ind2lab=self.tokenizer.decode_ndim,
            )
            self.ctc_metric.append(ids, p_ctc, chars, wav_lens, char_lens)
        return loss

    def fit_batch(self, batch):
        """Train the parameters given a single batch in input"""
        stage = sb.Stage.TRAIN
        # Train.
        predictions = self.compute_forward(batch, stage)
        loss = self.compute_objectives(predictions, batch, stage)
        # Propagate loss.
        loss.backward()
        # Only step the optimizers when the gradients are finite.
        if self.check_gradients(loss):
            self.optimizer_wav2vec.step()
            self.optimizer.step()
        self.optimizer_wav2vec.zero_grad()
        self.optimizer.zero_grad()
        return loss.detach()

    def evaluate_batch(self, batch, stage):
        """Computations needed for validation/test batches"""
        # Evaluate. The forward pass is wrapped in no_grad as well, so
        # no autograd graph is built during evaluation.
        with torch.no_grad():
            predictions = self.compute_forward(batch, stage=stage)
            loss = self.compute_objectives(predictions, batch, stage)
        return loss.detach()

    def init_optimizers(self):
        """Initializes the wav2vec2 optimizer and model optimizer"""
        # Separate optimizers allow distinct learning rates/schedules
        # for the pretrained wav2vec2 and the randomly initialized head.
        self.optimizer_wav2vec = self.hparams.opt_class_wav2vec(
            self.hparams.model_wav2vec2.parameters()
        )
        self.optimizer = self.hparams.opt_class(self.hparams.model.parameters())
        # Add optimizers to checkpoint recoverables.
        if self.checkpointer is not None:
            self.checkpointer.add_recoverable(
                "optimizer_wav2vec", self.optimizer_wav2vec
            )
            self.checkpointer.add_recoverable("optimizer", self.optimizer)

    def on_stage_start(self, stage, epoch):
        """Gets called at the beginning of each epoch"""
        # Re-initialize metrics.
        if stage != sb.Stage.TRAIN:
            self.cer_metric = self.hparams.cer_computer()
            self.ctc_metric = self.hparams.ctc_computer()

    def on_stage_end(self, stage, stage_loss, epoch):
        """Gets called at the end of an epoch."""
        # Save loss and metrics.
        stage_stats = {"loss": stage_loss}
        if stage == sb.Stage.TRAIN:
            self.train_stats = stage_stats
        else:
            stage_stats["CER"] = self.cer_metric.summarize("error_rate")
        # Perform end-of-iteration things, like annealing, logging, etc.
        if stage == sb.Stage.VALID:
            old_lr, new_lr = self.hparams.lr_annealing(stage_stats["loss"])
            old_lr_wav2vec, new_lr_wav2vec = self.hparams.lr_annealing_wav2vec(
                stage_stats["loss"]
            )
            sb.nnet.schedulers.update_learning_rate(self.optimizer, new_lr)
            sb.nnet.schedulers.update_learning_rate(
                self.optimizer_wav2vec, new_lr_wav2vec
            )
            self.hparams.train_logger.log_stats(
                stats_meta={
                    "epoch": epoch,
                    "lr": old_lr,
                    "lr_wav2vec": old_lr_wav2vec,
                },
                train_stats=self.train_stats,
                valid_stats=stage_stats,
            )
            self.checkpointer.save_and_keep_only(
                meta={"CER": stage_stats["CER"]}, min_keys=["CER"],
            )
        # Same plus write results in txt files.
        elif stage == sb.Stage.TEST:
            self.hparams.train_logger.log_stats(
                stats_meta={"Epoch loaded": self.hparams.epoch_counter.current},
                test_stats=stage_stats,
            )
            # BUGFIX: read the output paths from self.hparams rather than
            # the module-level `hparams` global (which exists only when the
            # file is run as a script), so the class also works on import.
            with open(self.hparams.cer_file_test, "w") as w:
                self.cer_metric.write_stats(w)
            with open(self.hparams.ctc_file_test, "w") as w:
                self.ctc_metric.write_stats(w)
# Define custom data procedure.
def dataio_prepare(hparams):
    """This function prepares the datasets to be used in the brain class.
    It also defines the data processing pipeline through user-defined functions."""

    # 1. Define datasets:
    csv_folder = hparams["save_folder"] + "/csv"
    train_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["csv_train"], replacements={"data_root": csv_folder},
    )

    # Duration-based filtering shared by every sorting strategy.
    duration_limits = {
        "key_max_value": {"duration": hparams["avoid_if_longer_than"]},
        "key_min_value": {"duration": hparams["avoid_if_smaller_than"]},
    }

    # We sort training data to speed up training and get better results.
    # When the data is sorted, the dataloader must not shuffle it.
    sorting = hparams["sorting"]
    if sorting in ("ascending", "descending"):
        train_data = train_data.filtered_sorted(
            sort_key="duration",
            reverse=(sorting == "descending"),
            **duration_limits,
        )
        hparams["dataloader_options"]["shuffle"] = False
    elif sorting == "random":
        train_data = train_data.filtered_sorted(**duration_limits)
    else:
        raise NotImplementedError(
            "sorting must be random, ascending or descending"
        )

    # Validation and test sets are sorted (longest first) so that
    # evaluation runs faster.
    valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["csv_valid"], replacements={"data_root": csv_folder}
    )
    valid_data = valid_data.filtered_sorted(sort_key="duration", reverse=True)

    test_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["csv_test"], replacements={"data_root": csv_folder}
    )
    test_data = test_data.filtered_sorted(sort_key="duration", reverse=True)

    datasets = [train_data, valid_data, test_data]
    label_encoder = sb.dataio.encoder.CTCTextEncoder()

    # 2. Define audio pipeline:
    @sb.utils.data_pipeline.takes("wav", "start", "stop")
    @sb.utils.data_pipeline.provides("sig")
    def audio_pipeline(wav, start, stop):
        sample_rate = hparams["sample_rate"]
        segment = {
            "file": wav,
            "start": int(float(start) * sample_rate),
            "stop": int(float(stop) * sample_rate),
        }
        return sb.dataio.dataio.read_audio(segment)

    sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline)

    # 3. Define text pipeline:
    @sb.utils.data_pipeline.takes("char")
    @sb.utils.data_pipeline.provides("char_list", "char_encoded")
    def text_pipeline(char):
        char_list = char.strip().split()
        yield char_list
        yield label_encoder.encode_sequence_torch(char_list)

    sb.dataio.dataset.add_dynamic_item(datasets, text_pipeline)

    # 4. Create a label encoder instead of a tokenizer for our tag list:
    label_encoder.load_or_create(
        path=hparams["save_folder"] + "/labelencoder.txt",
        from_didatasets=[train_data],
        output_key="char_list",
        special_labels={"blank_label": hparams["blank_index"]},
        sequence_input=True,
    )

    # 5. Set output:
    sb.dataio.dataset.set_output_keys(
        datasets, ["id", "sig", "char_encoded"],
    )

    # 6. Make DataLoaders (shuffling only where requested):
    train_opts = hparams["dataloader_options"]
    dataloader_train = torch.utils.data.DataLoader(
        train_data,
        batch_size=hparams["batch_size"],
        num_workers=train_opts["num_workers"],
        collate_fn=PaddedBatch,
        shuffle=train_opts["shuffle"],
    )
    dataloader_valid = torch.utils.data.DataLoader(
        valid_data,
        batch_size=hparams["batch_size"],
        num_workers=train_opts["num_workers"],
        collate_fn=PaddedBatch,
        shuffle=train_opts["shuffle"],
    )
    dataloader_test = torch.utils.data.DataLoader(
        test_data,
        batch_size=hparams["test_batch_size"],
        num_workers=hparams["test_dataloader_options"]["num_workers"],
        collate_fn=PaddedBatch,
    )

    return dataloader_train, dataloader_valid, dataloader_test, label_encoder
if __name__ == "__main__":
    # Load hyperparameters file with command-line overrides.
    hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])
    with open(hparams_file) as fin:
        hparams = load_hyperpyyaml(fin, overrides)
    # If distributed_launch=True then
    # create ddp_group with the right communication protocol.
    sb.utils.distributed.ddp_init_group(run_opts)
    # Create experiment directory.
    sb.create_experiment_directory(
        experiment_directory=hparams["output_folder"],
        hyperparams_to_save=hparams_file,
        overrides=overrides,
    )
    # Due to DDP, we do the preparation ONLY on the main python process
    run_on_main(
        prepare_media,
        kwargs={
            "data_folder": hparams["data_folder"],
            "save_folder": hparams["save_folder"],
            "channels_path": hparams["channels_path"],
            "concepts_path": hparams["concepts_path"],
            "skip_wav": hparams["skip_wav"],
            "method": hparams["method"],
            "task": hparams["task"],
            "skip_prep": hparams["skip_prep"],
            "process_test2": hparams["process_test2"],
        },
    )
    # Create the datasets objects as well as tokenization and encoding.
    # Note: dataio_prepare returns ready-made DataLoaders, not datasets.
    train_data, valid_data, test_data, label_encoder = dataio_prepare(hparams)
    # Trainer initialization.
    asr_brain = ASR(
        modules=hparams["modules"],
        hparams=hparams,
        run_opts=run_opts,
        checkpointer=hparams["checkpointer"],
    )
    # Adding objects to trainer.
    asr_brain.tokenizer = label_encoder
    asr_brain.tokenizer.add_unk()  # handle unknown SLU labels
    # Check for stopped training: resume from the latest checkpoint if any.
    asr_brain.checkpointer.recover_if_possible()
    # Train.
    asr_brain.fit(
        asr_brain.hparams.epoch_counter,
        train_data,
        valid_data,
        progressbar=True,
        train_loader_kwargs=hparams["dataloader_options"],
        valid_loader_kwargs=hparams["test_dataloader_options"],
    )
    # Test with the checkpoint that achieved the lowest CER.
    asr_brain.evaluate(
        test_data,
        min_key="CER",
        progressbar=True,
        test_loader_kwargs=hparams["test_dataloader_options"],
    )
| 12,920 | 33.827493 | 83 | py |
speechbrain | speechbrain-main/recipes/REAL-M/sisnr-estimation/train.py | #!/usr/bin/env/python3
"""
Recipe for training a Blind SI-SNR estimator
Authors:
* Cem Subakan 2021
* Mirco Ravanelli 2021
* Samuele Cornell 2021
"""
import os
import sys
import torch
import speechbrain as sb
import speechbrain.nnet.schedulers as schedulers
from speechbrain.utils.distributed import run_on_main
from hyperpyyaml import load_hyperpyyaml
from torch.cuda.amp import autocast
import itertools as it
from tqdm import tqdm
import numpy as np
import logging
import csv
# Define training procedure
class Separation(sb.Brain):
def compress_snrrange(self, inp):
"""Convert from true snr range to 0-1 range"""
rnge = self.hparams.snrmax - self.hparams.snrmin
inp = torch.clip(inp, min=self.hparams.snrmin, max=self.hparams.snrmax)
inp = inp - self.hparams.snrmin
inp = inp / rnge
return inp
def gettrue_snrrange(self, inp):
"""Convert from 0-1 range to true snr range"""
rnge = self.hparams.snrmax - self.hparams.snrmin
inp = inp * rnge
inp = inp + self.hparams.snrmin
return inp
def compute_forward(self, mix, targets, stage, noise=None):
"""Forward computations from the mixture to the separated signals."""
# Unpack lists and put tensors in the right device
mix, mix_lens = mix
mix, mix_lens = mix.to(self.device), mix_lens.to(self.device)
# Convert targets to tensor
targets = torch.cat(
[targets[i][0].unsqueeze(-1) for i in range(self.hparams.num_spks)],
dim=-1,
).to(self.device)
# Add speech distortions
if stage == sb.Stage.TRAIN:
with torch.no_grad():
if self.hparams.use_speedperturb or self.hparams.use_rand_shift:
mix, targets = self.add_speed_perturb(targets, mix_lens)
if self.hparams.use_reverb_augment:
targets_rev = [
self.hparams.reverb(targets[:, :, i], None)
for i in range(self.hparams.num_spks)
]
targets_rev = torch.stack(targets_rev, dim=-1)
mix = targets_rev.sum(-1)
else:
mix = targets.sum(-1)
if self.hparams.use_wham_noise:
noise = noise.to(self.device)
len_noise = noise.shape[1]
len_mix = mix.shape[1]
min_len = min(len_noise, len_mix)
# add the noise
mix = mix[:, :min_len] + noise[:, :min_len]
# fix the length of targets also
targets = targets[:, :min_len, :]
if self.hparams.use_wavedrop:
mix = self.hparams.wavedrop(mix, mix_lens)
if self.hparams.limit_training_signal_len:
mix, targets = self.cut_signals(mix, targets)
# randomly select the separator to use, and separate
with torch.no_grad():
separator_model = np.random.choice(self.all_separators)
predictions = separator_model.separate_batch(mix)
# normalize the separation results
if hasattr(self.hparams, "separation_norm_type"):
if self.hparams.separation_norm_type == "max":
predictions = (
predictions / predictions.max(dim=1, keepdim=True)[0]
)
mix = mix / mix.max(dim=1, keepdim=True)[0]
elif self.hparams.separation_norm_type == "stnorm":
predictions = (
predictions - predictions.mean(dim=1, keepdim=True)
) / predictions.std(dim=1, keepdim=True)
mix = (mix - mix.mean(dim=1, keepdim=True)) / mix.std(
dim=1, keepdim=True
)
else:
raise ValueError("Unknown type of normalization")
# calculate oracle sisnrs
snr = self.compute_oracle_sisnrs(predictions, targets)
snr = snr.to(self.device)
# compress the si-snr values to 0-1 range
if self.hparams.use_snr_compression:
snr_compressed = self.compress_snrrange(snr)
predictions = predictions.permute(0, 2, 1)
predictions = predictions.reshape(-1, predictions.size(-1))
# make sure signal lengths do not change
min_T = min(predictions.shape[1], mix.shape[1])
assert predictions.shape[1] == mix.shape[1], "lengths change"
# concat the mixtures to the separation results
mix_repeat = mix.repeat(2, 1)
inp_cat = torch.cat(
[
predictions[:, :min_T].unsqueeze(1),
mix_repeat[:, :min_T].unsqueeze(1),
],
dim=1,
)
# get the encoder output and then calculate stats pooling
enc = self.hparams.encoder(inp_cat)
enc = enc.permute(0, 2, 1)
enc_stats = self.hparams.stat_pooling(enc)
# get the si-snr estimate by passing through the output layers
snrhat = self.hparams.encoder_out(enc_stats).squeeze()
return predictions, snrhat, snr, snr_compressed
    def compute_oracle_sisnrs(self, predictions, targets):
        """Computes the oracle si-snrs.

        Scores estimated source 1 against both targets and keeps the
        better (minimum-loss) match, then scores estimated source 2
        against the other target. The sign is flipped on the way out,
        so the returned [batch, 2] tensor appears to hold positive
        SI-SNR values (``self.hparams.loss`` presumably returns
        negative SI-SNRs — confirm against the hparams definition).

        NOTE(review): ``ind2`` is a per-batch index tensor, so the fancy
        indexing ``targets[:, :, ind2]`` only pairs sources correctly
        for batch size 1 — verify before using with larger batches.
        """
        snr1_1 = self.hparams.loss(targets[:, :, 0:1], predictions[:, :, 0:1])
        snr1_2 = self.hparams.loss(targets[:, :, 1:2], predictions[:, :, 0:1])
        snr1, ind1 = torch.stack([snr1_1, snr1_2]).min(0)
        ind2 = 1 - ind1
        snr2 = self.hparams.loss(targets[:, :, ind2], predictions[:, :, 1:2])
        return torch.stack([-snr1, -snr2], dim=1)
    def fit_batch(self, batch):
        """Trains one batch.

        Optionally swaps the batch for a WHAMR! batch (with probability
        whamr_proportion), runs the forward pass, and minimizes the L1
        distance between the compressed oracle SI-SNRs and the network
        estimates. Supports both mixed-precision (auto_mix_prec) and
        full-precision paths; non-finite or too-large losses are skipped.
        """
        if self.hparams.use_whamr_train:
            # With probability whamr_proportion, train on a WHAMR! batch
            # drawn from the auxiliary loader instead.
            whamr_prob = torch.rand(1).item()
            if whamr_prob > (1 - self.hparams.whamr_proportion):
                batch = next(self.hparams.train_whamr_loader)
        mixture = batch.mix_sig
        targets = [batch.s1_sig, batch.s2_sig]
        if self.hparams.use_wham_noise:
            noise = batch.noise_sig[0]
        else:
            noise = None
        if self.hparams.num_spks == 3:
            targets.append(batch.s3_sig)
        if self.auto_mix_prec:
            # Mixed-precision path: scale the loss and unscale before
            # clipping so the gradient norm is computed in fp32.
            with autocast():
                predictions, snrhat, snr, snr_compressed = self.compute_forward(
                    mixture, targets, sb.Stage.TRAIN, noise
                )
                snr = snr.reshape(-1)
                loss = ((snr_compressed - snrhat).abs()).mean()
                if (
                    loss < self.hparams.loss_upper_lim and loss.nelement() > 0
                ):  # the fix for computational problems
                    self.scaler.scale(loss).backward()
                    if self.hparams.clip_grad_norm >= 0:
                        self.scaler.unscale_(self.optimizer)
                        torch.nn.utils.clip_grad_norm_(
                            self.modules.parameters(),
                            self.hparams.clip_grad_norm,
                        )
                    self.scaler.step(self.optimizer)
                    self.scaler.update()
                else:
                    self.nonfinite_count += 1
                    # `logger` is a module-level name — presumably defined
                    # later in this file; confirm it exists before import.
                    logger.info(
                        "infinite loss or empty loss! it happened {} times so far - skipping this batch".format(
                            self.nonfinite_count
                        )
                    )
                    loss.data = torch.tensor(0).to(self.device)
        else:
            # get the oracle snrs, estimated snrs, and the source estimates
            predictions, snrhat, snr, snr_compressed = self.compute_forward(
                mixture, targets, sb.Stage.TRAIN, noise
            )
            snr = snr.reshape(-1)
            loss = ((snr_compressed - snrhat).abs()).mean()
            if (
                loss < self.hparams.loss_upper_lim and loss.nelement() > 0
            ):  # the fix for computational problems
                loss.backward()
                if self.hparams.clip_grad_norm >= 0:
                    torch.nn.utils.clip_grad_norm_(
                        self.modules.parameters(), self.hparams.clip_grad_norm
                    )
                self.optimizer.step()
            else:
                self.nonfinite_count += 1
                logger.info(
                    "infinite loss or empty loss! it happened {} times so far - skipping this batch".format(
                        self.nonfinite_count
                    )
                )
                loss.data = torch.tensor(0).to(self.device)
        self.optimizer.zero_grad()
        return loss.detach().cpu()
    def evaluate_batch(self, batch, stage):
        """Computations needed for validation/test batches.

        Runs the forward pass without gradients, converts the estimated
        SI-SNRs back to the true range (when compression is enabled) and
        returns the mean absolute error against the oracle SI-SNRs.
        """
        # snt_id = batch.id
        mixture = batch.mix_sig
        targets = [batch.s1_sig, batch.s2_sig]
        if self.hparams.num_spks == 3:
            targets.append(batch.s3_sig)
        if self.hparams.use_wham_noise:
            noise = batch.noise_sig[0]
        else:
            noise = None
        with torch.no_grad():
            predictions, snrhat, snr, snr_compressed = self.compute_forward(
                mixture, targets, sb.Stage.VALID, noise
            )
        # Map the network output back to the true SI-SNR range so the
        # error is comparable with the (uncompressed) oracle values.
        if self.hparams.use_snr_compression:
            snrhat = self.gettrue_snrrange(snrhat)
        loss = (snr - snrhat).abs().mean()
        return loss.detach()
    def on_stage_end(self, stage, stage_loss, epoch):
        """Gets called at the end of an epoch.

        Logs train/valid/test statistics, anneals the learning rate (when
        a ReduceLROnPlateau scheduler is configured) and checkpoints on
        the best validation error.
        """
        # Compute/store important stats
        stage_stats = {"error": stage_loss}
        if stage == sb.Stage.TRAIN:
            self.train_stats = stage_stats
        # Perform end-of-iteration things, like annealing, logging, etc.
        if stage == sb.Stage.VALID:
            # Learning rate annealing
            if isinstance(
                self.hparams.lr_scheduler, schedulers.ReduceLROnPlateau
            ):
                current_lr, next_lr = self.hparams.lr_scheduler(
                    [self.optimizer], epoch, stage_loss
                )
                schedulers.update_learning_rate(self.optimizer, next_lr)
            else:
                # if we do not use the reducelronplateau, we do not change the lr
                current_lr = self.hparams.optimizer.optim.param_groups[0]["lr"]
            self.hparams.train_logger.log_stats(
                stats_meta={"epoch": epoch, "lr": current_lr},
                train_stats=self.train_stats,
                valid_stats=stage_stats,
            )
            # Keep only the checkpoint with the lowest validation error.
            self.checkpointer.save_and_keep_only(
                meta={"error": stage_stats["error"]}, min_keys=["error"],
            )
        elif stage == sb.Stage.TEST:
            self.hparams.train_logger.log_stats(
                stats_meta={"Epoch loaded": self.hparams.epoch_counter.current},
                test_stats=stage_stats,
            )
def add_speed_perturb(self, targets, targ_lens):
"""Adds speed perturbation and random_shift to the input signals"""
min_len = -1
recombine = False
if self.hparams.use_speedperturb:
# Performing speed change (independently on each source)
new_targets = []
recombine = True
for i in range(targets.shape[-1]):
new_target = self.hparams.speedperturb(
targets[:, :, i], targ_lens
)
new_targets.append(new_target)
if i == 0:
min_len = new_target.shape[-1]
else:
if new_target.shape[-1] < min_len:
min_len = new_target.shape[-1]
if self.hparams.use_rand_shift:
# Performing random_shift (independently on each source)
recombine = True
for i in range(targets.shape[-1]):
rand_shift = torch.randint(
self.hparams.min_shift, self.hparams.max_shift, (1,)
)
new_targets[i] = new_targets[i].to(self.device)
new_targets[i] = torch.roll(
new_targets[i], shifts=(rand_shift[0],), dims=1
)
# Re-combination
if recombine:
if self.hparams.use_speedperturb:
targets = torch.zeros(
targets.shape[0],
min_len,
targets.shape[-1],
device=targets.device,
dtype=torch.float,
)
for i, new_target in enumerate(new_targets):
targets[:, :, i] = new_targets[i][:, 0:min_len]
mix = targets.sum(-1)
return mix, targets
def cut_signals(self, mixture, targets):
"""This function selects a random segment of a given length withing the mixture.
The corresponding targets are selected accordingly"""
randstart = torch.randint(
0,
1 + max(0, mixture.shape[1] - self.hparams.training_signal_len),
(1,),
).item()
targets = targets[
:, randstart : randstart + self.hparams.training_signal_len, :
]
mixture = mixture[
:, randstart : randstart + self.hparams.training_signal_len
]
return mixture, targets
def reset_layer_recursively(self, layer):
"""Reinitializes the parameters of the neural networks"""
if hasattr(layer, "reset_parameters"):
layer.reset_parameters()
for child_layer in layer.modules():
if layer != child_layer:
self.reset_layer_recursively(child_layer)
def save_results(self, test_data):
    """
    This function calculates the oracle si-snrs and the estimated si-snr
    on the test set of WHAMR! dataset, and writes these results into a csv file.

    Arguments
    ---------
    test_data : DynamicItemDataset
        The test set to evaluate on.
    """
    # Create folders where to store audio
    save_file = os.path.join(
        self.hparams.output_folder, "test_results_wsj.csv"
    )

    # Variable init
    all_sisnr1s = []
    all_sisnr1_hats = []
    all_sisnr2s = []
    all_sisnr2_hats = []
    csv_columns = ["snt_id", "snr1", "snr1-hat", "snr2", "snr2-hat"]

    test_loader = sb.dataio.dataloader.make_dataloader(
        test_data, **self.hparams.dataloader_opts
    )

    with open(save_file, "w") as results_csv:
        writer = csv.DictWriter(results_csv, fieldnames=csv_columns)
        writer.writeheader()

        # Loop over all test sentence
        with tqdm(test_loader, dynamic_ncols=True) as t:
            for i, batch in enumerate(t):

                # Apply Separation
                mixture = batch.mix_sig
                snt_id = batch.id
                targets = [batch.s1_sig, batch.s2_sig]
                if self.hparams.num_spks == 3:
                    targets.append(batch.s3_sig)

                if self.hparams.use_wham_noise:
                    noise = batch.noise_sig[0]
                else:
                    noise = None

                with torch.no_grad():
                    (
                        predictions,
                        snrhat,
                        snr,
                        snr_compressed,
                    ) = self.compute_forward(
                        mixture, targets, sb.Stage.VALID, noise
                    )

                # Map the estimate back from the compressed range to dB.
                # NOTE(review): gettrue_snrrange is defined elsewhere in
                # this file (not visible in this chunk).
                if self.hparams.use_snr_compression:
                    snrhat = self.gettrue_snrrange(snrhat)

                # Saving on a csv file
                row = {
                    "snt_id": snt_id[0],
                    "snr1": snr.squeeze()[0].item(),
                    "snr1-hat": snrhat[0].item(),
                    "snr2": snr.squeeze()[1].item(),
                    "snr2-hat": snrhat[1].item(),
                }
                writer.writerow(row)

                # Metric Accumulation
                all_sisnr1s.append(snr.squeeze()[0].item())
                all_sisnr1_hats.append(snrhat[0].item())
                all_sisnr2s.append(snr.squeeze()[1].item())
                all_sisnr2_hats.append(snrhat[1].item())

            # Final average row appended after all utterances
            row = {
                "snt_id": "avg",
                "snr1": np.array(all_sisnr1s).mean(),
                "snr1-hat": np.array(all_sisnr1_hats).mean(),
                "snr2": np.array(all_sisnr2s).mean(),
                "snr2-hat": np.array(all_sisnr2_hats).mean(),
            }
            writer.writerow(row)

    logger.info(
        "Mean SISNR for source 1 is {}".format(np.array(all_sisnr1s).mean())
    )
    logger.info(
        "Mean SISNR hat for source 1 is {}".format(
            np.array(all_sisnr1_hats).mean()
        )
    )
    logger.info(
        "Mean SISNR for source 2 is {}".format(np.array(all_sisnr2s).mean())
    )
    logger.info(
        "Mean SISNR hat for source 2 is {}".format(
            np.array(all_sisnr2_hats).mean()
        )
    )
def dataio_prep(hparams):
    """Creates data processing pipeline.

    Builds train/valid/test DynamicItemDatasets from the csv files listed in
    ``hparams`` and attaches audio-reading pipelines for the mixture, the
    sources (2 or 3 speakers), and optionally the WHAM! noise.

    Returns
    -------
    train_data, valid_data, test_data : DynamicItemDataset
    """

    # 1. Define datasets
    train_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["train_data"],
        replacements={"data_root": hparams["data_folder"]},
    )
    valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["valid_data"],
        replacements={"data_root": hparams["data_folder"]},
    )
    test_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["test_data"],
        replacements={"data_root": hparams["data_folder"]},
    )
    datasets = [train_data, valid_data, test_data]

    # 2. Provide audio pipelines
    @sb.utils.data_pipeline.takes("mix_wav")
    @sb.utils.data_pipeline.provides("mix_sig")
    def audio_pipeline_mix(mix_wav):
        mix_sig = sb.dataio.dataio.read_audio(mix_wav)
        return mix_sig

    @sb.utils.data_pipeline.takes("s1_wav")
    @sb.utils.data_pipeline.provides("s1_sig")
    def audio_pipeline_s1(s1_wav):
        s1_sig = sb.dataio.dataio.read_audio(s1_wav)
        return s1_sig

    @sb.utils.data_pipeline.takes("s2_wav")
    @sb.utils.data_pipeline.provides("s2_sig")
    def audio_pipeline_s2(s2_wav):
        s2_sig = sb.dataio.dataio.read_audio(s2_wav)
        return s2_sig

    # The third source and the noise are only defined when requested.
    if hparams["num_spks"] == 3:

        @sb.utils.data_pipeline.takes("s3_wav")
        @sb.utils.data_pipeline.provides("s3_sig")
        def audio_pipeline_s3(s3_wav):
            s3_sig = sb.dataio.dataio.read_audio(s3_wav)
            return s3_sig

    if hparams["use_wham_noise"]:

        @sb.utils.data_pipeline.takes("noise_wav")
        @sb.utils.data_pipeline.provides("noise_sig")
        def audio_pipeline_noise(noise_wav):
            noise_sig = sb.dataio.dataio.read_audio(noise_wav)
            return noise_sig

    sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline_mix)
    sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline_s1)
    sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline_s2)
    if hparams["num_spks"] == 3:
        sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline_s3)

    if hparams["use_wham_noise"]:
        print("Using the WHAM! noise in the data pipeline")
        sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline_noise)

    # Output keys depend on the (num_spks, use_wham_noise) combination.
    if (hparams["num_spks"] == 2) and hparams["use_wham_noise"]:
        sb.dataio.dataset.set_output_keys(
            datasets, ["id", "mix_sig", "s1_sig", "s2_sig", "noise_sig"]
        )
    elif (hparams["num_spks"] == 3) and hparams["use_wham_noise"]:
        sb.dataio.dataset.set_output_keys(
            datasets,
            ["id", "mix_sig", "s1_sig", "s2_sig", "s3_sig", "noise_sig"],
        )
    elif (hparams["num_spks"] == 2) and not hparams["use_wham_noise"]:
        sb.dataio.dataset.set_output_keys(
            datasets, ["id", "mix_sig", "s1_sig", "s2_sig"]
        )
    else:
        sb.dataio.dataset.set_output_keys(
            datasets, ["id", "mix_sig", "s1_sig", "s2_sig", "s3_sig"]
        )

    return train_data, valid_data, test_data
if __name__ == "__main__":

    # Load hyperparameters file with command-line overrides
    hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])
    with open(hparams_file) as fin:
        hparams = load_hyperpyyaml(fin, overrides)

    # Initialize ddp (useful only for multi-GPU DDP training)
    sb.utils.distributed.ddp_init_group(run_opts)

    # Logger info
    logger = logging.getLogger(__name__)

    # Create experiment directory
    sb.create_experiment_directory(
        experiment_directory=hparams["output_folder"],
        hyperparams_to_save=hparams_file,
        overrides=overrides,
    )

    # Check if wsj0_tr is set with dynamic mixing
    if hparams["dynamic_mixing"] and not os.path.exists(
        hparams["base_folder_dm"]
    ):
        print(
            "Please, specify a valid base_folder_dm folder when using dynamic mixing"
        )
        sys.exit(1)

    # Data preparation for LibriMix
    from prepare_data_librimix import prepare_librimix as prepare_libri

    # create the csv files (run only on the main process under DDP)
    run_on_main(
        prepare_libri,
        kwargs={
            "datapath": hparams["data_folder"],
            "savepath": hparams["save_folder"],
            "n_spks": hparams["num_spks"],
            "skip_prep": hparams["skip_prep"],
            "fs": hparams["sample_rate"],
            "librimix_addnoise": hparams["use_wham_noise"],
        },
    )

    # Data preparation for WHAMR
    from prepare_data_wham import create_wham_whamr_csv
    from train_wham import dataio_prep as dataio_prep_whamr

    # add another skip_prep to distinguish between LibriSpeech & WHAM/R prep
    skip_prep = hparams["skip_prep"]
    if not skip_prep:
        create_wham_whamr_csv(
            datapath=hparams["whamr_data_folder"],
            savepath=hparams["save_folder"],
            fs=hparams["sample_rate"],
            add_reverb=True,
            savename="whamr_",
            set_types=["tr", "cv", "tt"],
        )

    train_data_whamr, valid_data, test_data = dataio_prep_whamr(hparams)

    # if whamr, and we do speedaugment we need to prepare the csv file
    if hparams["use_reverb_augment"]:
        from prepare_data_wham import create_whamr_rir_csv
        from create_whamr_rirs import create_rirs

        # If the Room Impulse Responses do not exist, we create them
        if not os.path.exists(hparams["rir_path"]):
            # NOTE(review): message appears truncated (likely "Creating
            # Room Impulse Responses..."); left unchanged here.
            print("ing Room Impulse Responses...")
            run_on_main(
                create_rirs,
                kwargs={
                    "output_dir": hparams["rir_path"],
                    "sr": hparams["sample_rate"],
                },
            )

        run_on_main(
            create_whamr_rir_csv,
            kwargs={
                "datapath": hparams["rir_path"],
                "savepath": hparams["save_folder"],
            },
        )

        hparams["reverb"] = sb.processing.speech_augmentation.AddReverb(
            os.path.join(hparams["save_folder"], "whamr_rirs.csv")
        )

    if hparams["dynamic_mixing"]:
        from dynamic_mixing_librimix import (
            dynamic_mix_data_prep_librimix as dynamic_mix_data_prep,
        )
        from dynamic_mixing_wham import (
            dynamic_mix_data_prep as dynamic_mix_data_prep_whamr,
        )

        if hparams["use_whamr_train"]:
            if "processed" not in hparams["base_folder_dm_whamr"]:
                # if the processed folder does not exist for whamr dynamic mixing, we do the necessary preprocessing
                if not os.path.exists(
                    os.path.normpath(hparams["base_folder_dm_whamr"])
                    + "_processed"
                ):
                    from preprocess_dynamic_mixing_wham import resample_folder

                    print("Resampling the base folder")
                    run_on_main(
                        resample_folder,
                        kwargs={
                            "input_folder": hparams["base_folder_dm_whamr"],
                            "output_folder": os.path.normpath(
                                hparams["base_folder_dm_whamr"]
                            )
                            + "_processed",
                            "fs": hparams["sample_rate"],
                            "regex": "**/*.wav",
                        },
                    )
                    # adjust the base_folder_dm path
                    hparams["base_folder_dm_whamr"] = (
                        os.path.normpath(hparams["base_folder_dm_whamr"])
                        + "_processed"
                    )
                else:
                    print(
                        "Using the existing processed folder on the same directory as base_folder_dm"
                    )
                    hparams["base_folder_dm_whamr"] = (
                        os.path.normpath(hparams["base_folder_dm_whamr"])
                        + "_processed"
                    )

            train_data_whamr = dynamic_mix_data_prep_whamr(
                tr_csv=hparams["train_whamr_data"],
                data_root_folder=hparams["whamr_data_folder"],
                base_folder_dm=hparams["base_folder_dm_whamr"],
                sample_rate=hparams["sample_rate"],
                num_spks=hparams["num_spks"],
                max_training_signal_len=hparams["training_signal_len"],
                batch_size=hparams["dataloader_opts"]["batch_size"],
                num_workers=hparams["dataloader_opts"]["num_workers"],
            )
            # infinite iterator over the WHAMR training data
            hparams["train_whamr_loader"] = it.cycle(iter(train_data_whamr))

        # if the base_folder for dm is not processed for LibriMix, preprocess it
        if "processed" not in hparams["base_folder_dm"]:
            # if the processed folder already exists we just use it otherwise we do the preprocessing
            if not os.path.exists(
                os.path.normpath(hparams["base_folder_dm"]) + "_processed"
            ):
                from preprocess_dynamic_mixing_librimix import resample_folder

                print("Resampling the base folder")
                run_on_main(
                    resample_folder,
                    kwargs={
                        "input_folder": hparams["base_folder_dm"],
                        "output_folder": os.path.normpath(
                            hparams["base_folder_dm"]
                        )
                        + "_processed",
                        "fs": hparams["sample_rate"],
                        "regex": "**/*.flac",
                    },
                )
                # adjust the base_folder_dm path
                hparams["base_folder_dm"] = (
                    os.path.normpath(hparams["base_folder_dm"]) + "_processed"
                )
            else:
                print(
                    "Using the existing processed folder on the same directory as base_folder_dm"
                )
                hparams["base_folder_dm"] = (
                    os.path.normpath(hparams["base_folder_dm"]) + "_processed"
                )

        train_data = dynamic_mix_data_prep(hparams)
    else:
        hparams["use_whamr_train"] = False
        train_data, valid_data, test_data = dataio_prep(hparams)

    # Brain class initialization
    snrestimator = Separation(
        modules=hparams["modules"],
        opt_class=hparams["optimizer"],
        hparams=hparams,
        run_opts=run_opts,
        checkpointer=hparams["checkpointer"],
    )

    from speechbrain.pretrained import SepformerSeparation as separator
    from speechbrain.pretrained.interfaces import fetch

    # Download encoder/decoder/masknet checkpoints + hyperparams for every
    # pretrained separator listed in the hparams, then load them.
    all_separators = []
    for separator_model in hparams["separators_to_use"]:
        fetch(
            separator_model + "_encoder.ckpt",
            source=hparams["separator_repo"],
            savedir=separator_model,
            save_filename="encoder.ckpt",
        )
        fetch(
            separator_model + "_decoder.ckpt",
            source=hparams["separator_repo"],
            savedir=separator_model,
            save_filename="decoder.ckpt",
        )
        fetch(
            separator_model + "_masknet.ckpt",
            source=hparams["separator_repo"],
            savedir=separator_model,
            save_filename="masknet.ckpt",
        )
        fetch(
            separator_model + "_hyperparams.yaml",
            source=hparams["separator_repo"],
            savedir=separator_model,
            save_filename="hyperparams.yaml",
        )

        separator_loaded = separator.from_hparams(
            source=separator_model,
            run_opts={"device": "cuda"},
            savedir=separator_model,
        )
        all_separators.append(separator_loaded)
    snrestimator.all_separators = all_separators

    if not hparams["test_only"]:
        # Training
        snrestimator.fit(
            snrestimator.hparams.epoch_counter,
            train_data,
            valid_data,
            train_loader_kwargs=hparams["dataloader_opts"],
            valid_loader_kwargs=hparams["dataloader_opts"],
        )

    # Eval
    snrestimator.evaluate(test_data, min_key="error")
    snrestimator.save_results(test_data)
| 29,744 | 35.631773 | 152 | py |
speechbrain | speechbrain-main/recipes/WHAMandWHAMR/separation/dynamic_mixing.py | import speechbrain as sb
import numpy as np
import torch
import torchaudio
import glob
import os
from pathlib import Path
import random
from speechbrain.processing.signal_processing import rescale
from speechbrain.dataio.batch import PaddedBatch
"""
The functions to implement Dynamic Mixing For SpeechSeparation
Authors
* Samuele Cornell 2021
* Cem Subakan 2021
"""
def build_spk_hashtable(base_folder_dm, sample_rate):
    """Builds a dictionary of speaker-utterance pairs to be used in
    dynamic mixing.

    Arguments
    ---------
    base_folder_dm : str
        Base folder for dynamic mixing (wsj0 utterances, searched
        recursively for wav files).
    sample_rate : int
        Expected sampling frequency (Hz); every utterance is checked
        against it.

    Returns
    -------
    spk_hashtable : dict
        Maps a speaker id (first 3 characters of the file stem) to the
        list of that speaker's utterance paths.
    spk_weights : list
        Number of utterances per speaker, aligned with the dict ordering.
    """
    wsj0_utterances = glob.glob(
        os.path.join(base_folder_dm, "**/*.wav"), recursive=True
    )

    spk_hashtable = {}
    for utt in wsj0_utterances:
        # e.g. 2speakers/wav8k/min/tr/mix/019o031a_0.27588_01vo030q_-0.27588.wav
        # id of speaker 1 is 019 utterance id is o031a
        # id of speaker 2 is 01v utterance id is 01vo030q
        spk_id = Path(utt).stem[:3]
        assert (
            torchaudio.info(utt).sample_rate == sample_rate
        ), "Unexpected sample rate in {}".format(utt)
        # setdefault replaces the original if/else append dance
        spk_hashtable.setdefault(spk_id, []).append(utt)

    # calculate weights for each speaker (len of list of utterances);
    # dict preserves insertion order, so this stays aligned with the keys
    spk_weights = [len(utts) for utts in spk_hashtable.values()]

    return spk_hashtable, spk_weights
def get_wham_noise_filenames(data_root_folder, sample_rate):
    """Lists the WHAM! noise files to be used in dynamic mixing.

    Arguments
    ---------
    data_root_folder : str
        System path for the top folder of the WHAM!/WHAMR! dataset.
    sample_rate : int
        Sampling rate in Hz; only 8000 and 16000 are supported.

    Returns
    -------
    list
        Paths of the training noise wav files (may be empty).
    """
    # Map the supported rates to their subfolders; anything else is an error.
    noise_subdirs = {
        8000: "wav8k/min/tr/noise/",
        16000: "wav16k/min/tr/noise/",
    }
    if sample_rate not in noise_subdirs:
        raise ValueError("Unsupported Sampling Rate")
    noise_path = noise_subdirs[sample_rate]
    return glob.glob(os.path.join(data_root_folder, noise_path, "*.wav"))
def dynamic_mix_data_prep(
    tr_csv,
    data_root_folder,
    base_folder_dm,
    sample_rate,
    num_spks,
    max_training_signal_len,
    batch_size=1,
    num_workers=1,
):
    """
    Dynamic mixing for WSJ0-2/3Mix and WHAM!/WHAMR!

    Arguments
    ---------
    tr_csv (str) : the system path for the csv
    data_root_folder (str) : the system path for the root folder of the WHAM! / WHAMR! dataset
    base_folder_dm (str) : the system path for the wsj0 root folder
    sample_rate (int) : sampling frequency in Hz
    num_spks (int) : number of speakers (2 or 3)
    max_training_signal_len (int) : upper limit for the max_training_signal_len (in number of samples)
    batch_size (int) : batch_size
    num_workers (int) : number of workers for the dataloader

    Returns
    -------
    torch.utils.data.DataLoader
        Loader that synthesizes a fresh random mixture per item.
    """
    # 1. Define datasets
    train_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=tr_csv, replacements={"data_root": data_root_folder},
    )

    # we build a dictionary where keys are speaker ids and entries are lists
    # of utterance files of that speaker
    spk_hashtable, spk_weights = build_spk_hashtable(
        base_folder_dm=base_folder_dm, sample_rate=sample_rate
    )

    spk_list = [x for x in spk_hashtable.keys()]
    # normalize utterance counts into sampling probabilities
    spk_weights = [x / sum(spk_weights) for x in spk_weights]

    # WHAM!/WHAMR! additionally mixes in recorded background noise
    if "wham" in Path(data_root_folder).stem:
        noise_files = get_wham_noise_filenames(data_root_folder, sample_rate)

    @sb.utils.data_pipeline.takes("mix_wav")
    @sb.utils.data_pipeline.provides(
        "mix_sig", "s1_sig", "s2_sig", "s3_sig", "noise_sig"
    )
    def audio_pipeline(
        mix_wav,
    ):  # this is dummy --> it means one epoch will be same as without dynamic mixing
        """
        This audio pipeline defines the compute graph for dynamic mixing:
        sample speakers, window and rescale their utterances, sum them
        (plus optional noise), normalize, and yield each provided item.
        """
        # sample num_spks distinct speakers, weighted by utterance count
        speakers = np.random.choice(
            spk_list, num_spks, replace=False, p=spk_weights
        )

        if "wham" in Path(data_root_folder).stem:
            noise_file = np.random.choice(noise_files, 1, replace=False)

            noise, fs_read = torchaudio.load(noise_file[0])
            noise = noise.squeeze()

        # select two speakers randomly
        sources = []
        first_lvl = None

        spk_files = [
            np.random.choice(spk_hashtable[spk], 1, False)[0]
            for spk in speakers
        ]

        # common window length: the shortest utterance, capped by the limit
        minlen = min(
            *[torchaudio.info(x).num_frames for x in spk_files],
            max_training_signal_len,
        )

        for i, spk_file in enumerate(spk_files):

            # select random offset
            length = torchaudio.info(spk_file).num_frames
            start = 0
            stop = length
            if length > minlen:  # take a random window
                start = np.random.randint(0, length - minlen)
                stop = start + minlen

            tmp, fs_read = torchaudio.load(
                spk_file, frame_offset=start, num_frames=stop - start,
            )

            tmp = tmp[0]  # * peak  # remove channel dim and normalize

            # first source gets an absolute gain; the others are set
            # relative to it (values follow the WSJ0 mixing recipe)
            if i == 0:
                gain = np.clip(random.normalvariate(-27.43, 2.57), -45, 0)
                tmp = rescale(tmp, torch.tensor(len(tmp)), gain, scale="dB")
                first_lvl = gain
            else:
                gain = np.clip(
                    first_lvl + random.normalvariate(-2.51, 2.66), -45, 0
                )
                tmp = rescale(tmp, torch.tensor(len(tmp)), gain, scale="dB")
            sources.append(tmp)

        # we mix the sources together
        sources = torch.stack(sources)
        mixture = torch.sum(sources, 0)
        if "wham" in Path(data_root_folder).stem:
            len_noise = len(noise)
            len_mix = len(mixture)
            min_len = min(len_noise, len_mix)
            mixture = mixture[:min_len] + noise[:min_len]

        # peak-normalize everything jointly to 0.9 of full scale
        max_amp = max(
            torch.abs(mixture).max().item(),
            *[x.item() for x in torch.abs(sources).max(dim=-1)[0]],
        )
        mix_scaling = 1 / max_amp * 0.9
        sources = mix_scaling * sources
        mixture = mix_scaling * mixture

        # yields must match the order declared in @provides above
        yield mixture
        for i in range(num_spks):
            yield sources[i]

        # If the number of speakers is 2, yield None for the 3rd speaker
        if num_spks == 2:
            yield None

        if "wham" in Path(data_root_folder).stem:
            mean_source_lvl = sources.abs().mean()
            mean_noise_lvl = noise.abs().mean()
            noise = (mean_source_lvl / mean_noise_lvl) * noise
            yield noise
        else:
            yield None

    sb.dataio.dataset.add_dynamic_item([train_data], audio_pipeline)
    sb.dataio.dataset.set_output_keys(
        [train_data],
        ["id", "mix_sig", "s1_sig", "s2_sig", "s3_sig", "noise_sig"],
    )

    # reseed each dataloader worker so workers don't repeat random draws
    train_data = torch.utils.data.DataLoader(
        train_data,
        batch_size=batch_size,
        num_workers=num_workers,
        collate_fn=PaddedBatch,
        worker_init_fn=lambda x: np.random.seed(
            int.from_bytes(os.urandom(4), "little") + x
        ),
    )
    return train_data
| 7,212 | 30.915929 | 107 | py |
speechbrain | speechbrain-main/recipes/WHAMandWHAMR/separation/train.py | #!/usr/bin/env/python3
"""Recipe for training a neural speech separation system on WHAM! and WHAMR!
datasets. The system employs an encoder, a decoder, and a masking network.
To run this recipe, do the following:
> python train.py hparams/sepformer-wham.yaml --data_folder /your_path/wham_original
> python train.py hparams/sepformer-whamr.yaml --data_folder /your_path/whamr
The experiment file is flexible enough to support different neural
networks. By properly changing the parameter files, you can try
different architectures.
Authors
* Cem Subakan 2020
* Mirco Ravanelli 2020
* Samuele Cornell 2020
* Mirko Bronzi 2020
* Jianyuan Zhong 2020
"""
import os
import sys
import torch
import torch.nn.functional as F
import torchaudio
import speechbrain as sb
import speechbrain.nnet.schedulers as schedulers
from speechbrain.utils.distributed import run_on_main
from torch.cuda.amp import autocast
from hyperpyyaml import load_hyperpyyaml
import numpy as np
from tqdm import tqdm
import csv
import logging
# Define training procedure
class Separation(sb.Brain):
    def compute_forward(self, mix, targets, stage, noise=None):
        """Forward computations from the mixture to the separated signals.

        Arguments
        ---------
        mix : tuple
            (signal, lengths) pair, as produced by a PaddedBatch.
        targets : list
            One (signal, lengths) pair per speaker.
        stage : sb.Stage
            Train-time augmentations are only applied in TRAIN.
        noise : tuple, optional
            WHAM! noise signal, used when re-mixing after augmentation.

        Returns
        -------
        est_source, targets : torch.Tensor
            Estimated and clean sources, shape [batch, time, num_spks].
        """
        # Unpack lists and put tensors in the right device
        mix, mix_lens = mix
        mix, mix_lens = mix.to(self.device), mix_lens.to(self.device)

        # Convert targets to tensor
        targets = torch.cat(
            [targets[i][0].unsqueeze(-1) for i in range(self.hparams.num_spks)],
            dim=-1,
        ).to(self.device)

        # Add speech distortions
        if stage == sb.Stage.TRAIN:
            with torch.no_grad():
                if self.hparams.use_speedperturb or self.hparams.use_rand_shift:
                    mix, targets = self.add_speed_perturb(targets, mix_lens)

                    # after perturbing the sources, the mixture has to be
                    # rebuilt: with reverberation for WHAMR, dry otherwise
                    if "whamr" in self.hparams.data_folder:
                        targets_rev = [
                            self.hparams.reverb(targets[:, :, i], None)
                            for i in range(self.hparams.num_spks)
                        ]
                        targets_rev = torch.stack(targets_rev, dim=-1)
                        mix = targets_rev.sum(-1)
                    else:
                        mix = targets.sum(-1)

                    noise = noise.to(self.device)
                    len_noise = noise.shape[1]
                    len_mix = mix.shape[1]
                    min_len = min(len_noise, len_mix)

                    # add the noise
                    mix = mix[:, :min_len] + noise[:, :min_len]

                    # fix the length of targets also
                    targets = targets[:, :min_len, :]

                if self.hparams.use_wavedrop:
                    mix = self.hparams.wavedrop(mix, mix_lens)

                if self.hparams.limit_training_signal_len:
                    mix, targets = self.cut_signals(mix, targets)

        # Separation
        mix_w = self.hparams.Encoder(mix)
        est_mask = self.hparams.MaskNet(mix_w)
        mix_w = torch.stack([mix_w] * self.hparams.num_spks)
        sep_h = mix_w * est_mask

        # Decoding
        est_source = torch.cat(
            [
                self.hparams.Decoder(sep_h[i]).unsqueeze(-1)
                for i in range(self.hparams.num_spks)
            ],
            dim=-1,
        )

        # T changed after conv1d in encoder, fix it here
        T_origin = mix.size(1)
        T_est = est_source.size(1)
        if T_origin > T_est:
            est_source = F.pad(est_source, (0, 0, 0, T_origin - T_est))
        else:
            est_source = est_source[:, :T_origin, :]

        return est_source, targets
def compute_objectives(self, predictions, targets):
"""Computes the si-snr loss"""
return self.hparams.loss(targets, predictions)
    def fit_batch(self, batch):
        """Trains one batch.

        NOTE(review): the AMP and non-AMP branches below duplicate the
        thresholding/clipping logic; keep them in sync when editing.
        """
        # Unpacking batch list
        mixture = batch.mix_sig
        targets = [batch.s1_sig, batch.s2_sig]
        noise = batch.noise_sig[0]

        if self.auto_mix_prec:
            # Mixed-precision path: loss is scaled via self.scaler
            with autocast():
                predictions, targets = self.compute_forward(
                    mixture, targets, sb.Stage.TRAIN, noise
                )
                loss = self.compute_objectives(predictions, targets)

                # hard threshold the easy dataitems
                if self.hparams.threshold_byloss:
                    th = self.hparams.threshold
                    loss_to_keep = loss[loss > th]
                    if loss_to_keep.nelement() > 0:
                        loss = loss_to_keep.mean()
                else:
                    loss = loss.mean()

            if (
                loss < self.hparams.loss_upper_lim and loss.nelement() > 0
            ):  # the fix for computational problems
                self.scaler.scale(loss).backward()
                if self.hparams.clip_grad_norm >= 0:
                    # gradients must be unscaled before clipping by norm
                    self.scaler.unscale_(self.optimizer)
                    torch.nn.utils.clip_grad_norm_(
                        self.modules.parameters(), self.hparams.clip_grad_norm,
                    )
                self.scaler.step(self.optimizer)
                self.scaler.update()
            else:
                # skip the update on non-finite / too-large losses
                self.nonfinite_count += 1
                logger.info(
                    "infinite loss or empty loss! it happened {} times so far - skipping this batch".format(
                        self.nonfinite_count
                    )
                )
                loss.data = torch.tensor(0).to(self.device)
        else:
            predictions, targets = self.compute_forward(
                mixture, targets, sb.Stage.TRAIN, noise
            )
            loss = self.compute_objectives(predictions, targets)

            # hard threshold the easy dataitems
            if self.hparams.threshold_byloss:
                th = self.hparams.threshold
                loss_to_keep = loss[loss > th]
                if loss_to_keep.nelement() > 0:
                    loss = loss_to_keep.mean()
            else:
                loss = loss.mean()

            if (
                loss < self.hparams.loss_upper_lim and loss.nelement() > 0
            ):  # the fix for computational problems
                loss.backward()
                if self.hparams.clip_grad_norm >= 0:
                    torch.nn.utils.clip_grad_norm_(
                        self.modules.parameters(), self.hparams.clip_grad_norm
                    )
                self.optimizer.step()
            else:
                self.nonfinite_count += 1
                logger.info(
                    "infinite loss or empty loss! it happened {} times so far - skipping this batch".format(
                        self.nonfinite_count
                    )
                )
                loss.data = torch.tensor(0).to(self.device)
        self.optimizer.zero_grad()

        return loss.detach().cpu()
    def evaluate_batch(self, batch, stage):
        """Computations needed for validation/test batches"""
        snt_id = batch.id
        mixture = batch.mix_sig
        targets = [batch.s1_sig, batch.s2_sig]

        with torch.no_grad():
            predictions, targets = self.compute_forward(mixture, targets, stage)
            loss = self.compute_objectives(predictions, targets)

        # Manage audio file saving
        if stage == sb.Stage.TEST and self.hparams.save_audio:
            if hasattr(self.hparams, "n_audio_to_save"):
                # save only the first n_audio_to_save items (counter
                # decremented in place via += -1)
                if self.hparams.n_audio_to_save > 0:
                    self.save_audio(snt_id[0], mixture, targets, predictions)
                    self.hparams.n_audio_to_save += -1
            else:
                self.save_audio(snt_id[0], mixture, targets, predictions)

        return loss.mean().detach()
    def on_stage_end(self, stage, stage_loss, epoch):
        """Gets called at the end of a epoch."""
        # Compute/store important stats
        stage_stats = {"si-snr": stage_loss}
        if stage == sb.Stage.TRAIN:
            self.train_stats = stage_stats

        # Perform end-of-iteration things, like annealing, logging, etc.
        if stage == sb.Stage.VALID:

            # Learning rate annealing
            if isinstance(
                self.hparams.lr_scheduler, schedulers.ReduceLROnPlateau
            ):
                current_lr, next_lr = self.hparams.lr_scheduler(
                    [self.optimizer], epoch, stage_loss
                )
                schedulers.update_learning_rate(self.optimizer, next_lr)
            else:
                # if we do not use the reducelronplateau, we do not change the lr
                current_lr = self.hparams.optimizer.optim.param_groups[0]["lr"]

            self.hparams.train_logger.log_stats(
                stats_meta={"epoch": epoch, "lr": current_lr},
                train_stats=self.train_stats,
                valid_stats=stage_stats,
            )
            # either keep every checkpoint or only the best (lowest si-snr)
            if (
                hasattr(self.hparams, "save_all_checkpoints")
                and self.hparams.save_all_checkpoints
            ):
                self.checkpointer.save_checkpoint(
                    meta={"si-snr": stage_stats["si-snr"]}
                )
            else:
                self.checkpointer.save_and_keep_only(
                    meta={"si-snr": stage_stats["si-snr"]}, min_keys=["si-snr"],
                )
        elif stage == sb.Stage.TEST:
            self.hparams.train_logger.log_stats(
                stats_meta={"Epoch loaded": self.hparams.epoch_counter.current},
                test_stats=stage_stats,
            )
    def add_speed_perturb(self, targets, targ_lens):
        """Adds speed perturbation and random_shift to the input signals.

        Arguments
        ---------
        targets : torch.Tensor
            Clean sources; indexed as targets[:, :, i] per source
            ([batch, time, num_spks] — TODO confirm).
        targ_lens : torch.Tensor
            Relative lengths forwarded to the speed perturbator.

        Returns
        -------
        mix, targets : torch.Tensor
            Re-summed mixture and the perturbed, length-aligned sources.
        """
        min_len = -1
        recombine = False

        if self.hparams.use_speedperturb:
            # Performing speed change (independently on each source)
            new_targets = []
            recombine = True

            for i in range(targets.shape[-1]):
                new_target = self.hparams.speedperturb(
                    targets[:, :, i], targ_lens
                )
                new_targets.append(new_target)
                # track the shortest perturbed signal for later truncation
                if i == 0:
                    min_len = new_target.shape[-1]
                else:
                    if new_target.shape[-1] < min_len:
                        min_len = new_target.shape[-1]

        if self.hparams.use_rand_shift:
            # Performing random_shift (independently on each source)
            # NOTE(review): reads `new_targets`, which only exists when
            # use_speedperturb is also enabled — confirm the hparams
            # always set both flags together.
            recombine = True
            for i in range(targets.shape[-1]):
                rand_shift = torch.randint(
                    self.hparams.min_shift, self.hparams.max_shift, (1,)
                )
                new_targets[i] = new_targets[i].to(self.device)
                new_targets[i] = torch.roll(
                    new_targets[i], shifts=(rand_shift[0],), dims=1
                )

        # Re-combination
        if recombine:
            if self.hparams.use_speedperturb:
                targets = torch.zeros(
                    targets.shape[0],
                    min_len,
                    targets.shape[-1],
                    device=targets.device,
                    dtype=torch.float,
                )
                for i, new_target in enumerate(new_targets):
                    targets[:, :, i] = new_targets[i][:, 0:min_len]

        mix = targets.sum(-1)
        return mix, targets
def cut_signals(self, mixture, targets):
"""This function selects a random segment of a given length withing the mixture.
The corresponding targets are selected accordingly"""
randstart = torch.randint(
0,
1 + max(0, mixture.shape[1] - self.hparams.training_signal_len),
(1,),
).item()
targets = targets[
:, randstart : randstart + self.hparams.training_signal_len, :
]
mixture = mixture[
:, randstart : randstart + self.hparams.training_signal_len
]
return mixture, targets
def reset_layer_recursively(self, layer):
"""Reinitializes the parameters of the neural networks"""
if hasattr(layer, "reset_parameters"):
layer.reset_parameters()
for child_layer in layer.modules():
if layer != child_layer:
self.reset_layer_recursively(child_layer)
    def save_results(self, test_data):
        """This script computes the SDR and SI-SNR metrics and saves
        them into a csv file (per-utterance rows plus a final average row)."""
        # This package is required for SDR computation
        from mir_eval.separation import bss_eval_sources

        # Create folders where to store audio
        save_file = os.path.join(self.hparams.output_folder, "test_results.csv")

        # Variable init
        all_sdrs = []
        all_sdrs_i = []
        all_sisnrs = []
        all_sisnrs_i = []
        csv_columns = ["snt_id", "sdr", "sdr_i", "si-snr", "si-snr_i"]

        test_loader = sb.dataio.dataloader.make_dataloader(
            test_data, **self.hparams.dataloader_opts
        )

        with open(save_file, "w") as results_csv:
            writer = csv.DictWriter(results_csv, fieldnames=csv_columns)
            writer.writeheader()

            # Loop over all test sentence
            with tqdm(test_loader, dynamic_ncols=True) as t:
                for i, batch in enumerate(t):

                    # Apply Separation
                    mixture, mix_len = batch.mix_sig
                    snt_id = batch.id
                    targets = [batch.s1_sig, batch.s2_sig]
                    if self.hparams.num_spks == 3:
                        targets.append(batch.s3_sig)

                    with torch.no_grad():
                        predictions, targets = self.compute_forward(
                            batch.mix_sig, targets, sb.Stage.TEST
                        )

                    # Compute SI-SNR
                    sisnr = self.compute_objectives(predictions, targets)

                    # Compute SI-SNR improvement: baseline uses the raw
                    # mixture replicated once per speaker
                    mixture_signal = torch.stack(
                        [mixture] * self.hparams.num_spks, dim=-1
                    )
                    mixture_signal = mixture_signal.to(targets.device)
                    sisnr_baseline = self.compute_objectives(
                        mixture_signal, targets
                    )
                    sisnr_i = sisnr - sisnr_baseline

                    # Compute SDR
                    sdr, _, _, _ = bss_eval_sources(
                        targets[0].t().cpu().numpy(),
                        predictions[0].t().detach().cpu().numpy(),
                    )

                    sdr_baseline, _, _, _ = bss_eval_sources(
                        targets[0].t().cpu().numpy(),
                        mixture_signal[0].t().detach().cpu().numpy(),
                    )

                    sdr_i = sdr.mean() - sdr_baseline.mean()

                    # Saving on a csv file (loss is negated: lower loss
                    # means higher si-snr)
                    row = {
                        "snt_id": snt_id[0],
                        "sdr": sdr.mean(),
                        "sdr_i": sdr_i,
                        "si-snr": -sisnr.item(),
                        "si-snr_i": -sisnr_i.item(),
                    }
                    writer.writerow(row)

                    # Metric Accumulation
                    all_sdrs.append(sdr.mean())
                    all_sdrs_i.append(sdr_i.mean())
                    all_sisnrs.append(-sisnr.item())
                    all_sisnrs_i.append(-sisnr_i.item())

                row = {
                    "snt_id": "avg",
                    "sdr": np.array(all_sdrs).mean(),
                    "sdr_i": np.array(all_sdrs_i).mean(),
                    "si-snr": np.array(all_sisnrs).mean(),
                    "si-snr_i": np.array(all_sisnrs_i).mean(),
                }
                writer.writerow(row)

        logger.info("Mean SISNR is {}".format(np.array(all_sisnrs).mean()))
        logger.info("Mean SISNRi is {}".format(np.array(all_sisnrs_i).mean()))
        logger.info("Mean SDR is {}".format(np.array(all_sdrs).mean()))
        logger.info("Mean SDRi is {}".format(np.array(all_sdrs_i).mean()))
    def save_audio(self, snt_id, mixture, targets, predictions):
        "saves the test audio (mixture, targets, and estimated sources) on disk"
        # Create output folder
        save_path = os.path.join(self.hparams.save_folder, "audio_results")
        if not os.path.exists(save_path):
            os.mkdir(save_path)

        for ns in range(self.hparams.num_spks):

            # Estimated source (peak-normalized before writing)
            signal = predictions[0, :, ns]
            signal = signal / signal.abs().max()
            save_file = os.path.join(
                save_path, "item{}_source{}hat.wav".format(snt_id, ns + 1)
            )
            torchaudio.save(
                save_file, signal.unsqueeze(0).cpu(), self.hparams.sample_rate
            )

            # Original source
            signal = targets[0, :, ns]
            signal = signal / signal.abs().max()
            save_file = os.path.join(
                save_path, "item{}_source{}.wav".format(snt_id, ns + 1)
            )
            torchaudio.save(
                save_file, signal.unsqueeze(0).cpu(), self.hparams.sample_rate
            )

        # Mixture
        signal = mixture[0][0, :]
        signal = signal / signal.abs().max()
        save_file = os.path.join(save_path, "item{}_mix.wav".format(snt_id))
        torchaudio.save(
            save_file, signal.unsqueeze(0).cpu(), self.hparams.sample_rate
        )
def dataio_prep(hparams):
    """Creates data processing pipeline.

    Builds train/valid/test DynamicItemDatasets from the csv files and
    attaches audio-reading pipelines for the mixture, the two sources, and
    the WHAM! noise (always added in this recipe).

    Returns
    -------
    train_data, valid_data, test_data : DynamicItemDataset
    """

    # 1. Define datasets
    train_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["train_data"],
        replacements={"data_root": hparams["data_folder"]},
    )
    valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["valid_data"],
        replacements={"data_root": hparams["data_folder"]},
    )
    test_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["test_data"],
        replacements={"data_root": hparams["data_folder"]},
    )
    datasets = [train_data, valid_data, test_data]

    # 2. Provide audio pipelines
    @sb.utils.data_pipeline.takes("mix_wav")
    @sb.utils.data_pipeline.provides("mix_sig")
    def audio_pipeline_mix(mix_wav):
        mix_sig = sb.dataio.dataio.read_audio(mix_wav)
        return mix_sig

    @sb.utils.data_pipeline.takes("s1_wav")
    @sb.utils.data_pipeline.provides("s1_sig")
    def audio_pipeline_s1(s1_wav):
        s1_sig = sb.dataio.dataio.read_audio(s1_wav)
        return s1_sig

    @sb.utils.data_pipeline.takes("s2_wav")
    @sb.utils.data_pipeline.provides("s2_sig")
    def audio_pipeline_s2(s2_wav):
        s2_sig = sb.dataio.dataio.read_audio(s2_wav)
        return s2_sig

    @sb.utils.data_pipeline.takes("noise_wav")
    @sb.utils.data_pipeline.provides("noise_sig")
    def audio_pipeline_noise(noise_wav):
        noise_sig = sb.dataio.dataio.read_audio(noise_wav)
        return noise_sig

    sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline_mix)
    sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline_s1)
    sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline_s2)

    print("Using the WHAM! noise in the data pipeline")
    sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline_noise)

    sb.dataio.dataset.set_output_keys(
        datasets, ["id", "mix_sig", "s1_sig", "s2_sig", "noise_sig"]
    )

    return train_data, valid_data, test_data
if __name__ == "__main__":
    # Load hyperparameters file with command-line overrides
    hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])
    with open(hparams_file) as fin:
        hparams = load_hyperpyyaml(fin, overrides)

    # Initialize ddp (useful only for multi-GPU DDP training)
    sb.utils.distributed.ddp_init_group(run_opts)

    # Logger info
    logger = logging.getLogger(__name__)

    # Create experiment directory
    sb.create_experiment_directory(
        experiment_directory=hparams["output_folder"],
        hyperparams_to_save=hparams_file,
        overrides=overrides,
    )

    # Check if wsj0_tr is set with dynamic mixing
    if hparams["dynamic_mixing"] and not os.path.exists(
        hparams["base_folder_dm"]
    ):
        print(
            "Please, specify a valid base_folder_dm folder when using dynamic mixing"
        )
        sys.exit(1)

    # Data preparation: build the train/valid/test csv manifests
    from prepare_data import prepare_wham_whamr_csv

    run_on_main(
        prepare_wham_whamr_csv,
        kwargs={
            "datapath": hparams["data_folder"],
            "savepath": hparams["save_folder"],
            "skip_prep": hparams["skip_prep"],
            "fs": hparams["sample_rate"],
        },
    )

    # if whamr, and we do speedaugment we need to prepare the csv file
    if "whamr" in hparams["data_folder"] and hparams["use_speedperturb"]:
        from prepare_data import create_whamr_rir_csv
        from create_whamr_rirs import create_rirs

        # If the Room Impulse Responses do not exist, we create them
        if not os.path.exists(hparams["rir_path"]):
            # Fixed garbled message (was "ing Room Impulse Responses...")
            print("Creating Room Impulse Responses...")
            run_on_main(
                create_rirs,
                kwargs={
                    "output_dir": hparams["rir_path"],
                    "sr": hparams["sample_rate"],
                },
            )
        run_on_main(
            create_whamr_rir_csv,
            kwargs={
                "datapath": hparams["rir_path"],
                "savepath": hparams["save_folder"],
            },
        )
        hparams["reverb"] = sb.processing.speech_augmentation.AddReverb(
            os.path.join(hparams["save_folder"], "whamr_rirs.csv")
        )

    # Create dataset objects
    if hparams["dynamic_mixing"]:
        from dynamic_mixing import dynamic_mix_data_prep  # noqa

        # if the base_folder for dm is not processed, preprocess them
        if "processed" not in hparams["base_folder_dm"]:
            # if the processed folder already exists we just use it,
            # otherwise we do the preprocessing
            processed_folder = (
                os.path.normpath(hparams["base_folder_dm"]) + "_processed"
            )
            if not os.path.exists(processed_folder):
                from recipes.WHAMandWHAMR.meta.preprocess_dynamic_mixing import (
                    resample_folder,
                )

                print("Resampling the base folder")
                run_on_main(
                    resample_folder,
                    kwargs={
                        "input_folder": hparams["base_folder_dm"],
                        "output_folder": processed_folder,
                        "fs": hparams["sample_rate"],
                        "regex": "**/*.wav",
                    },
                )
            else:
                print(
                    "Using the existing processed folder on the same directory as base_folder_dm"
                )
            # adjust the base_folder_dm path (was duplicated in both branches)
            hparams["base_folder_dm"] = processed_folder

        train_data = dynamic_mix_data_prep(
            tr_csv=hparams["train_data"],
            data_root_folder=hparams["data_folder"],
            base_folder_dm=hparams["base_folder_dm"],
            sample_rate=hparams["sample_rate"],
            num_spks=hparams["num_spks"],
            max_training_signal_len=hparams["training_signal_len"],
            batch_size=hparams["dataloader_opts"]["batch_size"],
            num_workers=hparams["dataloader_opts"]["num_workers"],
        )
        _, valid_data, test_data = dataio_prep(hparams)
    else:
        train_data, valid_data, test_data = dataio_prep(hparams)

    # Load pretrained model if pretrained_separator is present in the yaml
    if "pretrained_separator" in hparams:
        run_on_main(hparams["pretrained_separator"].collect_files)
        hparams["pretrained_separator"].load_collected()

    # Brain class initialization
    separator = Separation(
        modules=hparams["modules"],
        opt_class=hparams["optimizer"],
        hparams=hparams,
        run_opts=run_opts,
        checkpointer=hparams["checkpointer"],
    )

    # re-initialize the parameters if we don't use a pretrained model
    if "pretrained_separator" not in hparams:
        for module in separator.modules.values():
            separator.reset_layer_recursively(module)

    if not hparams["test_only"]:
        # Training
        separator.fit(
            separator.hparams.epoch_counter,
            train_data,
            valid_data,
            train_loader_kwargs=hparams["dataloader_opts"],
            valid_loader_kwargs=hparams["dataloader_opts"],
        )

    # Eval
    separator.evaluate(test_data, min_key="si-snr")
    separator.save_results(test_data)
| 25,297 | 36.039531 | 108 | py |
speechbrain | speechbrain-main/recipes/WHAMandWHAMR/meta/create_whamr_rirs.py | """
Adapted from the original WHAMR script to obtain the Room Impulse Responses
Authors
* Cem Subakan 2021
"""
import os
import pandas as pd
import argparse
import torchaudio
from wham_room import WhamRoom
from scipy.signal import resample_poly
import torch
from speechbrain.pretrained.fetching import fetch
from tqdm import tqdm
import pyroomacoustics
def create_rirs(output_dir, sr=8000):
    """
    This function creates the room impulse responses from the WHAMR! dataset
    The implementation is based on the scripts from http://wham.whisper.ai/

    Arguments:
    ------
    output_dir (str) : directory for saving the RIRs
    sr (int) : sampling rate with which we save

    Raises
    ------
    AssertionError : if the installed pyroomacoustics version is not 0.3.1
    OSError : if ``output_dir`` already exists (``os.makedirs`` without
        ``exist_ok``; the caller is expected to check existence first)
    """
    # The room simulation results are only reproducible with this exact
    # pyroomacoustics release.
    assert (
        pyroomacoustics.__version__ == "0.3.1"
    ), "The pyroomacoustics version needs to be 0.3.1"

    os.makedirs(output_dir)

    # Metadata csvs are fetched next to this script, under ./data
    metafilesdir = os.path.dirname(os.path.realpath(__file__))
    filelist = [
        "mix_2_spk_filenames_tr.csv",
        "mix_2_spk_filenames_cv.csv",
        "mix_2_spk_filenames_tt.csv",
        "reverb_params_tr.csv",
        "reverb_params_cv.csv",
        "reverb_params_tt.csv",
    ]

    savedir = os.path.join(metafilesdir, "data")
    # Download any missing metadata file from the HuggingFace model repo
    for fl in filelist:
        if not os.path.exists(os.path.join(savedir, fl)):
            fetch(
                "metadata/" + fl,
                "speechbrain/sepformer-whamr",
                savedir=savedir,
                save_filename=fl,
            )

    FILELIST_STUB = os.path.join(
        metafilesdir, "data", "mix_2_spk_filenames_{}.csv"
    )

    # NOTE(review): only the training split is generated here even though the
    # cv/tt metadata is fetched above — presumably intentional, confirm.
    SPLITS = ["tr"]

    reverb_param_stub = os.path.join(
        metafilesdir, "data", "reverb_params_{}.csv"
    )

    for splt in SPLITS:
        wsjmix_path = FILELIST_STUB.format(splt)
        wsjmix_df = pd.read_csv(wsjmix_path)

        reverb_param_path = reverb_param_stub.format(splt)
        reverb_param_df = pd.read_csv(reverb_param_path)

        utt_ids = wsjmix_df.output_filename.values

        for output_name in tqdm(utt_ids):
            # Row of room geometry / mic / source positions for this utterance
            utt_row = reverb_param_df[
                reverb_param_df["utterance_id"] == output_name
            ]
            room = WhamRoom(
                [
                    utt_row["room_x"].iloc[0],
                    utt_row["room_y"].iloc[0],
                    utt_row["room_z"].iloc[0],
                ],
                [
                    [
                        utt_row["micL_x"].iloc[0],
                        utt_row["micL_y"].iloc[0],
                        utt_row["mic_z"].iloc[0],
                    ],
                    [
                        utt_row["micR_x"].iloc[0],
                        utt_row["micR_y"].iloc[0],
                        utt_row["mic_z"].iloc[0],
                    ],
                ],
                [
                    utt_row["s1_x"].iloc[0],
                    utt_row["s1_y"].iloc[0],
                    utt_row["s1_z"].iloc[0],
                ],
                [
                    utt_row["s2_x"].iloc[0],
                    utt_row["s2_y"].iloc[0],
                    utt_row["s2_z"].iloc[0],
                ],
                utt_row["T60"].iloc[0],
            )
            room.generate_rirs()

            rir = room.rir_reverberant

            # One RIR per (mic i, source j) pair, saved as "{i}_{j}_<utt>.wav"
            for i, mics in enumerate(rir):
                for j, source in enumerate(mics):
                    # RIRs are simulated at 16 kHz; resample to the target sr
                    h = resample_poly(source, sr, 16000)
                    h_torch = torch.from_numpy(h).float().unsqueeze(0)

                    torchaudio.save(
                        os.path.join(
                            output_dir, "{}_{}_".format(i, j) + output_name,
                        ),
                        h_torch,
                        sr,
                    )
if __name__ == "__main__":
    # CLI entry point: generate the WHAMR! RIRs into the requested folder.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        "--output-dir",
        required=True,
        type=str,
        help="The output directory for saving the rirs for random augmentation style",
    )
    cli_args = arg_parser.parse_args()
    create_rirs(cli_args.output_dir)
| 4,052 | 27.542254 | 97 | py |
speechbrain | speechbrain-main/recipes/WHAMandWHAMR/meta/preprocess_dynamic_mixing.py | """
This script allows to resample a folder which contains audio files.
The files are parsed recursively. An exact copy of the folder is created,
with same structure but contained resampled audio files.
Resampling is performed by using sox through torchaudio.
Author
------
Samuele Cornell, 2020
"""
import os
import argparse
from pathlib import Path
import tqdm
import torchaudio
import glob
# from oct2py import octave
from scipy import signal
import numpy as np
import torch
# Command-line interface, built at module level so both the entry point below
# and any importer see the same argument schema.
# NOTE(review): the string below is passed positionally, i.e. as ``prog``,
# not as ``description`` — kept as-is to preserve the --help output.
parser = argparse.ArgumentParser(
    "utility for resampling all audio files in a folder recursively. "
    "It copies --input_folder to --output_folder and "
    "resamples all audio files with specified format to --fs."
)
parser.add_argument("--input_folder", type=str, required=True)
parser.add_argument("--output_folder", type=str, required=True)
# type was ``str`` while the default was the int 8000; parse as int directly
# (callers that wrap the value in int() keep working).
parser.add_argument(
    "--fs", type=int, default=8000, help="this is the target sampling frequency"
)
parser.add_argument("--regex", type=str, default="**/*.wav")
def resample_folder(input_folder, output_folder, fs, regex):
    """Resamples the wav files within an input folder.

    Each matched file is resampled to ``fs``, peak-normalized to 1.0, and
    written under ``output_folder`` mirroring the input directory structure.
    The original peak value is embedded in the output filename
    (``<stem>_peak_<value>.wav``), presumably so downstream mixing can undo
    the normalization — TODO confirm against the consumer.

    Arguments
    ---------
    input_folder : path
        Path of the folder to resample.
    output_folder : path
        Path of the output folder with the resampled data.
    fs : int
        Target sampling frequency.
    regex : str
        Glob pattern (relative to ``input_folder``) used to select files.
    """
    # filedir = os.path.dirname(os.path.realpath(__file__))
    # octave.addpath(filedir)
    # add the matlab functions to octave dir here
    files = glob.glob(os.path.join(input_folder, regex), recursive=True)
    for f in tqdm.tqdm(files):
        audio, fs_read = torchaudio.load(f)
        # keep only the first channel as a numpy vector
        audio = audio[0].numpy()
        audio = signal.resample_poly(audio, fs, fs_read)
        # tmp = octave.activlev(audio.tolist(), fs, "n")
        # audio, _ = tmp[:-1].squeeze(), tmp[-1]
        # peak-normalize; the peak is recorded in the output filename below
        peak = np.max(np.abs(audio))
        audio = audio / peak
        audio = torch.from_numpy(audio).float()
        relative_path = os.path.join(
            Path(f).relative_to(Path(input_folder)).parent,
            Path(f).relative_to(Path(input_folder)).stem
            + "_peak_{}.wav".format(peak),
        )
        # recreate the source sub-directory layout in the output folder
        os.makedirs(
            Path(
                os.path.join(
                    output_folder, Path(f).relative_to(Path(input_folder))
                )
            ).parent,
            exist_ok=True,
        )
        torchaudio.save(
            os.path.join(output_folder, relative_path),
            audio.reshape(1, -1),
            fs,
        )
if __name__ == "__main__":
    # Parse the module-level CLI and run the resampling job.
    cli = parser.parse_args()
    resample_folder(
        cli.input_folder, cli.output_folder, int(cli.fs), cli.regex
    )
| 2,732 | 27.175258 | 80 | py |
speechbrain | speechbrain-main/recipes/WHAMandWHAMR/enhancement/dynamic_mixing.py | import speechbrain as sb
import numpy as np
import torch
import torchaudio
import glob
import os
from pathlib import Path
import random
from speechbrain.processing.signal_processing import rescale
from speechbrain.dataio.batch import PaddedBatch
"""
The functions to implement Dynamic Mixing For SpeechSeparation
Authors
* Samuele Cornell 2021
* Cem Subakan 2021
"""
def build_spk_hashtable(base_folder_dm, sample_rate):
    """Builds a dictionary of speaker-utterance pairs for dynamic mixing.

    Arguments
    ---------
    base_folder_dm : str
        Base folder for dynamic mixing (scanned recursively for wav files).
    sample_rate : int
        Expected sampling frequency in Hz; every file found is asserted to
        match it.

    Returns
    -------
    spk_hashtable : dict
        Maps a 3-character speaker id to the list of its utterance paths.
    spk_weights : list
        Utterance count per speaker (insertion order of the dict), used
        downstream as sampling weights.
    """
    wsj0_utterances = glob.glob(
        os.path.join(base_folder_dm, "**/*.wav"), recursive=True
    )

    spk_hashtable = {}
    for utt in wsj0_utterances:
        # e.g. 2speakers/wav8k/min/tr/mix/019o031a_0.27588_01vo030q_-0.27588.wav
        # the first three characters of the file stem are the speaker id
        spk_id = Path(utt).stem[:3]
        assert torchaudio.info(utt).sample_rate == sample_rate

        # single lookup instead of the membership-test + index anti-pattern
        spk_hashtable.setdefault(spk_id, []).append(utt)

    # calculate weights for each speaker (len of list of utterances)
    spk_weights = [len(utts) for utts in spk_hashtable.values()]

    return spk_hashtable, spk_weights
def get_wham_noise_filenames(data_root_folder, sample_rate):
    """Lists the WHAM! training-noise wav files used in dynamic mixing.

    data_root_folder (str) : top folder of the WHAM!/WHAMR! dataset
    sample_rate (int) : sampling rate in Hz (8000 or 16000)
    """
    # Noise lives in a rate-specific sub-folder of the training split.
    rate_to_subdir = {
        8000: "wav8k/min/tr/noise/",
        16000: "wav16k/min/tr/noise/",
    }
    if sample_rate not in rate_to_subdir:
        raise ValueError("Unsupported Sampling Rate")

    noise_path = rate_to_subdir[sample_rate]
    return glob.glob(os.path.join(data_root_folder, noise_path, "*.wav"))
def dynamic_mix_data_prep(
    tr_csv,
    data_root_folder,
    base_folder_dm,
    sample_rate,
    num_spks,
    max_training_signal_len,
    batch_size=1,
    num_workers=1,
):
    """
    Dynamic mixing for WSJ0-2/3Mix and WHAM!/WHAMR!

    Builds a training DataLoader whose items are created on the fly: random
    speakers are drawn, gain-randomized, summed, and (for WHAM!-style data)
    corrupted with a random noise file.

    tr_csv (str) : the system path for the csv
    data_root_folder (str) : the system path for the root folder of the WHAM! / WHAMR! dataset
    base_folder_dm (str) : the system path for the wsj0 root folder
    sample_rate (int) : sampling frequency in Hz
    num_spks (int) : number of speakers (2 or 3)
    max_training_signal_len (int) : upper limit for the max_training_signal_len (in number of samples)
    batch_size (int) : batch_size
    num_workers (int) : number of workers for the dataloader
    """
    # 1. Define datasets
    train_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=tr_csv, replacements={"data_root": data_root_folder},
    )

    # we build a dictionary where keys are speaker ids and entries are lists
    # of utterance files of that speaker
    spk_hashtable, spk_weights = build_spk_hashtable(
        base_folder_dm=base_folder_dm, sample_rate=sample_rate
    )

    spk_list = [x for x in spk_hashtable.keys()]
    # normalize counts into a probability distribution for np.random.choice
    spk_weights = [x / sum(spk_weights) for x in spk_weights]

    # WHAM!/WHAMR! detection is done on the folder name — TODO confirm the
    # dataset root is always named accordingly.
    if "wham" in Path(data_root_folder).stem:
        noise_files = get_wham_noise_filenames(data_root_folder, sample_rate)

    @sb.utils.data_pipeline.takes("mix_wav")
    @sb.utils.data_pipeline.provides(
        "mix_sig", "s1_sig", "s2_sig", "s3_sig", "noise_sig"
    )
    def audio_pipeline(
        mix_wav,
    ):  # this is dummy --> it means one epoch will be same as without dynamic mixing
        """
        This audio pipeline defines the compute graph for dynamic mixing
        """
        # draw num_spks distinct speakers, weighted by utterance count
        speakers = np.random.choice(
            spk_list, num_spks, replace=False, p=spk_weights
        )

        if "wham" in Path(data_root_folder).stem:
            noise_file = np.random.choice(noise_files, 1, replace=False)

            noise, fs_read = torchaudio.load(noise_file[0])
            noise = noise.squeeze()

        # select one utterance per chosen speaker
        sources = []
        first_lvl = None

        spk_files = [
            np.random.choice(spk_hashtable[spk], 1, False)[0]
            for spk in speakers
        ]

        # common crop length: shortest utterance, capped by the max signal len
        minlen = min(
            *[torchaudio.info(x).num_frames for x in spk_files],
            max_training_signal_len,
        )

        for i, spk_file in enumerate(spk_files):
            # select random offset
            length = torchaudio.info(spk_file).num_frames
            start = 0
            stop = length
            if length > minlen:  # take a random window
                start = np.random.randint(0, length - minlen)
                stop = start + minlen

            tmp, fs_read = torchaudio.load(
                spk_file, frame_offset=start, num_frames=stop - start,
            )

            tmp = tmp[0]  # * peak # remove channel dim and normalize

            # first speaker sets the reference level; the others are placed
            # relative to it (constants follow the WHAM! mixing recipe —
            # TODO confirm against the original scripts)
            if i == 0:
                gain = np.clip(random.normalvariate(-27.43, 2.57), -45, 0)
                tmp = rescale(tmp, torch.tensor(len(tmp)), gain, scale="dB")
                first_lvl = gain
            else:
                gain = np.clip(
                    first_lvl + random.normalvariate(-2.51, 2.66), -45, 0
                )
                tmp = rescale(tmp, torch.tensor(len(tmp)), gain, scale="dB")
            sources.append(tmp)

        # we mix the sources together
        sources = torch.stack(sources)
        mixture = torch.sum(sources, 0)

        if "wham" in Path(data_root_folder).stem:
            len_noise = len(noise)
            len_mix = len(mixture)
            min_len = min(len_noise, len_mix)
            mixture = mixture[:min_len] + noise[:min_len]

        # rescale so the loudest signal peaks at 0.9 to avoid clipping
        max_amp = max(
            torch.abs(mixture).max().item(),
            *[x.item() for x in torch.abs(sources).max(dim=-1)[0]],
        )
        mix_scaling = 1 / max_amp * 0.9
        sources = mix_scaling * sources
        mixture = mix_scaling * mixture

        yield mixture
        for i in range(num_spks):
            yield sources[i]

        # pad the provides list up to 3 speakers with None
        for i in range(num_spks, 3):
            yield None

        # If the number of speakers is 2, yield None for the 3rd speaker
        if "wham" in Path(data_root_folder).stem:
            mean_source_lvl = sources.abs().mean()
            mean_noise_lvl = noise.abs().mean()
            noise = (mean_source_lvl / mean_noise_lvl) * noise
            yield noise
        else:
            yield None

    sb.dataio.dataset.add_dynamic_item([train_data], audio_pipeline)
    sb.dataio.dataset.set_output_keys(
        [train_data],
        ["id", "mix_sig", "s1_sig", "s2_sig", "s3_sig", "noise_sig"],
    )

    # reseed each dataloader worker from os.urandom so workers do not draw
    # identical random mixtures
    train_data = torch.utils.data.DataLoader(
        train_data,
        batch_size=batch_size,
        num_workers=num_workers,
        collate_fn=PaddedBatch,
        worker_init_fn=lambda x: np.random.seed(
            int.from_bytes(os.urandom(4), "little") + x
        ),
    )
    return train_data
| 7,223 | 30.964602 | 107 | py |
speechbrain | speechbrain-main/recipes/WHAMandWHAMR/enhancement/train.py | #!/usr/bin/env/python3
"""Recipe for training a neural speech separation system on WHAM! and WHAMR!
datasets. The system employs an encoder, a decoder, and a masking network.
To run this recipe, do the following:
> python train.py hparams/sepformer-wham.yaml --data_folder /your_path/wham_original
> python train.py hparams/sepformer-whamr.yaml --data_folder /your_path/whamr
The experiment file is flexible enough to support different neural
networks. By properly changing the parameter files, you can try
different architectures.
Authors
* Cem Subakan 2020
* Mirco Ravanelli 2020
* Samuele Cornell 2020
* Mirko Bronzi 2020
* Jianyuan Zhong 2020
"""
import os
import sys
import torch
import torch.nn.functional as F
import torchaudio
import speechbrain as sb
import speechbrain.nnet.schedulers as schedulers
from speechbrain.utils.distributed import run_on_main
from torch.cuda.amp import autocast
from hyperpyyaml import load_hyperpyyaml
import numpy as np
from tqdm import tqdm
import csv
import logging
from speechbrain.processing.features import spectral_magnitude
from speechbrain.utils.metric_stats import MetricStats
from pesq import pesq
# Define training procedure
class Separation(sb.Brain):
    """Brain subclass implementing training/evaluation for the WHAM!/WHAMR!
    enhancement recipe: mask-based separation with optional frequency-domain
    processing (``self.use_freq_domain``), speed-perturbation/reverb/noise
    augmentation at train time, and PESQ tracking at validation/test time."""

    def compute_forward(self, mix, targets, stage, noise=None):
        """Forward computations from the mixture to the separated signals."""

        # Unpack lists and put tensors in the right device
        mix, mix_lens = mix
        mix, mix_lens = mix.to(self.device), mix_lens.to(self.device)

        # Convert targets to tensor of shape [batch, time, num_spks]
        targets = torch.cat(
            [targets[i][0].unsqueeze(-1) for i in range(self.hparams.num_spks)],
            dim=-1,
        ).to(self.device)

        # Add speech distortions (train only; the mixture is re-synthesized
        # from the augmented targets plus noise)
        if stage == sb.Stage.TRAIN:
            with torch.no_grad():
                if self.hparams.use_speedperturb or self.hparams.use_rand_shift:
                    mix, targets = self.add_speed_perturb(targets, mix_lens)

                    if "whamr" in self.hparams.data_folder:
                        try:
                            targets_rev = [
                                self.hparams.reverb(targets[:, :, i], None)
                                for i in range(self.hparams.num_spks)
                            ]
                        except Exception:
                            # best-effort: keep dry targets if reverb fails
                            print("reverb error, not adding reverb")
                            targets_rev = [
                                targets[:, :, i]
                                for i in range(self.hparams.num_spks)
                            ]
                        targets_rev = torch.stack(targets_rev, dim=-1)
                        mix = targets_rev.sum(-1)

                        # if we do not dereverberate, we set the targets to be reverberant
                        if not self.hparams.dereverberate:
                            targets = targets_rev
                    else:
                        mix = targets.sum(-1)

                    noise = noise.to(self.device)
                    len_noise = noise.shape[1]
                    len_mix = mix.shape[1]
                    min_len = min(len_noise, len_mix)

                    # add the noise
                    mix = mix[:, :min_len] + noise[:, :min_len]

                    # fix the length of targets also
                    targets = targets[:, :min_len, :]

                if self.hparams.use_wavedrop:
                    mix = self.hparams.wavedrop(mix, mix_lens)

                if self.hparams.limit_training_signal_len:
                    mix, targets = self.cut_signals(mix, targets)

        # Separation
        if self.use_freq_domain:
            # spectral masking; resynth maps the masked magnitude back to wav
            mix_w = self.compute_feats(mix)
            est_mask = self.modules.masknet(mix_w)

            sep_h = mix_w * est_mask
            est_source = self.hparams.resynth(torch.expm1(sep_h), mix)
        else:
            # time-domain masking with learned encoder/decoder
            mix_w = self.hparams.Encoder(mix)
            est_mask = self.modules.masknet(mix_w)

            mix_w = torch.stack([mix_w] * self.hparams.num_spks)
            sep_h = mix_w * est_mask
            est_source = torch.cat(
                [
                    self.hparams.Decoder(sep_h[i]).unsqueeze(-1)
                    for i in range(self.hparams.num_spks)
                ],
                dim=-1,
            )

        # T changed after conv1d in encoder, fix it here
        T_origin = mix.size(1)
        T_est = est_source.size(1)
        est_source = est_source.squeeze(-1)
        if T_origin > T_est:
            est_source = F.pad(est_source, (0, T_origin - T_est))
        else:
            est_source = est_source[:, :T_origin]

        return [est_source, sep_h], targets.squeeze(-1)

    def compute_feats(self, wavs):
        """Feature computation pipeline: encoder -> magnitude -> log1p"""
        feats = self.hparams.Encoder(wavs)
        feats = spectral_magnitude(feats, power=0.5)
        feats = torch.log1p(feats)
        return feats

    def compute_objectives(self, predictions, targets):
        """Computes the si-snr loss (time domain) or the spectral loss
        (frequency domain), depending on ``self.use_freq_domain``."""
        predicted_wavs, predicted_specs = predictions

        if self.use_freq_domain:
            target_specs = self.compute_feats(targets)
            return self.hparams.loss(target_specs, predicted_specs)
        else:
            return self.hparams.loss(
                targets.unsqueeze(-1), predicted_wavs.unsqueeze(-1)
            )

    def fit_batch(self, batch):
        """Trains one batch"""
        # Unpacking batch list
        mixture = batch.mix_sig
        targets = [batch.s1_sig, batch.s2_sig]
        noise = batch.noise_sig[0]

        if self.auto_mix_prec:
            # mixed-precision path with gradient scaling
            with autocast():
                predictions, targets = self.compute_forward(
                    mixture, targets, sb.Stage.TRAIN, noise
                )
                loss = self.compute_objectives(predictions, targets)

                # hard threshold the easy dataitems
                if self.hparams.threshold_byloss:
                    th = self.hparams.threshold
                    loss_to_keep = loss[loss > th]
                    if loss_to_keep.nelement() > 0:
                        loss = loss_to_keep.mean()
                else:
                    loss = loss.mean()

            if (
                loss < self.hparams.loss_upper_lim and loss.nelement() > 0
            ):  # the fix for computational problems
                self.scaler.scale(loss).backward()
                if self.hparams.clip_grad_norm >= 0:
                    self.scaler.unscale_(self.optimizer)
                    torch.nn.utils.clip_grad_norm_(
                        self.modules.parameters(), self.hparams.clip_grad_norm,
                    )
                self.scaler.step(self.optimizer)
                self.scaler.update()
            else:
                self.nonfinite_count += 1
                logger.info(
                    "infinite loss or empty loss! it happened {} times so far - skipping this batch".format(
                        self.nonfinite_count
                    )
                )
                loss.data = torch.tensor(0).to(self.device)
        else:
            # full-precision path (same logic without the scaler)
            predictions, targets = self.compute_forward(
                mixture, targets, sb.Stage.TRAIN, noise
            )
            loss = self.compute_objectives(predictions, targets)

            if self.hparams.threshold_byloss:
                th = self.hparams.threshold
                loss_to_keep = loss[loss > th]
                if loss_to_keep.nelement() > 0:
                    loss = loss_to_keep.mean()
            else:
                loss = loss.mean()

            if (
                loss < self.hparams.loss_upper_lim and loss.nelement() > 0
            ):  # the fix for computational problems
                loss.backward()
                if self.hparams.clip_grad_norm >= 0:
                    torch.nn.utils.clip_grad_norm_(
                        self.modules.parameters(), self.hparams.clip_grad_norm
                    )
                self.optimizer.step()
            else:
                self.nonfinite_count += 1
                logger.info(
                    "infinite loss or empty loss! it happened {} times so far - skipping this batch".format(
                        self.nonfinite_count
                    )
                )
                loss.data = torch.tensor(0).to(self.device)
        self.optimizer.zero_grad()

        return loss.detach().cpu()

    def evaluate_batch(self, batch, stage):
        """Computations needed for validation/test batches"""
        snt_id = batch.id
        mixture = batch.mix_sig
        targets = [batch.s1_sig, batch.s2_sig]

        with torch.no_grad():
            predictions, targets = self.compute_forward(mixture, targets, stage)
            loss = self.compute_objectives(predictions, targets).mean()

        if stage != sb.Stage.TRAIN:
            self.pesq_metric.append(
                ids=batch.id, predict=predictions[0].cpu(), target=targets.cpu()
            )

        # Manage audio file saving
        if stage == sb.Stage.TEST and self.hparams.save_audio:
            # n_audio_to_save, if present, caps how many items are written
            if hasattr(self.hparams, "n_audio_to_save"):
                if self.hparams.n_audio_to_save > 0:
                    self.save_audio(snt_id[0], mixture, targets, predictions[0])
                    self.hparams.n_audio_to_save += -1
            else:
                self.save_audio(snt_id[0], mixture, targets, predictions[0])

        return loss.detach()

    def on_stage_start(self, stage, epoch=None):
        """Gets called at the beginning of each epoch"""
        if stage != sb.Stage.TRAIN:

            # Define function taking (prediction, target) for parallel eval
            def pesq_eval(pred_wav, target_wav):
                """Computes the PESQ evaluation metric"""
                psq_mode = "wb" if self.hparams.sample_rate == 16000 else "nb"
                try:
                    return pesq(
                        fs=self.hparams.sample_rate,
                        ref=target_wav.numpy(),
                        deg=pred_wav.numpy(),
                        mode=psq_mode,
                    )
                except Exception:
                    # PESQ can fail on degenerate signals; score them as 0
                    print("pesq encountered an error for this data item")
                    return 0

            self.pesq_metric = MetricStats(
                metric=pesq_eval, n_jobs=1, batch_eval=False
            )

    def on_stage_end(self, stage, stage_loss, epoch):
        """Gets called at the end of a epoch."""
        # Compute/store important stats
        stage_stats = {"loss": stage_loss}
        if stage == sb.Stage.TRAIN:
            self.train_stats = stage_stats
        else:
            stats = {
                "loss": stage_loss,
                "pesq": self.pesq_metric.summarize("average"),
            }

        # Perform end-of-iteration things, like annealing, logging, etc.
        if stage == sb.Stage.VALID:

            # Learning rate annealing
            if isinstance(
                self.hparams.lr_scheduler, schedulers.ReduceLROnPlateau
            ):
                current_lr, next_lr = self.hparams.lr_scheduler(
                    [self.optimizer], epoch, stage_loss
                )
                schedulers.update_learning_rate(self.optimizer, next_lr)
            else:
                # if we do not use the reducelronplateau, we do not change the lr
                current_lr = self.hparams.optimizer.optim.param_groups[0]["lr"]

            self.hparams.train_logger.log_stats(
                stats_meta={"epoch": epoch, "lr": current_lr},
                train_stats=self.train_stats,
                valid_stats=stats,
            )
            # checkpoints are selected (or all kept) by best PESQ
            if (
                hasattr(self.hparams, "save_all_checkpoints")
                and self.hparams.save_all_checkpoints
            ):
                self.checkpointer.save_checkpoint(meta={"pesq": stats["pesq"]})
            else:
                self.checkpointer.save_and_keep_only(
                    meta={"pesq": stats["pesq"]}, max_keys=["pesq"],
                )
        elif stage == sb.Stage.TEST:
            self.hparams.train_logger.log_stats(
                stats_meta={"Epoch loaded": self.hparams.epoch_counter.current},
                test_stats=stats,
            )

    def add_speed_perturb(self, targets, targ_lens):
        """Adds speed perturbation and random_shift to the input signals"""

        min_len = -1
        recombine = False

        if self.hparams.use_speedperturb:
            # Performing speed change (independently on each source)
            new_targets = []
            recombine = True

            for i in range(targets.shape[-1]):
                new_target = self.hparams.speedperturb(
                    targets[:, :, i], targ_lens
                )
                new_targets.append(new_target)
                # track the shortest perturbed signal for later truncation
                if i == 0:
                    min_len = new_target.shape[-1]
                else:
                    if new_target.shape[-1] < min_len:
                        min_len = new_target.shape[-1]

            if self.hparams.use_rand_shift:
                # Performing random_shift (independently on each source)
                recombine = True
                for i in range(targets.shape[-1]):
                    rand_shift = torch.randint(
                        self.hparams.min_shift, self.hparams.max_shift, (1,)
                    )
                    new_targets[i] = new_targets[i].to(self.device)
                    new_targets[i] = torch.roll(
                        new_targets[i], shifts=(rand_shift[0],), dims=1
                    )

            # Re-combination
            if recombine:
                if self.hparams.use_speedperturb:
                    targets = torch.zeros(
                        targets.shape[0],
                        min_len,
                        targets.shape[-1],
                        device=targets.device,
                        dtype=torch.float,
                    )
                for i, new_target in enumerate(new_targets):
                    targets[:, :, i] = new_targets[i][:, 0:min_len]

        # the mixture is re-synthesized as the sum of the perturbed sources
        mix = targets.sum(-1)
        return mix, targets

    def cut_signals(self, mixture, targets):
        """This function selects a random segment of a given length within the mixture.
        The corresponding targets are selected accordingly"""
        randstart = torch.randint(
            0,
            1 + max(0, mixture.shape[1] - self.hparams.training_signal_len),
            (1,),
        ).item()
        targets = targets[
            :, randstart : randstart + self.hparams.training_signal_len, :
        ]
        mixture = mixture[
            :, randstart : randstart + self.hparams.training_signal_len
        ]
        return mixture, targets

    def reset_layer_recursively(self, layer):
        """Reinitializes the parameters of the neural networks"""
        if hasattr(layer, "reset_parameters"):
            layer.reset_parameters()
        for child_layer in layer.modules():
            if layer != child_layer:
                self.reset_layer_recursively(child_layer)

    def save_results(self, test_data):
        """This script computes the SDR and SI-SNR metrics and saves
        them into a csv file"""

        # This package is required for SDR computation
        from mir_eval.separation import bss_eval_sources

        # Create folders where to store audio
        save_file = os.path.join(self.hparams.output_folder, "test_results.csv")

        # Variable init
        all_sdrs = []
        all_sdrs_i = []
        all_sisnrs = []
        all_sisnrs_i = []
        all_pesqs = []
        csv_columns = ["snt_id", "sdr", "sdr_i", "si-snr", "si-snr_i", "pesq"]

        test_loader = sb.dataio.dataloader.make_dataloader(
            test_data, **self.hparams.dataloader_opts
        )

        with open(save_file, "w") as results_csv:
            writer = csv.DictWriter(results_csv, fieldnames=csv_columns)
            writer.writeheader()

            # Loop over all test sentence
            with tqdm(test_loader, dynamic_ncols=True) as t:
                for i, batch in enumerate(t):

                    # Apply Separation
                    mixture, mix_len = batch.mix_sig
                    snt_id = batch.id
                    targets = [batch.s1_sig, batch.s2_sig]
                    if self.hparams.num_spks == 3:
                        targets.append(batch.s3_sig)

                    with torch.no_grad():
                        predictions, targets = self.compute_forward(
                            batch.mix_sig, targets, sb.Stage.TEST
                        )

                    # Compute SI-SNR
                    sisnr = self.compute_objectives(predictions, targets)

                    # Compute SI-SNR improvement: baseline scores the raw
                    # mixture against the targets
                    mixture_signal = torch.stack(
                        [mixture] * self.hparams.num_spks, dim=-1
                    )
                    mixture_signal = mixture_signal.to(targets.device)
                    mix_w = self.compute_feats(mixture_signal.squeeze(-1))
                    sisnr_baseline = self.compute_objectives(
                        [mixture_signal.squeeze(-1), mix_w], targets
                    )
                    sisnr_i = sisnr - sisnr_baseline

                    # Compute SDR
                    sdr, _, _, _ = bss_eval_sources(
                        targets[0].t().cpu().numpy(),
                        predictions[0][0].t().detach().cpu().numpy(),
                    )

                    sdr_baseline, _, _, _ = bss_eval_sources(
                        targets[0].t().cpu().numpy(),
                        mixture_signal[0].t().detach().cpu().numpy(),
                    )

                    sdr_i = sdr.mean() - sdr_baseline.mean()

                    # Compute PESQ
                    psq_mode = (
                        "wb" if self.hparams.sample_rate == 16000 else "nb"
                    )
                    psq = pesq(
                        self.hparams.sample_rate,
                        targets.squeeze().cpu().numpy(),
                        predictions[0].squeeze().cpu().numpy(),
                        mode=psq_mode,
                    )

                    # Saving on a csv file (losses are negated: the loss is
                    # -SI-SNR, the report stores SI-SNR)
                    row = {
                        "snt_id": snt_id[0],
                        "sdr": sdr.mean(),
                        "sdr_i": sdr_i,
                        "si-snr": -sisnr.item(),
                        "si-snr_i": -sisnr_i.item(),
                        "pesq": psq,
                    }
                    writer.writerow(row)

                    # Metric Accumulation
                    all_sdrs.append(sdr.mean())
                    all_sdrs_i.append(sdr_i.mean())
                    all_sisnrs.append(-sisnr.item())
                    all_sisnrs_i.append(-sisnr_i.item())
                    all_pesqs.append(psq)

                # final summary row with dataset-wide averages
                row = {
                    "snt_id": "avg",
                    "sdr": np.array(all_sdrs).mean(),
                    "sdr_i": np.array(all_sdrs_i).mean(),
                    "si-snr": np.array(all_sisnrs).mean(),
                    "si-snr_i": np.array(all_sisnrs_i).mean(),
                    "pesq": np.array(all_pesqs).mean(),
                }
                writer.writerow(row)

        logger.info("Mean SISNR is {}".format(np.array(all_sisnrs).mean()))
        logger.info("Mean SISNRi is {}".format(np.array(all_sisnrs_i).mean()))
        logger.info("Mean SDR is {}".format(np.array(all_sdrs).mean()))
        logger.info("Mean SDRi is {}".format(np.array(all_sdrs_i).mean()))
        logger.info("Mean PESQ {}".format(np.array(all_pesqs).mean()))

    def save_audio(self, snt_id, mixture, targets, predictions):
        "saves the test audio (mixture, targets, and estimated sources) on disk"
        # Create output folder
        save_path = os.path.join(self.hparams.save_folder, "audio_results")
        if not os.path.exists(save_path):
            os.mkdir(save_path)

        # Estimated source (peak-normalized before writing)
        signal = predictions[0, :]
        signal = signal / signal.abs().max()
        save_file = os.path.join(
            save_path, "item{}_sourcehat.wav".format(snt_id)
        )
        torchaudio.save(
            save_file, signal.unsqueeze(0).cpu(), self.hparams.sample_rate
        )

        # Original source
        signal = targets[0, :]
        signal = signal / signal.abs().max()
        save_file = os.path.join(save_path, "item{}_source.wav".format(snt_id))
        torchaudio.save(
            save_file, signal.unsqueeze(0).cpu(), self.hparams.sample_rate
        )

        # Mixture
        signal = mixture[0][0, :]
        signal = signal / signal.abs().max()
        save_file = os.path.join(save_path, "item{}_mix.wav".format(snt_id))
        torchaudio.save(
            save_file, signal.unsqueeze(0).cpu(), self.hparams.sample_rate
        )
def dataio_prep(hparams):
    """Creates data processing pipeline"""

    # 1. Define one dynamic-item dataset per split; all share the same
    # $data_root replacement so csv paths resolve against data_folder.
    replacements = {"data_root": hparams["data_folder"]}

    def _load_split(csv_key):
        # One-line helper: build a DynamicItemDataset from a manifest csv.
        return sb.dataio.dataset.DynamicItemDataset.from_csv(
            csv_path=hparams[csv_key], replacements=replacements,
        )

    train_data = _load_split("train_data")
    valid_data = _load_split("valid_data")
    test_data = _load_split("test_data")
    datasets = [train_data, valid_data, test_data]

    # 2. Provide audio pipelines, one reader per wav column.
    def _make_audio_pipeline(wav_key, sig_key):
        # One-line helper: decorated reader mapping wav_key -> sig_key.
        @sb.utils.data_pipeline.takes(wav_key)
        @sb.utils.data_pipeline.provides(sig_key)
        def audio_pipeline(wav_path):
            return sb.dataio.dataio.read_audio(wav_path)

        return audio_pipeline

    for wav_key, sig_key in [
        ("mix_wav", "mix_sig"),
        ("s1_wav", "s1_sig"),
        ("s2_wav", "s2_sig"),
    ]:
        sb.dataio.dataset.add_dynamic_item(
            datasets, _make_audio_pipeline(wav_key, sig_key)
        )

    print("Using the WHAM! noise in the data pipeline")
    sb.dataio.dataset.add_dynamic_item(
        datasets, _make_audio_pipeline("noise_wav", "noise_sig")
    )

    sb.dataio.dataset.set_output_keys(
        datasets, ["id", "mix_sig", "s1_sig", "s2_sig", "noise_sig"]
    )

    return train_data, valid_data, test_data
if __name__ == "__main__":
    # Load hyperparameters file with command-line overrides
    hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])
    with open(hparams_file) as fin:
        hparams = load_hyperpyyaml(fin, overrides)
    # Initialize ddp (useful only for multi-GPU DDP training)
    sb.utils.distributed.ddp_init_group(run_opts)
    # Logger info
    logger = logging.getLogger(__name__)
    # Create experiment directory
    sb.create_experiment_directory(
        experiment_directory=hparams["output_folder"],
        hyperparams_to_save=hparams_file,
        overrides=overrides,
    )
    # Check if wsj0_tr is set with dynamic mixing
    if hparams["dynamic_mixing"] and not os.path.exists(
        hparams["base_folder_dm"]
    ):
        print(
            "Please, specify a valid base_folder_dm folder when using dynamic mixing"
        )
        sys.exit(1)
    # Data preparation (csv creation); run only on the main process.
    from prepare_data import prepare_wham_whamr_csv
    run_on_main(
        prepare_wham_whamr_csv,
        kwargs={
            "datapath": hparams["data_folder"],
            "savepath": hparams["save_folder"],
            "skip_prep": hparams["skip_prep"],
            "fs": hparams["sample_rate"],
            "task": hparams["task"],
        },
    )
    # if whamr, and we do speedaugment we need to prepare the csv file
    if "whamr" in hparams["data_folder"] and hparams["use_speedperturb"]:
        from prepare_data import create_whamr_rir_csv
        from create_whamr_rirs import create_rirs
        # If the Room Impulse Responses do not exist, we create them
        # NOTE(review): "Createing" typo in the message below — user-facing
        # string, left unchanged here.
        if not os.path.exists(hparams["rir_path"]):
            print("Createing Room Impulse Responses...")
            run_on_main(
                create_rirs,
                kwargs={
                    "output_dir": hparams["rir_path"],
                    "sr": hparams["sample_rate"],
                },
            )
        run_on_main(
            create_whamr_rir_csv,
            kwargs={
                "datapath": hparams["rir_path"],
                "savepath": hparams["save_folder"],
            },
        )
        hparams["reverb"] = sb.processing.speech_augmentation.AddReverb(
            os.path.join(hparams["save_folder"], "whamr_rirs.csv")
        )
    # Create dataset objects
    if hparams["dynamic_mixing"]:
        from dynamic_mixing import dynamic_mix_data_prep  # noqa
        # if the base_folder for dm is not processed, preprocess them
        dm_suffix = (
            "processed" if hparams["sample_rate"] == 8000 else "processed_16k"
        )
        # if base_folder_dm includes the dm_suffix, just use that path
        if dm_suffix not in hparams["base_folder_dm"]:
            # if the processed folder already exists we just use it otherwise we do the preprocessing
            # NOTE(review): the existence check below uses base_folder_dm
            # while the resampling output uses base_folder_dm_processed —
            # confirm both hparams are meant to point at the same root.
            if not os.path.exists(
                os.path.normpath(hparams["base_folder_dm"]) + "_" + dm_suffix
            ):
                from preprocess_dynamic_mixing import resample_folder
                print("Resampling the base folder")
                run_on_main(
                    resample_folder,
                    kwargs={
                        "input_folder": hparams["base_folder_dm"],
                        "output_folder": os.path.normpath(
                            hparams["base_folder_dm_processed"]
                        )
                        + "_"
                        + dm_suffix,
                        "fs": hparams["sample_rate"],
                        "regex": "**/*.wav",
                    },
                )
                # adjust the base_folder_dm path
                hparams["base_folder_dm"] = (
                    os.path.normpath(hparams["base_folder_dm_processed"])
                    + "_"
                    + dm_suffix
                )
            else:
                print(
                    "Using the existing processed folder on the same directory as base_folder_dm"
                )
                hparams["base_folder_dm"] = (
                    os.path.normpath(hparams["base_folder_dm"])
                    + "_"
                    + dm_suffix
                )
        train_data = dynamic_mix_data_prep(
            tr_csv=hparams["train_data"],
            data_root_folder=hparams["data_folder"],
            base_folder_dm=hparams["base_folder_dm"],
            sample_rate=hparams["sample_rate"],
            num_spks=hparams["num_spks"],
            max_training_signal_len=hparams["training_signal_len"],
            batch_size=hparams["dataloader_opts"]["batch_size"],
            num_workers=hparams["dataloader_opts"]["num_workers"],
        )
        # Validation/test always come from the static csv datasets.
        _, valid_data, test_data = dataio_prep(hparams)
    else:
        train_data, valid_data, test_data = dataio_prep(hparams)
    # Load pretrained model if pretrained_separator is present in the yaml
    if "pretrained_separator" in hparams:
        run_on_main(hparams["pretrained_separator"].collect_files)
        hparams["pretrained_separator"].load_collected()
    # Brain class initialization
    separator = Separation(
        modules=hparams["modules"],
        opt_class=hparams["optimizer"],
        hparams=hparams,
        run_opts=run_opts,
        checkpointer=hparams["checkpointer"],
    )
    # re-initialize the parameters if we don't use a pretrained model
    if "pretrained_separator" not in hparams:
        for module in separator.modules.values():
            separator.reset_layer_recursively(module)
    # determine if frequency domain enhancement or not
    use_freq_domain = hparams.get("use_freq_domain", False)
    separator.use_freq_domain = use_freq_domain
    if not hparams["test_only"]:
        # Training
        separator.fit(
            separator.hparams.epoch_counter,
            train_data,
            valid_data,
            train_loader_kwargs=hparams["dataloader_opts"],
            valid_loader_kwargs=hparams["dataloader_opts_valid"],
        )
    # Eval
    separator.evaluate(test_data, max_key="pesq")
    separator.save_results(test_data)
| 29,072 | 36.465206 | 108 | py |
speechbrain | speechbrain-main/recipes/LibriParty/VAD/commonlanguage_prepare.py | import os
import logging
import torchaudio
import speechbrain as sb
from speechbrain.utils.data_utils import get_all_files
logger = logging.getLogger(__name__)
# Download location of the packaged CommonLanguage dataset.
# NOTE(review): not referenced elsewhere in this module — presumably used
# by callers that fetch the archive; confirm before removing.
COMMON_LANGUAGE_URL = (
    "https://zenodo.org/record/5036977/files/CommonLanguage.tar.gz?download=1"
)
def prepare_commonlanguage(folder, csv_file, max_noise_len=None):
    """Prepare the CommonLanguage dataset for VAD training.

    Arguments
    ---------
    folder : str
        The location of the folder containing the dataset.
    csv_file : str
        Filename for storing the prepared csv file.
    max_noise_len : float
        The maximum noise length in seconds. Noises longer
        than this will be cut into pieces.
    """
    logger.info("CommonLanguage Preparation...")
    # Collect every wav file found under the dataset folder.
    all_wavs = get_all_files(os.path.join(folder), match_and=[".wav"])
    # Skip the (expensive) csv creation when the file already exists.
    if os.path.isfile(csv_file):
        return
    logger.info(csv_file + " creation...")
    _prepare_csv(folder, all_wavs, csv_file, max_noise_len)
def _prepare_csv(folder, filelist, csv_file, max_length=None):
    """Iterate a set of wavs and write the corresponding csv file.

    Only the main DDP process writes the csv; every process then meets at
    a barrier so none reads a half-written file.

    Arguments
    ---------
    folder : str
        The folder relative to which the files in the list are listed.
    filelist : str
        The location of a file listing the files to be used.
    csv_file : str
        The location to use for writing the csv file.
    max_length : float
        The maximum length in seconds. Waveforms longer
        than this will be cut into pieces.
    """
    try:
        if sb.utils.distributed.if_main_process():
            with open(csv_file, "w") as w:
                w.write("ID,duration,wav,wav_format,wav_opts\n\n")
                for line in filelist:
                    # Read file for duration/channel info
                    filename = os.path.join(folder, line.split()[-1])
                    signal, rate = torchaudio.load(filename)
                    # Ensure only one channel
                    if signal.shape[0] > 1:
                        signal = signal[0].unsqueeze(0)
                        torchaudio.save(filename, signal, rate)
                    # splitext is robust to basenames that contain extra
                    # dots (the previous split(".") unpacking raised a
                    # ValueError on them).
                    ID, ext = os.path.splitext(os.path.basename(filename))
                    ext = ext.lstrip(".")
                    duration = signal.shape[1] / rate
                    # Handle long waveforms
                    if max_length is not None and duration > max_length:
                        # Delete old file
                        os.remove(filename)
                        for i in range(int(duration / max_length)):
                            start = int(max_length * i * rate)
                            stop = int(
                                min(max_length * (i + 1), duration) * rate
                            )
                            new_filename = (
                                filename[: -len(f".{ext}")] + f"_{i}.{ext}"
                            )
                            torchaudio.save(
                                new_filename, signal[:, start:stop], rate
                            )
                            csv_row = (
                                f"{ID}_{i}",
                                str((stop - start) / rate),
                                new_filename,
                                ext,
                                "\n",
                            )
                            w.write(",".join(csv_row))
                    else:
                        w.write(
                            ",".join((ID, str(duration), filename, ext, "\n"))
                        )
    finally:
        # Sync all DDP processes regardless of success or failure.
        sb.utils.distributed.ddp_barrier()
| 3,586 | 36.757895 | 78 | py |
speechbrain | speechbrain-main/recipes/LibriParty/VAD/data_augment.py | """This library is used to create data on-the-fly for VAD.
Authors
* Mirco Ravanelli 2020
"""
import torch
import torchaudio
import random
# fade-in/fade-out definition: 1000-sample linear fades applied in
# add_chunk to the boundaries of pasted chunks.
fade_in = torchaudio.transforms.Fade(fade_in_len=1000, fade_out_len=0)
fade_out = torchaudio.transforms.Fade(fade_in_len=0, fade_out_len=1000)
def add_chunk(
    wav,
    wav_chunk,
    target,
    sample_rate=16000,
    time_resolution=0.01,
    example_length=5,
    min_len=1.0,
    max_len=2.5,
    min_begin_sample=0,
    min_amp=0.4,
    max_amp=1.0,
    chunk_shift=0,
    speech=False,
):
    """Add a new source (noise or speech) to an existing chunk of speech.
    The length of the appended signal is randomly selected within the
    min_len-max_len range. The shift is controlled by the chunk_shift
    parameter.
    Arguments
    ---------
    wav : torch.tensor
        The waveform to append.
    wav_chunk : torch.tensor
        The existing waveform where to append the new source.
    target : torch.tensor
        The binary VAD target tensor to update (one frame per
        time_resolution seconds).
    sample_rate: int
        The sample rate of the input waveforms.
    time_resolution:
        Time resolution of the targets (in seconds).
    example_length: float
        Duration (in seconds) of the existing chunk.
    min_len: float
        Minimum duration (in seconds) of the waveform to append.
    max_len: float
        Maximum duration (in seconds) of the waveform to append.
    min_begin_sample: int
        It allows sampling the original waveform with some shift. This might
        be useful to avoid the initial sampled of the waveform that can be
        silence samples.
    min_amp: float
        The new waveform is appended with a random amplitude sampled in the
        range min_amp-max_amp.
    max_amp: float
        See min_amp.
    chunk_shift: int
        This parameter controls where to append the new source within the
        existing one.
    speech: bool
        If True, the new waveform is assumed to be a speech signal. The targets
        will be put to 1 for all the duration on the speech signal.
    Returns
    -------
    wav_chunk: torch.tensor
        The new waveform with the added signal.
    target: torch.tensor
        The new targets corresponding to the output signal.
    lengths: torch.tensor
        relative lengths of each chunk.
    end_chunk: int
        The last sample of the appended sequence. It can be used later to add
        another source that do not overlap with the current one.
    """
    # Convert from seconds to samples
    min_len_samples = int(sample_rate * min_len)
    max_len_samples = int(sample_rate * max_len)
    last_sample = int(example_length * sample_rate)
    # Randomly sampling the length of the chunk to append
    len_chunk = torch.randint(
        low=min_len_samples, high=max_len_samples, size=(1,)
    ).item()
    # Randomly sampling the start sample
    max_end_sample = min_begin_sample + (last_sample - len_chunk)
    begin_sample = torch.randint(
        low=min_begin_sample, high=max_end_sample, size=(1,)
    ).item()
    # Clip the end so the pasted chunk never runs past the example.
    end_chunk = min(chunk_shift + len_chunk, last_sample)
    # Randomly sampling the amplitude of the chunk to append
    rand_amp = (
        torch.rand(wav.shape[0], 1, wav.shape[-1], device=wav.device)
        * (max_amp - min_amp)
        + min_amp
    )
    # Fetch the signal to append
    wav_to_paste = wav[
        :, begin_sample : begin_sample + (end_chunk - chunk_shift)
    ]
    # Random amplitude: peak-normalize first, then scale by rand_amp.
    max_v, p = wav_to_paste.abs().max(1)
    wav_to_paste = wav_to_paste.transpose(1, 0) / max_v.unsqueeze(0)
    wav_to_paste = wav_to_paste.transpose(1, 0)
    wav_to_paste = rand_amp * wav_to_paste
    # Apply fade_in/fade_out if needed (only when the chunk boundary falls
    # strictly inside the example).
    if chunk_shift > 0:
        wav_to_paste = fade_in(wav_to_paste.transpose(1, -1))
        wav_to_paste = wav_to_paste.transpose(1, -1)
    if end_chunk < last_sample:
        wav_to_paste = fade_out(wav_to_paste.transpose(1, -1))
        wav_to_paste = wav_to_paste.transpose(1, -1)
    # Append the signal (additive mixing over the selected span).
    wav_chunk[:, chunk_shift:end_chunk] = (
        wav_chunk[:, chunk_shift:end_chunk] + wav_to_paste
    )
    # Update targets if the appended signal is speech.
    if speech:
        beg_speech_target = int(chunk_shift / (sample_rate * time_resolution))
        end_speech_target = int(end_chunk / (sample_rate * time_resolution))
        target[:, beg_speech_target:end_speech_target] = 1
    # Length computation (all-ones: every chunk is full length)
    lenghts = torch.ones(
        wav_chunk.shape[0], wav_chunk.shape[-1], device=wav.device
    )
    return wav_chunk, target, lenghts, end_chunk
def initialize_targets(wav, sample_rate, time_resolution):
    """Return an all-zero VAD target tensor matching the waveform batch.

    One target frame covers ``sample_rate * time_resolution`` samples, so
    the result has shape (batch, n_frames, wav.shape[2]) on wav's device.
    """
    samples_per_frame = sample_rate * time_resolution
    n_frames = int(wav.shape[1] / samples_per_frame)
    return torch.zeros(
        (wav.shape[0], n_frames, wav.shape[2]), device=wav.device
    )
def get_samples_from_datasets(datasets, wav):
    """Draw one sample (noise or speech) from each augmentation dataset.

    Arguments
    ---------
    datasets : list
        List of augmentation objects/callables (e.g.
        speechbrain.lobes.augment.EnvCorrupt) taking (wav, lengths).
    wav : torch.tensor
        The original waveform; drawn samples match its batch/time shape.

    Returns
    -------
    samples : torch.tensor
        Tensor of shape (batch, time, len(datasets)) with one drawn
        sample per dataset, shuffled along the last axis.
    """
    batch, n_samples = wav.shape[0], wav.shape[1]
    samples = torch.zeros(batch, n_samples, len(datasets), device=wav.device)
    for i, dataset in enumerate(datasets):
        # Seed each draw with uniform noise in [-1, 1).
        noise_seed = (torch.rand_like(wav) * 2) - 1
        full_lens = torch.ones(batch, device=wav.device)
        # The dataset callable turns the seed into an actual sample.
        samples[:, :, i] = dataset(noise_seed, full_lens)
    # Shuffle the dataset axis so the source order is random.
    perm = torch.randperm(samples.shape[-1], device=wav.device)
    samples[:, :] = samples[:, :, perm]
    return samples
def create_chunks(
    wav1,
    wav2,
    background,
    sample_rate=16000,
    time_resolution=0.01,
    example_length=5,
    speech1=False,
    speech2=False,
    low_background=0.05,
    high_background=0.15,
    max_pause=16000,
):
    """This method creates augmented data for training the VAD.
    It sums up two delayed sources + a noise background.
    Arguments
    ---------
    wav1 : torch.tensor
        The waveform for source 1.
    wav2 : torch.tensor
        The waveform for source 2.
    background : torch.tensor
        The waveform for background noise.
    sample_rate: int
        The sample rate of the input waveforms.
    time_resolution:
        Time resolution of the targets (in seconds).
    example_length: float
        Duration (in seconds) of the existing chunk.
    speech1: bool
        If True, source 1 is assumed to be a speech signal. The targets
        will be put to 1 for all the duration on the speech signal.
    speech2: bool
        If True, source 2 is assumed to be a speech signal. The targets
        will be put to 1 for all the duration of the speech signal.
    low_background: float
        The amplitude of the background is randomly sampled between
        low_background and high_background.
    high_background: float
        See above.
    max_pause: int
        Max pause in samples between the two sources.
    Returns
    -------
    wavs: torch.tensor
        The generated speech signal.
    target: torch.tensor
        The new targets corresponding to the generated signal.
    lengths: torch.tensor
        relative lengths of each chunk.
    """
    # Random selection of the background amplitude
    background_amp = (
        random.random() * (high_background - low_background) + low_background
    )
    # Adding the background: uniform noise plus a rolled copy of the
    # provided background signal, both scaled by background_amp.
    wav = background_amp * (torch.rand_like(background) - 0.5)
    background = torch.roll(background, 1, dims=-1)
    wav = wav + background_amp * background
    # Adding the first source
    wav, target, lengths, end_chunk = add_chunk(
        wav1,
        wav,
        initialize_targets(wav1, sample_rate, time_resolution),
        sample_rate=sample_rate,
        time_resolution=time_resolution,
        example_length=example_length,
        speech=speech1,
    )
    # Choosing the lag of the second source (starts after the first ends)
    begin_sample = torch.randint(
        low=end_chunk, high=end_chunk + max_pause, size=(1,)
    ).item()
    # Adding the second source
    wav, target, lengths, end_sample = add_chunk(
        wav2,
        wav,
        target,
        chunk_shift=begin_sample,
        sample_rate=sample_rate,
        time_resolution=time_resolution,
        example_length=example_length,
        speech=speech2,
    )
    # Merge the batch and last axes into a single batch dimension.
    wav = wav.transpose(1, 2).reshape(wav.shape[0] * wav.shape[2], wav.shape[1])
    target = target.transpose(1, 2).reshape(
        target.shape[0] * target.shape[2], target.shape[1]
    )
    lengths = lengths.reshape([lengths.shape[0] * lengths.shape[1]])
    return wav, target, lengths
def augment_data(noise_datasets, speech_datasets, wavs, targets, lens_targ):
    """This method creates different types of augmented data that are useful to train
    a VAD system. It creates signals with different types of transitions, such
    as speech=>speech, noise=>speech, speech=>noise. The new signals are
    concatenated with the original ones (wavs, targets) such that the output
    is already an augmented batch useful to train a VAD model.
    Arguments
    ---------
    noise_datasets: list
        List containing noise datasets. More precisely, we expect here the pointers
        to the object used in speechbrain for data augmentation
        (e.g, speechbrain.lobes.augment.EnvCorrupt).
    speech_datasets: list
        List containing speech datasets. More precisely, we expect here the pointers
        to the object used in speechbrain for data augmentation
        (e.g, speechbrain.lobes.augment.EnvCorrupt).
    wavs: torch.tensor
        The original waveform.
    targets: torch.tensor
        The original targets.
    lens_targ: torch.tensor
        The length of the original targets.
    Returns
    -------
    wavs: torch.tensor
        The output batch with the augmented signals
    target: torch.tensor
        The new targets corresponding to the augmented signals.
    lengths: torch.tensor
        relative lengths of each element in the batch.
    """
    # Sample a noise sequence
    wav_samples_noise = get_samples_from_datasets(noise_datasets, wavs)
    # Sample a speech sequence
    wav_samples_speech = get_samples_from_datasets(speech_datasets, wavs)
    # Create chunk with noise=>speech transition
    (
        wav_noise_speech,
        target_noise_speech,
        lengths_noise_speech,
    ) = create_chunks(
        wav_samples_noise,
        wav_samples_speech,
        wav_samples_noise,
        speech1=False,
        speech2=True,
    )
    # Create chunk with speech=>noise transition
    (
        wav_speech_noise,
        target_speech_noise,
        lengths_speech_noise,
    ) = create_chunks(
        wav_samples_speech,
        wav_samples_noise,
        wav_samples_noise,
        speech1=True,
        speech2=False,
    )
    # Create chunk with speech=>speech transition
    # (rolled copy gives a second, distinct speech sample)
    wav_samples_speech2 = torch.roll(wav_samples_speech, 1, dims=-1)
    (
        wav_speech_speech,
        target_speech_speech,
        lengths_speech_speech,
    ) = create_chunks(
        wav_samples_speech,
        wav_samples_speech2,
        wav_samples_noise,
        speech1=True,
        speech2=True,
    )
    # Create chunk with noise=>noise transition
    wav_samples_noise2 = torch.roll(wav_samples_noise, 1, dims=-1)
    (wav_noise_noise, target_noise_noise, lengths_noise_noise) = create_chunks(
        wav_samples_noise,
        wav_samples_noise2,
        wav_samples_noise,
        speech1=False,
        speech2=False,
    )
    # Concatenate all the augmented data with the original batch
    wavs = torch.cat(
        [
            wavs,
            wav_noise_speech,
            wav_speech_noise,
            wav_speech_speech,
            wav_noise_noise,
        ],
        dim=0,
    )
    # Concatenate targets
    targets = torch.cat(
        [
            targets,
            target_noise_speech,
            target_speech_noise,
            target_speech_speech,
            target_noise_noise,
        ],
        dim=0,
    )
    # Concatenate lengths
    lens = torch.cat(
        [
            lens_targ,
            lengths_noise_speech,
            lengths_speech_noise,
            lengths_speech_speech,
            lengths_noise_noise,
        ],
        dim=0,
    )
    # Assign random amplitude to the signals (peak-normalize, then scale)
    max_amp, _ = wavs.abs().max(1)
    wavs = wavs / max_amp.unsqueeze(1)
    wavs = wavs * torch.rand_like(max_amp).unsqueeze(1)
    return wavs, targets, lens
| 13,096 | 29.45814 | 83 | py |
speechbrain | speechbrain-main/recipes/LibriParty/VAD/musan_prepare.py | import os
import logging
import torchaudio
import speechbrain as sb
from speechbrain.utils.data_utils import get_all_files
logger = logging.getLogger(__name__)
def prepare_musan(folder, music_csv, noise_csv, speech_csv, max_noise_len=None):
    """Prepare the musan dataset (music, noise, speech).

    Arguments
    ---------
    folder : str
        The location of the folder containing the dataset.
    music_csv : str
        Filename for storing the prepared music csv.
    noise_csv : str
        Filename for storing the prepared noise csv.
    speech_csv : str
        Filename for storing the prepared speech csv.
    max_noise_len : float
        The maximum noise length in seconds. Noises longer
        than this will be cut into pieces.
    """
    logger.info("Musan Data Preparation...")
    # Each musan subset lives in its own sub-folder and gets its own csv.
    targets = [
        ("music", music_csv),
        ("noise", noise_csv),
        ("speech", speech_csv),
    ]
    for sub_folder, csv_file in targets:
        wav_lst = get_all_files(
            os.path.join(folder, sub_folder), match_and=[".wav"]
        )
        # Only (re)create the csv when it does not exist yet.
        if not os.path.isfile(csv_file):
            logger.info(csv_file + " creation...")
            _prepare_csv(folder, wav_lst, csv_file, max_noise_len)
def _prepare_csv(folder, filelist, csv_file, max_length=None):
    """Iterate a set of wavs and write the corresponding csv file.
    Arguments
    ---------
    folder : str
        The folder relative to which the files in the list are listed.
    filelist : str
        The location of a file listing the files to be used.
    csv_file : str
        The location to use for writing the csv file.
    max_length : float
        The maximum length in seconds. Waveforms longer
        than this will be cut into pieces.
    """
    try:
        # Only the main DDP process writes the csv file.
        if sb.utils.distributed.if_main_process():
            with open(csv_file, "w") as w:
                w.write("ID,duration,wav,wav_format,wav_opts\n\n")
                for line in filelist:
                    # Read file for duration/channel info
                    filename = os.path.join(folder, line.split()[-1])
                    signal, rate = torchaudio.load(filename)
                    # Ensure only one channel
                    if signal.shape[0] > 1:
                        signal = signal[0].unsqueeze(0)
                        torchaudio.save(filename, signal, rate)
                    # NOTE(review): this unpacking raises ValueError for
                    # basenames containing more than one dot — confirm the
                    # dataset guarantees single-dot filenames.
                    ID, ext = os.path.basename(filename).split(".")
                    duration = signal.shape[1] / rate
                    # Handle long waveforms
                    if max_length is not None and duration > max_length:
                        # Delete old file
                        os.remove(filename)
                        for i in range(int(duration / max_length)):
                            start = int(max_length * i * rate)
                            stop = int(
                                min(max_length * (i + 1), duration) * rate
                            )
                            new_filename = (
                                filename[: -len(f".{ext}")] + f"_{i}.{ext}"
                            )
                            torchaudio.save(
                                new_filename, signal[:, start:stop], rate
                            )
                            csv_row = (
                                f"{ID}_{i}",
                                str((stop - start) / rate),
                                new_filename,
                                ext,
                                "\n",
                            )
                            w.write(",".join(csv_row))
                    else:
                        w.write(
                            ",".join((ID, str(duration), filename, ext, "\n"))
                        )
    finally:
        # All DDP processes wait here so readers never see a partial csv.
        sb.utils.distributed.ddp_barrier()
| 3,700 | 37.154639 | 80 | py |
speechbrain | speechbrain-main/recipes/LibriParty/VAD/train.py | #!/usr/bin/env python3
"""
Recipe for training a Voice Activity Detection (VAD) model on LibriParty.
This code heavily relis on data augmentation with external datasets.
(e.g, open_rir, musan, CommonLanguge is used as well).
Make sure you download all the datasets before staring the experiment:
- LibriParty: https://drive.google.com/file/d/1--cAS5ePojMwNY5fewioXAv9YlYAWzIJ/view?usp=sharing
- Musan: https://www.openslr.org/resources/17/musan.tar.gz
- CommonLanguage: https://zenodo.org/record/5036977/files/CommonLanguage.tar.gz?download=1
To run an experiment:
python train.py hparams/train.yaml\
--data_folder=/path/to/LibriParty \
--musan_folder=/path/to/musan/\
--commonlanguage_folder=/path/to/commonlang
Authors
* Mohamed Kleit 2021
* Arjun V 2021
* Mirco Ravanelli 2021
"""
import sys
import torch
import logging
import numpy as np
import speechbrain as sb
from hyperpyyaml import load_hyperpyyaml
from speechbrain.utils.distributed import run_on_main
from data_augment import augment_data
logger = logging.getLogger(__name__)
class VADBrain(sb.Brain):
    """Brain subclass implementing training/evaluation of the VAD model.

    The forward pass runs CNN -> RNN -> DNN on normalized features and
    outputs per-frame binary speech/non-speech scores.
    """

    def compute_forward(self, batch, stage):
        """Given an input batch it computes the binary probability.
        In training phase, we create on-the-fly augmentation data.
        """
        batch = batch.to(self.device)
        wavs, lens = batch.signal
        targets, lens_targ = batch.target
        self.targets = targets
        if stage == sb.Stage.TRAIN:
            # On-the-fly augmentation enlarges the batch; targets/lens are
            # stored on self for compute_objectives.
            wavs, targets, lens = augment_data(
                self.noise_datasets,
                self.speech_datasets,
                wavs,
                targets,
                lens_targ,
            )
            self.lens = lens
            self.targets = targets
        # From wav input to output binary prediction
        feats = self.hparams.compute_features(wavs)
        feats = self.modules.mean_var_norm(feats, lens)
        # Gradients stop at the features (they are treated as fixed input).
        feats = feats.detach()
        outputs = self.modules.cnn(feats)
        # Flatten the CNN channel/frequency dims for the recurrent layer.
        outputs = outputs.reshape(
            outputs.shape[0],
            outputs.shape[1],
            outputs.shape[2] * outputs.shape[3],
        )
        outputs, h = self.modules.rnn(outputs)
        outputs = self.modules.dnn(outputs)
        return outputs, lens

    def compute_objectives(self, predictions, batch, stage):
        "Given the network predictions and targets, computes the binary CE"
        predictions, lens = predictions
        targets = self.targets
        # Trim predictions to the target length; keep the single output dim.
        predictions = predictions[:, : targets.shape[-1], 0]
        loss = self.hparams.compute_BCE_cost(predictions, targets, lens)
        self.train_metrics.append(batch.id, torch.sigmoid(predictions), targets)
        if stage != sb.Stage.TRAIN:
            self.valid_metrics.append(
                batch.id, torch.sigmoid(predictions), targets
            )
        return loss

    def on_stage_start(self, stage, epoch=None):
        "Gets called when a stage (either training, validation, test) starts."
        self.train_metrics = self.hparams.train_stats()
        self.noise_datasets = [
            self.hparams.add_noise,
            self.hparams.add_noise_musan,
            self.hparams.add_music_musan,
        ]
        # NOTE(review): the same musan speech augmenter is listed three
        # times; distinct speech sources may have been intended — confirm.
        self.speech_datasets = [
            self.hparams.add_speech_musan,
            self.hparams.add_speech_musan,
            self.hparams.add_speech_musan,
        ]
        if stage != sb.Stage.TRAIN:
            self.valid_metrics = self.hparams.test_stats()

    def on_stage_end(self, stage, stage_loss, epoch=None):
        """Gets called at the end of a stage."""
        if stage == sb.Stage.TRAIN:
            self.train_loss = stage_loss
        else:
            # Summarize binary metrics at a 0.5 decision threshold.
            summary = self.valid_metrics.summarize(threshold=0.5)
        if stage == sb.Stage.VALID:
            old_lr, new_lr = self.hparams.lr_annealing(epoch)
            sb.nnet.schedulers.update_learning_rate(self.optimizer, new_lr)
            self.hparams.train_logger.log_stats(
                stats_meta={"epoch": epoch, "lr": old_lr},
                train_stats={"loss": self.train_loss},
                valid_stats={"loss": stage_loss, "summary": summary},
            )
            self.checkpointer.save_and_keep_only(
                meta={"loss": stage_loss, "summary": summary},
                num_to_keep=1,
                min_keys=["loss"],
                name="epoch_{}".format(epoch),
            )
        elif stage == sb.Stage.TEST:
            self.hparams.train_logger.log_stats(
                stats_meta={"Epoch loaded": self.hparams.epoch_counter.current},
                test_stats={"loss": stage_loss, "summary": summary},
            )
def dataio_prep(hparams):
    "Creates the datasets and their data processing pipelines."
    # 1. Declarations:
    data_folder = hparams["data_folder"]
    train = sb.dataio.dataset.DynamicItemDataset.from_json(
        json_path=hparams["annotation_train"],
        replacements={"data_root": data_folder},
    )
    validation = sb.dataio.dataset.DynamicItemDataset.from_json(
        json_path=hparams["annotation_valid"],
        replacements={"data_root": data_folder},
    )
    test = sb.dataio.dataset.DynamicItemDataset.from_json(
        json_path=hparams["annotation_test"],
        replacements={"data_root": data_folder},
    )
    # 2. Define audio pipeline:
    @sb.utils.data_pipeline.takes("wav")
    @sb.utils.data_pipeline.provides("signal")
    def audio_pipeline(wav):
        """Load the waveform given its path."""
        sig = sb.dataio.dataio.read_audio(wav)
        return sig
    # 3. Define text pipeline:
    @sb.utils.data_pipeline.takes("speech")
    @sb.utils.data_pipeline.provides("target")
    def vad_targets(speech, hparams=hparams):
        """Convert speech intervals (seconds) to a frame-level 0/1 target."""
        boundaries = (
            [
                (
                    int(interval[0] / hparams["time_resolution"]),
                    int(interval[1] / hparams["time_resolution"]),
                )
                for interval in speech
            ]
            if len(speech) > 0
            else []
        )
        # One frame per time_resolution seconds over the whole example.
        gt = torch.zeros(
            int(
                np.ceil(
                    hparams["example_length"] * (1 / hparams["time_resolution"])
                )
            )
        )
        for indxs in boundaries:
            start, stop = indxs
            gt[start:stop] = 1
        return gt
    # Create dataset
    datasets = [train, validation, test]
    sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline)
    sb.dataio.dataset.add_dynamic_item(datasets, vad_targets)
    sb.dataio.dataset.set_output_keys(
        datasets, ["id", "signal", "target", "speech"]
    )
    # Split dataset
    train_data, valid_data, test_data = datasets
    return train_data, valid_data, test_data
# Begin Recipe!
if __name__ == "__main__":
    # CLI:
    hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])
    # Load hyperparameters file with command-line overrides
    with open(hparams_file) as fin:
        hparams = load_hyperpyyaml(fin, overrides)
    # Initialize ddp (useful only for multi-GPU DDP training)
    sb.utils.distributed.ddp_init_group(run_opts)
    # Create experiment directory
    sb.create_experiment_directory(
        experiment_directory=hparams["output_folder"],
        hyperparams_to_save=hparams_file,
        overrides=overrides,
    )
    from libriparty_prepare import prepare_libriparty
    # LibriParty preparation (json annotation creation)
    run_on_main(
        prepare_libriparty,
        kwargs={
            "data_folder": hparams["data_folder"],
            "save_json_folder": hparams["save_folder"],
            "sample_rate": hparams["sample_rate"],
            "window_size": hparams["example_length"],
            "skip_prep": hparams["skip_prep"],
        },
    )
    # Prepare Musan
    from musan_prepare import prepare_musan
    if not hparams["skip_prep"]:
        run_on_main(
            prepare_musan,
            kwargs={
                "folder": hparams["musan_folder"],
                "music_csv": hparams["music_csv"],
                "noise_csv": hparams["noise_csv"],
                "speech_csv": hparams["speech_csv"],
                "max_noise_len": hparams["example_length"],
            },
        )
    # Prepare common
    from commonlanguage_prepare import prepare_commonlanguage
    if not hparams["skip_prep"]:
        run_on_main(
            prepare_commonlanguage,
            kwargs={
                "folder": hparams["commonlanguage_folder"],
                "csv_file": hparams["multilang_speech_csv"],
            },
        )
    # Dataset IO prep: creating Dataset objects
    train_data, valid_data, test_data = dataio_prep(hparams)
    # Trainer initialization
    vad_brain = VADBrain(
        modules=hparams["modules"],
        opt_class=hparams["opt_class"],
        hparams=hparams,
        run_opts=run_opts,
        checkpointer=hparams["checkpointer"],
    )
    # Training/validation loop
    vad_brain.fit(
        vad_brain.hparams.epoch_counter,
        train_data,
        valid_data,
        train_loader_kwargs=hparams["train_dataloader_opts"],
        valid_loader_kwargs=hparams["valid_dataloader_opts"],
    )
    # Test (best checkpoint by lowest validation loss)
    vad_brain.evaluate(
        test_data,
        min_key="loss",
        test_loader_kwargs=hparams["test_dataloader_opts"],
    )
| 9,259 | 30.931034 | 96 | py |
speechbrain | speechbrain-main/recipes/LibriParty/generate_dataset/local/resample_folder.py | """
This script allows to resample a folder which contains audio files.
The files are parsed recursively. An exact copy of the folder is created,
with same structure but contained resampled audio files.
Resampling is performed by using sox through torchaudio.
Author
------
Samuele Cornell, 2020
"""
import os
import argparse
from pathlib import Path
import tqdm
import torchaudio
import torch
from speechbrain.utils.data_utils import get_all_files
# Command-line interface for running this module as a script.
parser = argparse.ArgumentParser(
    "utility for resampling all audio files in a folder recursively. "
    "It --input_folder to --output_folder and "
    "resamples all audio files with specified format to --fs."
)
parser.add_argument("--input_folder", type=str, required=True)
parser.add_argument("--output_folder", type=str, required=True)
# NOTE(review): declared type=str but the default is the int 16000 —
# confirm downstream handling accepts both.
parser.add_argument("--fs", type=str, default=16000)
parser.add_argument("--regex", type=str, default="*.wav")
def resample_folder(input_folder, output_folder, fs, regex):
    """Resample every audio file under ``input_folder`` matching ``regex``.

    The directory structure is mirrored under ``output_folder`` and each
    file is peak-normalized before being written back.

    NOTE(review): relies on the legacy torchaudio sox bindings
    (initialize_sox / SoxEffectsChain); newer torchaudio releases dropped
    this API — confirm the pinned torchaudio version.
    """
    files = get_all_files(input_folder, match_and=[regex])
    torchaudio.initialize_sox()
    for f in tqdm.tqdm(files):
        # we use sox because torchaudio.Resample uses too much RAM.
        resample = torchaudio.sox_effects.SoxEffectsChain()
        resample.append_effect_to_chain("rate", [fs])
        resample.set_input_file(f)
        # NOTE: this rebinds the ``fs`` argument to the output sample rate.
        audio, fs = resample.sox_build_flow_effects()
        audio = (
            audio / torch.max(torch.abs(audio), dim=-1, keepdim=True)[0]
        )  # scale back otherwise you get empty .wav file
        # Recreate the source file's relative path under output_folder.
        os.makedirs(
            Path(
                os.path.join(
                    output_folder, Path(f).relative_to(Path(input_folder))
                )
            ).parent,
            exist_ok=True,
        )
        torchaudio.save(
            os.path.join(
                output_folder, Path(f).relative_to(Path(input_folder))
            ),
            audio,
            fs,
        )
    torchaudio.shutdown_sox()
if __name__ == "__main__":
    # Parse CLI args and launch the recursive resampling.
    args = parser.parse_args()
    resample_folder(args.input_folder, args.output_folder, args.fs, args.regex)
| 2,091 | 29.318841 | 79 | py |
speechbrain | speechbrain-main/recipes/LibriParty/generate_dataset/local/create_mixtures_metadata.py | """
This file contains functions to create json metadata used to create
mixtures which simulate a multi-party conversation in a noisy scenario.
Author
------
Samuele Cornell, 2020
"""
import numpy as np
from pathlib import Path
import json
import os
from tqdm import tqdm
import torchaudio
def _read_metadata(file_path, configs):
    """Read audio metadata and pick a random channel.

    Arguments
    ---------
    file_path : str
        Path of the audio file to inspect.
    configs : dict
        Configuration dictionary; ``configs["samplerate"]`` is used to
        validate the file's sample rate.

    Returns
    -------
    meta : object
        The metadata object returned by ``torchaudio.info``.
    channel : int
        A channel index drawn uniformly from the available channels
        (always 0 for mono files).
    """
    meta = torchaudio.info(file_path)
    if meta.num_channels > 1:
        # np.random.randint's upper bound is exclusive, so sampling in
        # [0, num_channels) makes every channel reachable.  The previous
        # bound of num_channels - 1 could never select the last channel
        # (and always returned 0 for stereo files).
        channel = np.random.randint(0, meta.num_channels)
    else:
        channel = 0
    assert (
        meta.sample_rate == configs["samplerate"]
    ), "file samplerate is different from the one specified"
    return meta, channel
def create_metadata(
    output_filename,
    n_sessions,
    configs,
    utterances_dict,
    words_dict,
    rir_list,
    impulsive_noises_list=None,
    background_noises_list=None,
):
    """Write JSON metadata describing ``n_sessions`` simulated sessions.

    For every session a fixed number of speakers is sampled; their utterances
    are laid out on a timeline with exponentially-distributed gaps, each with
    a random level and a random RIR. Optionally impulsive noises (treated as
    an extra "speaker") and a background noise track are added. The result is
    dumped to ``<configs["out_folder"]>/<output_filename>.json``; only paths
    relative to the configured roots are stored.

    Arguments
    ---------
    output_filename : str
        Basename (without extension) of the JSON file to write.
    n_sessions : int
        Number of sessions to generate.
    configs : dict
        Generation hyper-parameters (levels, max length, roots, ...).
    utterances_dict : dict
        Maps speaker id -> list of utterance file paths.
    words_dict : dict
        Maps utterance id -> transcription.
    rir_list : list
        Candidate room impulse response files.
    impulsive_noises_list : list, optional
        Candidate impulsive noise files; skipped if falsy.
    background_noises_list : list, optional
        Candidate background noise files; if falsy, Gaussian noise metadata
        is emitted instead.
    """
    dataset_metadata = {}
    for n_sess in tqdm(range(n_sessions)):
        # we sample randomly n_speakers ids
        c_speakers = np.random.choice(
            list(utterances_dict.keys()), configs["n_speakers"], replace=False
        )
        # we select all utterances from selected speakers
        c_utts = [utterances_dict[spk_id] for spk_id in c_speakers]
        # activity maps each source ("speaker id", "noises", "background")
        # to its list of timeline events for this session.
        activity = {}
        for spk in c_speakers:
            activity[spk] = []
        tot_length = 0
        # Track the quietest speaker level so noise levels can be capped
        # relative to it further below.
        min_spk_lvl = np.inf
        for i in range(len(c_speakers)):
            c_spk = c_speakers[i]
            spk_utts = c_utts[i]
            np.random.shuffle(spk_utts)  # random shuffle
            # we use same technique as in EEND repo for intervals distribution
            intervals = np.random.exponential(
                configs["interval_factor_speech"], len(spk_utts)
            )
            # cursor advances along the session timeline (seconds).
            cursor = 0
            for j, wait in enumerate(intervals):
                meta, channel = _read_metadata(spk_utts[j], configs)
                c_rir = np.random.choice(rir_list, 1)[0]
                # check if the rir is monaural
                meta_rir, rir_channel = _read_metadata(c_rir, configs)
                length = meta.num_frames / meta.sample_rate
                id_utt = Path(spk_utts[j]).stem
                cursor += wait
                # Stop adding utterances once the session budget is exceeded.
                if cursor + length > configs["max_length"]:
                    break
                # Sample a per-utterance level (dB) from a clipped Gaussian.
                lvl = np.clip(
                    np.random.normal(
                        configs["speech_lvl_mean"], configs["speech_lvl_var"]
                    ),
                    configs["speech_lvl_min"],
                    configs["speech_lvl_max"],
                )
                min_spk_lvl = min(lvl, min_spk_lvl)
                # we save to metadata only relative paths
                activity[c_spk].append(
                    {
                        "start": cursor,
                        "stop": cursor + length,
                        "words": words_dict[id_utt],
                        "rir": str(
                            Path(c_rir).relative_to(configs["rirs_noises_root"])
                        ),
                        "utt_id": id_utt,
                        "file": str(
                            Path(spk_utts[j]).relative_to(
                                configs["librispeech_root"]
                            )
                        ),
                        "lvl": lvl,
                        "channel": channel,
                        "rir_channel": rir_channel,
                    }
                )
                tot_length = max(cursor + length, tot_length)
                cursor = cursor + length
        # we add also impulsive noises as it were a speaker
        if impulsive_noises_list:
            activity["noises"] = []
            # sampling intervals for impulsive noises.
            intervals = np.random.exponential(
                configs["interval_factor_noises"], len(impulsive_noises_list)
            )
            cursor = 0
            for j, wait in enumerate(intervals):
                # we sample with replacement an impulsive noise.
                c_noise = np.random.choice(impulsive_noises_list, 1)[0]
                meta, channel = _read_metadata(c_noise, configs)
                c_rir = np.random.choice(rir_list, 1)[0]
                # we reverberate it.
                meta_rir, rir_channel = _read_metadata(c_rir, configs)
                length = meta.num_frames / meta.sample_rate
                cursor += wait
                if cursor + length > configs["max_length"]:
                    break
                # Noise level is capped relative to the quietest speaker so
                # noises never dominate the speech.
                lvl = np.clip(
                    np.random.normal(
                        configs["imp_lvl_mean"], configs["imp_lvl_var"]
                    ),
                    configs["imp_lvl_min"],
                    min(min_spk_lvl + configs["imp_lvl_rel_max"], 0),
                )
                activity["noises"].append(
                    {
                        "start": cursor,
                        "stop": cursor + length,
                        "rir": str(
                            Path(c_rir).relative_to(configs["rirs_noises_root"])
                        ),
                        "file": str(
                            Path(c_noise).relative_to(
                                configs["rirs_noises_root"]
                            )
                        ),
                        "lvl": lvl,
                        "channel": channel,
                        "rir_channel": rir_channel,
                    }
                )
                tot_length = max(tot_length, cursor + length)
                cursor += length
        if background_noises_list:
            # we add also background noise.
            lvl = np.random.randint(
                configs["background_lvl_min"],
                min(min_spk_lvl + configs["background_lvl_rel_max"], 0),
            )
            # we scale the level but do not reverberate.
            background = np.random.choice(background_noises_list, 1)[0]
            meta, channel = _read_metadata(background, configs)
            assert (
                meta.num_frames >= configs["max_length"] * configs["samplerate"]
            ), "background noise files should be >= max_length in duration"
            # Pick a random crop of the background if it is longer than the
            # session; orig_start/orig_stop record the crop in samples.
            offset = 0
            if meta.num_frames > configs["max_length"] * configs["samplerate"]:
                offset = np.random.randint(
                    0,
                    meta.num_frames
                    - int(configs["max_length"] * configs["samplerate"]),
                )
            activity["background"] = {
                "start": 0,
                "stop": tot_length,
                "file": str(
                    Path(background).relative_to(configs["backgrounds_root"])
                ),
                "lvl": lvl,
                "orig_start": offset,
                "orig_stop": offset + int(tot_length * configs["samplerate"]),
                "channel": channel,
            }
        else:
            # we use as background gaussian noise
            lvl = np.random.randint(
                configs["background_lvl_min"],
                min(min_spk_lvl + configs["background_lvl_rel_max"], 0),
            )
            # file=None signals downstream code to synthesize Gaussian noise.
            activity["background"] = {
                "start": 0,
                "stop": tot_length,
                "file": None,
                "lvl": lvl,
                "orig_start": None,
                "orig_stop": None,
                "channel": None,
            }
        dataset_metadata["session_{}".format(n_sess)] = activity
    with open(
        os.path.join(configs["out_folder"], output_filename + ".json"), "w"
    ) as f:
        json.dump(dataset_metadata, f, indent=4)
| 7,619 | 35.285714 | 80 | py |
speechbrain | speechbrain-main/recipes/LibriParty/generate_dataset/local/create_mixtures_from_metadata.py | """
This file contains functions to create mixtures given json metadata.
The mixtures simulate a multi-party conversation in a noisy scenario.
Author
------
Samuele Cornell, 2020
"""
import os
import torch
import json
import numpy as np
import torchaudio
from speechbrain.processing.signal_processing import rescale, reverberate
def create_mixture(session_n, output_dir, params, metadata):
    """Render one session's audio mixture from its JSON metadata.

    Each speaker's utterances are loaded, rescaled to the target level,
    reverberated with the stored RIR, and summed into a single mixture
    track; impulsive noises and a background track (file-based or Gaussian)
    are then added. Per-speaker dry/wet stems are optionally saved, and a
    per-session JSON with the realized timing is written alongside
    ``<session_n>_mixture.wav``.

    Arguments
    ---------
    session_n : str
        Session identifier (sub-folder and file-name prefix).
    output_dir : str
        Root output folder; a ``session_n`` sub-folder is created in it.
    params : dict
        Rendering parameters (samplerate, dataset roots, save flags).
    metadata : dict
        One session's entry from the metadata JSON (speakers, "noises",
        "background").
    """
    os.makedirs(os.path.join(output_dir, session_n), exist_ok=True)
    session_meta = {}
    speakers = [x for x in metadata.keys() if x not in ["noises", "background"]]
    # The background spans the whole session, so its stop time defines the
    # total mixture length (in samples).
    tot_length = int(
        np.ceil(metadata["background"]["stop"] * params["samplerate"])
    )
    mixture = torch.zeros(tot_length)  # total mixture file
    assert len(mixture) > 0, "Mixture has length 0, please raise max_length."
    # step 1
    for spk in speakers:
        session_meta[spk] = []
        # we create mixture for each speaker and we optionally save it.
        if params["save_dry_sources"]:
            dry = torch.zeros(tot_length)
        if params["save_wet_sources"]:
            wet = torch.zeros(tot_length)
        for utt in metadata[spk]:
            c_audio, fs = torchaudio.load(
                os.path.join(params["librispeech_root"], utt["file"])
            )
            assert fs == params["samplerate"]
            if len(c_audio.shape) > 1:  # multichannel
                c_audio = c_audio[utt["channel"], :]
            # Remove DC offset, then scale the peak to the target level (dB).
            c_audio = c_audio - torch.mean(c_audio)
            c_audio = rescale(
                c_audio,
                c_audio.size(0),
                utt["lvl"],
                scale="dB",
                amp_type="peak",
            )
            # we save it in dry
            dry_start = int(utt["start"] * params["samplerate"])
            dry_stop = dry_start + c_audio.shape[-1]
            if params["save_dry_sources"]:
                dry[dry_start:dry_stop] += c_audio
            # we add now reverb and put it in wet
            c_rir, fs = torchaudio.load(
                os.path.join(params["rirs_noises_root"], utt["rir"])
            )
            assert fs == params["samplerate"]
            c_rir = c_rir[utt["rir_channel"], :]
            c_audio = reverberate(c_audio, c_rir, "peak")
            # tof is not accounted because in reverberate we shift by it
            wet_start = dry_start
            wet_stop = dry_stop  # + early_rev_samples
            if params["save_wet_sources"]:
                wet[wet_start : wet_start + len(c_audio)] += c_audio
            # Record the realized placement (seconds, rounded to ms).
            session_meta[spk].append(
                {
                    "start": np.round(wet_start / params["samplerate"], 3),
                    "stop": np.round(wet_stop / params["samplerate"], 3),
                    "lvl": utt["lvl"],
                    "words": utt["words"],
                    "file": utt["file"],
                    "channel": utt["channel"],
                    "rir": utt["rir"],
                    "rir_channels": utt["rir_channel"],
                }
            )
            # we add to mixture
            mixture[wet_start : wet_start + len(c_audio)] += c_audio
        # we allow for clipping as it occurs also in real recordings.
        # save per speaker clean sources
        if params["save_dry_sources"]:
            torchaudio.save(
                os.path.join(
                    output_dir,
                    session_n,
                    "session_{}_spk_{}_dry.wav".format(session_n, spk),
                ),
                torch.clamp(dry, min=-1, max=1),
                params["samplerate"],
            )
        if params["save_wet_sources"]:
            torchaudio.save(
                os.path.join(
                    output_dir,
                    session_n,
                    "session_{}_spk_{}_wet.wav".format(session_n, spk),
                ),
                torch.clamp(wet, min=-1, max=1),
                params["samplerate"],
            )
    with open(
        os.path.join(output_dir, session_n, "{}.json".format(session_n)), "w"
    ) as f:
        json.dump(session_meta, f, indent=4)
    # add impulsive noises
    for noise_event in metadata["noises"]:
        c_audio, fs = torchaudio.load(
            os.path.join(params["rirs_noises_root"], noise_event["file"])
        )
        assert fs == params["samplerate"]
        if len(c_audio.shape) > 1:  # multichannel
            c_audio = c_audio[noise_event["channel"], :]
        c_audio = c_audio - torch.mean(c_audio)
        c_audio = rescale(
            c_audio,
            c_audio.size(0),
            noise_event["lvl"],
            scale="dB",
            amp_type="peak",
        )
        # we save it in dry
        dry_start = int(noise_event["start"] * params["samplerate"])
        # dry_stop = dry_start + c_audio.shape[-1]
        # we add now reverb and put it in wet
        c_rir, fs = torchaudio.load(
            os.path.join(params["rirs_noises_root"], noise_event["rir"])
        )
        assert fs == params["samplerate"]
        c_rir = c_rir[noise_event["rir_channel"], :]
        c_audio = reverberate(c_audio, c_rir, "peak")
        # tof is not accounted because in reverberate we shift by it
        wet_start = dry_start
        mixture[wet_start : wet_start + len(c_audio)] += c_audio
    # add background
    if metadata["background"]["file"]:
        # File-based background: load exactly the crop recorded in the
        # metadata (orig_start / num_frames) so rendering is reproducible.
        c_audio, fs = torchaudio.load(
            os.path.join(
                params["backgrounds_root"], metadata["background"]["file"]
            ),
            frame_offset=metadata["background"]["orig_start"],
            num_frames=mixture.shape[-1],
        )
        assert fs == params["samplerate"]
        if len(c_audio.shape) > 1:  # multichannel
            c_audio = c_audio[metadata["background"]["channel"], :]
        c_audio = c_audio - torch.mean(c_audio)
        # Background uses average-amplitude scaling (not peak) since it is a
        # long stationary signal.
        c_audio = rescale(
            c_audio,
            c_audio.size(0),
            metadata["background"]["lvl"],
            scale="dB",
            amp_type="avg",
        )
        mixture += c_audio
    else:
        # add gaussian noise
        mixture += rescale(
            torch.normal(0, 1, mixture.shape),
            mixture.size(0),
            metadata["background"]["lvl"],
            scale="dB",
            amp_type="peak",
        )
    # save total mixture
    mixture = torch.clamp(mixture, min=-1, max=1)
    torchaudio.save(
        os.path.join(output_dir, session_n, "{}_mixture.wav".format(session_n)),
        mixture.unsqueeze(0),
        params["samplerate"],
    )
| 6,540 | 33.246073 | 80 | py |
speechbrain | speechbrain-main/recipes/WSJ0Mix/separation/dynamic_mixing.py | import speechbrain as sb
import numpy as np
import torch
import torchaudio
import glob
import os
from pathlib import Path
import random
from speechbrain.processing.signal_processing import rescale
from speechbrain.dataio.batch import PaddedBatch
"""
The functions to implement Dynamic Mixing For SpeechSeparation
Authors
* Samuele Cornell 2021
* Cem Subakan 2021
"""
def build_spk_hashtable(hparams):
    """Map each WSJ0 speaker id to the list of its utterance files.

    The folder ``hparams["base_folder_dm"]`` is scanned recursively for
    .wav files; every file's sample rate is asserted to match
    ``hparams["sample_rate"]``. Returns the speaker->utterances mapping
    together with the per-speaker utterance counts, later used as sampling
    weights for dynamic mixing.
    """
    pattern = os.path.join(hparams["base_folder_dm"], "**/*.wav")
    spk_hashtable = {}
    for utt_file in glob.glob(pattern, recursive=True):
        # The speaker id is encoded in the first 3 characters of the stem,
        # e.g. 019o031a.wav -> speaker "019", utterance "o031a".
        speaker = Path(utt_file).stem[:3]
        assert torchaudio.info(utt_file).sample_rate == hparams["sample_rate"]
        spk_hashtable.setdefault(speaker, []).append(utt_file)
    # Sampling weights: one count per speaker, in insertion order.
    spk_weights = [len(utts) for utts in spk_hashtable.values()]
    return spk_hashtable, spk_weights
def get_wham_noise_filenames(hparams):
    """Return the list of WHAM! noise wav files used in dynamic mixing.

    The sub-folder layout differs between LibriMix ("train-360") and the
    plain WHAM!/WSJ0Mix layout ("tr"); which one is used depends on whether
    "Libri" appears in ``hparams["data_folder"]``.

    Raises
    ------
    ValueError
        If ``hparams["sample_rate"]`` is neither 8000 nor 16000.
    """
    sample_rate = hparams["sample_rate"]
    if sample_rate == 8000:
        band_dir = "wav8k"
    elif sample_rate == 16000:
        band_dir = "wav16k"
    else:
        raise ValueError("Unsupported Sampling Rate")
    # LibriMix keeps training noise under "train-360", WHAM! under "tr".
    split_dir = "train-360" if "Libri" in hparams["data_folder"] else "tr"
    noise_path = "{}/min/{}/noise/".format(band_dir, split_dir)
    return glob.glob(
        os.path.join(hparams["data_folder"], noise_path, "*.wav")
    )
def dynamic_mix_data_prep(hparams):
    """
    Dynamic mixing for WSJ0-2/3Mix and WHAM!/WHAMR!

    Builds a DataLoader whose audio pipeline synthesizes a fresh mixture on
    every access: random speakers are drawn (weighted by utterance count),
    random windows of their utterances are rescaled to random levels and
    summed; for WHAM!-style folders a noise file is added as well. The
    returned batches carry "mix_sig", "s1_sig"/"s2_sig"(/"s3_sig") and
    "noise_sig" keys.
    """
    # 1. Define datasets
    train_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["train_data"],
        replacements={"data_root": hparams["data_folder"]},
    )
    # we build an dictionary where keys are speakers id and entries are list
    # of utterances files of that speaker
    spk_hashtable, spk_weights = build_spk_hashtable(hparams)
    spk_list = [x for x in spk_hashtable.keys()]
    # Normalize counts into a probability distribution for np.random.choice.
    spk_weights = [x / sum(spk_weights) for x in spk_weights]
    # WHAM!-style folders additionally mix in recorded noise.
    if "wham" in Path(hparams["data_folder"]).stem:
        noise_files = get_wham_noise_filenames(hparams)
    @sb.utils.data_pipeline.takes("mix_wav")
    @sb.utils.data_pipeline.provides(
        "mix_sig", "s1_sig", "s2_sig", "s3_sig", "noise_sig"
    )
    def audio_pipeline(
        mix_wav,
    ):  # this is dummy --> it means one epoch will be same as without dynamic mixing
        """
        This audio pipeline defines the compute graph for dynamic mixing
        """
        # Draw the session's speakers without replacement, weighted by how
        # many utterances each speaker has.
        speakers = np.random.choice(
            spk_list, hparams["num_spks"], replace=False, p=spk_weights
        )
        if "wham" in Path(hparams["data_folder"]).stem:
            noise_file = np.random.choice(noise_files, 1, replace=False)
            noise, fs_read = torchaudio.load(noise_file[0])
            noise = noise.squeeze()
            # gain = np.clip(random.normalvariate(1, 10), -4, 15)
            # noise = rescale(noise, torch.tensor(len(noise)), gain, scale="dB").squeeze()
        # select two speakers randomly
        sources = []
        first_lvl = None
        spk_files = [
            np.random.choice(spk_hashtable[spk], 1, False)[0]
            for spk in speakers
        ]
        # Mixture length = shortest selected utterance, capped at the
        # configured training segment length.
        minlen = min(
            *[torchaudio.info(x).num_frames for x in spk_files],
            hparams["training_signal_len"],
        )
        for i, spk_file in enumerate(spk_files):
            # select random offset
            length = torchaudio.info(spk_file).num_frames
            start = 0
            stop = length
            if length > minlen:  # take a random window
                start = np.random.randint(0, length - minlen)
                stop = start + minlen
            tmp, fs_read = torchaudio.load(
                spk_file, frame_offset=start, num_frames=stop - start,
            )
            # peak = float(Path(spk_file).stem.split("_peak_")[-1])
            tmp = tmp[0]  # * peak # remove channel dim and normalize
            if i == 0:
                # First speaker sets the reference level; the others are
                # placed relative to it (same scheme as the WSJ0Mix scripts).
                gain = np.clip(random.normalvariate(-27.43, 2.57), -45, 0)
                tmp = rescale(tmp, torch.tensor(len(tmp)), gain, scale="dB")
                # assert not torch.all(torch.isnan(tmp))
                first_lvl = gain
            else:
                gain = np.clip(
                    first_lvl + random.normalvariate(-2.51, 2.66), -45, 0
                )
                tmp = rescale(tmp, torch.tensor(len(tmp)), gain, scale="dB")
                # assert not torch.all(torch.isnan(tmp))
            sources.append(tmp)
        # we mix the sources together
        # here we can also use augmentations ! -> runs on cpu and for each
        # mixture parameters will be different rather than for whole batch.
        # no difference however for bsz=1 :)
        # padding left
        # sources, _ = batch_pad_right(sources)
        sources = torch.stack(sources)
        mixture = torch.sum(sources, 0)
        if "wham" in Path(hparams["data_folder"]).stem:
            len_noise = len(noise)
            len_mix = len(mixture)
            min_len = min(len_noise, len_mix)
            mixture = mixture[:min_len] + noise[:min_len]
        # Jointly rescale mixture and sources so the loudest sample is 0.9,
        # keeping their relative levels intact.
        max_amp = max(
            torch.abs(mixture).max().item(),
            *[x.item() for x in torch.abs(sources).max(dim=-1)[0]],
        )
        mix_scaling = 1 / max_amp * 0.9
        sources = mix_scaling * sources
        mixture = mix_scaling * mixture
        yield mixture
        for i in range(hparams["num_spks"]):
            yield sources[i]
        # If the number of speakers is 2, yield None for the 3rd speaker
        if hparams["num_spks"] == 2:
            yield None
        if "wham" in Path(hparams["data_folder"]).stem:
            # Rescale the noise to the mean source level before yielding it.
            mean_source_lvl = sources.abs().mean()
            mean_noise_lvl = noise.abs().mean()
            noise = (mean_source_lvl / mean_noise_lvl) * noise
            yield noise
        else:
            yield None
    sb.dataio.dataset.add_dynamic_item([train_data], audio_pipeline)
    sb.dataio.dataset.set_output_keys(
        [train_data],
        ["id", "mix_sig", "s1_sig", "s2_sig", "s3_sig", "noise_sig"],
    )
    train_data = torch.utils.data.DataLoader(
        train_data,
        batch_size=hparams["dataloader_opts"]["batch_size"],
        num_workers=hparams["dataloader_opts"]["num_workers"],
        collate_fn=PaddedBatch,
        # Reseed numpy per worker so DataLoader workers do not draw
        # identical random mixtures.
        worker_init_fn=lambda x: np.random.seed(
            int.from_bytes(os.urandom(4), "little") + x
        ),
    )
    return train_data
| 7,309 | 32.378995 | 93 | py |
speechbrain | speechbrain-main/recipes/WSJ0Mix/separation/train.py | #!/usr/bin/env/python3
"""Recipe for training a neural speech separation system on wsjmix the
dataset. The system employs an encoder, a decoder, and a masking network.
To run this recipe, do the following:
> python train.py hparams/sepformer.yaml
> python train.py hparams/dualpath_rnn.yaml
> python train.py hparams/convtasnet.yaml
The experiment file is flexible enough to support different neural
networks. By properly changing the parameter files, you can try
different architectures. The script supports both wsj2mix and
wsj3mix.
Authors
* Cem Subakan 2020
* Mirco Ravanelli 2020
* Samuele Cornell 2020
* Mirko Bronzi 2020
* Jianyuan Zhong 2020
"""
import os
import sys
import torch
import torch.nn.functional as F
import torchaudio
import speechbrain as sb
import speechbrain.nnet.schedulers as schedulers
from speechbrain.utils.distributed import run_on_main
from torch.cuda.amp import autocast
from hyperpyyaml import load_hyperpyyaml
import numpy as np
from tqdm import tqdm
import csv
import logging
# Define training procedure
class Separation(sb.Brain):
    """Brain subclass implementing source-separation training/evaluation."""
    def compute_forward(self, mix, targets, stage, noise=None):
        """Forward computations from the mixture to the separated signals."""
        # Unpack lists and put tensors in the right device
        mix, mix_lens = mix
        mix, mix_lens = mix.to(self.device), mix_lens.to(self.device)
        # Convert targets to tensor
        targets = torch.cat(
            [targets[i][0].unsqueeze(-1) for i in range(self.hparams.num_spks)],
            dim=-1,
        ).to(self.device)
        # Add speech distortions
        if stage == sb.Stage.TRAIN:
            with torch.no_grad():
                if self.hparams.use_speedperturb or self.hparams.use_rand_shift:
                    mix, targets = self.add_speed_perturb(targets, mix_lens)
                    # Re-synthesize the mixture from the perturbed sources so
                    # that mixture and targets stay consistent.
                    mix = targets.sum(-1)
                if self.hparams.use_wavedrop:
                    mix = self.hparams.wavedrop(mix, mix_lens)
                if self.hparams.limit_training_signal_len:
                    mix, targets = self.cut_signals(mix, targets)
        # Separation: encode, estimate one mask per speaker, apply masks.
        mix_w = self.hparams.Encoder(mix)
        est_mask = self.hparams.MaskNet(mix_w)
        mix_w = torch.stack([mix_w] * self.hparams.num_spks)
        sep_h = mix_w * est_mask
        # Decoding
        est_source = torch.cat(
            [
                self.hparams.Decoder(sep_h[i]).unsqueeze(-1)
                for i in range(self.hparams.num_spks)
            ],
            dim=-1,
        )
        # T changed after conv1d in encoder, fix it here
        T_origin = mix.size(1)
        T_est = est_source.size(1)
        if T_origin > T_est:
            est_source = F.pad(est_source, (0, 0, 0, T_origin - T_est))
        else:
            est_source = est_source[:, :T_origin, :]
        return est_source, targets
    def compute_objectives(self, predictions, targets):
        """Computes the sinr loss"""
        return self.hparams.loss(targets, predictions)
    def fit_batch(self, batch):
        """Trains one batch"""
        # Unpacking batch list
        mixture = batch.mix_sig
        targets = [batch.s1_sig, batch.s2_sig]
        if self.hparams.num_spks == 3:
            targets.append(batch.s3_sig)
        if self.auto_mix_prec:
            # Mixed-precision path: forward/loss under autocast, then scaled
            # backward + unscale before gradient clipping.
            with autocast():
                predictions, targets = self.compute_forward(
                    mixture, targets, sb.Stage.TRAIN
                )
                loss = self.compute_objectives(predictions, targets)
                # hard threshold the easy dataitems
                if self.hparams.threshold_byloss:
                    th = self.hparams.threshold
                    loss_to_keep = loss[loss > th]
                    if loss_to_keep.nelement() > 0:
                        loss = loss_to_keep.mean()
                else:
                    loss = loss.mean()
            if (
                loss < self.hparams.loss_upper_lim and loss.nelement() > 0
            ):  # the fix for computational problems
                self.scaler.scale(loss).backward()
                if self.hparams.clip_grad_norm >= 0:
                    self.scaler.unscale_(self.optimizer)
                    torch.nn.utils.clip_grad_norm_(
                        self.modules.parameters(), self.hparams.clip_grad_norm,
                    )
                self.scaler.step(self.optimizer)
                self.scaler.update()
            else:
                # Skip the update on exploding/empty losses; zero the loss
                # in-place so the reported value is well-defined.
                self.nonfinite_count += 1
                logger.info(
                    "infinite loss or empty loss! it happened {} times so far - skipping this batch".format(
                        self.nonfinite_count
                    )
                )
                loss.data = torch.tensor(0).to(self.device)
        else:
            predictions, targets = self.compute_forward(
                mixture, targets, sb.Stage.TRAIN
            )
            loss = self.compute_objectives(predictions, targets)
            if self.hparams.threshold_byloss:
                th = self.hparams.threshold
                loss_to_keep = loss[loss > th]
                if loss_to_keep.nelement() > 0:
                    loss = loss_to_keep.mean()
            else:
                loss = loss.mean()
            if (
                loss < self.hparams.loss_upper_lim and loss.nelement() > 0
            ):  # the fix for computational problems
                loss.backward()
                if self.hparams.clip_grad_norm >= 0:
                    torch.nn.utils.clip_grad_norm_(
                        self.modules.parameters(), self.hparams.clip_grad_norm
                    )
                self.optimizer.step()
            else:
                self.nonfinite_count += 1
                logger.info(
                    "infinite loss or empty loss! it happened {} times so far - skipping this batch".format(
                        self.nonfinite_count
                    )
                )
                loss.data = torch.tensor(0).to(self.device)
        self.optimizer.zero_grad()
        return loss.detach().cpu()
    def evaluate_batch(self, batch, stage):
        """Computations needed for validation/test batches"""
        snt_id = batch.id
        mixture = batch.mix_sig
        targets = [batch.s1_sig, batch.s2_sig]
        if self.hparams.num_spks == 3:
            targets.append(batch.s3_sig)
        with torch.no_grad():
            predictions, targets = self.compute_forward(mixture, targets, stage)
            loss = self.compute_objectives(predictions, targets)
        # Manage audio file saving (only at test time, optionally capped by
        # n_audio_to_save).
        if stage == sb.Stage.TEST and self.hparams.save_audio:
            if hasattr(self.hparams, "n_audio_to_save"):
                if self.hparams.n_audio_to_save > 0:
                    self.save_audio(snt_id[0], mixture, targets, predictions)
                    self.hparams.n_audio_to_save += -1
            else:
                self.save_audio(snt_id[0], mixture, targets, predictions)
        return loss.mean().detach()
    def on_stage_end(self, stage, stage_loss, epoch):
        """Gets called at the end of a epoch."""
        # Compute/store important stats
        stage_stats = {"si-snr": stage_loss}
        if stage == sb.Stage.TRAIN:
            self.train_stats = stage_stats
        # Perform end-of-iteration things, like annealing, logging, etc.
        if stage == sb.Stage.VALID:
            # Learning rate annealing
            if isinstance(
                self.hparams.lr_scheduler, schedulers.ReduceLROnPlateau
            ):
                current_lr, next_lr = self.hparams.lr_scheduler(
                    [self.optimizer], epoch, stage_loss
                )
                schedulers.update_learning_rate(self.optimizer, next_lr)
            else:
                # if we do not use the reducelronplateau, we do not change the lr
                current_lr = self.hparams.optimizer.optim.param_groups[0]["lr"]
            self.hparams.train_logger.log_stats(
                stats_meta={"epoch": epoch, "lr": current_lr},
                train_stats=self.train_stats,
                valid_stats=stage_stats,
            )
            # Keep only the checkpoint with the best (lowest) si-snr loss.
            self.checkpointer.save_and_keep_only(
                meta={"si-snr": stage_stats["si-snr"]}, min_keys=["si-snr"],
            )
        elif stage == sb.Stage.TEST:
            self.hparams.train_logger.log_stats(
                stats_meta={"Epoch loaded": self.hparams.epoch_counter.current},
                test_stats=stage_stats,
            )
    def add_speed_perturb(self, targets, targ_lens):
        """Adds speed perturbation and random_shift to the input signals"""
        min_len = -1
        recombine = False
        if self.hparams.use_speedperturb:
            # Performing speed change (independently on each source)
            new_targets = []
            recombine = True
            for i in range(targets.shape[-1]):
                new_target = self.hparams.speedperturb(
                    targets[:, :, i], targ_lens
                )
                new_targets.append(new_target)
                # Track the shortest perturbed length so sources can be
                # truncated to a common size when recombined.
                if i == 0:
                    min_len = new_target.shape[-1]
                else:
                    if new_target.shape[-1] < min_len:
                        min_len = new_target.shape[-1]
        if self.hparams.use_rand_shift:
            # Performing random_shift (independently on each source)
            recombine = True
            for i in range(targets.shape[-1]):
                rand_shift = torch.randint(
                    self.hparams.min_shift, self.hparams.max_shift, (1,)
                )
                new_targets[i] = new_targets[i].to(self.device)
                new_targets[i] = torch.roll(
                    new_targets[i], shifts=(rand_shift[0],), dims=1
                )
        # Re-combination
        if recombine:
            if self.hparams.use_speedperturb:
                targets = torch.zeros(
                    targets.shape[0],
                    min_len,
                    targets.shape[-1],
                    device=targets.device,
                    dtype=torch.float,
                )
            for i, new_target in enumerate(new_targets):
                targets[:, :, i] = new_targets[i][:, 0:min_len]
        mix = targets.sum(-1)
        return mix, targets
    def cut_signals(self, mixture, targets):
        """This function selects a random segment of a given length within the mixture.
        The corresponding targets are selected accordingly"""
        randstart = torch.randint(
            0,
            1 + max(0, mixture.shape[1] - self.hparams.training_signal_len),
            (1,),
        ).item()
        targets = targets[
            :, randstart : randstart + self.hparams.training_signal_len, :
        ]
        mixture = mixture[
            :, randstart : randstart + self.hparams.training_signal_len
        ]
        return mixture, targets
    def reset_layer_recursively(self, layer):
        """Reinitializes the parameters of the neural networks"""
        if hasattr(layer, "reset_parameters"):
            layer.reset_parameters()
        for child_layer in layer.modules():
            if layer != child_layer:
                self.reset_layer_recursively(child_layer)
    def save_results(self, test_data):
        """This script computes the SDR and SI-SNR metrics and saves
        them into a csv file"""
        # This package is required for SDR computation
        from mir_eval.separation import bss_eval_sources
        # Create folders where to store audio
        save_file = os.path.join(self.hparams.output_folder, "test_results.csv")
        # Variable init
        all_sdrs = []
        all_sdrs_i = []
        all_sisnrs = []
        all_sisnrs_i = []
        csv_columns = ["snt_id", "sdr", "sdr_i", "si-snr", "si-snr_i"]
        test_loader = sb.dataio.dataloader.make_dataloader(
            test_data, **self.hparams.dataloader_opts
        )
        with open(save_file, "w") as results_csv:
            writer = csv.DictWriter(results_csv, fieldnames=csv_columns)
            writer.writeheader()
            # Loop over all test sentence
            with tqdm(test_loader, dynamic_ncols=True) as t:
                for i, batch in enumerate(t):
                    # Apply Separation
                    mixture, mix_len = batch.mix_sig
                    snt_id = batch.id
                    targets = [batch.s1_sig, batch.s2_sig]
                    if self.hparams.num_spks == 3:
                        targets.append(batch.s3_sig)
                    with torch.no_grad():
                        predictions, targets = self.compute_forward(
                            batch.mix_sig, targets, sb.Stage.TEST
                        )
                    # Compute SI-SNR
                    sisnr = self.compute_objectives(predictions, targets)
                    # Compute SI-SNR improvement: baseline uses the raw
                    # mixture replicated once per speaker as the "estimate".
                    mixture_signal = torch.stack(
                        [mixture] * self.hparams.num_spks, dim=-1
                    )
                    mixture_signal = mixture_signal.to(targets.device)
                    sisnr_baseline = self.compute_objectives(
                        mixture_signal, targets
                    )
                    sisnr_i = sisnr - sisnr_baseline
                    # Compute SDR
                    sdr, _, _, _ = bss_eval_sources(
                        targets[0].t().cpu().numpy(),
                        predictions[0].t().detach().cpu().numpy(),
                    )
                    sdr_baseline, _, _, _ = bss_eval_sources(
                        targets[0].t().cpu().numpy(),
                        mixture_signal[0].t().detach().cpu().numpy(),
                    )
                    sdr_i = sdr.mean() - sdr_baseline.mean()
                    # Saving on a csv file (loss is negated: the objective
                    # returns -SI-SNR).
                    row = {
                        "snt_id": snt_id[0],
                        "sdr": sdr.mean(),
                        "sdr_i": sdr_i,
                        "si-snr": -sisnr.item(),
                        "si-snr_i": -sisnr_i.item(),
                    }
                    writer.writerow(row)
                    # Metric Accumulation
                    all_sdrs.append(sdr.mean())
                    all_sdrs_i.append(sdr_i.mean())
                    all_sisnrs.append(-sisnr.item())
                    all_sisnrs_i.append(-sisnr_i.item())
                row = {
                    "snt_id": "avg",
                    "sdr": np.array(all_sdrs).mean(),
                    "sdr_i": np.array(all_sdrs_i).mean(),
                    "si-snr": np.array(all_sisnrs).mean(),
                    "si-snr_i": np.array(all_sisnrs_i).mean(),
                }
                writer.writerow(row)
        logger.info("Mean SISNR is {}".format(np.array(all_sisnrs).mean()))
        logger.info("Mean SISNRi is {}".format(np.array(all_sisnrs_i).mean()))
        logger.info("Mean SDR is {}".format(np.array(all_sdrs).mean()))
        logger.info("Mean SDRi is {}".format(np.array(all_sdrs_i).mean()))
    def save_audio(self, snt_id, mixture, targets, predictions):
        "saves the test audio (mixture, targets, and estimated sources) on disk"
        # Create outout folder
        save_path = os.path.join(self.hparams.save_folder, "audio_results")
        if not os.path.exists(save_path):
            os.mkdir(save_path)
        for ns in range(self.hparams.num_spks):
            # Estimated source (peak-normalized before writing)
            signal = predictions[0, :, ns]
            signal = signal / signal.abs().max()
            save_file = os.path.join(
                save_path, "item{}_source{}hat.wav".format(snt_id, ns + 1)
            )
            torchaudio.save(
                save_file, signal.unsqueeze(0).cpu(), self.hparams.sample_rate
            )
            # Original source
            signal = targets[0, :, ns]
            signal = signal / signal.abs().max()
            save_file = os.path.join(
                save_path, "item{}_source{}.wav".format(snt_id, ns + 1)
            )
            torchaudio.save(
                save_file, signal.unsqueeze(0).cpu(), self.hparams.sample_rate
            )
        # Mixture
        signal = mixture[0][0, :]
        signal = signal / signal.abs().max()
        save_file = os.path.join(save_path, "item{}_mix.wav".format(snt_id))
        torchaudio.save(
            save_file, signal.unsqueeze(0).cpu(), self.hparams.sample_rate
        )
def dataio_prep(hparams):
    """Creates data processing pipeline

    Builds train/valid/test DynamicItemDatasets from the prepared CSVs and
    registers one audio-loading pipeline per signal (mixture plus each
    source). Output keys include "s3_sig" only when ``num_spks`` is 3.

    Returns
    -------
    (train_data, valid_data, test_data) : tuple of DynamicItemDataset
    """
    # 1. Define datasets
    train_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["train_data"],
        replacements={"data_root": hparams["data_folder"]},
    )
    valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["valid_data"],
        replacements={"data_root": hparams["data_folder"]},
    )
    test_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["test_data"],
        replacements={"data_root": hparams["data_folder"]},
    )
    datasets = [train_data, valid_data, test_data]
    # 2. Provide audio pipelines
    @sb.utils.data_pipeline.takes("mix_wav")
    @sb.utils.data_pipeline.provides("mix_sig")
    def audio_pipeline_mix(mix_wav):
        mix_sig = sb.dataio.dataio.read_audio(mix_wav)
        return mix_sig
    @sb.utils.data_pipeline.takes("s1_wav")
    @sb.utils.data_pipeline.provides("s1_sig")
    def audio_pipeline_s1(s1_wav):
        s1_sig = sb.dataio.dataio.read_audio(s1_wav)
        return s1_sig
    @sb.utils.data_pipeline.takes("s2_wav")
    @sb.utils.data_pipeline.provides("s2_sig")
    def audio_pipeline_s2(s2_wav):
        s2_sig = sb.dataio.dataio.read_audio(s2_wav)
        return s2_sig
    # The third-source pipeline only exists for 3-speaker separation.
    if hparams["num_spks"] == 3:
        @sb.utils.data_pipeline.takes("s3_wav")
        @sb.utils.data_pipeline.provides("s3_sig")
        def audio_pipeline_s3(s3_wav):
            s3_sig = sb.dataio.dataio.read_audio(s3_wav)
            return s3_sig
    sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline_mix)
    sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline_s1)
    sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline_s2)
    if hparams["num_spks"] == 3:
        sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline_s3)
        sb.dataio.dataset.set_output_keys(
            datasets, ["id", "mix_sig", "s1_sig", "s2_sig", "s3_sig"]
        )
    else:
        sb.dataio.dataset.set_output_keys(
            datasets, ["id", "mix_sig", "s1_sig", "s2_sig"]
        )
    return train_data, valid_data, test_data
if __name__ == "__main__":
    # Script entry point: parse hparams, prepare data (optionally with
    # dynamic mixing), build the Brain, then train and/or evaluate.
    # Load hyperparameters file with command-line overrides
    hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])
    with open(hparams_file) as fin:
        hparams = load_hyperpyyaml(fin, overrides)
    # Initialize ddp (useful only for multi-GPU DDP training)
    sb.utils.distributed.ddp_init_group(run_opts)
    # Logger info
    logger = logging.getLogger(__name__)
    # Create experiment directory
    sb.create_experiment_directory(
        experiment_directory=hparams["output_folder"],
        hyperparams_to_save=hparams_file,
        overrides=overrides,
    )
    # Check if wsj0_tr is set with dynamic mixing
    if hparams["dynamic_mixing"] and not os.path.exists(
        hparams["base_folder_dm"]
    ):
        print(
            "Please, specify a valid base_folder_dm folder when using dynamic mixing"
        )
        sys.exit(1)
    # Data preparation (run_on_main: executed once even under DDP)
    from prepare_data import prepare_wsjmix  # noqa
    run_on_main(
        prepare_wsjmix,
        kwargs={
            "datapath": hparams["data_folder"],
            "savepath": hparams["save_folder"],
            "n_spks": hparams["num_spks"],
            "skip_prep": hparams["skip_prep"],
            "fs": hparams["sample_rate"],
        },
    )
    # Create dataset objects
    if hparams["dynamic_mixing"]:
        from dynamic_mixing import dynamic_mix_data_prep
        # if the base_folder for dm is not processed, preprocess them
        if "processed" not in hparams["base_folder_dm"]:
            # if the processed folder already exists we just use it otherwise we do the preprocessing
            if not os.path.exists(
                os.path.normpath(hparams["base_folder_dm"]) + "_processed"
            ):
                from preprocess_dynamic_mixing import resample_folder
                print("Resampling the base folder")
                run_on_main(
                    resample_folder,
                    kwargs={
                        "input_folder": hparams["base_folder_dm"],
                        "output_folder": os.path.normpath(
                            hparams["base_folder_dm"]
                        )
                        + "_processed",
                        "fs": hparams["sample_rate"],
                        "regex": "**/*.wav",
                    },
                )
                # adjust the base_folder_dm path
                hparams["base_folder_dm"] = (
                    os.path.normpath(hparams["base_folder_dm"]) + "_processed"
                )
            else:
                print(
                    "Using the existing processed folder on the same directory as base_folder_dm"
                )
                hparams["base_folder_dm"] = (
                    os.path.normpath(hparams["base_folder_dm"]) + "_processed"
                )
        # Colleting the hparams for dynamic batching
        dm_hparams = {
            "train_data": hparams["train_data"],
            "data_folder": hparams["data_folder"],
            "base_folder_dm": hparams["base_folder_dm"],
            "sample_rate": hparams["sample_rate"],
            "num_spks": hparams["num_spks"],
            "training_signal_len": hparams["training_signal_len"],
            "dataloader_opts": hparams["dataloader_opts"],
        }
        train_data = dynamic_mix_data_prep(dm_hparams)
        _, valid_data, test_data = dataio_prep(hparams)
    else:
        train_data, valid_data, test_data = dataio_prep(hparams)
    # Load pretrained model if pretrained_separator is present in the yaml
    if "pretrained_separator" in hparams:
        run_on_main(hparams["pretrained_separator"].collect_files)
        hparams["pretrained_separator"].load_collected()
    # Brain class initialization
    separator = Separation(
        modules=hparams["modules"],
        opt_class=hparams["optimizer"],
        hparams=hparams,
        run_opts=run_opts,
        checkpointer=hparams["checkpointer"],
    )
    # re-initialize the parameters if we don't use a pretrained model
    if "pretrained_separator" not in hparams:
        for module in separator.modules.values():
            separator.reset_layer_recursively(module)
    if not hparams["test_only"]:
        # Training
        separator.fit(
            separator.hparams.epoch_counter,
            train_data,
            valid_data,
            train_loader_kwargs=hparams["dataloader_opts"],
            valid_loader_kwargs=hparams["dataloader_opts"],
        )
    # Eval
    separator.evaluate(test_data, min_key="si-snr")
    separator.save_results(test_data)
| 23,418 | 35.706897 | 108 | py |
speechbrain | speechbrain-main/recipes/WSJ0Mix/meta/preprocess_dynamic_mixing.py | """
This script allows to resample a folder which contains audio files.
The files are parsed recursively. An exact copy of the folder is created,
with same structure but contained resampled audio files.
Resampling is performed by using sox through torchaudio.
Author
------
Samuele Cornell, 2020
"""
import os
import argparse
from pathlib import Path
import tqdm
import torchaudio
import glob
# from oct2py import octave
from scipy import signal
import numpy as np
import torch
# Command-line interface; defined at module level so --help works without
# touching any audio files.
parser = argparse.ArgumentParser(
    "utility for resampling all audio files in a folder recursively"
    "It --input_folder to --output_folder and "
    "resamples all audio files with specified format to --fs."
)
parser.add_argument("--input_folder", type=str, required=True)
parser.add_argument("--output_folder", type=str, required=True)
parser.add_argument(
    # type was "str", which disagreed with the int default (8000) and forced
    # callers to convert manually; let argparse do the conversion itself.
    "--fs", type=int, default=8000, help="this is the target sampling frequency"
)
parser.add_argument("--regex", type=str, default="**/*.wav")
def resample_folder(input_folder, output_folder, fs, regex):
    """Resamples the wav files within an input folder.

    The input tree is mirrored under ``output_folder``; every matched file is
    resampled to ``fs``, peak-normalized, and saved with the peak value
    embedded in its filename (so the original level can be restored later).

    Arguments
    ---------
    input_folder : path
        Path of the folder to resample.
    output_folder : path
        Path of the output folder with the resampled data.
    fs : int
        Target sampling frequency.
    regex : str
        Glob pattern used to select the files to process.
    """
    matched_files = glob.glob(os.path.join(input_folder, regex), recursive=True)
    for filepath in tqdm.tqdm(matched_files):
        # Load, keep the first channel only, and resample to the target rate.
        waveform, orig_fs = torchaudio.load(filepath)
        samples = waveform[0].numpy()
        samples = signal.resample_poly(samples, fs, orig_fs)

        # Peak-normalize; the peak is recorded in the output filename.
        peak = np.max(np.abs(samples))
        samples = samples / peak
        out_tensor = torch.from_numpy(samples).float()

        rel = Path(filepath).relative_to(Path(input_folder))
        out_relative = os.path.join(
            rel.parent, rel.stem + "_peak_{}.wav".format(peak)
        )

        # Recreate the source directory structure in the output folder.
        os.makedirs(
            Path(os.path.join(output_folder, rel)).parent, exist_ok=True,
        )

        torchaudio.save(
            os.path.join(output_folder, out_relative),
            out_tensor.reshape(1, -1),
            fs,
        )
if __name__ == "__main__":
    # Parse the CLI arguments and run the resampling pass.
    cli_args = parser.parse_args()
    resample_folder(
        cli_args.input_folder,
        cli_args.output_folder,
        int(cli_args.fs),
        cli_args.regex,
    )
| 2,732 | 27.175258 | 80 | py |
speechbrain | speechbrain-main/recipes/VoxCeleb/voxceleb_prepare.py | """
Data preparation.
Download: http://www.robots.ox.ac.uk/~vgg/data/voxceleb/
"""
import os
import csv
import logging
import glob
import random
import shutil
import sys # noqa F401
import numpy as np
import torch
import torchaudio
from tqdm.contrib import tqdm
from speechbrain.dataio.dataio import (
load_pkl,
save_pkl,
)
logger = logging.getLogger(__name__)
# Cache file storing the options of a completed preparation run; read back by
# skip() to decide whether preparation can be skipped.
OPT_FILE = "opt_voxceleb_prepare.pkl"
# Names of the csv files produced by the preparation.
TRAIN_CSV = "train.csv"
DEV_CSV = "dev.csv"
TEST_CSV = "test.csv"
ENROL_CSV = "enrol.csv"
# Sampling rate used to convert sample counts to/from seconds.
SAMPLERATE = 16000
# Archive / folder names of the original VoxCeleb1 distribution.
DEV_WAV = "vox1_dev_wav.zip"
TEST_WAV = "vox1_test_wav.zip"
META = "meta"
def prepare_voxceleb(
    data_folder,
    save_folder,
    verification_pairs_file,
    splits=["train", "dev", "test"],
    split_ratio=[90, 10],
    seg_dur=3.0,
    amp_th=5e-04,
    source=None,
    split_speaker=False,
    random_segment=False,
    skip_prep=False,
):
    """
    Prepares the csv files for the Voxceleb1 or Voxceleb2 datasets.
    Please follow the instructions in the README.md file for
    preparing Voxceleb2.

    Arguments
    ---------
    data_folder : str
        Path to the folder where the original VoxCeleb dataset is stored.
    save_folder : str
        The directory where to store the csv files.
    verification_pairs_file : str
        txt file containing the verification split.
    splits : list
        List of splits to prepare from ['train', 'dev']
    split_ratio : list
        List if int for train and validation splits
    seg_dur : int
        Segment duration of a chunk in seconds (e.g., 3.0 seconds).
    amp_th : float
        removes segments whose average amplitude is below the
        given threshold.
    source : str
        Path to the folder where the VoxCeleb dataset source is stored.
    split_speaker : bool
        Speaker-wise split
    random_segment : bool
        Train random segments
    skip_prep: Bool
        If True, skip preparation.

    Example
    -------
    >>> from recipes.VoxCeleb.voxceleb1_prepare import prepare_voxceleb
    >>> data_folder = 'data/VoxCeleb1/'
    >>> save_folder = 'VoxData/'
    >>> splits = ['train', 'dev']
    >>> split_ratio = [90, 10]
    >>> prepare_voxceleb(data_folder, save_folder, splits, split_ratio)
    """
    # NOTE(review): splits/split_ratio use mutable default arguments; they are
    # only read here, never mutated, so this is currently harmless.
    if skip_prep:
        return
    # Create configuration for easily skipping data_preparation stage
    conf = {
        "data_folder": data_folder,
        "splits": splits,
        "split_ratio": split_ratio,
        "save_folder": save_folder,
        "seg_dur": seg_dur,
        "split_speaker": split_speaker,
    }

    if not os.path.exists(save_folder):
        os.makedirs(save_folder)

    # Setting output files
    save_opt = os.path.join(save_folder, OPT_FILE)
    save_csv_train = os.path.join(save_folder, TRAIN_CSV)
    save_csv_dev = os.path.join(save_folder, DEV_CSV)

    # Create the data folder containing VoxCeleb1 test data from the source
    if source is not None:
        if not os.path.exists(os.path.join(data_folder, "wav", "id10270")):
            logger.info(f"Extracting {source}/{TEST_WAV} to {data_folder}")
            shutil.unpack_archive(os.path.join(source, TEST_WAV), data_folder)
        if not os.path.exists(os.path.join(data_folder, "meta")):
            logger.info(f"Copying {source}/meta to {data_folder}")
            shutil.copytree(
                os.path.join(source, "meta"), os.path.join(data_folder, "meta")
            )

    # Check if this phase is already done (if so, skip it)
    if skip(splits, save_folder, conf):
        logger.info("Skipping preparation, completed in previous run.")
        return

    # Multiple folders may be passed as a single comma-separated string;
    # from here on, data_folder is always a list of folders.
    if "," in data_folder:
        data_folder = data_folder.replace(" ", "").split(",")
    else:
        data_folder = [data_folder]

    # _check_voxceleb1_folders(data_folder, splits)

    msg = "\tCreating csv file for the VoxCeleb Dataset.."
    logger.info(msg)

    # Split data into 90% train and 10% validation (verification split)
    wav_lst_train, wav_lst_dev = _get_utt_split_lists(
        data_folder, split_ratio, verification_pairs_file, split_speaker
    )

    # Creating csv file for training data
    if "train" in splits:
        prepare_csv(
            seg_dur, wav_lst_train, save_csv_train, random_segment, amp_th
        )

    if "dev" in splits:
        prepare_csv(seg_dur, wav_lst_dev, save_csv_dev, random_segment, amp_th)

    # For PLDA verification
    if "test" in splits:
        prepare_csv_enrol_test(
            data_folder, save_folder, verification_pairs_file
        )

    # Saving options (useful to skip this phase when already done)
    save_pkl(conf, save_opt)
def skip(splits, save_folder, conf):
    """
    Detects if the voxceleb data_preparation has been already done.
    If the preparation has been done, we can skip it.

    Returns
    -------
    bool
        if True, the preparation phase can be skipped.
        if False, it must be done.
    """
    split_files = {
        "train": TRAIN_CSV,
        "dev": DEV_CSV,
        "test": TEST_CSV,
        "enrol": ENROL_CSV,
    }

    # Every requested csv file must already exist.
    for split in splits:
        if not os.path.isfile(os.path.join(save_folder, split_files[split])):
            return False

    # The saved options must exist and match the current configuration.
    save_opt = os.path.join(save_folder, OPT_FILE)
    if not os.path.isfile(save_opt):
        return False
    return load_pkl(save_opt) == conf
def _check_voxceleb_folders(data_folders, splits):
"""
Check if the data folder actually contains the Voxceleb1 dataset.
If it does not, raise an error.
Returns
-------
None
Raises
------
FileNotFoundError
"""
for data_folder in data_folders:
if "train" in splits:
folder_vox1 = os.path.join(data_folder, "wav", "id10001")
folder_vox2 = os.path.join(data_folder, "wav", "id00012")
if not os.path.exists(folder_vox1) or not os.path.exists(
folder_vox2
):
err_msg = "the specified folder does not contain Voxceleb"
raise FileNotFoundError(err_msg)
if "test" in splits:
folder = os.path.join(data_folder, "wav", "id10270")
if not os.path.exists(folder):
err_msg = (
"the folder %s does not exist (as it is expected in "
"the Voxceleb dataset)" % folder
)
raise FileNotFoundError(err_msg)
folder = os.path.join(data_folder, "meta")
if not os.path.exists(folder):
err_msg = (
"the folder %s does not exist (as it is expected in "
"the Voxceleb dataset)" % folder
)
raise FileNotFoundError(err_msg)
# Used for verification split
def _get_utt_split_lists(
data_folders, split_ratio, verification_pairs_file, split_speaker=False
):
"""
Tot. number of speakers vox1= 1211.
Tot. number of speakers vox2= 5994.
Splits the audio file list into train and dev.
This function automatically removes verification test files from the training and dev set (if any).
"""
train_lst = []
dev_lst = []
print("Getting file list...")
for data_folder in data_folders:
test_lst = [
line.rstrip("\n").split(" ")[1]
for line in open(verification_pairs_file)
]
test_lst = set(sorted(test_lst))
test_spks = [snt.split("/")[0] for snt in test_lst]
path = os.path.join(data_folder, "wav", "**", "*.wav")
if split_speaker:
# avoid test speakers for train and dev splits
audio_files_dict = {}
for f in glob.glob(path, recursive=True):
spk_id = f.split("/wav/")[1].split("/")[0]
if spk_id not in test_spks:
audio_files_dict.setdefault(spk_id, []).append(f)
spk_id_list = list(audio_files_dict.keys())
random.shuffle(spk_id_list)
split = int(0.01 * split_ratio[0] * len(spk_id_list))
for spk_id in spk_id_list[:split]:
train_lst.extend(audio_files_dict[spk_id])
for spk_id in spk_id_list[split:]:
dev_lst.extend(audio_files_dict[spk_id])
else:
# avoid test speakers for train and dev splits
audio_files_list = []
for f in glob.glob(path, recursive=True):
try:
spk_id = f.split("/wav/")[1].split("/")[0]
except ValueError:
logger.info(f"Malformed path: {f}")
continue
if spk_id not in test_spks:
audio_files_list.append(f)
random.shuffle(audio_files_list)
split = int(0.01 * split_ratio[0] * len(audio_files_list))
train_snts = audio_files_list[:split]
dev_snts = audio_files_list[split:]
train_lst.extend(train_snts)
dev_lst.extend(dev_snts)
return train_lst, dev_lst
def _get_chunks(seg_dur, audio_id, audio_duration):
"""
Returns list of chunks
"""
num_chunks = int(audio_duration / seg_dur) # all in milliseconds
chunk_lst = [
audio_id + "_" + str(i * seg_dur) + "_" + str(i * seg_dur + seg_dur)
for i in range(num_chunks)
]
return chunk_lst
def prepare_csv(seg_dur, wav_lst, csv_file, random_segment=False, amp_th=0):
    """
    Creates the csv file given a list of wav files.

    Arguments
    ---------
    seg_dur : float
        Duration (in seconds) of the fixed-length chunks.
    wav_lst : list
        The list of wav files of a given data split.
    csv_file : str
        The path of the output csv file
    random_segment: bool
        Read random segments
    amp_th: float
        Threshold on the average amplitude on the chunk.
        If under this threshold, the chunk is discarded.

    Returns
    -------
    None
    """
    msg = '\t"Creating csv lists in %s..."' % (csv_file)
    logger.info(msg)

    csv_output = [["ID", "duration", "wav", "start", "stop", "spk_id"]]

    # For assigning unique ID to each chunk
    my_sep = "--"
    entry = []

    # Processing all the wav files in the list
    for wav_file in tqdm(wav_lst, dynamic_ncols=True):
        # Getting sentence and speaker ids; paths are expected to end in
        # <spk_id>/<sess_id>/<utt_id>.wav (unpacking raises ValueError
        # otherwise).
        try:
            [spk_id, sess_id, utt_id] = wav_file.split("/")[-3:]
        except ValueError:
            logger.info(f"Malformed path: {wav_file}")
            continue
        audio_id = my_sep.join([spk_id, sess_id, utt_id.split(".")[0]])

        # Reading the signal (to retrieve duration in seconds)
        signal, fs = torchaudio.load(wav_file)
        signal = signal.squeeze(0)

        if random_segment:
            # One csv row per utterance; a random chunk is cut at train time.
            audio_duration = signal.shape[0] / SAMPLERATE
            start_sample = 0
            stop_sample = signal.shape[0]

            # Composition of the csv_line
            csv_line = [
                audio_id,
                str(audio_duration),
                wav_file,
                start_sample,
                stop_sample,
                spk_id,
            ]
            entry.append(csv_line)
        else:
            # One csv row per fixed-length chunk.
            audio_duration = signal.shape[0] / SAMPLERATE

            uniq_chunks_list = _get_chunks(seg_dur, audio_id, audio_duration)
            for chunk in uniq_chunks_list:
                s, e = chunk.split("_")[-2:]
                start_sample = int(float(s) * SAMPLERATE)
                end_sample = int(float(e) * SAMPLERATE)

                # Avoid chunks with very small energy. Pure torch here: the
                # previous numpy-on-tensor mix (torch.mean(np.abs(...)))
                # relied on numpy dispatching back to torch.
                mean_sig = torch.mean(
                    torch.abs(signal[start_sample:end_sample])
                )
                if mean_sig < amp_th:
                    continue

                # Composition of the csv_line
                csv_line = [
                    chunk,
                    str(audio_duration),
                    wav_file,
                    start_sample,
                    end_sample,
                    spk_id,
                ]
                entry.append(csv_line)

    csv_output = csv_output + entry

    # Writing the csv lines. newline="" is required by the csv module to
    # avoid "\r\r\n" line endings on Windows (no-op on POSIX).
    with open(csv_file, mode="w", newline="") as csv_f:
        csv_writer = csv.writer(
            csv_f, delimiter=",", quotechar='"', quoting=csv.QUOTE_MINIMAL
        )
        for line in csv_output:
            csv_writer.writerow(line)

    # Final prints
    msg = "\t%s successfully created!" % (csv_file)
    logger.info(msg)
def prepare_csv_enrol_test(data_folders, save_folder, verification_pairs_file):
"""
Creates the csv file for test data (useful for verification)
Arguments
---------
data_folder : str
Path of the data folders
save_folder : str
The directory where to store the csv files.
Returns
-------
None
"""
# msg = '\t"Creating csv lists in %s..."' % (csv_file)
# logger.debug(msg)
csv_output_head = [
["ID", "duration", "wav", "start", "stop", "spk_id"]
] # noqa E231
for data_folder in data_folders:
test_lst_file = verification_pairs_file
enrol_ids, test_ids = [], []
# Get unique ids (enrol and test utterances)
for line in open(test_lst_file):
e_id = line.split(" ")[1].rstrip().split(".")[0].strip()
t_id = line.split(" ")[2].rstrip().split(".")[0].strip()
enrol_ids.append(e_id)
test_ids.append(t_id)
enrol_ids = list(np.unique(np.array(enrol_ids)))
test_ids = list(np.unique(np.array(test_ids)))
# Prepare enrol csv
logger.info("preparing enrol csv")
enrol_csv = []
for id in enrol_ids:
wav = data_folder + "/wav/" + id + ".wav"
# Reading the signal (to retrieve duration in seconds)
signal, fs = torchaudio.load(wav)
signal = signal.squeeze(0)
audio_duration = signal.shape[0] / SAMPLERATE
start_sample = 0
stop_sample = signal.shape[0]
[spk_id, sess_id, utt_id] = wav.split("/")[-3:]
csv_line = [
id,
audio_duration,
wav,
start_sample,
stop_sample,
spk_id,
]
enrol_csv.append(csv_line)
csv_output = csv_output_head + enrol_csv
csv_file = os.path.join(save_folder, ENROL_CSV)
# Writing the csv lines
with open(csv_file, mode="w") as csv_f:
csv_writer = csv.writer(
csv_f, delimiter=",", quotechar='"', quoting=csv.QUOTE_MINIMAL
)
for line in csv_output:
csv_writer.writerow(line)
# Prepare test csv
logger.info("preparing test csv")
test_csv = []
for id in test_ids:
wav = data_folder + "/wav/" + id + ".wav"
# Reading the signal (to retrieve duration in seconds)
signal, fs = torchaudio.load(wav)
signal = signal.squeeze(0)
audio_duration = signal.shape[0] / SAMPLERATE
start_sample = 0
stop_sample = signal.shape[0]
[spk_id, sess_id, utt_id] = wav.split("/")[-3:]
csv_line = [
id,
audio_duration,
wav,
start_sample,
stop_sample,
spk_id,
]
test_csv.append(csv_line)
csv_output = csv_output_head + test_csv
csv_file = os.path.join(save_folder, TEST_CSV)
# Writing the csv lines
with open(csv_file, mode="w") as csv_f:
csv_writer = csv.writer(
csv_f, delimiter=",", quotechar='"', quoting=csv.QUOTE_MINIMAL
)
for line in csv_output:
csv_writer.writerow(line)
| 16,001 | 29.192453 | 103 | py |
speechbrain | speechbrain-main/recipes/VoxCeleb/SpeakerRec/train_speaker_embeddings.py | #!/usr/bin/python3
"""Recipe for training speaker embeddings (e.g, xvectors) using the VoxCeleb Dataset.
We employ an encoder followed by a speaker classifier.
To run this recipe, use the following command:
> python train_speaker_embeddings.py {hyperparameter_file}
Using your own hyperparameter file or one of the following:
hyperparams/train_x_vectors.yaml (for standard xvectors)
hyperparams/train_ecapa_tdnn.yaml (for the ecapa+tdnn system)
Author
* Mirco Ravanelli 2020
* Hwidong Na 2020
* Nauman Dawalatabad 2020
"""
import os
import sys
import random
import torch
import torchaudio
import speechbrain as sb
from speechbrain.utils.data_utils import download_file
from hyperpyyaml import load_hyperpyyaml
from speechbrain.utils.distributed import run_on_main
class SpeakerBrain(sb.core.Brain):
    """Brain subclass implementing speaker-embedding training:
    feature extraction + encoder + speaker classifier, with waveform
    augmentation applied during training.
    """

    def compute_forward(self, batch, stage):
        """Computation pipeline based on a encoder + speaker classifier.
        Data augmentation and environmental corruption are applied to the
        input speech.
        """
        batch = batch.to(self.device)
        wavs, lens = batch.sig

        if stage == sb.Stage.TRAIN:
            # Applying the augmentation pipeline; the clean waveform is
            # always the first element of wavs_aug_tot.
            wavs_aug_tot = []
            wavs_aug_tot.append(wavs)
            for count, augment in enumerate(self.hparams.augment_pipeline):
                # Apply augment
                wavs_aug = augment(wavs, lens)
                # Managing speed change: crop if the augmented signal got
                # longer, zero-pad if it got shorter, so shapes always match.
                if wavs_aug.shape[1] > wavs.shape[1]:
                    wavs_aug = wavs_aug[:, 0 : wavs.shape[1]]
                else:
                    zero_sig = torch.zeros_like(wavs)
                    zero_sig[:, 0 : wavs_aug.shape[1]] = wavs_aug
                    wavs_aug = zero_sig

                if self.hparams.concat_augment:
                    # Stack each augmented copy as extra batch items.
                    wavs_aug_tot.append(wavs_aug)
                else:
                    # Chain augmentations: each one replaces the "clean" wavs.
                    wavs = wavs_aug
                    wavs_aug_tot[0] = wavs

            # Batch dimension grows by a factor of n_augment; labels are
            # replicated accordingly in compute_objectives.
            wavs = torch.cat(wavs_aug_tot, dim=0)
            self.n_augment = len(wavs_aug_tot)
            lens = torch.cat([lens] * self.n_augment)

        # Feature extraction and normalization
        feats = self.modules.compute_features(wavs)
        feats = self.modules.mean_var_norm(feats, lens)

        # Embeddings + speaker classifier
        embeddings = self.modules.embedding_model(feats)
        outputs = self.modules.classifier(embeddings)

        return outputs, lens

    def compute_objectives(self, predictions, batch, stage):
        """Computes the loss using speaker-id as label.
        """
        predictions, lens = predictions
        uttid = batch.id
        spkid, _ = batch.spk_id_encoded

        # Concatenate labels (due to data augmentation) so they align with
        # the enlarged batch produced in compute_forward.
        if stage == sb.Stage.TRAIN:
            spkid = torch.cat([spkid] * self.n_augment, dim=0)

        loss = self.hparams.compute_cost(predictions, spkid, lens)

        # Some schedulers (e.g. cyclic ones) step once per batch.
        if stage == sb.Stage.TRAIN and hasattr(
            self.hparams.lr_annealing, "on_batch_end"
        ):
            self.hparams.lr_annealing.on_batch_end(self.optimizer)

        if stage != sb.Stage.TRAIN:
            self.error_metrics.append(uttid, predictions, spkid, lens)

        return loss

    def on_stage_start(self, stage, epoch=None):
        """Gets called at the beginning of an epoch."""
        # Fresh error tracker for each valid/test epoch.
        if stage != sb.Stage.TRAIN:
            self.error_metrics = self.hparams.error_stats()

    def on_stage_end(self, stage, stage_loss, epoch=None):
        """Gets called at the end of an epoch."""
        # Compute/store important stats
        stage_stats = {"loss": stage_loss}
        if stage == sb.Stage.TRAIN:
            self.train_stats = stage_stats
        else:
            stage_stats["ErrorRate"] = self.error_metrics.summarize("average")

        # Perform end-of-iteration things, like annealing, logging, etc.
        if stage == sb.Stage.VALID:
            old_lr, new_lr = self.hparams.lr_annealing(epoch)
            sb.nnet.schedulers.update_learning_rate(self.optimizer, new_lr)

            self.hparams.train_logger.log_stats(
                stats_meta={"epoch": epoch, "lr": old_lr},
                train_stats=self.train_stats,
                valid_stats=stage_stats,
            )
            # Keep only the checkpoint with the lowest error rate.
            self.checkpointer.save_and_keep_only(
                meta={"ErrorRate": stage_stats["ErrorRate"]},
                min_keys=["ErrorRate"],
            )
def dataio_prep(hparams):
    "Creates the datasets and their data processing pipelines."

    data_folder = hparams["data_folder"]

    # 1. Declarations:
    train_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["train_annotation"],
        replacements={"data_root": data_folder},
    )

    valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["valid_annotation"],
        replacements={"data_root": data_folder},
    )

    datasets = [train_data, valid_data]
    label_encoder = sb.dataio.encoder.CategoricalEncoder()

    # Training chunk length expressed in samples.
    snt_len_sample = int(hparams["sample_rate"] * hparams["sentence_len"])

    # 2. Define audio pipeline:
    @sb.utils.data_pipeline.takes("wav", "start", "stop", "duration")
    @sb.utils.data_pipeline.provides("sig")
    def audio_pipeline(wav, start, stop, duration):
        if hparams["random_chunk"]:
            # NOTE(review): if duration_sample < snt_len_sample this randint
            # range is negative and raises ValueError — assumes every
            # utterance is at least sentence_len seconds long; confirm
            # against the csv preparation.
            duration_sample = int(duration * hparams["sample_rate"])
            start = random.randint(0, duration_sample - snt_len_sample)
            stop = start + snt_len_sample
        else:
            start = int(start)
            stop = int(stop)
        num_frames = stop - start
        sig, fs = torchaudio.load(
            wav, num_frames=num_frames, frame_offset=start
        )
        # (time, 1) -> (time,)
        sig = sig.transpose(0, 1).squeeze(1)
        return sig

    sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline)

    # 3. Define text pipeline:
    @sb.utils.data_pipeline.takes("spk_id")
    @sb.utils.data_pipeline.provides("spk_id", "spk_id_encoded")
    def label_pipeline(spk_id):
        yield spk_id
        spk_id_encoded = label_encoder.encode_sequence_torch([spk_id])
        yield spk_id_encoded

    sb.dataio.dataset.add_dynamic_item(datasets, label_pipeline)

    # 3. Fit encoder:
    # Load or compute the label encoder (with multi-GPU DDP support)
    lab_enc_file = os.path.join(hparams["save_folder"], "label_encoder.txt")
    label_encoder.load_or_create(
        path=lab_enc_file, from_didatasets=[train_data], output_key="spk_id",
    )

    # 4. Set output:
    sb.dataio.dataset.set_output_keys(datasets, ["id", "sig", "spk_id_encoded"])

    return train_data, valid_data, label_encoder
if __name__ == "__main__":

    # This flag enables the inbuilt cudnn auto-tuner
    torch.backends.cudnn.benchmark = True

    # CLI:
    hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])

    # Initialize ddp (useful only for multi-GPU DDP training)
    sb.utils.distributed.ddp_init_group(run_opts)

    # Load hyperparameters file with command-line overrides
    with open(hparams_file) as fin:
        hparams = load_hyperpyyaml(fin, overrides)

    # Download verification list (to exclude verification sentences from train)
    veri_file_path = os.path.join(
        hparams["save_folder"], os.path.basename(hparams["verification_file"])
    )
    download_file(hparams["verification_file"], veri_file_path)

    # Dataset prep (parsing VoxCeleb and annotation into csv files)
    from voxceleb_prepare import prepare_voxceleb  # noqa

    # Data preparation runs only on the main process under DDP.
    run_on_main(
        prepare_voxceleb,
        kwargs={
            "data_folder": hparams["data_folder"],
            "save_folder": hparams["save_folder"],
            "verification_pairs_file": veri_file_path,
            "splits": ["train", "dev"],
            "split_ratio": hparams["split_ratio"],
            "seg_dur": hparams["sentence_len"],
            "skip_prep": hparams["skip_prep"],
        },
    )

    # Dataset IO prep: creating Dataset objects and proper encodings for
    # the speaker labels.
    train_data, valid_data, label_encoder = dataio_prep(hparams)

    # Create experiment directory
    sb.core.create_experiment_directory(
        experiment_directory=hparams["output_folder"],
        hyperparams_to_save=hparams_file,
        overrides=overrides,
    )

    # Brain class initialization
    speaker_brain = SpeakerBrain(
        modules=hparams["modules"],
        opt_class=hparams["opt_class"],
        hparams=hparams,
        run_opts=run_opts,
        checkpointer=hparams["checkpointer"],
    )

    # Training
    speaker_brain.fit(
        speaker_brain.hparams.epoch_counter,
        train_data,
        valid_data,
        train_loader_kwargs=hparams["dataloader_options"],
        valid_loader_kwargs=hparams["dataloader_options"],
    )
| 8,833 | 33.108108 | 85 | py |
speechbrain | speechbrain-main/recipes/VoxCeleb/SpeakerRec/speaker_verification_cosine.py | #!/usr/bin/python3
"""Recipe for training a speaker verification system based on cosine distance.
The cosine distance is computed on the top of pre-trained embeddings.
The pre-trained model is automatically downloaded from the web if not specified.
This recipe is designed to work on a single GPU.
To run this recipe, run the following command:
> python speaker_verification_cosine.py hyperparams/verification_ecapa_tdnn.yaml
Authors
* Hwidong Na 2020
* Mirco Ravanelli 2020
"""
import os
import sys
import torch
import logging
import torchaudio
import speechbrain as sb
from tqdm.contrib import tqdm
from hyperpyyaml import load_hyperpyyaml
from speechbrain.utils.metric_stats import EER, minDCF
from speechbrain.utils.data_utils import download_file
from speechbrain.utils.distributed import run_on_main
# Compute embeddings from the waveforms
def compute_embedding(wavs, wav_lens):
    """Compute speaker embeddings.

    Arguments
    ---------
    wavs : Torch.Tensor
        Tensor containing the speech waveform (batch, time).
        Make sure the sample rate is fs=16000 Hz.
    wav_lens: Torch.Tensor
        Tensor containing the relative length for each sentence
        in the length (e.g., [0.8 0.6 1.0])
    """
    # Inference only: no gradients needed.
    with torch.no_grad():
        features = params["compute_features"](wavs)
        features = params["mean_var_norm"](features, wav_lens)
        emb = params["embedding_model"](features, wav_lens)
    return emb.squeeze(1)
def compute_embedding_loop(data_loader):
    """Computes the embeddings of all the waveforms specified in the
    dataloader.
    """
    embedding_dict = {}

    with torch.no_grad():
        for batch in tqdm(data_loader, dynamic_ncols=True):
            batch = batch.to(run_opts["device"])
            seg_ids = batch.id
            wavs, lens = batch.sig

            # Skip the batch only when every segment is already cached
            # (embeddings are computed per whole batch).
            if all(seg_id in embedding_dict for seg_id in seg_ids):
                continue

            wavs = wavs.to(run_opts["device"])
            lens = lens.to(run_opts["device"])
            emb = compute_embedding(wavs, lens).unsqueeze(1)
            for i, seg_id in enumerate(seg_ids):
                embedding_dict[seg_id] = emb[i].detach().clone()
    return embedding_dict
def get_verification_scores(veri_test):
    """Computes positive and negative scores given the verification split.

    Arguments
    ---------
    veri_test : list
        Lines of the verification file: "<label> <enrol_utt> <test_utt>".

    Returns
    -------
    (positive_scores, negative_scores) : tuple of lists
        Cosine (optionally normalized) scores of the target trials and the
        impostor trials respectively. Scores are also written to
        <output_folder>/scores.txt.
    """
    positive_scores = []
    negative_scores = []

    save_file = os.path.join(params["output_folder"], "scores.txt")

    # Cosine similarity initialization
    similarity = torch.nn.CosineSimilarity(dim=-1, eps=1e-6)

    # creating cohort for score normalization
    if "score_norm" in params:
        train_cohort = torch.stack(list(train_dict.values()))

    # Context manager so the score file is closed even if scoring fails.
    with open(save_file, "w") as s_file:
        for line in veri_test:
            # Reading verification file (enrol_file test_file label)
            lab_pair = int(line.split(" ")[0].rstrip().split(".")[0].strip())
            enrol_id = line.split(" ")[1].rstrip().split(".")[0].strip()
            test_id = line.split(" ")[2].rstrip().split(".")[0].strip()
            enrol = enrol_dict[enrol_id]
            test = test_dict[test_id]

            if "score_norm" in params:
                # Getting norm stats for enrol impostors
                enrol_rep = enrol.repeat(train_cohort.shape[0], 1, 1)
                score_e_c = similarity(enrol_rep, train_cohort)

                if "cohort_size" in params:
                    score_e_c = torch.topk(
                        score_e_c, k=params["cohort_size"], dim=0
                    )[0]

                mean_e_c = torch.mean(score_e_c, dim=0)
                std_e_c = torch.std(score_e_c, dim=0)

                # Getting norm stats for test impostors
                test_rep = test.repeat(train_cohort.shape[0], 1, 1)
                score_t_c = similarity(test_rep, train_cohort)

                if "cohort_size" in params:
                    score_t_c = torch.topk(
                        score_t_c, k=params["cohort_size"], dim=0
                    )[0]

                mean_t_c = torch.mean(score_t_c, dim=0)
                std_t_c = torch.std(score_t_c, dim=0)

            # Compute the score for the given sentence
            score = similarity(enrol, test)[0]

            # Perform score normalization
            if "score_norm" in params:
                if params["score_norm"] == "z-norm":
                    score = (score - mean_e_c) / std_e_c
                elif params["score_norm"] == "t-norm":
                    score = (score - mean_t_c) / std_t_c
                elif params["score_norm"] == "s-norm":
                    score_e = (score - mean_e_c) / std_e_c
                    score_t = (score - mean_t_c) / std_t_c
                    score = 0.5 * (score_e + score_t)

            # write score file
            s_file.write(
                "%s %s %i %f\n" % (enrol_id, test_id, lab_pair, score)
            )

            if lab_pair == 1:
                positive_scores.append(score)
            else:
                negative_scores.append(score)

    return positive_scores, negative_scores
def dataio_prep(params):
    "Creates the dataloaders and their data processing pipelines."

    data_folder = params["data_folder"]

    # Train data (used for normalization); only the n_train_snts shortest
    # utterances are kept.
    train_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=params["train_data"], replacements={"data_root": data_folder},
    )
    train_data = train_data.filtered_sorted(
        sort_key="duration", select_n=params["n_train_snts"]
    )

    # Enrol data
    enrol_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=params["enrol_data"], replacements={"data_root": data_folder},
    )
    enrol_data = enrol_data.filtered_sorted(sort_key="duration")

    # Test data
    test_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=params["test_data"], replacements={"data_root": data_folder},
    )
    test_data = test_data.filtered_sorted(sort_key="duration")

    datasets = [train_data, enrol_data, test_data]

    # Define audio pipeline: reads only the [start, stop) sample range.
    @sb.utils.data_pipeline.takes("wav", "start", "stop")
    @sb.utils.data_pipeline.provides("sig")
    def audio_pipeline(wav, start, stop):
        start = int(start)
        stop = int(stop)
        num_frames = stop - start
        sig, fs = torchaudio.load(
            wav, num_frames=num_frames, frame_offset=start
        )
        # (time, 1) -> (time,)
        sig = sig.transpose(0, 1).squeeze(1)
        return sig

    sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline)

    # Set output
    sb.dataio.dataset.set_output_keys(datasets, ["id", "sig"])

    # Create dataloaders
    train_dataloader = sb.dataio.dataloader.make_dataloader(
        train_data, **params["train_dataloader_opts"]
    )
    enrol_dataloader = sb.dataio.dataloader.make_dataloader(
        enrol_data, **params["enrol_dataloader_opts"]
    )
    test_dataloader = sb.dataio.dataloader.make_dataloader(
        test_data, **params["test_dataloader_opts"]
    )

    return train_dataloader, enrol_dataloader, test_dataloader
if __name__ == "__main__":

    # Logger setup
    logger = logging.getLogger(__name__)
    current_dir = os.path.dirname(os.path.abspath(__file__))
    sys.path.append(os.path.dirname(current_dir))

    # Load hyperparameters file with command-line overrides
    params_file, run_opts, overrides = sb.core.parse_arguments(sys.argv[1:])
    with open(params_file) as fin:
        params = load_hyperpyyaml(fin, overrides)

    # Download verification list (to exclude verification sentences from train)
    veri_file_path = os.path.join(
        params["save_folder"], os.path.basename(params["verification_file"])
    )
    download_file(params["verification_file"], veri_file_path)

    from voxceleb_prepare import prepare_voxceleb  # noqa E402

    # Create experiment directory
    sb.core.create_experiment_directory(
        experiment_directory=params["output_folder"],
        hyperparams_to_save=params_file,
        overrides=overrides,
    )

    # Prepare data from dev of Voxceleb1
    prepare_voxceleb(
        data_folder=params["data_folder"],
        save_folder=params["save_folder"],
        verification_pairs_file=veri_file_path,
        splits=["train", "dev", "test"],
        split_ratio=params["split_ratio"],
        seg_dur=3.0,
        skip_prep=params["skip_prep"],
        source=params["voxceleb_source"]
        if "voxceleb_source" in params
        else None,
    )

    # here we create the datasets objects as well as tokenization and encoding
    train_dataloader, enrol_dataloader, test_dataloader = dataio_prep(params)

    # Fetch the pretrained embedding model (from HuggingFace or elsewhere,
    # depending on the path given in the YAML file) and load its weights.
    run_on_main(params["pretrainer"].collect_files)
    params["pretrainer"].load_collected(run_opts["device"])
    params["embedding_model"].eval()
    params["embedding_model"].to(run_opts["device"])

    # Computing  enrollment and test embeddings
    logger.info("Computing enroll/test embeddings...")

    # First run
    enrol_dict = compute_embedding_loop(enrol_dataloader)
    test_dict = compute_embedding_loop(test_dataloader)

    # Train embeddings are only needed as the cohort for score normalization.
    if "score_norm" in params:
        train_dict = compute_embedding_loop(train_dataloader)

    # Compute the EER
    logger.info("Computing EER..")
    # Reading standard verification split
    with open(veri_file_path) as f:
        veri_test = [line.rstrip() for line in f]

    positive_scores, negative_scores = get_verification_scores(veri_test)
    del enrol_dict, test_dict

    eer, th = EER(torch.tensor(positive_scores), torch.tensor(negative_scores))
    logger.info("EER(%%)=%f", eer * 100)

    min_dcf, th = minDCF(
        torch.tensor(positive_scores), torch.tensor(negative_scores)
    )
    logger.info("minDCF=%f", min_dcf * 100)
| 9,905 | 33.515679 | 85 | py |
speechbrain | speechbrain-main/recipes/VoxCeleb/SpeakerRec/speaker_verification_plda.py | #!/usr/bin/python3
"""Recipe for training a speaker verification system based on PLDA using the voxceleb dataset.
The system employs a pre-trained model followed by a PLDA transformation.
The pre-trained model is automatically downloaded from the web if not specified.
To run this recipe, run the following command:
> python speaker_verification_plda.py hyperparams/verification_plda_xvector.yaml
Authors
* Nauman Dawalatabad 2020
* Mirco Ravanelli 2020
"""
import os
import sys
import torch
import torchaudio
import logging
import speechbrain as sb
import numpy
import pickle
from tqdm.contrib import tqdm
from hyperpyyaml import load_hyperpyyaml
from speechbrain.utils.metric_stats import EER, minDCF
from speechbrain.processing.PLDA_LDA import StatObject_SB
from speechbrain.processing.PLDA_LDA import Ndx
from speechbrain.processing.PLDA_LDA import fast_PLDA_scoring
from speechbrain.utils.data_utils import download_file
from speechbrain.utils.distributed import run_on_main
# Compute embeddings from the waveforms
def compute_embeddings(wavs, wav_lens):
    """Compute speaker embeddings.

    Arguments
    ---------
    wavs : Torch.Tensor
        Tensor containing the speech waveform (batch, time).
        Make sure the sample rate is fs=16000 Hz.
    wav_lens: Torch.Tensor
        Tensor containing the relative length for each sentence
        in the length (e.g., [0.8 0.6 1.0])
    """
    device = run_opts["device"]
    wavs = wavs.to(device)
    wav_lens = wav_lens.to(device)
    # Inference only: no gradients needed.
    with torch.no_grad():
        features = params["compute_features"](wavs)
        features = params["mean_var_norm"](features, wav_lens)
        emb = params["embedding_model"](features, wav_lens)
    return emb.squeeze(1)
def emb_computation_loop(split, set_loader, stat_file):
    """Computes the embeddings for a split and caches them in a stat file.

    Arguments
    ---------
    split : str
        Name of the split (e.g. "enrol" or "test"); used for logging only.
    set_loader : DataLoader
        Loader yielding the batches whose embeddings must be extracted.
    stat_file : str
        Path of the pickle file where the StatObject_SB is cached.

    Returns
    -------
    StatObject_SB
        The (possibly loaded-from-cache) statistics object with embeddings.
    """
    # Extract embeddings (skip if already done)
    if not os.path.isfile(stat_file):
        embeddings = numpy.empty(
            shape=[0, params["emb_dim"]], dtype=numpy.float64
        )
        modelset = []
        segset = []
        with tqdm(set_loader, dynamic_ncols=True) as t:
            for batch in t:
                ids = batch.id
                wavs, lens = batch.sig
                # Each utterance acts as both its own model and segment id
                modelset = modelset + list(ids)
                segset = segset + list(ids)
                # Enrollment and test embeddings
                embs = compute_embeddings(wavs, lens)
                xv = embs.squeeze().cpu().numpy()
                # atleast_2d prevents a batch of size 1 (squeezed to 1-D)
                # from breaking the 2-D concatenation below.
                embeddings = numpy.concatenate(
                    (embeddings, numpy.atleast_2d(xv)), axis=0
                )
        modelset = numpy.array(modelset, dtype="|O")
        segset = numpy.array(segset, dtype="|O")
        # Initialize variables for start, stop and stat0
        s = numpy.array([None] * embeddings.shape[0])
        b = numpy.array([[1.0]] * embeddings.shape[0])
        # Stat object (used to collect embeddings)
        stat_obj = StatObject_SB(
            modelset=modelset,
            segset=segset,
            start=s,
            stop=s,
            stat0=b,
            stat1=embeddings,
        )
        logger.info(f"Saving stat obj for {split}")
        stat_obj.save_stat_object(stat_file)
    else:
        logger.info(f"Skipping embedding Extraction for {split}")
        logger.info(f"Loading previously saved stat_object for {split}")
        # 'fin' instead of 'input' avoids shadowing the builtin
        with open(stat_file, "rb") as fin:
            stat_obj = pickle.load(fin)
    return stat_obj
def verification_performance(scores_plda):
    """Computes the Equal Error Rate given the PLDA scores.

    Arguments
    ---------
    scores_plda : Scores
        PLDA scoring object exposing `modelset`, `segset` and `scoremat`.

    Returns
    -------
    (float, float)
        The Equal Error Rate and the minimum Detection Cost Function.
    """
    # Create ids, labels, and scoring list for EER evaluation
    ids = []
    labels = []
    positive_scores = []
    negative_scores = []
    # Context manager guarantees the verification file is closed
    with open(veri_file_path) as veri_file:
        for line in veri_file:
            lab = int(line.split(" ")[0].rstrip().split(".")[0].strip())
            enrol_id = line.split(" ")[1].rstrip().split(".")[0].strip()
            test_id = line.split(" ")[2].rstrip().split(".")[0].strip()
            # Assuming enrol_id and test_id are unique
            i = int(numpy.where(scores_plda.modelset == enrol_id)[0][0])
            j = int(numpy.where(scores_plda.segset == test_id)[0][0])
            s = float(scores_plda.scoremat[i, j])
            labels.append(lab)
            ids.append(enrol_id + "<>" + test_id)
            if lab == 1:
                positive_scores.append(s)
            else:
                negative_scores.append(s)
    # Clean variable
    del scores_plda
    # Final EER computation
    eer, th = EER(torch.tensor(positive_scores), torch.tensor(negative_scores))
    min_dcf, th = minDCF(
        torch.tensor(positive_scores), torch.tensor(negative_scores)
    )
    return eer, min_dcf
# Function to get mod and seg
def get_utt_ids_for_test(ids, data_dict):
    """Return the enrol ("wav1") and test ("wav2") data entries for `ids`."""
    mod, seg = [], []
    for utt in ids:
        entry = data_dict[utt]
        mod.append(entry["wav1"]["data"])
        seg.append(entry["wav2"]["data"])
    return mod, seg
def dataio_prep(params):
    """Creates the dataloaders and their data processing pipelines.

    Arguments
    ---------
    params : dict
        Loaded hyperparameters; must provide the csv paths, the data
        folder and the dataloader options used below.

    Returns
    -------
    (DataLoader, DataLoader, DataLoader)
        Train (normalization / PLDA training), enrol and test loaders.
    """
    data_folder = params["data_folder"]
    # 1. Declarations:
    # Train data (used for normalization)
    train_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=params["train_data"], replacements={"data_root": data_folder},
    )
    # Sort by duration and keep only n_train_snts sentences for speed
    train_data = train_data.filtered_sorted(
        sort_key="duration", select_n=params["n_train_snts"]
    )
    # Enrol data
    enrol_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=params["enrol_data"], replacements={"data_root": data_folder},
    )
    enrol_data = enrol_data.filtered_sorted(sort_key="duration")
    # Test data
    test_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=params["test_data"], replacements={"data_root": data_folder},
    )
    test_data = test_data.filtered_sorted(sort_key="duration")
    datasets = [train_data, enrol_data, test_data]
    # 2. Define audio pipeline:
    @sb.utils.data_pipeline.takes("wav", "start", "stop")
    @sb.utils.data_pipeline.provides("sig")
    def audio_pipeline(wav, start, stop):
        # Read only the [start, stop) segment of the waveform
        start = int(start)
        stop = int(stop)
        num_frames = stop - start
        sig, fs = torchaudio.load(
            wav, num_frames=num_frames, frame_offset=start
        )
        # (channel, time) -> (time,); assumes single-channel audio
        sig = sig.transpose(0, 1).squeeze(1)
        return sig
    sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline)
    # 3. Set output:
    sb.dataio.dataset.set_output_keys(datasets, ["id", "sig", "spk_id"])
    # 4. Create dataloaders
    train_dataloader = sb.dataio.dataloader.make_dataloader(
        train_data, **params["train_dataloader_opts"]
    )
    enrol_dataloader = sb.dataio.dataloader.make_dataloader(
        enrol_data, **params["enrol_dataloader_opts"]
    )
    test_dataloader = sb.dataio.dataloader.make_dataloader(
        test_data, **params["test_dataloader_opts"]
    )
    return train_dataloader, enrol_dataloader, test_dataloader
if __name__ == "__main__":
    # Logger setup
    logger = logging.getLogger(__name__)
    current_dir = os.path.dirname(os.path.abspath(__file__))
    sys.path.append(os.path.dirname(current_dir))
    # Load hyperparameters file with command-line overrides
    params_file, run_opts, overrides = sb.core.parse_arguments(sys.argv[1:])
    with open(params_file) as fin:
        params = load_hyperpyyaml(fin, overrides)
    # Download verification list (to exclude verification sentences from train)
    veri_file_path = os.path.join(
        params["save_folder"], os.path.basename(params["verification_file"])
    )
    download_file(params["verification_file"], veri_file_path)
    from voxceleb_prepare import prepare_voxceleb  # noqa E402
    # Create experiment directory
    sb.core.create_experiment_directory(
        experiment_directory=params["output_folder"],
        hyperparams_to_save=params_file,
        overrides=overrides,
    )
    # Prepare data from dev of Voxceleb1
    logger.info("Data preparation")
    prepare_voxceleb(
        data_folder=params["data_folder"],
        save_folder=params["save_folder"],
        verification_pairs_file=veri_file_path,
        splits=["train", "test"],
        split_ratio=params["split_ratio"],
        seg_dur=params["seg_dur"],
        skip_prep=params["skip_prep"],
    )
    # here we create the datasets objects as well as tokenization and encoding
    train_dataloader, enrol_dataloader, test_dataloader = dataio_prep(params)
    # Initialize PLDA vars
    modelset, segset = [], []
    embeddings = numpy.empty(shape=[0, params["emb_dim"]], dtype=numpy.float64)
    # Embedding file for train data
    xv_file = os.path.join(
        params["save_folder"], "VoxCeleb1_train_embeddings_stat_obj.pkl"
    )
    # We download the pretrained model from HuggingFace (or elsewhere depending
    # on the path given in the YAML file), then load the collected files.
    run_on_main(params["pretrainer"].collect_files)
    params["pretrainer"].load_collected()
    # The embedding model is used in inference mode only
    params["embedding_model"].eval()
    params["embedding_model"].to(run_opts["device"])
    # Computing training embeddings (skip if already extracted)
    if not os.path.exists(xv_file):
        logger.info("Extracting embeddings from Training set..")
        with tqdm(train_dataloader, dynamic_ncols=True) as t:
            for batch in t:
                snt_id = batch.id
                wav, lens = batch.sig
                spk_ids = batch.spk_id
                # Flattening speaker ids
                modelset = modelset + spk_ids
                # For segset
                segset = segset + snt_id
                # Compute embeddings
                emb = compute_embeddings(wav, lens)
                xv = emb.squeeze(1).cpu().numpy()
                embeddings = numpy.concatenate((embeddings, xv), axis=0)
        # Speaker IDs and utterance IDs
        modelset = numpy.array(modelset, dtype="|O")
        segset = numpy.array(segset, dtype="|O")
        # Initialize variables for start, stop and stat0
        s = numpy.array([None] * embeddings.shape[0])
        b = numpy.array([[1.0]] * embeddings.shape[0])
        embeddings_stat = StatObject_SB(
            modelset=modelset,
            segset=segset,
            start=s,
            stop=s,
            stat0=b,
            stat1=embeddings,
        )
        del embeddings
        # Save TRAINING embeddings in StatObject_SB object
        embeddings_stat.save_stat_object(xv_file)
    else:
        # Load the saved stat object for train embedding
        logger.info("Skipping embedding Extraction for training set")
        logger.info(
            "Loading previously saved stat_object for train embeddings.."
        )
        # NOTE(review): 'input' shadows the builtin of the same name
        with open(xv_file, "rb") as input:
            embeddings_stat = pickle.load(input)
    # Training Gaussian PLDA model
    logger.info("Training PLDA model")
    params["compute_plda"].plda(embeddings_stat)
    logger.info("PLDA training completed")
    # Set paths for enrol/test embeddings
    enrol_stat_file = os.path.join(params["save_folder"], "stat_enrol.pkl")
    test_stat_file = os.path.join(params["save_folder"], "stat_test.pkl")
    ndx_file = os.path.join(params["save_folder"], "ndx.pkl")
    # Compute enrol and Test embeddings
    enrol_obj = emb_computation_loop("enrol", enrol_dataloader, enrol_stat_file)
    test_obj = emb_computation_loop("test", test_dataloader, test_stat_file)
    # Prepare Ndx Object (trial definition: which models vs which segments)
    if not os.path.isfile(ndx_file):
        models = enrol_obj.modelset
        testsegs = test_obj.modelset
        logger.info("Preparing Ndx")
        ndx_obj = Ndx(models=models, testsegs=testsegs)
        logger.info("Saving ndx obj...")
        ndx_obj.save_ndx_object(ndx_file)
    else:
        logger.info("Skipping Ndx preparation")
        logger.info("Loading Ndx from disk")
        with open(ndx_file, "rb") as input:
            ndx_obj = pickle.load(input)
    # PLDA scoring
    logger.info("PLDA scoring...")
    scores_plda = fast_PLDA_scoring(
        enrol_obj,
        test_obj,
        ndx_obj,
        params["compute_plda"].mean,
        params["compute_plda"].F,
        params["compute_plda"].Sigma,
    )
    logger.info("Computing EER... ")
    # Cleaning variables no longer needed before the final metric computation
    del enrol_dataloader
    del test_dataloader
    del enrol_obj
    del test_obj
    del embeddings_stat
    # Final EER computation
    eer, min_dcf = verification_performance(scores_plda)
    logger.info("EER(%%)=%f", eer * 100)
    logger.info("min_dcf=%f", min_dcf * 100)
speechbrain | speechbrain-main/recipes/TIMIT/ASR/transducer/train_wav2vec.py | #!/usr/bin/env/python3
"""Recipe for training a phoneme recognizer with
Transducer loss on the TIMIT dataset.
To run this recipe, do the following:
> python train.py hparams/train.yaml --data_folder /path/to/TIMIT
Authors
* Abdel Heba 2020
* Mirco Ravanelli 2020
* Ju-Chieh Chou 2020
"""
import os
import sys
import torch
import logging
import speechbrain as sb
from hyperpyyaml import load_hyperpyyaml
from speechbrain.utils.distributed import run_on_main
logger = logging.getLogger(__name__)
# Define training procedure
class ASR_Brain(sb.Brain):
    """Transducer (RNN-T) phoneme recognizer on top of a wav2vec2 encoder.
    Two optimizers are used: one for the wav2vec2 encoder and one for the
    rest of the model (see ``init_optimizers``)."""
    def compute_forward(self, batch, stage):
        """Given an input batch it computes the phoneme probabilities.
        Returns the joint-network logits, plus decoded hypotheses at
        validation (greedy search) and test (beam search) time."""
        batch = batch.to(self.device)
        wavs, wav_lens = batch.sig
        phns, phn_lens = batch.phn_encoded
        # Adding optional augmentation when specified:
        if stage == sb.Stage.TRAIN:
            if hasattr(self.hparams, "augmentation"):
                wavs = self.hparams.augmentation(wavs, wav_lens)
        # Model computations
        feats = self.modules.wav2vec2(wavs, wav_lens)
        x = self.modules.enc(feats)
        x = self.modules.enc_lin(x)
        # Prepend bos token at the beginning
        y_in = sb.dataio.dataio.prepend_bos_token(
            phns, self.hparams.blank_index
        )
        e_in = self.modules.emb(y_in)
        h, _ = self.modules.dec(e_in)
        h = self.modules.dec_lin(h)
        # Joint network
        # add labelseq_dim to the encoder tensor: [B,T,H_enc] => [B,T,1,H_enc]
        # add timeseq_dim to the decoder tensor: [B,U,H_dec] => [B,1,U,H_dec]
        joint = self.modules.Tjoint(x.unsqueeze(2), h.unsqueeze(1))
        # output layer for seq2seq log-probabilities
        logits = self.modules.output(joint)
        if stage == sb.Stage.VALID:
            hyps, scores, _, _ = self.hparams.Greedysearcher(x)
            return logits, hyps
        elif stage == sb.Stage.TEST:
            (
                best_hyps,
                best_scores,
                nbest_hyps,
                nbest_scores,
            ) = self.hparams.Beamsearcher(x)
            return logits, best_hyps
        return logits
    def compute_objectives(self, predictions, batch, stage):
        """Given the network predictions and targets computed the loss.
        Outside training, also accumulates PER statistics from the decoded
        hypotheses."""
        ids = batch.id
        _, wav_lens = batch.sig
        phns, phn_lens = batch.phn_encoded
        if stage != sb.Stage.TRAIN:
            predictions, hyps = predictions
        # Transducer loss use logits from RNN-T model.
        loss = self.hparams.compute_cost(predictions, phns, wav_lens, phn_lens)
        self.transducer_metrics.append(
            ids, predictions, phns, wav_lens, phn_lens
        )
        if stage != sb.Stage.TRAIN:
            self.per_metrics.append(
                ids, hyps, phns, None, phn_lens, self.label_encoder.decode_ndim
            )
        return loss
    def on_stage_start(self, stage, epoch):
        "Gets called when a stage (either training, validation, test) starts."
        self.transducer_metrics = self.hparams.transducer_stats()
        if stage != sb.Stage.TRAIN:
            self.per_metrics = self.hparams.per_stats()
    def on_stage_end(self, stage, stage_loss, epoch):
        """Gets called at the end of a epoch.
        Anneals both learning rates on validation PER, logs statistics, and
        checkpoints on the best PER; at test time writes the stats files."""
        if stage == sb.Stage.TRAIN:
            self.train_loss = stage_loss
        else:
            per = self.per_metrics.summarize("error_rate")
            if stage == sb.Stage.VALID:
                old_lr_adam, new_lr_adam = self.hparams.lr_annealing_adam(per)
                old_lr_wav2vec, new_lr_wav2vec = self.hparams.lr_annealing_wav2vec(
                    per
                )
                sb.nnet.schedulers.update_learning_rate(
                    self.adam_optimizer, new_lr_adam
                )
                sb.nnet.schedulers.update_learning_rate(
                    self.wav2vec_optimizer, new_lr_wav2vec
                )
                self.hparams.train_logger.log_stats(
                    stats_meta={
                        "epoch": epoch,
                        "lr_adam": old_lr_adam,
                        "lr_wav2vec": old_lr_wav2vec,
                    },
                    train_stats={"loss": self.train_loss},
                    valid_stats={"loss": stage_loss, "PER": per},
                )
                self.checkpointer.save_and_keep_only(
                    meta={"PER": per}, min_keys=["PER"]
                )
            if stage == sb.Stage.TEST:
                self.hparams.train_logger.log_stats(
                    stats_meta={"Epoch loaded": self.hparams.epoch_counter.current},
                    test_stats={"loss": stage_loss, "PER": per},
                )
                with open(self.hparams.wer_file, "w") as w:
                    w.write("Transducer loss stats:\n")
                    self.transducer_metrics.write_stats(w)
                    w.write("\nPER stats:\n")
                    self.per_metrics.write_stats(w)
                    print(
                        "Transducer and PER stats written to file",
                        self.hparams.wer_file,
                    )
    def fit_batch(self, batch):
        """Fit one batch, override to do multiple updates.
        The default implementation depends on a few methods being defined
        with a particular behavior:
        * ``compute_forward()``
        * ``compute_objectives()``
        Also depends on having optimizers passed at initialization.
        Arguments
        ---------
        batch : list of torch.Tensors
            Batch of data to use for training. Default implementation assumes
            this batch has two elements: inputs and targets.
        Returns
        -------
        detached loss
        """
        # Managing automatic mixed precision
        if self.auto_mix_prec:
            self.wav2vec_optimizer.zero_grad()
            self.adam_optimizer.zero_grad()
            with torch.cuda.amp.autocast():
                outputs = self.compute_forward(batch, sb.Stage.TRAIN)
                loss = self.compute_objectives(outputs, batch, sb.Stage.TRAIN)
            self.scaler.scale(loss).backward()
            self.scaler.unscale_(self.wav2vec_optimizer)
            self.scaler.unscale_(self.adam_optimizer)
            # Step both optimizers only if gradients are finite/sane
            if self.check_gradients(loss):
                self.scaler.step(self.wav2vec_optimizer)
                self.scaler.step(self.adam_optimizer)
                self.scaler.update()
        else:
            outputs = self.compute_forward(batch, sb.Stage.TRAIN)
            loss = self.compute_objectives(outputs, batch, sb.Stage.TRAIN)
            loss.backward()
            if self.check_gradients(loss):
                self.wav2vec_optimizer.step()
                self.adam_optimizer.step()
            self.wav2vec_optimizer.zero_grad()
            self.adam_optimizer.zero_grad()
        return loss.detach()
    def init_optimizers(self):
        "Initializes the wav2vec2 optimizer and model optimizer"
        self.wav2vec_optimizer = self.hparams.wav2vec_opt_class(
            self.modules.wav2vec2.parameters()
        )
        self.adam_optimizer = self.hparams.adam_opt_class(
            self.hparams.model.parameters()
        )
        if self.checkpointer is not None:
            # Register both optimizers so their state is checkpointed too
            self.checkpointer.add_recoverable(
                "wav2vec_opt", self.wav2vec_optimizer
            )
            self.checkpointer.add_recoverable("adam_opt", self.adam_optimizer)
    def zero_grad(self, set_to_none=False):
        """Clears the gradients of both optimizers between updates."""
        self.wav2vec_optimizer.zero_grad(set_to_none)
        self.adam_optimizer.zero_grad(set_to_none)
def dataio_prep(hparams):
    """This function prepares the datasets to be used in the brain class.
    It also defines the data processing pipeline through user-defined functions.

    Arguments
    ---------
    hparams : dict
        Loaded hyperparameters; must provide the annotation paths, the
        data folder, the sorting strategy and the blank index.

    Returns
    -------
    (DynamicItemDataset, DynamicItemDataset, DynamicItemDataset, CTCTextEncoder)
        Train/valid/test datasets plus the fitted phoneme label encoder.
    """
    data_folder = hparams["data_folder"]
    # 1. Declarations:
    train_data = sb.dataio.dataset.DynamicItemDataset.from_json(
        json_path=hparams["train_annotation"],
        replacements={"data_root": data_folder},
    )
    if hparams["sorting"] == "ascending":
        # we sort training data to speed up training and get better results.
        train_data = train_data.filtered_sorted(sort_key="duration")
        # when sorting do not shuffle in dataloader ! otherwise is pointless
        hparams["train_dataloader_opts"]["shuffle"] = False
    elif hparams["sorting"] == "descending":
        train_data = train_data.filtered_sorted(
            sort_key="duration", reverse=True
        )
        # when sorting do not shuffle in dataloader ! otherwise is pointless
        hparams["train_dataloader_opts"]["shuffle"] = False
    elif hparams["sorting"] == "random":
        pass
    else:
        raise NotImplementedError(
            "sorting must be random, ascending or descending"
        )
    valid_data = sb.dataio.dataset.DynamicItemDataset.from_json(
        json_path=hparams["valid_annotation"],
        replacements={"data_root": data_folder},
    )
    valid_data = valid_data.filtered_sorted(sort_key="duration")
    test_data = sb.dataio.dataset.DynamicItemDataset.from_json(
        json_path=hparams["test_annotation"],
        replacements={"data_root": data_folder},
    )
    test_data = test_data.filtered_sorted(sort_key="duration")
    datasets = [train_data, valid_data, test_data]
    label_encoder = sb.dataio.encoder.CTCTextEncoder()
    # 2. Define audio pipeline:
    @sb.utils.data_pipeline.takes("wav")
    @sb.utils.data_pipeline.provides("sig")
    def audio_pipeline(wav):
        sig = sb.dataio.dataio.read_audio(wav)
        return sig
    sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline)
    # 3. Define text pipeline:
    @sb.utils.data_pipeline.takes("phn")
    @sb.utils.data_pipeline.provides("phn_list", "phn_encoded")
    def text_pipeline(phn):
        phn_list = phn.strip().split()
        yield phn_list
        phn_encoded = label_encoder.encode_sequence_torch(phn_list)
        yield phn_encoded
    sb.dataio.dataset.add_dynamic_item(datasets, text_pipeline)
    # 4. Fit encoder:
    # Load or compute the label encoder (with multi-gpu dpp support)
    lab_enc_file = os.path.join(hparams["save_folder"], "label_encoder.txt")
    label_encoder.load_or_create(
        path=lab_enc_file,
        from_didatasets=[train_data],
        output_key="phn_list",
        special_labels={"blank_label": hparams["blank_index"]},
        sequence_input=True,
    )
    # 5. Set output:
    sb.dataio.dataset.set_output_keys(datasets, ["id", "sig", "phn_encoded"])
    return train_data, valid_data, test_data, label_encoder
# Begin Recipe!
if __name__ == "__main__":
    # CLI:
    hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])
    # Load hyperparameters file with command-line overrides
    with open(hparams_file) as fin:
        hparams = load_hyperpyyaml(fin, overrides)
    # Dataset prep (parsing TIMIT and annotation into csv files)
    from timit_prepare import prepare_timit  # noqa
    # Initialize ddp (useful only for multi-GPU DDP training)
    sb.utils.distributed.ddp_init_group(run_opts)
    # Create experiment directory
    sb.create_experiment_directory(
        experiment_directory=hparams["output_folder"],
        hyperparams_to_save=hparams_file,
        overrides=overrides,
    )
    # Move the pretrained wav2vec2 encoder to the training device
    hparams["wav2vec2"] = hparams["wav2vec2"].to(run_opts["device"])
    # multi-gpu (ddp) save data preparation
    run_on_main(
        prepare_timit,
        kwargs={
            "data_folder": hparams["data_folder"],
            "save_json_train": hparams["train_annotation"],
            "save_json_valid": hparams["valid_annotation"],
            "save_json_test": hparams["test_annotation"],
            "skip_prep": hparams["skip_prep"],
            "uppercase": hparams["uppercase"],
        },
    )
    # Dataset IO prep: creating Dataset objects and proper encodings for phones
    train_data, valid_data, test_data, label_encoder = dataio_prep(hparams)
    # Trainer initialization (optimizers are built in ASR_Brain.init_optimizers)
    asr_brain = ASR_Brain(
        modules=hparams["modules"],
        hparams=hparams,
        run_opts=run_opts,
        checkpointer=hparams["checkpointer"],
    )
    asr_brain.label_encoder = label_encoder
    # Training/validation loop
    asr_brain.fit(
        asr_brain.hparams.epoch_counter,
        train_data,
        valid_data,
        train_loader_kwargs=hparams["train_dataloader_opts"],
        valid_loader_kwargs=hparams["valid_dataloader_opts"],
    )
    # Test with the checkpoint that reached the best validation PER
    asr_brain.evaluate(
        test_data,
        min_key="PER",
        test_loader_kwargs=hparams["test_dataloader_opts"],
    )
| 12,544 | 32.632708 | 83 | py |
speechbrain | speechbrain-main/recipes/TIMIT/ASR/transducer/train.py | #!/usr/bin/env/python3
"""Recipe for training a phoneme recognizer with
Transducer loss on the TIMIT dataset.
To run this recipe, do the following:
> python train.py hparams/train.yaml --data_folder /path/to/TIMIT
Authors
* Abdel Heba 2020
* Mirco Ravanelli 2020
* Ju-Chieh Chou 2020
"""
import os
import sys
import torch
import logging
import speechbrain as sb
from hyperpyyaml import load_hyperpyyaml
from speechbrain.utils.distributed import run_on_main
logger = logging.getLogger(__name__)
# Define training procedure
class ASR_Brain(sb.Brain):
    """Transducer (RNN-T) phoneme recognizer trained on features computed
    from the waveform, with optional augmentation / environment corruption."""
    def compute_forward(self, batch, stage):
        """Given an input batch it computes the phoneme probabilities.
        Returns the joint-network logits, plus decoded hypotheses at
        validation (greedy search) and test (beam search) time."""
        batch = batch.to(self.device)
        wavs, wav_lens = batch.sig
        phns, phn_lens = batch.phn_encoded
        # Adding optional augmentation when specified:
        if stage == sb.Stage.TRAIN:
            if hasattr(self.hparams, "env_corrupt"):
                # Double the batch with an environment-corrupted copy
                wavs_noise = self.hparams.env_corrupt(wavs, wav_lens)
                wavs = torch.cat([wavs, wavs_noise], dim=0)
                wav_lens = torch.cat([wav_lens, wav_lens])
                batch.sig = wavs, wav_lens
                phns = torch.cat([phns, phns], dim=0)
                phn_lens = torch.cat([phn_lens, phn_lens])
                batch.phn_encoded = phns, phn_lens
            if hasattr(self.hparams, "augmentation"):
                wavs = self.hparams.augmentation(wavs, wav_lens)
        # Model computations
        feats = self.hparams.compute_features(wavs)
        feats = self.modules.normalize(feats, wav_lens)
        x = self.modules.enc(feats)
        x = self.modules.enc_lin(x)
        # Prepend bos token at the beginning
        y_in = sb.dataio.dataio.prepend_bos_token(
            phns, self.hparams.blank_index
        )
        e_in = self.modules.emb(y_in)
        h, _ = self.modules.dec(e_in)
        h = self.modules.dec_lin(h)
        # Joint network
        # add labelseq_dim to the encoder tensor: [B,T,H_enc] => [B,T,1,H_enc]
        # add timeseq_dim to the decoder tensor: [B,U,H_dec] => [B,1,U,H_dec]
        joint = self.modules.Tjoint(x.unsqueeze(2), h.unsqueeze(1))
        # output layer for seq2seq log-probabilities
        logits = self.modules.output(joint)
        if stage == sb.Stage.VALID:
            hyps, scores, _, _ = self.hparams.Greedysearcher(x)
            return logits, hyps
        elif stage == sb.Stage.TEST:
            (
                best_hyps,
                best_scores,
                nbest_hyps,
                nbest_scores,
            ) = self.hparams.Beamsearcher(x)
            return logits, best_hyps
        return logits
    def compute_objectives(self, predictions, batch, stage):
        """Given the network predictions and targets computed the loss.
        Outside training, also accumulates PER statistics from the decoded
        hypotheses."""
        ids = batch.id
        _, wav_lens = batch.sig
        phns, phn_lens = batch.phn_encoded
        if stage != sb.Stage.TRAIN:
            predictions, hyps = predictions
        # Transducer loss use logits from RNN-T model.
        loss = self.hparams.compute_cost(predictions, phns, wav_lens, phn_lens)
        self.transducer_metrics.append(
            ids, predictions, phns, wav_lens, phn_lens
        )
        if stage != sb.Stage.TRAIN:
            self.per_metrics.append(
                ids, hyps, phns, None, phn_lens, self.label_encoder.decode_ndim
            )
        return loss
    def on_stage_start(self, stage, epoch):
        "Gets called when a stage (either training, validation, test) starts."
        self.transducer_metrics = self.hparams.transducer_stats()
        if stage != sb.Stage.TRAIN:
            self.per_metrics = self.hparams.per_stats()
    def on_stage_end(self, stage, stage_loss, epoch):
        """Gets called at the end of a epoch.
        Anneals the learning rate on validation PER, logs statistics, and
        checkpoints on the best PER; at test time writes the stats files."""
        if stage == sb.Stage.TRAIN:
            self.train_loss = stage_loss
        else:
            per = self.per_metrics.summarize("error_rate")
            if stage == sb.Stage.VALID:
                old_lr, new_lr = self.hparams.lr_annealing(per)
                sb.nnet.schedulers.update_learning_rate(self.optimizer, new_lr)
                self.hparams.train_logger.log_stats(
                    stats_meta={"epoch": epoch, "lr": old_lr},
                    train_stats={"loss": self.train_loss},
                    valid_stats={"loss": stage_loss, "PER": per},
                )
                self.checkpointer.save_and_keep_only(
                    meta={"PER": per}, min_keys=["PER"]
                )
            if stage == sb.Stage.TEST:
                self.hparams.train_logger.log_stats(
                    stats_meta={"Epoch loaded": self.hparams.epoch_counter.current},
                    test_stats={"loss": stage_loss, "PER": per},
                )
                with open(self.hparams.wer_file, "w") as w:
                    w.write("Transducer loss stats:\n")
                    self.transducer_metrics.write_stats(w)
                    w.write("\nPER stats:\n")
                    self.per_metrics.write_stats(w)
                    print(
                        "Transducer and PER stats written to file",
                        self.hparams.wer_file,
                    )
def dataio_prep(hparams):
    """This function prepares the datasets to be used in the brain class.
    It also defines the data processing pipeline through user-defined functions.

    Arguments
    ---------
    hparams : dict
        Loaded hyperparameters; must provide the annotation paths, the
        data folder, the sorting strategy and the blank index.

    Returns
    -------
    (DynamicItemDataset, DynamicItemDataset, DynamicItemDataset, CTCTextEncoder)
        Train/valid/test datasets plus the fitted phoneme label encoder.
    """
    data_folder = hparams["data_folder"]
    # 1. Declarations:
    train_data = sb.dataio.dataset.DynamicItemDataset.from_json(
        json_path=hparams["train_annotation"],
        replacements={"data_root": data_folder},
    )
    if hparams["sorting"] == "ascending":
        # we sort training data to speed up training and get better results.
        train_data = train_data.filtered_sorted(sort_key="duration")
        # when sorting do not shuffle in dataloader ! otherwise is pointless
        hparams["train_dataloader_opts"]["shuffle"] = False
    elif hparams["sorting"] == "descending":
        train_data = train_data.filtered_sorted(
            sort_key="duration", reverse=True
        )
        # when sorting do not shuffle in dataloader ! otherwise is pointless
        hparams["train_dataloader_opts"]["shuffle"] = False
    elif hparams["sorting"] == "random":
        pass
    else:
        raise NotImplementedError(
            "sorting must be random, ascending or descending"
        )
    valid_data = sb.dataio.dataset.DynamicItemDataset.from_json(
        json_path=hparams["valid_annotation"],
        replacements={"data_root": data_folder},
    )
    valid_data = valid_data.filtered_sorted(sort_key="duration")
    test_data = sb.dataio.dataset.DynamicItemDataset.from_json(
        json_path=hparams["test_annotation"],
        replacements={"data_root": data_folder},
    )
    test_data = test_data.filtered_sorted(sort_key="duration")
    datasets = [train_data, valid_data, test_data]
    label_encoder = sb.dataio.encoder.CTCTextEncoder()
    # 2. Define audio pipeline:
    @sb.utils.data_pipeline.takes("wav")
    @sb.utils.data_pipeline.provides("sig")
    def audio_pipeline(wav):
        sig = sb.dataio.dataio.read_audio(wav)
        return sig
    sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline)
    # 3. Define text pipeline:
    @sb.utils.data_pipeline.takes("phn")
    @sb.utils.data_pipeline.provides("phn_list", "phn_encoded")
    def text_pipeline(phn):
        phn_list = phn.strip().split()
        yield phn_list
        phn_encoded = label_encoder.encode_sequence_torch(phn_list)
        yield phn_encoded
    sb.dataio.dataset.add_dynamic_item(datasets, text_pipeline)
    # 4. Fit encoder:
    # Load or compute the label encoder (with multi-gpu dpp support)
    lab_enc_file = os.path.join(hparams["save_folder"], "label_encoder.txt")
    label_encoder.load_or_create(
        path=lab_enc_file,
        from_didatasets=[train_data],
        output_key="phn_list",
        special_labels={"blank_label": hparams["blank_index"]},
        sequence_input=True,
    )
    # 5. Set output:
    sb.dataio.dataset.set_output_keys(datasets, ["id", "sig", "phn_encoded"])
    return train_data, valid_data, test_data, label_encoder
# Begin Recipe!
if __name__ == "__main__":
    # CLI:
    hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])
    # Load hyperparameters file with command-line overrides
    with open(hparams_file) as fin:
        hparams = load_hyperpyyaml(fin, overrides)
    # Dataset prep (parsing TIMIT and annotation into csv files)
    from timit_prepare import prepare_timit  # noqa
    # Initialize ddp (useful only for multi-GPU DDP training)
    sb.utils.distributed.ddp_init_group(run_opts)
    # Create experiment directory
    sb.create_experiment_directory(
        experiment_directory=hparams["output_folder"],
        hyperparams_to_save=hparams_file,
        overrides=overrides,
    )
    # multi-gpu (ddp) save data preparation
    run_on_main(
        prepare_timit,
        kwargs={
            "data_folder": hparams["data_folder"],
            "save_json_train": hparams["train_annotation"],
            "save_json_valid": hparams["valid_annotation"],
            "save_json_test": hparams["test_annotation"],
            "skip_prep": hparams["skip_prep"],
            "uppercase": hparams["uppercase"],
        },
    )
    # Dataset IO prep: creating Dataset objects and proper encodings for phones
    train_data, valid_data, test_data, label_encoder = dataio_prep(hparams)
    # Trainer initialization
    asr_brain = ASR_Brain(
        modules=hparams["modules"],
        opt_class=hparams["opt_class"],
        hparams=hparams,
        run_opts=run_opts,
        checkpointer=hparams["checkpointer"],
    )
    asr_brain.label_encoder = label_encoder
    # Training/validation loop
    asr_brain.fit(
        asr_brain.hparams.epoch_counter,
        train_data,
        valid_data,
        train_loader_kwargs=hparams["train_dataloader_opts"],
        valid_loader_kwargs=hparams["valid_dataloader_opts"],
    )
    # Test with the checkpoint that reached the best validation PER
    asr_brain.evaluate(
        test_data,
        min_key="PER",
        test_loader_kwargs=hparams["test_dataloader_opts"],
    )
| 10,118 | 33.301695 | 83 | py |
speechbrain | speechbrain-main/recipes/TIMIT/ASR/seq2seq_knowledge_distillation/train_kd.py | #!/usr/bin/env python3
"""Recipe for doing ASR with phoneme targets and joint seq2seq
and CTC loss on the TIMIT dataset following a knowledge distillation scheme as
reported in " Distilling Knowledge from Ensembles of Acoustic Models for Joint
CTC-Attention End-to-End Speech Recognition", Yan Gao et al.
To run this recipe, do the following:
> python experiment.py hyperparams.yaml --data_folder /path/to/TIMIT
Authors
* Yan Gao 2021
* Titouan Parcollet 2021
"""
import sys
import torch
import h5py
import speechbrain as sb
from speechbrain.utils.distributed import run_on_main
from hyperpyyaml import load_hyperpyyaml
# Define training procedure
class ASR(sb.Brain):
    def compute_forward(self, batch, stage):
        """Forward pass of the student model.
        Returns (p_ctc, p_seq, wav_lens) at train time, plus the decoded
        hypotheses at valid (greedy search) / test (beam search) time."""
        batch = batch.to(self.device)
        wavs, wav_lens = batch.sig
        phns_bos, _ = batch.phn_encoded_bos
        if stage == sb.Stage.TRAIN:
            if hasattr(self.hparams, "env_corrupt"):
                # Double the batch with an environment-corrupted copy
                wavs_noise = self.hparams.env_corrupt(wavs, wav_lens)
                wavs = torch.cat([wavs, wavs_noise], dim=0)
                wav_lens = torch.cat([wav_lens, wav_lens])
                phns_bos = torch.cat([phns_bos, phns_bos])
            if hasattr(self.hparams, "augmentation"):
                wavs = self.hparams.augmentation(wavs, wav_lens)
        feats = self.hparams.compute_features(wavs)
        feats = self.modules.normalize(feats, wav_lens)
        x = self.modules.enc(feats)
        # output layer for ctc log-probabilities
        logits = self.modules.ctc_lin(x)
        p_ctc = self.hparams.log_softmax(logits)
        # Attention decoder conditioned on the encoder states
        e_in = self.modules.emb(phns_bos)
        h, _ = self.modules.dec(e_in, x, wav_lens)
        # output layer for seq2seq log-probabilities
        logits = self.modules.seq_lin(h)
        p_seq = self.hparams.log_softmax(logits)
        if stage == sb.Stage.VALID:
            hyps, scores = self.hparams.greedy_searcher(x, wav_lens)
            return p_ctc, p_seq, wav_lens, hyps
        elif stage == sb.Stage.TEST:
            hyps, scores = self.hparams.beam_searcher(x, wav_lens)
            return p_ctc, p_seq, wav_lens, hyps
        return p_ctc, p_seq, wav_lens
def def_tea_name(self):
# define teacher variable name
tea_name = []
for tea_num in range(self.hparams.num_tea):
tea = "t{}".format(tea_num)
tea_name.append(tea)
return tea_name
    def re_format(self, data_dict):
        """Stack the cached outputs of every teacher for the current step.
        Arguments
        ---------
        data_dict : mapping (e.g. an open h5py.File)
            Teacher inference results indexed as
            data_dict[step][teacher_name][tea_key].
        Returns
        -------
        list
            Four tensors (one per entry of hparams.tea_keys), each with the
            teacher dimension first: [num_tea, ...].
        """
        item_tea_list = [None, None, None, None]
        tea_name = self.def_tea_name()
        for tea_num in range(self.hparams.num_tea):
            for i in range(4):
                # Read the cached item for this teacher/key at the current step
                item_tea = data_dict[str(self.step)][tea_name[tea_num]][
                    self.hparams.tea_keys[i]
                ][()]
                # "wer*" entries are plain values; the others are numpy arrays
                if self.hparams.tea_keys[i].startswith("wer"):
                    item_tea = torch.tensor(item_tea)
                else:
                    item_tea = torch.from_numpy(item_tea)
                item_tea = item_tea.to(self.device)
                # Add a leading teacher dimension before concatenation
                item_tea = torch.unsqueeze(item_tea, 0)
                if tea_num == 0:
                    item_tea_list[i] = item_tea
                else:
                    item_tea_list[i] = torch.cat(
                        [item_tea_list[i], item_tea], 0
                    )
        return item_tea_list
def compute_objectives(self, predictions, batch, stage):
if stage == sb.Stage.TRAIN:
p_ctc, p_seq, wav_lens = predictions
else:
p_ctc, p_seq, wav_lens, hyps = predictions
ids = batch.id
phns_eos, phn_lens_eos = batch.phn_encoded_eos
phns, phn_lens = batch.phn_encoded
if hasattr(self.modules, "env_corrupt") and stage == sb.Stage.TRAIN:
phns_eos = torch.cat([phns_eos, phns_eos], dim=0)
phn_lens_eos = torch.cat([phn_lens_eos, phn_lens_eos], dim=0)
# normal supervised training
loss_ctc_nor = self.hparams.ctc_cost(p_ctc, phns, wav_lens, phn_lens)
loss_seq_nor = self.hparams.seq_cost(p_seq, phns_eos, phn_lens_eos)
# load teacher inference results
data_dict = (
self.train_dict
if stage == sb.Stage.TRAIN
else self.valid_dict
if stage == sb.Stage.VALID
else self.test_dict
)
item_tea_list = self.re_format(data_dict)
p_ctc_tea, p_seq_tea, wer_ctc_tea, wer_tea = [
item for item in item_tea_list
]
# Strategy "average": average losses of teachers when doing distillation.
# Strategy "best": choosing the best teacher based on WER.
# Strategy "weighted": assigning weights to teachers based on WER.
if self.hparams.strategy == "best":
# tea_ce for kd
wer_scores, indx = torch.min(wer_tea, dim=0)
indx = list(indx.cpu().numpy())
# select the best teacher for each sentence
tea_seq2seq_pout = None
for stn_indx, tea_indx in enumerate(indx):
s2s_one = p_seq_tea[tea_indx][stn_indx]
s2s_one = torch.unsqueeze(s2s_one, 0)
if stn_indx == 0:
tea_seq2seq_pout = s2s_one
else:
tea_seq2seq_pout = torch.cat([tea_seq2seq_pout, s2s_one], 0)
apply_softmax = torch.nn.Softmax(dim=0)
if (
self.hparams.strategy == "best"
or self.hparams.strategy == "weighted"
):
# mean wer for ctc
tea_wer_ctc_mean = wer_ctc_tea.mean(1)
tea_acc_main = 100 - tea_wer_ctc_mean
# normalise weights via Softmax function
tea_acc_softmax = apply_softmax(tea_acc_main)
if self.hparams.strategy == "weighted":
# mean wer for ce
tea_wer_mean = wer_tea.mean(1)
tea_acc_ce_main = 100 - tea_wer_mean
# normalise weights via Softmax function
tea_acc_ce_softmax = apply_softmax(tea_acc_ce_main)
# kd loss
ctc_loss_list = None
ce_loss_list = None
for tea_num in range(self.hparams.num_tea):
# ctc
p_ctc_tea_one = p_ctc_tea[tea_num]
# calculate CTC distillation loss of one teacher
loss_ctc_one = self.hparams.ctc_cost_kd(
p_ctc, p_ctc_tea_one, wav_lens, device=self.device
)
loss_ctc_one = torch.unsqueeze(loss_ctc_one, 0)
if tea_num == 0:
ctc_loss_list = loss_ctc_one
else:
ctc_loss_list = torch.cat([ctc_loss_list, loss_ctc_one])
# ce
p_seq_tea_one = p_seq_tea[tea_num]
# calculate CE distillation loss of one teacher
loss_seq_one = self.hparams.seq_cost_kd(
p_seq, p_seq_tea_one, phn_lens_eos
)
loss_seq_one = torch.unsqueeze(loss_seq_one, 0)
if tea_num == 0:
ce_loss_list = loss_seq_one
else:
ce_loss_list = torch.cat([ce_loss_list, loss_seq_one])
# kd loss
if self.hparams.strategy == "average":
# get average value of losses from all teachers (CTC and CE loss)
ctc_loss_kd = ctc_loss_list.mean(0)
seq2seq_loss_kd = ce_loss_list.mean(0)
else:
# assign weights to different teachers (CTC loss)
ctc_loss_kd = (tea_acc_softmax * ctc_loss_list).sum(0)
if self.hparams.strategy == "best":
# only use the best teacher to compute CE loss
seq2seq_loss_kd = self.hparams.seq_cost_kd(
p_seq, tea_seq2seq_pout, phn_lens_eos
)
if self.hparams.strategy == "weighted":
# assign weights to different teachers (CE loss)
seq2seq_loss_kd = (tea_acc_ce_softmax * ce_loss_list).sum(0)
# total loss
# combine normal supervised training
loss_ctc = (
self.hparams.temperature
* self.hparams.temperature
* self.hparams.alpha
* ctc_loss_kd
+ (1 - self.hparams.alpha) * loss_ctc_nor
)
loss_seq = (
self.hparams.temperature
* self.hparams.temperature
* self.hparams.alpha
* seq2seq_loss_kd
+ (1 - self.hparams.alpha) * loss_seq_nor
)
loss = (
self.hparams.ctc_weight * loss_ctc
+ (1 - self.hparams.ctc_weight) * loss_seq
)
# Record losses for posterity
if stage != sb.Stage.TRAIN:
self.ctc_metrics.append(ids, p_ctc, phns, wav_lens, phn_lens)
self.seq_metrics.append(ids, p_seq, phns_eos, phn_lens_eos)
self.per_metrics.append(
ids, hyps, phns, None, phn_lens, self.label_encoder.decode_ndim,
)
return loss
def fit_batch(self, batch):
predictions = self.compute_forward(batch, sb.Stage.TRAIN)
loss = self.compute_objectives(predictions, batch, sb.Stage.TRAIN)
loss.backward()
if self.check_gradients(loss):
self.optimizer.step()
self.optimizer.zero_grad()
return loss.detach()
def evaluate_batch(self, batch, stage):
predictions = self.compute_forward(batch, stage=stage)
loss = self.compute_objectives(predictions, batch, stage=stage)
return loss.detach()
def on_stage_start(self, stage, epoch):
self.ctc_metrics = self.hparams.ctc_stats()
self.seq_metrics = self.hparams.seq_stats()
if stage != sb.Stage.TRAIN:
self.per_metrics = self.hparams.per_stats()
def on_stage_end(self, stage, stage_loss, epoch):
if stage == sb.Stage.TRAIN:
self.train_loss = stage_loss
else:
per = self.per_metrics.summarize("error_rate")
if stage == sb.Stage.VALID:
old_lr, new_lr = self.hparams.lr_annealing(per)
sb.nnet.schedulers.update_learning_rate(self.optimizer, new_lr)
self.hparams.train_logger.log_stats(
stats_meta={"epoch": epoch, "lr": old_lr},
train_stats={"loss": self.train_loss},
valid_stats={
"loss": stage_loss,
"ctc_loss": self.ctc_metrics.summarize("average"),
"seq_loss": self.seq_metrics.summarize("average"),
"PER": per,
},
)
self.checkpointer.save_and_keep_only(
meta={"PER": per}, min_keys=["PER"]
)
if stage == sb.Stage.TEST:
self.hparams.train_logger.log_stats(
stats_meta={"Epoch loaded": self.hparams.epoch_counter.current},
test_stats={"loss": stage_loss, "PER": per},
)
with open(self.hparams.wer_file, "w") as w:
w.write("CTC loss stats:\n")
self.ctc_metrics.write_stats(w)
w.write("\nseq2seq loss stats:\n")
self.seq_metrics.write_stats(w)
w.write("\nPER stats:\n")
self.per_metrics.write_stats(w)
print(
"CTC, seq2seq, and PER stats written to file",
self.hparams.wer_file,
)
def data_io_prep(hparams):
    """Creates the datasets and their data processing pipelines.

    Builds the train/valid/test DynamicItemDatasets from JSON annotations,
    attaches audio and phoneme pipelines, and fits a CTC label encoder
    (including blank and BOS/EOS symbols) on the training transcripts.

    Returns (train_data, valid_data, test_data, label_encoder).
    """
    data_folder = hparams["data_folder"]
    # 1. Declarations:
    train_data = sb.dataio.dataset.DynamicItemDataset.from_json(
        json_path=hparams["train_annotation"],
        replacements={"data_root": data_folder},
    )
    if hparams["sorting"] == "ascending":
        # we sort training data to speed up training and get better results.
        train_data = train_data.filtered_sorted(sort_key="duration")
        # when sorting do not shuffle in dataloader ! otherwise is pointless
        # (note: this mutates the caller's hparams dict in place)
        hparams["train_dataloader_opts"]["shuffle"] = False
    elif hparams["sorting"] == "descending":
        train_data = train_data.filtered_sorted(
            sort_key="duration", reverse=True
        )
        # when sorting do not shuffle in dataloader ! otherwise is pointless
        hparams["train_dataloader_opts"]["shuffle"] = False
    elif hparams["sorting"] == "random":
        pass
    else:
        raise NotImplementedError(
            "sorting must be random, ascending or descending"
        )
    valid_data = sb.dataio.dataset.DynamicItemDataset.from_json(
        json_path=hparams["valid_annotation"],
        replacements={"data_root": data_folder},
    )
    valid_data = valid_data.filtered_sorted(sort_key="duration")
    test_data = sb.dataio.dataset.DynamicItemDataset.from_json(
        json_path=hparams["test_annotation"],
        replacements={"data_root": data_folder},
    )
    test_data = test_data.filtered_sorted(sort_key="duration")
    datasets = [train_data, valid_data, test_data]
    label_encoder = sb.dataio.encoder.CTCTextEncoder()
    # 2. Define audio pipeline:
    @sb.utils.data_pipeline.takes("wav")
    @sb.utils.data_pipeline.provides("sig")
    def audio_pipeline(wav):
        # Lazily read the waveform when the "sig" key is requested.
        sig = sb.dataio.dataio.read_audio(wav)
        return sig
    sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline)
    # 3. Define text pipeline:
    @sb.utils.data_pipeline.takes("phn")
    @sb.utils.data_pipeline.provides(
        "phn_list",
        "phn_encoded_list",
        "phn_encoded",
        "phn_encoded_eos",
        "phn_encoded_bos",
    )
    def text_pipeline(phn):
        # Each yield provides the next key declared above, in order.
        phn_list = phn.strip().split()
        yield phn_list
        phn_encoded_list = label_encoder.encode_sequence(phn_list)
        yield phn_encoded_list
        phn_encoded = torch.LongTensor(phn_encoded_list)
        yield phn_encoded
        phn_encoded_eos = torch.LongTensor(
            label_encoder.append_eos_index(phn_encoded_list)
        )
        yield phn_encoded_eos
        phn_encoded_bos = torch.LongTensor(
            label_encoder.prepend_bos_index(phn_encoded_list)
        )
        yield phn_encoded_bos
    sb.dataio.dataset.add_dynamic_item(datasets, text_pipeline)
    # 4. Fit encoder:
    # NOTE: In this minimal example, also update from valid data
    label_encoder.update_from_didataset(train_data, output_key="phn_list")
    if (
        hparams["blank_index"] != hparams["bos_index"]
        or hparams["blank_index"] != hparams["eos_index"]
    ):
        label_encoder.insert_blank(index=hparams["blank_index"])
    if hparams["bos_index"] == hparams["eos_index"]:
        # Shared BOS/EOS symbol.
        label_encoder.insert_bos_eos(
            bos_label="<eos-bos>",
            eos_label="<eos-bos>",
            bos_index=hparams["bos_index"],
        )
    else:
        label_encoder.insert_bos_eos(
            bos_label="<bos>",
            eos_label="<eos>",
            bos_index=hparams["bos_index"],
            eos_index=hparams["eos_index"],
        )
    # 5. Set output:
    sb.dataio.dataset.set_output_keys(
        datasets,
        ["id", "sig", "phn_encoded", "phn_encoded_eos", "phn_encoded_bos"],
    )
    return train_data, valid_data, test_data, label_encoder
def load_teachers(hparams):
    """Open the saved teacher-inference HDF5 file and return its splits.

    The file is produced beforehand by ``save_teachers.py`` and contains
    one group per split ("train", "valid", "test"), each holding the
    per-batch teacher posteriors and WER statistics.

    Returns a (train, valid, test) tuple of h5py groups; the underlying
    file handle stays open for the lifetime of these groups.
    """
    hdf5_path = (
        f"{hparams['tea_infer_dir']}/tea_infer_{hparams['batch_size']}batch.hdf5"
    )
    teacher_file = h5py.File(hdf5_path, "r")
    return (
        teacher_file["train"],
        teacher_file["valid"],
        teacher_file["test"],
    )
def st_load(hparams, asr_brain):
    """Initialize the student from a pre-trained checkpoint, encoder only.

    Loads ``model.ckpt`` from ``hparams["pretrain_st_dir"]`` and keeps only
    the parameters whose names start with "0" (the first entry of the saved
    ModuleList); everything else -- i.e. the decoder side -- is dropped
    before loading with ``strict=False``.
    """
    print("loading pre-trained student model...")
    ckpt_path = hparams["pretrain_st_dir"] + "/model.ckpt"
    state = torch.load(ckpt_path)
    # Keep only the encoder weights; discard decoder-layer entries.
    encoder_state = {
        name: value for name, value in state.items() if name.startswith("0")
    }
    asr_brain.hparams.model.load_state_dict(encoder_state, strict=False)
if __name__ == "__main__":
    # Entry point: parse CLI, prepare TIMIT, build datasets, load the
    # pre-computed teacher posteriors, then train and evaluate the student.
    # CLI:
    hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])
    # Load hyperparameters file with command-line overrides
    with open(hparams_file) as fin:
        hparams = load_hyperpyyaml(fin, overrides)
    # Dataset prep (parsing TIMIT and annotation into csv files)
    from timit_prepare import prepare_timit  # noqa
    # Initialize ddp (useful only for multi-GPU DDP training)
    sb.utils.distributed.ddp_init_group(run_opts)
    # multi-gpu (ddp) save data preparation
    run_on_main(
        prepare_timit,
        kwargs={
            "data_folder": hparams["data_folder"],
            "save_json_train": hparams["train_annotation"],
            "save_json_valid": hparams["valid_annotation"],
            "save_json_test": hparams["test_annotation"],
            "skip_prep": hparams["skip_prep"],
        },
    )
    # Dataset IO prep: creating Dataset objects and proper encodings for phones
    train_data, valid_data, test_data, label_encoder = data_io_prep(hparams)
    # Create experiment directory
    sb.create_experiment_directory(
        experiment_directory=hparams["output_folder"],
        hyperparams_to_save=hparams_file,
        overrides=overrides,
    )
    # Trainer initialization
    asr_brain = ASR(
        modules=hparams["modules"],
        opt_class=hparams["opt_class"],
        hparams=hparams,
        run_opts=run_opts,
        checkpointer=hparams["checkpointer"],
    )
    asr_brain.label_encoder = label_encoder
    # load teacher models
    # (HDF5 groups with saved posteriors; consumed by ASR.re_format)
    train_dict, valid_dict, test_dict = load_teachers(hparams)
    asr_brain.train_dict = train_dict
    asr_brain.valid_dict = valid_dict
    asr_brain.test_dict = test_dict
    if hparams["pretrain"]:
        # load pre-trained student model except last layer
        # (only on a fresh run, i.e. before any epoch has completed)
        if hparams["epoch_counter"].current == 0:
            st_load(hparams, asr_brain)
    # Training/validation loop
    asr_brain.fit(
        asr_brain.hparams.epoch_counter,
        train_data,
        valid_data,
        train_loader_kwargs=hparams["train_dataloader_opts"],
        valid_loader_kwargs=hparams["valid_dataloader_opts"],
    )
    # Test
    asr_brain.evaluate(
        test_data,
        min_key="PER",
        test_loader_kwargs=hparams["test_dataloader_opts"],
    )
| 18,522 | 34.081439 | 81 | py |
speechbrain | speechbrain-main/recipes/TIMIT/ASR/seq2seq_knowledge_distillation/save_teachers.py | #!/usr/bin/env python3
"""Recipe for doing ASR with phoneme targets and joint seq2seq
and CTC loss on the TIMIT dataset following a knowledge distillation scheme as
reported in " Distilling Knowledge from Ensembles of Acoustic Models for Joint
CTC-Attention End-to-End Speech Recognition", Yan Gao et al.
To run this recipe, do the following:
> python experiment.py hyperparams.yaml --data_folder /path/to/TIMIT
Authors
* Yan Gao 2021
* Titouan Parcollet 2021
"""
import sys
import torch
import speechbrain as sb
from speechbrain.utils.distributed import run_on_main
from hyperpyyaml import load_hyperpyyaml
from tqdm.contrib import tqdm
import h5py
import numpy as np
# Define training procedure
class ASR(sb.Brain):
    """Runs an ensemble of pre-trained teacher models in inference mode and
    saves their posteriors plus per-utterance WER statistics to HDF5, to be
    consumed later by the distillation student recipe.

    NOTE(review): ``compute_forward_tea`` reads the module-level globals
    ``tea_enc_list``/``tea_emb_list``/``tea_dec_list``/``tea_ctc_lin_list``/
    ``tea_seq_lin_list`` defined in the ``__main__`` block -- this class is
    only usable from this script.
    """
    def __init__(self, tea_modules_list=None, hparams=None, run_opts=None):
        super(ASR, self).__init__(
            modules=None,
            opt_class=None,
            hparams=hparams,
            run_opts=run_opts,
            checkpointer=None,
        )
        # Initialize teacher parameters
        # Wrap each teacher's modules in a ModuleList and move to device.
        tea_modules_list_ = []
        for tea_modules in tea_modules_list:
            tea_modules_ = torch.nn.ModuleList(tea_modules)
            tea_modules_ = tea_modules_.to(self.device)
            tea_modules_list_.append(tea_modules_)
        self.tea_modules_list = tea_modules_list_
    def compute_forward_tea(self, batch):
        """Run every teacher on one batch; return a list of dicts with
        temperature-scaled CTC/seq2seq posteriors and CTC/CE WER arrays."""
        batch = batch.to(self.device)
        wavs, wav_lens = batch.sig
        phns_bos, _ = batch.phn_encoded_bos
        phns, phn_lens = batch.phn_encoded
        feats = self.hparams.compute_features(wavs)
        feats = self.hparams.normalize(feats, wav_lens)
        apply_softmax = torch.nn.Softmax(dim=-1)
        # run inference to each teacher model
        tea_dict_list = []
        for num in range(self.hparams.num_tea):
            tea_dict = {}
            self.tea_modules_list[num].eval()
            with torch.no_grad():
                x_tea = tea_enc_list[num](feats)
                ctc_logits_tea = tea_ctc_lin_list[num](x_tea)
                # output layer for ctc log-probabilities
                # (logits softened by the distillation temperature)
                p_ctc_tea = self.hparams.log_softmax(
                    ctc_logits_tea / self.hparams.temperature
                )
                e_in_tea = tea_emb_list[num](phns_bos)
                h_tea, _ = tea_dec_list[num](e_in_tea, x_tea, wav_lens)
                # output layer for seq2seq log-probabilities
                seq_logits_tea = tea_seq_lin_list[num](h_tea)
                p_seq_tea = apply_softmax(
                    seq_logits_tea / self.hparams.temperature
                )
                # WER from output layer of CTC
                sequence_ctc = sb.decoders.ctc_greedy_decode(
                    p_ctc_tea, wav_lens, blank_id=self.hparams.blank_index
                )
                phns_decode = sb.utils.data_utils.undo_padding(phns, phn_lens)
                phns_decode = self.label_encoder.decode_ndim(phns_decode)
                sequence_decode = self.label_encoder.decode_ndim(sequence_ctc)
                per_stats_ctc = sb.utils.edit_distance.wer_details_for_batch(
                    batch.id,
                    phns_decode,
                    sequence_decode,
                    compute_alignments=False,
                )
                wer_ctc_tea = []
                for item in per_stats_ctc:
                    wer_ctc_tea.append(item["WER"])
                # Cap WERs at 100 so teacher accuracies stay non-negative.
                wer_ctc_tea = exclude_wer(wer_ctc_tea)
                wer_ctc_tea = np.expand_dims(wer_ctc_tea, axis=0)
                # WER from output layer of CE
                _, predictions = p_seq_tea.max(dim=-1)
                hyps = sb.decoders.seq2seq.batch_filter_seq2seq_output(
                    predictions, eos_id=self.hparams.eos_index
                )
                sequence_ce = self.label_encoder.decode_ndim(hyps)
                per_stats_ce = sb.utils.edit_distance.wer_details_for_batch(
                    batch.id, phns_decode, sequence_ce, compute_alignments=False
                )
                wer_tea = []
                for item in per_stats_ce:
                    wer_tea.append(item["WER"])
                wer_tea = exclude_wer(wer_tea)
                wer_tea = np.expand_dims(wer_tea, axis=0)
            # save the variables into dict
            tea_dict["p_ctc_tea"] = p_ctc_tea.cpu().numpy()
            tea_dict["p_seq_tea"] = p_seq_tea.cpu().numpy()
            tea_dict["wer_ctc_tea"] = wer_ctc_tea
            tea_dict["wer_tea"] = wer_tea
            tea_dict_list.append(tea_dict)
        return tea_dict_list
    def def_tea_name(self):
        # define teacher variable name
        # Teachers are stored under HDF5 groups "t0", "t1", ...
        tea_name = []
        for tea_num in range(self.hparams.num_tea):
            tea = "t{}".format(tea_num)
            tea_name.append(tea)
        return tea_name
    def fit_save(self, train_set, valid_set=None, test_set=None):
        """Iterate all three splits and persist teacher outputs to HDF5.

        HDF5 layout: /<split>/<batch step>/<tN>/{p_ctc_tea, p_seq_tea,
        wer_ctc_tea, wer_tea}. Batch steps restart at 1 for each split.
        """
        data_sets = [train_set, valid_set, test_set]
        stage = self.hparams.stage
        tea_name = self.def_tea_name()
        # define output file name
        f_name = "/tea_infer_{}batch.hdf5".format(self.hparams.batch_size)
        f = h5py.File(self.hparams.output_folder + f_name, "w")
        for num in range(len(stage)):
            # create group for each set (train, valid, test).
            g_sets = f.create_group(stage[num])
            with tqdm(
                data_sets[num], initial=self.step, dynamic_ncols=True,
            ) as t:
                for batch in t:
                    self.step += 1
                    # create group for each batch
                    g_batch = g_sets.create_group(str(self.step))
                    # run inference to each teacher
                    tea_dict_list = self.compute_forward_tea(batch)
                    for tea_num in range(self.hparams.num_tea):
                        # create group for each teacher
                        g_tea = g_batch.create_group(tea_name[tea_num])
                        g_tea.create_dataset(
                            "p_ctc_tea",
                            data=tea_dict_list[tea_num]["p_ctc_tea"],
                        )
                        g_tea.create_dataset(
                            "p_seq_tea",
                            data=tea_dict_list[tea_num]["p_seq_tea"],
                        )
                        g_tea.create_dataset(
                            "wer_ctc_tea",
                            data=tea_dict_list[tea_num]["wer_ctc_tea"][0],
                        )
                        g_tea.create_dataset(
                            "wer_tea", data=tea_dict_list[tea_num]["wer_tea"][0]
                        )
            # Restart batch numbering for the next split.
            self.step = 0
        f.close()
def exclude_wer(wer):
    """Clip WER values at 100.

    Per-utterance WERs can exceed 100 (e.g. many insertions); they are
    capped so that the accuracy-based teacher weights (100 - WER) used
    during distillation stay non-negative.

    Arguments
    ---------
    wer : iterable of numbers
        Per-utterance WER values.

    Returns
    -------
    numpy.ndarray
        The input values with every entry limited to at most 100.
    """
    # Vectorized equivalent of applying min(item, 100) element-wise.
    return np.minimum(np.asarray(wer), 100)
def data_io_prep(hparams):
    """Creates the datasets and their data processing pipelines.

    Builds the train/valid/test DynamicItemDatasets from JSON annotations,
    attaches audio and phoneme pipelines, and fits a CTC label encoder
    (blank + BOS/EOS) on the training transcripts.

    Returns (train_data, valid_data, test_data, label_encoder).
    """
    data_folder = hparams["data_folder"]
    # 1. Declarations:
    train_data = sb.dataio.dataset.DynamicItemDataset.from_json(
        json_path=hparams["train_annotation"],
        replacements={"data_root": data_folder},
    )
    if hparams["sorting"] == "ascending":
        # we sort training data to speed up training and get better results.
        train_data = train_data.filtered_sorted(sort_key="duration")
        # when sorting do not shuffle in dataloader ! otherwise is pointless
        # (note: this mutates the caller's hparams dict in place)
        hparams["train_dataloader_opts"]["shuffle"] = False
    elif hparams["sorting"] == "descending":
        train_data = train_data.filtered_sorted(
            sort_key="duration", reverse=True
        )
        # when sorting do not shuffle in dataloader ! otherwise is pointless
        hparams["train_dataloader_opts"]["shuffle"] = False
    elif hparams["sorting"] == "random":
        pass
    else:
        raise NotImplementedError(
            "sorting must be random, ascending or descending"
        )
    valid_data = sb.dataio.dataset.DynamicItemDataset.from_json(
        json_path=hparams["valid_annotation"],
        replacements={"data_root": data_folder},
    )
    valid_data = valid_data.filtered_sorted(sort_key="duration")
    test_data = sb.dataio.dataset.DynamicItemDataset.from_json(
        json_path=hparams["test_annotation"],
        replacements={"data_root": data_folder},
    )
    test_data = test_data.filtered_sorted(sort_key="duration")
    datasets = [train_data, valid_data, test_data]
    label_encoder = sb.dataio.encoder.CTCTextEncoder()
    # 2. Define audio pipeline:
    @sb.utils.data_pipeline.takes("wav")
    @sb.utils.data_pipeline.provides("sig")
    def audio_pipeline(wav):
        # Lazily read the waveform when the "sig" key is requested.
        sig = sb.dataio.dataio.read_audio(wav)
        return sig
    sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline)
    # 3. Define text pipeline:
    @sb.utils.data_pipeline.takes("phn")
    @sb.utils.data_pipeline.provides(
        "phn_list",
        "phn_encoded_list",
        "phn_encoded",
        "phn_encoded_eos",
        "phn_encoded_bos",
    )
    def text_pipeline(phn):
        # Each yield provides the next key declared above, in order.
        phn_list = phn.strip().split()
        yield phn_list
        phn_encoded_list = label_encoder.encode_sequence(phn_list)
        yield phn_encoded_list
        phn_encoded = torch.LongTensor(phn_encoded_list)
        yield phn_encoded
        phn_encoded_eos = torch.LongTensor(
            label_encoder.append_eos_index(phn_encoded_list)
        )
        yield phn_encoded_eos
        phn_encoded_bos = torch.LongTensor(
            label_encoder.prepend_bos_index(phn_encoded_list)
        )
        yield phn_encoded_bos
    sb.dataio.dataset.add_dynamic_item(datasets, text_pipeline)
    # 4. Fit encoder:
    # NOTE: In this minimal example, also update from valid data
    label_encoder.update_from_didataset(train_data, output_key="phn_list")
    if (
        hparams["blank_index"] != hparams["bos_index"]
        or hparams["blank_index"] != hparams["eos_index"]
    ):
        label_encoder.insert_blank(index=hparams["blank_index"])
    if hparams["bos_index"] == hparams["eos_index"]:
        # Shared BOS/EOS symbol.
        label_encoder.insert_bos_eos(
            bos_label="<eos-bos>",
            eos_label="<eos-bos>",
            bos_index=hparams["bos_index"],
        )
    else:
        label_encoder.insert_bos_eos(
            bos_label="<bos>",
            eos_label="<eos>",
            bos_index=hparams["bos_index"],
            eos_index=hparams["eos_index"],
        )
    # 5. Set output:
    sb.dataio.dataset.set_output_keys(
        datasets,
        ["id", "sig", "phn_encoded", "phn_encoded_eos", "phn_encoded_bos"],
    )
    return train_data, valid_data, test_data, label_encoder
if __name__ == "__main__":
    # Entry point: parse CLI, prepare TIMIT, instantiate the teacher
    # ensemble from hparams, load its pre-trained weights, then run
    # inference over all splits and save the results to HDF5.
    # CLI:
    hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])
    # Load hyperparameters file with command-line overrides
    with open(hparams_file) as fin:
        hparams = load_hyperpyyaml(fin, overrides)
    # Dataset prep (parsing TIMIT and annotation into csv files)
    from timit_prepare import prepare_timit  # noqa
    # Initialize ddp (useful only for multi-GPU DDP training)
    sb.utils.distributed.ddp_init_group(run_opts)
    # multi-gpu (ddp) save data preparation
    run_on_main(
        prepare_timit,
        kwargs={
            "data_folder": hparams["data_folder"],
            "save_json_train": hparams["train_annotation"],
            "save_json_valid": hparams["valid_annotation"],
            "save_json_test": hparams["test_annotation"],
            "skip_prep": hparams["skip_prep"],
        },
    )
    # Dataset IO prep: creating Dataset objects and proper encodings for phones
    train_data, valid_data, test_data, label_encoder = data_io_prep(hparams)
    # Create experiment directory
    sb.create_experiment_directory(
        experiment_directory=hparams["output_folder"],
        hyperparams_to_save=hparams_file,
        overrides=overrides,
    )
    # initialise teacher model variables
    # NOTE(review): the exec() calls below only build names like
    # "tea0_modules" dynamically; plain indexing (hparams["tea%d_enc" % i])
    # and a list of ModuleLists would do the same without exec.
    tea_enc_list = []
    tea_emb_list = []
    tea_dec_list = []
    tea_ctc_lin_list = []
    tea_seq_lin_list = []
    for i in range(hparams["num_tea"]):
        exec("tea_enc_list.append(hparams['tea{}_enc'])".format(i))
        exec("tea_emb_list.append(hparams['tea{}_emb'])".format(i))
        exec("tea_dec_list.append(hparams['tea{}_dec'])".format(i))
        exec("tea_ctc_lin_list.append(hparams['tea{}_ctc_lin'])".format(i))
        exec("tea_seq_lin_list.append(hparams['tea{}_seq_lin'])".format(i))
    # create ModuleList
    for i in range(hparams["num_tea"]):
        exec(
            "tea{}_modules = torch.nn.ModuleList([tea_enc_list[i], tea_emb_list[i], tea_dec_list[i], tea_ctc_lin_list[i], tea_seq_lin_list[i]])".format(
                i
            )
        )  # i denotes the index of teacher models
    tea_modules_list = []
    for i in range(hparams["num_tea"]):
        exec("tea_modules_list.append(tea{}_modules)".format(i))
    # Trainer initialization
    asr_brain = ASR(
        tea_modules_list=tea_modules_list, hparams=hparams, run_opts=run_opts
    )
    asr_brain.label_encoder = label_encoder
    # load pre-trained weights of teacher models
    # (tea_models_dir is a text file with one checkpoint path per line)
    with open(hparams["tea_models_dir"], "r") as f:
        enter_token = "\n"
        for i, path in enumerate(f.readlines()):
            exec(
                "tea{}_modules.load_state_dict(torch.load(path.strip(enter_token)))".format(
                    i
                )
            )
    # make dataloaders
    train_set = sb.dataio.dataloader.make_dataloader(
        train_data, **hparams["train_dataloader_opts"]
    )
    valid_set = sb.dataio.dataloader.make_dataloader(
        valid_data, **hparams["valid_dataloader_opts"]
    )
    test_set = sb.dataio.dataloader.make_dataloader(
        test_data, **hparams["test_dataloader_opts"]
    )
    # run inference and save results
    asr_brain.fit_save(train_set, valid_set, test_set)
| 14,025 | 34.329975 | 152 | py |
speechbrain | speechbrain-main/recipes/TIMIT/ASR/seq2seq_knowledge_distillation/train_teacher.py | #!/usr/bin/env python3
"""Recipe for doing ASR with phoneme targets and joint seq2seq
and CTC loss on the TIMIT dataset following a knowledge distillation scheme as
reported in " Distilling Knowledge from Ensembles of Acoustic Models for Joint
CTC-Attention End-to-End Speech Recognition", Yan Gao et al.
To run this recipe, do the following:
> python experiment.py hyperparams.yaml --data_folder /path/to/TIMIT
Authors
* Yan Gao 2021
* Titouan Parcollet 2021
"""
import os
import sys
import torch
import speechbrain as sb
from speechbrain.utils.distributed import run_on_main
from hyperpyyaml import load_hyperpyyaml
# Define training procedure
class ASR(sb.Brain):
    """Teacher ASR trainer: joint CTC + attention seq2seq phoneme
    recognition on TIMIT, used to produce the teacher checkpoints for the
    knowledge-distillation recipe."""
    def compute_forward(self, batch, stage):
        """Forward pass: features -> encoder -> CTC head + attention decoder.

        Returns ``(p_ctc, p_seq, wav_lens)`` during TRAIN, plus decoded
        hypotheses during VALID (greedy search) and TEST (beam search).
        """
        batch = batch.to(self.device)
        wavs, wav_lens = batch.sig
        phns_bos, _ = batch.phn_encoded_bos
        if stage == sb.Stage.TRAIN:
            if hasattr(self.hparams, "env_corrupt"):
                # Append an environment-corrupted copy of the batch
                # (doubles the effective batch size).
                wavs_noise = self.hparams.env_corrupt(wavs, wav_lens)
                wavs = torch.cat([wavs, wavs_noise], dim=0)
                wav_lens = torch.cat([wav_lens, wav_lens])
                phns_bos = torch.cat([phns_bos, phns_bos])
            if hasattr(self.hparams, "augmentation"):
                wavs = self.hparams.augmentation(wavs, wav_lens)
        feats = self.hparams.compute_features(wavs)
        feats = self.modules.normalize(feats, wav_lens)
        x = self.modules.enc(feats)
        # output layer for ctc log-probabilities
        logits = self.modules.ctc_lin(x)
        p_ctc = self.hparams.log_softmax(logits)
        e_in = self.modules.emb(phns_bos)
        h, _ = self.modules.dec(e_in, x, wav_lens)
        # output layer for seq2seq log-probabilities
        logits = self.modules.seq_lin(h)
        p_seq = self.hparams.log_softmax(logits)
        if stage == sb.Stage.VALID:
            hyps, scores = self.hparams.greedy_searcher(x, wav_lens)
            return p_ctc, p_seq, wav_lens, hyps
        elif stage == sb.Stage.TEST:
            hyps, scores = self.hparams.beam_searcher(x, wav_lens)
            return p_ctc, p_seq, wav_lens, hyps
        return p_ctc, p_seq, wav_lens
    def compute_objectives(self, predictions, batch, stage):
        """Weighted sum of CTC and seq2seq NLL losses; track metrics."""
        if stage == sb.Stage.TRAIN:
            p_ctc, p_seq, wav_lens = predictions
        else:
            p_ctc, p_seq, wav_lens, hyps = predictions
        ids = batch.id
        phns_eos, phn_lens_eos = batch.phn_encoded_eos
        phns, phn_lens = batch.phn_encoded
        # NOTE(review): compute_forward gates on hasattr(self.hparams,
        # "env_corrupt") while this checks self.modules -- confirm both
        # resolve identically, otherwise the targets are not doubled to
        # match the doubled wav_lens.
        if hasattr(self.modules, "env_corrupt") and stage == sb.Stage.TRAIN:
            phns_eos = torch.cat([phns_eos, phns_eos], dim=0)
            phn_lens_eos = torch.cat([phn_lens_eos, phn_lens_eos], dim=0)
        loss_ctc = self.hparams.ctc_cost(p_ctc, phns, wav_lens, phn_lens)
        loss_seq = self.hparams.seq_cost(p_seq, phns_eos, phn_lens_eos)
        loss = self.hparams.ctc_weight * loss_ctc
        loss += (1 - self.hparams.ctc_weight) * loss_seq
        # Record losses for posterity
        if stage != sb.Stage.TRAIN:
            self.ctc_metrics.append(ids, p_ctc, phns, wav_lens, phn_lens)
            # NOTE(review): phn_lens is passed here while the loss above
            # uses phn_lens_eos -- confirm which relative length the seq
            # metric expects (the KD student recipe passes phn_lens_eos).
            self.seq_metrics.append(ids, p_seq, phns_eos, phn_lens)
            self.per_metrics.append(
                ids, hyps, phns, None, phn_lens, self.label_encoder.decode_ndim,
            )
        return loss
    def fit_batch(self, batch):
        """One training step: forward, loss, backward, clipped update."""
        predictions = self.compute_forward(batch, sb.Stage.TRAIN)
        loss = self.compute_objectives(predictions, batch, sb.Stage.TRAIN)
        loss.backward()
        # check_gradients clips/validates gradients; skip update if bad.
        if self.check_gradients(loss):
            self.optimizer.step()
        self.optimizer.zero_grad()
        return loss.detach()
    def evaluate_batch(self, batch, stage):
        """One evaluation step (no gradient update)."""
        predictions = self.compute_forward(batch, stage=stage)
        loss = self.compute_objectives(predictions, batch, stage=stage)
        return loss.detach()
    def on_stage_start(self, stage, epoch):
        """(Re)create the metric trackers at the start of each stage."""
        self.ctc_metrics = self.hparams.ctc_stats()
        self.seq_metrics = self.hparams.seq_stats()
        if stage != sb.Stage.TRAIN:
            self.per_metrics = self.hparams.per_stats()
    def on_stage_end(self, stage, stage_loss, epoch):
        """Log stats, anneal the LR on PER, checkpoint the best model."""
        if stage == sb.Stage.TRAIN:
            self.train_loss = stage_loss
        else:
            per = self.per_metrics.summarize("error_rate")
        if stage == sb.Stage.VALID:
            # PER-driven learning-rate annealing.
            old_lr, new_lr = self.hparams.lr_annealing(per)
            sb.nnet.schedulers.update_learning_rate(self.optimizer, new_lr)
            self.hparams.train_logger.log_stats(
                stats_meta={"epoch": epoch, "lr": old_lr},
                train_stats={"loss": self.train_loss},
                valid_stats={
                    "loss": stage_loss,
                    "ctc_loss": self.ctc_metrics.summarize("average"),
                    "seq_loss": self.seq_metrics.summarize("average"),
                    "PER": per,
                },
            )
            # Keep only the checkpoint with the lowest PER.
            self.checkpointer.save_and_keep_only(
                meta={"PER": per}, min_keys=["PER"]
            )
        if stage == sb.Stage.TEST:
            self.hparams.train_logger.log_stats(
                stats_meta={"Epoch loaded": self.hparams.epoch_counter.current},
                test_stats={"loss": stage_loss, "PER": per},
            )
            with open(self.hparams.wer_file, "w") as w:
                w.write("CTC loss stats:\n")
                self.ctc_metrics.write_stats(w)
                w.write("\nseq2seq loss stats:\n")
                self.seq_metrics.write_stats(w)
                w.write("\nPER stats:\n")
                self.per_metrics.write_stats(w)
                print(
                    "CTC, seq2seq, and PER stats written to file",
                    self.hparams.wer_file,
                )
def data_io_prep(hparams):
    """Creates the datasets and their data processing pipelines.

    Same as the other recipes in this folder, except that the label encoder
    is loaded from ``save_folder/label_encoder.txt`` when it already exists
    (so teacher/student runs share one encoding) and is saved there after
    being fitted otherwise.

    Returns (train_data, valid_data, test_data, label_encoder).
    """
    data_folder = hparams["data_folder"]
    # 1. Declarations:
    train_data = sb.dataio.dataset.DynamicItemDataset.from_json(
        json_path=hparams["train_annotation"],
        replacements={"data_root": data_folder},
    )
    if hparams["sorting"] == "ascending":
        # we sort training data to speed up training and get better results.
        train_data = train_data.filtered_sorted(sort_key="duration")
        # when sorting do not shuffle in dataloader ! otherwise is pointless
        # (note: this mutates the caller's hparams dict in place)
        hparams["train_dataloader_opts"]["shuffle"] = False
    elif hparams["sorting"] == "descending":
        train_data = train_data.filtered_sorted(
            sort_key="duration", reverse=True
        )
        # when sorting do not shuffle in dataloader ! otherwise is pointless
        hparams["train_dataloader_opts"]["shuffle"] = False
    elif hparams["sorting"] == "random":
        pass
    else:
        raise NotImplementedError(
            "sorting must be random, ascending or descending"
        )
    valid_data = sb.dataio.dataset.DynamicItemDataset.from_json(
        json_path=hparams["valid_annotation"],
        replacements={"data_root": data_folder},
    )
    valid_data = valid_data.filtered_sorted(sort_key="duration")
    test_data = sb.dataio.dataset.DynamicItemDataset.from_json(
        json_path=hparams["test_annotation"],
        replacements={"data_root": data_folder},
    )
    test_data = test_data.filtered_sorted(sort_key="duration")
    datasets = [train_data, valid_data, test_data]
    label_encoder = sb.dataio.encoder.CTCTextEncoder()
    # 2. Define audio pipeline:
    @sb.utils.data_pipeline.takes("wav")
    @sb.utils.data_pipeline.provides("sig")
    def audio_pipeline(wav):
        # Lazily read the waveform when the "sig" key is requested.
        sig = sb.dataio.dataio.read_audio(wav)
        return sig
    sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline)
    # 3. Define text pipeline:
    @sb.utils.data_pipeline.takes("phn")
    @sb.utils.data_pipeline.provides(
        "phn_list",
        "phn_encoded_list",
        "phn_encoded",
        "phn_encoded_eos",
        "phn_encoded_bos",
    )
    def text_pipeline(phn):
        # Each yield provides the next key declared above, in order.
        phn_list = phn.strip().split()
        yield phn_list
        phn_encoded_list = label_encoder.encode_sequence(phn_list)
        yield phn_encoded_list
        phn_encoded = torch.LongTensor(phn_encoded_list)
        yield phn_encoded
        phn_encoded_eos = torch.LongTensor(
            label_encoder.append_eos_index(phn_encoded_list)
        )
        yield phn_encoded_eos
        phn_encoded_bos = torch.LongTensor(
            label_encoder.prepend_bos_index(phn_encoded_list)
        )
        yield phn_encoded_bos
    sb.dataio.dataset.add_dynamic_item(datasets, text_pipeline)
    # 4. Fit encoder:
    # Load or compute the label encoder
    label_encoder_file = os.path.join(
        hparams["save_folder"], "label_encoder.txt"
    )
    if os.path.exists(label_encoder_file):
        # Reuse a previously fitted encoding (keeps indices consistent
        # across teacher/student runs).
        label_encoder.load(label_encoder_file)
    else:
        label_encoder.update_from_didataset(train_data, output_key="phn_list")
        if (
            hparams["blank_index"] != hparams["bos_index"]
            or hparams["blank_index"] != hparams["eos_index"]
        ):
            label_encoder.insert_blank(index=hparams["blank_index"])
        if hparams["bos_index"] == hparams["eos_index"]:
            # Shared BOS/EOS symbol.
            label_encoder.insert_bos_eos(
                bos_label="<eos-bos>",
                eos_label="<eos-bos>",
                bos_index=hparams["bos_index"],
            )
        else:
            label_encoder.insert_bos_eos(
                bos_label="<bos>",
                eos_label="<eos>",
                bos_index=hparams["bos_index"],
                eos_index=hparams["eos_index"],
            )
        label_encoder.save(
            os.path.join(hparams["save_folder"], "label_encoder.txt")
        )
    # 5. Set output:
    sb.dataio.dataset.set_output_keys(
        datasets,
        ["id", "sig", "phn_encoded", "phn_encoded_eos", "phn_encoded_bos"],
    )
    return train_data, valid_data, test_data, label_encoder
if __name__ == "__main__":
    # Entry point: parse CLI, prepare TIMIT, build datasets, then train
    # and evaluate one teacher model.
    # CLI:
    hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])
    # Load hyperparameters file with command-line overrides
    with open(hparams_file) as fin:
        hparams = load_hyperpyyaml(fin, overrides)
    # Dataset prep (parsing TIMIT and annotation into csv files)
    from timit_prepare import prepare_timit  # noqa
    # Initialize ddp (useful only for multi-GPU DDP training)
    sb.utils.distributed.ddp_init_group(run_opts)
    # multi-gpu (ddp) save data preparation
    run_on_main(
        prepare_timit,
        kwargs={
            "data_folder": hparams["data_folder"],
            "save_json_train": hparams["train_annotation"],
            "save_json_valid": hparams["valid_annotation"],
            "save_json_test": hparams["test_annotation"],
            "skip_prep": hparams["skip_prep"],
        },
    )
    # Dataset IO prep: creating Dataset objects and proper encodings for phones
    train_data, valid_data, test_data, label_encoder = data_io_prep(hparams)
    # Create experiment directory
    sb.create_experiment_directory(
        experiment_directory=hparams["output_folder"],
        hyperparams_to_save=hparams_file,
        overrides=overrides,
    )
    # Trainer initialization
    asr_brain = ASR(
        modules=hparams["modules"],
        opt_class=hparams["opt_class"],
        hparams=hparams,
        run_opts=run_opts,
        checkpointer=hparams["checkpointer"],
    )
    asr_brain.label_encoder = label_encoder
    # Training/validation loop
    asr_brain.fit(
        asr_brain.hparams.epoch_counter,
        train_data,
        valid_data,
        train_loader_kwargs=hparams["train_dataloader_opts"],
        valid_loader_kwargs=hparams["valid_dataloader_opts"],
    )
    # Test (loads the checkpoint with the lowest validation PER)
    asr_brain.evaluate(
        test_data,
        min_key="PER",
        test_loader_kwargs=hparams["test_dataloader_opts"],
    )
| 11,903 | 34.323442 | 80 | py |
speechbrain | speechbrain-main/recipes/TIMIT/ASR/seq2seq/train_with_wav2vec2.py | #!/usr/bin/env python3
"""Recipe for training a phoneme recognizer on TIMIT.
The system relies on an encoder, a decoder, and attention mechanisms between them.
Training is done with NLL. CTC loss is also added on the top of the encoder.
Greedy search is using for validation, while beamsearch is used at test time to
improve the system performance.
To run this recipe, do the following:
> python train.py hparams/train.yaml --data_folder /path/to/TIMIT
Authors
* Mirco Ravanelli 2020
* Ju-Chieh Chou 2020
* Abdel Heba 2020
"""
import os
import sys
import torch
import logging
import speechbrain as sb
from hyperpyyaml import load_hyperpyyaml
from speechbrain.utils.distributed import run_on_main
logger = logging.getLogger(__name__)
# Define training procedure
class ASR(sb.Brain):
    """Seq2seq + CTC phoneme recognizer built on a wav2vec2 front-end.
    Two optimizers are maintained (one for wav2vec2, one for the rest of
    the model); see ``init_optimizers`` and ``fit_batch``."""
    def compute_forward(self, batch, stage):
        "Given an input batch it computes the phoneme probabilities."
        batch = batch.to(self.device)
        wavs, wav_lens = batch.sig
        phns_bos, _ = batch.phn_encoded_bos
        if stage == sb.Stage.TRAIN:
            # Waveform-level augmentation is applied at train time only.
            if hasattr(self.hparams, "augmentation"):
                wavs = self.hparams.augmentation(wavs, wav_lens)
        # wav2vec2 plays the role of the feature extractor here.
        feats = self.modules.wav2vec2(wavs, wav_lens)
        x = self.modules.enc(feats)
        # output layer for ctc log-probabilities
        logits = self.modules.ctc_lin(x)
        p_ctc = self.hparams.log_softmax(logits)
        # Attentional decoder fed with BOS-prefixed targets (teacher forcing).
        e_in = self.modules.emb(phns_bos)
        h, _ = self.modules.dec(e_in, x, wav_lens)
        # output layer for seq2seq log-probabilities
        logits = self.modules.seq_lin(h)
        p_seq = self.hparams.log_softmax(logits)
        # Greedy search for validation (cheap), beam search for final test.
        if stage == sb.Stage.VALID:
            hyps, scores = self.hparams.greedy_searcher(x, wav_lens)
            return p_ctc, p_seq, wav_lens, hyps
        elif stage == sb.Stage.TEST:
            hyps, scores = self.hparams.beam_searcher(x, wav_lens)
            return p_ctc, p_seq, wav_lens, hyps
        return p_ctc, p_seq, wav_lens
    def compute_objectives(self, predictions, batch, stage):
        "Given the network predictions and targets computed the NLL loss."
        if stage == sb.Stage.TRAIN:
            p_ctc, p_seq, wav_lens = predictions
        else:
            p_ctc, p_seq, wav_lens, hyps = predictions
        ids = batch.id
        phns_eos, phn_lens_eos = batch.phn_encoded_eos
        phns, phn_lens = batch.phn_encoded
        loss_ctc = self.hparams.ctc_cost(p_ctc, phns, wav_lens, phn_lens)
        loss_seq = self.hparams.seq_cost(p_seq, phns_eos, phn_lens_eos)
        # Convex combination of the CTC (encoder) and NLL (decoder) losses.
        loss = self.hparams.ctc_weight * loss_ctc
        loss += (1 - self.hparams.ctc_weight) * loss_seq
        # Record losses for posterity
        if stage != sb.Stage.TRAIN:
            self.ctc_metrics.append(ids, p_ctc, phns, wav_lens, phn_lens)
            self.seq_metrics.append(ids, p_seq, phns_eos, phn_lens_eos)
            self.per_metrics.append(
                ids, hyps, phns, None, phn_lens, self.label_encoder.decode_ndim,
            )
        return loss
    def evaluate_batch(self, batch, stage):
        """Computations needed for validation/test batches"""
        predictions = self.compute_forward(batch, stage=stage)
        loss = self.compute_objectives(predictions, batch, stage=stage)
        return loss.detach()
    def on_stage_start(self, stage, epoch):
        "Gets called when a stage (either training, validation, test) starts."
        self.ctc_metrics = self.hparams.ctc_stats()
        self.seq_metrics = self.hparams.seq_stats()
        # PER is only tracked when decoding happens (valid/test).
        if stage != sb.Stage.TRAIN:
            self.per_metrics = self.hparams.per_stats()
    def on_stage_end(self, stage, stage_loss, epoch):
        """Gets called at the end of a epoch."""
        if stage == sb.Stage.TRAIN:
            self.train_loss = stage_loss
        else:
            per = self.per_metrics.summarize("error_rate")
        if stage == sb.Stage.VALID:
            # Anneal both learning rates based on the validation PER.
            old_lr_adam, new_lr_adam = self.hparams.lr_annealing_adam(per)
            old_lr_wav2vec, new_lr_wav2vec = self.hparams.lr_annealing_wav2vec(
                per
            )
            sb.nnet.schedulers.update_learning_rate(
                self.adam_optimizer, new_lr_adam
            )
            sb.nnet.schedulers.update_learning_rate(
                self.wav2vec_optimizer, new_lr_wav2vec
            )
            self.hparams.train_logger.log_stats(
                stats_meta={
                    "epoch": epoch,
                    "lr_adam": old_lr_adam,
                    "lr_wav2vec": old_lr_wav2vec,
                },
                train_stats={"loss": self.train_loss},
                valid_stats={
                    "loss": stage_loss,
                    "ctc_loss": self.ctc_metrics.summarize("average"),
                    "seq_loss": self.seq_metrics.summarize("average"),
                    "PER": per,
                },
            )
            # Keep only the checkpoint with the best (lowest) PER.
            self.checkpointer.save_and_keep_only(
                meta={"PER": per}, min_keys=["PER"]
            )
        if stage == sb.Stage.TEST:
            self.hparams.train_logger.log_stats(
                stats_meta={"Epoch loaded": self.hparams.epoch_counter.current},
                test_stats={"loss": stage_loss, "PER": per},
            )
            with open(self.hparams.wer_file, "w") as w:
                w.write("CTC loss stats:\n")
                self.ctc_metrics.write_stats(w)
                w.write("\nseq2seq loss stats:\n")
                self.seq_metrics.write_stats(w)
                w.write("\nPER stats:\n")
                self.per_metrics.write_stats(w)
                print(
                    "CTC, seq2seq, and PER stats written to file",
                    self.hparams.wer_file,
                )
    def fit_batch(self, batch):
        """Fit one batch, override to do multiple updates.

        The default implementation depends on a few methods being defined
        with a particular behavior:
        * ``compute_forward()``
        * ``compute_objectives()``
        Also depends on having optimizers passed at initialization.

        Arguments
        ---------
        batch : list of torch.Tensors
            Batch of data to use for training. Default implementation assumes
            this batch has two elements: inputs and targets.

        Returns
        -------
        detached loss
        """
        # Managing automatic mixed precision
        if self.auto_mix_prec:
            self.wav2vec_optimizer.zero_grad()
            self.adam_optimizer.zero_grad()
            with torch.cuda.amp.autocast():
                outputs = self.compute_forward(batch, sb.Stage.TRAIN)
                loss = self.compute_objectives(outputs, batch, sb.Stage.TRAIN)
            # Backward on the scaled loss; unscale before the gradient check.
            self.scaler.scale(loss).backward()
            self.scaler.unscale_(self.wav2vec_optimizer)
            self.scaler.unscale_(self.adam_optimizer)
            if self.check_gradients(loss):
                self.scaler.step(self.wav2vec_optimizer)
                self.scaler.step(self.adam_optimizer)
            self.scaler.update()
        else:
            outputs = self.compute_forward(batch, sb.Stage.TRAIN)
            loss = self.compute_objectives(outputs, batch, sb.Stage.TRAIN)
            loss.backward()
            # Step only when gradients pass the finiteness/clipping check.
            if self.check_gradients(loss):
                self.wav2vec_optimizer.step()
                self.adam_optimizer.step()
            self.wav2vec_optimizer.zero_grad()
            self.adam_optimizer.zero_grad()
        return loss.detach().cpu()
    def init_optimizers(self):
        "Initializes the wav2vec2 optimizer and model optimizer"
        self.wav2vec_optimizer = self.hparams.wav2vec_opt_class(
            self.modules.wav2vec2.parameters()
        )
        self.adam_optimizer = self.hparams.adam_opt_class(
            self.hparams.model.parameters()
        )
        # Register both optimizers so their state survives checkpointing.
        if self.checkpointer is not None:
            self.checkpointer.add_recoverable(
                "wav2vec_opt", self.wav2vec_optimizer
            )
            self.checkpointer.add_recoverable("adam_opt", self.adam_optimizer)
    def zero_grad(self, set_to_none=False):
        """Resets the gradients of both optimizers."""
        self.wav2vec_optimizer.zero_grad(set_to_none)
        self.adam_optimizer.zero_grad(set_to_none)
def dataio_prep(hparams):
    """This function prepares the datasets to be used in the brain class.
    It also defines the data processing pipeline through user-defined functions.

    Arguments
    ---------
    hparams : dict
        Loaded hyperparameters; must provide the annotation paths, sorting
        mode, dataloader options, save folder, and special token indices.

    Returns
    -------
    (train_data, valid_data, test_data, label_encoder)
    """
    data_folder = hparams["data_folder"]
    # 1. Declarations:
    train_data = sb.dataio.dataset.DynamicItemDataset.from_json(
        json_path=hparams["train_annotation"],
        replacements={"data_root": data_folder},
    )
    if hparams["sorting"] == "ascending":
        # we sort training data to speed up training and get better results.
        train_data = train_data.filtered_sorted(sort_key="duration")
        # when sorting do not shuffle in dataloader ! otherwise is pointless
        hparams["train_dataloader_opts"]["shuffle"] = False
    elif hparams["sorting"] == "descending":
        train_data = train_data.filtered_sorted(
            sort_key="duration", reverse=True
        )
        # when sorting do not shuffle in dataloader ! otherwise is pointless
        hparams["train_dataloader_opts"]["shuffle"] = False
    elif hparams["sorting"] == "random":
        pass
    else:
        raise NotImplementedError(
            "sorting must be random, ascending or descending"
        )
    # Valid/test are always duration-sorted for efficient padded batching.
    valid_data = sb.dataio.dataset.DynamicItemDataset.from_json(
        json_path=hparams["valid_annotation"],
        replacements={"data_root": data_folder},
    )
    valid_data = valid_data.filtered_sorted(sort_key="duration")
    test_data = sb.dataio.dataset.DynamicItemDataset.from_json(
        json_path=hparams["test_annotation"],
        replacements={"data_root": data_folder},
    )
    test_data = test_data.filtered_sorted(sort_key="duration")
    datasets = [train_data, valid_data, test_data]
    label_encoder = sb.dataio.encoder.CTCTextEncoder()
    # 2. Define audio pipeline:
    @sb.utils.data_pipeline.takes("wav")
    @sb.utils.data_pipeline.provides("sig")
    def audio_pipeline(wav):
        sig = sb.dataio.dataio.read_audio(wav)
        return sig
    sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline)
    # 3. Define text pipeline:
    # Yields must follow the order of the "provides" keys above.
    @sb.utils.data_pipeline.takes("phn")
    @sb.utils.data_pipeline.provides(
        "phn_list",
        "phn_encoded_list",
        "phn_encoded",
        "phn_encoded_eos",
        "phn_encoded_bos",
    )
    def text_pipeline(phn):
        phn_list = phn.strip().split()
        yield phn_list
        phn_encoded_list = label_encoder.encode_sequence(phn_list)
        yield phn_encoded_list
        phn_encoded = torch.LongTensor(phn_encoded_list)
        yield phn_encoded
        phn_encoded_eos = torch.LongTensor(
            label_encoder.append_eos_index(phn_encoded_list)
        )
        yield phn_encoded_eos
        phn_encoded_bos = torch.LongTensor(
            label_encoder.prepend_bos_index(phn_encoded_list)
        )
        yield phn_encoded_bos
    sb.dataio.dataset.add_dynamic_item(datasets, text_pipeline)
    # 4. Fit encoder:
    # Load or compute the label encoder
    lab_enc_file = os.path.join(hparams["save_folder"], "label_encoder.txt")
    special_labels = {
        "bos_label": hparams["bos_index"],
        "eos_label": hparams["eos_index"],
        "blank_label": hparams["blank_index"],
    }
    label_encoder.load_or_create(
        path=lab_enc_file,
        from_didatasets=[train_data],
        output_key="phn_list",
        special_labels=special_labels,
        sequence_input=True,
    )
    # 5. Set output:
    sb.dataio.dataset.set_output_keys(
        datasets,
        ["id", "sig", "phn_encoded", "phn_encoded_eos", "phn_encoded_bos"],
    )
    return train_data, valid_data, test_data, label_encoder
if __name__ == "__main__":
    # CLI:
    # Parse the hyperparameter-file path plus any command-line overrides.
    hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])
    # Load hyperparameters file with command-line overrides
    with open(hparams_file) as fin:
        hparams = load_hyperpyyaml(fin, overrides)
    # Dataset prep (parsing TIMIT and annotation into csv files)
    from timit_prepare import prepare_timit  # noqa
    # Initialize ddp (useful only for multi-GPU DDP training)
    sb.utils.distributed.ddp_init_group(run_opts)
    # Create experiment directory
    sb.create_experiment_directory(
        experiment_directory=hparams["output_folder"],
        hyperparams_to_save=hparams_file,
        overrides=overrides,
    )
    # multi-gpu (ddp) save data preparation
    # run_on_main ensures the annotation files are written exactly once.
    run_on_main(
        prepare_timit,
        kwargs={
            "data_folder": hparams["data_folder"],
            "save_json_train": hparams["train_annotation"],
            "save_json_valid": hparams["valid_annotation"],
            "save_json_test": hparams["test_annotation"],
            "skip_prep": hparams["skip_prep"],
            "uppercase": hparams["uppercase"],
        },
    )
    # Dataset IO prep: creating Dataset objects and proper encodings for phones
    train_data, valid_data, test_data, label_encoder = dataio_prep(hparams)
    # Trainer initialization
    # No opt_class is passed: this Brain defines init_optimizers() itself.
    asr_brain = ASR(
        modules=hparams["modules"],
        hparams=hparams,
        run_opts=run_opts,
        checkpointer=hparams["checkpointer"],
    )
    # The brain needs the encoder to map predicted ids back to phoneme labels.
    asr_brain.label_encoder = label_encoder
    # Training/validation loop
    asr_brain.fit(
        asr_brain.hparams.epoch_counter,
        train_data,
        valid_data,
        train_loader_kwargs=hparams["train_dataloader_opts"],
        valid_loader_kwargs=hparams["valid_dataloader_opts"],
    )
    # Test
    # Evaluate with the checkpoint that achieved the lowest validation PER.
    asr_brain.evaluate(
        test_data,
        min_key="PER",
        test_loader_kwargs=hparams["test_dataloader_opts"],
    )
| 13,829 | 33.575 | 83 | py |
speechbrain | speechbrain-main/recipes/TIMIT/ASR/seq2seq/train.py | #!/usr/bin/env python3
"""Recipe for training a phoneme recognizer on TIMIT.
The system relies on an encoder, a decoder, and attention mechanisms between them.
Training is done with NLL. CTC loss is also added on the top of the encoder.
Greedy search is using for validation, while beamsearch is used at test time to
improve the system performance.
To run this recipe, do the following:
> python train.py hparams/train.yaml --data_folder /path/to/TIMIT
Authors
* Mirco Ravanelli 2020
* Ju-Chieh Chou 2020
* Abdel Heba 2020
* Andreas Nautsch 2021
"""
import os
import sys
import torch
import logging
import speechbrain as sb
from hyperpyyaml import load_hyperpyyaml
from speechbrain.utils.distributed import run_on_main
from speechbrain.dataio.dataloader import SaveableDataLoader
from speechbrain.dataio.sampler import DynamicBatchSampler
from speechbrain.dataio.batch import PaddedBatch
logger = logging.getLogger(__name__)
# Define training procedure
class ASR(sb.Brain):
    """Seq2seq + CTC phoneme recognizer trained on TIMIT.

    At train time the waveform batch may be doubled with an
    environment-corrupted copy; greedy search is used for validation and
    beam search for the final test."""
    def compute_forward(self, batch, stage):
        "Given an input batch it computes the phoneme probabilities."
        batch = batch.to(self.device)
        wavs, wav_lens = batch.sig
        phns_bos, _ = batch.phn_encoded_bos
        if stage == sb.Stage.TRAIN:
            if hasattr(self.hparams, "env_corrupt"):
                # Append an environment-corrupted copy of every waveform,
                # doubling the effective batch size (targets are doubled
                # correspondingly in compute_objectives).
                wavs_noise = self.hparams.env_corrupt(wavs, wav_lens)
                wavs = torch.cat([wavs, wavs_noise], dim=0)
                wav_lens = torch.cat([wav_lens, wav_lens])
                phns_bos = torch.cat([phns_bos, phns_bos])
            if hasattr(self.hparams, "augmentation"):
                wavs = self.hparams.augmentation(wavs, wav_lens)
        feats = self.hparams.compute_features(wavs)
        feats = self.modules.normalize(feats, wav_lens)
        x = self.modules.enc(feats)
        # output layer for ctc log-probabilities
        logits = self.modules.ctc_lin(x)
        p_ctc = self.hparams.log_softmax(logits)
        # Attentional decoder fed with BOS-prefixed targets (teacher forcing).
        e_in = self.modules.emb(phns_bos)
        h, _ = self.modules.dec(e_in, x, wav_lens)
        # output layer for seq2seq log-probabilities
        logits = self.modules.seq_lin(h)
        p_seq = self.hparams.log_softmax(logits)
        # Greedy search for validation (cheap), beam search for final test.
        if stage == sb.Stage.VALID:
            hyps, scores = self.hparams.greedy_searcher(x, wav_lens)
            return p_ctc, p_seq, wav_lens, hyps
        elif stage == sb.Stage.TEST:
            hyps, scores = self.hparams.beam_searcher(x, wav_lens)
            return p_ctc, p_seq, wav_lens, hyps
        return p_ctc, p_seq, wav_lens
    def compute_objectives(self, predictions, batch, stage):
        "Given the network predictions and targets computed the NLL loss."
        if stage == sb.Stage.TRAIN:
            p_ctc, p_seq, wav_lens = predictions
        else:
            p_ctc, p_seq, wav_lens, hyps = predictions
        ids = batch.id
        phns_eos, phn_lens_eos = batch.phn_encoded_eos
        phns, phn_lens = batch.phn_encoded
        # Bugfix: check `self.hparams` (not `self.modules`) so this guard
        # matches compute_forward(); with mismatched guards the doubled
        # waveform batch and the un-doubled targets could get out of sync.
        if stage == sb.Stage.TRAIN and hasattr(self.hparams, "env_corrupt"):
            phns = torch.cat([phns, phns], dim=0)
            phn_lens = torch.cat([phn_lens, phn_lens], dim=0)
            phns_eos = torch.cat([phns_eos, phns_eos], dim=0)
            phn_lens_eos = torch.cat([phn_lens_eos, phn_lens_eos], dim=0)
        loss_ctc = self.hparams.ctc_cost(p_ctc, phns, wav_lens, phn_lens)
        loss_seq = self.hparams.seq_cost(p_seq, phns_eos, phn_lens_eos)
        # Convex combination of the CTC (encoder) and NLL (decoder) losses.
        loss = self.hparams.ctc_weight * loss_ctc
        loss += (1 - self.hparams.ctc_weight) * loss_seq
        # Record losses for posterity
        if stage != sb.Stage.TRAIN:
            self.ctc_metrics.append(ids, p_ctc, phns, wav_lens, phn_lens)
            self.seq_metrics.append(ids, p_seq, phns_eos, phn_lens_eos)
            self.per_metrics.append(
                ids, hyps, phns, None, phn_lens, self.label_encoder.decode_ndim,
            )
        return loss
    def fit_batch(self, batch):
        """Train the parameters given a single batch in input"""
        predictions = self.compute_forward(batch, sb.Stage.TRAIN)
        loss = self.compute_objectives(predictions, batch, sb.Stage.TRAIN)
        loss.backward()
        # Step only when gradients pass the finiteness/clipping check.
        if self.check_gradients(loss):
            self.optimizer.step()
        self.optimizer.zero_grad()
        return loss.detach()
    def evaluate_batch(self, batch, stage):
        """Computations needed for validation/test batches"""
        predictions = self.compute_forward(batch, stage=stage)
        loss = self.compute_objectives(predictions, batch, stage=stage)
        return loss.detach()
    def on_stage_start(self, stage, epoch):
        "Gets called when a stage (either training, validation, test) starts."
        self.ctc_metrics = self.hparams.ctc_stats()
        self.seq_metrics = self.hparams.seq_stats()
        # PER is only tracked when decoding happens (valid/test).
        if stage != sb.Stage.TRAIN:
            self.per_metrics = self.hparams.per_stats()
    def on_stage_end(self, stage, stage_loss, epoch):
        """Gets called at the end of a epoch."""
        if stage == sb.Stage.TRAIN:
            self.train_loss = stage_loss
        else:
            per = self.per_metrics.summarize("error_rate")
        if stage == sb.Stage.VALID:
            # Anneal the learning rate based on the validation PER.
            old_lr, new_lr = self.hparams.lr_annealing(per)
            sb.nnet.schedulers.update_learning_rate(self.optimizer, new_lr)
            self.hparams.train_logger.log_stats(
                stats_meta={"epoch": epoch, "lr": old_lr},
                train_stats={"loss": self.train_loss},
                valid_stats={
                    "loss": stage_loss,
                    "ctc_loss": self.ctc_metrics.summarize("average"),
                    "seq_loss": self.seq_metrics.summarize("average"),
                    "PER": per,
                },
            )
            # Keep only the checkpoint with the best (lowest) PER.
            self.checkpointer.save_and_keep_only(
                meta={"PER": per}, min_keys=["PER"]
            )
        if stage == sb.Stage.TEST:
            self.hparams.train_logger.log_stats(
                stats_meta={"Epoch loaded": self.hparams.epoch_counter.current},
                test_stats={"loss": stage_loss, "PER": per},
            )
            with open(self.hparams.wer_file, "w") as w:
                w.write("CTC loss stats:\n")
                self.ctc_metrics.write_stats(w)
                w.write("\nseq2seq loss stats:\n")
                self.seq_metrics.write_stats(w)
                w.write("\nPER stats:\n")
                self.per_metrics.write_stats(w)
                print(
                    "CTC, seq2seq, and PER stats written to file",
                    self.hparams.wer_file,
                )
def dataio_prep(hparams):
    """This function prepares the datasets to be used in the brain class.
    It also defines the data processing pipeline through user-defined functions.

    Returns ``(train_data, valid_data, test_data, label_encoder)``; when
    dynamic batching is enabled, ``train_data`` is already wrapped in a
    ``SaveableDataLoader`` with a ``DynamicBatchSampler``.
    """
    data_folder = hparams["data_folder"]
    # 1. Declarations:
    train_data = sb.dataio.dataset.DynamicItemDataset.from_json(
        json_path=hparams["train_annotation"],
        replacements={"data_root": data_folder},
    )
    if hparams["sorting"] == "ascending":
        # we sort training data to speed up training and get better results.
        train_data = train_data.filtered_sorted(sort_key="duration")
        # when sorting do not shuffle in dataloader ! otherwise is pointless
        hparams["train_dataloader_opts"]["shuffle"] = False
    elif hparams["sorting"] == "descending":
        train_data = train_data.filtered_sorted(
            sort_key="duration", reverse=True
        )
        # when sorting do not shuffle in dataloader ! otherwise is pointless
        hparams["train_dataloader_opts"]["shuffle"] = False
    elif hparams["sorting"] == "random":
        pass
    else:
        raise NotImplementedError(
            "sorting must be random, ascending or descending"
        )
    # Valid/test are always duration-sorted for efficient padded batching.
    valid_data = sb.dataio.dataset.DynamicItemDataset.from_json(
        json_path=hparams["valid_annotation"],
        replacements={"data_root": data_folder},
    )
    valid_data = valid_data.filtered_sorted(sort_key="duration")
    test_data = sb.dataio.dataset.DynamicItemDataset.from_json(
        json_path=hparams["test_annotation"],
        replacements={"data_root": data_folder},
    )
    test_data = test_data.filtered_sorted(sort_key="duration")
    datasets = [train_data, valid_data, test_data]
    label_encoder = sb.dataio.encoder.CTCTextEncoder()
    # 2. Define audio pipeline:
    @sb.utils.data_pipeline.takes("wav")
    @sb.utils.data_pipeline.provides("sig")
    def audio_pipeline(wav):
        sig = sb.dataio.dataio.read_audio(wav)
        return sig
    sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline)
    # 3. Define text pipeline:
    # Yields must follow the order of the "provides" keys above.
    @sb.utils.data_pipeline.takes("phn")
    @sb.utils.data_pipeline.provides(
        "phn_list",
        "phn_encoded_list",
        "phn_encoded",
        "phn_encoded_eos",
        "phn_encoded_bos",
    )
    def text_pipeline(phn):
        phn_list = phn.strip().split()
        yield phn_list
        phn_encoded_list = label_encoder.encode_sequence(phn_list)
        yield phn_encoded_list
        phn_encoded = torch.LongTensor(phn_encoded_list)
        yield phn_encoded
        phn_encoded_eos = torch.LongTensor(
            label_encoder.append_eos_index(phn_encoded_list)
        )
        yield phn_encoded_eos
        phn_encoded_bos = torch.LongTensor(
            label_encoder.prepend_bos_index(phn_encoded_list)
        )
        yield phn_encoded_bos
    sb.dataio.dataset.add_dynamic_item(datasets, text_pipeline)
    # 4. Fit encoder:
    # Load or compute the label encoder
    lab_enc_file = os.path.join(hparams["save_folder"], "label_encoder.txt")
    special_labels = {
        "bos_label": hparams["bos_index"],
        "eos_label": hparams["eos_index"],
        "blank_label": hparams["blank_index"],
    }
    label_encoder.load_or_create(
        path=lab_enc_file,
        from_didatasets=[train_data],
        output_key="phn_list",
        special_labels=special_labels,
        sequence_input=True,
    )
    # 5. Set output:
    sb.dataio.dataset.set_output_keys(
        datasets,
        ["id", "sig", "phn_encoded", "phn_encoded_eos", "phn_encoded_bos"],
    )
    # Support for dynamic batching
    # Batches are sized by total feature frames rather than #utterances.
    if hparams["dynamic_batching"]:
        dynamic_hparams = hparams["dynamic_batch_sampler"]
        hop_size = dynamic_hparams["feats_hop_size"]
        batch_sampler = DynamicBatchSampler(
            train_data,
            dynamic_hparams["max_batch_len"],
            num_buckets=dynamic_hparams["num_buckets"],
            length_func=lambda x: x["duration"] * (1 / hop_size),
            shuffle=dynamic_hparams["shuffle_ex"],
            batch_ordering=dynamic_hparams["batch_ordering"],
        )
        train_data = SaveableDataLoader(
            train_data, batch_sampler=batch_sampler, collate_fn=PaddedBatch
        )
    return train_data, valid_data, test_data, label_encoder
if __name__ == "__main__":
    # CLI:
    # Parse the hyperparameter-file path plus any command-line overrides.
    hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])
    # Load hyperparameters file with command-line overrides
    with open(hparams_file) as fin:
        hparams = load_hyperpyyaml(fin, overrides)
    # Dataset prep (parsing TIMIT and annotation into csv files)
    from timit_prepare import prepare_timit  # noqa
    # Initialize ddp (useful only for multi-GPU DDP training)
    sb.utils.distributed.ddp_init_group(run_opts)
    # Create experiment directory
    sb.create_experiment_directory(
        experiment_directory=hparams["output_folder"],
        hyperparams_to_save=hparams_file,
        overrides=overrides,
    )
    # multi-gpu (ddp) save data preparation
    # run_on_main ensures the annotation files are written exactly once.
    run_on_main(
        prepare_timit,
        kwargs={
            "data_folder": hparams["data_folder"],
            "save_json_train": hparams["train_annotation"],
            "save_json_valid": hparams["valid_annotation"],
            "save_json_test": hparams["test_annotation"],
            "skip_prep": hparams["skip_prep"],
            "uppercase": hparams["uppercase"],
        },
    )
    # Dataset IO prep: creating Dataset objects and proper encodings for phones
    train_data, valid_data, test_data, label_encoder = dataio_prep(hparams)
    # Trainer initialization
    asr_brain = ASR(
        modules=hparams["modules"],
        opt_class=hparams["opt_class"],
        hparams=hparams,
        run_opts=run_opts,
        checkpointer=hparams["checkpointer"],
    )
    # The brain needs the encoder to map predicted ids back to phoneme labels.
    asr_brain.label_encoder = label_encoder
    # Training/validation loop
    asr_brain.fit(
        asr_brain.hparams.epoch_counter,
        train_data,
        valid_data,
        train_loader_kwargs=hparams["train_dataloader_opts"],
        valid_loader_kwargs=hparams["valid_dataloader_opts"],
    )
    # Test
    # Evaluate with the checkpoint that achieved the lowest validation PER.
    asr_brain.evaluate(
        test_data,
        min_key="PER",
        test_loader_kwargs=hparams["test_dataloader_opts"],
    )
| 12,912 | 34.869444 | 83 | py |
speechbrain | speechbrain-main/recipes/TIMIT/ASR/CTC/train.py | #!/usr/bin/env python3
"""Recipe for training a phoneme recognizer on TIMIT.
The system relies on a model trained with CTC.
Greedy search is using for validation, while beamsearch
is used at test time to improve the system performance.
To run this recipe, do the following:
> python train.py hparams/train.yaml --data_folder /path/to/TIMIT
Authors
* Mirco Ravanelli 2020
* Peter Plantinga 2020
"""
import os
import sys
import torch
import logging
import speechbrain as sb
from hyperpyyaml import load_hyperpyyaml
from speechbrain.utils.distributed import run_on_main
logger = logging.getLogger(__name__)
# Define training procedure
class ASR_Brain(sb.Brain):
    """CTC-only phoneme recognizer for TIMIT: acoustic features -> model ->
    per-frame log-probabilities, decoded greedily for PER tracking."""
    def compute_forward(self, batch, stage):
        "Given an input batch it computes the phoneme probabilities."
        batch = batch.to(self.device)
        wavs, wav_lens = batch.sig
        # Adding optional augmentation when specified:
        if stage == sb.Stage.TRAIN:
            if hasattr(self.hparams, "env_corrupt"):
                # Append an environment-corrupted copy of each waveform,
                # doubling the effective batch size.
                wavs_noise = self.hparams.env_corrupt(wavs, wav_lens)
                wavs = torch.cat([wavs, wavs_noise], dim=0)
                wav_lens = torch.cat([wav_lens, wav_lens])
            if hasattr(self.hparams, "augmentation"):
                wavs = self.hparams.augmentation(wavs, wav_lens)
        feats = self.hparams.compute_features(wavs)
        feats = self.modules.normalize(feats, wav_lens)
        out = self.modules.model(feats)
        out = self.modules.output(out)
        pout = self.hparams.log_softmax(out)
        return pout, wav_lens
    def compute_objectives(self, predictions, batch, stage):
        "Given the network predictions and targets computed the CTC loss."
        pout, pout_lens = predictions
        phns, phn_lens = batch.phn_encoded
        # Mirror the batch doubling done in compute_forward (same guard).
        if stage == sb.Stage.TRAIN and hasattr(self.hparams, "env_corrupt"):
            phns = torch.cat([phns, phns], dim=0)
            phn_lens = torch.cat([phn_lens, phn_lens], dim=0)
        loss = self.hparams.compute_cost(pout, phns, pout_lens, phn_lens)
        self.ctc_metrics.append(batch.id, pout, phns, pout_lens, phn_lens)
        # PER is computed from greedy decoding at valid/test only.
        if stage != sb.Stage.TRAIN:
            sequence = sb.decoders.ctc_greedy_decode(
                pout, pout_lens, blank_id=self.hparams.blank_index
            )
            self.per_metrics.append(
                ids=batch.id,
                predict=sequence,
                target=phns,
                target_len=phn_lens,
                ind2lab=self.label_encoder.decode_ndim,
            )
        return loss
    def on_stage_start(self, stage, epoch):
        "Gets called when a stage (either training, validation, test) starts."
        self.ctc_metrics = self.hparams.ctc_stats()
        if stage != sb.Stage.TRAIN:
            self.per_metrics = self.hparams.per_stats()
    def on_stage_end(self, stage, stage_loss, epoch):
        """Gets called at the end of a stage."""
        if stage == sb.Stage.TRAIN:
            self.train_loss = stage_loss
        else:
            per = self.per_metrics.summarize("error_rate")
        if stage == sb.Stage.VALID:
            # Anneal the learning rate based on the validation PER.
            old_lr, new_lr = self.hparams.lr_annealing(per)
            sb.nnet.schedulers.update_learning_rate(self.optimizer, new_lr)
            self.hparams.train_logger.log_stats(
                stats_meta={"epoch": epoch, "lr": old_lr},
                train_stats={"loss": self.train_loss},
                valid_stats={"loss": stage_loss, "PER": per},
            )
            # Keep only the checkpoint with the best (lowest) PER.
            self.checkpointer.save_and_keep_only(
                meta={"PER": per}, min_keys=["PER"],
            )
        elif stage == sb.Stage.TEST:
            self.hparams.train_logger.log_stats(
                stats_meta={"Epoch loaded": self.hparams.epoch_counter.current},
                test_stats={"loss": stage_loss, "PER": per},
            )
            with open(self.hparams.wer_file, "w") as w:
                w.write("CTC loss stats:\n")
                self.ctc_metrics.write_stats(w)
                w.write("\nPER stats:\n")
                self.per_metrics.write_stats(w)
                print("CTC and PER stats written to ", self.hparams.wer_file)
def dataio_prep(hparams):
    "Creates the datasets and their data processing pipelines."
    data_folder = hparams["data_folder"]
    # 1. Declarations:
    train_data = sb.dataio.dataset.DynamicItemDataset.from_json(
        json_path=hparams["train_annotation"],
        replacements={"data_root": data_folder},
    )
    if hparams["sorting"] == "ascending":
        # we sort training data to speed up training and get better results.
        train_data = train_data.filtered_sorted(sort_key="duration")
        # when sorting do not shuffle in dataloader ! otherwise is pointless
        hparams["train_dataloader_opts"]["shuffle"] = False
    elif hparams["sorting"] == "descending":
        train_data = train_data.filtered_sorted(
            sort_key="duration", reverse=True
        )
        # when sorting do not shuffle in dataloader ! otherwise is pointless
        hparams["train_dataloader_opts"]["shuffle"] = False
    elif hparams["sorting"] == "random":
        pass
    else:
        raise NotImplementedError(
            "sorting must be random, ascending or descending"
        )
    # Valid/test are always duration-sorted for efficient padded batching.
    valid_data = sb.dataio.dataset.DynamicItemDataset.from_json(
        json_path=hparams["valid_annotation"],
        replacements={"data_root": data_folder},
    )
    valid_data = valid_data.filtered_sorted(sort_key="duration")
    test_data = sb.dataio.dataset.DynamicItemDataset.from_json(
        json_path=hparams["test_annotation"],
        replacements={"data_root": data_folder},
    )
    test_data = test_data.filtered_sorted(sort_key="duration")
    datasets = [train_data, valid_data, test_data]
    label_encoder = sb.dataio.encoder.CTCTextEncoder()
    # 2. Define audio pipeline:
    @sb.utils.data_pipeline.takes("wav")
    @sb.utils.data_pipeline.provides("sig")
    def audio_pipeline(wav):
        sig = sb.dataio.dataio.read_audio(wav)
        return sig
    sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline)
    # 3. Define text pipeline:
    # Yields must follow the order of the "provides" keys above.
    @sb.utils.data_pipeline.takes("phn")
    @sb.utils.data_pipeline.provides("phn_list", "phn_encoded")
    def text_pipeline(phn):
        phn_list = phn.strip().split()
        yield phn_list
        phn_encoded = label_encoder.encode_sequence_torch(phn_list)
        yield phn_encoded
    sb.dataio.dataset.add_dynamic_item(datasets, text_pipeline)
    # 4. Fit encoder:
    # Load or compute the label encoder (with multi-gpu dpp support)
    lab_enc_file = os.path.join(hparams["save_folder"], "label_encoder.txt")
    label_encoder.load_or_create(
        path=lab_enc_file,
        from_didatasets=[train_data],
        output_key="phn_list",
        special_labels={"blank_label": hparams["blank_index"]},
        sequence_input=True,
    )
    # 5. Set output:
    sb.dataio.dataset.set_output_keys(datasets, ["id", "sig", "phn_encoded"])
    return train_data, valid_data, test_data, label_encoder
# Begin Recipe!
if __name__ == "__main__":
    # CLI:
    # Parse the hyperparameter-file path plus any command-line overrides.
    hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])
    # Load hyperparameters file with command-line overrides
    with open(hparams_file) as fin:
        hparams = load_hyperpyyaml(fin, overrides)
    # Dataset prep (parsing TIMIT and annotation into csv files)
    from timit_prepare import prepare_timit  # noqa
    # Initialize ddp (useful only for multi-GPU DDP training)
    sb.utils.distributed.ddp_init_group(run_opts)
    # Create experiment directory
    sb.create_experiment_directory(
        experiment_directory=hparams["output_folder"],
        hyperparams_to_save=hparams_file,
        overrides=overrides,
    )
    # multi-gpu (ddp) save data preparation
    # run_on_main ensures the annotation files are written exactly once.
    run_on_main(
        prepare_timit,
        kwargs={
            "data_folder": hparams["data_folder"],
            "save_json_train": hparams["train_annotation"],
            "save_json_valid": hparams["valid_annotation"],
            "save_json_test": hparams["test_annotation"],
            "skip_prep": hparams["skip_prep"],
            "uppercase": hparams["uppercase"],
        },
    )
    # Dataset IO prep: creating Dataset objects and proper encodings for phones
    train_data, valid_data, test_data, label_encoder = dataio_prep(hparams)
    # Trainer initialization
    asr_brain = ASR_Brain(
        modules=hparams["modules"],
        opt_class=hparams["opt_class"],
        hparams=hparams,
        run_opts=run_opts,
        checkpointer=hparams["checkpointer"],
    )
    # The brain needs the encoder to map predicted ids back to phoneme labels.
    asr_brain.label_encoder = label_encoder
    # Training/validation loop
    asr_brain.fit(
        asr_brain.hparams.epoch_counter,
        train_data,
        valid_data,
        train_loader_kwargs=hparams["train_dataloader_opts"],
        valid_loader_kwargs=hparams["valid_dataloader_opts"],
    )
    # Test
    # Evaluate with the checkpoint that achieved the lowest validation PER.
    asr_brain.evaluate(
        test_data,
        min_key="PER",
        test_loader_kwargs=hparams["test_dataloader_opts"],
    )
| 9,074 | 33.637405 | 80 | py |
speechbrain | speechbrain-main/recipes/TIMIT/Alignment/train.py | #!/usr/bin/env python3
"""Recipe for training a HMM-DNN alignment system on the TIMIT dataset.
The system is trained can be trained with Viterbi, forward, or CTC loss.
To run this recipe, do the following:
> python train.py hparams/train.yaml --data_folder /path/to/TIMIT
Authors
* Elena Rastorgueva 2020
* Mirco Ravanelli 2020
* Peter Plantinga 2020
"""
import os
import sys
import torch
import speechbrain as sb
from hyperpyyaml import load_hyperpyyaml
from speechbrain.utils.distributed import run_on_main
# Define training procedure
class AlignBrain(sb.Brain):
    def compute_forward(self, batch, stage):
        """Computations from the waveform to the output probabilities."""
        batch = batch.to(self.device)
        wavs, wav_lens = batch.sig
        # Adding augmentation when specified:
        if stage == sb.Stage.TRAIN:
            if hasattr(self.modules, "env_corrupt"):
                # Append an environment-corrupted copy of each waveform,
                # doubling the effective batch size.
                wavs_noise = self.modules.env_corrupt(wavs, wav_lens)
                wavs = torch.cat([wavs, wavs_noise], dim=0)
                wav_lens = torch.cat([wav_lens, wav_lens])
            if hasattr(self.hparams, "augmentation"):
                wavs = self.hparams.augmentation(wavs, wav_lens)
        feats = self.hparams.compute_features(wavs)
        if hasattr(self.hparams, "normalize"):
            feats = self.modules.normalize(feats, wav_lens)
        out = self.modules.model(feats)
        out = self.modules.output(out)
        # Subtract the per-utterance mean over the time axis (dim 1)
        # before the log-softmax.
        out = out - out.mean(1).unsqueeze(1)
        pout = self.hparams.log_softmax(out)
        return pout, wav_lens
def compute_objectives(self, predictions, batch, stage):
"""Computes the loss with the specified alignment algorithm"""
pout, pout_lens = predictions
ids = batch.id
phns, phn_lens = batch.phn_encoded
phn_ends, _ = batch.phn_ends
if stage == sb.Stage.TRAIN and hasattr(self.modules, "env_corrupt"):
phns = torch.cat([phns, phns], dim=0)
phn_lens = torch.cat([phn_lens, phn_lens], dim=0)
phns, phn_lens = phns.to(self.device), phn_lens.to(self.device)
phns_orig = sb.utils.data_utils.undo_padding(phns, phn_lens)
phns = self.hparams.aligner.expand_phns_by_states_per_phoneme(
phns, phn_lens
)
phns = phns.int()
if self.training_type == "forward":
forward_scores = self.hparams.aligner(
pout, pout_lens, phns, phn_lens, "forward"
)
loss = -forward_scores
elif self.training_type == "ctc":
loss = self.hparams.compute_cost_ctc(
pout, phns, pout_lens, phn_lens
)
elif self.training_type == "viterbi":
prev_alignments = self.hparams.aligner.get_prev_alignments(
ids, pout, pout_lens, phns, phn_lens
)
prev_alignments = prev_alignments.to(self.hparams.device)
loss = self.hparams.compute_cost_nll(pout, prev_alignments)
viterbi_scores, alignments = self.hparams.aligner(
pout, pout_lens, phns, phn_lens, "viterbi"
)
if self.training_type in ["viterbi", "forward"]:
self.hparams.aligner.store_alignments(ids, alignments)
if stage != sb.Stage.TRAIN:
self.accuracy_metrics.append(ids, alignments, phn_ends, phns_orig)
return loss
def on_stage_start(self, stage, epoch):
"""Gets called at the beginning of each epoch"""
self.training_type = self.hparams.init_training_type
if stage != sb.Stage.TRAIN:
self.accuracy_metrics = self.hparams.accuracy_stats()
def on_stage_end(self, stage, stage_loss, epoch):
"""Gets called at the end of a epoch."""
if hasattr(self.hparams, "switch_training_type"):
if not hasattr(self.hparams, "switch_training_epoch"):
raise ValueError(
"Please specify `switch_training_epoch` in `params`"
)
if (
self.hparams.epoch_counter.current
== self.hparams.switch_training_epoch
) and stage == sb.Stage.VALID:
self.training_type = self.hparams.switch_training_type
print("Switching to training type", self.training_type)
if stage == sb.Stage.TRAIN:
self.train_loss = stage_loss
else:
acc = self.accuracy_metrics.summarize("average")
if stage == sb.Stage.VALID:
old_lr, new_lr = self.hparams.lr_annealing(acc)
sb.nnet.schedulers.update_learning_rate(self.optimizer, new_lr)
self.hparams.train_logger.log_stats(
stats_meta={"epoch": epoch, "lr": old_lr},
train_stats={"loss": self.train_loss},
valid_stats={"loss": stage_loss, "accuracy": acc},
)
self.checkpointer.save_and_keep_only(
meta={"accuracy": acc}, max_keys=["accuracy"],
)
elif stage == sb.Stage.TEST:
self.hparams.train_logger.log_stats(
stats_meta={"Epoch loaded": self.hparams.epoch_counter.current},
test_stats={"loss": stage_loss, "accuracy": acc},
)
def dataio_prep(hparams):
    """This function prepares the datasets to be used in the brain class.
    It also defines the data processing pipeline through user-defined functions.

    Arguments
    ---------
    hparams : dict
        Loaded hyperparameters; must provide annotation paths, the sorting
        strategy, dataloader options, and the save folder.

    Returns
    -------
    train_data, valid_data, test_data : DynamicItemDataset
        Datasets with audio/phoneme/phoneme-end pipelines attached.
    label_encoder : TextEncoder
        The loaded (or newly fitted) phoneme label encoder.
    """
    data_folder = hparams["data_folder"]

    # 1. Declarations:
    train_data = sb.dataio.dataset.DynamicItemDataset.from_json(
        json_path=hparams["train_annotation"],
        replacements={"data_root": data_folder},
    )
    if hparams["sorting"] == "ascending":
        # we sort training data to speed up training and get better results.
        train_data = train_data.filtered_sorted(sort_key="duration")
        # when sorting do not shuffle in dataloader ! otherwise is pointless
        hparams["dataloader_options"]["shuffle"] = False

    elif hparams["sorting"] == "descending":
        train_data = train_data.filtered_sorted(
            sort_key="duration", reverse=True
        )
        # when sorting do not shuffle in dataloader ! otherwise is pointless
        hparams["dataloader_options"]["shuffle"] = False

    elif hparams["sorting"] == "random":
        pass

    else:
        raise NotImplementedError(
            "sorting must be random, ascending or descending"
        )

    valid_data = sb.dataio.dataset.DynamicItemDataset.from_json(
        json_path=hparams["valid_annotation"],
        replacements={"data_root": data_folder},
    )
    test_data = sb.dataio.dataset.DynamicItemDataset.from_json(
        json_path=hparams["test_annotation"],
        replacements={"data_root": data_folder},
    )
    datasets = [train_data, valid_data, test_data]
    label_encoder = sb.dataio.encoder.TextEncoder()

    # 2. Define audio pipeline:
    @sb.utils.data_pipeline.takes("wav")
    @sb.utils.data_pipeline.provides("sig")
    def audio_pipeline(wav):
        sig = sb.dataio.dataio.read_audio(wav)
        return sig

    sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline)

    # 3. Define text pipeline:
    @sb.utils.data_pipeline.takes("phn")
    @sb.utils.data_pipeline.provides("phn_list", "phn_encoded")
    def text_pipeline(phn):
        phn_list = phn.strip().split()
        yield phn_list
        phn_encoded = label_encoder.encode_sequence_torch(phn_list)
        yield phn_encoded

    sb.dataio.dataset.add_dynamic_item(datasets, text_pipeline)

    # 4. Define end sample pipeline
    # (end sample is used to retrieve the golden alignment)
    @sb.utils.data_pipeline.takes("ground_truth_phn_ends")
    @sb.utils.data_pipeline.provides("phn_ends")
    def phn_ends_pipeline(ground_truth_phn_ends):
        phn_ends = ground_truth_phn_ends.strip().split()
        phn_ends = [int(i) for i in phn_ends]
        phn_ends = torch.Tensor(phn_ends)
        return phn_ends

    sb.dataio.dataset.add_dynamic_item(datasets, phn_ends_pipeline)

    # 5. Fit encoder:
    # Load the label encoder if it was saved by a previous run,
    # otherwise fit it on the training set and save it.
    label_encoder_file = os.path.join(
        hparams["save_folder"], "label_encoder.txt"
    )
    if os.path.exists(label_encoder_file):
        label_encoder.load(label_encoder_file)
    else:
        label_encoder.update_from_didataset(train_data, output_key="phn_list")
        # Reuse the path computed above instead of rebuilding it.
        label_encoder.save(label_encoder_file)

    # 6. Set output:
    sb.dataio.dataset.set_output_keys(
        datasets, ["id", "sig", "phn_encoded", "phn_ends"]
    )

    return train_data, valid_data, test_data, label_encoder
# Begin Recipe!
# Begin Recipe: parse CLI, prepare TIMIT annotations, then train and test.
if __name__ == "__main__":
    # CLI:
    hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])

    # Load hyperparameters file with command-line overrides
    with open(hparams_file) as fin:
        hparams = load_hyperpyyaml(fin, overrides)

    # Dataset prep (parsing TIMIT and annotation into csv files)
    from timit_prepare import prepare_timit  # noqa

    # Initialize ddp (useful only for multi-GPU DDP training)
    sb.utils.distributed.ddp_init_group(run_opts)

    # Create experiment directory
    sb.create_experiment_directory(
        experiment_directory=hparams["output_folder"],
        hyperparams_to_save=hparams_file,
        overrides=overrides,
    )

    # multi-gpu (ddp) save data preparation
    # (run_on_main ensures preparation runs once, not on every DDP process)
    run_on_main(
        prepare_timit,
        kwargs={
            "data_folder": hparams["data_folder"],
            "save_json_train": hparams["train_annotation"],
            "save_json_valid": hparams["valid_annotation"],
            "save_json_test": hparams["test_annotation"],
            "phn_set": hparams["phn_set"],
            "skip_prep": hparams["skip_prep"],
        },
    )

    # Dataset IO prep: creating Dataset objects and proper encodings for phones
    train_data, valid_data, test_data, label_encoder = dataio_prep(hparams)

    # Trainer initialization
    align_brain = AlignBrain(
        modules=hparams["modules"],
        opt_class=hparams["opt_class"],
        hparams=hparams,
        run_opts=run_opts,
        checkpointer=hparams["checkpointer"],
    )
    # Expose the label encoder on the Brain for downstream use.
    align_brain.label_encoder = label_encoder

    # Training/validation loop
    print("Starting training type:", hparams["init_training_type"])
    align_brain.fit(
        align_brain.hparams.epoch_counter,
        train_data,
        valid_data,
        train_loader_kwargs=hparams["dataloader_options"],
        valid_loader_kwargs=hparams["dataloader_options"],
    )

    # Test: evaluates with the checkpoint that maximized alignment accuracy.
    align_brain.evaluate(
        test_data,
        max_key="accuracy",
        test_loader_kwargs=hparams["dataloader_options"],
    )
| 10,757 | 34.272131 | 83 | py |
speechbrain | speechbrain-main/recipes/fluent-speech-commands/direct/train.py | #!/usr/bin/env/python3
"""
Recipe for "direct" (speech -> semantics) SLU with ASR-based transfer learning.
We encode input waveforms into features using a model trained on LibriSpeech,
then feed the features into a seq2seq model to map them to semantics.
(Adapted from the LibriSpeech seq2seq ASR recipe written by Ju-Chieh Chou, Mirco Ravanelli, Abdel Heba, and Peter Plantinga.)
Run using:
> python train.py hparams/train.yaml
Authors
* Loren Lugosch 2020
* Mirco Ravanelli 2020
"""
import sys
import torch
import speechbrain as sb
import logging
from hyperpyyaml import load_hyperpyyaml
from speechbrain.utils.distributed import run_on_main
logger = logging.getLogger(__name__)
# Define training procedure
class SLU(sb.Brain):
    """Brain for direct (speech -> semantics) SLU.

    A frozen pretrained ASR encoder produces features which are fed to a
    seq2seq model mapping speech to semantic token sequences. Decoded
    examples are printed every ``show_results_every`` training batches
    (a module-level constant set in ``__main__``).
    """

    def compute_forward(self, batch, stage):
        """Forward computations from the waveform batches to the output probabilities."""
        batch = batch.to(self.device)
        wavs, wav_lens = batch.sig
        tokens_bos, tokens_bos_lens = batch.tokens_bos

        # Add augmentation if specified
        if stage == sb.Stage.TRAIN:
            # Applying the augmentation pipeline
            wavs_aug_tot = []
            wavs_aug_tot.append(wavs)
            for count, augment in enumerate(self.hparams.augment_pipeline):
                # Apply augment
                wavs_aug = augment(wavs, wav_lens)

                # Managing speed change: crop or zero-pad back to orig length
                if wavs_aug.shape[1] > wavs.shape[1]:
                    wavs_aug = wavs_aug[:, 0 : wavs.shape[1]]
                else:
                    zero_sig = torch.zeros_like(wavs)
                    zero_sig[:, 0 : wavs_aug.shape[1]] = wavs_aug
                    wavs_aug = zero_sig

                wavs_aug_tot.append(wavs_aug)

            # Stack clean + augmented copies; remember the multiplier so
            # targets can be replicated in compute_objectives.
            wavs = torch.cat(wavs_aug_tot, dim=0)
            self.n_augment = len(wavs_aug_tot)
            wav_lens = torch.cat([wav_lens] * self.n_augment)
            tokens_bos = torch.cat([tokens_bos] * self.n_augment)

        # ASR encoder forward pass (frozen: no gradients flow through it)
        with torch.no_grad():
            ASR_encoder_out = self.hparams.asr_model.encode_batch(
                wavs.detach(), wav_lens
            )

        # SLU forward pass
        encoder_out = self.hparams.slu_enc(ASR_encoder_out)
        e_in = self.hparams.output_emb(tokens_bos)
        h, _ = self.hparams.dec(e_in, encoder_out, wav_lens)

        # Output layer for seq2seq log-probabilities
        logits = self.hparams.seq_lin(h)
        p_seq = self.hparams.log_softmax(logits)

        # Compute outputs (run beam search only when results will be shown)
        if (
            stage == sb.Stage.TRAIN
            and self.batch_count % show_results_every != 0
        ):
            return p_seq, wav_lens
        else:
            p_tokens, scores = self.hparams.beam_searcher(encoder_out, wav_lens)
            return p_seq, wav_lens, p_tokens

    def compute_objectives(self, predictions, batch, stage):
        """Computes the loss (NLL) given predictions and targets."""
        if (
            stage == sb.Stage.TRAIN
            and self.batch_count % show_results_every != 0
        ):
            p_seq, wav_lens = predictions
        else:
            p_seq, wav_lens, predicted_tokens = predictions

        ids = batch.id
        tokens_eos, tokens_eos_lens = batch.tokens_eos
        tokens, tokens_lens = batch.tokens

        if hasattr(self.hparams, "env_corrupt") and stage == sb.Stage.TRAIN:
            tokens_eos = torch.cat([tokens_eos, tokens_eos], dim=0)
            tokens_eos_lens = torch.cat(
                [tokens_eos_lens, tokens_eos_lens], dim=0
            )

        # Replicate targets to match the augmented batch built in forward.
        if stage == sb.Stage.TRAIN:
            tokens_eos = torch.cat([tokens_eos] * self.n_augment, dim=0)
            tokens_eos_lens = torch.cat(
                [tokens_eos_lens] * self.n_augment, dim=0
            )

        loss_seq = self.hparams.seq_cost(
            p_seq, tokens_eos, length=tokens_eos_lens
        )

        # (No ctc loss)
        loss = loss_seq

        if (stage != sb.Stage.TRAIN) or (
            self.batch_count % show_results_every == 0
        ):
            # Decode token terms to words.
            # Fix: use the tokenizer attached to the Brain (self.tokenizer,
            # assigned in __main__) instead of a module-level global.
            predicted_semantics = [
                self.tokenizer.decode_ids(utt_seq).split(" ")
                for utt_seq in predicted_tokens
            ]

            target_semantics = [wrd.split(" ") for wrd in batch.semantics]

            for i in range(len(target_semantics)):
                print(" ".join(predicted_semantics[i]).replace("|", ","))
                print(" ".join(target_semantics[i]).replace("|", ","))
                print("")

            if stage != sb.Stage.TRAIN:
                self.wer_metric.append(
                    ids, predicted_semantics, target_semantics
                )
                self.cer_metric.append(
                    ids, predicted_semantics, target_semantics
                )

        return loss

    def fit_batch(self, batch):
        """Train the parameters given a single batch in input"""
        predictions = self.compute_forward(batch, sb.Stage.TRAIN)
        loss = self.compute_objectives(predictions, batch, sb.Stage.TRAIN)
        loss.backward()
        if self.check_gradients(loss):
            self.optimizer.step()
        self.optimizer.zero_grad()
        self.batch_count += 1
        return loss.detach()

    def evaluate_batch(self, batch, stage):
        """Computations needed for validation/test batches"""
        predictions = self.compute_forward(batch, stage=stage)
        loss = self.compute_objectives(predictions, batch, stage=stage)
        return loss.detach()

    def on_stage_start(self, stage, epoch):
        """Gets called at the beginning of each epoch"""
        self.batch_count = 0

        if stage != sb.Stage.TRAIN:
            self.cer_metric = self.hparams.cer_computer()
            self.wer_metric = self.hparams.error_rate_computer()

    def on_stage_end(self, stage, stage_loss, epoch):
        """Gets called at the end of an epoch: annealing, logging, ckpt."""
        # Compute/store important stats
        stage_stats = {"loss": stage_loss}
        if stage == sb.Stage.TRAIN:
            self.train_stats = stage_stats
        else:
            stage_stats["CER"] = self.cer_metric.summarize("error_rate")
            stage_stats["WER"] = self.wer_metric.summarize("error_rate")

        # Perform end-of-iteration things, like annealing, logging, etc.
        if stage == sb.Stage.VALID:
            old_lr, new_lr = self.hparams.lr_annealing(stage_stats["WER"])
            sb.nnet.schedulers.update_learning_rate(self.optimizer, new_lr)
            self.hparams.train_logger.log_stats(
                stats_meta={"epoch": epoch, "lr": old_lr},
                train_stats=self.train_stats,
                valid_stats=stage_stats,
            )
            self.checkpointer.save_and_keep_only(
                meta={"WER": stage_stats["WER"]}, min_keys=["WER"],
            )
        elif stage == sb.Stage.TEST:
            self.hparams.train_logger.log_stats(
                stats_meta={"Epoch loaded": self.hparams.epoch_counter.current},
                test_stats=stage_stats,
            )
            with open(self.hparams.wer_file, "w") as w:
                self.wer_metric.write_stats(w)
def dataio_prepare(hparams):
    """Build the train/valid/test datasets and their processing pipelines.

    Returns the three DynamicItemDatasets plus the tokenizer taken from
    ``hparams``.
    """
    data_folder = hparams["data_folder"]

    train_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["csv_train"], replacements={"data_root": data_folder},
    )

    # Sorting the training set speeds up training; with a fixed order,
    # dataloader shuffling must be disabled or the sorting is pointless.
    sorting = hparams["sorting"]
    if sorting == "ascending":
        train_data = train_data.filtered_sorted(sort_key="duration")
        hparams["dataloader_opts"]["shuffle"] = False
    elif sorting == "descending":
        train_data = train_data.filtered_sorted(
            sort_key="duration", reverse=True
        )
        hparams["dataloader_opts"]["shuffle"] = False
    elif sorting != "random":
        raise NotImplementedError(
            "sorting must be random, ascending or descending"
        )

    # Valid/test sets are always sorted so evaluation is faster.
    valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["csv_valid"], replacements={"data_root": data_folder},
    ).filtered_sorted(sort_key="duration")

    test_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["csv_test"], replacements={"data_root": data_folder},
    ).filtered_sorted(sort_key="duration")

    all_sets = [train_data, valid_data, test_data]
    tokenizer = hparams["tokenizer"]

    # Audio pipeline: load the waveform from disk.
    @sb.utils.data_pipeline.takes("wav")
    @sb.utils.data_pipeline.provides("sig")
    def audio_pipeline(wav):
        return sb.dataio.dataio.read_audio(wav)

    sb.dataio.dataset.add_dynamic_item(all_sets, audio_pipeline)

    # Text pipeline: tokenize semantics and add BOS/EOS variants.
    @sb.utils.data_pipeline.takes("semantics")
    @sb.utils.data_pipeline.provides(
        "semantics", "token_list", "tokens_bos", "tokens_eos", "tokens"
    )
    def text_pipeline(semantics):
        yield semantics
        token_ids = tokenizer.encode_as_ids(semantics)
        yield token_ids
        yield torch.LongTensor([hparams["bos_index"]] + token_ids)
        yield torch.LongTensor(token_ids + [hparams["eos_index"]])
        yield torch.LongTensor(token_ids)

    sb.dataio.dataset.add_dynamic_item(all_sets, text_pipeline)

    # Declare which keys each batch exposes.
    sb.dataio.dataset.set_output_keys(
        all_sets,
        ["id", "sig", "semantics", "tokens_bos", "tokens_eos", "tokens"],
    )
    return train_data, valid_data, test_data, tokenizer
if __name__ == "__main__":
    # Load hyperparameters file with command-line overrides
    hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])
    with open(hparams_file) as fin:
        hparams = load_hyperpyyaml(fin, overrides)

    # Module-level global read inside the SLU Brain class:
    show_results_every = 100  # plots results every N iterations

    # If --distributed_launch then
    # create ddp_group with the right communication protocol
    sb.utils.distributed.ddp_init_group(run_opts)

    # Create experiment directory
    sb.create_experiment_directory(
        experiment_directory=hparams["output_folder"],
        hyperparams_to_save=hparams_file,
        overrides=overrides,
    )

    # Dataset prep
    from prepare import prepare_FSC  # noqa

    # multi-gpu (ddp) save data preparation
    # (run_on_main ensures preparation runs once, not on every DDP process)
    run_on_main(
        prepare_FSC,
        kwargs={
            "data_folder": hparams["data_folder"],
            "save_folder": hparams["output_folder"],
            "skip_prep": hparams["skip_prep"],
        },
    )

    # here we create the datasets objects as well as tokenization and encoding
    (train_set, valid_set, test_set, tokenizer,) = dataio_prepare(hparams)

    # We download and pretrain the tokenizer
    run_on_main(hparams["pretrainer"].collect_files)
    hparams["pretrainer"].load_collected(device=run_opts["device"])

    # Brain class initialization
    slu_brain = SLU(
        modules=hparams["modules"],
        opt_class=hparams["opt_class"],
        hparams=hparams,
        run_opts=run_opts,
        checkpointer=hparams["checkpointer"],
    )

    # adding objects to trainer:
    slu_brain.tokenizer = tokenizer

    # Training
    slu_brain.fit(
        slu_brain.hparams.epoch_counter,
        train_set,
        valid_set,
        train_loader_kwargs=hparams["dataloader_opts"],
        valid_loader_kwargs=hparams["dataloader_opts"],
    )

    # Test (loads the best checkpoint kept by min WER during training)
    slu_brain.hparams.wer_file = hparams["output_folder"] + "/wer_test.txt"
    slu_brain.evaluate(test_set, test_loader_kwargs=hparams["dataloader_opts"])
| 12,070 | 33.686782 | 125 | py |
speechbrain | speechbrain-main/recipes/CommonVoice/common_voice_prepare.py | """
Data preparation.
Download: https://voice.mozilla.org/en/datasets
Author
------
Titouan Parcollet
Luca Della Libera 2022
Pooneh Mousavi 2022
"""
import os
import csv
import re
import logging
import torchaudio
import unicodedata
from tqdm.contrib import tzip
logger = logging.getLogger(__name__)
def prepare_common_voice(
    data_folder,
    save_folder,
    train_tsv_file=None,
    dev_tsv_file=None,
    test_tsv_file=None,
    accented_letters=False,
    language="en",
    skip_prep=False,
):
    """
    Prepares the csv files for the Mozilla Common Voice dataset.
    Download: https://voice.mozilla.org/en/datasets

    Arguments
    ---------
    data_folder : str
        Path to the folder where the original Common Voice dataset is stored.
        This path should include the lang: /datasets/CommonVoice/<language>/
    save_folder : str
        The directory where to store the csv files.
    train_tsv_file : str, optional
        Path to the Train Common Voice .tsv file (cs)
    dev_tsv_file : str, optional
        Path to the Dev Common Voice .tsv file (cs)
    test_tsv_file : str, optional
        Path to the Test Common Voice .tsv file (cs)
    accented_letters : bool, optional
        Defines if accented letters will be kept as individual letters or
        transformed to the closest non-accented letters.
    language: str
        Specify the language for text normalization.
    skip_prep: bool
        If True, skip data preparation.

    Example
    -------
    >>> from recipes.CommonVoice.common_voice_prepare import prepare_common_voice
    >>> data_folder = '/datasets/CommonVoice/en'
    >>> save_folder = 'exp/CommonVoice_exp'
    >>> train_tsv_file = '/datasets/CommonVoice/en/train.tsv'
    >>> dev_tsv_file = '/datasets/CommonVoice/en/dev.tsv'
    >>> test_tsv_file = '/datasets/CommonVoice/en/test.tsv'
    >>> accented_letters = False
    >>> duration_threshold = 10
    >>> prepare_common_voice( \
                 data_folder, \
                 save_folder, \
                 train_tsv_file, \
                 dev_tsv_file, \
                 test_tsv_file, \
                 accented_letters, \
                 language="en" \
                 )
    """
    if skip_prep:
        return

    # If not specified point toward standard location w.r.t CommonVoice tree.
    # (Cleaned up the previous no-op `else: x = x` branches.)
    if train_tsv_file is None:
        train_tsv_file = data_folder + "/train.tsv"

    if dev_tsv_file is None:
        dev_tsv_file = data_folder + "/dev.tsv"

    if test_tsv_file is None:
        test_tsv_file = data_folder + "/test.tsv"

    # Setting the save folder
    if not os.path.exists(save_folder):
        os.makedirs(save_folder)

    # Setting output files
    save_csv_train = save_folder + "/train.csv"
    save_csv_dev = save_folder + "/dev.csv"
    save_csv_test = save_folder + "/test.csv"

    # If csv already exists, we skip the data preparation
    if skip(save_csv_train, save_csv_dev, save_csv_test):

        msg = "%s already exists, skipping data preparation!" % (save_csv_train)
        logger.info(msg)

        msg = "%s already exists, skipping data preparation!" % (save_csv_dev)
        logger.info(msg)

        msg = "%s already exists, skipping data preparation!" % (save_csv_test)
        logger.info(msg)

        return

    # Additional checks to make sure the data folder contains Common Voice
    check_commonvoice_folders(data_folder)

    # Creating csv files for {train, dev, test} data
    file_pairs = zip(
        [train_tsv_file, dev_tsv_file, test_tsv_file],
        [save_csv_train, save_csv_dev, save_csv_test],
    )
    for tsv_file, save_csv in file_pairs:
        create_csv(
            tsv_file, save_csv, data_folder, accented_letters, language,
        )
def skip(save_csv_train, save_csv_dev, save_csv_test):
    """
    Detects if the Common Voice data preparation has been already done.
    If the preparation has been done, we can skip it.

    Arguments
    ---------
    save_csv_train : str
        Path of the expected train csv file.
    save_csv_dev : str
        Path of the expected dev csv file.
    save_csv_test : str
        Path of the expected test csv file.

    Returns
    -------
    bool
        if True, the preparation phase can be skipped.
        if False, it must be done.
    """
    # Idiomatic: return the boolean expression directly instead of a
    # flag variable that shadowed the function name.
    return (
        os.path.isfile(save_csv_train)
        and os.path.isfile(save_csv_dev)
        and os.path.isfile(save_csv_test)
    )
def create_csv(
    orig_tsv_file, csv_file, data_folder, accented_letters=False, language="en"
):
    """
    Creates the csv file given a list of wav files.

    Arguments
    ---------
    orig_tsv_file : str
        Path to the Common Voice tsv file (standard file).
    csv_file : str
        Path of the csv file to write.
    data_folder : str
        Path of the CommonVoice dataset.
    accented_letters : bool, optional
        Defines if accented letters will be kept as individual letters or
        transformed to the closest non-accented letters.
    language : str, optional
        Language code used for the text normalization.

    Returns
    -------
    None
    """
    # Check if the given files exists
    if not os.path.isfile(orig_tsv_file):
        msg = "\t%s doesn't exist, verify your dataset!" % (orig_tsv_file)
        logger.info(msg)
        raise FileNotFoundError(msg)

    # We load and skip the header.
    # Fix: use a context manager so the file handle is closed (the previous
    # open(...).readlines() leaked the descriptor).
    with open(orig_tsv_file, "r") as tsv_in:
        loaded_csv = tsv_in.readlines()[1:]
    nb_samples = str(len(loaded_csv))

    msg = "Preparing CSV files for %s samples ..." % (str(nb_samples))
    logger.info(msg)

    # Adding some Prints
    msg = "Creating csv lists in %s ..." % (csv_file)
    logger.info(msg)

    csv_lines = [["ID", "duration", "wav", "spk_id", "wrd"]]

    # Start processing lines
    total_duration = 0.0
    for line in tzip(loaded_csv):

        line = line[0]

        # Path is at indice 1 in Common Voice tsv files. And .mp3 files
        # are located in datasets/lang/clips/
        mp3_path = data_folder + "/clips/" + line.split("\t")[1]
        file_name = mp3_path.split(".")[-2].split("/")[-1]
        spk_id = line.split("\t")[0]
        snt_id = file_name

        # Setting torchaudio backend to sox-io (needed to read mp3 files)
        if torchaudio.get_audio_backend() != "sox_io":
            logger.warning("This recipe needs the sox-io backend of torchaudio")
            logger.warning("The torchaudio backend is changed to sox_io")
            torchaudio.set_audio_backend("sox_io")

        # Reading the signal (to retrieve duration in seconds)
        if os.path.isfile(mp3_path):
            info = torchaudio.info(mp3_path)
        else:
            msg = "\tError loading: %s" % (str(len(file_name)))
            logger.info(msg)
            continue

        duration = info.num_frames / info.sample_rate
        total_duration += duration

        # Getting transcript
        words = line.split("\t")[2]

        # Unicode Normalization
        words = unicode_normalisation(words)

        # !! Language specific cleaning !!
        words = language_specific_preprocess(language, words)

        # Remove accents if specified
        if not accented_letters:
            words = strip_accents(words)
            words = words.replace("'", " ")
            words = words.replace("’", " ")

        # Remove multiple spaces
        words = re.sub(" +", " ", words)

        # Remove spaces at the beginning and the end of the sentence
        words = words.lstrip().rstrip()

        # Getting chars
        chars = words.replace(" ", "_")
        chars = " ".join([char for char in chars][:])

        # Remove too short sentences (or empty):
        if language in ["ja", "ch"]:
            if len(chars) < 3:
                continue
        else:
            if len(words.split(" ")) < 3:
                continue

        # Composition of the csv_line
        csv_line = [snt_id, str(duration), mp3_path, spk_id, str(words)]

        # Adding this line to the csv_lines list
        csv_lines.append(csv_line)

    # Writing the csv lines
    with open(csv_file, mode="w", encoding="utf-8") as csv_f:
        csv_writer = csv.writer(
            csv_f, delimiter=",", quotechar='"', quoting=csv.QUOTE_MINIMAL
        )

        for line in csv_lines:
            csv_writer.writerow(line)

    # Final prints
    msg = "%s successfully created!" % (csv_file)
    logger.info(msg)
    msg = "Number of samples: %s " % (str(len(loaded_csv)))
    logger.info(msg)
    msg = "Total duration: %s Hours" % (str(round(total_duration / 3600, 2)))
    logger.info(msg)
def language_specific_preprocess(language, words):
    """Normalize a transcript according to language-specific rules.

    Feel free to adapt the normalization below to your alphabet.
    """
    if language in ("en", "fr", "it", "rw"):
        # Keep latin letters, digits, apostrophes and common accented chars;
        # everything else collapses to a single space. Then uppercase.
        words = re.sub(
            "[^’'A-Za-z0-9À-ÖØ-öø-ÿЀ-ӿéæœâçèàûî]+", " ", words
        ).upper()

    if language == "de":
        # Protect ß before upper() (which would turn it into SS), so that
        # solitary occurrences of SS in the corpus stay distinguishable.
        words = words.replace("ß", "0000ß0000")
        words = re.sub("[^’'A-Za-z0-9öÖäÄüÜß]+", " ", words).upper()
        words = words.replace("'", " ")
        words = words.replace("’", " ")
        # Restore ß after the case conversion.
        words = words.replace("0000SS0000", "ß")

    if language == "fr":
        # Replace J'y D'hui etc by J_ D_hui
        words = words.replace("'", " ")
        words = words.replace("’", " ")
    elif language == "ar":
        hamza = "\u0621"
        alef_madda = "\u0622"
        alef_hamza_above = "\u0623"
        allowed = (
            "ابتةثجحخدذرزژشسصضطظعغفقكلمنهويىءآأؤإئ"
            + hamza
            + alef_madda
            + alef_hamza_above
        )
        words = re.sub("[^" + allowed + " ]+", "", words).upper()
    elif language == "fa":
        hamza = "\u0621"
        alef_madda = "\u0622"
        alef_hamza_above = "\u0623"
        allowed = (
            "ابپتةثجحخچدذرزژسشصضطظعغفقگکلمنهویىءآأؤإئ"
            + hamza
            + alef_madda
            + alef_hamza_above
        )
        words = re.sub("[^" + allowed + " ]+", "", words).upper()
    elif language == "ga-IE":
        # Irish lower() is complicated, but upper() is nondeterministic,
        # so use lowercase with a special rule for eclipsis prefixes (t-/n-).
        def _has_eclipsis(token):
            return (
                len(token) >= 2 and token[0] in "tn" and token[1] in "AEIOUÁÉÍÓÚ"
            )

        def _irish_lower(token):
            if _has_eclipsis(token):
                return token[0] + "-" + token[1:].lower()
            return token.lower()

        words = re.sub("[^-A-Za-z'ÁÉÍÓÚáéíóú]+", " ", words)
        words = " ".join(_irish_lower(w) for w in words.split(" "))
    elif language == "es":
        # Fix the following error in dataset large:
        # "Queen Elizabeth , coproducida por Foreign Noi$e" -> "$" becomes "s"
        words = words.replace("$", "s")
    return words
def check_commonvoice_folders(data_folder):
    """
    Check if the data folder actually contains the Common Voice dataset.
    If not, raises an error.

    Returns
    -------
    None

    Raises
    ------
    FileNotFoundError
        If data folder doesn't contain Common Voice dataset.
    """
    # The dataset must provide a "clips" subfolder with the audio files.
    clips_folder = data_folder + "/clips"
    if not os.path.exists(clips_folder):
        err_msg = (
            "the folder %s does not exist (it is expected in "
            "the Common Voice dataset)" % clips_folder
        )
        raise FileNotFoundError(err_msg)
def unicode_normalisation(text):
    """Coerce ``text`` to ``str`` (placeholder: no unicode normalization
    is actually applied here)."""
    return str(text)
def strip_accents(text):
    """Remove diacritics by NFD-decomposing and dropping non-ASCII marks."""
    decomposed = unicodedata.normalize("NFD", text)
    ascii_only = decomposed.encode("ascii", "ignore").decode("utf-8")
    return str(ascii_only)
| 11,693 | 29.854881 | 149 | py |
speechbrain | speechbrain-main/recipes/CommonVoice/self-supervised-learning/wav2vec2/train_hf_wav2vec2.py | #!/usr/bin/env python3
import sys
import torch
import logging
import speechbrain as sb
import torchaudio
from hyperpyyaml import load_hyperpyyaml
from speechbrain.utils.distributed import run_on_main
"""Recipe for pretraining a wav2vec 2.0 model on CommonVoice EN. Note that it can be
trained with ANY dataset as long as you provide the correct JSON or CSV file.
The HuggingFace implementation of the wav2vec 2.0 pretraining is used and wrapped
to fit properly the SpeechBrain framework. Models have been compared to the original
fairseq implementation with success. The Transformers HuggingFace library is
required:
> pip install extra_requirements.txt
Hence the process is the following:
1. Indicate a HuggingFace repository that stores the wav2vec 2.0 config file.
This is necessary to determine the architecture of the model that will be
instantiated.
2. Train it with our wrapper.
3. Save it to be reused as a pretrained encoder within SpeechBrain (or others).
wav2vec 2.0: https://arxiv.org/abs/2006.11477
HuggingFace: https://huggingface.co/transformers/model_doc/wav2vec2.html
To run this recipe, do the following:
> python train.py hparams/hyperparams.yaml
Authors
* Titouan Parcollet 2021
* Yan Gao 2021
"""
logger = logging.getLogger(__name__)
# Define training procedure
class W2VBrain(sb.core.Brain):
    """Brain for wav2vec 2.0 self-supervised pretraining.

    Wraps the HuggingFace wav2vec2 module: training minimizes the model's
    own contrastive loss; validation tracks the cosine similarity between
    projected and quantized states at masked positions as an accuracy.
    """

    def compute_forward(self, batch, stage):
        """Forward computations from the waveform batches to the w2v2 loss."""
        batch = batch.to(self.device)
        wavs, wav_lens = batch.sig
        wavs, wav_lens = wavs.to(self.device), wav_lens.to(self.device)

        # Forward on w2v2 and take the loss.
        # It has to be on train mode even for eval. Otherwise it would deactivate
        # the loss computation ...
        out, mask = self.modules.wav2vec2(wavs, wav_lens)

        loss = out.loss

        # Outside training we also return the outputs and mask so the
        # objective can compute the masked cosine-similarity accuracy.
        if stage != sb.Stage.TRAIN:
            return loss, out, mask

        return loss

    def compute_objectives(self, predictions, batch, stage):
        """Returns the pretraining loss (and tracks accuracy in eval)."""
        if stage == sb.Stage.TRAIN:
            # We don't have to compute anything as the HF model directly returns
            # the contrastive loss.
            loss = predictions
        else:
            # We compute the accuracy between embeddings with cosine sim.
            loss, out, mask_time_indices = predictions
            cosine_sim = torch.cosine_similarity(
                out.projected_states, out.projected_quantized_states, dim=-1
            )
            # Average the similarity over the masked time steps only.
            acc = torch.masked_select(
                cosine_sim,
                mask_time_indices.type(torch.BoolTensor).to(self.device),
            ).mean()
            self.acc_metric.append(acc)
        return loss

    def fit_batch(self, batch):
        """Train the parameters given a single batch in input"""
        # Here we manage mixed precision
        if self.auto_mix_prec:
            with torch.cuda.amp.autocast():
                predictions = self.compute_forward(batch, sb.Stage.TRAIN)
                loss = self.compute_objectives(
                    predictions, batch, sb.Stage.TRAIN
                )

            # normalize the loss by gradient_accumulation step
            self.scaler.scale(
                loss / self.hparams.gradient_accumulation
            ).backward()

            # Only step the optimizer every gradient_accumulation batches.
            if self.step % self.hparams.gradient_accumulation == 0:

                # gradient clipping & early stop if loss is not finite
                self.check_gradients(loss)

                self.scaler.unscale_(self.optimizer)
                self.scaler.step(self.optimizer)
                self.scaler.update()
                self.optimizer.zero_grad()

                # anneal lr every update
                self.hparams.noam_annealing(self.optimizer)
        else:
            predictions = self.compute_forward(batch, sb.Stage.TRAIN)
            loss = self.compute_objectives(predictions, batch, sb.Stage.TRAIN)

            # normalize the loss by gradient_accumulation step
            (loss / self.hparams.gradient_accumulation).backward()

            # Only step the optimizer every gradient_accumulation batches.
            if self.step % self.hparams.gradient_accumulation == 0:

                # gradient clipping & early stop if loss is not finite
                self.check_gradients(loss)

                self.optimizer.step()
                self.optimizer.zero_grad()

                # anneal lr every update
                self.hparams.noam_annealing(self.optimizer)

        return loss.detach()

    def on_stage_start(self, stage, epoch):
        """Gets called at the beginning of each epoch"""
        if stage != sb.Stage.TRAIN:
            self.acc_metric = []

    def on_stage_end(self, stage, stage_loss, epoch):
        """Gets called at the end of an epoch."""
        # Compute/store important stats
        stage_stats = {"loss": stage_loss}
        if stage == sb.Stage.TRAIN:
            self.train_stats = stage_stats
        else:
            # Mean masked cosine-similarity accuracy over the stage.
            stage_stats["acc"] = sum(self.acc_metric) / len(self.acc_metric)

        # Perform end-of-iteration things, like annealing, logging, etc.
        if stage == sb.Stage.VALID:
            lr = self.hparams.noam_annealing.current_lr
            steps = self.hparams.noam_annealing.n_steps
            optimizer = self.optimizer.__class__.__name__

            epoch_stats = {
                "epoch": epoch,
                "lr": lr,
                "steps": steps,
                "optimizer": optimizer,
            }
            self.hparams.train_logger.log_stats(
                stats_meta=epoch_stats,
                train_stats=self.train_stats,
                valid_stats=stage_stats,
            )
            self.checkpointer.save_and_keep_only(
                meta={"acc": stage_stats["acc"], "epoch": epoch},
                max_keys=["acc"],
            )
        elif stage == sb.Stage.TEST:
            self.hparams.train_logger.log_stats(
                stats_meta={"Epoch loaded": self.hparams.epoch_counter.current},
                test_stats=stage_stats,
            )
# Define custom data procedure
def dataio_prepare(hparams):
    """Prepare the train/valid/test datasets used by the Brain class.

    Builds ``DynamicItemDataset`` objects from the CSV annotations, applies
    duration-based filtering/sorting, registers the audio-loading pipeline,
    and (optionally) instantiates dynamic batch samplers.

    Arguments
    ---------
    hparams : dict
        Loaded hyperparameters; must provide the CSV paths, sorting policy,
        duration limits, sample rate and (optionally) the dynamic batching
        configuration.

    Returns
    -------
    tuple
        ``(train_data, valid_data, test_data, train_batch_sampler,
        valid_batch_sampler)``; the samplers are ``None`` when dynamic
        batching is disabled.
    """
    # BUGFIX: this module does not import torchaudio at the top level, so
    # the audio pipeline below raised NameError at runtime. Import locally
    # to keep the fix self-contained.
    import torchaudio

    # 1. Define datasets
    data_folder = hparams["data_folder"]
    train_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["train_csv"], replacements={"data_root": data_folder},
    )
    if hparams["sorting"] == "ascending":
        # we sort training data to speed up training and get better results.
        train_data = train_data.filtered_sorted(
            sort_key="duration",
            key_max_value={"duration": hparams["avoid_if_longer_than"]},
            key_min_value={"duration": hparams["avoid_if_shorter_than"]},
        )
        # when sorting do not shuffle in dataloader ! otherwise is pointless
        hparams["dataloader_options"]["shuffle"] = False
    elif hparams["sorting"] == "descending":
        train_data = train_data.filtered_sorted(
            sort_key="duration",
            reverse=True,
            key_max_value={"duration": hparams["avoid_if_longer_than"]},
            key_min_value={"duration": hparams["avoid_if_shorter_than"]},
        )
        # when sorting do not shuffle in dataloader ! otherwise is pointless
        hparams["dataloader_options"]["shuffle"] = False
    elif hparams["sorting"] == "random":
        # Duration filtering only; original order is kept.
        train_data = train_data.filtered_sorted(
            key_max_value={"duration": hparams["avoid_if_longer_than"]},
            key_min_value={"duration": hparams["avoid_if_shorter_than"]},
        )
        # NOTE(review): shuffling is disabled even for "random" sorting,
        # which defeats the point of a random order -- confirm intended.
        hparams["dataloader_options"]["shuffle"] = False
    else:
        raise NotImplementedError(
            "sorting must be random, ascending or descending"
        )
    valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["valid_csv"], replacements={"data_root": data_folder},
    )
    # We also sort the validation data so it is faster to validate
    valid_data = valid_data.filtered_sorted(sort_key="duration")
    test_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["test_csv"], replacements={"data_root": data_folder},
    )
    # We also sort the test data so it is faster to evaluate
    test_data = test_data.filtered_sorted(sort_key="duration")
    datasets = [train_data, valid_data, test_data]

    # 2. Define audio pipeline: load, downmix, resample to target rate.
    @sb.utils.data_pipeline.takes("wav")
    @sb.utils.data_pipeline.provides("sig")
    def audio_pipeline(wav):
        info = torchaudio.info(wav)
        sig = sb.dataio.dataio.read_audio(wav)
        # Downmix multi-channel audio by averaging the channel axis.
        if info.num_channels > 1:
            sig = torch.mean(sig, dim=1)
        resampled = torchaudio.transforms.Resample(
            info.sample_rate, hparams["sample_rate"],
        )(sig)
        return resampled

    sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline)

    # 3. Set the output keys consumed by the Brain class:
    sb.dataio.dataset.set_output_keys(
        datasets, ["id", "sig"],
    )

    # 4. If Dynamic Batching is used, we instantiate the needed samplers.
    train_batch_sampler = None
    valid_batch_sampler = None
    if hparams["dynamic_batching"]:
        from speechbrain.dataio.sampler import DynamicBatchSampler  # noqa

        dynamic_hparams = hparams["dynamic_batch_sampler"]
        num_buckets = dynamic_hparams["num_buckets"]
        train_batch_sampler = DynamicBatchSampler(
            train_data,
            dynamic_hparams["max_batch_len"],
            num_buckets=num_buckets,
            length_func=lambda x: x["duration"],
            shuffle=dynamic_hparams["shuffle_ex"],
            batch_ordering=dynamic_hparams["batch_ordering"],
        )
        valid_batch_sampler = DynamicBatchSampler(
            valid_data,
            dynamic_hparams["max_batch_len"],
            num_buckets=num_buckets,
            length_func=lambda x: x["duration"],
            shuffle=dynamic_hparams["shuffle_ex"],
            batch_ordering=dynamic_hparams["batch_ordering"],
        )
    return (
        train_data,
        valid_data,
        test_data,
        train_batch_sampler,
        valid_batch_sampler,
    )
if __name__ == "__main__":
    # Load hyperparameters file with command-line overrides
    hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])
    with open(hparams_file) as fin:
        hparams = load_hyperpyyaml(fin, overrides)
    # If --distributed_launch then
    # create ddp_group with the right communication protocol
    sb.utils.distributed.ddp_init_group(run_opts)
    # Dataset preparation (parsing CommonVoice)
    # NOTE(review): this entry point looks copied from a CommonVoice recipe.
    # The module header imports neither `run_on_main` nor defines `W2VBrain`
    # (the Brain subclass defined in this file is Tacotron2Brain), so both
    # calls below raise NameError at runtime -- confirm the intended class
    # and add `from speechbrain.utils.distributed import run_on_main`.
    from common_voice_prepare import prepare_common_voice  # noqa
    # Create experiment directory
    sb.create_experiment_directory(
        experiment_directory=hparams["output_folder"],
        hyperparams_to_save=hparams_file,
        overrides=overrides,
    )
    # Due to DDP, we do the preparation ONLY on the main python process
    run_on_main(
        prepare_common_voice,
        kwargs={
            "data_folder": hparams["data_folder"],
            "save_folder": hparams["save_folder"],
            "train_tsv_file": hparams["train_tsv_file"],
            "dev_tsv_file": hparams["dev_tsv_file"],
            "test_tsv_file": hparams["test_tsv_file"],
            "language": hparams["language"],
            "skip_prep": hparams["skip_prep"],
        },
    )
    # Create the datasets objects as well as tokenization and encoding :-D
    (
        train_data,
        valid_data,
        test_data,
        train_bsampler,
        valid_bsampler,
    ) = dataio_prepare(hparams)
    # Trainer initialization
    asr_brain = W2VBrain(
        modules=hparams["modules"],
        hparams=hparams,
        run_opts=run_opts,
        opt_class=hparams["opt_class"],
        checkpointer=hparams["checkpointer"],
    )
    # Changing the samplers if dynamic batching is activated
    train_dataloader_opts = hparams["dataloader_options"]
    valid_dataloader_opts = hparams["test_dataloader_options"]
    if train_bsampler is not None:
        train_dataloader_opts = {
            "batch_sampler": train_bsampler,
            "num_workers": hparams["num_workers"],
        }
    if valid_bsampler is not None:
        valid_dataloader_opts = {"batch_sampler": valid_bsampler}
    # Training
    asr_brain.fit(
        asr_brain.hparams.epoch_counter,
        train_data,
        valid_data,
        train_loader_kwargs=train_dataloader_opts,
        valid_loader_kwargs=valid_dataloader_opts,
    )
    # Test: evaluate with the checkpoint that achieved the lowest loss.
    asr_brain.evaluate(
        test_data,
        min_key="loss",
        test_loader_kwargs=hparams["test_dataloader_options"],
    )
| 12,890 | 33.746631 | 84 | py |
speechbrain | speechbrain-main/recipes/CommonVoice/ASR/transducer/train.py | #!/usr/bin/env/python3
"""Recipe for training a Transducer ASR system with librispeech.
The system employs an encoder, a decoder, and an joint network
between them. Decoding is performed with beamsearch coupled with a neural
language model.
To run this recipe, do the following:
> python train.py hparams/train.yaml
With the default hyperparameters, the system employs a CRDNN encoder.
The decoder is based on a standard GRU. Beamsearch coupled with a RNN
language model is used on the top of decoder probabilities.
The neural network is trained on both CTC and negative-log likelihood
targets and sub-word units estimated with Byte Pairwise Encoding (BPE)
are used as basic recognition tokens. Training is performed on the
CommonVoice dataset of the selected language.
The experiment file is flexible enough to support a large variety of
different systems. By properly changing the parameter files, you can try
different encoders, decoders, tokens (e.g, characters instead of BPE),
training data (e.g., a different CommonVoice language), and many
other possible variations.
Authors
* Abdel Heba 2020
* Mirco Ravanelli 2020
* Ju-Chieh Chou 2020
* Peter Plantinga 2020
"""
import sys
import torch
import logging
import speechbrain as sb
import torchaudio
from speechbrain.utils.distributed import run_on_main
from speechbrain.utils.data_utils import undo_padding
from speechbrain.tokenizers.SentencePiece import SentencePiece
from hyperpyyaml import load_hyperpyyaml
logger = logging.getLogger(__name__)
# Define training procedure
class ASR(sb.Brain):
    """Transducer ASR training/evaluation procedure.

    Supports optional auxiliary CTC (on the encoder) and CE (on the
    decoder) heads during the first training epochs.
    """

    def compute_forward(self, batch, stage):
        """Forward computations from the waveform batches to the output probabilities.

        Returns a stage-dependent tuple: while training, the transducer
        logits optionally preceded by the CTC and/or CE log-probabilities;
        at validation/test time, the logits plus beam-search hypotheses.
        """
        batch = batch.to(self.device)
        wavs, wav_lens = batch.sig
        tokens_with_bos, token_with_bos_lens = batch.tokens_bos
        # wavs, wav_lens = wavs.to(self.device), wav_lens.to(self.device)
        # Forward pass
        feats = self.hparams.compute_features(wavs)
        feats = self.modules.normalize(feats, wav_lens)
        # Add augmentation if specified (training only)
        if stage == sb.Stage.TRAIN:
            if hasattr(self.modules, "augmentation"):
                feats = self.modules.augmentation(feats)
        x = self.modules.enc(feats.detach())
        e_in = self.modules.emb(tokens_with_bos)
        h, _ = self.modules.dec(e_in)
        # Joint network
        # add labelseq_dim to the encoder tensor: [B,T,H_enc] => [B,T,1,H_enc]
        # add timeseq_dim to the decoder tensor: [B,U,H_dec] => [B,1,U,H_dec]
        joint = self.modules.Tjoint(x.unsqueeze(2), h.unsqueeze(1))
        # Output layer for transducer log-probabilities
        logits_transducer = self.modules.transducer_lin(joint)
        # Compute outputs
        if stage == sb.Stage.TRAIN:
            return_CTC = False
            return_CE = False
            current_epoch = self.hparams.epoch_counter.current
            # Auxiliary CTC head is active only for the first epochs.
            if (
                hasattr(self.hparams, "ctc_cost")
                and current_epoch <= self.hparams.number_of_ctc_epochs
            ):
                return_CTC = True
                # Output layer for ctc log-probabilities
                out_ctc = self.modules.enc_lin(x)
                p_ctc = self.hparams.log_softmax(out_ctc)
            # Auxiliary CE head on the decoder, same epoch gating.
            if (
                hasattr(self.hparams, "ce_cost")
                and current_epoch <= self.hparams.number_of_ce_epochs
            ):
                return_CE = True
                # Output layer for decoder (CE) log-probabilities
                p_ce = self.modules.dec_lin(h)
                p_ce = self.hparams.log_softmax(p_ce)
            if return_CE and return_CTC:
                return p_ctc, p_ce, logits_transducer, wav_lens
            elif return_CTC:
                return p_ctc, logits_transducer, wav_lens
            elif return_CE:
                return p_ce, logits_transducer, wav_lens
            else:
                return logits_transducer, wav_lens
        elif stage == sb.Stage.VALID:
            best_hyps, scores, _, _ = self.hparams.beam_searcher(x)
            return logits_transducer, wav_lens, best_hyps
        else:
            (
                best_hyps,
                best_scores,
                nbest_hyps,
                nbest_scores,
            ) = self.hparams.beam_searcher(x)
            return logits_transducer, wav_lens, best_hyps

    def compute_objectives(self, predictions, batch, stage):
        """Computes the loss (Transducer+(CTC+NLL)) given predictions and targets."""
        ids = batch.id
        current_epoch = self.hparams.epoch_counter.current
        tokens, token_lens = batch.tokens
        tokens_eos, token_eos_lens = batch.tokens_eos
        if stage == sb.Stage.TRAIN:
            if len(predictions) == 4:
                # Both auxiliary heads active: CTC + CE + transducer.
                p_ctc, p_ce, logits_transducer, wav_lens = predictions
                CTC_loss = self.hparams.ctc_cost(
                    p_ctc, tokens, wav_lens, token_lens
                )
                CE_loss = self.hparams.ce_cost(
                    p_ce, tokens_eos, length=token_eos_lens
                )
                loss_transducer = self.hparams.transducer_cost(
                    logits_transducer, tokens, wav_lens, token_lens
                )
                loss = (
                    self.hparams.ctc_weight * CTC_loss
                    + self.hparams.ce_weight * CE_loss
                    + (1 - (self.hparams.ctc_weight + self.hparams.ce_weight))
                    * loss_transducer
                )
            elif len(predictions) == 3:
                # one of the 2 heads (CTC or CE) is still computed
                # CTC alive
                if current_epoch <= self.hparams.number_of_ctc_epochs:
                    # BUGFIX: this tuple was previously unpacked into
                    # `p_transducer` while `logits_transducer` was used
                    # below, raising NameError on this path.
                    p_ctc, logits_transducer, wav_lens = predictions
                    CTC_loss = self.hparams.ctc_cost(
                        p_ctc, tokens, wav_lens, token_lens
                    )
                    loss_transducer = self.hparams.transducer_cost(
                        logits_transducer, tokens, wav_lens, token_lens
                    )
                    loss = (
                        self.hparams.ctc_weight * CTC_loss
                        + (1 - self.hparams.ctc_weight) * loss_transducer
                    )
                # CE for decoder alive
                else:
                    p_ce, logits_transducer, wav_lens = predictions
                    CE_loss = self.hparams.ce_cost(
                        p_ce, tokens_eos, length=token_eos_lens
                    )
                    # Transducer loss use logits from RNN-T model.
                    loss_transducer = self.hparams.transducer_cost(
                        logits_transducer, tokens, wav_lens, token_lens
                    )
                    # NOTE(review): residual weight uses ctc_weight although
                    # this is the CE branch -- possibly intended to be
                    # (1 - ce_weight); kept as-is to preserve behavior.
                    loss = (
                        self.hparams.ce_weight * CE_loss
                        + (1 - self.hparams.ctc_weight) * loss_transducer
                    )
            else:
                logits_transducer, wav_lens = predictions
                # Transducer loss use logits from RNN-T model.
                loss = self.hparams.transducer_cost(
                    logits_transducer, tokens, wav_lens, token_lens
                )
        else:
            logits_transducer, wav_lens, predicted_tokens = predictions
            # Transducer loss use logits from RNN-T model.
            loss = self.hparams.transducer_cost(
                logits_transducer, tokens, wav_lens, token_lens
            )
        if stage != sb.Stage.TRAIN:
            # Decode token terms to words
            predicted_words = self.tokenizer(
                predicted_tokens, task="decode_from_list"
            )
            # Convert indices to words
            target_words = undo_padding(tokens, token_lens)
            target_words = self.tokenizer(target_words, task="decode_from_list")
            self.wer_metric.append(ids, predicted_words, target_words)
            self.cer_metric.append(ids, predicted_words, target_words)
        return loss

    def fit_batch(self, batch):
        """Train the parameters given a single batch in input"""
        predictions = self.compute_forward(batch, sb.Stage.TRAIN)
        loss = self.compute_objectives(predictions, batch, sb.Stage.TRAIN)
        loss.backward()
        if self.check_gradients(loss):
            self.optimizer.step()
        # BUGFIX: zero the gradients unconditionally; previously this ran
        # only when check_gradients() passed, so non-finite gradients leaked
        # into the next batch's accumulation.
        self.optimizer.zero_grad()
        return loss.detach()

    def evaluate_batch(self, batch, stage):
        """Computations needed for validation/test batches"""
        predictions = self.compute_forward(batch, stage=stage)
        with torch.no_grad():
            loss = self.compute_objectives(predictions, batch, stage=stage)
        return loss.detach()

    def on_stage_start(self, stage, epoch):
        """Gets called at the beginning of each epoch"""
        # Fresh error-rate trackers for every eval stage.
        if stage != sb.Stage.TRAIN:
            self.cer_metric = self.hparams.cer_computer()
            self.wer_metric = self.hparams.error_rate_computer()

    def on_stage_end(self, stage, stage_loss, epoch):
        """Gets called at the end of an epoch."""
        # Compute/store important stats
        stage_stats = {"loss": stage_loss}
        if stage == sb.Stage.TRAIN:
            self.train_stats = stage_stats
        else:
            stage_stats["CER"] = self.cer_metric.summarize("error_rate")
            stage_stats["WER"] = self.wer_metric.summarize("error_rate")
        # Perform end-of-iteration things, like annealing, logging, etc.
        if stage == sb.Stage.VALID:
            old_lr, new_lr = self.hparams.lr_annealing(stage_stats["loss"])
            sb.nnet.schedulers.update_learning_rate(self.optimizer, new_lr)
            self.hparams.train_logger.log_stats(
                stats_meta={"epoch": epoch, "lr": old_lr},
                train_stats=self.train_stats,
                valid_stats=stage_stats,
            )
            # Keep only the best-WER checkpoint.
            self.checkpointer.save_and_keep_only(
                meta={"WER": stage_stats["WER"]}, min_keys=["WER"],
            )
        elif stage == sb.Stage.TEST:
            self.hparams.train_logger.log_stats(
                stats_meta={"Epoch loaded": self.hparams.epoch_counter.current},
                test_stats=stage_stats,
            )
            with open(self.hparams.wer_file, "w") as w:
                self.wer_metric.write_stats(w)
# Define custom data procedure
def dataio_prepare(hparams, tokenizer):
    """Prepare the train/valid/test ``DynamicItemDataset`` objects.

    Applies duration filtering/sorting, then registers the audio-loading
    and SentencePiece text pipelines used by the Brain class.
    """
    # 1. Define datasets
    data_folder = hparams["data_folder"]
    train_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["train_csv"], replacements={"data_root": data_folder},
    )
    if hparams["sorting"] == "ascending":
        # we sort training data to speed up training and get better results.
        train_data = train_data.filtered_sorted(
            sort_key="duration",
            key_max_value={"duration": hparams["avoid_if_longer_than"]},
        )
        # when sorting do not shuffle in dataloader ! otherwise is pointless
        hparams["train_dataloader_opts"]["shuffle"] = False
    elif hparams["sorting"] == "descending":
        train_data = train_data.filtered_sorted(
            sort_key="duration",
            reverse=True,
            key_max_value={"duration": hparams["avoid_if_longer_than"]},
        )
        # when sorting do not shuffle in dataloader ! otherwise is pointless
        hparams["train_dataloader_opts"]["shuffle"] = False
    elif hparams["sorting"] == "random":
        pass
    else:
        raise NotImplementedError(
            "sorting must be random, ascending or descending"
        )
    valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["valid_csv"], replacements={"data_root": data_folder},
    )
    # We also sort the validation data so it is faster to validate
    valid_data = valid_data.filtered_sorted(sort_key="duration")
    test_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["test_csv"], replacements={"data_root": data_folder},
    )
    # We also sort the test data so it is faster to test
    test_data = test_data.filtered_sorted(sort_key="duration")
    datasets = [train_data, valid_data, test_data]
    # 2. Define audio pipeline: load the wav and resample to the model rate.
    @sb.utils.data_pipeline.takes("wav")
    @sb.utils.data_pipeline.provides("sig")
    def audio_pipeline(wav):
        info = torchaudio.info(wav)
        sig = sb.dataio.dataio.read_audio(wav)
        resampled = torchaudio.transforms.Resample(
            info.sample_rate, hparams["sample_rate"],
        )(sig)
        return resampled
    sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline)
    # 3. Define text pipeline: encode words to SentencePiece token ids.
    @sb.utils.data_pipeline.takes("wrd")
    @sb.utils.data_pipeline.provides(
        "wrd", "tokens_list", "tokens_bos", "tokens_eos", "tokens"
    )
    def text_pipeline(wrd):
        yield wrd
        tokens_list = tokenizer.sp.encode_as_ids(wrd)
        yield tokens_list
        # Transducer convention here: the blank index doubles as BOS/EOS
        # padding for the prediction-network inputs/targets.
        tokens_bos = torch.LongTensor([hparams["blank_index"]] + (tokens_list))
        yield tokens_bos
        tokens_eos = torch.LongTensor(tokens_list + [hparams["blank_index"]])
        yield tokens_eos
        tokens = torch.LongTensor(tokens_list)
        yield tokens
    sb.dataio.dataset.add_dynamic_item(datasets, text_pipeline)
    # 4. Set output:
    sb.dataio.dataset.set_output_keys(
        datasets, ["id", "sig", "tokens_bos", "tokens_eos", "tokens"],
    )
    return train_data, valid_data, test_data
if __name__ == "__main__":
    # CLI:
    hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])
    with open(hparams_file) as fin:
        hparams = load_hyperpyyaml(fin, overrides)
    # If --distributed_launch then
    # create ddp_group with the right communication protocol
    sb.utils.distributed.ddp_init_group(run_opts)
    # Dataset preparation (parsing CommonVoice)
    from common_voice_prepare import prepare_common_voice  # noqa
    # Create experiment directory
    sb.create_experiment_directory(
        experiment_directory=hparams["output_folder"],
        hyperparams_to_save=hparams_file,
        overrides=overrides,
    )
    # Due to DDP, we do the preparation ONLY on the main python process
    run_on_main(
        prepare_common_voice,
        kwargs={
            "data_folder": hparams["data_folder"],
            "save_folder": hparams["save_folder"],
            "train_tsv_file": hparams["train_tsv_file"],
            "dev_tsv_file": hparams["dev_tsv_file"],
            "test_tsv_file": hparams["test_tsv_file"],
            "accented_letters": hparams["accented_letters"],
            "language": hparams["language"],
            "skip_prep": hparams["skip_prep"],
        },
    )
    # Defining tokenizer and loading it (trained on the train csv if absent)
    tokenizer = SentencePiece(
        model_dir=hparams["save_folder"],
        vocab_size=hparams["output_neurons"],
        annotation_train=hparams["train_csv"],
        annotation_read="wrd",
        model_type=hparams["token_type"],
        character_coverage=hparams["character_coverage"],
    )
    # here we create the datasets objects as well as tokenization and encoding
    train_data, valid_data, test_data = dataio_prepare(hparams, tokenizer)
    # Trainer initialization
    asr_brain = ASR(
        modules=hparams["modules"],
        opt_class=hparams["opt_class"],
        hparams=hparams,
        run_opts=run_opts,
        checkpointer=hparams["checkpointer"],
    )
    # adding objects to trainer:
    asr_brain.tokenizer = tokenizer
    # Training
    asr_brain.fit(
        asr_brain.hparams.epoch_counter,
        train_data,
        valid_data,
        train_loader_kwargs=hparams["train_dataloader_opts"],
        valid_loader_kwargs=hparams["valid_dataloader_opts"],
    )
    # Test: reload the best-WER checkpoint and write the WER report.
    asr_brain.hparams.wer_file = hparams["output_folder"] + "/wer_test.txt"
    asr_brain.evaluate(
        test_data,
        min_key="WER",
        test_loader_kwargs=hparams["test_dataloader_opts"],
    )
| 16,161 | 37.028235 | 89 | py |
speechbrain | speechbrain-main/recipes/CommonVoice/ASR/seq2seq/train_with_wav2vec.py | #!/usr/bin/env python3
import sys
import torch
import logging
import speechbrain as sb
import torchaudio
from hyperpyyaml import load_hyperpyyaml
from speechbrain.tokenizers.SentencePiece import SentencePiece
from speechbrain.utils.data_utils import undo_padding
from speechbrain.utils.distributed import run_on_main
"""Recipe for training a sequence-to-sequence ASR system with CommonVoice.
The system employs a wav2vec2 encoder, a decoder, and an attention mechanism
between them. Decoding is performed with beamsearch.
To run this recipe, do the following:
> python train_with_wav2vec2.py hparams/train_with_wav2vec2.yaml
With the default hyperparameters, the system employs a pretrained wav2vec2 encoder.
The wav2vec2 model is pretrained following the XSLR French HuggingFace model:
facebook/wav2vec2-large-xlsr-53-french
The decoder is based on a standard GRU and BeamSearch (no LM).
The neural network is trained on both CTC and negative-log likelihood
targets and sub-word units estimated with Byte Pairwise Encoding (BPE).
The experiment file is flexible enough to support a large variety of
different systems. By properly changing the parameter files, you can try
different encoders, decoders, tokens (e.g, characters instead of BPE),
training languages (all CommonVoice languages), and many
other possible variations.
Authors
* Titouan Parcollet 2020
"""
logger = logging.getLogger(__name__)
# Define training procedure
class ASR(sb.core.Brain):
    """Seq2seq ASR Brain with a wav2vec2 front-end.

    Uses two optimizers -- one for the wav2vec2 encoder (skipped when it is
    frozen) and one for the rest of the model -- and supports an optional
    mixed-precision training path (``self.auto_mix_prec``).
    """
    def compute_forward(self, batch, stage):
        """Forward computations from the waveform batches to the output probabilities."""
        batch = batch.to(self.device)
        wavs, wav_lens = batch.sig
        tokens_bos, _ = batch.tokens_bos
        wavs, wav_lens = wavs.to(self.device), wav_lens.to(self.device)
        # Waveform-level augmentation is applied only while training.
        if stage == sb.Stage.TRAIN:
            if hasattr(self.hparams, "augmentation"):
                wavs = self.hparams.augmentation(wavs, wav_lens)
        # Forward pass
        feats = self.modules.wav2vec2(wavs, wav_lens)
        x = self.modules.enc(feats)
        e_in = self.modules.emb(tokens_bos)  # y_in bos + tokens
        h, _ = self.modules.dec(e_in, x, wav_lens)
        # Output layer for seq2seq log-probabilities
        logits = self.modules.seq_lin(h)
        p_seq = self.hparams.log_softmax(logits)
        # Compute outputs
        if stage == sb.Stage.TRAIN:
            current_epoch = self.hparams.epoch_counter.current
            # Auxiliary CTC head active only for the first epochs.
            if current_epoch <= self.hparams.number_of_ctc_epochs:
                # Output layer for ctc log-probabilities
                logits = self.modules.ctc_lin(x)
                p_ctc = self.hparams.log_softmax(logits)
                return p_ctc, p_seq, wav_lens
            else:
                return p_seq, wav_lens
        else:
            # Eval: beam-search decoding over encoder states.
            p_tokens, scores = self.hparams.beam_searcher(x, wav_lens)
            return p_seq, wav_lens, p_tokens
    def compute_objectives(self, predictions, batch, stage):
        """Computes the loss (CTC+NLL) given predictions and targets."""
        current_epoch = self.hparams.epoch_counter.current
        # Unpack according to the stage-dependent shape of `predictions`.
        if stage == sb.Stage.TRAIN:
            if current_epoch <= self.hparams.number_of_ctc_epochs:
                p_ctc, p_seq, wav_lens = predictions
            else:
                p_seq, wav_lens = predictions
        else:
            p_seq, wav_lens, predicted_tokens = predictions
        ids = batch.id
        tokens_eos, tokens_eos_lens = batch.tokens_eos
        tokens, tokens_lens = batch.tokens
        loss_seq = self.hparams.seq_cost(
            p_seq, tokens_eos, length=tokens_eos_lens
        )
        # Add ctc loss if necessary
        if (
            stage == sb.Stage.TRAIN
            and current_epoch <= self.hparams.number_of_ctc_epochs
        ):
            loss_ctc = self.hparams.ctc_cost(
                p_ctc, tokens, wav_lens, tokens_lens
            )
            # Convex mix of CTC and seq2seq losses.
            loss = self.hparams.ctc_weight * loss_ctc
            loss += (1 - self.hparams.ctc_weight) * loss_seq
        else:
            loss = loss_seq
        if stage != sb.Stage.TRAIN:
            # Decode token terms to words
            predicted_words = self.tokenizer(
                predicted_tokens, task="decode_from_list"
            )
            # Convert indices to words
            target_words = undo_padding(tokens, tokens_lens)
            target_words = self.tokenizer(target_words, task="decode_from_list")
            self.wer_metric.append(ids, predicted_words, target_words)
            self.cer_metric.append(ids, predicted_words, target_words)
        return loss
    def fit_batch(self, batch):
        """Train the parameters given a single batch in input"""
        # Mixed-precision path: losses are scaled, gradients unscaled
        # before the finite-gradient check, then stepped via the GradScaler.
        if self.auto_mix_prec:
            if not self.hparams.wav2vec2.freeze:
                self.wav2vec_optimizer.zero_grad()
            self.model_optimizer.zero_grad()
            with torch.cuda.amp.autocast():
                outputs = self.compute_forward(batch, sb.Stage.TRAIN)
                loss = self.compute_objectives(outputs, batch, sb.Stage.TRAIN)
            self.scaler.scale(loss).backward()
            if not self.hparams.wav2vec2.freeze:
                self.scaler.unscale_(self.wav2vec_optimizer)
            self.scaler.unscale_(self.model_optimizer)
            if self.check_gradients(loss):
                if not self.hparams.wav2vec2.freeze:
                    self.scaler.step(self.wav2vec_optimizer)
                self.scaler.step(self.model_optimizer)
                self.scaler.update()
        else:
            # Full-precision path: plain backward + conditional step.
            outputs = self.compute_forward(batch, sb.Stage.TRAIN)
            loss = self.compute_objectives(outputs, batch, sb.Stage.TRAIN)
            loss.backward()
            if self.check_gradients(loss):
                if not self.hparams.wav2vec2.freeze:
                    self.wav2vec_optimizer.step()
                self.model_optimizer.step()
            if not self.hparams.wav2vec2.freeze:
                self.wav2vec_optimizer.zero_grad()
            self.model_optimizer.zero_grad()
        return loss.detach()
    def evaluate_batch(self, batch, stage):
        """Computations needed for validation/test batches"""
        predictions = self.compute_forward(batch, stage=stage)
        with torch.no_grad():
            loss = self.compute_objectives(predictions, batch, stage=stage)
        return loss.detach()
    def on_stage_start(self, stage, epoch):
        """Gets called at the beginning of each epoch"""
        # Fresh error-rate trackers for every eval stage.
        if stage != sb.Stage.TRAIN:
            self.cer_metric = self.hparams.cer_computer()
            self.wer_metric = self.hparams.error_rate_computer()
    def on_stage_end(self, stage, stage_loss, epoch):
        """Gets called at the end of an epoch."""
        # Compute/store important stats
        stage_stats = {"loss": stage_loss}
        if stage == sb.Stage.TRAIN:
            self.train_stats = stage_stats
        else:
            stage_stats["CER"] = self.cer_metric.summarize("error_rate")
            stage_stats["WER"] = self.wer_metric.summarize("error_rate")
        # Perform end-of-iteration things, like annealing, logging, etc.
        if stage == sb.Stage.VALID:
            # Separate LR schedules for the model and the wav2vec2 encoder.
            old_lr_model, new_lr_model = self.hparams.lr_annealing_model(
                stage_stats["loss"]
            )
            old_lr_wav2vec, new_lr_wav2vec = self.hparams.lr_annealing_wav2vec(
                stage_stats["loss"]
            )
            sb.nnet.schedulers.update_learning_rate(
                self.model_optimizer, new_lr_model
            )
            if not self.hparams.wav2vec2.freeze:
                sb.nnet.schedulers.update_learning_rate(
                    self.wav2vec_optimizer, new_lr_wav2vec
                )
            self.hparams.train_logger.log_stats(
                stats_meta={
                    "epoch": epoch,
                    "lr_model": old_lr_model,
                    "lr_wav2vec": old_lr_wav2vec,
                },
                train_stats=self.train_stats,
                valid_stats=stage_stats,
            )
            # Keep only the best-WER checkpoint.
            self.checkpointer.save_and_keep_only(
                meta={"WER": stage_stats["WER"]}, min_keys=["WER"],
            )
        elif stage == sb.Stage.TEST:
            self.hparams.train_logger.log_stats(
                stats_meta={"Epoch loaded": self.hparams.epoch_counter.current},
                test_stats=stage_stats,
            )
            with open(self.hparams.wer_file, "w") as w:
                self.wer_metric.write_stats(w)
    def init_optimizers(self):
        "Initializes the wav2vec2 optimizer and model optimizer"
        # The wav2vec2 optimizer exists only when the encoder is trainable.
        if not self.hparams.wav2vec2.freeze:
            self.wav2vec_optimizer = self.hparams.wav2vec_opt_class(
                self.modules.wav2vec2.parameters()
            )
            if self.checkpointer is not None:
                self.checkpointer.add_recoverable(
                    "wav2vec_opt", self.wav2vec_optimizer
                )
        self.model_optimizer = self.hparams.model_opt_class(
            self.hparams.model.parameters()
        )
        if self.checkpointer is not None:
            self.checkpointer.add_recoverable("modelopt", self.model_optimizer)
    def zero_grad(self, set_to_none=False):
        # Zero both optimizers' gradients (wav2vec2 only when trainable).
        self.model_optimizer.zero_grad(set_to_none)
        if not self.hparams.wav2vec2.freeze:
            self.wav2vec_optimizer.zero_grad(set_to_none)
# Define custom data procedure
def dataio_prepare(hparams, tokenizer):
    """Prepare the train/valid/test ``DynamicItemDataset`` objects.

    Applies duration filtering/sorting, then registers the audio-loading
    and SentencePiece text pipelines used by the Brain class.
    """
    # 1. Define datasets
    data_folder = hparams["data_folder"]
    train_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["train_csv"], replacements={"data_root": data_folder},
    )
    if hparams["sorting"] == "ascending":
        # we sort training data to speed up training and get better results.
        train_data = train_data.filtered_sorted(
            sort_key="duration",
            key_max_value={"duration": hparams["avoid_if_longer_than"]},
        )
        # when sorting do not shuffle in dataloader ! otherwise is pointless
        hparams["dataloader_options"]["shuffle"] = False
    elif hparams["sorting"] == "descending":
        train_data = train_data.filtered_sorted(
            sort_key="duration",
            reverse=True,
            key_max_value={"duration": hparams["avoid_if_longer_than"]},
        )
        # when sorting do not shuffle in dataloader ! otherwise is pointless
        hparams["dataloader_options"]["shuffle"] = False
    elif hparams["sorting"] == "random":
        pass
    else:
        raise NotImplementedError(
            "sorting must be random, ascending or descending"
        )
    valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["valid_csv"], replacements={"data_root": data_folder},
    )
    # We also sort the validation data so it is faster to validate
    valid_data = valid_data.filtered_sorted(sort_key="duration")
    test_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["test_csv"], replacements={"data_root": data_folder},
    )
    # We also sort the validation data so it is faster to validate
    test_data = test_data.filtered_sorted(sort_key="duration")
    datasets = [train_data, valid_data, test_data]
    # 2. Define audio pipeline: load the wav and resample to the model rate.
    @sb.utils.data_pipeline.takes("wav")
    @sb.utils.data_pipeline.provides("sig")
    def audio_pipeline(wav):
        info = torchaudio.info(wav)
        sig = sb.dataio.dataio.read_audio(wav)
        resampled = torchaudio.transforms.Resample(
            info.sample_rate, hparams["sample_rate"],
        )(sig)
        return resampled
    sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline)
    # 3. Define text pipeline: encode words, then add BOS/EOS variants.
    @sb.utils.data_pipeline.takes("wrd")
    @sb.utils.data_pipeline.provides(
        "tokens_list", "tokens_bos", "tokens_eos", "tokens"
    )
    def text_pipeline(wrd):
        tokens_list = tokenizer.sp.encode_as_ids(wrd)
        yield tokens_list
        tokens_bos = torch.LongTensor([hparams["bos_index"]] + (tokens_list))
        yield tokens_bos
        tokens_eos = torch.LongTensor(tokens_list + [hparams["eos_index"]])
        yield tokens_eos
        tokens = torch.LongTensor(tokens_list)
        yield tokens
    sb.dataio.dataset.add_dynamic_item(datasets, text_pipeline)
    # 4. Set output:
    sb.dataio.dataset.set_output_keys(
        datasets, ["id", "sig", "tokens_bos", "tokens_eos", "tokens"],
    )
    return train_data, valid_data, test_data
if __name__ == "__main__":
    # Load hyperparameters file with command-line overrides
    hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])
    with open(hparams_file) as fin:
        hparams = load_hyperpyyaml(fin, overrides)
    # If --distributed_launch then
    # create ddp_group with the right communication protocol
    sb.utils.distributed.ddp_init_group(run_opts)
    # Dataset preparation (parsing CommonVoice)
    from common_voice_prepare import prepare_common_voice  # noqa
    # Create experiment directory
    sb.create_experiment_directory(
        experiment_directory=hparams["output_folder"],
        hyperparams_to_save=hparams_file,
        overrides=overrides,
    )
    # Due to DDP, we do the preparation ONLY on the main python process
    run_on_main(
        prepare_common_voice,
        kwargs={
            "data_folder": hparams["data_folder"],
            "save_folder": hparams["save_folder"],
            "train_tsv_file": hparams["train_tsv_file"],
            "dev_tsv_file": hparams["dev_tsv_file"],
            "test_tsv_file": hparams["test_tsv_file"],
            "accented_letters": hparams["accented_letters"],
            "language": hparams["language"],
            "skip_prep": hparams["skip_prep"],
        },
    )
    # Defining tokenizer and loading it
    tokenizer = SentencePiece(
        model_dir=hparams["save_folder"],
        vocab_size=hparams["output_neurons"],
        annotation_train=hparams["train_csv"],
        annotation_read="wrd",
        model_type=hparams["token_type"],
        character_coverage=hparams["character_coverage"],
    )
    # Create the datasets objects as well as tokenization and encoding :-D
    train_data, valid_data, test_data = dataio_prepare(hparams, tokenizer)
    # Trainer initialization. No opt_class is passed here: this Brain
    # subclass builds its own optimizers in init_optimizers().
    asr_brain = ASR(
        modules=hparams["modules"],
        hparams=hparams,
        run_opts=run_opts,
        checkpointer=hparams["checkpointer"],
    )
    # Adding objects to trainer.
    asr_brain.tokenizer = tokenizer
    # Training
    asr_brain.fit(
        asr_brain.hparams.epoch_counter,
        train_data,
        valid_data,
        train_loader_kwargs=hparams["dataloader_options"],
        valid_loader_kwargs=hparams["test_dataloader_options"],
    )
    # Test: reload the best-WER checkpoint and write the WER report.
    asr_brain.hparams.wer_file = hparams["output_folder"] + "/wer_test.txt"
    asr_brain.evaluate(
        test_data,
        min_key="WER",
        test_loader_kwargs=hparams["test_dataloader_options"],
    )
| 15,201 | 35.719807 | 89 | py |
speechbrain | speechbrain-main/recipes/CommonVoice/ASR/seq2seq/train.py | #!/usr/bin/env python3
import sys
import torch
import logging
import speechbrain as sb
import torchaudio
from hyperpyyaml import load_hyperpyyaml
from speechbrain.tokenizers.SentencePiece import SentencePiece
from speechbrain.utils.data_utils import undo_padding
from speechbrain.utils.distributed import run_on_main
"""Recipe for training a sequence-to-sequence ASR system with CommonVoice.
The system employs an encoder, a decoder, and an attention mechanism
between them. Decoding is performed with beamsearch.
To run this recipe, do the following:
> python train.py hparams/train.yaml
With the default hyperparameters, the system employs a CRDNN encoder.
The decoder is based on a standard GRU and BeamSearch (no LM).
The neural network is trained on both CTC and negative-log likelihood
targets and sub-word units estimated with Byte Pairwise Encoding (BPE).
The experiment file is flexible enough to support a large variety of
different systems. By properly changing the parameter files, you can try
different encoders, decoders, tokens (e.g, characters instead of BPE),
training languages (all CommonVoice languages), and many
other possible variations.
Authors
* Titouan Parcollet 2020
"""
logger = logging.getLogger(__name__)
# Define training procedure
class ASR(sb.core.Brain):
    """Seq2seq ASR Brain (CRDNN-style encoder + attentional decoder) trained
    with a joint CTC/attention loss on CommonVoice.

    During the first ``number_of_ctc_epochs`` epochs the loss is a weighted
    sum of CTC and NLL; afterwards only the seq2seq NLL is used. At
    validation/test time, hypotheses are produced with beam search.
    """

    def compute_forward(self, batch, stage):
        """Forward computations from the waveform batches to the output probabilities."""
        batch = batch.to(self.device)
        wavs, wav_lens = batch.sig
        tokens_bos, _ = batch.tokens_bos
        wavs, wav_lens = wavs.to(self.device), wav_lens.to(self.device)
        # Forward pass
        feats = self.hparams.compute_features(wavs)
        feats = self.modules.normalize(feats, wav_lens)
        ## Add augmentation if specified (training only)
        if stage == sb.Stage.TRAIN:
            if hasattr(self.hparams, "augmentation"):
                feats = self.hparams.augmentation(feats)
        x = self.modules.enc(feats.detach())
        e_in = self.modules.emb(tokens_bos)  # y_in bos + tokens
        h, _ = self.modules.dec(e_in, x, wav_lens)
        # Output layer for seq2seq log-probabilities
        logits = self.modules.seq_lin(h)
        p_seq = self.hparams.log_softmax(logits)
        # Compute outputs. Note the return arity differs by stage/epoch, and
        # compute_objectives unpacks accordingly.
        if stage == sb.Stage.TRAIN:
            current_epoch = self.hparams.epoch_counter.current
            if current_epoch <= self.hparams.number_of_ctc_epochs:
                # Output layer for ctc log-probabilities
                logits = self.modules.ctc_lin(x)
                p_ctc = self.hparams.log_softmax(logits)
                return p_ctc, p_seq, wav_lens
            else:
                return p_seq, wav_lens
        else:
            # Valid/test: also decode with beam search for WER/CER scoring.
            p_tokens, scores = self.hparams.beam_searcher(x, wav_lens)
            return p_seq, wav_lens, p_tokens

    def compute_objectives(self, predictions, batch, stage):
        """Computes the loss (CTC+NLL) given predictions and targets."""
        current_epoch = self.hparams.epoch_counter.current
        # Unpack according to the stage-dependent arity of compute_forward.
        if stage == sb.Stage.TRAIN:
            if current_epoch <= self.hparams.number_of_ctc_epochs:
                p_ctc, p_seq, wav_lens = predictions
            else:
                p_seq, wav_lens = predictions
        else:
            p_seq, wav_lens, predicted_tokens = predictions
        ids = batch.id
        tokens_eos, tokens_eos_lens = batch.tokens_eos
        tokens, tokens_lens = batch.tokens
        loss_seq = self.hparams.seq_cost(
            p_seq, tokens_eos, length=tokens_eos_lens
        )
        # Add ctc loss if necessary (only during the CTC warm-up epochs)
        if (
            stage == sb.Stage.TRAIN
            and current_epoch <= self.hparams.number_of_ctc_epochs
        ):
            loss_ctc = self.hparams.ctc_cost(
                p_ctc, tokens, wav_lens, tokens_lens
            )
            loss = self.hparams.ctc_weight * loss_ctc
            loss += (1 - self.hparams.ctc_weight) * loss_seq
        else:
            loss = loss_seq
        if stage != sb.Stage.TRAIN:
            # Decode token terms to words
            predicted_words = self.tokenizer(
                predicted_tokens, task="decode_from_list"
            )
            # Convert indices to words
            target_words = undo_padding(tokens, tokens_lens)
            target_words = self.tokenizer(target_words, task="decode_from_list")
            self.wer_metric.append(ids, predicted_words, target_words)
            self.cer_metric.append(ids, predicted_words, target_words)
        return loss

    def fit_batch(self, batch):
        """Train the parameters given a single batch in input"""
        predictions = self.compute_forward(batch, sb.Stage.TRAIN)
        loss = self.compute_objectives(predictions, batch, sb.Stage.TRAIN)
        loss.backward()
        # check_gradients clips gradients and returns False on non-finite
        # loss, in which case the update is skipped.
        if self.check_gradients(loss):
            self.optimizer.step()
        self.optimizer.zero_grad()
        return loss.detach()

    def evaluate_batch(self, batch, stage):
        """Computations needed for validation/test batches"""
        # FIX: run the forward pass under no_grad as well. Previously only
        # compute_objectives was wrapped, so the whole forward (including
        # beam search inputs) built an autograd graph for nothing, wasting
        # memory at eval time. The transformer recipe already does it this way.
        with torch.no_grad():
            predictions = self.compute_forward(batch, stage=stage)
            loss = self.compute_objectives(predictions, batch, stage=stage)
        return loss.detach()

    def on_stage_start(self, stage, epoch):
        """Gets called at the beginning of each epoch"""
        if stage != sb.Stage.TRAIN:
            # Fresh error-rate trackers for each valid/test pass.
            self.cer_metric = self.hparams.cer_computer()
            self.wer_metric = self.hparams.error_rate_computer()

    def on_stage_end(self, stage, stage_loss, epoch):
        """Gets called at the end of an epoch."""
        # Compute/store important stats
        stage_stats = {"loss": stage_loss}
        if stage == sb.Stage.TRAIN:
            self.train_stats = stage_stats
        else:
            stage_stats["CER"] = self.cer_metric.summarize("error_rate")
            stage_stats["WER"] = self.wer_metric.summarize("error_rate")
        # Perform end-of-iteration things, like annealing, logging, etc.
        if stage == sb.Stage.VALID:
            old_lr, new_lr = self.hparams.lr_annealing(stage_stats["loss"])
            sb.nnet.schedulers.update_learning_rate(self.optimizer, new_lr)
            self.hparams.train_logger.log_stats(
                stats_meta={"epoch": epoch, "lr": old_lr},
                train_stats=self.train_stats,
                valid_stats=stage_stats,
            )
            # Keep only the best checkpoint according to WER.
            self.checkpointer.save_and_keep_only(
                meta={"WER": stage_stats["WER"]}, min_keys=["WER"],
            )
        elif stage == sb.Stage.TEST:
            self.hparams.train_logger.log_stats(
                stats_meta={"Epoch loaded": self.hparams.epoch_counter.current},
                test_stats=stage_stats,
            )
            with open(self.hparams.wer_file, "w") as w:
                self.wer_metric.write_stats(w)
# Define custom data procedure
def dataio_prepare(hparams, tokenizer):
    """This function prepares the datasets to be used in the brain class.
    It also defines the data processing pipeline through user-defined functions.

    Arguments
    ---------
    hparams : dict
        Loaded hyperparameters. NOTE: mutated in place — dataloader shuffling
        is disabled when the training data is sorted.
    tokenizer : SentencePiece
        Trained tokenizer used to encode transcripts into token ids.

    Returns
    -------
    tuple
        (train_data, valid_data, test_data) DynamicItemDataset objects.
    """
    # 1. Define datasets
    data_folder = hparams["data_folder"]
    train_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["train_csv"], replacements={"data_root": data_folder},
    )
    if hparams["sorting"] == "ascending":
        # we sort training data to speed up training and get better results.
        # Utterances longer than avoid_if_longer_than seconds are dropped.
        train_data = train_data.filtered_sorted(
            sort_key="duration",
            key_max_value={"duration": hparams["avoid_if_longer_than"]},
        )
        # when sorting do not shuffle in dataloader ! otherwise is pointless
        hparams["dataloader_options"]["shuffle"] = False
    elif hparams["sorting"] == "descending":
        train_data = train_data.filtered_sorted(
            sort_key="duration",
            reverse=True,
            key_max_value={"duration": hparams["avoid_if_longer_than"]},
        )
        # when sorting do not shuffle in dataloader ! otherwise is pointless
        hparams["dataloader_options"]["shuffle"] = False
    elif hparams["sorting"] == "random":
        pass
    else:
        raise NotImplementedError(
            "sorting must be random, ascending or descending"
        )
    valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["valid_csv"], replacements={"data_root": data_folder},
    )
    # We also sort the validation data so it is faster to validate
    valid_data = valid_data.filtered_sorted(sort_key="duration")
    test_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["test_csv"], replacements={"data_root": data_folder},
    )
    # We also sort the test data so it is faster to evaluate
    test_data = test_data.filtered_sorted(sort_key="duration")
    datasets = [train_data, valid_data, test_data]
    # 2. Define audio pipeline:
    @sb.utils.data_pipeline.takes("wav")
    @sb.utils.data_pipeline.provides("sig")
    def audio_pipeline(wav):
        """Load one audio file, downmix to mono if needed, and resample."""
        info = torchaudio.info(wav)
        sig = sb.dataio.dataio.read_audio(wav)
        if info.num_channels > 1:
            # Downmix multi-channel audio by averaging; assumes channels are
            # on dim 1 of the loaded signal — TODO confirm for read_audio.
            sig = torch.mean(sig, dim=1)
        resampled = torchaudio.transforms.Resample(
            info.sample_rate, hparams["sample_rate"],
        )(sig)
        return resampled
    sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline)
    # 3. Define text pipeline:
    @sb.utils.data_pipeline.takes("wrd")
    @sb.utils.data_pipeline.provides(
        "tokens_list", "tokens_bos", "tokens_eos", "tokens"
    )
    def text_pipeline(wrd):
        """Encode one transcript; yield order must match `provides` above."""
        tokens_list = tokenizer.sp.encode_as_ids(wrd)
        yield tokens_list
        tokens_bos = torch.LongTensor([hparams["bos_index"]] + (tokens_list))
        yield tokens_bos
        tokens_eos = torch.LongTensor(tokens_list + [hparams["eos_index"]])
        yield tokens_eos
        tokens = torch.LongTensor(tokens_list)
        yield tokens
    sb.dataio.dataset.add_dynamic_item(datasets, text_pipeline)
    # 4. Set output:
    sb.dataio.dataset.set_output_keys(
        datasets, ["id", "sig", "tokens_bos", "tokens_eos", "tokens"],
    )
    return train_data, valid_data, test_data
if __name__ == "__main__":
    # Load hyperparameters file with command-line overrides
    hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])
    with open(hparams_file) as fin:
        hparams = load_hyperpyyaml(fin, overrides)
    # If --distributed_launch then
    # create ddp_group with the right communication protocol
    sb.utils.distributed.ddp_init_group(run_opts)
    # Dataset preparation (parsing CommonVoice)
    from common_voice_prepare import prepare_common_voice  # noqa
    # Create experiment directory
    sb.create_experiment_directory(
        experiment_directory=hparams["output_folder"],
        hyperparams_to_save=hparams_file,
        overrides=overrides,
    )
    # Due to DDP, we do the preparation ONLY on the main python process
    run_on_main(
        prepare_common_voice,
        kwargs={
            "data_folder": hparams["data_folder"],
            "save_folder": hparams["save_folder"],
            "train_tsv_file": hparams["train_tsv_file"],
            "dev_tsv_file": hparams["dev_tsv_file"],
            "test_tsv_file": hparams["test_tsv_file"],
            "accented_letters": hparams["accented_letters"],
            "language": hparams["language"],
            "skip_prep": hparams["skip_prep"],
        },
    )
    # Defining tokenizer and loading it (trained on the prepared train csv)
    tokenizer = SentencePiece(
        model_dir=hparams["save_folder"],
        vocab_size=hparams["output_neurons"],
        annotation_train=hparams["train_csv"],
        annotation_read="wrd",
        model_type=hparams["token_type"],
        character_coverage=hparams["character_coverage"],
    )
    # Create the datasets objects as well as tokenization and encoding :-D
    train_data, valid_data, test_data = dataio_prepare(hparams, tokenizer)
    # Trainer initialization
    asr_brain = ASR(
        modules=hparams["modules"],
        hparams=hparams,
        run_opts=run_opts,
        opt_class=hparams["opt_class"],
        checkpointer=hparams["checkpointer"],
    )
    # Adding objects to trainer.
    asr_brain.tokenizer = tokenizer
    # Training
    asr_brain.fit(
        asr_brain.hparams.epoch_counter,
        train_data,
        valid_data,
        train_loader_kwargs=hparams["dataloader_options"],
        valid_loader_kwargs=hparams["test_dataloader_options"],
    )
    # Test: wer_file must be set before evaluate() so on_stage_end can write
    # the detailed WER report; min_key loads the best-WER checkpoint.
    asr_brain.hparams.wer_file = hparams["output_folder"] + "/wer_test.txt"
    asr_brain.evaluate(
        test_data,
        min_key="WER",
        test_loader_kwargs=hparams["test_dataloader_options"],
    )
| 12,748 | 36.061047 | 89 | py |
speechbrain | speechbrain-main/recipes/CommonVoice/ASR/CTC/train_with_wav2vec.py | #!/usr/bin/env python3
import sys
import torch
import logging
import speechbrain as sb
import torchaudio
from hyperpyyaml import load_hyperpyyaml
from speechbrain.tokenizers.SentencePiece import SentencePiece
from speechbrain.utils.data_utils import undo_padding
from speechbrain.utils.distributed import run_on_main
"""Recipe for training a sequence-to-sequence ASR system with CommonVoice.
The system employs a wav2vec2 encoder and a CTC decoder.
Decoding is performed with greedy decoding (will be extended to beam search).
To run this recipe, do the following:
> python train_with_wav2vec2.py hparams/train_with_wav2vec2.yaml
With the default hyperparameters, the system employs a pretrained wav2vec2 encoder.
The wav2vec2 model is pretrained following the model given in the hprams file.
It may be dependent on the language.
The neural network is trained with CTC on sub-word units estimated with
Byte Pairwise Encoding (BPE).
The experiment file is flexible enough to support a large variety of
different systems. By properly changing the parameter files, you can try
different encoders, decoders, tokens (e.g, characters instead of BPE),
training languages (all CommonVoice languages), and many
other possible variations.
Authors
* Titouan Parcollet 2021
"""
logger = logging.getLogger(__name__)
# Define training procedure
class ASR(sb.core.Brain):
    """CTC ASR Brain on top of a (possibly frozen) wav2vec2 encoder.

    Uses two optimizers (one for wav2vec2, one for the downstream model),
    gradient accumulation, optional AMP, and greedy CTC decoding for WER/CER.
    """

    def compute_forward(self, batch, stage):
        """Forward computations from the waveform batches to the output probabilities."""
        batch = batch.to(self.device)
        wavs, wav_lens = batch.sig
        tokens_bos, _ = batch.tokens_bos  # unused by CTC; kept for parity
        wavs, wav_lens = wavs.to(self.device), wav_lens.to(self.device)
        if stage == sb.Stage.TRAIN:
            if hasattr(self.hparams, "augmentation"):
                wavs = self.hparams.augmentation(wavs, wav_lens)
        # Forward pass
        feats = self.modules.wav2vec2(wavs, wav_lens)
        x = self.modules.enc(feats)
        logits = self.modules.ctc_lin(x)
        p_ctc = self.hparams.log_softmax(logits)
        return p_ctc, wav_lens

    def compute_objectives(self, predictions, batch, stage):
        """Computes the loss (CTC) given predictions and targets."""
        p_ctc, wav_lens = predictions
        ids = batch.id
        tokens_eos, tokens_eos_lens = batch.tokens_eos
        tokens, tokens_lens = batch.tokens
        loss = self.hparams.ctc_cost(p_ctc, tokens, wav_lens, tokens_lens)
        if stage != sb.Stage.TRAIN:
            # Decode token terms to words (greedy CTC decoding)
            sequence = sb.decoders.ctc_greedy_decode(
                p_ctc, wav_lens, blank_id=self.hparams.blank_index
            )
            predicted_words = self.tokenizer(sequence, task="decode_from_list")
            # Convert indices to words
            target_words = undo_padding(tokens, tokens_lens)
            target_words = self.tokenizer(target_words, task="decode_from_list")
            self.wer_metric.append(ids, predicted_words, target_words)
            self.cer_metric.append(ids, predicted_words, target_words)
        return loss

    def fit_batch(self, batch):
        """Train the parameters given a single batch in input"""
        # Optimizers step only every grad_accumulation_factor micro-batches.
        should_step = self.step % self.grad_accumulation_factor == 0
        # Managing automatic mixed precision
        # TOFIX: CTC fine-tuning currently is unstable
        # This is certainly due to CTC being done in fp16 instead of fp32
        if self.auto_mix_prec:
            with torch.cuda.amp.autocast():
                with self.no_sync():
                    outputs = self.compute_forward(batch, sb.Stage.TRAIN)
                loss = self.compute_objectives(outputs, batch, sb.Stage.TRAIN)
            # Skip DDP gradient sync on accumulation steps.
            with self.no_sync(not should_step):
                self.scaler.scale(
                    loss / self.grad_accumulation_factor
                ).backward()
            if should_step:
                if not self.hparams.wav2vec2.freeze:
                    self.scaler.unscale_(self.wav2vec_optimizer)
                self.scaler.unscale_(self.model_optimizer)
                if self.check_gradients(loss):
                    if not self.hparams.wav2vec2.freeze:
                        # wav2vec2 is only updated after the warm-up period.
                        if self.optimizer_step >= self.hparams.warmup_steps:
                            self.scaler.step(self.wav2vec_optimizer)
                    self.scaler.step(self.model_optimizer)
                    self.scaler.update()
                self.zero_grad()
                self.optimizer_step += 1
        else:
            # This is mandatory because HF models have a weird behavior with DDP
            # on the forward pass
            with self.no_sync():
                outputs = self.compute_forward(batch, sb.Stage.TRAIN)
            loss = self.compute_objectives(outputs, batch, sb.Stage.TRAIN)
            with self.no_sync(not should_step):
                (loss / self.grad_accumulation_factor).backward()
            if should_step:
                if self.check_gradients(loss):
                    if not self.hparams.wav2vec2.freeze:
                        if self.optimizer_step >= self.hparams.warmup_steps:
                            self.wav2vec_optimizer.step()
                    self.model_optimizer.step()
                self.zero_grad()
                self.optimizer_step += 1
        self.on_fit_batch_end(batch, outputs, loss, should_step)
        return loss.detach().cpu()

    def evaluate_batch(self, batch, stage):
        """Computations needed for validation/test batches"""
        # FIX: run the forward pass under no_grad as well. Previously only
        # compute_objectives was wrapped, so the wav2vec2 forward built an
        # autograd graph for nothing, wasting memory at eval time. The
        # transformer recipe already does it this way.
        with torch.no_grad():
            predictions = self.compute_forward(batch, stage=stage)
            loss = self.compute_objectives(predictions, batch, stage=stage)
        return loss.detach()

    def on_stage_start(self, stage, epoch):
        """Gets called at the beginning of each epoch"""
        if stage != sb.Stage.TRAIN:
            # Fresh error-rate trackers for each valid/test pass.
            self.cer_metric = self.hparams.cer_computer()
            self.wer_metric = self.hparams.error_rate_computer()

    def on_stage_end(self, stage, stage_loss, epoch):
        """Gets called at the end of an epoch."""
        # Compute/store important stats
        stage_stats = {"loss": stage_loss}
        if stage == sb.Stage.TRAIN:
            self.train_stats = stage_stats
        else:
            stage_stats["CER"] = self.cer_metric.summarize("error_rate")
            stage_stats["WER"] = self.wer_metric.summarize("error_rate")
        # Perform end-of-iteration things, like annealing, logging, etc.
        if stage == sb.Stage.VALID:
            old_lr_model, new_lr_model = self.hparams.lr_annealing_model(
                stage_stats["loss"]
            )
            old_lr_wav2vec, new_lr_wav2vec = self.hparams.lr_annealing_wav2vec(
                stage_stats["loss"]
            )
            sb.nnet.schedulers.update_learning_rate(
                self.model_optimizer, new_lr_model
            )
            if not self.hparams.wav2vec2.freeze:
                sb.nnet.schedulers.update_learning_rate(
                    self.wav2vec_optimizer, new_lr_wav2vec
                )
            self.hparams.train_logger.log_stats(
                stats_meta={
                    "epoch": epoch,
                    "lr_model": old_lr_model,
                    "lr_wav2vec": old_lr_wav2vec,
                },
                train_stats=self.train_stats,
                valid_stats=stage_stats,
            )
            # Keep only the best checkpoint according to WER.
            self.checkpointer.save_and_keep_only(
                meta={"WER": stage_stats["WER"]}, min_keys=["WER"],
            )
        elif stage == sb.Stage.TEST:
            self.hparams.train_logger.log_stats(
                stats_meta={"Epoch loaded": self.hparams.epoch_counter.current},
                test_stats=stage_stats,
            )
            with open(self.hparams.wer_file, "w") as w:
                self.wer_metric.write_stats(w)

    def init_optimizers(self):
        "Initializes the wav2vec2 optimizer and model optimizer"
        # If the wav2vec encoder is unfrozen, we create the optimizer
        if not self.hparams.wav2vec2.freeze:
            self.wav2vec_optimizer = self.hparams.wav2vec_opt_class(
                self.modules.wav2vec2.parameters()
            )
            if self.checkpointer is not None:
                self.checkpointer.add_recoverable(
                    "wav2vec_opt", self.wav2vec_optimizer
                )
        self.model_optimizer = self.hparams.model_opt_class(
            self.hparams.model.parameters()
        )
        if self.checkpointer is not None:
            self.checkpointer.add_recoverable("modelopt", self.model_optimizer)

    def zero_grad(self, set_to_none=False):
        """Zeroes the gradients of both optimizers."""
        if not self.hparams.wav2vec2.freeze:
            self.wav2vec_optimizer.zero_grad(set_to_none)
        self.model_optimizer.zero_grad(set_to_none)
# Define custom data procedure
def dataio_prepare(hparams, tokenizer):
    """This function prepares the datasets to be used in the brain class.
    It also defines the data processing pipeline through user-defined functions.

    Arguments
    ---------
    hparams : dict
        Loaded hyperparameters. NOTE: mutated in place — dataloader shuffling
        is disabled when the training data is sorted.
    tokenizer : SentencePiece
        Trained tokenizer used to encode transcripts into token ids.

    Returns
    -------
    tuple
        (train_data, valid_data, test_data) DynamicItemDataset objects.
    """
    # 1. Define datasets
    data_folder = hparams["data_folder"]
    train_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["train_csv"], replacements={"data_root": data_folder},
    )
    if hparams["sorting"] == "ascending":
        # we sort training data to speed up training and get better results.
        # Utterances longer than avoid_if_longer_than seconds are dropped.
        train_data = train_data.filtered_sorted(
            sort_key="duration",
            key_max_value={"duration": hparams["avoid_if_longer_than"]},
        )
        # when sorting do not shuffle in dataloader ! otherwise is pointless
        hparams["dataloader_options"]["shuffle"] = False
    elif hparams["sorting"] == "descending":
        train_data = train_data.filtered_sorted(
            sort_key="duration",
            reverse=True,
            key_max_value={"duration": hparams["avoid_if_longer_than"]},
        )
        # when sorting do not shuffle in dataloader ! otherwise is pointless
        hparams["dataloader_options"]["shuffle"] = False
    elif hparams["sorting"] == "random":
        pass
    else:
        raise NotImplementedError(
            "sorting must be random, ascending or descending"
        )
    valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["valid_csv"], replacements={"data_root": data_folder},
    )
    # We also sort the validation data so it is faster to validate
    valid_data = valid_data.filtered_sorted(sort_key="duration")
    test_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["test_csv"], replacements={"data_root": data_folder},
    )
    # We also sort the test data so it is faster to evaluate
    test_data = test_data.filtered_sorted(sort_key="duration")
    datasets = [train_data, valid_data, test_data]
    # 2. Define audio pipeline:
    @sb.utils.data_pipeline.takes("wav")
    @sb.utils.data_pipeline.provides("sig")
    def audio_pipeline(wav):
        """Load one audio file and resample it to the target sample rate."""
        info = torchaudio.info(wav)
        sig = sb.dataio.dataio.read_audio(wav)
        resampled = torchaudio.transforms.Resample(
            info.sample_rate, hparams["sample_rate"],
        )(sig)
        return resampled
    sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline)
    # 3. Define text pipeline:
    @sb.utils.data_pipeline.takes("wrd")
    @sb.utils.data_pipeline.provides(
        "tokens_list", "tokens_bos", "tokens_eos", "tokens"
    )
    def text_pipeline(wrd):
        """Encode one transcript; yield order must match `provides` above."""
        tokens_list = tokenizer.sp.encode_as_ids(wrd)
        yield tokens_list
        tokens_bos = torch.LongTensor([hparams["bos_index"]] + (tokens_list))
        yield tokens_bos
        tokens_eos = torch.LongTensor(tokens_list + [hparams["eos_index"]])
        yield tokens_eos
        tokens = torch.LongTensor(tokens_list)
        yield tokens
    sb.dataio.dataset.add_dynamic_item(datasets, text_pipeline)
    # 4. Set output:
    sb.dataio.dataset.set_output_keys(
        datasets, ["id", "sig", "tokens_bos", "tokens_eos", "tokens"],
    )
    return train_data, valid_data, test_data
if __name__ == "__main__":
    # Load hyperparameters file with command-line overrides
    hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])
    with open(hparams_file) as fin:
        hparams = load_hyperpyyaml(fin, overrides)
    # If --distributed_launch then
    # create ddp_group with the right communication protocol
    sb.utils.distributed.ddp_init_group(run_opts)
    # Dataset preparation (parsing CommonVoice)
    from common_voice_prepare import prepare_common_voice  # noqa
    # Create experiment directory
    sb.create_experiment_directory(
        experiment_directory=hparams["output_folder"],
        hyperparams_to_save=hparams_file,
        overrides=overrides,
    )
    # Due to DDP, we do the preparation ONLY on the main python process
    run_on_main(
        prepare_common_voice,
        kwargs={
            "data_folder": hparams["data_folder"],
            "save_folder": hparams["save_folder"],
            "train_tsv_file": hparams["train_tsv_file"],
            "dev_tsv_file": hparams["dev_tsv_file"],
            "test_tsv_file": hparams["test_tsv_file"],
            "accented_letters": hparams["accented_letters"],
            "language": hparams["language"],
            "skip_prep": hparams["skip_prep"],
        },
    )
    # Defining tokenizer and loading it (trained on the prepared train csv)
    tokenizer = SentencePiece(
        model_dir=hparams["save_folder"],
        vocab_size=hparams["output_neurons"],
        annotation_train=hparams["train_csv"],
        annotation_read="wrd",
        model_type=hparams["token_type"],
        character_coverage=hparams["character_coverage"],
    )
    # Create the datasets objects as well as tokenization and encoding :-D
    train_data, valid_data, test_data = dataio_prepare(hparams, tokenizer)
    # Trainer initialization. No opt_class here: this Brain defines its own
    # init_optimizers (separate wav2vec2 and model optimizers).
    asr_brain = ASR(
        modules=hparams["modules"],
        hparams=hparams,
        run_opts=run_opts,
        checkpointer=hparams["checkpointer"],
    )
    # Adding objects to trainer.
    asr_brain.tokenizer = tokenizer
    # Training
    asr_brain.fit(
        asr_brain.hparams.epoch_counter,
        train_data,
        valid_data,
        train_loader_kwargs=hparams["dataloader_options"],
        valid_loader_kwargs=hparams["test_dataloader_options"],
    )
    # Test: wer_file must be set before evaluate() so on_stage_end can write
    # the detailed WER report; min_key loads the best-WER checkpoint.
    asr_brain.hparams.wer_file = hparams["output_folder"] + "/wer_test.txt"
    asr_brain.evaluate(
        test_data,
        min_key="WER",
        test_loader_kwargs=hparams["test_dataloader_options"],
    )
| 14,594 | 36.51928 | 89 | py |
speechbrain | speechbrain-main/recipes/CommonVoice/ASR/transformer/train.py | #!/usr/bin/env python3
"""Recipe for training a Transformer ASR system with CommonVoice
The system employs an encoder, a decoder, and an attention mechanism
between them. Decoding is performed with (CTC/Att joint) beamsearch.
To run this recipe, do the following:
> python train.py hparams/transformer.yaml
With the default hyperparameters, the system employs a convolutional frontend (ContextNet) and a transformer.
The decoder is based on a Transformer decoder.
The neural network is trained on both CTC and negative-log likelihood
targets and sub-word units estimated with Byte Pairwise Encoding (BPE)
are used as basic recognition tokens.
The experiment file is flexible enough to support a large variety of
different systems. By properly changing the parameter files, you can try
different encoders, decoders, tokens (e.g, characters instead of BPE),
training split (e.g, train-clean 100 rather than the full one), and many
other possible variations.
Authors
* Titouan Parcollet 2021
* Jianyuan Zhong 2020
"""
import sys
import torch
import torchaudio
import logging
import speechbrain as sb
from hyperpyyaml import load_hyperpyyaml
from speechbrain.tokenizers.SentencePiece import SentencePiece
from speechbrain.utils.distributed import run_on_main
from speechbrain.utils.data_utils import undo_padding
logger = logging.getLogger(__name__)
# Define training procedure
class ASR(sb.core.Brain):
    """Transformer ASR Brain: convolutional frontend + Transformer
    encoder/decoder trained with a joint CTC/attention loss; Adam is used for
    the first ``stage_one_epochs`` epochs and SGD afterwards.
    """

    def compute_forward(self, batch, stage):
        """Forward computations from the waveform batches to the output probabilities."""
        batch = batch.to(self.device)
        wavs, wav_lens = batch.sig
        wavs, wav_lens = wavs.to(self.device), wav_lens.to(self.device)
        tokens_bos, _ = batch.tokens_bos
        # compute features
        feats = self.hparams.compute_features(wavs)
        current_epoch = self.hparams.epoch_counter.current
        feats = self.hparams.normalize(feats, wav_lens, epoch=current_epoch)
        # Augmentation (training only)
        if stage == sb.Stage.TRAIN:
            if hasattr(self.hparams, "augmentation"):
                feats = self.hparams.augmentation(feats)
        # forward modules
        src = self.modules.CNN(feats)
        enc_out, pred = self.modules.Transformer(
            src, tokens_bos, wav_lens, pad_idx=self.hparams.pad_index
        )
        # output layer for ctc log-probabilities
        logits = self.modules.ctc_lin(enc_out)
        p_ctc = self.hparams.log_softmax(logits)
        # output layer for seq2seq log-probabilities
        pred = self.modules.seq_lin(pred)
        p_seq = self.hparams.log_softmax(pred)
        # Compute outputs: beam search only every valid_search_interval
        # epochs during validation (it is expensive), always at test time.
        hyps = None
        if stage == sb.Stage.TRAIN:
            hyps = None
        elif stage == sb.Stage.VALID:
            hyps = None
            current_epoch = self.hparams.epoch_counter.current
            if current_epoch % self.hparams.valid_search_interval == 0:
                hyps, _ = self.hparams.beam_searcher(enc_out.detach(), wav_lens)
        elif stage == sb.Stage.TEST:
            hyps, _ = self.hparams.beam_searcher(enc_out.detach(), wav_lens)
        return p_ctc, p_seq, wav_lens, hyps

    def compute_objectives(self, predictions, batch, stage):
        """Computes the loss (CTC+NLL) given predictions and targets."""
        (p_ctc, p_seq, wav_lens, predicted_tokens,) = predictions
        ids = batch.id
        tokens_eos, tokens_eos_lens = batch.tokens_eos
        tokens, tokens_lens = batch.tokens
        loss_seq = self.hparams.seq_cost(
            p_seq, tokens_eos, length=tokens_eos_lens
        )
        loss_ctc = self.hparams.ctc_cost(p_ctc, tokens, wav_lens, tokens_lens)
        # Weighted interpolation of the two losses.
        loss = (
            self.hparams.ctc_weight * loss_ctc
            + (1 - self.hparams.ctc_weight) * loss_seq
        )
        if stage != sb.Stage.TRAIN:
            current_epoch = self.hparams.epoch_counter.current
            valid_search_interval = self.hparams.valid_search_interval
            # WER/CER only when hypotheses were actually decoded (see
            # compute_forward).
            if current_epoch % valid_search_interval == 0 or (
                stage == sb.Stage.TEST
            ):
                # Decode token terms to words
                predicted_words = self.tokenizer(
                    predicted_tokens, task="decode_from_list"
                )
                # Convert indices to words
                target_words = undo_padding(tokens, tokens_lens)
                target_words = self.tokenizer(
                    target_words, task="decode_from_list"
                )
                self.wer_metric.append(ids, predicted_words, target_words)
                self.cer_metric.append(ids, predicted_words, target_words)
            # compute the accuracy of the one-step-forward prediction
            self.acc_metric.append(p_seq, tokens_eos, tokens_eos_lens)
        return loss

    def fit_batch(self, batch):
        """Train the parameters given a single batch in input"""
        # check if we need to switch optimizer
        # if so change the optimizer from Adam to SGD
        self.check_and_reset_optimizer()
        predictions = self.compute_forward(batch, sb.Stage.TRAIN)
        loss = self.compute_objectives(predictions, batch, sb.Stage.TRAIN)
        # normalize the loss by gradient_accumulation step
        (loss / self.hparams.gradient_accumulation).backward()
        if self.step % self.hparams.gradient_accumulation == 0:
            # gradient clipping & early stop if loss is not finite
            # NOTE(review): the return value is ignored here (other recipes
            # use `if self.check_gradients(loss):`), so the step is taken
            # even on a non-finite loss — confirm this is intended.
            self.check_gradients(loss)
            self.optimizer.step()
            self.optimizer.zero_grad()
            # anneal lr every update
            self.hparams.noam_annealing(self.optimizer)
        return loss.detach()

    def evaluate_batch(self, batch, stage):
        """Computations needed for validation/test batches"""
        with torch.no_grad():
            predictions = self.compute_forward(batch, stage=stage)
            loss = self.compute_objectives(predictions, batch, stage=stage)
        return loss.detach()

    def on_stage_start(self, stage, epoch):
        """Gets called at the beginning of each epoch"""
        if stage != sb.Stage.TRAIN:
            # Fresh accuracy and error-rate trackers for each eval pass.
            self.acc_metric = self.hparams.acc_computer()
            self.cer_metric = self.hparams.cer_computer()
            self.wer_metric = self.hparams.error_rate_computer()

    def on_stage_end(self, stage, stage_loss, epoch):
        """Gets called at the end of an epoch."""
        # Compute/store important stats
        stage_stats = {"loss": stage_loss}
        if stage == sb.Stage.TRAIN:
            self.train_stats = stage_stats
        else:
            stage_stats["ACC"] = self.acc_metric.summarize()
            current_epoch = self.hparams.epoch_counter.current
            valid_search_interval = self.hparams.valid_search_interval
            if (
                current_epoch % valid_search_interval == 0
                or stage == sb.Stage.TEST
            ):
                stage_stats["WER"] = self.wer_metric.summarize("error_rate")
                stage_stats["CER"] = self.cer_metric.summarize("error_rate")
        # log stats and save checkpoint at end-of-epoch (main process only)
        if stage == sb.Stage.VALID and sb.utils.distributed.if_main_process():
            # report different epoch stages according current stage
            current_epoch = self.hparams.epoch_counter.current
            if current_epoch <= self.hparams.stage_one_epochs:
                lr = self.hparams.noam_annealing.current_lr
                steps = self.hparams.noam_annealing.n_steps
                optimizer = self.optimizer.__class__.__name__
            else:
                lr = self.hparams.lr_sgd
                steps = -1
                optimizer = self.optimizer.__class__.__name__
            epoch_stats = {
                "epoch": epoch,
                "lr": lr,
                "steps": steps,
                "optimizer": optimizer,
            }
            self.hparams.train_logger.log_stats(
                stats_meta=epoch_stats,
                train_stats=self.train_stats,
                valid_stats=stage_stats,
            )
            # Keep only the best checkpoint according to accuracy.
            self.checkpointer.save_and_keep_only(
                meta={"ACC": stage_stats["ACC"], "epoch": epoch},
                max_keys=["ACC"],
            )
        elif stage == sb.Stage.TEST:
            self.hparams.train_logger.log_stats(
                stats_meta={"Epoch loaded": self.hparams.epoch_counter.current},
                test_stats=stage_stats,
            )
            with open(self.hparams.wer_file, "w") as w:
                self.wer_metric.write_stats(w)

    def check_and_reset_optimizer(self):
        """reset the optimizer if training enters stage 2 (Adam -> SGD)"""
        current_epoch = self.hparams.epoch_counter.current
        if not hasattr(self, "switched"):
            self.switched = False
            # Resuming from a checkpoint may already leave us on SGD.
            if isinstance(self.optimizer, torch.optim.SGD):
                self.switched = True
        if self.switched is True:
            return
        if current_epoch > self.hparams.stage_one_epochs:
            self.optimizer = self.hparams.SGD(self.modules.parameters())
            if self.checkpointer is not None:
                self.checkpointer.add_recoverable("optimizer", self.optimizer)
            self.switched = True

    def on_fit_start(self):
        """Gets called at the beginning of ``fit()``, on multiple processes
        if ``distributed_count > 0`` and backend is ddp.
        Default implementation compiles the jit modules, initializes
        optimizers, and loads the latest checkpoint to resume training.
        """
        # Run this *after* starting all processes since jit modules cannot be
        # pickled.
        self._compile_jit()
        # Wrap modules with parallel backend after jit
        self._wrap_distributed()
        # Initialize optimizers after parameters are configured
        self.init_optimizers()
        # Load latest checkpoint to check to current epoch number
        if self.checkpointer is not None:
            self.checkpointer.recover_if_possible(
                device=torch.device(self.device)
            )
        # if the model is resumed from stage two, reinitialize the optimizer
        current_epoch = self.hparams.epoch_counter.current
        if current_epoch > self.hparams.stage_one_epochs:
            self.optimizer = self.hparams.SGD(self.modules.parameters())
            if self.checkpointer is not None:
                self.checkpointer.add_recoverable("optimizer", self.optimizer)
            # Load latest checkpoint to resume training if interrupted
            # (second recovery so the freshly-registered SGD state is restored)
            if self.checkpointer is not None:
                self.checkpointer.recover_if_possible(
                    device=torch.device(self.device)
                )
# Define custom data procedure
def dataio_prepare(hparams, tokenizer):
    """Build the train/valid/test datasets and their processing pipelines.

    Note: mutates ``hparams`` — train dataloader shuffling is disabled
    whenever the training data is sorted by duration.
    """
    # 1. Datasets from the prepared CSVs.
    data_folder = hparams["data_folder"]
    train_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["train_csv"], replacements={"data_root": data_folder},
    )
    sorting = hparams["sorting"]
    if sorting in ("ascending", "descending"):
        # Sorting by duration speeds training up; over-long clips are dropped.
        train_data = train_data.filtered_sorted(
            sort_key="duration",
            reverse=(sorting == "descending"),
            key_max_value={"duration": hparams["avoid_if_longer_than"]},
        )
        # Shuffling a sorted dataset would defeat the purpose.
        hparams["train_dataloader_opts"]["shuffle"] = False
    elif sorting != "random":
        raise NotImplementedError(
            "sorting must be random, ascending or descending"
        )
    # Valid/test are sorted too, so evaluation runs faster.
    valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["valid_csv"], replacements={"data_root": data_folder},
    ).filtered_sorted(sort_key="duration")
    test_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["test_csv"], replacements={"data_root": data_folder},
    ).filtered_sorted(sort_key="duration")
    datasets = [train_data, valid_data, test_data]
    # 2. Audio pipeline: load and resample to the model's sample rate.
    @sb.utils.data_pipeline.takes("wav")
    @sb.utils.data_pipeline.provides("sig")
    def audio_pipeline(wav):
        meta = torchaudio.info(wav)
        signal = sb.dataio.dataio.read_audio(wav)
        return torchaudio.transforms.Resample(
            meta.sample_rate, hparams["sample_rate"],
        )(signal)
    sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline)
    # 3. Text pipeline: yields must follow the order declared in `provides`.
    @sb.utils.data_pipeline.takes("wrd")
    @sb.utils.data_pipeline.provides(
        "wrd", "tokens_list", "tokens_bos", "tokens_eos", "tokens"
    )
    def text_pipeline(wrd):
        yield wrd
        token_ids = tokenizer.sp.encode_as_ids(wrd)
        yield token_ids
        yield torch.LongTensor([hparams["bos_index"]] + token_ids)
        yield torch.LongTensor(token_ids + [hparams["eos_index"]])
        yield torch.LongTensor(token_ids)
    sb.dataio.dataset.add_dynamic_item(datasets, text_pipeline)
    # 4. Declare which keys each batch exposes.
    sb.dataio.dataset.set_output_keys(
        datasets, ["id", "sig", "tokens_bos", "tokens_eos", "tokens"],
    )
    return train_data, valid_data, test_data
if __name__ == "__main__":
# CLI:
hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])
with open(hparams_file) as fin:
hparams = load_hyperpyyaml(fin, overrides)
# If --distributed_launch then
# create ddp_group with the right communication protocol
sb.utils.distributed.ddp_init_group(run_opts)
# Dataset preparation (parsing CommonVoice)
from common_voice_prepare import prepare_common_voice # noqa
# Create experiment directory
sb.create_experiment_directory(
experiment_directory=hparams["output_folder"],
hyperparams_to_save=hparams_file,
overrides=overrides,
)
# Due to DDP, we do the preparation ONLY on the main python process
run_on_main(
prepare_common_voice,
kwargs={
"data_folder": hparams["data_folder"],
"save_folder": hparams["save_folder"],
"train_tsv_file": hparams["train_tsv_file"],
"dev_tsv_file": hparams["dev_tsv_file"],
"test_tsv_file": hparams["test_tsv_file"],
"accented_letters": hparams["accented_letters"],
"language": hparams["language"],
"skip_prep": hparams["skip_prep"],
},
)
# Defining tokenizer and loading it
tokenizer = SentencePiece(
model_dir=hparams["save_folder"],
vocab_size=hparams["output_neurons"],
annotation_train=hparams["train_csv"],
annotation_read="wrd",
model_type=hparams["token_type"],
character_coverage=hparams["character_coverage"],
)
# here we create the datasets objects as well as tokenization and encoding
train_data, valid_data, test_data = dataio_prepare(hparams, tokenizer)
# Trainer initialization
asr_brain = ASR(
modules=hparams["modules"],
opt_class=hparams["Adam"],
hparams=hparams,
run_opts=run_opts,
checkpointer=hparams["checkpointer"],
)
# adding objects to trainer:
asr_brain.tokenizer = tokenizer
# Training
asr_brain.fit(
asr_brain.hparams.epoch_counter,
train_data,
valid_data,
train_loader_kwargs=hparams["train_dataloader_opts"],
valid_loader_kwargs=hparams["valid_dataloader_opts"],
)
# Test
asr_brain.hparams.wer_file = hparams["output_folder"] + "/wer_test.txt"
asr_brain.evaluate(
test_data,
min_key="WER",
test_loader_kwargs=hparams["test_dataloader_opts"],
)
asr_brain.hparams.wer_file = hparams["output_folder"] + "/wer_valid.txt"
asr_brain.evaluate(
valid_data,
min_key="WER",
test_loader_kwargs=hparams["test_dataloader_opts"],
)
| 16,735 | 35.863436 | 109 | py |
speechbrain | speechbrain-main/recipes/CommonVoice/ASR/transformer/train_with_whisper.py | #!/usr/bin/env python3
"""Recipe for training a whisper-based ASR system with CommonVoice.
The system employs whisper from OpenAI (https://cdn.openai.com/papers/whisper.pdf).
This recipe take the whisper encoder-decoder to fine-tune on.
To run this recipe, do the following:
> python train_with_whisper.py hparams/train_<locale>_hf_whisper.yaml
* Pooneh Mousavi 2022
"""
import sys
import torch
import logging
import torchaudio
import speechbrain as sb
from speechbrain.utils.distributed import run_on_main
from speechbrain.utils.data_utils import undo_padding
from hyperpyyaml import load_hyperpyyaml
from transformers.models.whisper.tokenization_whisper import LANGUAGES
logger = logging.getLogger(__name__)
# Define training procedure
class ASR(sb.Brain):
    """Brain subclass fine-tuning a Whisper encoder-decoder on CommonVoice ASR.

    Expects ``self.modules.whisper`` (the model), ``self.tokenizer`` (the
    Whisper tokenizer, attached in ``__main__``), and searchers/losses in
    ``self.hparams``.
    """

    def compute_forward(self, batch, stage):
        """Forward computations from the waveform batches to the output probabilities.

        Arguments
        ---------
        batch : PaddedBatch
            Batch providing at least ``sig`` and ``tokens_bos``.
        stage : sb.Stage
            TRAIN, VALID or TEST.

        Returns
        -------
        tuple
            ``(logits, hyps, wav_lens)`` — decoder logits, search hypotheses
            (``None`` during training), and relative waveform lengths.
        """
        batch = batch.to(self.device)
        wavs, wav_lens = batch.sig
        bos_tokens, bos_tokens_lens = batch.tokens_bos

        # Add augmentation if specified
        if stage == sb.Stage.TRAIN:
            if hasattr(self.hparams, "augmentation"):
                wavs = self.hparams.augmentation(wavs, wav_lens)

        # We compute the padding mask and replace the values with the pad_token_id
        # that the Whisper decoder expect to see.
        abs_tokens_lens = (bos_tokens_lens * bos_tokens.shape[1]).long()
        pad_mask = (
            torch.arange(abs_tokens_lens.max(), device=self.device)[None, :]
            < abs_tokens_lens[:, None]
        )
        bos_tokens[~pad_mask] = self.tokenizer.pad_token_id

        # Forward encoder + decoder
        enc_out, logits, _ = self.modules.whisper(wavs, bos_tokens)

        hyps = None
        if stage == sb.Stage.VALID:
            hyps, _ = self.hparams.valid_greedy_searcher(enc_out, wav_lens)
        elif stage == sb.Stage.TEST:
            # Bugfix: use the beam searcher configured for the test stage
            # (its prefix/language tokens are set in __main__); the original
            # code reused the greedy validation searcher here, leaving
            # test_beam_searcher configured but unused.
            hyps, _ = self.hparams.test_beam_searcher(enc_out, wav_lens)

        return logits, hyps, wav_lens

    def compute_objectives(self, predictions, batch, stage):
        """Computes the loss NLL given predictions and targets.

        Also accumulates WER/CER statistics outside of training.
        """
        logits, hyps, wav_lens = predictions
        batch = batch.to(self.device)
        ids = batch.id
        tokens_eos, tokens_eos_lens = batch.tokens_eos
        log_probs = self.hparams.log_softmax(logits)
        loss = self.hparams.nll_loss(
            log_probs, tokens_eos, length=tokens_eos_lens,
        )
        if stage != sb.Stage.TRAIN:
            tokens, tokens_lens = batch.tokens
            # Decode token terms to words
            predicted_words = self.tokenizer.batch_decode(
                hyps, skip_special_tokens=True
            )
            # Convert indices to words
            target_words = undo_padding(tokens, tokens_lens)
            target_words = self.tokenizer.batch_decode(
                target_words, skip_special_tokens=True
            )
            if hasattr(self.hparams, "normalized_transcripts"):
                # NOTE: tokenizer._normalize is a private transformers API;
                # keep an eye on it across library upgrades.
                predicted_words = [
                    self.tokenizer._normalize(text).split(" ")
                    for text in predicted_words
                ]
                target_words = [
                    self.tokenizer._normalize(text).split(" ")
                    for text in target_words
                ]
            else:
                predicted_words = [text.split(" ") for text in predicted_words]
                target_words = [text.split(" ") for text in target_words]
            self.wer_metric.append(ids, predicted_words, target_words)
            self.cer_metric.append(ids, predicted_words, target_words)
        return loss

    def on_stage_start(self, stage, epoch):
        """Gets called at the beginning of each epoch; resets WER/CER metrics."""
        if stage != sb.Stage.TRAIN:
            self.cer_metric = self.hparams.cer_computer()
            self.wer_metric = self.hparams.error_rate_computer()

    def on_stage_end(self, stage, stage_loss, epoch):
        """Gets called at the end of an epoch: logging, LR annealing, checkpoints."""
        # Compute/store important stats
        stage_stats = {"loss": stage_loss}
        if stage == sb.Stage.TRAIN:
            self.train_stats = stage_stats
        else:
            stage_stats["CER"] = self.cer_metric.summarize("error_rate")
            stage_stats["WER"] = self.wer_metric.summarize("error_rate")
        # Perform end-of-iteration things, like annealing, logging, etc.
        if stage == sb.Stage.VALID:
            old_lr_whisper, new_lr_whisper = self.hparams.lr_annealing_whisper(
                stage_stats["loss"]
            )
            sb.nnet.schedulers.update_learning_rate(
                self.optimizer, new_lr_whisper
            )
            self.hparams.train_logger.log_stats(
                stats_meta={"epoch": epoch, "lr_whisper": old_lr_whisper},
                train_stats=self.train_stats,
                valid_stats=stage_stats,
            )
            # Keep only the best checkpoint according to validation WER.
            self.checkpointer.save_and_keep_only(
                meta={"WER": stage_stats["WER"]}, min_keys=["WER"],
            )
        elif stage == sb.Stage.TEST:
            self.hparams.train_logger.log_stats(
                stats_meta={"Epoch loaded": self.hparams.epoch_counter.current},
                test_stats=stage_stats,
            )
            with open(self.hparams.wer_file, "w") as w:
                self.wer_metric.write_stats(w)
def dataio_prepare(hparams, tokenizer):
    """This function prepares the datasets to be used in the brain class.
    It also defines the data processing pipeline through user-defined functions.

    Arguments
    ---------
    hparams : dict
        Loaded hyperparameters (CSV paths, sorting policy, sample rate, ...).
    tokenizer : object
        Whisper tokenizer providing ``encode``; BOS/EOS ids come from hparams.

    Returns
    -------
    (train_data, valid_data, test_data) DynamicItemDataset triple.
    """
    data_folder = hparams["data_folder"]
    train_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["train_csv"], replacements={"data_root": data_folder},
    )
    if hparams["sorting"] == "ascending":
        # we sort training data to speed up training and get better results.
        train_data = train_data.filtered_sorted(
            sort_key="duration",
            key_max_value={"duration": hparams["avoid_if_longer_than"]},
        )
        # when sorting do not shuffle in dataloader ! otherwise is pointless
        hparams["train_loader_kwargs"]["shuffle"] = False
    elif hparams["sorting"] == "descending":
        train_data = train_data.filtered_sorted(
            sort_key="duration",
            reverse=True,
            key_max_value={"duration": hparams["avoid_if_longer_than"]},
        )
        # when sorting do not shuffle in dataloader ! otherwise is pointless
        hparams["train_loader_kwargs"]["shuffle"] = False
    elif hparams["sorting"] == "random":
        pass
    else:
        raise NotImplementedError(
            "sorting must be random, ascending or descending"
        )
    valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["valid_csv"], replacements={"data_root": data_folder},
    )
    # Sorting validation by duration speeds up evaluation.
    valid_data = valid_data.filtered_sorted(sort_key="duration")
    # test is separate
    test_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["test_csv"], replacements={"data_root": data_folder},
    )
    datasets = [train_data, valid_data, test_data]
    # 2. Define audio pipeline: wav path -> resampled waveform ("sig").
    @sb.utils.data_pipeline.takes("wav")
    @sb.utils.data_pipeline.provides("sig")
    def audio_pipeline(wav):
        info = torchaudio.info(wav)
        sig = sb.dataio.dataio.read_audio(wav)
        resampled = torchaudio.transforms.Resample(
            info.sample_rate, hparams["sample_rate"],
        )(sig)
        return resampled
    sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline)
    # 3. Define text pipeline: transcript -> token id variants.
    @sb.utils.data_pipeline.takes("wrd")
    @sb.utils.data_pipeline.provides(
        "wrd", "tokens_list", "tokens_bos", "tokens_eos", "tokens"
    )
    def text_pipeline(wrd):
        yield wrd
        tokens_list = tokenizer.encode(wrd)
        # avoid bos and eos tokens.
        # (the Whisper tokenizer adds them at positions 0 and -1)
        tokens_list = tokens_list[1:-1]
        yield tokens_list
        tokens_bos = torch.LongTensor([hparams["bos_index"]] + tokens_list)
        yield tokens_bos
        tokens_eos = torch.LongTensor(tokens_list + [hparams["eos_index"]])
        yield tokens_eos
        tokens = torch.LongTensor(tokens_list)
        yield tokens
    sb.dataio.dataset.add_dynamic_item(datasets, text_pipeline)
    # 4. Set output:
    sb.dataio.dataset.set_output_keys(
        datasets,
        ["id", "sig", "tokens_list", "tokens_bos", "tokens_eos", "tokens"],
    )
    return train_data, valid_data, test_data
if __name__ == "__main__":
# CLI:
hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])
# If distributed_launch=True then
# create ddp_group with the right communication protocol
sb.utils.distributed.ddp_init_group(run_opts)
with open(hparams_file) as fin:
hparams = load_hyperpyyaml(fin, overrides)
# Create experiment directory
sb.create_experiment_directory(
experiment_directory=hparams["output_folder"],
hyperparams_to_save=hparams_file,
overrides=overrides,
)
# Dataset prep (parsing Librispeech)
from common_voice_prepare import prepare_common_voice # noqa
# multi-gpu (ddp) save data preparation
run_on_main(
prepare_common_voice,
kwargs={
"data_folder": hparams["data_folder"],
"save_folder": hparams["save_folder"],
"train_tsv_file": hparams["train_tsv_file"],
"dev_tsv_file": hparams["dev_tsv_file"],
"test_tsv_file": hparams["test_tsv_file"],
"accented_letters": hparams["accented_letters"],
"language": hparams["locale"],
"skip_prep": hparams["skip_prep"],
},
)
# Defining tokenizer and loading it
tokenizer = hparams["whisper"].tokenizer
language = LANGUAGES[hparams["locale"]]
tokenizer.set_prefix_tokens(language, "transcribe", False)
# we need to prepare the tokens for searchers
hparams["valid_greedy_searcher"].set_decoder_input_tokens(
tokenizer.prefix_tokens
)
hparams["valid_greedy_searcher"].set_language_token(
tokenizer.prefix_tokens[1]
)
hparams["test_beam_searcher"].set_decoder_input_tokens(
tokenizer.prefix_tokens
)
hparams["test_beam_searcher"].set_language_token(tokenizer.prefix_tokens[1])
# here we create the datasets objects as well as tokenization and encoding
train_data, valid_data, test_data = dataio_prepare(hparams, tokenizer)
# Trainer initialization
asr_brain = ASR(
modules=hparams["modules"],
hparams=hparams,
run_opts=run_opts,
checkpointer=hparams["checkpointer"],
opt_class=hparams["whisper_opt_class"],
)
# We load the pretrained whisper model
if "pretrainer" in hparams.keys():
run_on_main(hparams["pretrainer"].collect_files)
hparams["pretrainer"].load_collected(asr_brain.device)
# We dynamicaly add the tokenizer to our brain class.
# NB: This tokenizer corresponds to the one used for Whisper.
asr_brain.tokenizer = tokenizer
if hparams["test_only"] is False:
# Training
asr_brain.fit(
asr_brain.hparams.epoch_counter,
train_data,
valid_data,
train_loader_kwargs=hparams["train_loader_kwargs"],
valid_loader_kwargs=hparams["valid_loader_kwargs"],
)
# Testing
asr_brain.hparams.wer_file = hparams["output_folder"] + "/wer_test.txt"
asr_brain.evaluate(
test_data,
min_key="WER",
test_loader_kwargs=hparams["test_loader_kwargs"],
)
asr_brain.hparams.wer_file = hparams["output_folder"] + "/wer_valid.txt"
asr_brain.evaluate(
valid_data,
min_key="WER",
test_loader_kwargs=hparams["test_loader_kwargs"],
)
| 11,891 | 34.60479 | 89 | py |
speechbrain | speechbrain-main/recipes/AMI/Diarization/experiment.py | #!/usr/bin/python3
"""This recipe implements diarization system using deep embedding extraction followed by spectral clustering.
To run this recipe:
> python experiment.py hparams/<your_hyperparams_file.yaml>
e.g., python experiment.py hparams/ecapa_tdnn.yaml
Condition: Oracle VAD (speech regions taken from the groundtruth).
Note: There are multiple ways to write this recipe. We iterate over individual recordings.
This approach is less GPU memory demanding and also makes code easy to understand.
Citation: This recipe is based on the following paper,
N. Dawalatabad, M. Ravanelli, F. Grondin, J. Thienpondt, B. Desplanques, H. Na,
"ECAPA-TDNN Embeddings for Speaker Diarization," arXiv:2104.01466, 2021.
Authors
* Nauman Dawalatabad 2020
"""
import os
import sys
import torch
import logging
import pickle
import json
import glob
import shutil
import numpy as np
import speechbrain as sb
from tqdm.contrib import tqdm
from hyperpyyaml import load_hyperpyyaml
from speechbrain.utils.distributed import run_on_main
from speechbrain.processing.PLDA_LDA import StatObject_SB
from speechbrain.processing import diarization as diar
from speechbrain.utils.DER import DER
from speechbrain.dataio.dataio import read_audio
from speechbrain.dataio.dataio import read_audio_multichannel
np.random.seed(1234)
# Logger setup
logger = logging.getLogger(__name__)
current_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.dirname(current_dir))
try:
import sklearn # noqa F401
except ImportError:
err_msg = "Cannot import optional dependency `scikit-learn` (sklearn) used in this module.\n"
err_msg += "Please follow the below instructions\n"
err_msg += "=============================\n"
err_msg += "Using pip:\n"
err_msg += "pip install scikit-learn\n"
err_msg += "================================ \n"
err_msg += "Using conda:\n"
err_msg += "conda install scikit-learn"
raise ImportError(err_msg)
def compute_embeddings(wavs, lens):
    """Definition of the steps for computation of embeddings from the waveforms.

    Arguments
    ---------
    wavs : torch.Tensor
        Batch of waveforms (moved to the configured device here).
    lens : torch.Tensor
        Relative lengths of the waveforms in the batch.

    Returns
    -------
    torch.Tensor
        Normalized speaker embeddings; relies on module-level ``params`` and
        ``run_opts`` set in ``__main__``.
    """
    # Inference only: no gradients needed for embedding extraction.
    with torch.no_grad():
        wavs = wavs.to(run_opts["device"])
        feats = params["compute_features"](wavs)
        feats = params["mean_var_norm"](feats, lens)
        emb = params["embedding_model"](feats, lens)
        # Embedding-level mean/var norm; all embeddings weighted equally.
        emb = params["mean_var_norm_emb"](
            emb, torch.ones(emb.shape[0], device=run_opts["device"])
        )
    return emb
def embedding_computation_loop(split, set_loader, stat_file):
    """Extracts embeddings for a given dataset loader.

    Arguments
    ---------
    split : str
        Split tag (used by callers for bookkeeping; not read here).
    set_loader : DataLoader
        Loader yielding batches with ``id`` and ``sig`` fields.
    stat_file : str
        Pickle path used as a cache for the resulting StatObject_SB.

    Returns
    -------
    StatObject_SB
        Embeddings wrapped as a stat object (computed or loaded from cache).
    """
    # Note: We use speechbrain.processing.PLDA_LDA.StatObject_SB type to store embeddings.
    # Extract embeddings (skip if already done).
    if not os.path.isfile(stat_file):
        logger.debug("Extracting deep embeddings and diarizing")
        embeddings = np.empty(shape=[0, params["emb_dim"]], dtype=np.float64)
        modelset = []
        segset = []
        # Different data may have different statistics.
        params["mean_var_norm_emb"].count = 0
        for batch in set_loader:
            ids = batch.id
            wavs, lens = batch.sig
            # Segment id doubles as model id (one "model" per sub-segment).
            mod = [x for x in ids]
            seg = [x for x in ids]
            modelset = modelset + mod
            segset = segset + seg
            # Embedding computation.
            emb = (
                compute_embeddings(wavs, lens)
                .contiguous()
                .squeeze(1)
                .cpu()
                .numpy()
            )
            embeddings = np.concatenate((embeddings, emb), axis=0)
        modelset = np.array(modelset, dtype="|O")
        segset = np.array(segset, dtype="|O")
        # Intialize variables for start, stop and stat0.
        s = np.array([None] * embeddings.shape[0])
        b = np.array([[1.0]] * embeddings.shape[0])
        stat_obj = StatObject_SB(
            modelset=modelset,
            segset=segset,
            start=s,
            stop=s,
            stat0=b,
            stat1=embeddings,
        )
        logger.debug("Saving Embeddings...")
        stat_obj.save_stat_object(stat_file)
    else:
        logger.debug("Skipping embedding extraction (as already present).")
        logger.debug("Loading previously saved embeddings.")
        with open(stat_file, "rb") as in_file:
            stat_obj = pickle.load(in_file)
    return stat_obj
def prepare_subset_json(full_meta_data, rec_id, out_meta_file):
    """Prepares metadata for a given recording ID.

    Arguments
    ---------
    full_meta_data : json
        Full meta (json) containing all the recordings
    rec_id : str
        The recording ID for which meta (json) has to be prepared
    out_meta_file : str
        Path of the output meta (json) file.
    """
    # Keep only the entries whose key starts with this recording's ID.
    selected = {
        key: value
        for key, value in full_meta_data.items()
        if str(key).startswith(rec_id)
    }
    with open(out_meta_file, mode="w") as json_f:
        json.dump(selected, json_f, indent=2)
def diarize_dataset(full_meta, split_type, n_lambdas, pval, n_neighbors=10):
    """This function diarizes all the recordings in a given dataset. It performs
    computation of embedding and clusters them using spectral clustering (or other backends).
    The output speaker boundary file is stored in the RTTM format.

    Arguments
    ---------
    full_meta : dict
        Metadata (json) covering every recording in the split.
    split_type : str
        "dev" or "eval"; selects reference RTTM and output directories.
    n_lambdas : int or None
        Number of clusters/components (used with nn affinity); None otherwise.
    pval : float or None
        Affinity pruning p-value (SC/kmeans) or threshold (AHC).
    n_neighbors : int
        Neighbor count for nn-based affinity.

    Returns
    -------
    str
        Path of the concatenated system RTTM file for the whole split.
    """
    # Prepare `spkr_info` only once when Oracle num of speakers is selected.
    # spkr_info is essential to obtain number of speakers from groundtruth.
    if params["oracle_n_spkrs"] is True:
        full_ref_rttm_file = (
            params["ref_rttm_dir"] + "/fullref_ami_" + split_type + ".rttm"
        )
        rttm = diar.read_rttm(full_ref_rttm_file)
        spkr_info = list( # noqa F841
            filter(lambda x: x.startswith("SPKR-INFO"), rttm)
        )
    # Get all the recording IDs in this dataset.
    all_keys = full_meta.keys()
    A = [word.rstrip().split("_")[0] for word in all_keys]
    # NOTE(review): A[1:] skips the first metadata key — presumably a
    # non-recording header entry in the json; confirm against ami_prepare.
    all_rec_ids = list(set(A[1:]))
    all_rec_ids.sort()
    split = "AMI_" + split_type
    i = 1
    # Setting eval modality.
    params["embedding_model"].eval()
    msg = "Diarizing " + split_type + " set"
    logger.info(msg)
    if len(all_rec_ids) <= 0:
        msg = "No recording IDs found! Please check if meta_data json file is properly generated."
        logger.error(msg)
        sys.exit()
    # Diarizing different recordings in a dataset.
    for rec_id in tqdm(all_rec_ids):
        # This tag will be displayed in the log.
        tag = (
            "["
            + str(split_type)
            + ": "
            + str(i)
            + "/"
            + str(len(all_rec_ids))
            + "]"
        )
        i = i + 1
        # Log message.
        msg = "Diarizing %s : %s " % (tag, rec_id)
        logger.debug(msg)
        # Embedding directory.
        if not os.path.exists(os.path.join(params["embedding_dir"], split)):
            os.makedirs(os.path.join(params["embedding_dir"], split))
        # File to store embeddings.
        emb_file_name = rec_id + "." + params["mic_type"] + ".emb_stat.pkl"
        diary_stat_emb_file = os.path.join(
            params["embedding_dir"], split, emb_file_name
        )
        # Prepare a metadata (json) for one recording. This is basically a subset of full_meta.
        # Lets keep this meta-info in embedding directory itself.
        json_file_name = rec_id + "." + params["mic_type"] + ".json"
        meta_per_rec_file = os.path.join(
            params["embedding_dir"], split, json_file_name
        )
        # Write subset (meta for one recording) json metadata.
        prepare_subset_json(full_meta, rec_id, meta_per_rec_file)
        # Prepare data loader.
        diary_set_loader = dataio_prep(params, meta_per_rec_file)
        # Putting modules on the device.
        params["compute_features"].to(run_opts["device"])
        params["mean_var_norm"].to(run_opts["device"])
        params["embedding_model"].to(run_opts["device"])
        params["mean_var_norm_emb"].to(run_opts["device"])
        # Compute Embeddings.
        diary_obj = embedding_computation_loop(
            "diary", diary_set_loader, diary_stat_emb_file
        )
        # Adding tag for directory path.
        type_of_num_spkr = "oracle" if params["oracle_n_spkrs"] else "est"
        tag = (
            type_of_num_spkr
            + "_"
            + str(params["affinity"])
            + "_"
            + params["backend"]
        )
        out_rttm_dir = os.path.join(
            params["sys_rttm_dir"], params["mic_type"], split, tag
        )
        if not os.path.exists(out_rttm_dir):
            os.makedirs(out_rttm_dir)
        out_rttm_file = out_rttm_dir + "/" + rec_id + ".rttm"
        # Processing starts from here.
        if params["oracle_n_spkrs"] is True:
            # Oracle num of speakers.
            num_spkrs = diar.get_oracle_num_spkrs(rec_id, spkr_info)
        else:
            if params["affinity"] == "nn":
                # Num of speakers tunned on dev set (only for nn affinity).
                num_spkrs = n_lambdas
            else:
                # Num of speakers will be estimated using max eigen gap for cos based affinity.
                # So adding None here. Will use this None later-on.
                num_spkrs = None
        if params["backend"] == "kmeans":
            diar.do_kmeans_clustering(
                diary_obj, out_rttm_file, rec_id, num_spkrs, pval,
            )
        if params["backend"] == "SC":
            # Go for Spectral Clustering (SC).
            diar.do_spec_clustering(
                diary_obj,
                out_rttm_file,
                rec_id,
                num_spkrs,
                pval,
                params["affinity"],
                n_neighbors,
            )
        # Can used for AHC later. Likewise one can add different backends here.
        if params["backend"] == "AHC":
            # call AHC
            threshold = pval # pval for AHC is nothing but threshold.
            diar.do_AHC(diary_obj, out_rttm_file, rec_id, num_spkrs, threshold)
    # Once all RTTM outputs are generated, concatenate individual RTTM files to obtain single RTTM file.
    # This is not needed but just staying with the standards.
    concate_rttm_file = out_rttm_dir + "/sys_output.rttm"
    logger.debug("Concatenating individual RTTM files...")
    with open(concate_rttm_file, "w") as cat_file:
        for f in glob.glob(out_rttm_dir + "/*.rttm"):
            if f == concate_rttm_file:
                continue
            with open(f, "r") as indi_rttm_file:
                shutil.copyfileobj(indi_rttm_file, cat_file)
    msg = "The system generated RTTM file for %s set : %s" % (
        split_type,
        concate_rttm_file,
    )
    logger.debug(msg)
    return concate_rttm_file
def dev_pval_tuner(full_meta, split_type):
    """Tuning p_value for affinity matrix.

    The p_value used so that only p% of the values in each row is retained.
    """
    candidate_pvals = np.arange(0.002, 0.015, 0.001)
    der_per_pval = []
    n_lambdas = None  # using it as flag later.
    for p_v in candidate_pvals:
        # Diarize the whole dev split with this candidate p-value.
        sys_rttm = diarize_dataset(full_meta, split_type, n_lambdas, p_v)
        ref_rttm = os.path.join(params["ref_rttm_dir"], "fullref_ami_dev.rttm")
        _ms, _fa, _ser, der_val = DER(
            ref_rttm,
            sys_rttm,
            params["ignore_overlap"],
            params["forgiveness_collar"],
        )
        der_per_pval.append(der_val)
        if params["oracle_n_spkrs"] is True and params["backend"] == "kmeans":
            # no need of p_val search. Note p_val is needed for SC for both oracle and est num of speakers.
            # p_val is needed in oracle_n_spkr=False when using kmeans backend.
            break
    # First candidate achieving the minimum DER on the dev set.
    return candidate_pvals[der_per_pval.index(min(der_per_pval))]
def dev_ahc_threshold_tuner(full_meta, split_type):
    """Tuning threshold for affinity matrix. This function is called when AHC is used as backend."""
    thresholds = np.arange(0.0, 1.0, 0.1)
    der_per_threshold = []
    n_lambdas = None  # using it as flag later.
    # Note: p_val is threshold in case of AHC.
    for p_v in thresholds:
        # Diarize the whole dev split with this candidate threshold.
        sys_rttm = diarize_dataset(full_meta, split_type, n_lambdas, p_v)
        ref_rttm = os.path.join(params["ref_rttm_dir"], "fullref_ami_dev.rttm")
        _ms, _fa, _ser, der_val = DER(
            ref_rttm,
            sys_rttm,
            params["ignore_overlap"],
            params["forgiveness_collar"],
        )
        der_per_threshold.append(der_val)
        if params["oracle_n_spkrs"] is True:
            break  # no need of threshold search.
    # First threshold achieving the minimum DER on the dev set.
    return thresholds[der_per_threshold.index(min(der_per_threshold))]
def dev_nn_tuner(full_meta, split_type):
    """Tuning n_neighbors on dev set. Assuming oracle num of speakers.

    This is used when nn based affinity is selected.
    """
    results = []
    pval = None
    # Now assumming oracle num of speakers.
    n_lambdas = 4
    for nn in range(5, 15):
        # Diarize the whole dev split with this neighbor count.
        sys_rttm = diarize_dataset(full_meta, split_type, n_lambdas, pval, nn)
        ref_rttm = os.path.join(params["ref_rttm_dir"], "fullref_ami_dev.rttm")
        _ms, _fa, _ser, der_val = DER(
            ref_rttm,
            sys_rttm,
            params["ignore_overlap"],
            params["forgiveness_collar"],
        )
        results.append((nn, der_val))
        if params["oracle_n_spkrs"] is True and params["backend"] == "kmeans":
            break
    # min() returns the first minimal pair, mirroring the original
    # stable-sort-then-take-first behavior.
    best_nn, _best_der = min(results, key=lambda pair: pair[1])
    return best_nn
def dev_tuner(full_meta, split_type):
    """Tuning n_components on dev set. Used for nn based affinity matrix.

    Note: This is a very basic tunning for nn based affinity.
    This is work in progress till we find a better way.
    """
    der_per_count = []
    pval = None
    for n_lambdas in range(1, params["max_num_spkrs"] + 1):
        # Diarize the whole dev split with this component count.
        sys_rttm = diarize_dataset(full_meta, split_type, n_lambdas, pval)
        ref_rttm = os.path.join(params["ref_rttm_dir"], "fullref_ami_dev.rttm")
        _ms, _fa, _ser, der_val = DER(
            ref_rttm,
            sys_rttm,
            params["ignore_overlap"],
            params["forgiveness_collar"],
        )
        der_per_count.append(der_val)
    # n_lambdas is 1-indexed, hence the +1 on the argmin position.
    return der_per_count.index(min(der_per_count)) + 1
def dataio_prep(hparams, json_file):
    """Creates the datasets and their data processing pipelines.
    This is used for multi-mic processing.

    Arguments
    ---------
    hparams : dict
        Hyperparameters (data folder, dataloader options); note the audio
        pipeline branches on the module-level ``params`` dict.
    json_file : str
        Per-recording metadata json produced by ``prepare_subset_json``.

    Returns
    -------
    DataLoader yielding items with "id" and "sig".
    """
    # 1. Datasets
    data_folder = hparams["data_folder"]
    dataset = sb.dataio.dataset.DynamicItemDataset.from_json(
        json_path=json_file, replacements={"data_root": data_folder},
    )
    # 2. Define audio pipeline.
    if params["mic_type"] == "Array1":
        # Multi-mic (Microphone Array)
        @sb.utils.data_pipeline.takes("wav")
        @sb.utils.data_pipeline.provides("sig")
        def audio_pipeline(wav):
            # Beamform the channels into a single signal.
            mics_signals = read_audio_multichannel(wav).unsqueeze(0)
            sig = params["multimic_beamformer"](mics_signals)
            sig = sig.squeeze()
            return sig
    else:
        # Single microphone
        @sb.utils.data_pipeline.takes("wav")
        @sb.utils.data_pipeline.provides("sig")
        def audio_pipeline(wav):
            sig = read_audio(wav)
            return sig
    sb.dataio.dataset.add_dynamic_item([dataset], audio_pipeline)
    # 3. Set output:
    sb.dataio.dataset.set_output_keys([dataset], ["id", "sig"])
    # 4. Create dataloader:
    dataloader = sb.dataio.dataloader.make_dataloader(
        dataset, **params["dataloader_opts"]
    )
    return dataloader
# Begin experiment!
if __name__ == "__main__": # noqa: C901
    # Load hyperparameters file with command-line overrides.
    params_file, run_opts, overrides = sb.core.parse_arguments(sys.argv[1:])
    with open(params_file) as fin:
        params = load_hyperpyyaml(fin, overrides)
    # Dataset prep (peparing metadata files)
    from ami_prepare import prepare_ami # noqa
    if not params["skip_prep"]:
        run_on_main(
            prepare_ami,
            kwargs={
                "data_folder": params["data_folder"],
                "save_folder": params["save_folder"],
                "ref_rttm_dir": params["ref_rttm_dir"],
                "meta_data_dir": params["meta_data_dir"],
                "manual_annot_folder": params["manual_annot_folder"],
                "split_type": params["split_type"],
                "skip_TNO": params["skip_TNO"],
                "mic_type": params["mic_type"],
                "vad_type": params["vad_type"],
                "max_subseg_dur": params["max_subseg_dur"],
                "overlap": params["overlap"],
            },
        )
    # Create experiment directory.
    sb.core.create_experiment_directory(
        experiment_directory=params["output_folder"],
        hyperparams_to_save=params_file,
        overrides=overrides,
    )
    # Few more experiment directories inside results/ (to maintain cleaner structure).
    exp_dirs = [
        params["embedding_dir"],
        params["sys_rttm_dir"],
        params["der_dir"],
    ]
    for dir_ in exp_dirs:
        if not os.path.exists(dir_):
            os.makedirs(dir_)
    # We download the pretrained Model from HuggingFace (or elsewhere depending on
    # the path given in the YAML file).
    run_on_main(params["pretrainer"].collect_files)
    params["pretrainer"].load_collected(device=run_opts["device"])
    params["embedding_model"].eval()
    params["embedding_model"].to(run_opts["device"])
    # AMI Dev Set: Tune hyperparams on dev set.
    # Read the meta-data file for dev set generated during data_prep
    dev_meta_file = params["dev_meta_file"]
    with open(dev_meta_file, "r") as f:
        meta_dev = json.load(f)
    full_meta = meta_dev
    # Processing starts from here
    # Following few lines selects option for different backend and affinity matrices. Finds best values for hyperameters using dev set.
    best_nn = None
    if params["affinity"] == "nn":
        logger.info("Tuning for nn (Multiple iterations over AMI Dev set)")
        best_nn = dev_nn_tuner(full_meta, "dev")
    n_lambdas = None
    best_pval = None
    if params["affinity"] == "cos" and (
        params["backend"] == "SC" or params["backend"] == "kmeans"
    ):
        # oracle num_spkrs or not, doesn't matter for kmeans and SC backends
        # cos: Tune for the best pval for SC /kmeans (for unknown num of spkrs)
        logger.info(
            "Tuning for p-value for SC (Multiple iterations over AMI Dev set)"
        )
        best_pval = dev_pval_tuner(full_meta, "dev")
    elif params["backend"] == "AHC":
        logger.info("Tuning for threshold-value for AHC")
        best_threshold = dev_ahc_threshold_tuner(full_meta, "dev")
        best_pval = best_threshold
    else:
        # NN for unknown num of speakers (can be used in future)
        if params["oracle_n_spkrs"] is False:
            # nn: Tune num of number of components (to be updated later)
            logger.info(
                "Tuning for number of eigen components for NN (Multiple iterations over AMI Dev set)"
            )
            # dev_tuner used for tuning num of components in NN. Can be used in future.
            n_lambdas = dev_tuner(full_meta, "dev")
    # Load 'dev' and 'eval' metadata files.
    full_meta_dev = full_meta # current full_meta is for 'dev'
    eval_meta_file = params["eval_meta_file"]
    with open(eval_meta_file, "r") as f:
        full_meta_eval = json.load(f)
    # Tag to be appended to final output DER files. Writing DER for individual files.
    type_of_num_spkr = "oracle" if params["oracle_n_spkrs"] else "est"
    tag = (
        type_of_num_spkr
        + "_"
        + str(params["affinity"])
        + "."
        + params["mic_type"]
    )
    # Perform final diarization on 'dev' and 'eval' with best hyperparams.
    final_DERs = {}
    for split_type in ["dev", "eval"]:
        if split_type == "dev":
            full_meta = full_meta_dev
        else:
            full_meta = full_meta_eval
        # Performing diarization.
        msg = "Diarizing using best hyperparams: " + split_type + " set"
        logger.info(msg)
        out_boundaries = diarize_dataset(
            full_meta,
            split_type,
            n_lambdas=n_lambdas,
            pval=best_pval,
            n_neighbors=best_nn,
        )
        # Computing DER.
        msg = "Computing DERs for " + split_type + " set"
        logger.info(msg)
        ref_rttm = os.path.join(
            params["ref_rttm_dir"], "fullref_ami_" + split_type + ".rttm"
        )
        sys_rttm = out_boundaries
        [MS, FA, SER, DER_vals] = DER(
            ref_rttm,
            sys_rttm,
            params["ignore_overlap"],
            params["forgiveness_collar"],
            individual_file_scores=True,
        )
        # Writing DER values to a file. Append tag.
        der_file_name = split_type + "_DER_" + tag
        out_der_file = os.path.join(params["der_dir"], der_file_name)
        msg = "Writing DER file to: " + out_der_file
        logger.info(msg)
        diar.write_ders_file(ref_rttm, DER_vals, out_der_file)
        msg = (
            "AMI "
            + split_type
            + " set DER = %s %%\n" % (str(round(DER_vals[-1], 2)))
        )
        logger.info(msg)
        final_DERs[split_type] = round(DER_vals[-1], 2)
    # Final print DERs
    msg = (
        "Final Diarization Error Rate (%%) on AMI corpus: Dev = %s %% | Eval = %s %%\n"
        % (str(final_DERs["dev"]), str(final_DERs["eval"]))
    )
    logger.info(msg)
| 22,324 | 32.172363 | 135 | py |
speechbrain | speechbrain-main/recipes/Switchboard/LM/train.py | #!/usr/bin/env python3
"""Recipe for training a Language Model on Switchboard and Fisher corpus.
To run this recipe, do the following:
> pip install datasets
> python train.py hparams/<params>.yaml
Authors
* Jianyuan Zhong 2021
* Ju-Chieh Chou 2020
* Dominik Wagner 2022
"""
import sys
import logging
import torch
from hyperpyyaml import load_hyperpyyaml
import speechbrain as sb
from speechbrain.utils.distributed import run_on_main
logger = logging.getLogger(__name__)
# Define training procedure
class LM(sb.core.Brain):
    """Brain subclass training a language model on Switchboard/Fisher text.

    Uses gradient accumulation (``hparams.accu_steps``) and supports both
    loss-driven and step-driven (Noam/CyclicCosine) LR schedulers.
    """

    def compute_forward(self, batch, stage):
        """Forward computations from the sentence batches to the output probabilities."""
        batch = batch.to(self.device)
        tokens_bos, _ = batch.tokens_bos
        logits = self.hparams.model(tokens_bos)
        pred = self.hparams.log_softmax(logits)
        return pred

    def compute_objectives(self, predictions, batch, stage):
        """Computes the loss given predictions and targets (NLL on EOS-shifted tokens)."""
        batch = batch.to(self.device)
        tokens_eos, tokens_len = batch.tokens_eos
        loss = self.hparams.compute_cost(
            predictions, tokens_eos, length=tokens_len
        )
        return loss

    def fit_batch(self, batch):
        """Train the parameters given a single batch in input"""
        predictions = self.compute_forward(batch, sb.Stage.TRAIN)
        loss = self.compute_objectives(predictions, batch, sb.Stage.TRAIN)
        # Scale loss so gradients accumulated over accu_steps average out.
        (loss / self.hparams.accu_steps).backward()
        if self.step % self.hparams.accu_steps == 0:
            # gradient clipping & early stop if loss is not finite
            self.check_gradients(loss)
            self.optimizer.step()
            self.optimizer.zero_grad()
            # Step-driven schedulers advance once per optimizer update.
            if isinstance(
                self.hparams.lr_annealing, sb.nnet.schedulers.NoamScheduler
            ) or isinstance(
                self.hparams.lr_annealing,
                sb.nnet.schedulers.CyclicCosineScheduler,
            ):
                self.hparams.lr_annealing(self.optimizer)
        return loss

    def on_stage_end(self, stage, stage_loss, epoch):
        """Gets called at the end of a epoch."""
        stage_stats = {"loss": stage_loss}
        if stage == sb.Stage.TRAIN:
            self.train_stats = stage_stats
        # Only the main DDP process logs and writes checkpoints.
        if stage == sb.Stage.VALID and sb.utils.distributed.if_main_process():
            # Loss-driven schedulers anneal here; step-driven ones already
            # advanced in fit_batch, so only their current LR is read.
            if not (
                isinstance(
                    self.hparams.lr_annealing, sb.nnet.schedulers.NoamScheduler
                )
                or isinstance(
                    self.hparams.lr_annealing,
                    sb.nnet.schedulers.CyclicCosineScheduler,
                )
            ):
                old_lr, new_lr = self.hparams.lr_annealing(stage_loss)
                sb.nnet.schedulers.update_learning_rate(self.optimizer, new_lr)
            else:
                old_lr = self.hparams.lr_annealing.current_lr
            self.hparams.train_logger.log_stats(
                stats_meta={"epoch": epoch, "lr": old_lr},
                train_stats=self.train_stats,
                valid_stats=stage_stats,
            )
            self.checkpointer.save_and_keep_only(
                meta=stage_stats, min_keys=["loss"],
            )
def dataio_prepare(hparams):
    """Prepare the train/valid/test datasets used by the LM brain class.

    Builds ``DynamicItemDataset`` objects from the prepared CSV files and
    attaches a text pipeline that turns each transcript into BOS-prefixed
    and EOS-suffixed token-id tensors.

    Arguments
    ---------
    hparams : dict
        Loaded hyperparameters; must provide "save_folder", "train_csv",
        "valid_csv", "test_csv", "tokenizer", "bos_index" and "eos_index".

    Returns
    -------
    tuple
        ``(train_data, valid_data, test_data)`` DynamicItemDataset objects.
    """
    data_folder = hparams["save_folder"]
    train_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["train_csv"], replacements={"data_root": data_folder},
    )
    valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["valid_csv"], replacements={"data_root": data_folder},
    )
    test_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["test_csv"], replacements={"data_root": data_folder},
    )
    datasets = [train_data, valid_data, test_data]
    # We get the tokenizer as we need it to encode the labels when creating
    # mini-batches.
    tokenizer = hparams["tokenizer"]
    # Define the text pipeline (was a bare no-op string statement before;
    # a comment expresses the intent without a dead expression).
    @sb.utils.data_pipeline.takes("words")
    @sb.utils.data_pipeline.provides("words", "tokens_bos", "tokens_eos")
    def text_pipeline(words):
        yield words
        tokens_list = tokenizer.encode_as_ids(words)
        # BOS-prefixed input for teacher forcing; EOS-suffixed target.
        tokens_bos = torch.LongTensor([hparams["bos_index"]] + (tokens_list))
        yield tokens_bos
        tokens_eos = torch.LongTensor(tokens_list + [hparams["eos_index"]])
        yield tokens_eos
    sb.dataio.dataset.add_dynamic_item(datasets, text_pipeline)
    # 4. Set output:
    sb.dataio.dataset.set_output_keys(
        datasets, ["id", "words", "tokens_bos", "tokens_eos"],
    )
    return train_data, valid_data, test_data
if __name__ == "__main__":
    # CLI:
    hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])
    with open(hparams_file) as fin:
        hparams = load_hyperpyyaml(fin, overrides)
    # If distributed_launch=True then
    # create ddp_group with the right communication protocol
    sb.utils.distributed.ddp_init_group(run_opts)
    # Create experiment directory
    sb.create_experiment_directory(
        experiment_directory=hparams["output_folder"],
        hyperparams_to_save=hparams_file,
        overrides=overrides,
    )
    # 1. # Dataset prep (parsing Switchboard (and Fisher) data)
    from switchboard_prepare import prepare_switchboard  # noqa
    # multi-gpu (ddp) save data preparation
    # (run_on_main ensures the CSVs are written exactly once)
    run_on_main(
        prepare_switchboard,
        kwargs={
            "data_folder": hparams["data_folder"],
            "splits": hparams["splits"],
            "save_folder": hparams["save_folder"],
            "skip_prep": hparams["skip_prep"],
            "add_fisher_corpus": hparams["add_fisher_corpus"],
            "split_ratio": hparams["split_ratio"],
            "max_utt": hparams["max_utt"],
        },
    )
    # here we create the dataloader objects as well as tokenization and encoding
    train_data, valid_data, test_data = dataio_prepare(hparams)
    # We download the tokenizer and pretrained LM from HuggingFace (or elsewhere depending on
    # the path given in tokenizer_file of the hparams YAML file).
    run_on_main(hparams["pretrainer"].collect_files)
    hparams["pretrainer"].load_collected(device=run_opts["device"])
    lm_brain = LM(
        modules=hparams["modules"],
        opt_class=hparams["optimizer"],
        hparams=hparams,
        run_opts=run_opts,
        checkpointer=hparams["checkpointer"],
    )
    # Train over the configured number of epochs; checkpoints are saved on
    # validation loss inside LM.on_stage_end.
    lm_brain.fit(
        lm_brain.hparams.epoch_counter,
        train_data,
        valid_data,
        train_loader_kwargs=hparams["train_dataloader_opts"],
        valid_loader_kwargs=hparams["valid_dataloader_opts"],
    )
    # evaluation (loads the checkpoint with minimum validation loss)
    test_stats = lm_brain.evaluate(
        test_data,
        min_key="loss",
        test_loader_kwargs=hparams["test_dataloader_opts"],
    )
    lm_brain.checkpointer.save_checkpoint(name="latest")
| 7,050 | 32.417062 | 93 | py |
speechbrain | speechbrain-main/recipes/Switchboard/ASR/seq2seq/train.py | #!/usr/bin/env/python3
"""Recipe for training a sequence-to-sequence ASR system with Switchboard.
The system employs an encoder, a decoder, and an attention mechanism
between them. Decoding is performed with beamsearch.
To run this recipe, do the following:
> python train.py hparams/train_BPE1000.yaml
With the default hyperparameters, the system employs a CRDNN encoder.
The decoder is based on a standard GRU.
The neural network is trained on both CTC and negative-log likelihood
targets and sub-word units estimated with Byte Pairwise Encoding (BPE)
are used as basic recognition tokens.
The experiment file is flexible enough to support a large variety of
different systems. By properly changing the parameter files, you can try
different encoders, decoders, tokens (e.g, characters instead of BPE),
training split, and many other possible variations.
This recipe assumes that the tokenizer is already trained.
Authors
* Ju-Chieh Chou 2020
* Mirco Ravanelli 2020
* Abdel Heba 2020
* Peter Plantinga 2020
* Samuele Cornell 2020
* Andreas Nautsch 2021
* Dominik Wagner 2022
"""
import functools
import os
import sys
import torch
import logging
import torchaudio
import speechbrain as sb
from speechbrain.utils.distributed import run_on_main
from hyperpyyaml import load_hyperpyyaml
from pathlib import Path
logger = logging.getLogger(__name__)
# Define training procedure
class ASR(sb.Brain):
    """Brain subclass for the CRDNN encoder / attentional decoder ASR system.

    Adds a ``normalize_fn`` hook on top of ``sb.Brain``: an optional callable
    applied at TEST time to (target_words, predicted_words) to handle word
    alternatives/exclusions before scoring.
    """
    def __init__(
        self,
        modules=None,
        opt_class=None,
        hparams=None,
        run_opts=None,
        checkpointer=None,
        profiler=None,
        normalize_fn=None,
    ):
        # Store the optional transcript-normalization callable, then defer
        # everything else to the standard Brain constructor.
        self.normalize_fn = normalize_fn
        super().__init__(
            modules=modules,
            opt_class=opt_class,
            hparams=hparams,
            run_opts=run_opts,
            checkpointer=checkpointer,
            profiler=profiler,
        )
    def compute_forward(self, batch, stage):
        """Forward computations from the waveform batches to the output probabilities.

        Returns (p_ctc, p_seq, wav_lens) during the CTC warm-up epochs of
        training, (p_seq, wav_lens) afterwards, and
        (p_seq, wav_lens, hypotheses) for VALID/TEST.
        """
        batch = batch.to(self.device)
        wavs, wav_lens = batch.sig
        tokens_bos, _ = batch.tokens_bos
        wavs, wav_lens = wavs.to(self.device), wav_lens.to(self.device)
        # Add augmentation if specified
        if stage == sb.Stage.TRAIN:
            if hasattr(self.modules, "env_corrupt"):
                # Concatenate clean and corrupted copies, doubling the batch;
                # targets are duplicated to match in compute_objectives.
                wavs_noise = self.modules.env_corrupt(wavs, wav_lens)
                wavs = torch.cat([wavs, wavs_noise], dim=0)
                wav_lens = torch.cat([wav_lens, wav_lens])
                tokens_bos = torch.cat([tokens_bos, tokens_bos], dim=0)
            if hasattr(self.hparams, "augmentation"):
                wavs = self.hparams.augmentation(wavs, wav_lens)
        # Forward pass
        feats = self.hparams.compute_features(wavs)
        feats = self.modules.normalize(feats, wav_lens)
        x = self.modules.enc(feats.detach())
        e_in = self.modules.emb(tokens_bos)  # y_in bos + tokens
        h, _ = self.modules.dec(e_in, x, wav_lens)
        # Output layer for seq2seq log-probabilities
        logits = self.modules.seq_lin(h)
        p_seq = self.hparams.log_softmax(logits)
        # Compute outputs
        if stage == sb.Stage.TRAIN:
            current_epoch = self.hparams.epoch_counter.current
            if current_epoch <= self.hparams.number_of_ctc_epochs:
                # Output layer for ctc log-probabilities
                logits = self.modules.ctc_lin(x)
                p_ctc = self.hparams.log_softmax(logits)
                return p_ctc, p_seq, wav_lens
            else:
                return p_seq, wav_lens
        else:
            # Beamsearch decoding for evaluation; scores are discarded.
            if stage == sb.Stage.VALID:
                p_tokens, scores = self.hparams.valid_search(x, wav_lens)
            else:
                p_tokens, scores = self.hparams.test_search(x, wav_lens)
            return p_seq, wav_lens, p_tokens
    def compute_objectives(self, predictions, batch, stage):
        """Computes the loss (CTC+NLL) given predictions and targets."""
        current_epoch = self.hparams.epoch_counter.current
        # Unpack according to the tuple shape produced by compute_forward.
        if stage == sb.Stage.TRAIN:
            if current_epoch <= self.hparams.number_of_ctc_epochs:
                p_ctc, p_seq, wav_lens = predictions
            else:
                p_seq, wav_lens = predictions
        else:
            p_seq, wav_lens, predicted_tokens = predictions
        ids = batch.id
        tokens_eos, tokens_eos_lens = batch.tokens_eos
        tokens, tokens_lens = batch.tokens
        # Mirror the batch doubling done by env_corrupt in compute_forward.
        if hasattr(self.modules, "env_corrupt") and stage == sb.Stage.TRAIN:
            tokens_eos = torch.cat([tokens_eos, tokens_eos], dim=0)
            tokens_eos_lens = torch.cat(
                [tokens_eos_lens, tokens_eos_lens], dim=0
            )
            tokens = torch.cat([tokens, tokens], dim=0)
            tokens_lens = torch.cat([tokens_lens, tokens_lens], dim=0)
        loss_seq = self.hparams.seq_cost(
            p_seq, tokens_eos, length=tokens_eos_lens
        )
        # Add ctc loss if necessary
        if (
            stage == sb.Stage.TRAIN
            and current_epoch <= self.hparams.number_of_ctc_epochs
        ):
            loss_ctc = self.hparams.ctc_cost(
                p_ctc, tokens, wav_lens, tokens_lens
            )
            loss = self.hparams.ctc_weight * loss_ctc
            loss += (1 - self.hparams.ctc_weight) * loss_seq
        else:
            loss = loss_seq
        if stage != sb.Stage.TRAIN:
            # Decode token terms to words
            predicted_words = [
                self.tokenizer.decode_ids(utt_seq).split()
                for utt_seq in predicted_tokens
            ]
            target_words = [wrd.split() for wrd in batch.words]
            # Check for possible word alternatives and exclusions
            if stage == sb.Stage.TEST and self.normalize_fn is not None:
                target_words, predicted_words = self.normalize_fn(
                    target_words, predicted_words
                )
            self.wer_metric.append(ids, predicted_words, target_words)
            self.cer_metric.append(ids, predicted_words, target_words)
        return loss
    def fit_batch(self, batch):
        """Train the parameters given a single batch in input"""
        predictions = self.compute_forward(batch, sb.Stage.TRAIN)
        loss = self.compute_objectives(predictions, batch, sb.Stage.TRAIN)
        loss.backward()
        # check_gradients clips and returns False for non-finite losses,
        # in which case the optimizer step is skipped.
        if self.check_gradients(loss):
            self.optimizer.step()
        self.optimizer.zero_grad()
        return loss.detach()
    def evaluate_batch(self, batch, stage):
        """Computations needed for validation/test batches"""
        predictions = self.compute_forward(batch, stage=stage)
        with torch.no_grad():
            loss = self.compute_objectives(predictions, batch, stage=stage)
        return loss.detach()
    def on_stage_start(self, stage, epoch):
        """Gets called at the beginning of each epoch"""
        if stage != sb.Stage.TRAIN:
            # Fresh error-rate trackers for each VALID/TEST pass.
            self.cer_metric = self.hparams.cer_computer()
            self.wer_metric = self.hparams.error_rate_computer()
    def on_stage_end(self, stage, stage_loss, epoch):
        """Gets called at the end of a epoch."""
        # Compute/store important stats
        stage_stats = {"loss": stage_loss}
        if stage == sb.Stage.TRAIN:
            self.train_stats = stage_stats
        else:
            stage_stats["CER"] = self.cer_metric.summarize("error_rate")
            stage_stats["WER"] = self.wer_metric.summarize("error_rate")
        # Perform end-of-iteration things, like annealing, logging, etc.
        if stage == sb.Stage.VALID:
            old_lr, new_lr = self.hparams.lr_annealing(stage_stats["WER"])
            sb.nnet.schedulers.update_learning_rate(self.optimizer, new_lr)
            self.hparams.train_logger.log_stats(
                stats_meta={"epoch": epoch, "lr": old_lr},
                train_stats=self.train_stats,
                valid_stats=stage_stats,
            )
            self.checkpointer.save_and_keep_only(
                meta={"WER": stage_stats["WER"]}, min_keys=["WER"],
            )
        elif stage == sb.Stage.TEST:
            self.hparams.train_logger.log_stats(
                stats_meta={"Epoch loaded": self.hparams.epoch_counter.current},
                test_stats=stage_stats,
            )
            with open(self.hparams.wer_file, "w") as w:
                self.wer_metric.write_stats(w)
def dataio_prepare(hparams):
    """This function prepares the datasets to be used in the brain class.
    It also defines the data processing pipeline through user-defined functions.

    Returns (train_data, valid_data, test_datasets, train_batch_sampler,
    valid_batch_sampler); the samplers are None unless dynamic batching
    is enabled in hparams.
    """
    data_folder = hparams["data_folder"]
    train_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["train_csv"], replacements={"data_root": data_folder},
    )
    if hparams["sorting"] == "ascending":
        # we sort training data to speed up training and get better results.
        train_data = train_data.filtered_sorted(sort_key="duration")
        # when sorting do not shuffle in dataloader ! otherwise is pointless
        hparams["train_dataloader_opts"]["shuffle"] = False
    elif hparams["sorting"] == "descending":
        train_data = train_data.filtered_sorted(
            sort_key="duration", reverse=True
        )
        # when sorting do not shuffle in dataloader ! otherwise is pointless
        hparams["train_dataloader_opts"]["shuffle"] = False
    elif hparams["sorting"] == "random":
        pass
    else:
        raise NotImplementedError(
            "sorting must be random, ascending or descending"
        )
    valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["valid_csv"], replacements={"data_root": data_folder},
    )
    valid_data = valid_data.filtered_sorted(sort_key="duration")
    # test is separate: one dataset per test CSV, keyed by file stem
    test_datasets = {}
    for csv_file in hparams["test_csv"]:
        name = Path(csv_file).stem
        test_datasets[name] = sb.dataio.dataset.DynamicItemDataset.from_csv(
            csv_path=csv_file, replacements={"data_root": data_folder}
        )
        test_datasets[name] = test_datasets[name].filtered_sorted(
            sort_key="duration"
        )
    datasets = [train_data, valid_data] + [i for _, i in test_datasets.items()]
    # We get the tokenizer as we need it to encode the labels when creating
    # mini-batches.
    tokenizer = hparams["tokenizer"]
    # 2. Define audio pipeline:
    @sb.utils.data_pipeline.takes("wav", "channel", "start", "stop")
    @sb.utils.data_pipeline.provides("sig")
    def audio_pipeline(wav, channel, start, stop):
        # Select a speech segment from the sph file
        # start and end times are already frames.
        # This is done in data preparation stage.
        start = int(start)
        stop = int(stop)
        num_frames = stop - start
        sig, fs = torchaudio.load(
            wav, num_frames=num_frames, frame_offset=start
        )
        info = torchaudio.info(wav)
        resampled = sig
        # Maybe resample to 16kHz
        if int(info.sample_rate) != int(hparams["sample_rate"]):
            resampled = torchaudio.transforms.Resample(
                info.sample_rate, hparams["sample_rate"],
            )(sig)
        # (channels, time) -> (time, channels); squeeze drops a singleton
        # channel dimension for mono files.
        resampled = resampled.transpose(0, 1).squeeze(1)
        if info.num_channels > 1:
            # Select the proper audio channel of the segment
            if channel == "A":
                resampled = resampled[:, 0]
            else:
                resampled = resampled[:, 1]
        return resampled
    sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline)
    # 3. Define text pipeline:
    @sb.utils.data_pipeline.takes("words")
    @sb.utils.data_pipeline.provides(
        "words", "tokens_list", "tokens_bos", "tokens_eos", "tokens"
    )
    def text_pipeline(words):
        yield words
        tokens_list = tokenizer.encode_as_ids(words)
        yield tokens_list
        # BOS-prefixed decoder input and EOS-suffixed target.
        tokens_bos = torch.LongTensor([hparams["bos_index"]] + (tokens_list))
        yield tokens_bos
        tokens_eos = torch.LongTensor(tokens_list + [hparams["eos_index"]])
        yield tokens_eos
        tokens = torch.LongTensor(tokens_list)
        yield tokens
    sb.dataio.dataset.add_dynamic_item(datasets, text_pipeline)
    # 4. Set output:
    sb.dataio.dataset.set_output_keys(
        datasets, ["id", "sig", "words", "tokens_bos", "tokens_eos", "tokens"],
    )
    train_batch_sampler = None
    valid_batch_sampler = None
    if hparams["dynamic_batching"]:
        from speechbrain.dataio.sampler import DynamicBatchSampler  # noqa
        from speechbrain.dataio.dataloader import SaveableDataLoader  # noqa
        from speechbrain.dataio.batch import PaddedBatch  # noqa
        dynamic_hparams = hparams["dynamic_batch_sampler"]
        hop_size = dynamic_hparams["feats_hop_size"]
        num_buckets = dynamic_hparams["num_buckets"]
        # length_func converts utterance duration (s) to feature frames.
        train_batch_sampler = DynamicBatchSampler(
            train_data,
            dynamic_hparams["max_batch_len"],
            num_buckets=num_buckets,
            length_func=lambda x: int(float(x["duration"]) * (1 / hop_size)),
            shuffle=dynamic_hparams["shuffle_ex"],
            batch_ordering=dynamic_hparams["batch_ordering"],
        )
        valid_batch_sampler = DynamicBatchSampler(
            valid_data,
            dynamic_hparams["max_batch_len"],
            num_buckets=num_buckets,
            length_func=lambda x: int(float(x["duration"]) * (1 / hop_size)),
            shuffle=dynamic_hparams["shuffle_ex"],
            batch_ordering=dynamic_hparams["batch_ordering"],
        )
    return (
        train_data,
        valid_data,
        test_datasets,
        train_batch_sampler,
        valid_batch_sampler,
    )
if __name__ == "__main__":
    # CLI:
    hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])
    # If distributed_launch=True then
    # create ddp_group with the right communication protocol
    sb.utils.distributed.ddp_init_group(run_opts)
    with open(hparams_file) as fin:
        hparams = load_hyperpyyaml(fin, overrides)
    # Create experiment directory
    sb.create_experiment_directory(
        experiment_directory=hparams["output_folder"],
        hyperparams_to_save=hparams_file,
        overrides=overrides,
    )
    # Dataset prep (parsing Switchboard)
    from switchboard_prepare import prepare_switchboard  # noqa
    from normalize_util import normalize_words, read_glm_csv  # noqa
    # multi-gpu (ddp) save data preparation
    run_on_main(
        prepare_switchboard,
        kwargs={
            "data_folder": hparams["data_folder"],
            "save_folder": hparams["output_folder"],
            "splits": hparams["splits"],
            "split_ratio": hparams["split_ratio"],
            "skip_prep": hparams["skip_prep"],
            "add_fisher_corpus": hparams["add_fisher_corpus"],
            "max_utt": hparams["max_utt"],
        },
    )
    # create the dataset objects as well as tokenization and encoding
    (
        train_data,
        valid_data,
        test_datasets,
        train_bsampler,
        valid_bsampler,
    ) = dataio_prepare(hparams)
    # Depending on the path given in the hparams YAML file,
    # we download the pretrained LM and Tokenizer
    run_on_main(hparams["pretrainer"].collect_files)
    hparams["pretrainer"].load_collected(device=run_opts["device"])
    # Helper function that removes optional/deletable parts of the transcript
    # for cleaner performance metrics
    normalize_fn = None
    if hparams["normalize_words"]:
        normalize_fn = functools.partial(
            normalize_words,
            glm_alternatives=read_glm_csv(hparams["output_folder"]),
        )
    # Trainer initialization
    asr_brain = ASR(
        modules=hparams["modules"],
        opt_class=hparams["opt_class"],
        hparams=hparams,
        run_opts=run_opts,
        checkpointer=hparams["checkpointer"],
        normalize_fn=normalize_fn,
    )
    # We dynamically add the tokenizer to our brain class.
    # NB: This tokenizer corresponds to the one used for the LM!
    asr_brain.tokenizer = hparams["tokenizer"]
    train_dataloader_opts = hparams["train_dataloader_opts"]
    valid_dataloader_opts = hparams["valid_dataloader_opts"]
    # Dynamic batch samplers (if enabled) replace the static loader opts.
    if train_bsampler is not None:
        train_dataloader_opts = {"batch_sampler": train_bsampler}
    if valid_bsampler is not None:
        valid_dataloader_opts = {"batch_sampler": valid_bsampler}
    # Training
    asr_brain.fit(
        asr_brain.hparams.epoch_counter,
        train_data,
        valid_data,
        train_loader_kwargs=train_dataloader_opts,
        valid_loader_kwargs=valid_dataloader_opts,
    )
    # Testing
    for k in test_datasets.keys():  # keys are test_swbd and test_callhome
        asr_brain.hparams.wer_file = os.path.join(
            hparams["output_folder"], "wer_{}.txt".format(k)
        )
        asr_brain.evaluate(
            test_datasets[k], test_loader_kwargs=hparams["test_dataloader_opts"]
        )
| 17,124 | 34.901468 | 89 | py |
speechbrain | speechbrain-main/recipes/Switchboard/ASR/CTC/train_with_wav2vec.py | #!/usr/bin/env python3
import functools
import os
import sys
from pathlib import Path
import torch
import logging
import speechbrain as sb
import torchaudio
from hyperpyyaml import load_hyperpyyaml
from speechbrain.tokenizers.SentencePiece import SentencePiece
from speechbrain.utils.data_utils import undo_padding
from speechbrain.utils.distributed import run_on_main
"""Recipe for training a sequence-to-sequence ASR system with Switchboard.
The system employs a wav2vec2 encoder and a CTC decoder.
Decoding is performed with greedy decoding.
To run this recipe, do the following:
> python train_with_wav2vec2.py hparams/train_with_wav2vec2.yaml
With the default hyperparameters, the system employs a pretrained wav2vec2 encoder.
The wav2vec2 model is pretrained following the model given in the hparams file.
The neural network is trained with CTC on sub-word units (based on e.g. Byte Pairwise Encoding or a unigram language
model).
The experiment file is flexible enough to support a large variety of
different systems. By properly changing the parameter files, you can try
different encoders, decoders, tokens (e.g, characters instead of BPE), and many
other possible variations.
Authors
* Titouan Parcollet 2021
* Dominik Wagner 2022
"""
logger = logging.getLogger(__name__)
# Define training procedure
class ASR(sb.core.Brain):
    """Brain subclass for the wav2vec2 encoder + CTC ASR system.

    Uses two optimizers (one for the wav2vec2 encoder, one for the rest of
    the model) and supports automatic mixed precision. ``normalize_fn`` is
    an optional callable applied at TEST time to
    (target_words, predicted_words) before scoring.
    """
    def __init__(
        self,
        modules=None,
        opt_class=None,
        hparams=None,
        run_opts=None,
        checkpointer=None,
        profiler=None,
        normalize_fn=None,
    ):
        # Store the optional transcript-normalization callable, then defer
        # to the standard Brain constructor.
        self.normalize_fn = normalize_fn
        super().__init__(
            modules=modules,
            opt_class=opt_class,
            hparams=hparams,
            run_opts=run_opts,
            checkpointer=checkpointer,
            profiler=profiler,
        )
    def compute_forward(self, batch, stage):
        """Forward computations from the waveform batches to the output probabilities."""
        batch = batch.to(self.device)
        wavs, wav_lens = batch.sig
        tokens_bos, _ = batch.tokens_bos
        wavs, wav_lens = wavs.to(self.device), wav_lens.to(self.device)
        if stage == sb.Stage.TRAIN:
            if hasattr(self.hparams, "augmentation"):
                wavs = self.hparams.augmentation(wavs, wav_lens)
        # Forward pass: wav2vec2 features -> encoder -> CTC projection
        feats = self.modules.wav2vec2(wavs, wav_lens)
        x = self.modules.enc(feats)
        logits = self.modules.ctc_lin(x)
        p_ctc = self.hparams.log_softmax(logits)
        return p_ctc, wav_lens
    def compute_objectives(self, predictions, batch, stage):
        """Computes the loss (CTC) given predictions and targets."""
        p_ctc, wav_lens = predictions
        ids = batch.id
        tokens, tokens_lens = batch.tokens
        loss = self.hparams.ctc_cost(p_ctc, tokens, wav_lens, tokens_lens)
        if stage != sb.Stage.TRAIN:
            # Decode token terms to words (greedy CTC decoding)
            sequence = sb.decoders.ctc_greedy_decode(
                p_ctc, wav_lens, blank_id=self.hparams.blank_index
            )
            predicted_words = self.tokenizer(sequence, task="decode_from_list")
            # Convert indices to words
            target_words = undo_padding(tokens, tokens_lens)
            target_words = self.tokenizer(target_words, task="decode_from_list")
            # Check for possible word alternatives and exclusions
            if stage == sb.Stage.TEST and self.normalize_fn is not None:
                target_words, predicted_words = self.normalize_fn(
                    target_words, predicted_words
                )
            self.wer_metric.append(ids, predicted_words, target_words)
            self.cer_metric.append(ids, predicted_words, target_words)
        return loss
    def fit_batch(self, batch):
        """Train the parameters given a single batch in input.

        Two code paths: mixed precision (GradScaler-managed) and full
        precision. The wav2vec2 optimizer only participates when the
        encoder is not frozen.
        """
        if self.auto_mix_prec:
            if not self.hparams.wav2vec2.freeze:
                self.wav2vec_optimizer.zero_grad()
            self.model_optimizer.zero_grad()
            with torch.cuda.amp.autocast():
                outputs = self.compute_forward(batch, sb.Stage.TRAIN)
                loss = self.compute_objectives(outputs, batch, sb.Stage.TRAIN)
            self.scaler.scale(loss).backward()
            # Unscale before clipping so check_gradients sees true grads.
            if not self.hparams.wav2vec2.freeze:
                self.scaler.unscale_(self.wav2vec_optimizer)
            self.scaler.unscale_(self.model_optimizer)
            if self.check_gradients(loss):
                if not self.hparams.wav2vec2.freeze:
                    self.scaler.step(self.wav2vec_optimizer)
                self.scaler.step(self.model_optimizer)
            self.scaler.update()
        else:
            outputs = self.compute_forward(batch, sb.Stage.TRAIN)
            loss = self.compute_objectives(outputs, batch, sb.Stage.TRAIN)
            loss.backward()
            if self.check_gradients(loss):
                if not self.hparams.wav2vec2.freeze:
                    self.wav2vec_optimizer.step()
                self.model_optimizer.step()
            if not self.hparams.wav2vec2.freeze:
                self.wav2vec_optimizer.zero_grad()
            self.model_optimizer.zero_grad()
        return loss.detach()
    def evaluate_batch(self, batch, stage):
        """Computations needed for validation/test batches"""
        predictions = self.compute_forward(batch, stage=stage)
        with torch.no_grad():
            loss = self.compute_objectives(predictions, batch, stage=stage)
        return loss.detach()
    def on_stage_start(self, stage, epoch):
        """Gets called at the beginning of each epoch"""
        if stage != sb.Stage.TRAIN:
            # Fresh error-rate trackers for each VALID/TEST pass.
            self.cer_metric = self.hparams.cer_computer()
            self.wer_metric = self.hparams.error_rate_computer()
    def on_stage_end(self, stage, stage_loss, epoch):
        """Gets called at the end of an epoch."""
        # Compute/store important stats
        stage_stats = {"loss": stage_loss}
        if stage == sb.Stage.TRAIN:
            self.train_stats = stage_stats
        else:
            stage_stats["CER"] = self.cer_metric.summarize("error_rate")
            stage_stats["WER"] = self.wer_metric.summarize("error_rate")
        # Perform end-of-iteration things, like annealing, logging, etc.
        if stage == sb.Stage.VALID:
            # Both LRs anneal on validation loss; each optimizer is
            # updated with its own schedule.
            old_lr_model, new_lr_model = self.hparams.lr_annealing_model(
                stage_stats["loss"]
            )
            old_lr_wav2vec, new_lr_wav2vec = self.hparams.lr_annealing_wav2vec(
                stage_stats["loss"]
            )
            sb.nnet.schedulers.update_learning_rate(
                self.model_optimizer, new_lr_model
            )
            if not self.hparams.wav2vec2.freeze:
                sb.nnet.schedulers.update_learning_rate(
                    self.wav2vec_optimizer, new_lr_wav2vec
                )
            self.hparams.train_logger.log_stats(
                stats_meta={
                    "epoch": epoch,
                    "lr_model": old_lr_model,
                    "lr_wav2vec": old_lr_wav2vec,
                },
                train_stats=self.train_stats,
                valid_stats=stage_stats,
            )
            self.checkpointer.save_and_keep_only(
                meta={"WER": stage_stats["WER"]}, min_keys=["WER"],
            )
        elif stage == sb.Stage.TEST:
            self.hparams.train_logger.log_stats(
                stats_meta={"Epoch loaded": self.hparams.epoch_counter.current},
                test_stats=stage_stats,
            )
            with open(self.hparams.wer_file, "w") as w:
                self.wer_metric.write_stats(w)
    def init_optimizers(self):
        "Initializes the wav2vec2 optimizer and model optimizer"
        # If the wav2vec encoder is unfrozen, we create the optimizer
        if not self.hparams.wav2vec2.freeze:
            self.wav2vec_optimizer = self.hparams.wav2vec_opt_class(
                self.modules.wav2vec2.parameters()
            )
            if self.checkpointer is not None:
                self.checkpointer.add_recoverable(
                    "wav2vec_opt", self.wav2vec_optimizer
                )
        self.model_optimizer = self.hparams.model_opt_class(
            self.hparams.model.parameters()
        )
        if self.checkpointer is not None:
            self.checkpointer.add_recoverable("modelopt", self.model_optimizer)
# Define custom data procedure
def dataio_prepare(hparams, tokenizer):
    """Prepare the datasets used by the CTC brain class.

    Builds ``DynamicItemDataset`` objects from the prepared CSV files,
    applies duration-based sorting/filtering, and attaches the audio and
    text processing pipelines.

    Arguments
    ---------
    hparams : dict
        Loaded hyperparameters (CSV paths, sorting mode, sample rate,
        BOS/EOS indices, duration limits, ...).
    tokenizer : SentencePiece
        Trained tokenizer wrapper; its ``sp`` model encodes transcripts.

    Returns
    -------
    tuple
        ``(train_data, valid_data, test_datasets)`` where ``test_datasets``
        is a dict keyed by test-CSV file stem.
    """
    # 1. Define datasets
    data_folder = hparams["data_folder"]
    train_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["train_csv"], replacements={"data_root": data_folder},
    )
    if hparams["sorting"] == "ascending":
        # we sort training data to speed up training and get better results.
        train_data = train_data.filtered_sorted(
            sort_key="duration",
            key_max_value={"duration": hparams["avoid_if_longer_than"]},
        )
        # when sorting do not shuffle in dataloader ! otherwise is pointless
        hparams["dataloader_options"]["shuffle"] = False
    elif hparams["sorting"] == "descending":
        train_data = train_data.filtered_sorted(
            sort_key="duration",
            reverse=True,
            key_max_value={"duration": hparams["avoid_if_longer_than"]},
        )
        # when sorting do not shuffle in dataloader ! otherwise is pointless
        hparams["dataloader_options"]["shuffle"] = False
    elif hparams["sorting"] == "random":
        pass
    else:
        raise NotImplementedError(
            "sorting must be random, ascending or descending"
        )
    valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["valid_csv"], replacements={"data_root": data_folder},
    )
    # We also sort the validation data so it is faster to validate
    valid_data = valid_data.filtered_sorted(sort_key="duration")
    test_datasets = {}
    for csv_file in hparams["test_csv"]:
        name = Path(csv_file).stem
        test_datasets[name] = sb.dataio.dataset.DynamicItemDataset.from_csv(
            csv_path=csv_file, replacements={"data_root": data_folder}
        )
        test_datasets[name] = test_datasets[name].filtered_sorted(
            sort_key="duration"
        )
    datasets = [train_data, valid_data] + [i for _, i in test_datasets.items()]
    # 2. Define audio pipeline:
    @sb.utils.data_pipeline.takes("wav", "channel", "start", "stop")
    @sb.utils.data_pipeline.provides("sig")
    def audio_pipeline(wav, channel, start, stop):
        # Select a speech segment from the sph file
        # start and end times are already frames.
        # This is done in data preparation stage.
        start = int(start)
        stop = int(stop)
        num_frames = stop - start
        sig, fs = torchaudio.load(
            wav, num_frames=num_frames, frame_offset=start
        )
        info = torchaudio.info(wav)
        resampled = sig
        # Maybe resample to 16kHz
        if int(info.sample_rate) != int(hparams["sample_rate"]):
            resampled = torchaudio.transforms.Resample(
                info.sample_rate, hparams["sample_rate"],
            )(sig)
        # (channels, time) -> (time, channels); squeeze drops a singleton
        # channel dimension for mono files.
        resampled = resampled.transpose(0, 1).squeeze(1)
        if info.num_channels > 1:
            # Select the proper audio channel of the segment
            if channel == "A":
                resampled = resampled[:, 0]
            else:
                resampled = resampled[:, 1]
        return resampled
    sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline)
    # 3. Define text pipeline:
    # (parameter renamed wrd -> words for consistency with the
    # @takes("words") decorator and the sibling Switchboard recipes)
    @sb.utils.data_pipeline.takes("words")
    @sb.utils.data_pipeline.provides(
        "tokens_list", "tokens_bos", "tokens_eos", "tokens"
    )
    def text_pipeline(words):
        tokens_list = tokenizer.sp.encode_as_ids(words)
        yield tokens_list
        tokens_bos = torch.LongTensor([hparams["bos_index"]] + (tokens_list))
        yield tokens_bos
        tokens_eos = torch.LongTensor(tokens_list + [hparams["eos_index"]])
        yield tokens_eos
        tokens = torch.LongTensor(tokens_list)
        yield tokens
    sb.dataio.dataset.add_dynamic_item(datasets, text_pipeline)
    # 4. Set output:
    sb.dataio.dataset.set_output_keys(
        datasets, ["id", "sig", "tokens_bos", "tokens_eos", "tokens"],
    )
    return train_data, valid_data, test_datasets
if __name__ == "__main__":
    # Load hyperparameters file with command-line overrides
    hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])
    with open(hparams_file) as fin:
        hparams = load_hyperpyyaml(fin, overrides)
    # If distributed_launch=True then
    # create ddp_group with the right communication protocol
    sb.utils.distributed.ddp_init_group(run_opts)
    # Dataset preparation (parsing Switchboard)
    from switchboard_prepare import prepare_switchboard  # noqa
    from normalize_util import normalize_words, read_glm_csv  # noqa
    # Create experiment directory
    sb.create_experiment_directory(
        experiment_directory=hparams["output_folder"],
        hyperparams_to_save=hparams_file,
        overrides=overrides,
    )
    # Due to DDP, we do the preparation ONLY on the main python process
    run_on_main(
        prepare_switchboard,
        kwargs={
            "data_folder": hparams["data_folder"],
            "save_folder": hparams["output_folder"],
            "splits": hparams["splits"],
            "split_ratio": hparams["split_ratio"],
            "skip_prep": hparams["skip_prep"],
            "add_fisher_corpus": hparams["add_fisher_corpus"],
            "max_utt": hparams["max_utt"],
        },
    )
    # Defining tokenizer and loading it
    # (trains a SentencePiece model on the prepared transcripts, or loads
    # an existing one from save_folder)
    tokenizer = SentencePiece(
        model_dir=hparams["save_folder"],
        vocab_size=hparams["output_neurons"],
        annotation_train=hparams["train_tokenizer_csv"],
        annotation_read="words",
        model_type=hparams["token_type"],
        character_coverage=hparams["character_coverage"],
    )
    # Create the datasets objects as well as tokenization and encoding
    train_data, valid_data, test_datasets = dataio_prepare(hparams, tokenizer)
    # Helper function that removes optional/deletable parts of the transcript
    # for cleaner performance metrics
    normalize_fn = None
    if hparams["normalize_words"]:
        normalize_fn = functools.partial(
            normalize_words,
            glm_alternatives=read_glm_csv(hparams["output_folder"]),
        )
    # Trainer initialization
    asr_brain = ASR(
        modules=hparams["modules"],
        hparams=hparams,
        run_opts=run_opts,
        checkpointer=hparams["checkpointer"],
        normalize_fn=normalize_fn,
    )
    # Adding objects to trainer.
    asr_brain.tokenizer = tokenizer
    # Training
    asr_brain.fit(
        asr_brain.hparams.epoch_counter,
        train_data,
        valid_data,
        train_loader_kwargs=hparams["dataloader_options"],
        valid_loader_kwargs=hparams["test_dataloader_options"],
    )
    # Test
    for k in test_datasets.keys():  # keys are test_clean, test_other etc
        asr_brain.hparams.wer_file = os.path.join(
            hparams["output_folder"], "wer_{}.txt".format(k)
        )
        asr_brain.evaluate(
            test_datasets[k],
            test_loader_kwargs=hparams["test_dataloader_options"],
        )
| 15,887 | 34.623318 | 116 | py |
speechbrain | speechbrain-main/recipes/Switchboard/ASR/transformer/train.py | #!/usr/bin/env python3
"""Recipe for training a Transformer ASR system with Switchboard.
The system employs an encoder, a decoder, and an attention mechanism
between them. Decoding is performed with (CTC/Att joint) beamsearch coupled with a neural
language model.
To run this recipe, do the following:
> python train.py hparams/transformer.yaml
With the default hyperparameters, the system employs a convolutional frontend and a transformer.
The decoder is based on a Transformer decoder. Beamsearch coupled with a Transformer
language model is used on the top of decoder probabilities.
The neural network is trained on both CTC and negative-log likelihood
targets and sub-word units estimated with Byte Pairwise Encoding (BPE)
are used as basic recognition tokens. Training is performed on the full
Switchboard dataset (~300 h).
The best model is the average of the checkpoints from last 5 epochs.
The experiment file is flexible enough to support a large variety of
different systems. By properly changing the parameter files, you can try
different encoders, decoders, tokens (e.g, characters instead of BPE), and many
other possible variations.
Authors
* Jianyuan Zhong 2020
* Mirco Ravanelli 2020
* Peter Plantinga 2020
* Samuele Cornell 2020, 2021, 2022
* Titouan Parcollet 2021, 2022
* Dominik Wagner 2022
"""
import functools
import os
import sys
import torch
import logging
from pathlib import Path
import torchaudio
import speechbrain as sb
from hyperpyyaml import load_hyperpyyaml
from speechbrain.utils.distributed import run_on_main
logger = logging.getLogger(__name__)
# Define training procedure
class ASR(sb.core.Brain):
    """Brain subclass implementing the CTC + attention Transformer ASR
    training and evaluation loops for the Switchboard recipe."""
    def __init__(
        self,
        modules=None,
        opt_class=None,
        hparams=None,
        run_opts=None,
        checkpointer=None,
        profiler=None,
        normalize_fn=None,
    ):
        """Same as ``sb.core.Brain.__init__`` plus ``normalize_fn``: an
        optional callable applied to (target_words, predicted_words) at TEST
        time to normalize transcripts before WER scoring."""
        self.normalize_fn = normalize_fn
        super().__init__(
            modules=modules,
            opt_class=opt_class,
            hparams=hparams,
            run_opts=run_opts,
            checkpointer=checkpointer,
            profiler=profiler,
        )
    def compute_forward(self, batch, stage):
        """Forward computations from the waveform batches to the output probabilities."""
        batch = batch.to(self.device)
        wavs, wav_lens = batch.sig
        tokens_bos, _ = batch.tokens_bos
        # Add augmentation if specified
        if stage == sb.Stage.TRAIN:
            if hasattr(self.modules, "env_corrupt"):
                # Environmental corruption doubles the batch (clean + noisy);
                # targets are duplicated to match in compute_objectives.
                wavs_noise = self.modules.env_corrupt(wavs, wav_lens)
                wavs = torch.cat([wavs, wavs_noise], dim=0)
                wav_lens = torch.cat([wav_lens, wav_lens])
                tokens_bos = torch.cat([tokens_bos, tokens_bos], dim=0)
        # compute features
        feats = self.hparams.compute_features(wavs)
        current_epoch = self.hparams.epoch_counter.current
        feats = self.modules.normalize(feats, wav_lens, epoch=current_epoch)
        if stage == sb.Stage.TRAIN:
            if hasattr(self.hparams, "augmentation"):
                feats = self.hparams.augmentation(feats)
        # forward modules
        src = self.modules.CNN(feats)
        enc_out, pred = self.modules.Transformer(
            src, tokens_bos, wav_lens, pad_idx=self.hparams.pad_index,
        )
        # output layer for ctc log-probabilities
        logits = self.modules.ctc_lin(enc_out)
        p_ctc = self.hparams.log_softmax(logits)
        # output layer for seq2seq log-probabilities
        pred = self.modules.seq_lin(pred)
        p_seq = self.hparams.log_softmax(pred)
        # Compute outputs
        hyps = None
        if stage == sb.Stage.TRAIN:
            hyps = None
        elif stage == sb.Stage.VALID:
            hyps = None
            current_epoch = self.hparams.epoch_counter.current
            if current_epoch % self.hparams.valid_search_interval == 0:
                # for the sake of efficiency, we only perform beamsearch with limited capacity
                # and no LM to give user some idea of how the AM is doing
                hyps, _ = self.hparams.valid_search(enc_out.detach(), wav_lens)
        elif stage == sb.Stage.TEST:
            hyps, _ = self.hparams.test_search(enc_out.detach(), wav_lens)
        return p_ctc, p_seq, wav_lens, hyps
    def compute_objectives(self, predictions, batch, stage):
        """Computes the loss (CTC+NLL) given predictions and targets."""
        (p_ctc, p_seq, wav_lens, hyps,) = predictions
        ids = batch.id
        tokens_eos, tokens_eos_lens = batch.tokens_eos
        tokens, tokens_lens = batch.tokens
        if hasattr(self.modules, "env_corrupt") and stage == sb.Stage.TRAIN:
            # Mirror the clean/noisy batch duplication done in compute_forward.
            tokens_eos = torch.cat([tokens_eos, tokens_eos], dim=0)
            tokens_eos_lens = torch.cat(
                [tokens_eos_lens, tokens_eos_lens], dim=0
            )
            tokens = torch.cat([tokens, tokens], dim=0)
            tokens_lens = torch.cat([tokens_lens, tokens_lens], dim=0)
        loss_seq = self.hparams.seq_cost(
            p_seq, tokens_eos, length=tokens_eos_lens
        ).sum()
        # now as training progresses we use real prediction from the prev step instead of teacher forcing
        loss_ctc = self.hparams.ctc_cost(
            p_ctc, tokens, wav_lens, tokens_lens
        ).sum()
        # Weighted combination of the CTC and attention (seq2seq) losses.
        loss = (
            self.hparams.ctc_weight * loss_ctc
            + (1 - self.hparams.ctc_weight) * loss_seq
        )
        if stage != sb.Stage.TRAIN:
            current_epoch = self.hparams.epoch_counter.current
            valid_search_interval = self.hparams.valid_search_interval
            if current_epoch % valid_search_interval == 0 or (
                stage == sb.Stage.TEST
            ):
                # Decode token terms to words
                # NOTE: ``tokenizer`` here is the module-level name bound in
                # the ``__main__`` section via dataio_prepare().
                predicted_words = [
                    tokenizer.decode_ids(utt_seq).split(" ") for utt_seq in hyps
                ]
                target_words = [wrd.split(" ") for wrd in batch.words]
                # Check for possible word alternatives and exclusions
                if stage == sb.Stage.TEST and self.normalize_fn is not None:
                    target_words, predicted_words = self.normalize_fn(
                        target_words, predicted_words
                    )
                self.wer_metric.append(ids, predicted_words, target_words)
            # compute the accuracy of the one-step-forward prediction
            self.acc_metric.append(p_seq, tokens_eos, tokens_eos_lens)
        return loss
    def fit_batch(self, batch):
        """Train on one batch, with gradient accumulation and optional AMP."""
        # Only step the optimizer every ``grad_accumulation_factor`` batches.
        should_step = self.step % self.grad_accumulation_factor == 0
        # Managing automatic mixed precision
        if self.auto_mix_prec:
            self.optimizer.zero_grad()
            with torch.cuda.amp.autocast():
                outputs = self.compute_forward(batch, sb.Stage.TRAIN)
            loss = self.compute_objectives(outputs, batch, sb.Stage.TRAIN)
            self.scaler.scale(loss / self.grad_accumulation_factor).backward()
            if should_step:
                self.scaler.unscale_(self.optimizer)
                # check_gradients skips the update on non-finite gradients.
                if self.check_gradients(loss):
                    self.scaler.step(self.optimizer)
                self.scaler.update()
                self.optimizer_step += 1
                # anneal lr every update
                self.hparams.noam_annealing(self.optimizer)
        else:
            outputs = self.compute_forward(batch, sb.Stage.TRAIN)
            loss = self.compute_objectives(outputs, batch, sb.Stage.TRAIN)
            (loss / self.grad_accumulation_factor).backward()
            if should_step:
                if self.check_gradients(loss):
                    self.optimizer.step()
                self.optimizer.zero_grad()
                self.optimizer_step += 1
                # anneal lr every update
                self.hparams.noam_annealing(self.optimizer)
        return loss.detach().cpu()
    def evaluate_batch(self, batch, stage):
        """Computations needed for validation/test batches"""
        with torch.no_grad():
            predictions = self.compute_forward(batch, stage=stage)
            loss = self.compute_objectives(predictions, batch, stage=stage)
        return loss.detach()
    def on_stage_start(self, stage, epoch):
        """Gets called at the beginning of each epoch"""
        if stage != sb.Stage.TRAIN:
            # Fresh metric trackers for every validation/test pass.
            self.acc_metric = self.hparams.acc_computer()
            self.wer_metric = self.hparams.error_rate_computer()
    def on_stage_end(self, stage, stage_loss, epoch):
        """Gets called at the end of a epoch."""
        # Compute/store important stats
        stage_stats = {"loss": stage_loss}
        if stage == sb.Stage.TRAIN:
            self.train_stats = stage_stats
        else:
            stage_stats["ACC"] = self.acc_metric.summarize()
            current_epoch = self.hparams.epoch_counter.current
            valid_search_interval = self.hparams.valid_search_interval
            # WER is only available when beamsearch ran (see compute_forward).
            if (
                current_epoch % valid_search_interval == 0
                or stage == sb.Stage.TEST
            ):
                stage_stats["WER"] = self.wer_metric.summarize("error_rate")
        # log stats and save checkpoint at end-of-epoch
        if stage == sb.Stage.VALID and sb.utils.distributed.if_main_process():
            lr = self.hparams.noam_annealing.current_lr
            steps = self.optimizer_step
            optimizer = self.optimizer.__class__.__name__
            epoch_stats = {
                "epoch": epoch,
                "lr": lr,
                "steps": steps,
                "optimizer": optimizer,
            }
            self.hparams.train_logger.log_stats(
                stats_meta=epoch_stats,
                train_stats=self.train_stats,
                valid_stats=stage_stats,
            )
            # Keep the 5 best checkpoints by validation accuracy; they are
            # averaged later in on_evaluate_start.
            self.checkpointer.save_and_keep_only(
                meta={"ACC": stage_stats["ACC"], "epoch": epoch},
                max_keys=["ACC"],
                num_to_keep=5,
            )
        elif stage == sb.Stage.TEST:
            self.hparams.train_logger.log_stats(
                stats_meta={"Epoch loaded": self.hparams.epoch_counter.current},
                test_stats=stage_stats,
            )
            with open(self.hparams.wer_file, "w") as w:
                self.wer_metric.write_stats(w)
            # save the averaged checkpoint at the end of the evaluation stage
            # delete the rest of the intermediate checkpoints
            # ACC is set to 1.1 so checkpointer only keeps the averaged checkpoint
            self.checkpointer.save_and_keep_only(
                meta={"ACC": 1.1, "epoch": epoch},
                max_keys=["ACC"],
                num_to_keep=1,
            )
    def on_evaluate_start(self, max_key=None, min_key=None):
        """perform checkpoint average if needed"""
        super().on_evaluate_start()
        # Average the model weights of the kept checkpoints (selected via
        # max_key/min_key) and load the averaged weights for evaluation.
        ckpts = self.checkpointer.find_checkpoints(
            max_key=max_key, min_key=min_key
        )
        ckpt = sb.utils.checkpoints.average_checkpoints(
            ckpts, recoverable_name="model", device=self.device
        )
        self.hparams.model.load_state_dict(ckpt, strict=True)
        self.hparams.model.eval()
def dataio_prepare(hparams):
    """This function prepares the datasets to be used in the brain class.
    It also defines the data processing pipeline through user-defined
    functions.

    Arguments
    ---------
    hparams : dict
        Loaded hyperparameters; must provide the csv paths, sorting policy,
        sample rate, tokenizer and dataloader options used below.

    Returns
    -------
    tuple
        (train_data, valid_data, test_datasets, tokenizer)
    """
    data_folder = hparams["data_folder"]
    train_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["train_csv"], replacements={"data_root": data_folder},
    )
    if hparams["sorting"] == "ascending":
        # we sort training data to speed up training and get better results.
        train_data = train_data.filtered_sorted(sort_key="duration")
        # when sorting do not shuffle in dataloader ! otherwise is pointless
        hparams["train_dataloader_opts"]["shuffle"] = False
    elif hparams["sorting"] == "descending":
        train_data = train_data.filtered_sorted(
            sort_key="duration", reverse=True
        )
        # when sorting do not shuffle in dataloader ! otherwise is pointless
        hparams["train_dataloader_opts"]["shuffle"] = False
    elif hparams["sorting"] == "random":
        pass
    else:
        raise NotImplementedError(
            "sorting must be random, ascending or descending"
        )
    valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["valid_csv"], replacements={"data_root": data_folder},
    )
    valid_data = valid_data.filtered_sorted(sort_key="duration")
    # test is separate
    test_datasets = {}
    for csv_file in hparams["test_csv"]:
        name = Path(csv_file).stem
        test_datasets[name] = sb.dataio.dataset.DynamicItemDataset.from_csv(
            csv_path=csv_file, replacements={"data_root": data_folder}
        )
        test_datasets[name] = test_datasets[name].filtered_sorted(
            sort_key="duration"
        )
    datasets = [train_data, valid_data] + list(test_datasets.values())
    valtest_datasets = [valid_data] + list(test_datasets.values())

    # We get the tokenizer as we need it to encode the labels when creating
    # mini-batches.
    tokenizer = hparams["tokenizer"]

    def _load_segment(wav, channel, start, stop):
        """Load one segment from a (possibly multi-channel) audio file,
        resample it to hparams["sample_rate"] if needed and select the
        proper channel. Shared by the train and valid/test pipelines."""
        # start and end times are already frames
        # (computed in the data preparation stage).
        start = int(start)
        stop = int(stop)
        num_frames = stop - start
        sig, fs = torchaudio.load(
            wav, num_frames=num_frames, frame_offset=start
        )
        info = torchaudio.info(wav)
        resampled = sig
        # Maybe resample to the target rate (typically 16kHz)
        if int(info.sample_rate) != int(hparams["sample_rate"]):
            resampled = torchaudio.transforms.Resample(
                info.sample_rate, hparams["sample_rate"],
            )(sig)
        # (channels, time) -> (time, channels); mono collapses to (time,)
        resampled = resampled.transpose(0, 1).squeeze(1)
        if info.num_channels > 1:
            # Select the proper audio channel of the segment
            if channel == "A":
                resampled = resampled[:, 0]
            else:
                resampled = resampled[:, 1]
        return resampled

    # 2. Define audio pipeline:
    @sb.utils.data_pipeline.takes("wav", "channel", "start", "stop")
    @sb.utils.data_pipeline.provides("sig")
    def audio_pipeline(wav, channel, start, stop):
        # Plain segment loading for validation/test (no augmentation).
        return _load_segment(wav, channel, start, stop)

    sb.dataio.dataset.add_dynamic_item(valtest_datasets, audio_pipeline)

    @sb.utils.data_pipeline.takes("wav", "channel", "start", "stop")
    @sb.utils.data_pipeline.provides("sig")
    def audio_pipeline_train(wav, channel, start, stop):
        # Speed Perturb is done here so it is multi-threaded with the
        # workers of the dataloader (faster).
        resampled = _load_segment(wav, channel, start, stop)
        if hparams["speed_perturb"]:
            # SpeedPerturb is instantiated per item (as in the original
            # recipe) so each dataloader worker draws independent factors.
            speed = sb.processing.speech_augmentation.SpeedPerturb(
                16000, list(range(95, 105))
            )
            resampled = speed(resampled.unsqueeze(0)).squeeze(0)
        return resampled

    sb.dataio.dataset.add_dynamic_item([train_data], audio_pipeline_train)

    # 3. Define text pipeline:
    @sb.utils.data_pipeline.takes("words")
    @sb.utils.data_pipeline.provides(
        "words", "tokens_list", "tokens_bos", "tokens_eos", "tokens"
    )
    def text_pipeline(words):
        yield words
        tokens_list = tokenizer.encode_as_ids(words)
        yield tokens_list
        tokens_bos = torch.LongTensor([hparams["bos_index"]] + tokens_list)
        yield tokens_bos
        tokens_eos = torch.LongTensor(tokens_list + [hparams["eos_index"]])
        yield tokens_eos
        tokens = torch.LongTensor(tokens_list)
        yield tokens

    sb.dataio.dataset.add_dynamic_item(datasets, text_pipeline)

    # 4. Set output:
    sb.dataio.dataset.set_output_keys(
        datasets, ["id", "sig", "words", "tokens_bos", "tokens_eos", "tokens"],
    )
    return (
        train_data,
        valid_data,
        test_datasets,
        tokenizer,
    )
if __name__ == "__main__":
    # CLI:
    hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])
    with open(hparams_file) as fin:
        hparams = load_hyperpyyaml(fin, overrides)
    # If distributed_launch=True then
    # create ddp_group with the right communication protocol
    sb.utils.distributed.ddp_init_group(run_opts)
    from switchboard_prepare import prepare_switchboard # noqa
    from normalize_util import normalize_words, read_glm_csv # noqa
    # Create experiment directory
    sb.create_experiment_directory(
        experiment_directory=hparams["output_folder"],
        hyperparams_to_save=hparams_file,
        overrides=overrides,
    )
    # multi-gpu (ddp) save data preparation
    # (run_on_main ensures the csv files are written only once)
    run_on_main(
        prepare_switchboard,
        kwargs={
            "data_folder": hparams["data_folder"],
            "save_folder": hparams["output_folder"],
            "splits": hparams["splits"],
            "split_ratio": hparams["split_ratio"],
            "skip_prep": hparams["skip_prep"],
            "add_fisher_corpus": hparams["add_fisher_corpus"],
            "max_utt": hparams["max_utt"],
        },
    )
    # here we create the datasets objects as well as tokenization and encoding
    # NOTE: the module-level ``tokenizer`` bound here is also used inside
    # ASR.compute_objectives for decoding hypotheses.
    train_data, valid_data, test_datasets, tokenizer = dataio_prepare(hparams)
    # We download the pretrained LM from HuggingFace (or elsewhere depending on
    # the path given in the YAML file). The tokenizer is loaded at the same time.
    run_on_main(hparams["pretrainer"].collect_files)
    hparams["pretrainer"].load_collected(device=run_opts["device"])
    # Helper function that removes optional/deletable parts of the transcript
    # for cleaner performance metrics
    normalize_fn = None
    if hparams["normalize_words"]:
        normalize_fn = functools.partial(
            normalize_words,
            glm_alternatives=read_glm_csv(hparams["output_folder"]),
        )
    # Trainer initialization
    asr_brain = ASR(
        modules=hparams["modules"],
        opt_class=hparams["Adam"],
        hparams=hparams,
        run_opts=run_opts,
        checkpointer=hparams["checkpointer"],
        normalize_fn=normalize_fn,
    )
    # adding objects to trainer:
    asr_brain.tokenizer = hparams["tokenizer"]
    train_dataloader_opts = hparams["train_dataloader_opts"]
    valid_dataloader_opts = hparams["valid_dataloader_opts"]
    # Training
    asr_brain.fit(
        asr_brain.hparams.epoch_counter,
        train_data,
        valid_data,
        train_loader_kwargs=train_dataloader_opts,
        valid_loader_kwargs=valid_dataloader_opts,
    )
    # Testing: evaluate with the checkpoint averaged from the best-ACC ones
    for k in test_datasets.keys(): # keys are test_swbd and test_callhome
        asr_brain.hparams.wer_file = os.path.join(
            hparams["output_folder"], "wer_{}.txt".format(k)
        )
        asr_brain.evaluate(
            test_datasets[k],
            max_key="ACC",
            test_loader_kwargs=hparams["test_dataloader_opts"],
        )
| 20,245 | 35.677536 | 105 | py |
speechbrain | speechbrain-main/recipes/Google-speech-commands/prepare_GSC.py | """
Data preparation for Google Speech Commands v0.02.
Download: http://download.tensorflow.org/data/speech_commands_v0.02.tar.gz
Author
------
David Raby-Pepin 2021
"""
import os
from os import walk
import glob
import shutil
import logging
import torch
import re
import hashlib
import copy
import numpy as np
from speechbrain.utils.data_utils import download_file
from speechbrain.dataio.dataio import read_audio
try:
import pandas as pd
except ImportError:
err_msg = (
"The optional dependency pandas must be installed to run this recipe.\n"
)
err_msg += "Install using `pip install pandas`.\n"
raise ImportError(err_msg)
logger = logging.getLogger(__name__)
# Download location of the Google Speech Commands v0.02 archive.
GSC_URL = "http://download.tensorflow.org/data/speech_commands_v0.02.tar.gz"
# List of all the words (i.e. classes) within the GSC v2 dataset.
# Words absent from the user-selected subset are relabelled "unknown".
all_words = [
    "yes",
    "no",
    "up",
    "down",
    "left",
    "right",
    "on",
    "off",
    "stop",
    "go",
    "zero",
    "one",
    "two",
    "three",
    "four",
    "five",
    "six",
    "seven",
    "eight",
    "nine",
    "bed",
    "bird",
    "cat",
    "dog",
    "happy",
    "house",
    "marvin",
    "sheila",
    "tree",
    "wow",
    "backward",
    "forward",
    "follow",
    "learn",
    "visual",
]
def prepare_GSC(
    data_folder,
    save_folder,
    validation_percentage=10,
    testing_percentage=10,
    percentage_unknown=10,
    percentage_silence=10,
    words_wanted=None,
    skip_prep=False,
):
    """
    Prepares the Google Speech Commands V2 dataset.

    Arguments
    ---------
    data_folder : str
        path to dataset. If not present, it will be downloaded here.
    save_folder: str
        folder where to store the data manifest files.
    validation_percentage: int
        How much of the data set to use for validation.
    testing_percentage: int
        How much of the data set to use for testing.
    percentage_unknown: int
        How much data outside of the known (i.e. wanted) words to preserve;
        relative to the total number of known words.
    percentage_silence: int
        How many silence samples to generate; relative to the total number
        of known words.
    words_wanted: list of str, optional
        Commands that keep their own label; all other words are relabelled
        "unknown". Defaults to the standard 10-command GSC subset.
    skip_prep: bool
        If True, skip data preparation.

    Example
    -------
    >>> data_folder = '/path/to/GSC'
    >>> prepare_GSC(data_folder)
    """
    if skip_prep:
        return
    # None-sentinel instead of a mutable default argument: the default
    # command list is created afresh on every call.
    if words_wanted is None:
        words_wanted = [
            "yes",
            "no",
            "up",
            "down",
            "left",
            "right",
            "on",
            "off",
            "stop",
            "go",
        ]
    # If the data folders do not exist, we need to extract the data
    # NOTE(review): "train-synth" is not a folder name the GSC archive is
    # known to contain -- confirm this existence check matches the layout
    # produced by extraction.
    if not os.path.isdir(os.path.join(data_folder, "train-synth")):
        # Check for the archive and download it if it doesn't exist
        tar_location = os.path.join(data_folder, "speech_commands_v0.02.tar.gz")
        if not os.path.exists(tar_location):
            download_file(GSC_URL, tar_location, unpack=True)
        else:
            logger.info("Extracting speech_commands_v0.02.tar.gz...")
            shutil.unpack_archive(tar_location, data_folder)
    # Define the words that we do not want to identify (-> "unknown" class)
    unknown_words = list(np.setdiff1d(all_words, words_wanted))
    # All metadata fields to appear within our dataset annotation files
    # (i.e. train.csv, valid.csv, test.csv)
    fields = {
        "ID": [],
        "duration": [],
        "start": [],
        "stop": [],
        "wav": [],
        "spk_id": [],
        "command": [],
        "transcript": [],
    }
    splits = {
        "train": copy.deepcopy(fields),
        "valid": copy.deepcopy(fields),
        "test": copy.deepcopy(fields),
    }
    num_known_samples_per_split = {"train": 0, "valid": 0, "test": 0}
    words_wanted_parsed = False
    commands = words_wanted + unknown_words
    for i, command in enumerate(commands):
        # logger.info("Preparing {}/{} commands...".format(i, len(commands)))
        # Once all wanted words are parsed, derive the subsampling rate for
        # the unknown words so that the retained "unknown" samples amount to
        # percentage_unknown of the known samples.
        if i >= len(words_wanted) and not words_wanted_parsed:
            num_known_samples_total = np.sum(
                list(num_known_samples_per_split.values())
            )
            # 105829 is the total number of recordings in GSC v0.02, so this
            # difference is the number of samples outside the wanted words.
            num_unknown_samples_total = 105829 - num_known_samples_total
            percentage_applied_to_unknown_samples = (
                percentage_unknown * num_known_samples_total
            ) / num_unknown_samples_total
            words_wanted_parsed = True
        # Read all files under a specific class (i.e. command)
        files = []
        for (dirpath, dirnames, filenames) in walk(
            os.path.join(data_folder, command)
        ):
            files.extend(filenames)
            break
        # Fill in all fields with metadata for each audio sample file under
        # a specific class
        for filename in files:
            # For unknown words, randomly keep only the required percentage
            if (
                words_wanted_parsed
                and torch.rand(1)[0].tolist()
                > percentage_applied_to_unknown_samples / 100
            ):
                continue
            # select the required split (i.e. set) for the sample
            split = which_set(
                filename, validation_percentage, testing_percentage
            )
            splits[split]["ID"].append(
                command + "/" + re.sub(r".wav", "", filename)
            )
            # We know that all recordings are 1 second long (i.e. 16000
            # frames). No need to compute the duration.
            splits[split]["duration"].append(1.0)
            splits[split]["start"].append(0)
            splits[split]["stop"].append(16000)
            splits[split]["wav"].append(
                os.path.join(data_folder, command, filename)
            )
            # Speaker id is the part of the filename before the first "_"
            splits[split]["spk_id"].append(re.sub(r"_.*", "", filename))
            if command in words_wanted:
                splits[split]["command"].append(command)
                num_known_samples_per_split[split] += 1
            else:
                splits[split]["command"].append("unknown")
            splits[split]["transcript"].append(command)
    if percentage_silence > 0:
        generate_silence_data(
            num_known_samples_per_split,
            splits,
            data_folder,
            percentage_silence=percentage_silence,
        )
    # Write one manifest csv per split
    for split in splits:
        new_filename = os.path.join(save_folder, split) + ".csv"
        new_df = pd.DataFrame(splits[split])
        new_df.to_csv(new_filename, index=False)
MAX_NUM_WAVS_PER_CLASS = 2 ** 27 - 1  # ~134M
def which_set(filename, validation_percentage, testing_percentage):
    """Assign a sample file to the 'train', 'valid' or 'test' partition.

    The assignment is a pure function of the file name (not of the other
    files present), so splits stay stable when new files are added.  The
    suffix after '_nohash_' is stripped before hashing, which keeps close
    variations of the same utterance (e.g. 'bobby_nohash_0.wav' and
    'bobby_nohash_1.wav') in the same partition.

    Arguments
    ---------
    filename: path
        File path of the data sample.
    validation_percentage: int
        How much of the data set to use for validation.
    testing_percentage: int
        How much of the data set to use for testing.

    Returns
    ---------
    result: str
        one of 'train', 'valid', or 'test'.
    """
    base_name = os.path.basename(filename)
    # Strip the '_nohash_' suffix so grouped recordings hash identically.
    stable_name = re.sub(r"_nohash_.*$", "", base_name).encode("utf-8")
    # Map the SHA1 digest of the stable name onto [0, 100]; this value
    # decides the partition and never changes for a given file name.
    digest = hashlib.sha1(stable_name).hexdigest()
    percentage_hash = (
        int(digest, 16) % (MAX_NUM_WAVS_PER_CLASS + 1)
    ) * (100.0 / MAX_NUM_WAVS_PER_CLASS)
    if percentage_hash < validation_percentage:
        return "valid"
    if percentage_hash < (testing_percentage + validation_percentage):
        return "test"
    return "train"
def generate_silence_data(
    num_known_samples_per_split, splits, data_folder, percentage_silence=26
):
    """Generates silence samples by cutting random 1-second windows out of
    the `_background_noise_` recordings and appending them to the manifests.
    Arguments
    ---------
    num_known_samples_per_split: dict
        Total number of samples of known words for each split (i.e. set).
    splits: dict
        Manifest field lists for the training, validation and test sets;
        modified in place.
    data_folder: str
        path to dataset.
    percentage_silence: int
        How many silence samples to generate; relative to the total number of known words.
    """
    for split in splits:
        num_silence_samples = int(
            (percentage_silence / 100.0) * num_known_samples_per_split[split]
        )
        # Fetch all background noise wav files used to generate silence samples
        search_path = os.path.join(data_folder, "_background_noise_", "*.wav")
        silence_paths = []
        for wav_path in glob.glob(search_path):
            silence_paths.append(wav_path)
        # Generate random silence samples
        # Assumes that the pytorch seed has been defined in the HyperPyYaml file
        num_silence_samples_per_path = int(
            num_silence_samples / len(silence_paths)
        )
        for silence_path in silence_paths:
            signal = read_audio(silence_path)
            # Uniform random start frames, leaving room for a 16000-frame cut.
            random_starts = (
                (
                    torch.rand(num_silence_samples_per_path)
                    * (signal.shape[0] - 16001)
                )
                .type(torch.int)
                .tolist()
            )
            for i, random_start in enumerate(random_starts):
                # ID looks like "_background_noise_/<file>/<start>_<i>"
                splits[split]["ID"].append(
                    re.sub(
                        r".wav",
                        "/" + str(random_start) + "_" + str(i),
                        re.sub(r".+?(?=_background_noise_)", "", silence_path),
                    )
                )
                splits[split]["duration"].append(1.0)
                splits[split]["start"].append(random_start)
                splits[split]["stop"].append(random_start + 16000)
                splits[split]["wav"].append(silence_path)
                splits[split]["spk_id"].append(None)
                splits[split]["command"].append("silence")
                splits[split]["transcript"].append(None)
| 11,265 | 30.915014 | 119 | py |
speechbrain | speechbrain-main/recipes/Google-speech-commands/train.py | #!/usr/bin/python3
"""Recipe for training a classifier using the
Google Speech Commands v0.02 Dataset.
To run this recipe, use the following command:
> python train.py {hyperparameter_file}
Using your own hyperparameter file or one of the following:
hyperparams/xvect.yaml (xvector system)
Author
* Mirco Ravanelli 2020
* Hwidong Na 2020
* Nauman Dawalatabad 2020
* Sarthak Yadav 2022
Script adapted by David Raby-Pepin 2021
"""
import os
import sys
import torch
import torchaudio
import speechbrain as sb
from hyperpyyaml import load_hyperpyyaml
import speechbrain.nnet.CNN
from speechbrain.utils.distributed import run_on_main
class SpeakerBrain(sb.core.Brain):
    """Brain class for training a command classifier on Google Speech
    Commands (encoder + classifier, with optional waveform augmentation)."""
    def compute_forward(self, batch, stage):
        """Computation pipeline based on a encoder + command classifier.
        Data augmentation and environmental corruption are applied to the
        input speech.
        """
        batch = batch.to(self.device)
        wavs, lens = batch.sig
        if stage == sb.Stage.TRAIN and self.hparams.apply_data_augmentation:
            # Applying the augmentation pipeline
            wavs_aug_tot = []
            wavs_aug_tot.append(wavs)
            for count, augment in enumerate(self.hparams.augment_pipeline):
                # Apply augment
                wavs_aug = augment(wavs, lens)
                # Managing speed change: pad/trim back to the clean length
                if wavs_aug.shape[1] > wavs.shape[1]:
                    wavs_aug = wavs_aug[:, 0 : wavs.shape[1]]
                else:
                    zero_sig = torch.zeros_like(wavs)
                    zero_sig[:, 0 : wavs_aug.shape[1]] = wavs_aug
                    wavs_aug = zero_sig
                # Either concatenate every augmented copy to the batch, or
                # keep only the last augmentation's output.
                if self.hparams.concat_augment:
                    wavs_aug_tot.append(wavs_aug)
                else:
                    wavs = wavs_aug
                    wavs_aug_tot[0] = wavs
            wavs = torch.cat(wavs_aug_tot, dim=0)
            # n_augment is reused in compute_objectives to duplicate labels.
            self.n_augment = len(wavs_aug_tot)
            lens = torch.cat([lens] * self.n_augment)
        # NOTE(review): this relies on ``speechbrain.lobes.features`` being
        # importable through the bare ``speechbrain`` name (only
        # speechbrain.nnet.CNN is imported at the top of the file) -- confirm.
        if isinstance(
            self.modules.compute_features, speechbrain.lobes.features.Leaf
        ):
            # if leaf, first normalize the wavs before feeding them to leaf
            # no normalization is needed after LEAF
            feats = self.modules.mean_var_norm(wavs, lens)
            feats = self.modules.compute_features(feats)
        else:
            # Feature extraction and normalization
            feats = self.modules.compute_features(wavs)
            feats = self.modules.mean_var_norm(feats, lens)
        # Embeddings + classifier
        embeddings = self.modules.embedding_model(feats)
        outputs = self.modules.classifier(embeddings)
        # Ecapa model uses softmax outside of its classifier
        if "softmax" in self.modules.keys():
            outputs = self.modules.softmax(outputs)
        return outputs, lens
    def compute_objectives(self, predictions, batch, stage):
        """Computes the loss using command-id as label.
        """
        predictions, lens = predictions
        uttid = batch.id
        command, _ = batch.command_encoded
        # Concatenate labels (due to data augmentation)
        if stage == sb.Stage.TRAIN and self.hparams.apply_data_augmentation:
            command = torch.cat([command] * self.n_augment, dim=0)
        # compute the cost function
        loss = self.hparams.compute_cost(predictions, command, lens)
        # loss = sb.nnet.losses.nll_loss(predictions, command, lens)
        # Some schedulers anneal per-batch rather than per-epoch.
        if hasattr(self.hparams.lr_annealing, "on_batch_end"):
            self.hparams.lr_annealing.on_batch_end(self.optimizer)
        if stage != sb.Stage.TRAIN:
            self.error_metrics.append(uttid, predictions, command, lens)
        return loss
    def on_stage_start(self, stage, epoch=None):
        """Gets called at the beginning of an epoch."""
        if stage != sb.Stage.TRAIN:
            self.error_metrics = self.hparams.error_stats()
    def on_stage_end(self, stage, stage_loss, epoch=None):
        """Gets called at the end of an epoch."""
        # Compute/store important stats
        stage_stats = {"loss": stage_loss}
        if stage == sb.Stage.TRAIN:
            self.train_stats = stage_stats
        else:
            stage_stats["ErrorRate"] = self.error_metrics.summarize("average")
        # Perform end-of-iteration things, like annealing, logging, etc.
        if stage == sb.Stage.VALID:
            old_lr, new_lr = self.hparams.lr_annealing(epoch)
            sb.nnet.schedulers.update_learning_rate(self.optimizer, new_lr)
            self.hparams.train_logger.log_stats(
                stats_meta={"epoch": epoch, "lr": old_lr},
                train_stats=self.train_stats,
                valid_stats=stage_stats,
            )
            # Keep only the checkpoint(s) with the lowest error rate.
            self.checkpointer.save_and_keep_only(
                meta={"ErrorRate": stage_stats["ErrorRate"]},
                min_keys=["ErrorRate"],
            )
        # We also write statistics about test data to stdout and to the logfile.
        if stage == sb.Stage.TEST:
            self.hparams.train_logger.log_stats(
                {"Epoch loaded": self.hparams.epoch_counter.current},
                test_stats=stage_stats,
            )
def dataio_prep(hparams):
    """Creates the datasets and their data processing pipelines.
    Returns (train_data, valid_data, test_data, label_encoder).
    """
    data_folder = hparams["data_folder"]
    # 1. Declarations:
    train_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["train_annotation"],
        replacements={"data_root": data_folder},
    )
    valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["valid_annotation"],
        replacements={"data_root": data_folder},
    )
    test_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["test_annotation"],
        replacements={"data_root": data_folder},
    )
    datasets = [train_data, valid_data, test_data]
    label_encoder = sb.dataio.encoder.CategoricalEncoder()
    # 2. Define audio pipeline:
    # ("duration" is declared in the csv but unused in the loading itself)
    @sb.utils.data_pipeline.takes("wav", "start", "stop", "duration")
    @sb.utils.data_pipeline.provides("sig")
    def audio_pipeline(wav, start, stop, duration):
        # Load only the [start, stop) frame range of the file.
        start = int(start)
        stop = int(stop)
        num_frames = stop - start
        sig, fs = torchaudio.load(
            wav, num_frames=num_frames, frame_offset=start
        )
        # (channels, time) -> (time,) for mono signals
        sig = sig.transpose(0, 1).squeeze(1)
        return sig
    sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline)
    # 3. Define text pipeline:
    @sb.utils.data_pipeline.takes("command")
    @sb.utils.data_pipeline.provides("command", "command_encoded")
    def label_pipeline(command):
        yield command
        command_encoded = label_encoder.encode_sequence_torch([command])
        yield command_encoded
    sb.dataio.dataset.add_dynamic_item(datasets, label_pipeline)
    # 3. Fit encoder:
    # Load or compute the label encoder (with multi-GPU DDP support)
    lab_enc_file = os.path.join(hparams["save_folder"], "label_encoder.txt")
    label_encoder.load_or_create(
        path=lab_enc_file, from_didatasets=[train_data], output_key="command",
    )
    # 4. Set output:
    sb.dataio.dataset.set_output_keys(
        datasets, ["id", "sig", "command_encoded"]
    )
    return train_data, valid_data, test_data, label_encoder
if __name__ == "__main__":
    # This flag enables the inbuilt cudnn auto-tuner
    torch.backends.cudnn.benchmark = True
    # CLI:
    hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])
    # Initialize ddp (useful only for multi-GPU DDP training)
    sb.utils.distributed.ddp_init_group(run_opts)
    # Load hyperparameters file with command-line overrides
    with open(hparams_file) as fin:
        hparams = load_hyperpyyaml(fin, overrides)
    # Create experiment directory
    sb.core.create_experiment_directory(
        experiment_directory=hparams["output_folder"],
        hyperparams_to_save=hparams_file,
        overrides=overrides,
    )
    # Dataset prep (parsing GSC and annotation into csv files)
    from prepare_GSC import prepare_GSC
    # Known words for V2 12 and V2 35 sets
    # (12 = 10 commands + "unknown" + "silence"; 35 keeps all words)
    if hparams["number_of_commands"] == 12:
        words_wanted = [
            "yes",
            "no",
            "up",
            "down",
            "left",
            "right",
            "on",
            "off",
            "stop",
            "go",
        ]
    elif hparams["number_of_commands"] == 35:
        words_wanted = [
            "yes",
            "no",
            "up",
            "down",
            "left",
            "right",
            "on",
            "off",
            "stop",
            "go",
            "zero",
            "one",
            "two",
            "three",
            "four",
            "five",
            "six",
            "seven",
            "eight",
            "nine",
            "bed",
            "bird",
            "cat",
            "dog",
            "happy",
            "house",
            "marvin",
            "sheila",
            "tree",
            "wow",
            "backward",
            "forward",
            "follow",
            "learn",
            "visual",
        ]
    else:
        raise ValueError("number_of_commands must be 12 or 35")
    # Data preparation (run_on_main: write the csv files only once under DDP)
    run_on_main(
        prepare_GSC,
        kwargs={
            "data_folder": hparams["data_folder"],
            "save_folder": hparams["output_folder"],
            "validation_percentage": hparams["validation_percentage"],
            "testing_percentage": hparams["testing_percentage"],
            "percentage_unknown": hparams["percentage_unknown"],
            "percentage_silence": hparams["percentage_silence"],
            "words_wanted": words_wanted,
            "skip_prep": hparams["skip_prep"],
        },
    )
    # Dataset IO prep: creating Dataset objects and proper encodings for phones
    train_data, valid_data, test_data, label_encoder = dataio_prep(hparams)
    # Brain class initialization
    speaker_brain = SpeakerBrain(
        modules=hparams["modules"],
        opt_class=hparams["opt_class"],
        hparams=hparams,
        run_opts=run_opts,
        checkpointer=hparams["checkpointer"],
    )
    # with torch.autograd.detect_anomaly():
    # Training
    speaker_brain.fit(
        speaker_brain.hparams.epoch_counter,
        train_data,
        valid_data,
        train_loader_kwargs=hparams["dataloader_options"],
        valid_loader_kwargs=hparams["dataloader_options"],
    )
    # Load the best checkpoint for evaluation
    test_stats = speaker_brain.evaluate(
        test_set=test_data,
        min_key="ErrorRate",
        test_loader_kwargs=hparams["dataloader_options"],
    )
| 10,834 | 31.056213 | 80 | py |
speechbrain | speechbrain-main/recipes/IWSLT22_lowresource/train.py | #!/usr/bin/env python3
"""Recipe for fine-tuning a wav2vec model for the ST task (no transcriptions).
Author
* Marcely Zanon Boito, 2022
"""
import sys
import torch
import logging
import speechbrain as sb
from speechbrain.tokenizers.SentencePiece import SentencePiece
from speechbrain.utils.distributed import run_on_main
from hyperpyyaml import load_hyperpyyaml
from sacremoses import MosesDetokenizer
# Define training procedure
class ST(sb.core.Brain):
    """Speech-translation trainer: a wav2vec 2.0 encoder followed by a
    Transformer decoder used in MT-decoder-only mode (no transcriptions)."""

    def compute_forward(self, batch, stage):
        """Forward computations from the waveform batches to the output probabilities.

        Arguments
        ---------
        batch : PaddedBatch
            Batch providing ``sig`` (audio) and ``tokens_bos`` (translation).
        stage : sb.Stage
            TRAIN, VALID or TEST.

        Returns
        -------
        p_seq : torch.Tensor
            Log-probabilities over target tokens.
        wav_lens : torch.Tensor
            Relative audio lengths.
        hyps : list or None
            Search hypotheses (None during training).
        """
        batch = batch.to(self.device)

        wavs, wav_lens = batch.sig  # audio
        tokens_bos, _ = batch.tokens_bos  # translation

        # wav2vec module
        feats = self.modules.wav2vec2(wavs, wav_lens)

        # dimensionality reduction
        src = self.modules.enc(feats)

        # transformer decoder (under DDP the real module is wrapped and
        # must be reached through ``.module``)
        if self.distributed_launch:
            dec_out = self.modules.Transformer.module.forward_mt_decoder_only(
                src, tokens_bos, pad_idx=self.hparams.pad_index
            )
        else:
            dec_out = self.modules.Transformer.forward_mt_decoder_only(
                src, tokens_bos, pad_idx=self.hparams.pad_index
            )

        # logits and softmax
        pred = self.modules.seq_lin(dec_out)
        p_seq = self.hparams.log_softmax(pred)

        # compute outputs
        hyps = None
        if stage == sb.Stage.VALID:
            # the output of the encoder (enc) is used for valid search
            hyps, _ = self.hparams.valid_search(src.detach(), wav_lens)
        elif stage == sb.Stage.TEST:
            hyps, _ = self.hparams.test_search(src.detach(), wav_lens)

        return p_seq, wav_lens, hyps

    def compute_objectives(self, predictions, batch, stage):
        """Computes the NLL loss given predictions and targets; accumulates
        accuracy and BLEU metrics on VALID/TEST."""
        (p_seq, wav_lens, hyps) = predictions
        ids = batch.id
        tokens_eos, tokens_eos_lens = batch.tokens_eos

        # st loss
        loss = self.hparams.seq_cost(p_seq, tokens_eos, length=tokens_eos_lens)

        if stage != sb.Stage.TRAIN:
            # The detokenizer is only needed for scoring, so it is created
            # here rather than on every training batch (as the original did).
            fr_detokenizer = MosesDetokenizer(lang=self.hparams.lang)
            # ``tokenizer`` is the module-level SentencePiece object
            # created in ``dataio_prepare``.
            predictions = [
                fr_detokenizer.detokenize(
                    tokenizer.sp.decode_ids(utt_seq).split(" ")
                )
                for utt_seq in hyps
            ]

            detokenized_translation = [
                fr_detokenizer.detokenize(translation.split(" "))
                for translation in batch.trans
            ]
            # it needs to be a list of list due to the extend on the bleu implementation
            targets = [detokenized_translation]

            self.bleu_metric.append(ids, predictions, targets)

            # compute the accuracy of the one-step-forward prediction
            self.acc_metric.append(p_seq, tokens_eos, tokens_eos_lens)
        return loss

    def init_optimizers(self):
        """Initializes the Adam optimizer for the model and, unless the
        wav2vec2 encoder is frozen, a dedicated wav2vec2 optimizer."""
        if not self.hparams.wav2vec2_frozen:
            self.wav2vec_optimizer = self.hparams.wav2vec_opt_class(
                self.modules.wav2vec2.parameters()
            )
        self.adam_optimizer = self.hparams.adam_opt_class(
            self.hparams.model.parameters()
        )

    def zero_grad(self, set_to_none=False):
        """Zeroes gradients of all active optimizers."""
        if not self.hparams.wav2vec2_frozen:
            self.wav2vec_optimizer.zero_grad(set_to_none)
        self.adam_optimizer.zero_grad(set_to_none)

    def fit_batch(self, batch):
        """Train the parameters given a single batch in input"""
        predictions = self.compute_forward(batch, sb.Stage.TRAIN)
        loss = self.compute_objectives(predictions, batch, sb.Stage.TRAIN)
        loss.backward()

        if self.check_gradients(loss):
            if not self.hparams.wav2vec2_frozen:  # if wav2vec2 is not frozen
                self.wav2vec_optimizer.step()
            self.adam_optimizer.step()

        if not self.hparams.wav2vec2_frozen:
            self.wav2vec_optimizer.zero_grad()
        self.adam_optimizer.zero_grad()

        return loss.detach().cpu()

    def evaluate_batch(self, batch, stage):
        """Computations needed for validation/test batches"""
        # NOTE(review): the forward pass runs with autograd enabled here;
        # only the loss is computed under no_grad. Left unchanged to keep
        # behavior, but wrapping the forward as well would save memory.
        predictions = self.compute_forward(batch, stage=stage)
        with torch.no_grad():
            loss = self.compute_objectives(predictions, batch, stage=stage)
        return loss.detach()

    def on_stage_start(self, stage, epoch):
        """Gets called when a stage (either training, validation, test) starts."""
        # BLEU is tracked in every stage; the original additionally
        # re-created it inside the branch below (harmless duplicate, removed).
        self.bleu_metric = self.hparams.bleu_computer()
        if stage != sb.Stage.TRAIN:
            self.acc_metric = self.hparams.acc_computer()

    def on_stage_end(self, stage, stage_loss, epoch):
        """Gets called at the end of a epoch."""
        # Compute/store important stats
        if stage == sb.Stage.TRAIN:
            self.train_stats = stage_loss
        else:  # valid or test
            stage_stats = {"loss": stage_loss}
            stage_stats["ACC"] = self.acc_metric.summarize()
            stage_stats["BLEU"] = self.bleu_metric.summarize(field="BLEU")
            stage_stats["BLEU_extensive"] = self.bleu_metric.summarize()
            current_epoch = self.hparams.epoch_counter.current

        # log stats and save checkpoint at end-of-epoch
        if stage == sb.Stage.VALID and sb.utils.distributed.if_main_process():
            # anneal learning rates on the validation BLEU
            old_lr_adam, new_lr_adam = self.hparams.lr_annealing_adam(
                stage_stats["BLEU"]
            )
            sb.nnet.schedulers.update_learning_rate(
                self.adam_optimizer, new_lr_adam
            )

            if not self.hparams.wav2vec2_frozen:
                (
                    old_lr_wav2vec,
                    new_lr_wav2vec,
                ) = self.hparams.lr_annealing_wav2vec(stage_stats["BLEU"])
                sb.nnet.schedulers.update_learning_rate(
                    self.wav2vec_optimizer, new_lr_wav2vec
                )
                self.hparams.train_logger.log_stats(
                    stats_meta={
                        "epoch": current_epoch,
                        "lr_adam": old_lr_adam,
                        "lr_wav2vec": old_lr_wav2vec,
                    },
                    train_stats={"loss": self.train_stats},
                    valid_stats=stage_stats,
                )
            else:
                self.hparams.train_logger.log_stats(
                    stats_meta={"epoch": current_epoch, "lr_adam": old_lr_adam},
                    train_stats={"loss": self.train_stats},
                    valid_stats=stage_stats,
                )

            # create checkpoint, keeping the 10 best by BLEU
            meta = {"BLEU": stage_stats["BLEU"], "epoch": current_epoch}
            name = "checkpoint_epoch" + str(current_epoch)

            self.checkpointer.save_and_keep_only(
                meta=meta, name=name, num_to_keep=10, max_keys=["BLEU"]
            )

        elif stage == sb.Stage.TEST:
            self.hparams.train_logger.log_stats(
                stats_meta={"Epoch loaded": self.hparams.epoch_counter.current},
                test_stats=stage_stats,
            )
# Define custom data procedure
def dataio_prepare(hparams):
    """Prepares the datasets to be used in the brain class.

    It also defines the data processing pipeline through user-defined
    functions.

    Arguments
    ---------
    hparams : dict
        Loaded hyperparameters (data paths, tokenizer and sorting options).

    Returns
    -------
    datasets : dict
        "train", "valid" and "test" ``DynamicItemDataset`` objects.
    tokenizer : SentencePiece
        Tokenizer trained on the training translations.
    """

    # Define audio pipeline. In this case, we simply read the path contained
    # in the variable wav with the audio reader.
    @sb.utils.data_pipeline.takes("path")
    @sb.utils.data_pipeline.provides("sig")
    def audio_pipeline(wav):
        """Load the audio signal. This is done on the CPU in the `collate_fn`."""
        sig = sb.dataio.dataio.read_audio(wav)
        return sig

    @sb.utils.data_pipeline.takes("path")
    @sb.utils.data_pipeline.provides("sig")
    def sp_audio_pipeline(wav):
        """Load the audio signal and apply speed perturbation (train only)."""
        sig = sb.dataio.dataio.read_audio(wav)
        # speed_perturb operates on batched signals, hence unsqueeze/squeeze
        sig = sig.unsqueeze(0)
        sig = hparams["speed_perturb"](sig)
        sig = sig.squeeze(0)
        return sig

    # Define text processing pipeline. We start from the raw text and then
    # encode it using the tokenizer. The tokens with BOS are used for feeding
    # decoder during training, the tokens with EOS for computing the cost function.
    @sb.utils.data_pipeline.takes("trans")
    @sb.utils.data_pipeline.provides(
        "trans", "tokens_list", "tokens_bos", "tokens_eos"
    )
    def reference_text_pipeline(translation):
        """Processes the transcriptions to generate proper labels"""
        yield translation
        tokens_list = tokenizer.sp.encode_as_ids(translation)
        yield tokens_list
        tokens_bos = torch.LongTensor([hparams["bos_index"]] + tokens_list)
        yield tokens_bos
        tokens_eos = torch.LongTensor(tokens_list + [hparams["eos_index"]])
        yield tokens_eos

    data_folder = hparams["data_folder"]

    # Every dataset exposes the same output keys (deduplicated from the
    # two inline copies in the original).
    output_keys = [
        "id",
        "sig",
        "duration",
        "trans",
        "tokens_list",
        "tokens_bos",
        "tokens_eos",
    ]

    # 1. train tokenizer on the data
    tokenizer = SentencePiece(
        model_dir=hparams["save_folder"],
        vocab_size=hparams["vocab_size"],
        annotation_train=hparams["annotation_train"],
        annotation_read="trans",
        annotation_format="json",
        model_type="unigram",
        bos_id=hparams["bos_index"],
        eos_id=hparams["eos_index"],
    )

    # 2. load data and tokenize with trained tokenizer
    datasets = {}
    for dataset in ["train", "valid"]:
        json_path = f"{data_folder}/{dataset}.json"
        # speed perturbation is applied to the training set only
        is_use_sp = dataset == "train" and "speed_perturb" in hparams
        audio_pipeline_func = sp_audio_pipeline if is_use_sp else audio_pipeline

        datasets[dataset] = sb.dataio.dataset.DynamicItemDataset.from_json(
            json_path=json_path,
            replacements={"data_root": data_folder},
            dynamic_items=[audio_pipeline_func, reference_text_pipeline],
            output_keys=output_keys,
        )

    # NOTE(review): "valid" is loaded twice — first from {data_folder}/valid.json
    # above and then overwritten from hparams["annotation_valid"] here. This
    # reproduces the original behavior; confirm it is intentional.
    for dataset in ["valid", "test"]:
        json_path = hparams[f"annotation_{dataset}"]
        datasets[dataset] = sb.dataio.dataset.DynamicItemDataset.from_json(
            json_path=json_path,
            replacements={"data_root": data_folder},
            dynamic_items=[audio_pipeline, reference_text_pipeline],
            output_keys=output_keys,
        )

    # Sorting training data with ascending order makes the code much
    # faster because we minimize zero-padding. In most of the cases, this
    # does not harm the performance.
    if hparams["sorting"] == "ascending":
        # use smaller dataset to debug the model
        if hparams["debug"]:
            # Bug fix: the original debug branch passed reverse=True here,
            # i.e. it sorted in *descending* order while in "ascending" mode.
            datasets["train"] = datasets["train"].filtered_sorted(
                key_min_value={"duration": hparams["sorting_min_duration"]},
                key_max_value={"duration": hparams["sorting_max_duration"]},
                sort_key="duration",
            )
            datasets["valid"] = datasets["valid"].filtered_sorted(
                key_min_value={"duration": hparams["sorting_min_duration"]},
                key_max_value={"duration": hparams["sorting_max_duration"]},
                sort_key="duration",
            )
        else:
            datasets["train"] = datasets["train"].filtered_sorted(
                sort_key="duration"
            )
            datasets["valid"] = datasets["valid"].filtered_sorted(
                sort_key="duration"
            )
        # sorted data must not be re-shuffled by the dataloader
        # (the original assigned this flag twice)
        hparams["dataloader_options"]["shuffle"] = False
    elif hparams["sorting"] == "descending":
        # use smaller dataset to debug the model
        if hparams["debug"]:
            datasets["train"] = datasets["train"].filtered_sorted(
                key_min_value={"duration": hparams["sorting_min_duration"]},
                key_max_value={"duration": hparams["sorting_max_duration"]},
                sort_key="duration",
                reverse=True,
            )
            datasets["valid"] = datasets["valid"].filtered_sorted(
                key_min_value={"duration": hparams["sorting_min_duration"]},
                key_max_value={"duration": hparams["sorting_max_duration"]},
                sort_key="duration",
                reverse=True,
            )
        else:
            datasets["train"] = datasets["train"].filtered_sorted(
                sort_key="duration", reverse=True
            )
            datasets["valid"] = datasets["valid"].filtered_sorted(
                sort_key="duration", reverse=True
            )
        hparams["dataloader_options"]["shuffle"] = False
    elif hparams["sorting"] == "random":
        # use smaller dataset to debug the model
        if hparams["debug"]:
            datasets["train"] = datasets["train"].filtered_sorted(
                # NOTE(review): other branches read "sorting_min_duration";
                # "sorting_debug_duration" may be a typo — confirm against
                # the hparams YAML before changing.
                key_min_value={"duration": hparams["sorting_debug_duration"]},
                key_max_value={"duration": hparams["sorting_max_duration"]},
                sort_key="duration",
            )
            datasets["valid"] = datasets["valid"].filtered_sorted(
                key_min_value={"duration": hparams["sorting_min_duration"]},
                key_max_value={"duration": hparams["sorting_max_duration"]},
                sort_key="duration",
            )
        hparams["dataloader_options"]["shuffle"] = True
    else:
        raise NotImplementedError(
            "sorting must be random, ascending or descending"
        )
    return datasets, tokenizer
if __name__ == "__main__":
    # Load hyperparameters file with command-line overrides
    hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])
    with open(hparams_file) as fin:
        hparams = load_hyperpyyaml(fin, overrides)

    # creates a logger
    logger = logging.getLogger(__name__)

    # If distributed_launch=True then
    # create ddp_group with the right communication protocol
    sb.utils.distributed.ddp_init_group(run_opts)

    # Create experiment directory
    sb.create_experiment_directory(
        experiment_directory=hparams["output_folder"],
        hyperparams_to_save=hparams_file,
        overrides=overrides,
    )

    # Create main experiment class
    st_brain = ST(
        modules=hparams["modules"],
        hparams=hparams,
        run_opts=run_opts,
        checkpointer=hparams["checkpointer"],
    )

    # Data preparation (on the main process only, DDP-safe)
    import prepare_iwslt22

    if not hparams["skip_prep"]:
        run_on_main(
            prepare_iwslt22.data_proc,
            kwargs={
                "dataset_folder": hparams["root_data_folder"],
                "output_folder": hparams["data_folder"],
            },
        )

    # Load datasets for training, valid, and test, trains and applies tokenizer.
    # The returned ``tokenizer`` is also read as a module-level global by
    # ST.compute_objectives during decoding.
    datasets, tokenizer = dataio_prepare(hparams)

    # Before training, we drop some of the wav2vec 2.0 Transformer Encoder layers,
    # keeping only the first ``keep_n_layers``
    st_brain.modules.wav2vec2.model.encoder.layers = st_brain.modules.wav2vec2.model.encoder.layers[
        : hparams["keep_n_layers"]
    ]

    # Training
    st_brain.fit(
        st_brain.hparams.epoch_counter,
        datasets["train"],
        datasets["valid"],
        train_loader_kwargs=hparams["dataloader_options"],
        valid_loader_kwargs=hparams["test_dataloader_options"],
    )

    # Test (the valid set is also re-scored with the test search settings)
    for dataset in ["valid", "test"]:
        st_brain.evaluate(
            datasets[dataset],
            test_loader_kwargs=hparams["test_dataloader_options"],
        )
| 16,143 | 36.284065 | 100 | py |
speechbrain | speechbrain-main/recipes/Voicebank/voicebank_prepare.py | # -*- coding: utf-8 -*-
"""
Data preparation.
Download and resample, use ``download_vctk`` below.
https://datashare.is.ed.ac.uk/handle/10283/2791
Authors:
* Szu-Wei Fu, 2020
* Peter Plantinga, 2020
"""
import os
import json
import string
import urllib
import shutil
import logging
import tempfile
import torchaudio
from torchaudio.transforms import Resample
from speechbrain.utils.data_utils import get_all_files, download_file
from speechbrain.dataio.dataio import read_audio
logger = logging.getLogger(__name__)
LEXICON_URL = "http://www.openslr.org/resources/11/librispeech-lexicon.txt"
TRAIN_JSON = "train.json"
TEST_JSON = "test.json"
VALID_JSON = "valid.json"
SAMPLERATE = 16000
TRAIN_SPEAKERS = [
"p226",
"p287",
"p227",
"p228",
"p230",
"p231",
"p233",
"p236",
"p239",
"p243",
"p244",
"p250",
"p254",
"p256",
"p258",
"p259",
"p267",
"p268",
"p269",
"p270",
"p273",
"p274",
"p276",
"p277",
"p278",
"p279",
"p282",
"p286",
]
# Lexicon missing entries: upper-case word -> space-separated phone string,
# used to seed the lexicon built by ``create_lexicon`` for words absent from
# the downloaded LibriSpeech lexicon.
# (Fixed: the original dict literal listed the key "RETHINK" twice.)
MISSING_LEXICON = {
    "CRUCIALLY": "K R UW SH AH L IY",
    "PAEDOPHILES": "P EH D OW F AY L S",
    "MR": "M IH S T ER",
    "BBC": "B IY B IY S IY",
    "EUPHORIC": "Y UW F AO R IH K",
    "RACISM": "R EY S IH S M",
    "MP": "EH M P IY",
    "RESTRUCTURING": "R IY S T R AH K CH ER IH NG",
    "OSAMA": "OW S AH M AH",
    "GUITARIST": "G IH T AA R IH S T",
    "BLUESHE": "B L UW SH IY",
    "FLANKER": "F L AY N K ER",
    "SADDAM": "S AA D AA M",
    "COVERUP": "K UH V ER UH P",
    "FBI": "EH F B IY AY",
    "PREEMPTIVE": "P R IY EH M P T IH V",
    "FOURYEAR": "F AO R Y IY R",
    "XRAY": "EH K S R AY",
    "TALIBAN": "T AE L IH B AA N",
    "SUPERIMPOSITION": "S UW P ER IH M P OW S IH SH AH N",
    "GUIDELINES": "G AY D L AY N S",
    "FINALISED": "F AY N AH L AY Z D",
    "HALFTIME": "H AE F T AY M",
    "WINGERS": "W IH NG ER Z",
    "GM": "J IY EH M",
    "MCGREGOR": "M AH K G R EH G AO R",
    "TWODAY": "T UW D EY",
    "DATABASE": "D EY T AH B EY S",
    "TELECOM": "T EH L AH K AO M",
    "SHORTTERM": "SH AO R T ER M",
    "SHORTFALL": "SH AO R T F AH L",
    "MCCALL": "M AH K AH L",
    "HEADTEACHER": "H EH D T IY CH ER",
    "TAKEOVER": "T EY K OW V ER",
    "ONETHIRD": "W AH N TH ER D",
    "TV": "T IY V IY",
    "SCREENPLAY": "S K R IY N P L EY",
    "YUGOSLAV": "Y UW G OW S L AA V",
    "HIBS": "HH IH B Z",
    "DISPOSALS": "D IH S P OW S AH L Z",
    "MODERNISATION": "M AA D ER N AH Z EY SH AH N",
    "REALLIFE": "R IY L AY F",
    "ONEYEAR": "W AH N Y IY R",
    "GRASSROOTS": "G R AE S R UW T S",
    "ARNIE": "AH R N IY",
    "PARTTIME": "P AH R T AY M",
    "SHORTLIST": "SH AO R T L IH S T",
    "OUTPERFORMED": "OW T P ER F AO R M D",
    "LONGTERM": "L AO NG T ER M",
    "DAYTODAY": "D EY T UW D EY",
    "MCPHERSON": "M AH K F ER S AH N",
    "OUTSOURCING": "OW T S AO R S IH NG",
    "FULLSCALE": "F UH L S K EY L",
    "SERGIO": "S ER J IY OW",
    "HENMAN": "HH EH N M AA N",
    "MCLEOD": "M AH K L IY AO D",
    "TIMESCALE": "T AY M S K EY L",
    "REFURBISHMENT": "R IY F UH R B IH SH M AH N T",
    "LINEUP": "L AY N UH P",
    "DOWNBEAT": "D OW N B IY T",
    "MANDELA": "M AE N D EH L AH",
    "UNDERAGE": "UH N D ER EY J",
    "MCNAUGHTON": "M AH K N AW T AH N",
    "MICKELSON": "M IH K L S AH N",
    "THREEQUARTERS": "TH R IY K AO R T ER Z",
    "WEBSITE": "W EH B S AY T",
    "BLUEITS": "B L UW IH T S",
    "CEASEFIRE": "S IY S F AY R",
    "FULLTIME": "F UH L T AY M",
    "DOCHERTY": "D AH K ER T IY",
    "RUNNERUP": "R UH N ER AH P",
    "DOWNTURN": "D OW N T ER N",
    "EUROS": "Y ER OW S",
    "FOOTANDMOUTH": "F UH T AE N D M OW TH",
    "HIGHLIGHTED": "HH AY L AY T AH D",
    "MIDFIELD": "M IH D F IY L D",
    "MCKENZIE": "M AH K EH N Z IY",
    "BENCHMARK": "B EH N CH M AA R K",
    "MCCONNELL": "M AH K AW N EH L",
    "UPGRADING": "UH P G R EY D IH NG",
    "BLUNKETT": "B L UH N K AH T",
    "RETHINK": "R IY TH IH N K",
    "UPBEAT": "AH P B IY T",
    "TELECOMS": "T EH L AH K AO M Z",
    "APARTHEID": "AH P AH R T HH AY D",
    "AIRDRIE": "EY R D R IY",
    "HELPLINE": "HH EH L P L AY N",
    "CLEARCUT": "K L IY R K UH T",
}
def prepare_voicebank(
    data_folder, save_folder, valid_speaker_count=2, skip_prep=False
):
    """
    Prepares the json files for the Voicebank dataset.

    Expects the data folder to be the same format as the output of
    ``download_vctk()`` below.

    Arguments
    ---------
    data_folder : str
        Path to the folder where the original Voicebank dataset is stored.
    save_folder : str
        The directory where to store the json files.
    valid_speaker_count : int
        The number of validation speakers to use (out of 28 in train set).
    skip_prep: bool
        If True, skip data preparation.

    Example
    -------
    >>> data_folder = '/path/to/datasets/Voicebank'
    >>> save_folder = 'exp/Voicebank_exp'
    >>> prepare_voicebank(data_folder, save_folder)
    """
    if skip_prep:
        return

    # Setting output files
    save_json_train = os.path.join(save_folder, TRAIN_JSON)
    save_json_valid = os.path.join(save_folder, VALID_JSON)
    save_json_test = os.path.join(save_folder, TEST_JSON)

    # Check if this phase is already done (if so, skip it)
    if skip(save_json_train, save_json_test, save_json_valid):
        logger.info("Preparation completed in previous run, skipping.")
        return

    # Expected sub-folders as produced by download_vctk() (16 kHz versions)
    train_clean_folder = os.path.join(
        data_folder, "clean_trainset_28spk_wav_16k"
    )
    train_noisy_folder = os.path.join(
        data_folder, "noisy_trainset_28spk_wav_16k"
    )
    train_txts = os.path.join(data_folder, "trainset_28spk_txt")
    test_clean_folder = os.path.join(data_folder, "clean_testset_wav_16k")
    test_noisy_folder = os.path.join(data_folder, "noisy_testset_wav_16k")
    test_txts = os.path.join(data_folder, "testset_txt")

    # Setting the save folder
    if not os.path.exists(save_folder):
        os.makedirs(save_folder)

    # Additional checks to make sure the data folder contains Voicebank
    check_voicebank_folders(
        train_clean_folder,
        train_noisy_folder,
        train_txts,
        test_clean_folder,
        test_noisy_folder,
        test_txts,
    )

    logger.debug("Creating lexicon...")
    lexicon = create_lexicon(os.path.join(data_folder, "lexicon.txt"))

    logger.info("Creating json files for noisy VoiceBank...")
    logger.debug("Collecting files...")
    extension = [".wav"]
    # The validation split is the first `valid_speaker_count` speakers of
    # TRAIN_SPEAKERS; they are excluded from the training file list below.
    valid_speakers = TRAIN_SPEAKERS[:valid_speaker_count]
    wav_lst_train = get_all_files(
        train_noisy_folder, match_and=extension, exclude_or=valid_speakers
    )
    wav_lst_valid = get_all_files(
        train_noisy_folder, match_and=extension, match_or=valid_speakers
    )
    wav_lst_test = get_all_files(test_noisy_folder, match_and=extension)

    logger.debug("Creating json files for noisy VoiceBank...")
    create_json(
        wav_lst_train, save_json_train, train_clean_folder, train_txts, lexicon
    )
    create_json(
        wav_lst_valid, save_json_valid, train_clean_folder, train_txts, lexicon
    )
    create_json(
        wav_lst_test, save_json_test, test_clean_folder, test_txts, lexicon
    )
def skip(*filenames):
    """
    Detects if the Voicebank data_preparation has been already done.

    If the preparation has been done, we can skip it.

    Arguments
    ---------
    *filenames : str
        Paths of files the preparation is expected to have produced.

    Returns
    -------
    bool
        if True, the preparation phase can be skipped.
        if False, it must be done.
    """
    return all(os.path.isfile(path) for path in filenames)
def remove_punctuation(a_string):
    """Return *a_string* with every ASCII punctuation character removed."""
    table = str.maketrans("", "", string.punctuation)
    return a_string.translate(table)
def create_lexicon(lexicon_save_filepath):
    """
    Creates the lexicon object, downloading if it hasn't been done yet.

    Arguments
    ---------
    lexicon_save_filepath : str
        Path to save the lexicon when downloading.

    Returns
    -------
    dict
        Mapping from upper-case word to a space-separated phone string
        (stress markers stripped).
    """
    if not os.path.isfile(lexicon_save_filepath):
        download_file(LEXICON_URL, lexicon_save_filepath)

    # Iterate lexicon file and add the first pronunciation in the file for
    # each word to our lexicon dictionary.
    # Bug fix: start from a *copy* of the hand-written entries — the original
    # aliased MISSING_LEXICON directly, so every call mutated the
    # module-level constant.
    lexicon = dict(MISSING_LEXICON)
    delayed_words = {}
    with open(lexicon_save_filepath) as lexicon_file:
        for line in lexicon_file:
            line = line.split()
            # Strip stress markers (0/1/2) from the phones
            phns = " ".join(p.strip("012") for p in line[1:])

            # Don't add words with punctuation until we can be sure they won't
            # overwrite words without punctuation.
            clean_word = remove_punctuation(line[0])
            if clean_word != line[0] and clean_word not in delayed_words:
                delayed_words[clean_word] = phns
            elif clean_word == line[0] and clean_word not in lexicon:
                lexicon[clean_word] = phns

    # Add words with punctuation if they won't overwrite non-punctuated words
    for word, phns in delayed_words.items():
        if word not in lexicon:
            lexicon[word] = phns

    return lexicon
def create_json(wav_lst, json_file, clean_folder, txt_folder, lexicon):
    """
    Creates the json file given a list of wav files.

    Arguments
    ---------
    wav_lst : list
        The list of wav files.
    json_file : str
        The path of the output json file
    clean_folder : str
        The location of parallel clean samples.
    txt_folder : str
        The location of the transcript files.
    lexicon : dict
        Mapping from upper-case word to phone string, used to build the
        "phones" entry of each utterance.
    """
    logger.debug(f"Creating json lists in {json_file}")

    # Processing all the wav files in the list
    json_dict = {}
    for wav_file in wav_lst:  # ex:p203_122.wav

        # Example wav_file: p232_001.wav
        noisy_path, filename = os.path.split(wav_file)
        _, noisy_dir = os.path.split(noisy_path)
        _, clean_dir = os.path.split(clean_folder)
        # "{data_root}" is kept as a literal placeholder; presumably it is
        # substituted at load time by the dataio loader — TODO confirm.
        noisy_rel_path = os.path.join("{data_root}", noisy_dir, filename)
        clean_rel_path = os.path.join("{data_root}", clean_dir, filename)

        # Reading the signal (to retrieve duration in seconds)
        signal = read_audio(wav_file)
        duration = signal.shape[0] / SAMPLERATE

        # Read text
        snt_id = filename.replace(".wav", "")
        with open(os.path.join(txt_folder, snt_id + ".txt")) as f:
            word_string = f.read()
        word_string = remove_punctuation(word_string).strip().upper()
        # Words not present in the lexicon raise KeyError here
        phones = [
            phn for word in word_string.split() for phn in lexicon[word].split()
        ]

        # Remove duplicate phones: collapse consecutive repeats, including
        # across word boundaries
        phones = [i for i, j in zip(phones, phones[1:] + [None]) if i != j]
        phone_string = " ".join(phones)

        json_dict[snt_id] = {
            "noisy_wav": noisy_rel_path,
            "clean_wav": clean_rel_path,
            "length": duration,
            "words": word_string,
            "phones": phone_string,
        }

    # Writing the json lines
    with open(json_file, mode="w") as json_f:
        json.dump(json_dict, json_f, indent=2)

    logger.info(f"{json_file} successfully created!")
def check_voicebank_folders(*folders):
    """Raises FileNotFoundError if any passed folder does not exist.

    The error names the first missing folder.
    """
    for folder in folders:
        if os.path.exists(folder):
            continue
        raise FileNotFoundError(
            f"the folder {folder} does not exist (it is expected in "
            "the Voicebank dataset)"
        )
def download_vctk(destination, tmp_dir=None, device="cpu"):
    """Download dataset and perform resample to 16000 Hz.

    Downloads the six noisy-VCTK zip archives, unpacks them, resamples all
    wavs from 48 kHz to 16 kHz, and re-zips the result into *destination*.

    Arguments
    ---------
    destination : str
        Place to put final zipped dataset.
    tmp_dir : str
        Location to store temporary files. Will use `tempfile` if not provided.
    device : str
        Passed directly to pytorch's ``.to()`` method. Used for resampling.
    """
    dataset_name = "noisy-vctk-16k"
    if tmp_dir is None:
        tmp_dir = tempfile.gettempdir()
    final_dir = os.path.join(tmp_dir, dataset_name)

    if not os.path.isdir(tmp_dir):
        os.mkdir(tmp_dir)
    if not os.path.isdir(final_dir):
        os.mkdir(final_dir)

    prefix = "https://datashare.is.ed.ac.uk/bitstream/handle/10283/2791/"
    noisy_vctk_urls = [
        prefix + "clean_testset_wav.zip",
        prefix + "noisy_testset_wav.zip",
        prefix + "testset_txt.zip",
        prefix + "clean_trainset_28spk_wav.zip",
        prefix + "noisy_trainset_28spk_wav.zip",
        prefix + "trainset_28spk_txt.zip",
    ]

    zip_files = []
    for url in noisy_vctk_urls:
        filename = os.path.join(tmp_dir, url.split("/")[-1])
        zip_files.append(filename)
        # Skip archives already downloaded in a previous (partial) run
        if not os.path.isfile(filename):
            print("Downloading " + url)
            with urllib.request.urlopen(url) as response:
                with open(filename, "wb") as tmp_file:
                    logger.info("... to " + tmp_file.name)
                    shutil.copyfileobj(response, tmp_file)

    # Unzip (archives are deleted after extraction)
    for zip_file in zip_files:
        logger.info("Unzipping " + zip_file)
        shutil.unpack_archive(zip_file, tmp_dir, "zip")
        os.remove(zip_file)

    # Move transcripts to final dir
    shutil.move(os.path.join(tmp_dir, "testset_txt"), final_dir)
    shutil.move(os.path.join(tmp_dir, "trainset_28spk_txt"), final_dir)

    # Downsample
    dirs = [
        "noisy_testset_wav",
        "clean_testset_wav",
        "noisy_trainset_28spk_wav",
        "clean_trainset_28spk_wav",
    ]

    downsampler = Resample(orig_freq=48000, new_freq=16000)

    for directory in dirs:
        logger.info("Resampling " + directory)
        dirname = os.path.join(tmp_dir, directory)

        # Make directory to store downsampled files
        dirname_16k = os.path.join(final_dir, directory + "_16k")
        if not os.path.isdir(dirname_16k):
            os.mkdir(dirname_16k)

        # Load files and downsample
        for filename in get_all_files(dirname, match_and=[".wav"]):
            signal, rate = torchaudio.load(filename)
            downsampled_signal = downsampler(signal.view(1, -1).to(device))

            # Save downsampled file
            # NOTE(review): filename[-12:] assumes basenames are exactly
            # 12 characters (e.g. "p232_001.wav") — TODO confirm.
            torchaudio.save(
                os.path.join(dirname_16k, filename[-12:]),
                downsampled_signal.cpu(),
                sample_rate=16000,
            )

            # Remove old file
            os.remove(filename)

        # Remove old directory
        os.rmdir(dirname)

    logger.info("Zipping " + final_dir)
    final_zip = shutil.make_archive(
        base_name=final_dir,
        format="zip",
        root_dir=os.path.dirname(final_dir),
        base_dir=os.path.basename(final_dir),
    )

    logger.info(f"Moving {final_zip} to {destination}")
    shutil.move(final_zip, os.path.join(destination, dataset_name + ".zip"))
| 14,761 | 30.14346 | 80 | py |
speechbrain | speechbrain-main/recipes/Voicebank/enhance/SEGAN/train.py | #!/usr/bin/env/python3
"""Recipe for training a speech enhancement system with the Voicebank dataset based on the SEGAN model architecture.
(based on the paper: Pascual et al. https://arxiv.org/pdf/1703.09452.pdf).
To run this recipe, do the following:
> python train.py hparams/train.yaml
Authors
* Francis Carter 2021
* Mirco Ravanelli 2021
"""
import os
import sys
import torch
import torchaudio
import speechbrain as sb
from pesq import pesq
from hyperpyyaml import load_hyperpyyaml
from speechbrain.utils.metric_stats import MetricStats
from speechbrain.nnet.loss.stoi_loss import stoi_loss
from speechbrain.utils.distributed import run_on_main
# Brain class for speech enhancement training
class SEBrain(sb.Brain):
def compute_forward_g(self, noisy_wavs):
"""Forward computations of the generator. Input noisy signal,
output clean signal"""
noisy_wavs = noisy_wavs.to(self.device)
predict_wavs = self.modules["model_g"](noisy_wavs)
return predict_wavs
def compute_forward_d(self, noisy_wavs, clean_wavs):
"""Forward computations from discriminator. Input denoised-noisy pair,
output whether denoising was properly acheived"""
noisy_wavs = noisy_wavs.to(self.device)
clean_wavs = clean_wavs.to(self.device)
inpt = torch.cat((noisy_wavs, clean_wavs), -1)
out = self.modules["model_d"](inpt)
return out
def compute_objectives_d1(self, d_outs, batch):
"""Computes the loss of a discriminator given predicted and
targeted outputs, with target being clean"""
loss = self.hparams.compute_cost["d1"](d_outs)
self.loss_metric_d1.append(batch.id, d_outs, reduction="batch")
return loss
def compute_objectives_d2(self, d_outs, batch):
"""Computes the loss of a discriminator given predicted and targeted outputs,
with target being noisy"""
loss = self.hparams.compute_cost["d2"](d_outs)
self.loss_metric_d2.append(batch.id, d_outs, reduction="batch")
return loss
def compute_objectives_g3(
self,
d_outs,
predict_wavs,
clean_wavs,
batch,
stage,
z_mean=None,
z_logvar=None,
):
"""Computes the loss of the generator based on discriminator and generator losses"""
clean_wavs_orig, lens = batch.clean_sig
clean_wavs_orig = clean_wavs_orig.to(self.device)
clean_wavs = clean_wavs.to(self.device)
loss = self.hparams.compute_cost["g3"](
d_outs,
predict_wavs,
clean_wavs,
lens,
l1LossCoeff=self.hparams.l1LossCoeff,
klLossCoeff=self.hparams.klLossCoeff,
z_mean=z_mean,
z_logvar=z_logvar,
)
self.loss_metric_g3.append(
batch.id,
d_outs,
predict_wavs,
clean_wavs,
lens,
l1LossCoeff=self.hparams.l1LossCoeff,
klLossCoeff=self.hparams.klLossCoeff,
z_mean=z_mean,
z_logvar=z_logvar,
reduction="batch",
)
if stage != sb.Stage.TRAIN:
# Evaluate speech quality/intelligibility
predict_wavs = predict_wavs.reshape(self.batch_current, -1)
clean_wavs = clean_wavs.reshape(self.batch_current, -1)
predict_wavs = predict_wavs[:, 0 : self.original_len]
clean_wavs = clean_wavs[:, 0 : self.original_len]
self.stoi_metric.append(
batch.id, predict_wavs, clean_wavs, lens, reduction="batch"
)
self.pesq_metric.append(
batch.id, predict=predict_wavs.cpu(), target=clean_wavs.cpu()
)
# Write enhanced test wavs to file
if stage == sb.Stage.TEST:
lens = lens * clean_wavs.shape[1]
for name, pred_wav, length in zip(batch.id, predict_wavs, lens):
name += ".wav"
enhance_path = os.path.join(
self.hparams.enhanced_folder, name
)
print(enhance_path)
pred_wav = pred_wav / torch.max(torch.abs(pred_wav)) * 0.99
torchaudio.save(
enhance_path,
pred_wav[: int(length)].cpu().unsqueeze(0),
hparams["sample_rate"],
)
return loss
def fit_batch(self, batch):
"""Fit one batch, override to do multiple updates.
The default implementation depends on a few methods being defined
with a particular behavior:
* ``compute_forward()``
* ``compute_objectives()``
Also depends on having optimizers passed at initialization.
Arguments
---------
batch : list of torch.Tensors
Batch of data to use for training. Default implementation assumes
this batch has two elements: inputs and targets.
Returns
-------
detached loss
"""
noisy_wavs, lens = batch.noisy_sig
clean_wavs, lens = batch.clean_sig
# split sentences in smaller chunks
noisy_wavs = create_chunks(
noisy_wavs,
chunk_size=hparams["chunk_size"],
chunk_stride=hparams["chunk_stride"],
)
clean_wavs = create_chunks(
clean_wavs,
chunk_size=hparams["chunk_size"],
chunk_stride=hparams["chunk_stride"],
)
# first of three step training process detailed in SEGAN paper
out_d1 = self.compute_forward_d(noisy_wavs, clean_wavs)
loss_d1 = self.compute_objectives_d1(out_d1, batch)
loss_d1.backward()
if self.check_gradients(loss_d1):
self.optimizer_d.step()
self.optimizer_d.zero_grad()
# second training step
z_mean = None
z_logvar = None
if self.modules["model_g"].latent_vae:
out_g2, z_mean, z_logvar = self.compute_forward_g(noisy_wavs)
else:
out_g2 = self.compute_forward_g(noisy_wavs)
out_d2 = self.compute_forward_d(out_g2, clean_wavs)
loss_d2 = self.compute_objectives_d2(out_d2, batch)
loss_d2.backward(retain_graph=True)
if self.check_gradients(loss_d2):
self.optimizer_d.step()
self.optimizer_d.zero_grad()
# third (last) training step
self.optimizer_g.zero_grad()
out_d3 = self.compute_forward_d(out_g2, clean_wavs)
loss_g3 = self.compute_objectives_g3(
out_d3,
out_g2,
clean_wavs,
batch,
sb.Stage.TRAIN,
z_mean=z_mean,
z_logvar=z_logvar,
)
loss_g3.backward()
if self.check_gradients(loss_g3):
self.optimizer_g.step()
self.optimizer_g.zero_grad()
self.optimizer_d.zero_grad()
loss_d1.detach().cpu()
loss_d2.detach().cpu()
loss_g3.detach().cpu()
return loss_d1 + loss_d2 + loss_g3
def evaluate_batch(self, batch, stage):
"""Evaluate one batch, override for different procedure than train.
The default implementation depends on two methods being defined
with a particular behavior:
* ``compute_forward()``
* ``compute_objectives()``
Arguments
---------
batch : list of torch.Tensors
Batch of data to use for evaluation. Default implementation assumes
this batch has two elements: inputs and targets.
stage : Stage
The stage of the experiment: Stage.VALID, Stage.TEST
Returns
-------
detached loss
"""
noisy_wavs, lens = batch.noisy_sig
clean_wavs, lens = batch.clean_sig
self.batch_current = clean_wavs.shape[0]
self.original_len = clean_wavs.shape[1]
# Add padding to make sure all the signal will be processed.
padding_elements = torch.zeros(
clean_wavs.shape[0], hparams["chunk_size"], device=clean_wavs.device
)
clean_wavs = torch.cat([clean_wavs, padding_elements], dim=1)
noisy_wavs = torch.cat([noisy_wavs, padding_elements], dim=1)
# Split sentences in smaller chunks
noisy_wavs = create_chunks(
noisy_wavs,
chunk_size=hparams["chunk_size"],
chunk_stride=hparams["chunk_size"],
)
clean_wavs = create_chunks(
clean_wavs,
chunk_size=hparams["chunk_size"],
chunk_stride=hparams["chunk_size"],
)
# Perform speech enhancement with the current model
out_d1 = self.compute_forward_d(noisy_wavs, clean_wavs)
loss_d1 = self.compute_objectives_d1(out_d1, batch)
z_mean = None
z_logvar = None
if self.modules["model_g"].latent_vae:
out_g2, z_mean, z_logvar = self.compute_forward_g(noisy_wavs)
else:
out_g2 = self.compute_forward_g(noisy_wavs)
out_d2 = self.compute_forward_d(out_g2, clean_wavs)
loss_d2 = self.compute_objectives_d2(out_d2, batch)
loss_g3 = self.compute_objectives_g3(
out_d2,
out_g2,
clean_wavs,
batch,
stage=stage,
z_mean=z_mean,
z_logvar=z_logvar,
)
loss_d1.detach().cpu()
loss_d2.detach().cpu()
loss_g3.detach().cpu()
return loss_d1 + loss_d2 + loss_g3
def init_optimizers(self):
"""Called during ``on_fit_start()``, initialize optimizers
after parameters are fully configured (e.g. DDP, jit).
The default implementation of this method depends on an optimizer
class being passed at initialization that takes only a list
of parameters (e.g., a lambda or a partial function definition).
This creates a single optimizer that optimizes all trainable params.
Override this class if there are multiple optimizers.
"""
if self.opt_class is not None:
self.optimizer_d = self.opt_class(
self.modules["model_d"].parameters()
)
self.optimizer_g = self.opt_class(
self.modules["model_g"].parameters()
)
if self.checkpointer is not None:
self.checkpointer.add_recoverable(
"optimizer_g", self.optimizer_g
)
self.checkpointer.add_recoverable(
"optimizer_d", self.optimizer_d
)
def zero_grad(self, set_to_none=False):
self.optimizer_d.zero_grad(set_to_none)
self.optimizer_g.zero_grad(set_to_none)
def on_stage_start(self, stage, epoch=None):
"""Gets called at the beginning of each epoch"""
self.loss_metric_d1 = MetricStats(
metric=self.hparams.compute_cost["d1"]
)
self.loss_metric_d2 = MetricStats(
metric=self.hparams.compute_cost["d2"]
)
self.loss_metric_g3 = MetricStats(
metric=self.hparams.compute_cost["g3"]
)
self.stoi_metric = MetricStats(metric=stoi_loss)
# Define function taking (prediction, target) for parallel eval
def pesq_eval(pred_wav, target_wav):
"""Computes the PESQ evaluation metric"""
return pesq(
fs=hparams["sample_rate"],
ref=target_wav.numpy().squeeze(),
deg=pred_wav.numpy().squeeze(),
mode="wb",
)
if stage != sb.Stage.TRAIN:
self.pesq_metric = MetricStats(
metric=pesq_eval, batch_eval=False, n_jobs=1
)
def on_stage_end(self, stage, stage_loss, epoch=None):
"""Gets called at the end of an epoch."""
if stage == sb.Stage.TRAIN:
self.train_loss = stage_loss
self.train_stats = { # "loss": self.loss_metric.scores,
"loss_d1": self.loss_metric_d1.scores,
"loss_d2": self.loss_metric_d2.scores,
"loss_g3": self.loss_metric_g3.scores,
}
else:
stats = {
"loss": stage_loss,
"pesq": self.pesq_metric.summarize("average"),
"stoi": -self.stoi_metric.summarize("average"),
}
if stage == sb.Stage.VALID:
if self.hparams.use_tensorboard:
valid_stats = {
# "loss": self.loss_metric.scores,
"loss_d1": self.loss_metric_d1.scores,
"loss_d2": self.loss_metric_d2.scores,
"loss_g3": self.loss_metric_g3.scores,
"stoi": self.stoi_metric.scores,
"pesq": self.pesq_metric.scores,
}
self.hparams.tensorboard_train_logger.log_stats(
{"Epoch": epoch}, self.train_stats, valid_stats
)
self.hparams.train_logger.log_stats(
{"Epoch": epoch},
train_stats={"loss": self.train_loss},
valid_stats=stats,
)
self.checkpointer.save_and_keep_only(meta=stats, max_keys=["pesq"])
if stage == sb.Stage.TEST:
self.hparams.train_logger.log_stats(
{"Epoch loaded": self.hparams.epoch_counter.current},
test_stats=stats,
)
def create_chunks(x, chunk_size=16384, chunk_stride=16384):
    """Split each signal in a batch into fixed-size chunks.

    ``x`` is a (batch, time) tensor. Windows of ``chunk_size`` samples are
    extracted every ``chunk_stride`` samples (windows overlap when the
    stride is smaller than the size) and the resulting chunks are stacked
    along the batch axis, yielding a (batch * n_chunks, chunk_size, 1)
    tensor.
    """
    windows = x.unfold(1, chunk_size, chunk_stride)
    batch, n_chunks = windows.shape[0], windows.shape[1]
    return windows.reshape(batch * n_chunks, -1, 1)
def dataio_prep(hparams):
    """Create the train/valid/test ``DynamicItemDataset`` objects.

    Audio loading is defined through two small pipelines that turn the wav
    paths found in the JSON manifests into waveforms. The train split is
    optionally sorted by length according to ``hparams["sorting"]``.
    """

    @sb.utils.data_pipeline.takes("noisy_wav")
    @sb.utils.data_pipeline.provides("noisy_sig")
    def read_noisy(noisy_wav):
        return sb.dataio.dataio.read_audio(noisy_wav)

    @sb.utils.data_pipeline.takes("clean_wav")
    @sb.utils.data_pipeline.provides("clean_sig")
    def read_clean(clean_wav):
        return sb.dataio.dataio.read_audio(clean_wav)

    manifests = {
        "train": hparams["train_annotation"],
        "valid": hparams["valid_annotation"],
        "test": hparams["test_annotation"],
    }
    datasets = {
        split: sb.dataio.dataset.DynamicItemDataset.from_json(
            json_path=json_path,
            replacements={"data_root": hparams["data_folder"]},
            dynamic_items=[read_noisy, read_clean],
            output_keys=["id", "noisy_sig", "clean_sig"],
        )
        for split, json_path in manifests.items()
    }

    sorting = hparams["sorting"]
    if sorting in ("ascending", "descending"):
        datasets["train"] = datasets["train"].filtered_sorted(
            sort_key="length", reverse=sorting == "descending"
        )
        # Sorting and shuffling are mutually exclusive.
        hparams["train_dataloader_opts"]["shuffle"] = False
    elif sorting != "random":
        raise NotImplementedError(
            "Sorting must be random, ascending, or descending"
        )
    return datasets
def create_folder(folder):
    """Create ``folder`` (and parents) where enhanced wavs are stored."""
    # exist_ok makes repeated calls (e.g. under DDP) a no-op.
    os.makedirs(folder, exist_ok=True)
# Recipe begins!
if __name__ == "__main__":
    # Load hyperparameters file with command-line overrides
    hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])
    with open(hparams_file) as fin:
        hparams = load_hyperpyyaml(fin, overrides)
    # Initialize ddp (useful only for multi-GPU DDP training)
    sb.utils.distributed.ddp_init_group(run_opts)
    # Data preparation: build the JSON annotation manifests for Voicebank.
    from voicebank_prepare import prepare_voicebank # noqa
    run_on_main(
        prepare_voicebank,
        kwargs={
            "data_folder": hparams["data_folder"],
            # NOTE(review): annotations are saved into the data folder here,
            # while a sibling recipe saves them into the output folder —
            # confirm this is intended.
            "save_folder": hparams["data_folder"],
            "skip_prep": hparams["skip_prep"],
        },
    )
    # Create dataset objects
    datasets = dataio_prep(hparams)
    # Create experiment directory
    sb.create_experiment_directory(
        experiment_directory=hparams["output_folder"],
        hyperparams_to_save=hparams_file,
        overrides=overrides,
    )
    # Optional TensorBoard logging; the logger is stored back into hparams.
    if hparams["use_tensorboard"]:
        from speechbrain.utils.train_logger import TensorboardLogger
        hparams["tensorboard_train_logger"] = TensorboardLogger(
            hparams["tensorboard_logs"]
        )
    # Create the folder to save enhanced files (+ support for DDP)
    run_on_main(create_folder, kwargs={"folder": hparams["enhanced_folder"]})
    se_brain = SEBrain(
        modules=hparams["modules"],
        opt_class=hparams["opt_class"],
        hparams=hparams,
        run_opts=run_opts,
        checkpointer=hparams["checkpointer"],
    )
    # Load latest checkpoint to resume training
    se_brain.fit(
        epoch_counter=se_brain.hparams.epoch_counter,
        train_set=datasets["train"],
        valid_set=datasets["valid"],
        train_loader_kwargs=hparams["train_dataloader_opts"],
        valid_loader_kwargs=hparams["valid_dataloader_opts"],
    )
    # Load best checkpoint (highest PESQ) for evaluation
    test_stats = se_brain.evaluate(
        test_set=datasets["test"],
        max_key="pesq",
        test_loader_kwargs=hparams["test_dataloader_opts"],
    )
| 17,802 | 33.568932 | 116 | py |
speechbrain | speechbrain-main/recipes/Voicebank/enhance/MetricGAN/train.py | #!/usr/bin/env/python3
"""
Recipe for training a speech enhancement system with the Voicebank dataset.
To run this recipe, do the following:
> python train.py hparams/{hyperparam_file}.yaml
Authors
* Szu-Wei Fu 2020
* Peter Plantinga 2021
"""
import os
import sys
import shutil
import pickle
import torch
import torchaudio
import speechbrain as sb
from pesq import pesq
from enum import Enum, auto
from hyperpyyaml import load_hyperpyyaml
from speechbrain.utils.metric_stats import MetricStats
from speechbrain.processing.features import spectral_magnitude
from speechbrain.nnet.loss.stoi_loss import stoi_loss
from speechbrain.utils.distributed import run_on_main
from speechbrain.dataio.sampler import ReproducibleWeightedRandomSampler
def pesq_eval(pred_wav, target_wav):
    """Wide-band PESQ score rescaled from its native [-0.5, 4.5] to [0, 1]."""
    raw_score = pesq(
        fs=16000, ref=target_wav.numpy(), deg=pred_wav.numpy(), mode="wb"
    )
    return (raw_score + 0.5) / 5
class SubStage(Enum):
    """For keeping track of training stage progress"""

    # Generator training on the current epoch's data.
    GENERATOR = auto()
    # Discriminator training on freshly enhanced (current) data.
    CURRENT = auto()
    # Discriminator re-training on enhanced data from previous epochs.
    HISTORICAL = auto()
class MetricGanBrain(sb.Brain):
    """MetricGAN training: a generator enhances spectra while a
    discriminator learns to predict the value of a target perceptual
    metric (PESQ or STOI), giving the generator a differentiable
    surrogate of that metric to optimize against."""

    def load_history(self):
        """Restore the pickled set of previously enhanced samples, if any."""
        if os.path.isfile(self.hparams.historical_file):
            with open(self.hparams.historical_file, "rb") as fp:  # Unpickling
                self.historical_set = pickle.load(fp)

    def compute_feats(self, wavs):
        """Feature computation pipeline: STFT -> magnitude^0.5 -> log1p."""
        feats = self.hparams.compute_STFT(wavs)
        feats = spectral_magnitude(feats, power=0.5)
        # log1p compresses the dynamic range of the magnitudes.
        feats = torch.log1p(feats)
        return feats

    def compute_forward(self, batch, stage):
        "Given an input batch computes the enhanced signal"
        batch = batch.to(self.device)
        if self.sub_stage == SubStage.HISTORICAL:
            # Historical data was already enhanced in a previous epoch.
            predict_wav, lens = batch.enh_sig
        else:
            noisy_wav, lens = batch.noisy_sig
            noisy_spec = self.compute_feats(noisy_wav)
            # mask with "signal approximation (SA)"
            mask = self.modules.generator(noisy_spec, lengths=lens)
            mask = mask.clamp(min=self.hparams.min_mask).squeeze(2)
            predict_spec = torch.mul(mask, noisy_spec)
            # Also return predicted wav
            predict_wav = self.hparams.resynth(
                torch.expm1(predict_spec), noisy_wav
            )
        return predict_wav

    def compute_objectives(self, predictions, batch, stage, optim_name=""):
        "Given the network predictions and targets compute the total loss"
        predict_wav = predictions
        predict_spec = self.compute_feats(predict_wav)
        clean_wav, lens = batch.clean_sig
        clean_spec = self.compute_feats(clean_wav)
        ids = self.compute_ids(batch.id, optim_name)
        # One is real, zero is fake
        if optim_name == "generator":
            # The generator tries to drive the estimated score to 1.
            target_score = torch.ones(self.batch_size, 1, device=self.device)
            est_score = self.est_score(predict_spec, clean_spec)
            self.mse_metric.append(
                ids, predict_spec, clean_spec, lens, reduction="batch"
            )
            mse_cost = self.hparams.compute_cost(predict_spec, clean_spec, lens)
        # D Learns to estimate the scores of clean speech
        elif optim_name == "D_clean":
            target_score = torch.ones(self.batch_size, 1, device=self.device)
            est_score = self.est_score(clean_spec, clean_spec)
        # D Learns to estimate the scores of enhanced speech
        elif optim_name == "D_enh" and self.sub_stage == SubStage.CURRENT:
            target_score = self.score(ids, predict_wav, clean_wav, lens)
            est_score = self.est_score(predict_spec, clean_spec)
            # Write enhanced wavs during discriminator training, because we
            # compute the actual score here and we can save it
            self.write_wavs(batch.id, ids, predict_wav, target_score, lens)
        # D Relearns to estimate the scores of previous epochs
        elif optim_name == "D_enh" and self.sub_stage == SubStage.HISTORICAL:
            target_score = batch.score.unsqueeze(1).float()
            est_score = self.est_score(predict_spec, clean_spec)
        # D Learns to estimate the scores of noisy speech
        elif optim_name == "D_noisy":
            noisy_wav, _ = batch.noisy_sig
            noisy_spec = self.compute_feats(noisy_wav)
            target_score = self.score(ids, noisy_wav, clean_wav, lens)
            est_score = self.est_score(noisy_spec, clean_spec)
            # Save scores of noisy wavs
            self.save_noisy_scores(ids, target_score)
        if stage == sb.Stage.TRAIN:
            # Compute the cost
            cost = self.hparams.compute_cost(est_score, target_score)
            if optim_name == "generator":
                cost += self.hparams.mse_weight * mse_cost
                self.metrics["G"].append(cost.detach())
            else:
                self.metrics["D"].append(cost.detach())
        # On validation data compute scores
        if stage != sb.Stage.TRAIN:
            cost = self.hparams.compute_si_snr(predict_wav, clean_wav, lens)
            # Evaluate speech quality/intelligibility
            self.stoi_metric.append(
                batch.id, predict_wav, clean_wav, lens, reduction="batch"
            )
            self.pesq_metric.append(
                batch.id, predict=predict_wav, target=clean_wav, lengths=lens
            )
            # Write wavs to file, for evaluation
            lens = lens * clean_wav.shape[1]
            for name, pred_wav, length in zip(batch.id, predict_wav, lens):
                name += ".wav"
                enhance_path = os.path.join(self.hparams.enhanced_folder, name)
                torchaudio.save(
                    enhance_path,
                    torch.unsqueeze(pred_wav[: int(length)].cpu(), 0),
                    16000,
                )
        return cost

    def compute_ids(self, batch_id, optim_name):
        """Returns the list of ids, edited via optimizer name."""
        if optim_name == "D_enh":
            # Tag enhanced samples with the epoch they were produced in.
            return [f"{uid}@{self.epoch}" for uid in batch_id]
        return batch_id

    def save_noisy_scores(self, batch_id, scores):
        """Cache the metric scores of noisy inputs, keyed by utterance id."""
        for i, score in zip(batch_id, scores):
            self.noisy_scores[i] = score

    def score(self, batch_id, deg_wav, ref_wav, lens):
        """Returns actual metric score, either pesq or stoi
        Arguments
        ---------
        batch_id : list of str
            A list of the utterance ids for the batch
        deg_wav : torch.Tensor
            The degraded waveform to score
        ref_wav : torch.Tensor
            The reference waveform to use for scoring
        length : torch.Tensor
            The relative lengths of the utterances
        """
        # Only score utterances that have not been scored before.
        new_ids = [
            i
            for i, d in enumerate(batch_id)
            if d not in self.historical_set and d not in self.noisy_scores
        ]
        if len(new_ids) == 0:
            pass
        elif self.hparams.target_metric == "pesq":
            self.target_metric.append(
                ids=[batch_id[i] for i in new_ids],
                predict=deg_wav[new_ids].detach(),
                target=ref_wav[new_ids].detach(),
                lengths=lens[new_ids],
            )
            score = torch.tensor(
                [[s] for s in self.target_metric.scores], device=self.device,
            )
        elif self.hparams.target_metric == "stoi":
            self.target_metric.append(
                [batch_id[i] for i in new_ids],
                deg_wav[new_ids],
                ref_wav[new_ids],
                lens[new_ids],
                reduction="batch",
            )
            # stoi_loss values are negated so that larger means better.
            score = torch.tensor(
                [[-s] for s in self.target_metric.scores], device=self.device,
            )
        else:
            raise ValueError("Expected 'pesq' or 'stoi' for target_metric")
        # Clear metric scores to prepare for next batch
        self.target_metric.clear()
        # Combine old scores and new
        final_score = []
        for i, d in enumerate(batch_id):
            if d in self.historical_set:
                final_score.append([self.historical_set[d]["score"]])
            elif d in self.noisy_scores:
                final_score.append([self.noisy_scores[d]])
            else:
                final_score.append([score[new_ids.index(i)]])
        return torch.tensor(final_score, device=self.device)

    def est_score(self, deg_spec, ref_spec):
        """Returns score as estimated by discriminator
        Arguments
        ---------
        deg_spec : torch.Tensor
            The spectral features of the degraded utterance
        ref_spec : torch.Tensor
            The spectral features of the reference utterance
        """
        # The discriminator sees both spectra as a two-channel input.
        combined_spec = torch.cat(
            [deg_spec.unsqueeze(1), ref_spec.unsqueeze(1)], 1
        )
        return self.modules.discriminator(combined_spec)

    def write_wavs(self, clean_id, batch_id, wavs, scores, lens):
        """Write wavs to files, for historical discriminator training
        Arguments
        ---------
        clean_id : list of str
            The utterance ids of the corresponding clean references
        batch_id : list of str
            A list of the utterance ids for the batch
        wavs : torch.Tensor
            The wavs to write to files
        scores : torch.Tensor
            The actual scores for the corresponding utterances
        lens : torch.Tensor
            The relative lengths of each utterance
        """
        lens = lens * wavs.shape[1]
        record = {}
        for i, (cleanid, name, pred_wav, length) in enumerate(
            zip(clean_id, batch_id, wavs, lens)
        ):
            path = os.path.join(self.hparams.MetricGAN_folder, name + ".wav")
            data = torch.unsqueeze(pred_wav[: int(length)].cpu(), 0)
            torchaudio.save(path, data, self.hparams.Sample_rate)
            # Make record of path and score for historical training
            score = float(scores[i][0])
            clean_path = os.path.join(
                self.hparams.train_clean_folder, cleanid + ".wav"
            )
            record[name] = {
                "enh_wav": path,
                "score": score,
                "clean_wav": clean_path,
            }
        # Update records for historical training
        self.historical_set.update(record)
        with open(self.hparams.historical_file, "wb") as fp:  # Pickling
            pickle.dump(self.historical_set, fp)

    def fit_batch(self, batch):
        "Compute gradients and update either D or G based on sub-stage."
        predictions = self.compute_forward(batch, sb.Stage.TRAIN)
        loss_tracker = 0
        if self.sub_stage == SubStage.CURRENT:
            # One discriminator update per target type (clean/enh/noisy).
            for mode in ["clean", "enh", "noisy"]:
                loss = self.compute_objectives(
                    predictions, batch, sb.Stage.TRAIN, f"D_{mode}"
                )
                self.d_optimizer.zero_grad()
                loss.backward()
                if self.check_gradients(loss):
                    self.d_optimizer.step()
                loss_tracker += loss.detach() / 3
        elif self.sub_stage == SubStage.HISTORICAL:
            loss = self.compute_objectives(
                predictions, batch, sb.Stage.TRAIN, "D_enh"
            )
            self.d_optimizer.zero_grad()
            loss.backward()
            if self.check_gradients(loss):
                self.d_optimizer.step()
            loss_tracker += loss.detach()
        elif self.sub_stage == SubStage.GENERATOR:
            for name, param in self.modules.generator.named_parameters():
                if "Learnable_sigmoid" in name:
                    param.data = torch.clamp(
                        param, max=3.5
                    )  # to prevent gradient goes to infinity
                    # ``param != param`` is true only for NaN entries.
                    param.data[param != param] = 3.5  # set 'nan' to 3.5
            loss = self.compute_objectives(
                predictions, batch, sb.Stage.TRAIN, "generator"
            )
            self.g_optimizer.zero_grad()
            loss.backward()
            if self.check_gradients(loss):
                self.g_optimizer.step()
            loss_tracker += loss.detach()
        return loss_tracker

    def on_stage_start(self, stage, epoch=None):
        """Gets called at the beginning of each epoch
        This method calls ``fit()`` again to train the discriminator
        before proceeding with generator training.
        """
        self.mse_metric = MetricStats(metric=self.hparams.compute_cost)
        self.metrics = {"G": [], "D": []}
        if stage == sb.Stage.TRAIN:
            if self.hparams.target_metric == "pesq":
                # NOTE(review): reads the module-level ``hparams`` for
                # n_jobs — confirm it matches ``self.hparams``.
                self.target_metric = MetricStats(
                    metric=pesq_eval, n_jobs=hparams["n_jobs"], batch_eval=False
                )
            elif self.hparams.target_metric == "stoi":
                self.target_metric = MetricStats(metric=stoi_loss)
            else:
                raise NotImplementedError(
                    "Right now we only support 'pesq' and 'stoi'"
                )
            # Train discriminator before we start generator training
            if self.sub_stage == SubStage.GENERATOR:
                self.epoch = epoch
                self.train_discriminator()
                self.sub_stage = SubStage.GENERATOR
                print("Generator training by current data...")
        if stage != sb.Stage.TRAIN:
            self.pesq_metric = MetricStats(
                metric=pesq_eval, n_jobs=hparams["n_jobs"], batch_eval=False
            )
            self.stoi_metric = MetricStats(metric=stoi_loss)

    def train_discriminator(self):
        """A total of 3 data passes to update discriminator."""
        # First, iterate train subset w/ updates for clean, enh, noisy
        print("Discriminator training by current data...")
        self.sub_stage = SubStage.CURRENT
        self.fit(
            range(1),
            self.train_set,
            train_loader_kwargs=self.hparams.dataloader_options,
        )
        # Next, iterate historical subset w/ updates for enh
        if self.historical_set:
            print("Discriminator training by historical data...")
            self.sub_stage = SubStage.HISTORICAL
            self.fit(
                range(1),
                self.historical_set,
                train_loader_kwargs=self.hparams.dataloader_options,
            )
        # Finally, iterate train set again. Should iterate same
        # samples as before, due to ReproducibleRandomSampler
        print("Discriminator training by current data again...")
        self.sub_stage = SubStage.CURRENT
        self.fit(
            range(1),
            self.train_set,
            train_loader_kwargs=self.hparams.dataloader_options,
        )

    def on_stage_end(self, stage, stage_loss, epoch=None):
        "Called at the end of each stage to summarize progress"
        # Only the generator sub-stage reports epoch-level summaries.
        if self.sub_stage != SubStage.GENERATOR:
            return
        if stage == sb.Stage.TRAIN:
            self.train_loss = stage_loss
            g_loss = torch.tensor(self.metrics["G"])  # batch_size
            d_loss = torch.tensor(self.metrics["D"])  # batch_size
            print("Avg G loss: %.3f" % torch.mean(g_loss))
            print("Avg D loss: %.3f" % torch.mean(d_loss))
        else:
            stats = {
                "SI-SNR": -stage_loss,
                # Undo pesq_eval's [0, 1] normalization.
                "pesq": 5 * self.pesq_metric.summarize("average") - 0.5,
                "stoi": -self.stoi_metric.summarize("average"),
            }
            if stage == sb.Stage.VALID:
                if self.hparams.use_tensorboard:
                    valid_stats = {
                        "SI-SNR": -stage_loss,
                        "pesq": 5 * self.pesq_metric.summarize("average") - 0.5,
                        "stoi": -self.stoi_metric.summarize("average"),
                    }
                    self.hparams.tensorboard_train_logger.log_stats(valid_stats)
                self.hparams.train_logger.log_stats(
                    {"Epoch": epoch},
                    train_stats={"loss": self.train_loss},
                    valid_stats=stats,
                )
                self.checkpointer.save_and_keep_only(
                    meta=stats, max_keys=[self.hparams.target_metric]
                )
            if stage == sb.Stage.TEST:
                self.hparams.train_logger.log_stats(
                    {"Epoch loaded": self.hparams.epoch_counter.current},
                    test_stats=stats,
                )

    def make_dataloader(
        self, dataset, stage, ckpt_prefix="dataloader-", **loader_kwargs
    ):
        "Override dataloader to insert custom sampler/dataset"
        if stage == sb.Stage.TRAIN:
            # Create a new dataset each time, this set grows
            if self.sub_stage == SubStage.HISTORICAL:
                dataset = sb.dataio.dataset.DynamicItemDataset(
                    data=dataset,
                    dynamic_items=[enh_pipeline],
                    output_keys=["id", "enh_sig", "clean_sig", "score"],
                )
                samples = round(len(dataset) * self.hparams.history_portion)
            else:
                samples = self.hparams.number_of_samples
            # This sampler should give the same samples for D and G
            epoch = self.hparams.epoch_counter.current
            # Equal weights for all samples, we use "Weighted" so we can do
            # both "replacement=False" and a set number of samples, reproducibly
            weights = torch.ones(len(dataset))
            sampler = ReproducibleWeightedRandomSampler(
                weights, epoch=epoch, replacement=False, num_samples=samples
            )
            loader_kwargs["sampler"] = sampler
            if self.sub_stage == SubStage.GENERATOR:
                self.train_sampler = sampler
        # Make the dataloader as normal
        return super().make_dataloader(
            dataset, stage, ckpt_prefix, **loader_kwargs
        )

    def on_fit_start(self):
        "Override to prevent this from running for D training"
        if self.sub_stage == SubStage.GENERATOR:
            super().on_fit_start()

    def init_optimizers(self):
        "Initializes the generator and discriminator optimizers"
        self.g_optimizer = self.hparams.g_opt_class(
            self.modules.generator.parameters()
        )
        self.d_optimizer = self.hparams.d_opt_class(
            self.modules.discriminator.parameters()
        )
        if self.checkpointer is not None:
            self.checkpointer.add_recoverable("g_opt", self.g_optimizer)
            self.checkpointer.add_recoverable("d_opt", self.d_optimizer)

    def zero_grad(self, set_to_none=False):
        """Clear the gradients of both optimizers."""
        self.g_optimizer.zero_grad(set_to_none)
        self.d_optimizer.zero_grad(set_to_none)
# Define audio pipelines (noisy input + clean reference loaded together)
@sb.utils.data_pipeline.takes("noisy_wav", "clean_wav")
@sb.utils.data_pipeline.provides("noisy_sig", "clean_sig")
def audio_pipeline(noisy_wav, clean_wav):
    """Load the noisy and clean waveforms for one manifest entry."""
    yield sb.dataio.dataio.read_audio(noisy_wav)
    yield sb.dataio.dataio.read_audio(clean_wav)
# For historical data: wavs enhanced during previous epochs
@sb.utils.data_pipeline.takes("enh_wav", "clean_wav")
@sb.utils.data_pipeline.provides("enh_sig", "clean_sig")
def enh_pipeline(enh_wav, clean_wav):
    """Load a previously enhanced waveform and its clean reference."""
    yield sb.dataio.dataio.read_audio(enh_wav)
    yield sb.dataio.dataio.read_audio(clean_wav)
def dataio_prep(hparams):
    """Build the train/valid/test ``DynamicItemDataset`` objects used by
    the brain class, wiring in the audio-loading pipeline."""
    manifests = {
        "train": hparams["train_annotation"],
        "valid": hparams["valid_annotation"],
        "test": hparams["test_annotation"],
    }
    return {
        split: sb.dataio.dataset.DynamicItemDataset.from_json(
            json_path=json_path,
            replacements={"data_root": hparams["data_folder"]},
            dynamic_items=[audio_pipeline],
            output_keys=["id", "noisy_sig", "clean_sig"],
        )
        for split, json_path in manifests.items()
    }
def create_folder(folder):
    """Create ``folder`` (and parents) if it does not exist yet."""
    # exist_ok makes repeated calls (e.g. under DDP) a no-op.
    os.makedirs(folder, exist_ok=True)
# Recipe begins!
if __name__ == "__main__":
    # Load hyperparameters file with command-line overrides
    hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])
    with open(hparams_file) as fin:
        hparams = load_hyperpyyaml(fin, overrides)
    # Initialize ddp (useful only for multi-GPU DDP training)
    sb.utils.distributed.ddp_init_group(run_opts)
    # Data preparation: build the JSON annotation manifests for Voicebank.
    from voicebank_prepare import prepare_voicebank # noqa
    run_on_main(
        prepare_voicebank,
        kwargs={
            "data_folder": hparams["data_folder"],
            "save_folder": hparams["data_folder"],
            "skip_prep": hparams["skip_prep"],
        },
    )
    # Create dataset objects
    datasets = dataio_prep(hparams)
    # Create experiment directory
    sb.create_experiment_directory(
        experiment_directory=hparams["output_folder"],
        hyperparams_to_save=hparams_file,
        overrides=overrides,
    )
    # Optional TensorBoard logging; the logger is stored back into hparams.
    if hparams["use_tensorboard"]:
        from speechbrain.utils.train_logger import TensorboardLogger
        hparams["tensorboard_train_logger"] = TensorboardLogger(
            hparams["tensorboard_logs"]
        )
    # Create the folder to save enhanced files (+ support for DDP)
    run_on_main(create_folder, kwargs={"folder": hparams["enhanced_folder"]})
    se_brain = MetricGanBrain(
        modules=hparams["modules"],
        hparams=hparams,
        run_opts=run_opts,
        checkpointer=hparams["checkpointer"],
    )
    # State shared across the MetricGAN training sub-stages.
    se_brain.train_set = datasets["train"]
    se_brain.historical_set = {}
    se_brain.noisy_scores = {}
    se_brain.batch_size = hparams["dataloader_options"]["batch_size"]
    se_brain.sub_stage = SubStage.GENERATOR
    # Fresh run (no history file): clear any stale enhanced wavs.
    # NOTE(review): ``shutil.rmtree`` raises if the folder is missing —
    # confirm the folder is guaranteed to exist on a first run.
    if not os.path.isfile(hparams["historical_file"]):
        shutil.rmtree(hparams["MetricGAN_folder"])
    run_on_main(create_folder, kwargs={"folder": hparams["MetricGAN_folder"]})
    se_brain.load_history()
    # Load latest checkpoint to resume training
    se_brain.fit(
        epoch_counter=se_brain.hparams.epoch_counter,
        train_set=datasets["train"],
        valid_set=datasets["valid"],
        train_loader_kwargs=hparams["dataloader_options"],
        valid_loader_kwargs=hparams["valid_dataloader_options"],
    )
    # Load best checkpoint (per target metric) for evaluation
    test_stats = se_brain.evaluate(
        test_set=datasets["test"],
        max_key=hparams["target_metric"],
        test_loader_kwargs=hparams["dataloader_options"],
    )
| 22,525 | 35.687296 | 80 | py |
speechbrain | speechbrain-main/recipes/Voicebank/enhance/spectral_mask/train.py | #!/usr/bin/env/python3
"""Recipe for training a speech enhancement system with the Voicebank dataset.
To run this recipe, do the following:
> python train.py hparams/{hyperparam_file}.yaml
Authors
* Szu-Wei Fu 2020
"""
import os
import sys
import torch
import torchaudio
import speechbrain as sb
from pesq import pesq
from hyperpyyaml import load_hyperpyyaml
from speechbrain.utils.metric_stats import MetricStats
from speechbrain.processing.features import spectral_magnitude
from speechbrain.nnet.loss.stoi_loss import stoi_loss
from speechbrain.utils.distributed import run_on_main
# Brain class for speech enhancement training
class SEBrain(sb.Brain):
    """Trains a spectral-mask enhancement model; evaluation tracks PESQ
    and STOI, and test-time enhanced wavs are written to disk."""

    def compute_forward(self, batch, stage):
        """Forward computations from the waveform batches to the enhanced output."""
        batch = batch.to(self.device)
        noisy_wavs, lens = batch.noisy_sig
        noisy_feats = self.compute_feats(noisy_wavs)
        # mask with "signal approximation (SA)"
        mask = self.modules.model(noisy_feats)
        mask = torch.squeeze(mask, 2)
        predict_spec = torch.mul(mask, noisy_feats)
        # Also return predicted wav
        predict_wav = self.hparams.resynth(
            torch.expm1(predict_spec), noisy_wavs
        )
        return predict_spec, predict_wav

    def compute_feats(self, wavs):
        """Feature computation pipeline: STFT -> magnitude^0.5 -> log1p."""
        feats = self.hparams.compute_STFT(wavs)
        feats = spectral_magnitude(feats, power=0.5)
        feats = torch.log1p(feats)
        return feats

    def compute_objectives(self, predictions, batch, stage):
        """Computes the loss given the predicted and targeted outputs"""
        predict_spec, predict_wav = predictions
        clean_wavs, lens = batch.clean_sig
        # The loss targets either the waveform or the spectral domain,
        # depending on the ``waveform_target`` hyperparameter.
        if getattr(self.hparams, "waveform_target", False):
            loss = self.hparams.compute_cost(predict_wav, clean_wavs, lens)
            self.loss_metric.append(
                batch.id, predict_wav, clean_wavs, lens, reduction="batch"
            )
        else:
            clean_spec = self.compute_feats(clean_wavs)
            loss = self.hparams.compute_cost(predict_spec, clean_spec, lens)
            self.loss_metric.append(
                batch.id, predict_spec, clean_spec, lens, reduction="batch"
            )
        if stage != sb.Stage.TRAIN:
            # Evaluate speech quality/intelligibility
            self.stoi_metric.append(
                batch.id, predict_wav, clean_wavs, lens, reduction="batch"
            )
            self.pesq_metric.append(
                batch.id, predict=predict_wav, target=clean_wavs, lengths=lens
            )
        # Write wavs to file
        if stage == sb.Stage.TEST:
            # Convert relative lengths to absolute sample counts.
            lens = lens * clean_wavs.shape[1]
            for name, pred_wav, length in zip(batch.id, predict_wav, lens):
                name += ".wav"
                enhance_path = os.path.join(
                    self.hparams.enhanced_folder, name
                )
                torchaudio.save(
                    enhance_path,
                    torch.unsqueeze(pred_wav[: int(length)].cpu(), 0),
                    16000,
                )
        return loss

    def on_stage_start(self, stage, epoch=None):
        """Gets called at the beginning of each epoch"""
        self.loss_metric = MetricStats(metric=self.hparams.compute_cost)
        self.stoi_metric = MetricStats(metric=stoi_loss)

        # Define function taking (prediction, target) for parallel eval
        def pesq_eval(pred_wav, target_wav):
            """Computes the PESQ evaluation metric"""
            return pesq(
                fs=16000,
                ref=target_wav.numpy(),
                deg=pred_wav.numpy(),
                mode="wb",
            )

        # PESQ is expensive, so it is only tracked on valid/test data.
        if stage != sb.Stage.TRAIN:
            self.pesq_metric = MetricStats(
                metric=pesq_eval, n_jobs=1, batch_eval=False
            )

    def on_stage_end(self, stage, stage_loss, epoch=None):
        """Gets called at the end of an epoch."""
        if stage == sb.Stage.TRAIN:
            self.train_loss = stage_loss
            self.train_stats = {"loss": self.loss_metric.scores}
        else:
            stats = {
                "loss": stage_loss,
                "pesq": self.pesq_metric.summarize("average"),
                # stoi_loss values are negated so that larger means better.
                "stoi": -self.stoi_metric.summarize("average"),
            }
            if stage == sb.Stage.VALID:
                if self.hparams.use_tensorboard:
                    valid_stats = {
                        "loss": self.loss_metric.scores,
                        "stoi": self.stoi_metric.scores,
                        "pesq": self.pesq_metric.scores,
                    }
                    self.hparams.tensorboard_train_logger.log_stats(
                        {"Epoch": epoch}, self.train_stats, valid_stats
                    )
                self.hparams.train_logger.log_stats(
                    {"Epoch": epoch},
                    train_stats={"loss": self.train_loss},
                    valid_stats=stats,
                )
                # Keep only the checkpoints with the best validation PESQ.
                self.checkpointer.save_and_keep_only(meta=stats, max_keys=["pesq"])
            if stage == sb.Stage.TEST:
                self.hparams.train_logger.log_stats(
                    {"Epoch loaded": self.hparams.epoch_counter.current},
                    test_stats=stats,
                )
def dataio_prep(hparams):
    """Create the train/valid/test ``DynamicItemDataset`` objects.

    Audio loading is defined through two small pipelines that turn the wav
    paths found in the JSON manifests into waveforms. The train split is
    optionally sorted by length according to ``hparams["sorting"]``.
    """

    @sb.utils.data_pipeline.takes("noisy_wav")
    @sb.utils.data_pipeline.provides("noisy_sig")
    def read_noisy(noisy_wav):
        return sb.dataio.dataio.read_audio(noisy_wav)

    @sb.utils.data_pipeline.takes("clean_wav")
    @sb.utils.data_pipeline.provides("clean_sig")
    def read_clean(clean_wav):
        return sb.dataio.dataio.read_audio(clean_wav)

    manifests = {
        "train": hparams["train_annotation"],
        "valid": hparams["valid_annotation"],
        "test": hparams["test_annotation"],
    }
    datasets = {
        split: sb.dataio.dataset.DynamicItemDataset.from_json(
            json_path=json_path,
            replacements={"data_root": hparams["data_folder"]},
            dynamic_items=[read_noisy, read_clean],
            output_keys=["id", "noisy_sig", "clean_sig"],
        )
        for split, json_path in manifests.items()
    }

    sorting = hparams["sorting"]
    if sorting in ("ascending", "descending"):
        datasets["train"] = datasets["train"].filtered_sorted(
            sort_key="length", reverse=sorting == "descending"
        )
        # Sorting and shuffling are mutually exclusive.
        hparams["dataloader_options"]["shuffle"] = False
    elif sorting != "random":
        raise NotImplementedError(
            "Sorting must be random, ascending, or descending"
        )
    return datasets
def create_folder(folder):
    """Create ``folder`` (and parents) if it does not exist yet."""
    # exist_ok makes repeated calls (e.g. under DDP) a no-op.
    os.makedirs(folder, exist_ok=True)
# Recipe begins!
if __name__ == "__main__":
    # Load hyperparameters file with command-line overrides
    hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])
    with open(hparams_file) as fin:
        hparams = load_hyperpyyaml(fin, overrides)
    # Initialize ddp (useful only for multi-GPU DDP training)
    sb.utils.distributed.ddp_init_group(run_opts)
    # Data preparation: build the JSON annotation manifests for Voicebank.
    from voicebank_prepare import prepare_voicebank # noqa
    run_on_main(
        prepare_voicebank,
        kwargs={
            "data_folder": hparams["data_folder"],
            "save_folder": hparams["output_folder"],
            "skip_prep": hparams["skip_prep"],
        },
    )
    # Create dataset objects
    datasets = dataio_prep(hparams)
    # Create experiment directory
    sb.create_experiment_directory(
        experiment_directory=hparams["output_folder"],
        hyperparams_to_save=hparams_file,
        overrides=overrides,
    )
    # Optional TensorBoard logging; the logger is stored back into hparams.
    if hparams["use_tensorboard"]:
        from speechbrain.utils.train_logger import TensorboardLogger
        hparams["tensorboard_train_logger"] = TensorboardLogger(
            hparams["tensorboard_logs"]
        )
    # Create the folder to save enhanced files (+ support for DDP)
    run_on_main(create_folder, kwargs={"folder": hparams["enhanced_folder"]})
    se_brain = SEBrain(
        modules=hparams["modules"],
        opt_class=hparams["opt_class"],
        hparams=hparams,
        run_opts=run_opts,
        checkpointer=hparams["checkpointer"],
    )
    # Load latest checkpoint to resume training
    se_brain.fit(
        epoch_counter=se_brain.hparams.epoch_counter,
        train_set=datasets["train"],
        valid_set=datasets["valid"],
        train_loader_kwargs=hparams["dataloader_options"],
        valid_loader_kwargs=hparams["dataloader_options"],
    )
    # Load best checkpoint (highest PESQ) for evaluation
    test_stats = se_brain.evaluate(
        test_set=datasets["test"],
        max_key="pesq",
        test_loader_kwargs=hparams["dataloader_options"],
    )
| 9,102 | 33.481061 | 84 | py |
speechbrain | speechbrain-main/recipes/Voicebank/enhance/waveform_map/train.py | #!/usr/bin/env/python3
"""Recipe for training a waveform-based speech enhancement
system with the Voicebank dataset.
To run this recipe, do the following:
> python train.py hparams/{hyperparam_file}.yaml
Authors
* Szu-Wei Fu 2020
"""
import os
import sys
import torch
import torchaudio
import speechbrain as sb
from pesq import pesq
from hyperpyyaml import load_hyperpyyaml
from speechbrain.utils.metric_stats import MetricStats
from speechbrain.nnet.loss.stoi_loss import stoi_loss
from speechbrain.utils.distributed import run_on_main
# Brain class for speech enhancement training
class SEBrain(sb.Brain):
    """Brain subclass for waveform-domain speech enhancement.

    Maps noisy waveforms directly to enhanced waveforms with
    ``self.modules.model`` and evaluates with STOI and PESQ.
    """

    def compute_forward(self, batch, stage):
        """Forward computations from the waveform batches to the enhanced output"""
        batch = batch.to(self.device)
        noisy_wavs, lens = batch.noisy_sig

        # Add a trailing channel dimension expected by the model, then
        # drop it again from the model output.
        noisy_wavs = torch.unsqueeze(noisy_wavs, -1)
        predict_wavs = self.modules.model(noisy_wavs)[:, :, 0]
        return predict_wavs

    def compute_objectives(self, predict_wavs, batch, stage):
        """Computes the loss given the predicted and targeted outputs"""
        clean_wavs, lens = batch.clean_sig
        loss = self.hparams.compute_cost(predict_wavs, clean_wavs, lens)
        # Track per-batch training loss for logging
        self.loss_metric.append(
            batch.id, predict_wavs, clean_wavs, lens, reduction="batch"
        )

        if stage != sb.Stage.TRAIN:
            # Evaluate speech quality/intelligibility
            self.stoi_metric.append(
                batch.id, predict_wavs, clean_wavs, lens, reduction="batch"
            )
            self.pesq_metric.append(
                batch.id, predict=predict_wavs, target=clean_wavs, lengths=lens
            )

        # Write wavs to file
        if stage == sb.Stage.TEST:
            # Convert relative lengths to absolute sample counts
            lens = lens * clean_wavs.shape[1]
            for name, pred_wav, length in zip(batch.id, predict_wavs, lens):
                name += ".wav"
                enhance_path = os.path.join(
                    self.hparams.enhanced_folder, name
                )
                # Peak-normalize to 0.99 to avoid clipping on save
                pred_wav = pred_wav / torch.max(torch.abs(pred_wav)) * 0.99
                torchaudio.save(
                    enhance_path,
                    torch.unsqueeze(pred_wav[: int(length)].cpu(), 0),
                    16000,
                )

        return loss

    def on_stage_start(self, stage, epoch=None):
        """Gets called at the beginning of each epoch"""
        self.loss_metric = MetricStats(metric=self.hparams.compute_cost)
        self.stoi_metric = MetricStats(metric=stoi_loss)

        # Define function taking (prediction, target) for parallel eval
        def pesq_eval(pred_wav, target_wav):
            """Computes the PESQ evaluation metric"""
            return pesq(
                fs=16000,
                ref=target_wav.numpy(),
                deg=pred_wav.numpy(),
                mode="wb",
            )

        if stage != sb.Stage.TRAIN:
            # batch_eval=False: PESQ runs per-utterance on the CPU
            self.pesq_metric = MetricStats(
                metric=pesq_eval, n_jobs=1, batch_eval=False
            )

    def on_stage_end(self, stage, stage_loss, epoch=None):
        """Gets called at the end of an epoch."""
        if stage == sb.Stage.TRAIN:
            self.train_loss = stage_loss
            self.train_stats = {"loss": self.loss_metric.scores}
        else:
            stats = {
                "loss": stage_loss,
                "pesq": self.pesq_metric.summarize("average"),
                # stoi_loss is negated STOI, so negate back for reporting
                "stoi": -self.stoi_metric.summarize("average"),
            }

        if stage == sb.Stage.VALID:
            if self.hparams.use_tensorboard:
                valid_stats = {
                    "loss": self.loss_metric.scores,
                    "stoi": self.stoi_metric.scores,
                    "pesq": self.pesq_metric.scores,
                }
                self.hparams.tensorboard_train_logger.log_stats(
                    {"Epoch": epoch}, self.train_stats, valid_stats
                )
            self.hparams.train_logger.log_stats(
                {"Epoch": epoch},
                train_stats={"loss": self.train_loss},
                valid_stats=stats,
            )
            # Keep only the checkpoint with the best PESQ
            self.checkpointer.save_and_keep_only(meta=stats, max_keys=["pesq"])

        if stage == sb.Stage.TEST:
            self.hparams.train_logger.log_stats(
                {"Epoch loaded": self.hparams.epoch_counter.current},
                test_stats=stats,
            )
def dataio_prep(hparams):
    """Prepare the train/valid/test DynamicItemDatasets used by the Brain.

    Defines the audio-loading pipelines and, depending on the ``sorting``
    hyperparameter, sorts the training set by utterance length.
    """

    # Map the wav-path annotation keys to loaded audio signals
    @sb.utils.data_pipeline.takes("noisy_wav")
    @sb.utils.data_pipeline.provides("noisy_sig")
    def noisy_pipeline(noisy_wav):
        return sb.dataio.dataio.read_audio(noisy_wav)

    @sb.utils.data_pipeline.takes("clean_wav")
    @sb.utils.data_pipeline.provides("clean_sig")
    def clean_pipeline(clean_wav):
        return sb.dataio.dataio.read_audio(clean_wav)

    # One dataset per split, all sharing the same pipelines
    annotations = {
        "train": hparams["train_annotation"],
        "valid": hparams["valid_annotation"],
        "test": hparams["test_annotation"],
    }
    datasets = {
        split: sb.dataio.dataset.DynamicItemDataset.from_json(
            json_path=json_path,
            replacements={"data_root": hparams["data_folder"]},
            dynamic_items=[noisy_pipeline, clean_pipeline],
            output_keys=["id", "noisy_sig", "clean_sig"],
        )
        for split, json_path in annotations.items()
    }

    # Sort train dataset when requested; sorted data must not be shuffled
    sorting = hparams["sorting"]
    if sorting in ("ascending", "descending"):
        datasets["train"] = datasets["train"].filtered_sorted(
            sort_key="length", reverse=sorting == "descending"
        )
        hparams["dataloader_options"]["shuffle"] = False
    elif sorting != "random":
        raise NotImplementedError(
            "Sorting must be random, ascending, or descending"
        )

    return datasets
def create_folder(folder):
    """Create ``folder`` (including parents) if it does not already exist.

    Uses ``exist_ok=True`` instead of a separate existence check so that
    concurrent callers (e.g. multiple DDP processes) cannot race between
    the check and the creation.

    Arguments
    ---------
    folder : str
        Path of the directory to create.
    """
    os.makedirs(folder, exist_ok=True)
# Recipe begins!
if __name__ == "__main__":

    # Load hyperparameters file with command-line overrides
    hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])
    with open(hparams_file) as fin:
        hparams = load_hyperpyyaml(fin, overrides)

    # Initialize ddp (useful only for multi-GPU DDP training)
    sb.utils.distributed.ddp_init_group(run_opts)

    # Data preparation (run on the main process only)
    from voicebank_prepare import prepare_voicebank  # noqa

    run_on_main(
        prepare_voicebank,
        kwargs={
            "data_folder": hparams["data_folder"],
            "save_folder": hparams["output_folder"],
            "skip_prep": hparams["skip_prep"],
        },
    )

    # Create dataset objects
    datasets = dataio_prep(hparams)

    # Create experiment directory
    sb.create_experiment_directory(
        experiment_directory=hparams["output_folder"],
        hyperparams_to_save=hparams_file,
        overrides=overrides,
    )

    # Optional tensorboard logging (import deferred so the dependency is
    # only needed when the option is enabled)
    if hparams["use_tensorboard"]:
        from speechbrain.utils.train_logger import TensorboardLogger

        hparams["tensorboard_train_logger"] = TensorboardLogger(
            hparams["tensorboard_logs"]
        )

    # Create the folder to save enhanced files (+ support for DDP)
    run_on_main(create_folder, kwargs={"folder": hparams["enhanced_folder"]})

    se_brain = SEBrain(
        modules=hparams["modules"],
        opt_class=hparams["opt_class"],
        hparams=hparams,
        run_opts=run_opts,
        checkpointer=hparams["checkpointer"],
    )

    # Load latest checkpoint to resume training
    se_brain.fit(
        epoch_counter=se_brain.hparams.epoch_counter,
        train_set=datasets["train"],
        valid_set=datasets["valid"],
        train_loader_kwargs=hparams["dataloader_options"],
        valid_loader_kwargs=hparams["dataloader_options"],
    )

    # Load best checkpoint (highest PESQ) for evaluation
    test_stats = se_brain.evaluate(
        test_set=datasets["test"],
        max_key="pesq",
        test_loader_kwargs=hparams["dataloader_options"],
    )
| 8,228 | 33.145228 | 83 | py |
speechbrain | speechbrain-main/recipes/Voicebank/enhance/MetricGAN-U/voicebank_prepare.py | # -*- coding: utf-8 -*-
"""
Data preparation.
Download and resample, use ``download_vctk`` below.
https://datashare.is.ed.ac.uk/handle/10283/2791
Authors:
* Szu-Wei Fu, 2020
* Peter Plantinga, 2020
"""
import os
import json
import string
import urllib
import shutil
import logging
import tempfile
import torchaudio
from torchaudio.transforms import Resample
from speechbrain.utils.data_utils import get_all_files, download_file
from speechbrain.dataio.dataio import read_audio
logger = logging.getLogger(__name__)

# Pronunciation lexicon used to map words to phone sequences
LEXICON_URL = "http://www.openslr.org/resources/11/librispeech-lexicon.txt"

# Output annotation filenames written into the save folder
TRAIN_JSON = "train.json"
TEST_JSON = "test.json"
VALID_JSON = "valid.json"

# All audio is expected to be resampled to 16 kHz
SAMPLERATE = 16000

# The 28 speakers of the Voicebank 28spk training set. A prefix of this
# list is held out for validation (see ``prepare_voicebank``).
TRAIN_SPEAKERS = [
    "p226",
    "p287",
    "p227",
    "p228",
    "p230",
    "p231",
    "p233",
    "p236",
    "p239",
    "p243",
    "p244",
    "p250",
    "p254",
    "p256",
    "p258",
    "p259",
    "p267",
    "p268",
    "p269",
    "p270",
    "p273",
    "p274",
    "p276",
    "p277",
    "p278",
    "p279",
    "p282",
    "p286",
]
# Lexicon missing entries: words found in the Voicebank transcripts that are
# absent from the downloaded LibriSpeech lexicon, with hand-written phones.
# BUGFIX: the original literal listed "RETHINK" twice; the duplicate entry
# (same value) has been removed.
MISSING_LEXICON = {
    "CRUCIALLY": "K R UW SH AH L IY",
    "PAEDOPHILES": "P EH D OW F AY L S",
    "MR": "M IH S T ER",
    "BBC": "B IY B IY S IY",
    "EUPHORIC": "Y UW F AO R IH K",
    "RACISM": "R EY S IH S M",
    "MP": "EH M P IY",
    "RESTRUCTURING": "R IY S T R AH K CH ER IH NG",
    "OSAMA": "OW S AH M AH",
    "GUITARIST": "G IH T AA R IH S T",
    "BLUESHE": "B L UW SH IY",
    "FLANKER": "F L AY N K ER",
    "SADDAM": "S AA D AA M",
    "COVERUP": "K UH V ER UH P",
    "FBI": "EH F B IY AY",
    "PREEMPTIVE": "P R IY EH M P T IH V",
    "FOURYEAR": "F AO R Y IY R",
    "XRAY": "EH K S R AY",
    "TALIBAN": "T AE L IH B AA N",
    "SUPERIMPOSITION": "S UW P ER IH M P OW S IH SH AH N",
    "GUIDELINES": "G AY D L AY N S",
    "FINALISED": "F AY N AH L AY Z D",
    "HALFTIME": "H AE F T AY M",
    "WINGERS": "W IH NG ER Z",
    "GM": "J IY EH M",
    "MCGREGOR": "M AH K G R EH G AO R",
    "TWODAY": "T UW D EY",
    "DATABASE": "D EY T AH B EY S",
    "TELECOM": "T EH L AH K AO M",
    "SHORTTERM": "SH AO R T ER M",
    "SHORTFALL": "SH AO R T F AH L",
    "MCCALL": "M AH K AH L",
    "HEADTEACHER": "H EH D T IY CH ER",
    "TAKEOVER": "T EY K OW V ER",
    "ONETHIRD": "W AH N TH ER D",
    "TV": "T IY V IY",
    "SCREENPLAY": "S K R IY N P L EY",
    "YUGOSLAV": "Y UW G OW S L AA V",
    "HIBS": "HH IH B Z",
    "DISPOSALS": "D IH S P OW S AH L Z",
    "MODERNISATION": "M AA D ER N AH Z EY SH AH N",
    "REALLIFE": "R IY L AY F",
    "ONEYEAR": "W AH N Y IY R",
    "GRASSROOTS": "G R AE S R UW T S",
    "ARNIE": "AH R N IY",
    "PARTTIME": "P AH R T AY M",
    "SHORTLIST": "SH AO R T L IH S T",
    "OUTPERFORMED": "OW T P ER F AO R M D",
    "LONGTERM": "L AO NG T ER M",
    "DAYTODAY": "D EY T UW D EY",
    "MCPHERSON": "M AH K F ER S AH N",
    "OUTSOURCING": "OW T S AO R S IH NG",
    "FULLSCALE": "F UH L S K EY L",
    "SERGIO": "S ER J IY OW",
    "HENMAN": "HH EH N M AA N",
    "MCLEOD": "M AH K L IY AO D",
    "TIMESCALE": "T AY M S K EY L",
    "REFURBISHMENT": "R IY F UH R B IH SH M AH N T",
    "LINEUP": "L AY N UH P",
    "DOWNBEAT": "D OW N B IY T",
    "MANDELA": "M AE N D EH L AH",
    "UNDERAGE": "UH N D ER EY J",
    "MCNAUGHTON": "M AH K N AW T AH N",
    "MICKELSON": "M IH K L S AH N",
    "THREEQUARTERS": "TH R IY K AO R T ER Z",
    "WEBSITE": "W EH B S AY T",
    "BLUEITS": "B L UW IH T S",
    "CEASEFIRE": "S IY S F AY R",
    "FULLTIME": "F UH L T AY M",
    "DOCHERTY": "D AH K ER T IY",
    "RUNNERUP": "R UH N ER AH P",
    "DOWNTURN": "D OW N T ER N",
    "EUROS": "Y ER OW S",
    "FOOTANDMOUTH": "F UH T AE N D M OW TH",
    "HIGHLIGHTED": "HH AY L AY T AH D",
    "MIDFIELD": "M IH D F IY L D",
    "MCKENZIE": "M AH K EH N Z IY",
    "BENCHMARK": "B EH N CH M AA R K",
    "MCCONNELL": "M AH K AW N EH L",
    "UPGRADING": "UH P G R EY D IH NG",
    "BLUNKETT": "B L UH N K AH T",
    "RETHINK": "R IY TH IH N K",
    "UPBEAT": "AH P B IY T",
    "TELECOMS": "T EH L AH K AO M Z",
    "APARTHEID": "AH P AH R T HH AY D",
    "AIRDRIE": "EY R D R IY",
    "HELPLINE": "HH EH L P L AY N",
    "CLEARCUT": "K L IY R K UH T",
}
def prepare_voicebank(
    data_folder, save_folder, valid_speaker_count=2, skip_prep=False
):
    """
    Prepares the json files for the Voicebank dataset.

    Expects the data folder to be the same format as the output of
    ``download_vctk()`` below.

    Arguments
    ---------
    data_folder : str
        Path to the folder where the original Voicebank dataset is stored.
    save_folder : str
        The directory where to store the json files.
    valid_speaker_count : int
        The number of validation speakers to use (out of 28 in train set).
    skip_prep: bool
        If True, skip data preparation.

    Example
    -------
    >>> data_folder = '/path/to/datasets/Voicebank'
    >>> save_folder = 'exp/Voicebank_exp'
    >>> prepare_voicebank(data_folder, save_folder)
    """
    if skip_prep:
        return

    # Setting output files
    save_json_train = os.path.join(save_folder, TRAIN_JSON)
    save_json_valid = os.path.join(save_folder, VALID_JSON)
    save_json_test = os.path.join(save_folder, TEST_JSON)

    # Check if this phase is already done (if so, skip it)
    if skip(save_json_train, save_json_test, save_json_valid):
        logger.info("Preparation completed in previous run, skipping.")
        return

    # Expected subfolder layout, as produced by ``download_vctk``
    train_clean_folder = os.path.join(
        data_folder, "clean_trainset_28spk_wav_16k"
    )
    train_noisy_folder = os.path.join(
        data_folder, "noisy_trainset_28spk_wav_16k"
    )
    train_txts = os.path.join(data_folder, "trainset_28spk_txt")
    test_clean_folder = os.path.join(data_folder, "clean_testset_wav_16k")
    test_noisy_folder = os.path.join(data_folder, "noisy_testset_wav_16k")
    test_txts = os.path.join(data_folder, "testset_txt")

    # Setting the save folder
    if not os.path.exists(save_folder):
        os.makedirs(save_folder)

    # Additional checks to make sure the data folder contains Voicebank
    check_voicebank_folders(
        train_clean_folder,
        train_noisy_folder,
        train_txts,
        test_clean_folder,
        test_noisy_folder,
        test_txts,
    )

    logger.debug("Creating lexicon...")
    lexicon = create_lexicon(os.path.join(data_folder, "lexicon.txt"))

    logger.info("Creating json files for noisy VoiceBank...")
    logger.debug("Collecting files...")
    extension = [".wav"]

    # The first ``valid_speaker_count`` train speakers are held out for
    # validation, so they are excluded from the train wav list below.
    valid_speakers = TRAIN_SPEAKERS[:valid_speaker_count]
    wav_lst_train = get_all_files(
        train_noisy_folder, match_and=extension, exclude_or=valid_speakers,
    )
    wav_lst_valid = get_all_files(
        train_noisy_folder, match_and=extension, match_or=valid_speakers,
    )
    wav_lst_test = get_all_files(test_noisy_folder, match_and=extension)

    logger.debug("Creating json files for noisy VoiceBank...")
    create_json(
        wav_lst_train, save_json_train, train_clean_folder, train_txts, lexicon
    )
    create_json(
        wav_lst_valid, save_json_valid, train_clean_folder, train_txts, lexicon
    )
    create_json(
        wav_lst_test, save_json_test, test_clean_folder, test_txts, lexicon
    )
def skip(*filenames):
    """
    Detects if the Voicebank data_preparation has been already done.

    If the preparation has been done, we can skip it.

    Returns
    -------
    bool
        if True, the preparation phase can be skipped.
        if False, it must be done.
    """
    # Preparation is complete iff every expected output file exists
    return all(os.path.isfile(filename) for filename in filenames)
def remove_punctuation(a_string):
    """Remove all punctuation from string"""
    # Keep only the characters that are not ASCII punctuation
    punctuation = set(string.punctuation)
    return "".join(ch for ch in a_string if ch not in punctuation)
def create_lexicon(lexicon_save_filepath):
    """
    Creates the lexicon object, downloading if it hasn't been done yet.

    Arguments
    ---------
    lexicon_save_filepath : str
        Path to save the lexicon when downloading

    Returns
    -------
    dict
        Maps uppercase words to space-separated phone strings.
    """
    if not os.path.isfile(lexicon_save_filepath):
        download_file(LEXICON_URL, lexicon_save_filepath)

    # Iterate lexicon file and add the first pronunciation in the file for
    # each word to our lexicon dictionary.
    # BUGFIX: copy MISSING_LEXICON instead of aliasing it, so that building
    # the lexicon does not mutate the module-level constant.
    lexicon = dict(MISSING_LEXICON)
    delayed_words = {}
    # BUGFIX: use a context manager so the file handle is always closed
    with open(lexicon_save_filepath) as lexicon_file:
        for line in lexicon_file:
            line = line.split()
            # Strip stress markers (0/1/2) from the phones
            phns = " ".join(p.strip("012") for p in line[1:])

            # Don't add words with punctuation until we can be sure they won't
            # overwrite words without punctuation.
            clean_word = remove_punctuation(line[0])
            if clean_word != line[0] and clean_word not in delayed_words:
                delayed_words[clean_word] = phns
            elif clean_word == line[0] and clean_word not in lexicon:
                lexicon[clean_word] = phns

    # Add words with punctuation if they won't overwrite non-punctuated words
    for word, phns in delayed_words.items():
        if word not in lexicon:
            lexicon[word] = phns

    return lexicon
def create_json(wav_lst, json_file, clean_folder, txt_folder, lexicon):
    """
    Creates the json file given a list of wav files.

    Arguments
    ---------
    wav_lst : list
        The list of wav files.
    json_file : str
        The path of the output json file
    clean_folder : str
        The location of parallel clean samples.
    txt_folder : str
        The location of the transcript files.
    lexicon : dict
        Maps uppercase words to space-separated phone strings.
    """
    logger.debug(f"Creating json lists in {json_file}")

    # Processing all the wav files in the list
    json_dict = {}
    for wav_file in wav_lst:  # ex:p203_122.wav

        # Example wav_file: p232_001.wav
        noisy_path, filename = os.path.split(wav_file)
        _, noisy_dir = os.path.split(noisy_path)
        _, clean_dir = os.path.split(clean_folder)
        # Paths are stored relative to a {data_root} placeholder that is
        # substituted when the dataset is loaded.
        noisy_rel_path = os.path.join("{data_root}", noisy_dir, filename)
        clean_rel_path = os.path.join("{data_root}", clean_dir, filename)

        # Reading the signal (to retrieve duration in seconds)
        signal = read_audio(wav_file)
        duration = signal.shape[0] / SAMPLERATE

        # Read text
        snt_id = filename.replace(".wav", "")
        with open(os.path.join(txt_folder, snt_id + ".txt")) as f:
            word_string = f.read()
        word_string = remove_punctuation(word_string).strip().upper()
        # Look up each word's phone sequence in the lexicon
        phones = [
            phn for word in word_string.split() for phn in lexicon[word].split()
        ]

        # Remove duplicate phones (i.e. merge consecutive identical phones)
        phones = [i for i, j in zip(phones, phones[1:] + [None]) if i != j]
        phone_string = " ".join(phones)

        json_dict[snt_id] = {
            "noisy_wav": noisy_rel_path,
            "clean_wav": clean_rel_path,
            "length": duration,
            "words": word_string,
            "phones": phone_string,
        }

    # Writing the json lines
    with open(json_file, mode="w") as json_f:
        json.dump(json_dict, json_f, indent=2)

    logger.info(f"{json_file} successfully created!")
def check_voicebank_folders(*folders):
    """Raises FileNotFoundError if any passed folder does not exist."""
    # Report the first missing folder, matching the original check order
    missing = [folder for folder in folders if not os.path.exists(folder)]
    if missing:
        raise FileNotFoundError(
            f"the folder {missing[0]} does not exist (it is expected in "
            "the Voicebank dataset)"
        )
def download_vctk(destination, tmp_dir=None, device="cpu"):
    """Download dataset and perform resample to 16000 Hz.

    Arguments
    ---------
    destination : str
        Place to put final zipped dataset.
    tmp_dir : str
        Location to store temporary files. Will use `tempfile` if not provided.
    device : str
        Passed directly to pytorch's ``.to()`` method. Used for resampling.
    """
    dataset_name = "noisy-vctk-16k"
    if tmp_dir is None:
        tmp_dir = tempfile.gettempdir()
    final_dir = os.path.join(tmp_dir, dataset_name)

    if not os.path.isdir(tmp_dir):
        os.mkdir(tmp_dir)

    if not os.path.isdir(final_dir):
        os.mkdir(final_dir)

    prefix = "https://datashare.is.ed.ac.uk/bitstream/handle/10283/2791/"
    noisy_vctk_urls = [
        prefix + "clean_testset_wav.zip",
        prefix + "noisy_testset_wav.zip",
        prefix + "testset_txt.zip",
        prefix + "clean_trainset_28spk_wav.zip",
        prefix + "noisy_trainset_28spk_wav.zip",
        prefix + "trainset_28spk_txt.zip",
    ]

    # Download any archives not already present in tmp_dir
    zip_files = []
    for url in noisy_vctk_urls:
        filename = os.path.join(tmp_dir, url.split("/")[-1])
        zip_files.append(filename)
        if not os.path.isfile(filename):
            logger.info("Downloading " + url)
            with urllib.request.urlopen(url) as response:
                with open(filename, "wb") as tmp_file:
                    logger.info("... to " + tmp_file.name)
                    shutil.copyfileobj(response, tmp_file)

    # Unzip
    for zip_file in zip_files:
        logger.info("Unzipping " + zip_file)
        shutil.unpack_archive(zip_file, tmp_dir, "zip")
        os.remove(zip_file)

    # Move transcripts to final dir
    shutil.move(os.path.join(tmp_dir, "testset_txt"), final_dir)
    shutil.move(os.path.join(tmp_dir, "trainset_28spk_txt"), final_dir)

    # Downsample from the original 48 kHz to 16 kHz
    dirs = [
        "noisy_testset_wav",
        "clean_testset_wav",
        "noisy_trainset_28spk_wav",
        "clean_trainset_28spk_wav",
    ]

    downsampler = Resample(orig_freq=48000, new_freq=16000)

    for directory in dirs:
        logger.info("Resampling " + directory)
        dirname = os.path.join(tmp_dir, directory)

        # Make directory to store downsampled files
        dirname_16k = os.path.join(final_dir, directory + "_16k")
        if not os.path.isdir(dirname_16k):
            os.mkdir(dirname_16k)

        # Load files and downsample
        for filename in get_all_files(dirname, match_and=[".wav"]):
            signal, rate = torchaudio.load(filename)
            downsampled_signal = downsampler(signal.view(1, -1).to(device))

            # Save downsampled file.
            # NOTE(review): filename[-12:] assumes wav basenames are exactly
            # 12 characters (e.g. "p232_001.wav") — verify for other data.
            torchaudio.save(
                os.path.join(dirname_16k, filename[-12:]),
                downsampled_signal[0].cpu(),
                sample_rate=16000,
                channels_first=False,
            )

            # Remove old file
            os.remove(filename)

        # Remove old directory
        os.rmdir(dirname)

    logger.info("Zipping " + final_dir)
    final_zip = shutil.make_archive(
        base_name=final_dir,
        format="zip",
        root_dir=os.path.dirname(final_dir),
        base_dir=os.path.basename(final_dir),
    )

    logger.info(f"Moving {final_zip} to {destination}")
    shutil.move(final_zip, os.path.join(destination, dataset_name + ".zip"))
| 14,812 | 30.054507 | 80 | py |
speechbrain | speechbrain-main/recipes/Voicebank/enhance/MetricGAN-U/train.py | #!/usr/bin/env/python3
"""
Recipe for training MetricGAN-U (Unsupervised) with the Voicebank dataset.
To run this recipe, do the following:
> python train.py hparams/{hyperparam_file}.yaml
Authors
* Szu-Wei Fu 2021/09
"""
import os
import sys
import shutil
import torch
import torchaudio
import speechbrain as sb
import numpy as np
import json
import pickle
import requests
import time
from urllib.parse import urlparse, urljoin
from srmrpy import srmr
from pesq import pesq
from enum import Enum, auto
from hyperpyyaml import load_hyperpyyaml
from speechbrain.utils.metric_stats import MetricStats
from speechbrain.processing.features import spectral_magnitude
from speechbrain.nnet.loss.stoi_loss import stoi_loss
from speechbrain.utils.distributed import run_on_main
from speechbrain.dataio.sampler import ReproducibleWeightedRandomSampler
### For DNSMOS
# URL for the web service
SCORING_URI = "https://dnsmos-4.azurewebsites.net/score"
# If the service is authenticated, set the key or token
AUTH_KEY = ""
if AUTH_KEY == "":
    print(
        "To access DNSMOS, you have to ask the key from the DNS organizer: dns_challenge@microsoft.com"
    )
# Set the content type
headers = {"Content-Type": "application/json"}
# If authentication is enabled, set the authorization header
headers["Authorization"] = f"Basic {AUTH_KEY }"
def sigmoid(x):
    """Logistic function 1 / (1 + e^-x), mapping the real line to (0, 1)."""
    return 1.0 / (1.0 + np.exp(-x))
def pesq_eval(predict, target):
    """Normalized PESQ (to 0-1)"""
    # Wideband PESQ lies in [-0.5, 4.5]; shift and scale into [0, 1]
    raw_pesq = pesq(
        fs=16000, ref=target.numpy(), deg=predict.numpy(), mode="wb"
    )
    return (raw_pesq + 0.5) / 5
def srmrpy_eval(predict, target):
    """ Note target is not used in the srmr function !!!
    Normalize the score to 0~1 for training.
    """
    # srmr returns (score, energies); only the scalar score is used
    raw_score = srmr(
        predict.numpy(),
        fs=16000,
        n_cochlear_filters=23,
        low_freq=125,
        min_cf=4,
        max_cf=128,
        fast=True,
        norm=False,
    )[0]
    # Squash the scaled raw SRMR into (0, 1) for training targets
    return float(sigmoid(0.1 * raw_score))
def srmrpy_eval_valid(predict, target):
    """ Note target is not used in the srmr function !!!
    Show the unnormalized score for valid and test set.
    """
    # Same call as srmrpy_eval, but the raw score is reported directly
    raw_score = srmr(
        predict.numpy(),
        fs=16000,
        n_cochlear_filters=23,
        low_freq=125,
        min_cf=4,
        max_cf=128,
        fast=True,
        norm=False,
    )[0]
    return float(raw_score)
def dnsmos_eval(predict, target):
    """ Note target is not used in the dnsmos function !!!
    Normalize the score to 0~1 for training.
    """
    pred_wav = predict
    pred_wav = pred_wav.numpy()
    # Peak-normalize before sending the waveform to the scoring service
    pred_wav = pred_wav / max(abs(pred_wav))

    data = {"data": pred_wav.tolist()}
    input_data = json.dumps(data)
    # Retry forever: the remote DNSMOS service may throttle requests
    while True:
        try:
            u = urlparse(SCORING_URI)
            resp = requests.post(
                urljoin("https://" + u.netloc, "score"),
                data=input_data,
                headers=headers,
            )
            score_dict = resp.json()
            score = float(
                sigmoid(score_dict["mos"])
            )  # normalize the score to 0~1
            break
        except Exception as e:  # accessing the dnsmos server too often may disable the service
            print(e)
            time.sleep(10)  # wait for 10 secs before retrying
    return score
def dnsmos_eval_valid(predict, target):
    """ Note target is not used in the dnsmos function !!!
    Show the unnormalized score for valid and test set.
    """
    # Same request flow as ``dnsmos_eval`` but without sigmoid normalization
    pred_wav = predict
    pred_wav = pred_wav.numpy()
    # Peak-normalize before sending the waveform to the scoring service
    pred_wav = pred_wav / max(abs(pred_wav))

    data = {"data": pred_wav.tolist()}
    input_data = json.dumps(data)
    # Retry forever: the remote DNSMOS service may throttle requests
    while True:
        try:
            u = urlparse(SCORING_URI)
            resp = requests.post(
                urljoin("https://" + u.netloc, "score"),
                data=input_data,
                headers=headers,
            )
            score_dict = resp.json()
            score = float(score_dict["mos"])
            break
        except Exception as e:  # accessing the dnsmos server too often may disable the service
            print(e)
            time.sleep(10)  # wait for 10 secs before retrying
    return score
class SubStage(Enum):
    """For keeping track of training stage progress"""

    GENERATOR = 1  # generator update pass
    CURRENT = 2  # discriminator pass over current-epoch data
    HISTORICAL = 3  # discriminator pass over stored historical data
class MetricGanBrain(sb.Brain):
def load_history(self):
if os.path.isfile(self.hparams.historical_file):
with open(self.hparams.historical_file, "rb") as fp: # Unpickling
self.historical_set = pickle.load(fp)
def compute_feats(self, wavs):
"""Feature computation pipeline"""
feats = self.hparams.compute_STFT(wavs)
spec = spectral_magnitude(feats, power=0.5)
return spec
    def compute_forward(self, batch, stage):
        "Given an input batch computes the enhanced signal"
        batch = batch.to(self.device)

        if self.sub_stage == SubStage.HISTORICAL:
            # Historical data is already enhanced; return it as-is
            predict_wav, lens = batch.enh_sig
            return predict_wav
        else:
            noisy_wav, lens = batch.noisy_sig
            noisy_spec = self.compute_feats(noisy_wav)

            # The generator predicts a spectral mask; clamp it from below
            # so the enhanced spectrum is never fully zeroed out
            mask = self.modules.generator(noisy_spec, lengths=lens)
            mask = mask.clamp(min=self.hparams.min_mask).squeeze(2)
            predict_spec = torch.mul(mask, noisy_spec)

            # Also return predicted wav, resynthesized from the masked spectrum
            predict_wav = self.hparams.resynth(predict_spec, noisy_wav)
            return predict_wav, mask
    def compute_objectives(self, predictions, batch, stage, optim_name=""):
        "Given the network predictions and targets compute the total loss"
        # HISTORICAL forward returns only the wav; other sub-stages also
        # return the generator mask (unused here)
        if self.sub_stage == SubStage.HISTORICAL:
            predict_wav = predictions
        else:
            predict_wav, mask = predictions
        predict_spec = self.compute_feats(predict_wav)

        ids = self.compute_ids(batch.id, optim_name)

        if self.sub_stage != SubStage.HISTORICAL:
            noisy_wav, lens = batch.noisy_sig

        if optim_name == "generator":
            # G is trained to push the discriminator's estimated score
            # of its output toward hparams.target_score
            est_score = self.est_score(predict_spec)
            target_score = self.hparams.target_score * torch.ones(
                self.batch_size, 1, device=self.device
            )
            noisy_wav, lens = batch.noisy_sig
            noisy_spec = self.compute_feats(noisy_wav)
            # Regularizer keeping the enhanced spectrum close to the noisy one
            mse_cost = self.hparams.compute_cost(predict_spec, noisy_spec, lens)

        # D Learns to estimate the scores of enhanced speech
        elif optim_name == "D_enh" and self.sub_stage == SubStage.CURRENT:
            target_score = self.score(
                ids, predict_wav, predict_wav, lens
            )  # no clean_wav is needed
            est_score = self.est_score(predict_spec)

            # Write enhanced wavs during discriminator training, because we
            # compute the actual score here and we can save it
            self.write_wavs(ids, predict_wav, target_score, lens)

        # D Relearns to estimate the scores of previous epochs
        elif optim_name == "D_enh" and self.sub_stage == SubStage.HISTORICAL:
            target_score = batch.score.unsqueeze(1).float()
            est_score = self.est_score(predict_spec)

        # D Learns to estimate the scores of noisy speech
        elif optim_name == "D_noisy":
            noisy_spec = self.compute_feats(noisy_wav)
            target_score = self.score(
                ids, noisy_wav, noisy_wav, lens
            )  # no clean_wav is needed
            est_score = self.est_score(noisy_spec)

            # Save scores of noisy wavs
            self.save_noisy_scores(ids, target_score)

        if stage == sb.Stage.TRAIN:
            # Compute the cost
            cost = self.hparams.compute_cost(est_score, target_score)
            if optim_name == "generator":
                cost += self.hparams.mse_weight * mse_cost
                self.metrics["G"].append(cost.detach())
            else:
                self.metrics["D"].append(cost.detach())

        # Compute scores on validation data
        if stage != sb.Stage.TRAIN:
            clean_wav, lens = batch.clean_sig
            cost = self.hparams.compute_si_snr(predict_wav, clean_wav, lens)

            # Evaluate speech quality/intelligibility
            self.stoi_metric.append(
                batch.id, predict_wav, clean_wav, lens, reduction="batch"
            )
            self.pesq_metric.append(
                batch.id, predict=predict_wav, target=clean_wav, lengths=lens
            )
            if (
                self.hparams.calculate_dnsmos_on_validation_set
            ):  # Note: very time consuming........
                self.dnsmos_metric.append(
                    batch.id,
                    predict=predict_wav,
                    target=predict_wav,
                    lengths=lens,  # no clean_wav is needed
                )

            # Write wavs to file, for evaluation
            lens = lens * clean_wav.shape[1]
            for name, pred_wav, length in zip(batch.id, predict_wav, lens):
                name += ".wav"
                enhance_path = os.path.join(self.hparams.enhanced_folder, name)
                torchaudio.save(
                    enhance_path,
                    torch.unsqueeze(pred_wav[: int(length)].cpu(), 0),
                    16000,
                )

        return cost
def compute_ids(self, batch_id, optim_name):
"""Returns the list of ids, edited via optimizer name."""
if optim_name == "D_enh":
return [f"{uid}@{self.epoch}" for uid in batch_id]
return batch_id
def save_noisy_scores(self, batch_id, scores):
for i, score in zip(batch_id, scores):
self.noisy_scores[i] = score
def score(self, batch_id, deg_wav, ref_wav, lens):
"""Returns actual metric score, either pesq or stoi
Arguments
---------
batch_id : list of str
A list of the utterance ids for the batch
deg_wav : torch.Tensor
The degraded waveform to score
ref_wav : torch.Tensor
The reference waveform to use for scoring
length : torch.Tensor
The relative lengths of the utterances
"""
new_ids = [
i
for i, d in enumerate(batch_id)
if d not in self.historical_set and d not in self.noisy_scores
]
if len(new_ids) == 0:
pass
elif self.hparams.target_metric == "srmr" or "dnsmos":
self.target_metric.append(
ids=[batch_id[i] for i in new_ids],
predict=deg_wav[new_ids].detach(),
target=ref_wav[
new_ids
].detach(), # target is not used in the function !!!
lengths=lens[new_ids],
)
score = torch.tensor(
[[s] for s in self.target_metric.scores], device=self.device,
)
else:
raise ValueError("Expected 'srmr' or 'dnsmos' for target_metric")
# Clear metric scores to prepare for next batch
self.target_metric.clear()
# Combine old scores and new
final_score = []
for i, d in enumerate(batch_id):
if d in self.historical_set:
final_score.append([self.historical_set[d]["score"]])
elif d in self.noisy_scores:
final_score.append([self.noisy_scores[d]])
else:
final_score.append([score[new_ids.index(i)]])
return torch.tensor(final_score, device=self.device)
def est_score(self, deg_spec):
"""Returns score as estimated by discriminator
Arguments
---------
deg_spec : torch.Tensor
The spectral features of the degraded utterance
ref_spec : torch.Tensor
The spectral features of the reference utterance
"""
"""
combined_spec = torch.cat(
[deg_spec.unsqueeze(1), ref_spec.unsqueeze(1)], 1
)
"""
return self.modules.discriminator(deg_spec.unsqueeze(1))
def write_wavs(self, batch_id, wavs, score, lens):
"""Write wavs to files, for historical discriminator training
Arguments
---------
batch_id : list of str
A list of the utterance ids for the batch
wavs : torch.Tensor
The wavs to write to files
score : torch.Tensor
The actual scores for the corresponding utterances
lens : torch.Tensor
The relative lengths of each utterance
"""
lens = lens * wavs.shape[1]
record = {}
for i, (name, pred_wav, length) in enumerate(zip(batch_id, wavs, lens)):
path = os.path.join(self.hparams.MetricGAN_folder, name + ".wav")
data = torch.unsqueeze(pred_wav[: int(length)].cpu(), 0)
torchaudio.save(path, data, self.hparams.Sample_rate)
# Make record of path and score for historical training
score = float(score[i][0])
record[name] = {
"enh_wav": path,
"score": score,
}
# Update records for historical training
self.historical_set.update(record)
with open(self.hparams.historical_file, "wb") as fp: # Pickling
pickle.dump(self.historical_set, fp)
    def fit_batch(self, batch):
        "Compute gradients and update either D or G based on sub-stage."
        predictions = self.compute_forward(batch, sb.Stage.TRAIN)
        loss_tracker = 0
        if self.sub_stage == SubStage.CURRENT:
            # Two discriminator updates: one on enhanced and one on noisy data
            for mode in ["enh", "noisy"]:
                loss = self.compute_objectives(
                    predictions, batch, sb.Stage.TRAIN, f"D_{mode}"
                )
                self.d_optimizer.zero_grad()
                loss.backward()
                if self.check_gradients(loss):
                    self.d_optimizer.step()
                # NOTE(review): /3 presumably averages over the discriminator's
                # three data passes (see train_discriminator) — confirm
                loss_tracker += loss.detach() / 3
        elif self.sub_stage == SubStage.HISTORICAL:
            loss = self.compute_objectives(
                predictions, batch, sb.Stage.TRAIN, "D_enh"
            )
            self.d_optimizer.zero_grad()
            loss.backward()
            if self.check_gradients(loss):
                self.d_optimizer.step()
            loss_tracker += loss.detach()
        elif self.sub_stage == SubStage.GENERATOR:
            # Clamp the learnable sigmoid parameters of the generator
            for name, param in self.modules.generator.named_parameters():
                if "Learnable_sigmoid" in name:
                    param.data = torch.clamp(
                        param, max=3.5
                    )  # to prevent gradient goes to infinity

            loss = self.compute_objectives(
                predictions, batch, sb.Stage.TRAIN, "generator"
            )
            self.g_optimizer.zero_grad()
            loss.backward()
            if self.check_gradients(loss):
                self.g_optimizer.step()
            loss_tracker += loss.detach()

        return loss_tracker
    def on_stage_start(self, stage, epoch=None):
        """Gets called at the beginning of each epoch

        This method calls ``fit()`` again to train the discriminator
        before proceeding with generator training.
        """

        self.metrics = {"G": [], "D": []}

        if stage == sb.Stage.TRAIN:
            # NOTE(review): n_jobs is read from the module-level ``hparams``
            # dict rather than ``self.hparams`` — this only works when the
            # module is run as a script; confirm before reusing the class.
            if self.hparams.target_metric == "srmr":
                self.target_metric = MetricStats(
                    metric=srmrpy_eval,
                    n_jobs=hparams["n_jobs"],
                    batch_eval=False,
                )
            elif self.hparams.target_metric == "dnsmos":
                self.target_metric = MetricStats(
                    metric=dnsmos_eval,
                    n_jobs=hparams["n_jobs"],
                    batch_eval=False,
                )
            else:
                raise NotImplementedError(
                    "Right now we only support 'srmr' and 'dnsmos'"
                )

            # Train discriminator before we start generator training
            if self.sub_stage == SubStage.GENERATOR:
                self.epoch = epoch
                self.train_discriminator()
                self.sub_stage = SubStage.GENERATOR
                print("Generator training by current data...")

        if stage != sb.Stage.TRAIN:
            # Validation/test metrics (computed per-utterance on CPU)
            self.pesq_metric = MetricStats(
                metric=pesq_eval, n_jobs=hparams["n_jobs"], batch_eval=False
            )
            self.stoi_metric = MetricStats(metric=stoi_loss)
            self.srmr_metric = MetricStats(
                metric=srmrpy_eval_valid,
                n_jobs=hparams["n_jobs"],
                batch_eval=False,
            )
            self.dnsmos_metric = MetricStats(
                metric=dnsmos_eval_valid,
                n_jobs=hparams["n_jobs"],
                batch_eval=False,
            )
    def train_discriminator(self):
        """A total of 3 data passes to update discriminator."""
        # First, iterate train subset w/ updates for enh, noisy
        print("Discriminator training by current data...")
        self.sub_stage = SubStage.CURRENT
        self.fit(
            range(1),
            self.train_set,
            train_loader_kwargs=self.hparams.dataloader_options,
        )

        # Next, iterate historical subset w/ updates for enh
        # (skipped on the first epoch, when no history exists yet)
        if self.historical_set:
            print("Discriminator training by historical data...")
            self.sub_stage = SubStage.HISTORICAL
            self.fit(
                range(1),
                self.historical_set,
                train_loader_kwargs=self.hparams.dataloader_options,
            )

        # Finally, iterate train set again. Should iterate same
        # samples as before, due to ReproducibleRandomSampler
        print("Discriminator training by current data again...")
        self.sub_stage = SubStage.CURRENT
        self.fit(
            range(1),
            self.train_set,
            train_loader_kwargs=self.hparams.dataloader_options,
        )
    def on_stage_end(self, stage, stage_loss, epoch=None):
        """Called at the end of each stage to summarize progress.

        Arguments
        ---------
        stage : sb.Stage
            TRAIN, VALID, or TEST.
        stage_loss : float
            Average loss over the stage (negative SI-SNR outside TRAIN).
        epoch : int
            Current epoch, used only for logging.
        """
        # Discriminator sub-stages are internal fit() calls; only the
        # generator pass should log/checkpoint.
        if self.sub_stage != SubStage.GENERATOR:
            return
        if stage == sb.Stage.TRAIN:
            self.train_loss = stage_loss
            g_loss = torch.tensor(self.metrics["G"])  # batch_size
            d_loss = torch.tensor(self.metrics["D"])  # batch_size
            print("Avg G loss: %.3f" % torch.mean(g_loss))
            print("Avg D loss: %.3f" % torch.mean(d_loss))
        else:
            if self.hparams.calculate_dnsmos_on_validation_set:
                stats = {
                    "SI-SNR": -stage_loss,
                    "pesq": 5 * self.pesq_metric.summarize("average") - 0.5,
                    "stoi": -self.stoi_metric.summarize("average"),
                    "dnsmos": self.dnsmos_metric.summarize("average"),
                }
            else:
                stats = {
                    "SI-SNR": -stage_loss,
                    "pesq": 5 * self.pesq_metric.summarize("average") - 0.5,
                    "stoi": -self.stoi_metric.summarize("average"),
                }
        if stage == sb.Stage.VALID:
            # Scheduler is driven by (5 - pesq) so that lower means better.
            old_lr, new_lr = self.hparams.lr_annealing(5.0 - stats["pesq"])
            sb.nnet.schedulers.update_learning_rate(self.g_optimizer, new_lr)
            if self.hparams.use_tensorboard:
                if (
                    self.hparams.calculate_dnsmos_on_validation_set
                ):  # Note: very time consuming
                    valid_stats = {
                        "SI-SNR": -stage_loss,
                        "pesq": 5 * self.pesq_metric.summarize("average") - 0.5,
                        "stoi": -self.stoi_metric.summarize("average"),
                        "dnsmos": self.dnsmos_metric.summarize("average"),
                    }
                else:
                    valid_stats = {
                        "SI-SNR": -stage_loss,
                        "pesq": 5 * self.pesq_metric.summarize("average") - 0.5,
                        "stoi": -self.stoi_metric.summarize("average"),
                    }
                self.hparams.tensorboard_train_logger.log_stats(
                    {"lr": old_lr}, valid_stats
                )
            self.hparams.train_logger.log_stats(
                {"Epoch": epoch, "lr": old_lr},
                train_stats={"loss": self.train_loss},
                valid_stats=stats,
            )
            # Keep only the checkpoints with the best (highest) pesq.
            self.checkpointer.save_and_keep_only(meta=stats, max_keys=["pesq"])
        if stage == sb.Stage.TEST:
            self.hparams.train_logger.log_stats(
                {"Epoch loaded": self.hparams.epoch_counter.current},
                test_stats=stats,
            )
    def make_dataloader(
        self, dataset, stage, ckpt_prefix="dataloader-", **loader_kwargs
    ):
        """Override dataloader to insert custom sampler/dataset.

        For TRAIN, wraps the data in a reproducible weighted sampler (and,
        in the HISTORICAL sub-stage, in a fresh DynamicItemDataset over the
        enhanced-history items) before delegating to the base class.
        """
        if stage == sb.Stage.TRAIN:
            # Create a new dataset each time, this set grows
            if self.sub_stage == SubStage.HISTORICAL:
                dataset = sb.dataio.dataset.DynamicItemDataset(
                    data=dataset,
                    dynamic_items=[enh_pipeline],
                    output_keys=["id", "enh_sig", "score"],
                )
                # Only a fraction of the history is revisited each epoch.
                samples = round(len(dataset) * self.hparams.history_portion)
            else:
                samples = self.hparams.number_of_samples
            # This sampler should give the same samples for D and G
            epoch = self.hparams.epoch_counter.current
            # Equal weights for all samples, we use "Weighted" so we can do
            # both "replacement=False" and a set number of samples, reproducibly
            weights = torch.ones(len(dataset))
            sampler = ReproducibleWeightedRandomSampler(
                weights, epoch=epoch, replacement=False, num_samples=samples
            )
            loader_kwargs["sampler"] = sampler
            if self.sub_stage == SubStage.GENERATOR:
                self.train_sampler = sampler
        # Make the dataloader as normal
        return super().make_dataloader(
            dataset, stage, ckpt_prefix, **loader_kwargs
        )
def on_fit_start(self):
"Override to prevent this from running for D training"
if self.sub_stage == SubStage.GENERATOR:
super().on_fit_start()
def init_optimizers(self):
"Initializes the generator and discriminator optimizers"
self.g_optimizer = self.hparams.g_opt_class(
self.modules.generator.parameters()
)
self.d_optimizer = self.hparams.d_opt_class(
self.modules.discriminator.parameters()
)
if self.checkpointer is not None:
self.checkpointer.add_recoverable("g_opt", self.g_optimizer)
self.checkpointer.add_recoverable("d_opt", self.d_optimizer)
def zero_grad(self, set_to_none=False):
self.g_optimizer.zero_grad(set_to_none)
self.d_optimizer.zero_grad(set_to_none)
# Define audio pipelines for training set
@sb.utils.data_pipeline.takes("noisy_wav")
@sb.utils.data_pipeline.provides("noisy_sig")
def audio_pipeline_train(noisy_wav):
    """Load the noisy waveform for one training example."""
    yield sb.dataio.dataio.read_audio(noisy_wav)
# Define audio pipelines for validation/test set
@sb.utils.data_pipeline.takes("noisy_wav", "clean_wav")
@sb.utils.data_pipeline.provides("noisy_sig", "clean_sig")
def audio_pipeline_valid(noisy_wav, clean_wav):
    """Load the noisy and clean waveforms for one valid/test example."""
    yield sb.dataio.dataio.read_audio(noisy_wav)
    yield sb.dataio.dataio.read_audio(clean_wav)
# For historical data
@sb.utils.data_pipeline.takes("enh_wav")
@sb.utils.data_pipeline.provides("enh_sig")
def enh_pipeline(enh_wav):
    """Load a previously-enhanced waveform (historical discriminator data)."""
    yield sb.dataio.dataio.read_audio(enh_wav)
def dataio_prep(hparams):
    """This function prepares the datasets to be used in the brain class.

    Train items expose only the noisy signal; valid/test items also
    expose the clean reference signal.
    """
    datasets = {
        "train": sb.dataio.dataset.DynamicItemDataset.from_json(
            json_path=hparams["train_annotation"],
            replacements={"data_root": hparams["data_folder"]},
            dynamic_items=[audio_pipeline_train],
            output_keys=["id", "noisy_sig"],
        )
    }
    # valid and test share the same pipeline and output keys.
    for split in ("valid", "test"):
        datasets[split] = sb.dataio.dataset.DynamicItemDataset.from_json(
            json_path=hparams[split + "_annotation"],
            replacements={"data_root": hparams["data_folder"]},
            dynamic_items=[audio_pipeline_valid],
            output_keys=["id", "noisy_sig", "clean_sig"],
        )
    return datasets
def create_folder(folder):
    """Create ``folder`` (including parents) if it does not already exist.

    ``exist_ok=True`` makes the call idempotent and avoids the
    check-then-create race when several DDP processes hit this at once.

    Arguments
    ---------
    folder : str
        Path of the directory to create.
    """
    os.makedirs(folder, exist_ok=True)
# Recipe begins!
if __name__ == "__main__":
    # Load hyperparameters file with command-line overrides
    hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])
    with open(hparams_file) as fin:
        hparams = load_hyperpyyaml(fin, overrides)
    # Initialize ddp (useful only for multi-GPU DDP training)
    sb.utils.distributed.ddp_init_group(run_opts)
    # Data preparation (run once on the main process)
    from voicebank_prepare import prepare_voicebank  # noqa
    run_on_main(
        prepare_voicebank,
        kwargs={
            "data_folder": hparams["data_folder"],
            "save_folder": hparams["data_folder"],
            "skip_prep": hparams["skip_prep"],
        },
    )
    # Create dataset objects
    datasets = dataio_prep(hparams)
    # Create experiment directory
    sb.create_experiment_directory(
        experiment_directory=hparams["output_folder"],
        hyperparams_to_save=hparams_file,
        overrides=overrides,
    )
    if hparams["use_tensorboard"]:
        from speechbrain.utils.train_logger import TensorboardLogger
        hparams["tensorboard_train_logger"] = TensorboardLogger(
            hparams["tensorboard_logs"]
        )
    # Create the folder to save enhanced files (+ support for DDP)
    run_on_main(create_folder, kwargs={"folder": hparams["enhanced_folder"]})
    se_brain = MetricGanBrain(
        modules=hparams["modules"],
        hparams=hparams,
        run_opts=run_opts,
        checkpointer=hparams["checkpointer"],
    )
    # State used by the discriminator/generator sub-stage machinery.
    se_brain.train_set = datasets["train"]
    se_brain.historical_set = {}
    se_brain.noisy_scores = {}
    se_brain.batch_size = hparams["dataloader_options"]["batch_size"]
    se_brain.sub_stage = SubStage.GENERATOR
    # No historical file means a fresh run: wipe any stale MetricGAN folder.
    # NOTE(review): shutil.rmtree raises if the folder is absent —
    # presumably it always exists on a fresh run; confirm.
    if not os.path.isfile(hparams["historical_file"]):
        shutil.rmtree(hparams["MetricGAN_folder"])
    run_on_main(create_folder, kwargs={"folder": hparams["MetricGAN_folder"]})
    se_brain.load_history()
    # Load latest checkpoint to resume training
    se_brain.fit(
        epoch_counter=se_brain.hparams.epoch_counter,
        train_set=datasets["train"],
        valid_set=datasets["valid"],
        train_loader_kwargs=hparams["dataloader_options"],
        valid_loader_kwargs=hparams["valid_dataloader_options"],
    )
    # Load best checkpoint (highest pesq) for evaluation
    test_stats = se_brain.evaluate(
        test_set=datasets["test"],
        max_key="pesq",
        test_loader_kwargs=hparams["dataloader_options"],
    )
| 27,522 | 34.331194 | 103 | py |
speechbrain | speechbrain-main/recipes/Voicebank/MTL/ASR_enhance/train.py | #!/usr/bin/env python3
"""Recipe for multi-task learning, using seq2seq and enhancement objectives.
To run this recipe, do the following:
> python train.py hparams/{config file} --data_folder /path/to/noisy-vctk
There's three provided files for three stages of training:
> python train.py hparams/pretrain_perceptual.yaml
> python train.py hparams/enhance_mimic.yaml
> python train.py hparams/robust_asr.yaml
Use your own hyperparameter file or the provided files.
The different losses can be turned on and off, and pre-trained models
can be used for enhancement or ASR models.
Authors
* Peter Plantinga 2020, 2021
"""
import os
import sys
import torch
import torchaudio
import speechbrain as sb
from pesq import pesq
from pystoi import stoi
from composite_eval import eval_composite
from hyperpyyaml import load_hyperpyyaml
from speechbrain.utils.data_utils import undo_padding
from speechbrain.utils.distributed import run_on_main
def pesq_eval(pred_wav, target_wav):
    """Compute wide-band PESQ between an enhanced and a clean waveform (16 kHz)."""
    return pesq(
        fs=16000, ref=target_wav.numpy(), deg=pred_wav.numpy(), mode="wb",
    )
def estoi_eval(pred_wav, target_wav):
    """Compute STOI between an enhanced and a clean waveform (16 kHz).

    NOTE(review): despite the ``estoi`` name, ``extended=False`` is
    passed, so this computes plain STOI — confirm which variant is
    intended.
    """
    return stoi(
        x=target_wav.numpy(), y=pred_wav.numpy(), fs_sig=16000, extended=False
    )
def composite_eval(pred_wav, target_wav):
    """Compute composite enhancement measures (keyed csig/cbak/covl)."""
    return eval_composite(target_wav.numpy(), pred_wav.numpy())
class CompositeStats(sb.utils.metric_stats.MetricStats):
    """MetricStats variant that averages composite scores (csig/cbak/covl)."""

    def summarize(self, field=None):
        """Average each composite measure over all appended score dicts.

        Returns the full summary dict, or just ``summary[field]`` when a
        field name is given.
        """
        count = len(self.scores)
        self.summary = {
            key: sum(score[key] for score in self.scores) / count
            for key in ("csig", "cbak", "covl")
        }
        return self.summary if field is None else self.summary[field]

    def write_stats(self, filestream):
        """Write the summary (computing it first if needed) to ``filestream``."""
        if not self.summary:
            self.summarize()
        filestream.write(str(self.summary) + "\n")
# Define training procedure
class MTLbrain(sb.Brain):
    """Multi-task Brain combining enhancement, mimic, CTC and seq2seq losses.

    Which terms are active is controlled by the ``*_weight`` and ``*_type``
    hyperparameters; any of them may be disabled independently.
    """

    def compute_forward(self, batch, stage):
        """The forward pass computes enhanced feats and targets"""
        batch = batch.to(self.device)
        # Stored so prepare_wavs/prepare_targets know whether to duplicate
        # the batch for environment corruption during training.
        self.stage = stage
        predictions = {}
        if self.hparams.enhance_type is not None:
            noisy_wavs, lens = self.prepare_wavs(batch.noisy_sig)
            # Mask with "signal approximation (SA)"
            if self.hparams.enhance_type == "masking":
                (
                    predictions["wavs"],
                    predictions["feats"],
                ) = self.modules.enhance_model(noisy_wavs)
            elif self.hparams.enhance_type == "noisy":
                predictions["wavs"] = noisy_wavs
            elif self.hparams.enhance_type == "clean":
                predictions["wavs"], _ = self.prepare_wavs(batch.clean_sig)
        # Generate clean features for ASR pre-training
        if self.hparams.ctc_type == "clean" or self.hparams.seq_type == "clean":
            clean_wavs, lens = self.prepare_wavs(batch.clean_sig)
            clean_feats = self.prepare_feats(clean_wavs)
        # Compute seq outputs
        if self.hparams.seq_type is not None:
            # Prepare target inputs
            tokens, token_lens = self.prepare_targets(batch.tokens_bos)
            tokens = self.modules.tgt_embedding(tokens)
            if self.hparams.seq_type == "clean":
                if hasattr(self.hparams, "perceptual_fbank"):
                    clean_feats = self.hparams.fbank(clean_feats)
                embed = self.modules.src_embedding(clean_feats)
            if self.hparams.seq_type == "joint":
                # Run ASR on the (possibly augmented) enhanced waveforms.
                asr_feats = predictions["wavs"]
                if stage == sb.Stage.TRAIN:
                    asr_feats = self.hparams.augment(asr_feats, lens)
                asr_feats = self.hparams.fbank(asr_feats)
                asr_feats = self.hparams.normalizer(asr_feats, lens)
                embed = self.modules.src_embedding(asr_feats)
            dec_out = self.modules.recognizer(tokens, embed, lens)
            out = self.modules.seq_output(dec_out[0])
            predictions["seq_pout"] = torch.log_softmax(out, dim=-1)
            if self.hparams.ctc_type is not None:
                out = self.modules.ctc_output(embed)
                predictions["ctc_pout"] = torch.log_softmax(out, dim=-1)
            if stage != sb.Stage.TRAIN:
                predictions["hyps"], _ = self.hparams.beam_searcher(
                    embed.detach(), lens
                )
        elif self.hparams.ctc_type is not None:
            if self.hparams.ctc_type == "clean":
                embed = self.modules.src_embedding(clean_feats)
            elif self.hparams.ctc_type == "joint":
                enh_feats = self.hparams.spectral_magnitude(
                    predictions["feats"]
                )
                enh_feats = torch.log1p(enh_feats)
                embed = self.modules.src_embedding(enh_feats)
            out = self.modules.ctc_output(embed)
            predictions["ctc_pout"] = torch.log_softmax(out, dim=-1)
        return predictions

    def prepare_wavs(self, signal, augment=True):
        """Prepare possibly enhanced waveforms.

        During training with ``env_corr``, the batch is doubled: original
        waveforms concatenated with (augmented or repeated) copies.
        """
        wavs, wav_lens = signal
        if self.stage == sb.Stage.TRAIN and hasattr(self.hparams, "env_corr"):
            if augment:
                wavs_noise = self.hparams.env_corr(wavs, wav_lens)
                wavs = torch.cat([wavs, wavs_noise], dim=0)
            else:
                wavs = torch.cat([wavs, wavs], dim=0)
            wav_lens = torch.cat([wav_lens, wav_lens])
        return wavs, wav_lens

    def prepare_feats(self, wavs):
        """Prepare log-magnitude spectral features expected by perceptual model"""
        stft = self.hparams.compute_stft(wavs)
        feats = self.hparams.spectral_magnitude(stft)
        feats = torch.log1p(feats)
        return feats

    def prepare_targets(self, tokens):
        """Prepare target by concatenating self if "env_corr" is used"""
        tokens, token_lens = tokens
        if self.stage == sb.Stage.TRAIN and hasattr(self.hparams, "env_corr"):
            # Mirror the batch doubling done in prepare_wavs.
            tokens = torch.cat([tokens, tokens], dim=0)
            token_lens = torch.cat([token_lens, token_lens])
        return tokens, token_lens

    def compute_objectives(self, predictions, batch, stage):
        """Compute possibly several loss terms: enhance, mimic, ctc, seq"""
        # Do not augment targets
        clean_wavs, lens = self.prepare_wavs(batch.clean_sig, augment=False)
        loss = 0
        # Compute enhancement loss
        if self.hparams.enhance_weight > 0:
            clean_stft = self.modules.enhance_model.stft(clean_wavs)
            clean_feats = self.modules.enhance_model.extract_feats(clean_stft)
            enhance_loss = self.hparams.enhance_loss(
                predictions["feats"], clean_feats, lens
            )
            loss += self.hparams.enhance_weight * enhance_loss
            if stage != sb.Stage.TRAIN:
                self.enh_metrics.append(
                    batch.id, predictions["feats"], clean_feats, lens
                )
                self.stoi_metrics.append(
                    ids=batch.id,
                    predict=predictions["wavs"],
                    target=clean_wavs,
                    lengths=lens,
                )
                self.pesq_metrics.append(
                    ids=batch.id,
                    predict=predictions["wavs"],
                    target=clean_wavs,
                    lengths=lens,
                )
                if stage == sb.Stage.TEST:
                    self.composite_metrics.append(
                        ids=batch.id,
                        predict=predictions["wavs"],
                        target=clean_wavs,
                        lengths=lens,
                    )
                # Optionally dump enhanced audio to disk for listening tests.
                if hasattr(self.hparams, "enh_dir"):
                    abs_lens = lens * predictions["wavs"].size(1)
                    for i, uid in enumerate(batch.id):
                        length = int(abs_lens[i])
                        wav = predictions["wavs"][i, :length].unsqueeze(0)
                        path = os.path.join(self.hparams.enh_dir, uid + ".wav")
                        torchaudio.save(path, wav.cpu(), sample_rate=16000)
        # Compute mimic loss
        if self.hparams.mimic_weight > 0:
            enhance_mag = predictions["feats"]
            if hasattr(self.hparams, "perceptual_fbank"):
                enhance_mag = self.hparams.perceptual_fbank(enhance_mag)
                clean_feats = self.hparams.perceptual_fbank(clean_feats)
            clean_embed = self.modules.src_embedding.CNN(clean_feats)
            enh_embed = self.modules.src_embedding.CNN(enhance_mag)
            mimic_loss = self.hparams.mimic_loss(enh_embed, clean_embed, lens)
            loss += self.hparams.mimic_weight * mimic_loss
            if stage != sb.Stage.TRAIN:
                self.mimic_metrics.append(
                    batch.id, enh_embed, clean_embed, lens
                )
        # Compute hard ASR loss (optionally only for the first ctc_epochs)
        if self.hparams.ctc_weight > 0 and (
            not hasattr(self.hparams, "ctc_epochs")
            or self.hparams.epoch_counter.current < self.hparams.ctc_epochs
        ):
            tokens, token_lens = self.prepare_targets(batch.tokens)
            ctc_loss = sb.nnet.losses.ctc_loss(
                predictions["ctc_pout"],
                tokens,
                lens,
                token_lens,
                self.hparams.blank_index,
            )
            loss += self.hparams.ctc_weight * ctc_loss
            # Greedy-decode error rate only when seq2seq isn't reporting one.
            if stage != sb.Stage.TRAIN and self.hparams.seq_weight == 0:
                predict = sb.decoders.ctc_greedy_decode(
                    predictions["ctc_pout"],
                    lens,
                    blank_id=self.hparams.blank_index,
                )
                self.err_rate_metrics.append(
                    ids=batch.id,
                    predict=predict,
                    target=tokens,
                    target_len=token_lens,
                    ind2lab=self.token_encoder.decode_ndim,
                )
        # Compute nll loss for seq2seq model
        if self.hparams.seq_weight > 0:
            tokens, token_lens = self.prepare_targets(batch.tokens_eos)
            seq_loss = self.hparams.seq_loss(
                predictions["seq_pout"], tokens, token_lens
            )
            loss += self.hparams.seq_weight * seq_loss
            if stage != sb.Stage.TRAIN:
                if hasattr(self.hparams, "tokenizer"):
                    pred_words = [
                        self.token_encoder.decode_ids(token_seq)
                        for token_seq in predictions["hyps"]
                    ]
                    target_words = [
                        self.token_encoder.decode_ids(token_seq)
                        for token_seq in undo_padding(*batch.tokens)
                    ]
                    self.err_rate_metrics.append(
                        batch.id, pred_words, target_words
                    )
                else:
                    self.err_rate_metrics.append(
                        ids=batch.id,
                        predict=predictions["hyps"],
                        target=tokens,
                        target_len=token_lens,
                        ind2lab=self.token_encoder.decode_ndim,
                    )
        return loss

    def on_stage_start(self, stage, epoch):
        """Create metric trackers (eval) or (un)freeze models (train)."""
        if stage != sb.Stage.TRAIN:
            if self.hparams.enhance_weight > 0:
                self.enh_metrics = self.hparams.enhance_stats()
                self.stoi_metrics = self.hparams.estoi_stats()
                self.pesq_metrics = self.hparams.pesq_stats()
                self.composite_metrics = self.hparams.composite_stats()
            if self.hparams.mimic_weight > 0:
                self.mimic_metrics = self.hparams.mimic_stats()
            if self.hparams.ctc_weight > 0 or self.hparams.seq_weight > 0:
                self.err_rate_metrics = self.hparams.err_rate_stats()
        # Freeze models before training
        else:
            for model in self.hparams.frozen_models:
                # A frozen model may be thawed from unfreeze_epoch onwards,
                # optionally restricted to the unfrozen_models list.
                if (
                    hasattr(self.hparams, "unfreeze_epoch")
                    and epoch >= self.hparams.unfreeze_epoch
                    and (
                        not hasattr(self.hparams, "unfrozen_models")
                        or model in self.hparams.unfrozen_models
                    )
                ):
                    self.modules[model].train()
                    for p in self.modules[model].parameters():
                        p.requires_grad = True
                else:
                    self.modules[model].eval()
                    for p in self.modules[model].parameters():
                        p.requires_grad = False

    def on_stage_end(self, stage, stage_loss, epoch):
        """Summarize metrics, anneal LR, checkpoint, and log at stage end."""
        if stage == sb.Stage.TRAIN:
            self.train_loss = stage_loss
        else:
            stage_stats = {"loss": stage_loss}
            max_keys = []
            min_keys = []
            if self.hparams.enhance_weight > 0:
                stage_stats["enhance"] = self.enh_metrics.summarize("average")
                stage_stats["stoi"] = self.stoi_metrics.summarize("average")
                stage_stats["pesq"] = self.pesq_metrics.summarize("average")
                max_keys.extend(["pesq", "stoi"])
                if stage == sb.Stage.TEST:
                    stage_stats["csig"] = self.composite_metrics.summarize(
                        "csig"
                    )
                    stage_stats["cbak"] = self.composite_metrics.summarize(
                        "cbak"
                    )
                    stage_stats["covl"] = self.composite_metrics.summarize(
                        "covl"
                    )
                    max_keys.extend(["csig", "cbak", "covl"])
            if self.hparams.mimic_weight > 0:
                stage_stats["mimic"] = self.mimic_metrics.summarize("average")
                min_keys.append("mimic")
            if self.hparams.ctc_weight > 0 or self.hparams.seq_weight > 0:
                err_rate = self.err_rate_metrics.summarize("error_rate")
                # e.g. "phones" -> "phonesER", "wrd" -> "wrdER"
                err_rate_type = self.hparams.target_type + "ER"
                stage_stats[err_rate_type] = err_rate
                min_keys.append(err_rate_type)
        if stage == sb.Stage.VALID:
            stats_meta = {"epoch": epoch}
            if hasattr(self.hparams, "lr_annealing"):
                old_lr, new_lr = self.hparams.lr_annealing(epoch - 1)
                sb.nnet.schedulers.update_learning_rate(self.optimizer, new_lr)
                stats_meta["lr"] = old_lr
            self.hparams.train_logger.log_stats(
                stats_meta=stats_meta,
                train_stats={"loss": self.train_loss},
                valid_stats=stage_stats,
            )
            self.checkpointer.save_and_keep_only(
                meta=stage_stats,
                max_keys=max_keys,
                min_keys=min_keys,
                num_to_keep=self.hparams.checkpoint_avg,
            )
        elif stage == sb.Stage.TEST:
            self.hparams.train_logger.log_stats(
                stats_meta={"Epoch loaded": self.hparams.epoch_counter.current},
                test_stats=stage_stats,
            )
            with open(self.hparams.stats_file + ".txt", "w") as w:
                if self.hparams.enhance_weight > 0:
                    w.write("\nstoi stats:\n")
                    self.stoi_metrics.write_stats(w)
                    w.write("\npesq stats:\n")
                    self.pesq_metrics.write_stats(w)
                    w.write("\ncomposite stats:\n")
                    self.composite_metrics.write_stats(w)
                if self.hparams.mimic_weight > 0:
                    w.write("\nmimic stats:\n")
                    self.mimic_metrics.write_stats(w)
                if self.hparams.seq_weight > 0:
                    self.err_rate_metrics.write_stats(w)
                print("stats written to ", self.hparams.stats_file)

    def on_evaluate_start(self, max_key=None, min_key=None):
        """Recover checkpoints and average the best ones before evaluation."""
        self.checkpointer.recover_if_possible(max_key=max_key, min_key=min_key)
        checkpoints = self.checkpointer.find_checkpoints(
            max_key=max_key,
            min_key=min_key,
            max_num_checkpoints=self.hparams.checkpoint_avg,
        )
        for model in self.modules:
            # Only average the models that were actually being trained.
            if (
                model not in self.hparams.frozen_models
                or hasattr(self.hparams, "unfrozen_models")
                and model in self.hparams.unfrozen_models
            ):
                model_state_dict = sb.utils.checkpoints.average_checkpoints(
                    checkpoints, model
                )
                self.modules[model].load_state_dict(model_state_dict)
def dataio_prep(hparams, token_encoder):
    """Creates the datasets and their data processing pipelines.

    Arguments
    ---------
    hparams : dict
        Loaded hyperparameters (annotation paths, sorting mode, indices).
    token_encoder : object
        Either a sentencepiece-style tokenizer (when "tokenizer" is in
        hparams) or a CTCTextEncoder fitted here on the training data.
    """
    # Define pipelines
    @sb.utils.data_pipeline.takes("noisy_wav", "clean_wav")
    @sb.utils.data_pipeline.provides("noisy_sig", "clean_sig")
    def audio_pipeline(noisy_wav, clean_wav):
        yield sb.dataio.dataio.read_audio(noisy_wav)
        yield sb.dataio.dataio.read_audio(clean_wav)
    token_keys = ["tokens_bos", "tokens_eos", "tokens"]
    @sb.utils.data_pipeline.takes(hparams["target_type"])
    @sb.utils.data_pipeline.provides("tokens_list", *[t for t in token_keys])
    def target_pipeline(target):
        # Tokenizer path encodes directly to ids; otherwise split on
        # whitespace and encode with the CTC text encoder.
        if "tokenizer" in hparams:
            tokens_list = token_encoder.encode_as_ids(target)
            yield tokens_list
        else:
            tokens_list = target.strip().split()
            yield tokens_list
            tokens_list = token_encoder.encode_sequence(tokens_list)
        tokens_bos = torch.LongTensor([hparams["bos_index"]] + (tokens_list))
        yield tokens_bos
        tokens_eos = torch.LongTensor(tokens_list + [hparams["eos_index"]])
        yield tokens_eos
        tokens = torch.LongTensor(tokens_list)
        yield tokens
    # Create datasets
    data = {}
    data_info = {
        "train": hparams["train_annotation"],
        "valid": hparams["valid_annotation"],
        "test": hparams["test_annotation"],
    }
    for dataset in data_info:
        data[dataset] = sb.dataio.dataset.DynamicItemDataset.from_json(
            json_path=data_info[dataset],
            replacements={"data_root": hparams["data_folder"]},
            dynamic_items=[audio_pipeline, target_pipeline],
            output_keys=["id", "noisy_sig", "clean_sig"] + token_keys,
        )
        if dataset != "train":
            data[dataset] = data[dataset].filtered_sorted(sort_key="length")
    # Sort train dataset and ensure it doesn't get un-sorted
    if hparams["sorting"] == "ascending" or hparams["sorting"] == "descending":
        data["train"] = data["train"].filtered_sorted(
            sort_key="length", reverse=hparams["sorting"] == "descending",
        )
        hparams["train_loader_options"]["shuffle"] = False
    elif hparams["sorting"] != "random":
        raise NotImplementedError(
            "Sorting must be random, ascending, or descending"
        )
    # Update token_encoder (only when no pretrained tokenizer is given)
    if "tokenizer" not in hparams:
        token_encoder.insert_blank()
        token_encoder.update_from_didataset(
            data["train"], output_key="tokens_list"
        )
    return data
# Begin Recipe!
if __name__ == "__main__":
    # Load hyperparameters file with command-line overrides
    hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])
    with open(hparams_file) as fin:
        hparams = load_hyperpyyaml(fin, overrides)
    # Create experiment directory
    sb.create_experiment_directory(
        experiment_directory=hparams["output_folder"],
        hyperparams_to_save=hparams_file,
        overrides=overrides,
    )
    # Prepare data (run once on the main process)
    from voicebank_prepare import prepare_voicebank  # noqa E402
    run_on_main(
        prepare_voicebank,
        kwargs={
            "data_folder": hparams["data_folder"],
            "save_folder": hparams["data_folder"],
            "skip_prep": hparams["skip_prep"],
        },
    )
    # Load pretrained models for any stage that has one configured
    for model in ["asr", "enhance", "perceptual"]:
        pretrained = f"{model}_pretrained"
        if pretrained in hparams:
            # We download the model from HuggingFace (by default).
            run_on_main(hparams[pretrained].collect_files)
            hparams[pretrained].load_collected()
    # Switch encoder based on task
    if "tokenizer" in hparams:
        token_encoder = hparams["tokenizer"]
    else:
        token_encoder = sb.dataio.encoder.CTCTextEncoder()
    datasets = dataio_prep(hparams, token_encoder)
    # Initialize trainer
    mtl_brain = MTLbrain(
        modules=hparams["modules"],
        opt_class=hparams["opt_class"],
        run_opts=run_opts,
        hparams=hparams,
        checkpointer=hparams["checkpointer"],
    )
    mtl_brain.token_encoder = token_encoder
    # Fit dataset
    mtl_brain.fit(
        epoch_counter=mtl_brain.hparams.epoch_counter,
        train_set=datasets["train"],
        valid_set=datasets["valid"],
        train_loader_kwargs=hparams["train_loader_options"],
        valid_loader_kwargs=hparams["valid_loader_options"],
    )
    # Evaluate best checkpoint, using lowest or highest value on validation
    outdir = mtl_brain.hparams.output_folder
    for dset in ["valid", "test"]:
        # on_stage_end writes per-split stats to this file at TEST time.
        mtl_brain.hparams.stats_file = os.path.join(outdir, f"{dset}_stats")
        mtl_brain.evaluate(
            datasets[dset],
            max_key=hparams["eval_max_key"],
            min_key=hparams["eval_min_key"],
            test_loader_kwargs=hparams["test_loader_options"],
        )
| 21,897 | 37.826241 | 82 | py |
speechbrain | speechbrain-main/recipes/Voicebank/ASR/CTC/train.py | # /usr/bin/env python3
"""Recipe for doing ASR with phoneme targets and CTC loss on Voicebank
To run this recipe, do the following:
> python train.py hparams/{hyperparameter file} --data_folder /path/to/noisy-vctk
Use your own hyperparameter file or the provided `hyperparams.yaml`
To use noisy inputs, change `input_type` field from `clean_wav` to `noisy_wav`.
To use pretrained model, enter the path in `pretrained` field.
Authors
* Peter Plantinga 2020
"""
import os
import sys
import torch
import speechbrain as sb
from speechbrain.utils.distributed import run_on_main
from hyperpyyaml import load_hyperpyyaml
# Define training procedure
class ASR_Brain(sb.Brain):
    """Brain for phoneme-target CTC ASR on Voicebank."""

    def compute_forward(self, batch, stage):
        "Given an input batch it computes the phoneme probabilities."
        batch = batch.to(self.device)
        wavs, wav_lens = batch.sig
        # Apply waveform augmentation only while training; running it
        # during valid/test would corrupt the evaluation inputs.
        if stage == sb.Stage.TRAIN:
            wavs = self.modules.augmentation(wavs, wav_lens)
        feats = self.hparams.compute_features(wavs)
        feats = self.modules.normalize(feats, wav_lens)
        out = self.modules.model(feats)
        out = self.modules.output(out)
        pout = self.hparams.log_softmax(out)
        return pout, wav_lens

    def compute_objectives(self, predictions, batch, stage):
        "Given the network predictions and targets computed the CTC loss."
        pout, pout_lens = predictions
        phns, phn_lens = batch.phn_encoded
        loss = self.hparams.compute_cost(pout, phns, pout_lens, phn_lens)
        self.ctc_metrics.append(batch.id, pout, phns, pout_lens, phn_lens)
        if stage != sb.Stage.TRAIN:
            # Track PER with greedy decoding outside of training.
            sequence = sb.decoders.ctc_greedy_decode(
                pout, pout_lens, blank_id=self.hparams.blank_index
            )
            self.per_metrics.append(
                ids=batch.id,
                predict=sequence,
                target=phns,
                target_len=phn_lens,
                ind2lab=self.label_encoder.decode_ndim,
            )
        return loss

    def on_stage_start(self, stage, epoch):
        "Gets called when a stage (either training, validation, test) starts."
        self.ctc_metrics = self.hparams.ctc_stats()
        if stage != sb.Stage.TRAIN:
            self.per_metrics = self.hparams.per_stats()

    def on_stage_end(self, stage, stage_loss, epoch):
        """Gets called at the end of a stage: log, anneal LR, checkpoint."""
        if stage == sb.Stage.TRAIN:
            self.train_loss = stage_loss
        else:
            per = self.per_metrics.summarize("error_rate")
        if stage == sb.Stage.VALID:
            old_lr, new_lr = self.hparams.lr_annealing(per)
            sb.nnet.schedulers.update_learning_rate(self.optimizer, new_lr)
            self.hparams.train_logger.log_stats(
                stats_meta={"epoch": epoch, "lr": old_lr},
                train_stats={"loss": self.train_loss},
                valid_stats={"loss": stage_loss, "PER": per},
            )
            # Keep only the checkpoint(s) with the lowest PER.
            self.checkpointer.save_and_keep_only(
                meta={"PER": per}, min_keys=["PER"],
            )
        elif stage == sb.Stage.TEST:
            self.hparams.train_logger.log_stats(
                stats_meta={"Epoch loaded": self.hparams.epoch_counter.current},
                test_stats={"loss": stage_loss, "PER": per},
            )
            with open(self.hparams.per_file, "w") as w:
                w.write("CTC loss stats:\n")
                self.ctc_metrics.write_stats(w)
                w.write("\nPER stats:\n")
                self.per_metrics.write_stats(w)
                print("CTC and PER stats written to ", self.hparams.per_file)
def dataio_prep(hparams):
    """Creates the datasets and their data processing pipelines.

    Returns
    -------
    tuple
        (dict of DynamicItemDataset keyed by split, fitted CTCTextEncoder)
    """
    label_encoder = sb.dataio.encoder.CTCTextEncoder()
    # 1. Define audio pipeline (input_type picks clean_wav or noisy_wav):
    @sb.utils.data_pipeline.takes(hparams["input_type"])
    @sb.utils.data_pipeline.provides("sig")
    def audio_pipeline(wav):
        sig = sb.dataio.dataio.read_audio(wav)
        return sig
    # 2. Define text pipeline:
    @sb.utils.data_pipeline.takes("phones")
    @sb.utils.data_pipeline.provides("phn_list", "phn_encoded")
    def text_pipeline(phones):
        phn_list = phones.strip().split()
        yield phn_list
        phn_encoded = label_encoder.encode_sequence_torch(phn_list)
        yield phn_encoded
    # 3. Create datasets
    data = {}
    data_info = {
        "train": hparams["train_annotation"],
        "valid": hparams["valid_annotation"],
        "test": hparams["test_annotation"],
    }
    for dataset in data_info:
        data[dataset] = sb.dataio.dataset.DynamicItemDataset.from_json(
            json_path=data_info[dataset],
            replacements={"data_root": hparams["data_folder"]},
            dynamic_items=[audio_pipeline, text_pipeline],
            output_keys=["id", "sig", "phn_encoded"],
        )
    # Sort train dataset and ensure it doesn't get un-sorted
    if hparams["sorting"] == "ascending" or hparams["sorting"] == "descending":
        data["train"] = data["train"].filtered_sorted(
            sort_key="length", reverse=hparams["sorting"] == "descending",
        )
        hparams["dataloader_options"]["shuffle"] = False
    elif hparams["sorting"] != "random":
        raise NotImplementedError(
            "Sorting must be random, ascending, or descending"
        )
    # 4. Fit encoder:
    # Load or compute the label encoder (with multi-gpu dpp support)
    lab_enc_file = os.path.join(hparams["save_folder"], "label_encoder.txt")
    label_encoder.load_or_create(
        path=lab_enc_file,
        from_didatasets=[data["train"]],
        output_key="phn_list",
        special_labels={"blank_label": hparams["blank_index"]},
        sequence_input=True,
    )
    return data, label_encoder
# Begin Recipe!
if __name__ == "__main__":
    # Load hyperparameters file with command-line overrides
    hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])
    with open(hparams_file) as fin:
        hparams = load_hyperpyyaml(fin, overrides)
    # Initialize ddp (useful only for multi-GPU DDP training)
    sb.utils.distributed.ddp_init_group(run_opts)
    # Prepare data on one process
    from voicebank_prepare import prepare_voicebank  # noqa E402
    # Create experiment directory
    sb.create_experiment_directory(
        experiment_directory=hparams["output_folder"],
        hyperparams_to_save=hparams_file,
        overrides=overrides,
    )
    run_on_main(
        prepare_voicebank,
        kwargs={
            "data_folder": hparams["data_folder"],
            "save_folder": hparams["output_folder"],
            "skip_prep": hparams["skip_prep"],
        },
    )
    datasets, label_encoder = dataio_prep(hparams)
    # Load pretrained model
    if "pretrained" in hparams:
        # NOTE(review): torch.load unpickles the checkpoint, which can run
        # arbitrary code — only point "pretrained" at trusted files.
        state_dict = torch.load(hparams["pretrained"])
        hparams["modules"]["model"].load_state_dict(state_dict)
    asr_brain = ASR_Brain(
        modules=hparams["modules"],
        run_opts=run_opts,
        opt_class=hparams["opt_class"],
        hparams=hparams,
        checkpointer=hparams["checkpointer"],
    )
    asr_brain.label_encoder = label_encoder
    # Fit the data
    asr_brain.fit(
        epoch_counter=asr_brain.hparams.epoch_counter,
        train_set=datasets["train"],
        valid_set=datasets["valid"],
        train_loader_kwargs=hparams["dataloader_options"],
        valid_loader_kwargs=hparams["dataloader_options"],
    )
    # Test the checkpoint that does best on validation data (lowest PER)
    asr_brain.evaluate(
        datasets["test"],
        min_key="PER",
        test_loader_kwargs=hparams["dataloader_options"],
    )
| 7,651 | 33.781818 | 81 | py |
speechbrain | speechbrain-main/recipes/Voicebank/dereverb/spectral_mask/train.py | #!/usr/bin/env/python3
"""Recipe for training a speech enhancement system with the Voicebank dataset.
To run this recipe, do the following:
> python train.py hparams/{hyperparam_file}.yaml
Authors
* Szu-Wei Fu 2020
"""
import os
import sys
import torch
import torchaudio
import speechbrain as sb
from pesq import pesq
from hyperpyyaml import load_hyperpyyaml
from speechbrain.utils.metric_stats import MetricStats
from speechbrain.processing.features import spectral_magnitude
from speechbrain.nnet.loss.stoi_loss import stoi_loss
from speechbrain.utils.distributed import run_on_main
# Brain class for speech enhancement training
class SEBrain(sb.Brain):
    def compute_forward(self, batch, stage):
        """Forward computations from the waveform batches to the enhanced output.

        Returns (predicted log-magnitude spectrum, resynthesized waveform).
        """
        batch = batch.to(self.device)
        noisy_wavs, lens = batch.noisy_sig
        noisy_feats = self.compute_feats(noisy_wavs)
        # mask with "signal approximation (SA)"
        mask = self.modules.generator(noisy_feats, lengths=lens)
        mask = torch.squeeze(mask, 2)
        predict_spec = torch.mul(mask, noisy_feats)
        # Also return predicted wav; expm1 inverts the log1p compression
        # applied in compute_feats before resynthesis.
        predict_wav = self.hparams.resynth(
            torch.expm1(predict_spec), noisy_wavs
        )
        return predict_spec, predict_wav
    def compute_feats(self, wavs):
        """Feature computation pipeline: STFT -> 0.5-power magnitude -> log1p.

        compute_forward undoes the log1p with expm1 before resynthesis.
        """
        feats = self.hparams.compute_STFT(wavs)
        feats = spectral_magnitude(feats, power=0.5)
        feats = torch.log1p(feats)
        return feats
def compute_objectives(self, predictions, batch, stage):
"""Computes the loss given the predicted and targeted outputs"""
predict_spec, predict_wav = predictions
clean_wavs, lens = batch.clean_sig
if getattr(self.hparams, "waveform_target", False):
loss = self.hparams.compute_cost(predict_wav, clean_wavs, lens)
self.loss_metric.append(
batch.id, predict_wav, clean_wavs, lens, reduction="batch"
)
else:
clean_spec = self.compute_feats(clean_wavs)
loss = self.hparams.compute_cost(predict_spec, clean_spec, lens)
self.loss_metric.append(
batch.id, predict_spec, clean_spec, lens, reduction="batch"
)
if stage != sb.Stage.TRAIN:
# Evaluate speech quality/intelligibility
self.stoi_metric.append(
batch.id, predict_wav, clean_wavs, lens, reduction="batch"
)
self.pesq_metric.append(
batch.id, predict=predict_wav, target=clean_wavs, lengths=lens
)
# Write wavs to file
if stage == sb.Stage.TEST:
lens = lens * clean_wavs.shape[1]
for name, pred_wav, length in zip(batch.id, predict_wav, lens):
name += ".wav"
enhance_path = os.path.join(
self.hparams.enhanced_folder, name
)
torchaudio.save(
enhance_path,
torch.unsqueeze(pred_wav[: int(length)].cpu(), 0),
16000,
)
return loss
def on_stage_start(self, stage, epoch=None):
"""Gets called at the beginning of each epoch"""
self.loss_metric = MetricStats(metric=self.hparams.compute_cost)
# Define function taking (prediction, target) for parallel eval
def pesq_eval(pred_wav, target_wav):
"""Computes the PESQ evaluation metric"""
return pesq(
fs=16000,
ref=target_wav.numpy(),
deg=pred_wav.numpy(),
mode="wb",
)
if stage != sb.Stage.TRAIN:
self.pesq_metric = MetricStats(
metric=pesq_eval, n_jobs=hparams["n_jobs"], batch_eval=False
)
self.stoi_metric = MetricStats(metric=stoi_loss)
def on_stage_end(self, stage, stage_loss, epoch=None):
"""Gets called at the end of an epoch."""
if stage == sb.Stage.TRAIN:
self.train_loss = stage_loss
self.train_stats = {"loss": self.loss_metric.scores}
else:
stats = {
"loss": stage_loss,
"pesq": self.pesq_metric.summarize("average"),
"stoi": -self.stoi_metric.summarize("average"),
}
if stage == sb.Stage.VALID:
if self.hparams.use_tensorboard:
valid_stats = {
"loss": stage_loss,
"stoi": -self.stoi_metric.summarize("average"),
"pesq": self.pesq_metric.summarize("average"),
}
self.hparams.tensorboard_train_logger.log_stats(valid_stats)
self.hparams.train_logger.log_stats(
{"Epoch": epoch},
train_stats={"loss": self.train_loss},
valid_stats=stats,
)
self.checkpointer.save_and_keep_only(meta=stats, max_keys=["pesq"])
if stage == sb.Stage.TEST:
self.hparams.train_logger.log_stats(
{"Epoch loaded": self.hparams.epoch_counter.current},
test_stats=stats,
)
def init_optimizers(self):
"Initializes the generator and discriminator optimizers"
self.optimizer = self.hparams.g_opt_class(
self.modules.generator.parameters()
)
def dataio_prep(hparams):
    """This function prepares the datasets to be used in the brain class.
    It also defines the data processing pipeline through user-defined functions.

    Arguments
    ---------
    hparams : dict
        Loaded hyperparameters; must provide the *_annotation paths,
        data_folder, sorting and dataloader_options keys.

    Returns
    -------
    dict
        "train", "valid" and "test" DynamicItemDatasets.
    """
    # Define audio pipelines
    @sb.utils.data_pipeline.takes("noisy_wav")
    @sb.utils.data_pipeline.provides("noisy_sig")
    def noisy_pipeline(noisy_wav):
        return sb.dataio.dataio.read_audio(noisy_wav)

    @sb.utils.data_pipeline.takes("clean_wav")
    @sb.utils.data_pipeline.provides("clean_sig")
    def clean_pipeline(clean_wav):
        return sb.dataio.dataio.read_audio(clean_wav)

    # Define datasets
    datasets = {}
    data_info = {
        "train": hparams["train_annotation"],
        "valid": hparams["valid_annotation"],
        "test": hparams["test_annotation"],
    }
    for dataset in data_info:
        datasets[dataset] = sb.dataio.dataset.DynamicItemDataset.from_json(
            json_path=data_info[dataset],
            replacements={"data_root": hparams["data_folder"]},
            dynamic_items=[noisy_pipeline, clean_pipeline],
            output_keys=["id", "noisy_sig", "clean_sig"],
        )
    # Sort train dataset; sorting disables shuffling so batches stay ordered
    if hparams["sorting"] == "ascending" or hparams["sorting"] == "descending":
        datasets["train"] = datasets["train"].filtered_sorted(
            sort_key="length", reverse=hparams["sorting"] == "descending"
        )
        hparams["dataloader_options"]["shuffle"] = False
    elif hparams["sorting"] != "random":
        raise NotImplementedError(
            "Sorting must be random, ascending, or descending"
        )
    return datasets
def create_folder(folder):
    """Create ``folder`` (and any missing parents) if it does not exist.

    Uses ``exist_ok=True`` so concurrent callers (e.g. multiple DDP
    processes) cannot race between the existence check and the creation.
    """
    os.makedirs(folder, exist_ok=True)
# Recipe begins!
if __name__ == "__main__":
    # Load hyperparameters file with command-line overrides
    hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])
    with open(hparams_file) as fin:
        hparams = load_hyperpyyaml(fin, overrides)
    # Initialize ddp (useful only for multi-GPU DDP training)
    sb.utils.distributed.ddp_init_group(run_opts)
    # Data preparation; run_on_main ensures only rank 0 prepares the data
    from voicebank_revb_prepare import prepare_voicebank  # noqa

    run_on_main(
        prepare_voicebank,
        kwargs={
            "data_folder": hparams["data_folder"],
            "save_folder": hparams["output_folder"],
            "skip_prep": hparams["skip_prep"],
        },
    )
    # Create dataset objects
    datasets = dataio_prep(hparams)
    # Create experiment directory
    sb.create_experiment_directory(
        experiment_directory=hparams["output_folder"],
        hyperparams_to_save=hparams_file,
        overrides=overrides,
    )
    if hparams["use_tensorboard"]:
        from speechbrain.utils.train_logger import TensorboardLogger

        hparams["tensorboard_train_logger"] = TensorboardLogger(
            hparams["tensorboard_logs"]
        )
    # Create the folder to save enhanced files (+ support for DDP)
    run_on_main(create_folder, kwargs={"folder": hparams["enhanced_folder"]})
    se_brain = SEBrain(
        modules=hparams["modules"],
        opt_class=hparams["g_opt_class"],
        hparams=hparams,
        run_opts=run_opts,
        checkpointer=hparams["checkpointer"],
    )
    # Load latest checkpoint to resume training
    se_brain.fit(
        epoch_counter=se_brain.hparams.epoch_counter,
        train_set=datasets["train"],
        valid_set=datasets["valid"],
        train_loader_kwargs=hparams["dataloader_options"],
        valid_loader_kwargs=hparams["dataloader_options"],
    )
    # Load best checkpoint (highest PESQ) for evaluation
    test_stats = se_brain.evaluate(
        test_set=datasets["test"],
        max_key="pesq",
        test_loader_kwargs=hparams["dataloader_options"],
    )
| 9,295 | 33.686567 | 84 | py |
speechbrain | speechbrain-main/recipes/Voicebank/dereverb/MetricGAN-U/train.py | #!/usr/bin/env/python3
"""
Recipe for training MetricGAN-U (Unsupervised) with the Voicebank dataset.
To run this recipe, do the following:
> python train.py hparams/{hyperparam_file}.yaml
Authors
* Szu-Wei Fu 2021/09
"""
import os
import sys
import shutil
import torch
import torchaudio
import speechbrain as sb
import numpy as np
import json
import pickle
import requests
import time
from urllib.parse import urlparse, urljoin
from srmrpy import srmr
from pesq import pesq
from enum import Enum, auto
from hyperpyyaml import load_hyperpyyaml
from speechbrain.utils.metric_stats import MetricStats
from speechbrain.processing.features import spectral_magnitude
from speechbrain.nnet.loss.stoi_loss import stoi_loss
from speechbrain.utils.distributed import run_on_main
from speechbrain.dataio.sampler import ReproducibleWeightedRandomSampler
### For DNSMSOS
# URL for the web service
SCORING_URI = "https://dnsmos-4.azurewebsites.net/score"
# If the service is authenticated, set the key or token
AUTH_KEY = ""
if AUTH_KEY == "":
    print(
        "To access DNSMOS, you have to ask the key from the DNS organizer: dns_challenge@microsoft.com"
    )
# Set the content type
headers = {"Content-Type": "application/json"}
# If authentication is enabled, set the authorization header.
# NOTE(review): with an empty AUTH_KEY this still sends "Basic " — requests
# to the scoring service will fail until a key is filled in above.
headers["Authorization"] = f"Basic {AUTH_KEY }"
def sigmoid(x):
    """Logistic function 1 / (1 + e^-x), elementwise for array inputs."""
    return 1.0 / (1.0 + np.exp(-x))
def pesq_eval(predict, target):
    """Normalized PESQ (to 0-1).

    Computes wide-band PESQ at 16 kHz between tensors ``predict`` and
    ``target``, then maps the raw score via (score + 0.5) / 5.
    """
    return (
        pesq(fs=16000, ref=target.numpy(), deg=predict.numpy(), mode="wb") + 0.5
    ) / 5
def srmrpy_eval(predict, target):
    """ Note target_wav is not used in the srmr function !!!
    Normalize the score to 0~1 for training.

    The raw SRMR value is scaled by 0.1 and squashed with a sigmoid so the
    discriminator target lies in (0, 1).
    """
    return float(
        sigmoid(
            0.1
            * srmr(
                predict.numpy(),
                fs=16000,
                n_cochlear_filters=23,
                low_freq=125,
                min_cf=4,
                max_cf=128,
                fast=True,
                norm=False,
            )[0]
        )
    )
def srmrpy_eval_valid(predict, target):
    """ Note target_wav is not used in the srmr function !!!
    Show the unnormalized score for valid and test set.

    Same SRMR configuration as ``srmrpy_eval`` but without the
    sigmoid normalization, for human-readable reporting.
    """
    return float(
        srmr(
            predict.numpy(),
            fs=16000,
            n_cochlear_filters=23,
            low_freq=125,
            min_cf=4,
            max_cf=128,
            fast=True,
            norm=False,
        )[0]
    )
def dnsmos_eval(predict, target):
    """ Note target_wav is not used in the dnsmos function !!!
    Normalize the score to 0~1 for training.

    Sends the peak-normalized waveform to the remote DNSMOS service and
    sigmoid-squashes the returned MOS.
    NOTE(review): the retry loop below never gives up — if the service is
    permanently unreachable this blocks forever, printing every 10 s.
    """
    pred_wav = predict
    pred_wav = pred_wav.numpy()
    pred_wav = pred_wav / max(abs(pred_wav))
    data = {"data": pred_wav.tolist()}
    input_data = json.dumps(data)
    while True:
        try:
            u = urlparse(SCORING_URI)
            resp = requests.post(
                urljoin("https://" + u.netloc, "score"),
                data=input_data,
                headers=headers,
            )
            score_dict = resp.json()
            score = float(
                sigmoid(score_dict["mos"])
            )  # normalize the score to 0~1
            break
        except Exception as e:  # sometimes, access the dnsmos server too ofen may disable the service.
            print(e)
            time.sleep(10)  # wait for 10 secs
    return score
def dnsmos_eval_valid(predict, target):
    """ Note target_wav is not used in the dnsmos function !!!
    Show the unnormalized score for valid and test set.

    Same remote call as ``dnsmos_eval`` but returns the raw MOS.
    NOTE(review): the retry loop never gives up on persistent failures.
    """
    pred_wav = predict
    pred_wav = pred_wav.numpy()
    pred_wav = pred_wav / max(abs(pred_wav))
    data = {"data": pred_wav.tolist()}
    input_data = json.dumps(data)
    while True:
        try:
            u = urlparse(SCORING_URI)
            resp = requests.post(
                urljoin("https://" + u.netloc, "score"),
                data=input_data,
                headers=headers,
            )
            score_dict = resp.json()
            score = float(score_dict["mos"])
            break
        except Exception as e:  # sometimes, access the dnsmos server too ofen may disable the service.
            print(e)
            time.sleep(10)  # wait for 10 secs
    return score
class SubStage(Enum):
    """Identifies which sub-stage of MetricGAN training is running."""

    GENERATOR = auto()  # generator update pass
    CURRENT = auto()  # discriminator pass over the current epoch's data
    HISTORICAL = auto()  # discriminator pass over historical enhanced data
class MetricGanBrain(sb.Brain):
    def load_history(self):
        """Restore the historical enhanced-sample set from disk, if present."""
        if os.path.isfile(self.hparams.historical_file):
            with open(self.hparams.historical_file, "rb") as fp:  # Unpickling
                self.historical_set = pickle.load(fp)
    def compute_feats(self, wavs):
        """Feature computation pipeline: STFT -> magnitude with power=0.5.

        Unlike the spectral-mask recipe, no log compression is applied here.
        """
        feats = self.hparams.compute_STFT(wavs)
        spec = spectral_magnitude(feats, power=0.5)
        return spec
    def compute_forward(self, batch, stage):
        "Given an input batch computes the enhanced signal"
        batch = batch.to(self.device)
        if self.sub_stage == SubStage.HISTORICAL:
            # Historical samples are already enhanced; just return them
            predict_wav, lens = batch.enh_sig
            return predict_wav
        else:
            noisy_wav, lens = batch.noisy_sig
            noisy_spec = self.compute_feats(noisy_wav)
            # Mask is floored at min_mask to avoid zeroing out content
            mask = self.modules.generator(noisy_spec, lengths=lens)
            mask = mask.clamp(min=self.hparams.min_mask).squeeze(2)
            predict_spec = torch.mul(mask, noisy_spec)
            # Also return predicted wav
            predict_wav = self.hparams.resynth(predict_spec, noisy_wav)
        return predict_wav, mask
    def compute_objectives(self, predictions, batch, stage, optim_name=""):
        """Given the network predictions and targets compute the total loss.

        ``optim_name`` selects the role: "generator" (G step with MSE
        regularizer toward the noisy spectrum), "D_enh" (D on enhanced
        speech, current or historical), or "D_noisy" (D on noisy speech).
        """
        if self.sub_stage == SubStage.HISTORICAL:
            predict_wav = predictions
        else:
            predict_wav, mask = predictions
        predict_spec = self.compute_feats(predict_wav)
        ids = self.compute_ids(batch.id, optim_name)
        if self.sub_stage != SubStage.HISTORICAL:
            noisy_wav, lens = batch.noisy_sig
        if optim_name == "generator":
            est_score = self.est_score(predict_spec)
            # G is pushed toward the fixed target_score for every sample
            target_score = self.hparams.target_score * torch.ones(
                self.batch_size, 1, device=self.device
            )
            noisy_wav, lens = batch.noisy_sig
            noisy_spec = self.compute_feats(noisy_wav)
            mse_cost = self.hparams.compute_cost(predict_spec, noisy_spec, lens)
        # D Learns to estimate the scores of enhanced speech
        elif optim_name == "D_enh" and self.sub_stage == SubStage.CURRENT:
            target_score = self.score(
                ids, predict_wav, predict_wav, lens
            )  # no clean_wav is needed
            est_score = self.est_score(predict_spec)
            # Write enhanced wavs during discriminator training, because we
            # compute the actual score here and we can save it
            self.write_wavs(ids, predict_wav, target_score, lens)
        # D Relearns to estimate the scores of previous epochs
        elif optim_name == "D_enh" and self.sub_stage == SubStage.HISTORICAL:
            target_score = batch.score.unsqueeze(1).float()
            est_score = self.est_score(predict_spec)
        # D Learns to estimate the scores of noisy speech
        elif optim_name == "D_noisy":
            noisy_spec = self.compute_feats(noisy_wav)
            target_score = self.score(
                ids, noisy_wav, noisy_wav, lens
            )  # no clean_wav is needed
            est_score = self.est_score(noisy_spec)
            # Save scores of noisy wavs
            self.save_noisy_scores(ids, target_score)
        if stage == sb.Stage.TRAIN:
            # Compute the cost
            cost = self.hparams.compute_cost(est_score, target_score)
            if optim_name == "generator":
                cost += self.hparams.mse_weight * mse_cost
                self.metrics["G"].append(cost.detach())
            else:
                self.metrics["D"].append(cost.detach())
        # Compute scores on validation data
        if stage != sb.Stage.TRAIN:
            clean_wav, lens = batch.clean_sig
            cost = self.hparams.compute_si_snr(predict_wav, clean_wav, lens)
            # Evaluate speech quality/intelligibility
            self.stoi_metric.append(
                batch.id, predict_wav, clean_wav, lens, reduction="batch"
            )
            self.pesq_metric.append(
                batch.id, predict=predict_wav, target=clean_wav, lengths=lens
            )
            self.srmr_metric.append(
                batch.id,
                predict=predict_wav,
                target=predict_wav,
                lengths=lens,  # no clean_wav is needed
            )
            if (
                self.hparams.calculate_dnsmos_on_validation_set
            ):  # Note: very time consuming........
                self.dnsmos_metric.append(
                    batch.id,
                    predict=predict_wav,
                    target=predict_wav,
                    lengths=lens,  # no clean_wav is needed
                )
            # Write wavs to file, for evaluation
            lens = lens * clean_wav.shape[1]
            for name, pred_wav, length in zip(batch.id, predict_wav, lens):
                name += ".wav"
                enhance_path = os.path.join(self.hparams.enhanced_folder, name)
                torchaudio.save(
                    enhance_path,
                    torch.unsqueeze(pred_wav[: int(length)].cpu(), 0),
                    16000,
                )
        return cost
def compute_ids(self, batch_id, optim_name):
"""Returns the list of ids, edited via optimizer name."""
if optim_name == "D_enh":
return [f"{uid}@{self.epoch}" for uid in batch_id]
return batch_id
def save_noisy_scores(self, batch_id, scores):
for i, score in zip(batch_id, scores):
self.noisy_scores[i] = score
def score(self, batch_id, deg_wav, ref_wav, lens):
"""Returns actual metric score, either pesq or stoi
Arguments
---------
batch_id : list of str
A list of the utterance ids for the batch
deg_wav : torch.Tensor
The degraded waveform to score
ref_wav : torch.Tensor
The reference waveform to use for scoring
length : torch.Tensor
The relative lengths of the utterances
"""
new_ids = [
i
for i, d in enumerate(batch_id)
if d not in self.historical_set and d not in self.noisy_scores
]
if len(new_ids) == 0:
pass
elif self.hparams.target_metric == "srmr" or "dnsmos":
self.target_metric.append(
ids=[batch_id[i] for i in new_ids],
predict=deg_wav[new_ids].detach(),
target=ref_wav[
new_ids
].detach(), # target is not used in the function !!!
lengths=lens[new_ids],
)
score = torch.tensor(
[[s] for s in self.target_metric.scores], device=self.device,
)
else:
raise ValueError("Expected 'srmr' or 'dnsmos' for target_metric")
# Clear metric scores to prepare for next batch
self.target_metric.clear()
# Combine old scores and new
final_score = []
for i, d in enumerate(batch_id):
if d in self.historical_set:
final_score.append([self.historical_set[d]["score"]])
elif d in self.noisy_scores:
final_score.append([self.noisy_scores[d]])
else:
final_score.append([score[new_ids.index(i)]])
return torch.tensor(final_score, device=self.device)
def est_score(self, deg_spec):
"""Returns score as estimated by discriminator
Arguments
---------
deg_spec : torch.Tensor
The spectral features of the degraded utterance
ref_spec : torch.Tensor
The spectral features of the reference utterance
"""
"""
combined_spec = torch.cat(
[deg_spec.unsqueeze(1), ref_spec.unsqueeze(1)], 1
)
"""
return self.modules.discriminator(deg_spec.unsqueeze(1))
def write_wavs(self, batch_id, wavs, score, lens):
"""Write wavs to files, for historical discriminator training
Arguments
---------
batch_id : list of str
A list of the utterance ids for the batch
wavs : torch.Tensor
The wavs to write to files
score : torch.Tensor
The actual scores for the corresponding utterances
lens : torch.Tensor
The relative lengths of each utterance
"""
lens = lens * wavs.shape[1]
record = {}
for i, (name, pred_wav, length) in enumerate(zip(batch_id, wavs, lens)):
path = os.path.join(self.hparams.MetricGAN_folder, name + ".wav")
data = torch.unsqueeze(pred_wav[: int(length)].cpu(), 0)
torchaudio.save(path, data, self.hparams.Sample_rate)
# Make record of path and score for historical training
score = float(score[i][0])
record[name] = {
"enh_wav": path,
"score": score,
}
# Update records for historical training
self.historical_set.update(record)
with open(self.hparams.historical_file, "wb") as fp: # Pickling
pickle.dump(self.historical_set, fp)
    def fit_batch(self, batch):
        "Compute gradients and update either D or G based on sub-stage."
        predictions = self.compute_forward(batch, sb.Stage.TRAIN)
        loss_tracker = 0
        if self.sub_stage == SubStage.CURRENT:
            # One D update per data mode (enhanced, noisy)
            for mode in ["enh", "noisy"]:
                loss = self.compute_objectives(
                    predictions, batch, sb.Stage.TRAIN, f"D_{mode}"
                )
                self.d_optimizer.zero_grad()
                loss.backward()
                if self.check_gradients(loss):
                    self.d_optimizer.step()
                # NOTE(review): division by 3, though this loop only has two
                # modes — presumably averaging over the three D passes per
                # epoch; confirm intent.
                loss_tracker += loss.detach() / 3
        elif self.sub_stage == SubStage.HISTORICAL:
            loss = self.compute_objectives(
                predictions, batch, sb.Stage.TRAIN, "D_enh"
            )
            self.d_optimizer.zero_grad()
            loss.backward()
            if self.check_gradients(loss):
                self.d_optimizer.step()
            loss_tracker += loss.detach()
        elif self.sub_stage == SubStage.GENERATOR:
            for name, param in self.modules.generator.named_parameters():
                if "Learnable_sigmoid" in name:
                    param.data = torch.clamp(
                        param, max=3.5
                    )  # to prevent gradient goes to infinity
            loss = self.compute_objectives(
                predictions, batch, sb.Stage.TRAIN, "generator"
            )
            self.g_optimizer.zero_grad()
            loss.backward()
            if self.check_gradients(loss):
                self.g_optimizer.step()
            loss_tracker += loss.detach()
        return loss_tracker
    def on_stage_start(self, stage, epoch=None):
        """Gets called at the beginning of each epoch

        This method calls ``fit()`` again to train the discriminator
        before proceeding with generator training.
        """
        self.metrics = {"G": [], "D": []}
        if stage == sb.Stage.TRAIN:
            # NOTE(review): `hparams["n_jobs"]` below reads the module-level
            # global, not self.hparams — relies on the __main__ block.
            if self.hparams.target_metric == "srmr":
                self.target_metric = MetricStats(
                    metric=srmrpy_eval,
                    n_jobs=hparams["n_jobs"],
                    batch_eval=False,
                )
            elif self.hparams.target_metric == "dnsmos":
                self.target_metric = MetricStats(
                    metric=dnsmos_eval,
                    n_jobs=hparams["n_jobs"],
                    batch_eval=False,
                )
            else:
                raise NotImplementedError(
                    "Right now we only support 'srmr' and 'dnsmos'"
                )
            # Train discriminator before we start generator training
            if self.sub_stage == SubStage.GENERATOR:
                self.epoch = epoch
                self.train_discriminator()
                self.sub_stage = SubStage.GENERATOR
                print("Generator training by current data...")
        if stage != sb.Stage.TRAIN:
            self.pesq_metric = MetricStats(
                metric=pesq_eval, n_jobs=hparams["n_jobs"], batch_eval=False
            )
            self.stoi_metric = MetricStats(metric=stoi_loss)
            self.srmr_metric = MetricStats(
                metric=srmrpy_eval_valid,
                n_jobs=hparams["n_jobs"],
                batch_eval=False,
            )
            self.dnsmos_metric = MetricStats(
                metric=dnsmos_eval_valid,
                n_jobs=hparams["n_jobs"],
                batch_eval=False,
            )
    def train_discriminator(self):
        """A total of 3 data passes to update discriminator."""
        # First, iterate train subset w/ updates for enh, noisy
        print("Discriminator training by current data...")
        self.sub_stage = SubStage.CURRENT
        self.fit(
            range(1),
            self.train_set,
            train_loader_kwargs=self.hparams.dataloader_options,
        )
        # Next, iterate historical subset w/ updates for enh
        if self.historical_set:
            print("Discriminator training by historical data...")
            self.sub_stage = SubStage.HISTORICAL
            self.fit(
                range(1),
                self.historical_set,
                train_loader_kwargs=self.hparams.dataloader_options,
            )
        # Finally, iterate train set again. Should iterate same
        # samples as before, due to ReproducibleRandomSampler
        print("Discriminator training by current data again...")
        self.sub_stage = SubStage.CURRENT
        self.fit(
            range(1),
            self.train_set,
            train_loader_kwargs=self.hparams.dataloader_options,
        )
    def on_stage_end(self, stage, stage_loss, epoch=None):
        "Called at the end of each stage to summarize progress"
        # fit() is re-entered for D sub-stages; only summarize at G stage
        if self.sub_stage != SubStage.GENERATOR:
            return
        if stage == sb.Stage.TRAIN:
            self.train_loss = stage_loss
            g_loss = torch.tensor(self.metrics["G"])  # batch_size
            d_loss = torch.tensor(self.metrics["D"])  # batch_size
            print("Avg G loss: %.3f" % torch.mean(g_loss))
            print("Avg D loss: %.3f" % torch.mean(d_loss))
        else:
            # pesq_eval normalized to 0-1; invert the (x + 0.5) / 5 mapping
            if self.hparams.calculate_dnsmos_on_validation_set:
                stats = {
                    "SI-SNR": -stage_loss,
                    "pesq": 5 * self.pesq_metric.summarize("average") - 0.5,
                    "stoi": -self.stoi_metric.summarize("average"),
                    "srmr": self.srmr_metric.summarize("average"),
                    "dnsmos": self.dnsmos_metric.summarize("average"),
                }
            else:
                stats = {
                    "SI-SNR": -stage_loss,
                    "pesq": 5 * self.pesq_metric.summarize("average") - 0.5,
                    "stoi": -self.stoi_metric.summarize("average"),
                    "srmr": self.srmr_metric.summarize("average"),
                }
        if stage == sb.Stage.VALID:
            # Anneal the generator LR on (5 - pesq), i.e. remaining headroom
            old_lr, new_lr = self.hparams.lr_annealing(5.0 - stats["pesq"])
            sb.nnet.schedulers.update_learning_rate(self.g_optimizer, new_lr)
            if self.hparams.use_tensorboard:
                if (
                    self.hparams.calculate_dnsmos_on_validation_set
                ):  # Note: very time consuming........
                    valid_stats = {
                        "SI-SNR": -stage_loss,
                        "pesq": 5 * self.pesq_metric.summarize("average") - 0.5,
                        "stoi": -self.stoi_metric.summarize("average"),
                        "srmr": self.srmr_metric.summarize("average"),
                        "dnsmos": self.dnsmos_metric.summarize("average"),
                    }
                else:
                    valid_stats = {
                        "SI-SNR": -stage_loss,
                        "pesq": 5 * self.pesq_metric.summarize("average") - 0.5,
                        "stoi": -self.stoi_metric.summarize("average"),
                        "srmr": self.srmr_metric.summarize("average"),
                    }
                self.hparams.tensorboard_train_logger.log_stats(
                    {"lr": old_lr}, valid_stats
                )
            self.hparams.train_logger.log_stats(
                {"Epoch": epoch, "lr": old_lr},
                train_stats={"loss": self.train_loss},
                valid_stats=stats,
            )
            self.checkpointer.save_and_keep_only(meta=stats, max_keys=["pesq"])
        if stage == sb.Stage.TEST:
            self.hparams.train_logger.log_stats(
                {"Epoch loaded": self.hparams.epoch_counter.current},
                test_stats=stats,
            )
    def make_dataloader(
        self, dataset, stage, ckpt_prefix="dataloader-", **loader_kwargs
    ):
        "Override dataloader to insert custom sampler/dataset"
        if stage == sb.Stage.TRAIN:
            # Create a new dataset each time, this set grows
            if self.sub_stage == SubStage.HISTORICAL:
                dataset = sb.dataio.dataset.DynamicItemDataset(
                    data=dataset,
                    dynamic_items=[enh_pipeline],
                    output_keys=["id", "enh_sig", "score"],
                )
                samples = round(len(dataset) * self.hparams.history_portion)
            else:
                samples = self.hparams.number_of_samples
            # This sampler should give the same samples for D and G
            epoch = self.hparams.epoch_counter.current
            # Equal weights for all samples, we use "Weighted" so we can do
            # both "replacement=False" and a set number of samples, reproducibly
            weights = torch.ones(len(dataset))
            sampler = ReproducibleWeightedRandomSampler(
                weights, epoch=epoch, replacement=False, num_samples=samples
            )
            loader_kwargs["sampler"] = sampler
            if self.sub_stage == SubStage.GENERATOR:
                self.train_sampler = sampler
        # Make the dataloader as normal
        return super().make_dataloader(
            dataset, stage, ckpt_prefix, **loader_kwargs
        )
    def on_fit_start(self):
        "Override to prevent this from running for D training"
        # Checkpoint recovery etc. should happen once, at the G stage only
        if self.sub_stage == SubStage.GENERATOR:
            super().on_fit_start()
    def init_optimizers(self):
        "Initializes the generator and discriminator optimizers"
        self.g_optimizer = self.hparams.g_opt_class(
            self.modules.generator.parameters()
        )
        self.d_optimizer = self.hparams.d_opt_class(
            self.modules.discriminator.parameters()
        )
        # Register both optimizers so their state survives checkpointing
        if self.checkpointer is not None:
            self.checkpointer.add_recoverable("g_opt", self.g_optimizer)
            self.checkpointer.add_recoverable("d_opt", self.d_optimizer)
    def zero_grad(self, set_to_none=False):
        """Zero the gradients of both optimizers (G and D)."""
        self.g_optimizer.zero_grad(set_to_none)
        self.d_optimizer.zero_grad(set_to_none)
# Define audio piplines for training set
@sb.utils.data_pipeline.takes("noisy_wav")
@sb.utils.data_pipeline.provides("noisy_sig")
def audio_pipeline_train(noisy_wav):
    """Load the noisy waveform for training items."""
    yield sb.dataio.dataio.read_audio(noisy_wav)
# Define audio piplines for validation/test set
@sb.utils.data_pipeline.takes("noisy_wav", "clean_wav")
@sb.utils.data_pipeline.provides("noisy_sig", "clean_sig")
def audio_pipeline_valid(noisy_wav, clean_wav):
    """Load noisy and clean waveforms for validation/test items."""
    yield sb.dataio.dataio.read_audio(noisy_wav)
    yield sb.dataio.dataio.read_audio(clean_wav)
# For historical data
@sb.utils.data_pipeline.takes("enh_wav")
@sb.utils.data_pipeline.provides("enh_sig")
def enh_pipeline(enh_wav):
    """Load a previously-enhanced waveform (historical D training)."""
    yield sb.dataio.dataio.read_audio(enh_wav)
def dataio_prep(hparams):
    """This function prepares the datasets to be used in the brain class.

    Train items carry only noisy audio (unsupervised); valid/test also
    carry clean references for metric computation.
    """
    # Define datasets
    datasets = {}
    datasets["train"] = sb.dataio.dataset.DynamicItemDataset.from_json(
        json_path=hparams["train_annotation"],
        replacements={"data_root": hparams["data_folder"]},
        dynamic_items=[audio_pipeline_train],
        output_keys=["id", "noisy_sig"],
    )
    datasets["valid"] = sb.dataio.dataset.DynamicItemDataset.from_json(
        json_path=hparams["valid_annotation"],
        replacements={"data_root": hparams["data_folder"]},
        dynamic_items=[audio_pipeline_valid],
        output_keys=["id", "noisy_sig", "clean_sig"],
    )
    datasets["test"] = sb.dataio.dataset.DynamicItemDataset.from_json(
        json_path=hparams["test_annotation"],
        replacements={"data_root": hparams["data_folder"]},
        dynamic_items=[audio_pipeline_valid],
        output_keys=["id", "noisy_sig", "clean_sig"],
    )
    return datasets
def create_folder(folder):
    """Create ``folder`` (and any missing parents) if it does not exist.

    Uses ``exist_ok=True`` so concurrent callers (e.g. multiple DDP
    processes) cannot race between the existence check and the creation.
    """
    os.makedirs(folder, exist_ok=True)
# Recipe begins!
if __name__ == "__main__":
    # Load hyperparameters file with command-line overrides
    hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])
    with open(hparams_file) as fin:
        hparams = load_hyperpyyaml(fin, overrides)
    # Initialize ddp (useful only for multi-GPU DDP training)
    sb.utils.distributed.ddp_init_group(run_opts)
    # Data preparation
    from voicebank_revb_prepare import prepare_voicebank  # noqa

    # NOTE(review): save_folder is data_folder here, whereas the sibling
    # spectral_mask recipe saves to output_folder — confirm this is intended.
    run_on_main(
        prepare_voicebank,
        kwargs={
            "data_folder": hparams["data_folder"],
            "save_folder": hparams["data_folder"],
            "skip_prep": hparams["skip_prep"],
        },
    )
    # Create dataset objects
    datasets = dataio_prep(hparams)
    # Create experiment directory
    sb.create_experiment_directory(
        experiment_directory=hparams["output_folder"],
        hyperparams_to_save=hparams_file,
        overrides=overrides,
    )
    if hparams["use_tensorboard"]:
        from speechbrain.utils.train_logger import TensorboardLogger

        hparams["tensorboard_train_logger"] = TensorboardLogger(
            hparams["tensorboard_logs"]
        )
    # Create the folder to save enhanced files (+ support for DDP)
    run_on_main(create_folder, kwargs={"folder": hparams["enhanced_folder"]})
    se_brain = MetricGanBrain(
        modules=hparams["modules"],
        hparams=hparams,
        run_opts=run_opts,
        checkpointer=hparams["checkpointer"],
    )
    se_brain.train_set = datasets["train"]
    se_brain.historical_set = {}
    se_brain.noisy_scores = {}
    se_brain.batch_size = hparams["dataloader_options"]["batch_size"]
    se_brain.sub_stage = SubStage.GENERATOR
    # Fresh start: discard stale enhanced wavs when there is no history file.
    # BUGFIX: ignore_errors=True — on the very first run the folder does not
    # exist yet and a plain rmtree would raise FileNotFoundError.
    if not os.path.isfile(hparams["historical_file"]):
        shutil.rmtree(hparams["MetricGAN_folder"], ignore_errors=True)
    run_on_main(create_folder, kwargs={"folder": hparams["MetricGAN_folder"]})
    se_brain.load_history()
    # Load latest checkpoint to resume training
    se_brain.fit(
        epoch_counter=se_brain.hparams.epoch_counter,
        train_set=datasets["train"],
        valid_set=datasets["valid"],
        train_loader_kwargs=hparams["dataloader_options"],
        valid_loader_kwargs=hparams["valid_dataloader_options"],
    )
    # Load best checkpoint (highest PESQ) for evaluation
    test_stats = se_brain.evaluate(
        test_set=datasets["test"],
        max_key="pesq",
        test_loader_kwargs=hparams["dataloader_options"],
    )
| 28,023 | 34.60864 | 103 | py |
speechbrain | speechbrain-main/recipes/LibriTTS/libritts_prepare.py | from speechbrain.utils.data_utils import get_all_files, download_file
from speechbrain.processing.speech_augmentation import Resample
import json
import os
import shutil
import random
import logging
import torchaudio
logger = logging.getLogger(__name__)
LIBRITTS_URL_PREFIX = "https://www.openslr.org/resources/60/"
def prepare_libritts(
    data_folder,
    save_json_train,
    save_json_valid,
    save_json_test,
    sample_rate,
    split_ratio=None,
    libritts_subsets=None,
):
    """
    Prepares the json files for the LibriTTS dataset.
    Downloads the dataset if it is not found in the `data_folder` as expected.

    Arguments
    ---------
    data_folder : str
        Path to the folder where the LibriTTS dataset is stored.
    save_json_train : str
        Path where the train data specification file will be saved.
    save_json_valid : str
        Path where the validation data specification file will be saved.
    save_json_test : str
        Path where the test data specification file will be saved.
    sample_rate : int
        The sample rate to be used for the dataset
    split_ratio : list
        List composed of three integers that sets split ratios for train,
        valid, and test sets, respectively. Defaults to [80, 10, 10].
    libritts_subsets: list
        List of librispeech subsets to use (e.g., dev-clean,
        train-clean-100, ...). Defaults to ["train-clean-100"].

    Example
    -------
    >>> data_folder = '/path/to/LibriTTS'
    >>> prepare_libritts(data_folder, 'train.json', 'valid.json', 'test.json', 2050)
    """
    # BUGFIX: avoid mutable default arguments (shared across calls)
    if split_ratio is None:
        split_ratio = [80, 10, 10]
    if libritts_subsets is None:
        libritts_subsets = ["train-clean-100"]

    # Checks if this phase is already done (if so, skips it)
    if skip(save_json_train, save_json_valid, save_json_test):
        logger.info("Preparation completed in previous run, skipping.")
        return

    extension = [".wav"]  # The expected extension for audio files
    wav_list = list()  # Stores all audio file paths for the dataset

    # For every subset of the dataset, download/unpack it when missing
    for subset_name in libritts_subsets:
        subset_folder = os.path.join(data_folder, subset_name)
        subset_archive = os.path.join(subset_folder, subset_name + ".tar.gz")
        subset_data = os.path.join(subset_folder, "LibriTTS")
        if not check_folders(subset_data):
            logger.info(
                f"No data found for {subset_name}. Checking for an archive file."
            )
            if not os.path.isfile(subset_archive):
                logger.info(
                    f"No archive file found for {subset_name}. Downloading and unpacking."
                )
                subset_url = LIBRITTS_URL_PREFIX + subset_name + ".tar.gz"
                download_file(subset_url, subset_archive)
                logger.info(f"Downloaded data for subset {subset_name}.")
            else:
                logger.info(
                    f"Found an archive file for {subset_name}. Unpacking."
                )
            shutil.unpack_archive(subset_archive, subset_folder)

        # Collects all files matching the provided extension
        wav_list.extend(get_all_files(subset_folder, match_and=extension))

    logger.info(
        f"Creating {save_json_train}, {save_json_valid}, and {save_json_test}"
    )

    # Random split the signal list into train, valid, and test sets.
    data_split = split_sets(wav_list, split_ratio)

    # Creating json files
    create_json(data_split["train"], save_json_train, sample_rate)
    create_json(data_split["valid"], save_json_valid, sample_rate)
    create_json(data_split["test"], save_json_test, sample_rate)
def create_json(wav_list, json_file, sample_rate):
    """
    Creates the json file given a list of wav files.

    Arguments
    ---------
    wav_list : list of str
        The list of wav files.
    json_file : str
        The path of the output json file
    sample_rate : int
        The sample rate to be used for the dataset

    Side effects
    ------------
    Wav files whose sample rate differs from ``sample_rate`` are
    resampled and rewritten in place on disk.
    """
    json_dict = {}
    # Resampler from LibriTTS native rate (24 kHz) to the requested rate
    resampler = Resample(orig_freq=24000, new_freq=sample_rate)
    # Processes all the wav files in the list
    for wav_file in wav_list:
        # Reads the signal
        signal, sig_sr = torchaudio.load(wav_file)
        signal = signal.squeeze(0)
        # Manipulates path to get relative path and uttid
        path_parts = wav_file.split(os.path.sep)
        uttid, _ = os.path.splitext(path_parts[-1])
        relative_path = os.path.join("{data_root}", *path_parts[-6:])
        # Gets the path for the text files and extracts the input text.
        # NOTE(review): joining from "/" assumes absolute POSIX paths —
        # confirm callers never pass relative or Windows paths.
        original_text_path = os.path.join(
            "/", *path_parts[:-1], uttid + ".original.txt"
        )
        with open(original_text_path) as f:
            original_text = f.read()
        # Strip curly braces; replace() is a no-op when absent, so the
        # previous `__contains__` pre-checks were unnecessary.
        original_text = original_text.replace("{", "").replace("}", "")
        # Resamples the audio file if required
        if sig_sr != sample_rate:
            signal = signal.unsqueeze(0)
            resampled_signal = resampler(signal)
            os.unlink(wav_file)
            torchaudio.save(wav_file, resampled_signal, sample_rate=sample_rate)
        # Gets the speaker-id from the utterance-id
        spk_id = uttid.split("_")[0]
        # Creates an entry for the utterance; only training items are
        # marked as segmentable
        json_dict[uttid] = {
            "wav": relative_path,
            "spk_id": spk_id,
            "label": original_text,
            "segment": "train" in json_file,
        }
    # Writes the dictionary to the json file
    with open(json_file, mode="w") as json_f:
        json.dump(json_dict, json_f, indent=2)
    logger.info(f"{json_file} successfully created!")
def skip(*filenames):
"""
Detects if the data preparation has been already done.
If the preparation has been done, we can skip it.
Returns
-------
bool
if True, the preparation phase can be skipped.
if False, it must be done.
"""
for filename in filenames:
if not os.path.isfile(filename):
return False
return True
def split_sets(wav_list, split_ratio):
"""Randomly splits the wav list into training, validation, and test lists.
Arguments
---------
wav_list : list
list of all the signals in the dataset
split_ratio: list
List composed of three integers that sets split ratios for train, valid,
and test sets, respectively. For instance split_ratio=[80, 10, 10] will
assign 80% of the sentences to training, 10% for validation, and 10%
for test.
Returns
------
dictionary containing train, valid, and test splits.
"""
# Random shuffles the list
random.shuffle(wav_list)
tot_split = sum(split_ratio)
tot_snts = len(wav_list)
data_split = {}
splits = ["train", "valid"]
for i, split in enumerate(splits):
n_snts = int(tot_snts * split_ratio[i] / tot_split)
data_split[split] = wav_list[0:n_snts]
del wav_list[0:n_snts]
data_split["test"] = wav_list
return data_split
def check_folders(*folders):
"""Returns False if any passed folder does not exist."""
for folder in folders:
if not os.path.exists(folder):
return False
return True
if __name__ == "__main__":
prepare_libritts(
"libritts_data", "train.json", "valid.json", "test.json", 16000
)
| 7,710 | 33.424107 | 115 | py |
speechbrain | speechbrain-main/recipes/LibriTTS/vocoder/hifigan/train.py | #!/usr/bin/env python3
"""Recipe for training a hifi-gan vocoder.
For more details about hifi-gan: https://arxiv.org/pdf/2010.05646.pdf
To run this recipe, do the following:
> python train.py hparams/train.yaml --data_folder /path/to/LibriTTS
Authors
* Duret Jarod 2021
* Yingzhi WANG 2022
* Pradnya Kandarkar 2022
"""
import sys
import torch
from hyperpyyaml import load_hyperpyyaml
import speechbrain as sb
from speechbrain.utils.data_utils import scalarize
import torchaudio
import os
class HifiGanBrain(sb.Brain):
def compute_forward(self, batch, stage):
"""The forward function, generates synthesized waveforms,
calculates the scores and the features of the discriminator
for synthesized waveforms and real waveforms.
Arguments
---------
batch: str
a single batch
stage: speechbrain.Stage
the training stage
"""
batch = batch.to(self.device)
x, _ = batch.mel
y, _ = batch.sig
# generate sythesized waveforms
y_g_hat = self.modules.generator(x)[:, :, : y.size(2)]
# get scores and features from discriminator for real and synthesized waveforms
scores_fake, feats_fake = self.modules.discriminator(y_g_hat.detach())
scores_real, feats_real = self.modules.discriminator(y)
return (y_g_hat, scores_fake, feats_fake, scores_real, feats_real)
def compute_objectives(self, predictions, batch, stage):
"""Computes and combines generator and discriminator losses
"""
batch = batch.to(self.device)
x, _ = batch.mel
y, _ = batch.sig
# Hold on to the batch for the inference sample. This is needed because
# the infernece sample is run from on_stage_end only, where
# batch information is not available
self.last_batch = (x, y)
# Hold on to a sample (for logging)
self._remember_sample(self.last_batch, predictions)
y_hat, scores_fake, feats_fake, scores_real, feats_real = predictions
loss_g = self.hparams.generator_loss(
y_hat, y, scores_fake, feats_fake, feats_real
)
loss_d = self.hparams.discriminator_loss(scores_fake, scores_real)
loss = {**loss_g, **loss_d}
self.last_loss_stats[stage] = scalarize(loss)
return loss
def fit_batch(self, batch):
"""Train discriminator and generator adversarially
"""
batch = batch.to(self.device)
y, _ = batch.sig
outputs = self.compute_forward(batch, sb.core.Stage.TRAIN)
(y_g_hat, scores_fake, feats_fake, scores_real, feats_real) = outputs
# calculate discriminator loss with the latest updated generator
loss_d = self.compute_objectives(outputs, batch, sb.core.Stage.TRAIN)[
"D_loss"
]
# First train the discriminator
self.optimizer_d.zero_grad()
loss_d.backward()
self.optimizer_d.step()
# calculate generator loss with the latest updated discriminator
scores_fake, feats_fake = self.modules.discriminator(y_g_hat)
scores_real, feats_real = self.modules.discriminator(y)
outputs = (y_g_hat, scores_fake, feats_fake, scores_real, feats_real)
loss_g = self.compute_objectives(outputs, batch, sb.core.Stage.TRAIN)[
"G_loss"
]
# Then train the generator
self.optimizer_g.zero_grad()
loss_g.backward()
self.optimizer_g.step()
return loss_g.detach().cpu()
def evaluate_batch(self, batch, stage):
"""Evaluate one batch
"""
out = self.compute_forward(batch, stage=stage)
loss = self.compute_objectives(out, batch, stage=stage)
loss_g = loss["G_loss"]
return loss_g.detach().cpu()
def on_fit_start(self):
"""Gets called at the beginning of ``fit()``, on multiple processes
if ``distributed_count > 0`` and backend is ddp and initializes statistics
"""
self.last_epoch = 0
self.last_batch = None
self.last_loss_stats = {}
return super().on_fit_start()
def init_optimizers(self):
"""Called during ``on_fit_start()``, initialize optimizers
after parameters are fully configured (e.g. DDP, jit).
"""
if self.opt_class is not None:
(
opt_g_class,
opt_d_class,
sch_g_class,
sch_d_class,
) = self.opt_class
self.optimizer_g = opt_g_class(self.modules.generator.parameters())
self.optimizer_d = opt_d_class(
self.modules.discriminator.parameters()
)
self.scheduler_g = sch_g_class(self.optimizer_g)
self.scheduler_d = sch_d_class(self.optimizer_d)
if self.checkpointer is not None:
self.checkpointer.add_recoverable(
"optimizer_g", self.optimizer_g
)
self.checkpointer.add_recoverable(
"optimizer_d", self.optimizer_d
)
self.checkpointer.add_recoverable(
"scheduler_g", self.scheduler_d
)
self.checkpointer.add_recoverable(
"scheduler_d", self.scheduler_d
)
def _remember_sample(self, batch, predictions):
"""Remembers samples of spectrograms and the batch for logging purposes
Arguments
---------
batch: tuple
a training batch
predictions: tuple
predictions (raw output of the Tacotron model)
"""
mel, sig = batch
y_hat, scores_fake, feats_fake, scores_real, feats_real = predictions
def on_stage_end(self, stage, stage_loss, epoch):
"""Gets called at the end of a stage (TRAIN, VALID, Or TEST)
"""
if stage == sb.Stage.VALID:
# Update learning rate
self.scheduler_g.step()
self.scheduler_d.step()
lr_g = self.optimizer_g.param_groups[-1]["lr"]
lr_d = self.optimizer_d.param_groups[-1]["lr"]
self.hparams.train_logger.log_stats( # 1#2#
stats_meta={"Epoch": epoch, "lr_g": lr_g, "lr_d": lr_d},
train_stats=self.last_loss_stats[sb.Stage.TRAIN],
valid_stats=self.last_loss_stats[sb.Stage.VALID],
)
# The tensorboard_logger writes a summary to stdout and to the logfile.
if self.hparams.use_tensorboard:
self.tensorboard_logger.log_stats(
stats_meta={"Epoch": epoch, "lr_g": lr_g, "lr_d": lr_d},
train_stats=self.last_loss_stats[sb.Stage.TRAIN],
valid_stats=self.last_loss_stats[sb.Stage.VALID],
)
# Save the current checkpoint and delete previous checkpoints.
epoch_metadata = {
**{"epoch": epoch},
**self.last_loss_stats[sb.Stage.VALID],
}
self.checkpointer.save_and_keep_only(
meta=epoch_metadata,
end_of_epoch=True,
min_keys=["loss"],
ckpt_predicate=(
lambda ckpt: (
ckpt.meta["epoch"]
% self.hparams.keep_checkpoint_interval
!= 0
)
)
if self.hparams.keep_checkpoint_interval is not None
else None,
)
self.run_inference_sample("Valid")
# We also write statistics about test data to stdout and to the TensorboardLogger.
if stage == sb.Stage.TEST:
self.hparams.train_logger.log_stats( # 1#2#
{"Epoch loaded": self.hparams.epoch_counter.current},
test_stats=self.last_loss_stats[sb.Stage.TEST],
)
if self.hparams.use_tensorboard:
self.tensorboard_logger.log_stats(
{"Epoch loaded": self.hparams.epoch_counter.current},
test_stats=self.last_loss_stats[sb.Stage.TEST],
)
self.run_inference_sample("Test")
def run_inference_sample(self, name):
"""Produces a sample in inference mode. This is called when producing
samples.
"""
with torch.no_grad():
if self.last_batch is None:
return
x, y = self.last_batch
# Preparing model for inference by removing weight norm
# inference_generator = copy.deepcopy(self.hparams.generator)
inference_generator = type(self.hparams.generator)(
in_channels=self.hparams.in_channels,
out_channels=self.hparams.out_channels,
resblock_type=self.hparams.resblock_type,
resblock_dilation_sizes=self.hparams.resblock_dilation_sizes,
resblock_kernel_sizes=self.hparams.resblock_kernel_sizes,
upsample_kernel_sizes=self.hparams.upsample_kernel_sizes,
upsample_initial_channel=self.hparams.upsample_initial_channel,
upsample_factors=self.hparams.upsample_factors,
inference_padding=self.hparams.inference_padding,
cond_channels=self.hparams.cond_channels,
conv_post_bias=self.hparams.conv_post_bias,
).to(
self.device
) # Gets a new instance
inference_generator.load_state_dict(
self.hparams.generator.state_dict()
) # Copies weights
inference_generator.remove_weight_norm()
sig_out = inference_generator.inference(x)
spec_out = self.hparams.mel_spectogram(
audio=sig_out.squeeze(0).cpu()
)
if self.hparams.use_tensorboard:
self.tensorboard_logger.log_audio(
f"{name}/audio_target", y.squeeze(0), self.hparams.sample_rate
)
self.tensorboard_logger.log_audio(
f"{name}/audio_pred",
sig_out.squeeze(0),
self.hparams.sample_rate,
)
self.tensorboard_logger.log_figure(f"{name}/mel_target", x)
self.tensorboard_logger.log_figure(f"{name}/mel_pred", spec_out)
else:
# folder name is the current epoch for validation and "test" for test
folder = (
self.hparams.epoch_counter.current
if name == "Valid"
else "test"
)
self.save_audio("target", y.squeeze(0), folder)
self.save_audio("synthesized", sig_out.squeeze(0), folder)
def save_audio(self, name, data, epoch):
"""Saves a single wav
Arguments
---------
name: str
the name of the saved audio
data: torch.Tensor
the wave data to save
epoch: int or str
the epoch number (used in file path calculations)
or "test" for test stage
"""
target_path = os.path.join(
self.hparams.progress_sample_path, str(epoch)
)
if not os.path.exists(target_path):
os.makedirs(target_path)
file_name = f"{name}.wav"
effective_file_name = os.path.join(target_path, file_name)
torchaudio.save(
effective_file_name, data.cpu(), self.hparams.sample_rate
)
def dataio_prepare(hparams):
"""This function prepares the datasets to be used in the brain class.
It also defines the data processing pipeline through user-defined functions.
"""
segment_size = hparams["segment_size"]
# Define audio pipeline:
@sb.utils.data_pipeline.takes("wav", "segment")
@sb.utils.data_pipeline.provides("mel", "sig")
def audio_pipeline(wav, segment):
audio = sb.dataio.dataio.read_audio(wav)
audio = torch.FloatTensor(audio)
audio = audio.unsqueeze(0)
if segment:
if audio.size(1) >= segment_size:
max_audio_start = audio.size(1) - segment_size
audio_start = torch.randint(0, max_audio_start, (1,))
audio = audio[:, audio_start : audio_start + segment_size]
else:
audio = torch.nn.functional.pad(
audio, (0, segment_size - audio.size(1)), "constant"
)
mel = hparams["mel_spectogram"](audio=audio.squeeze(0))
return mel, audio
datasets = {}
data_info = {
"train": hparams["train_json"],
"valid": hparams["valid_json"],
"test": hparams["test_json"],
}
for dataset in hparams["splits"]:
datasets[dataset] = sb.dataio.dataset.DynamicItemDataset.from_json(
json_path=data_info[dataset],
replacements={"data_root": hparams["data_folder"]},
dynamic_items=[audio_pipeline],
output_keys=["id", "mel", "sig"],
)
return datasets
if __name__ == "__main__":
# Load hyperparameters file with command-line overrides
hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])
with open(hparams_file) as fin:
hparams = load_hyperpyyaml(fin, overrides)
# Create experiment directory
sb.create_experiment_directory(
experiment_directory=hparams["output_folder"],
hyperparams_to_save=hparams_file,
overrides=overrides,
)
if not hparams["skip_prep"]:
from libritts_prepare import prepare_libritts
sb.utils.distributed.run_on_main(
prepare_libritts,
kwargs={
"data_folder": hparams["data_folder"],
"save_json_train": hparams["train_json"],
"save_json_valid": hparams["valid_json"],
"save_json_test": hparams["test_json"],
"sample_rate": hparams["sample_rate"],
"split_ratio": hparams["split_ratio"],
"libritts_subsets": hparams["libritts_subsets"],
},
)
datasets = dataio_prepare(hparams)
# Brain class initialization
hifi_gan_brain = HifiGanBrain(
modules=hparams["modules"],
opt_class=[
hparams["opt_class_generator"],
hparams["opt_class_discriminator"],
hparams["sch_class_generator"],
hparams["sch_class_discriminator"],
],
hparams=hparams,
run_opts=run_opts,
checkpointer=hparams["checkpointer"],
)
if hparams["use_tensorboard"]:
hifi_gan_brain.tensorboard_logger = sb.utils.train_logger.TensorboardLogger(
save_dir=hparams["output_folder"] + "/tensorboard"
)
# Training
hifi_gan_brain.fit(
hifi_gan_brain.hparams.epoch_counter,
train_set=datasets["train"],
valid_set=datasets["valid"],
train_loader_kwargs=hparams["train_dataloader_opts"],
valid_loader_kwargs=hparams["valid_dataloader_opts"],
)
# Test
if "test" in datasets:
hifi_gan_brain.evaluate(
datasets["test"],
test_loader_kwargs=hparams["test_dataloader_opts"],
)
| 15,335 | 35.688995 | 90 | py |
speechbrain | speechbrain-main/recipes/DVoice/dvoice_prepare.py | """
Data preparation.
Download: https://dvoice.ma/
Author
------
Abdou Mohamed Naira 2022
"""
import os
import csv
import re
import logging
import torchaudio
import unicodedata
from tqdm.contrib import tzip
import random
import pandas as pd
from tqdm import tqdm
import numpy as np
import glob
logger = logging.getLogger(__name__)
def prepare_dvoice(
data_folder,
save_folder,
train_csv_file=None,
dev_csv_file=None,
test_csv_file=None,
accented_letters=False,
language="fongbe",
skip_prep=False,
):
if skip_prep:
return
# If not specified point toward standard location w.r.t DVoice tree
if train_csv_file is None:
train_csv_file = data_folder + "texts/train.csv"
else:
train_csv_file = train_csv_file
if dev_csv_file is None:
dev_csv_file = data_folder + "texts/dev.csv"
else:
dev_csv_file = dev_csv_file
if test_csv_file is None:
test_csv_file = data_folder + "texts/test.csv"
else:
test_csv_file = test_csv_file
# Setting the save folder
if not os.path.exists(save_folder):
os.makedirs(save_folder)
# Setting the ALFFA-Dataset csv files
ALFFA_LANGUAGES = ["amharic", "fongbe", "wolof"]
if language in ALFFA_LANGUAGES:
df = alffa_public_prepare(language, data_folder)
train, dev, test = train_validate_test_split(df)
train.to_csv(f"{data_folder}/train.csv", index=False, sep="\t")
dev.to_csv(f"{data_folder}/dev.csv", index=False, sep="\t")
test.to_csv(f"{data_folder}/test.csv", index=False, sep="\t")
if language == "swahili":
df = swahili_prepare(data_folder)
train, dev, test = train_validate_test_split(df)
train.to_csv(f"{data_folder}/train.csv", index=False, sep="\t")
dev.to_csv(f"{data_folder}/dev.csv", index=False, sep="\t")
test.to_csv(f"{data_folder}/test.csv", index=False, sep="\t")
if language == "multilingual":
ALFFA_LANGUAGES = ["amharic", "wolof"]
df_alffa = pd.DataFrame()
for lang in ALFFA_LANGUAGES:
data_folder2 = (
data_folder + f"/ALFFA_PUBLIC/ASR/{lang.upper()}/data"
)
df_l = alffa_public_prepare(lang, data_folder2)
df_l["wav"] = df_l["wav"].map(
lambda x: f"ALFFA_PUBLIC/ASR/{lang.upper()}/data/"
+ x.replace(f"{data_folder}/", "")
)
df_alffa = pd.concat([df_alffa, df_l], ignore_index=True)
df_sw = swahili_prepare(data_folder)
train_darija = pd.read_csv(
f"{data_folder}/DVOICE/darija/texts/train.csv", sep="\t"
)
dev_darija = pd.read_csv(
f"{data_folder}/DVOICE/darija/texts/dev.csv", sep="\t"
)
test_darija = pd.read_csv(
f"{data_folder}/DVOICE/darija/texts/test.csv", sep="\t"
)
df_dar = pd.concat(
[train_darija, dev_darija, test_darija], ignore_index=True
)
df_dar["wav"] = df_dar["wav"].map(lambda x: "DVOICE/darija/wavs/" + x)
df = pd.concat([df_alffa, df_sw, df_dar], ignore_index=True)
train, dev, test = train_validate_test_split(df)
train.to_csv(f"{data_folder}/train.csv", index=False, sep="\t")
dev.to_csv(f"{data_folder}/dev.csv", index=False, sep="\t")
test.to_csv(f"{data_folder}/test.csv", index=False, sep="\t")
# Setting ouput files
save_csv_train = save_folder + "/train.csv"
save_csv_dev = save_folder + "/dev.csv"
save_csv_test = save_folder + "/test.csv"
# If csv already exists, we skip the data preparation
if skip(save_csv_train, save_csv_dev, save_csv_test):
msg = "%s already exists, skipping data preparation!" % (save_csv_train)
logger.info(msg)
msg = "%s already exists, skipping data preparation!" % (save_csv_dev)
logger.info(msg)
msg = "%s already exists, skipping data preparation!" % (save_csv_test)
logger.info(msg)
return
# Additional checks to make sure the folder contains the data
check_dvoice_folders(data_folder, language)
# Creating csv file for training data
if train_csv_file is not None:
create_csv(
train_csv_file,
save_csv_train,
data_folder,
accented_letters,
language,
)
# Creating csv file for dev data
if dev_csv_file is not None:
create_csv(
dev_csv_file, save_csv_dev, data_folder, accented_letters, language,
)
# Creating csv file for test data
if test_csv_file is not None:
create_csv(
test_csv_file,
save_csv_test,
data_folder,
accented_letters,
language,
)
def alffa_public_prepare(language, data_folder):
if language == "amharic":
wavs = glob.glob(f"{data_folder}/*/*/*.wav")
f_train = open(f"{data_folder}/train/text", "r")
f_test = open(f"{data_folder}/test/text", "r")
text = f_train.readlines() + f_test.readlines()
random.shuffle(text)
if language == "fongbe":
wavs = glob.glob(f"{data_folder}/*/wav/*/*.wav")
f_train = open(f"{data_folder}/train/text", "r")
f_test = open(f"{data_folder}/test/text", "r")
text = f_train.readlines() + f_test.readlines()
random.shuffle(text)
if language == "wolof":
wavs_train = glob.glob(f"{data_folder}/train/*/*.wav")
wavs_dev = glob.glob(f"{data_folder}/dev/wav/*/*.wav")
wavs_test = glob.glob(f"{data_folder}/test/wav/*/*.wav")
wavs = wavs_train + wavs_dev + wavs_test
f_train = open(f"{data_folder}/train/text", "r")
f_test = open(f"{data_folder}/test/text", "r")
f_dev = open(f"{data_folder}/dev/text", "r")
text = f_train.readlines() + f_dev.readlines() + f_test.readlines()
random.shuffle(text)
data = []
for i in tqdm(range(len(text))):
text[i] = text[i].replace(" ", " ")
text[i] = text[i].replace(" ", " ")
text[i] = text[i].split(" ")
file_name = text[i][0]
words = " ".join(text[i][1:])
for j in range(len(wavs)):
if wavs[j].split("/")[-1] == file_name + ".wav":
wav = wavs[j]
info = torchaudio.info(wav)
duration = info.num_frames / info.sample_rate
dic = {
"wav": wavs[j].replace(data_folder + "/", ""),
"words": str(words).replace("\n", ""),
"duration": duration,
}
data.append(dic)
break
random.shuffle(data)
df = pd.DataFrame(data)
return df
def swahili_prepare(data_folder):
wavs_alffa = glob.glob(
f"{data_folder}/ALFFA_PUBLIC/ASR/SWAHILI/data/*/*/*/*"
)
train_dvoice = pd.read_csv(
f"{data_folder}/DVOICE/swahili/texts/train.csv", sep="\t"
)
dev_dvoice = pd.read_csv(
f"{data_folder}/DVOICE/swahili/texts/dev.csv", sep="\t"
)
test_dvoice = pd.read_csv(
f"{data_folder}/DVOICE/swahili/texts/test.csv", sep="\t"
)
text_dvoice = pd.concat(
[train_dvoice, dev_dvoice, test_dvoice], ignore_index=True
)
text_dvoice["wav"] = text_dvoice["wav"].map(
lambda x: "DVOICE/swahili/wavs/" + x
)
f_train_alffa = open(
f"{data_folder}/ALFFA_PUBLIC/ASR/SWAHILI/data/train/text", "r"
)
f_test_alffa = open(
f"{data_folder}/ALFFA_PUBLIC/ASR/SWAHILI/data/test/text", "r"
)
train_alffa = f_train_alffa.readlines()
test_alffa = f_test_alffa.readlines()
text_alffa = train_alffa + test_alffa
random.shuffle(text_alffa)
data_alffa = []
for i in tqdm(range(len(text_alffa))):
if "\t" in text_alffa[i]:
text_alffa[i] = text_alffa[i].split("\t")
file_name = text_alffa[i][0]
words = text_alffa[i][1]
else:
text_alffa[i] = text_alffa[i].split(" ")
file_name = text_alffa[i][0]
words = " ".join(text_alffa[i][1:])
for j in range(len(wavs_alffa)):
if wavs_alffa[j].split("/")[-1] == file_name + ".wav":
wav = wavs_alffa[j]
info = torchaudio.info(wav)
duration = info.num_frames / info.sample_rate
dic = {
"wav": wavs_alffa[j].replace(data_folder + "/", ""),
"words": str(words).replace("\n", ""),
"duration": duration,
}
data_alffa.append(dic)
break
text_alffa = pd.DataFrame(data_alffa)
df = pd.concat([text_dvoice, text_alffa], ignore_index=True)
return df
def train_validate_test_split(
df, train_percent=0.6, validate_percent=0.2, seed=None
):
np.random.seed(seed)
perm = np.random.permutation(df.index)
m = len(df.index)
train_end = int(train_percent * m)
validate_end = int(validate_percent * m) + train_end
train = df.iloc[perm[:train_end]]
validate = df.iloc[perm[train_end:validate_end]]
test = df.iloc[perm[validate_end:]]
return train, validate, test
def skip(save_csv_train, save_csv_dev, save_csv_test):
"""
Detects if the DVoice data preparation has been already done.
If the preparation has been done, we can skip it.
Returns
-------
bool
if True, the preparation phase can be skipped.
if False, it must be done.
"""
# Checking folders and save options
skip = False
if (
os.path.isfile(save_csv_train)
and os.path.isfile(save_csv_dev)
and os.path.isfile(save_csv_test)
):
skip = True
return skip
def create_csv(
orig_csv_file,
csv_file,
data_folder,
accented_letters=False,
language="darija",
):
"""
Creates the csv file given a list of wav files.
Arguments
---------
orig_csv_file : str
Path to the DVoice csv file (standard file).
data_folder : str
Path of the DVoice dataset.
accented_letters : bool, optional
Defines if accented letters will be kept as individual letters or
transformed to the closest non-accented letters.
Returns
-------
None
"""
# Check if the given files exists
if not os.path.isfile(orig_csv_file):
msg = "\t%s doesn't exist, verify your dataset!" % (orig_csv_file)
logger.info(msg)
raise FileNotFoundError(msg)
# We load and skip the header
loaded_csv = open(orig_csv_file, "r").readlines()[1:]
nb_samples = str(len(loaded_csv))
msg = "Preparing CSV files for %s samples ..." % (str(nb_samples))
logger.info(msg)
# Adding some Prints
msg = "Creating csv lists in %s ..." % (csv_file)
logger.info(msg)
csv_lines = [["ID", "duration", "wav", "spk_id", "wrd"]]
# Start processing lines
total_duration = 0.0
for line in tzip(loaded_csv):
line = line[0]
# Path is at indice 1 in DVoice csv files. And .mp3 files
# are located in datasets/lang/clips/
ALFFA_LANGUAGES = ["amharic", "fongbe"]
if language in ALFFA_LANGUAGES:
mp3_path = line.split("\t")[0]
elif (
language == "multilingual"
or language == "swahili"
or language == "wolof"
):
mp3_path = data_folder + "/" + line.split("\t")[0]
else:
mp3_path = data_folder + "/wavs/" + line.split("\t")[0]
file_name = line.split("\t")[0]
spk_id = line.split("\t")[0].replace(".wav", "")
snt_id = os.path.basename(file_name)
# Setting torchaudio backend to sox-io (needed to read mp3 files)
if torchaudio.get_audio_backend() != "sox_io":
logger.warning("This recipe needs the sox-io backend of torchaudio")
logger.warning("The torchaudio backend is changed to sox_io")
torchaudio.set_audio_backend("sox_io")
duration = float(line.split("\t")[2])
total_duration += duration
# Getting transcript
words = line.split("\t")[1]
# Unicode Normalization
# words = unicode_normalisation(words)
# !! Language specific cleaning !!
# Important: feel free to specify the text normalization
# corresponding to your alphabet.
if language == "dar":
HAMZA = "\u0621"
ALEF_MADDA = "\u0622"
ALEF_HAMZA_ABOVE = "\u0623"
letters = (
"ابتةثجحخدذرزسشصضطظعغفقكلمنهويءآأؤإئ"
+ HAMZA
+ ALEF_MADDA
+ ALEF_HAMZA_ABOVE
)
words = re.sub("[^" + letters + "]+", " ", words).upper()
# # Remove accents if specified
# if not accented_letters:
# words = strip_accents(words)
# words = words.replace("'", " ")
# words = words.replace("’", " ")
# # Remove multiple spaces
# words = re.sub(" +", " ", words)
# # Remove spaces at the beginning and the end of the sentence
# words = words.lstrip().rstrip()
# # Getting chars
# chars = words.replace(" ", "_")
# chars = " ".join([char for char in chars][:])
# Remove too short sentences (or empty):
# if len(words.split(" ")) < 3:
# continue
# Composition of the csv_line
csv_line = [snt_id, str(duration), mp3_path, spk_id, str(words)]
# Adding this line to the csv_lines list
csv_lines.append(csv_line)
# Writing the csv lines
with open(csv_file, mode="w", encoding="utf-8") as csv_f:
csv_writer = csv.writer(
csv_f, delimiter=",", quotechar='"', quoting=csv.QUOTE_MINIMAL
)
for line in csv_lines:
csv_writer.writerow(line)
# Final prints
msg = "%s successfully created!" % (csv_file)
logger.info(msg)
msg = "Number of samples: %s " % (str(len(loaded_csv)))
logger.info(msg)
msg = "Total duration: %s Hours" % (str(round(total_duration / 3600, 2)))
logger.info(msg)
def check_dvoice_folders(data_folder, language):
"""
Check if the data folder actually contains the DVoice dataset.
If not, raises an error.
Returns
-------
None
Raises
------
FileNotFoundError
If data folder doesn't contain DVoice dataset.
"""
ALFFA_LANGUAGES = ["amharic", "fongbe", "wolof"]
if (
language in ALFFA_LANGUAGES
or language == "swahili"
or language == "multilingual"
):
files_str = "/"
else:
files_str = "/wavs"
# Checking clips
if not os.path.exists(data_folder + files_str):
err_msg = (
"the folder %s does not exist (it is expected in "
"the DVoice dataset)" % (data_folder + files_str)
)
raise FileNotFoundError(err_msg)
def unicode_normalisation(text):
try:
text = unicode(text, "utf-8")
except NameError: # unicode is a default on python 3
pass
return str(text)
def strip_accents(text):
text = (
unicodedata.normalize("NFD", text)
.encode("ascii", "ignore")
.decode("utf-8")
)
return str(text)
| 15,401 | 29.804 | 80 | py |
speechbrain | speechbrain-main/recipes/DVoice/ASR/CTC/train_with_wav2vec2.py | #!/usr/bin/env python3
import sys
import torch
import logging
import speechbrain as sb
import torchaudio
from hyperpyyaml import load_hyperpyyaml
from speechbrain.tokenizers.SentencePiece import SentencePiece
from speechbrain.utils.data_utils import undo_padding
from speechbrain.utils.distributed import run_on_main
"""Recipe for training a sequence-to-sequence ASR system with DVoice.
The system employs a wav2vec2 encoder and a CTC decoder.
Decoding is performed with greedy decoding (will be extended to beam search).
To run this recipe, do the following:
> python train_with_wav2vec2.py hparams/train_sw_with_wav2vec.yaml --data_folder=/path_to_dataset/swahili
With the default hyperparameters, the system employs a pretrained wav2vec2 encoder.
The wav2vec2 model is pretrained following the model given in the hprams file.
It may be dependent on the language.
The neural network is trained with CTC on sub-word units estimated with
Byte Pairwise Encoding (BPE).
The experiment file is flexible enough to support a large variety of
different systems. By properly changing the parameter files, you can try
different encoders, decoders, tokens (e.g, characters instead of BPE),
training languages, and many
other possible variations.
Authors
* Naira Abdou Mohamed 2022
"""
logger = logging.getLogger(__name__)
# Define training procedure
class ASR(sb.core.Brain):
def compute_forward(self, batch, stage):
"""Forward computations from the waveform batches to the output probabilities."""
batch = batch.to(self.device)
wavs, wav_lens = batch.sig
tokens_bos, _ = batch.tokens_bos
wavs, wav_lens = wavs.to(self.device), wav_lens.to(self.device)
if stage == sb.Stage.TRAIN:
if hasattr(self.hparams, "augmentation"):
wavs = self.hparams.augmentation(wavs, wav_lens)
# Forward pass
feats = self.modules.wav2vec2(wavs, wav_lens)
x = self.modules.enc(feats)
logits = self.modules.ctc_lin(x)
p_ctc = self.hparams.log_softmax(logits)
return p_ctc, wav_lens
def compute_objectives(self, predictions, batch, stage):
"""Computes the loss (CTC) given predictions and targets."""
p_ctc, wav_lens = predictions
ids = batch.id
tokens_eos, tokens_eos_lens = batch.tokens_eos
tokens, tokens_lens = batch.tokens
loss = self.hparams.ctc_cost(p_ctc, tokens, wav_lens, tokens_lens)
if stage != sb.Stage.TRAIN:
# Decode token terms to words
sequence = sb.decoders.ctc_greedy_decode(
p_ctc, wav_lens, blank_id=self.hparams.blank_index
)
predicted_words = self.tokenizer(sequence, task="decode_from_list")
# Convert indices to words
target_words = undo_padding(tokens, tokens_lens)
target_words = self.tokenizer(target_words, task="decode_from_list")
self.wer_metric.append(ids, predicted_words, target_words)
self.cer_metric.append(ids, predicted_words, target_words)
return loss
def fit_batch(self, batch):
"""Train the parameters given a single batch in input"""
should_step = self.step % self.grad_accumulation_factor == 0
# Managing automatic mixed precision
# TOFIX: CTC fine-tuning currently is unstable
# This is certainly due to CTC being done in fp16 instead of fp32
if self.auto_mix_prec:
with torch.cuda.amp.autocast():
with self.no_sync():
outputs = self.compute_forward(batch, sb.Stage.TRAIN)
loss = self.compute_objectives(outputs, batch, sb.Stage.TRAIN)
with self.no_sync(not should_step):
self.scaler.scale(
loss / self.grad_accumulation_factor
).backward()
if should_step:
if not self.hparams.wav2vec2.freeze:
self.scaler.unscale_(self.wav2vec_optimizer)
self.scaler.unscale_(self.model_optimizer)
if self.check_gradients(loss):
if not self.hparams.wav2vec2.freeze:
self.scaler.step(self.wav2vec_optimizer)
self.scaler.step(self.model_optimizer)
self.scaler.update()
self.zero_grad()
self.optimizer_step += 1
else:
# This is mandatory because HF models have a weird behavior with DDP
# on the forward pass
with self.no_sync():
outputs = self.compute_forward(batch, sb.Stage.TRAIN)
loss = self.compute_objectives(outputs, batch, sb.Stage.TRAIN)
with self.no_sync(not should_step):
(loss / self.grad_accumulation_factor).backward()
if should_step:
if self.check_gradients(loss):
if not self.hparams.wav2vec2.freeze:
self.wav2vec_optimizer.step()
self.model_optimizer.step()
self.zero_grad()
self.optimizer_step += 1
self.on_fit_batch_end(batch, outputs, loss, should_step)
return loss.detach().cpu()
def evaluate_batch(self, batch, stage):
"""Computations needed for validation/test batches"""
predictions = self.compute_forward(batch, stage=stage)
with torch.no_grad():
loss = self.compute_objectives(predictions, batch, stage=stage)
return loss.detach()
def on_stage_start(self, stage, epoch):
"""Gets called at the beginning of each epoch"""
if stage != sb.Stage.TRAIN:
self.cer_metric = self.hparams.cer_computer()
self.wer_metric = self.hparams.error_rate_computer()
def on_stage_end(self, stage, stage_loss, epoch):
"""Gets called at the end of an epoch."""
# Compute/store important stats
stage_stats = {"loss": stage_loss}
if stage == sb.Stage.TRAIN:
self.train_stats = stage_stats
else:
stage_stats["CER"] = self.cer_metric.summarize("error_rate")
stage_stats["WER"] = self.wer_metric.summarize("error_rate")
# Perform end-of-iteration things, like annealing, logging, etc.
if stage == sb.Stage.VALID:
old_lr_model, new_lr_model = self.hparams.lr_annealing_model(
stage_stats["loss"]
)
old_lr_wav2vec, new_lr_wav2vec = self.hparams.lr_annealing_wav2vec(
stage_stats["loss"]
)
sb.nnet.schedulers.update_learning_rate(
self.model_optimizer, new_lr_model
)
if not self.hparams.wav2vec2.freeze:
sb.nnet.schedulers.update_learning_rate(
self.wav2vec_optimizer, new_lr_wav2vec
)
self.hparams.train_logger.log_stats(
stats_meta={
"epoch": epoch,
"lr_model": old_lr_model,
"lr_wav2vec": old_lr_wav2vec,
},
train_stats=self.train_stats,
valid_stats=stage_stats,
)
self.checkpointer.save_and_keep_only(
meta={"WER": stage_stats["WER"]}, min_keys=["WER"],
)
elif stage == sb.Stage.TEST:
self.hparams.train_logger.log_stats(
stats_meta={"Epoch loaded": self.hparams.epoch_counter.current},
test_stats=stage_stats,
)
with open(self.hparams.wer_file, "w") as w:
self.wer_metric.write_stats(w)
    def init_optimizers(self):
        """Initializes the wav2vec2 optimizer and model optimizer.

        Each created optimizer is registered with the checkpointer (when
        one exists) so its state is saved and restored across runs.
        """
        # If the wav2vec encoder is unfrozen, we create the optimizer
        if not self.hparams.wav2vec2.freeze:
            self.wav2vec_optimizer = self.hparams.wav2vec_opt_class(
                self.modules.wav2vec2.parameters()
            )
            if self.checkpointer is not None:
                self.checkpointer.add_recoverable(
                    "wav2vec_opt", self.wav2vec_optimizer
                )
        self.model_optimizer = self.hparams.model_opt_class(
            self.hparams.model.parameters()
        )
        if self.checkpointer is not None:
            # NOTE(review): "modelopt" (no underscore) is inconsistent with
            # "wav2vec_opt" above, but renaming the recoverable key would
            # break recovery of existing checkpoints, so it is left as-is.
            self.checkpointer.add_recoverable("modelopt", self.model_optimizer)
def zero_grad(self, set_to_none=False):
if not self.hparams.wav2vec2.freeze:
self.wav2vec_optimizer.zero_grad(set_to_none)
self.model_optimizer.zero_grad(set_to_none)
# Define custom data procedure
def dataio_prepare(hparams, tokenizer):
    """This function prepares the datasets to be used in the brain class.
    It also defines the data processing pipeline through user-defined functions.

    Arguments
    ---------
    hparams : dict
        Loaded hyperparameters; must provide csv paths, "sorting",
        "avoid_if_longer_than", "sample_rate", bos/eos indices and the
        dataloader options (which may be mutated here, see below).
    tokenizer : SentencePiece
        Trained tokenizer whose ``sp`` model encodes words to ids.

    Returns
    -------
    (train_data, valid_data, test_data) : DynamicItemDataset triple
    """
    # 1. Define datasets
    data_folder = hparams["data_folder"]
    train_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["train_csv"], replacements={"data_root": data_folder},
    )
    if hparams["sorting"] == "ascending":
        # we sort training data to speed up training and get better results.
        train_data = train_data.filtered_sorted(
            sort_key="duration",
            key_max_value={"duration": hparams["avoid_if_longer_than"]},
        )
        # when sorting do not shuffle in dataloader ! otherwise is pointless
        # NOTE: this mutates hparams["dataloader_options"] in place.
        hparams["dataloader_options"]["shuffle"] = False
    elif hparams["sorting"] == "descending":
        train_data = train_data.filtered_sorted(
            sort_key="duration",
            reverse=True,
            key_max_value={"duration": hparams["avoid_if_longer_than"]},
        )
        # when sorting do not shuffle in dataloader ! otherwise is pointless
        hparams["dataloader_options"]["shuffle"] = False
    elif hparams["sorting"] == "random":
        pass
    else:
        raise NotImplementedError(
            "sorting must be random, ascending or descending"
        )
    valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["valid_csv"], replacements={"data_root": data_folder},
    )
    # We also sort the validation data so it is faster to validate
    valid_data = valid_data.filtered_sorted(sort_key="duration")
    test_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["test_csv"], replacements={"data_root": data_folder},
    )
    # We also sort the test data so evaluation is faster
    test_data = test_data.filtered_sorted(sort_key="duration")
    datasets = [train_data, valid_data, test_data]
    # 2. Define audio pipeline: read each wav and resample it to the
    # target rate. `torchaudio` is presumably imported at module top —
    # not visible in this chunk; confirm.
    @sb.utils.data_pipeline.takes("wav")
    @sb.utils.data_pipeline.provides("sig")
    def audio_pipeline(wav):
        info = torchaudio.info(wav)
        sig = sb.dataio.dataio.read_audio(wav)
        resampled = torchaudio.transforms.Resample(
            info.sample_rate, hparams["sample_rate"],
        )(sig)
        return resampled
    sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline)
    # 3. Define text pipeline: tokenize words, then yield plain, BOS- and
    # EOS-decorated token tensors for the seq2seq targets.
    @sb.utils.data_pipeline.takes("wrd")
    @sb.utils.data_pipeline.provides(
        "tokens_list", "tokens_bos", "tokens_eos", "tokens"
    )
    def text_pipeline(wrd):
        tokens_list = tokenizer.sp.encode_as_ids(wrd)
        yield tokens_list
        tokens_bos = torch.LongTensor([hparams["bos_index"]] + (tokens_list))
        yield tokens_bos
        tokens_eos = torch.LongTensor(tokens_list + [hparams["eos_index"]])
        yield tokens_eos
        tokens = torch.LongTensor(tokens_list)
        yield tokens
    sb.dataio.dataset.add_dynamic_item(datasets, text_pipeline)
    # 4. Set output:
    sb.dataio.dataset.set_output_keys(
        datasets, ["id", "sig", "tokens_bos", "tokens_eos", "tokens"],
    )
    return train_data, valid_data, test_data
if __name__ == "__main__":
    # Entry point: parse CLI/hparams, prepare the DVoice data, train the
    # ASR model and evaluate its best (lowest-WER) checkpoint on test.
    # NOTE(review): `run_on_main`, `SentencePiece` and `ASR` are assumed
    # to be imported/defined earlier in this file (not visible here).
    # Load hyperparameters file with command-line overrides
    hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])
    with open(hparams_file) as fin:
        hparams = load_hyperpyyaml(fin, overrides)
    # If --distributed_launch then
    # create ddp_group with the right communication protocol
    sb.utils.distributed.ddp_init_group(run_opts)
    # Dataset preparation (parsing CommonVoice)
    from dvoice_prepare import prepare_dvoice  # noqa
    # Create experiment directory
    sb.create_experiment_directory(
        experiment_directory=hparams["output_folder"],
        hyperparams_to_save=hparams_file,
        overrides=overrides,
    )
    # Due to DDP, we do the preparation ONLY on the main python process
    run_on_main(
        prepare_dvoice,
        kwargs={
            "data_folder": hparams["data_folder"],
            "save_folder": hparams["save_folder"],
            "train_csv_file": hparams["train_csv_file"],
            "dev_csv_file": hparams["dev_csv_file"],
            "test_csv_file": hparams["test_csv_file"],
            "accented_letters": hparams["accented_letters"],
            "language": hparams["language"],
            "skip_prep": hparams["skip_prep"],
        },
    )
    # Defining tokenizer and loading it
    tokenizer = SentencePiece(
        model_dir=hparams["save_folder"],
        vocab_size=hparams["output_neurons"],
        annotation_train=hparams["train_csv"],
        annotation_read="wrd",
        model_type=hparams["token_type"],
        character_coverage=hparams["character_coverage"],
    )
    # Create the datasets objects as well as tokenization and encoding :-D
    train_data, valid_data, test_data = dataio_prepare(hparams, tokenizer)
    # Trainer initialization
    asr_brain = ASR(
        modules=hparams["modules"],
        hparams=hparams,
        run_opts=run_opts,
        checkpointer=hparams["checkpointer"],
    )
    # Adding objects to trainer.
    asr_brain.tokenizer = tokenizer
    # Training
    asr_brain.fit(
        asr_brain.hparams.epoch_counter,
        train_data,
        valid_data,
        train_loader_kwargs=hparams["dataloader_options"],
        valid_loader_kwargs=hparams["test_dataloader_options"],
    )
    # Test: evaluate with the checkpoint that achieved the lowest WER.
    asr_brain.hparams.wer_file = hparams["output_folder"] + "/wer_test.txt"
    asr_brain.evaluate(
        test_data,
        min_key="WER",
        test_loader_kwargs=hparams["test_dataloader_options"],
    )
| 14,424 | 36.273902 | 105 | py |
speechbrain | speechbrain-main/tests/unittests/test_normalization.py | import torch
import torch.nn
def test_BatchNorm1d(device):
    """BatchNorm1d yields ~zero mean / unit std over the batch dimension
    for 2D, 3D and combined batch/time inputs.

    Fix: local variable renamed from ``input`` (shadowed the builtin) to
    ``inp``.
    """
    from speechbrain.nnet.normalization import BatchNorm1d
    inp = torch.randn(100, 10, device=device) + 2.0
    norm = BatchNorm1d(input_shape=inp.shape).to(device)
    output = norm(inp)
    assert inp.shape == output.shape
    current_mean = output.mean(dim=0).mean()
    assert torch.abs(current_mean) < 1e-06
    current_std = output.std(dim=0).mean()
    assert torch.abs(1.0 - current_std) < 0.01
    # 3D input reuses the same (feature-sized) norm module.
    inp = torch.randn(100, 20, 10, device=device) + 2.0
    output = norm(inp)
    assert inp.shape == output.shape
    current_mean = output.mean(dim=0).mean()
    assert torch.abs(current_mean) < 1e-06
    current_std = output.std(dim=0).mean()
    assert torch.abs(1.0 - current_std) < 0.01
    # Test with combined dimensions
    inp = torch.randn(100, 10, 20, device=device) + 2.0
    norm = BatchNorm1d(input_shape=inp.shape, combine_batch_time=True).to(
        device
    )
    output = norm(inp)
    assert inp.shape == output.shape
    current_mean = output.mean(dim=0).mean()
    assert torch.abs(current_mean) < 1e-06
    current_std = output.std(dim=0).mean()
    assert torch.abs(1.0 - current_std) < 0.01
    inp = torch.randn(100, 40, 20, 30, device=device) + 2.0
    norm = BatchNorm1d(input_shape=inp.shape, combine_batch_time=True).to(
        device
    )
    output = norm(inp)
    assert inp.shape == output.shape
    current_mean = output.mean(dim=0).mean()
    assert torch.abs(current_mean) < 1e-06
    current_std = output.std(dim=0).mean()
    assert torch.abs(1.0 - current_std) < 0.01
    assert torch.jit.trace(norm, inp)
def test_BatchNorm2d(device):
    """BatchNorm2d yields ~zero mean / unit std over the batch dimension.

    Fix: local variable renamed from ``input`` (shadowed the builtin) to
    ``inp``.
    """
    from speechbrain.nnet.normalization import BatchNorm2d
    inp = torch.randn(100, 10, 4, 20, device=device) + 2.0
    norm = BatchNorm2d(input_shape=inp.shape).to(device)
    output = norm(inp)
    assert inp.shape == output.shape
    current_mean = output.mean(dim=0).mean()
    assert torch.abs(current_mean) < 1e-06
    current_std = output.std(dim=0).mean()
    assert torch.abs(1.0 - current_std) < 0.01
    assert torch.jit.trace(norm, inp)
def test_LayerNorm(device):
    """LayerNorm normalizes across the trailing feature dimension(s).

    Fix: local variable renamed from ``input`` (shadowed the builtin) to
    ``inp``.
    """
    from speechbrain.nnet.normalization import LayerNorm
    inp = torch.randn(4, 101, 256, device=device) + 2.0
    norm = LayerNorm(input_shape=inp.shape).to(device)
    output = norm(inp)
    assert inp.shape == output.shape
    current_mean = output.mean(dim=2).mean()
    assert torch.abs(current_mean) < 1e-06
    current_std = output.std(dim=2).mean()
    assert torch.abs(1.0 - current_std) < 0.01
    # 4D input: normalization spans the last two dimensions.
    inp = torch.randn(100, 101, 16, 32, device=device) + 2.0
    norm = LayerNorm(input_shape=inp.shape).to(device)
    output = norm(inp)
    assert inp.shape == output.shape
    current_mean = output.mean(dim=[2, 3]).mean()
    assert torch.abs(current_mean) < 1e-06
    current_std = output.std(dim=[2, 3]).mean()
    assert torch.abs(1.0 - current_std) < 0.01
    assert torch.jit.trace(norm, inp)
def test_InstanceNorm1d(device):
    """InstanceNorm1d normalizes each (sample, channel) over time.

    Fix: local variable renamed from ``input`` (shadowed the builtin) to
    ``inp``.
    """
    from speechbrain.nnet.normalization import InstanceNorm1d
    inp = torch.randn(100, 10, 128, device=device) + 2.0
    norm = InstanceNorm1d(input_shape=inp.shape).to(device)
    output = norm(inp)
    assert inp.shape == output.shape
    current_mean = output.mean(dim=2).mean()
    assert torch.abs(current_mean) < 1e-06
    current_std = output.std(dim=2).mean()
    assert torch.abs(1.0 - current_std) < 0.01
    assert torch.jit.trace(norm, inp)
def test_InstanceNorm2d(device):
    """InstanceNorm2d normalizes each (sample, channel) over the 2D map.

    Fix: local variable renamed from ``input`` (shadowed the builtin) to
    ``inp``.
    """
    from speechbrain.nnet.normalization import InstanceNorm2d
    inp = torch.randn(100, 10, 20, 2, device=device) + 2.0
    norm = InstanceNorm2d(input_shape=inp.shape).to(device)
    output = norm(inp)
    assert inp.shape == output.shape
    current_mean = output.mean(dim=[2, 3]).mean()
    assert torch.abs(current_mean) < 1e-06
    current_std = output.std(dim=[2, 3]).mean()
    assert torch.abs(1.0 - current_std) < 0.01
    assert torch.jit.trace(norm, inp)
def test_GroupNorm(device):
    """GroupNorm yields ~zero mean / unit std for several group counts.

    Fixes: the local ``input`` no longer shadows the builtin, and the four
    copy-pasted check sections are deduplicated into one config loop
    (shape, num_groups, reduction dim) covering the same cases.
    """
    from speechbrain.nnet.normalization import GroupNorm
    configs = [
        ((4, 101, 256), 256, 2),
        ((4, 101, 256), 128, 2),
        ((100, 101, 16, 32), 32, 3),
        ((100, 101, 16, 32), 8, 3),
    ]
    for shape, num_groups, dim in configs:
        inp = torch.randn(*shape, device=device) + 2.0
        norm = GroupNorm(input_shape=inp.shape, num_groups=num_groups).to(
            device
        )
        output = norm(inp)
        assert inp.shape == output.shape
        current_mean = output.mean(dim=dim).mean()
        assert torch.abs(current_mean) < 1e-06
        current_std = output.std(dim=dim).mean()
        assert torch.abs(1.0 - current_std) < 0.01
    # Trace the last configuration, as the original did.
    assert torch.jit.trace(norm, inp)
| 5,689 | 28.635417 | 76 | py |
def test_parse_arguments():
    """parse_arguments splits CLI args into hparams filename, run options
    and a YAML overrides string."""
    from speechbrain.core import parse_arguments
    argv = ["params.yaml", "--device=cpu", "--seed=3", "--data_folder", "TIMIT"]
    filename, run_opts, overrides = parse_arguments(argv)
    assert filename == "params.yaml"
    assert run_opts["device"] == "cpu"
    assert overrides == "seed: 3\ndata_folder: TIMIT"
def test_brain(device):
    """End-to-end sanity check: Brain.fit reduces the L1 loss of a linear
    model repeatedly trained on one fixed random regression batch."""
    import torch
    from speechbrain.core import Brain, Stage
    from torch.optim import SGD
    model = torch.nn.Linear(in_features=10, out_features=10, device=device)
    class SimpleBrain(Brain):
        def compute_forward(self, batch, stage):
            return self.modules.model(batch[0])
        def compute_objectives(self, predictions, batch, stage):
            return torch.nn.functional.l1_loss(predictions, batch[1])
    brain = SimpleBrain(
        {"model": model}, lambda x: SGD(x, 0.1), run_opts={"device": device}
    )
    inputs = torch.rand(10, 10, device=device)
    targets = torch.rand(10, 10, device=device)
    train_set = ([inputs, targets],)
    valid_set = ([inputs, targets],)
    # Record the loss before training for comparison afterwards.
    start_output = brain.compute_forward(inputs, Stage.VALID)
    start_loss = brain.compute_objectives(start_output, targets, Stage.VALID)
    brain.fit(epoch_counter=range(10), train_set=train_set, valid_set=valid_set)
    end_output = brain.compute_forward(inputs, Stage.VALID)
    end_loss = brain.compute_objectives(end_output, targets, Stage.VALID)
    # Ten epochs of SGD on the same batch must strictly reduce the loss.
    assert end_loss < start_loss
| 1,475 | 35 | 80 | py |
speechbrain | speechbrain-main/tests/unittests/test_samplers.py | import torch
def test_ConcatDatasetBatchSampler(device):
from torch.utils.data import TensorDataset, ConcatDataset, DataLoader
from speechbrain.dataio.sampler import (
ReproducibleRandomSampler,
ConcatDatasetBatchSampler,
)
import numpy as np
datasets = []
for i in range(3):
if i == 0:
datasets.append(
TensorDataset(torch.arange(i * 10, (i + 1) * 10, device=device))
)
else:
datasets.append(
TensorDataset(torch.arange(i * 6, (i + 1) * 6, device=device))
)
samplers = [ReproducibleRandomSampler(x) for x in datasets]
dataset = ConcatDataset(datasets)
loader = DataLoader(
dataset, batch_sampler=ConcatDatasetBatchSampler(samplers, [1, 1, 1])
)
concat_data = []
for data in loader:
concat_data.append([x.item() for x in data[0]])
concat_data = np.array(concat_data)
non_cat_data = []
for i in range(len(samplers)):
c_data = []
loader = DataLoader(dataset.datasets[i], sampler=samplers[i])
for data in loader:
c_data.append(data[0].item())
non_cat_data.append(c_data)
minlen = min([len(x) for x in non_cat_data])
non_cat_data = [x[:minlen] for x in non_cat_data]
non_cat_data = np.array(non_cat_data)
np.testing.assert_array_equal(non_cat_data.T, concat_data)
| 1,418 | 27.959184 | 80 | py |
def test_pretrainer(tmpdir, device):
    """Pretrainer collects a saved checkpoint and loads it into a model."""
    import torch
    from torch.nn import Linear
    # Save a freshly initialized model under tmpdir/original/model.ckpt.
    source_model = Linear(32, 32).to(device)
    source_dir = tmpdir / "original"
    source_dir.mkdir()
    with open(source_dir / "model.ckpt", "wb") as ckpt:
        torch.save(source_model.state_dict(), ckpt)
    # A second random init should not match the saved weights.
    target_model = Linear(32, 32).to(device)
    assert not torch.equal(target_model.weight, source_model.weight)
    from speechbrain.utils.parameter_transfer import Pretrainer
    loader = Pretrainer(
        collect_in=tmpdir / "reused", loadables={"model": target_model}
    )
    loader.collect_files(default_source=source_dir)
    loader.load_collected()
    # After loading, the weights are identical to the saved ones.
    assert torch.equal(target_model.weight, source_model.weight)
| 843 | 35.695652 | 79 | py |
speechbrain | speechbrain-main/tests/unittests/test_dropout.py | import torch
import torch.nn
def test_dropout(device):
    """Dropout2d with rate 0 is the identity; with rate 1 it zeroes all."""
    from speechbrain.nnet.dropout import Dropout2d
    data = torch.rand([4, 10, 32], device=device)
    keep_all = Dropout2d(drop_rate=0.0).to(device)
    assert torch.all(torch.eq(data, keep_all(data)))
    drop_all = Dropout2d(drop_rate=1.0).to(device)
    dropped = drop_all(data)
    assert torch.all(
        torch.eq(torch.zeros(data.shape, device=device), dropped)
    )
    assert torch.jit.trace(drop_all, data)
| 497 | 22.714286 | 67 | py |
speechbrain | speechbrain-main/tests/unittests/test_augment.py | import os
import torch
from speechbrain.dataio.dataio import write_audio
def test_add_noise(tmpdir, device):
    """Checks AddNoise batch concatenation helper and SNR edge cases
    (no-mix, +inf-like SNR, -inf-like SNR, and the 0 dB average)."""
    from speechbrain.processing.speech_augmentation import AddNoise
    # Test concatenation of batches
    wav_a = torch.sin(torch.arange(8000.0, device=device)).unsqueeze(0)
    a_len = torch.ones(1, device=device)
    wav_b = (
        torch.cos(torch.arange(10000.0, device=device))
        .unsqueeze(0)
        .repeat(2, 1)
    )
    b_len = torch.ones(2, device=device)
    # Shorter batch is zero-padded to the longer length; relative lengths
    # reflect the padding (8000/10000 = 0.8).
    concat, lens = AddNoise._concat_batch(wav_a, a_len, wav_b, b_len)
    assert concat.shape == (3, 10000)
    assert lens.allclose(torch.Tensor([0.8, 1, 1]).to(device))
    concat, lens = AddNoise._concat_batch(wav_b, b_len, wav_a, a_len)
    assert concat.shape == (3, 10000)
    expected = torch.Tensor([1, 1, 0.8]).to(device)
    assert lens.allclose(expected)
    test_waveform = torch.sin(torch.arange(16000.0, device=device)).unsqueeze(0)
    test_noise = torch.cos(torch.arange(16000.0, device=device)).unsqueeze(0)
    wav_lens = torch.ones(1, device=device)
    # Put noise waveform into temporary file
    noisefile = os.path.join(tmpdir, "noise.wav")
    write_audio(noisefile, test_noise.transpose(0, 1).cpu(), 16000)
    csv = os.path.join(tmpdir, "noise.csv")
    with open(csv, "w") as w:
        w.write("ID, duration, wav, wav_format, wav_opts\n")
        w.write(f"1, 1.0, {noisefile}, wav,\n")
    # Edge cases
    no_noise = AddNoise(mix_prob=0.0).to(device)
    assert no_noise(test_waveform, wav_lens).allclose(test_waveform)
    # Very high SNR: the noise contribution becomes negligible.
    no_noise = AddNoise(snr_low=1000, snr_high=1000)
    assert no_noise(test_waveform, wav_lens).allclose(test_waveform)
    # Very low SNR: the output is essentially the noise alone.
    all_noise = AddNoise(csv_file=csv, snr_low=-1000, snr_high=-1000)
    assert all_noise(test_waveform, wav_lens).allclose(test_noise, atol=1e-4)
    # Basic 0dB case
    add_noise = AddNoise(csv_file=csv).to(device)
    expected = (test_waveform + test_noise) / 2
    assert add_noise(test_waveform, wav_lens).allclose(expected, atol=1e-4)
def test_add_reverb(tmpdir, device):
    """Checks AddReverb with a unit impulse (identity), a delayed impulse,
    and a simple two-tap response against its closed-form result."""
    from speechbrain.processing.speech_augmentation import AddReverb
    test_waveform = torch.sin(torch.arange(16000.0, device=device)).unsqueeze(0)
    impulse_response = torch.zeros(1, 8000, device=device)
    impulse_response[0, 0] = 1.0
    wav_lens = torch.ones(1, device=device)
    # Put ir waveform into temporary file
    ir1 = os.path.join(tmpdir, "ir1.wav")
    ir2 = os.path.join(tmpdir, "ir2.wav")
    ir3 = os.path.join(tmpdir, "ir3.wav")
    write_audio(ir1, impulse_response.cpu().transpose(0, 1), 16000)
    # ir2: a single delayed, attenuated tap.
    impulse_response[0, 0] = 0.0
    impulse_response[0, 10] = 0.5
    write_audio(ir2, impulse_response.cpu().transpose(0, 1), 16000)
    # Check a very simple non-impulse-response case:
    # two taps (0.6, 0.4) convolve to 0.6*x[n] + 0.4*x[n-1].
    impulse_response[0, 10] = 0.6
    impulse_response[0, 11] = 0.4
    write_audio(ir3, impulse_response.cpu().transpose(0, 1), 16000)
    ir3_result = test_waveform * 0.6 + test_waveform.roll(1, -1) * 0.4
    # write ir csv file
    csv = os.path.join(tmpdir, "ir.csv")
    with open(csv, "w") as w:
        w.write("ID, duration, wav, wav_format, wav_opts\n")
        w.write(f"1, 0.5, {ir1}, wav,\n")
        w.write(f"2, 0.5, {ir2}, wav,\n")
        w.write(f"3, 0.5, {ir3}, wav,\n")
    # Edge case
    no_reverb = AddReverb(csv, reverb_prob=0.0).to(device)
    assert no_reverb(test_waveform, wav_lens).allclose(test_waveform)
    # Normal cases: sorting="original" walks ir1, ir2, ir3 in csv order.
    # NOTE(review): unlike the other modules here, this instance is not
    # moved with .to(device) — confirm whether that is intentional.
    add_reverb = AddReverb(csv, sorting="original")
    reverbed = add_reverb(test_waveform, wav_lens)[:, 0:1000]
    assert reverbed.allclose(test_waveform[:, 0:1000], atol=1e-1)
    reverbed = add_reverb(test_waveform, wav_lens)[:, 0:1000]
    assert reverbed.allclose(test_waveform[:, 0:1000], atol=1e-1)
    reverbed = add_reverb(test_waveform, wav_lens)[:, 0:1000]
    assert reverbed.allclose(ir3_result[:, 0:1000], atol=2e-1)
def test_speed_perturb(device):
    """SpeedPerturb edge cases plus the half-speed decimation property."""
    from speechbrain.processing.speech_augmentation import SpeedPerturb
    waveform = torch.sin(torch.arange(16000.0, device=device)).unsqueeze(0)
    # Zero probability, or a single 100% speed, leaves the signal alone.
    identity = SpeedPerturb(16000, perturb_prob=0.0).to(device)
    assert identity(waveform).allclose(waveform)
    identity = SpeedPerturb(16000, speeds=[100]).to(device)
    assert identity(waveform).allclose(waveform)
    # A 50% speed roughly decimates the signal by a factor of two.
    slowed = SpeedPerturb(16000, speeds=[50]).to(device)
    assert slowed(waveform).allclose(waveform[:, ::2], atol=3e-1)
def test_babble(device):
    """AddBabble edge cases and the single-babbler averaging property."""
    from speechbrain.processing.speech_augmentation import AddBabble
    batch = torch.stack(
        (
            torch.sin(torch.arange(16000.0, device=device)),
            torch.cos(torch.arange(16000.0, device=device)),
        )
    )
    lengths = torch.ones(2, device=device)
    # Zero mix probability, or an SNR so high the babble vanishes,
    # must pass the signal through unchanged.
    passthrough = AddBabble(mix_prob=0.0).to(device)
    assert passthrough(batch, lengths).allclose(batch)
    passthrough = AddBabble(speaker_count=1, snr_low=1000, snr_high=1000)
    assert passthrough(batch, lengths).allclose(batch)
    # One babbler at 0 dB simply averages each signal with its neighbor.
    babbler = AddBabble(speaker_count=1).to(device)
    expected = (batch + batch.roll(1, 0)) / 2
    assert babbler(batch, lengths).allclose(expected, atol=1e-4)
def test_drop_freq(device):
    """DropFreq leaves a tone intact unless its own frequency is dropped."""
    from speechbrain.processing.speech_augmentation import DropFreq
    tone = torch.sin(torch.arange(16000.0, device=device)).unsqueeze(0)
    # Edge cases: zero drop probability or zero drop count are identity.
    keep = DropFreq(drop_prob=0.0).to(device)
    assert keep(tone).allclose(tone)
    keep = DropFreq(drop_count_low=0, drop_count_high=0)
    assert keep(tone).allclose(tone)
    # Dropping a band away from the tone's frequency barely changes it.
    drop_elsewhere = DropFreq(drop_freq_low=0.5, drop_freq_high=0.9)
    assert drop_elsewhere(tone).allclose(tone, atol=1e-1)
    # Dropping exactly the tone's frequency nearly silences it.
    drop_tone = DropFreq(drop_freq_low=0.28, drop_freq_high=0.28)
    assert drop_tone(tone).allclose(
        torch.zeros(1, 16000, device=device), atol=4e-1
    )
def test_drop_chunk(device):
    """Checks DropChunk identity edge cases, a fully deterministic drop of
    samples [100, 200), and amplitude preservation with noise filling."""
    from speechbrain.processing.speech_augmentation import DropChunk
    test_waveform = torch.sin(torch.arange(16000.0, device=device)).unsqueeze(0)
    lengths = torch.ones(1, device=device)
    # Edge cases: any zeroed-out parameter disables dropping entirely.
    no_drop = DropChunk(drop_prob=0.0).to(device)
    assert no_drop(test_waveform, lengths).allclose(test_waveform)
    no_drop = DropChunk(drop_length_low=0, drop_length_high=0).to(device)
    assert no_drop(test_waveform, lengths).allclose(test_waveform)
    no_drop = DropChunk(drop_count_low=0, drop_count_high=0).to(device)
    assert no_drop(test_waveform, lengths).allclose(test_waveform)
    no_drop = DropChunk(drop_start=0, drop_end=0).to(device)
    assert no_drop(test_waveform, lengths).allclose(test_waveform)
    # Specify all parameters to ensure it is deterministic
    dropper = DropChunk(
        drop_length_low=100,
        drop_length_high=100,
        drop_count_low=1,
        drop_count_high=1,
        drop_start=100,
        drop_end=200,
        noise_factor=0.0,
    ).to(device)
    expected_waveform = test_waveform.clone()
    expected_waveform[:, 100:200] = 0.0
    assert dropper(test_waveform, lengths).allclose(expected_waveform)
    # Make sure amplitude is similar before and after
    dropper = DropChunk(noise_factor=1.0).to(device)
    drop_amplitude = dropper(test_waveform, lengths).abs().mean()
    orig_amplitude = test_waveform.abs().mean()
    assert drop_amplitude.allclose(orig_amplitude, atol=1e-2)
def test_clip(device):
    """DoClip edge cases plus equivalence with torch.clamp at +/-0.5."""
    from speechbrain.processing.speech_augmentation import DoClip
    wave = torch.sin(torch.arange(16000.0, device=device)).unsqueeze(0)
    # Zero probability or a unit clip range leaves the waveform unchanged.
    passthrough = DoClip(clip_prob=0.0).to(device)
    assert passthrough(wave).allclose(wave)
    passthrough = DoClip(clip_low=1, clip_high=1).to(device)
    assert passthrough(wave).allclose(wave)
    # Clipping at 0.5 behaves exactly like clamping to [-0.5, 0.5].
    clipped = DoClip(clip_low=0.5, clip_high=0.5).to(device)
    assert clipped(wave).allclose(wave.clamp(min=-0.5, max=0.5))
| 8,419 | 38.345794 | 80 | py |
speechbrain | speechbrain-main/tests/unittests/test_categorical_encoder.py | import pytest
def test_categorical_encoder(device):
    """Covers CategoricalEncoder: encoding, forced/inserted labels,
    nested decoding, limited label sets, and the unk fallback."""
    from speechbrain.dataio.encoder import CategoricalEncoder
    encoder = CategoricalEncoder()
    encoder.update_from_iterable("abcd")
    integers = encoder.encode_sequence("dcba")
    assert all(isinstance(i, int) for i in integers)
    assert encoder.is_continuous()
    # Re-adding an existing label raises; ensure_label does not.
    with pytest.raises(KeyError):
        encoder.add_label("a")
    # Does NOT raise:
    encoder.ensure_label("a")
    with pytest.raises(KeyError):
        encoder.insert_label("a", -3)
    # enforce_label moves "a" to index -3, breaking index continuity.
    encoder.enforce_label("a", -3)
    assert encoder.encode_label("a") == -3
    assert not encoder.is_continuous()
    # Decoding: the same nested structure is supported by decode_torch
    # (tensors) and decode_ndim (arbitrary nesting, ragged lists too).
    import torch
    encoder = CategoricalEncoder()
    encoder.update_from_iterable("abcd")
    result = encoder.decode_torch(
        torch.tensor([[0, 0], [1, 1], [2, 2], [3, 3]], device=device)
    )
    assert result == [["a", "a"], ["b", "b"], ["c", "c"], ["d", "d"]]
    result = encoder.decode_ndim([[0, 0], [1, 1], [2, 2], [3, 3]])
    assert result == [["a", "a"], ["b", "b"], ["c", "c"], ["d", "d"]]
    result = encoder.decode_ndim(torch.tensor([[0, 0], [1, 1], [2, 2], [3, 3]]))
    assert result == [["a", "a"], ["b", "b"], ["c", "c"], ["d", "d"]]
    result = encoder.decode_ndim([[[[[0, 0], [1, 1], [2, 2], [3, 3]]]]])
    assert result == [[[[["a", "a"], ["b", "b"], ["c", "c"], ["d", "d"]]]]]
    result = encoder.decode_torch(
        torch.tensor([[[[[0, 0], [1, 1], [2, 2], [3, 3]]]]], device=device)
    )
    assert result == [[[[["a", "a"], ["b", "b"], ["c", "c"], ["d", "d"]]]]]
    result = encoder.decode_ndim([[0, 0], [1], [2, 2, 2], []])
    assert result == [["a", "a"], ["b"], ["c", "c", "c"], []]
    # Limited label sets: keep only the most common / frequent-enough labels.
    encoder = CategoricalEncoder()
    encoder.limited_labelset_from_iterable("aabbbcccd", n_most_common=3)
    encoder.encode_sequence("abc")
    with pytest.raises(KeyError):
        encoder.encode_label("d")
    encoder = CategoricalEncoder()
    encoder.limited_labelset_from_iterable("aabbbcccd", min_count=3)
    encoder.encode_sequence("cbcb")
    with pytest.raises(KeyError):
        encoder.encode_label("a")
    with pytest.raises(KeyError):
        encoder.encode_label("d")
    encoder = CategoricalEncoder()
    encoder.limited_labelset_from_iterable(
        "aabbbcccd", n_most_common=3, min_count=3
    )
    encoder.encode_sequence("cbcb")
    with pytest.raises(KeyError):
        encoder.encode_label("a")
    with pytest.raises(KeyError):
        encoder.encode_label("d")
    # With an unk label, unknown inputs map to the unk index instead of raising.
    encoder = CategoricalEncoder(unk_label="<unk>")
    encoder.update_from_iterable("abc")
    assert encoder.encode_label("a") == 1
    assert encoder.encode_label("d") == 0
    assert encoder.decode_ndim(encoder.encode_label("d")) == "<unk>"
def test_categorical_encoder_saving(tmpdir):
    """Covers save/load round-trips: first save then load_if_possible,
    tuple labels, and unk-label persistence."""
    from speechbrain.dataio.encoder import CategoricalEncoder
    encoder = CategoricalEncoder(starting_index=3)
    encoding_file = tmpdir / "char_encoding.txt"
    # First time this runs, the encoding is created:
    if not encoder.load_if_possible(encoding_file):
        encoder.update_from_iterable("abcd")
        encoder.save(encoding_file)
    else:
        assert False  # We should not get here!
    # Now, imagine a recovery:
    encoder = CategoricalEncoder()
    # The second time, the encoding is just loaded from file:
    if not encoder.load_if_possible(encoding_file):
        assert False  # We should not get here!
    integers = encoder.encode_sequence("dcba")
    assert all(isinstance(i, int) for i in integers)
    assert encoder.starting_index == 3  # This is also loaded
    # Also possible to encode tuples and load
    encoder = CategoricalEncoder()
    encoding_file = tmpdir / "tuple_encoding.txt"
    encoder.add_label((1, 2, 3))
    encoder.insert_label((1, 2), index=-1)
    encoder.save(encoding_file)
    # Reload
    encoder = CategoricalEncoder()
    assert encoder.load_if_possible(encoding_file)
    assert encoder.encode_label((1, 2)) == -1
    # Load unk: the unk label round-trips through the saved file.
    encoder = CategoricalEncoder(unk_label="UNKNOWN")
    encoding_file = tmpdir / "unk_encoding.txt"
    encoder.update_from_iterable("abc")
    encoder.save(encoding_file)
    encoder = CategoricalEncoder()
    assert encoder.load_if_possible(encoding_file)
    assert encoder.encode_label("a") == 1
    assert encoder.decode_ndim(encoder.encode_label("d")) == "UNKNOWN"
    # Even if set differently: the loaded unk overrides the constructor's.
    encoder = CategoricalEncoder()
    encoder.add_unk()
    assert encoder.load_if_possible(encoding_file)
    assert encoder.encode_label("a") == 1
    assert encoder.decode_ndim(encoder.encode_label("d")) == "UNKNOWN"
def test_categorical_encoder_from_dataset():
    """The encoder can learn its vocabulary directly from a
    DynamicItemDataset pipeline output ("words")."""
    from speechbrain.dataio.encoder import CategoricalEncoder
    from speechbrain.dataio.dataset import DynamicItemDataset
    encoder = CategoricalEncoder()
    data = {
        "utt1": {"foo": -1, "bar": 0, "text": "hello world"},
        "utt2": {"foo": 1, "bar": 2, "text": "how are you world"},
        "utt3": {"foo": 3, "bar": 4, "text": "where are you world"},
        "utt4": {"foo": 5, "bar": 6, "text": "hello nation"},
    }
    # Pipeline: split text into words, then encode the word sequence.
    dynamic_items = [
        {"func": lambda x: x.split(), "takes": ["text"], "provides": "words"},
        {
            "func": encoder.encode_sequence,
            "takes": ["words"],
            "provides": "words_t",
        },
    ]
    output_keys = ["words_t"]
    dataset = DynamicItemDataset(data, dynamic_items, output_keys)
    encoder.update_from_didataset(dataset, "words", sequence_input=True)
    # "hello world" -> first two indices assigned by the encoder.
    assert dataset[0]["words_t"] == [0, 1]
    assert encoder.decode_ndim(dataset[0]["words_t"]) == ["hello", "world"]
def test_text_encoder(tmpdir):
    """TextEncoder BOS/EOS handling: prepend/append by label and by index,
    and persistence of the markers through save/load."""
    from speechbrain.dataio.encoder import TextEncoder
    encoder = TextEncoder()
    encoding_file = tmpdir / "text_encoding.txt"
    encoder.add_bos_eos()
    encoder.update_from_iterable(
        [["hello", "world"], ["how", "are", "you", "world"]],
        sequence_input=True,
    )
    encoded = encoder.encode_sequence(
        encoder.prepend_bos_label(["are", "you", "world"])
    )
    assert encoded[0] == 0
    encoded = encoder.append_eos_index(
        encoder.encode_sequence(["are", "you", "world"])
    )
    assert encoded[-1] == 1  # By default uses just one sentence_boundary marker
    encoder.save(encoding_file)
    # Reload and verify the complementary label/index helper pair.
    encoder = TextEncoder()
    assert encoder.load_if_possible(encoding_file)
    encoded = encoder.encode_sequence(
        encoder.append_eos_label(["are", "you", "world"])
    )
    assert encoded[-1] == 1
    encoded = encoder.prepend_bos_index(
        encoder.encode_sequence(["are", "you", "world"])
    )
    assert encoded[0] == 0
def test_ctc_encoder(tmpdir):
    """CTCTextEncoder: blank/BOS/EOS insertion, save/load, and CTC label
    collapsing (blank removal with and without repeat merging)."""
    from speechbrain.dataio.encoder import CTCTextEncoder
    encoder = CTCTextEncoder()
    encoder.insert_bos_eos(
        bos_label="<s>", bos_index=0, eos_label="</s>", eos_index=1
    )
    encoder.insert_blank(blank_label="_", index=2)
    encoding_file = tmpdir / "ctc_encoding.txt"
    encoder.update_from_iterable(["abcd", "bcdef"], sequence_input=True)
    encoded = encoder.encode_sequence(encoder.prepend_bos_label(["a", "b"]))
    assert encoded[0] == 0
    encoder.save(encoding_file)
    encoder = CTCTextEncoder()
    assert encoder.load_if_possible(encoding_file)
    # Default collapse: merge repeats, then drop blanks ("_").
    assert (
        "".join(encoder.collapse_labels("_bb_aaa___bbbbb_b_eeee_____"))
        == "babbe"
    )
    assert "".join(encoder.collapse_labels("babe")) == "babe"
    # merge_repeats=False: only blanks are removed, repeats survive.
    assert (
        "".join(
            encoder.collapse_labels(
                "_bb_aaa___bbbbb_b_eeee_____", merge_repeats=False
            )
        )
        == "bbaaabbbbbbeeee"
    )
    # collapse_indices_ndim applies the same collapsing to nested index
    # structures before decoding back to labels.
    assert encoder.decode_ndim(
        (
            encoder.collapse_indices_ndim(
                [
                    [0, 2, 4, 4, 2, 3, 3, 3, 2, 2, 2, 4, 2, 4, 2, 7, 2, 1],
                    [[0, 2, 3, 4, 5, 6, 5, 4, 3, 2, 1]],
                ]
            )
        )
    ) == [
        ["<s>", "b", "a", "b", "b", "e", "</s>"],
        [["<s>", "a", "b", "c", "d", "c", "b", "a", "</s>"]],
    ]
| 8,005 | 35.557078 | 80 | py |
speechbrain | speechbrain-main/tests/unittests/test_RNN.py | import torch
import torch.nn
from collections import OrderedDict
def test_RNN(device):
    """For each recurrent module (RNN, GRU, LSTM, LiGRU, QuasiRNN,
    RNNCell), checks that feeding the sequence one step at a time while
    threading the hidden state reproduces the full-sequence forward pass.

    Fix: the assertion messages in the RNN section wrongly said "GRU
    output/hidden states mismatch"; they now say "RNN ..." so a failure
    points at the right module.
    """
    from speechbrain.nnet.RNN import RNN, GRU, LSTM, LiGRU, QuasiRNN, RNNCell
    # Check RNN
    inputs = torch.randn(4, 2, 7, device=device)
    net = RNN(
        hidden_size=5,
        input_shape=inputs.shape,
        num_layers=2,
        bidirectional=False,
    ).to(device)
    output, hn = net(inputs)
    output_l = []
    hn_t = None
    for t in range(inputs.shape[1]):
        out_t, hn_t = net(inputs[:, t, :].unsqueeze(1), hn_t)
        output_l.append(out_t.squeeze(1))
    out_steps = torch.stack(output_l, dim=1)
    assert torch.all(
        torch.lt(torch.add(out_steps, -output), 1e-3)
    ), "RNN output mismatch"
    assert torch.all(
        torch.lt(torch.add(hn_t, -hn), 1e-3)
    ), "RNN hidden states mismatch"
    assert torch.jit.trace(net, inputs)
    # Check GRU
    inputs = torch.randn(4, 2, 7, device=device)
    net = GRU(
        hidden_size=5,
        input_shape=inputs.shape,
        num_layers=2,
        bidirectional=False,
    ).to(device)
    output, hn = net(inputs)
    output_l = []
    hn_t = None
    for t in range(inputs.shape[1]):
        out_t, hn_t = net(inputs[:, t, :].unsqueeze(1), hn_t)
        output_l.append(out_t.squeeze(1))
    out_steps = torch.stack(output_l, dim=1)
    assert torch.all(
        torch.lt(torch.add(out_steps, -output), 1e-3)
    ), "GRU output mismatch"
    assert torch.all(
        torch.lt(torch.add(hn_t, -hn), 1e-3)
    ), "GRU hidden states mismatch"
    assert torch.jit.trace(net, inputs)
    # Check LSTM (hidden state is an (h, c) tuple, hence two comparisons)
    inputs = torch.randn(4, 2, 7, device=device)
    net = LSTM(
        hidden_size=5,
        input_shape=inputs.shape,
        num_layers=2,
        bidirectional=False,
    ).to(device)
    output, hn = net(inputs)
    output_l = []
    hn_t = None
    for t in range(inputs.shape[1]):
        out_t, hn_t = net(inputs[:, t, :].unsqueeze(1), hn_t)
        output_l.append(out_t.squeeze(1))
    out_steps = torch.stack(output_l, dim=1)
    assert torch.all(
        torch.lt(torch.add(out_steps, -output), 1e-3)
    ), "LSTM output mismatch"
    assert torch.all(torch.lt(torch.add(hn_t[0], -hn[0]), 1e-3)) and torch.all(
        torch.lt(torch.add(hn_t[1], -hn[1]), 1e-3)
    ), "LSTM hidden states mismatch"
    assert torch.jit.trace(net, inputs)
    # Check LiGRU
    inputs = torch.randn(1, 2, 2, device=device)
    net = LiGRU(
        hidden_size=5,
        input_shape=inputs.shape,
        num_layers=2,
        bidirectional=False,
        normalization="layernorm",
    ).to(device)
    output, hn = net(inputs)
    output_l = []
    hn_t = None
    for t in range(inputs.shape[1]):
        out_t, hn_t = net(inputs[:, t, :].unsqueeze(1), hn_t)
        output_l.append(out_t.squeeze(1))
    out_steps = torch.stack(output_l, dim=1)
    assert torch.all(
        torch.lt(torch.add(out_steps, -output), 1e-3)
    ), "LiGRU output mismatch"
    assert torch.all(torch.lt(torch.add(hn_t[0], -hn[0]), 1e-3)) and torch.all(
        torch.lt(torch.add(hn_t[1], -hn[1]), 1e-3)
    ), "LiGRU hidden states mismatch"
    # Check QuasiRNN
    inputs = torch.randn(1, 2, 2, device=device)
    net = QuasiRNN(
        hidden_size=5,
        input_shape=inputs.shape,
        num_layers=2,
        bidirectional=False,
    ).to(device)
    output, hn = net(inputs)
    output_l = []
    hn_t = None
    for t in range(inputs.shape[1]):
        out_t, hn_t = net(inputs[:, t, :].unsqueeze(1), hn_t)
        output_l.append(out_t.squeeze(1))
    out_steps = torch.stack(output_l, dim=1)
    assert torch.all(
        torch.lt(torch.add(out_steps, -output), 1e-3)
    ), "QuasiRNN output mismatch"
    assert torch.all(
        torch.lt(torch.add(hn_t[0], -hn[0][1]), 1e-3)
    ) and torch.all(
        torch.lt(torch.add(hn_t[1], -hn[1][1]), 1e-3)
    ), "QuasiRNN hidden states mismatch"
    assert torch.jit.trace(net, inputs)
    # Check RNNCell: stepping the cell must match torch.nn.RNN once the
    # cell's state_dict keys are renamed to torch's per-layer convention.
    inputs = torch.randn(4, 2, 7, device=device)
    net = RNNCell(hidden_size=5, input_size=7, num_layers=2, dropout=0.0).to(
        device
    )
    hn_t = None
    output_lst = []
    for t in range(inputs.shape[1]):
        output, hn_t = net(inputs[:, t], hn_t)
        output_lst.append(output)
    out_steps = torch.stack(output_lst, dim=1)
    rnn = torch.nn.RNN(
        input_size=7, hidden_size=5, num_layers=2, batch_first=True
    ).to(device)
    # rename the state_dict: "rnn_cells.<i>.<w>" -> "<w>_l<i>"
    state = net.state_dict()
    new_state = []
    for name, tensor in state.items():
        index, weight_id = name[len("rnn_cells.")], name[len("rnn_cells.0.") :]
        new_state.append((f"{weight_id}_l{index}", tensor))
    new_state = OrderedDict(new_state)
    rnn.load_state_dict(new_state)
    output, hn = rnn(inputs)
    assert torch.all(
        torch.lt(torch.add(out_steps, -output), 1e-3)
    ), "RNNCell output mismatch"
    assert torch.all(torch.lt(torch.add(hn_t[0], -hn[0]), 1e-3)) and torch.all(
        torch.lt(torch.add(hn_t[1], -hn[1]), 1e-3)
    ), "RNNCell hidden states mismatch"
    assert torch.jit.trace(rnn, inputs)
| 5,091 | 29.130178 | 79 | py |
speechbrain | speechbrain-main/tests/unittests/test_features.py | import torch
def test_deltas(device):
    """Deltas of a constant signal must be zero, and the module traceable.

    Arguments
    ---------
    device : str
        The device on which tensors and modules are placed.
    """
    from speechbrain.processing.features import Deltas

    # NOTE: a torch.Size is a device-agnostic tuple subclass; the original
    # spuriously passed ``device=device`` to its constructor.
    size = torch.Size([10, 101, 20])
    inp = torch.ones(size, device=device)
    compute_deltas = Deltas(input_size=20).to(device)
    out = torch.zeros(size, device=device)
    # A constant signal has zero derivative at every frame.
    assert torch.sum(compute_deltas(inp) == out) == out.numel()
    assert torch.jit.trace(compute_deltas, inp)
def test_context_window(device):
    """Check zero-padded context expansion and the zero-context identity case."""
    from speechbrain.processing.features import ContextWindow

    # A 3-frame, 1-feature signal: shape [batch=1, time=3, feat=1].
    signal = torch.tensor([[[1.0], [2.0], [3.0]]], device=device)
    expand = ContextWindow(left_frames=1, right_frames=1).to(device)
    # Each frame gathers its left/right neighbour; borders are zero-padded.
    expected = torch.tensor(
        [[[0.0, 1.0, 2.0], [1.0, 2.0, 3.0], [2.0, 3.0, 0.0]]], device=device
    )
    assert torch.sum(expand(signal) == expected) == 9

    # With no context frames the module must act as the identity.
    random_feats = torch.rand([2, 10, 5], device=device)
    identity = ContextWindow(left_frames=0, right_frames=0).to(device)
    assert (
        torch.sum(identity(random_feats) == random_feats)
        == random_feats.numel()
    )
    assert torch.jit.trace(identity, random_feats)
def test_istft(device):
    """STFT followed by ISTFT must reconstruct a multi-channel waveform."""
    from speechbrain.processing.features import STFT
    from speechbrain.processing.features import ISTFT

    sample_rate = 16000
    wave = torch.randn([10, 16000], device=device)
    # Replicate the waveform on a trailing channel axis (3 channels).
    wave = torch.stack([wave, wave, wave], dim=-1)

    stft = STFT(sample_rate=sample_rate).to(device)
    istft = ISTFT(sample_rate=sample_rate).to(device)
    rebuilt = istft(stft(wave), sig_length=16000)

    # Allow a handful of boundary samples to exceed the tolerance.
    assert torch.sum(torch.abs(wave - rebuilt) < 5e-5) >= wave.numel() - 5
    assert torch.jit.trace(stft, wave)
    assert torch.jit.trace(istft, stft(wave))
def test_filterbank(device):
    """Traceability, dB floor behaviour, and batch-independence of Filterbank."""
    from speechbrain.processing.features import Filterbank

    fbank_module = Filterbank().to(device)
    assert torch.jit.trace(
        fbank_module, torch.ones([10, 101, 201], device=device)
    )

    # Check amin (-100 dB): silence maps to the dB floor everywhere.
    silence = torch.zeros([10, 101, 201], device=device)
    silent_fbanks = fbank_module(silence)
    assert torch.equal(silent_fbanks, torch.ones_like(silent_fbanks) * -100)

    # Check top_db on a single zero-energy bin.
    zero_energy = torch.zeros([1, 1, 1], device=device)
    floor = torch.Tensor([[[-100]]]).to(device)
    assert torch.equal(fbank_module._amplitude_to_DB(zero_energy), floor)

    # Making sure independent computation gives same results
    # as the batch computation.
    first = torch.rand([1, 101, 201], device=device) * 10
    second = torch.rand([1, 101, 201], device=device)
    batched = fbank_module(torch.cat([first, second], dim=0))
    assert torch.sum(torch.abs(fbank_module(first)[0] - batched[0])) < 8e-05
    assert torch.sum(torch.abs(fbank_module(second)[0] - batched[1])) < 8e-05
def test_dtc(device):
    """DCT must be jit-traceable on a random feature batch.

    Arguments
    ---------
    device : str
        The device on which tensors and modules are placed.
    """
    from speechbrain.processing.features import DCT

    # Move the module to the target device, consistent with the sibling tests
    # (the original left it on CPU, which breaks tracing with CUDA inputs).
    compute_dct = DCT(input_size=40).to(device)
    inputs = torch.randn([10, 101, 40], device=device)
    assert torch.jit.trace(compute_dct, inputs)
def test_input_normalization(device):
    """InputNormalization is traceable and normalizes using valid frames only."""
    from speechbrain.processing.features import InputNormalization

    normalizer = InputNormalization().to(device)
    feats = torch.randn([10, 101, 20], device=device)
    rel_lengths = torch.ones([10], device=device)
    assert torch.jit.trace(normalizer, (feats, rel_lengths))

    # Statistics must come from the first half only: [1, 2, 3] -> mean 2, std 1.
    normalizer = InputNormalization().to(device)
    padded = (
        torch.FloatTensor([1, 2, 3, 0, 0, 0])
        .to(device)
        .unsqueeze(0)
        .unsqueeze(2)
    )
    half_length = torch.FloatTensor([0.5]).to(device)
    normalized = normalizer(padded, half_length).squeeze()
    # Valid frames normalize to [-1, 0, 1]; padding maps to (0 - 2) / 1 = -2.
    reference = torch.FloatTensor([-1, 0, 1, -2, -2, -2]).to(device)
    assert torch.equal(normalized, reference)
def test_features_multimic(device):
    """Per-channel filterbanks must match the single-channel computation.

    Arguments
    ---------
    device : str
        The device on which tensors and modules are placed.
    """
    from speechbrain.processing.features import Filterbank

    compute_fbanks = Filterbank().to(device)
    inputs = torch.rand([10, 101, 201], device=device)
    output = compute_fbanks(inputs)
    # Duplicate the signal on a trailing channel axis and keep channel 0.
    inputs_ch2 = torch.stack((inputs, inputs), -1)
    output_ch2 = compute_fbanks(inputs_ch2)
    output_ch2 = output_ch2[..., 0]
    # Compare absolute differences: the original summed *signed* differences,
    # which can cancel out and hide large per-element errors.
    assert torch.sum(torch.abs(output - output_ch2)) < 5e-05
| 4,139 | 30.12782 | 72 | py |
def test_profile_class(device):
    """The @profile class decorator traces the Brain constructor and fit()."""
    import torch
    from torch.optim import SGD
    from speechbrain.core import Brain
    from speechbrain.utils.profiling import profile

    @profile
    class SimpleBrain(Brain):
        def compute_forward(self, batch, stage):
            return self.modules.model(batch[0])

        def compute_objectives(self, predictions, batch, stage):
            return torch.nn.functional.l1_loss(predictions, batch[1])

    # Minimal 10x10 linear regression setup; one batch for train and valid.
    model = torch.nn.Linear(in_features=10, out_features=10, device=device)
    inputs = torch.rand(10, 10, device=device)
    targets = torch.rand(10, 10, device=device)
    train_set = ([inputs, targets],)
    valid_set = ([inputs, targets],)

    # Profiling: __init__ constructor.
    brain = SimpleBrain(
        {"model": model}, lambda x: SGD(x, 0.1), run_opts={"device": device}
    )
    # The decorator attaches a profiler to the instance during construction.
    assert brain.profiler is not None
    assert brain.profiler.profiler is not None
    # assert len(brain.profiler.key_averages()) == 2
    # assert (brain.profiler.events().total_average().count >= 4)  # == 6 # before; config dependent: 7
    assert (
        len(brain.profiler.speechbrain_event_traces) == 1
    )  # set & filled by the @profile decorator
    """print(brain.profiler.key_averages().table(sort_by="cpu_time_total", row_limit=10))
    -------------------------------------------  ------------  ------------  ------------  ------------  ------------  ------------
                                           Name    Self CPU %      Self CPU   CPU total %     CPU total  CPU time avg    # of Calls
    -------------------------------------------  ------------  ------------  ------------  ------------  ------------  ------------
        aten::_has_compatible_shallow_copy_type        73.33%      11.000us        73.33%      11.000us       2.750us             4
                                       aten::to        26.67%       4.000us        26.67%       4.000us       2.000us             2
    -------------------------------------------  ------------  ------------  ------------  ------------  ------------  ------------
    Self CPU time total: 15.000us
    """
    # Profiling: fit() for train operations.
    # By default, @profile should also annotate fit & evaluate functions; here the fit function is tested only.
    brain.fit(epoch_counter=range(10), train_set=train_set, valid_set=valid_set)
    assert brain.profiler is not None
    # assert len(brain.profiler.key_averages()) >= 60  # 72 with torch==1.10.1
    # assert brain.profiler.events().total_average().count >= 2000  # 2832 with torch==1.10.1
    # Each traced call appends one trace: __init__ plus fit -> two traces.
    assert len(brain.profiler.speechbrain_event_traces) == 2
    # assert len(brain.profiler.speechbrain_event_traces[0]) >= 4  # == 6 # before; config dependent: 7
    # assert len(brain.profiler.speechbrain_event_traces[1]) >= 2000  # 2862 with torch==1.10.1
    """print(brain.profiler.key_averages().table(sort_by="cpu_time_total", row_limit=10))
    -------------------------------------------------------  ------------  ------------  ------------  ------------  ------------  ------------
                                                       Name    Self CPU %      Self CPU   CPU total %     CPU total  CPU time avg    # of Calls
    -------------------------------------------------------  ------------  ------------  ------------  ------------  ------------  ------------
                                              aten::l1_loss         2.60%     443.000us        26.15%       4.460ms     111.500us            40
    enumerate(DataLoader)#_SingleProcessDataLoaderIter._...        12.97%       2.212ms        20.28%       3.459ms      86.475us            40
                                               aten::linear         1.28%     219.000us        12.97%       2.212ms     110.600us            20
                                                   aten::to         1.88%     320.000us        10.68%       1.822ms      11.387us           160
                                             aten::isfinite         1.98%     338.000us         9.82%       1.674ms      55.800us            30
                                                 aten::mean         1.10%     188.000us         9.31%       1.587ms      79.350us            20
                                             aten::_to_copy         5.65%     964.000us         8.99%       1.533ms      15.330us           100
                                                aten::stack         2.23%     380.000us         8.67%       1.479ms      29.580us            50
                                               aten::matmul         1.62%     277.000us         7.63%       1.301ms      65.050us            20
                                     aten::l1_loss_backward         1.24%     212.000us         6.78%       1.157ms      57.850us            20
    -------------------------------------------------------  ------------  ------------  ------------  ------------  ------------  ------------
    Self CPU time total: 17.054ms
    """
def test_profile_func(device):
    """The @profile function decorator wraps a training call.

    NOTE(review): most of this test is retained as commented-out reference
    material inside triple-quoted strings; only the decorated ``train``
    definition is executed. The strings document an events_diff comparison
    between a plain Brain and one using record_function annotations.
    """
    # import torch
    # from pytest import raises
    # from torch.optim import SGD
    # from speechbrain.core import Brain
    # from torch.autograd.profiler import record_function
    from speechbrain.utils.profiling import profile

    # from speechbrain.utils.profiling import events_diff

    """
    class SimpleBrain(Brain):
        def compute_forward(self, batch, stage):
            return self.modules.model(batch[0])

        def compute_objectives(self, predictions, batch, stage):
            return torch.nn.functional.l1_loss(predictions, batch[1])

    class SimpleBrainNittyGritty(Brain):
        def compute_forward(self, batch, stage):
            # example: one way of using torch.autograd.profiler.record_function
            with record_function("is this faster (?)"):
                this = self.modules.model(batch[0])
            return this

        def compute_objectives(self, predictions, batch, stage):
            # example: one could also think of running comparative testing using record_function
            with record_function("or that (?)"):
                that = torch.nn.functional.l1_loss(predictions, batch[1])
            return that
    """

    # Decorating a free function: the wrapper returns the profiler.
    @profile
    def train(brain, train_set, valid_set):
        brain.fit(
            epoch_counter=range(10), train_set=train_set, valid_set=valid_set
        )

    """
    model = torch.nn.Linear(in_features=10, out_features=10, device=device)
    inputs = torch.rand(10, 10, device=device)
    targets = torch.rand(10, 10, device=device)
    training_set = ([inputs, targets],)
    validation_set = ([inputs, targets],)

    simple_brain = SimpleBrain(
        {"model": model}, lambda x: SGD(x, 0.1), run_opts={"device": device}
    )
    prof_simple = train(simple_brain, training_set, validation_set)
    # print(prof_simple.key_averages().table(sort_by="cpu_time_total"))
    # assert len(prof_simple.events()) >= 2500  # 2832 with torch==1.10.1
    # assert len(prof_simple.key_averages()) >= 60  # 72 with torch==1.10.1

    simple_brain_nitty_gritty = SimpleBrainNittyGritty(
        {"model": model}, lambda x: SGD(x, 0.1), run_opts={"device": device}
    )
    prof_nitty_gritty = train(
        simple_brain_nitty_gritty, training_set, validation_set
    )
    # print(prof_nitty_gritty.key_averages().table(sort_by="cpu_time_total"))
    # assert len(prof_nitty_gritty.events()) >= 2500  # 3030 with torch==1.10.1
    # assert len(prof_nitty_gritty.key_averages()) >= 60  # 74 with torch==1.10.1
    """
    # The outputs of this diff are only for visualisation, ``simple_delta._build_tree()`` will throw an error.
    """
    simple_delta, nitty_gritty_delta = events_diff(
        prof_simple.key_averages(), prof_nitty_gritty.key_averages()
    )
    # assert len(simple_delta) >= 4  # == 6 # before; config dependent: 7
    # assert len(nitty_gritty_delta) >= 4  # == 8 # before
    # assert simple_delta.total_average().count == 582 #Switching off becuase sometimes it fails
    # assert nitty_gritty_delta.total_average().count == 780 #Switching off becuase sometimes it fails
    with raises(Exception) as err_tree:
        simple_delta._build_tree()  # as mentioned.
    assert err_tree.type == AttributeError
    with raises(Exception) as err_averages:
        simple_delta.key_averages()  # as mentioned.
    assert err_averages.type == AssertionError
    " ""Both classes have alike numbers of function calls (given the same input data and train function).
    Sparing where both have the same number of calls:

    print(simple_delta.table(sort_by="cpu_time_total"))
    ----------------  ------------  ------------  ------------  ------------  ------------  ------------
                Name    Self CPU %      Self CPU   CPU total %     CPU total  CPU time avg    # of Calls
    ----------------  ------------  ------------  ------------  ------------  ------------  ------------
         aten::zeros        17.44%     240.000us        27.69%     381.000us       6.350us            60
         aten::empty        26.96%     371.000us        27.11%     373.000us       1.492us           250
        aten::detach        18.68%     257.000us        25.65%     353.000us       5.694us            62
          aten::add_        25.00%     344.000us        25.00%     344.000us       5.931us            58
              detach         6.98%      96.000us         9.45%     130.000us       2.097us            62
         aten::zero_         4.94%      68.000us         4.94%      68.000us       0.756us            90
    ----------------  ------------  ------------  ------------  ------------  ------------  ------------
    Self CPU time total: 1.376ms

    print(nitty_gritty_delta.table(sort_by="cpu_time_total"))
    ----------------------  ------------  ------------  ------------  ------------  ------------  ------------
                      Name    Self CPU %      Self CPU   CPU total %     CPU total  CPU time avg    # of Calls
    ----------------------  ------------  ------------  ------------  ------------  ------------  ------------
        is this faster (?)        29.70%       1.024ms        76.19%       2.627ms     131.350us            20
               or that (?)        29.29%       1.010ms        67.00%       2.310ms     115.500us            20
               aten::zeros         8.32%     287.000us        15.52%     535.000us       5.350us           100
               aten::empty        14.01%     483.000us        14.07%     485.000us       1.470us           330
                aten::add_         9.92%     342.000us         9.92%     342.000us       5.700us            60
              aten::detach         2.81%      97.000us         6.09%     210.000us       3.500us            60
                    detach         3.28%     113.000us         4.00%     138.000us       2.300us            60
               aten::zero_         2.67%      92.000us         2.67%      92.000us       0.708us           130
    ----------------------  ------------  ------------  ------------  ------------  ------------  ------------
    Self CPU time total: 3.448ms

    Curiosity doesn't come for free ;-)
    """
def test_scheduler(device):
    """The @schedule decorator delays tracing until its warm-up steps elapse."""
    import torch
    from pytest import raises
    from torch.optim import SGD
    from speechbrain.core import Brain
    from speechbrain.utils.profiling import profile, schedule

    @schedule
    @profile
    class SimpleBrain(Brain):
        def compute_forward(self, batch, stage):
            return self.modules.model(batch[0])

        def compute_objectives(self, predictions, batch, stage):
            return torch.nn.functional.l1_loss(predictions, batch[1])

    # Minimal 10x10 linear regression setup; one batch per dataset.
    model = torch.nn.Linear(in_features=10, out_features=10, device=device)
    inputs = torch.rand(10, 10, device=device)
    targets = torch.rand(10, 10, device=device)
    train_set = ([inputs, targets],)
    valid_set = ([inputs, targets],)
    test_set = ([inputs, targets],)

    # Profiling: __init__ constructor -- while scheduler: waiting --> nothing to report
    brain = SimpleBrain(
        {"model": model}, lambda x: SGD(x, 0.1), run_opts={"device": device}
    )
    assert brain.profiler.profiler is None
    assert len(brain.profiler.speechbrain_event_traces) == 0
    with raises(Exception) as err:
        brain.profiler.events()  # Tracing hasn't started, yet, so everything is in err. Scheduler says: wait.
    assert err.type == AssertionError
    assert brain.profiler.step_num == 0

    # Profiling: fit() for train operations.
    brain.fit(epoch_counter=range(10), train_set=train_set, valid_set=valid_set)
    # 10 epochs x (1 train + 1 valid batch) -> 20 scheduler steps.
    assert brain.profiler.step_num == 20
    assert len(brain.profiler.speechbrain_event_traces) == 1
    # assert len(brain.profiler.events()) >= 250  # 293 with torch==1.10.1
    # assert len(brain.profiler.key_averages()) >= 60  # 73 with torch==1.10.1
    """print(brain.profiler.key_averages().table(sort_by="cpu_time_total"))
    -------------------------------------------------------  ------------  ------------  ------------  ------------  ------------  ------------
                                                       Name    Self CPU %      Self CPU   CPU total %     CPU total  CPU time avg    # of Calls
    -------------------------------------------------------  ------------  ------------  ------------  ------------  ------------  ------------
                                              ProfilerStep*        55.48%       1.504ms        99.00%       2.684ms       1.342ms             2
                                              aten::l1_loss         1.07%      29.000us         9.30%     252.000us      63.000us             4
    enumerate(DataLoader)#_SingleProcessDataLoaderIter._...         4.57%     124.000us         7.01%     190.000us      47.500us             4
                                               aten::linear         2.32%      63.000us         6.93%     188.000us      94.000us             2
                                             aten::isfinite         0.89%      24.000us         5.35%     145.000us      48.333us             3
                                                   aten::to         1.00%      27.000us         4.46%     121.000us       7.562us            16
                                             aten::_to_copy         2.18%      59.000us         3.58%      97.000us       9.700us            10
                                                aten::stack         0.89%      24.000us         3.28%      89.000us      17.800us             5
                                     aten::l1_loss_backward         0.55%      15.000us         3.14%      85.000us      42.500us             2
                                                 aten::mean         0.48%      13.000us         3.10%      84.000us      42.000us             2
                                               aten::matmul         0.55%      15.000us         2.84%      77.000us      38.500us             2
                                                   aten::ne         0.77%      21.000us         2.73%      74.000us      24.667us             3
                                                   aten::mm         2.29%      62.000us         2.36%      64.000us      21.333us             3
                                                  aten::div         1.36%      37.000us         2.25%      61.000us      20.333us             3
                                    Optimizer.step#SGD.step         1.88%      51.000us         2.21%      60.000us      60.000us             1
    autograd::engine::evaluate_function: L1LossBackward0...         0.11%       3.000us         2.07%      56.000us      56.000us             1
                                                aten::zeros         1.44%      39.000us         1.99%      54.000us       6.750us             8
                                            L1LossBackward0         0.11%       3.000us         1.95%      53.000us      53.000us             1
                                                    aten::t         0.89%      24.000us         1.88%      51.000us      10.200us             5
                                                  aten::cat         0.59%      16.000us         1.81%      49.000us       9.800us             5
                                                 aten::div_         0.70%      19.000us         1.73%      47.000us      23.500us             2
           autograd::engine::evaluate_function: MmBackward0         0.11%       3.000us         1.70%      46.000us      46.000us             1
                                                  aten::abs         1.14%      31.000us         1.59%      43.000us       5.375us             8
                                                MmBackward0         0.37%      10.000us         1.59%      43.000us      43.000us             1
                                                  aten::sum         1.00%      27.000us         1.33%      36.000us      12.000us             3
                                                aten::empty         1.29%      35.000us         1.29%      35.000us       1.207us            29
          autograd::engine::evaluate_function: AddBackward0         0.63%      17.000us         1.25%      34.000us      34.000us             1
                                                 aten::_cat         0.85%      23.000us         1.22%      33.000us       6.600us             5
                                                 aten::add_         1.11%      30.000us         1.11%      30.000us       5.000us             6
                                            aten::transpose         0.70%      19.000us         0.92%      25.000us       5.000us             5
          autograd::engine::evaluate_function: DivBackward0         0.18%       5.000us         0.92%      25.000us      25.000us             1
                                                 aten::norm         0.77%      21.000us         0.89%      24.000us       8.000us             3
                          Optimizer.zero_grad#SGD.zero_grad         0.63%      17.000us         0.81%      22.000us      22.000us             1
                                                 aten::item         0.55%      15.000us         0.77%      21.000us       3.000us             7
                                                aten::copy_         0.77%      21.000us         0.77%      21.000us       2.100us            10
                                               DivBackward0         0.15%       4.000us         0.74%      20.000us      20.000us             1
    autograd::engine::evaluate_function: torch::autograd...         0.15%       4.000us         0.74%      20.000us      10.000us             2
                                                  aten::mul         0.55%      15.000us         0.74%      20.000us       5.000us             4
                                               aten::detach         0.37%      10.000us         0.70%      19.000us       3.167us             6
                                           aten::as_strided         0.59%      16.000us         0.63%      17.000us       1.062us            16
                                            aten::unsqueeze         0.41%      11.000us         0.59%      16.000us       2.667us             6
                                        aten::empty_strided         0.59%      16.000us         0.59%      16.000us       1.455us            11
                            torch::autograd::AccumulateGrad         0.30%       8.000us         0.59%      16.000us       8.000us             2
                                           aten::is_nonzero         0.18%       5.000us         0.59%      16.000us       5.333us             3
                                                 aten::view         0.55%      15.000us         0.55%      15.000us       3.000us             5
                                              aten::random_         0.52%      14.000us         0.52%      14.000us       7.000us             2
    autograd::engine::evaluate_function: UnsafeViewBackw...         0.11%       3.000us         0.52%      14.000us      14.000us             1
                                                  aten::sub         0.48%      13.000us         0.48%      13.000us       4.333us             3
                                                  aten::add         0.18%       5.000us         0.48%      13.000us      13.000us             1
                                            aten::ones_like         0.15%       4.000us         0.44%      12.000us      12.000us             1
                                                 aten::mul_         0.44%      12.000us         0.44%      12.000us       4.000us             3
                                                     detach         0.33%       9.000us         0.44%      12.000us       2.000us             6
                                        UnsafeViewBackward0         0.11%       3.000us         0.41%      11.000us      11.000us             1
                                                 aten::abs_         0.18%       5.000us         0.37%      10.000us       5.000us             2
                                           aten::empty_like         0.26%       7.000us         0.37%      10.000us       5.000us             2
                                                aten::clamp         0.26%       7.000us         0.37%      10.000us      10.000us             1
                                           aten::zeros_like         0.18%       5.000us         0.33%       9.000us       9.000us             1
                                                   aten::eq         0.33%       9.000us         0.33%       9.000us       3.000us             3
                                                aten::zero_         0.30%       8.000us         0.30%       8.000us       0.727us            11
                                         aten::_unsafe_view         0.22%       6.000us         0.30%       8.000us       4.000us             2
                                                aten::fill_         0.30%       8.000us         0.30%       8.000us       2.000us             4
                                              aten::reshape         0.18%       5.000us         0.30%       8.000us       8.000us             1
                                  aten::_local_scalar_dense         0.22%       6.000us         0.26%       7.000us       1.000us             7
            autograd::engine::evaluate_function: TBackward0         0.11%       3.000us         0.26%       7.000us       7.000us             1
                                              aten::resize_         0.22%       6.000us         0.22%       6.000us       1.200us             5
                                           aten::reciprocal         0.22%       6.000us         0.22%       6.000us       6.000us             1
                                                 aten::sgn_         0.15%       4.000us         0.15%       4.000us       4.000us             1
                                                 TBackward0         0.07%       2.000us         0.15%       4.000us       4.000us             1
                                       aten::_reshape_alias         0.11%       3.000us         0.11%       3.000us       3.000us             1
                                            aten::clamp_max         0.11%       3.000us         0.11%       3.000us       3.000us             1
                                         aten::resolve_conj         0.07%       2.000us         0.07%       2.000us       0.333us             6
                                    aten::broadcast_tensors         0.07%       2.000us         0.07%       2.000us       1.000us             2
                                               AddBackward0         0.04%       1.000us         0.04%       1.000us       1.000us             1
    -------------------------------------------------------  ------------  ------------  ------------  ------------  ------------  ------------
    Self CPU time total: 2.711ms <=== above: Self CPU time total: 18.451ms (... the impact of warm-up)
    """

    @schedule
    @profile
    def train():
        # The step() function is executed inside speechbrain.core.brain.fit and is property of the Brain's profiler.
        # Above profiler and its scheduler are without power here, since prof.step() is not run - at all.
        brain.fit(
            epoch_counter=range(10), train_set=train_set, valid_set=valid_set
        )

    prof = train()
    # since we used the same brain (which has its own profiler)
    # assert brain.profiler.step_num == 20  # started again from 0 steps
    assert len(brain.profiler.speechbrain_event_traces) == 2
    # assert len(brain.profiler.events()) >= 250  # 293 with torch==1.10.1 # unchanged (overwritten with akin data)
    # assert len(brain.profiler.key_averages()) >= 60  # 73 with torch==1.10.1 # unchanged (akin data)
    # now, to the train function's profiler
    assert (
        prof.step_num == 0
    )  # the prof.step() operation wasn't run (not in scope) -> its scheduler is unawaken!
    assert not hasattr(prof, "speechbrain_event_traces")  # no trace collection
    with raises(Exception) as err_prof:
        prof.events()  # No tracing started with this one.
    assert err_prof.type == AssertionError  # sparing: key_averages()

    # But how to add profiling then if no writing access is there for a class... pretrained, for example:
    class SimpleBrainUntracked(Brain):
        def compute_forward(self, batch, stage):
            return self.modules.model(batch[0])

        def compute_objectives(self, predictions, batch, stage):
            return torch.nn.functional.l1_loss(predictions, batch[1])

    brain_or_pretrained = SimpleBrainUntracked(
        {"model": model}, lambda x: SGD(x, 0.1), run_opts={"device": device}
    )

    # Set-up the profiler and hook it to the model.
    scheduled_profiler = schedule(profile)
    scheduled_profiler(brain_or_pretrained)

    # Profiling: still too early for scheduler!
    brain_or_pretrained.evaluate(test_set=test_set)  # -> step_num=1
    assert brain_or_pretrained.profiler.step_num == 1
    assert brain_or_pretrained.profiler.profiler is None

    # Profiling: scheduler warms-up.
    brain_or_pretrained.evaluate(
        test_set=(
            [inputs, targets],  # +1x test_set -> step_num=2
            [inputs, targets],
        )
    )
    assert brain_or_pretrained.profiler.step_num == 2
    # brain_or_pretrained.profiler.profiler will be set (not None anymore)
    # when run on cpu, there are no events - but cuda activities are recorded if existing
    # see: https://github.com/speechbrain/speechbrain/issues/1469
    if (
        torch.profiler.ProfilerActivity.CUDA
        in brain_or_pretrained.profiler.activities
    ):
        # Only device bookkeeping calls are expected during warm-up.
        assert (
            len(
                set(
                    [
                        x.name
                        for x in brain_or_pretrained.profiler.profiler.function_events
                    ]
                )
                - {
                    "cudaGetDeviceCount",
                    "cudaGetDeviceProperties",
                    "cudaDeviceSynchronize",
                }
            )
        ) == 0
    else:
        assert len(brain_or_pretrained.profiler.events()) == 0

    # Profiling: scheduler warms-up...
    brain_or_pretrained.evaluate(
        test_set=(
            [inputs, targets],  # +1x test_set
            [inputs, targets],  # +2x test_set -> step_num=3
            [inputs, targets],
        )
    )
    assert brain_or_pretrained.profiler.step_num == 3
    if (
        torch.profiler.ProfilerActivity.CUDA
        in brain_or_pretrained.profiler.activities
    ):
        assert (
            len(
                set(
                    [
                        x.name
                        for x in brain_or_pretrained.profiler.profiler.function_events
                    ]
                )
                - {
                    "cudaGetDeviceCount",
                    "cudaGetDeviceProperties",
                    "cudaDeviceSynchronize",
                }
            )
        ) == 0
    else:
        assert len(brain_or_pretrained.profiler.events()) == 0

    # Profiling: first trace!
    brain_or_pretrained.evaluate(
        test_set=(
            [inputs, targets],  # +1x test_set
            [inputs, targets],  # +2x test_set
            [inputs, targets],  # +3x test_set -> step_num=4
            [inputs, targets],
        )
    )
    assert brain_or_pretrained.profiler.step_num == 4
    # assert len(brain_or_pretrained.profiler.events()) >= 4  # == 10 # before
    # assert len(brain_or_pretrained.profiler.key_averages()) >= 4  # == 5 # before
    assert (
        len(brain_or_pretrained.profiler.events()) >= 1
    )  # 1 on CPU; more w/ CUDA
def test_tracer(device):
    """Exercise trace export in its three usage patterns."""
    import torch
    from torch.optim import SGD
    from speechbrain.core import Brain
    from speechbrain.utils.profiling import profile, export

    @export
    @profile
    class TracedBrain(Brain):
        def compute_forward(self, batch, stage):
            return self.modules.model(batch[0])

        def compute_objectives(self, predictions, batch, stage):
            return torch.nn.functional.l1_loss(predictions, batch[1])

    linear = torch.nn.Linear(in_features=10, out_features=10, device=device)
    x = torch.rand(10, 10, device=device)
    y = torch.rand(10, 10, device=device)
    train_data = ([x, y],)
    valid_data = ([x, y],)
    eval_data = ([x, y],)

    # Pattern 1: decorate the class -> constructor and training are traced.
    traced = TracedBrain(
        {"model": linear}, lambda p: SGD(p, 0.1), run_opts={"device": device}
    )
    traced.fit(
        epoch_counter=range(10), train_set=train_data, valid_set=valid_data
    )

    # A plain Brain, standing in for a pretrained model without tracing.
    class PlainBrain(Brain):
        def compute_forward(self, batch, stage):
            return self.modules.model(batch[0])

        def compute_objectives(self, predictions, batch, stage):
            return torch.nn.functional.l1_loss(predictions, batch[1])

    # Pattern 2: hook a profiler with an export handler onto an instance
    # (nothing was traced during its __init__).
    plain_one = PlainBrain(
        {"model": linear}, lambda p: SGD(p, 0.1), run_opts={"device": device}
    )
    profile(plain_one, on_trace_ready=export(), with_stack=True)
    plain_one.evaluate(test_set=eval_data)

    # Pattern 3: build an exporting profiler first, then attach it and
    # benchmark inference.
    plain_two = PlainBrain(
        {"model": linear}, lambda p: SGD(p, 0.1), run_opts={"device": device}
    )
    exporting_profiler = export(profile)
    assert plain_two.profiler is None
    exporting_profiler(plain_two)
    plain_two.evaluate(test_set=eval_data)
def test_aggregated_traces(device):
    """Each traced call appends one entry to speechbrain_event_traces.

    NOTE(review): the merge_traces() checks are retained as commented-out
    reference material inside triple-quoted strings; only the trace-count
    assertions execute.
    """
    import torch
    from torch.optim import SGD
    from speechbrain.core import Brain
    from speechbrain.utils.profiling import profile

    @profile
    class SimpleBrain(Brain):
        def compute_forward(self, batch, stage):
            return self.modules.model(batch[0])

        def compute_objectives(self, predictions, batch, stage):
            return torch.nn.functional.l1_loss(predictions, batch[1])

    # Minimal 10x10 linear regression setup; test_set holds two batches.
    model = torch.nn.Linear(in_features=10, out_features=10, device=device)
    inputs = torch.rand(10, 10, device=device)
    targets = torch.rand(10, 10, device=device)
    train_set = ([inputs, targets],)
    valid_set = ([inputs, targets],)
    test_set = (
        [inputs, targets],
        [inputs, targets],
    )

    # Profiling: __init__ constructor -- while scheduler: waiting --> nothing to report
    brain = SimpleBrain(
        {"model": model}, lambda x: SGD(x, 0.1), run_opts={"device": device}
    )

    # Profiling: empty traces
    assert len(brain.profiler.speechbrain_event_traces) == 1
    """
    init_report = brain.profiler.merge_traces()
    assert len(init_report) >= 1
    # assert len(init_report) >= 4  # == 6 # before; config dependent: 7
    assert len(brain.profiler.speechbrain_event_traces) == 1
    " ""print(brain.profiler.key_averages().table(sort_by="cpu_time_total", row_limit=10))
    -------------------------------------------  ------------  ------------  ------------  ------------  ------------  ------------
                                           Name    Self CPU %      Self CPU   CPU total %     CPU total  CPU time avg    # of Calls
    -------------------------------------------  ------------  ------------  ------------  ------------  ------------  ------------
        aten::_has_compatible_shallow_copy_type        80.00%      12.000us        80.00%      12.000us       3.000us             4
                                       aten::to        20.00%       3.000us        20.00%       3.000us       1.500us             2
    -------------------------------------------  ------------  ------------  ------------  ------------  ------------  ------------
    Self CPU time total: 15.000us

    print(init_report.key_averages().table(sort_by="cpu_time_total", row_limit=10))
    -------------------------------------------  ------------  ------------  ------------  ------------  ------------  ------------
                                           Name    Self CPU %      Self CPU   CPU total %     CPU total  CPU time avg    # of Calls
    -------------------------------------------  ------------  ------------  ------------  ------------  ------------  ------------
        aten::_has_compatible_shallow_copy_type        80.00%      12.000us        80.00%      12.000us       3.000us             4
                                       aten::to        20.00%       3.000us        20.00%       3.000us       1.500us             2
    -------------------------------------------  ------------  ------------  ------------  ------------  ------------  ------------
    Self CPU time total: 15.000us
    """
    # Second trace: fit().
    brain.fit(epoch_counter=range(10), train_set=train_set, valid_set=valid_set)
    assert len(brain.profiler.speechbrain_event_traces) == 2
    # assert len(brain.profiler.speechbrain_event_traces[0]) >= 4  # == 6 # before; config dependent: 7
    # assert len(brain.profiler.speechbrain_event_traces[1]) >= 2500  # 2862 with torch==1.10.1
    # assert len(brain.profiler.events()) >= 2500  # 2832 with torch==1.10.1
    # assert len(brain.profiler.events().key_averages()) >= 60  # 72 with torch==1.10.1
    """print(brain.profiler.events().key_averages().table(sort_by="cpu_time_total", row_limit=10))
    -------------------------------------------------------  ------------  ------------  ------------  ------------  ------------  ------------
                                                       Name    Self CPU %      Self CPU   CPU total %     CPU total  CPU time avg    # of Calls
    -------------------------------------------------------  ------------  ------------  ------------  ------------  ------------  ------------
                                              aten::l1_loss         2.39%     415.000us        25.28%       4.392ms     109.800us            40
    enumerate(DataLoader)#_SingleProcessDataLoaderIter._...        12.65%       2.198ms        20.06%       3.485ms      87.125us            40
                                               aten::linear         1.41%     245.000us        13.24%       2.299ms     114.950us            20
                                                   aten::to         2.04%     354.000us        10.59%       1.839ms      11.494us           160
                                             aten::isfinite         2.12%     369.000us         9.87%       1.714ms      57.133us            30
                                                 aten::mean         1.13%     196.000us         9.15%       1.589ms      79.450us            20
                                                aten::stack         2.33%     404.000us         8.94%       1.553ms      31.060us            50
                                             aten::_to_copy         5.67%     985.000us         8.67%       1.506ms      15.060us           100
                                               aten::matmul         1.57%     273.000us         7.83%       1.360ms      68.000us            20
                                     aten::l1_loss_backward         1.22%     212.000us         6.67%       1.158ms      57.900us            20
    -------------------------------------------------------  ------------  ------------  ------------  ------------  ------------  ------------
    Self CPU time total: 17.370ms
    """
    # Profiling: aggregate traces
    """
    short_report = brain.profiler.merge_traces()
    assert len(short_report) >= 1
    # assert len(short_report) >= 2500  # 2838 with torch==1.10.1
    # assert len(short_report.key_averages()) >= 60  # 73 with torch==1.10.1
    " ""print(short_report.key_averages().table(sort_by="cpu_time_total", row_limit=10))
    -------------------------------------------------------  ------------  ------------  ------------  ------------  ------------  ------------
                                                       Name    Self CPU %      Self CPU   CPU total %     CPU total  CPU time avg    # of Calls
    -------------------------------------------------------  ------------  ------------  ------------  ------------  ------------  ------------
                                              aten::l1_loss         2.39%     415.000us        25.26%       4.392ms     109.800us            40
    enumerate(DataLoader)#_SingleProcessDataLoaderIter._...        12.64%       2.198ms        20.05%       3.485ms      87.125us            40
                                               aten::linear         1.41%     245.000us        13.22%       2.299ms     114.950us            20
                                                   aten::to         2.05%     357.000us        10.60%       1.842ms      11.370us           162
                                             aten::isfinite         2.12%     369.000us         9.86%       1.714ms      57.133us            30
                                                 aten::mean         1.13%     196.000us         9.14%       1.589ms      79.450us            20
                                                aten::stack         2.32%     404.000us         8.93%       1.553ms      31.060us            50
                                             aten::_to_copy         5.67%     985.000us         8.66%       1.506ms      15.060us           100
                                               aten::matmul         1.57%     273.000us         7.82%       1.360ms      68.000us            20
                                     aten::l1_loss_backward         1.22%     212.000us         6.66%       1.158ms      57.900us            20
    -------------------------------------------------------  ------------  ------------  ------------  ------------  ------------  ------------
    Self CPU time total: 17.385ms
    """
    # Three more traces: one per evaluate() call.
    brain.evaluate(test_set=test_set)
    brain.evaluate(test_set=test_set)
    brain.evaluate(test_set=test_set)
    assert len(brain.profiler.speechbrain_event_traces) == 5
    # assert len(brain.profiler.speechbrain_event_traces[0]) >= 4  # == 6 # before; config dependent: 7
    # assert len(brain.profiler.speechbrain_event_traces[1]) >= 2500  # 2862 with torch==1.10.1
    # assert len(brain.profiler.speechbrain_event_traces[2]) >= 125  # 143 with torch==1.10.1
    # assert len(brain.profiler.speechbrain_event_traces[3]) >= 125  # 143 with torch==1.10.1
    # assert len(brain.profiler.speechbrain_event_traces[4]) >= 125  # 143 with torch==1.10.1
    # assert len(brain.profiler.events()) >= 125  # 141 with torch==1.10.1
    # assert len(brain.profiler.events().key_averages()) >= 25  # 42 with torch==1.10.1
    # the following is only for the last call of the 3x brain.evaluate()
    """print(brain.profiler.events().key_averages().table(sort_by="cpu_time_total", row_limit=10))
    -------------------------------------------------------  ------------  ------------  ------------  ------------  ------------  ------------
                                                       Name    Self CPU %      Self CPU   CPU total %     CPU total  CPU time avg    # of Calls
    -------------------------------------------------------  ------------  ------------  ------------  ------------  ------------  ------------
                                              aten::l1_loss         3.54%      23.000us        37.38%     243.000us      60.750us             4
    enumerate(DataLoader)#_SingleProcessDataLoaderIter._...        16.62%     108.000us        29.38%     191.000us      63.667us             3
                                               aten::linear         2.77%      18.000us        20.46%     133.000us      66.500us             2
                                             aten::isfinite         3.54%      23.000us        14.31%      93.000us      46.500us             2
                                                 aten::mean         1.85%      12.000us        12.62%      82.000us      41.000us             2
                                                aten::stack         3.23%      21.000us        12.15%      79.000us      19.750us             4
                                               aten::matmul         2.62%      17.000us        11.69%      76.000us      38.000us             2
                                                 aten::div_         2.92%      19.000us         7.54%      49.000us      24.500us             2
                                                   aten::to         1.85%      12.000us         7.08%      46.000us       7.667us             6
                                                   aten::mm         6.31%      41.000us         6.77%      44.000us      22.000us             2
    -------------------------------------------------------  ------------  ------------  ------------  ------------  ------------  ------------
    Self CPU time total: 650.000us
    """
    # Profiling: putting previous benchmark reporting together.
    """
    full_report = brain.profiler.merge_traces()
    assert len(full_report) >= 1
    # assert len(full_report.key_averages()) >= 60  # 73 with torch==1.10.1
    # In this minimal example, only 73 functions matter.
    # Some events are duplicated (perhaps from wrapping functions):
    # => they appear stacked & EventList._remove_dup_nodes drops direct child events of same name as their parent.
    num_events = sum([len(x) for x in brain.profiler.speechbrain_event_traces])
    assert num_events >= 1
    # assert num_events >= 3000  # 3297 with torch==1.10.1 # expected: 6 + 2862 + 3x143 = 3297
    # Apparently, this depends on how this test is run (by its own or as part of the entire file's test suite).
    # assert (num_events == len(full_report)) or (len(full_report) == len(set([x.id for x in full_report])))
    # ... not tested, why
    " ""print(full_report.key_averages().table(sort_by="cpu_time_total", row_limit=10))
    -------------------------------------------------------  ------------  ------------  ------------  ------------  ------------  ------------
                                                       Name    Self CPU %      Self CPU   CPU total %     CPU total  CPU time avg    # of Calls
    -------------------------------------------------------  ------------  ------------  ------------  ------------  ------------  ------------
                                              aten::l1_loss         2.21%     427.000us        27.58%       5.326ms     102.423us            52
    enumerate(DataLoader)#_SingleProcessDataLoaderIter._...        11.89%       2.297ms        21.80%       4.210ms      85.918us            49
                                               aten::linear         1.57%     304.000us        14.61%       2.821ms     108.500us            26
                                             aten::isfinite         2.53%     488.000us        10.72%       2.071ms      57.528us            36
                                                   aten::to         2.03%     392.000us        10.42%       2.013ms      11.183us           180
                                                 aten::mean         1.15%     223.000us         9.81%       1.894ms      72.846us            26
                                                aten::stack         2.34%     452.000us         9.54%       1.842ms      29.710us            62
                                             aten::_to_copy         5.49%       1.061ms         8.51%       1.643ms      14.670us           112
                                               aten::matmul         1.65%     318.000us         8.48%       1.638ms      63.000us            26
                                                 aten::div_         1.50%     290.000us         6.95%       1.343ms      47.964us            28
    -------------------------------------------------------  ------------  ------------  ------------  ------------  ------------  ------------
    Self CPU time total: 19.311ms
    """
    # 19.311ms = 3x ~650.000us + 17.370ms + 15.000us <=> 1st & 2nd call of brain.evaluate() = 1276us = 2x 638us
    # max([x.time_range.end for x in full_report]) -> 41965 (us)
def test_profile_details(device):
    """Exercise the SpeechBrain profiling hooks end to end.

    Checks, in order:
    * ``profile_analyst`` / ``profile_optimiser`` wrapping a ``Brain``
      instance: event traces accumulate across ``fit()`` and ``evaluate()``.
    * The two presets differ on ``record_shapes`` / ``with_stack`` /
      ``with_flops`` but agree on ``with_modules`` / ``profile_memory``.
    * Attaching a profiler to an already-constructed Brain ("set hook
      afterwards").
    * Using the profilers as decorators on plain functions (where the Brain's
      own ``profiler`` attribute stays ``None``) and on Brain subclasses.

    NOTE(review): the commented-out numeric assertions below are tied to
    torch==1.10.1 event counts and apparently vary with how the test is run;
    the triple-quoted tables are captured sample output kept for reference.
    """
    import torch

    # from copy import deepcopy
    from torch.optim import SGD
    from speechbrain.core import Brain
    from speechbrain.utils.profiling import (
        profile_analyst,
        profile_optimiser,
        export,
        # events_diff,
    )

    # Minimal Brain: a single Linear model trained with an L1 loss.
    class SimpleBrain(Brain):
        def compute_forward(self, batch, stage):
            return self.modules.model(batch[0])

        def compute_objectives(self, predictions, batch, stage):
            return torch.nn.functional.l1_loss(predictions, batch[1])

    # Tiny synthetic data; test_set has 6 batches (see "6-batch inference").
    model = torch.nn.Linear(in_features=10, out_features=10, device=device)
    inputs = torch.rand(10, 10, device=device)
    targets = torch.rand(10, 10, device=device)
    train_set = ([inputs, targets],)
    valid_set = ([inputs, targets],)
    test_set = (
        [inputs, targets],
        [inputs, targets],
        [inputs, targets],
        [inputs, targets],
        [inputs, targets],
        [inputs, targets],
    )

    # Wrap fresh Brain instances with each profiling preset.
    brain_analyst = profile_analyst(
        SimpleBrain(
            {"model": model}, lambda x: SGD(x, 0.1), run_opts={"device": device}
        )
    )
    brain_optimiser = profile_optimiser(
        SimpleBrain(
            {"model": model}, lambda x: SGD(x, 0.1), run_opts={"device": device}
        )
    )

    # No traces before any fit/evaluate call; one trace per profiled run.
    assert len(brain_analyst.profiler.speechbrain_event_traces) == 0
    brain_analyst.fit(
        epoch_counter=range(10), train_set=train_set, valid_set=valid_set
    )
    assert len(brain_analyst.profiler.speechbrain_event_traces) == 1
    # assert len(brain_analyst.profiler.speechbrain_event_traces[0]) >= 250  # 296 with torch==1.10.1
    # assert len(brain_analyst.profiler.events()) >= 250  # 293 with torch==1.10.1
    # assert len(brain_analyst.profiler.events().key_averages()) >= 60  # 73 with torch==1.10.1
    """print(brain_analyst.profiler.events().key_averages().table(sort_by="cpu_time_total", row_limit=10))
------------------------------------------------------- ------------ ------------ ------------ ------------ ------------ ------------ ------------ ------------ ------------
Name Self CPU % Self CPU CPU total % CPU total CPU time avg CPU Mem Self CPU Mem # of Calls Total FLOPs
------------------------------------------------------- ------------ ------------ ------------ ------------ ------------ ------------ ------------ ------------ ------------
ProfilerStep* 36.62% 4.345ms 98.50% 11.686ms 5.843ms 796 b -1.66 Kb 2 --
aten::l1_loss 2.50% 297.000us 14.37% 1.705ms 426.250us 16 b -800 b 4 --
enumerate(DataLoader)#_SingleProcessDataLoaderIter._... 3.91% 464.000us 11.01% 1.306ms 326.500us 1.55 Kb -80 b 4 --
aten::isfinite 3.10% 368.000us 9.65% 1.145ms 381.667us 3 b -18 b 3 --
aten::stack 2.79% 331.000us 8.46% 1.004ms 200.800us 1.57 Kb 0 b 5 --
aten::to 1.96% 232.000us 7.25% 860.000us 53.750us 40 b 0 b 16 --
aten::linear 1.40% 166.000us 6.55% 777.000us 388.500us 800 b 0 b 2 --
aten::l1_loss_backward 1.58% 188.000us 6.08% 721.000us 360.500us 400 b -4 b 2 --
aten::_to_copy 4.28% 508.000us 5.29% 628.000us 62.800us 40 b 0 b 10 --
aten::mean 0.89% 105.000us 4.44% 527.000us 263.500us 8 b 8 b 2 --
------------------------------------------------------- ------------ ------------ ------------ ------------ ------------ ------------ ------------ ------------ ------------
Self CPU time total: 11.864ms
    """

    # 6-batch inference
    brain_analyst.evaluate(test_set=test_set)
    assert len(brain_analyst.profiler.speechbrain_event_traces) == 2
    # assert len(brain_analyst.profiler.speechbrain_event_traces[0]) >= 250  # 296 with torch==1.10.1
    # assert len(brain_analyst.profiler.speechbrain_event_traces[1]) >= 125  # 144 with torch==1.10.1
    # as of evaluate() call
    # assert len(brain_analyst.profiler.events()) >= 125  # 142 with torch==1.10.1
    # assert len(brain_analyst.profiler.events().key_averages()) >= 25  # 42 with torch==1.10.1
    """print(brain_analyst.profiler.events().key_averages().table(sort_by="cpu_time_total", row_limit=10))
------------------------------------------------------- ------------ ------------ ------------ ------------ ------------ ------------ ------------ ------------ ------------
Name Self CPU % Self CPU CPU total % CPU total CPU time avg CPU Mem Self CPU Mem # of Calls Total FLOPs
------------------------------------------------------- ------------ ------------ ------------ ------------ ------------ ------------ ------------ ------------ ------------
ProfilerStep* 19.24% 1.129ms 96.92% 5.687ms 2.844ms 796 b -1.61 Kb 2 --
aten::l1_loss 5.16% 303.000us 35.50% 2.083ms 520.750us 16 b -800 b 4 --
enumerate(DataLoader)#_SingleProcessDataLoaderIter._... 7.41% 435.000us 25.95% 1.523ms 761.500us 1.55 Kb -40 b 2 --
aten::stack 6.15% 361.000us 18.20% 1.068ms 267.000us 1.56 Kb 0 b 4 --
aten::linear 3.43% 201.000us 15.78% 926.000us 463.000us 800 b 0 b 2 --
aten::mean 2.42% 142.000us 11.93% 700.000us 350.000us 8 b 8 b 2 --
aten::isfinite 3.72% 218.000us 10.84% 636.000us 318.000us 2 b -12 b 2 --
aten::cat 2.68% 157.000us 8.95% 525.000us 131.250us 1.56 Kb 0 b 4 --
aten::matmul 3.83% 225.000us 8.88% 521.000us 260.500us 800 b 0 b 2 --
aten::div_ 3.34% 196.000us 6.97% 409.000us 204.500us 0 b -8 b 2 --
------------------------------------------------------- ------------ ------------ ------------ ------------ ------------ ------------ ------------ ------------ ------------
Self CPU time total: 5.868ms
    """

    # Same fit/evaluate cycle with the lighter "optimiser" preset.
    brain_optimiser.fit(
        epoch_counter=range(10), train_set=train_set, valid_set=valid_set
    )
    # key_avg_fit = deepcopy(brain_optimiser.profiler.events().key_averages())
    """print(brain_optimiser.profiler.events().key_averages().table(sort_by="cpu_time_total", row_limit=10))
------------------------------------------------------- ------------ ------------ ------------ ------------ ------------ ------------ ------------ ------------
Name Self CPU % Self CPU CPU total % CPU total CPU time avg CPU Mem Self CPU Mem # of Calls
------------------------------------------------------- ------------ ------------ ------------ ------------ ------------ ------------ ------------ ------------
ProfilerStep* 50.73% 1.874ms 98.86% 3.652ms 1.826ms 796 b -1.66 Kb 2
aten::l1_loss 1.38% 51.000us 11.83% 437.000us 109.250us 16 b -400 b 4
aten::isfinite 1.49% 55.000us 8.69% 321.000us 107.000us 3 b -16 b 3
enumerate(DataLoader)#_SingleProcessDataLoaderIter._... 4.03% 149.000us 7.58% 280.000us 70.000us 1.55 Kb -64 b 4
aten::linear 0.51% 19.000us 5.66% 209.000us 104.500us 800 b 0 b 2
aten::abs 3.19% 118.000us 5.58% 206.000us 25.750us 24 b 12 b 8
aten::l1_loss_backward 0.92% 34.000us 4.28% 158.000us 79.000us 400 b -4 b 2
aten::stack 1.00% 37.000us 3.87% 143.000us 28.600us 1.57 Kb 0 b 5
aten::empty 3.76% 139.000us 3.76% 139.000us 4.793us 544 b 544 b 29
aten::to 0.76% 28.000us 3.76% 139.000us 8.688us 44 b 4 b 16
------------------------------------------------------- ------------ ------------ ------------ ------------ ------------ ------------ ------------ ------------
Self CPU time total: 3.694ms
# to 11.864ms (analyst)
    """
    brain_optimiser.evaluate(test_set=test_set)
    """
    key_avg_evaluate = deepcopy(
        brain_optimiser.profiler.events().key_averages()
    )
    """
    """print(brain_optimiser.profiler.events().key_averages().table(sort_by="cpu_time_total", row_limit=10))
------------------------------------------------------- ------------ ------------ ------------ ------------ ------------ ------------ ------------ ------------
Name Self CPU % Self CPU CPU total % CPU total CPU time avg CPU Mem Self CPU Mem # of Calls
------------------------------------------------------- ------------ ------------ ------------ ------------ ------------ ------------ ------------ ------------
ProfilerStep* 24.80% 524.000us 96.50% 2.039ms 1.020ms 796 b -1.61 Kb 2
aten::l1_loss 2.74% 58.000us 33.65% 711.000us 177.750us 16 b -800 b 4
enumerate(DataLoader)#_SingleProcessDataLoaderIter._... 9.94% 210.000us 21.11% 446.000us 223.000us 1.55 Kb -40 b 2
aten::isfinite 3.64% 77.000us 15.76% 333.000us 166.500us 2 b -12 b 2
aten::linear 1.04% 22.000us 11.88% 251.000us 125.500us 800 b 0 b 2
aten::mean 2.04% 43.000us 11.74% 248.000us 124.000us 8 b 8 b 2
aten::stack 3.31% 70.000us 10.18% 215.000us 53.750us 1.56 Kb 0 b 4
aten::div_ 4.83% 102.000us 7.90% 167.000us 83.500us 0 b -8 b 2
aten::matmul 1.61% 34.000us 7.38% 156.000us 78.000us 800 b 0 b 2
aten::ne 4.54% 96.000us 6.72% 142.000us 71.000us 2 b -6 b 2
------------------------------------------------------- ------------ ------------ ------------ ------------ ------------ ------------ ------------ ------------
Self CPU time total: 2.113ms
# to 5.868ms (analyst)
    """

    # same check as for analyst
    assert len(brain_optimiser.profiler.speechbrain_event_traces) == 2
    # assert len(brain_optimiser.profiler.speechbrain_event_traces[0]) >= 250  # 296 with torch==1.10.1
    # assert len(brain_optimiser.profiler.speechbrain_event_traces[1]) >= 125  # 144 with torch==1.10.1
    # as of evaluate() call
    # assert len(brain_optimiser.profiler.events()) >= 125  # 142 with torch==1.10.1
    # assert len(brain_optimiser.profiler.events().key_averages()) >= 25  # 42 with torch==1.10.1

    # different config: the analyst preset records shapes/stack/flops,
    # the optimiser preset does not.
    assert (
        brain_optimiser.profiler.record_shapes
        != brain_analyst.profiler.record_shapes
    )
    assert (
        brain_optimiser.profiler.with_stack != brain_analyst.profiler.with_stack
    )
    assert (
        brain_optimiser.profiler.with_flops != brain_analyst.profiler.with_flops
    )
    # same config: both presets agree on module tracking and memory profiling.
    assert (
        brain_optimiser.profiler.with_modules
        == brain_analyst.profiler.with_modules
    )
    assert (
        brain_optimiser.profiler.profile_memory
        == brain_analyst.profiler.profile_memory
    )
    """
    # let's take a look at the diff
    diff_fit, diff_evaluate = events_diff(key_avg_fit, key_avg_evaluate)
    # assert len(diff_fit) >= 50  # 64 with torch==1.10.1
    # assert len(diff_evaluate) >= 25  # 33 with torch==1.10.1
    # assert diff_fit.total_average().count >= 250  # 273 with torch==1.10.1
    # assert diff_evaluate.total_average().count >= 100  # 122 with torch==1.10.1
    " ""For curiosity only... the printed FunctionEvents differ by (name, # of Calls)
    print(diff_fit.table(sort_by="cpu_time_total"))
------------------------------------------------------- ------------ ------------ ------------ ------------ ------------ ------------ ------------ ------------
Name Self CPU % Self CPU CPU total % CPU total CPU time avg CPU Mem Self CPU Mem # of Calls
------------------------------------------------------- ------------ ------------ ------------ ------------ ------------ ------------ ------------ ------------
aten::isfinite 3.35% 55.000us 19.55% 321.000us 107.000us 3 b -16 b 3
enumerate(DataLoader)#_SingleProcessDataLoaderIter._... 9.07% 149.000us 17.05% 280.000us 70.000us 1.55 Kb -64 b 4
aten::abs 7.19% 118.000us 12.55% 206.000us 25.750us 24 b 12 b 8
aten::l1_loss_backward 2.07% 34.000us 9.62% 158.000us 79.000us 400 b -4 b 2
aten::stack 2.25% 37.000us 8.71% 143.000us 28.600us 1.57 Kb 0 b 5
aten::empty 8.47% 139.000us 8.47% 139.000us 4.793us 544 b 544 b 29
aten::to 1.71% 28.000us 8.47% 139.000us 8.688us 44 b 4 b 16
aten::_to_copy 1.89% 31.000us 7.00% 115.000us 11.500us 40 b 0 b 10
aten::mm 6.46% 106.000us 6.58% 108.000us 36.000us 1.17 Kb 1.17 Kb 3
aten::ne 4.26% 70.000us 6.46% 106.000us 35.333us 3 b -9 b 3
aten::zeros 1.95% 32.000us 6.03% 99.000us 12.375us 32 b 0 b 8
autograd::engine::evaluate_function: L1LossBackward0... 0.37% 6.000us 5.97% 98.000us 98.000us 396 b -4 b 1
L1LossBackward0 0.24% 4.000us 5.60% 92.000us 92.000us 400 b 0 b 1
aten::cat 0.97% 16.000us 5.05% 83.000us 16.600us 1.57 Kb 0 b 5
aten::div 2.80% 46.000us 4.81% 79.000us 26.333us 12 b 0 b 3
Optimizer.step#SGD.step 3.71% 61.000us 4.51% 74.000us 74.000us -4 b -20 b 1
aten::_cat 2.13% 35.000us 4.08% 67.000us 13.400us 1.57 Kb 0 b 5
aten::empty_strided 3.41% 56.000us 3.41% 56.000us 5.091us 44 b 44 b 11
autograd::engine::evaluate_function: MmBackward0 0.37% 6.000us 3.41% 56.000us 56.000us 0 b -400 b 1
aten::t 2.13% 35.000us 3.11% 51.000us 10.200us 0 b 0 b 5
aten::add_ 3.05% 50.000us 3.05% 50.000us 8.333us 0 b 0 b 6
MmBackward0 0.43% 7.000us 3.05% 50.000us 50.000us 400 b 0 b 1
aten::mul 2.01% 33.000us 2.62% 43.000us 10.750us 5 b -3 b 4
aten::sum 2.01% 33.000us 2.50% 41.000us 13.667us 40 b 40 b 3
autograd::engine::evaluate_function: DivBackward0 0.55% 9.000us 2.38% 39.000us 39.000us -4 b -8 b 1
aten::norm 2.13% 35.000us 2.38% 39.000us 13.000us 12 b 12 b 3
Optimizer.zero_grad#SGD.zero_grad 1.46% 24.000us 1.89% 31.000us 31.000us -4 b -4 b 1
DivBackward0 0.24% 4.000us 1.83% 30.000us 30.000us 4 b 0 b 1
aten::item 1.04% 17.000us 1.77% 29.000us 4.143us 0 b 0 b 7
autograd::engine::evaluate_function: AddBackward0 0.43% 7.000us 1.77% 29.000us 29.000us 40 b 0 b 1
aten::random_ 1.71% 28.000us 1.71% 28.000us 14.000us 0 b 0 b 2
aten::sub 1.71% 28.000us 1.71% 28.000us 9.333us 800 b 800 b 3
aten::copy_ 1.71% 28.000us 1.71% 28.000us 2.800us 0 b 0 b 10
autograd::engine::evaluate_function: torch::autograd... 0.30% 5.000us 1.71% 28.000us 14.000us -440 b 0 b 2
aten::resize_ 1.64% 27.000us 1.64% 27.000us 5.400us 1.57 Kb 1.57 Kb 5
aten::add 0.85% 14.000us 1.64% 27.000us 27.000us 4 b 0 b 1
aten::eq 1.58% 26.000us 1.58% 26.000us 8.667us 3 b 3 b 3
aten::unsqueeze 0.91% 15.000us 1.40% 23.000us 3.833us 0 b 0 b 6
torch::autograd::AccumulateGrad 0.97% 16.000us 1.40% 23.000us 11.500us -440 b -440 b 2
aten::is_nonzero 0.43% 7.000us 1.40% 23.000us 7.667us 0 b 0 b 3
aten::detach 0.55% 9.000us 1.28% 21.000us 3.500us 0 b 0 b 6
aten::as_strided 1.16% 19.000us 1.16% 19.000us 1.188us 0 b 0 b 16
aten::view 1.04% 17.000us 1.04% 17.000us 3.400us 0 b 0 b 5
aten::transpose 0.67% 11.000us 0.97% 16.000us 3.200us 0 b 0 b 5
aten::empty_like 0.37% 6.000us 0.91% 15.000us 7.500us 404 b 0 b 2
detach 0.73% 12.000us 0.85% 14.000us 2.333us 0 b 0 b 6
aten::clamp 0.61% 10.000us 0.85% 14.000us 14.000us 4 b 4 b 1
aten::_local_scalar_dense 0.73% 12.000us 0.79% 13.000us 1.857us 0 b 0 b 7
aten::mul_ 0.79% 13.000us 0.79% 13.000us 4.333us 0 b 0 b 3
aten::ones_like 0.24% 4.000us 0.73% 12.000us 12.000us 4 b 0 b 1
aten::zeros_like 0.30% 5.000us 0.73% 12.000us 12.000us 400 b 0 b 1
autograd::engine::evaluate_function: UnsafeViewBackw... 0.18% 3.000us 0.73% 12.000us 12.000us 0 b 0 b 1
UnsafeViewBackward0 0.12% 2.000us 0.55% 9.000us 9.000us 0 b 0 b 1
aten::reciprocal 0.49% 8.000us 0.49% 8.000us 8.000us 4 b 4 b 1
aten::reshape 0.24% 4.000us 0.43% 7.000us 7.000us 0 b 0 b 1
autograd::engine::evaluate_function: TBackward0 0.12% 2.000us 0.43% 7.000us 7.000us 0 b 0 b 1
aten::zero_ 0.37% 6.000us 0.37% 6.000us 0.545us 0 b 0 b 11
aten::fill_ 0.37% 6.000us 0.37% 6.000us 1.500us 0 b 0 b 4
aten::sgn_ 0.30% 5.000us 0.30% 5.000us 5.000us 0 b 0 b 1
TBackward0 0.06% 1.000us 0.30% 5.000us 5.000us 0 b 0 b 1
aten::clamp_max 0.24% 4.000us 0.24% 4.000us 4.000us 0 b 0 b 1
aten::_reshape_alias 0.18% 3.000us 0.18% 3.000us 3.000us 0 b 0 b 1
aten::resolve_conj 0.12% 2.000us 0.12% 2.000us 0.333us 0 b 0 b 6
AddBackward0 0.06% 1.000us 0.06% 1.000us 1.000us 0 b 0 b 1
------------------------------------------------------- ------------ ------------ ------------ ------------ ------------ ------------ ------------ ------------
Self CPU time total: 1.642ms
    print(diff_evaluate.table(sort_by="cpu_time_total"))
------------------------------------------------------- ------------ ------------ ------------ ------------ ------------ ------------ ------------ ------------
Name Self CPU % Self CPU CPU total % CPU total CPU time avg CPU Mem Self CPU Mem # of Calls
------------------------------------------------------- ------------ ------------ ------------ ------------ ------------ ------------ ------------ ------------
enumerate(DataLoader)#_SingleProcessDataLoaderIter._... 16.03% 210.000us 34.05% 446.000us 223.000us 1.55 Kb -40 b 2
aten::isfinite 5.88% 77.000us 25.42% 333.000us 166.500us 2 b -12 b 2
aten::stack 5.34% 70.000us 16.41% 215.000us 53.750us 1.56 Kb 0 b 4
aten::ne 7.33% 96.000us 10.84% 142.000us 71.000us 2 b -6 b 2
aten::empty 10.08% 132.000us 10.08% 132.000us 8.250us 80 b 80 b 16
aten::to 1.37% 18.000us 8.63% 113.000us 18.833us 16 b 0 b 6
aten::zeros 3.13% 41.000us 8.55% 112.000us 28.000us 16 b 0 b 4
aten::cat 1.45% 19.000us 8.40% 110.000us 27.500us 1.56 Kb 0 b 4
aten::mm 7.02% 92.000us 7.33% 96.000us 48.000us 800 b 800 b 2
aten::abs 4.58% 60.000us 7.33% 96.000us 16.000us 16 b 8 b 6
aten::_to_copy 2.21% 29.000us 7.25% 95.000us 23.750us 16 b 0 b 4
aten::_cat 2.82% 37.000us 6.95% 91.000us 22.750us 1.56 Kb 0 b 4
aten::resize_ 3.51% 46.000us 3.51% 46.000us 11.500us 1.56 Kb 1.56 Kb 4
aten::empty_strided 3.36% 44.000us 3.36% 44.000us 11.000us 16 b 16 b 4
aten::sub 2.98% 39.000us 2.98% 39.000us 19.500us 800 b 800 b 2
aten::sum 2.14% 28.000us 2.90% 38.000us 19.000us 0 b 0 b 2
aten::add_ 2.82% 37.000us 2.82% 37.000us 18.500us 0 b 0 b 2
aten::t 1.68% 22.000us 2.75% 36.000us 18.000us 0 b 0 b 2
aten::unsqueeze 1.98% 26.000us 2.67% 35.000us 8.750us 0 b 0 b 4
aten::eq 2.37% 31.000us 2.37% 31.000us 15.500us 2 b 2 b 2
aten::mul 2.37% 31.000us 2.37% 31.000us 15.500us 2 b 2 b 2
aten::is_nonzero 0.53% 7.000us 1.76% 23.000us 11.500us 0 b 0 b 2
aten::item 0.92% 12.000us 1.76% 23.000us 5.750us 0 b 0 b 4
aten::copy_ 1.68% 22.000us 1.68% 22.000us 5.500us 0 b 0 b 4
aten::as_strided 1.37% 18.000us 1.37% 18.000us 2.250us 0 b 0 b 8
aten::view 1.30% 17.000us 1.30% 17.000us 4.250us 0 b 0 b 4
aten::detach 0.31% 4.000us 1.15% 15.000us 7.500us 0 b 0 b 2
aten::transpose 0.69% 9.000us 1.07% 14.000us 7.000us 0 b 0 b 2
detach 0.84% 11.000us 0.84% 11.000us 5.500us 0 b 0 b 2
aten::_local_scalar_dense 0.84% 11.000us 0.84% 11.000us 2.750us 0 b 0 b 4
aten::fill_ 0.46% 6.000us 0.46% 6.000us 3.000us 0 b 0 b 2
aten::zero_ 0.31% 4.000us 0.31% 4.000us 1.000us 0 b 0 b 4
aten::resolve_conj 0.31% 4.000us 0.31% 4.000us 1.000us 0 b 0 b 4
------------------------------------------------------- ------------ ------------ ------------ ------------ ------------ ------------ ------------ ------------
Self CPU time total: 1.310ms
    """

    # set hook afterwards: attach the profiler to Brains that already ran fit()
    # unprofiled, then check the preset's flags landed on the profiler.
    brain_analyst_raw = SimpleBrain(
        {"model": model}, lambda x: SGD(x, 0.1), run_opts={"device": device}
    )
    brain_optimiser_raw = SimpleBrain(
        {"model": model}, lambda x: SGD(x, 0.1), run_opts={"device": device}
    )
    brain_analyst_raw.fit(
        epoch_counter=range(10), train_set=train_set, valid_set=valid_set
    )
    profile_analyst(brain_analyst_raw)
    brain_analyst_raw.evaluate(test_set=test_set)
    assert getattr(brain_analyst_raw.profiler, "record_shapes") is True
    assert getattr(brain_analyst_raw.profiler, "with_stack") is True
    assert getattr(brain_analyst_raw.profiler, "with_flops") is True
    brain_optimiser_raw.fit(
        epoch_counter=range(10), train_set=train_set, valid_set=valid_set
    )
    profile_optimiser(brain_optimiser_raw)
    brain_optimiser_raw.evaluate(test_set=test_set)
    assert getattr(brain_optimiser_raw.profiler, "record_shapes") is False
    assert getattr(brain_optimiser_raw.profiler, "with_stack") is False
    assert getattr(brain_optimiser_raw.profiler, "with_flops") is False

    # wrap functions: profiling decorators on plain callables must not
    # attach a profiler to the Brain they operate on (checked below).
    @profile_analyst
    def train_analyst(brain: SimpleBrain):
        brain.fit(
            epoch_counter=range(10), train_set=train_set, valid_set=valid_set
        )

    @export
    @profile_optimiser
    def evaluate_optimiser(brain: SimpleBrain):
        brain.evaluate(test_set=test_set)

    brain_raw = SimpleBrain(
        {"model": model}, lambda x: SGD(x, 0.1), run_opts={"device": device}
    )
    assert brain_raw.profiler is None
    train_analyst(brain_raw)
    assert brain_raw.profiler is None
    evaluate_optimiser(brain_raw)
    assert brain_raw.profiler is None

    # profile classes: decorating a Brain subclass gives each new instance
    # a profiler configured with the preset's flags.
    @export
    @profile_analyst
    class SimpleBrainAnalyst(Brain):
        def compute_forward(self, batch, stage):
            return self.modules.model(batch[0])

        def compute_objectives(self, predictions, batch, stage):
            return torch.nn.functional.l1_loss(predictions, batch[1])

    @profile_optimiser
    class SimpleBrainOptimiser(Brain):
        def compute_forward(self, batch, stage):
            return self.modules.model(batch[0])

        def compute_objectives(self, predictions, batch, stage):
            return torch.nn.functional.l1_loss(predictions, batch[1])

    simple_brain_analyst = SimpleBrainAnalyst(
        {"model": model}, lambda x: SGD(x, 0.1), run_opts={"device": device}
    )
    assert getattr(simple_brain_analyst.profiler, "record_shapes") is True
    assert getattr(simple_brain_analyst.profiler, "with_stack") is True
    assert getattr(simple_brain_analyst.profiler, "with_flops") is True
    simple_brain_analyst.evaluate(test_set=test_set)
    simple_brain_optimiser = SimpleBrainOptimiser(
        {"model": model}, lambda x: SGD(x, 0.1), run_opts={"device": device}
    )
    assert getattr(simple_brain_optimiser.profiler, "record_shapes") is False
    assert getattr(simple_brain_optimiser.profiler, "with_stack") is False
    assert getattr(simple_brain_optimiser.profiler, "with_flops") is False
    simple_brain_optimiser.fit(
        epoch_counter=range(10), train_set=train_set, valid_set=valid_set
    )
| 81,815 | 78.820488 | 185 | py |
speechbrain | speechbrain-main/tests/unittests/test_multi_mic.py | import torch
def test_gccphat(device):
    """GCC-PHAT must recover a known integer sample delay between two channels."""
    from speechbrain.processing.features import STFT
    from speechbrain.processing.multi_mic import Covariance, GccPhat

    # Build a two-channel signal where channel 0 lags channel 1 by `lag` samples.
    sample_rate = 16000
    lag = 60
    reference = torch.randn([10, sample_rate], device=device)
    padding = torch.zeros([10, lag], device=device)
    lagged = torch.cat((padding, reference[:, 0:-lag]), 1)
    xs = torch.stack((lagged, reference), -1)

    stft = STFT(sample_rate=sample_rate).to(device)
    Xs = stft(xs)

    # The cross-channel covariance feeds the GCC-PHAT delay estimator.
    cov = Covariance().to(device)
    gccphat = GccPhat().to(device)
    XXs = cov(Xs).to(device)
    tdoas = torch.abs(gccphat(XXs))

    # Every (batch, frame) estimate for the second channel must equal the
    # injected delay, up to numerical tolerance.
    hits = torch.sum(torch.abs(tdoas[..., 1] - lag) < 1e-3)
    assert hits == Xs.shape[0] * Xs.shape[1]

    # All three modules must remain TorchScript-traceable.
    assert torch.jit.trace(stft, xs)
    assert torch.jit.trace(cov, Xs)
    assert torch.jit.trace(gccphat, XXs)
| 916 | 24.472222 | 70 | py |
speechbrain | speechbrain-main/tests/unittests/test_signal_processing.py | import torch
def test_normalize(device):
    """rescale() back to the measured amplitude must undo an arbitrary gain."""
    from speechbrain.processing.signal_processing import compute_amplitude
    from speechbrain.processing.signal_processing import rescale
    import random
    import numpy as np

    for scale in ["dB", "linear"]:
        for amp_type in ["peak", "avg"]:
            for waveform in [
                torch.zeros((100), device=device),
                torch.rand((10, 100), device=device),
                torch.rand((10, 100, 5), device=device),
            ]:
                # Time dimension is axis 1 for batched input, axis 0 otherwise.
                if len(waveform.shape) > 1:
                    lengths = waveform.size(1)
                else:
                    lengths = waveform.size(0)
                # Reference amplitude of the untouched signal.
                target_amp = compute_amplitude(
                    waveform, lengths, amp_type, scale
                )
                # Apply a random gain, then rescale back to that amplitude;
                # the round trip must reproduce the original signal.
                restored = rescale(
                    random.random() * waveform,
                    lengths,
                    target_amp,
                    amp_type,
                    scale,
                )
                np.testing.assert_array_almost_equal(
                    restored.cpu().numpy(), waveform.cpu().numpy()
                )
| 1,035 | 32.419355 | 77 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.