repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
speechbrain | speechbrain-main/speechbrain/dataio/sampler.py | """PyTorch compatible samplers.
These determine the order of iteration through a dataset.
Authors:
* Aku Rouhe 2020
* Samuele Cornell 2020
* Ralf Leibold 2020
* Artem Ploujnikov 2021
* Andreas Nautsch 2021
"""
import torch
import logging
from operator import itemgetter
from torch.utils.data import (
RandomSampler,
WeightedRandomSampler,
DistributedSampler,
Sampler,
)
import numpy as np
from typing import List
from speechbrain.dataio.dataset import DynamicItemDataset
from collections import Counter
from scipy.stats import lognorm
logger = logging.getLogger(__name__)
class ReproducibleRandomSampler(RandomSampler):
    """A RandomSampler whose ordering is fully determined by (seed, epoch).

    Behaves like `torch.utils.data.RandomSampler`, except that 'seed' and
    'epoch' arguments replace the unsupported 'generator' argument, making
    every pass reproducible.

    Note
    ----
    Call `set_epoch` before every epoch; otherwise the sampler produces the
    same sequence of indices every epoch.

    Arguments
    ---------
    data_source : Dataset
        The data source to sample indices for.
    seed : int
        The base seed to use for the random number generator. A value with a
        good mix of 0 and 1 bits is recommended.
    epoch : int
        The epoch to start at.

    Example
    -------
    >>> import torch
    >>> dataset = torch.arange(10).unsqueeze(1)
    >>> sampler = ReproducibleRandomSampler(dataset)
    >>> other = ReproducibleRandomSampler(dataset)
    >>> assert list(sampler) == list(other)
    """

    def __init__(self, data_source, seed=563375142, epoch=0, **kwargs):
        if "generator" in kwargs:
            # A user-supplied generator would defeat reproducibility; this
            # sampler manages its own generator seeded from (seed, epoch).
            raise ValueError(
                "Cannot give a separate generator when using "
                "ReproducibleRandomSampler"
            )
        super().__init__(data_source, **kwargs)
        self.seed = int(seed)
        self.epoch = epoch
        self.generator = torch.Generator()

    def set_epoch(self, epoch):
        """Mirrors the ``torch.utils.data.distributed.DistributedSampler``
        interface; assigning to ``self.epoch`` directly also works."""
        self.epoch = epoch

    def __iter__(self):
        # Re-seeding before every pass makes each epoch's order reproducible
        # while still differing between epochs.
        self.generator.manual_seed(self.seed + self.epoch)
        return super().__iter__()
class ReproducibleWeightedRandomSampler(WeightedRandomSampler):
    """A reproducible modification of WeightedRandomSampler.

    Also look at `torch.utils.data.WeightedRandomSampler`. This has the
    same behaviour and arguments, except for adding 'seed' and 'epoch' and
    not supporting 'generator'.

    Note
    ----
    Call `set_epoch` before every epoch. Otherwise, the sampler will produce
    the same sequence of indices every epoch.

    Arguments
    ---------
    weights : sequence of float
        Weights for each index. Doesn't need to sum to one.
    num_samples : int
        Number of samples to draw
    replacement : bool
        To draw with replacement or not (within an epoch of num_samples).
    seed : int
        The base seed to use for the random number generator. It is recommended
        to use a value which has a good mix of 0 and 1 bits.
    epoch : int
        The epoch to start at.

    Example
    -------
    >>> a = ReproducibleWeightedRandomSampler([0.1, 0.9, 0.4, 0.7, 3.0, 0.6], 5, replacement=True)
    >>> b = ReproducibleWeightedRandomSampler([0.1, 0.9, 0.4, 0.7, 3.0, 0.6], 5, replacement=True)
    >>> list(a)
    [3, 1, 4, 4, 4]
    >>> list(b)
    [3, 1, 4, 4, 4]
    >>> a.set_epoch(1)
    >>> list(a)
    [4, 5, 4, 4, 3]
    >>> b.set_epoch(1)
    >>> list(b)
    [4, 5, 4, 4, 3]
    """

    def __init__(
        self,
        weights,
        num_samples,
        replacement,
        seed=129491412,
        epoch=0,
        **kwargs,
    ):
        if "generator" in kwargs:
            # A user-supplied generator would defeat reproducibility; this
            # sampler manages its own generator seeded from (seed, epoch).
            MSG = (
                "Cannot give a separate generator when using "
                # Fixed: the message previously named ReproducibleRandomSampler.
                + "ReproducibleWeightedRandomSampler"
            )
            raise ValueError(MSG)
        super().__init__(weights, num_samples, replacement, **kwargs)
        self.seed = int(seed)
        self.epoch = epoch
        self.generator = torch.Generator()

    def set_epoch(self, epoch):
        """
        You can also just access self.epoch, but we maintain this interface
        to mirror torch.utils.data.distributed.DistributedSampler
        """
        self.epoch = epoch

    def __iter__(self):
        # Re-seed deterministically from (seed, epoch) so each epoch's draws
        # are reproducible but differ between epochs.
        self.generator.manual_seed(self.seed + self.epoch)
        return super().__iter__()
class ConcatDatasetBatchSampler(Sampler):
    """This sampler is built to work with a standard Pytorch ConcatDataset.

    It is used to retrieve elements from the different concatenated datasets
    placing them in the same batch with proportion specified by batch_sizes,
    e.g 8, 16 means each batch will be of 24 elements with the first 8
    belonging to the first dataset in ConcatDataset object and the last 16
    to the second. More than two datasets are supported, in that case you
    need to provide as many batch sizes as datasets.

    Note
    ----
    Batches are drawn from the datasets till the one with smallest length is
    exhausted. Thus the number of examples in your training epoch is dictated
    by the dataset whose length is the smallest.

    Arguments
    ---------
    samplers : list or tuple
        A collection of Pytorch Samplers, one per concatenated dataset and
        in the same order as the datasets in the ConcatDataset.
    batch_sizes : list
        Batch sizes, one per sampler.
    epoch : int
        The epoch to start at.

    Example
    -------
    >>> import torch
    >>> from speechbrain.dataio.sampler import ConcatDatasetBatchSampler, ReproducibleRandomSampler
    >>> from speechbrain.dataio.dataloader import SaveableDataLoader
    >>> # example "datasets"
    >>> dataset1 = torch.arange(0, 10).unsqueeze(1)
    >>> dataset2 = torch.arange(20, 40).unsqueeze(1)
    >>> tot_dataset = torch.utils.data.ConcatDataset([dataset1, dataset2])
    >>> sampler1 = ReproducibleRandomSampler(dataset1)
    >>> sampler2 = ReproducibleRandomSampler(dataset2)
    >>> tot_sampler = ConcatDatasetBatchSampler([sampler1, sampler2], [2, 4])
    >>> dataloader = SaveableDataLoader(tot_dataset, batch_sampler = tot_sampler,
    ...     num_workers = 3)
    >>> for data_point in dataloader:
    ...     assert len(data_point) == 6
    ...     for i in range(2):
    ...         assert data_point[i] in [x for x in range(0, 10)]
    ...     for i in range(2, 4):
    ...         assert data_point[i] in [x for x in range(10, 40)]
    """

    def __init__(self, samplers, batch_sizes: (tuple, list), epoch=0) -> None:
        if not isinstance(samplers, (list, tuple)):
            raise ValueError(
                "samplers should be a list or tuple of Pytorch Samplers, "
                # Fixed: previously formatted batch_sizes into this message.
                "but got samplers={}".format(samplers)
            )
        if not isinstance(batch_sizes, (list, tuple)):
            raise ValueError(
                "batch_sizes should be a list or tuple of integers, "
                "but got batch_sizes={}".format(batch_sizes)
            )
        if not len(batch_sizes) == len(samplers):
            raise ValueError(
                "batch_sizes and samplers should have the same length"
            )
        self.batch_sizes = batch_sizes
        self.samplers = samplers
        # Global index offset of each dataset inside the ConcatDataset:
        # dataset i's local index j maps to offsets[i] + j.
        self.offsets = [0] + np.cumsum(
            [len(x) for x in self.samplers]
        ).tolist()[:-1]
        self.epoch = epoch
        self.set_epoch(self.epoch)

    def _iter_one_dataset(self, c_batch_size, c_sampler, c_offset):
        """Yield successive batches of global indices for a single dataset."""
        batch = []
        for idx in c_sampler:
            batch.append(c_offset + idx)
            if len(batch) == c_batch_size:
                yield batch
                # Fixed: start a fresh list after yielding; previously the
                # batch was never reset, so only one (growing) batch was
                # ever produced.
                batch = []

    def set_epoch(self, epoch):
        """You can also just access self.epoch, but we maintain this interface
        to mirror ``torch.utils.data.distributed.DistributedSampler``.
        """
        # Propagate only when the samplers support epochs (e.g. the
        # Reproducible* samplers in this module).
        if hasattr(self.samplers[0], "epoch"):
            for s in self.samplers:
                s.set_epoch(epoch)

    def __iter__(self):
        iterators = [iter(i) for i in self.samplers]
        tot_batch = []
        for b_num in range(len(self)):
            for samp_idx in range(len(self.samplers)):
                c_batch = []
                while len(c_batch) < self.batch_sizes[samp_idx]:
                    c_batch.append(
                        self.offsets[samp_idx] + next(iterators[samp_idx])
                    )
                tot_batch.extend(c_batch)
            yield tot_batch
            tot_batch = []

    def __len__(self):
        # The epoch ends when the dataset yielding the fewest full batches
        # is exhausted.
        min_len = float("inf")
        for idx, sampler in enumerate(self.samplers):
            c_len = len(sampler) // self.batch_sizes[idx]
            min_len = min(c_len, min_len)
        return min_len
class DynamicBatchSampler(Sampler):
    """This BatchSampler batches examples together by grouping them by their length.

    Every example in the batch has approximately the same length and
    thus padding is minimized. This enables faster training on datasets
    where length of examples can vary significantly (e.g Librispeech).
    Inspired by: https://www.tensorflow.org/api_docs/python/tf/data/experimental/bucket_by_sequence_length

    Dynamic batching is performed by specifying a max_batch_length which is the
    upper limit for the sum of the length of examples in a batch:
    e.g., if ex1 has length 4, ex2 length 5 and if max_batch_length is set to 6
    ex1 and ex2 will be placed, alone, in two distinct batches.

    Length for each example can be obtained in two manners.
    If the input dataset is a DynamicItemDataset it can be obtained by specifying a
    length_func. Default assumes a "duration" entry is in the annotation.
    Length for each example can also be passed to this class upon instantiation
    by specifying a list containing the length for each example and passing it to
    lengths_list.

    Examples are grouped together by defining a set of possible discrete intervals
    (buckets). Examples whose length fall into these intervals can be batched together.

    The number of buckets can be specified by using the arg num_buckets.
    There is usually an optimal range for the value of this argument.

    If num_buckets == 1, all examples can be batched together. You have maximum randomization
    but your training speed will be slower due to the fact that a large amount of the values will be padding
    as long and short examples can be batched together.
    As the number of buckets grows only examples with similar
    length can be grouped together.
    This trades-off speed with randomization.
    TLDR: Low number -> better randomization, High number -> faster training.
    NOTE THAT: if set too high the training speed will decrease. If num_buckets -> number of examples in the dataset the batch size
    will be small impacting training speed and possibly performance.

    The buckets can also be specified by passing a list to the bucket_boundaries
    argument instead of specifying a left_bucket_length and a bucket_length_multiplier.

    Example
    -------
    >>> import torch
    >>> import speechbrain as sb
    >>> from speechbrain.dataio.sampler import DynamicBatchSampler
    >>> from speechbrain.dataio.dataset import DynamicItemDataset
    >>> from speechbrain.dataio.dataloader import SaveableDataLoader
    >>> from speechbrain.dataio.batch import PaddedBatch
    >>> import numpy as np
    >>> item_lengths = sorted([np.random.randint(10, 100) for x in range(20)])
    >>> dataset = {"ex_{}".format(x) : {"wav" :torch.randn(x)} for x in item_lengths}
    >>> dataset = DynamicItemDataset(dataset)
    >>> dataset.set_output_keys(["wav"])
    >>> length_func = lambda x : len(x) # trivial in this example
    >>> bsampler = DynamicBatchSampler(dataset, 20, 4, length_func, shuffle=False, batch_ordering='descending')
    >>> dataloader = SaveableDataLoader(dataset, batch_sampler=bsampler, collate_fn=PaddedBatch)
    >>> for i, b in enumerate(dataloader):
    ...     data, length = b["wav"]
    >>> assert data.shape[-1] == max(item_lengths)

    Arguments
    ---------
    dataset : torch.utils.data.Dataset
        Pytorch Dataset from which elements will be sampled.
    max_batch_length : int
        Upper limit for the sum of the length of examples in a batch.
        Should be chosen based on your GPU memory.
    num_buckets : int
        Number of discrete buckets used to group examples together.
        If num_buckets == 1, all examples can be batched together. As the number of buckets grows only examples with similar
        length can be grouped together. This trades-off speed with randomization.
        Low number -> better randomization, High number -> faster training.
        However if set too high the training speed will decrease. If num_buckets -> number of examples in the dataset the batch size
        will be small impacting training speed and possibly performance.
        NOTE: you have either to specify manually the bucket_boundaries or the number of buckets.
    length_func : callable
        Function used to get length of each example from the dataset.
        This argument can be used only when the dataset is a Speechbrain DynamicItemDataset object.
        Can be anything: e.g. lambda x: x["duration"]*16000 returns number of samples
        if duration key in the annotation is in seconds and the file has 16kHz sampling freq.
    shuffle : bool
        Whether or not shuffle examples between each epoch.
    batch_ordering : string
        If ``random``, batches are randomly permuted; otherwise ``ascending`` or ``descending`` sorted by length.
    max_batch_ex : int
        If set, it limits the maximum number of examples that can be in a batch superseding max_batch_length
        in instances where the amount of examples will exceed the value specified here.
        E.g. you have a lot of short examples and the batch size for those will be too high, you can use this argument
        to limit the batch size for these short examples.
    bucket_boundaries : list
        Overrides bucket_length_multiplier and left_bucket_length by specifying manually
        the buckets right boundaries.
    lengths_list : list
        Overrides length_func by passing a list containing the length of each example
        in the dataset. This argument must be set when the dataset is a plain
        Pytorch Dataset object and not a DynamicItemDataset object as length_func
        cannot be used on Pytorch Datasets.
    epoch : int
        The epoch to start at.
    drop_last : bool
        If ``True``, the sampler will drop the last examples which
        have not been grouped.
    verbose : bool
        If ``True``, log also the stats for each batch at the first epoch.
    """

    def __init__(
        self,
        dataset,
        max_batch_length: int,
        num_buckets: int = None,
        length_func=lambda x: x["duration"],
        shuffle: bool = True,
        batch_ordering: str = "random",
        max_batch_ex: int = None,
        # NOTE(review): mutable default ([]) is shared across calls; it is
        # only read here, but confirm it is never mutated downstream.
        bucket_boundaries: List[int] = [],
        lengths_list: List[int] = None,
        seed: int = 42,
        epoch: int = 0,
        drop_last: bool = False,
        verbose: bool = False,
    ):
        self._dataset = dataset
        # Maps str(position-in-dataset) -> example length.
        self._ex_lengths = {}
        ex_ids = self._dataset.data_ids
        self.verbose = verbose

        # We do not put a default on num_buckets to encourage users to play with this parameter
        if num_buckets is None and len(bucket_boundaries) == 0:
            raise RuntimeError(
                "Please specify either num_buckets or bucket boundaries."
                "Check the docs, and/or the tutorial !"
            )

        if lengths_list is not None:
            # take length of examples from this argument and bypass length_key
            for indx in range(len(lengths_list)):
                self._ex_lengths[str(indx)] = lengths_list[indx]
        else:
            # use length func
            if not isinstance(dataset, DynamicItemDataset):
                raise NotImplementedError(
                    "Dataset should be a Speechbrain DynamicItemDataset when using length function"
                )
            for indx in range(len(self._dataset)):
                self._ex_lengths[str(indx)] = length_func(
                    self._dataset.data[ex_ids[indx]]
                )

        if len(bucket_boundaries) > 0:
            # Validate user-supplied boundaries: non-negative, unique,
            # ascending.
            if not all([x >= 0 for x in bucket_boundaries]):
                raise ValueError(
                    "All elements in bucket boundaries should be non-negative (>= 0)."
                )
            if not len(set(bucket_boundaries)) == len(bucket_boundaries):
                raise ValueError(
                    "Bucket_boundaries should not contain duplicates."
                )
            np.testing.assert_array_equal(
                np.array(bucket_boundaries),
                np.array(sorted(bucket_boundaries)),
                err_msg="The arg bucket_boundaries should be an ascending sorted list of non negative values values!",
            )
            self._bucket_boundaries = np.array(sorted(bucket_boundaries))
        else:
            # use num_buckets: derive boundaries from a lognormal quantile
            # warping of the length distribution.
            self._bucket_boundaries = np.array(
                self._get_boundaries_through_warping(
                    max_batch_length=max_batch_length,
                    num_quantiles=num_buckets,
                )
            )

        self._max_batch_length = max_batch_length
        self._shuffle_ex = shuffle
        self._batch_ordering = batch_ordering
        self._seed = seed
        self._drop_last = drop_last
        if max_batch_ex is None:
            # No explicit cap on examples per batch.
            max_batch_ex = np.inf
        self._max_batch_ex = max_batch_ex
        # Calculate bucket lengths - how often does one bucket boundary fit into max_batch_length?
        # The trailing [1] covers examples longer than the last boundary:
        # they go one-per-batch.
        self._bucket_lens = [
            max(1, int(max_batch_length / self._bucket_boundaries[i]))
            for i in range(len(self._bucket_boundaries))
        ] + [1]
        self._epoch = epoch
        self._generate_batches()

    def get_durations(self, batch):
        """Gets durations of the elements in the batch."""
        return [self._ex_lengths[str(idx)] for idx in batch]

    def _get_boundaries_through_warping(
        self, max_batch_length: int, num_quantiles: int,
    ) -> List[int]:
        """Compute bucket boundaries by taking lognormal quantiles and
        scaling them so the largest boundary equals max_batch_length."""
        # NOTE: the following lines do not cover that there is only one example in the dataset
        # warp frames (duration) distribution of train data
        logger.info("Batch quantisation in latent space")
        # linspace set-up
        num_boundaries = num_quantiles + 1
        # create latent linearly equal spaced buckets
        latent_boundaries = np.linspace(
            1 / num_boundaries, num_quantiles / num_boundaries, num_quantiles,
        )
        # get quantiles using lognormal distribution
        quantiles = lognorm.ppf(latent_boundaries, 1)
        # scale up to to max_batch_length
        bucket_boundaries = quantiles * max_batch_length / quantiles[-1]
        # compute resulting bucket length multipliers
        length_multipliers = [
            bucket_boundaries[x + 1] / bucket_boundaries[x]
            for x in range(num_quantiles - 1)
        ]
        # logging
        logger.info(
            "Latent bucket boundary - buckets: {} - length multipliers: {}".format(
                list(map("{:.2f}".format, bucket_boundaries)),
                list(map("{:.2f}".format, length_multipliers)),
            )
        )
        return list(sorted(bucket_boundaries))

    def _permute_batches(self):
        """Reorder self._batches according to self._batch_ordering."""
        if self._batch_ordering == "random":
            # deterministically shuffle based on epoch and seed
            g = torch.Generator()
            g.manual_seed(self._seed + self._epoch)
            sampler = torch.randperm(
                len(self._batches), generator=g
            ).tolist()  # type: ignore
            tmp = []
            for idx in sampler:
                tmp.append(self._batches[idx])
            self._batches = tmp
        elif self._batch_ordering == "ascending":
            # Sort batches by the length of their longest example.
            self._batches = sorted(
                self._batches,
                key=lambda x: max([self._ex_lengths[str(idx)] for idx in x]),
            )
        elif self._batch_ordering == "descending":
            self._batches = sorted(
                self._batches,
                key=lambda x: max([self._ex_lengths[str(idx)] for idx in x]),
                reverse=True,
            )
        else:
            raise NotImplementedError

    def _generate_batches(self):
        """Assign every example to a bucket and emit full batches, then
        order the batches according to self._batch_ordering."""
        logger.info("DynamicBatchSampler: Generating dynamic batches")
        if self._shuffle_ex:
            # deterministically shuffle based on epoch and seed
            g = torch.Generator()
            g.manual_seed(self._seed + self._epoch)
            sampler = torch.randperm(len(self._dataset), generator=g).tolist()  # type: ignore
        else:
            # take examples as they are: e.g. they have been sorted
            sampler = range(len(self._dataset))  # type: ignore

        self._batches = []
        # One in-progress batch per bucket.
        bucket_batches = [[] for i in self._bucket_lens]
        # Per-bucket length statistics, used only for logging below.
        stats_tracker = [
            {"min": np.inf, "max": -np.inf, "tot": 0, "n_ex": 0}
            for i in self._bucket_lens
        ]

        for idx in sampler:
            # length of pre-sampled audio
            item_len = self._ex_lengths[str(idx)]
            # bucket to fill up most padding
            bucket_id = np.searchsorted(self._bucket_boundaries, item_len)
            # fill audio's duration into that bucket
            bucket_batches[bucket_id].append(idx)

            stats_tracker[bucket_id]["min"] = min(
                stats_tracker[bucket_id]["min"], item_len
            )
            stats_tracker[bucket_id]["max"] = max(
                stats_tracker[bucket_id]["max"], item_len
            )
            stats_tracker[bucket_id]["tot"] += item_len
            stats_tracker[bucket_id]["n_ex"] += 1
            # track #samples - why not duration/#frames; rounded up?
            # keep track of durations, if necessary

            # Emit the batch once the bucket's target size or the absolute
            # example cap is reached.
            if (
                len(bucket_batches[bucket_id]) >= self._bucket_lens[bucket_id]
                or len(bucket_batches[bucket_id]) >= self._max_batch_ex
            ):
                self._batches.append(bucket_batches[bucket_id])
                bucket_batches[bucket_id] = []
                # keep track of durations

        # Dump remaining batches
        if not self._drop_last:
            for batch in bucket_batches:
                if batch:
                    self._batches.append(batch)

        self._permute_batches()  # possibly reorder batches

        if self._epoch == 0:  # only log at first epoch
            # frames per batch & their padding remaining
            boundaries = [0] + self._bucket_boundaries.tolist()

            for bucket_indx in range(len(self._bucket_boundaries)):
                try:
                    num_batches = stats_tracker[bucket_indx]["tot"] // (
                        self._max_batch_length
                    )
                    pad_factor = (
                        stats_tracker[bucket_indx]["max"]
                        - stats_tracker[bucket_indx]["min"]
                    ) / (
                        stats_tracker[bucket_indx]["tot"]
                        / stats_tracker[bucket_indx]["n_ex"]
                    )
                except ZeroDivisionError:
                    # Empty bucket: no examples fell into this interval.
                    num_batches = 0
                    pad_factor = 0

                logger.info(
                    (
                        "DynamicBatchSampler: Bucket {} with boundary {:.1f}-{:.1f} and "
                        + "batch_size {}: Num Examples {:.1f}, Num Full Batches {:.3f}, Pad Factor {:.3f}."
                    ).format(
                        bucket_indx,
                        boundaries[bucket_indx],
                        boundaries[bucket_indx + 1],
                        self._bucket_lens[bucket_indx],
                        stats_tracker[bucket_indx]["n_ex"],
                        num_batches,
                        pad_factor * 100,
                    )
                )

            if self.verbose:
                # Per-batch padding statistics (first epoch only).
                batch_stats = {
                    "tot_frames": [],
                    "tot_pad_frames": [],
                    "pad_%": [],
                }
                for batch in self._batches:
                    tot_frames = sum(
                        [self._ex_lengths[str(idx)] for idx in batch]
                    )
                    batch_stats["tot_frames"].append(tot_frames)
                    max_frames = max(
                        [self._ex_lengths[str(idx)] for idx in batch]
                    )
                    tot_pad = sum(
                        [
                            max_frames - self._ex_lengths[str(idx)]
                            for idx in batch
                        ]
                    )
                    batch_stats["tot_pad_frames"].append(tot_pad)
                    batch_stats["pad_%"].append(tot_pad / tot_frames * 100)

                padding_details = "Batch {} with {:.1f} frames with {} files - {:.1f} padding, {:.2f} (%) of total."
                padding_details = "DynamicBatchSampler: " + padding_details
                for i in range(len(self._batches)):
                    logger.info(
                        padding_details.format(
                            i,
                            batch_stats["tot_frames"][i],
                            len(self._batches[i]),
                            batch_stats["tot_pad_frames"][i],
                            batch_stats["pad_%"][i],
                        )
                    )

    def __iter__(self):
        for batch in self._batches:
            yield batch
        # After a full pass, prepare the ordering for the next epoch.
        if self._shuffle_ex:  # re-generate examples if ex_ordering == "random"
            self._generate_batches()
        if self._batch_ordering == "random":
            # we randomly permute the batches only --> faster
            self._permute_batches()

    def set_epoch(self, epoch):
        """
        You can also just access self.epoch, but we maintain this interface
        to mirror torch.utils.data.distributed.DistributedSampler
        """
        self._epoch = epoch
        self._generate_batches()

    def __len__(self):
        return len(self._batches)
# Heavily inspired by Catalyst, which is under Apache 2.0 licence.
# https://github.com/catalyst-team/catalyst/blob/51428d7756e62b9b8ee5379f38e9fd576eeb36e5/catalyst/data/sampler.py#L522
class DistributedSamplerWrapper(DistributedSampler):
    """Allows using any sampler (for example a batch sampler) with
    Distributed Data Parallel (DDP) correctly.

    Handing a sampler unchanged to every DDP process would give each process
    access to the full dataset; this wrapper restricts every process to its
    own distinct subset of the wrapped sampler's output.

    NOTE
    ----
    This is automatically applied to any sampler in the Brain class when DDP
    training is used.
    """

    def __init__(self, sampler, *args, **kwargs):
        # DistributedSampler only ever calls len() on its "dataset", so the
        # wrapped sampler itself can be handed over in that role.
        super().__init__(dataset=sampler, *args, **kwargs)
        self.sampler = sampler

    def __iter__(self):
        # Materialize the wrapped sampler's full ordering once, then let the
        # parent DistributedSampler decide which positions in that ordering
        # belong to this process, and fetch exactly those.
        underlying_order = list(iter(self.sampler))
        own_positions = super().__iter__()
        return iter(itemgetter(*own_positions)(underlying_order))

    def set_epoch(self, epoch):
        """Pass set_epoch() through to DistributedSampler and the wrapped sampler."""
        super().set_epoch(epoch)
        if hasattr(self.sampler, "set_epoch"):
            self.sampler.set_epoch(epoch)
class BalancingDataSampler(ReproducibleWeightedRandomSampler):
    """A data sampler that takes a single key from the dataset and
    ensures an approximately equal distribution by that key.

    Each example is weighted by the inverse frequency of its class, so every
    class is drawn with approximately the same probability.

    Arguments
    ---------
    dataset : DynamicItemDataset
        the dataset from which samples will be drawn
    key : str
        the key from which samples will be taken
    num_samples : int
        Number of samples to draw; defaults to the dataset length.
    replacement : bool
        To draw with replacement or not (within an epoch of num_samples).
    seed : int
        The base seed to use for the random number generator. It is recommended
        to use a value which has a good mix of 0 and 1 bits.
    epoch : int
        The epoch to start at.
    """

    def __init__(
        self,
        dataset,
        key,
        num_samples=None,
        replacement=True,
        seed=563375142,
        epoch=0,
        **kwargs,
    ):
        self.dataset = dataset
        self.key = key
        if not num_samples:
            num_samples = len(dataset)
        example_weights = self._compute_weights()
        super().__init__(
            example_weights, num_samples, replacement, seed, epoch, **kwargs
        )

    def _compute_weights(self):
        """Return the inverse-frequency weight of every example."""
        # Temporarily restrict dataset output to the balancing key only.
        with self.dataset.output_keys_as([self.key]):
            labels = [example[self.key] for example in self.dataset]
        label_counts = Counter(labels)
        # An example whose class has n members gets weight 1/n.
        return 1 / torch.tensor(
            [label_counts[label] for label in labels]
        )
| 32,036 | 38.212974 | 132 | py |
speechbrain | speechbrain-main/speechbrain/dataio/batch.py | """Batch collation
Authors
* Aku Rouhe 2020
"""
import collections
import torch
from speechbrain.utils.data_utils import mod_default_collate
from speechbrain.utils.data_utils import recursive_to
from speechbrain.utils.data_utils import batch_pad_right
from torch.utils.data._utils.collate import default_convert
from torch.utils.data._utils.pin_memory import (
pin_memory as recursive_pin_memory,
)
# (data, lengths) pair produced for each padded key in a PaddedBatch:
# `data` is the padded batch tensor and `lengths` holds one entry per
# example (relative lengths with the default padding_func).
PaddedData = collections.namedtuple("PaddedData", ["data", "lengths"])
class PaddedBatch:
    """Collate_fn when examples are dicts and have variable-length sequences.

    Different elements in the examples get matched by key.
    All numpy tensors get converted to Torch (PyTorch default_convert)
    Then, by default, all torch.Tensor valued elements get padded and support
    collective pin_memory() and to() calls.
    Regular Python data types are just collected in a list.

    Arguments
    ---------
    examples : list
        List of example dicts, as produced by Dataloader.
    padded_keys : list, None
        (Optional) List of keys to pad on. If None, pad all torch.Tensors
    device_prep_keys : list, None
        (Optional) Only these keys participate in collective memory pinning and moving with
        to().
        If None, defaults to all items with torch.Tensor values.
    padding_func : callable, optional
        Called with a list of tensors to be padded together. Needs to return
        two tensors: the padded data, and another tensor for the data lengths.
    padding_kwargs : dict
        (Optional) Extra kwargs to pass to padding_func. E.G. mode, value
    apply_default_convert : bool
        Whether to apply PyTorch default_convert (numpy to torch recursively,
        etc.) on all data. Default:True, usually does the right thing.
    nonpadded_stack : bool
        Whether to apply PyTorch-default_collate-like stacking on values that
        didn't get padded. This stacks if it can, but doesn't error out if it
        cannot. Default:True, usually does the right thing.

    Example
    -------
    >>> batch = PaddedBatch([
    ...     {"id": "ex1", "foo": torch.Tensor([1.])},
    ...     {"id": "ex2", "foo": torch.Tensor([2., 1.])}])
    >>> # Attribute or key-based access:
    >>> batch.id
    ['ex1', 'ex2']
    >>> batch["id"]
    ['ex1', 'ex2']
    >>> # torch.Tensors get padded
    >>> type(batch.foo)
    <class 'speechbrain.dataio.batch.PaddedData'>
    >>> batch.foo.data
    tensor([[1., 0.],
            [2., 1.]])
    >>> batch.foo.lengths
    tensor([0.5000, 1.0000])
    >>> # Batch supports collective operations:
    >>> _ = batch.to(dtype=torch.half)
    >>> batch.foo.data
    tensor([[1., 0.],
            [2., 1.]], dtype=torch.float16)
    >>> batch.foo.lengths
    tensor([0.5000, 1.0000], dtype=torch.float16)
    >>> # Numpy tensors get converted to torch and padded as well:
    >>> import numpy as np
    >>> batch = PaddedBatch([
    ...     {"wav": np.asarray([1,2,3,4])},
    ...     {"wav": np.asarray([1,2,3])}])
    >>> batch.wav  # +ELLIPSIS
    PaddedData(data=tensor([[1, 2,...
    >>> # Basic stacking collation deals with non padded data:
    >>> batch = PaddedBatch([
    ...     {"spk_id": torch.tensor([1]), "wav": torch.tensor([.1,.0,.3])},
    ...     {"spk_id": torch.tensor([2]), "wav": torch.tensor([.2,.3,-.1])}],
    ...     padded_keys=["wav"])
    >>> batch.spk_id
    tensor([[1],
            [2]])
    >>> # And some data is left alone:
    >>> batch = PaddedBatch([
    ...     {"text": ["Hello"]},
    ...     {"text": ["How", "are", "you?"]}])
    >>> batch.text
    [['Hello'], ['How', 'are', 'you?']]
    """

    def __init__(
        self,
        examples,
        padded_keys=None,
        device_prep_keys=None,
        padding_func=batch_pad_right,
        # NOTE(review): mutable default dict; it is only read and unpacked
        # here, but confirm it is never mutated downstream.
        padding_kwargs={},
        apply_default_convert=True,
        nonpadded_stack=True,
    ):
        self.__length = len(examples)
        # Keys are taken from the first example; all examples are assumed
        # to share the same keys -- TODO confirm with the dataset side.
        self.__keys = list(examples[0].keys())
        self.__padded_keys = []
        self.__device_prep_keys = []
        for key in self.__keys:
            values = [example[key] for example in examples]
            # Default convert usually does the right thing (numpy2torch etc.)
            if apply_default_convert:
                values = default_convert(values)
            if (padded_keys is not None and key in padded_keys) or (
                padded_keys is None and isinstance(values[0], torch.Tensor)
            ):
                # Padding and PaddedData
                self.__padded_keys.append(key)
                padded = PaddedData(*padding_func(values, **padding_kwargs))
                setattr(self, key, padded)
            else:
                # Default PyTorch collate usually does the right thing
                # (convert lists of equal sized tensors to batch tensors, etc.)
                if nonpadded_stack:
                    values = mod_default_collate(values)
                setattr(self, key, values)
            # NOTE: this check runs after the possible collate above, so it
            # inspects the (possibly stacked) values -- order matters here.
            if (device_prep_keys is not None and key in device_prep_keys) or (
                device_prep_keys is None and isinstance(values[0], torch.Tensor)
            ):
                self.__device_prep_keys.append(key)

    def __len__(self):
        # Number of examples in the batch.
        return self.__length

    def __getitem__(self, key):
        # Dict-style access mirrors the attribute access set up in __init__.
        if key in self.__keys:
            return getattr(self, key)
        else:
            raise KeyError(f"Batch doesn't have key: {key}")

    def __iter__(self):
        """Iterates over the different elements of the batch.

        Example
        -------
        >>> batch = PaddedBatch([
        ...     {"id": "ex1", "val": torch.Tensor([1.])},
        ...     {"id": "ex2", "val": torch.Tensor([2., 1.])}])
        >>> ids, vals = batch
        >>> ids
        ['ex1', 'ex2']
        """
        # Yields one collated value per key, in the key order of the first
        # example.
        return iter((getattr(self, key) for key in self.__keys))

    def pin_memory(self):
        """In-place, moves relevant elements to pinned memory."""
        # Only keys selected as device_prep in __init__ are touched.
        for key in self.__device_prep_keys:
            value = getattr(self, key)
            pinned = recursive_pin_memory(value)
            setattr(self, key, pinned)
        return self

    def to(self, *args, **kwargs):
        """In-place move/cast relevant elements.

        Passes all arguments to torch.Tensor.to, see its documentation.
        """
        for key in self.__device_prep_keys:
            value = getattr(self, key)
            moved = recursive_to(value, *args, **kwargs)
            setattr(self, key, moved)
        return self

    def at_position(self, pos):
        """Fetch an item by its position in the batch's key order."""
        key = self.__keys[pos]
        return getattr(self, key)

    @property
    def batchsize(self):
        """Returns the batch size (number of examples)."""
        return self.__length
class BatchsizeGuesser:
    """Try to figure out the batchsize, but never error out

    If this cannot figure out anything else, will fallback to guessing 1

    Example
    -------
    >>> guesser = BatchsizeGuesser()
    >>> # Works with simple tensors:
    >>> guesser(torch.randn((2,3)))
    2
    >>> # Works with sequences of tensors:
    >>> guesser((torch.randn((2,3)), torch.randint(high=5, size=(2,))))
    2
    >>> # Works with PaddedBatch:
    >>> guesser(PaddedBatch([{"wav": [1.,2.,3.]}, {"wav": [4.,5.,6.]}]))
    2
    >>> guesser("Even weird non-batches have a fallback")
    1
    """

    def __init__(self):
        # The method that worked on the previous batch; tried first on the
        # next call to avoid re-running the search every time.
        self.method = None

    def __call__(self, batch):
        try:
            return self.method(batch)
        except:  # noqa: E722
            # Either no method cached yet, or the batch type changed.
            return self.find_suitable_method(batch)

    def find_suitable_method(self, batch):
        """Try the different methods and note which worked"""
        try:
            bs = self.attr_based(batch)
            self.method = self.attr_based
            return bs
        except:  # noqa: E722
            pass
        try:
            bs = self.torch_tensor_bs(batch)
            self.method = self.torch_tensor_bs
            return bs
        except:  # noqa: E722
            pass
        try:
            bs = self.len_of_first(batch)
            self.method = self.len_of_first
            return bs
        except:  # noqa: E722
            pass
        try:
            bs = self.len_of_iter_first(batch)
            self.method = self.len_of_iter_first
            return bs
        except:  # noqa: E722
            pass
        # Last ditch fallback:
        bs = self.fallback(batch)
        # Fixed: previously this stored the *result* of fallback(batch) (an
        # int) instead of the method, making every later __call__ raise and
        # re-run the whole search.
        self.method = self.fallback
        return bs

    def attr_based(self, batch):
        """Guess via a `batchsize` attribute (e.g. PaddedBatch)."""
        return batch.batchsize

    def torch_tensor_bs(self, batch):
        """Guess via the first dimension of a tensor-like `shape`."""
        return batch.shape[0]

    def len_of_first(self, batch):
        """Guess via the length of the first element."""
        return len(batch[0])

    def len_of_iter_first(self, batch):
        """Guess via the length of the first item from an iterator."""
        return len(next(iter(batch)))

    def fallback(self, batch):
        """Last resort: guess a batch size of 1."""
        return 1
| 9,022 | 32.172794 | 91 | py |
speechbrain | speechbrain-main/speechbrain/dataio/dataloader.py | """PyTorch compatible DataLoaders
Essentially we extend PyTorch DataLoader by adding the ability to save the
data loading state, so that a checkpoint may be saved in the middle of an
epoch.
Example
-------
>>> import torch
>>> from speechbrain.utils.checkpoints import Checkpointer
>>> # An example "dataset" and its loader
>>> dataset = torch.randn(10, 1)
>>> dataloader = SaveableDataLoader(dataset, num_workers = 3)
>>> # Setup the checkpointer:
>>> tmpdir = getfixture('tmpdir')
>>> checkpointer = Checkpointer(tmpdir, {"dataloader": dataloader})
>>> # Iterate:
>>> for i, data_point in enumerate(dataloader):
... # Here you would process the data:
... rainfall_amount_prediction = data_point * 4.
... # Now, imagine the experiment gets killed on the fifth batch:
... if i == 4:
... break
... # Luckily, you had just saved a checkpoint:
... if i == 3:
... _ = checkpointer.save_checkpoint(end_of_epoch = False)
>>> # So when you restart the experiment:
>>> new_dataloader = SaveableDataLoader(dataset, num_workers = 3)
>>> new_checkpointer = Checkpointer(tmpdir, {"dataloader": new_dataloader})
>>> _ = new_checkpointer.recover_if_possible()
>>> # The dataloader fast-forwards to the position where we left off:
>>> assert next(iter(new_dataloader)) == dataset[4]
Authors:
* Aku Rouhe 2020
"""
from torch.utils.data import DataLoader
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import _BaseDataLoaderIter
import logging
import warnings
import functools
from speechbrain.dataio.batch import PaddedBatch, BatchsizeGuesser
from speechbrain.dataio.dataset import DynamicItemDataset
from speechbrain.dataio.sampler import ReproducibleRandomSampler
from speechbrain.utils.checkpoints import (
register_checkpoint_hooks,
mark_as_saver,
mark_as_loader,
)
# Optional support for webdataset
try:
    import webdataset as wds
    from importlib_metadata import version

    WDS_AVAILABLE = True

    # Use appropriate class based on webdataset version
    # (the 0.1.x series exposed Composable; later versions use DataPipeline)
    if version("webdataset")[0:4] == "0.1.":
        WDS_CLASS = wds.dataset.Composable
    else:
        WDS_CLASS = wds.DataPipeline
except ImportError:
    # webdataset is not installed; make_dataloader checks this flag.
    WDS_AVAILABLE = False

logger = logging.getLogger(__name__)
def make_dataloader(dataset, looped_nominal_epoch=None, **loader_kwargs):
    """Makes a basic DataLoader with SpeechBrain defaults.

    For DynamicItemDatasets (which return dicts), uses PaddedBatch as the
    default collate_fn. Shuffling is implemented with
    ReproducibleRandomSampler. Unless the Dataset is an IterableDataset,
    a SaveableDataLoader is created. For webdataset datasets, batch_size
    defaults to None, since batching is recommended to happen in the
    dataset itself. Optionally, the underlying dataloader is looped
    continuously, with iterations stopping at a nominal epoch length.

    Arguments
    ---------
    dataset : Dataset
        The dataset to make a DataLoader for.
    looped_nominal_epoch : None, int
        If an integer is given, loop the underlying DataLoader infinitely and
        set a nominal epoch length in batches (or whatever the DataLoader
        yields).
    **loader_kwargs : dict
        Keyword args to DataLoader, see PyTorch DataLoader for
        options.

    Returns
    -------
    DataLoader
        If looped_nominal_epoch is None
    LoopedLoader
        If looped_nominal_epoch is not None
    """
    # Default to PaddedBatch collation for DynamicItemDatasets:
    if isinstance(dataset, DynamicItemDataset):
        loader_kwargs.setdefault("collate_fn", PaddedBatch)
    # Reproducible random sampling:
    if loader_kwargs.get("shuffle", False):
        if loader_kwargs.get("sampler") is not None:
            raise ValueError(
                "Cannot specify both shuffle=True and a "
                "sampler in loader_kwargs"
            )
        loader_kwargs["sampler"] = ReproducibleRandomSampler(dataset)
        # DataLoader forbids setting both sampler and shuffle, so drop
        # shuffle here. This only mutates the local **kwargs dict, so any
        # dict of options held by the caller is unaffected.
        del loader_kwargs["shuffle"]
    # With WDS it is recommended to do batching in the dataset itself,
    # which requires batch_size = None in the DataLoader
    if (
        WDS_AVAILABLE
        and isinstance(dataset, WDS_CLASS)
        and "batch_size" not in loader_kwargs
    ):
        loader_kwargs["batch_size"] = None
    # IterableDatasets cannot be checkpointed with SaveableDataLoader:
    if isinstance(dataset, IterableDataset):
        dataloader = DataLoader(dataset, **loader_kwargs)
    else:
        dataloader = SaveableDataLoader(dataset, **loader_kwargs)
    if looped_nominal_epoch is not None:
        dataloader = LoopedLoader(dataloader, looped_nominal_epoch)
    return dataloader
# We essentially want to make the DataLoader iterators able to skip ahead
# after checkpoint recovery
# This should be handled by the DataLoader iterators' base class.
# To make the implementation here a little more maintainable
# we decide to patch some PyTorch functionality
def __new_init(self, loader, *args, **kwargs):
    """Patched ``_BaseDataLoaderIter.__init__``.

    After the normal init, fast-forwards the sampler iterator when the
    loader carries a recovery position (set by SaveableDataLoader on
    checkpoint load), so iteration resumes where it left off.
    """
    self.__old_init__(loader, *args, **kwargs)
    if (
        hasattr(loader, "_speechbrain_recovery_skip_to")
        and loader._speechbrain_recovery_skip_to is not None
    ):
        # Fast forward the sampler iterator since we have recovered:
        for i in range(loader._speechbrain_recovery_skip_to):
            try:
                next(self._sampler_iter)
            except StopIteration:
                # BUGFIX: these message parts used to be separate
                # expression statements, so only the first fragment was
                # ever passed to warnings.warn. Parenthesize so the
                # full message is emitted.
                MSG = (
                    "Tried to fast-forward Sampler after checkpoint "
                    f"recovery by {loader._speechbrain_recovery_skip_to} "
                    "indices, but now Sampler raised StopIteration after "
                    f"{i} indices. Ignoring this mismatch."
                )
                warnings.warn(MSG)
                break
            # Update inside the loop: counts only successfully skipped
            # indices, and avoids an unbound `i` when skipping 0 indices.
            self._num_yielded = i + 1
        # Mark recovery as done:
        loader._speechbrain_recovery_skip_to = None
def __new_reset(self, loader, first_iter=False, *args, **kwargs):
    """Patched ``_BaseDataLoaderIter._reset`` that preserves recovered state."""
    # On the first iteration, these have already normally been set by the init anyway.
    # And we don't want to overwrite them if we've recovered
    if not first_iter:
        self._sampler_iter = iter(self._index_sampler)
        self._num_yielded = 0
        self._IterableDataset_len_called = loader._IterableDataset_len_called
# functools.update_wrapper is meant for decorators, but it should basically
# preserve what we want:
functools.update_wrapper(__new_init, _BaseDataLoaderIter.__init__)
# Monkey-patch: stash the original __init__ so __new_init can delegate to it,
# then install the recovery-aware replacement.
_BaseDataLoaderIter.__old_init__ = _BaseDataLoaderIter.__init__
_BaseDataLoaderIter.__init__ = __new_init
# _reset may not exist in all PyTorch versions; patch it only when available.
if hasattr(_BaseDataLoaderIter, "_reset"):
    _BaseDataLoaderIter._reset = __new_reset
@register_checkpoint_hooks
class SaveableDataLoader(DataLoader):
    """A saveable version of the PyTorch DataLoader.

    See `torch.utils.data.DataLoader` for usage. This class should work exactly
    like the PyTorch basic DataLoader, but this can be checkpointed with
    SpeechBrain's Checkpointer.

    Note
    ----
    1. The saveability is implemented via some unfortunately slightly magical
    means.
    2. The data loader cannot recover after entering __iter__. Normally this is
    not a problem, as recovery should happen before training begins. However,
    just before evaluation, it is also typical to recover the checkpoint at
    which performance was the best. Thus, if a checkpoint is loaded after
    entering __iter__, we just assume it is for this reason. A warning is
    logged, but that is all.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if isinstance(self.dataset, IterableDataset):
            logging.warning(
                "SaveableDataLoader cannot save the position in an "
                "IterableDataset. Save the position on the dataset itself."
            )
        # Set by _speechbrain_load on recovery; consumed (and reset to None)
        # by the patched _BaseDataLoaderIter.__init__.
        self._speechbrain_recovery_skip_to = None
        # Reference to the latest iterator, see __iter__ below.
        self._speechbrain_iterator = None

    def __iter__(self):
        iterator = super().__iter__()
        # Keep a reference to the iterator,
        # to be able to access the iterator._num_yielded value.
        # Keep a full reference (keeping the iterator alive)
        # rather than e.g. a weakref, as we may want to save a checkpoint
        # after the iterator has been exhausted, but before the full epoch has
        # ended (e.g. validation is still running)
        self._speechbrain_iterator = iterator
        return iterator

    @mark_as_saver
    def _speechbrain_save(self, path):
        # Writes the number of batches yielded so far (or the literal
        # string "None" if iteration has not started) to `path`.
        if isinstance(self.dataset, IterableDataset):
            logging.warning(
                "Warning again: a checkpoint was requested on "
                "SaveableDataLoader, but the dataset is an IterableDataset. "
                "Cannot save the position in an IterableDataset. Not raising "
                "an error; assuming that you know what you're doing."
            )
        if self._speechbrain_iterator is None:
            to_save = None
        else:
            to_save = self._speechbrain_iterator._num_yielded
        with open(path, "w") as fo:
            fo.write(str(to_save))

    @mark_as_loader
    def _speechbrain_load(self, path, end_of_epoch, device=None):
        # Restores the position written by _speechbrain_save, unless
        # iteration has already started or the checkpoint marks epoch end.
        del device  # Unused here
        if self._speechbrain_iterator is not None:
            logging.debug(
                "SaveableDataLoader was requested to load a "
                "checkpoint, but the DataLoader has already been "
                "iterated. The DataLoader file will be ignored. "
                "This is normal in evaluation, when a checkpoint is "
                "loaded just to retrieve the best model."
            )
            return
        if end_of_epoch:
            # Don't load at end of epoch, as we actually want to start a fresh
            # epoch iteration next.
            return
        with open(path) as fi:
            saved = fi.read()
            if saved == str(None):
                # Saved at a point where e.g. an iterator did not yet exist.
                return
            else:
                self._speechbrain_recovery_skip_to = int(saved)
@register_checkpoint_hooks
class LoopedLoader:
    """Loops an underlying iterable indefinitely, with nominal epoch lengths

    This is useful for working with IterableDatasets, and particularly
    webdataset-style loading. We recommend using ``.repeat()`` on the
    webdataset IterableDataset instance, so that the underlying dataloader
    naturally continues for ever.

    Arguments
    ---------
    loader : iterable
        A DataLoader or other iterable that is looped repeatedly.
    epoch_length : int
        The length of the nominal epoch. After this many steps, raises
        StopIteration
    batchsize_fn : callable, optional
        Maps a yielded batch to its batch size, for sample counting.
        Defaults to a BatchsizeGuesser instance.
    """

    def __init__(self, loader, epoch_length, batchsize_fn=None):
        self.loader = loader
        self.iterator = None
        self.epoch_length = epoch_length
        self.step = 0  # Step in epoch
        self.total_steps = 0  # Total steps ever
        self.total_samples = 0  # Total samples seen on this process
        # BUGFIX: a user-supplied batchsize_fn used to be silently dropped
        # (there was no else branch), leaving self.batchsize_fn unset and
        # causing an AttributeError in __next__.
        if batchsize_fn is None:
            batchsize_fn = BatchsizeGuesser()
        self.batchsize_fn = batchsize_fn

    def __iter__(self):
        if self.iterator is None:
            self.iterator = iter(self.loader)
        return self

    def __next__(self):
        if self.step < self.epoch_length:
            self.step += 1
            self.total_steps += 1
            try:
                batch = next(self.iterator)
            except StopIteration:
                # Underlying loader ran out: restart it transparently.
                self.iterator = iter(self.loader)
                batch = next(self.iterator)
            self.total_samples += self.batchsize_fn(batch)
            return batch
        else:
            # Nominal epoch over; reset the in-epoch counter.
            self.step = 0
            raise StopIteration

    def __len__(self):
        return self.epoch_length

    @mark_as_saver
    def save(self, path):
        """Saves the needed information."""
        with open(path, "w") as fo:
            print(self.step, file=fo)
            print(self.total_steps, file=fo)
            print(self.total_samples, file=fo)

    @mark_as_loader
    def load(self, path, end_of_epoch=True, device=None):
        """Loads the needed information."""
        del device  # Unused here
        with open(path) as fi:
            self.step = int(fi.readline().strip())
            self.total_steps = int(fi.readline().strip())
            self.total_samples = int(fi.readline().strip())
            if not end_of_epoch and self.step == 0 and self.total_steps > 0:
                # Step has been set to 0 at the end of iteration,
                # so return it to epoch_length, so that first iteration
                # of this will immediately raise StopIteration.
                # Basically, this can happen when e.g. the main training
                # loop has already finished but there is a checkpoint in the
                # middle of validation.
                self.step = self.epoch_length
| 13,097 | 36.637931 | 86 | py |
speechbrain | speechbrain-main/speechbrain/dataio/encoder.py | """Encoding categorical data as integers
Authors
* Samuele Cornell 2020
* Aku Rouhe 2020
"""
import ast
import torch
import collections
import itertools
import logging
import speechbrain as sb
from speechbrain.utils.checkpoints import (
mark_as_saver,
mark_as_loader,
register_checkpoint_hooks,
)
logger = logging.getLogger(__name__)

# Default strings for the special tokens used by the encoder classes below.
# NOTE: Changing these does NOT change the defaults in the classes.
# Consider these read-only.
DEFAULT_UNK = "<unk>"
DEFAULT_BOS = "<bos>"
DEFAULT_EOS = "<eos>"
DEFAULT_BLANK = "<blank>"
@register_checkpoint_hooks
class CategoricalEncoder:
"""Encode labels of a discrete set.
Used for encoding, e.g., speaker identities in speaker recognition.
Given a collection of hashables (e.g a strings) it encodes
every unique item to an integer value: ["spk0", "spk1"] --> [0, 1]
Internally the correspondence between each label to its index is handled by
two dictionaries: lab2ind and ind2lab.
The label integer encoding can be generated automatically from a SpeechBrain
DynamicItemDataset by specifying the desired entry (e.g., spkid) in the annotation
and calling update_from_didataset method:
>>> from speechbrain.dataio.encoder import CategoricalEncoder
>>> from speechbrain.dataio.dataset import DynamicItemDataset
>>> dataset = {"ex_{}".format(x) : {"spkid" : "spk{}".format(x)} for x in range(20)}
>>> dataset = DynamicItemDataset(dataset)
>>> encoder = CategoricalEncoder()
>>> encoder.update_from_didataset(dataset, "spkid")
>>> assert len(encoder) == len(dataset) # different speaker for each utterance
However can also be updated from an iterable:
>>> from speechbrain.dataio.encoder import CategoricalEncoder
>>> from speechbrain.dataio.dataset import DynamicItemDataset
>>> dataset = ["spk{}".format(x) for x in range(20)]
>>> encoder = CategoricalEncoder()
>>> encoder.update_from_iterable(dataset)
>>> assert len(encoder) == len(dataset)
Note
----
In both methods it can be specified it the single element in the iterable
or in the dataset should be treated as a sequence or not (default False).
If it is a sequence each element in the sequence will be encoded.
>>> from speechbrain.dataio.encoder import CategoricalEncoder
>>> from speechbrain.dataio.dataset import DynamicItemDataset
>>> dataset = [[x+1, x+2] for x in range(20)]
>>> encoder = CategoricalEncoder()
>>> encoder.update_from_iterable(dataset, sequence_input=True)
>>> assert len(encoder) == 21 # there are only 21 unique elements 1-21
This class offers 4 different methods to explicitly add a label in the internal
dicts: add_label, ensure_label, insert_label, enforce_label.
add_label and insert_label will raise an error if it is already present in the
internal dicts. insert_label, enforce_label allow also to specify the integer value
to which the desired label is encoded.
Encoding can be performed using 4 different methods:
encode_label, encode_sequence, encode_label_torch and encode_sequence_torch.
encode_label operate on single labels and simply returns the corresponding
integer encoding:
>>> from speechbrain.dataio.encoder import CategoricalEncoder
>>> from speechbrain.dataio.dataset import DynamicItemDataset
>>> dataset = ["spk{}".format(x) for x in range(20)]
>>> encoder.update_from_iterable(dataset)
    >>> encoder.encode_label("spk1")
    22
    >>>
encode_sequence on sequences of labels:
>>> encoder.encode_sequence(["spk1", "spk19"])
[22, 40]
>>>
encode_label_torch and encode_sequence_torch return torch tensors
>>> encoder.encode_sequence_torch(["spk1", "spk19"])
tensor([22, 40])
>>>
Decoding can be performed using decode_torch and decode_ndim methods.
>>> encoded = encoder.encode_sequence_torch(["spk1", "spk19"])
>>> encoder.decode_torch(encoded)
['spk1', 'spk19']
>>>
decode_ndim is used for multidimensional list or pytorch tensors
>>> encoded = encoded.unsqueeze(0).repeat(3, 1)
>>> encoder.decode_torch(encoded)
[['spk1', 'spk19'], ['spk1', 'spk19'], ['spk1', 'spk19']]
>>>
In some applications, it can happen that during testing a label which has not
been encountered during training is encountered. To handle this out-of-vocabulary
problem add_unk can be used. Every out-of-vocab label is mapped to this special
<unk> label and its corresponding integer encoding.
>>> import torch
>>> try:
... encoder.encode_label("spk42")
... except KeyError:
... print("spk42 is not in the encoder this raises an error!")
spk42 is not in the encoder this raises an error!
>>> encoder.add_unk()
41
>>> encoder.encode_label("spk42")
41
>>>
returns the <unk> encoding
This class offers also methods to save and load the internal mappings between
labels and tokens using: save and load methods as well as load_or_create.
"""
VALUE_SEPARATOR = " => "
EXTRAS_SEPARATOR = "================\n"
    def __init__(self, starting_index=0, **special_labels):
        """Create an empty encoder.

        Arguments
        ---------
        starting_index : int
            The first integer index handed out to a label.
        **special_labels
            Special labels handled by handle_special_labels
            (e.g. ``unk_label=...``).
        """
        self.lab2ind = {}
        self.ind2lab = {}
        self.starting_index = starting_index
        # NOTE: unk_label is not necessarily set at all!
        # This is because None is a suitable value for unk.
        # So the test is: hasattr(self, "unk_label")
        # rather than self.unk_label is not None
        self.handle_special_labels(special_labels)
def handle_special_labels(self, special_labels):
"""Handles special labels such as unk_label."""
if "unk_label" in special_labels:
self.add_unk(special_labels["unk_label"])
    def __len__(self):
        """Returns the number of distinct labels currently encoded."""
        return len(self.lab2ind)
@classmethod
def from_saved(cls, path):
"""Recreate a previously saved encoder directly"""
obj = cls()
obj.load(path)
return obj
def update_from_iterable(self, iterable, sequence_input=False):
"""Update from iterator
Arguments
---------
iterable : iterable
Input sequence on which to operate.
sequence_input : bool
Whether iterable yields sequences of labels or individual labels
directly. (default False)
"""
if sequence_input:
label_iterator = itertools.chain.from_iterable(iterable)
else:
label_iterator = iter(iterable)
for label in label_iterator:
self.ensure_label(label)
def update_from_didataset(
self, didataset, output_key, sequence_input=False
):
"""Update from DynamicItemDataset.
Arguments
---------
didataset : DynamicItemDataset
Dataset on which to operate.
output_key : str
Key in the dataset (in data or a dynamic item) to encode.
sequence_input : bool
Whether the data yielded with the specified key consists of
sequences of labels or individual labels directly.
"""
with didataset.output_keys_as([output_key]):
self.update_from_iterable(
(data_point[output_key] for data_point in didataset),
sequence_input=sequence_input,
)
def limited_labelset_from_iterable(
self, iterable, sequence_input=False, n_most_common=None, min_count=1
):
"""Produce label mapping from iterable based on label counts
Used to limit label set size.
Arguments
---------
iterable : iterable
Input sequence on which to operate.
sequence_input : bool
Whether iterable yields sequences of labels or individual labels
directly. False by default.
n_most_common : int, None
Take at most this many labels as the label set, keeping the most
common ones. If None (as by default), take all.
min_count : int
Don't take labels if they appear less than this many times.
Returns
-------
collections.Counter
The counts of the different labels (unfiltered).
"""
if self.lab2ind:
clsname = self.__class__.__name__
logger.info(
f"Limited_labelset_from_iterable called, "
f"but {clsname} is not empty. "
"The new labels will be added, i.e. won't overwrite. "
"This is normal if there is e.g. an unk label already."
)
if sequence_input:
label_iterator = itertools.chain.from_iterable(iterable)
else:
label_iterator = iter(iterable)
counts = collections.Counter(label_iterator)
for label, count in counts.most_common(n_most_common):
if count < min_count:
# .most_common() produces counts in descending order,
# so no more labels can be found
break
self.add_label(label)
return counts
    def load_or_create(
        self,
        path,
        from_iterables=[],
        from_didatasets=[],
        sequence_input=False,
        output_key=None,
        special_labels={},
    ):
        """Convenient syntax for creating the encoder conditionally

        This pattern would be repeated in so many experiments that
        we decided to add a convenient shortcut for it here. The
        current version is multi-gpu (DDP) safe.
        """
        # NOTE(review): the mutable default arguments are only iterated,
        # never mutated, so they are harmless here — but consider
        # None-defaults if this signature is ever revisited.
        # Only the main process creates and saves the encoding; every
        # process then loads it after the barrier, so all ranks agree.
        try:
            if sb.utils.distributed.if_main_process():
                if not self.load_if_possible(path):
                    for iterable in from_iterables:
                        self.update_from_iterable(iterable, sequence_input)
                    for didataset in from_didatasets:
                        if output_key is None:
                            raise ValueError(
                                "Provide an output_key for "
                                "DynamicItemDataset"
                            )
                        self.update_from_didataset(
                            didataset, output_key, sequence_input
                        )
                    self.handle_special_labels(special_labels)
                    self.save(path)
        finally:
            # Barrier in finally so other ranks don't hang if the main
            # process errors out above.
            sb.utils.distributed.ddp_barrier()
        self.load(path)
def add_label(self, label):
"""Add new label to the encoder, at the next free position.
Arguments
---------
label : hashable
Most often labels are str, but anything that can act as dict key is
supported. Note that default save/load only supports Python
literals.
Returns
-------
int
The index that was used to encode this label.
"""
if label in self.lab2ind:
clsname = self.__class__.__name__
raise KeyError(f"Label already present in {clsname}")
index = self._next_index()
self.lab2ind[label] = index
self.ind2lab[index] = label
return index
def ensure_label(self, label):
"""Add a label if it is not already present.
Arguments
---------
label : hashable
Most often labels are str, but anything that can act as dict key is
supported. Note that default save/load only supports Python
literals.
Returns
-------
int
The index that was used to encode this label.
"""
if label in self.lab2ind:
return self.lab2ind[label]
else:
return self.add_label(label)
def insert_label(self, label, index):
"""Add a new label, forcing its index to a specific value.
If a label already has the specified index, it is moved to the end
of the mapping.
Arguments
---------
label : hashable
Most often labels are str, but anything that can act as dict key is
supported. Note that default save/load only supports Python
literals.
index : int
The specific index to use.
"""
if label in self.lab2ind:
clsname = self.__class__.__name__
raise KeyError(f"Label already present in {clsname}")
else:
self.enforce_label(label, index)
    def enforce_label(self, label, index):
        """Make sure label is present and encoded to a particular index.

        If the label is present but encoded to some other index, it is
        moved to the given index.

        If there is already another label at the
        given index, that label is moved to the next free position.

        Arguments
        ---------
        label : hashable
            The label to enforce.
        index : int
            The index to encode the label at (coerced with int()).
        """
        index = int(index)
        if label in self.lab2ind:
            if index == self.lab2ind[label]:
                # Nothing to do: already at the requested index.
                return
            else:
                # Delete old index mapping. Everything else gets overwritten.
                del self.ind2lab[self.lab2ind[label]]
        # Move other label out of the way:
        if index in self.ind2lab:
            saved_label = self.ind2lab[index]
            moving_other = True
        else:
            moving_other = False
        # Ready to push the new index.
        self.lab2ind[label] = index
        self.ind2lab[index] = label
        # And finally put the moved index in new spot.
        if moving_other:
            logger.info(
                f"Moving label {repr(saved_label)} from index "
                f"{index}, because {repr(label)} was put at its place."
            )
            new_index = self._next_index()
            self.lab2ind[saved_label] = new_index
            self.ind2lab[new_index] = saved_label
    def add_unk(self, unk_label=DEFAULT_UNK):
        """Add label for unknown tokens (out-of-vocab).

        When asked to encode unknown labels, they can be mapped to this.

        Arguments
        ---------
        unk_label : hashable, optional
            Most often labels are str, but anything that can act as dict key is
            supported. Note that default save/load only supports Python
            literals. Default: <unk>. This can be None, as well!

        Returns
        -------
        int
            The index that was used to encode this.
        """
        self.unk_label = unk_label
        return self.add_label(unk_label)
def _next_index(self):
"""The index to use for the next new label"""
index = self.starting_index
while index in self.ind2lab:
index += 1
return index
def is_continuous(self):
"""Check that the set of indices doesn't have gaps
For example:
If starting index = 1
Continuous: [1,2,3,4]
Continuous: [0,1,2]
Non-continuous: [2,3,4]
Non-continuous: [1,2,4]
Returns
-------
bool
True if continuous.
"""
# Because of Python indexing this also handles the special cases
# of 0 or 1 labels.
indices = sorted(self.ind2lab.keys())
return self.starting_index in indices and all(
j - i == 1 for i, j in zip(indices[:-1], indices[1:])
)
def encode_label(self, label, allow_unk=True):
"""Encode label to int
Arguments
---------
label : hashable
Label to encode, must exist in the mapping.
allow_unk : bool
If given, that label is not in the label set
AND unk_label has been added with add_unk(),
allows encoding to unk_label's index.
Returns
-------
int
Corresponding encoded int value.
"""
try:
return self.lab2ind[label]
except KeyError:
if hasattr(self, "unk_label") and allow_unk:
return self.lab2ind[self.unk_label]
elif hasattr(self, "unk_label") and not allow_unk:
raise KeyError(
f"Unknown label {label}, and explicitly "
"disallowed the use of the existing unk-label"
)
elif not hasattr(self, "unk_label") and allow_unk:
raise KeyError(
f"Cannot encode unknown label {label}. "
"You have not called add_unk() to add a special "
"unk-label for unknown labels."
)
else:
raise KeyError(
f"Couldn't and wouldn't encode unknown label " f"{label}."
)
def encode_label_torch(self, label, allow_unk=True):
"""Encode label to torch.LongTensor.
Arguments
---------
label : hashable
Label to encode, must exist in the mapping.
Returns
-------
torch.LongTensor
Corresponding encoded int value.
Tensor shape [1].
"""
return torch.LongTensor([self.encode_label(label, allow_unk)])
def encode_sequence(self, sequence, allow_unk=True):
"""Encode a sequence of labels to list
Arguments
---------
x : iterable
Labels to encode, must exist in the mapping.
Returns
-------
list
Corresponding integer labels.
"""
return [self.encode_label(label, allow_unk) for label in sequence]
def encode_sequence_torch(self, sequence, allow_unk=True):
"""Encode a sequence of labels to torch.LongTensor
Arguments
---------
x : iterable
Labels to encode, must exist in the mapping.
Returns
-------
torch.LongTensor
Corresponding integer labels.
Tensor shape [len(sequence)].
"""
return torch.LongTensor(
[self.encode_label(label, allow_unk) for label in sequence]
)
def decode_torch(self, x):
"""Decodes an arbitrarily nested torch.Tensor to a list of labels.
Provided separately because Torch provides clearer introspection,
and so doesn't require try-except.
Arguments
---------
x : torch.Tensor
Torch tensor of some integer dtype (Long, int) and any shape to
decode.
Returns
-------
list
list of original labels
"""
decoded = []
# Recursively operates on the different dimensions.
if x.ndim == 1: # Last dimension!
for element in x:
decoded.append(self.ind2lab[int(element)])
else:
for subtensor in x:
decoded.append(self.decode_torch(subtensor))
return decoded
def decode_ndim(self, x):
"""Decodes an arbitrarily nested iterable to a list of labels.
This works for essentially any pythonic iterable (including torch), and
also single elements.
Arguments
---------
x : Any
Python list or other iterable or torch.Tensor or a single integer element
Returns
-------
list, Any
ndim list of original labels, or if input was single element,
output will be, too.
"""
# Recursively operates on the different dimensions.
try:
decoded = []
for subtensor in x:
decoded.append(self.decode_ndim(subtensor))
return decoded
except TypeError: # Not an iterable, bottom level!
return self.ind2lab[int(x)]
@mark_as_saver
def save(self, path):
"""Save the categorical encoding for later use and recovery
Saving uses a Python literal format, which supports things like
tuple labels, but is considered safe to load (unlike e.g. pickle).
Arguments
---------
path : str, Path
Where to save. Will overwrite.
"""
extras = self._get_extras()
self._save_literal(path, self.lab2ind, extras)
def load(self, path):
"""Loads from the given path.
CategoricalEncoder uses a Python literal format, which supports things
like tuple labels, but is considered safe to load (unlike e.g. pickle).
Arguments
---------
path : str, Path
Where to load from.
"""
if self.lab2ind:
clsname = self.__class__.__name__
logger.info(
f"Load called, but {clsname} is not empty. "
"Loaded data will overwrite everything. "
"This is normal if there is e.g. an unk label defined at init."
)
lab2ind, ind2lab, extras = self._load_literal(path)
self.lab2ind = lab2ind
self.ind2lab = ind2lab
self._set_extras(extras)
# If we're here, load was a success!
logger.debug(f"Loaded categorical encoding from {path}")
    @mark_as_loader
    def load_if_possible(self, path, end_of_epoch=False, device=None):
        """Loads if possible, returns a bool indicating if loaded or not.

        Arguments
        ---------
        path : str, Path
            Where to load from.

        Returns
        -------
        bool :
            If load was successful.

        Example
        -------
        >>> encoding_file = getfixture('tmpdir') / "encoding.txt"
        >>> encoder = CategoricalEncoder()
        >>> # The idea is in an experiment script to have something like this:
        >>> if not encoder.load_if_possible(encoding_file):
        ...     encoder.update_from_iterable("abcd")
        ...     encoder.save(encoding_file)
        >>> # So the first time you run the experiment, the encoding is created.
        >>> # However, later, the encoding exists:
        >>> encoder = CategoricalEncoder()
        >>> if not encoder.load_if_possible(encoding_file):
        ...     assert False  # We won't get here!
        >>> encoder.decode_ndim(range(4))
        ['a', 'b', 'c', 'd']
        """
        # The checkpointer-loader arguments are irrelevant for an encoder:
        del end_of_epoch  # Unused here.
        del device  # Unused here.
        try:
            self.load(path)
        except FileNotFoundError:
            logger.debug(
                f"Would load categorical encoding from {path}, "
                "but file doesn't exist yet."
            )
            return False
        except (ValueError, SyntaxError):
            # ast.literal_eval raises these for malformed file contents.
            logger.debug(
                f"Would load categorical encoding from {path}, "
                "and file existed but seems to be corrupted or otherwise couldn't load."
            )
            return False
        return True  # If here, all good
def _get_extras(self):
"""Override this to provide any additional things to save
Call super()._get_extras() to get the base extras
"""
extras = {"starting_index": self.starting_index}
if hasattr(self, "unk_label"):
extras["unk_label"] = self.unk_label
return extras
def _set_extras(self, extras):
"""Override this to e.g. load any extras needed
Call super()._set_extras(extras) to set the base extras
"""
if "unk_label" in extras:
self.unk_label = extras["unk_label"]
self.starting_index = extras["starting_index"]
@staticmethod
def _save_literal(path, lab2ind, extras):
"""Save which is compatible with _load_literal"""
with open(path, "w") as f:
for label, ind in lab2ind.items():
f.write(
repr(label)
+ CategoricalEncoder.VALUE_SEPARATOR
+ str(ind)
+ "\n"
)
f.write(CategoricalEncoder.EXTRAS_SEPARATOR)
for key, value in extras.items():
f.write(
repr(key)
+ CategoricalEncoder.VALUE_SEPARATOR
+ repr(value)
+ "\n"
)
f.flush()
    @staticmethod
    def _load_literal(path):
        """Load which supports Python literals as keys.

        This is considered safe for user input, as well (unlike e.g. pickle).
        """
        lab2ind = {}
        ind2lab = {}
        extras = {}
        with open(path) as f:
            # Load the label to index mapping (until EXTRAS_SEPARATOR)
            # NOTE: the two loops deliberately share the same file iterator;
            # the second loop continues right after the separator line.
            for line in f:
                if line == CategoricalEncoder.EXTRAS_SEPARATOR:
                    break
                # maxsplit=1 so labels containing the separator still parse.
                literal, ind = line.strip().split(
                    CategoricalEncoder.VALUE_SEPARATOR, maxsplit=1
                )
                ind = int(ind)
                # literal_eval (not eval) keeps loading safe for user input.
                label = ast.literal_eval(literal)
                lab2ind[label] = ind
                ind2lab[ind] = label
            # Load the extras:
            for line in f:
                literal_key, literal_value = line.strip().split(
                    CategoricalEncoder.VALUE_SEPARATOR, maxsplit=1
                )
                key = ast.literal_eval(literal_key)
                value = ast.literal_eval(literal_value)
                extras[key] = value
        return lab2ind, ind2lab, extras
class TextEncoder(CategoricalEncoder):
    """CategoricalEncoder subclass which offers specific methods for encoding text and handle
    special tokens for training of sequence to sequence models.

    In detail, aside special <unk> token already present in CategoricalEncoder
    for handling out-of-vocab tokens here special methods to handle
    <bos> beginning of sequence and <eos> tokens are defined.

    Note: update_from_iterable and update_from_didataset here have as default
    sequence_input=True because it is assumed that this encoder is used on
    iterables of strings: e.g.

    >>> from speechbrain.dataio.encoder import TextEncoder
    >>> dataset = [["encode", "this", "textencoder"], ["foo", "bar"]]
    >>> encoder = TextEncoder()
    >>> encoder.update_from_iterable(dataset)
    >>> encoder.encode_label("this")
    1
    >>> encoder.add_unk()
    5
    >>> encoder.encode_sequence(["this", "out-of-vocab"])
    [1, 5]
    >>>

    Two methods can be used to add <bos> and <eos> to the internal dicts:
    insert_bos_eos, add_bos_eos.

    >>> encoder.add_bos_eos()
    >>> encoder.lab2ind[encoder.eos_label]
    7
    >>>

    add_bos_eos adds the special tokens at the end of the dict indexes
    >>> encoder = TextEncoder()
    >>> encoder.update_from_iterable(dataset)
    >>> encoder.insert_bos_eos(bos_index=0, eos_index=1)
    >>> encoder.lab2ind[encoder.eos_label]
    1
    >>>
    insert_bos_eos allows to specify whose index will correspond to each of them.
    Note that you can also specify the same integer encoding for both.

    Four methods can be used to prepend <bos> and append <eos>.
    prepend_bos_label and append_eos_label add respectively the <bos> and <eos>
    string tokens to the input sequence

    >>> words = ["foo", "bar"]
    >>> encoder.prepend_bos_label(words)
    ['<bos>', 'foo', 'bar']
    >>> encoder.append_eos_label(words)
    ['foo', 'bar', '<eos>']

    prepend_bos_index and append_eos_index add respectively the <bos> and <eos>
    indexes to the input encoded sequence.

    >>> words = ["foo", "bar"]
    >>> encoded = encoder.encode_sequence(words)
    >>> encoder.prepend_bos_index(encoded)
    [0, 3, 4]
    >>> encoder.append_eos_index(encoded)
    [3, 4, 1]
    """

    def handle_special_labels(self, special_labels):
        """Handles special labels such as bos and eos."""
        super().handle_special_labels(special_labels)
        # NOTE: bos_label and eos_label are not necessarily set at all!
        # This is because None is a suitable value.
        # So the test is: hasattr(self, "bos_label")
        # rather than self.bos_label is not None
        # Same thing with unk, see base class.
        if "bos_label" in special_labels and "eos_label" in special_labels:
            self.insert_bos_eos(
                bos_label="<bos>",
                eos_label="<eos>",
                bos_index=special_labels["bos_label"],
                eos_index=special_labels["eos_label"],
            )
        elif "bos_label" in special_labels or "eos_label" in special_labels:
            raise TypeError("Only BOS or EOS specified. Need both for init.")

    def update_from_iterable(self, iterable, sequence_input=True):
        """Change default for sequence_input to True."""
        return super().update_from_iterable(iterable, sequence_input)

    def update_from_didataset(self, didataset, output_key, sequence_input=True):
        """Change default for sequence_input to True."""
        return super().update_from_didataset(
            didataset, output_key, sequence_input
        )

    def limited_labelset_from_iterable(
        self, iterable, sequence_input=True, n_most_common=None, min_count=1
    ):
        """Change default for sequence_input to True."""
        # BUGFIX: previously the received arguments were ignored and
        # hard-coded values (sequence_input=True, n_most_common=None,
        # min_count=1) were forwarded to the parent. Forward faithfully.
        return super().limited_labelset_from_iterable(
            iterable,
            sequence_input=sequence_input,
            n_most_common=n_most_common,
            min_count=min_count,
        )

    def add_bos_eos(
        self, bos_label=DEFAULT_BOS, eos_label=DEFAULT_EOS,
    ):
        """Add sentence boundary markers in the label set.

        If the beginning-of-sentence and end-of-sentence markers
        are the same, will just use one sentence-boundary label.

        This method adds to the end of the index, rather than at the beginning,
        like insert_bos_eos.

        Arguments
        ---------
        bos_label : hashable
            Beginning-of-sentence label, any label.
        eos_label : hashable
            End-of-sentence label, any label. If set to the same label as
            bos_label, will just use one sentence-boundary label.
        """
        if bos_label == eos_label:
            logger.debug(
                "BOS and EOS labels are the same so using just one sentence "
                "boundary label"
            )
            self.add_label(bos_label)
        else:
            self.add_label(bos_label)
            self.add_label(eos_label)
        self.bos_label = bos_label
        self.eos_label = eos_label

    def insert_bos_eos(
        self,
        bos_label=DEFAULT_BOS,
        eos_label=DEFAULT_EOS,
        bos_index=0,
        eos_index=None,
    ):
        """Insert sentence boundary markers in the label set.

        If the beginning-of-sentence and end-of-sentence markers
        are the same, will just use one sentence-boundary label.

        Arguments
        ---------
        bos_label : hashable
            Beginning-of-sentence label, any label
        eos_label : hashable
            End-of-sentence label, any label. If set to the same label as
            bos_label, will just use one sentence-boundary label.
        bos_index : int
            Where to insert bos_label.
        eos_index : optional, int
            Where to insert eos_label. Default: eos_index = bos_index + 1
        """
        if bos_label == eos_label:
            logger.debug(
                "BOS and EOS labels are the same so using just one sentence "
                "boundary label"
            )
            self.insert_label(bos_label, bos_index)
        else:
            self.insert_label(bos_label, bos_index)
            if eos_index is None:
                logger.debug("EOS label not specified, using BOS label + 1")
                self.insert_label(eos_label, bos_index + 1)
            else:
                self.insert_label(eos_label, eos_index)
        self.bos_label = bos_label
        self.eos_label = eos_label

    def get_bos_index(self):
        """Returns the index to which BOS encodes"""
        if not hasattr(self, "bos_label"):
            raise RuntimeError("BOS label is not set!")
        return self.encode_label(self.bos_label)

    def get_eos_index(self):
        """Returns the index to which EOS encodes"""
        if not hasattr(self, "eos_label"):
            raise RuntimeError("EOS label is not set!")
        return self.encode_label(self.eos_label)

    def prepend_bos_label(self, x):
        """Returns a list version of x, with BOS prepended"""
        if not hasattr(self, "bos_label"):
            raise KeyError("BOS label has not been added to label set!")
        return [self.bos_label] + list(x)

    def prepend_bos_index(self, x):
        """Returns a list version of x, with BOS index prepended.
        If the input is a tensor, a tensor is returned."""
        if not hasattr(self, "bos_label"):
            raise KeyError("BOS label has not been added to label set!")
        if torch.is_tensor(x):
            # NOTE(review): torch.Tensor([...]) creates a float tensor; the
            # result of torch.cat may thus be promoted to float even for an
            # integer x — confirm whether callers rely on this dtype.
            bos_ind = torch.Tensor([self.lab2ind[self.bos_label]])
            return torch.cat([bos_ind, x])
        return [self.lab2ind[self.bos_label]] + list(x)

    def append_eos_label(self, x):
        """Returns a list version of x, with EOS appended."""
        if not hasattr(self, "eos_label"):
            raise KeyError("EOS label has not been added to label set!")
        return list(x) + [self.eos_label]

    def append_eos_index(self, x):
        """Returns a list version of x, with EOS index appended.
        If the input is a tensor, a tensor is returned."""
        if not hasattr(self, "eos_label"):
            raise KeyError("EOS label has not been added to label set!")
        if torch.is_tensor(x):
            # NOTE(review): float tensor creation, see prepend_bos_index.
            eos_ind = torch.Tensor([self.lab2ind[self.eos_label]])
            return torch.cat([x, eos_ind])
        return list(x) + [self.lab2ind[self.eos_label]]

    def _get_extras(self):
        """Add BOS/EOS labels (if set) to the persisted extras."""
        extras = super()._get_extras()
        if hasattr(self, "bos_label"):
            extras["bos_label"] = self.bos_label
        if hasattr(self, "eos_label"):
            extras["eos_label"] = self.eos_label
        return extras

    def _set_extras(self, extras):
        """Restore BOS/EOS labels (if saved) from the persisted extras."""
        super()._set_extras(extras)
        if "bos_label" in extras:
            self.bos_label = extras["bos_label"]
        if "eos_label" in extras:
            self.eos_label = extras["eos_label"]
class CTCTextEncoder(TextEncoder):
    """Subclass of TextEncoder which also provides methods to handle CTC blank token.

    add_blank and insert_blank can be used to add <blank> special token to the encoder
    state.

    >>> from speechbrain.dataio.encoder import CTCTextEncoder
    >>> chars = ["a", "b", "c", "d"]
    >>> encoder = CTCTextEncoder()
    >>> encoder.update_from_iterable(chars)
    >>> encoder.add_blank()
    >>> encoder.encode_sequence(chars)
    [0, 1, 2, 3]
    >>> encoder.get_blank_index()
    4
    >>> encoder.decode_ndim([0, 1, 2, 3, 4])
    ['a', 'b', 'c', 'd', '<blank>']

    collapse_labels and collapse_indices_ndim can be used to apply CTC collapsing
    rules:
    >>> encoder.collapse_labels(["a", "a", "b", "c", "d"])
    ['a', 'b', 'c', 'd']
    >>> encoder.collapse_indices_ndim([4, 4, 0, 1, 2, 3, 4, 4]) # 4 is <blank>
    [0, 1, 2, 3]
    """

    def handle_special_labels(self, special_labels):
        """Handles special labels such as blanks."""
        # NOTE: blank_label is not necessarily set at all!
        # This is because None is a suitable value.
        # So the test is: hasattr(self, "blank_label")
        # rather than self.blank_label is not None
        # Same thing with unk, see base class.
        # The blank is inserted BEFORE delegating to the parent so that
        # its index is claimed first; the parent then handles bos/eos/unk.
        if "blank_label" in special_labels:
            self.insert_blank(index=special_labels["blank_label"])
        super().handle_special_labels(special_labels)

    def add_blank(self, blank_label=DEFAULT_BLANK):
        """Add blank symbol to labelset."""
        # Appends at the end of the current index range.
        self.add_label(blank_label)
        self.blank_label = blank_label

    def insert_blank(self, blank_label=DEFAULT_BLANK, index=0):
        """Insert blank symbol at a given labelset."""
        # Inserts at a specific index (default 0), shifting others.
        self.insert_label(blank_label, index)
        self.blank_label = blank_label

    def get_blank_index(self):
        """Returns the index to which blank encodes."""
        if not hasattr(self, "blank_label"):
            raise RuntimeError("Blank label is not set!")
        return self.encode_label(self.blank_label)

    def collapse_labels(self, x, merge_repeats=True):
        """Applies the CTC collapsing rules on one label sequence.

        Arguments
        ---------
        x : iterable
            Label sequence on which to operate.
        merge_repeats : bool
            Whether to merge repeated labels before removing blanks.
            In the basic CTC label topology, repeated labels are merged.
            However, in RNN-T, they are not.

        Returns
        -------
        list
            List of labels with collapsing rules applied.
        """
        # This cannot work on arbitrary "ndim", because strings can be
        # infinitely iterated. Iterating "a" produces "a" over and over again.
        if not hasattr(self, "blank_label"):
            raise KeyError("Blank label has not been added")
        if merge_repeats:
            # Keep a label only if it differs from its predecessor
            # (i == 0 has no predecessor) and is not the blank.
            return [
                label
                for i, label in enumerate(x)
                if (i == 0 or label != x[i - 1]) and label != self.blank_label
            ]
        else:
            return [label for label in x if label != self.blank_label]

    def collapse_indices_ndim(self, x, merge_repeats=True):
        """Applies the CTC collapsing rules on arbitrarily label sequence.

        Arguments
        ---------
        x : iterable
            Label sequence on which to operate.
        merge_repeats : bool
            Whether to merge repeated labels before removing blanks.
            In the basic CTC label topology, repeated labels are merged.
            However, in RNN-T, they are not.

        Returns
        -------
        list
            List of labels with collapsing rules applied.
        """
        if not hasattr(self, "blank_label"):
            raise KeyError("Blank label has not been added")
        # Recursively operates on the different dimensions.
        # The recursion probes each element: if an element is itself
        # iterable, we recurse; the TypeError raised when iterating a
        # non-iterable (e.g. an int) tells us we've hit the innermost
        # dimension, and we break out to collapse this flat sequence.
        collapsed = []
        for subtensor in x:
            try:
                collapsed.append(
                    self.collapse_indices_ndim(subtensor, merge_repeats)
                )
            except TypeError:  # Not an iterable at next level!
                # So we should rather operate on this dimension.
                break
        else:  # For-else: only enter else if NO break.
            return collapsed
        # We get here if we DID break:
        blank_index = self.lab2ind[self.blank_label]
        if merge_repeats:
            return [
                index
                for i, index in enumerate(x)
                if (i == 0 or index != x[i - 1]) and index != blank_index
            ]
        else:
            return [index for index in x if index != blank_index]

    def _get_extras(self):
        """Add the blank label (if set) to the persisted extras."""
        extras = super()._get_extras()
        if hasattr(self, "blank_label"):
            extras["blank_label"] = self.blank_label
        return extras

    def _set_extras(self, extras):
        """Restore the blank label (if saved) from the persisted extras."""
        super()._set_extras(extras)
        if "blank_label" in extras:
            self.blank_label = extras["blank_label"]
| 39,147 | 34.718978 | 93 | py |
speechbrain | speechbrain-main/speechbrain/dataio/dataset.py | """Dataset examples for loading individual data points
Authors
* Aku Rouhe 2020
* Samuele Cornell 2020
"""
import copy
import contextlib
from types import MethodType
from torch.utils.data import Dataset
from speechbrain.utils.data_pipeline import DataPipeline
from speechbrain.dataio.dataio import load_data_json, load_data_csv
import logging
logger = logging.getLogger(__name__)
class DynamicItemDataset(Dataset):
    """Dataset that reads, wrangles, and produces dicts.

    Each data point dict provides some items (by key), for example, a path to a
    wavefile with the key "wav_file". When a data point is fetched from this
    Dataset, more items are produced dynamically, based on pre-existing items
    and other dynamic created items. For example, a dynamic item could take the
    wavfile path and load the audio from the disk.

    The dynamic items can depend on other dynamic items: a suitable evaluation
    order is used automatically, as long as there are no circular dependencies.

    A specified list of keys is collected in the output dict. These can be items
    in the original data or dynamic items. If some dynamic items are not
    requested, nor depended on by other requested items, they won't be computed.
    So for example if a user simply wants to iterate over the text, the
    time-consuming audio loading can be skipped.

    About the format:
    Takes a dict of dicts as the collection of data points to read/wrangle.
    The top level keys are data point IDs.
    Each data point (example) dict should have the same keys, corresponding to
    different items in that data point.

    Altogether the data collection could look like this:

    >>> data = {
    ...  "spk1utt1": {
    ...      "wav_file": "/path/to/spk1utt1.wav",
    ...      "text": "hello world",
    ...      "speaker": "spk1",
    ...      },
    ...  "spk1utt2": {
    ...      "wav_file": "/path/to/spk1utt2.wav",
    ...      "text": "how are you world",
    ...      "speaker": "spk1",
    ...      }
    ... }

    NOTE
    ----
        The top-level key, the data point id, is implicitly added as an item
        in the data point, with the key "id"

    Each dynamic item is configured by three things: a key, a func, and a list
    of argkeys. The key should be unique among all the items (dynamic or not) in
    each data point. The func is any callable, and it returns the dynamic item's
    value. The callable is called with the values of other items as specified
    by the argkeys list (as positional args, passed in the order specified by
    argkeys).

    The dynamic_items configuration could look like this:

    >>> import torch
    >>> dynamic_items = [
    ...     {"func": lambda l: torch.Tensor(l),
    ...     "takes": ["wav_loaded"],
    ...     "provides": "wav"},
    ...     {"func": lambda path: [ord(c)/100 for c in path],  # Fake "loading"
    ...     "takes": ["wav_file"],
    ...     "provides": "wav_loaded"},
    ...     {"func": lambda t: t.split(),
    ...     "takes": ["text"],
    ...     "provides": "words"}]

    With these, different views of the data can be loaded:

    >>> from speechbrain.dataio.dataloader import SaveableDataLoader
    >>> from speechbrain.dataio.batch import PaddedBatch
    >>> dataset = DynamicItemDataset(data, dynamic_items)
    >>> dataloader = SaveableDataLoader(dataset, collate_fn=PaddedBatch,
    ...     batch_size=2)
    >>> # First, create encoding for words:
    >>> dataset.set_output_keys(["words"])
    >>> encoding = {}
    >>> next_id = 1
    >>> for batch in dataloader:
    ...     for sent in batch.words:
    ...         for word in sent:
    ...             if word not in encoding:
    ...                 encoding[word] = next_id
    ...                 next_id += 1
    >>> # Next, add an encoded words_tensor dynamic item:
    >>> dataset.add_dynamic_item(
    ...     func = lambda ws: torch.tensor([encoding[w] for w in ws],
    ...             dtype=torch.long),
    ...     takes = ["words"],
    ...     provides = "words_encoded")
    >>> # Now we can get word and audio tensors:
    >>> dataset.set_output_keys(["id", "wav", "words_encoded"])
    >>> batch = next(iter(dataloader))
    >>> batch.id
    ['spk1utt1', 'spk1utt2']
    >>> batch.wav  # +ELLIPSIS
    PaddedData(data=tensor([[0.4700, 1.1200, ...
    >>> batch.words_encoded
    PaddedData(data=tensor([[1, 2, 0, 0],
            [3, 4, 5, 2]]), lengths=tensor([0.5000, 1.0000]))

    Output keys can also be a map:

    >>> dataset.set_output_keys({"id":"id", "signal": "wav", "words": "words_encoded"})
    >>> batch = next(iter(dataloader))
    >>> batch.words
    PaddedData(data=tensor([[1, 2, 0, 0],
            [3, 4, 5, 2]]), lengths=tensor([0.5000, 1.0000]))


    Arguments
    ---------
    data : dict
        Dictionary containing single data points (e.g. utterances).
    dynamic_items : list, optional
        Configuration for the dynamic items produced when fetching an example.
        List of DynamicItems or dicts with the format::
            func: <callable> # To be called
            takes: <list> # key or list of keys of args this takes
            provides: key # key or list of keys that this provides
    output_keys : dict, list, optional
        List of keys (either directly available in data or dynamic items)
        to include in the output dict when data points are fetched.

        If a dict is given; it is used to map internal keys to output keys.
        From the output_keys dict key:value pairs the key appears outside,
        and value is the internal key.
    """

    def __init__(
        self, data, dynamic_items=[], output_keys=[],
    ):
        self.data = data
        self.data_ids = list(self.data.keys())
        # All data points are assumed to share the keys of the first one.
        static_keys = list(self.data[self.data_ids[0]].keys())
        if "id" in static_keys:
            raise ValueError("The key 'id' is reserved for the data point id.")
        else:
            static_keys.append("id")
        self.pipeline = DataPipeline(static_keys, dynamic_items)
        self.set_output_keys(output_keys)

    def __len__(self):
        return len(self.data_ids)

    def __getitem__(self, index):
        data_id = self.data_ids[index]
        data_point = self.data[data_id]
        # The data point id is injected under the reserved "id" key without
        # mutating the stored data point.
        return self.pipeline.compute_outputs({"id": data_id, **data_point})

    def add_dynamic_item(self, func, takes=None, provides=None):
        """Makes a new dynamic item available on the dataset.

        Two calling conventions. For DynamicItem objects, just use:
        add_dynamic_item(dynamic_item).
        But otherwise, should use:
        add_dynamic_item(func, takes, provides).

        See `speechbrain.utils.data_pipeline`.

        Arguments
        ---------
        func : callable, DynamicItem
            If a DynamicItem is given, adds that directly. Otherwise a
            DynamicItem is created, and this specifies the callable to use. If
            a generator function is given, then create a GeneratorDynamicItem.
            Otherwise creates a normal DynamicItem.
        takes : list, str
            List of keys. When func is called, each key is resolved to
            either an entry in the data or the output of another dynamic_item.
            The func is then called with these as positional arguments,
            in the same order as specified here.
            A single arg can be given directly.
        provides : str
            Unique key or keys that this provides.
        """
        self.pipeline.add_dynamic_item(func, takes, provides)

    def set_output_keys(self, keys):
        """Use this to change the output keys.

        These are the keys that are actually evaluated when a data point
        is fetched from the dataset.

        Arguments
        ---------
        keys : dict, list
            List of keys (str) to produce in output.

            If a dict is given; it is used to map internal keys to output keys.
            From the output_keys dict key:value pairs the key appears outside,
            and value is the internal key.
        """
        self.pipeline.set_output_keys(keys)

    @contextlib.contextmanager
    def output_keys_as(self, keys):
        """Context manager to temporarily set output keys.

        Example
        -------
        >>> dataset = DynamicItemDataset({"a":{"x":1,"y":2},"b":{"x":3,"y":4}},
        ...     output_keys = ["x"])
        >>> with dataset.output_keys_as(["y"]):
        ...     print(dataset[0])
        {'y': 2}
        >>> print(dataset[0])
        {'x': 1}

        NOTE
        ----
        Not thread-safe. While in this context manager, the output keys
        are affected for any call.
        """
        saved_output = self.pipeline.output_mapping
        self.pipeline.set_output_keys(keys)
        # BUGFIX: restore the saved keys even if the with-body raises;
        # previously an exception left the temporary keys in place.
        try:
            yield self
        finally:
            self.pipeline.set_output_keys(saved_output)

    def filtered_sorted(
        self,
        key_min_value={},
        key_max_value={},
        key_test={},
        sort_key=None,
        reverse=False,
        select_n=None,
    ):
        """Get a filtered and/or sorted version of this, shares static data.

        The reason to implement these operations in the same method is that
        computing some dynamic items may be expensive, and this way the
        filtering and sorting steps don't need to compute the dynamic items
        twice.

        Arguments
        ---------
        key_min_value : dict
            Map from key (in data or in dynamic items) to limit, will only keep
            data_point if data_point[key] >= limit
        key_max_value : dict
            Map from key (in data or in dynamic items) to limit, will only keep
            data_point if data_point[key] <= limit
        key_test : dict
            Map from key (in data or in dynamic items) to func, will only keep
            data_point if bool(func(data_point[key])) == True
        sort_key : None, str
            If not None, sort by data_point[sort_key]. Default is ascending
            order.
        reverse : bool
            If True, sort in descending order.
        select_n : None, int
            If not None, only keep (at most) the first n filtered data_points.
            The possible sorting is applied, but only on the first n data
            points found. Meant for debugging.

        Returns
        -------
        FilteredSortedDynamicItemDataset
            Shares the static data, but has its own output keys and
            dynamic items (initially deep copied from this, so they have the
            same dynamic items available)

        NOTE
        ----
        Temporarily changes the output keys!
        """
        filtered_sorted_ids = self._filtered_sorted_ids(
            key_min_value, key_max_value, key_test, sort_key, reverse, select_n,
        )
        return FilteredSortedDynamicItemDataset(
            self, filtered_sorted_ids
        )  # NOTE: defined below

    def _filtered_sorted_ids(
        self,
        key_min_value={},
        key_max_value={},
        key_test={},
        sort_key=None,
        reverse=False,
        select_n=None,
    ):
        """Returns a list of data ids, fulfilling the sorting and filtering."""

        def combined_filter(computed):
            """Applies filter."""
            for key, limit in key_min_value.items():
                # NOTE: docstring promises >= so using that.
                # Mathematically could also use < for nicer syntax, but
                # maybe with some super special weird edge case some one can
                # depend on the >= operator
                if computed[key] >= limit:
                    continue
                return False
            for key, limit in key_max_value.items():
                if computed[key] <= limit:
                    continue
                return False
            for key, func in key_test.items():
                if bool(func(computed[key])):
                    continue
                return False
            return True

        # Only the keys needed for filtering/sorting are computed.
        temp_keys = (
            set(key_min_value.keys())
            | set(key_max_value.keys())
            | set(key_test.keys())
            | set([] if sort_key is None else [sort_key])
        )

        filtered_ids = []
        with self.output_keys_as(temp_keys):
            for i, data_id in enumerate(self.data_ids):
                if select_n is not None and len(filtered_ids) == select_n:
                    break
                data_point = self.data[data_id]
                # BUGFIX: build an augmented view instead of mutating the
                # stored data point (previously "id" was inserted in-place,
                # leaking into self.data). This mirrors __getitem__.
                computed = self.pipeline.compute_outputs(
                    {"id": data_id, **data_point}
                )
                if combined_filter(computed):
                    if sort_key is not None:
                        # Add (main sorting index, current index, data_id)
                        # So that we maintain current sorting and don't compare
                        # data_id values ever.
                        filtered_ids.append((computed[sort_key], i, data_id))
                    else:
                        filtered_ids.append(data_id)
        if sort_key is not None:
            filtered_sorted_ids = [
                tup[2] for tup in sorted(filtered_ids, reverse=reverse)
            ]
        else:
            filtered_sorted_ids = filtered_ids
        return filtered_sorted_ids

    @classmethod
    def from_json(
        cls, json_path, replacements={}, dynamic_items=[], output_keys=[]
    ):
        """Load a data prep JSON file and create a Dataset based on it."""
        data = load_data_json(json_path, replacements)
        return cls(data, dynamic_items, output_keys)

    @classmethod
    def from_csv(
        cls, csv_path, replacements={}, dynamic_items=[], output_keys=[]
    ):
        """Load a data prep CSV file and create a Dataset based on it."""
        data = load_data_csv(csv_path, replacements)
        return cls(data, dynamic_items, output_keys)

    @classmethod
    def from_arrow_dataset(
        cls, dataset, replacements={}, dynamic_items=[], output_keys=[]
    ):
        """Loading a prepared huggingface dataset"""
        # NOTE: replacements is accepted for signature parity with the
        # other constructors but is not used here.
        # define an unbound method to generate pseudo keys
        def keys(self):
            "Returns the keys."
            return [i for i in range(dataset.__len__())]

        # bind this method to arrow dataset
        dataset.keys = MethodType(keys, dataset)
        return cls(dataset, dynamic_items, output_keys)
class FilteredSortedDynamicItemDataset(DynamicItemDataset):
    """Possibly filtered, possibly sorted DynamicItemDataset.

    Shares the static data (reference).
    Has its own dynamic_items and output_keys (deepcopy).
    """

    def __init__(self, from_dataset, data_ids):
        # The static data is shared by reference with the parent dataset,
        # while the pipeline is deep copied so that output keys and dynamic
        # items can diverge without affecting the original.
        self.data_ids = data_ids
        self.data = from_dataset.data
        self.pipeline = copy.deepcopy(from_dataset.pipeline)

    @classmethod
    def from_json(
        cls, json_path, replacements={}, dynamic_items=None, output_keys=None
    ):
        # Subsets can only be derived from an existing dataset.
        raise TypeError("Cannot create SubsetDynamicItemDataset directly!")

    @classmethod
    def from_csv(
        cls, csv_path, replacements={}, dynamic_items=None, output_keys=None
    ):
        # Subsets can only be derived from an existing dataset.
        raise TypeError("Cannot create SubsetDynamicItemDataset directly!")
def add_dynamic_item(datasets, func, takes=None, provides=None):
    """Helper for adding the same item to multiple datasets."""
    for dset in datasets:
        dset.add_dynamic_item(func, takes, provides)
def set_output_keys(datasets, output_keys):
    """Helper for setting the same item to multiple datasets."""
    for dset in datasets:
        dset.set_output_keys(output_keys)
| 15,593 | 36.30622 | 87 | py |
speechbrain | speechbrain-main/speechbrain/dataio/preprocess.py | """Preprocessors for audio"""
import torch
import functools
from speechbrain.processing.speech_augmentation import Resample
class AudioNormalizer:
    """Normalizes audio into a standard format

    Arguments
    ---------
    sample_rate : int
        The sampling rate to which the incoming signals should be converted.
    mix : {"avg-to-mono", "keep"}
        "avg-to-mono" - add all channels together and normalize by number of
        channels. This also removes the channel dimension, resulting in [time]
        format tensor.
        "keep" - don't normalize channel information

    Example
    -------
    >>> import torchaudio
    >>> example_file = 'tests/samples/multi-mic/speech_-0.82918_0.55279_-0.082918.flac'
    >>> signal, sr = torchaudio.load(example_file, channels_first = False)
    >>> normalizer = AudioNormalizer(sample_rate=8000)
    >>> normalized = normalizer(signal, sr)
    >>> signal.shape
    torch.Size([160000, 4])
    >>> normalized.shape
    torch.Size([80000])

    NOTE
    ----
    This will also upsample audio. However, upsampling cannot produce meaningful
    information in the bandwidth which it adds. Generally models will not work
    well for upsampled data if they have not specifically been trained to do so.
    """

    def __init__(self, sample_rate=16000, mix="avg-to-mono"):
        if mix not in ("avg-to-mono", "keep"):
            raise ValueError(f"Unexpected mixing configuration {mix}")
        self.sample_rate = sample_rate
        self.mix = mix
        # Cache resampler instances keyed by (orig_freq, new_freq) so that
        # repeated calls with the same input rate reuse one Resample object.
        self._cached_resample = functools.lru_cache(maxsize=12)(Resample)

    def __call__(self, audio, sample_rate):
        """Perform normalization

        Arguments
        ---------
        audio : tensor
            The input waveform torch tensor. Assuming [time, channels],
            or [time].
        sample_rate : int
            The sampling rate of the incoming signal.
        """
        resample = self._cached_resample(sample_rate, self.sample_rate)
        # Resample expects a leading batch dimension; add and remove it.
        return self._mix(resample(audio.unsqueeze(0)).squeeze(0))

    def _mix(self, audio):
        """Handle channel mixing"""
        if self.mix == "keep":
            return audio
        if self.mix == "avg-to-mono":
            # Already mono (flat [time] input): nothing to average.
            return audio if audio.dim() == 1 else torch.mean(audio, 1)
| 2,293 | 32.735294 | 87 | py |
speechbrain | speechbrain-main/speechbrain/alignment/ctc_segmentation.py | #!/usr/bin/env python3
# 2021, Technische Universität München, Ludwig Kürzinger
"""Perform CTC segmentation to align utterances within audio files.
This uses the ctc-segmentation Python package.
Install it with pip or see the installing instructions in
https://github.com/lumaku/ctc-segmentation
"""
import logging
from pathlib import Path
from types import SimpleNamespace
from typing import Optional
from typing import Union
import numpy as np
import torch
from typing import List
# speechbrain interface
from speechbrain.pretrained.interfaces import EncoderASR, EncoderDecoderASR
# imports for CTC segmentation
try:
from ctc_segmentation import ctc_segmentation
from ctc_segmentation import CtcSegmentationParameters
from ctc_segmentation import determine_utterance_segments
from ctc_segmentation import prepare_text
from ctc_segmentation import prepare_token_list
except ImportError:
print(
"ImportError: "
"Is the ctc_segmentation module installed "
"and in your PYTHONPATH?"
)
raise ImportError("The ctc_segmentation module is missing.")
logger = logging.getLogger(__name__)
class CTCSegmentationTask(SimpleNamespace):
    """Task object for CTC segmentation.

    This object is automatically generated and acts as
    a container for results of a CTCSegmentation object.

    When formatted with str(·), this object returns
    results in a kaldi-style segments file formatting.
    The human-readable output can be configured with
    the printing options.

    Properties
    ---------
    text : list
        Utterance texts, separated by line. But without the utterance
        name at the beginning of the line (as in kaldi-style text).
    ground_truth_mat : array
        Ground truth matrix (CTC segmentation).
    utt_begin_indices : np.ndarray
        Utterance separator for the Ground truth matrix.
    timings : np.ndarray
        Time marks of the corresponding chars.
    state_list : list
        Estimated alignment of chars/tokens.
    segments : list
        Calculated segments as: (start, end, confidence score).
    config : CtcSegmentationParameters
        CTC Segmentation configuration object.
    name : str
        Name of aligned audio file (Optional). If given, name is
        considered when generating the text.
        Default: "utt".
    utt_ids : list
        The list of utterance names (Optional). This list should
        have the same length as the number of utterances.
    lpz : np.ndarray
        CTC posterior log probabilities (Optional).

    Properties for printing
    ----------------------
    print_confidence_score : bool
        Include the confidence score.
        Default: True.
    print_utterance_text : bool
        Include utterance text.
        Default: True.
    """

    text = None
    ground_truth_mat = None
    utt_begin_indices = None
    timings = None
    char_probs = None
    state_list = None
    segments = None
    config = None
    done = False
    # Optional
    name = "utt"
    utt_ids = None
    lpz = None
    # Printing
    print_confidence_score = True
    print_utterance_text = True

    def set(self, **kwargs):
        """Update object attributes."""
        self.__dict__.update(kwargs)

    def __str__(self):
        """Return a kaldi-style ``segments`` file (string)."""
        num_utts = len(self.segments)
        if self.utt_ids is None:
            # Auto-generate names as <name>_0000, <name>_0001, ...
            names = [f"{self.name}_{i:04}" for i in range(num_utts)]
        else:
            # ensure correct mapping of segments to utterance ids
            assert num_utts == len(self.utt_ids)
            names = self.utt_ids
        lines = []
        for i, seg in enumerate(self.segments):
            # Fields: utterance name, file name, start, end
            fields = [names[i], self.name, f"{seg[0]:.2f}", f"{seg[1]:.2f}"]
            if self.print_confidence_score:
                fields.append(f"{seg[2]:3.4f}")
            if self.print_utterance_text:
                fields.append(f"{self.text[i]}")
            lines.append(" ".join(fields) + "\n")
        return "".join(lines)
class CTCSegmentation:
"""Align text to audio using CTC segmentation.
Usage
-----
Initialize with given ASR model and parameters.
If needed, parameters for CTC segmentation can be set with ``set_config(·)``.
Then call the instance as function to align text within an audio file.
Arguments
---------
asr_model : EncoderDecoderASR
Speechbrain ASR interface. This requires a model that has a
trained CTC layer for inference. It is better to use a model with
single-character tokens to get a better time resolution.
Please note that the inference complexity with Transformer models
usually increases quadratically with audio length.
It is therefore recommended to use RNN-based models, if available.
kaldi_style_text : bool
A kaldi-style text file includes the name of the
utterance at the start of the line. If True, the utterance name
is expected as first word at each line. If False, utterance
names are automatically generated. Set this option according to
your input data. Default: True.
text_converter : str
How CTC segmentation handles text.
"tokenize": Use the ASR model tokenizer to tokenize the text.
"classic": The text is preprocessed as text pieces which takes
token length into account. If the ASR model has longer tokens,
this option may yield better results. Default: "tokenize".
time_stamps : str
Choose the method how the time stamps are
calculated. While "fixed" and "auto" use both the sample rate,
the ratio of samples to one frame is either automatically
determined for each inference or fixed at a certain ratio that
is initially determined by the module, but can be changed via
the parameter ``samples_to_frames_ratio``. Recommended for
longer audio files: "auto".
**ctc_segmentation_args
Parameters for CTC segmentation.
The full list of parameters is found in ``set_config``.
Example
-------
>>> # using example file included in the SpeechBrain repository
>>> from speechbrain.pretrained import EncoderDecoderASR
>>> from speechbrain.alignment.ctc_segmentation import CTCSegmentation
>>> # load an ASR model
>>> pre_trained = "speechbrain/asr-transformer-transformerlm-librispeech"
>>> asr_model = EncoderDecoderASR.from_hparams(source=pre_trained)
>>> aligner = CTCSegmentation(asr_model, kaldi_style_text=False)
>>> # load data
>>> audio_path = "tests/samples/single-mic/example1.wav"
>>> text = ["THE BIRCH CANOE", "SLID ON THE", "SMOOTH PLANKS"]
>>> segments = aligner(audio_path, text, name="example1")
On multiprocessing
------------------
To parallelize the computation with multiprocessing, these three steps
can be separated:
(1) ``get_lpz``: obtain the lpz,
(2) ``prepare_segmentation_task``: prepare the task, and
(3) ``get_segments``: perform CTC segmentation.
Note that the function `get_segments` is a staticmethod and therefore
independent of an already initialized CTCSegmentation obj́ect.
References
----------
CTC-Segmentation of Large Corpora for German End-to-end Speech Recognition
2020, Kürzinger, Winkelbauer, Li, Watzel, Rigoll
https://arxiv.org/abs/2007.09127
More parameters are described in https://github.com/lumaku/ctc-segmentation
"""
fs = 16000
kaldi_style_text = True
samples_to_frames_ratio = None
time_stamps = "auto"
choices_time_stamps = ["auto", "fixed"]
text_converter = "tokenize"
choices_text_converter = ["tokenize", "classic"]
warned_about_misconfiguration = False
config = CtcSegmentationParameters()
def __init__(
self,
asr_model: Union[EncoderASR, EncoderDecoderASR],
kaldi_style_text: bool = True,
text_converter: str = "tokenize",
time_stamps: str = "auto",
**ctc_segmentation_args,
):
"""Initialize the CTCSegmentation module."""
# Prepare ASR model
if (
isinstance(asr_model, EncoderDecoderASR)
and not (
hasattr(asr_model, "mods")
and hasattr(asr_model.mods, "decoder")
and hasattr(asr_model.mods.decoder, "ctc_weight")
)
) or (
isinstance(asr_model, EncoderASR)
and not (
hasattr(asr_model, "mods")
and hasattr(asr_model.mods, "encoder")
and hasattr(asr_model.mods.encoder, "ctc_lin")
)
):
raise AttributeError("The given asr_model has no CTC module!")
if not hasattr(asr_model, "tokenizer"):
raise AttributeError(
"The given asr_model has no tokenizer in asr_model.tokenizer!"
)
self.asr_model = asr_model
self._encode = self.asr_model.encode_batch
if isinstance(asr_model, EncoderDecoderASR):
# Assumption: log-softmax is already included in ctc_forward_step
self._ctc = self.asr_model.mods.decoder.ctc_forward_step
else:
# Apply log-softmax to encoder output
self._ctc = self.asr_model.hparams.log_softmax
self._tokenizer = self.asr_model.tokenizer
# Apply configuration
self.set_config(
fs=self.asr_model.hparams.sample_rate,
time_stamps=time_stamps,
kaldi_style_text=kaldi_style_text,
text_converter=text_converter,
**ctc_segmentation_args,
)
# determine token or character list
char_list = [
asr_model.tokenizer.id_to_piece(i)
for i in range(asr_model.tokenizer.vocab_size())
]
self.config.char_list = char_list
# Warn about possible misconfigurations
max_char_len = max([len(c) for c in char_list])
if len(char_list) > 500 and max_char_len >= 8:
logger.warning(
f"The dictionary has {len(char_list)} tokens with "
f"a max length of {max_char_len}. This may lead "
f"to low alignment performance and low accuracy."
)
def set_config(
self,
time_stamps: Optional[str] = None,
fs: Optional[int] = None,
samples_to_frames_ratio: Optional[float] = None,
set_blank: Optional[int] = None,
replace_spaces_with_blanks: Optional[bool] = None,
kaldi_style_text: Optional[bool] = None,
text_converter: Optional[str] = None,
gratis_blank: Optional[bool] = None,
min_window_size: Optional[int] = None,
max_window_size: Optional[int] = None,
scoring_length: Optional[int] = None,
):
"""Set CTC segmentation parameters.
Parameters for timing
---------------------
time_stamps : str
Select method how CTC index duration is estimated, and
thus how the time stamps are calculated.
fs : int
Sample rate. Usually derived from ASR model; use this parameter
to overwrite the setting.
samples_to_frames_ratio : float
If you want to directly determine the
ratio of samples to CTC frames, set this parameter, and
set ``time_stamps`` to "fixed".
Note: If you want to calculate the time stamps from a model
with fixed subsampling, set this parameter to:
``subsampling_factor * frame_duration / 1000``.
Parameters for text preparation
-------------------------------
set_blank : int
Index of blank in token list. Default: 0.
replace_spaces_with_blanks : bool
Inserts blanks between words, which is
useful for handling long pauses between words. Only used in
``text_converter="classic"`` preprocessing mode. Default: False.
kaldi_style_text : bool
Determines whether the utterance name is expected
as fist word of the utterance. Set at module initialization.
text_converter : str
How CTC segmentation handles text.
Set at module initialization.
Parameters for alignment
------------------------
min_window_size : int
Minimum number of frames considered for a single
utterance. The current default value of 8000 corresponds to
roughly 4 minutes (depending on ASR model) and should be OK in
most cases. If your utterances are further apart, increase
this value, or decrease it for smaller audio files.
max_window_size : int
Maximum window size. It should not be necessary
to change this value.
gratis_blank : bool
If True, the transition cost of blank is set to zero.
Useful for long preambles or if there are large unrelated segments
between utterances. Default: False.
Parameters for calculation of confidence score
----------------------------------------------
scoring_length : int
Block length to calculate confidence score. The
default value of 30 should be OK in most cases.
30 corresponds to roughly 1-2s of audio.
"""
# Parameters for timing
if time_stamps is not None:
if time_stamps not in self.choices_time_stamps:
raise NotImplementedError(
f"Parameter ´time_stamps´ has to be one of "
f"{list(self.choices_time_stamps)}",
)
self.time_stamps = time_stamps
if fs is not None:
self.fs = float(fs)
if samples_to_frames_ratio is not None:
self.samples_to_frames_ratio = float(samples_to_frames_ratio)
# Parameters for text preparation
if set_blank is not None:
self.config.blank = int(set_blank)
if replace_spaces_with_blanks is not None:
self.config.replace_spaces_with_blanks = bool(
replace_spaces_with_blanks
)
if kaldi_style_text is not None:
self.kaldi_style_text = bool(kaldi_style_text)
if text_converter is not None:
if text_converter not in self.choices_text_converter:
raise NotImplementedError(
f"Parameter ´text_converter´ has to be one of "
f"{list(self.choices_text_converter)}",
)
self.text_converter = text_converter
# Parameters for alignment
if min_window_size is not None:
self.config.min_window_size = int(min_window_size)
if max_window_size is not None:
self.config.max_window_size = int(max_window_size)
if gratis_blank is not None:
self.config.blank_transition_cost_zero = bool(gratis_blank)
if (
self.config.blank_transition_cost_zero
and self.config.replace_spaces_with_blanks
and not self.warned_about_misconfiguration
):
logger.error(
"Blanks are inserted between words, and also the transition cost of"
" blank is zero. This configuration may lead to misalignments!"
)
self.warned_about_misconfiguration = True
# Parameter for calculation of confidence score
if scoring_length is not None:
self.config.score_min_mean_over_L = int(scoring_length)
def get_timing_config(self, speech_len=None, lpz_len=None):
"""Obtain parameters to determine time stamps."""
timing_cfg = {
"index_duration": self.config.index_duration,
}
# As the parameter ctc_index_duration vetoes the other
if self.time_stamps == "fixed":
# Initialize the value, if not yet available
if self.samples_to_frames_ratio is None:
ratio = self.estimate_samples_to_frames_ratio()
self.samples_to_frames_ratio = ratio
index_duration = self.samples_to_frames_ratio / self.fs
else:
assert self.time_stamps == "auto"
samples_to_frames_ratio = speech_len / lpz_len
index_duration = samples_to_frames_ratio / self.fs
timing_cfg["index_duration"] = index_duration
return timing_cfg
def estimate_samples_to_frames_ratio(self, speech_len=215040):
"""Determine the ratio of encoded frames to sample points.
This method helps to determine the time a single encoded frame occupies.
As the sample rate already gave the number of samples, only the ratio
of samples per encoded CTC frame are needed. This function estimates them by
doing one inference, which is only needed once.
Args
----
speech_len : int
Length of randomly generated speech vector for single
inference. Default: 215040.
Returns
-------
int
Estimated ratio.
"""
random_input = torch.rand(speech_len)
lpz = self.get_lpz(random_input)
lpz_len = lpz.shape[0]
# CAVEAT assumption: Frontend does not discard trailing data!
samples_to_frames_ratio = speech_len / lpz_len
return samples_to_frames_ratio
@torch.no_grad()
def get_lpz(self, speech: Union[torch.Tensor, np.ndarray]):
"""Obtain CTC posterior log probabilities for given speech data.
Args
----
speech : Union[torch.Tensor, np.ndarray]
Speech audio input.
Returns
-------
np.ndarray
Numpy vector with CTC log posterior probabilities.
"""
if isinstance(speech, np.ndarray):
speech = torch.tensor(speech)
# Batch data: (Nsamples,) -> (1, Nsamples)
speech = speech.unsqueeze(0).to(self.asr_model.device)
wav_lens = torch.tensor([1.0]).to(self.asr_model.device)
enc = self._encode(speech, wav_lens)
# Apply ctc layer to obtain log character probabilities
lpz = self._ctc(enc).detach()
# Shape should be ( <time steps>, <classes> )
lpz = lpz.squeeze(0).cpu().numpy()
return lpz
def _split_text(self, text):
"""Convert text to list and extract utterance IDs."""
utt_ids = None
# Handle multiline strings
if isinstance(text, str):
text = text.splitlines()
# Remove empty lines
text = list(filter(len, text))
# Handle kaldi-style text format
if self.kaldi_style_text:
utt_ids_and_text = [utt.split(" ", 1) for utt in text]
# remove utterances with empty text
utt_ids_and_text = filter(lambda ui: len(ui) == 2, utt_ids_and_text)
utt_ids_and_text = list(utt_ids_and_text)
utt_ids = [utt[0] for utt in utt_ids_and_text]
text = [utt[1] for utt in utt_ids_and_text]
return utt_ids, text
    def prepare_segmentation_task(self, text, lpz, name=None, speech_len=None):
        """Preprocess text, and gather text and lpz into a task object.
        Text is pre-processed and tokenized depending on configuration.
        If ``speech_len`` is given, the timing configuration is updated.
        Text, lpz, and configuration is collected in a CTCSegmentationTask
        object. The resulting object can be serialized and passed in a
        multiprocessing computation.
        It is recommended that you normalize the text beforehand, e.g.,
        change numbers into their spoken equivalent word, remove special
        characters, and convert UTF-8 characters to chars corresponding to
        your ASR model dictionary.
        The text is tokenized based on the ``text_converter`` setting:
        The "tokenize" method is more efficient and the easiest for models
        based on latin or cyrillic script that only contain the main chars,
        ["a", "b", ...] or for Japanese or Chinese ASR models with ~3000
        short Kanji / Hanzi tokens.
        The "classic" method improves the accuracy of the alignments
        for models that contain longer tokens, but with a greater complexity
        for computation. The function scans for partial tokens which may
        improve time resolution.
        For example, the word "▁really" will be broken down into
        ``['▁', '▁r', '▁re', '▁real', '▁really']``. The alignment will be
        based on the most probable activation sequence given by the network.
        Args
        ----
        text : list
            List or multiline-string with utterance ground truths.
        lpz : np.ndarray
            Log CTC posterior probabilities obtained from the CTC-network;
            numpy array shaped as ( <time steps>, <classes> ).
        name : str
            Audio file name that will be included in the segments output.
            Choose a unique name, or the original audio
            file name, to distinguish multiple audio files. Default: None.
        speech_len : int
            Number of sample points. If given, the timing
            configuration is automatically derived from length of fs, length
            of speech and length of lpz. If None is given, make sure the
            timing parameters are correct, see time_stamps for reference!
            Default: None.
        Returns
        -------
        CTCSegmentationTask
            Task object that can be passed to
            ``CTCSegmentation.get_segments()`` in order to obtain alignments.
        """
        config = self.config
        # Update timing parameters, if needed
        if speech_len is not None:
            lpz_len = lpz.shape[0]
            timing_cfg = self.get_timing_config(speech_len, lpz_len)
            # NOTE(review): ``config`` is ``self.config`` — ``config.set``
            # mutates it in place, so timing settings persist across calls.
            config.set(**timing_cfg)
        # `text` is needed in the form of a list.
        utt_ids, text = self._split_text(text)
        # Obtain utterance & label sequence from text
        if self.text_converter == "tokenize":
            # list of str --tokenize--> list of np.array
            token_list = [
                np.array(self._tokenizer.encode_as_ids(utt)) for utt in text
            ]
            # filter out any instances of the <unk> token
            # (``index`` raises ValueError if "<unk>" is not in char_list)
            unk = config.char_list.index("<unk>")
            token_list = [utt[utt != unk] for utt in token_list]
            ground_truth_mat, utt_begin_indices = prepare_token_list(
                config, token_list
            )
        else:
            assert self.text_converter == "classic"
            # Re-join sentencepiece pieces into one string per utterance,
            # then let the "classic" preprocessing scan for partial tokens.
            text_pieces = [
                "".join(self._tokenizer.encode_as_pieces(utt)) for utt in text
            ]
            # filter out any instances of the <unk> token
            text_pieces = [utt.replace("<unk>", "") for utt in text_pieces]
            ground_truth_mat, utt_begin_indices = prepare_text(
                config, text_pieces
            )
        # Bundle everything needed for (possibly out-of-process) alignment.
        task = CTCSegmentationTask(
            config=config,
            name=name,
            text=text,
            ground_truth_mat=ground_truth_mat,
            utt_begin_indices=utt_begin_indices,
            utt_ids=utt_ids,
            lpz=lpz,
        )
        return task
@staticmethod
def get_segments(task: CTCSegmentationTask):
"""Obtain segments for given utterance texts and CTC log posteriors.
Args
----
task : CTCSegmentationTask
Task object that contains ground truth and
CTC posterior probabilities.
Returns
-------
dict
Dictionary with alignments. Combine this with the task
object to obtain a human-readable segments representation.
"""
assert type(task) == CTCSegmentationTask
assert task.config is not None
config = task.config
lpz = task.lpz
ground_truth_mat = task.ground_truth_mat
utt_begin_indices = task.utt_begin_indices
text = task.text
# Align using CTC segmentation
timings, char_probs, state_list = ctc_segmentation(
config, lpz, ground_truth_mat
)
# Obtain list of utterances with time intervals and confidence score
segments = determine_utterance_segments(
config, utt_begin_indices, char_probs, timings, text
)
# Store results
result = {
"name": task.name,
"timings": timings,
"char_probs": char_probs,
"state_list": state_list,
"segments": segments,
"done": True,
}
return result
def __call__(
self,
speech: Union[torch.Tensor, np.ndarray, str, Path],
text: Union[List[str], str],
name: Optional[str] = None,
) -> CTCSegmentationTask:
"""Align utterances.
Args
----
speech : Union[torch.Tensor, np.ndarray, str, Path]
Audio file that can be given as path or as array.
text : Union[List[str], str]
List or multiline-string with utterance ground truths.
The required formatting depends on the setting ``kaldi_style_text``.
name : str
Name of the file. Utterance names are derived from it.
Returns
-------
CTCSegmentationTask
Task object with segments. Apply str(·) or print(·) on it
to obtain the segments list.
"""
if isinstance(speech, str) or isinstance(speech, Path):
speech = self.asr_model.load_audio(speech)
# Get log CTC posterior probabilities
lpz = self.get_lpz(speech)
# Conflate text & lpz & config as a segmentation task object
task = self.prepare_segmentation_task(text, lpz, name, speech.shape[0])
# Apply CTC segmentation
segments = self.get_segments(task)
task.set(**segments)
return task
| 26,318 | 38.577444 | 84 | py |
speechbrain | speechbrain-main/speechbrain/alignment/aligner.py | """
Alignment code
Authors
* Elena Rastorgueva 2020
* Loren Lugosch 2020
"""
import torch
import random
from speechbrain.utils.checkpoints import register_checkpoint_hooks
from speechbrain.utils.checkpoints import mark_as_saver
from speechbrain.utils.checkpoints import mark_as_loader
from speechbrain.utils.data_utils import undo_padding
@register_checkpoint_hooks
class HMMAligner(torch.nn.Module):
"""This class calculates Viterbi alignments in the forward method.
It also records alignments and creates batches of them for use
in Viterbi training.
Arguments
---------
states_per_phoneme : int
Number of hidden states to use per phoneme.
output_folder : str
It is the folder that the alignments will be stored in when
saved to disk. Not yet implemented.
neg_inf : float
The float used to represent a negative infinite log probability.
Using `-float("Inf")` tends to give numerical instability.
A number more negative than -1e5 also sometimes gave errors when
the `genbmm` library was used (currently not in use). (default: -1e5)
batch_reduction : string
One of "none", "sum" or "mean".
What kind of batch-level reduction to apply to the loss calculated
in the forward method.
input_len_norm : bool
Whether to normalize the loss in the forward method by the length of
the inputs.
target_len_norm : bool
Whether to normalize the loss in the forward method by the length of
the targets.
lexicon_path : string
The location of the lexicon.
Example
-------
>>> log_posteriors = torch.tensor([[[ -1., -10., -10.],
... [-10., -1., -10.],
... [-10., -10., -1.]],
...
... [[ -1., -10., -10.],
... [-10., -1., -10.],
... [-10., -10., -10.]]])
>>> lens = torch.tensor([1., 0.66])
>>> phns = torch.tensor([[0, 1, 2],
... [0, 1, 0]])
>>> phn_lens = torch.tensor([1., 0.66])
>>> aligner = HMMAligner()
>>> forward_scores = aligner(
... log_posteriors, lens, phns, phn_lens, 'forward'
... )
>>> forward_scores.shape
torch.Size([2])
>>> viterbi_scores, alignments = aligner(
... log_posteriors, lens, phns, phn_lens, 'viterbi'
... )
>>> alignments
[[0, 1, 2], [0, 1]]
>>> viterbi_scores.shape
torch.Size([2])
"""
    def __init__(
        self,
        states_per_phoneme=1,
        output_folder="",
        neg_inf=-1e5,
        batch_reduction="none",
        input_len_norm=False,
        target_len_norm=False,
        lexicon_path=None,
    ):
        """Initialize the aligner and optionally load a pronunciation lexicon.

        See the class docstring for the meaning of each argument. When
        ``lexicon_path`` is given, the file is parsed into ``self.lexicon``
        (word -> {pronunciation index -> phone string}) and the phone
        label/index maps ``self.lex_lab2ind`` / ``self.lex_ind2lab``.
        """
        super().__init__()
        self.states_per_phoneme = states_per_phoneme
        self.output_folder = output_folder
        self.neg_inf = neg_inf
        self.batch_reduction = batch_reduction
        self.input_len_norm = input_len_norm
        self.target_len_norm = target_len_norm
        # Cache of recorded alignments, keyed by utterance id.
        self.align_dict = {}
        self.lexicon_path = lexicon_path
        if self.lexicon_path is not None:
            with open(self.lexicon_path, "r") as f:
                lines = f.readlines()
            # Skip the leading comment block (lines starting with ";").
            # NOTE(review): if every line starts with ";", ``start_index``
            # is never bound and the loop below raises NameError.
            for i, line in enumerate(lines):
                if line[0] != ";":
                    start_index = i
                    break
            lexicon = {}  # {"read": {0: "r eh d", 1: "r iy d"}}
            lexicon_phones = set()
            for i in range(start_index, len(lines)):
                line = lines[i]
                # Expected entry shape: "<word> /<phones>/..." — TODO confirm
                # against the actual lexicon file format.
                word = line.split()[0]
                phones = line.split("/")[1]
                # Strip stress digits character-wise (e.g. "eh1" -> "eh").
                phones = "".join([p for p in phones if not p.isdigit()])
                for p in phones.split(" "):
                    lexicon_phones.add(p)
                # Presumably "~" marks word variants; keep the base word.
                if "~" in word:
                    word = word.split("~")[0]
                if word in lexicon:
                    # Additional pronunciation for a known word.
                    number_of_existing_pronunciations = len(lexicon[word])
                    lexicon[word][number_of_existing_pronunciations] = phones
                else:
                    lexicon[word] = {0: phones}
            self.lexicon = lexicon
            # Deterministic phone numbering: sorted order, starting at 1.
            lexicon_phones = list(lexicon_phones)
            lexicon_phones.sort()
            self.lex_lab2ind = {p: i + 1 for i, p in enumerate(lexicon_phones)}
            self.lex_ind2lab = {i + 1: p for i, p in enumerate(lexicon_phones)}
            # add sil, which is not in the lexicon
            self.lex_lab2ind["sil"] = 0
            self.lex_ind2lab[0] = "sil"
    def _use_lexicon(self, words, interword_sils, sample_pron):
        """Do processing using the lexicon to return a sequence of the possible
        phonemes, the transition/pi probabilities, and the possible final states.
        Inputs correspond to a single utterance, not a whole batch.
        Arguments
        ---------
        words : list
            List of the words in the transcript.
        interword_sils : bool
            If True, optional silences will be inserted between every word.
            If False, optional silences will only be placed at the beginning
            and end of each utterance.
        sample_pron : bool
            If True, it will sample a single possible sequence of phonemes.
            If False, it will return statistics for all possible sequences of
            phonemes.
        Returns
        -------
        poss_phns : torch.Tensor (phoneme)
            The phonemes that are thought to be in each utterance.
        log_transition_matrix : torch.Tensor (batch, from, to)
            Tensor containing transition (log) probabilities.
        start_states : list of ints
            A list of the possible starting states in each utterance.
        final_states : list of ints
            A list of the possible final states for each utterance.
        """
        number_of_states = 0
        words_prime = (
            []
        )  # This will contain one "word" for each optional silence and pronunciation.
        # structure of each "word_prime":
        # [word index, [[state sequence 1], [state sequence 2]], <is this an optional silence?>]
        word_index = 0
        phoneme_indices = []
        for word in words:
            if word_index == 0 or interword_sils is True:
                # optional silence
                word_prime = [
                    word_index,
                    [
                        [
                            number_of_states + i
                            for i in range(self.states_per_phoneme)
                        ]
                    ],
                    True,
                ]
                words_prime.append(word_prime)
                phoneme_indices += [
                    self.silence_index * self.states_per_phoneme + i
                    for i in range(self.states_per_phoneme)
                ]
                number_of_states += self.states_per_phoneme
                word_index += 1
            # word
            word_prime = [word_index, [], False]
            # NOTE(review): shuffling an int-keyed dict — works only because
            # keys are exactly 0..n-1 (random.shuffle swaps x[i], x[j]);
            # fragile, confirm this is intended.
            if sample_pron and len(self.lexicon[word]) > 1:
                random.shuffle(self.lexicon[word])
            for pron_idx in range(len(self.lexicon[word])):
                pronunciation = self.lexicon[word][pron_idx]
                phonemes = pronunciation.split()
                # One state sequence per pronunciation variant.
                word_prime[1].append([])
                for p in phonemes:
                    phoneme_indices += [
                        self.lex_lab2ind[p] * self.states_per_phoneme + i
                        for i in range(self.states_per_phoneme)
                    ]
                    word_prime[1][pron_idx] += [
                        number_of_states + i
                        for i in range(self.states_per_phoneme)
                    ]
                    number_of_states += self.states_per_phoneme
                if sample_pron:
                    # Keep only the first (shuffled) pronunciation.
                    break
            words_prime.append(word_prime)
            word_index += 1
        # optional final silence
        word_prime = [
            word_index,
            [[number_of_states + i for i in range(self.states_per_phoneme)]],
            True,
        ]
        words_prime.append(word_prime)
        phoneme_indices += [
            self.silence_index * self.states_per_phoneme + i
            for i in range(self.states_per_phoneme)
        ]
        number_of_states += self.states_per_phoneme
        word_index += 1
        transition_matrix = 1.0 * torch.eye(
            number_of_states
        )  # diagonal = all states have a self-loop
        final_states = []
        for word_prime in words_prime:
            word_idx = word_prime[0]
            is_optional_silence = word_prime[-1]
            next_word_exists = word_idx < len(words_prime) - 2
            # Last state of every pronunciation variant of this "word".
            this_word_last_states = [
                word_prime[1][i][-1] for i in range(len(word_prime[1]))
            ]
            # create transitions to next state from previous state within each pronunciation
            for pronunciation in word_prime[1]:
                for state_idx in range(len(pronunciation) - 1):
                    state = pronunciation[state_idx]
                    next_state = pronunciation[state_idx + 1]
                    transition_matrix[state, next_state] = 1.0
            # create transitions to next word's starting states
            if next_word_exists:
                # Silences can be skipped, so a word may connect either to
                # the following entry (idx + 1) or jump over a silence
                # (idx + 2) depending on layout.
                if is_optional_silence or not interword_sils:
                    next_word_idx = word_idx + 1
                else:
                    next_word_idx = word_idx + 2
                next_word_starting_states = [
                    words_prime[next_word_idx][1][i][0]
                    for i in range(len(words_prime[next_word_idx][1]))
                ]
                for this_word_last_state in this_word_last_states:
                    for next_word_starting_state in next_word_starting_states:
                        transition_matrix[
                            this_word_last_state, next_word_starting_state
                        ] = 1.0
            else:
                # Last word or final silence: its last states may end the
                # utterance.
                final_states += this_word_last_states
                if not is_optional_silence:
                    # The last word may also flow into the final silence.
                    next_silence_idx = word_idx + 1
                    next_silence_starting_state = words_prime[next_silence_idx][1][
                        0
                    ][0]
                    for this_word_last_state in this_word_last_states:
                        transition_matrix[
                            this_word_last_state, next_silence_starting_state
                        ] = 1.0
        # log() maps 0 -> -inf and 1 -> 0; log_softmax row-normalizes the
        # allowed transitions in log space.
        log_transition_matrix = transition_matrix.log().log_softmax(1)
        # Start in the leading optional silence or in any pronunciation of
        # the first word.
        start_states = [words_prime[0][1][0][0]]
        start_states += [
            words_prime[1][1][i][0] for i in range(len(words_prime[1][1]))
        ]
        poss_phns = torch.tensor(phoneme_indices)
        return poss_phns, log_transition_matrix, start_states, final_states
def use_lexicon(self, words, interword_sils=True, sample_pron=False):
"""Do processing using the lexicon to return a sequence of the possible
phonemes, the transition/pi probabilities, and the possible final
states.
Does processing on an utterance-by-utterance basis. Each utterance
in the batch is processed by a helper method `_use_lexicon`.
Arguments
---------
words : list
List of the words in the transcript
interword_sils : bool
If True, optional silences will be inserted between every word.
If False, optional silences will only be placed at the beginning
and end of each utterance.
sample_pron: bool
If True, it will sample a single possible sequence of phonemes.
If False, it will return statistics for all possible sequences of
phonemes.
Returns
-------
poss_phns: torch.Tensor (batch, phoneme in possible phn sequence)
The phonemes that are thought to be in each utterance.
poss_phn_lens: torch.Tensor (batch)
The relative length of each possible phoneme sequence in the batch.
trans_prob: torch.Tensor (batch, from, to)
Tensor containing transition (log) probabilities.
pi_prob: torch.Tensor (batch, state)
Tensor containing initial (log) probabilities.
final_state: list of lists of ints
A list of lists of possible final states for each utterance.
Example
-------
>>> aligner = HMMAligner()
>>> aligner.lexicon = {
... "a": {0: "a"},
... "b": {0: "b", 1: "c"}
... }
>>> words = [["a", "b"]]
>>> aligner.lex_lab2ind = {
... "sil": 0,
... "a": 1,
... "b": 2,
... "c": 3,
... }
>>> poss_phns, poss_phn_lens, trans_prob, pi_prob, final_states = aligner.use_lexicon(
... words,
... interword_sils = True
... )
>>> poss_phns
tensor([[0, 1, 0, 2, 3, 0]])
>>> poss_phn_lens
tensor([1.])
>>> trans_prob
tensor([[[-6.9315e-01, -6.9315e-01, -1.0000e+05, -1.0000e+05, -1.0000e+05,
-1.0000e+05],
[-1.0000e+05, -1.3863e+00, -1.3863e+00, -1.3863e+00, -1.3863e+00,
-1.0000e+05],
[-1.0000e+05, -1.0000e+05, -1.0986e+00, -1.0986e+00, -1.0986e+00,
-1.0000e+05],
[-1.0000e+05, -1.0000e+05, -1.0000e+05, -6.9315e-01, -1.0000e+05,
-6.9315e-01],
[-1.0000e+05, -1.0000e+05, -1.0000e+05, -1.0000e+05, -6.9315e-01,
-6.9315e-01],
[-1.0000e+05, -1.0000e+05, -1.0000e+05, -1.0000e+05, -1.0000e+05,
0.0000e+00]]])
>>> pi_prob
tensor([[-6.9315e-01, -6.9315e-01, -1.0000e+05, -1.0000e+05, -1.0000e+05,
-1.0000e+05]])
>>> final_states
[[3, 4, 5]]
>>> # With no optional silences between words
>>> poss_phns_, _, trans_prob_, pi_prob_, final_states_ = aligner.use_lexicon(
... words,
... interword_sils = False
... )
>>> poss_phns_
tensor([[0, 1, 2, 3, 0]])
>>> trans_prob_
tensor([[[-6.9315e-01, -6.9315e-01, -1.0000e+05, -1.0000e+05, -1.0000e+05],
[-1.0000e+05, -1.0986e+00, -1.0986e+00, -1.0986e+00, -1.0000e+05],
[-1.0000e+05, -1.0000e+05, -6.9315e-01, -1.0000e+05, -6.9315e-01],
[-1.0000e+05, -1.0000e+05, -1.0000e+05, -6.9315e-01, -6.9315e-01],
[-1.0000e+05, -1.0000e+05, -1.0000e+05, -1.0000e+05, 0.0000e+00]]])
>>> pi_prob_
tensor([[-6.9315e-01, -6.9315e-01, -1.0000e+05, -1.0000e+05, -1.0000e+05]])
>>> final_states_
[[2, 3, 4]]
>>> # With sampling of a single possible pronunciation
>>> import random
>>> random.seed(0)
>>> poss_phns_, _, trans_prob_, pi_prob_, final_states_ = aligner.use_lexicon(
... words,
... sample_pron = True
... )
>>> poss_phns_
tensor([[0, 1, 0, 2, 0]])
>>> trans_prob_
tensor([[[-6.9315e-01, -6.9315e-01, -1.0000e+05, -1.0000e+05, -1.0000e+05],
[-1.0000e+05, -1.0986e+00, -1.0986e+00, -1.0986e+00, -1.0000e+05],
[-1.0000e+05, -1.0000e+05, -6.9315e-01, -6.9315e-01, -1.0000e+05],
[-1.0000e+05, -1.0000e+05, -1.0000e+05, -6.9315e-01, -6.9315e-01],
[-1.0000e+05, -1.0000e+05, -1.0000e+05, -1.0000e+05, 0.0000e+00]]])
"""
self.silence_index = self.lex_lab2ind["sil"]
poss_phns = []
trans_prob = []
start_states = []
final_states = []
for words_ in words:
(
poss_phns_,
trans_prob_,
start_states_,
final_states_,
) = self._use_lexicon(words_, interword_sils, sample_pron)
poss_phns.append(poss_phns_)
trans_prob.append(trans_prob_)
start_states.append(start_states_)
final_states.append(final_states_)
# pad poss_phns, trans_prob with 0 to have same length
poss_phn_lens = [len(poss_phns_) for poss_phns_ in poss_phns]
U_max = max(poss_phn_lens)
batch_size = len(poss_phns)
for index in range(batch_size):
phn_pad_length = U_max - len(poss_phns[index])
poss_phns[index] = torch.nn.functional.pad(
poss_phns[index], (0, phn_pad_length), value=0
)
trans_prob[index] = torch.nn.functional.pad(
trans_prob[index],
(0, phn_pad_length, 0, phn_pad_length),
value=self.neg_inf,
)
# Stack into single tensor
poss_phns = torch.stack(poss_phns)
trans_prob = torch.stack(trans_prob)
trans_prob[trans_prob == -float("Inf")] = self.neg_inf
# make pi prob
pi_prob = self.neg_inf * torch.ones([batch_size, U_max])
for start_state in start_states:
pi_prob[:, start_state] = 1
pi_prob = torch.nn.functional.log_softmax(pi_prob, dim=1)
# Convert poss_phn_lens from absolute to relative lengths
poss_phn_lens = torch.tensor(poss_phn_lens).float() / U_max
return poss_phns, poss_phn_lens, trans_prob, pi_prob, final_states
def _make_pi_prob(self, phn_lens_abs):
"""Creates tensor of initial (log) probabilities (known as 'pi').
Assigns all probability mass to the first phoneme in the sequence.
Arguments
---------
phn_lens_abs : torch.Tensor (batch)
The absolute length of each phoneme sequence in the batch.
Returns
-------
pi_prob : torch.Tensor (batch, phn)
"""
batch_size = len(phn_lens_abs)
U_max = int(phn_lens_abs.max())
pi_prob = self.neg_inf * torch.ones([batch_size, U_max])
pi_prob[:, 0] = 0
return pi_prob
    def _make_trans_prob(self, phn_lens_abs):
        """Creates tensor of transition (log) probabilities.
        Only allows transitions to the same phoneme (self-loop) or the next
        phoneme in the phn sequence
        Arguments
        ---------
        phn_lens_abs : torch.Tensor (batch)
            The absolute length of each phoneme sequence in the batch.
        Returns
        -------
        trans_prob : torch.Tensor (batch, from, to)
        """
        # Extract useful values for later
        batch_size = len(phn_lens_abs)
        U_max = int(phn_lens_abs.max())
        device = phn_lens_abs.device
        ## trans_prob matrix consists of 2 diagonals:
        ## (1) offset diagonal (next state) &
        ## (2) main diagonal (self-loop)
        # make offset diagonal
        # Shift an identity right by one column to mark "advance to next
        # phoneme" transitions.
        trans_prob_off_diag = torch.eye(U_max - 1)
        zero_side = torch.zeros([U_max - 1, 1])
        zero_bottom = torch.zeros([1, U_max])
        trans_prob_off_diag = torch.cat((zero_side, trans_prob_off_diag), 1)
        trans_prob_off_diag = torch.cat((trans_prob_off_diag, zero_bottom), 0)
        # make main diagonal
        trans_prob_main_diag = torch.eye(U_max)
        # join the diagonals and repeat for whole batch
        trans_prob = trans_prob_off_diag + trans_prob_main_diag
        trans_prob = (
            trans_prob.reshape(1, U_max, U_max)
            .repeat(batch_size, 1, 1)
            .to(device)
        )
        # clear probabilities for too-long sequences
        # mask_a marks valid "from" states per utterance; mask_b (its
        # transpose) marks valid "to" states. A transition survives only if
        # both endpoints are within the utterance's phoneme sequence.
        mask_a = (
            torch.arange(U_max, device=device)[None, :] < phn_lens_abs[:, None]
        )
        mask_a = mask_a.unsqueeze(2)
        mask_a = mask_a.expand(-1, -1, U_max)
        mask_b = mask_a.permute(0, 2, 1)
        trans_prob = trans_prob * (mask_a & mask_b).float()
        ## put -infs in place of zeros:
        trans_prob = torch.where(
            trans_prob == 1,
            trans_prob,
            torch.tensor(-float("Inf"), device=device),
        )
        ## normalize
        trans_prob = torch.nn.functional.log_softmax(trans_prob, dim=2)
        ## set nans to v neg numbers
        # (rows that are entirely -inf — padded states — produce NaN after
        # log_softmax; ``x != x`` is the NaN test)
        trans_prob[trans_prob != trans_prob] = self.neg_inf
        ## set -infs to v neg numbers
        trans_prob[trans_prob == -float("Inf")] = self.neg_inf
        return trans_prob
    def _make_emiss_pred_useful(
        self, emission_pred, lens_abs, phn_lens_abs, phns
    ):
        """Creates a 'useful' form of the posterior probabilities, rearranged
        into the order of phoneme appearance in phns.
        Arguments
        ---------
        emission_pred : torch.Tensor (batch, time, phoneme in vocabulary)
            posterior probabilities from our acoustic model
        lens_abs : torch.Tensor (batch)
            The absolute length of each input to the acoustic model,
            i.e., the number of frames.
        phn_lens_abs : torch.Tensor (batch)
            The absolute length of each phoneme sequence in the batch.
        phns : torch.Tensor (batch, phoneme in phn sequence)
            The phonemes that are known/thought to be in each utterance.
        Returns
        -------
        emiss_pred_useful : torch.Tensor
            Tensor shape (batch, phoneme in phn sequence, time).
        """
        # Extract useful values for later
        U_max = int(phn_lens_abs.max().item())
        fb_max_length = int(lens_abs.max().item())
        device = emission_pred.device
        # apply mask based on lens_abs
        # Zero out posteriors of padded time frames.
        mask_lens = (
            torch.arange(fb_max_length).to(device)[None, :] < lens_abs[:, None]
        )
        emiss_pred_acc_lens = torch.where(
            mask_lens[:, :, None],
            emission_pred,
            torch.tensor([0.0], device=device),
        )
        # manipulate phn tensor, and then 'torch.gather'
        # gather re-indexes the vocabulary axis so that column u holds the
        # posterior of the u-th phoneme of the transcript at every frame.
        phns = phns.to(device)
        phns_copied = phns.unsqueeze(1).expand(-1, fb_max_length, -1)
        emiss_pred_useful = torch.gather(emiss_pred_acc_lens, 2, phns_copied)
        # apply mask based on phn_lens_abs
        # Padded phoneme positions get neg_inf so they never win in the DP.
        mask_phn_lens = (
            torch.arange(U_max).to(device)[None, :] < phn_lens_abs[:, None]
        )
        emiss_pred_useful = torch.where(
            mask_phn_lens[:, None, :],
            emiss_pred_useful,
            torch.tensor([self.neg_inf], device=device),
        )
        # (batch, time, phn) -> (batch, phn, time) for the DP recursions.
        emiss_pred_useful = emiss_pred_useful.permute(0, 2, 1)
        return emiss_pred_useful
    def _dp_forward(
        self,
        pi_prob,
        trans_prob,
        emiss_pred_useful,
        lens_abs,
        phn_lens_abs,
        phns,
    ):
        """Does forward dynamic programming algorithm.
        Arguments
        ---------
        pi_prob : torch.Tensor (batch, phn)
            Tensor containing initial (log) probabilities.
        trans_prob : torch.Tensor (batch, from, to)
            Tensor containing transition (log) probabilities.
            NOTE(review): this tensor is modified in place below for
            utterances whose length has been passed -- confirm callers do
            not reuse it afterwards.
        emiss_pred_useful : torch.Tensor (batch, phoneme in phn sequence, time)
            A 'useful' form of the posterior probabilities, rearranged
            into the order of phoneme appearance in phns.
        lens_abs : torch.Tensor (batch)
            The absolute length of each input to the acoustic model,
            i.e., the number of frames.
        phn_lens_abs : torch.Tensor (batch)
            The absolute length of each phoneme sequence in the batch.
        phns : torch.Tensor (batch, phoneme in phn sequence)
            The phonemes that are known/thought to be in each utterance.
        Returns
        -------
        sum_alpha_T : torch.Tensor (batch)
            The (log) likelihood of each utterance in the batch.
        """
        # useful values
        batch_size = len(phn_lens_abs)
        U_max = phn_lens_abs.max()
        fb_max_length = lens_abs.max()
        device = emiss_pred_useful.device
        pi_prob = pi_prob.to(device)
        trans_prob = trans_prob.to(device)
        # initialise: alpha(u, t=0) = pi(u) + emission(u, t=0); every other
        # cell starts at neg_inf, the log-domain stand-in for probability 0.
        alpha_matrix = self.neg_inf * torch.ones(
            [batch_size, U_max, fb_max_length], device=device
        )
        alpha_matrix[:, :, 0] = pi_prob + emiss_pred_useful[:, :, 0]
        for t in range(1, fb_max_length):
            # For utterances shorter than the longest one in the batch:
            # once t passes their length, swap in an identity transition
            # matrix (0 on the diagonal, neg_inf elsewhere, in the log
            # domain) so their alpha values are carried unchanged to the
            # final column, where the scores are read out.
            # NOTE(review): the test is `lens_abs < t`, not `<=`, so at
            # t == lens_abs one more real transition step is applied --
            # confirm this is intended.
            utt_lens_passed = lens_abs < t
            if True in utt_lens_passed:
                n_passed = utt_lens_passed.sum()
                I_tensor = self.neg_inf * torch.ones(n_passed, U_max, U_max)
                I_tensor[:, torch.arange(U_max), torch.arange(U_max)] = 0.0
                I_tensor = I_tensor.to(device)
                trans_prob[utt_lens_passed] = I_tensor
            # Log-domain matrix-vector product (logsumexp over predecessor
            # states), then add the emission score for frame t.
            alpha_times_trans = batch_log_matvecmul(
                trans_prob.permute(0, 2, 1), alpha_matrix[:, :, t - 1]
            )
            alpha_matrix[:, :, t] = (
                alpha_times_trans + emiss_pred_useful[:, :, t]
            )
        # Total (log) likelihood: logsumexp over states of the final column.
        sum_alpha_T = torch.logsumexp(
            alpha_matrix[torch.arange(batch_size), :, -1], dim=1
        )
        return sum_alpha_T
    def _dp_viterbi(
        self,
        pi_prob,
        trans_prob,
        emiss_pred_useful,
        lens_abs,
        phn_lens_abs,
        phns,
        final_states,
    ):
        """Calculates Viterbi alignment using dynamic programming.
        Arguments
        ---------
        pi_prob : torch.Tensor (batch, phn)
            Tensor containing initial (log) probabilities.
        trans_prob : torch.Tensor (batch, from, to)
            Tensor containing transition (log) probabilities.
        emiss_pred_useful : torch.Tensor (batch, phoneme in phn sequence, time)
            A 'useful' form of the posterior probabilities, rearranged
            into the order of phoneme appearance in phns.
        lens_abs : torch.Tensor (batch)
            The absolute length of each input to the acoustic model,
            i.e., the number of frames.
        phn_lens_abs : torch.Tensor (batch)
            The absolute length of each phoneme sequence in the batch.
        phns : torch.Tensor (batch, phoneme in phn sequence)
            The phonemes that are known/thought to be in each utterance.
        final_states : list or None
            If given, the candidate end states per utterance; the backtrace
            starts from the most probable of these instead of from the last
            phoneme in the sequence.
        Returns
        -------
        z_stars : list of lists of int
            Viterbi alignments for the files in the batch.
        z_stars_loc : list of lists of int
            The locations of the Viterbi alignments for the files in the batch.
            e.g., for a batch with a single utterance with 5 phonemes,
            `z_stars_loc` will look like:
            [[0, 0, 0, 1, 1, 2, 3, 3, 3, 4, 4]].
        viterbi_scores : torch.Tensor (batch)
            The (log) likelihood of the Viterbi path for each utterance.
        """
        # useful values
        batch_size = len(phn_lens_abs)
        U_max = phn_lens_abs.max()
        fb_max_length = lens_abs.max()
        device = emiss_pred_useful.device
        pi_prob = pi_prob.to(device)
        trans_prob = trans_prob.to(device)
        # v_matrix[b, u, t]: best (log) score of any state path that ends in
        # state u at frame t. backpointers holds the best predecessor state;
        # -99 marks cells that are never written (the t = 0 column).
        v_matrix = self.neg_inf * torch.ones(
            [batch_size, U_max, fb_max_length], device=device
        )
        backpointers = -99 * torch.ones(
            [batch_size, U_max, fb_max_length], device=device
        )
        # initialise
        v_matrix[:, :, 0] = pi_prob + emiss_pred_useful[:, :, 0]
        for t in range(1, fb_max_length):
            # Log-domain max-product step: best predecessor for each state.
            x, argmax = batch_log_maxvecmul(
                trans_prob.permute(0, 2, 1), v_matrix[:, :, t - 1]
            )
            v_matrix[:, :, t] = x + emiss_pred_useful[:, :, t]
            # NOTE(review): `.type(torch.FloatTensor)` always yields a CPU
            # tensor, forcing a device copy per frame on GPU -- confirm
            # whether `argmax.float()` was intended instead.
            backpointers[:, :, t] = argmax.type(torch.FloatTensor)
        # Backtrace each utterance individually, from its last frame to 0.
        z_stars = []
        z_stars_loc = []
        for utterance_in_batch in range(batch_size):
            len_abs = lens_abs[utterance_in_batch]
            if final_states is not None:
                final_states_utter = final_states[utterance_in_batch]
                # Pick most probable of the final states
                viterbi_finals = v_matrix[
                    utterance_in_batch, final_states_utter, len_abs - 1
                ]
                final_state_chosen = torch.argmax(viterbi_finals).item()
                U = final_states_utter[final_state_chosen]
            else:
                # Default: the path must end in the last phoneme.
                U = phn_lens_abs[utterance_in_batch].long().item() - 1
            z_star_i_loc = [U]
            z_star_i = [phns[utterance_in_batch, z_star_i_loc[0]].item()]
            for time_step in range(len_abs, 1, -1):
                current_best_loc = z_star_i_loc[0]
                earlier_best_loc = (
                    backpointers[
                        utterance_in_batch, current_best_loc, time_step - 1
                    ]
                    .long()
                    .item()
                )
                earlier_z_star = phns[
                    utterance_in_batch, earlier_best_loc
                ].item()
                z_star_i_loc.insert(0, earlier_best_loc)
                z_star_i.insert(0, earlier_z_star)
            z_stars.append(z_star_i)
            z_stars_loc.append(z_star_i_loc)
        # picking out viterbi_scores
        # NOTE(review): scores are read at state phn_lens_abs - 1 even when
        # `final_states` selected a different end state above -- confirm.
        viterbi_scores = v_matrix[
            torch.arange(batch_size), phn_lens_abs - 1, lens_abs - 1
        ]
        return z_stars, z_stars_loc, viterbi_scores
def _loss_reduction(self, loss, input_lens, target_lens):
"""Applies reduction to loss as specified during object initialization.
Arguments
---------
loss : torch.Tensor (batch)
The loss tensor to be reduced.
input_lens : torch.Tensor (batch)
The absolute durations of the inputs.
target_lens : torch.Tensor (batch)
The absolute durations of the targets.
Returns
-------
loss : torch.Tensor (batch, or scalar)
The loss with reduction applied if it is specified.
"""
if self.input_len_norm is True:
loss = torch.div(loss, input_lens)
if self.target_len_norm is True:
loss = torch.div(loss, target_lens)
if self.batch_reduction == "none":
pass
elif self.batch_reduction == "sum":
loss = loss.sum()
elif self.batch_reduction == "mean":
loss = loss.mean()
else:
raise ValueError(
"`batch_reduction` parameter must be one of 'none', 'sum' or 'mean'"
)
return loss
def forward(
self,
emission_pred,
lens,
phns,
phn_lens,
dp_algorithm,
prob_matrices=None,
):
"""Prepares relevant (log) probability tensors and does dynamic
programming: either the forward or the Viterbi algorithm. Applies
reduction as specified during object initialization.
Arguments
---------
emission_pred : torch.Tensor (batch, time, phoneme in vocabulary)
Posterior probabilities from our acoustic model.
lens : torch.Tensor (batch)
The relative duration of each utterance sound file.
phns : torch.Tensor (batch, phoneme in phn sequence)
The phonemes that are known/thought to be in each utterance
phn_lens : torch.Tensor (batch)
The relative length of each phoneme sequence in the batch.
dp_algorithm : string
Either "forward" or "viterbi".
prob_matrices : dict
(Optional) Must contain keys 'trans_prob', 'pi_prob' and 'final_states'.
Used to override the default forward and viterbi operations which
force traversal over all of the states in the `phns` sequence.
Returns
-------
tensor
(1) if dp_algorithm == "forward".
``forward_scores`` : torch.Tensor (batch, or scalar)
The (log) likelihood of each utterance in the batch, with reduction
applied if specified. (OR)
(2) if dp_algorithm == "viterbi".
``viterbi_scores`` : torch.Tensor (batch, or scalar)
The (log) likelihood of the Viterbi path for each utterance, with
reduction applied if specified.
``alignments`` : list of lists of int
Viterbi alignments for the files in the batch.
"""
lens_abs = torch.round(emission_pred.shape[1] * lens).long()
phn_lens_abs = torch.round(phns.shape[1] * phn_lens).long()
phns = phns.long()
if prob_matrices is None:
pi_prob = self._make_pi_prob(phn_lens_abs)
trans_prob = self._make_trans_prob(phn_lens_abs)
final_states = None
else:
if (
("pi_prob" in prob_matrices)
and ("trans_prob" in prob_matrices)
and ("final_states" in prob_matrices)
):
pi_prob = prob_matrices["pi_prob"]
trans_prob = prob_matrices["trans_prob"]
final_states = prob_matrices["final_states"]
else:
ValueError(
"""`prob_matrices` must contain the keys
`pi_prob`, `trans_prob` and `final_states`"""
)
emiss_pred_useful = self._make_emiss_pred_useful(
emission_pred, lens_abs, phn_lens_abs, phns
)
if dp_algorithm == "forward":
# do forward training
forward_scores = self._dp_forward(
pi_prob,
trans_prob,
emiss_pred_useful,
lens_abs,
phn_lens_abs,
phns,
)
forward_scores = self._loss_reduction(
forward_scores, lens_abs, phn_lens_abs
)
return forward_scores
elif dp_algorithm == "viterbi":
alignments, _, viterbi_scores = self._dp_viterbi(
pi_prob,
trans_prob,
emiss_pred_useful,
lens_abs,
phn_lens_abs,
phns,
final_states,
)
viterbi_scores = self._loss_reduction(
viterbi_scores, lens_abs, phn_lens_abs
)
return viterbi_scores, alignments
else:
raise ValueError(
"dp_algorithm input must be either 'forward' or 'viterbi'"
)
def expand_phns_by_states_per_phoneme(self, phns, phn_lens):
"""Expands each phoneme in the phn sequence by the number of hidden
states per phoneme defined in the HMM.
Arguments
---------
phns : torch.Tensor (batch, phoneme in phn sequence)
The phonemes that are known/thought to be in each utterance.
phn_lens : torch.Tensor (batch)
The relative length of each phoneme sequence in the batch.
Returns
-------
expanded_phns : torch.Tensor (batch, phoneme in expanded phn sequence)
Example
-------
>>> phns = torch.tensor([[0., 3., 5., 0.],
... [0., 2., 0., 0.]])
>>> phn_lens = torch.tensor([1., 0.75])
>>> aligner = HMMAligner(states_per_phoneme = 3)
>>> expanded_phns = aligner.expand_phns_by_states_per_phoneme(
... phns, phn_lens
... )
>>> expanded_phns
tensor([[ 0., 1., 2., 9., 10., 11., 15., 16., 17., 0., 1., 2.],
[ 0., 1., 2., 6., 7., 8., 0., 1., 2., 0., 0., 0.]])
"""
# Initialise expanded_phns
expanded_phns = torch.zeros(
phns.shape[0], phns.shape[1] * self.states_per_phoneme
)
expanded_phns = expanded_phns.to(phns.device)
phns = undo_padding(phns, phn_lens)
for i, phns_utt in enumerate(phns):
expanded_phns_utt = []
for phoneme_index in phns_utt:
expanded_phns_utt += [
self.states_per_phoneme * phoneme_index + i_
for i_ in range(self.states_per_phoneme)
]
expanded_phns[i, : len(expanded_phns_utt)] = torch.tensor(
expanded_phns_utt
)
return expanded_phns
def store_alignments(self, ids, alignments):
"""Records Viterbi alignments in `self.align_dict`.
Arguments
---------
ids : list of str
IDs of the files in the batch.
alignments : list of lists of int
Viterbi alignments for the files in the batch.
Without padding.
Example
-------
>>> aligner = HMMAligner()
>>> ids = ['id1', 'id2']
>>> alignments = [[0, 2, 4], [1, 2, 3, 4]]
>>> aligner.store_alignments(ids, alignments)
>>> aligner.align_dict.keys()
dict_keys(['id1', 'id2'])
>>> aligner.align_dict['id1']
tensor([0, 2, 4], dtype=torch.int16)
"""
for i, id in enumerate(ids):
alignment_i = alignments[i]
alignment_i = torch.tensor(alignment_i, dtype=torch.int16).cpu()
self.align_dict[id] = alignment_i
def _get_flat_start_batch(self, lens_abs, phn_lens_abs, phns):
"""Prepares flat start alignments (with zero padding) for every utterance
in the batch.
Every phoneme will have an equal duration, except for the final phoneme
potentially. E.g. if 104 frames and 10 phonemes, 9 phonemes will have
duration of 10 frames, and one phoneme will have a duration of 14 frames.
Arguments
---------
lens_abs : torch.Tensor (batch)
The absolute length of each input to the acoustic model,
i.e., the number of frames.
phn_lens_abs : torch.Tensor (batch)
The absolute length of each phoneme sequence in the batch.
phns : torch.Tensor (batch, phoneme in phn sequence)
The phonemes that are known/thought to be in each utterance.
Returns
-------
flat_start_batch : torch.Tensor (batch, time)
Flat start alignments for utterances in the batch, with zero padding.
"""
phns = phns.long()
batch_size = len(lens_abs)
fb_max_length = torch.max(lens_abs)
flat_start_batch = torch.zeros(
batch_size, fb_max_length, device=phns.device
).long()
for i in range(batch_size):
utter_phns = phns[i]
utter_phns = utter_phns[: phn_lens_abs[i]] # crop out zero padding
repeat_amt = int(lens_abs[i].item() / len(utter_phns))
# make sure repeat_amt is at least 1. (the code above
# may make repeat_amt==0 if self.states_per_phoneme is too large).
if repeat_amt == 0:
repeat_amt = 1
# repeat each phoneme in utter_phns by repeat_amt
utter_phns = utter_phns.repeat_interleave(repeat_amt)
# len(utter_phns) may be <, == or > lens_abs[i], so
# make sure len(utter_phns) == lens_abs[i]
utter_phns = utter_phns[: lens_abs[i]]
utter_phns = torch.nn.functional.pad(
utter_phns,
(0, int(lens_abs[i]) - len(utter_phns)),
value=utter_phns[-1], # pad out with final phoneme
)
flat_start_batch[i, : len(utter_phns)] = utter_phns
return flat_start_batch
def _get_viterbi_batch(self, ids, lens_abs):
"""Retrieves Viterbi alignments stored in `self.align_dict` and
creates a batch of them, with zero padding.
Arguments
---------
ids : list of str
IDs of the files in the batch.
lens_abs : torch.Tensor (batch)
The absolute length of each input to the acoustic model,
i.e., the number of frames.
Returns
-------
viterbi_batch : torch.Tensor (batch, time)
The previously-recorded Viterbi alignments for the utterances
in the batch.
"""
batch_size = len(lens_abs)
fb_max_length = torch.max(lens_abs)
viterbi_batch = torch.zeros(
batch_size, fb_max_length, device=lens_abs.device
).long()
for i in range(batch_size):
viterbi_preds = self.align_dict[ids[i]]
viterbi_preds = torch.nn.functional.pad(
viterbi_preds, (0, fb_max_length - len(viterbi_preds))
)
viterbi_batch[i] = viterbi_preds.long()
return viterbi_batch
def get_prev_alignments(self, ids, emission_pred, lens, phns, phn_lens):
"""Fetches previously recorded Viterbi alignments if they are available.
If not, fetches flat start alignments.
Currently, assumes that if a Viterbi alignment is not available for the
first utterance in the batch, it will not be available for the rest of
the utterances.
Arguments
---------
ids : list of str
IDs of the files in the batch.
emission_pred : torch.Tensor (batch, time, phoneme in vocabulary)
Posterior probabilities from our acoustic model. Used to infer the
duration of the longest utterance in the batch.
lens : torch.Tensor (batch)
The relative duration of each utterance sound file.
phns : torch.Tensor (batch, phoneme in phn sequence)
The phonemes that are known/thought to be in each utterance.
phn_lens : torch.Tensor (batch)
The relative length of each phoneme sequence in the batch.
Returns
-------
torch.Tensor (batch, time)
Zero-padded alignments.
Example
-------
>>> ids = ['id1', 'id2']
>>> emission_pred = torch.tensor([[[ -1., -10., -10.],
... [-10., -1., -10.],
... [-10., -10., -1.]],
...
... [[ -1., -10., -10.],
... [-10., -1., -10.],
... [-10., -10., -10.]]])
>>> lens = torch.tensor([1., 0.66])
>>> phns = torch.tensor([[0, 1, 2],
... [0, 1, 0]])
>>> phn_lens = torch.tensor([1., 0.66])
>>> aligner = HMMAligner()
>>> alignment_batch = aligner.get_prev_alignments(
... ids, emission_pred, lens, phns, phn_lens
... )
>>> alignment_batch
tensor([[0, 1, 2],
[0, 1, 0]])
"""
lens_abs = torch.round(emission_pred.shape[1] * lens).long()
phn_lens_abs = torch.round(phns.shape[1] * phn_lens).long()
if ids[0] in self.align_dict:
return self._get_viterbi_batch(ids, lens_abs)
else:
return self._get_flat_start_batch(lens_abs, phn_lens_abs, phns)
def _calc_accuracy_sent(self, alignments_, ends_, phns_):
"""Calculates the accuracy between predicted alignments and ground truth
alignments for a single sentence/utterance.
Arguments
---------
alignments_ : list of ints
The predicted alignments for the utterance.
ends_ : list of ints
A list of the sample indices where each ground truth phoneme
ends, according to the transcription.
phns_ : list of ints
The unpadded list of ground truth phonemes in the utterance.
Returns
-------
mean_acc : float
The mean percentage of times that the upsampled predicted alignment
matches the ground truth alignment.
"""
# Create array containing the true alignment at each sample
ends_ = [0] + [int(end) for end in ends_]
true_durations = [ends_[i] - ends_[i - 1] for i in range(1, len(ends_))]
true_alignments = []
for i in range(len(phns_)):
true_alignments += [phns_[i]] * (true_durations[i])
true_alignments = torch.tensor(true_alignments)
# Upsample the predicted alignment array
# and make sure length matches that of `true_alignment`
upsample_factor = int(
torch.round(torch.tensor(len(true_alignments) / len(alignments_)))
)
alignments_ = torch.tensor(alignments_)
alignments_upsampled = alignments_.repeat_interleave(upsample_factor)
alignments_upsampled = alignments_upsampled[: len(true_alignments)]
if len(true_alignments) > len(alignments_upsampled):
alignments_upsampled = torch.nn.functional.pad(
alignments_upsampled,
(0, len(true_alignments) - len(alignments_upsampled)),
)
# Measure sample-wise accuracy
accuracy = (
alignments_upsampled == true_alignments
).float().mean().item() * 100
return accuracy
def calc_accuracy(self, alignments, ends, phns, ind2labs=None):
"""Calculates mean accuracy between predicted alignments and ground truth
alignments. Ground truth alignments are derived from ground truth phns
and their ends in the audio sample.
Arguments
---------
alignments : list of lists of ints/floats
The predicted alignments for each utterance in the batch.
ends : list of lists of ints
A list of lists of sample indices where each ground truth phoneme
ends, according to the transcription.
Note: current implementation assumes that 'ends' mark the index
where the next phoneme begins.
phns : list of lists of ints/floats
The unpadded list of lists of ground truth phonemes in the batch.
ind2labs : tuple
(Optional)
Contains the original index-to-label dicts for the first and second
sequence of phonemes.
Returns
-------
mean_acc : float
The mean percentage of times that the upsampled predicted alignment
matches the ground truth alignment.
Example
-------
>>> aligner = HMMAligner()
>>> alignments = [[0., 0., 0., 1.]]
>>> phns = [[0., 1.]]
>>> ends = [[2, 4]]
>>> mean_acc = aligner.calc_accuracy(alignments, ends, phns)
>>> mean_acc.item()
75.0
"""
acc_hist = []
# Do conversion if states_per_phoneme > 1
if self.states_per_phoneme > 1:
alignments = [
[i // self.states_per_phoneme for i in utt]
for utt in alignments
]
# convert to common alphabet if need be
if ind2labs is not None:
alignments, phns = map_inds_to_intersect(alignments, phns, ind2labs)
for alignments_, ends_, phns_ in zip(alignments, ends, phns):
acc = self._calc_accuracy_sent(alignments_, ends_, phns_)
acc_hist.append(acc)
acc_hist = torch.tensor(acc_hist)
mean_acc = acc_hist.mean()
return mean_acc.unsqueeze(0)
def collapse_alignments(self, alignments):
"""
Converts alignments to 1 state per phoneme style.
Arguments
---------
alignments : list of ints
Predicted alignments for a single utterance.
Returns
-------
sequence : list of ints
The predicted alignments converted to a 1 state per phoneme style.
Example
-------
>>> aligner = HMMAligner(states_per_phoneme = 3)
>>> alignments = [0, 1, 2, 3, 4, 5, 3, 4, 5, 0, 1, 2]
>>> sequence = aligner.collapse_alignments(alignments)
>>> sequence
[0, 1, 1, 0]
"""
# Filter the repetitions
sequence = [
v
for i, v in enumerate(alignments)
if i == 0 or v != alignments[i - 1]
]
# Pick out only multiples of self.states_per_phoneme
sequence = [v for v in sequence if v % self.states_per_phoneme == 0]
# Divide by self.states_per_phoneme
sequence = [v // self.states_per_phoneme for v in sequence]
return sequence
    @mark_as_saver
    def _save(self, path):
        """Checkpoint hook: saves the recorded alignments dict to `path`."""
        torch.save(self.align_dict, path)
    @mark_as_loader
    def _load(self, path, end_of_epoch=False, device=None):
        """Checkpoint hook: restores the recorded alignments dict from `path`."""
        del end_of_epoch  # Not used here.
        # NOTE(review): `device` is ignored, so torch.load restores tensors
        # to the device they were saved from -- confirm this is intended.
        del device
        self.align_dict = torch.load(path)
def map_inds_to_intersect(lists1, lists2, ind2labs):
    """Converts 2 lists containing indices for phonemes from different
    phoneme sets to a single phoneme so that comparing the equality
    of the indices of the resulting lists will yield the correct
    accuracy.
    Arguments
    ---------
    lists1 : list of lists of ints
        Contains the indices of the first sequence of phonemes.
    lists2 : list of lists of ints
        Contains the indices of the second sequence of phonemes.
    ind2labs : tuple (dict, dict)
        Contains the original index-to-label dicts for the first and second
        sequence of phonemes.
    Returns
    -------
    lists1_new : list of lists of ints
        Contains the indices of the first sequence of phonemes, mapped
        to the new phoneme set.
    lists2_new : list of lists of ints
        Contains the indices of the second sequence of phonemes, mapped
        to the new phoneme set.
    Example
    -------
    >>> lists1 = [[0, 1]]
    >>> lists2 = [[0, 1]]
    >>> ind2lab1 = {
    ...     0: "a",
    ...     1: "b",
    ... }
    >>> ind2lab2 = {
    ...     0: "a",
    ...     1: "c",
    ... }
    >>> ind2labs = (ind2lab1, ind2lab2)
    >>> out1, out2 = map_inds_to_intersect(lists1, lists2, ind2labs)
    >>> out1
    [[0, 1]]
    >>> out2
    [[0, 2]]
    """
    ind2lab1, ind2lab2 = ind2labs
    # Partition the labels: shared between the two alphabets, then the
    # labels unique to each one.
    labs1 = set(ind2lab1.values())
    labs2 = set(ind2lab2.values())
    shared = labs1.intersection(labs2)
    only1 = labs1.difference(labs2)
    only2 = labs2.difference(labs1)
    # Shared labels get the lowest merged indices, then each unique group.
    new_lab2ind = {lab: i for i, lab in enumerate(shared)}
    new_lab2ind.update(
        {lab: len(new_lab2ind) + i for i, lab in enumerate(only1)}
    )
    new_lab2ind.update(
        {lab: len(new_lab2ind) + i for i, lab in enumerate(only2)}
    )

    def _remap(lists, ind2lab):
        # index -> original label -> merged index
        return [[new_lab2ind[ind2lab[ind]] for ind in utt] for utt in lists]

    return _remap(lists1, ind2lab1), _remap(lists2, ind2lab2)
def batch_log_matvecmul(A, b):
    """For each 'matrix' and 'vector' pair in the batch, do matrix-vector
    multiplication in the log domain, i.e., logsumexp instead of add,
    add instead of multiply.
    Arguments
    ---------
    A : torch.Tensor (batch, dim1, dim2)
        Tensor
    b : torch.Tensor (batch, dim1)
        Tensor.
    Outputs
    -------
    x : torch.Tensor (batch, dim1)
    Example
    -------
    >>> A = torch.tensor([[[   0., 0.],
    ...                    [ -1e5, 0.]]])
    >>> b = torch.tensor([[0., 0.,]])
    >>> x = batch_log_matvecmul(A, b)
    >>> x
    tensor([[0.6931, 0.0000]])
    >>>
    >>> # non-log domain equivalent without batching functionality
    >>> A_ = torch.tensor([[1., 1.],
    ...                    [0., 1.]])
    >>> b_ = torch.tensor([1., 1.,])
    >>> x_ = torch.matmul(A_, b_)
    >>> x_
    tensor([2., 1.])
    """
    # Broadcast b across rows of A, then reduce with logsumexp (the
    # log-domain analogue of the dot product's sum-of-products).
    return torch.logsumexp(A + b.unsqueeze(1), dim=2)
def batch_log_maxvecmul(A, b):
    """Similar to batch_log_matvecmul, but takes a maximum instead of
    logsumexp. Returns both the max and the argmax.
    Arguments
    ---------
    A : torch.Tensor (batch, dim1, dim2)
        Tensor.
    b : torch.Tensor (batch, dim1)
        Tensor
    Outputs
    -------
    x : torch.Tensor (batch, dim1)
        Tensor.
    argmax : torch.Tensor (batch, dim1)
        Tensor.
    Example
    -------
    >>> A = torch.tensor([[[   0., -1.],
    ...                    [ -1e5,  0.]]])
    >>> b = torch.tensor([[0., 0.,]])
    >>> x, argmax = batch_log_maxvecmul(A, b)
    >>> x
    tensor([[0., 0.]])
    >>> argmax
    tensor([[0, 1]])
    """
    # Max-product in the log domain: add, then take the best predecessor.
    summed = A + b.unsqueeze(1)
    values, indices = torch.max(summed, dim=2)
    return values, indices
| 52,837 | 34.944218 | 96 | py |
speechbrain | speechbrain-main/speechbrain/utils/edit_distance.py | """Edit distance and WER computation.
Authors
* Aku Rouhe 2020
* Salima Mdhaffar 2021
"""
import collections
# Symbols used in the edit-operation tables produced by `op_table` and
# interpreted by `alignment` and `count_ops`.
EDIT_SYMBOLS = {
    "eq": "=",  # when tokens are equal
    "ins": "I",  # insertion
    "del": "D",  # deletion
    "sub": "S",  # substitution
}
# NOTE: Earlier revisions used a shared ``collections.Counter()`` instance as
# the default for ``stats``. Although it was never mutated, mutable defaults
# are a well-known Python pitfall, so ``None`` is now the sentinel and a fresh
# Counter is created per call instead.
def accumulatable_wer_stats(refs, hyps, stats=None):
    """Computes word error rate and the related counts for a batch.
    Can also be used to accumulate the counts over many batches, by passing
    the output back to the function in the call for the next batch.
    Arguments
    ----------
    refs : iterable
        Batch of reference sequences.
    hyps : iterable
        Batch of hypothesis sequences.
    stats : collections.Counter
        The running statistics.
        Pass the output of this function back as this parameter
        to accumulate the counts. If not given, a new empty
        Counter is used.
    Returns
    -------
    collections.Counter
        The updated running statistics, with keys:
        * "WER" - word error rate
        * "insertions" - number of insertions
        * "deletions" - number of deletions
        * "substitutions" - number of substitutions
        * "num_ref_tokens" - number of reference tokens
    Example
    -------
    >>> import collections
    >>> batches = [[[[1,2,3],[4,5,6]], [[1,2,4],[5,6]]],
    ...             [[[7,8], [9]],     [[7,8],  [10]]]]
    >>> stats = collections.Counter()
    >>> for batch in batches:
    ...     refs, hyps = batch
    ...     stats = accumulatable_wer_stats(refs, hyps, stats)
    >>> print("%WER {WER:.2f}, {num_ref_tokens} ref tokens".format(**stats))
    %WER 33.33, 9 ref tokens
    """
    if stats is None:
        stats = collections.Counter()
    updated_stats = stats + _batch_stats(refs, hyps)
    if updated_stats["num_ref_tokens"] == 0:
        # WER is undefined without reference tokens; avoid division by zero.
        updated_stats["WER"] = float("nan")
    else:
        num_edits = (
            updated_stats["insertions"]
            + updated_stats["deletions"]
            + updated_stats["substitutions"]
        )
        updated_stats["WER"] = (
            100.0 * num_edits / updated_stats["num_ref_tokens"]
        )
    return updated_stats
def _batch_stats(refs, hyps):
    """Internal function which actually computes the counts.
    Used by accumulatable_wer_stats
    Arguments
    ----------
    refs : iterable
        Batch of reference sequences.
    hyps : iterable
        Batch of hypothesis sequences.
    Returns
    -------
    collections.Counter
        Edit statistics over the batch, with keys:
        * "insertions" - number of insertions
        * "deletions" - number of deletions
        * "substitutions" - number of substitutions
        * "num_ref_tokens" - number of reference tokens
    Example
    -------
    >>> from speechbrain.utils.edit_distance import _batch_stats
    >>> batch = [[[1,2,3],[4,5,6]], [[1,2,4],[5,6]]]
    >>> refs, hyps = batch
    >>> print(_batch_stats(refs, hyps))
    Counter({'num_ref_tokens': 6, 'substitutions': 1, 'deletions': 1})
    """
    if len(refs) != len(hyps):
        raise ValueError(
            "The reference and hypothesis batches are not of the same size"
        )
    stats = collections.Counter()
    for ref_tokens, hyp_tokens in zip(refs, hyps):
        # Solve the edit-op table for this pair, then tally its operations.
        stats += count_ops(op_table(ref_tokens, hyp_tokens))
        stats["num_ref_tokens"] += len(ref_tokens)
    return stats
def op_table(a, b):
    """Table of edit operations between a and b.
    Solves for the table of edit operations, which is mainly used to
    compute word error rate. The table is of size ``[|a|+1, |b|+1]``,
    and each point ``(i, j)`` in the table has an edit operation. The
    edit operations can be deterministically followed backwards to
    find the shortest edit path to from ``a[:i-1] to b[:j-1]``. Indexes
    of zero (``i=0`` or ``j=0``) correspond to an empty sequence.
    The algorithm itself is well known, see
    `Levenshtein distance <https://en.wikipedia.org/wiki/Levenshtein_distance>`_
    Note that in some cases there are multiple valid edit operation
    paths which lead to the same edit distance minimum.
    Arguments
    ---------
    a : iterable
        Sequence for which the edit operations are solved.
    b : iterable
        Sequence for which the edit operations are solved.
    Returns
    -------
    list
        List of lists, Matrix, Table of edit operations.
    Example
    -------
    >>> ref = [1,2,3]
    >>> hyp = [1,2,4]
    >>> for row in op_table(ref, hyp):
    ...     print(row)
    ['=', 'I', 'I', 'I']
    ['D', '=', 'I', 'I']
    ['D', 'D', '=', 'I']
    ['D', 'D', 'D', 'S']
    """
    num_cols = len(b) + 1
    # The DP only ever needs the previous cost row and the one being built.
    row_above = list(range(num_cols))
    row_here = [0] * num_cols
    # The full op table starts as no-ops; only cells where an edit happens
    # are overwritten below.
    table = [
        [EDIT_SYMBOLS["eq"]] * num_cols for _ in range(len(a) + 1)
    ]
    # Row/column zero (the empty-prefix borders) are known up front.
    for i in range(len(a) + 1):
        table[i][0] = EDIT_SYMBOLS["del"]
    table[0] = [EDIT_SYMBOLS["ins"]] * num_cols
    table[0][0] = EDIT_SYMBOLS["eq"]
    # Fill the rest row by row.
    for i, tok_a in enumerate(a, start=1):
        row_here[0] = i  # cost of deleting the first i tokens of a
        for j, tok_b in enumerate(b, start=1):
            cost_ins = row_here[j - 1] + 1
            cost_del = row_above[j] + 1
            cost_sub = row_above[j - 1] + (tok_a != tok_b)
            # Tie-breaking copies the Kaldi compute-wer comparison order:
            # insertion > deletion > substitution.
            if cost_sub < cost_ins and cost_sub < cost_del:
                row_here[j] = cost_sub
                # If the tokens match, the table already holds the no-op.
                if tok_a != tok_b:
                    table[i][j] = EDIT_SYMBOLS["sub"]
            elif cost_del < cost_ins:
                row_here[j] = cost_del
                table[i][j] = EDIT_SYMBOLS["del"]
            else:
                row_here[j] = cost_ins
                table[i][j] = EDIT_SYMBOLS["ins"]
        # Reuse buffers by swapping; every cell of the next row is rewritten.
        row_above, row_here = row_here, row_above
    return table
def alignment(table):
    """Get the edit distance alignment from an edit op table.
    Walks back an edit operations table, produced by calling ``table(a, b)``,
    and collects the edit distance alignment of a to b. The alignment
    shows which token in a corresponds to which token in b. Note that the
    alignment is monotonic, one-to-zero-or-one.
    Arguments
    ----------
    table : list
        Edit operations table from ``op_table(a, b)``.
    Returns
    -------
    list
        Schema: ``[(str <edit-op>, int-or-None <i>, int-or-None <j>),]``
        List of edit operations, and the corresponding indices to a and b.
        See the EDIT_SYMBOLS dict for the edit-ops.
        The i indexes a, j indexes b, and the indices can be None, which means
        aligning to nothing.
    Example
    -------
    >>> # table for a=[1,2,3], b=[1,2,4]:
    >>> table = [['I', 'I', 'I', 'I'],
    ...          ['D', '=', 'I', 'I'],
    ...          ['D', 'D', '=', 'I'],
    ...          ['D', 'D', 'D', 'S']]
    >>> print(alignment(table))
    [('=', 0, 0), ('=', 1, 1), ('S', 2, 2)]
    """
    i = len(table) - 1
    j = len(table[0]) - 1
    # Collected back-to-front, then reversed once at the end.
    steps = []
    while i > 0 or j > 0:
        # On the borders only one move is possible; elsewhere the table
        # cell itself names the operation.
        if i == 0:
            op = EDIT_SYMBOLS["ins"]
        elif j == 0:
            op = EDIT_SYMBOLS["del"]
        else:
            op = table[i][j]
        if op == EDIT_SYMBOLS["ins"]:
            j -= 1
            steps.append((op, None, j))
        elif op == EDIT_SYMBOLS["del"]:
            i -= 1
            steps.append((op, i, None))
        else:
            # Substitution or match: both indices advance together.
            i -= 1
            j -= 1
            steps.append((op, i, j))
    steps.reverse()
    return steps
def count_ops(table):
    """Count the edit operations in the shortest edit path in edit op table.
    Walks back an edit operations table produced by table(a, b) and
    counts the number of insertions, deletions, and substitutions in the
    shortest edit path. This information is typically used in speech
    recognition to report the number of different error types separately.
    Arguments
    ----------
    table : list
        Edit operations table from ``op_table(a, b)``.
    Returns
    -------
    collections.Counter
        The counts of the edit operations, with keys:
        * "insertions"
        * "deletions"
        * "substitutions"
        NOTE: not all of the keys might appear explicitly in the output,
        but for the missing keys collections. The counter will return 0.
    Example
    -------
    >>> table = [['I', 'I', 'I', 'I'],
    ...          ['D', '=', 'I', 'I'],
    ...          ['D', 'D', '=', 'I'],
    ...          ['D', 'D', 'D', 'S']]
    >>> print(count_ops(table))
    Counter({'substitutions': 1})
    """
    edits = collections.Counter()
    # Walk the table from the bottom-right corner back to the origin.
    i = len(table) - 1
    j = len(table[0]) - 1
    while i > 0 or j > 0:
        # On the borders only one move is possible; elsewhere the table
        # cell itself names the operation.
        if i == 0:
            op = EDIT_SYMBOLS["ins"]
        elif j == 0:
            op = EDIT_SYMBOLS["del"]
        else:
            op = table[i][j]
        if op == EDIT_SYMBOLS["ins"]:
            edits["insertions"] += 1
            j -= 1
        elif op == EDIT_SYMBOLS["del"]:
            edits["deletions"] += 1
            i -= 1
        else:
            # Diagonal move: count it only if it was a substitution.
            if op == EDIT_SYMBOLS["sub"]:
                edits["substitutions"] += 1
            i -= 1
            j -= 1
    return edits
def _batch_to_dict_format(ids, seqs):
# Used by wer_details_for_batch
return dict(zip(ids, seqs))
def wer_details_for_batch(ids, refs, hyps, compute_alignments=False):
    """Convenient batch interface for ``wer_details_by_utterance``.

    ``wer_details_by_utterance`` can handle missing hypotheses, but
    sometimes (e.g. CTC training with greedy decoding) they are not needed,
    and this is a convenient interface in that case.

    Arguments
    ---------
    ids : list, torch.tensor
        Utterance ids for the batch.
    refs : list, torch.tensor
        Reference sequences.
    hyps : list, torch.tensor
        Hypothesis sequences.
    compute_alignments : bool, optional
        Whether to compute alignments or not. If computed, the details
        will also store the refs and hyps. (default: False)

    Returns
    -------
    list
        See ``wer_details_by_utterance``

    Example
    -------
    >>> ids = [['utt1'], ['utt2']]
    >>> refs = [[['a','b','c']], [['d','e']]]
    >>> hyps = [[['a','b','d']], [['d','e']]]
    >>> wer_details = []
    >>> for ids_batch, refs_batch, hyps_batch in zip(ids, refs, hyps):
    ...     details = wer_details_for_batch(ids_batch, refs_batch, hyps_batch)
    ...     wer_details.extend(details)
    >>> print(wer_details[0]['key'], ":",
    ...     "{:.2f}".format(wer_details[0]['WER']))
    utt1 : 33.33
    """
    # Convert both sides into id-indexed dicts, then score strictly:
    # every reference id must have a hypothesis in this interface.
    ref_dict = _batch_to_dict_format(ids, refs)
    hyp_dict = _batch_to_dict_format(ids, hyps)
    return wer_details_by_utterance(
        ref_dict,
        hyp_dict,
        compute_alignments=compute_alignments,
        scoring_mode="strict",
    )
def wer_details_by_utterance(
    ref_dict, hyp_dict, compute_alignments=False, scoring_mode="strict"
):
    """Computes a wealth of WER info about each single utterance.

    This info can then be used to compute summary details (WER, SER).

    Arguments
    ---------
    ref_dict : dict
        Should be indexable by utterance ids, and return the reference tokens
        for each utterance id as iterable
    hyp_dict : dict
        Should be indexable by utterance ids, and return
        the hypothesis tokens for each utterance id as iterable
    compute_alignments : bool
        Whether alignments should also be saved.
        This also saves the tokens themselves, as they are probably
        required for printing the alignments.
    scoring_mode : {'strict', 'all', 'present'}
        How to deal with missing hypotheses (reference utterance id
        not found in hyp_dict).

        * 'strict': Raise error for missing hypotheses.
        * 'all': Score missing hypotheses as empty.
        * 'present': Only score existing hypotheses.

    Returns
    -------
    list
        A list with one entry for every reference utterance. Each entry is a
        dict with keys:

        * "key": utterance id
        * "scored": (bool) Whether utterance was scored.
        * "hyp_absent": (bool) True if a hypothesis was NOT found.
        * "hyp_empty": (bool) True if hypothesis was considered empty
          (either because it was empty, or not found and mode 'all').
        * "num_edits": (int) Number of edits in total.
        * "num_ref_tokens": (int) Number of tokens in the reference.
        * "WER": (float) Word error rate of the utterance.
        * "insertions": (int) Number of insertions.
        * "deletions": (int) Number of deletions.
        * "substitutions": (int) Number of substitutions.
        * "alignment": If compute_alignments is True, alignment as list,
          see ``speechbrain.utils.edit_distance.alignment``.
          If compute_alignments is False, this is None.
        * "ref_tokens": (iterable) The reference tokens
          only saved if alignments were computed, else None.
        * "hyp_tokens": (iterable) the hypothesis tokens,
          only saved if alignments were computed, else None.

    Raises
    ------
    KeyError
        If scoring mode is 'strict' and a hypothesis is not found.
    """
    details_by_utterance = []
    for key, ref_tokens in ref_dict.items():
        # Initialize utterance_details with "not scored" placeholders;
        # they get overwritten below only if scoring actually happens.
        utterance_details = {
            "key": key,
            "scored": False,
            "hyp_absent": None,
            "hyp_empty": None,
            "num_edits": None,
            "num_ref_tokens": len(ref_tokens),
            "WER": None,
            "insertions": None,
            "deletions": None,
            "substitutions": None,
            "alignment": None,
            "ref_tokens": ref_tokens if compute_alignments else None,
            "hyp_tokens": None,
        }
        # Resolve the hypothesis according to the scoring mode:
        if key in hyp_dict:
            utterance_details.update({"hyp_absent": False})
            hyp_tokens = hyp_dict[key]
        elif scoring_mode == "all":
            # Missing hypothesis is scored as if the system output nothing.
            utterance_details.update({"hyp_absent": True})
            hyp_tokens = []
        elif scoring_mode == "present":
            utterance_details.update({"hyp_absent": True})
            details_by_utterance.append(utterance_details)
            continue  # Skip scoring this utterance
        elif scoring_mode == "strict":
            raise KeyError(
                "Key "
                + key
                + " in reference but missing in hypothesis and strict mode on."
            )
        else:
            raise ValueError("Invalid scoring mode: " + scoring_mode)
        # Compute edits for this utterance
        table = op_table(ref_tokens, hyp_tokens)
        ops = count_ops(table)
        # Take into account "" outputs as empty: a single empty-string token
        # on both sides is treated as zero reference tokens.
        # NOTE(review): this indexes ref_tokens[0]/hyp_tokens[0], so it
        # assumes both are non-empty sequences here — confirm callers never
        # pass an empty token list.
        if ref_tokens[0] == "" and hyp_tokens[0] == "":
            num_ref_tokens = 0
        else:
            num_ref_tokens = len(ref_tokens)
        # Update the utterance-level details if we got this far:
        # NOTE(review): WER divides by len(ref_tokens), not the possibly
        # zeroed num_ref_tokens above, which avoids division by zero in the
        # empty-"" case — confirm this asymmetry is intentional.
        utterance_details.update(
            {
                "scored": True,
                "hyp_empty": True
                if len(hyp_tokens) == 0
                else False,  # This also works for e.g. torch tensors
                "num_edits": sum(ops.values()),
                "num_ref_tokens": num_ref_tokens,
                "WER": 100.0 * sum(ops.values()) / len(ref_tokens),
                "insertions": ops["insertions"],
                "deletions": ops["deletions"],
                "substitutions": ops["substitutions"],
                "alignment": alignment(table) if compute_alignments else None,
                "ref_tokens": ref_tokens if compute_alignments else None,
                "hyp_tokens": hyp_tokens if compute_alignments else None,
            }
        )
        details_by_utterance.append(utterance_details)
    return details_by_utterance
def wer_summary(details_by_utterance):
    """
    Computes summary stats from the output of details_by_utterance

    Summary stats like WER

    Arguments
    ---------
    details_by_utterance : list
        See the output of wer_details_by_utterance

    Returns
    -------
    dict
        Dictionary with keys:

        * "WER": (float) Word Error Rate.
        * "SER": (float) Sentence Error Rate (percentage of utterances
          which had at least one error).
        * "num_edits": (int) Total number of edits.
        * "num_scored_tokens": (int) Total number of tokens in scored
          reference utterances (a missing hypothesis might still
          have been scored with 'all' scoring mode).
        * "num_erraneous_sents": (int) Total number of utterances
          which had at least one error.
        * "num_scored_sents": (int) Total number of utterances
          which were scored.
        * "num_absent_sents": (int) Hypotheses which were not found.
        * "num_ref_sents": (int) Number of all reference utterances.
        * "insertions": (int) Total number of insertions.
        * "deletions": (int) Total number of deletions.
        * "substitutions": (int) Total number of substitutions.

        NOTE: Some cases lead to ambiguity over number of
        insertions, deletions and substitutions. We
        aim to replicate Kaldi compute_wer numbers.
    """
    # Build the summary details:
    ins = dels = subs = 0
    num_scored_tokens = (
        num_scored_sents
    ) = num_edits = num_erraneous_sents = num_absent_sents = num_ref_sents = 0
    for dets in details_by_utterance:
        num_ref_sents += 1
        if dets["scored"]:
            num_scored_sents += 1
            num_scored_tokens += dets["num_ref_tokens"]
            ins += dets["insertions"]
            dels += dets["deletions"]
            subs += dets["substitutions"]
            num_edits += dets["num_edits"]
            if dets["num_edits"] > 0:
                num_erraneous_sents += 1
        if dets["hyp_absent"]:
            num_absent_sents += 1
    # Guard both rates against division by zero, which can happen e.g.
    # with an empty input list or 'present' scoring mode when no
    # hypotheses were found. (Previously only WER was guarded; SER would
    # raise ZeroDivisionError.)
    if num_scored_tokens != 0:
        WER = 100.0 * num_edits / num_scored_tokens
    else:
        WER = 0.0
    if num_scored_sents != 0:
        SER = 100.0 * num_erraneous_sents / num_scored_sents
    else:
        SER = 0.0
    wer_details = {
        "WER": WER,
        "SER": SER,
        "num_edits": num_edits,
        "num_scored_tokens": num_scored_tokens,
        "num_erraneous_sents": num_erraneous_sents,
        "num_scored_sents": num_scored_sents,
        "num_absent_sents": num_absent_sents,
        "num_ref_sents": num_ref_sents,
        "insertions": ins,
        "deletions": dels,
        "substitutions": subs,
    }
    return wer_details
def wer_details_by_speaker(details_by_utterance, utt2spk):
    """Compute word error rate and another salient info grouping by speakers.

    Arguments
    ---------
    details_by_utterance : list
        See the output of wer_details_by_utterance
    utt2spk : dict
        Map from utterance id to speaker id

    Returns
    -------
    dict
        Maps speaker id to a dictionary of the statistics, with keys:

        * "speaker": Speaker id,
        * "num_edits": (int) Number of edits in total by this speaker.
        * "insertions": (int) Number insertions by this speaker.
        * "dels": (int) Number of deletions by this speaker.
        * "subs": (int) Number of substitutions by this speaker.
        * "num_scored_tokens": (int) Number of scored reference
          tokens by this speaker (a missing hypothesis might still
          have been scored with 'all' scoring mode).
        * "num_scored_sents": (int) number of scored utterances
          by this speaker.
        * "num_erraneous_sents": (int) number of utterance with at least
          one error, by this speaker.
        * "num_absent_sents": (int) number of utterances for which no
          hypotheses was found, by this speaker.
        * "num_ref_sents": (int) number of utterances by this speaker
          in total.
    """
    # Build the speakerwise details:
    details_by_speaker = {}
    for dets in details_by_utterance:
        speaker = utt2spk[dets["key"]]
        spk_dets = details_by_speaker.setdefault(
            speaker,
            collections.Counter(
                {
                    "speaker": speaker,
                    "insertions": 0,
                    "dels": 0,
                    "subs": 0,
                    "num_scored_tokens": 0,
                    "num_scored_sents": 0,
                    "num_edits": 0,
                    "num_erraneous_sents": 0,
                    "num_absent_sents": 0,
                    "num_ref_sents": 0,
                }
            ),
        )
        utt_stats = collections.Counter()
        # Every utterance attributed to this speaker counts as a reference
        # sentence. (Bugfix: "num_ref_sents" used to be initialized but
        # never incremented, so it always reported 0.)
        utt_stats.update({"num_ref_sents": 1})
        if dets["hyp_absent"]:
            utt_stats.update({"num_absent_sents": 1})
        if dets["scored"]:
            utt_stats.update(
                {
                    "num_scored_sents": 1,
                    "num_scored_tokens": dets["num_ref_tokens"],
                    "insertions": dets["insertions"],
                    "dels": dets["deletions"],
                    "subs": dets["substitutions"],
                    "num_edits": dets["num_edits"],
                }
            )
            if dets["num_edits"] > 0:
                utt_stats.update({"num_erraneous_sents": 1})
        # Counter.update adds counts (doesn't replace them).
        spk_dets.update(utt_stats)
    # We will in the end return a list of normal dicts
    # We want the output to be sortable
    details_by_speaker_dicts = []
    # Now compute speakerwise summary details
    for speaker, spk_dets in details_by_speaker.items():
        spk_dets["speaker"] = speaker
        if spk_dets["num_scored_sents"] > 0:
            spk_dets["WER"] = (
                100.0 * spk_dets["num_edits"] / spk_dets["num_scored_tokens"]
            )
            spk_dets["SER"] = (
                100.0
                * spk_dets["num_erraneous_sents"]
                / spk_dets["num_scored_sents"]
            )
        else:
            # Nothing scored for this speaker; rates are undefined.
            spk_dets["WER"] = None
            spk_dets["SER"] = None
        details_by_speaker_dicts.append(spk_dets)
    return details_by_speaker_dicts
def top_wer_utts(details_by_utterance, top_k=20):
    """
    Finds the k utterances with highest word error rates.

    Useful for diagnostic purposes, to see where the system
    is making the most mistakes.
    Returns results utterances which were not empty
    i.e. had to have been present in the hypotheses, with output produced

    Arguments
    ---------
    details_by_utterance : list
        See output of wer_details_by_utterance.
    top_k : int
        Number of utterances to return.

    Returns
    -------
    top_non_empty : list
        At most K utterances with the highest word error rates, whose
        hypothesis was not empty. (Docstring fix: this function returns a
        pair of lists, not a single list.)
    top_empty : list
        At most K utterances with the highest word error rates, whose
        hypothesis was empty.
    """
    scored_utterances = [
        dets for dets in details_by_utterance if dets["scored"]
    ]
    utts_by_wer = sorted(
        scored_utterances, key=lambda d: d["WER"], reverse=True
    )
    top_non_empty = []
    top_empty = []
    # Single pass over the sorted utterances instead of repeated pop(0)
    # (which is O(n) per call); stop early once both buckets are full.
    for utt in utts_by_wer:
        if len(top_non_empty) >= top_k and len(top_empty) >= top_k:
            break
        if utt["hyp_empty"]:
            if len(top_empty) < top_k:
                top_empty.append(utt)
        elif len(top_non_empty) < top_k:
            top_non_empty.append(utt)
    return top_non_empty, top_empty
def top_wer_spks(details_by_speaker, top_k=10):
    """
    Finds the K speakers with the highest word error rates.

    Useful for diagnostic purposes.

    Arguments
    ---------
    details_by_speaker : list
        See output of wer_details_by_speaker.
    top_k : int
        Number of speakers to return.

    Returns
    -------
    list
        List of at most K dicts (with the same keys as details_by_speaker)
        of speakers sorted by WER.
    """
    # Only speakers with scored sentences have a defined WER.
    scored = [d for d in details_by_speaker if d["num_scored_sents"] > 0]
    ranked = sorted(scored, key=lambda d: d["WER"], reverse=True)
    # Slicing handles both the >= top_k and < top_k cases uniformly.
    return ranked[:top_k]
| 25,986 | 33.788487 | 80 | py |
speechbrain | speechbrain-main/speechbrain/utils/checkpoints.py | """This module implements a checkpoint saver and loader.
A checkpoint in an experiment usually needs to save the state of many different
things: the model parameters, optimizer parameters, what epoch is this, etc.
The save format for a checkpoint is a directory, where each of these separate
saveable things gets its own file. Additionally, a special file holds meta
information about the checkpoint (by default just time of creation, but you
can specify anything else you may wish, e.g. validation loss).
The interface for the checkpoint system requires you to specify what things to
save. This approach is flexible and agnostic of how your experiment is actually
run.
The interface requires you to specify names for each thing to save. This name
is used to give the right parameter file to the right object when recovering.
Default saving and loading methods are only added for torch.nn.Modules (and
their subclasses), and torch.optim.Optimizers. If those methods do not work for
your object, you can specify your own saving and/or loading methods, either for
a particular instance or a for a class.
Example
-------
>>> # Toy example Module:
>>> class Recoverable(torch.nn.Module):
... def __init__(self, param):
... super().__init__()
... self.param = torch.nn.Parameter(torch.tensor([param]))
... def forward(self, x):
... return x * self.param
>>> model = Recoverable(1.)
>>> tempdir = getfixture('tmpdir')
>>> # In simple cases, the module aims to have a terse syntax,
>>> # consisting of three steps.
>>> # 1. Specifying where to save checkpoints and what is included in a
>>> # checkpoint:
>>> checkpointer = Checkpointer(tempdir, {"network": model})
>>> # 2. Recover from the latest checkpoint, if one is found:
>>> checkpointer.recover_if_possible()
>>> # Run your experiment:
>>> data = [(0.1, 0.9), (0.3, 0.8)]
>>> for example, target in data:
... loss = (model(example) - target)**2
... # 3. Save checkpoints, and keep by default just one, the newest:
... ckpt = checkpointer.save_and_keep_only()
Authors
* Aku Rouhe 2020
"""
import torch
import collections
import collections.abc
import os
import time
import yaml
import pathlib
import inspect
import shutil
import logging
import warnings
from packaging import version
import speechbrain.utils._workarounds as __wa
logger = logging.getLogger(__name__)
# Naming constants for checkpoint directories and the files inside them.
CKPT_PREFIX = "CKPT"
METAFNAME = f"{CKPT_PREFIX}.yaml"  # Important that this is not .ckpt
PARAMFILE_EXT = ".ckpt"  # ...because these files will be
def torch_recovery(obj, path, end_of_epoch, device=None):
    """Loads a torch.nn.Module state_dict from the given path instantly.

    This can be made the default for torch.nn.Modules with:
    >>> DEFAULT_LOAD_HOOKS[torch.nn.Module] = torch_recovery

    Arguments
    ---------
    obj : torch.nn.Module
        Instance for which to load the parameters.
    path : str, pathlib.Path
        Path where to load from.
    end_of_epoch : bool
        Whether the recovery comes from an end of epoch checkpoint.
    device : str
        Torch device, where to map the loaded parameters.

    Returns
    -------
    None
        Given object is modified in place.
    """
    del end_of_epoch  # Unused
    state_dict = torch.load(path, map_location=device)
    try:
        obj.load_state_dict(state_dict, strict=True)
    except TypeError:
        # Some recoverables have a load_state_dict without a `strict` kwarg.
        obj.load_state_dict(state_dict)
def torch_save(obj, path):
    """Saves the obj's parameters to path.

    Default save hook for torch.nn.Modules
    For saving torch.nn.Module state_dicts.

    Arguments
    ---------
    obj : torch.nn.Module
        Instance to save.
    path : str, pathlib.Path
        Path where to save to.

    Returns
    -------
    None
        State dict is written to disk.
    """
    parameters = obj.state_dict()
    # An empty state_dict is suspicious (nothing would be recovered later),
    # so leave a trace in the logs, but still write the file.
    if not parameters:
        logger.warning(f"Saving an empty state_dict for {obj} in {path}.")
    torch.save(parameters, path)
def torch_parameter_transfer(obj, path, device):
    """Non-strict Torch Module state_dict load.

    Loads a set of parameters from path to obj. If obj has layers for which
    parameters can't be found, only a warning is logged. Same thing
    if the path has parameters for layers which don't find a counterpart
    in obj.

    Arguments
    ---------
    obj : torch.nn.Module
        Instance for which to load the parameters.
    path : str
        Path where to load from.

    Returns
    -------
    None
        The object is modified in place.
    """
    # strict=False returns the keys that could not be matched instead of
    # raising; report each of them as a warning.
    report = obj.load_state_dict(
        torch.load(path, map_location=device), strict=False
    )
    for key in report.missing_keys:
        logger.warning(
            f"During parameter transfer to {obj} loading from "
            f"{path}, the transferred parameters did not have "
            f"parameters for the key: {key}"
        )
    for key in report.unexpected_keys:
        logger.warning(
            f"During parameter transfer to {obj} loading from "
            f"{path}, the object could not use the parameters loaded "
            f"with the key: {key}"
        )
# These dicts are indexed by class and hold the default checkpoints methods.
# torch 2.0 renamed the LR scheduler base class (_LRScheduler -> LRScheduler);
# resolve the right one once so the hook tables need no duplication.
if version.parse(torch.__version__) < version.parse("2.0.0"):
    _LRSCHEDULER_BASE = torch.optim.lr_scheduler._LRScheduler
else:
    _LRSCHEDULER_BASE = torch.optim.lr_scheduler.LRScheduler
DEFAULT_LOAD_HOOKS = {
    torch.nn.Module: torch_recovery,
    torch.optim.Optimizer: torch_recovery,
    _LRSCHEDULER_BASE: torch_recovery,
    torch.optim.lr_scheduler.ReduceLROnPlateau: torch_recovery,
    torch.cuda.amp.grad_scaler.GradScaler: torch_recovery,
}
DEFAULT_SAVE_HOOKS = {
    torch.nn.Module: torch_save,
    torch.optim.Optimizer: torch_save,
    _LRSCHEDULER_BASE: torch_save,
    torch.optim.lr_scheduler.ReduceLROnPlateau: torch_save,
    torch.cuda.amp.grad_scaler.GradScaler: torch_save,
}
# Default hooks for parameter transfer (non-strict loading):
DEFAULT_TRANSFER_HOOKS = {
    torch.nn.Module: torch_parameter_transfer,
}
# Add a transfer hook for sentencepiece if it is installed:
try:
    import sentencepiece as spm
    def _load_spm(obj, path, device=None):
        # Transfer hook for SentencePiece tokenizers; `device` is ignored.
        obj.load(str(path))  # SentencePieceProcessor needs a string.
    DEFAULT_TRANSFER_HOOKS[spm.SentencePieceProcessor] = _load_spm
    del spm  # Don't leave it here bare.
except ImportError:
    # SentencePiece not loaded, fine!
    pass
# Add workarounds:
# CyclicLR needs special save/load handling (see speechbrain.utils._workarounds).
DEFAULT_SAVE_HOOKS[torch.optim.lr_scheduler.CyclicLR] = __wa._cycliclrsaver
DEFAULT_LOAD_HOOKS[torch.optim.lr_scheduler.CyclicLR] = __wa._cycliclrloader
def mark_as_saver(method):
    """Method decorator which marks given method as the checkpoint saving hook.

    See register_checkpoint_hooks for example.

    Arguments
    ---------
    method : callable
        Method of the class to decorate. Must be callable with
        signature (instance, path) using positional arguments. This is
        satisfied by for example: def saver(self, path):

    Note
    ----
    This will not add the hook (not possible via a method decorator),
    you must also decorate the class with @register_checkpoint_hooks
    Only one method can be added as the hook.
    """
    # Verify the expected (instance, path) call shape up front, so a
    # wrongly-shaped hook fails at decoration time, not at save time.
    try:
        inspect.signature(method).bind(object(), pathlib.Path("testpath"))
    except TypeError:
        raise TypeError("Checkpoint saver must match signature (instance, path)")
    method._speechbrain_saver = True
    return method
def mark_as_loader(method):
    """Method decorator which marks given method as checkpoint loading hook.

    Arguments
    ---------
    method : callable
        Method of the class to decorate. Must be callable with
        signature (instance, path, end_of_epoch, device) using positional
        arguments. This is satisfied by for example:
        `def loader(self, path, end_of_epoch, device):`

    Note
    ----
    This will not add the hook (not possible via a method decorator),
    you must also decorate the class with @register_checkpoint_hooks
    Only one method can be added as the hook.
    """
    # Verify the expected (instance, path, end_of_epoch, device) call shape
    # up front, so a wrongly-shaped hook fails at decoration time.
    try:
        inspect.signature(method).bind(
            object(), pathlib.Path("testpath"), True, None
        )
    except TypeError:
        raise TypeError(
            "Checkpoint loader must have signature (self, path, end_of_epoch, device)"
        )
    method._speechbrain_loader = True
    return method
def mark_as_transfer(method):
    """Method decorator which marks given method as a parameter transfer hook.

    Arguments
    ---------
    method : callable
        Method of the class to decorate. Must be callable with
        signature (instance, path, device) using positional
        arguments. This is satisfied by for example:
        `def loader(self, path, device):`

    Note
    ----
    This will not add the hook (not possible via a method decorator),
    you must also decorate the class with @register_checkpoint_hooks
    Only one method can be added as the hook.

    Note
    ----
    The transfer hook is prioritized over the loader hook by the ``Pretrainer``
    However, if no transfer hook is registered, the Pretrainer will use the
    loader hook.
    """
    # Verify the expected (instance, path, device) call shape up front,
    # so a wrongly-shaped hook fails at decoration time.
    try:
        inspect.signature(method).bind(
            object(), pathlib.Path("testpath"), device=None
        )
    except TypeError:
        raise TypeError("Transfer hook must have signature (self, path, device)")
    method._speechbrain_transfer = True
    return method
def register_checkpoint_hooks(cls):
    """Class decorator which registers the load, save and transfer hooks.

    The hooks must have been marked with mark_as_loader and mark_as_saver,
    and possibly mark_as_transfer.

    Arguments
    ---------
    cls : class
        Class to decorate

    Example
    -------
    >>> @register_checkpoint_hooks
    ... class CustomRecoverable:
    ...     def __init__(self, param):
    ...         self.param = int(param)
    ...
    ...     @mark_as_saver
    ...     def save(self, path):
    ...         with open(path, "w") as fo:
    ...             fo.write(str(self.param))
    ...
    ...     @mark_as_loader
    ...     def load(self, path, end_of_epoch, device=None):
    ...         del end_of_epoch  # Unused here
    ...         with open(path) as fi:
    ...             self.param = int(fi.read())
    """
    global DEFAULT_LOAD_HOOKS
    global DEFAULT_SAVE_HOOKS
    global DEFAULT_TRANSFER_HOOKS
    # Scan the class body for methods carrying the marker attributes set by
    # the mark_as_* decorators and register each in its default hook table.
    for attr_name, attr in cls.__dict__.items():
        if hasattr(attr, "_speechbrain_saver"):
            DEFAULT_SAVE_HOOKS[cls] = attr
            logger.debug(f"Registered checkpoint save hook for {attr_name}")
        if hasattr(attr, "_speechbrain_loader"):
            DEFAULT_LOAD_HOOKS[cls] = attr
            logger.debug(f"Registered checkpoint load hook for {attr_name}")
        if hasattr(attr, "_speechbrain_transfer"):
            DEFAULT_TRANSFER_HOOKS[cls] = attr
            logger.debug(f"Registered parameter transfer hook for {attr_name}")
    return cls
def get_default_hook(obj, default_hooks):
    """Finds the default save/load hook to use with the given object.

    Follows the Method Resolution Order, i.e., if no hook is registered for
    the class of the object itself, also searches classes which the object
    inherits from.

    Arguments
    ---------
    obj : instance
        Instance of a class.
    default_hooks : dict
        Mapping from classes to (checkpointing hook) functions.

    Returns
    -------
    The correct method or None if no method is registered.

    Example
    -------
    >>> a = torch.nn.Module()
    >>> get_default_hook(a, DEFAULT_SAVE_HOOKS) == torch_save
    True
    """
    # The first match along the MRO wins, mirroring attribute lookup order.
    matches = (
        default_hooks[cls]
        for cls in inspect.getmro(type(obj))
        if cls in default_hooks
    )
    return next(matches, None)
# One checkpoint = a directory (path), its metadata (meta), and the
# per-recoverable parameter files (paramfiles).
Checkpoint = collections.namedtuple(
    "Checkpoint", ["path", "meta", "paramfiles"]
)
Checkpoint.__doc__ = """NamedTuple describing one saved checkpoint
To select a checkpoint to load from many checkpoint,
Checkpoints are first filtered and sorted based on this namedtuple.
Checkpointers put pathlib.Path in path and a dict in meta.
You can essentially add any info you want to meta when saving a checkpoint.
The only default key in meta is "unixtime".
Checkpoint.paramfiles is a dict from recoverable name to parameter filepath.
"""
# Creating a hash allows making checkpoint sets
# (identity is determined by the checkpoint directory path alone).
Checkpoint.__hash__ = lambda self: hash(self.path)
def ckpt_recency(ckpt):
    """Recency as Checkpoint importance metric.

    This function can also act as an example of how to make checkpoint
    importance keyfuncs. This is a named function, but as you can see
    it could be easily implemented as a lambda in a pinch.
    """
    # Newer checkpoints carry a larger unix timestamp in their meta.
    saved_at = ckpt.meta["unixtime"]
    return saved_at
class Checkpointer:
"""Saves checkpoints and recovers from them.
    Arguments
    ---------
checkpoints_dir : str, pathlib.Path
Path to directory where to save checkpoints.
recoverables : mapping, optional
Objects to to recover. They need a (unique) name: this is used
to connect the parameters in a checkpoint to the correct recoverable.
The name is also used in the filename of the
savefile for the objects parameters. These can also be added with
add_recoverable or add_recoverables or just modifying
checkpointer.recoverables directly.
custom_load_hooks : mapping, optional
A mapping from name [same as in recoverables] to function or method.
Sets a custom loading hook for a particular object. The
function/method must be callable with signature (instance, path)
using positional arguments. This is satisfied by for example:
`def loader(self, path)`.
custom_save_hooks : mapping, optional
Mapping from name [same as in recoverables] to function or method.
Sets a custom saving hook for a particular object. The
function/method must be callable with
signature (instance, path) using positional arguments. This is
satisfied by for example: def saver(self, path):
allow_partial_load : bool, optional
If True, allows loading a checkpoint where a savefile is not found
for every registered recoverable. In that case, only the found
savefiles are loaded. When False, loading such a save will raise
RuntimeError. (default: False)
Example
-------
>>> import torch
>>> #SETUP:
>>> tempdir = getfixture('tmpdir')
>>> class Recoverable(torch.nn.Module):
... def __init__(self, param):
... super().__init__()
... self.param = torch.nn.Parameter(torch.tensor([param]))
... def forward(self, x):
... return x * self.param
>>> recoverable = Recoverable(1.)
>>> recoverables = {'recoverable': recoverable}
>>> # SETUP DONE.
>>> checkpointer = Checkpointer(tempdir, recoverables)
>>> first_ckpt = checkpointer.save_checkpoint()
>>> recoverable.param.data = torch.tensor([2.])
>>> loaded_ckpt = checkpointer.recover_if_possible()
>>> # Parameter has been loaded:
>>> assert recoverable.param.data == torch.tensor([1.])
>>> # With this call, by default, oldest checkpoints are deleted:
>>> checkpointer.save_and_keep_only()
>>> assert first_ckpt not in checkpointer.list_checkpoints()
"""
def __init__(
self,
checkpoints_dir,
recoverables=None,
custom_load_hooks=None,
custom_save_hooks=None,
allow_partial_load=False,
):
self.checkpoints_dir = pathlib.Path(checkpoints_dir)
os.makedirs(self.checkpoints_dir, exist_ok=True)
self.recoverables = {}
if recoverables is not None:
self.add_recoverables(recoverables)
self.custom_load_hooks = {}
if custom_load_hooks is not None:
self.custom_load_hooks.update(custom_load_hooks)
self.custom_save_hooks = {}
if custom_save_hooks is not None:
self.custom_save_hooks.update(custom_save_hooks)
self.allow_partial_load = allow_partial_load
def add_recoverable(
self, name, obj, custom_load_hook=None, custom_save_hook=None
):
"""Register a recoverable with possible custom hooks.
Arguments
---------
name : str
Unique name for recoverable. Used to map savefiles to objects.
obj : instance
The object to recover.
custom_load_hook : callable
Called to load the object's savefile. The function/method must be
callable with signature (instance, path) using positional
arguments. This is satisfied by for example: def load(self, path):
custom_save_hook : callable
Called to save the object's parameters. The function/method must
be callable with signature (instance, path) using positional
arguments. This is satisfied by for example: def saver(self, path):
"""
self.recoverables[name] = obj
if custom_load_hook is not None:
self.custom_load_hooks[name] = custom_load_hook
if custom_save_hook is not None:
self.custom_save_hooks[name] = custom_save_hook
def add_recoverables(self, recoverables):
"""Update the recoverables dict from the given mapping.
Arguments
---------
recoverables : mapping
Objects to recover.
They need a (unique) name: this is used to
connect the parameters in a checkpoint to the correct
recoverable. The name is also used in the filename of the
savefile for the objects parameters.
"""
if isinstance(recoverables, collections.abc.Mapping):
self.recoverables.update(recoverables)
else:
rec = repr(recoverables) # noqa: F841, rec is used in MSG
MSG = f"Checkpointer needs a mapping (e.g. dict), \
got {rec} instead."
raise AttributeError(MSG)
    def save_checkpoint(
        self, meta={}, end_of_epoch=True, name=None, verbosity=logging.INFO
    ):
        """Saves a checkpoint.

        The whole checkpoint becomes a directory.
        Saves each registered object's parameters in a separate file.
        Also a meta file is added. The meta file by default has just the
        unixtime (seconds since unix epoch), but you can add anything
        relevant yourself. The meta information is later used to pick the
        checkpoint to load.

        The value of end_of_epoch is saved in the meta. This can affect how
        epoch counters and dataset iterators load their state.

        Arguments
        ---------
        meta : mapping, optional
            A mapping which is added to the meta file in the checkpoint. The
            key "unixtime" is included by default.
        end_of_epoch : bool, optional
            Whether the checkpoint is at the end of an epoch. True by default.
            May affect loading.
        name : str, optional
            Specify a custom name for your checkpoint.
            The name will still have a prefix added. If no name is given,
            a name is created from a timestamp and a random unique id.
        verbosity : logging level
            Set logging level this save.

        Returns
        -------
        Checkpoint
            namedtuple [see above], the saved checkpoint.
        """
        # NOTE(review): meta={} is a mutable default; it looks read-only
        # here but confirm _save_checkpoint_metafile does not mutate it.
        if name is None:
            ckpt_dir = self._new_checkpoint_dirpath()
        else:
            ckpt_dir = self._custom_checkpoint_dirpath(name)
        os.makedirs(ckpt_dir)  # May raise FileExistsError, let it.
        saved_meta = self._save_checkpoint_metafile(
            ckpt_dir / METAFNAME, meta, end_of_epoch
        )
        saved_paramfiles = {}
        # NOTE(review): the loop variable `name` below shadows the `name`
        # parameter; harmless since the parameter is not used after this.
        for name, obj in self.recoverables.items():
            objfname = f"{name}" + PARAMFILE_EXT
            savepath = ckpt_dir / objfname
            saved_paramfiles[name] = savepath
            # First see if object has custom save hook:
            if name in self.custom_save_hooks:
                self.custom_save_hooks[name](obj, savepath)
                continue
            # Otherwise find the default saver for that type:
            default_hook = get_default_hook(obj, DEFAULT_SAVE_HOOKS)
            if default_hook is not None:
                default_hook(obj, savepath)
                continue
            # If we got here, no custom hook or registered default hook
            MSG = f"Don't know how to save {type(obj)}. Register default hook \
            or add custom hook for this object."
            raise RuntimeError(MSG)
        ckpt_type = "end-of-epoch" if end_of_epoch else "intra-epoch"
        logger.log(verbosity, f"Saved an {ckpt_type} checkpoint in {ckpt_dir}")
        return Checkpoint(ckpt_dir, saved_meta, saved_paramfiles)
def save_and_keep_only(
self,
meta={},
end_of_epoch=True,
name=None,
num_to_keep=1,
keep_recent=True,
importance_keys=[],
max_keys=[],
min_keys=[],
ckpt_predicate=None,
verbosity=logging.INFO,
):
"""Saves a checkpoint, then deletes the least important checkpoints.
Essentially this combines ``save_checkpoint()`` and
``delete_checkpoints()`` in one call, providing short syntax.
Arguments
---------
meta : mapping, optional
A mapping which is added to the meta file in the checkpoint. The
key "unixtime" is included by default.
end_of_epoch : bool, optional
Whether the checkpoint is at the end of an epoch. True by default.
May affect loading.
name : str, optional
Specify a custom name for your checkpoint.
The name will still have a prefix added. If no name is given,
a name is created from a timestamp and a random unique id.
num_to_keep : int, optional
Number of checkpoints to keep. Defaults to 1. This deletes all
checkpoints remaining after filtering. Must be >=0.
keep_recent : bool, optional
Whether to keep the most recent ``num_to_keep`` checkpoints.
importance_keys : list, optional
A list of key functions used in sorting (see the sorted built-in).
Each callable defines a sort order and num_to_keep checkpoints are
kept for callable. The checkpoint with the highest keys are kept.
The functions are passed Checkpoint namedtuples (see above).
max_keys : list, optional
A list of keys for which the *highest* value will be kept.
min_keys : list, optional
A list of keys for which the *lowest* value will be kept.
ckpt_predicate : callable, optional
Use this to exclude some checkpoints from deletion. Before any
sorting, the list of checkpoints is filtered with this predicate.
Only the checkpoints for which ckpt_predicate is True can be
deleted. The function is called with Checkpoint namedtuples
(see above).
Returns
-------
None
Unlike save_checkpoint, this does not return anything, since
we cannot guarantee that the saved checkpoint actually survives
deletion.
"""
self.save_checkpoint(
meta=meta, end_of_epoch=end_of_epoch, name=name, verbosity=verbosity
)
if keep_recent:
importance_keys.append(ckpt_recency)
self.delete_checkpoints(
num_to_keep=num_to_keep,
max_keys=max_keys,
min_keys=min_keys,
importance_keys=importance_keys,
ckpt_predicate=ckpt_predicate,
verbosity=verbosity,
)
def find_checkpoint(
self,
importance_key=None,
max_key=None,
min_key=None,
ckpt_predicate=None,
):
"""Picks a particular checkpoint from all available checkpoints.
If none of ``importance_key``, ``max_key``, and ``min_key`` is
used, then most recent checkpoint will be returned. No more than
one of them may be used.
Most functionality is actually implemented in ``find_checkpoints()``
but this is kept as a useful interface.
Arguments
---------
importance_key : callable, optional
The key function used in sorting.
The checkpoint with the highest returned value is picked.
The function is called with Checkpoint namedtuples.
max_key : str, optional
The checkpoint with the highest value for this key will
be returned. Only checkpoints with this key will be considered!
min_key : str, optional
The checkpoint with the lowest value for this key will
be returned. Only checkpoints with this key will be considered!
ckpt_predicate : callable, optional
Before sorting, the list of
checkpoints is filtered with this predicate.
See the filter builtin.
The function is called with Checkpoint namedtuples (see above).
By default, all checkpoints are considered.
Returns
-------
Checkpoint
If found.
None
If no Checkpoints exist/remain after filtering.
"""
ckpts_found = self.find_checkpoints(
importance_key=importance_key,
max_key=max_key,
min_key=min_key,
ckpt_predicate=ckpt_predicate,
max_num_checkpoints=None,
)
if ckpts_found:
return ckpts_found[0]
else:
return None
    def find_checkpoints(
        self,
        importance_key=None,
        max_key=None,
        min_key=None,
        ckpt_predicate=None,
        max_num_checkpoints=None,
    ):
        """Picks multiple checkpoints.

        If none of ``importance_key``, ``max_key``, and ``min_key`` is
        used, then the most recent checkpoints will be returned. No more than
        one of these may be used.

        Arguments
        ---------
        importance_key : callable, optional
            The key function used in sorting.
            The checkpoint with the highest returned value is picked.
            The function is called with Checkpoint namedtuples.
        max_key : str, optional
            The checkpoint with the highest value for this key will
            be returned. Only checkpoints with this key will be considered!
        min_key : str, optional
            The checkpoint with the lowest value for this key will
            be returned. Only checkpoints with this key will be considered!
        ckpt_predicate : callable, optional
            Before sorting, the list of
            checkpoints is filtered with this predicate.
            See the filter builtin.
            The function is called with Checkpoint namedtuples (see above).
            By default, all checkpoints are considered.
        max_num_checkpoints : int, None
            The maximum number of checkpoints to return, or None to return all
            found checkpoints.

        Returns
        -------
        list
            List containing at most the max specified number of Checkpoints.
        """
        if importance_key is None and min_key is None and max_key is None:
            # No ranking criterion given at all: default to recency.
            importance_key = ckpt_recency

        if max_key and not importance_key:
            # Rank by the highest value stored under meta[max_key].

            def importance_key(ckpt):
                "Defines the importance key."
                return ckpt.meta[max_key]

            # Wrap the user predicate so that checkpoints missing the key are
            # excluded (they could not be ranked). The default argument binds
            # the original predicate before this def shadows the name.
            def ckpt_predicate(ckpt, old_predicate=ckpt_predicate):
                "Checkpoints predicate."
                if old_predicate is not None:
                    return max_key in ckpt.meta and old_predicate(ckpt)
                else:
                    return max_key in ckpt.meta

        elif min_key and not importance_key:
            # Rank by the lowest value stored under meta[min_key]; negated so
            # the single descending sort below works for both directions.

            def importance_key(ckpt):
                "Defines the importance key."
                return -ckpt.meta[min_key]

            # Same key-presence filtering as in the max_key branch above.
            def ckpt_predicate(ckpt, old_predicate=ckpt_predicate):
                "Checkpoints predicate."
                if old_predicate is not None:
                    return min_key in ckpt.meta and old_predicate(ckpt)
                else:
                    return min_key in ckpt.meta

        elif min_key or max_key:
            # A min/max key was given alongside an explicit importance_key.
            raise ValueError(
                "Must specify only one of 'importance_key', 'max_key', "
                "and 'min_key'."
            )

        ckpts = self.list_checkpoints()
        ckpts = list(filter(ckpt_predicate, ckpts))
        # First sort by recency, so that importance being equal,
        # the most recent checkpoints are returned
        ckpts = sorted(ckpts, key=ckpt_recency, reverse=True)
        if ckpts:
            ranked_ckpts = sorted(ckpts, key=importance_key, reverse=True)
            # NOTE: apparently, you can also slice [:None],
            # and this is the same as [:], so the following if-else is not
            # strictly speaking needed. However, this feature does not seem to
            # be documented Python so I don't want to trust it.
            if max_num_checkpoints is not None:
                return ranked_ckpts[:max_num_checkpoints]
            else:  # No max number -> return all ckpts, but just sorted
                return ranked_ckpts
        else:
            return []  # Be explicit :)
def recover_if_possible(
self,
importance_key=None,
max_key=None,
min_key=None,
ckpt_predicate=None,
device=None,
):
"""Picks a checkpoint and recovers from that, if one is found.
If a checkpoint is not found, no recovery is run.
If none of ``importance_key``, ``max_key``, and ``min_key`` is
used, then most recent checkpoint will be returned. No more than
one of them may be used.
Arguments
---------
importance_key : callable, optional
The key function used in sorting.
The checkpoint with the highest returned value is loaded.
The function is called with Checkpoint namedtuples.
max_key : str, optional
The checkpoint with the highest value for this key will be loaded.
Only checkpoints with this key will be considered!
min_key : str, optional
The checkpoint with the lowest value for this key will be loaded.
Only checkpoints with this key will be considered!
ckpt_predicate : callable, optional
Before sorting, the list of
checkpoints is filtered with this predicate.
See the filter builtin.
The function is called with Checkpoint namedtuples (see above).
By default, all checkpoints are considered.
device : torch.device
Device to load models to.
Returns
-------
Checkpoint
If found.
None
If no Checkpoints exist/remain after filtering.
"""
chosen_ckpt = self.find_checkpoint(
importance_key, max_key, min_key, ckpt_predicate,
)
if chosen_ckpt is not None:
self.load_checkpoint(chosen_ckpt, device)
else:
logger.info("Would load a checkpoint here, but none found yet.")
return chosen_ckpt
    def load_checkpoint(self, checkpoint, device=None):
        """Loads the specified checkpoint.

        Arguments
        ---------
        checkpoint : Checkpoint
            Checkpoint to load.
        device : torch.device, optional
            Device to load the recoverables' parameters to.
        """
        # Dispatch the per-recoverable load hooks for this checkpoint.
        self._call_load_hooks(checkpoint, device)
def list_checkpoints(self):
"""List all checkpoints in the checkpoints directory.
Returns
-------
list
List of Checkpoint namedtuple (see above).
"""
return self._construct_checkpoint_objects(self._list_checkpoint_dirs())
# NOTE: * in arglist -> keyword only arguments
def delete_checkpoints(
self,
*,
num_to_keep=1,
min_keys=None,
max_keys=None,
importance_keys=[ckpt_recency],
ckpt_predicate=None,
verbosity=logging.INFO,
):
"""Deletes least important checkpoints.
Since there can be many ways to define importance (e.g. lowest WER,
lowest loss), the user should provide a list of sort key functions,
each defining a particular importance order. In essence, each
importance key function extracts one importance metric (higher is more
important). For each of these orders, num_to_keep checkpoints are kept.
However if there is overlap between each orders' preserved checkpoints,
the additional checkpoints are not preserved, so the total number of
preserved checkpoints can be less than::
num_to_keep * len(importance_keys)
Arguments
---------
num_to_keep : int, optional
Number of checkpoints to keep.
Defaults to 10. You choose to keep 0. This deletes all
checkpoints remaining after filtering. Must be >=0
min_keys : list, optional
List of strings representing keys in the meta. The lowest of
these values will be kept, up to num_to_keep.
max_keys : list, optional
List of strings representing keys in the meta. The highest of
these values will be kept, up to num_to_keep.
importance_keys : list, optional
A list of key functions used in sorting (see the sorted built-in).
Each callable defines a sort order and num_to_keep checkpoints are
kept for callable. To be clear, those with the highest key are
kept.
The functions are called with Checkpoint namedtuples
(see above). See also the default (ckpt_recency,
above). The default deletes all but the latest checkpoint.
ckpt_predicate : callable, optional
Use this to exclude some checkpoints from deletion. Before any
sorting, the list of checkpoints is filtered with this predicate.
Only the checkpoints for which ckpt_predicate is True can be
deleted. The function is called with Checkpoint namedtuples
(see above).
verbosity : logging level
Set logging level for this deletion.
Note
----
Must be called with keyword arguments, as a signoff that you
know what you are doing. Deletion is permanent.
"""
if num_to_keep < 0:
raise ValueError("Number of checkpoints to keep must be positive.")
# Build a list of potential deletions and protected checkpoints
potential_deletions = set()
protected_checkpoints = set()
keys = [{"min_key": key} for key in min_keys or []]
keys.extend([{"max_key": key} for key in max_keys or []])
keys.extend([{"importance_key": key} for key in importance_keys])
# Don't consider checkpoints for deletion that don't have a listed key
for key_kwargs in keys:
key_kwargs["ckpt_predicate"] = ckpt_predicate
potential_deletions.update(self.find_checkpoints(**key_kwargs))
protected_checkpoints.update(
self.find_checkpoints(
max_num_checkpoints=num_to_keep, **key_kwargs
)
)
# Delete unprotected checkpoints
for ckpt in potential_deletions:
if ckpt not in protected_checkpoints:
Checkpointer._delete_checkpoint(ckpt, verbosity=verbosity)
    @staticmethod
    def _delete_checkpoint(checkpoint, verbosity=logging.INFO):
        # Internal: permanently removes one checkpoint directory from disk.
        # The sanity check guards against rmtree:ing an arbitrary path.
        if not Checkpointer._is_checkpoint_dir(checkpoint.path):
            raise RuntimeError("Checkpoint does not appear valid for deletion.")
        shutil.rmtree(checkpoint.path)
        logger.log(verbosity, f"Deleted checkpoint in {checkpoint.path}")
    def _call_load_hooks(self, checkpoint, device=None):
        # This internal function finds the correct hook to call for every
        # recoverable, and calls it.
        logger.info(f"Loading a checkpoint from {checkpoint.path}")
        end_of_epoch = checkpoint.meta["end-of-epoch"]
        for name, obj in self.recoverables.items():
            # NOTE: We want the checkpoint namedtuple to have the paramfile
            # paths for each recoverable.
            # In some rare case, the user can e.g. add a path there manually.
            try:
                loadpath = checkpoint.paramfiles[name]
            except KeyError:
                # This recoverable has no saved parameters in the checkpoint.
                if self.allow_partial_load:
                    continue
                elif "dataloader" in name:
                    # Dataloaders are treated as non-critical: warn instead of
                    # failing, so older checkpoints remain loadable.
                    MSG = f"Loading checkpoint from {checkpoint.path}, \
                            but missing a load path for {name}"
                    warnings.warn(MSG, UserWarning)
                    continue
                else:
                    MSG = f"Loading checkpoint from {checkpoint.path}, \
                            but missing a load path for {name}"
                    raise RuntimeError(MSG)
            # First see if object has custom load hook:
            if name in self.custom_load_hooks:
                self.custom_load_hooks[name](
                    obj, loadpath, end_of_epoch, device
                )
                continue
            # Otherwise find the default saver for that type:
            default_hook = get_default_hook(obj, DEFAULT_LOAD_HOOKS)
            if default_hook is not None:
                default_hook(obj, loadpath, end_of_epoch, device)
                continue
            # If we got here, no custom hook or registered default hook exists
            MSG = f"Don't know how to load {type(obj)}. Register default hook \
                    or add custom hook for this object."
            raise RuntimeError(MSG)
def _list_checkpoint_dirs(self):
# This internal method returns a list of individual checkpoint
# directory paths in the top checkpoint directory
return [
x
for x in self.checkpoints_dir.iterdir()
if Checkpointer._is_checkpoint_dir(x)
]
@staticmethod
def _construct_checkpoint_objects(checkpoint_dirs):
# This internal method takes a list of individual checkpoint
# directory paths (as produced by _list_checkpoint_dirs)
checkpoints = []
for ckpt_dir in checkpoint_dirs:
with open(ckpt_dir / METAFNAME) as fi:
meta = yaml.load(fi, Loader=yaml.Loader)
paramfiles = {}
for ckptfile in ckpt_dir.iterdir():
if ckptfile.suffix == PARAMFILE_EXT:
paramfiles[ckptfile.stem] = ckptfile
checkpoints.append(Checkpoint(ckpt_dir, meta, paramfiles))
return checkpoints
@staticmethod
def _is_checkpoint_dir(path):
# This internal method verifies whether a given path points to a
# directory that holds a checkpoint.
path = pathlib.Path(path)
if not path.is_dir():
return False
if not path.name.startswith(CKPT_PREFIX):
return False
return (path / METAFNAME).exists()
def _new_checkpoint_dirpath(self):
# This internal method creates a checkpoint name and returns a path
# to that directory (but does not create the directory!)
t = time.time()
stamp = time.strftime("%Y-%m-%d+%H-%M-%S", time.localtime(t))
suffix_num = 0
while (
self.checkpoints_dir / f"{CKPT_PREFIX}+{stamp}+{suffix_num:02d}"
).exists():
suffix_num += 1
return self.checkpoints_dir / f"{CKPT_PREFIX}+{stamp}+{suffix_num:02d}"
    def _custom_checkpoint_dirpath(self, name):
        # This internal method creates a checkpoint name based on a given
        # custom name and returns a path to that directory (but does not
        # create the directory!)
        # NOTE: unlike _new_checkpoint_dirpath, no uniqueness suffix is
        # appended, so saving twice under the same custom name maps to the
        # same directory.
        return self.checkpoints_dir / f"{CKPT_PREFIX}+{name}"
def _save_checkpoint_metafile(
self, fpath, meta_to_include={}, end_of_epoch=True
):
# This internal method saves the meta information in the given path
meta = {"unixtime": time.time(), "end-of-epoch": end_of_epoch}
meta.update(meta_to_include)
with open(fpath, "w") as fo:
fo.write("# yamllint disable\n")
fo.write(yaml.dump(meta))
return meta
def average_state_dicts(state_dicts):
    """Produces an average state_dict from an iterator over state_dicts.

    Note that at one time, this keeps two of the state_dicts in memory, which
    is the minimum memory requirement.

    Arguments
    ---------
    state_dicts : iterator, list
        The state_dicts to average.

    Returns
    -------
    state_dict
        The averaged state_dict.
    """
    stream = iter(state_dicts)
    try:
        # The first state_dict doubles as the running accumulator.
        accumulator = next(stream)
    except StopIteration:
        raise ValueError("No state dicts to average.")
    num_dicts = 1
    with torch.no_grad():
        # Sum every remaining state_dict into the accumulator.
        for current in stream:
            num_dicts += 1
            for key, value in current.items():
                accumulator[key] += value.data
        # Normalize by the number of summed state_dicts.
        for key, value in accumulator.items():
            accumulator[key] = value.data / float(num_dicts)
    return accumulator
def average_checkpoints(
    checkpoint_list,
    recoverable_name,
    parameter_loader=torch.load,
    averager=average_state_dicts,
    device=None,
):
    """Average parameters from multiple checkpoints.

    Use Checkpointer.find_checkpoints() to get the list of checkpoints to
    average over.
    Averaging parameters from some of the last checkpoints in training has been
    shown to sometimes improve performance.

    The default loader and averager work for standard PyTorch modules.

    Arguments
    ---------
    checkpoint_list : list
        List of checkpoints to average.
    recoverable_name : str
        The name of the recoverable, the parameters of which are loaded and
        averaged.
    parameter_loader : function
        A function which takes a single argument, the path to a parameter file,
        and loads the parameters from that file. By default, torch.load,
        which produces state_dict dictionaries.
    averager : function
        A function which takes an iterator over the parameters from each
        checkpoint, as loaded by parameter_loader, and produces their average.
        Note that the function is called with an iterator, so the length is
        initially unknown; the implementation should simply count the number of
        different parameter sets as they are yielded. See average_state_dicts
        above for an example. It is the default averager, and averages
        state_dicts.
    device : torch.device, optional
        Device to map the loaded parameters to (passed to the loader as
        ``map_location`` when the loader supports it).

    Returns
    -------
    Any
        The output of the averager function.

    Example
    -------
    >>> # Consider this toy Module again:
    >>> class Recoverable(torch.nn.Module):
    ...     def __init__(self, param):
    ...         super().__init__()
    ...         self.param = torch.nn.Parameter(torch.tensor([param]))
    ...     def forward(self, x):
    ...         return x * self.param
    >>> # Now let's make some checkpoints:
    >>> model = Recoverable(1.)
    >>> tempdir = getfixture('tmpdir')
    >>> checkpointer = Checkpointer(tempdir, {"model": model})
    >>> for new_param in range(10):
    ...     model.param.data = torch.tensor([float(new_param)])
    ...     _ = checkpointer.save_checkpoint() # Suppress output with assignment
    >>> # Let's average the 3 latest checkpoints
    >>> # (parameter values 7, 8, 9 -> avg=8)
    >>> ckpt_list = checkpointer.find_checkpoints(max_num_checkpoints = 3)
    >>> averaged_state = average_checkpoints(ckpt_list, "model")
    >>> # Now load that state in the normal way:
    >>> _ = model.load_state_dict(averaged_state) # Suppress output
    >>> model.param.data
    tensor([8.])
    """

    def _load_params(paramfile):
        # BUGFIX: the original wrapped a *generator expression* in
        # try/except TypeError. Creating a generator never calls the loader
        # (loaders run lazily, inside the averager), so the fallback to a
        # loader without ``map_location`` could never trigger. The fallback
        # must happen per file, at actual load time.
        try:
            # Try to map the loaded parameters to the requested device.
            return parameter_loader(paramfile, map_location=device)
        except TypeError:
            # Loader does not accept ``map_location`` (not torch-load-like).
            return parameter_loader(paramfile)

    parameter_iterator = (
        _load_params(ckpt.paramfiles[recoverable_name])
        for ckpt in checkpoint_list
    )
    return averager(parameter_iterator)
| 45,420 | 36.850833 | 88 | py |
speechbrain | speechbrain-main/speechbrain/utils/profiling.py | """Polymorphic decorators to handle PyTorch profiling and benchmarking.
Author:
* Andreas Nautsch 2022
"""
import numpy as np
from copy import deepcopy
from torch import profiler
from functools import wraps
from typing import Any, Callable, Iterable, Optional
# from typing import List
# from itertools import chain
"""
from torch.autograd.profiler_util import ( # pytorch v1.10.1
EventList,
FunctionEvent,
_format_time,
_format_memory,
)
"""
def set_profiler_attr(func: object, set_attr: str, handler: Callable):
    """Sets handler for profiler: scheduler or trace export.

    Arguments
    ---------
    func : object
        None (to get the handler back), a callable (to be decorated), or an
        object assumed to be a ``speechbrain.core.Brain`` (edited in place).
    set_attr : str
        Which profiler attribute to set: "on_trace_ready" or "schedule".
    handler : callable
        The handler to install under ``set_attr``.
    """
    assert set_attr in [
        "on_trace_ready",
        "schedule",
    ], "Needs to be a callable profiler attribute."

    if (
        func is None
    ):  # Polymorph: not used as decorator; func is used as e.g.: trace_export()
        return handler
    elif callable(
        func
    ):  # Polymorph: decorates a decorator of function/class constructor

        @wraps(func)
        def wrapper(*args, **kwargs):
            """Wrapper implementation."""
            if "__call__" not in dir(
                func
            ):  # Decorator for class constructor (directly)
                result = func(*args, **kwargs)
                setattr(result.profiler, set_attr, handler)
                return result  # not tested
            else:  # Return as additional argument.
                kwargs[set_attr] = handler
                return func(*args, **kwargs)

        return wrapper
    else:  # Polymorph: func is assumed to be an instance of speechbrain.core.Brain
        # No return: in-place edit
        if hasattr(func, "profiler"):
            # BUGFIX: the original tested ``func.profiler is profiler.profile``
            # - instance identity against the class object - which is always
            # False for an instance; an isinstance check was intended.
            if isinstance(func.profiler, profiler.profile):
                setattr(func.profiler, set_attr, handler)
def schedule(
    func: Optional[object] = None,
    wait: int = 2,
    warmup: int = 2,
    active: int = 2,
    repeat: int = 1,
    skip_first: int = 0,
):
    """Wrapper to create a ```torch.profiler.schedule``` (sets default parameters for warm-up).
    """
    # Curious which action a default scheduler suggests at which
    # profiler.step() ?  [torch_scheduler(x) for x in range(10)] gives:
    #   00-01: ProfilerAction.NONE
    #   02-03: ProfilerAction.WARMUP
    #   04:    ProfilerAction.RECORD
    #   05:    ProfilerAction.RECORD_AND_SAVE
    #   06-09: ProfilerAction.NONE
    torch_scheduler = profiler.schedule(
        wait=wait,
        warmup=warmup,
        active=active,
        repeat=repeat,
        skip_first=skip_first,
    )
    # Install the scheduler on func (or return it directly when func is None).
    return set_profiler_attr(
        func=func, set_attr="schedule", handler=torch_scheduler
    )
def export(
    func: Optional[object] = None,
    dir_name: str = "./log/",
    worker_name: Optional[str] = None,
    use_gzip: bool = False,
):
    """Exports current and aggregated traces for:
    - Chrome tensorboard
    - FlameGraph
    (and sets default parameters for log file folder/filenames).
    """
    import os
    import socket
    import time

    # Chrome export (default handler); inspired the log_file() function below.
    tensorboard_handler = profiler.tensorboard_trace_handler(
        dir_name=dir_name, worker_name=worker_name, use_gzip=use_gzip
    )

    def trace_handler(prof: profiler.profile):
        """trace_handler implementation."""

        def log_file(export_chrome: bool = False, info: str = ""):
            """Implementation of logging file."""
            nonlocal worker_name
            if not worker_name:
                # Default worker id: hostname + process id.
                worker_name = "{}_{}".format(
                    socket.gethostname(), str(os.getpid())
                )
            if export_chrome:
                ext = "pt.trace.json"
            else:
                ext = "txt"
            if info:
                # e.g. "<worker>.<millis>_aggregated.<ext>"
                pattern = "{{}}.{{}}_{}.{{}}".format(info)
            else:
                pattern = "{}.{}.{}"
            file_name = pattern.format(
                worker_name, int(time.time() * 1000), ext
            )
            if use_gzip:
                file_name = file_name + ".gz"
            return os.path.join(dir_name, file_name)

        def export_stacks(log_path: str, metric: str):
            """Implementation of export_stacks."""
            # NOTE(review): the log_path parameter is ignored here; a fresh
            # log_file() is generated instead, so the "aggregated" info from
            # export_traces never reaches the filename - looks like a bug;
            # confirm intent before changing.
            prof.export_stacks(log_file(), metric)

        def export_traces(aggregated_traces: bool = False):
            """Implementation of export_traces."""
            if not aggregated_traces:
                # Chrome export (also checks for dir_name existing).
                tensorboard_handler(prof)

            # FlameGraph exports.
            if prof.with_stack or aggregated_traces:
                log_path = (
                    log_file(info="aggregated")
                    if aggregated_traces
                    else log_file()
                )
                export_stacks(log_path=log_path, metric="self_cpu_time_total")
                if prof.profiler is not None:
                    if prof.profiler.use_cuda:
                        export_stacks(
                            log_path=log_path, metric="self_cuda_time_total"
                        )

        # export last logged trace - skip if events are empty (e.g., profiler created w/o any torch.nn call)
        if prof.events():
            export_traces()

    return set_profiler_attr(
        func=func, set_attr="on_trace_ready", handler=trace_handler
    )
def prepare_profiler_for_brain(prof: profiler.profile):
    """Sets up a ``torch.profiler.profile`` to also (a) aggregate traces issued from various interactions
    with ``speechbrain.core.Brain``:s and (b) hooks a method to ``merge_traces``.

    The profiler is augmented in place (its ``start``/``stop`` methods are
    wrapped) and also returned for convenience.
    """
    # Brain functions will be called independently -> traces will be segregated, so we aggregate them.
    prof.speechbrain_event_traces = list()

    # Preparing the profiler to be re-used during Brain:s' lifecycles.
    def hook_profiler_stop(stop: Callable):
        """Implementation of hook_profiler_stop."""

        @wraps(stop)
        def stop_wrapper():
            """Implementation of stop_wrapper."""
            kineto_profiler = prof.profiler
            if kineto_profiler is not None:
                stop_result = stop()
                if (
                    prof.events()
                ):  # kineto events are not aggregatable (sticking with parsed kineto events)
                    # see: torch.autograd.profiler.__exit__
                    kineto_events = kineto_profiler._parse_kineto_results(
                        kineto_profiler.kineto_results
                    )
                    # add to trace record (deep copy: the kineto profiler's
                    # buffers are reused on restart)
                    prof.speechbrain_event_traces.append(
                        deepcopy(kineto_events)
                    )
                # set flag to disable the profiler
                kineto_profiler.enabled = False
                return stop_result
            else:
                return stop()  # will be: None

        return stop_wrapper

    # Preparing the profiler to be re-started during Brain:s' lifecycles.
    def hook_profiler_start(start: Callable):
        """Implementation of hook_profiler_start."""

        @wraps(start)
        def start_wrapper():
            """Implementation of start_wrapper."""
            # Reset step counting and scheduler state before restarting.
            prof.step_num = 0
            prof.current_action = prof.schedule(prof.step_num)
            kineto_profiler = prof.profiler
            if kineto_profiler is not None:
                # check flag if profiler is disabled (i.e. as of stop_wrapper); prevents entering its __init__ twice
                if not kineto_profiler.enabled:
                    # reset kineto profiler (otherwise, one obtains the same traces over & over again)
                    kineto_profiler.enabled = True
            return start()

        return start_wrapper

    """
    # It's currently designed as hiding an Easter Egg.
    def merge_traces():
        " ""Implementation of merge_traces." ""
        # Alternative re-design quirks: make trace aggregator a GLOBAL -or- create another profiler class.
        trace_aggregator = "speechbrain_event_traces"
        if prof.profiler is not None:
            if trace_aggregator in dir(prof) and prof.events():
                # clear all assigned parents/children (from previous mergers & trees)
                for trace in getattr(prof, trace_aggregator):
                    for event in trace:
                        event.cpu_parent = None
                        event.cpu_children: List[FunctionEvent] = []
                # assemble new list
                merged_events = EventList(
                    list(chain.from_iterable(getattr(prof, trace_aggregator))),
                    use_cuda=prof.profiler.use_cuda,
                    profile_memory=prof.profiler.profile_memory,
                    with_flops=prof.profiler.with_flops,
                )
                merged_events._build_tree()
                return merged_events
            else:  # not tested
                return prof.events()
        else:
            return []
    """

    # Augment torch's profiler.
    setattr(prof, "start", hook_profiler_start(getattr(prof, "start")))
    setattr(prof, "stop", hook_profiler_stop(getattr(prof, "stop")))
    # setattr(prof, "merge_traces", merge_traces)

    # Return so it can be readily assigned elsewhere :)
    return prof
def hook_brain_methods(
    func: object,
    prof: profiler.profile,
    class_hooks: Optional[Iterable[str]] = None,
):
    """For instances of ``speechbrain.core.Brain``, critical functions are hooked to profiler start/stop methods.
    """

    def profiled(method: Callable):
        """Wraps one Brain method so the profiler runs around its call."""

        @wraps(method)
        def inner(*method_args, **method_kwargs):
            """Starts the profiler, runs the method, stops the profiler."""
            # The profiler stopped after __init__, so we need to get it up
            # again and stop it manually afterwards as well.
            prof.start()
            outcome = method(*method_args, **method_kwargs)
            prof.stop()
            return outcome

        return inner

    # Hook the crucial Brain methods.
    if class_hooks is None:
        class_hooks = ["fit", "evaluate"]
    for method_name in class_hooks:
        if method_name in dir(func):  # func is an instance of Brain
            setattr(func, method_name, profiled(getattr(func, method_name)))
def profile(
    func: Optional[object] = None,
    class_hooks: Optional[Iterable[str]] = None,
    activities: Optional[Iterable[profiler.ProfilerActivity]] = None,
    schedule: Optional[Callable[[int], profiler.ProfilerAction]] = None,
    on_trace_ready: Optional[Callable[..., Any]] = None,
    record_shapes: bool = False,
    profile_memory: bool = False,
    with_stack: bool = False,
    with_flops: bool = False,
    with_modules: bool = False,
) -> object:
    """Wrapper to create a PyTorch profiler to benchmark training/inference of speechbrain.core.Brain instances.
    See ``torch.profiler.profile`` documentation for details (brief summary below).

    Arguments
    ---------
    func : object
        ``speechbrain.core.Brain``:s or a (train/eval) function to be profiled.
    class_hooks : iterable
        List of method/function names of ``speechbrain.core.Brain``:s that should be profiled also.
        Otherwise, only the __init__ constructor will be profiled when decorating a Brain class.
        Default: ``['fit', 'evaluate']`` for classes, and ``None`` for functions.
    activities : iterable
        List of activity groups.
        Default: ProfilerActivity.CPU and (when available) ProfilerActivity.CUDA.
        (Default value should be ok for most cases.)
    schedule : callable
        Waits a specified amount of steps for PyTorch to warm-up; see the above ``schedule`` decorator.
        Default: ``ProfilerAction.RECORD`` (immediately starts recording).
    on_trace_ready : callable
        Specifies what benchmark record should be saved (after each scheduled step);
        see above ``trace_handler`` decorator.
        Default: ``None`` (pick up collected reporting once profiling ended, but not details per step).
    record_shapes : bool
        Save input shapes of operations (enables to group benchmark data by after profiling).
        Default: ``False``.
    profile_memory : bool
        Track tensor memory allocation/deallocation.
        Default: ``False``.
    with_stack : bool
        Record source information (file and line number).
        Default: ``False``.
    with_flops: bool
        Estimate the number of FLOPs.
        Default: ``False``.
    with_modules: bool
        Record module hierarchy (including function names)
        Default: ``False``

    Example
    -------
    >>> import torch
    >>> @profile
    ... def run(x : torch.Tensor):
    ...     y = x ** 2
    ...     z = y ** 3
    ...     return y.backward()  # y.backward() returns None --> return value is substituted with profiler
    >>> data = torch.randn((1, 1), requires_grad=True)
    >>> prof = run(data)
    >>> out = [len(prof.events()), len(prof.key_averages()), prof.profiler.total_average().count]
    """
    if func is None:  # return a profiler; not tested
        return prepare_profiler_for_brain(
            profiler.profile(
                activities=activities,
                schedule=schedule,
                on_trace_ready=on_trace_ready,
                record_shapes=record_shapes,
                profile_memory=profile_memory,
                with_stack=with_stack,
                with_flops=with_flops,
                with_modules=with_modules,
            )
        )

    # Polymorph: func is pretrained or an instance of Brain (assumed case)
    if hasattr(func, "HPARAMS_NEEDED") or not callable(func):
        with profiler.profile(
            activities=activities,
            schedule=schedule,  # scheduler needs to be set directly (fetching is here not possible as for wrappers)
            on_trace_ready=on_trace_ready,
            record_shapes=record_shapes,
            profile_memory=profile_memory,
            with_stack=with_stack,
            with_flops=with_flops,
            with_modules=with_modules,
        ) as prof:
            # Attach the (augmented) profiler and hook fit/evaluate in place.
            func.profiler = prepare_profiler_for_brain(prof)
            hook_brain_methods(func=func, class_hooks=class_hooks, prof=prof)
            return func  # no need to return anything; all done in-place; but if needs to be readily assigned elsewhere
    else:
        # callable(func) - polymorph: __init__ Brain constructor -or- function to be wrapped
        @wraps(func)
        def wrapper(*args, **kwargs):
            """Implementation of the wrapper."""
            # Binding variables.
            nonlocal class_hooks
            nonlocal schedule
            nonlocal on_trace_ready

            # Check if there's a nested decorators.
            if schedule is None:
                if "schedule" in kwargs:
                    schedule = kwargs.pop("schedule")
            if on_trace_ready is None:
                if "on_trace_ready" in kwargs:
                    on_trace_ready = kwargs.pop("on_trace_ready")

            with profiler.profile(
                activities=activities,
                schedule=schedule,
                on_trace_ready=on_trace_ready,
                record_shapes=record_shapes,
                profile_memory=profile_memory,
                with_stack=with_stack,
                with_flops=with_flops,
                with_modules=with_modules,
            ) as prof:
                # Preserves profiler as class attribute if func is not a function (implies: speechbrain.core.Brain).
                if "__call__" not in dir(func):
                    # Passing the profiler to Bain:s' __init__ constructor as an additional argument.
                    kwargs["profiler"] = prepare_profiler_for_brain(prof)
                    hook_brain_methods(
                        func=func, class_hooks=class_hooks, prof=prof
                    )

                # Run & trace to benchmark.
                result = func(*args, **kwargs)

                # Prof is about to be lost at return.
                if "__call__" in dir(func):
                    if result is None:
                        return prof  # for void function, simply return profiling data
                    else:  # not tested - returns both
                        return result, prof

                return result

        return wrapper
def profile_analyst(
    func: Optional[object] = None, class_hooks: Optional[Iterable[str]] = None,
):  # to diverge, define parameters from scratch: @schedule; @export & @profile
    """Pre-configured profiling for a fully detailed benchmark - analyst perspective.

    Creating this analyst view will create overheads (disabling some PyTorch optimisations);
    use @profile_optimiser to take benefits of optimisations and further optimise your modules, accordingly.
    """
    # Everything switched on: maximal detail, maximal overhead.
    wrapped_func = profile(
        func,
        schedule=schedule(),
        on_trace_ready=None,
        record_shapes=True,
        profile_memory=True,
        with_stack=True,
        with_flops=True,  # only for: matrix multiplication & 2D conv; see: torch.autograd.profiler.profile
        with_modules=True,
        class_hooks=class_hooks,
    )

    # Polymorph: func is pretrained or an instance of Brain (assumed case)
    if hasattr(func, "HPARAMS_NEEDED") or not callable(func):
        return wrapped_func

    # callable(func) - polymorph: __init__ Brain constructor -or- function to be wrapped
    @wraps(func)
    def wrapper(*args, **kwargs):
        """Implementation of the wrapper."""
        return wrapped_func(*args, **kwargs)

    return wrapper
def profile_optimiser(
    func: Optional[object] = None, class_hooks: Optional[Iterable[str]] = None,
):  # to diverge, define parameters from scratch: @schedule; @export & @profile
    """Pre-configured profiling for a detailed benchmark (better suitable for speed-optimisation than @profile_analyst).
    """
    # Memory + module info only; shapes/stack/flops disabled to avoid overheads.
    wrapped_func = profile(
        func,
        schedule=schedule(),
        on_trace_ready=None,
        record_shapes=False,  # avoid: overheads
        profile_memory=True,
        with_stack=False,  # avoid: overheads
        with_flops=False,  # only for: matrix multiplication & 2D conv; see: torch.autograd.profiler.profile
        with_modules=True,
        class_hooks=class_hooks,
    )

    # Polymorph: func is pretrained or an instance of Brain (assumed case)
    if hasattr(func, "HPARAMS_NEEDED") or not callable(func):
        return wrapped_func

    # callable(func) - polymorph: __init__ Brain constructor -or- function to be wrapped
    @wraps(func)
    def wrapper(*args, **kwargs):
        """Implementation of the wrapper."""
        return wrapped_func(*args, **kwargs)

    return wrapper
def profile_report(  # not part of unittests
    func: Optional[object] = None, class_hooks: Optional[Iterable[str]] = None,
):
    """Pre-configured profiling for a reporting benchmark (changed scheduler to @profile_optimiser).

    Same low-overhead settings as ``profile_optimiser``, but with a fixed
    schedule (wait=1, warmup=2, active=7, repeat=1) so reports average over
    a known number of active steps.

    Arguments
    ---------
    func : object
        Either a callable to wrap, or an object to attach a profiler to
        (e.g. something exposing ``HPARAMS_NEEDED``, assumed pretrained/Brain).
    class_hooks : iterable of str
        Forwarded to ``profile``.
    """
    profiler_kwargs = {
        "schedule": schedule(
            wait=1, warmup=2, active=7, repeat=1, skip_first=0,
        ),  # gives #active, avg:ed of #repeat
        "on_trace_ready": None,
        "record_shapes": False,  # avoid: overheads
        "profile_memory": True,
        "with_stack": False,  # avoid: overheads
        "with_flops": False,  # only for: matrix multiplication & 2D conv; see: torch.autograd.profiler.profile
        "with_modules": True,
        "class_hooks": class_hooks,
    }
    wrapped_func = profile(func, **profiler_kwargs)
    # Polymorph: func is pretrained or an instance of Brain (assumed case)
    if hasattr(func, "HPARAMS_NEEDED") or not callable(func):
        return wrapped_func
    else:  # callable(func) - polymorph: __init__ Brain constructor -or- function to be wrapped
        @wraps(func)
        def wrapper(*args, **kwargs):
            """Implementation of the wrapper."""
            return wrapped_func(*args, **kwargs)
        return wrapper
"""
def events_diff(
a: EventList, b: EventList, filter_by: str = "count",
):
" ""Takes two ``EventList``:s in, filters events of equal value (default: by the count of events).
The purpose of the results of this diff are for visualisation only (to see the difference between implementations).
" ""
# Making copies from the originals instead of simply adding the diff directly might be slower (preserves structure).
aa = deepcopy(a)
bb = deepcopy(b)
# Maps: function name -> (call count, position) // the position helps to remove alike call numbers later on.
a_filter = dict(
[(i.key, (getattr(i, filter_by), p)) for p, i in enumerate(aa)]
)
b_filter = dict(
[(i.key, (getattr(i, filter_by), p)) for p, i in enumerate(bb)]
)
# Figuring our which ones to delete.
a_to_remove = list([])
b_to_remove = list([])
for key in a_filter.keys():
if key in b_filter.keys():
# Equal values are filtered.
if a_filter[key][0] == b_filter[key][0]:
# Enlist position to be removed.
a_to_remove.append(a_filter[key][1])
b_to_remove.append(b_filter[key][1])
# Since EventLists are lists: removing items from the back.
if a_to_remove:
a_to_remove.sort(reverse=True)
for k in a_to_remove:
aa.remove(aa[k])
if b_to_remove:
b_to_remove.sort(reverse=True)
for k in b_to_remove:
bb.remove(bb[k])
return aa, bb
"""
def report_time(events: object, verbose=False, upper_control_limit=False):
    """Summary reporting of total time - see: torch.autograd.profiler_util

    Arguments
    ---------
    events : object
        Either a ``profiler.profile`` instance or an object with a
        ``profiler`` attribute (assumed ``speechbrain.core.Brain``).
    verbose : bool
        Currently inert: the printing code below is disabled (it sits inside
        a string literal), so nothing is printed regardless of this flag.
    upper_control_limit : bool
        If True, report mean + 3*std over the top-level "ProfilerStep*"
        events instead of the total averages.

    Returns
    -------
    tuple
        ``(cpu_time, cuda_time)``.
    """
    # Aggregate CPU & CUDA time.
    """
    if isinstance(events, FunctionEvent):
        function_events = events
    elif
    """
    if isinstance(events, profiler.profile):
        function_events = events.events()
    elif hasattr(events, "profiler"): # assumes speechbrain.core.Brain
        function_events = events.profiler.events()
    else:
        raise TypeError(
            "Expected a FunctionEvent; profiler.profile, or a SpeechBrain."
        )
    if upper_control_limit:
        # discerns top-level event (among others) aten:zeros which is in the avg range of 10-20ms on laptop CPU
        # NOTE(review): if no "ProfilerStep*" events exist, these arrays are
        # empty and mean()/std() yield nan — confirm callers guard this.
        cpu_data = np.array(
            [e.cpu_time for e in function_events if e.key == "ProfilerStep*"]
        )
        cuda_data = np.array(
            [e.cuda_time for e in function_events if e.key == "ProfilerStep*"]
        )
        cpu_time = cpu_data.mean() + 3 * cpu_data.std()
        cuda_time = cuda_data.mean() + 3 * cuda_data.std()
    else:
        total = function_events.total_average()
        cpu_time = total.self_cpu_time_total
        cuda_time = total.self_cuda_time_total
    """
    if verbose:
        print("CPU time: {}".format(_format_time(cpu_time)))
        if cuda_time > 0:
            print("CUDA time: {}".format(_format_time(cuda_time)))
    """
    return cpu_time, cuda_time
def report_memory(handler: object, verbose=False):
    """Summary reporting of peak memory usage - see: torch.autograd.profiler_util

    (Docstring fixed: this reports memory, not time.)

    Arguments
    ---------
    handler : object
        Either a ``profiler.profile`` instance or an object with a
        ``profiler`` attribute (assumed ``speechbrain.core.Brain``).
    verbose : bool
        Currently inert: the printing code below is disabled (it sits inside
        a string literal), so nothing is printed regardless of this flag.

    Returns
    -------
    tuple
        ``(cpu_mem, cuda_mem)`` peak memory over leaf events plus their
        parent chain.
    """
    # Aggregate peak CPU & CUDA memory.
    """
    if isinstance(handler, FunctionEvent):
        events = handler
    elif
    """
    if isinstance(handler, profiler.profile):
        events = handler.events()
    elif hasattr(handler, "profiler"): # assumes speechbrain.core.Brain
        events = handler.profiler.events()
    else:
        raise TypeError(
            "Expected a FunctionEvent; profiler.profile, or a SpeechBrain."
        )
    """memory allocation during each time step is of relevance, e.g. for visualisation - time intensive for lots events
    mem_times = np.unique(
        [[x.time_range.start, x.time_range.end] for x in events]
    )
    cpu_memory = np.zeros_like(mem_times)
    cuda_memory = np.zeros_like(mem_times)
    for x in events:
        idx = (x.time_range.start <= mem_times) & (
            x.time_range.end >= mem_times
        )
        cpu_memory[idx] += x.cpu_memory_usage
        cuda_memory[idx] += x.cuda_memory_usage
    # variable names instead of labeling pandas' columns
    cpu_mem = np.max(cpu_memory)
    cuda_mem = np.max(cuda_memory)
    """
    # Peak memory = max over leaf events of (own usage + all ancestors' usage).
    cpu_mem = cuda_mem = 0
    for e in events:
        if len(e.cpu_children) == 0:
            leaf_cpu_mem = e.cpu_memory_usage
            leaf_cuda_mem = e.cuda_memory_usage
            parent = e.cpu_parent
            while parent is not None:
                leaf_cpu_mem += parent.cpu_memory_usage
                leaf_cuda_mem += parent.cuda_memory_usage
                parent = parent.cpu_parent
            if leaf_cpu_mem > cpu_mem:
                cpu_mem = leaf_cpu_mem
            if leaf_cuda_mem > cuda_mem:
                cuda_mem = leaf_cuda_mem
    """
    if verbose:
        print("Peak CPU Mem: {}".format(_format_memory(cpu_mem)))
        if cuda_mem > 0:
            print("Peak CUDA Mem: {}".format(_format_memory(cuda_mem)))
    """
    return cpu_mem, cuda_mem
| 25,566 | 36.653903 | 120 | py |
speechbrain | speechbrain-main/speechbrain/utils/data_utils.py | """This library gathers utilities for data io operation.
Authors
* Mirco Ravanelli 2020
* Aku Rouhe 2020
* Samuele Cornell 2020
"""
import os
import re
import csv
import shutil
import urllib.request
import collections.abc
import torch
import tqdm
import pathlib
import speechbrain as sb
def undo_padding(batch, lengths):
    """Convert a padded batch back to a list of per-sentence Python lists.

    Arguments
    ---------
    batch : tensor
        Batch of sentences gathered in a batch, shape ``[batch, time, ...]``.
    lengths : tensor
        Relative length (in (0, 1]) of each sentence in the batch.

    Returns
    -------
    list
        One truncated Python list per sentence.

    Example
    -------
    >>> batch=torch.rand([4,100])
    >>> lengths=torch.tensor([0.5,0.6,0.7,1.0])
    >>> snt_list=undo_padding(batch, lengths)
    >>> len(snt_list)
    4
    """
    time_steps = batch.shape[1]
    return [
        seq.narrow(0, 0, int(torch.round(rel_len * time_steps))).tolist()
        for seq, rel_len in zip(batch, lengths)
    ]
def get_all_files(
    dirName, match_and=None, match_or=None, exclude_and=None, exclude_or=None
):
    """Returns a list of files found within a folder.

    Different options can be used to restrict the search to some specific
    patterns.

    Arguments
    ---------
    dirName : str
        The directory to search.
    match_and : list
        A list that contains patterns to match. The file is
        returned if it matches all the entries in `match_and`.
    match_or : list
        A list that contains patterns to match. The file is
        returned if it matches one or more of the entries in `match_or`.
    exclude_and : list
        A list that contains patterns to match. The file is
        excluded only if it matches all the entries in `exclude_and`.
    exclude_or : list
        A list that contains patterns to match. The file is
        excluded if it matches at least one of the entries in `exclude_or`.

    Example
    -------
    >>> get_all_files('tests/samples/RIRs', match_and=['3.wav'])
    ['tests/samples/RIRs/rir3.wav']
    """

    def _is_selected(path):
        """Applies all include/exclude filters to a single file path."""
        if match_and is not None and not all(p in path for p in match_and):
            return False
        if match_or is not None and not any(p in path for p in match_or):
            return False
        # Bugfix: the exclusion flags are now evaluated independently for each
        # file. Previously `exclude_and_entry` was never reset inside the loop,
        # so a single match excluded every file listed after it.
        if exclude_and is not None and all(p in path for p in exclude_and):
            return False
        if exclude_or is not None and any(p in path for p in exclude_or):
            return False
        return True

    allFiles = []
    # Iterate over all the entries
    for entry in os.listdir(dirName):
        fullPath = os.path.join(dirName, entry)
        # If entry is a directory then recurse with the same filters
        if os.path.isdir(fullPath):
            allFiles += get_all_files(
                fullPath,
                match_and=match_and,
                match_or=match_or,
                exclude_and=exclude_and,
                exclude_or=exclude_or,
            )
        elif _is_selected(fullPath):
            allFiles.append(fullPath)
    return allFiles
def get_list_from_csv(csvfile, field, delimiter=",", skipinitialspace=True):
    """Extracts one column of a csv file as a Python list.

    Arguments
    ---------
    csvfile : path
        Path to the csv file.
    field : str
        Name of the column whose entries form the list.
    delimiter : str
        Delimiter of the csv file.
    skipinitialspace : bool
        Set it to true to skip initial spaces in the entries.
    """
    with open(csvfile, newline="") as stream:
        rows = csv.DictReader(
            stream, delimiter=delimiter, skipinitialspace=skipinitialspace
        )
        return [row[field] for row in rows]
def split_list(seq, num):
    """Partition ``seq`` into ``num`` chunks of (approximately) equal size.

    Arguments
    ---------
    seq : iterable
        The input list, to be split.
    num : int
        The number of chunks to produce.

    Example
    -------
    >>> split_list([1, 2, 3, 4, 5, 6, 7, 8, 9], 4)
    [[1, 2], [3, 4], [5, 6], [7, 8, 9]]
    """
    # Accumulate fractional chunk boundaries, then slice once per chunk.
    chunk_size = len(seq) / float(num)
    bounds = []
    cursor = 0.0
    while cursor < len(seq):
        bounds.append((int(cursor), int(cursor + chunk_size)))
        cursor += chunk_size
    return [seq[start:stop] for start, stop in bounds]
def recursive_items(dictionary):
    """Yield each (key, value) of a nested dictionary.

    Arguments
    ---------
    dictionary : dict
        The nested dictionary to list.

    Yields
    ------
    `(key, value)` tuples from the dictionary.

    Example
    -------
    >>> rec_dict={'lev1': {'lev2': {'lev3': 'current_val'}}}
    >>> [item for item in recursive_items(rec_dict)]
    [('lev3', 'current_val')]
    """
    for key, value in dictionary.items():
        # Exact `dict` check (not isinstance) so dict subclasses are leaves.
        if type(value) is not dict:
            yield (key, value)
        else:
            yield from recursive_items(value)
def recursive_update(d, u, must_match=False):
    """Similar function to `dict.update`, but for a nested `dict`.

    From: https://stackoverflow.com/a/3233356

    If you have to a nested mapping structure, for example:

        {"a": 1, "b": {"c": 2}}

    Say you want to update the above structure with:

        {"b": {"d": 3}}

    This function will produce:

        {"a": 1, "b": {"c": 2, "d": 3}}

    Instead of:

        {"a": 1, "b": {"d": 3}}

    Arguments
    ---------
    d : dict
        Mapping to be updated.
    u : dict
        Mapping to update with.
    must_match : bool
        Whether to throw an error if the key in `u` does not exist in `d`.
        The check is applied recursively, at every nesting level.

    Example
    -------
    >>> d = {'a': 1, 'b': {'c': 2}}
    >>> recursive_update(d, {'b': {'d': 3}})
    >>> d
    {'a': 1, 'b': {'c': 2, 'd': 3}}
    """
    # TODO: Consider cases where u has branch off k, but d does not.
    # e.g. d = {"a":1}, u = {"a": {"b": 2 }}
    for k, v in u.items():
        if isinstance(v, collections.abc.Mapping) and k in d:
            # Bugfix: forward `must_match` so the existence check also
            # applies inside nested mappings (it was silently dropped).
            recursive_update(d[k], v, must_match=must_match)
        elif must_match and k not in d:
            raise KeyError(
                f"Override '{k}' not found in: {[key for key in d.keys()]}"
            )
        else:
            d[k] = v
def download_file(
    source, dest, unpack=False, dest_unpack=None, replace_existing=False
):
    """Downloads the file from the given source and saves it in the given
    destination path.

    Only the main DDP process downloads; all processes synchronize on a
    barrier before and after.

    Arguments
    ---------
    source : path or url
        Path of the source file. If the source is an URL, it downloads it from
        the web.
    dest : path
        Destination path.
    unpack : bool
        If True, it unpacks the data in the dest folder.
    dest_unpack : path
        Folder to unpack into; defaults to the directory containing ``dest``.
    replace_existing : bool
        If True, replaces the existing files.
    """
    try:
        # Make sure all processes reach here before the main process creates dest_dir.
        sb.utils.distributed.ddp_barrier()
        if sb.utils.distributed.if_main_process():

            class DownloadProgressBar(tqdm.tqdm):
                """ DownloadProgressBar class."""

                def update_to(self, b=1, bsize=1, tsize=None):
                    """Needed to support multigpu training."""
                    if tsize is not None:
                        self.total = tsize
                    self.update(b * bsize - self.n)

            # Create the destination directory if it doesn't exist
            dest_dir = pathlib.Path(dest).resolve().parent
            dest_dir.mkdir(parents=True, exist_ok=True)
            if "http" not in source:
                # Local source: plain copy instead of a download.
                shutil.copyfile(source, dest)
            elif not os.path.isfile(dest) or (
                os.path.isfile(dest) and replace_existing
            ):
                print(f"Downloading {source} to {dest}")
                with DownloadProgressBar(
                    unit="B",
                    unit_scale=True,
                    miniters=1,
                    desc=source.split("/")[-1],
                ) as t:
                    urllib.request.urlretrieve(
                        source, filename=dest, reporthook=t.update_to
                    )
            else:
                print(f"{dest} exists. Skipping download")

            # Unpack if necessary
            if unpack:
                if dest_unpack is None:
                    dest_unpack = os.path.dirname(dest)
                print(f"Extracting {dest} to {dest_unpack}")
                shutil.unpack_archive(dest, dest_unpack)
    finally:
        # Non-main processes wait here until the download is complete.
        sb.utils.distributed.ddp_barrier()
def pad_right_to(
    tensor: torch.Tensor, target_shape: (list, tuple), mode="constant", value=0,
):
    """
    Pads ``tensor`` on the right of every dimension until it reaches
    ``target_shape``.

    Parameters
    ----------
    tensor : input torch tensor
        Input tensor whose dimension we need to pad.
    target_shape : (list, tuple)
        Target shape we want for the target tensor its len must be equal to tensor.ndim
    mode : str
        Pad mode, please refer to torch.nn.functional.pad documentation.
    value : float
        Pad value, please refer to torch.nn.functional.pad documentation.

    Returns
    -------
    tensor : torch.Tensor
        Padded tensor.
    valid_vals : list
        List containing proportion for each dimension of original, non-padded values.
    """
    assert len(target_shape) == tensor.ndim
    num_dims = len(target_shape)
    pads = []  # absolute padding amounts, ordered as F.pad expects (last dim first)
    valid_vals = []  # fraction of valid (non-padded) entries, first dim first
    for offset in range(num_dims):
        back = num_dims - 1 - offset
        assert (
            target_shape[back] >= tensor.shape[back]
        ), "Target shape must be >= original shape for every dim"
        pads.extend([0, target_shape[back] - tensor.shape[back]])
        valid_vals.append(tensor.shape[offset] / target_shape[offset])
    padded = torch.nn.functional.pad(tensor, pads, mode=mode, value=value)
    return padded, valid_vals
def batch_pad_right(tensors: list, mode="constant", value=0):
    """Batches a list of torch tensors together by right-padding each
    dimension so that all of them share the same shape.

    Parameters
    ----------
    tensors : list
        List of tensor we wish to pad together.
    mode : str
        Padding mode see torch.nn.functional.pad documentation.
    value : float
        Padding value see torch.nn.functional.pad documentation.

    Returns
    -------
    tensor : torch.Tensor
        Padded tensor (stacked, with a leading batch dimension).
    valid_vals : torch.Tensor
        Proportion of original, non-padded values along the first dimension
        of each input tensor.
    """
    if not len(tensors):
        raise IndexError("Tensors list must not be empty")
    if len(tensors) == 1:
        # A single tensor only needs a batch dimension, no padding at all.
        return tensors[0].unsqueeze(0), torch.tensor([1.0])
    ref_ndim = tensors[0].ndim
    if not all(t.ndim == ref_ndim for t in tensors[1:]):
        raise IndexError("All tensors must have same number of dimensions")
    # FIXME we limit the support here: we allow padding of only the first dimension
    # need to remove this when feat extraction is updated to handle multichannel.
    max_shape = []
    for dim in range(ref_ndim):
        dim_sizes = [t.shape[dim] for t in tensors]
        if dim != 0 and any(size != dim_sizes[0] for size in dim_sizes[1:]):
            raise EnvironmentError(
                "Tensors should have same dimensions except for the first one"
            )
        max_shape.append(max(dim_sizes))
    padded_tensors = []
    valid_fractions = []
    for t in tensors:
        # pad_right_to reports one fraction per dim; only dim 0 varies here.
        padded, valid_percent = pad_right_to(t, max_shape, mode=mode, value=value)
        padded_tensors.append(padded)
        valid_fractions.append(valid_percent[0])
    return torch.stack(padded_tensors), torch.tensor(valid_fractions)
def split_by_whitespace(text):
    """Split ``text`` on runs of whitespace (a functional form of ``str.split``)."""
    return str.split(text)
def recursive_to(data, *args, **kwargs):
    """Moves data to device, or other type, and handles containers.

    Very similar to torch.utils.data._utils.pin_memory.pin_memory,
    but applies .to() instead.
    """
    # The order of these checks matters: tensors and namedtuples must be
    # handled before the generic Mapping/Sequence cases.
    if isinstance(data, torch.Tensor):
        return data.to(*args, **kwargs)
    if isinstance(data, collections.abc.Mapping):
        return {
            key: recursive_to(item, *args, **kwargs)
            for key, item in data.items()
        }
    if isinstance(data, tuple) and hasattr(data, "_fields"):  # namedtuple
        return type(data)(
            *(recursive_to(item, *args, **kwargs) for item in data)
        )
    if isinstance(data, collections.abc.Sequence):
        return [recursive_to(item, *args, **kwargs) for item in data]
    if hasattr(data, "to"):
        return data.to(*args, **kwargs)
    # Unknown data is returned untouched.
    return data
# Numpy dtype kinds that cannot be converted to tensors:
# bytes (S), legacy string (a), unicode (U) and object (O) arrays.
np_str_obj_array_pattern = re.compile(r"[SaUO]")
def mod_default_collate(batch):
    """Makes a tensor from list of batch values.

    Note that this doesn't need to zip(*) values together
    as PaddedBatch connects them already (by key).
    Here the idea is not to error out.

    Unlike upstream ``default_collate``, collation failures (unequal sizes,
    string/object numpy arrays, unsupported element types) return the
    original list instead of raising.

    This is modified from:
    https://github.com/pytorch/pytorch/blob/c0deb231db76dbea8a9d326401417f7d1ce96ed5/torch/utils/data/_utils/collate.py#L42
    """
    elem = batch[0]
    elem_type = type(elem)
    if isinstance(elem, torch.Tensor):
        out = None
        try:
            if torch.utils.data.get_worker_info() is not None:
                # If we're in a background process, concatenate directly into a
                # shared memory tensor to avoid an extra copy
                numel = sum([x.numel() for x in batch])
                storage = elem.storage()._new_shared(numel)
                out = elem.new(storage)
            return torch.stack(batch, 0, out=out)
        except RuntimeError:  # Unequal size:
            return batch
    elif (
        elem_type.__module__ == "numpy"
        and elem_type.__name__ != "str_"
        and elem_type.__name__ != "string_"
    ):
        try:
            if (
                elem_type.__name__ == "ndarray"
                or elem_type.__name__ == "memmap"
            ):
                # array of string classes and object
                if np_str_obj_array_pattern.search(elem.dtype.str) is not None:
                    return batch
                return mod_default_collate([torch.as_tensor(b) for b in batch])
            elif elem.shape == ():  # scalars
                return torch.as_tensor(batch)
            # NOTE(review): a numpy element that is neither ndarray/memmap nor
            # a scalar falls through every branch and returns None implicitly
            # — confirm this is intentional.
        except RuntimeError:  # Unequal size
            return batch
    elif isinstance(elem, float):
        return torch.tensor(batch, dtype=torch.float64)
    elif isinstance(elem, int):
        return torch.tensor(batch)
    else:
        return batch
def split_path(path):
    """Splits a path to source and filename

    This also handles URLs and Huggingface hub paths, in addition to
    regular paths.

    Arguments
    ---------
    path : str

    Returns
    -------
    str
        Source
    str
        Filename
    """
    if "/" not in path:
        # No separator: interpret as path to file in current directory.
        return "./", path
    return path.rsplit("/", maxsplit=1)
def scalarize(value):
    """Converts a namedtuple or dictionary containing tensors
    to their scalar value

    Arguments:
    ----------
    value: dict or namedtuple
        a dictionary or named tuple of tensors

    Returns
    -------
    result: dict
        a result dictionary
    """
    mapping = value._asdict() if hasattr(value, "_asdict") else value
    return {name: tensor.item() for name, tensor in mapping.items()}
| 17,403 | 28.90378 | 123 | py |
speechbrain | speechbrain-main/speechbrain/utils/logger.py | """Managing the logger, utilities
Author
* Fang-Pen Lin 2012 https://fangpenlin.com/posts/2012/08/26/good-logging-practice-in-python/
* Peter Plantinga 2020
* Aku Rouhe 2020
"""
import sys
import os
import yaml
import tqdm
import logging
import logging.config
import math
import torch
from speechbrain.utils.data_utils import recursive_update
from speechbrain.utils.superpowers import run_shell
# SI magnitude prefixes, keyed by the power-of-ten exponent (multiples of 3).
ORDERS_ABBREV = {
    -24: "y",
    -21: "z",
    -18: "a",
    -15: "f",
    -12: "p",
    -9: "n",
    -6: "µ",
    -3: "m",
    0: "",
    3: "k",
    6: "M",
    9: "G",
    12: "T",
    15: "P",
    18: "E",
    21: "Z",
    24: "Y",
}

# Short scale
# Negative powers of ten in lowercase, positive in uppercase
ORDERS_WORDS = {
    -24: "septillionths",
    -21: "sextillionths",
    -18: "quintillionths",
    -15: "quadrillionths",
    -12: "trillionths",
    -9: "billionths",
    -6: "millionths",
    -3: "thousandths",
    0: "",
    3: "Thousand",
    6: "Million",
    9: "Billion",
    12: "Trillion",
    15: "Quadrillion",
    18: "Quintillion",
    21: "Sextillion",
    24: "Septillion",
}
class TqdmCompatibleStreamHandler(logging.StreamHandler):
    """TQDM compatible StreamHandler.

    Writes and prints should be passed through tqdm.tqdm.write
    so that the tqdm progressbar doesn't get messed up.
    """

    def emit(self, record):
        """Emit a record through ``tqdm.tqdm.write`` so progress bars stay intact."""
        try:
            msg = self.format(record)
            stream = self.stream
            tqdm.tqdm.write(msg, end=self.terminator, file=stream)
            self.flush()
        except RecursionError:
            # Recursion errors must propagate (mirrors logging.StreamHandler.emit).
            raise
        except Exception:
            # Any other failure goes through the standard logging error handler.
            self.handleError(record)
def setup_logging(
    config_path="log-config.yaml", overrides=None, default_level=logging.INFO,
):
    """Setup logging configuration.

    Arguments
    ---------
    config_path : str
        The path to a logging config file.
    overrides : dict
        A dictionary of the same structure as the config dict
        with any updated values that need to be applied.
        ``None`` (the default) means no overrides.
    default_level : int
        The level to use if the config file is not found.
    """
    # Bugfix/idiom: avoid the mutable default argument `overrides={}`;
    # `None` is the sentinel for "no overrides" (backward compatible).
    if overrides is None:
        overrides = {}
    if os.path.exists(config_path):
        with open(config_path, "rt") as f:
            config = yaml.safe_load(f)
        recursive_update(config, overrides)
        logging.config.dictConfig(config)
    else:
        logging.basicConfig(level=default_level)
def format_order_of_magnitude(number, abbreviate=True):
    """Formats number to the appropriate order of magnitude for printing.

    Arguments
    ---------
    number : int, float
        The number to format.
    abbreviate : bool
        Whether to use abbreviations (k,M,G) or words (Thousand, Million,
        Billion). Numbers will be either like: "123.5k" or "123.5 Thousand".

    Returns
    -------
    str
        The formatted number. Note that the order of magnitude token is part
        of the string.

    Example
    -------
    >>> print(format_order_of_magnitude(123456))
    123.5k
    >>> print(format_order_of_magnitude(0.00000123, abbreviate=False))
    1.2 millionths
    >>> print(format_order_of_magnitude(5, abbreviate=False))
    5
    """
    style = ORDERS_ABBREV if abbreviate else ORDERS_WORDS
    precision = "{num:3.1f}"
    # Bugfix: math.log is undefined at zero and previously raised ValueError.
    # Zero carries no order-of-magnitude token, so format it directly.
    if number == 0:
        if isinstance(number, int):
            return str(number)
        return precision.format(num=number)
    order = 3 * math.floor(math.log(math.fabs(number), 1000))
    # Fallback for very large numbers:
    while order not in style and order != 0:
        order = order - math.copysign(3, order)  # Bring 3 units towards 0
    order_token = style[order]
    if order != 0:
        formatted_number = precision.format(num=number / 10 ** order)
    else:
        if isinstance(number, int):
            formatted_number = str(number)
        else:
            formatted_number = precision.format(num=number)
    if abbreviate or not order_token:
        return formatted_number + order_token
    else:
        return formatted_number + " " + order_token
def get_environment_description():
    """Returns a string describing the current Python / SpeechBrain environment.

    Useful for making experiments as replicable as possible.

    Returns
    -------
    str
        The string is formatted ready to be written to a file.

    Example
    -------
    >>> get_environment_description().splitlines()[0]
    'SpeechBrain system description'
    """
    python_version_str = "Python version:\n" + sys.version + "\n"
    try:
        freezed, _, _ = run_shell("pip freeze")
        python_packages_str = "Installed Python packages:\n"
        python_packages_str += freezed.decode(errors="replace")
    except OSError:
        python_packages_str = "Could not list python packages with pip freeze"
    try:
        git_hash, _, _ = run_shell("git rev-parse --short HEAD")
        git_str = "Git revision:\n" + git_hash.decode(errors="replace")
    except OSError:
        git_str = "Could not get git revision"
    if torch.cuda.is_available():
        if torch.version.cuda is None:
            # HIP build: there is no CUDA version string; report ROCm instead.
            cuda_str = "ROCm version:\n" + torch.version.hip
        else:
            cuda_str = "CUDA version:\n" + torch.version.cuda
    else:
        cuda_str = "CUDA not available"
    result = "SpeechBrain system description\n"
    result += "==============================\n"
    result += python_version_str
    result += "==============================\n"
    result += python_packages_str
    result += "==============================\n"
    result += git_str
    result += "==============================\n"
    result += cuda_str
    return result
| 5,525 | 27.050761 | 93 | py |
speechbrain | speechbrain-main/speechbrain/utils/_workarounds.py | """This module implements some workarounds for dependencies
Authors
* Aku Rouhe 2022
"""
import torch
import weakref
import warnings
WEAKREF_MARKER = "WEAKREF"
def _cycliclrsaver(obj, path):
    """Save ``obj.state_dict()`` to ``path``, replacing the unpicklable
    ``_scale_fn_ref`` weakref entry (if present) with a sentinel string."""
    checkpoint = obj.state_dict()
    if checkpoint.get("_scale_fn_ref") is not None:
        # Weakrefs cannot be pickled; substitute the sentinel marker.
        checkpoint["_scale_fn_ref"] = WEAKREF_MARKER
    torch.save(checkpoint, path)
def _cycliclrloader(obj, path, end_of_epoch, device=None):
    """Restore a CyclicLR-style scheduler state from ``path`` into ``obj``.

    Arguments
    ---------
    obj : object
        Scheduler whose ``load_state_dict`` receives the restored state.
    path : str
        Path of the checkpoint file.
    end_of_epoch : bool
        Unused; kept for checkpointer hook compatibility.
    device : str or torch.device
        Map location forwarded to ``torch.load``.
    """
    del end_of_epoch  # Unused
    # Perf fix: load once and reuse. The previous version re-read the
    # checkpoint from disk for every load_state_dict call (up to 3 reads).
    state_dict = torch.load(path, map_location=device)
    marker = state_dict.get("_scale_fn_ref")
    if marker is not None and marker == WEAKREF_MARKER:
        if not isinstance(obj._scale_fn_ref, weakref.WeakMethod):
            MSG = "Loading CyclicLR scheduler and the _scale_ref_fn did not exist in instance."
            MSG += " You did not construct it with the same parameters it was created!"
            MSG += " Looks like you changed the scale function!"
            MSG += " If this was not intentional, the scheduler might not work correctly."
            warnings.warn(MSG)
    try:
        # Newer schedulers accept `strict`; fall back for older signatures.
        obj.load_state_dict(state_dict, strict=True)
    except TypeError:
        obj.load_state_dict(state_dict)
| 1,188 | 33.970588 | 95 | py |
speechbrain | speechbrain-main/speechbrain/utils/metric_stats.py | """The ``metric_stats`` module provides an abstract class for storing
statistics produced over the course of an experiment and summarizing them.
Authors:
* Peter Plantinga 2020
* Mirco Ravanelli 2020
* Gaelle Laperriere 2021
* Sahar Ghannay 2021
"""
import torch
from joblib import Parallel, delayed
from speechbrain.utils.data_utils import undo_padding
from speechbrain.utils.edit_distance import wer_summary, wer_details_for_batch
from speechbrain.dataio.dataio import (
merge_char,
split_word,
extract_concepts_values,
)
from speechbrain.dataio.wer import print_wer_summary, print_alignments
class MetricStats:
    """A default class for storing and summarizing arbitrary metrics.

    More complex metrics can be created by sub-classing this class.

    Arguments
    ---------
    metric : function
        The function to use to compute the relevant metric. Should take
        at least two arguments (predictions and targets) and can
        optionally take the relative lengths of either or both arguments.
        Not usually used in sub-classes.
    batch_eval: bool
        When True it feeds the evaluation metric with the batched input.
        When False and n_jobs=1, it performs metric evaluation one-by-one
        in a sequential way. When False and n_jobs>1, the evaluation
        runs in parallel over the different inputs using joblib.
    n_jobs : int
        The number of jobs to use for computing the metric. If this is
        more than one, every sample is processed individually, otherwise
        the whole batch is passed at once.

    Example
    -------
    >>> from speechbrain.nnet.losses import l1_loss
    >>> loss_stats = MetricStats(metric=l1_loss)
    >>> loss_stats.append(
    ...      ids=["utterance1", "utterance2"],
    ...      predictions=torch.tensor([[0.1, 0.2], [0.2, 0.3]]),
    ...      targets=torch.tensor([[0.1, 0.2], [0.1, 0.2]]),
    ...      reduction="batch",
    ... )
    >>> stats = loss_stats.summarize()
    >>> stats['average']
    0.050...
    >>> stats['max_score']
    0.100...
    >>> stats['max_id']
    'utterance2'
    """

    def __init__(self, metric, n_jobs=1, batch_eval=True):
        self.metric = metric
        self.n_jobs = n_jobs
        self.batch_eval = batch_eval
        self.clear()

    def clear(self):
        """Creates empty container for storage, removing existing stats."""
        self.scores = []  # per-utterance metric values
        self.ids = []  # utterance ids, parallel to `scores`
        self.summary = {}  # filled by summarize()

    def append(self, ids, *args, **kwargs):
        """Store a particular set of metric scores.

        Arguments
        ---------
        ids : list
            List of ids corresponding to utterances.
        *args, **kwargs
            Arguments to pass to the metric function. When
            ``batch_eval=False``, `predict` and `target` kwargs are required.
        """
        self.ids.extend(ids)
        # Batch evaluation
        if self.batch_eval:
            scores = self.metric(*args, **kwargs).detach()
        else:
            if "predict" not in kwargs or "target" not in kwargs:
                raise ValueError(
                    "Must pass 'predict' and 'target' as kwargs if batch_eval=False"
                )
            if self.n_jobs == 1:
                # Sequence evaluation (loop over inputs)
                scores = sequence_evaluation(metric=self.metric, **kwargs)
            else:
                # Multiprocess evaluation
                scores = multiprocess_evaluation(
                    metric=self.metric, n_jobs=self.n_jobs, **kwargs
                )
        self.scores.extend(scores)

    def summarize(self, field=None):
        """Summarize the metric scores, returning relevant stats.

        Requires at least one appended score (average divides by
        ``len(self.scores)`` and argmin/argmax need a non-empty tensor).

        Arguments
        ---------
        field : str
            If provided, only returns selected statistic. If not,
            returns all computed statistics.

        Returns
        -------
        float or dict
            Returns a float if ``field`` is provided, otherwise
            returns a dictionary containing all computed stats.
        """
        min_index = torch.argmin(torch.tensor(self.scores))
        max_index = torch.argmax(torch.tensor(self.scores))
        self.summary = {
            "average": float(sum(self.scores) / len(self.scores)),
            "min_score": float(self.scores[min_index]),
            "min_id": self.ids[min_index],
            "max_score": float(self.scores[max_index]),
            "max_id": self.ids[max_index],
        }
        if field is not None:
            return self.summary[field]
        else:
            return self.summary

    def write_stats(self, filestream, verbose=False):
        """Write all relevant statistics to file.

        Arguments
        ---------
        filestream : file-like object
            A stream for the stats to be written to.
        verbose : bool
            Whether to also print the stats to stdout.
        """
        # Lazily compute the summary if summarize() was not called yet.
        if not self.summary:
            self.summarize()
        message = f"Average score: {self.summary['average']}\n"
        message += f"Min error: {self.summary['min_score']} "
        message += f"id: {self.summary['min_id']}\n"
        message += f"Max error: {self.summary['max_score']} "
        message += f"id: {self.summary['max_id']}\n"
        filestream.write(message)
        if verbose:
            print(message)
def multiprocess_evaluation(metric, predict, target, lengths=None, n_jobs=8):
    """Runs metric evaluation if parallel over multiple jobs.

    Arguments
    ---------
    metric : function
        Called once per (prediction, target) pair, in parallel via joblib.
    predict : torch.Tensor
        Batch of predictions; padding is stripped when `lengths` is given.
    target : torch.Tensor
        Batch of targets, same layout as `predict`.
    lengths : torch.Tensor
        Relative lengths (0..1) used to undo padding; None keeps full rows.
    n_jobs : int
        Number of parallel joblib workers.
    """
    if lengths is not None:
        # Convert relative lengths to absolute and strip padding on CPU.
        lengths = (lengths * predict.size(1)).round().int().cpu()
        predict = [p[:length].cpu() for p, length in zip(predict, lengths)]
        target = [t[:length].cpu() for t, length in zip(target, lengths)]
    while True:
        try:
            scores = Parallel(n_jobs=n_jobs, timeout=30)(
                delayed(metric)(p, t) for p, t in zip(predict, target)
            )
            break
        except Exception as e:
            # NOTE(review): this catches *every* exception and retries forever,
            # so a metric that deterministically raises causes an infinite
            # loop — confirm timeouts are the only expected failure here.
            print(e)
            print("Evaluation timeout...... (will try again)")
    return scores
def sequence_evaluation(metric, predict, target, lengths=None):
    """Runs metric evaluation sequentially over the inputs."""
    if lengths is not None:
        # Convert relative lengths to absolute ones and strip the padding.
        abs_lens = (lengths * predict.size(1)).round().int().cpu()
        predict = [p[:n].cpu() for p, n in zip(predict, abs_lens)]
        target = [t[:n].cpu() for t, n in zip(target, abs_lens)]
    return [metric(p, t) for p, t in zip(predict, target)]
class ErrorRateStats(MetricStats):
"""A class for tracking error rates (e.g., WER, PER).
Arguments
---------
merge_tokens : bool
Whether to merge the successive tokens (used for e.g.,
creating words out of character tokens).
See ``speechbrain.dataio.dataio.merge_char``.
split_tokens : bool
Whether to split tokens (used for e.g. creating
characters out of word tokens).
See ``speechbrain.dataio.dataio.split_word``.
space_token : str
The character to use for boundaries. Used with ``merge_tokens``
this represents character to split on after merge.
Used with ``split_tokens`` the sequence is joined with
this token in between, and then the whole sequence is split.
keep_values : bool
Whether to keep the values of the concepts or not.
extract_concepts_values : bool
Process the predict and target to keep only concepts and values.
tag_in : str
Start of the concept ('<' for exemple).
tag_out : str
End of the concept ('>' for exemple).
Example
-------
>>> cer_stats = ErrorRateStats()
>>> i2l = {0: 'a', 1: 'b'}
>>> cer_stats.append(
... ids=['utterance1'],
... predict=torch.tensor([[0, 1, 1]]),
... target=torch.tensor([[0, 1, 0]]),
... target_len=torch.ones(1),
... ind2lab=lambda batch: [[i2l[int(x)] for x in seq] for seq in batch],
... )
>>> stats = cer_stats.summarize()
>>> stats['WER']
33.33...
>>> stats['insertions']
0
>>> stats['deletions']
0
>>> stats['substitutions']
1
"""
    def __init__(
        self,
        merge_tokens=False,
        split_tokens=False,
        space_token="_",
        keep_values=True,
        extract_concepts_values=False,
        tag_in="",
        tag_out="",
    ):
        """Store token post-processing options and reset the stat containers.

        See the class docstring for the meaning of each argument.
        """
        # clear() (inherited) resets self.scores / self.ids / self.summary.
        self.clear()
        self.merge_tokens = merge_tokens
        self.split_tokens = split_tokens
        self.space_token = space_token
        self.extract_concepts_values = extract_concepts_values
        self.keep_values = keep_values
        self.tag_in = tag_in
        self.tag_out = tag_out
def append(
self,
ids,
predict,
target,
predict_len=None,
target_len=None,
ind2lab=None,
):
"""Add stats to the relevant containers.
* See MetricStats.append()
Arguments
---------
ids : list
List of ids corresponding to utterances.
predict : torch.tensor
A predicted output, for comparison with the target output
target : torch.tensor
The correct reference output, for comparison with the prediction.
predict_len : torch.tensor
The predictions relative lengths, used to undo padding if
there is padding present in the predictions.
target_len : torch.tensor
The target outputs' relative lengths, used to undo padding if
there is padding present in the target.
ind2lab : callable
Callable that maps from indices to labels, operating on batches,
for writing alignments.
"""
self.ids.extend(ids)
if predict_len is not None:
predict = undo_padding(predict, predict_len)
if target_len is not None:
target = undo_padding(target, target_len)
if ind2lab is not None:
predict = ind2lab(predict)
target = ind2lab(target)
if self.merge_tokens:
predict = merge_char(predict, space=self.space_token)
target = merge_char(target, space=self.space_token)
if self.split_tokens:
predict = split_word(predict, space=self.space_token)
target = split_word(target, space=self.space_token)
if self.extract_concepts_values:
predict = extract_concepts_values(
predict,
self.keep_values,
self.tag_in,
self.tag_out,
space=self.space_token,
)
target = extract_concepts_values(
target,
self.keep_values,
self.tag_in,
self.tag_out,
space=self.space_token,
)
scores = wer_details_for_batch(ids, target, predict, True)
self.scores.extend(scores)
def summarize(self, field=None):
"""Summarize the error_rate and return relevant statistics.
* See MetricStats.summarize()
"""
self.summary = wer_summary(self.scores)
# Add additional, more generic key
self.summary["error_rate"] = self.summary["WER"]
if field is not None:
return self.summary[field]
else:
return self.summary
def write_stats(self, filestream):
"""Write all relevant info (e.g., error rate alignments) to file.
* See MetricStats.write_stats()
"""
if not self.summary:
self.summarize()
print_wer_summary(self.summary, filestream)
print_alignments(self.scores, filestream)
class BinaryMetricStats(MetricStats):
    """Tracks binary metrics, such as precision, recall, F1, EER, etc.

    Arguments
    ---------
    positive_label : int
        The label value that is treated as the "positive" class.
    """

    def __init__(self, positive_label=1):
        self.clear()
        self.positive_label = positive_label

    def clear(self):
        """Clears the stored metrics."""
        # ids/scores/labels are parallel lists, one entry per sample.
        self.ids = []
        self.scores = []
        self.labels = []
        self.summary = {}

    def append(self, ids, scores, labels):
        """Appends scores and labels to internal lists.

        Does not compute metrics until time of summary, since
        automatic thresholds (e.g., EER) need full set of scores.

        Arguments
        ---------
        ids : list
            The string ids for the samples
        scores : torch.tensor
            The scores for the samples (detached before storing).
        labels : torch.tensor
            The ground-truth labels corresponding to the scores.
        """
        self.ids.extend(ids)
        self.scores.extend(scores.detach())
        self.labels.extend(labels.detach())

    def summarize(
        self, field=None, threshold=None, max_samples=None, beta=1, eps=1e-8
    ):
        """Compute statistics using a full set of scores.

        Full set of fields:
         - TP - True Positive
         - TN - True Negative
         - FP - False Positive
         - FN - False Negative
         - FAR - False Acceptance Rate
         - FRR - False Rejection Rate
         - DER - Detection Error Rate (EER if no threshold passed)
         - threshold - threshold (EER threshold if no threshold passed)
         - precision - Precision (positive predictive value)
         - recall - Recall (sensitivity)
         - F-score - Balance of precision and recall (equal if beta=1)
         - MCC - Matthews Correlation Coefficient

        Arguments
        ---------
        field : str
            A key for selecting a single statistic. If not provided,
            a dict with all statistics is returned.
        threshold : float
            If no threshold is provided, equal error rate is used.
        max_samples: float
            How many samples to keep for positive/negative scores.
            If no max_samples is provided, all scores are kept.
            Only effective when threshold is None.
        beta : float
            How much to weight precision vs recall in F-score. Default
            of 1. is equal weight, while higher values weight recall
            higher, and lower values weight precision higher.
        eps : float
            A small value to avoid dividing by zero.
        """
        # Lazily stack the accumulated per-sample lists into tensors on the
        # first summarize() call; subsequent calls see tensors and skip this.
        if isinstance(self.scores, list):
            self.scores = torch.stack(self.scores)
            self.labels = torch.stack(self.labels)

        if threshold is None:
            # Automatic threshold: compute the EER threshold from the split
            # of positive- and negative-class scores.
            positive_scores = self.scores[
                (self.labels == self.positive_label).nonzero(as_tuple=True)
            ]
            negative_scores = self.scores[
                (self.labels != self.positive_label).nonzero(as_tuple=True)
            ]
            if max_samples is not None:
                # Downsample by keeping every k-th element of the sorted
                # scores so the EER threshold search stays tractable.
                if len(positive_scores) > max_samples:
                    positive_scores, _ = torch.sort(positive_scores)
                    positive_scores = positive_scores[
                        [
                            i
                            for i in range(
                                0,
                                len(positive_scores),
                                int(len(positive_scores) / max_samples),
                            )
                        ]
                    ]
                if len(negative_scores) > max_samples:
                    negative_scores, _ = torch.sort(negative_scores)
                    negative_scores = negative_scores[
                        [
                            i
                            for i in range(
                                0,
                                len(negative_scores),
                                int(len(negative_scores) / max_samples),
                            )
                        ]
                    ]
            # NOTE: `eer` is only bound on this branch; only `threshold`
            # is used below, so an explicit threshold works as well.
            eer, threshold = EER(positive_scores, negative_scores)

        pred = (self.scores > threshold).float()
        true = self.labels

        # Confusion-matrix counts via elementwise products of 0/1 tensors.
        TP = self.summary["TP"] = float(pred.mul(true).sum())
        TN = self.summary["TN"] = float((1.0 - pred).mul(1.0 - true).sum())
        FP = self.summary["FP"] = float(pred.mul(1.0 - true).sum())
        FN = self.summary["FN"] = float((1.0 - pred).mul(true).sum())

        self.summary["FAR"] = FP / (FP + TN + eps)
        self.summary["FRR"] = FN / (TP + FN + eps)
        # NOTE(review): DER here divides by (TP + TN), not by the total
        # number of trials — confirm this denominator is intentional.
        self.summary["DER"] = (FP + FN) / (TP + TN + eps)
        self.summary["threshold"] = threshold

        self.summary["precision"] = TP / (TP + FP + eps)
        self.summary["recall"] = TP / (TP + FN + eps)
        # F-beta score; no eps here, so an all-zero confusion matrix would
        # raise ZeroDivisionError.
        self.summary["F-score"] = (
            (1.0 + beta ** 2.0)
            * TP
            / ((1.0 + beta ** 2.0) * TP + beta ** 2.0 * FN + FP)
        )

        self.summary["MCC"] = (TP * TN - FP * FN) / (
            (TP + FP) * (TP + FN) * (TN + FP) * (TN + FN) + eps
        ) ** 0.5

        if field is not None:
            return self.summary[field]
        else:
            return self.summary
def EER(positive_scores, negative_scores):
"""Computes the EER (and its threshold).
Arguments
---------
positive_scores : torch.tensor
The scores from entries of the same class.
negative_scores : torch.tensor
The scores from entries of different classes.
Example
-------
>>> positive_scores = torch.tensor([0.6, 0.7, 0.8, 0.5])
>>> negative_scores = torch.tensor([0.4, 0.3, 0.2, 0.1])
>>> val_eer, threshold = EER(positive_scores, negative_scores)
>>> val_eer
0.0
"""
# Computing candidate thresholds
thresholds, _ = torch.sort(torch.cat([positive_scores, negative_scores]))
thresholds = torch.unique(thresholds)
# Adding intermediate thresholds
interm_thresholds = (thresholds[0:-1] + thresholds[1:]) / 2
thresholds, _ = torch.sort(torch.cat([thresholds, interm_thresholds]))
# Variable to store the min FRR, min FAR and their corresponding index
min_index = 0
final_FRR = 0
final_FAR = 0
for i, cur_thresh in enumerate(thresholds):
pos_scores_threshold = positive_scores <= cur_thresh
FRR = (pos_scores_threshold.sum(0)).float() / positive_scores.shape[0]
del pos_scores_threshold
neg_scores_threshold = negative_scores > cur_thresh
FAR = (neg_scores_threshold.sum(0)).float() / negative_scores.shape[0]
del neg_scores_threshold
# Finding the threshold for EER
if (FAR - FRR).abs().item() < abs(final_FAR - final_FRR) or i == 0:
min_index = i
final_FRR = FRR.item()
final_FAR = FAR.item()
# It is possible that eer != fpr != fnr. We return (FAR + FRR) / 2 as EER.
EER = (final_FAR + final_FRR) / 2
return float(EER), float(thresholds[min_index])
def minDCF(
    positive_scores, negative_scores, c_miss=1.0, c_fa=1.0, p_target=0.01
):
    """Computes the minDCF metric normally used to evaluate speaker verification
    systems. The min_DCF is the minimum of the following C_det function computed
    within the defined threshold range:

    C_det = c_miss * p_miss * p_target + c_fa * p_fa * (1 - p_target)

    where p_miss is the missing probability and p_fa is the probability of
    having a false alarm.

    Arguments
    ---------
    positive_scores : torch.tensor
        The scores from entries of the same class.
    negative_scores : torch.tensor
        The scores from entries of different classes.
    c_miss : float
        Cost assigned to a missing error (default 1.0).
    c_fa : float
        Cost assigned to a false alarm (default 1.0).
    p_target: float
        Prior probability of having a target (default 0.01).

    Example
    -------
    >>> positive_scores = torch.tensor([0.6, 0.7, 0.8, 0.5])
    >>> negative_scores = torch.tensor([0.4, 0.3, 0.2, 0.1])
    >>> val_minDCF, threshold = minDCF(positive_scores, negative_scores)
    >>> val_minDCF
    0.0
    """
    # Candidate thresholds: all unique scores plus the midpoints between
    # neighboring unique scores.
    thresholds, _ = torch.sort(torch.cat([positive_scores, negative_scores]))
    thresholds = torch.unique(thresholds)
    interm_thresholds = (thresholds[0:-1] + thresholds[1:]) / 2
    thresholds, _ = torch.sort(torch.cat([thresholds, interm_thresholds]))

    # Computing False Rejection Rate (miss detection) per threshold.
    # Broadcasting the comparison to shape (n_scores, n_thresholds) avoids
    # materializing the (n_thresholds x n_scores) copies of the score
    # tensors that the previous torch.cat-based implementation built.
    pos_scores_threshold = positive_scores.unsqueeze(1) <= thresholds.unsqueeze(0)
    p_miss = (pos_scores_threshold.sum(0)).float() / positive_scores.shape[0]
    del pos_scores_threshold

    # Computing False Acceptance Rate (false alarm) per threshold.
    neg_scores_threshold = negative_scores.unsqueeze(1) > thresholds.unsqueeze(0)
    p_fa = (neg_scores_threshold.sum(0)).float() / negative_scores.shape[0]
    del neg_scores_threshold

    # Detection cost; return its minimum and the threshold attaining it
    # (torch.min reports the first index on ties).
    c_det = c_miss * p_miss * p_target + c_fa * p_fa * (1 - p_target)
    c_min, min_index = torch.min(c_det, dim=0)
    return float(c_min), float(thresholds[min_index])
class ClassificationStats(MetricStats):
    """Computes statistics pertaining to multi-label classification tasks,
    as well as tasks that can be loosely interpreted as such for the
    purpose of evaluations.

    Example
    -------
    >>> import sys
    >>> from speechbrain.utils.metric_stats import ClassificationStats
    >>> cs = ClassificationStats()
    >>> cs.append(
    ...     ids=["ITEM1", "ITEM2", "ITEM3", "ITEM4"],
    ...     predictions=[
    ...         "M EY K AH",
    ...         "T EY K",
    ...         "B AE D",
    ...         "M EY K",
    ...     ],
    ...     targets=[
    ...         "M EY K",
    ...         "T EY K",
    ...         "B AE D",
    ...         "M EY K",
    ...     ],
    ...     categories=[
    ...         "make",
    ...         "take",
    ...         "bad",
    ...         "make"
    ...     ]
    ... )
    >>> cs.write_stats(sys.stdout)
    Overall Accuracy: 75%
    <BLANKLINE>
    Class-Wise Accuracy
    -------------------
    bad -> B AE D : 1 / 1 (100.00%)
    make -> M EY K: 1 / 2 (50.00%)
    take -> T EY K: 1 / 1 (100.00%)
    <BLANKLINE>
    Confusion
    ---------
    Target: bad -> B AE D
      -> B AE D   : 1 / 1 (100.00%)
    Target: make -> M EY K
      -> M EY K   : 1 / 2 (50.00%)
      -> M EY K AH: 1 / 2 (50.00%)
    Target: take -> T EY K
      -> T EY K   : 1 / 1 (100.00%)
    >>> summary = cs.summarize()
    >>> summary['accuracy']
    0.75
    >>> summary['classwise_stats'][('bad', 'B AE D')]
    {'total': 1.0, 'correct': 1.0, 'accuracy': 1.0}
    >>> summary['classwise_stats'][('make', 'M EY K')]
    {'total': 2.0, 'correct': 1.0, 'accuracy': 0.5}
    >>> summary['keys']
    [('bad', 'B AE D'), ('make', 'M EY K'), ('take', 'T EY K')]
    >>> summary['predictions']
    ['B AE D', 'M EY K', 'M EY K AH', 'T EY K']
    >>> summary['classwise_total']
    {('bad', 'B AE D'): 1.0, ('make', 'M EY K'): 2.0, ('take', 'T EY K'): 1.0}
    >>> summary['classwise_correct']
    {('bad', 'B AE D'): 1.0, ('make', 'M EY K'): 1.0, ('take', 'T EY K'): 1.0}
    >>> summary['classwise_accuracy']
    {('bad', 'B AE D'): 1.0, ('make', 'M EY K'): 0.5, ('take', 'T EY K'): 1.0}
    """

    def __init__(self):
        # NOTE(review): bare `super()` does not invoke the base
        # initializer (it only creates and discards a proxy object);
        # all state here is set up by clear() below — confirm whether
        # MetricStats.__init__ was meant to be called.
        super()
        self.clear()
        self.summary = None

    def append(self, ids, predictions, targets, categories=None):
        """
        Appends inputs, predictions and targets to internal
        lists

        Arguments
        ---------
        ids: list
            the string IDs for the samples
        predictions: list
            the model's predictions (human-interpretable,
            preferably strings)
        targets: list
            the ground truths (human-interpretable, preferably strings)
        categories: list
            an additional way to classify training
            samples. If available, the categories will
            be combined with targets
        """
        self.ids.extend(ids)
        self.predictions.extend(predictions)
        self.targets.extend(targets)
        if categories is not None:
            self.categories.extend(categories)

    def summarize(self, field=None):
        """Summarize the classification metric scores

        The following statistics are computed:

        accuracy: the overall accuracy (# correct / # total)
        confusion_matrix: a dictionary of type
            {(target, prediction): num_entries} representing
            the confusion matrix
        classwise_stats: computes the total number of samples,
            the number of correct classifications and accuracy
            for each class
        keys: all available class keys, which can be either target classes
            or (category, target) tuples
        predictions: all predictions the model has made

        Arguments
        ---------
        field : str
            If provided, only returns selected statistic. If not,
            returns all computed statistics.

        Returns
        -------
        float or dict
            Returns a float if ``field`` is provided, otherwise
            returns a dictionary containing all computed stats.
        """
        self._build_lookups()
        confusion_matrix = self._compute_confusion_matrix()
        self.summary = {
            "accuracy": self._compute_accuracy(),
            "confusion_matrix": confusion_matrix,
            "classwise_stats": self._compute_classwise_stats(confusion_matrix),
            "keys": self._available_keys,
            "predictions": self._available_predictions,
        }
        # Flatten the per-class stats into classwise_total / classwise_correct
        # / classwise_accuracy dictionaries for convenient access.
        for stat in ["total", "correct", "accuracy"]:
            self.summary[f"classwise_{stat}"] = {
                key: key_stats[stat]
                for key, key_stats in self.summary["classwise_stats"].items()
            }
        if field is not None:
            return self.summary[field]
        else:
            return self.summary

    def _compute_accuracy(self):
        # Fraction of exact prediction/target matches over all samples.
        return sum(
            prediction == target
            for prediction, target in zip(self.predictions, self.targets)
        ) / len(self.ids)

    def _build_lookups(self):
        # Sorted, de-duplicated keys and predictions, plus item -> index maps
        # used to address the confusion matrix.
        self._available_keys = self._get_keys()
        self._available_predictions = list(
            sorted(set(prediction for prediction in self.predictions))
        )
        self._keys_lookup = self._index_lookup(self._available_keys)
        self._predictions_lookup = self._index_lookup(
            self._available_predictions
        )

    def _compute_confusion_matrix(self):
        # Rows are keys (targets or (category, target) pairs), columns are
        # distinct predictions; each cell counts co-occurrences.
        confusion_matrix = torch.zeros(
            len(self._available_keys), len(self._available_predictions)
        )
        for key, prediction in self._get_confusion_entries():
            key_idx = self._keys_lookup[key]
            prediction_idx = self._predictions_lookup[prediction]
            confusion_matrix[key_idx, prediction_idx] += 1
        return confusion_matrix

    def _compute_classwise_stats(self, confusion_matrix):
        total = confusion_matrix.sum(dim=-1)
        # This can be used with "classes" that are not
        # statically determined; for example, they could
        # be constructed from seq2seq predictions. As a
        # result, one cannot use the diagonal
        key_targets = (
            self._available_keys
            if not self.categories
            else [target for _, target in self._available_keys]
        )
        # A target that was never predicted contributes 0 correct entries.
        correct = torch.tensor(
            [
                (
                    confusion_matrix[idx, self._predictions_lookup[target]]
                    if target in self._predictions_lookup
                    else 0
                )
                for idx, target in enumerate(key_targets)
            ]
        )
        accuracy = correct / total
        return {
            key: {
                "total": item_total.item(),
                "correct": item_correct.item(),
                "accuracy": item_accuracy.item(),
            }
            for key, item_total, item_correct, item_accuracy in zip(
                self._available_keys, total, correct, accuracy
            )
        }

    def _get_keys(self):
        # Keys are (category, target) pairs when categories were provided,
        # plain targets otherwise.
        if self.categories:
            keys = zip(self.categories, self.targets)
        else:
            keys = self.targets
        return list(sorted(set(keys)))

    def _get_confusion_entries(self):
        # One (key, prediction) pair per appended sample.
        if self.categories:
            result = (
                ((category, target), prediction)
                for category, target, prediction in zip(
                    self.categories, self.targets, self.predictions
                )
            )
        else:
            result = zip(self.targets, self.predictions)
        result = list(result)
        return result

    def _index_lookup(self, items):
        # Maps each item to its position within `items`.
        return {item: idx for idx, item in enumerate(items)}

    def clear(self):
        """Clears the collected statistics"""
        self.ids = []
        self.predictions = []
        self.targets = []
        self.categories = []

    def write_stats(self, filestream):
        """Outputs the stats to the specified filestream in a human-readable format

        Arguments
        ---------
        filestream: file
            a file-like object
        """
        if self.summary is None:
            self.summarize()
        print(
            f"Overall Accuracy: {self.summary['accuracy']:.0%}", file=filestream
        )
        print(file=filestream)
        self._write_classwise_stats(filestream)
        print(file=filestream)
        self._write_confusion(filestream)

    def _write_classwise_stats(self, filestream):
        # One line per class, labels padded to the longest label width.
        self._write_header("Class-Wise Accuracy", filestream=filestream)
        key_labels = {
            key: self._format_key_label(key) for key in self._available_keys
        }
        longest_key_label = max(len(label) for label in key_labels.values())
        for key in self._available_keys:
            stats = self.summary["classwise_stats"][key]
            padded_label = self._pad_to_length(
                self._format_key_label(key), longest_key_label
            )
            print(
                f"{padded_label}: {int(stats['correct'])} / {int(stats['total'])} ({stats['accuracy']:.2%})",
                file=filestream,
            )

    def _write_confusion(self, filestream):
        # For each target, list every prediction that occurred at least once.
        self._write_header("Confusion", filestream=filestream)
        longest_prediction = max(
            len(prediction) for prediction in self._available_predictions
        )
        confusion_matrix = self.summary["confusion_matrix"].int()
        totals = confusion_matrix.sum(dim=-1)
        for key, key_predictions, total in zip(
            self._available_keys, confusion_matrix, totals
        ):
            target_label = self._format_key_label(key)
            print(f"Target: {target_label}", file=filestream)
            (indexes,) = torch.where(key_predictions > 0)
            total = total.item()
            for index in indexes:
                count = key_predictions[index].item()
                prediction = self._available_predictions[index]
                padded_label = self._pad_to_length(
                    prediction, longest_prediction
                )
                print(
                    f"  -> {padded_label}: {count} / {total} ({count / total:.2%})",
                    file=filestream,
                )

    def _write_header(self, header, filestream):
        # Header text underlined with dashes.
        print(header, file=filestream)
        print("-" * len(header), file=filestream)

    def _pad_to_length(self, label, length):
        # Right-pads `label` with spaces up to `length` characters.
        padding = max(0, length - len(label))
        return label + (" " * padding)

    def _format_key_label(self, key):
        # "(category, target)" keys render as "category -> target".
        if self.categories:
            category, target = key
            label = f"{category} -> {target}"
        else:
            label = key
        return label
| 31,718 | 33.069817 | 109 | py |
speechbrain | speechbrain-main/speechbrain/utils/distributed.py | """Guard for running certain operations on main process only
Authors:
* Abdel Heba 2020
* Aku Rouhe 2020
"""
import os
import torch
import logging
logger = logging.getLogger(__name__)
def run_on_main(
    func,
    args=None,
    kwargs=None,
    post_func=None,
    post_args=None,
    post_kwargs=None,
    run_post_on_main=False,
):
    """Runs a function with DPP (multi-gpu) support.

    The main function is only run on the main process.
    A post_function can be specified, to be on non-main processes after the main
    func completes. This way whatever the main func produces can be loaded on
    the other processes.

    Arguments
    ---------
    func : callable
        Function to run on the main process.
    args : list, None
        Positional args to pass to func.
    kwargs : dict, None
        Keyword args to pass to func.
    post_func : callable, None
        Function to run after func has finished on main. By default only run on
        non-main processes.
    post_args : list, None
        Positional args to pass to post_func.
    post_kwargs : dict, None
        Keyword args to pass to post_func.
    run_post_on_main : bool
        Whether to run post_func on main process as well. (default: False)
    """
    # Handle the mutable data types' default args:
    if args is None:
        args = []
    if kwargs is None:
        kwargs = {}
    if post_args is None:
        post_args = []
    if post_kwargs is None:
        post_kwargs = {}

    # First barrier pair: non-main processes wait until main has finished
    # running func. The try/finally ensures main still reaches the barrier
    # if func raises, so the waiting processes are not blocked forever.
    if if_main_process():
        # Main comes here
        try:
            func(*args, **kwargs)
        finally:
            ddp_barrier()
    else:
        # Others go here
        ddp_barrier()

    # Second barrier pair (mirrored): main waits while the non-main
    # processes run post_func, e.g. to load what main just produced.
    if post_func is not None:
        if run_post_on_main:
            # Just run on every process without any barrier.
            post_func(*post_args, **post_kwargs)
        elif not if_main_process():
            # Others go here
            try:
                post_func(*post_args, **post_kwargs)
            finally:
                ddp_barrier()
        else:
            # But main comes here
            ddp_barrier()
def if_main_process():
    """Checks if the current process is the main process and authorized to run
    I/O commands. In DDP mode, the main process is the one with RANK == 0.
    In standard mode, the process will not have `RANK` Unix var and will be
    authorized to run the I/O commands.
    """
    rank = os.environ.get("RANK")
    # No RANK variable at all: not running under DDP, so this process
    # is the (only) main process.
    if rank is None:
        return True
    # An empty RANK is treated as "not main".
    if rank == "":
        return False
    return int(rank) == 0
def ddp_barrier():
    """In DDP mode, this function will synchronize all processes.

    torch.distributed.barrier() blocks processes until the whole group
    enters this function. Outside of DDP (process group not initialized)
    this is a no-op.
    """
    if not torch.distributed.is_initialized():
        return
    torch.distributed.barrier()
def ddp_init_group(run_opts):
    """This function will initialize the ddp group if
    distributed_launch bool is given in the python command line.

    The ddp group will use distributed_backend arg for setting the
    DDP communication protocol. `RANK` Unix variable will be used for
    registering the subprocess to the ddp group.

    Arguments
    ---------
    run_opts: list
        A list of arguments to parse, most often from `sys.argv[1:]`.

    Raises
    ------
    ValueError
        If DDP is requested without the required launcher arguments or
        environment, if the requested backend is unavailable, or if
        local_rank is set while DDP is disabled.
    """
    if run_opts["distributed_launch"]:
        if "local_rank" not in run_opts:
            raise ValueError(
                "To use DDP backend, start your script with:\n\t"
                "python -m torch.distributed.launch [args]\n\t"
                "experiment.py hyperparams.yaml --distributed_launch "
                "--distributed_backend=nccl"
            )
        else:
            # gloo is CPU-capable; every other backend needs a GPU per rank.
            if not run_opts["distributed_backend"] == "gloo":
                if run_opts["local_rank"] + 1 > torch.cuda.device_count():
                    # Bug fix: the original built this message with str()
                    # (no argument), dropping the rank from the error.
                    raise ValueError(
                        "Killing process "
                        + str(run_opts["local_rank"])
                        + "\n"
                        "Not enough GPUs available!"
                    )
        # Bug fix: the original condition was
        # `"RANK" in os.environ is None`, which Python chains as
        # `("RANK" in os.environ) and (os.environ is None)` and is always
        # False; a missing RANK then crashed with KeyError on the int()
        # conversion below instead of raising this helpful error.
        if "RANK" not in os.environ or os.environ["RANK"] == "":
            raise ValueError(
                "To use DDP backend, start your script with:\n\t"
                "python -m torch.distributed.launch [args]\n\t"
                "experiment.py hyperparams.yaml --distributed_launch "
                "--distributed_backend=nccl"
            )
        rank = int(os.environ["RANK"])

        # Validate that the requested backend is compiled into this torch.
        if run_opts["distributed_backend"] == "nccl":
            if not torch.distributed.is_nccl_available():
                raise ValueError("NCCL is not supported in your machine.")
        elif run_opts["distributed_backend"] == "gloo":
            if not torch.distributed.is_gloo_available():
                raise ValueError("GLOO is not supported in your machine.")
        elif run_opts["distributed_backend"] == "mpi":
            if not torch.distributed.is_mpi_available():
                raise ValueError("MPI is not supported in your machine.")
        else:
            logger.info(
                run_opts["distributed_backend"]
                + " communication protocol doesn't exist."
            )
            raise ValueError(
                run_opts["distributed_backend"]
                + " communication protocol doesn't exist."
            )
        # rank arg is used to set the right rank of the current process for ddp.
        # if you have 2 servers with 2 gpu:
        # server1:
        #   GPU0: local_rank=device=0, rank=0
        #   GPU1: local_rank=device=1, rank=1
        # server2:
        #   GPU0: local_rank=device=0, rank=2
        #   GPU1: local_rank=device=1, rank=3
        torch.distributed.init_process_group(
            backend=run_opts["distributed_backend"], rank=rank
        )
    else:
        logger.info(
            "distributed_launch flag is disabled, "
            "this experiment will be executed without DDP."
        )
        if "local_rank" in run_opts and run_opts["local_rank"] > 0:
            raise ValueError(
                "DDP is disabled, local_rank must not be set.\n"
                "For DDP training, please use --distributed_launch. "
                "For example:\n\tpython -m torch.distributed.launch "
                "experiment.py hyperparams.yaml "
                "--distributed_launch --distributed_backend=nccl"
            )
| 6,388 | 33.349462 | 80 | py |
speechbrain | speechbrain-main/speechbrain/utils/bleu.py | """Library for computing the BLEU score
Authors
* Mirco Ravanelli 2021
"""
from speechbrain.utils.metric_stats import MetricStats
def merge_words(sequences):
    """Merge successive words into phrase, putting space between each word.

    Arguments
    ---------
    sequences : list
        Each item contains a list, and this list contains a word sequence.

    Returns
    -------
    list
        The list contains phrase sequences (one space-joined string per
        input word sequence).
    """
    return [" ".join(sequence) for sequence in sequences]
class BLEUStats(MetricStats):
    """A class for tracking BLEU (https://www.aclweb.org/anthology/P02-1040.pdf).

    Arguments
    ---------
    lang : str
        Language code (accepted for API compatibility; not used by this
        class).
    merge_words: bool
        Whether to merge the successive words to create sentences.

    Example
    -------
    >>> bleu = BLEUStats()
    >>> i2l = {0: 'a', 1: 'b'}
    >>> bleu.append(
    ...     ids=['utterance1'],
    ...     predict=[[0, 1, 1]],
    ...     targets=[[[0, 1, 0]], [[0, 1, 1]], [[1, 1, 0]]],
    ...     ind2lab=lambda batch: [[i2l[int(x)] for x in seq] for seq in batch],
    ... )
    >>> stats = bleu.summarize()
    >>> stats['BLEU']
    0.0
    """

    def __init__(
        self, lang="en", merge_words=True,
    ):
        self.clear()
        self.merge_words = merge_words

        # Accumulated hypotheses and, per reference set, the accumulated
        # reference sentences. `targets` is lazily initialized on the
        # first append() so the number of reference sets is taken from
        # the data.
        self.predicts = []
        self.targets = None

    def append(
        self, ids, predict, targets, ind2lab=None,
    ):
        """Add stats to the relevant containers.

        * See MetricStats.append()

        Arguments
        ---------
        ids : list
            List of ids corresponding to utterances.
        predict : torch.tensor
            A predicted output, for comparison with the target output
        targets : list
            list of references (when measuring BLEU, one sentence could have more
            than one target translation).
        ind2lab : callable
            Callable that maps from indices to labels, operating on batches,
            for writing alignments.
        """
        self.ids.extend(ids)

        if ind2lab is not None:
            predict = ind2lab(predict)
            targets = [ind2lab(t) for t in targets]

        if self.merge_words:
            predict = merge_words(predict)
            targets = [merge_words(t) for t in targets]

        self.predicts.extend(predict)
        if self.targets is None:
            self.targets = targets
        else:
            # Every reference set must grow in lockstep with the hypotheses.
            assert len(self.targets) == len(targets)
            for i in range(len(self.targets)):
                self.targets[i].extend(targets[i])

    def summarize(self, field=None):
        """Summarize the BLEU and return relevant statistics.

        * See MetricStats.summarize()
        """
        # Check extra-dependency for computing the bleu score
        try:
            import sacrebleu
        except ImportError:
            # NOTE(review): this only prints and does not raise/return, so
            # when sacrebleu is missing, execution continues and fails
            # below with a NameError — confirm whether a re-raise was
            # intended here.
            print(
                "Please install sacrebleu (https://pypi.org/project/sacrebleu/) in order to use the BLEU metric"
            )

        scores = sacrebleu.corpus_bleu(self.predicts, self.targets)
        details = {}
        details["BLEU"] = scores.score
        details["BP"] = scores.bp
        details["ratio"] = scores.sys_len / scores.ref_len
        details["hyp_len"] = scores.sys_len
        details["ref_len"] = scores.ref_len
        details["precisions"] = scores.precisions

        self.scores = scores
        self.summary = details

        # Add additional, more generic key
        self.summary["bleu_score"] = self.summary["BLEU"]

        if field is not None:
            return self.summary[field]
        else:
            return self.summary

    def write_stats(self, filestream):
        """Write all relevant info (e.g., error rate alignments) to file.
        * See MetricStats.write_stats()
        """
        if not self.summary:
            self.summarize()

        print(self.scores, file=filestream)
speechbrain | speechbrain-main/speechbrain/utils/Accuracy.py | """Calculate accuracy.
Authors
* Jianyuan Zhong 2020
"""
import torch
from speechbrain.dataio.dataio import length_to_mask
def Accuracy(log_probabilities, targets, length=None):
    """Calculates the accuracy for predicted log probabilities and targets in a batch.

    Arguments
    ----------
    log_probabilities : tensor
        Predicted log probabilities (batch_size, time, feature).
    targets : tensor
        Target (batch_size, time).
    length : tensor
        Relative length of target (batch_size,). When given, padded
        positions are masked out of both counts.

    Returns
    -------
    tuple of (float, float)
        The number of correct predictions (numerator) and the number of
        scored positions (denominator).

    Example
    -------
    >>> probs = torch.tensor([[0.9, 0.1], [0.1, 0.9], [0.8, 0.2]]).unsqueeze(0)
    >>> acc = Accuracy(torch.log(probs), torch.tensor([1, 1, 0]).unsqueeze(0), torch.tensor([2/3]))
    >>> print(acc)
    (1.0, 2.0)
    """
    if length is not None:
        # Build a (batch, time) boolean mask of valid (non-padded) positions.
        mask = length_to_mask(
            length * targets.shape[1], max_len=targets.shape[1],
        ).bool()
        if len(targets.shape) == 3:
            mask = mask.unsqueeze(2).repeat(1, 1, targets.shape[2])

    padded_pred = log_probabilities.argmax(-1)

    if length is not None:
        numerator = torch.sum(
            padded_pred.masked_select(mask) == targets.masked_select(mask)
        )
        denominator = torch.sum(mask)
    else:
        numerator = torch.sum(padded_pred == targets)
        # Bug fix: the numerator sums matches over the whole batch, so the
        # denominator must count every target position as well. The previous
        # `targets.shape[1]` counted only one sequence length, which made
        # the ratio exceed 1 for batch_size > 1 (identical for batch 1).
        denominator = targets.numel()
    return float(numerator), float(denominator)
class AccuracyStats:
    """Tracks the overall one-step-forward prediction accuracy over batches.

    Example
    -------
    >>> probs = torch.tensor([[0.9, 0.1], [0.1, 0.9], [0.8, 0.2]]).unsqueeze(0)
    >>> stats = AccuracyStats()
    >>> stats.append(torch.log(probs), torch.tensor([1, 1, 0]).unsqueeze(0), torch.tensor([2/3]))
    >>> acc = stats.summarize()
    >>> print(acc)
    0.5
    """

    def __init__(self):
        # Running numerator (correct predictions) and denominator
        # (scored positions) of the accuracy.
        self.correct = 0
        self.total = 0

    def append(self, log_probabilities, targets, length=None):
        """Updates the running counts from one batch of predictions.

        Arguments
        ----------
        log_probabilities : tensor
            Predicted log probabilities (batch_size, time, feature).
        targets : tensor
            Target (batch_size, time).
        length: tensor
            Length of target (batch_size,).
        """
        batch_correct, batch_total = Accuracy(
            log_probabilities, targets, length
        )
        self.correct += batch_correct
        self.total += batch_total

    def summarize(self):
        """Returns the accuracy accumulated so far."""
        return self.correct / self.total
| 2,584 | 29.05814 | 99 | py |
speechbrain | speechbrain-main/speechbrain/utils/torch_audio_backend.py | """Library for checking the torchaudio backend.
Authors
* Mirco Ravanelli 2021
"""
import platform
import logging
import torchaudio
logger = logging.getLogger(__name__)
def check_torchaudio_backend():
    """Checks the torchaudio backend and sets it to soundfile if
    windows is detected, since 'sox_io' is not available there.
    """
    current_system = platform.system()
    if current_system == "Windows":
        # Bug fix: Logger.warn is a deprecated alias of Logger.warning.
        logger.warning(
            "The torchaudio backend is switched to 'soundfile'. Note that 'sox_io' is not supported on Windows."
        )
        # NOTE(review): set_audio_backend is deprecated in recent
        # torchaudio releases — confirm the supported torchaudio range.
        torchaudio.set_audio_backend("soundfile")
| 573 | 23.956522 | 112 | py |
speechbrain | speechbrain-main/speechbrain/utils/train_logger.py | """Loggers for experiment monitoring.
Authors
* Peter Plantinga 2020
"""
import logging
import ruamel.yaml
import torch
import os
logger = logging.getLogger(__name__)
class TrainLogger:
    """Abstract class defining an interface for training loggers.

    Subclasses must implement ``log_stats``; this base method only raises.
    """

    def log_stats(
        self,
        stats_meta,
        train_stats=None,
        valid_stats=None,
        test_stats=None,
        verbose=False,
    ):
        """Log the stats for one epoch.

        Arguments
        ---------
        stats_meta : dict of str:scalar pairs
            Meta information about the stats (e.g., epoch, learning-rate, etc.).
        train_stats : dict of str:list pairs
            Each loss type is represented with a str : list pair including
            all the values for the training pass.
        valid_stats : dict of str:list pairs
            Each loss type is represented with a str : list pair including
            all the values for the validation pass.
        test_stats : dict of str:list pairs
            Each loss type is represented with a str : list pair including
            all the values for the test pass.
        verbose : bool
            Whether to also put logging information to the standard logger.
        """
        raise NotImplementedError
class FileTrainLogger(TrainLogger):
    """Text logger of training information.

    Each call to ``log_stats`` appends one human-readable summary line to
    the given file (and optionally to the standard logger).

    Arguments
    ---------
    save_file : str
        The file to use for logging train information.
    precision : int
        Number of decimal places to display. Default 2, example: 1.35e-5.
    """

    def __init__(self, save_file, precision=2):
        self.save_file = save_file
        self.precision = precision

    def _item_to_string(self, key, value, dataset=None):
        """Formats one stat as ``"key: value"``, handling floats."""
        if isinstance(value, float):
            # Fixed-point for "human-scale" values, scientific otherwise.
            spec = "f" if 1.0 < value < 100.0 else "e"
            value = f"{value:.{self.precision}{spec}}"
        if dataset is not None:
            key = f"{dataset} {key}"
        return f"{key}: {value}"

    def _stats_to_string(self, stats, dataset=None):
        """Joins all stats of one dict into a comma-separated summary."""
        return ", ".join(
            self._item_to_string(key, value, dataset)
            for key, value in stats.items()
        )

    def log_stats(
        self,
        stats_meta,
        train_stats=None,
        valid_stats=None,
        test_stats=None,
        verbose=True,
    ):
        """See TrainLogger.log_stats()"""
        parts = [self._stats_to_string(stats_meta)]
        for dataset, stats in (
            ("train", train_stats),
            ("valid", valid_stats),
            ("test", test_stats),
        ):
            if stats is not None:
                parts.append(self._stats_to_string(stats, dataset))
        summary_line = " - ".join(parts)
        with open(self.save_file, "a") as fout:
            print(summary_line, file=fout)
        if verbose:
            logger.info(summary_line)
class TensorboardLogger(TrainLogger):
    """Logs training information in the format required by Tensorboard.

    Arguments
    ---------
    save_dir : str
        A directory for storing all the relevant logs.

    Raises
    ------
    ImportError if Tensorboard is not installed.
    """

    def __init__(self, save_dir):
        self.save_dir = save_dir

        # Raises ImportError if TensorBoard is not installed
        from torch.utils.tensorboard import SummaryWriter

        self.writer = SummaryWriter(self.save_dir)
        # Independent step counters: one global "meta" counter plus one
        # counter per (dataset, stat) scalar series.
        self.global_step = {"train": {}, "valid": {}, "test": {}, "meta": 0}

    def log_stats(
        self,
        stats_meta,
        train_stats=None,
        valid_stats=None,
        test_stats=None,
        verbose=False,
    ):
        """See TrainLogger.log_stats()"""
        self.global_step["meta"] += 1
        for name, value in stats_meta.items():
            self.writer.add_scalar(name, value, self.global_step["meta"])

        for dataset, stats in [
            ("train", train_stats),
            ("valid", valid_stats),
            ("test", test_stats),
        ]:
            if stats is None:
                continue
            for stat, value_list in stats.items():
                if stat not in self.global_step[dataset]:
                    self.global_step[dataset][stat] = 0
                tag = f"{stat}/{dataset}"

                # Both single value (per epoch) and list (per batch) logging
                # is supported; normalizing to a list removes the duplicated
                # scalar-writing branch of the original implementation.
                if not isinstance(value_list, list):
                    value_list = [value_list]
                for value in value_list:
                    new_global_step = self.global_step[dataset][stat] + 1
                    self.writer.add_scalar(tag, value, new_global_step)
                    self.global_step[dataset][stat] = new_global_step

    def log_audio(self, name, value, sample_rate):
        """Add audio signal in the logs."""
        self.writer.add_audio(
            name, value, self.global_step["meta"], sample_rate=sample_rate
        )

    def log_figure(self, name, value):
        """Add a figure in the logs."""
        # NOTE(review): plot_spectrogram is expected to be defined elsewhere
        # in this module; the None check suggests it may return None when
        # plotting is unavailable — confirm.
        fig = plot_spectrogram(value)
        if fig is not None:
            self.writer.add_figure(name, fig, self.global_step["meta"])
class WandBLogger(TrainLogger):
    """Logger for wandb. To be used the same way as TrainLogger. Handles nested dicts as well.
    An example on how to use this can be found in recipes/Voicebank/MTL/CoopNet/"""

    def __init__(self, *args, **kwargs):
        """Expects ``yaml_config`` (path to the hparams file) and an
        ``initializer`` callable (typically ``wandb.init``) in kwargs;
        remaining args/kwargs are forwarded to the initializer."""
        try:
            yaml_file = kwargs.pop("yaml_config")
            with open(yaml_file, "r") as yaml_stream:
                # Read yaml with ruamel to ignore bangs
                config_dict = ruamel.yaml.YAML().load(yaml_stream)
            self.run = kwargs.pop("initializer", None)(
                *args, **kwargs, config=config_dict
            )
        except Exception as e:
            # BUG FIX: the original did `raise e("...")`, which *calls* the
            # caught exception instance and itself fails with
            # "TypeError: '<Exception>' object is not callable", hiding the
            # real cause. Raise a new exception chained to the original.
            raise Exception(
                "There was an issue with the WandB Logger initialization"
            ) from e

    def log_stats(
        self,
        stats_meta,
        train_stats=None,
        valid_stats=None,
        test_stats=None,
        verbose=False,
    ):
        """See TrainLogger.log_stats()"""
        logs = {}
        for dataset, stats in [
            ("train", train_stats),
            ("valid", valid_stats),
            ("test", test_stats),
        ]:
            if stats is None:
                continue
            logs[dataset] = stats

        # Logging with an explicit step is useful for continuing runs that
        # crashed: wandb aligns the new points with the previous epoch axis.
        step = stats_meta.get("epoch", None)
        if step is not None:
            self.run.log({**logs, **stats_meta}, step=step)
        else:
            self.run.log({**logs, **stats_meta})
def _get_image_saver():
    """Returns the TorchVision image saver, if available,
    or None if it is not - optional dependency."""
    try:
        import torchvision

        return torchvision.utils.save_image
    except ImportError:
        # `Logger.warn` is a deprecated alias of `Logger.warning`.
        logger.warning("torchvision is not available - cannot save figures")
        return None
class ProgressSampleLogger:
    """A logger that outputs samples during training progress, used primarily
    in speech synthesis, but customizable, reusable and applicable to any
    other generative task.

    Natively, this logger supports images and raw PyTorch output.
    Other custom formats can be added as needed.

    Example
    -------
    In hparams.yaml::

        progress_sample_logger: !new:speechbrain.utils.progress_samples.ProgressSampleLogger
            output_path: output/samples
            progress_batch_sample_size: 3
            format_defs:
                foo:
                    extension: bar
                    saver: !speechbrain.dataio.mystuff.save_my_format
                    kwargs:
                        baz: qux
            formats:
                foobar: foo

    In the brain, run the following to "remember" a sample
    (e.g. from compute_objectives)::

        self.hparams.progress_sample_logger.remember(
            target=spectrogram_target,
            output=spectrogram_output,
            alignments=alignments_output,
            raw_batch={
                "inputs": inputs,
                "spectrogram_target": spectrogram_target,
                "spectrogram_output": spectrogram_output,
                "alignments": alignments_output,
            },
        )

    Run the following at the end of the epoch (e.g. from on_stage_end)::

        self.progress_sample_logger.save(epoch)

    Arguments
    ---------
    output_path: str
        the filesystem path to which samples will be saved
    formats: dict
        a dictionary with sample keys as keys and format identifiers as
        values; keys not listed here use DEFAULT_FORMAT
    format_defs: dict
        a dictionary with format identifiers as keys and dictionaries with
        handler callables and extensions as values. The signature of the
        handler should be similar to torch.save

        Example::

            {
                "myformat": {
                    "extension": "myf",
                    "saver": somemodule.save_my_format,
                    "kwargs": {},
                }
            }
    batch_sample_size: int
        The number of items to retrieve when extracting a batch sample
    """

    _DEFAULT_FORMAT_DEFS = {
        "raw": {"extension": "pth", "saver": torch.save, "kwargs": {}},
        "image": {
            "extension": "png",
            "saver": _get_image_saver(),
            "kwargs": {},
        },
    }
    DEFAULT_FORMAT = "image"

    def __init__(
        self, output_path, formats=None, format_defs=None, batch_sample_size=1
    ):
        self.progress_samples = {}
        self.formats = formats or {}
        # Copy the class-level defaults so per-instance updates do not
        # mutate the shared dictionary.
        self.format_defs = dict(self._DEFAULT_FORMAT_DEFS)
        if format_defs is not None:
            self.format_defs.update(format_defs)
        self.batch_sample_size = batch_sample_size
        self.output_path = output_path

    def reset(self):
        """Initializes the collection of progress samples"""
        self.progress_samples = {}

    def remember(self, **kwargs):
        """Updates the internal dictionary of snapshots with the provided
        values

        Arguments
        ---------
        kwargs: dict
            the parameters to be saved with
        """
        # Detach from the autograd graph so remembered samples don't keep
        # the whole computation graph alive.
        self.progress_samples.update(
            {key: detach(value) for key, value in kwargs.items()}
        )

    def get_batch_sample(self, value):
        """Obtains a sample of a batch for saving. This can be useful to
        monitor raw data (both samples and predictions) over the course
        of training

        Arguments
        ---------
        value: dict|torch.Tensor|list
            the raw values from the batch

        Returns
        -------
        result: object
            the same type of object as the provided value
        """
        if isinstance(value, dict):
            result = {
                key: self.get_batch_sample(item_value)
                for key, item_value in value.items()
            }
        elif isinstance(value, (torch.Tensor, list)):
            result = value[: self.batch_sample_size]
        else:
            result = value
        return result

    def save(self, epoch):
        """Saves all items previously saved with remember() calls

        Arguments
        ---------
        epoch: int
            The epoch number
        """
        for key, data in self.progress_samples.items():
            self.save_item(key, data, epoch)

    def save_item(self, key, data, epoch):
        """Saves a single sample item

        Arguments
        ---------
        key: str
            the key/identifier of the item
        data: torch.Tensor
            the data to save
        epoch: int
            the epoch number (used in file path calculations)

        Raises
        ------
        ValueError
            if the format configured for the key is not defined
        """
        target_path = os.path.join(self.output_path, str(epoch))
        # exist_ok avoids the check-then-create race of the original
        # `if not os.path.exists(...): os.makedirs(...)` sequence.
        os.makedirs(target_path, exist_ok=True)
        format = self.formats.get(key, self.DEFAULT_FORMAT)
        format_def = self.format_defs.get(format)
        if format_def is None:
            # BUG FIX: the original message lacked the f-prefix and printed
            # the literal text "{format}".
            raise ValueError(f"Unsupported format {format}")
        file_name = f"{key}.{format_def['extension']}"
        effective_file_name = os.path.join(target_path, file_name)
        saver = format_def.get("saver")
        if saver is not None:
            saver(data, effective_file_name, **format_def["kwargs"])
def plot_spectrogram(spectrogram, ap=None, fig_size=(16, 10), output_fig=False):
    """Returns a matplotlib figure of the spectrogram if matplotlib is
    available, or None if it is not - optional dependency.

    Arguments
    ---------
    spectrogram : torch.Tensor
        the spectrogram to plot; it is detached, moved to CPU and squeezed
    ap : object
        unused; kept for interface compatibility
    fig_size : tuple
        the figure size, in inches
    output_fig : bool
        if False (default), the figure is closed after rendering so it does
        not accumulate in interactive backends; the object is still returned

    Returns
    -------
    fig : matplotlib.figure.Figure or None
    """
    try:
        import matplotlib

        # Use the non-interactive backend: figures are only rendered to
        # buffers (e.g. for Tensorboard), never shown on screen.
        matplotlib.use("Agg")
        import matplotlib.pyplot as plt
    except ImportError:
        # `Logger.warn` is a deprecated alias of `Logger.warning`.
        logger.warning("matplotlib is not available - cannot log figures")
        return None

    spectrogram = spectrogram.detach().cpu().numpy().squeeze()
    fig = plt.figure(figsize=fig_size)
    plt.imshow(spectrogram, aspect="auto", origin="lower")
    plt.colorbar()
    plt.tight_layout()
    if not output_fig:
        plt.close()
    return fig
def detach(value):
    """Detaches the specified object from the graph, which can be a
    single tensor or a dictionary of tensors. Dictionaries of tensors are
    converted recursively

    Arguments
    ---------
    value: torch.Tensor|dict
        a tensor or a dictionary of tensors

    Returns
    -------
    result: torch.Tensor|dict
        a tensor or dictionary of tensors; any other type is returned
        unchanged
    """
    if isinstance(value, torch.Tensor):
        return value.detach().cpu()
    if isinstance(value, dict):
        return {key: detach(inner) for key, inner in value.items()}
    return value
| 13,898 | 30.445701 | 168 | py |
speechbrain | speechbrain-main/speechbrain/processing/NMF.py | """Non-negative matrix factorization
Authors
* Cem Subakan
"""
import torch
from speechbrain.processing.features import spectral_magnitude
import speechbrain.processing.features as spf
def spectral_phase(stft, power=2, log=False):
    """Returns the phase of a complex spectrogram.

    Arguments
    ---------
    stft : torch.Tensor
        A tensor, output from the stft function, with shape
        [batch, freq, time, 2] (last axis is real/imaginary).
    power : int
        Unused; kept for interface compatibility.
    log : bool
        Unused; kept for interface compatibility.

    Example
    -------
    >>> BS, nfft, T = 10, 20, 300
    >>> X_stft = torch.randn(BS, nfft//2 + 1, T, 2)
    >>> phase_mix = spectral_phase(X_stft)
    """
    real_part = stft[:, :, :, 0]
    imag_part = stft[:, :, :, 1]
    return torch.atan2(imag_part, real_part)
def NMF_separate_spectra(Whats, Xmix, n_iter=1000, eps=1e-20):
    """This function separates the mixture signals, given NMF template matrices.

    Arguments
    ---------
    Whats : list
        This list contains the list [W1, W2], where W1 W2 are respectively
        the NMF template matrices that correspond to source1 and source2.
        W1, W2 are of size [nfft/2 + 1, K], where nfft is the fft size for STFT,
        and K is the number of vectors (templates) in W.
    Xmix : torch.tensor
        This is the magnitude spectra for the mixtures.
        The size is [BS x T x nfft//2 + 1] where,
        BS = batch size, nfft = fft size, T = number of time steps in the spectra.
    n_iter : int
        Number of multiplicative-update iterations for the activations
        (default 1000, the value previously hard-coded).
    eps : float
        Small constant for numerical stability (default 1e-20).

    Outputs
    -------
    X1hat : Separated spectrum for source1
        Size = [BS x (nfft/2 +1) x T] where,
        BS = batch size, nfft = fft size, T = number of time steps in the spectra.
    X2hat : Separated Spectrum for source2
        The size definitions are the same as above.

    Example
    --------
    >>> BS, nfft, T = 4, 20, 400
    >>> K1, K2 = 10, 10
    >>> W1hat = torch.randn(nfft//2 + 1, K1)
    >>> W2hat = torch.randn(nfft//2 + 1, K2)
    >>> Whats = [W1hat, W2hat]
    >>> Xmix = torch.randn(BS, T, nfft//2 + 1)
    >>> X1hat, X2hat = NMF_separate_spectra(Whats, Xmix)
    """
    W1, W2 = Whats

    nmixtures = Xmix.shape[0]
    # (BS, T, F) -> (F, BS*T): one column per time frame across the batch
    Xmix = Xmix.permute(0, 2, 1).reshape(-1, Xmix.size(-1)).t()
    n = Xmix.shape[1]

    # Normalize input; `g` keeps the per-frame gains for later rescaling
    g = Xmix.sum(dim=0) + eps
    z = Xmix / g

    # Initialize: concatenate templates and draw random activations
    w = torch.cat([W1, W2], dim=1)
    K = w.size(1)
    K1 = W1.size(1)

    h = 0.1 * torch.rand(K, n)
    h /= torch.sum(h, dim=0) + eps

    # Multiplicative updates for the activations h with templates w fixed
    for ep in range(n_iter):
        v = z / (torch.matmul(w, h) + eps)

        nh = h * torch.matmul(w.t(), v)
        h = nh / (torch.sum(nh, dim=0) + eps)

    # Restore the original per-frame gains
    h *= g

    # Reconstruct each source from its own templates/activations and
    # unfold the frames back into (BS, F, T)
    Xhat1 = torch.matmul(w[:, :K1], h[:K1, :])
    Xhat1 = torch.split(Xhat1.unsqueeze(0), Xhat1.size(1) // nmixtures, dim=2)
    Xhat1 = torch.cat(Xhat1, dim=0)

    Xhat2 = torch.matmul(w[:, K1:], h[K1:, :])
    Xhat2 = torch.split(Xhat2.unsqueeze(0), Xhat2.size(1) // nmixtures, dim=2)
    Xhat2 = torch.cat(Xhat2, dim=0)

    return Xhat1, Xhat2
def reconstruct_results(
    X1hat, X2hat, X_stft, sample_rate, win_length, hop_length,
):
    """This function reconstructs the separated spectra into waveforms.

    Arguments
    ---------
    X1hat : torch.tensor
        The separated spectrum for source 1 of size [BS, nfft/2 + 1, T],
        where, BS = batch size, nfft = fft size, T = length of the spectra.
    X2hat : torch.tensor
        The separated spectrum for source 2 of size [BS, nfft/2 + 1, T].
        The size definitions are the same as X1hat.
    X_stft : torch.tensor
        This is the magnitude spectra for the mixtures.
        The size is [BS x nfft//2 + 1 x T x 2] where,
        BS = batch size, nfft = fft size, T = number of time steps in the spectra.
        The last dimension is to represent complex numbers.
    sample_rate : int
        The sampling rate (in Hz) in which we would like to save the results.
    win_length : int
        The length of stft windows (in ms).
    hop_length : int
        The length with which we shift the STFT windows (in ms).

    Returns
    -------
    x1hats : list
        List of waveforms for source 1.
    x2hats : list
        List of waveforms for source 2.

    Example
    -------
    >>> BS, nfft, T = 10, 512, 16000
    >>> sample_rate, win_length, hop_length = 16000, 25, 10
    >>> X1hat = torch.randn(BS, nfft//2 + 1, T)
    >>> X2hat = torch.randn(BS, nfft//2 + 1, T)
    >>> X_stft = torch.randn(BS, nfft//2 + 1, T, 2)
    >>> x1hats, x2hats = reconstruct_results(X1hat, X2hat, X_stft, sample_rate, win_length, hop_length)
    """
    istft = spf.ISTFT(
        sample_rate=sample_rate, win_length=win_length, hop_length=hop_length
    )

    phase_mix = spectral_phase(X_stft)
    mag_mix = spectral_magnitude(X_stft, power=2)

    eps = 1e-25
    div_factor = 10

    def _masked_wave(source, other, phase, mag):
        """Soft-mask the mixture magnitude, re-apply the mixture phase,
        invert to a waveform, and normalize its scale."""
        mask = (source / (eps + source + other)).unsqueeze(-1)
        complex_spec = (
            mask
            * mag.unsqueeze(-1)
            * torch.cat(
                [
                    torch.cos(phase.unsqueeze(-1)),
                    torch.sin(phase.unsqueeze(-1)),
                ],
                dim=-1,
            )
        )
        # Add the batch axis back and reorder to (B, T, F, 2) for ISTFT
        complex_spec = complex_spec.unsqueeze(0).permute(0, 2, 1, 3)
        wave = istft(complex_spec)
        return wave / (div_factor * wave.std())

    x1hats, x2hats = [], []
    for i in range(X1hat.shape[0]):
        x1hats.append(
            _masked_wave(X1hat[i], X2hat[i], phase_mix[i], mag_mix[i])
        )
        x2hats.append(
            _masked_wave(X2hat[i], X1hat[i], phase_mix[i], mag_mix[i])
        )
    return x1hats, x2hats
| 5,770 | 29.373684 | 103 | py |
speechbrain | speechbrain-main/speechbrain/processing/features.py | """Low-level feature pipeline components
This library gathers functions that compute popular speech features over
batches of data. All the classes are of type nn.Module. This gives the
possibility to have end-to-end differentiability and to backpropagate the
gradient through them. Our functions are a modified version the ones
in torch audio toolkit (https://github.com/pytorch/audio).
Example
-------
>>> import torch
>>> from speechbrain.dataio.dataio import read_audio
>>> signal =read_audio('tests/samples/single-mic/example1.wav')
>>> signal = signal.unsqueeze(0)
>>> compute_STFT = STFT(
... sample_rate=16000, win_length=25, hop_length=10, n_fft=400
... )
>>> features = compute_STFT(signal)
>>> features = spectral_magnitude(features)
>>> compute_fbanks = Filterbank(n_mels=40)
>>> features = compute_fbanks(features)
>>> compute_mfccs = DCT(input_size=40, n_out=20)
>>> features = compute_mfccs(features)
>>> compute_deltas = Deltas(input_size=20)
>>> delta1 = compute_deltas(features)
>>> delta2 = compute_deltas(delta1)
>>> features = torch.cat([features, delta1, delta2], dim=2)
>>> compute_cw = ContextWindow(left_frames=5, right_frames=5)
>>> features = compute_cw(features)
>>> norm = InputNormalization()
>>> features = norm(features, torch.tensor([1]).float())
Authors
* Mirco Ravanelli 2020
"""
import math
import torch
import logging
from speechbrain.utils.checkpoints import (
mark_as_saver,
mark_as_loader,
mark_as_transfer,
register_checkpoint_hooks,
)
logger = logging.getLogger(__name__)
class STFT(torch.nn.Module):
    """computes the Short-Term Fourier Transform (STFT).

    This class computes the Short-Term Fourier Transform of an audio signal.
    It supports multi-channel audio inputs (batch, time, channels).

    Arguments
    ---------
    sample_rate : int
        Sample rate of the input audio signal (e.g 16000).
    win_length : float
        Length (in ms) of the sliding window used to compute the STFT.
    hop_length : float
        Length (in ms) of the hope of the sliding window used to compute
        the STFT.
    n_fft : int
        Number of fft point of the STFT. It defines the frequency resolution
        (n_fft should be <= than win_len).
    window_fn : function
        A function that takes an integer (number of samples) and outputs a
        tensor to be multiplied with each window before fft.
    normalized_stft : bool
        If True, the function returns the normalized STFT results,
        i.e., multiplied by win_length^-0.5 (default is False).
    center : bool
        If True (default), the input will be padded on both sides so that the
        t-th frame is centered at time t×hop_length. Otherwise, the t-th frame
        begins at time t×hop_length.
    pad_mode : str
        It can be 'constant','reflect','replicate', 'circular', 'reflect'
        (default). 'constant' pads the input tensor boundaries with a
        constant value. 'reflect' pads the input tensor using the reflection
        of the input boundary. 'replicate' pads the input tensor using
        replication of the input boundary. 'circular' pads using circular
        replication.
    onesided : True
        If True (default) only returns nfft/2 values. Note that the other
        samples are redundant due to the Fourier transform conjugate symmetry.

    Example
    -------
    >>> import torch
    >>> compute_STFT = STFT(
    ...     sample_rate=16000, win_length=25, hop_length=10, n_fft=400
    ... )
    >>> inputs = torch.randn([10, 16000])
    >>> features = compute_STFT(inputs)
    >>> features.shape
    torch.Size([10, 101, 201, 2])
    """

    def __init__(
        self,
        sample_rate,
        win_length=25,
        hop_length=10,
        n_fft=400,
        window_fn=torch.hamming_window,
        normalized_stft=False,
        center=True,
        pad_mode="constant",
        onesided=True,
    ):
        super().__init__()
        self.sample_rate = sample_rate
        self.n_fft = n_fft
        self.normalized_stft = normalized_stft
        self.center = center
        self.pad_mode = pad_mode
        self.onesided = onesided

        # Convert win_length and hop_length from ms to samples
        samples_per_ms = self.sample_rate / 1000.0
        self.win_length = int(round(samples_per_ms * win_length))
        self.hop_length = int(round(samples_per_ms * hop_length))

        self.window = window_fn(self.win_length)

    def forward(self, x):
        """Returns the STFT generated from the input waveforms.

        Arguments
        ---------
        x : tensor
            A batch of audio signals to transform.
        """
        input_shape = x.shape
        multi_channel = len(input_shape) == 3

        # torch.stft only accepts (batch, time): fold the channel axis
        # into the batch axis first.
        if multi_channel:
            x = x.transpose(1, 2)
            x = x.reshape(input_shape[0] * input_shape[2], input_shape[1])

        stft = torch.stft(
            x,
            self.n_fft,
            self.hop_length,
            self.win_length,
            self.window.to(x.device),
            self.center,
            self.pad_mode,
            self.normalized_stft,
            self.onesided,
            return_complex=False,
        )

        if multi_channel:
            # Unfold the channel axis and reorder to
            # (batch, time, freq, 2, channel)
            stft = stft.reshape(
                input_shape[0],
                input_shape[2],
                stft.shape[1],
                stft.shape[2],
                stft.shape[3],
            ).permute(0, 3, 2, 4, 1)
        else:
            # (batch, time, freq, 2)
            stft = stft.transpose(2, 1)

        return stft
class ISTFT(torch.nn.Module):
    """Computes the Inverse Short-Term Fourier Transform (ISTFT)

    This class computes the Inverse Short-Term Fourier Transform of
    an audio signal. It supports multi-channel audio inputs
    (batch, time_step, n_fft, 2, n_channels [optional]).

    Arguments
    ---------
    sample_rate : int
        Sample rate of the input audio signal (e.g. 16000).
    win_length : float
        Length (in ms) of the sliding window used when computing the STFT.
    hop_length : float
        Length (in ms) of the hope of the sliding window used when computing
        the STFT.
    window_fn : function
        A function that takes an integer (number of samples) and outputs a
        tensor to be used as a window for ifft.
    normalized_stft : bool
        If True, the function assumes that it's working with the normalized
        STFT results. (default is False)
    center : bool
        If True (default), the function assumes that the STFT result was padded
        on both sides.
    onesided : True
        If True (default), the function assumes that there are n_fft/2 values
        for each time frame of the STFT.
    epsilon : float
        A small value to avoid division by 0 when normalizing by the sum of the
        squared window. Playing with it can fix some abnormalities at the
        beginning and at the end of the reconstructed signal. The default value
        of epsilon is 1e-12.

    Example
    -------
    >>> import torch
    >>> compute_STFT = STFT(
    ...     sample_rate=16000, win_length=25, hop_length=10, n_fft=400
    ... )
    >>> compute_ISTFT = ISTFT(
    ...     sample_rate=16000, win_length=25, hop_length=10
    ... )
    >>> inputs = torch.randn([10, 16000])
    >>> outputs = compute_ISTFT(compute_STFT(inputs))
    >>> outputs.shape
    torch.Size([10, 16000])
    """

    def __init__(
        self,
        sample_rate,
        n_fft=None,
        win_length=25,
        hop_length=10,
        window_fn=torch.hamming_window,
        normalized_stft=False,
        center=True,
        onesided=True,
        epsilon=1e-12,
    ):
        super().__init__()
        self.sample_rate = sample_rate
        self.n_fft = n_fft
        self.normalized_stft = normalized_stft
        self.center = center
        self.onesided = onesided
        self.epsilon = epsilon

        # Convert win_length and hop_length from ms to samples
        samples_per_ms = self.sample_rate / 1000.0
        self.win_length = int(round(samples_per_ms * win_length))
        self.hop_length = int(round(samples_per_ms * hop_length))

        # Create window using provided function
        self.window = window_fn(self.win_length)

    def forward(self, x, sig_length=None):
        """Returns the ISTFT generated from the input signal.

        Arguments
        ---------
        x : tensor
            A batch of audio signals in the frequency domain to transform.
        sig_length : int
            The length of the output signal in number of samples. If not
            specified will be equal to: (time_step - 1) * hop_length + n_fft
        """
        input_shape = x.shape

        # Infer n_fft from the frequency axis when not given explicitly.
        if self.n_fft is not None:
            n_fft = self.n_fft
        elif self.onesided:
            n_fft = (x.shape[2] - 1) * 2
        else:
            n_fft = x.shape[2]

        # Changing the format to (batch*channel, n_fft, time_step, 2),
        # because torch.istft doesn't support batched channels.
        if len(input_shape) == 5:
            x = x.permute(0, 4, 2, 1, 3)
            x = x.reshape(-1, x.shape[2], x.shape[3], x.shape[4])
        elif len(input_shape) == 4:
            x = x.permute(0, 2, 1, 3)

        # torch.istft requires a complex-valued input
        x = torch.complex(x[..., 0], x[..., 1])

        istft = torch.istft(
            input=x,
            n_fft=n_fft,
            hop_length=self.hop_length,
            win_length=self.win_length,
            window=self.window.to(x.device),
            center=self.center,
            onesided=self.onesided,
            length=sig_length,
        )

        # Convert back to (batch, time, n_channels)
        if len(input_shape) == 5:
            istft = istft.reshape(input_shape[0], input_shape[4], -1)
            istft = istft.transpose(1, 2)

        return istft
def spectral_magnitude(
    stft, power: int = 1, log: bool = False, eps: float = 1e-14
):
    """Returns the magnitude of a complex spectrogram.

    Arguments
    ---------
    stft : torch.Tensor
        A tensor, output from the stft function.
    power : int
        What power to use in computing the magnitude.
        Use power=1 for the power spectrogram.
        Use power=0.5 for the magnitude spectrogram.
    log : bool
        Whether to apply log to the spectral features.
    eps : float
        Small constant added before fractional powers and logs for
        numerical stability.

    Example
    -------
    >>> a = torch.Tensor([[3, 4]])
    >>> spectral_magnitude(a, power=0.5)
    tensor([5.])
    """
    magnitude = stft.pow(2).sum(-1)

    # A fractional power of an exact zero would produce NaN gradients,
    # so offset by eps first.
    if power < 1:
        magnitude = magnitude + eps
    magnitude = magnitude.pow(power)

    if log:
        return torch.log(magnitude + eps)
    return magnitude
class Filterbank(torch.nn.Module):
    """computes filter bank (FBANK) features given spectral magnitudes.
    Arguments
    ---------
    n_mels : float
        Number of Mel filters used to average the spectrogram.
    log_mel : bool
        If True, it computes the log of the FBANKs.
    filter_shape : str
        Shape of the filters ('triangular', 'rectangular', 'gaussian').
    f_min : int
        Lowest frequency for the Mel filters.
    f_max : int
        Highest frequency for the Mel filters.
    n_fft : int
        Number of fft points of the STFT. It defines the frequency resolution
        (n_fft should be<= than win_len).
    sample_rate : int
        Sample rate of the input audio signal (e.g, 16000)
    power_spectrogram : float
        Exponent used for spectrogram computation.
    amin : float
        Minimum amplitude (used for numerical stability).
    ref_value : float
        Reference value used for the dB scale.
    top_db : float
        Minimum negative cut-off in decibels.
    freeze : bool
        If False, it the central frequency and the band of each filter are
        added into nn.parameters. If True, the standard frozen features
        are computed.
    param_change_factor: bool
        If freeze=False, this parameter affects the speed at which the filter
        parameters (i.e., central_freqs and bands) can be changed. When high
        (e.g., param_change_factor=1) the filters change a lot during training.
        When low (e.g. param_change_factor=0.1) the filter parameters are more
        stable during training
    param_rand_factor: float
        This parameter can be used to randomly change the filter parameters
        (i.e, central frequencies and bands) during training. It is thus a
        sort of regularization. param_rand_factor=0 does not affect, while
        param_rand_factor=0.15 allows random variations within +-15% of the
        standard values of the filter parameters (e.g., if the central freq
        is 100 Hz, we can randomly change it from 85 Hz to 115 Hz).
    Example
    -------
    >>> import torch
    >>> compute_fbanks = Filterbank()
    >>> inputs = torch.randn([10, 101, 201])
    >>> features = compute_fbanks(inputs)
    >>> features.shape
    torch.Size([10, 101, 40])
    """
    def __init__(
        self,
        n_mels=40,
        log_mel=True,
        filter_shape="triangular",
        f_min=0,
        f_max=8000,
        n_fft=400,
        sample_rate=16000,
        power_spectrogram=2,
        amin=1e-10,
        ref_value=1.0,
        top_db=80.0,
        param_change_factor=1.0,
        param_rand_factor=0.0,
        freeze=True,
    ):
        super().__init__()
        self.n_mels = n_mels
        self.log_mel = log_mel
        self.filter_shape = filter_shape
        self.f_min = f_min
        self.f_max = f_max
        self.n_fft = n_fft
        self.sample_rate = sample_rate
        self.power_spectrogram = power_spectrogram
        self.amin = amin
        self.ref_value = ref_value
        self.top_db = top_db
        self.freeze = freeze
        # Number of frequency bins produced by a one-sided STFT
        self.n_stft = self.n_fft // 2 + 1
        # Constant offset used by the amplitude-to-dB conversion below
        self.db_multiplier = math.log10(max(self.amin, self.ref_value))
        self.device_inp = torch.device("cpu")
        self.param_change_factor = param_change_factor
        self.param_rand_factor = param_rand_factor
        # 10*log10 for power spectrograms (power==2), 20*log10 otherwise
        if self.power_spectrogram == 2:
            self.multiplier = 10
        else:
            self.multiplier = 20
        # Make sure f_min < f_max
        # NOTE(review): this only logs the error and still builds the
        # filterbank with the inconsistent range; consider raising instead.
        if self.f_min >= self.f_max:
            err_msg = "Require f_min: %f < f_max: %f" % (
                self.f_min,
                self.f_max,
            )
            logger.error(err_msg, exc_info=True)
        # Filter definition: n_mels + 2 points equally spaced on the mel
        # scale; consecutive triplets define each filter's support.
        mel = torch.linspace(
            self._to_mel(self.f_min), self._to_mel(self.f_max), self.n_mels + 2
        )
        hz = self._to_hz(mel)
        # Computation of the filter bands
        band = hz[1:] - hz[:-1]
        self.band = band[:-1]
        self.f_central = hz[1:-1]
        # Adding the central frequency and the band to the list of nn param
        # (scaled down so gradient updates move them at a controlled rate;
        # the scaling is undone in forward()).
        if not self.freeze:
            self.f_central = torch.nn.Parameter(
                self.f_central / (self.sample_rate * self.param_change_factor)
            )
            self.band = torch.nn.Parameter(
                self.band / (self.sample_rate * self.param_change_factor)
            )
        # Frequency axis
        all_freqs = torch.linspace(0, self.sample_rate // 2, self.n_stft)
        # Replicating for all the filters
        self.all_freqs_mat = all_freqs.repeat(self.f_central.shape[0], 1)
    def forward(self, spectrogram):
        """Returns the FBANks.
        Arguments
        ---------
        spectrogram : tensor
            A batch of spectrogram tensors, shape (batch, time, freq) or
            (batch, time, freq, channels).
        """
        # Computing central frequency and bandwidth of each filter:
        # broadcast each scalar across the whole frequency axis.
        f_central_mat = self.f_central.repeat(
            self.all_freqs_mat.shape[1], 1
        ).transpose(0, 1)
        band_mat = self.band.repeat(self.all_freqs_mat.shape[1], 1).transpose(
            0, 1
        )
        # Uncomment to print filter parameters
        # print(self.f_central*self.sample_rate * self.param_change_factor)
        # print(self.band*self.sample_rate* self.param_change_factor)
        # Creation of the multiplication matrix. It is used to create
        # the filters that average the computed spectrogram.
        if not self.freeze:
            # Undo the scaling applied when the parameters were registered
            f_central_mat = f_central_mat * (
                self.sample_rate
                * self.param_change_factor
                * self.param_change_factor
            )
            band_mat = band_mat * (
                self.sample_rate
                * self.param_change_factor
                * self.param_change_factor
            )
        # Regularization with random changes of filter central frequency and band
        elif self.param_rand_factor != 0 and self.training:
            rand_change = (
                1.0
                + torch.rand(2) * 2 * self.param_rand_factor
                - self.param_rand_factor
            )
            f_central_mat = f_central_mat * rand_change[0]
            band_mat = band_mat * rand_change[1]
        fbank_matrix = self._create_fbank_matrix(f_central_mat, band_mat).to(
            spectrogram.device
        )
        sp_shape = spectrogram.shape
        # Managing multi-channels case (batch, time, channels):
        # fold channels into the batch axis before the matmul.
        if len(sp_shape) == 4:
            spectrogram = spectrogram.permute(0, 3, 1, 2)
            spectrogram = spectrogram.reshape(
                sp_shape[0] * sp_shape[3], sp_shape[1], sp_shape[2]
            )
        # FBANK computation
        fbanks = torch.matmul(spectrogram, fbank_matrix)
        if self.log_mel:
            fbanks = self._amplitude_to_DB(fbanks)
        # Reshaping in the case of multi-channel inputs
        if len(sp_shape) == 4:
            fb_shape = fbanks.shape
            fbanks = fbanks.reshape(
                sp_shape[0], sp_shape[3], fb_shape[1], fb_shape[2]
            )
            fbanks = fbanks.permute(0, 2, 3, 1)
        return fbanks
    @staticmethod
    def _to_mel(hz):
        """Returns mel-frequency value corresponding to the input
        frequency value in Hz.
        Arguments
        ---------
        hz : float
            The frequency point in Hz.
        """
        return 2595 * math.log10(1 + hz / 700)
    @staticmethod
    def _to_hz(mel):
        """Returns hz-frequency value corresponding to the input
        mel-frequency value.
        Arguments
        ---------
        mel : float
            The frequency point in the mel-scale.
        """
        return 700 * (10 ** (mel / 2595) - 1)
    def _triangular_filters(self, all_freqs, f_central, band):
        """Returns fbank matrix using triangular filters.
        Arguments
        ---------
        all_freqs : Tensor
            Tensor gathering all the frequency points.
        f_central : Tensor
            Tensor gathering central frequencies of each filter.
        band : Tensor
            Tensor gathering the bands of each filter.
        """
        # Computing the slops of the filters
        slope = (all_freqs - f_central) / band
        left_side = slope + 1.0
        right_side = -slope + 1.0
        # Adding zeros for negative values: min(left, right) forms the
        # triangle, max with 0 clips it outside the filter's support.
        zero = torch.zeros(1, device=self.device_inp)
        fbank_matrix = torch.max(
            zero, torch.min(left_side, right_side)
        ).transpose(0, 1)
        return fbank_matrix
    def _rectangular_filters(self, all_freqs, f_central, band):
        """Returns fbank matrix using rectangular filters.
        Arguments
        ---------
        all_freqs : Tensor
            Tensor gathering all the frequency points.
        f_central : Tensor
            Tensor gathering central frequencies of each filter.
        band : Tensor
            Tensor gathering the bands of each filter.
        """
        # cut-off frequencies of the filters
        low_hz = f_central - band
        high_hz = f_central + band
        # Left/right parts of the filter
        # NOTE(review): the chained `left_side = right_size =` assignment is
        # redundant — `right_size` is overwritten on the next line (and is
        # presumably a typo for "right_side"). Behavior is unaffected.
        left_side = right_size = all_freqs.ge(low_hz)
        right_size = all_freqs.le(high_hz)
        # Filter is 1 where low_hz <= f <= high_hz, else 0
        fbank_matrix = (left_side * right_size).float().transpose(0, 1)
        return fbank_matrix
    def _gaussian_filters(
        self, all_freqs, f_central, band, smooth_factor=torch.tensor(2)
    ):
        """Returns fbank matrix using gaussian filters.
        Arguments
        ---------
        all_freqs : Tensor
            Tensor gathering all the frequency points.
        f_central : Tensor
            Tensor gathering central frequencies of each filter.
        band : Tensor
            Tensor gathering the bands of each filter.
        smooth_factor: Tensor
            Smoothing factor of the gaussian filter. It can be used to employ
            sharper or flatter filters.
        """
        # Gaussian centered at f_central with std = band / smooth_factor
        fbank_matrix = torch.exp(
            -0.5 * ((all_freqs - f_central) / (band / smooth_factor)) ** 2
        ).transpose(0, 1)
        return fbank_matrix
    def _create_fbank_matrix(self, f_central_mat, band_mat):
        """Returns fbank matrix to use for averaging the spectrum with
        the set of filter-banks.
        Arguments
        ---------
        f_central_mat : Tensor
            Tensor gathering central frequencies of each filter.
        band_mat : Tensor
            Tensor gathering the bands of each filter.
        """
        # Any filter_shape other than 'triangular'/'rectangular' falls
        # through to gaussian filters.
        if self.filter_shape == "triangular":
            fbank_matrix = self._triangular_filters(
                self.all_freqs_mat, f_central_mat, band_mat
            )
        elif self.filter_shape == "rectangular":
            fbank_matrix = self._rectangular_filters(
                self.all_freqs_mat, f_central_mat, band_mat
            )
        else:
            fbank_matrix = self._gaussian_filters(
                self.all_freqs_mat, f_central_mat, band_mat
            )
        return fbank_matrix
    def _amplitude_to_DB(self, x):
        """Converts linear-FBANKs to log-FBANKs.
        Arguments
        ---------
        x : Tensor
            A batch of linear FBANK tensors.
        """
        # Clamp avoids log of zero; db_multiplier re-references to ref_value
        x_db = self.multiplier * torch.log10(torch.clamp(x, min=self.amin))
        x_db -= self.multiplier * self.db_multiplier
        # Setting up dB max. It is the max over time and frequency,
        # Hence, of a whole sequence (sequence-dependent)
        new_x_db_max = x_db.amax(dim=(-2, -1)) - self.top_db
        # Clipping to dB max. The view is necessary as only a scalar is obtained
        # per sequence.
        x_db = torch.max(x_db, new_x_db_max.view(x_db.shape[0], 1, 1))
        return x_db
class DCT(torch.nn.Module):
    """Computes the discrete cosine transform.

    This class is primarily used to compute MFCC features of an audio signal
    given a set of FBANK features as input.

    Arguments
    ---------
    input_size : int
        Expected size of the last dimension in the input.
    n_out : int
        Number of output coefficients.
    ortho_norm : bool
        Whether to use orthogonal norm.

    Example
    -------
    >>> import torch
    >>> inputs = torch.randn([10, 101, 40])
    >>> compute_mfccs = DCT(input_size=inputs.size(-1))
    >>> features = compute_mfccs(inputs)
    >>> features.shape
    torch.Size([10, 101, 20])
    """

    def __init__(
        self, input_size, n_out=20, ortho_norm=True,
    ):
        super().__init__()

        if n_out > input_size:
            raise ValueError(
                "Cannot select more DCT coefficients than inputs "
                "(n_out=%i, n_in=%i)" % (n_out, input_size)
            )

        # Generate the DCT-II basis matrix: entry (k, n) is
        # cos(pi/N * (n + 0.5) * k).
        sample_idx = torch.arange(float(input_size))
        coeff_idx = torch.arange(float(n_out)).unsqueeze(1)
        basis = torch.cos(
            math.pi / float(input_size) * (sample_idx + 0.5) * coeff_idx
        )

        if ortho_norm:
            # Orthonormal scaling makes the transform energy-preserving
            basis[0] *= 1.0 / math.sqrt(2.0)
            basis *= math.sqrt(2.0 / float(input_size))
        else:
            basis *= 2.0

        # Transposed so forward() can right-multiply the input
        self.dct_mat = basis.t()

    def forward(self, x):
        """Returns the DCT of the input tensor.

        Arguments
        ---------
        x : tensor
            A batch of tensors to transform, usually fbank features.
        """
        input_shape = x.shape
        multi_channel = len(input_shape) == 4

        # Fold channels into the batch axis for the matmul
        if multi_channel:
            x = x.reshape(
                input_shape[0] * input_shape[3], input_shape[1], input_shape[2]
            )

        # apply the DCT transform
        dct = torch.matmul(x, self.dct_mat.to(x.device))

        # Unfold the channel axis again
        if multi_channel:
            dct = dct.reshape(
                input_shape[0], dct.shape[1], dct.shape[2], input_shape[3]
            )

        return dct
class Deltas(torch.nn.Module):
    """Computes delta coefficients (time derivatives).

    Arguments
    ---------
    input_size : int
        Expected size of the feature dimension (used to size the fixed
        depthwise convolution kernel).
    window_length : int
        Length of the window used to compute the time derivatives.

    Example
    -------
    >>> inputs = torch.randn([10, 101, 20])
    >>> compute_deltas = Deltas(input_size=inputs.size(-1))
    >>> features = compute_deltas(inputs)
    >>> features.shape
    torch.Size([10, 101, 20])
    """

    def __init__(
        self, input_size, window_length=5,
    ):
        super().__init__()
        self.n = (window_length - 1) // 2
        # Normalization constant of the regression-based derivative.
        self.denom = self.n * (self.n + 1) * (2 * self.n + 1) / 3

        # One copy of the ramp [-n, ..., n] per input channel, used as a
        # depthwise (grouped) convolution kernel.
        ramp = torch.arange(-self.n, self.n + 1, dtype=torch.float32)
        self.register_buffer("kernel", ramp.repeat(input_size, 1, 1))

    def forward(self, x):
        """Returns the delta coefficients.

        Arguments
        ---------
        x : tensor
            A batch of tensors.
        """
        # Move time to the last axis; multi-channel input becomes
        # (batch, feat, channel, time).
        x = x.transpose(1, 2).transpose(2, -1)
        orig_shape = x.shape
        multi_channel = len(orig_shape) == 4
        if multi_channel:
            x = x.reshape(
                orig_shape[0] * orig_shape[2], orig_shape[1], orig_shape[3]
            )

        # Replicate-pad the time borders so every frame has full context.
        x = torch.nn.functional.pad(x, (self.n, self.n), mode="replicate")

        # Derivative estimation with the fixed depthwise kernel.
        deltas = torch.nn.functional.conv1d(
            x, self.kernel.to(x.device), groups=x.shape[1]
        )
        deltas = deltas / self.denom

        # Undo the channel folding and the axis moves.
        if multi_channel:
            deltas = deltas.reshape(
                orig_shape[0], orig_shape[1], orig_shape[2], orig_shape[3],
            )
        deltas = deltas.transpose(1, -1).transpose(2, -1)

        return deltas
class ContextWindow(torch.nn.Module):
    """Computes the context window.

    This class applies a context window by gathering multiple time steps
    in a single feature vector. The operation is performed with a
    convolutional layer based on a fixed kernel designed for that.

    Arguments
    ---------
    left_frames : int
        Number of left frames (i.e, past frames) to collect.
    right_frames : int
        Number of right frames (i.e, future frames) to collect.

    Example
    -------
    >>> import torch
    >>> compute_cw = ContextWindow(left_frames=5, right_frames=5)
    >>> inputs = torch.randn([10, 101, 20])
    >>> features = compute_cw(inputs)
    >>> features.shape
    torch.Size([10, 101, 220])
    """

    def __init__(
        self, left_frames=0, right_frames=0,
    ):
        super().__init__()
        self.left_frames = left_frames
        self.right_frames = right_frames
        self.context_len = self.left_frames + self.right_frames + 1
        self.kernel_len = 2 * max(self.left_frames, self.right_frames) + 1

        # Selection kernel: each row picks exactly one frame of the window.
        self.kernel = torch.eye(self.context_len, self.kernel_len)

        # For asymmetric windows, shift the selection towards the future.
        if self.right_frames > self.left_frames:
            lag = self.right_frames - self.left_frames
            self.kernel = torch.roll(self.kernel, lag, 1)

        # The kernel is expanded lazily, once the feature dim is known.
        self.first_call = True

    def forward(self, x):
        """Returns the tensor with the surrounding context.

        Arguments
        ---------
        x : tensor
            A batch of tensors.
        """
        x = x.transpose(1, 2)

        if self.first_call is True:
            self.first_call = False
            # Replicate the selection kernel for every feature channel so a
            # single grouped convolution gathers the whole context.
            self.kernel = (
                self.kernel.repeat(x.shape[1], 1, 1)
                .view(x.shape[1] * self.context_len, self.kernel_len,)
                .unsqueeze(1)
            )

        # Fold extra channels into the batch dimension if present.
        orig_shape = x.shape
        multi_channel = len(orig_shape) == 4
        if multi_channel:
            x = x.reshape(
                orig_shape[0] * orig_shape[2], orig_shape[1], orig_shape[3]
            )

        # Gather the context with the pre-built grouped convolution kernel.
        context = torch.nn.functional.conv1d(
            x,
            self.kernel.to(x.device),
            groups=x.shape[1],
            padding=max(self.left_frames, self.right_frames),
        )

        # Restore the original dimensionality (for the multi-channel case).
        if multi_channel:
            context = context.reshape(
                orig_shape[0], context.shape[1], orig_shape[2], context.shape[-1]
            )

        context = context.transpose(1, 2)

        return context
@register_checkpoint_hooks
class InputNormalization(torch.nn.Module):
    """Performs mean and variance normalization of the input tensor.

    Arguments
    ---------
    mean_norm : True
        If True, the mean will be normalized.
    std_norm : True
        If True, the standard deviation will be normalized.
    norm_type : str
        It defines how the statistics are computed ('sentence' computes them
        at sentence level, 'batch' at batch level, 'speaker' at speaker
        level, while global computes a single normalization vector for all
        the sentences in the dataset). Speaker and global statistics are
        computed with a moving average approach.
    avg_factor : float
        It can be used to manually set the weighting factor between
        current statistics and accumulated ones.
    requires_grad : bool
        Whether this module participates in gradient computation (the
        stored statistics are always detached from the graph).
    update_until_epoch : int
        Epoch (exclusive) until which the running global statistics keep
        being updated; afterwards they are frozen.

    Example
    -------
    >>> import torch
    >>> norm = InputNormalization()
    >>> inputs = torch.randn([10, 101, 20])
    >>> inp_len = torch.ones([10])
    >>> features = norm(inputs, inp_len)
    """

    from typing import Dict

    spk_dict_mean: Dict[int, torch.Tensor]
    spk_dict_std: Dict[int, torch.Tensor]
    spk_dict_count: Dict[int, int]

    def __init__(
        self,
        mean_norm=True,
        std_norm=True,
        norm_type="global",
        avg_factor=None,
        requires_grad=False,
        update_until_epoch=3,
    ):
        super().__init__()
        self.mean_norm = mean_norm
        self.std_norm = std_norm
        self.norm_type = norm_type
        self.avg_factor = avg_factor
        self.requires_grad = requires_grad
        # Running global statistics (updated with a moving average).
        self.glob_mean = torch.tensor([0])
        self.glob_std = torch.tensor([0])
        # Per-speaker running statistics, keyed by integer speaker id.
        self.spk_dict_mean = {}
        self.spk_dict_std = {}
        self.spk_dict_count = {}
        self.weight = 1.0
        self.count = 0
        self.eps = 1e-10
        self.update_until_epoch = update_until_epoch

    def forward(self, x, lengths, spk_ids=torch.tensor([]), epoch=0):
        """Normalizes the input tensor.

        Note: for 'sentence' and 'speaker' normalization, rows of `x` are
        modified in place.

        Arguments
        ---------
        x : tensor
            A batch of tensors.
        lengths : tensor
            A batch of tensors containing the relative length of each
            sentence (e.g, [0.7, 0.9, 1.0]). It is used to avoid
            computing stats on zero-padded steps.
        spk_ids : tensor containing the ids of each speaker (e.g, [0 10 6]).
            It is used to perform per-speaker normalization when
            norm_type='speaker'.
        epoch : int
            The current epoch; global statistics stop being updated once it
            reaches `update_until_epoch`.
        """
        N_batches = x.shape[0]

        current_means = []
        current_stds = []

        for snt_id in range(N_batches):

            # Avoiding padded time steps
            actual_size = torch.round(lengths[snt_id] * x.shape[1]).int()

            # computing statistics
            current_mean, current_std = self._compute_current_stats(
                x[snt_id, 0:actual_size, ...]
            )

            current_means.append(current_mean)
            current_stds.append(current_std)

            if self.norm_type == "sentence":
                x[snt_id] = (x[snt_id] - current_mean.data) / current_std.data

            if self.norm_type == "speaker":
                spk_id = int(spk_ids[snt_id][0])

                if self.training:
                    if spk_id not in self.spk_dict_mean:

                        # Initialization of the dictionary
                        self.spk_dict_mean[spk_id] = current_mean
                        self.spk_dict_std[spk_id] = current_std
                        self.spk_dict_count[spk_id] = 1

                    else:
                        self.spk_dict_count[spk_id] = (
                            self.spk_dict_count[spk_id] + 1
                        )

                        if self.avg_factor is None:
                            self.weight = 1 / self.spk_dict_count[spk_id]
                        else:
                            self.weight = self.avg_factor

                        self.spk_dict_mean[spk_id] = (
                            (1 - self.weight) * self.spk_dict_mean[spk_id]
                            + self.weight * current_mean
                        )
                        self.spk_dict_std[spk_id] = (
                            (1 - self.weight) * self.spk_dict_std[spk_id]
                            + self.weight * current_std
                        )

                        # detach() is NOT in-place: the result must be
                        # assigned, otherwise the stored statistics keep
                        # their autograd history (the previous code
                        # discarded the return value).
                        self.spk_dict_mean[spk_id] = self.spk_dict_mean[
                            spk_id
                        ].detach()
                        self.spk_dict_std[spk_id] = self.spk_dict_std[
                            spk_id
                        ].detach()

                    speaker_mean = self.spk_dict_mean[spk_id].data
                    speaker_std = self.spk_dict_std[spk_id].data
                else:
                    if spk_id in self.spk_dict_mean:
                        speaker_mean = self.spk_dict_mean[spk_id].data
                        speaker_std = self.spk_dict_std[spk_id].data
                    else:
                        speaker_mean = current_mean.data
                        speaker_std = current_std.data

                x[snt_id] = (x[snt_id] - speaker_mean) / speaker_std

        if self.norm_type == "batch" or self.norm_type == "global":
            current_mean = torch.mean(torch.stack(current_means), dim=0)
            current_std = torch.mean(torch.stack(current_stds), dim=0)

            if self.norm_type == "batch":
                x = (x - current_mean.data) / (current_std.data)

            if self.norm_type == "global":
                if self.training:
                    if self.count == 0:
                        self.glob_mean = current_mean
                        self.glob_std = current_std

                    elif epoch < self.update_until_epoch:
                        if self.avg_factor is None:
                            self.weight = 1 / (self.count + 1)
                        else:
                            self.weight = self.avg_factor

                        self.glob_mean = (
                            1 - self.weight
                        ) * self.glob_mean + self.weight * current_mean

                        self.glob_std = (
                            1 - self.weight
                        ) * self.glob_std + self.weight * current_std

                    # As above: detach() returns a new tensor, so assign it.
                    self.glob_mean = self.glob_mean.detach()
                    self.glob_std = self.glob_std.detach()

                    self.count = self.count + 1

                x = (x - self.glob_mean.data) / (self.glob_std.data)

        return x

    def _compute_current_stats(self, x):
        """Computes the mean and std of the given (unpadded) sentence.

        Arguments
        ---------
        x : tensor
            A single-sentence tensor (time steps first).

        Returns
        -------
        (mean, std) tensors over the time dimension.
        """
        # Compute current mean
        if self.mean_norm:
            current_mean = torch.mean(x, dim=0).detach().data
        else:
            current_mean = torch.tensor([0.0], device=x.device)

        # Compute current std
        if self.std_norm:
            current_std = torch.std(x, dim=0).detach().data
        else:
            current_std = torch.tensor([1.0], device=x.device)

        # Improving numerical stability of std
        current_std = torch.max(
            current_std, self.eps * torch.ones_like(current_std)
        )

        return current_mean, current_std

    def _statistics_dict(self):
        """Fills the dictionary containing the normalization statistics."""
        state = {}
        state["count"] = self.count
        state["glob_mean"] = self.glob_mean
        state["glob_std"] = self.glob_std
        state["spk_dict_mean"] = self.spk_dict_mean
        state["spk_dict_std"] = self.spk_dict_std
        state["spk_dict_count"] = self.spk_dict_count

        return state

    def _load_statistics_dict(self, state):
        """Loads the dictionary containing the statistics.

        Arguments
        ---------
        state : dict
            A dictionary containing the normalization statistics.
        """
        self.count = state["count"]
        self.glob_mean = state["glob_mean"]
        self.glob_std = state["glob_std"]

        # The tensors were already placed on the target device by
        # torch.load(map_location=...) in _load. The previous implementation
        # moved them to a non-existent `self.device_inp` attribute, which
        # raised AttributeError whenever speaker statistics were present.
        self.spk_dict_mean = dict(state["spk_dict_mean"])
        self.spk_dict_std = dict(state["spk_dict_std"])
        self.spk_dict_count = state["spk_dict_count"]

        return state

    def to(self, device):
        """Puts the needed tensors in the right device."""
        self = super(InputNormalization, self).to(device)
        self.glob_mean = self.glob_mean.to(device)
        self.glob_std = self.glob_std.to(device)
        for spk in self.spk_dict_mean:
            self.spk_dict_mean[spk] = self.spk_dict_mean[spk].to(device)
            self.spk_dict_std[spk] = self.spk_dict_std[spk].to(device)
        return self

    @mark_as_saver
    def _save(self, path):
        """Save statistic dictionary.

        Arguments
        ---------
        path : str
            A path where to save the dictionary.
        """
        stats = self._statistics_dict()
        torch.save(stats, path)

    @mark_as_transfer
    @mark_as_loader
    def _load(self, path, end_of_epoch=False, device=None):
        """Load statistic dictionary.

        Arguments
        ---------
        path : str
            The path of the statistic dictionary
        device : str, None
            Passed to torch.load(..., map_location=device)
        """
        del end_of_epoch  # Unused here.
        stats = torch.load(path, map_location=device)
        self._load_statistics_dict(stats)
"""Classes for mutating speech data for data augmentation.
This module provides classes that produce realistic distortions of speech
data for the purpose of training speech processing models. The list of
distortions includes adding noise, adding reverberation, changing speed,
and more. All the classes are of type `torch.nn.Module`. This gives the
possibility to have end-to-end differentiability and
backpropagate the gradient through them. In addition, all operations
are expected to be performed on the GPU (where available) for efficiency.
Authors
* Peter Plantinga 2020
"""
# Importing libraries
import math
import torch
import torch.nn.functional as F
from speechbrain.dataio.legacy import ExtendedCSVDataset
from speechbrain.dataio.dataloader import make_dataloader
from speechbrain.processing.signal_processing import (
compute_amplitude,
dB_to_amplitude,
convolve1d,
notch_filter,
reverberate,
)
class AddNoise(torch.nn.Module):
    """This class additively combines a noise signal to the input signal.

    Arguments
    ---------
    csv_file : str
        The name of a csv file containing the location of the
        noise audio files. If none is provided, white noise will be used.
    csv_keys : list, None, optional
        Default: None . One data entry for the noise data should be specified.
        If None, the csv file is expected to have only one data entry.
    sorting : str
        The order to iterate the csv file, from one of the
        following options: random, original, ascending, and descending.
    num_workers : int
        Number of workers in the DataLoader (See PyTorch DataLoader docs).
    snr_low : int
        The low end of the mixing ratios, in decibels.
    snr_high : int
        The high end of the mixing ratios, in decibels.
    pad_noise : bool
        If True, copy noise signals that are shorter than
        their corresponding clean signals so as to cover the whole clean
        signal. Otherwise, leave the noise un-padded.
    mix_prob : float
        The probability that a batch of signals will be mixed
        with a noise signal. By default, every batch is mixed with noise.
    start_index : int
        The index in the noise waveforms to start from. By default, chooses
        a random index in [0, len(noise) - len(waveforms)].
    normalize : bool
        If True, output noisy signals that exceed [-1,1] will be
        normalized to [-1,1].
    replacements : dict, None
        A set of string replacements to carry out in the
        csv file. Each time a key is found in the text, it will be replaced
        with the corresponding value. Defaults to an empty dict.
    noise_sample_rate : int
        The sample rate of the noise audio signals, so noise can be resampled
        to the clean sample rate if necessary.
    clean_sample_rate : int
        The sample rate of the clean audio signals, so noise can be resampled
        to the clean sample rate if necessary.

    Example
    -------
    >>> import pytest
    >>> from speechbrain.dataio.dataio import read_audio
    >>> signal = read_audio('tests/samples/single-mic/example1.wav')
    >>> clean = signal.unsqueeze(0) # [batch, time, channels]
    >>> noisifier = AddNoise('tests/samples/annotation/noise.csv',
    ...                     replacements={'noise_folder': 'tests/samples/noise'})
    >>> noisy = noisifier(clean, torch.ones(1))
    """

    def __init__(
        self,
        csv_file=None,
        csv_keys=None,
        sorting="random",
        num_workers=0,
        snr_low=0,
        snr_high=0,
        pad_noise=False,
        mix_prob=1.0,
        start_index=None,
        normalize=False,
        replacements=None,
        noise_sample_rate=16000,
        clean_sample_rate=16000,
    ):
        super().__init__()

        self.csv_file = csv_file
        self.csv_keys = csv_keys
        self.sorting = sorting
        self.num_workers = num_workers
        self.snr_low = snr_low
        self.snr_high = snr_high
        self.pad_noise = pad_noise
        self.mix_prob = mix_prob
        self.start_index = start_index
        self.normalize = normalize
        # Avoid the mutable-default-argument pitfall: a `{}` default would be
        # shared across every instance of this class.
        self.replacements = {} if replacements is None else replacements

        if noise_sample_rate != clean_sample_rate:
            self.resampler = Resample(noise_sample_rate, clean_sample_rate)

    def forward(self, waveforms, lengths):
        """
        Arguments
        ---------
        waveforms : tensor
            Shape should be `[batch, time]` or `[batch, time, channels]`.
        lengths : tensor
            Shape should be a single dimension, `[batch]`.

        Returns
        -------
        Tensor of shape `[batch, time]` or `[batch, time, channels]`.
        """
        # Copy clean waveform to initialize noisy waveform
        noisy_waveform = waveforms.clone()
        lengths = (lengths * waveforms.shape[1]).unsqueeze(1)

        # Don't add noise (return early) 1-`mix_prob` portion of the batches
        if torch.rand(1) > self.mix_prob:
            return noisy_waveform

        # Compute the average amplitude of the clean waveforms
        clean_amplitude = compute_amplitude(waveforms, lengths)

        # Pick an SNR and use it to compute the mixture amplitude factors
        SNR = torch.rand(len(waveforms), 1, device=waveforms.device)
        SNR = SNR * (self.snr_high - self.snr_low) + self.snr_low
        noise_amplitude_factor = 1 / (dB_to_amplitude(SNR) + 1)
        new_noise_amplitude = noise_amplitude_factor * clean_amplitude

        # Scale clean signal appropriately
        noisy_waveform *= 1 - noise_amplitude_factor

        # Loop through clean samples and create mixture
        if self.csv_file is None:
            white_noise = torch.randn_like(waveforms)
            noisy_waveform += new_noise_amplitude * white_noise
        else:
            tensor_length = waveforms.shape[1]
            noise_waveform, noise_length = self._load_noise(
                lengths, tensor_length,
            )

            # Rescale and add (epsilon guards against silent noise files)
            noise_amplitude = compute_amplitude(noise_waveform, noise_length)
            noise_waveform *= new_noise_amplitude / (noise_amplitude + 1e-14)
            noisy_waveform += noise_waveform

        # Normalizing to prevent clipping
        if self.normalize:
            abs_max, _ = torch.max(
                torch.abs(noisy_waveform), dim=1, keepdim=True
            )
            noisy_waveform = noisy_waveform / abs_max.clamp(min=1.0)

        return noisy_waveform

    def _load_noise(self, lengths, max_length):
        """Load a batch of noises"""
        lengths = lengths.long().squeeze(1)
        batch_size = len(lengths)

        # Load a noise batch (DataLoader is created lazily on first call,
        # once the batch size and device are known)
        if not hasattr(self, "data_loader"):
            # Set parameters based on input
            self.device = lengths.device

            # Create a data loader for the noise wavforms
            if self.csv_file is not None:
                dataset = ExtendedCSVDataset(
                    csvpath=self.csv_file,
                    output_keys=self.csv_keys,
                    sorting=self.sorting
                    if self.sorting != "random"
                    else "original",
                    replacements=self.replacements,
                )
                self.data_loader = make_dataloader(
                    dataset,
                    batch_size=batch_size,
                    num_workers=self.num_workers,
                    shuffle=(self.sorting == "random"),
                )
                self.noise_data = iter(self.data_loader)

        # Load noise to correct device
        noise_batch, noise_len = self._load_noise_batch_of_size(batch_size)
        noise_batch = noise_batch.to(lengths.device)
        noise_len = noise_len.to(lengths.device)

        # Resample noise if necessary
        if hasattr(self, "resampler"):
            noise_batch = self.resampler(noise_batch)

        # Convert relative length to an index
        noise_len = (noise_len * noise_batch.shape[1]).long()

        # Ensure shortest wav can cover speech signal
        # WARNING: THIS COULD BE SLOW IF THERE ARE VERY SHORT NOISES
        if self.pad_noise:
            while torch.any(noise_len < lengths):
                min_len = torch.min(noise_len)
                prepend = noise_batch[:, :min_len]
                noise_batch = torch.cat((prepend, noise_batch), axis=1)
                noise_len += min_len

        # Ensure noise batch is long enough
        elif noise_batch.size(1) < max_length:
            padding = (0, max_length - noise_batch.size(1))
            noise_batch = torch.nn.functional.pad(noise_batch, padding)

        # Select a random starting location in the waveform
        start_index = self.start_index
        if self.start_index is None:
            start_index = 0
            max_chop = (noise_len - lengths).min().clamp(min=1)
            start_index = torch.randint(
                high=max_chop, size=(1,), device=lengths.device
            )

        # Truncate noise_batch to max_length
        noise_batch = noise_batch[:, start_index : start_index + max_length]
        noise_len = (noise_len - start_index).clamp(max=max_length).unsqueeze(1)
        return noise_batch, noise_len

    def _load_noise_batch_of_size(self, batch_size):
        """Concatenate noise batches, then chop to correct size"""
        noise_batch, noise_lens = self._load_noise_batch()

        # Expand
        while len(noise_batch) < batch_size:
            added_noise, added_lens = self._load_noise_batch()
            noise_batch, noise_lens = AddNoise._concat_batch(
                noise_batch, noise_lens, added_noise, added_lens
            )

        # Contract
        if len(noise_batch) > batch_size:
            noise_batch = noise_batch[:batch_size]
            noise_lens = noise_lens[:batch_size]

        return noise_batch, noise_lens

    @staticmethod
    def _concat_batch(noise_batch, noise_lens, added_noise, added_lens):
        """Concatenate two noise batches of potentially different lengths"""

        # pad shorter batch to correct length
        noise_tensor_len = noise_batch.shape[1]
        added_tensor_len = added_noise.shape[1]
        pad = (0, abs(noise_tensor_len - added_tensor_len))
        if noise_tensor_len > added_tensor_len:
            added_noise = torch.nn.functional.pad(added_noise, pad)
            added_lens = added_lens * added_tensor_len / noise_tensor_len
        else:
            noise_batch = torch.nn.functional.pad(noise_batch, pad)
            noise_lens = noise_lens * noise_tensor_len / added_tensor_len

        noise_batch = torch.cat((noise_batch, added_noise))
        noise_lens = torch.cat((noise_lens, added_lens))

        return noise_batch, noise_lens

    def _load_noise_batch(self):
        """Load a batch of noises, restarting iteration if necessary."""
        try:
            # Don't necessarily know the key
            noises, lens = next(self.noise_data).at_position(0)
        except StopIteration:
            self.noise_data = iter(self.data_loader)
            noises, lens = next(self.noise_data).at_position(0)
        return noises, lens
class AddReverb(torch.nn.Module):
    """This class convolves an audio signal with an impulse response.

    Arguments
    ---------
    csv_file : str
        The name of a csv file containing the location of the
        impulse response files.
    sorting : str
        The order to iterate the csv file, from one of
        the following options: random, original, ascending, and descending.
    reverb_prob : float
        The chance that the audio signal will be reverbed.
        By default, every batch is reverbed.
    rir_scale_factor: float
        It compresses or dilates the given impulse response.
        If 0 < scale_factor < 1, the impulse response is compressed
        (less reverb), while if scale_factor > 1 it is dilated
        (more reverb).
    replacements : dict, None
        A set of string replacements to carry out in the
        csv file. Each time a key is found in the text, it will be replaced
        with the corresponding value. Defaults to an empty dict.
    reverb_sample_rate : int
        The sample rate of the corruption signals (rirs), so that they
        can be resampled to clean sample rate if necessary.
    clean_sample_rate : int
        The sample rate of the clean signals, so that the corruption
        signals can be resampled to the clean sample rate before convolution.

    Example
    -------
    >>> import pytest
    >>> from speechbrain.dataio.dataio import read_audio
    >>> signal = read_audio('tests/samples/single-mic/example1.wav')
    >>> clean = signal.unsqueeze(0) # [batch, time, channels]
    >>> reverb = AddReverb('tests/samples/annotation/RIRs.csv',
    ...                     replacements={'rir_folder': 'tests/samples/RIRs'})
    >>> reverbed = reverb(clean, torch.ones(1))
    """

    def __init__(
        self,
        csv_file,
        sorting="random",
        reverb_prob=1.0,
        rir_scale_factor=1.0,
        replacements=None,
        reverb_sample_rate=16000,
        clean_sample_rate=16000,
    ):
        super().__init__()
        self.csv_file = csv_file
        self.sorting = sorting
        self.reverb_prob = reverb_prob
        # Avoid the mutable-default-argument pitfall: a `{}` default would be
        # shared across every instance of this class.
        self.replacements = {} if replacements is None else replacements
        self.rir_scale_factor = rir_scale_factor

        # Create a data loader for the RIR waveforms
        dataset = ExtendedCSVDataset(
            csvpath=self.csv_file,
            sorting=self.sorting if self.sorting != "random" else "original",
            replacements=self.replacements,
        )
        self.data_loader = make_dataloader(
            dataset, shuffle=(self.sorting == "random")
        )
        self.rir_data = iter(self.data_loader)

        if reverb_sample_rate != clean_sample_rate:
            self.resampler = Resample(reverb_sample_rate, clean_sample_rate)

    def forward(self, waveforms, lengths):
        """
        Arguments
        ---------
        waveforms : tensor
            Shape should be `[batch, time]` or `[batch, time, channels]`.
        lengths : tensor
            Shape should be a single dimension, `[batch]`.

        Returns
        -------
        Tensor of shape `[batch, time]` or `[batch, time, channels]`.
        """

        # Don't add reverb (return early) 1-`reverb_prob` portion of the time
        if torch.rand(1) > self.reverb_prob:
            return waveforms.clone()

        # Add channels dimension if necessary
        channel_added = False
        if len(waveforms.shape) == 2:
            waveforms = waveforms.unsqueeze(-1)
            channel_added = True

        # Load and prepare RIR
        rir_waveform = self._load_rir(waveforms)

        # Resample to correct rate
        if hasattr(self, "resampler"):
            rir_waveform = self.resampler(rir_waveform)

        # Compress or dilate RIR
        if self.rir_scale_factor != 1:
            rir_waveform = F.interpolate(
                rir_waveform.transpose(1, -1),
                scale_factor=self.rir_scale_factor,
                mode="linear",
                align_corners=False,
            )
            rir_waveform = rir_waveform.transpose(1, -1)

        rev_waveform = reverberate(waveforms, rir_waveform, rescale_amp="avg")

        # Remove channels dimension if added
        if channel_added:
            return rev_waveform.squeeze(-1)

        return rev_waveform

    def _load_rir(self, waveforms):
        """Loads one impulse response, restarting iteration if necessary,
        and matches its channels/dtype/device to the input waveforms."""
        try:
            rir_waveform, length = next(self.rir_data).at_position(0)
        except StopIteration:
            self.rir_data = iter(self.data_loader)
            rir_waveform, length = next(self.rir_data).at_position(0)

        # Make sure RIR has correct channels
        if len(rir_waveform.shape) == 2:
            rir_waveform = rir_waveform.unsqueeze(-1)

        # Make sure RIR has correct type and device
        rir_waveform = rir_waveform.type(waveforms.dtype)
        return rir_waveform.to(waveforms.device)
class SpeedPerturb(torch.nn.Module):
    """Slightly speed up or slow down an audio signal.

    Resample the audio signal at a rate that is similar to the original rate,
    to achieve a slightly slower or slightly faster signal. This technique is
    outlined in the paper: "Audio Augmentation for Speech Recognition"

    Arguments
    ---------
    orig_freq : int
        The frequency of the original signal.
    speeds : list, None
        The speeds that the signal should be changed to, as a percentage of the
        original signal (i.e. `speeds` is divided by 100 to get a ratio).
        Defaults to [90, 100, 110].
    perturb_prob : float
        The chance that the batch will be speed-
        perturbed. By default, every batch is perturbed.

    Example
    -------
    >>> from speechbrain.dataio.dataio import read_audio
    >>> signal = read_audio('tests/samples/single-mic/example1.wav')
    >>> perturbator = SpeedPerturb(orig_freq=16000, speeds=[90])
    >>> clean = signal.unsqueeze(0)
    >>> perturbed = perturbator(clean)
    >>> clean.shape
    torch.Size([1, 52173])
    >>> perturbed.shape
    torch.Size([1, 46956])
    """

    def __init__(
        self, orig_freq, speeds=None, perturb_prob=1.0,
    ):
        super().__init__()
        self.orig_freq = orig_freq
        # Avoid the mutable-default-argument pitfall: a list default would be
        # shared across every instance of this class.
        self.speeds = [90, 100, 110] if speeds is None else speeds
        self.perturb_prob = perturb_prob

        # Initialize index of perturbation
        self.samp_index = 0

        # Initialize resamplers: one per candidate speed.
        self.resamplers = []
        for speed in self.speeds:
            config = {
                "orig_freq": self.orig_freq,
                "new_freq": self.orig_freq * speed // 100,
            }
            self.resamplers.append(Resample(**config))

    def forward(self, waveform):
        """
        Arguments
        ---------
        waveform : tensor
            Shape should be `[batch, time]` or `[batch, time, channels]`.

        Returns
        -------
        Tensor of shape `[batch, time]` or `[batch, time, channels]`.
        """

        # Don't perturb (return early) 1-`perturb_prob` portion of the batches
        if torch.rand(1) > self.perturb_prob:
            return waveform.clone()

        # Perform a random perturbation
        self.samp_index = torch.randint(len(self.speeds), (1,))[0]
        perturbed_waveform = self.resamplers[self.samp_index](waveform)

        return perturbed_waveform
class Resample(torch.nn.Module):
"""This class resamples an audio signal using sinc-based interpolation.
It is a modification of the `resample` function from torchaudio
(https://pytorch.org/audio/stable/tutorials/audio_resampling_tutorial.html)
Arguments
---------
orig_freq : int
the sampling frequency of the input signal.
new_freq : int
the new sampling frequency after this operation is performed.
lowpass_filter_width : int
Controls the sharpness of the filter, larger numbers result in a
sharper filter, but they are less efficient. Values from 4 to 10 are
allowed.
Example
-------
>>> from speechbrain.dataio.dataio import read_audio
>>> signal = read_audio('tests/samples/single-mic/example1.wav')
>>> signal = signal.unsqueeze(0) # [batch, time, channels]
>>> resampler = Resample(orig_freq=16000, new_freq=8000)
>>> resampled = resampler(signal)
>>> signal.shape
torch.Size([1, 52173])
>>> resampled.shape
torch.Size([1, 26087])
"""
def __init__(
self, orig_freq=16000, new_freq=16000, lowpass_filter_width=6,
):
super().__init__()
self.orig_freq = orig_freq
self.new_freq = new_freq
self.lowpass_filter_width = lowpass_filter_width
# Compute rate for striding
self._compute_strides()
assert self.orig_freq % self.conv_stride == 0
assert self.new_freq % self.conv_transpose_stride == 0
def _compute_strides(self):
"""Compute the phases in polyphase filter.
(almost directly from torchaudio.compliance.kaldi)
"""
# Compute new unit based on ratio of in/out frequencies
base_freq = math.gcd(self.orig_freq, self.new_freq)
input_samples_in_unit = self.orig_freq // base_freq
self.output_samples = self.new_freq // base_freq
# Store the appropriate stride based on the new units
self.conv_stride = input_samples_in_unit
self.conv_transpose_stride = self.output_samples
    def forward(self, waveforms):
        """Resamples the input waveforms to ``new_freq``.

        Arguments
        ---------
        waveforms : tensor
            Shape should be `[batch, time]` or `[batch, time, channels]`.

        Returns
        -------
        Tensor of shape `[batch, time]` or `[batch, time, channels]`.
        """
        # Lazily build the filter state on first use.
        # NOTE(review): _indices_and_weights presumably caches
        # self.first_indices and self.weights as side effects -- confirm
        # against its definition.
        if not hasattr(self, "first_indices"):
            self._indices_and_weights(waveforms)

        # Don't do anything if the frequencies are the same
        if self.orig_freq == self.new_freq:
            return waveforms

        unsqueezed = False
        if len(waveforms.shape) == 2:
            # [batch, time] -> [batch, 1, time] for the conv-based resampler.
            waveforms = waveforms.unsqueeze(1)
            unsqueezed = True
        elif len(waveforms.shape) == 3:
            # [batch, time, channels] -> [batch, channels, time]
            waveforms = waveforms.transpose(1, 2)
        else:
            raise ValueError("Input must be 2 or 3 dimensions")

        # Do resampling
        resampled_waveform = self._perform_resample(waveforms)

        # Restore the caller's layout.
        if unsqueezed:
            resampled_waveform = resampled_waveform.squeeze(1)
        else:
            resampled_waveform = resampled_waveform.transpose(1, 2)

        return resampled_waveform
def _perform_resample(self, waveforms):
"""Resamples the waveform at the new frequency.
This matches Kaldi's OfflineFeatureTpl ResampleWaveform which uses a
LinearResample (resample a signal at linearly spaced intervals to
up/downsample a signal). LinearResample (LR) means that the output
signal is at linearly spaced intervals (i.e the output signal has a
frequency of `new_freq`). It uses sinc/bandlimited interpolation to
upsample/downsample the signal.
(almost directly from torchaudio.compliance.kaldi)
https://ccrma.stanford.edu/~jos/resample/
Theory_Ideal_Bandlimited_Interpolation.html
https://github.com/kaldi-asr/kaldi/blob/master/src/feat/resample.h#L56
Arguments
---------
waveforms : tensor
The batch of audio signals to resample.
Returns
-------
The waveforms at the new frequency.
"""
# Compute output size and initialize
batch_size, num_channels, wave_len = waveforms.size()
window_size = self.weights.size(1)
tot_output_samp = self._output_samples(wave_len)
resampled_waveform = torch.zeros(
(batch_size, num_channels, tot_output_samp),
device=waveforms.device,
)
self.weights = self.weights.to(waveforms.device)
# Check weights are on correct device
if waveforms.device != self.weights.device:
self.weights = self.weights.to(waveforms.device)
# eye size: (num_channels, num_channels, 1)
eye = torch.eye(num_channels, device=waveforms.device).unsqueeze(2)
# Iterate over the phases in the polyphase filter
for i in range(self.first_indices.size(0)):
wave_to_conv = waveforms
first_index = int(self.first_indices[i].item())
if first_index >= 0:
# trim the signal as the filter will not be applied
# before the first_index
wave_to_conv = wave_to_conv[..., first_index:]
# pad the right of the signal to allow partial convolutions
# meaning compute values for partial windows (e.g. end of the
# window is outside the signal length)
max_index = (tot_output_samp - 1) // self.output_samples
end_index = max_index * self.conv_stride + window_size
current_wave_len = wave_len - first_index
right_padding = max(0, end_index + 1 - current_wave_len)
left_padding = max(0, -first_index)
wave_to_conv = torch.nn.functional.pad(
wave_to_conv, (left_padding, right_padding)
)
conv_wave = torch.nn.functional.conv1d(
input=wave_to_conv,
weight=self.weights[i].repeat(num_channels, 1, 1),
stride=self.conv_stride,
groups=num_channels,
)
# we want conv_wave[:, i] to be at
# output[:, i + n*conv_transpose_stride]
dilated_conv_wave = torch.nn.functional.conv_transpose1d(
conv_wave, eye, stride=self.conv_transpose_stride
)
# pad dilated_conv_wave so it reaches the output length if needed.
left_padding = i
previous_padding = left_padding + dilated_conv_wave.size(-1)
right_padding = max(0, tot_output_samp - previous_padding)
dilated_conv_wave = torch.nn.functional.pad(
dilated_conv_wave, (left_padding, right_padding)
)
dilated_conv_wave = dilated_conv_wave[..., :tot_output_samp]
resampled_waveform += dilated_conv_wave
return resampled_waveform
def _output_samples(self, input_num_samp):
"""Based on LinearResample::GetNumOutputSamples.
LinearResample (LR) means that the output signal is at
linearly spaced intervals (i.e the output signal has a
frequency of ``new_freq``). It uses sinc/bandlimited
interpolation to upsample/downsample the signal.
(almost directly from torchaudio.compliance.kaldi)
Arguments
---------
input_num_samp : int
The number of samples in each example in the batch.
Returns
-------
Number of samples in the output waveform.
"""
# For exact computation, we measure time in "ticks" of 1.0 / tick_freq,
# where tick_freq is the least common multiple of samp_in and
# samp_out.
samp_in = int(self.orig_freq)
samp_out = int(self.new_freq)
tick_freq = abs(samp_in * samp_out) // math.gcd(samp_in, samp_out)
ticks_per_input_period = tick_freq // samp_in
# work out the number of ticks in the time interval
# [ 0, input_num_samp/samp_in ).
interval_length = input_num_samp * ticks_per_input_period
if interval_length <= 0:
return 0
ticks_per_output_period = tick_freq // samp_out
# Get the last output-sample in the closed interval,
# i.e. replacing [ ) with [ ]. Note: integer division rounds down.
# See http://en.wikipedia.org/wiki/Interval_(mathematics) for an
# explanation of the notation.
last_output_samp = interval_length // ticks_per_output_period
# We need the last output-sample in the open interval, so if it
# takes us to the end of the interval exactly, subtract one.
if last_output_samp * ticks_per_output_period == interval_length:
last_output_samp -= 1
# First output-sample index is zero, so the number of output samples
# is the last output-sample plus one.
num_output_samp = last_output_samp + 1
return num_output_samp
def _indices_and_weights(self, waveforms):
    """Based on LinearResample::SetIndexesAndWeights

    Retrieves the weights for resampling as well as the indices in which
    they are valid. LinearResample (LR) means that the output signal is at
    linearly spaced intervals (i.e the output signal has a frequency
    of ``new_freq``). It uses sinc/bandlimited interpolation to
    upsample/downsample the signal.

    Arguments
    ---------
    waveforms : tensor
        A batch of waveforms; used here only to select the device on
        which the filter tensors are allocated.

    Returns
    -------
    - the place where each filter should start being applied
    - the filters to be applied to the signal for resampling
    """
    # Lowpass filter frequency depends on smaller of two frequencies;
    # the 0.99 factor leaves a small transition band below Nyquist.
    min_freq = min(self.orig_freq, self.new_freq)
    lowpass_cutoff = 0.99 * 0.5 * min_freq
    assert lowpass_cutoff * 2 <= min_freq
    # Half-width of the windowed-sinc filter support, in seconds
    window_width = self.lowpass_filter_width / (2.0 * lowpass_cutoff)
    assert lowpass_cutoff < min(self.orig_freq, self.new_freq) / 2
    # Time (seconds) of each output sample within one filter period
    output_t = torch.arange(
        start=0.0, end=self.output_samples, device=waveforms.device,
    )
    output_t /= self.new_freq
    # Span of input time covered by each output sample's filter
    min_t = output_t - window_width
    max_t = output_t + window_width
    # First and last input-sample indices touched by each output sample
    min_input_index = torch.ceil(min_t * self.orig_freq)
    max_input_index = torch.floor(max_t * self.orig_freq)
    num_indices = max_input_index - min_input_index + 1
    # All filter rows are padded out to the widest filter
    max_weight_width = num_indices.max()
    j = torch.arange(max_weight_width, device=waveforms.device)
    # input_index[o, j] = index of the j-th input sample used by output o
    input_index = min_input_index.unsqueeze(1) + j.unsqueeze(0)
    # Time offset between each contributing input sample and its output
    delta_t = (input_index / self.orig_freq) - output_t.unsqueeze(1)
    weights = torch.zeros_like(delta_t)
    inside_window_indices = delta_t.abs().lt(window_width)
    # raised-cosine (Hanning) window with width `window_width`
    weights[inside_window_indices] = 0.5 * (
        1
        + torch.cos(
            2
            * math.pi
            * lowpass_cutoff
            / self.lowpass_filter_width
            * delta_t[inside_window_indices]
        )
    )
    t_eq_zero_indices = delta_t.eq(0.0)
    t_not_eq_zero_indices = ~t_eq_zero_indices
    # sinc filter function, applied everywhere except t == 0
    weights[t_not_eq_zero_indices] *= torch.sin(
        2 * math.pi * lowpass_cutoff * delta_t[t_not_eq_zero_indices]
    ) / (math.pi * delta_t[t_not_eq_zero_indices])
    # limit of the function at t = 0
    weights[t_eq_zero_indices] *= 2 * lowpass_cutoff
    # size (output_samples, max_weight_width); normalize for unit gain
    weights /= self.orig_freq
    # Cache results for the convolution pass of the resampler
    self.first_indices = min_input_index
    self.weights = weights
class AddBabble(torch.nn.Module):
    """Simulate babble noise by mixing the signals in a batch.

    Each signal is corrupted with a sum of rolled copies of the other
    signals in the same batch, rescaled to a randomly chosen SNR.

    Arguments
    ---------
    speaker_count : int
        The number of signals to mix with the original signal.
    snr_low : int
        The low end of the mixing ratios, in decibels.
    snr_high : int
        The high end of the mixing ratios, in decibels.
    mix_prob : float
        The probability that the batch of signals will be
        mixed with babble noise. By default, every signal is mixed.

    Example
    -------
    >>> import pytest
    >>> babbler = AddBabble()
    >>> dataset = ExtendedCSVDataset(
    ...     csvpath='tests/samples/annotation/speech.csv',
    ...     replacements={"data_folder": "tests/samples/single-mic"}
    ... )
    >>> loader = make_dataloader(dataset, batch_size=5)
    >>> speech, lengths = next(iter(loader)).at_position(0)
    >>> noisy = babbler(speech, lengths)
    """

    def __init__(
        self, speaker_count=3, snr_low=0, snr_high=0, mix_prob=1,
    ):
        super().__init__()
        self.speaker_count = speaker_count
        self.snr_low = snr_low
        self.snr_high = snr_high
        self.mix_prob = mix_prob

    def forward(self, waveforms, lengths):
        """
        Arguments
        ---------
        waveforms : tensor
            A batch of audio signals to process, with shape `[batch, time]` or
            `[batch, time, channels]`.
        lengths : tensor
            The length of each audio in the batch, with shape `[batch]`.

        Returns
        -------
        Tensor with processed waveforms.
        """
        mixture = waveforms.clone()
        abs_lengths = (lengths * waveforms.shape[1]).unsqueeze(1)
        n_batch = len(waveforms)

        # Leave a 1 - `mix_prob` fraction of batches unmodified
        if torch.rand(1) > self.mix_prob:
            return mixture

        # Draw one SNR per example and derive the mixing gains from it
        clean_amplitude = compute_amplitude(waveforms, abs_lengths)
        snr = torch.rand(n_batch, 1, device=waveforms.device)
        snr = self.snr_low + snr * (self.snr_high - self.snr_low)
        noise_gain = 1 / (dB_to_amplitude(snr) + 1)
        target_babble_amplitude = noise_gain * clean_amplitude

        # Attenuate the clean signal to make room for the babble
        mixture *= 1 - noise_gain

        # Build the babble from rolled copies of the batch, one per speaker
        babble = waveforms.roll((1,), dims=0)
        babble_lengths = abs_lengths.roll((1,), dims=0)
        for speaker in range(1, self.speaker_count):
            babble = babble + waveforms.roll((1 + speaker,), dims=0)
            babble_lengths = torch.max(
                babble_lengths, babble_lengths.roll((1,), dims=0)
            )

        # Rescale the babble to the target amplitude and add it in
        current_amplitude = compute_amplitude(babble, babble_lengths)
        babble = babble * (
            target_babble_amplitude / (current_amplitude + 1e-14)
        )
        return mixture + babble
class DropFreq(torch.nn.Module):
    """This class drops a random frequency from the signal.

    The purpose of this class is to teach models to learn to rely on all parts
    of the signal, not just a few frequency bands.

    Arguments
    ---------
    drop_freq_low : float
        The low end of frequencies that can be dropped,
        as a fraction of the sampling rate / 2.
    drop_freq_high : float
        The high end of frequencies that can be
        dropped, as a fraction of the sampling rate / 2.
    drop_count_low : int
        The low end of number of frequencies that could be dropped.
    drop_count_high : int
        The high end of number of frequencies that could be dropped.
    drop_width : float
        The width of the frequency band to drop, as
        a fraction of the sampling_rate / 2.
    drop_prob : float
        The probability that the batch of signals will have a frequency
        dropped. By default, every batch has frequencies dropped.

    Example
    -------
    >>> from speechbrain.dataio.dataio import read_audio
    >>> dropper = DropFreq()
    >>> signal = read_audio('tests/samples/single-mic/example1.wav')
    >>> dropped_signal = dropper(signal.unsqueeze(0))
    """

    def __init__(
        self,
        drop_freq_low=1e-14,
        drop_freq_high=1,
        drop_count_low=1,
        drop_count_high=2,
        drop_width=0.05,
        drop_prob=1,
    ):
        super().__init__()
        self.drop_freq_low = drop_freq_low
        self.drop_freq_high = drop_freq_high
        self.drop_count_low = drop_count_low
        self.drop_count_high = drop_count_high
        self.drop_width = drop_width
        self.drop_prob = drop_prob

    def forward(self, waveforms):
        """
        Arguments
        ---------
        waveforms : tensor
            Shape should be `[batch, time]` or `[batch, time, channels]`.

        Returns
        -------
        Tensor of shape `[batch, time]` or `[batch, time, channels]`.
        """
        filtered = waveforms.clone()
        # Leave a 1 - `drop_prob` fraction of batches unmodified
        if torch.rand(1) > self.drop_prob:
            return filtered

        # Work with an explicit channels dimension throughout
        if len(waveforms.shape) == 2:
            filtered = filtered.unsqueeze(-1)

        # Decide how many notches to apply and where to center them
        n_drops = torch.randint(
            low=self.drop_count_low, high=self.drop_count_high + 1, size=(1,),
        )
        band = self.drop_freq_high - self.drop_freq_low
        notch_centers = torch.rand(n_drops) * band + self.drop_freq_low

        # Build the composite band-reject filter, starting from an
        # identity (delta) kernel so notches can be convolved in one by one.
        kernel_size = 101
        half = kernel_size // 2
        band_stop = torch.zeros(1, kernel_size, 1, device=waveforms.device)
        band_stop[0, half, 0] = 1

        for center in notch_centers:
            notch = notch_filter(
                center, kernel_size, self.drop_width,
            ).to(waveforms.device)
            band_stop = convolve1d(band_stop, notch, half)

        # Apply the combined filter to the signal
        filtered = convolve1d(filtered, band_stop, half)

        # Collapse the channels dimension again if it was added
        return filtered.squeeze(-1)
class DropChunk(torch.nn.Module):
    """This class drops portions of the input signal.

    Using `DropChunk` as an augmentation strategy helps models learn to rely
    on all parts of the signal, since it can't expect a given part to be
    present.

    Arguments
    ---------
    drop_length_low : int
        The low end of lengths for which to set the
        signal to zero, in samples.
    drop_length_high : int
        The high end of lengths for which to set the
        signal to zero, in samples.
    drop_count_low : int
        The low end of number of times that the signal
        can be dropped to zero.
    drop_count_high : int
        The high end of number of times that the signal
        can be dropped to zero.
    drop_start : int
        The first index for which dropping will be allowed.
    drop_end : int
        The last index for which dropping will be allowed.
    drop_prob : float
        The probability that the batch of signals will
        have a portion dropped. By default, every batch
        has portions dropped.
    noise_factor : float
        The factor relative to average amplitude of an utterance
        to use for scaling the white noise inserted. 1 keeps
        the average amplitude the same, while 0 inserts all 0's.

    Example
    -------
    >>> from speechbrain.dataio.dataio import read_audio
    >>> dropper = DropChunk(drop_start=100, drop_end=200, noise_factor=0.)
    >>> signal = read_audio('tests/samples/single-mic/example1.wav')
    >>> signal = signal.unsqueeze(0) # [batch, time, channels]
    >>> length = torch.ones(1)
    >>> dropped_signal = dropper(signal, length)
    >>> float(dropped_signal[:, 150])
    0.0
    """

    def __init__(
        self,
        drop_length_low=100,
        drop_length_high=1000,
        drop_count_low=1,
        drop_count_high=10,
        drop_start=0,
        drop_end=None,
        drop_prob=1,
        noise_factor=0.0,
    ):
        super().__init__()
        self.drop_length_low = drop_length_low
        self.drop_length_high = drop_length_high
        self.drop_count_low = drop_count_low
        self.drop_count_high = drop_count_high
        self.drop_start = drop_start
        self.drop_end = drop_end
        self.drop_prob = drop_prob
        self.noise_factor = noise_factor

        # Validate low < high
        if drop_length_low > drop_length_high:
            raise ValueError("Low limit must not be more than high limit")
        if drop_count_low > drop_count_high:
            raise ValueError("Low limit must not be more than high limit")

        # Make sure the length doesn't exceed end - start
        # (only when drop_end is a concrete non-negative index; negative
        # drop_end is resolved per-example at forward time)
        if drop_end is not None and drop_end >= 0:
            if drop_start > drop_end:
                raise ValueError("Low limit must not be more than high limit")

            drop_range = drop_end - drop_start
            self.drop_length_low = min(drop_length_low, drop_range)
            self.drop_length_high = min(drop_length_high, drop_range)

    def forward(self, waveforms, lengths):
        """
        Arguments
        ---------
        waveforms : tensor
            Shape should be `[batch, time]` or `[batch, time, channels]`.
        lengths : tensor
            Shape should be a single dimension, `[batch]`.

        Returns
        -------
        Tensor of shape `[batch, time]` or
            `[batch, time, channels]`
        """
        # Reading input list; convert relative lengths to sample counts
        lengths = (lengths * waveforms.size(1)).long()
        batch_size = waveforms.size(0)
        dropped_waveform = waveforms.clone()

        # Don't drop (return early) 1-`drop_prob` portion of the batches
        if torch.rand(1) > self.drop_prob:
            return dropped_waveform

        # Store original amplitude for computing white noise amplitude
        clean_amplitude = compute_amplitude(waveforms, lengths.unsqueeze(1))

        # Pick a number of times to drop, independently per example
        drop_times = torch.randint(
            low=self.drop_count_low,
            high=self.drop_count_high + 1,
            size=(batch_size,),
        )

        # Iterate batch to set mask
        for i in range(batch_size):
            if drop_times[i] == 0:
                continue

            # Pick lengths (one per chunk to drop)
            length = torch.randint(
                low=self.drop_length_low,
                high=self.drop_length_high + 1,
                size=(drop_times[i],),
            )

            # Compute range of starting locations; negative start/end
            # are interpreted relative to this example's length
            start_min = self.drop_start
            if start_min < 0:
                start_min += lengths[i]
            start_max = self.drop_end
            if start_max is None:
                start_max = lengths[i]
            if start_max < 0:
                start_max += lengths[i]
            # Ensure even the longest chunk fits before start_max
            start_max = max(0, start_max - length.max())

            # Pick starting locations
            start = torch.randint(
                low=start_min, high=start_max + 1, size=(drop_times[i],),
            )

            end = start + length

            # Update waveform
            if not self.noise_factor:
                # Plain zeroing of each chunk
                for j in range(drop_times[i]):
                    dropped_waveform[i, start[j] : end[j]] = 0.0
            else:
                # Uniform distribution of -2 to +2 * avg amplitude should
                # preserve the average for normalization
                noise_max = 2 * clean_amplitude[i] * self.noise_factor
                for j in range(drop_times[i]):
                    # zero-center the noise distribution
                    noise_vec = torch.rand(length[j], device=waveforms.device)
                    noise_vec = 2 * noise_max * noise_vec - noise_max
                    dropped_waveform[i, start[j] : end[j]] = noise_vec

        return dropped_waveform
class DoClip(torch.nn.Module):
    """This function mimics audio clipping by clamping the input tensor.

    A clamp threshold is drawn uniformly from `[clip_low, clip_high]` and
    the waveform is clipped symmetrically to `[-threshold, threshold]`.

    Arguments
    ---------
    clip_low : float
        The low end of amplitudes for which to clip the signal.
    clip_high : float
        The high end of amplitudes for which to clip the signal.
    clip_prob : float
        The probability that the batch of signals will have a portion clipped.
        By default, every batch has portions clipped.

    Example
    -------
    >>> from speechbrain.dataio.dataio import read_audio
    >>> clipper = DoClip(clip_low=0.01, clip_high=0.01)
    >>> signal = read_audio('tests/samples/single-mic/example1.wav')
    >>> clipped_signal = clipper(signal.unsqueeze(0))
    >>> "%.2f" % clipped_signal.max()
    '0.01'
    """

    def __init__(
        self, clip_low=0.5, clip_high=1, clip_prob=1,
    ):
        super().__init__()
        self.clip_low = clip_low
        self.clip_high = clip_high
        self.clip_prob = clip_prob

    def forward(self, waveforms):
        """
        Arguments
        ---------
        waveforms : tensor
            Shape should be `[batch, time]` or `[batch, time, channels]`.

        Returns
        -------
        Tensor of shape `[batch, time]` or `[batch, time, channels]`
        """
        # Pass an untouched copy through for 1 - `clip_prob` of batches
        if torch.rand(1) > self.clip_prob:
            return waveforms.clone()

        # Draw the clamp threshold uniformly from [clip_low, clip_high)
        span = self.clip_high - self.clip_low
        threshold = self.clip_low + span * torch.rand(1,)[0]

        # Symmetric clamp mimics hard clipping
        return waveforms.clamp(-threshold, threshold)
| 44,293 | 34.982128 | 81 | py |
speechbrain | speechbrain-main/speechbrain/processing/signal_processing.py | """
Low level signal processing utilities
Authors
* Peter Plantinga 2020
* Francois Grondin 2020
* William Aris 2020
* Samuele Cornell 2020
* Sarthak Yadav 2022
"""
import torch
import math
from packaging import version
def compute_amplitude(waveforms, lengths=None, amp_type="avg", scale="linear"):
    """Compute amplitude of a batch of waveforms.

    Arguments
    ---------
    waveforms : tensor
        The waveforms used for computing amplitude.
        Shape should be `[time]` or `[batch, time]` or
        `[batch, time, channels]`.
    lengths : tensor
        The lengths of the waveforms excluding the padding.
        Shape should be a single dimension, `[batch]`.
    amp_type : str
        Whether to compute "avg" average or "peak" amplitude.
        Choose between ["avg", "peak"].
    scale : str
        Whether to compute amplitude in "dB" or "linear" scale.
        Choose between ["linear", "dB"].

    Returns
    -------
    The average amplitude of the waveforms.

    Example
    -------
    >>> signal = torch.sin(torch.arange(16000.0)).unsqueeze(0)
    >>> compute_amplitude(signal, signal.size(1))
    tensor([[0.6366]])
    """
    # Promote unbatched input to a batch of one
    if waveforms.dim() == 1:
        waveforms = waveforms.unsqueeze(0)

    assert amp_type in ["avg", "peak"]
    assert scale in ["linear", "dB"]

    if amp_type == "avg":
        magnitudes = torch.abs(waveforms)
        if lengths is None:
            result = magnitudes.mean(dim=1, keepdim=True)
        else:
            # Mean over the un-padded portion only
            result = magnitudes.sum(dim=1, keepdim=True) / lengths
    elif amp_type == "peak":
        result = torch.abs(waveforms).max(dim=1, keepdim=True).values
    else:
        raise NotImplementedError

    if scale == "linear":
        return result
    elif scale == "dB":
        # clamp to avoid -inf from log10 of exact zeros
        return torch.clamp(20 * torch.log10(result), min=-80)
    else:
        raise NotImplementedError
def normalize(waveforms, lengths=None, amp_type="avg", eps=1e-14):
    """This function normalizes a signal to unitary average or peak amplitude.

    Arguments
    ---------
    waveforms : tensor
        The waveforms to normalize.
        Shape should be `[batch, time]` or `[batch, time, channels]`.
    lengths : tensor
        The lengths of the waveforms excluding the padding.
        Shape should be a single dimension, `[batch]`.
    amp_type : str
        Whether one wants to normalize with respect to "avg" or "peak"
        amplitude. Choose between ["avg", "peak"]. Note: for "avg" clipping
        is not prevented and can occur.
    eps : float
        A small number to add to the denominator to prevent NaN.

    Returns
    -------
    waveforms : tensor
        Normalized level waveform.
    """
    assert amp_type in ["avg", "peak"]

    # Temporarily promote unbatched input so amplitude is computed per-batch
    had_no_batch = waveforms.dim() == 1
    if had_no_batch:
        waveforms = waveforms.unsqueeze(0)

    # eps keeps the division finite for all-zero signals
    denominator = compute_amplitude(waveforms, lengths, amp_type) + eps

    if had_no_batch:
        waveforms = waveforms.squeeze(0)
    return waveforms / denominator
def rescale(waveforms, lengths, target_lvl, amp_type="avg", scale="linear"):
    """This functions performs signal rescaling to a target level.

    Arguments
    ---------
    waveforms : tensor
        The waveforms to normalize.
        Shape should be `[batch, time]` or `[batch, time, channels]`.
    lengths : tensor
        The lengths of the waveforms excluding the padding.
        Shape should be a single dimension, `[batch]`.
    target_lvl : float
        Target lvl in dB or linear scale.
    amp_type : str
        Whether one wants to rescale with respect to "avg" or "peak" amplitude.
        Choose between ["avg", "peak"].
    scale : str
        whether target_lvl belongs to linear or dB scale.
        Choose between ["linear", "dB"].

    Returns
    -------
    waveforms : tensor
        Rescaled waveforms.
    """
    assert amp_type in ["peak", "avg"]
    assert scale in ["linear", "dB"]

    # Temporarily batch unbatched input
    squeeze_back = len(waveforms.shape) == 1
    if squeeze_back:
        waveforms = waveforms.unsqueeze(0)

    # Normalize to unit amplitude, then scale up to the target level
    unit = normalize(waveforms, lengths, amp_type)
    if scale == "linear":
        rescaled = target_lvl * unit
    elif scale == "dB":
        rescaled = dB_to_amplitude(target_lvl) * unit
    else:
        raise NotImplementedError("Invalid scale, choose between dB and linear")

    if squeeze_back:
        rescaled = rescaled.squeeze(0)
    return rescaled
def convolve1d(
    waveform,
    kernel,
    padding=0,
    pad_type="constant",
    stride=1,
    groups=1,
    use_fft=False,
    rotation_index=0,
):
    """Use torch.nn.functional to perform 1d padding and conv.

    Arguments
    ---------
    waveform : tensor
        The tensor to perform operations on.
    kernel : tensor
        The filter to apply during convolution.
    padding : int or tuple
        The padding (pad_left, pad_right) to apply.
        If an integer is passed instead, this is passed
        to the conv1d function and pad_type is ignored.
    pad_type : str
        The type of padding to use. Passed directly to
        `torch.nn.functional.pad`, see PyTorch documentation
        for available options.
    stride : int
        The number of units to move each time convolution is applied.
        Passed to conv1d. Has no effect if `use_fft` is True.
    groups : int
        This option is passed to `conv1d` to split the input into groups for
        convolution. Input channels should be divisible by the number of groups.
    use_fft : bool
        When `use_fft` is passed `True`, then compute the convolution in the
        spectral domain using complex multiply. This is more efficient on CPU
        when the size of the kernel is large (e.g. reverberation). WARNING:
        Without padding, circular convolution occurs. This makes little
        difference in the case of reverberation, but may make more difference
        with different kernels.
    rotation_index : int
        This option only applies if `use_fft` is true. If so, the kernel is
        rolled by this amount before convolution to shift the output location.

    Returns
    -------
    The convolved waveform.

    Example
    -------
    >>> from speechbrain.dataio.dataio import read_audio
    >>> signal = read_audio('tests/samples/single-mic/example1.wav')
    >>> signal = signal.unsqueeze(0).unsqueeze(2)
    >>> kernel = torch.rand(1, 10, 1)
    >>> signal = convolve1d(signal, kernel, padding=(9, 0))
    """
    if len(waveform.shape) != 3:
        raise ValueError("Convolve1D expects a 3-dimensional tensor")

    # Move time dimension last, which pad and fft and conv expect.
    waveform = waveform.transpose(2, 1)
    kernel = kernel.transpose(2, 1)

    # Padding can be a tuple (left_pad, right_pad) or an int
    if isinstance(padding, tuple):
        waveform = torch.nn.functional.pad(
            input=waveform, pad=padding, mode=pad_type,
        )

    # This approach uses FFT, which is more efficient if the kernel is large
    if use_fft:

        # Pad kernel to same length as signal, ensuring correct alignment
        zero_length = waveform.size(-1) - kernel.size(-1)

        # Handle case where signal is shorter: truncate the kernel
        if zero_length < 0:
            kernel = kernel[..., :zero_length]
            zero_length = 0

        # Perform rotation to ensure alignment: split the kernel at
        # rotation_index and insert the zero padding in between
        zeros = torch.zeros(
            kernel.size(0), kernel.size(1), zero_length, device=kernel.device
        )
        after_index = kernel[..., rotation_index:]
        before_index = kernel[..., :rotation_index]
        kernel = torch.cat((after_index, zeros, before_index), dim=-1)

        # Multiply in frequency domain to convolve in time domain
        if version.parse(torch.__version__) > version.parse("1.6.0"):
            # Modern torch.fft API (torch > 1.6)
            import torch.fft as fft

            result = fft.rfft(waveform) * fft.rfft(kernel)
            convolved = fft.irfft(result, n=waveform.size(-1))
        else:
            # Legacy torch.rfft API: tensors carry an explicit
            # trailing dimension of size 2 for (real, imag)
            f_signal = torch.rfft(waveform, 1)
            f_kernel = torch.rfft(kernel, 1)
            sig_real, sig_imag = f_signal.unbind(-1)
            ker_real, ker_imag = f_kernel.unbind(-1)
            # Complex multiply done manually on the (real, imag) pairs
            f_result = torch.stack(
                [
                    sig_real * ker_real - sig_imag * ker_imag,
                    sig_real * ker_imag + sig_imag * ker_real,
                ],
                dim=-1,
            )
            convolved = torch.irfft(
                f_result, 1, signal_sizes=[waveform.size(-1)]
            )

    # Use the implementation given by torch, which should be efficient on GPU
    else:
        convolved = torch.nn.functional.conv1d(
            input=waveform,
            weight=kernel,
            stride=stride,
            groups=groups,
            padding=padding if not isinstance(padding, tuple) else 0,
        )

    # Return time dimension to the second dimension.
    return convolved.transpose(2, 1)
def reverberate(waveforms, rir_waveform, rescale_amp="avg"):
    """
    General function to contaminate a given signal with reverberation given a
    Room Impulse Response (RIR).
    It performs convolution between RIR and signal, but without changing
    the original amplitude of the signal.

    Arguments
    ---------
    waveforms : tensor
        The waveforms to normalize.
        Shape should be `[batch, time]` or `[batch, time, channels]`.
    rir_waveform : tensor
        RIR tensor, shape should be [time, channels].
    rescale_amp : str
        Whether reverberated signal is rescaled (None) and with respect either
        to original signal "peak" amplitude or "avg" average amplitude.
        Choose between [None, "avg", "peak"].

    Returns
    -------
    waveforms: tensor
        Reverberated signal.
    """
    orig_shape = waveforms.shape

    if len(waveforms.shape) > 3 or len(rir_waveform.shape) > 3:
        raise NotImplementedError

    # if inputs are mono tensors we reshape to 1, samples
    if len(waveforms.shape) == 1:
        waveforms = waveforms.unsqueeze(0).unsqueeze(-1)
    elif len(waveforms.shape) == 2:
        waveforms = waveforms.unsqueeze(-1)

    if len(rir_waveform.shape) == 1:  # convolve1d expects a 3d tensor !
        rir_waveform = rir_waveform.unsqueeze(0).unsqueeze(-1)
    elif len(rir_waveform.shape) == 2:
        rir_waveform = rir_waveform.unsqueeze(-1)

    # Compute the average amplitude of the clean, to restore it afterwards
    orig_amplitude = compute_amplitude(
        waveforms, waveforms.size(1), rescale_amp
    )

    # Compute index of the direct signal, so we can preserve alignment
    value_max, direct_index = rir_waveform.abs().max(axis=1, keepdim=True)

    # Making sure the max is always positive (if not, flip)
    # mask = torch.logical_and(rir_waveform == value_max,  rir_waveform < 0)
    # rir_waveform[mask] = -rir_waveform[mask]

    # Use FFT to compute convolution, because of long reverberation filter
    # (rotation_index aligns the output with the direct-path arrival)
    waveforms = convolve1d(
        waveform=waveforms,
        kernel=rir_waveform,
        use_fft=True,
        rotation_index=direct_index,
    )

    # Rescale to the peak amplitude of the clean waveform
    waveforms = rescale(
        waveforms, waveforms.size(1), orig_amplitude, rescale_amp
    )

    # Restore the caller's original shape (undo the unsqueezes above)
    if len(orig_shape) == 1:
        waveforms = waveforms.squeeze(0).squeeze(-1)
    if len(orig_shape) == 2:
        waveforms = waveforms.squeeze(-1)

    return waveforms
def dB_to_amplitude(SNR):
    """Returns the amplitude ratio, converted from decibels.

    Arguments
    ---------
    SNR : float
        The ratio in decibels to convert.

    Example
    -------
    >>> round(dB_to_amplitude(SNR=10), 3)
    3.162
    >>> dB_to_amplitude(SNR=0)
    1.0
    """
    # Amplitude (not power) conversion uses a divisor of 20
    exponent = SNR / 20
    return 10 ** exponent
def notch_filter(notch_freq, filter_width=101, notch_width=0.05):
    """Returns a notch filter constructed from a high-pass and low-pass filter.

    (from https://tomroelandts.com/articles/
    how-to-create-simple-band-pass-and-band-reject-filters)

    Arguments
    ---------
    notch_freq : float
        frequency to put notch as a fraction of the
        sampling rate / 2. The range of possible inputs is 0 to 1.
    filter_width : int
        Filter width in samples. Longer filters have
        smaller transition bands, but are more inefficient.
    notch_width : float
        Width of the notch, as a fraction of the sampling_rate / 2.

    Example
    -------
    >>> from speechbrain.dataio.dataio import read_audio
    >>> signal = read_audio('tests/samples/single-mic/example1.wav')
    >>> signal = signal.unsqueeze(0).unsqueeze(2)
    >>> kernel = notch_filter(0.25)
    >>> notched_signal = convolve1d(signal, kernel)
    """
    # Check inputs
    assert 0 < notch_freq <= 1
    assert filter_width % 2 != 0
    mid = filter_width // 2
    taps = torch.arange(filter_width) - mid

    # Shift the notch slightly upward so very low frequencies stay valid
    notch_freq += notch_width

    def _safe_sinc(x):
        """sin(x)/x with the middle (x == 0) tap replaced by its limit, 1."""
        left = torch.sin(x[:mid]) / x[:mid]
        right = torch.sin(x[mid + 1 :]) / x[mid + 1 :]
        return torch.cat([left, torch.ones(1), right])

    window = torch.blackman_window(filter_width)

    # Low-pass filter with cutoff just below the notch
    lowpass = _safe_sinc(3 * (notch_freq - notch_width) * taps) * window
    lowpass = lowpass / torch.sum(lowpass)

    # High-pass filter with cutoff just above the notch
    # (built from a low-pass by spectral inversion)
    highpass = _safe_sinc(3 * (notch_freq + notch_width) * taps) * window
    highpass = highpass / -torch.sum(highpass)
    highpass[mid] += 1

    # Summing the two yields a band-reject (notch) filter
    return (lowpass + highpass).view(1, -1, 1)
def overlap_and_add(signal, frame_step):
    """Taken from https://github.com/kaituoxu/Conv-TasNet/blob/master/src/utils.py

    Reconstructs a signal from a framed representation.
    Adds potentially overlapping frames of a signal with shape
    `[..., frames, frame_length]`, offsetting subsequent frames by `frame_step`.
    The resulting tensor has shape `[..., output_size]` where

        output_size = (frames - 1) * frame_step + frame_length

    Args:
        signal: A [..., frames, frame_length] Tensor. All dimensions may be unknown, and rank must be at least 2.
        frame_step: An integer denoting overlap offsets. Must be less than or equal to frame_length.

    Returns:
        A Tensor with shape [..., output_size] containing the overlap-added frames of signal's inner-most two dimensions.
        output_size = (frames - 1) * frame_step + frame_length

    Based on https://github.com/tensorflow/tensorflow/blob/r1.12/tensorflow/contrib/signal/python/ops/reconstruction_ops.py

    Example
    -------
    >>> signal = torch.randn(5, 20)
    >>> overlapped = overlap_and_add(signal, 20)
    >>> overlapped.shape
    torch.Size([100])
    """
    lead_dims = signal.size()[:-2]
    n_frames, frame_length = signal.size()[-2:]

    # Work at "subframe" resolution: gcd of length and step guarantees
    # every frame boundary lands on a subframe boundary.
    sub_len = math.gcd(frame_length, frame_step)
    step_subframes = frame_step // sub_len
    frame_subframes = frame_length // sub_len

    out_len = frame_step * (n_frames - 1) + frame_length
    out_subframes = out_len // sub_len

    # Reinterpret the frames as a flat sequence of subframes
    subframes = signal.view(*lead_dims, -1, sub_len)

    # For each input subframe, the output subframe it accumulates into
    idx = torch.arange(0, out_subframes).unfold(
        0, frame_subframes, step_subframes
    )
    idx = idx.clone().detach().to(signal.device.type)
    idx = idx.contiguous().view(-1)

    # Scatter-add the subframes into the output grid
    out = signal.new_zeros(*lead_dims, out_subframes, sub_len)
    out.index_add_(-2, idx, subframes)
    return out.view(*lead_dims, -1)
def resynthesize(enhanced_mag, noisy_inputs, stft, istft, normalize_wavs=True):
    """Function for resynthesizing waveforms from enhanced mags.

    Arguments
    ---------
    enhanced_mag : torch.Tensor
        Predicted spectral magnitude, should be three dimensional.
    noisy_inputs : torch.Tensor
        The noisy waveforms before any processing, to extract phase.
    stft : torch.nn.Module
        Module for computing the STFT for extracting phase.
    istft : torch.nn.Module
        Module for computing the iSTFT for resynthesis.
    normalize_wavs : bool
        Whether to normalize the output wavs before returning them.

    Returns
    -------
    enhanced_wav : torch.Tensor
        The resynthesized waveforms of the enhanced magnitudes with noisy phase.
    """
    # Recover the phase of the noisy input from its STFT (real, imag)
    spec = stft(noisy_inputs)
    phase = torch.atan2(spec[:, :, :, 1], spec[:, :, :, 0])

    # Pair the enhanced magnitude with the noisy phase
    real_part = torch.cos(phase).unsqueeze(-1)
    imag_part = torch.sin(phase).unsqueeze(-1)
    complex_spec = enhanced_mag.unsqueeze(-1) * torch.cat(
        (real_part, imag_part), -1
    )

    # Back to the time domain, matched to the input length
    wavs = istft(complex_spec, sig_length=noisy_inputs.shape[1])

    # Normalize. Since we're using peak amplitudes, ignore lengths
    if normalize_wavs:
        wavs = normalize(wavs, amp_type="peak")

    return wavs
def gabor_impulse_response(t, center, fwhm):
    """
    Function for generating gabor impulse responses
    as used by GaborConv1d proposed in
    Neil Zeghidour, Olivier Teboul, Felix de Chaumont Quitry & Marco
    Tagliasacchi, "LEAF: A LEARNABLE FRONTEND FOR AUDIO CLASSIFICATION",
    in Proc of ICLR 2021 (https://arxiv.org/abs/2101.08596)
    """
    # Normalization term of the Gaussian envelope, one value per filter
    norm = 1.0 / (torch.sqrt(torch.tensor(2.0) * math.pi) * fwhm)

    # Gaussian envelope exp(-t^2 / (2 * fwhm^2)) on the time grid,
    # computed as an outer product over (filters, time)
    envelope = torch.exp(
        torch.tensordot(
            1.0 / (2.0 * fwhm.unsqueeze(1) ** 2),
            (-(t ** 2.0)).unsqueeze(0),
            dims=1,
        )
    )

    # Complex carrier exp(i * center * t)
    center_c = center.type(torch.complex64)
    t_c = t.type(torch.complex64)
    i_unit = torch.complex(torch.tensor(0.0), torch.tensor(1.0))
    carrier = torch.exp(
        i_unit
        * torch.tensordot(center_c.unsqueeze(1), t_c.unsqueeze(0), dims=1)
    )

    # Promote everything to complex64 and combine
    norm = norm.type(torch.complex64).unsqueeze(1)
    envelope = envelope.type(torch.complex64)
    return norm * carrier * envelope
def gabor_impulse_response_legacy_complex(t, center, fwhm):
    """Compute Gabor impulse responses without using the complex64 dtype.

    Real and imaginary parts are carried in an explicit trailing dimension
    of size 2 instead, which keeps the function usable on backends lacking
    complex tensor support. This is the legacy filter generator used by
    GaborConv1d, following:
    Neil Zeghidour, Olivier Teboul, Felix de Chaumont Quitry & Marco
    Tagliasacchi, "LEAF: A LEARNABLE FRONTEND FOR AUDIO CLASSIFICATION",
    Proc. of ICLR 2021 (https://arxiv.org/abs/2101.08596)

    Arguments
    ---------
    t : torch.Tensor
        1-D grid of time samples.
    center : torch.Tensor
        1-D tensor of center frequencies (one per filter).
    fwhm : torch.Tensor
        1-D tensor of Gaussian widths (one per filter).

    Returns
    -------
    torch.Tensor
        Impulse responses of shape (n_filters, len(t), 2), where the last
        dimension holds the real and imaginary components.
    """
    # Per-filter normalization term: 1 / (sqrt(2*pi) * fwhm)
    denominator = 1.0 / (torch.sqrt(torch.tensor(2.0) * math.pi) * fwhm)
    # Gaussian envelope, one row per filter: exp(-t^2 / (2 * fwhm^2))
    gaussian = torch.exp(
        torch.tensordot(
            1.0 / (2.0 * fwhm.unsqueeze(1) ** 2),
            (-(t ** 2.0)).unsqueeze(0),
            dims=1,
        )
    )
    # Carrier phase: center[i] * t[j] (outer product)
    phase = torch.tensordot(center.unsqueeze(1), t.unsqueeze(0), dims=1)
    # The complex exponent is 0 + 1j*phase, so exp(.) has
    #   real part: exp(0) * cos(phase) = cos(phase)
    #   imag part: exp(0) * sin(phase) = sin(phase)
    # The previous implementation materialized the (always-zero) real part
    # of the exponent — including a dead in-place no-op on a zero tensor —
    # and threaded several exactly-zero products through the complex
    # multiplications. Those terms contribute nothing and are dropped here;
    # results are numerically identical.
    den = denominator.view(-1, 1)
    return torch.stack(
        (
            (den * torch.cos(phase)) * gaussian,
            (den * torch.sin(phase)) * gaussian,
        ),
        dim=-1,
    )
| 20,913 | 32.677939 | 123 | py |
speechbrain | speechbrain-main/speechbrain/processing/multi_mic.py | """Multi-microphone components.
This library contains functions for multi-microphone signal processing.
Example
-------
>>> import torch
>>>
>>> from speechbrain.dataio.dataio import read_audio
>>> from speechbrain.processing.features import STFT, ISTFT
>>> from speechbrain.processing.multi_mic import Covariance
>>> from speechbrain.processing.multi_mic import GccPhat, SrpPhat, Music
>>> from speechbrain.processing.multi_mic import DelaySum, Mvdr, Gev
>>>
>>> xs_speech = read_audio(
... 'tests/samples/multi-mic/speech_-0.82918_0.55279_-0.082918.flac'
... )
>>> xs_speech = xs_speech.unsqueeze(0) # [batch, time, channels]
>>> xs_noise_diff = read_audio('tests/samples/multi-mic/noise_diffuse.flac')
>>> xs_noise_diff = xs_noise_diff.unsqueeze(0)
>>> xs_noise_loc = read_audio('tests/samples/multi-mic/noise_0.70225_-0.70225_0.11704.flac')
>>> xs_noise_loc = xs_noise_loc.unsqueeze(0)
>>> fs = 16000 # sampling rate
>>> ss = xs_speech
>>> nn_diff = 0.05 * xs_noise_diff
>>> nn_loc = 0.05 * xs_noise_loc
>>> xs_diffused_noise = ss + nn_diff
>>> xs_localized_noise = ss + nn_loc
>>> # Delay-and-Sum Beamforming with GCC-PHAT localization
>>> stft = STFT(sample_rate=fs)
>>> cov = Covariance()
>>> gccphat = GccPhat()
>>> delaysum = DelaySum()
>>> istft = ISTFT(sample_rate=fs)
>>> Xs = stft(xs_diffused_noise)
>>> Ns = stft(nn_diff)
>>> XXs = cov(Xs)
>>> NNs = cov(Ns)
>>> tdoas = gccphat(XXs)
>>> Ys_ds = delaysum(Xs, tdoas)
>>> ys_ds = istft(Ys_ds)
>>> # Mvdr Beamforming with SRP-PHAT localization
>>> mvdr = Mvdr()
>>> mics = torch.zeros((4,3), dtype=torch.float)
>>> mics[0,:] = torch.FloatTensor([-0.05, -0.05, +0.00])
>>> mics[1,:] = torch.FloatTensor([-0.05, +0.05, +0.00])
>>> mics[2,:] = torch.FloatTensor([+0.05, +0.05, +0.00])
>>> mics[3,:] = torch.FloatTensor([+0.05, +0.05, +0.00])
>>> srpphat = SrpPhat(mics=mics)
>>> doas = srpphat(XXs)
>>> Ys_mvdr = mvdr(Xs, NNs, doas, doa_mode=True, mics=mics, fs=fs)
>>> ys_mvdr = istft(Ys_mvdr)
>>> # Mvdr Beamforming with MUSIC localization
>>> music = Music(mics=mics)
>>> doas = music(XXs)
>>> Ys_mvdr2 = mvdr(Xs, NNs, doas, doa_mode=True, mics=mics, fs=fs)
>>> ys_mvdr2 = istft(Ys_mvdr2)
>>> # GeV Beamforming
>>> gev = Gev()
>>> Xs = stft(xs_localized_noise)
>>> Ss = stft(ss)
>>> Ns = stft(nn_loc)
>>> SSs = cov(Ss)
>>> NNs = cov(Ns)
>>> Ys_gev = gev(Xs, SSs, NNs)
>>> ys_gev = istft(Ys_gev)
Authors:
* William Aris
* Francois Grondin
"""
import torch
from packaging import version
import speechbrain.processing.decomposition as eig
class Covariance(torch.nn.Module):
    """Computes the covariance matrices of multi-channel signals.

    Arguments:
    ----------
    average : bool
        Whether the covariance matrices should be averaged over the time
        dimension (the mean is then replicated on every frame). The
        default value is True.

    Example
    -------
    >>> import torch
    >>> from speechbrain.dataio.dataio import read_audio
    >>> from speechbrain.processing.features import STFT
    >>> from speechbrain.processing.multi_mic import Covariance
    >>>
    >>> xs_speech = read_audio(
    ...    'tests/samples/multi-mic/speech_-0.82918_0.55279_-0.082918.flac'
    ... )
    >>> xs_speech = xs_speech.unsqueeze(0) # [batch, time, channels]
    >>> xs_noise = read_audio('tests/samples/multi-mic/noise_diffuse.flac')
    >>> xs_noise = xs_noise.unsqueeze(0)
    >>> xs = xs_speech + 0.05 * xs_noise
    >>> fs = 16000
    >>> stft = STFT(sample_rate=fs)
    >>> cov = Covariance()
    >>>
    >>> Xs = stft(xs)
    >>> XXs = cov(Xs)
    >>> XXs.shape
    torch.Size([1, 1001, 201, 2, 10])
    """

    def __init__(self, average=True):
        super().__init__()
        self.average = average

    def forward(self, Xs):
        """Compute covariance matrices with the utility function _cov.

        The result has the format
        (batch, time_step, n_fft/2 + 1, 2, n_mics + n_pairs). The order on
        the last dimension follows the triu_indices ordering of a square
        matrix: with 4 channels it is (0, 0), (0, 1), (0, 2), (0, 3),
        (1, 1), (1, 2), (1, 3), (2, 2), (2, 3), (3, 3), so XXs[..., 0] is
        channels (0, 0) and XXs[..., 1] is channels (0, 1).

        Arguments:
        ----------
        Xs : tensor
            A batch of audio signals in the frequency domain, with shape
            (batch, time_step, n_fft/2 + 1, 2, n_mics).
        """
        return Covariance._cov(Xs=Xs, average=self.average)

    @staticmethod
    def _cov(Xs, average=True):
        """Compute the covariance matrices (XXs) of the signals.

        The result has the format
        (batch, time_step, n_fft/2 + 1, 2, n_mics + n_pairs).

        Arguments:
        ----------
        Xs : tensor
            A batch of audio signals in the frequency domain, with shape
            (batch, time_step, n_fft/2 + 1, 2, n_mics).
        average : boolean
            Whether to average the covariance matrices over the time
            dimension (default True).
        """
        n_mics = Xs.shape[4]

        # Split real/imaginary parts, adding a trailing dim for matmul
        re = Xs[..., 0, :].unsqueeze(4)
        im = Xs[..., 1, :].unsqueeze(4)
        re_t = re.transpose(3, 4)
        im_t = im.transpose(3, 4)

        # Complex covariance X X^H expressed on real/imaginary components
        cov_re = torch.matmul(re, re_t) + torch.matmul(im, im_t)
        cov_im = torch.matmul(re, im_t) - torch.matmul(im, re_t)

        # The matrix is Hermitian: keep only the upper-triangular entries
        rows, cols = torch.triu_indices(n_mics, n_mics)
        XXs = torch.stack((cov_re[..., rows, cols], cov_im[..., rows, cols]), 3)

        # Average over time if requested, then replicate on every frame
        if average is True:
            n_frames = XXs.shape[1]
            XXs = torch.mean(XXs, 1, keepdim=True).repeat(1, n_frames, 1, 1, 1)

        return XXs
class DelaySum(torch.nn.Module):
    """Delay-and-sum beamformer steered with TDOAs (or DOAs), using the
    first channel as the reference.

    Example
    -------
    >>> import torch
    >>> from speechbrain.dataio.dataio import read_audio
    >>> from speechbrain.processing.features import STFT, ISTFT
    >>> from speechbrain.processing.multi_mic import Covariance
    >>> from speechbrain.processing.multi_mic import GccPhat, DelaySum
    >>>
    >>> xs_speech = read_audio(
    ...    'tests/samples/multi-mic/speech_-0.82918_0.55279_-0.082918.flac'
    ... )
    >>> xs_speech = xs_speech.unsqueeze(0) # [batch, time, channel]
    >>> xs_noise = read_audio('tests/samples/multi-mic/noise_diffuse.flac')
    >>> xs_noise = xs_noise.unsqueeze(0) #[batch, time, channels]
    >>> fs = 16000
    >>> xs = xs_speech + 0.05 * xs_noise
    >>>
    >>> stft = STFT(sample_rate=fs)
    >>> cov = Covariance()
    >>> gccphat = GccPhat()
    >>> delaysum = DelaySum()
    >>> istft = ISTFT(sample_rate=fs)
    >>>
    >>> Xs = stft(xs)
    >>> XXs = cov(Xs)
    >>> tdoas = gccphat(XXs)
    >>> Ys = delaysum(Xs, tdoas)
    >>> ys = istft(Ys)
    """

    def __init__(self):
        super().__init__()

    def forward(
        self,
        Xs,
        localization_tensor,
        doa_mode=False,
        mics=None,
        fs=None,
        c=343.0,
    ):
        """Build a steering vector from the TDOAs/DOAs and beamform.

        The result has the format (batch, time_step, n_fft, 2, 1).

        Arguments
        ---------
        Xs : tensor
            A batch of audio signals in the frequency domain, with shape
            (batch, time_step, n_fft/2 + 1, 2, n_mics).
        localization_tensor : tensor
            Either time differences of arrival (TDOAs, in samples) per
            timestamp, with shape (batch, time_steps, n_mics + n_pairs),
            or directions of arrival (DOAs, xyz coordinates in meters),
            with shape (batch, time_steps, 3).
        doa_mode : bool
            Set to True when localization_tensor holds DOAs rather than
            TDOAs. Defaults to False.
        mics : tensor
            Cartesian positions (xyz, meters) of each microphone, shape
            (n_mics, 3). Only mandatory in DOA mode.
        fs : int
            Sample rate in Hertz of the signals. Only mandatory in DOA
            mode.
        c : float
            Speed of sound in meters per second (default 343 m/s). Only
            used in DOA mode.
        """
        n_fft = Xs.shape[2]
        localization_tensor = localization_tensor.to(Xs.device)

        # Both modes reduce to per-channel delays (taus)
        if doa_mode:
            taus = doas2taus(doas=localization_tensor, mics=mics, fs=fs, c=c)
        else:
            taus = tdoas2taus(tdoas=localization_tensor)

        # Steer towards the source, then delay and sum
        As = steering(taus=taus, n_fft=n_fft)
        return DelaySum._delaysum(Xs=Xs, As=As)

    @staticmethod
    def _delaysum(Xs, As):
        """Apply delay-and-sum beamforming.

        The result has the format (batch, time_step, n_fft, 2, 1).

        Arguments
        ---------
        Xs : tensor
            A batch of audio signals in the frequency domain, with shape
            (batch, time_step, n_fft/2 + 1, 2, n_mics).
        As : tensor
            The steering vector pointing towards the target source, with
            shape (batch, time_step, n_fft/2 + 1, 2, n_mics).
        """
        n_channels = Xs.shape[4]

        # Unmixing weights: conjugated steering vector scaled by 1/n_mics
        w_re = As[..., 0, :] / n_channels
        w_im = -1 * As[..., 1, :] / n_channels

        x_re = Xs[..., 0, :]
        x_im = Xs[..., 1, :]

        # Complex multiply, then accumulate over the channel dimension
        y_re = torch.sum((w_re * x_re - w_im * x_im), dim=3, keepdim=True)
        y_im = torch.sum((w_re * x_im + w_im * x_re), dim=3, keepdim=True)

        return torch.stack((y_re, y_im), 3)
class Mvdr(torch.nn.Module):
    """Perform minimum variance distortionless response (MVDR) beamforming
    by using an input signal in the frequency domain, its covariance matrices
    and tdoas (to compute a steering vector).
    Example
    -------
    >>> import torch
    >>> from speechbrain.dataio.dataio import read_audio
    >>> from speechbrain.processing.features import STFT, ISTFT
    >>> from speechbrain.processing.multi_mic import Covariance
    >>> from speechbrain.processing.multi_mic import GccPhat, DelaySum
    >>>
    >>> xs_speech = read_audio(
    ...    'tests/samples/multi-mic/speech_-0.82918_0.55279_-0.082918.flac'
    ... )
    >>> xs_speech = xs_speech.unsqueeze(0) # [batch, time, channel]
    >>> xs_noise = read_audio('tests/samples/multi-mic/noise_diffuse.flac')
    >>> xs_noise = xs_noise.unsqueeze(0) #[batch, time, channels]
    >>> fs = 16000
    >>> xs = xs_speech + 0.05 * xs_noise
    >>>
    >>> stft = STFT(sample_rate=fs)
    >>> cov = Covariance()
    >>> gccphat = GccPhat()
    >>> mvdr = Mvdr()
    >>> istft = ISTFT(sample_rate=fs)
    >>>
    >>> Xs = stft(xs)
    >>> Ns = stft(xs_noise)
    >>> XXs = cov(Xs)
    >>> NNs = cov(Ns)
    >>> tdoas = gccphat(XXs)
    >>> Ys = mvdr(Xs, NNs, tdoas)
    >>> ys = istft(Ys)
    """
    def __init__(self, eps=1e-20):
        super().__init__()
        # NOTE(review): eps is stored but never read again — forward() calls
        # _mvdr without passing it, and _mvdr itself never uses its own eps
        # argument. Confirm whether it was meant to regularize the
        # covariance inversion below.
        self.eps = eps
    def forward(
        self,
        Xs,
        NNs,
        localization_tensor,
        doa_mode=False,
        mics=None,
        fs=None,
        c=343.0,
    ):
        """This method computes a steering vector before using the
        utility function _mvdr to perform beamforming. The result has
        the following format: (batch, time_step, n_fft, 2, 1).
        Arguments
        ---------
        Xs : tensor
            A batch of audio signals in the frequency domain.
            The tensor must have the following format:
            (batch, time_step, n_fft/2 + 1, 2, n_mics)
        NNs : tensor
            The covariance matrices of the noise signal. The tensor must
            have the format (batch, time_steps, n_fft/2 + 1, 2, n_mics + n_pairs)
        localization_tensor : tensor
            A tensor containing either time differences of arrival (TDOAs)
            (in samples) for each timestamp or directions of arrival (DOAs)
            (xyz coordinates in meters). If localization_tensor represents
            TDOAs, then its format is (batch, time_steps, n_mics + n_pairs).
            If localization_tensor represents DOAs, then its format is
            (batch, time_steps, 3)
        doa_mode : bool
            The user needs to set this parameter to True if localization_tensor
            represents DOAs instead of TDOAs. Its default value is set to False.
        mics : tensor
            The cartesian position (xyz coordinates in meters) of each microphone.
            The tensor must have the following format (n_mics, 3). This
            parameter is only mandatory when localization_tensor represents
            DOAs.
        fs : int
            The sample rate in Hertz of the signals. This parameter is only
            mandatory when localization_tensor represents DOAs.
        c : float
            The speed of sound in the medium. The speed is expressed in meters
            per second and the default value of this parameter is 343 m/s. This
            parameter is only used when localization_tensor represents DOAs.
        """
        # Get useful dimensions
        n_fft = Xs.shape[2]
        # Move every input onto the same device as the signal
        localization_tensor = localization_tensor.to(Xs.device)
        NNs = NNs.to(Xs.device)
        if mics is not None:
            mics = mics.to(Xs.device)
        # Convert the tdoas to taus
        if doa_mode:
            taus = doas2taus(doas=localization_tensor, mics=mics, fs=fs, c=c)
        else:
            taus = tdoas2taus(tdoas=localization_tensor)
        # Generate the steering vector
        As = steering(taus=taus, n_fft=n_fft)
        # Perform mvdr
        Ys = Mvdr._mvdr(Xs=Xs, NNs=NNs, As=As)
        return Ys
    @staticmethod
    def _mvdr(Xs, NNs, As, eps=1e-20):
        """Perform minimum variance distortionless response beamforming.
        Arguments
        ---------
        Xs : tensor
            A batch of audio signals in the frequency domain.
            The tensor must have the following format:
            (batch, time_step, n_fft/2 + 1, 2, n_mics).
        NNs : tensor
            The covariance matrices of the noise signal. The tensor must
            have the format (batch, time_steps, n_fft/2 + 1, 2, n_mics + n_pairs).
        As : tensor
            The steering vector to point in the direction of
            the target source. The tensor must have the format
            (batch, time_step, n_fft/2 + 1, 2, n_mics).
        """
        # Get unique covariance values to reduce the number of computations
        # (NNs_idx maps each original frame back to its unique frame)
        NNs_val, NNs_idx = torch.unique(NNs, return_inverse=True, dim=1)
        # Inverse covariance matrices
        NNs_inv = eig.inv(NNs_val)
        # Capture real and imaginary parts, and restore time steps
        NNs_inv_re = NNs_inv[..., 0][:, NNs_idx]
        NNs_inv_im = NNs_inv[..., 1][:, NNs_idx]
        # Decompose steering vector
        # AsC is the column vector A; AsT its conjugate transpose A^H
        AsC_re = As[..., 0, :].unsqueeze(4)
        # presumably the 1.0 * materializes a copy (unsqueeze alone would
        # return a view of As) — verify before removing
        AsC_im = 1.0 * As[..., 1, :].unsqueeze(4)
        AsT_re = AsC_re.transpose(3, 4)
        # Conjugate transpose: the imaginary part changes sign
        AsT_im = -1.0 * AsC_im.transpose(3, 4)
        # Project
        # Complex matrix product NN^-1 @ A on real/imaginary components
        NNs_inv_AsC_re = torch.matmul(NNs_inv_re, AsC_re) - torch.matmul(
            NNs_inv_im, AsC_im
        )
        NNs_inv_AsC_im = torch.matmul(NNs_inv_re, AsC_im) + torch.matmul(
            NNs_inv_im, AsC_re
        )
        # Compute the gain
        # alpha = 1 / Re(A^H @ NN^-1 @ A) — only the real component of the
        # complex product is formed here
        alpha = 1.0 / (
            torch.matmul(AsT_re, NNs_inv_AsC_re)
            - torch.matmul(AsT_im, NNs_inv_AsC_im)
        )
        # Get the unmixing coefficients
        # W = (NN^-1 A) * alpha, conjugated for the projection below
        Ws_re = torch.matmul(NNs_inv_AsC_re, alpha).squeeze(4)
        Ws_im = -torch.matmul(NNs_inv_AsC_im, alpha).squeeze(4)
        # Applying MVDR
        # Complex inner product of the weights with the input channels
        Xs_re = Xs[..., 0, :]
        Xs_im = Xs[..., 1, :]
        Ys_re = torch.sum((Ws_re * Xs_re - Ws_im * Xs_im), dim=3, keepdim=True)
        Ys_im = torch.sum((Ws_re * Xs_im + Ws_im * Xs_re), dim=3, keepdim=True)
        Ys = torch.stack((Ys_re, Ys_im), -2)
        return Ys
class Gev(torch.nn.Module):
    """Generalized EigenValue decomposition (GEV) Beamforming.
    Example
    -------
    >>> from speechbrain.dataio.dataio import read_audio
    >>> import torch
    >>>
    >>> from speechbrain.processing.features import STFT, ISTFT
    >>> from speechbrain.processing.multi_mic import Covariance
    >>> from speechbrain.processing.multi_mic import Gev
    >>>
    >>> xs_speech = read_audio(
    ...    'tests/samples/multi-mic/speech_-0.82918_0.55279_-0.082918.flac'
    ... )
    >>> xs_speech = xs_speech.unsqueeze(0) # [batch, time, channels]
    >>> xs_noise = read_audio('tests/samples/multi-mic/noise_0.70225_-0.70225_0.11704.flac')
    >>> xs_noise = xs_noise.unsqueeze(0)
    >>> fs = 16000
    >>> ss = xs_speech
    >>> nn = 0.05 * xs_noise
    >>> xs = ss + nn
    >>>
    >>> stft = STFT(sample_rate=fs)
    >>> cov = Covariance()
    >>> gev = Gev()
    >>> istft = ISTFT(sample_rate=fs)
    >>>
    >>> Ss = stft(ss)
    >>> Nn = stft(nn)
    >>> Xs = stft(xs)
    >>>
    >>> SSs = cov(Ss)
    >>> NNs = cov(Nn)
    >>>
    >>> Ys = gev(Xs, SSs, NNs)
    >>> ys = istft(Ys)
    """
    def __init__(self):
        super().__init__()
    def forward(self, Xs, SSs, NNs):
        """ This method uses the utility function _gev to perform generalized
        eigenvalue decomposition beamforming. Therefore, the result has
        the following format: (batch, time_step, n_fft, 2, 1).
        Arguments
        ---------
        Xs : tensor
            A batch of audio signals in the frequency domain.
            The tensor must have the following format:
            (batch, time_step, n_fft/2 + 1, 2, n_mics).
        SSs : tensor
            The covariance matrices of the target signal. The tensor must
            have the format (batch, time_steps, n_fft/2 + 1, 2, n_mics + n_pairs).
        NNs : tensor
            The covariance matrices of the noise signal. The tensor must
            have the format (batch, time_steps, n_fft/2 + 1, 2, n_mics + n_pairs).
        """
        Ys = Gev._gev(Xs=Xs, SSs=SSs, NNs=NNs)
        return Ys
    @staticmethod
    def _gev(Xs, SSs, NNs):
        """ Perform generalized eigenvalue decomposition beamforming. The result
        has the following format: (batch, time_step, n_fft, 2, 1).
        Arguments
        ---------
        Xs : tensor
            A batch of audio signals in the frequency domain.
            The tensor must have the following format:
            (batch, time_step, n_fft/2 + 1, 2, n_mics).
        SSs : tensor
            The covariance matrices of the target signal. The tensor must
            have the format (batch, time_steps, n_fft/2 + 1, 2, n_mics + n_pairs).
        NNs : tensor
            The covariance matrices of the noise signal. The tensor must
            have the format (batch, time_steps, n_fft/2 + 1, 2, n_mics + n_pairs).
        """
        # Putting on the right device
        SSs = SSs.to(Xs.device)
        NNs = NNs.to(Xs.device)
        # Get useful dimensions
        n_mics = Xs.shape[4]
        n_mics_pairs = SSs.shape[4]
        # Computing the eigenvectors
        # Concatenating SSs and NNs lets one torch.unique pass deduplicate
        # identical (speech, noise) covariance frames together
        SSs_NNs = torch.cat((SSs, NNs), dim=4)
        SSs_NNs_val, SSs_NNs_idx = torch.unique(
            SSs_NNs, return_inverse=True, dim=1
        )
        # Split the unique frames back into speech and noise halves
        SSs = SSs_NNs_val[..., range(0, n_mics_pairs)]
        NNs = SSs_NNs_val[..., range(n_mics_pairs, 2 * n_mics_pairs)]
        # Regularize the noise covariance before the decomposition
        NNs = eig.pos_def(NNs)
        Vs, Ds = eig.gevd(SSs, NNs)  # Ds (eigenvalues) is not used below
        # Beamforming
        # Takes the eigenvector stored in the last column — presumably the
        # one with the largest generalized eigenvalue (assumes eig.gevd
        # returns them in ascending order; TODO confirm)
        F_re = Vs[..., (n_mics - 1), 0]
        F_im = Vs[..., (n_mics - 1), 1]
        # Normalize
        # Scale the beamforming vector to unit Euclidean norm
        F_norm = 1.0 / (
            torch.sum(F_re ** 2 + F_im ** 2, dim=3, keepdim=True) ** 0.5
        ).repeat(1, 1, 1, n_mics)
        F_re *= F_norm
        F_im *= F_norm
        # Expand the unique frames back onto the full time axis
        Ws_re = F_re[:, SSs_NNs_idx]
        Ws_im = F_im[:, SSs_NNs_idx]
        # Complex inner product of the weights with the input channels
        Xs_re = Xs[..., 0, :]
        Xs_im = Xs[..., 1, :]
        Ys_re = torch.sum((Ws_re * Xs_re - Ws_im * Xs_im), dim=3, keepdim=True)
        Ys_im = torch.sum((Ws_re * Xs_im + Ws_im * Xs_re), dim=3, keepdim=True)
        # Assembling the output
        Ys = torch.stack((Ys_re, Ys_im), 3)
        return Ys
class GccPhat(torch.nn.Module):
    """Generalized Cross-Correlation with Phase Transform (GCC-PHAT)
    localization.

    Arguments
    ---------
    tdoa_max : int
        Restricts the search for delays to [-tdoa_max, +tdoa_max]
        (in samples). Optional; when None (the default), the full range
        [-n_fft/2, +n_fft/2] is scanned.
    eps : float
        A small value to avoid divisions by 0 in the phase transform.
        The default value is 1e-20.

    Example
    -------
    >>> import torch
    >>> from speechbrain.dataio.dataio import read_audio
    >>> from speechbrain.processing.features import STFT, ISTFT
    >>> from speechbrain.processing.multi_mic import Covariance
    >>> from speechbrain.processing.multi_mic import GccPhat, DelaySum
    >>>
    >>> xs_speech = read_audio(
    ...    'tests/samples/multi-mic/speech_-0.82918_0.55279_-0.082918.flac'
    ... )
    >>> xs_speech = xs_speech.unsqueeze(0) # [batch, time, channel]
    >>> xs_noise = read_audio('tests/samples/multi-mic/noise_diffuse.flac')
    >>> xs_noise = xs_noise.unsqueeze(0) #[batch, time, channels]
    >>> fs = 16000
    >>> xs = xs_speech + 0.05 * xs_noise
    >>>
    >>> stft = STFT(sample_rate=fs)
    >>> cov = Covariance()
    >>> gccphat = GccPhat()
    >>> Xs = stft(xs)
    >>> XXs = cov(Xs)
    >>> tdoas = gccphat(XXs)
    """

    def __init__(self, tdoa_max=None, eps=1e-20):
        super().__init__()
        self.tdoa_max = tdoa_max
        self.eps = eps

    def forward(self, XXs):
        """Estimate TDOAs with GCC-PHAT and quadratic interpolation.

        The result has the format (batch, time_steps, n_mics + n_pairs).
        The last dimension follows the triu_indices ordering of a square
        matrix: with 4 channels the pairs come out as (0, 0), (0, 1),
        (0, 2), (0, 3), (1, 1), (1, 2), (1, 3), (2, 2), (2, 3), (3, 3),
        so delays[..., 0] is channels (0, 0) and delays[..., 1] is
        channels (0, 1).

        Arguments:
        ----------
        XXs : tensor
            The covariance matrices of the input signal, with shape
            (batch, time_steps, n_fft/2 + 1, 2, n_mics + n_pairs).
        """
        xxs = GccPhat._gcc_phat(XXs=XXs, eps=self.eps)
        rounded = GccPhat._extract_delays(xxs=xxs, tdoa_max=self.tdoa_max)
        return GccPhat._interpolate(xxs=xxs, delays=rounded)

    @staticmethod
    def _gcc_phat(XXs, eps=1e-20):
        """Evaluate GCC-PHAT for each timestamp, in the time domain.

        The result has the format
        (batch, time_steps, n_fft, n_mics + n_pairs).

        Arguments
        ---------
        XXs : tensor
            The covariance matrices of the input signal, with shape
            (batch, time_steps, n_fft/2 + 1, 2, n_mics + n_pairs).
        eps : float
            A small value to avoid divisions by 0 in the phase transform
            (default 1e-20).
        """
        # Number of time-domain samples behind the one-sided spectrum
        n_samples = (XXs.shape[2] - 1) * 2

        # Deduplicate identical cross-spectra along the pair dimension
        XXs_val, XXs_idx = torch.unique(XXs, return_inverse=True, dim=4)
        re = XXs_val[..., 0, :]
        im = XXs_val[..., 1, :]

        # Phase transform: normalize each bin to unit magnitude
        mag = torch.sqrt(re ** 2 + im ** 2) + eps
        XXs_phat = torch.stack((re / mag, im / mag), 4)

        # Back to the time domain (cross-correlation per pair)
        XXs_phat = XXs_phat.transpose(2, 3)
        if version.parse(torch.__version__) >= version.parse("1.8.0"):
            XXs_phat = torch.complex(XXs_phat[..., 0], XXs_phat[..., 1])
            xxs = torch.fft.irfft(XXs_phat, n=n_samples)
        else:
            xxs = torch.irfft(XXs_phat, signal_ndim=1, signal_sizes=[n_samples])

        # Restore the full pair dimension, then put samples on dim 2
        xxs = xxs[..., XXs_idx, :]
        return xxs.transpose(2, 3)

    @staticmethod
    def _extract_delays(xxs, tdoa_max=None):
        """Extract the rounded delays from the cross-correlations.

        The result has the format (batch, time_steps, n_mics + n_pairs).

        Arguments
        ---------
        xxs : tensor
            The correlation signals obtained after a gcc-phat operation,
            with shape (batch, time_steps, n_fft, n_mics + n_pairs).
        tdoa_max : int
            Restricts the search for delays to [-tdoa_max, +tdoa_max]
            (in samples). When None (the default), the full range
            [-n_fft/2, +n_fft/2] is scanned.
        """
        n_fft = xxs.shape[2]

        # Without an explicit limit, scan the whole frame
        if tdoa_max is None:
            tdoa_max = torch.div(n_fft, 2, rounding_mode="floor")

        # Keep only the plausible lags: the first tdoa_max samples hold
        # the positive delays, the last tdoa_max samples the negative ones
        head = xxs[..., 0:tdoa_max, :]
        tail = xxs[..., -tdoa_max:, :]
        cropped = torch.cat((head, tail), 2)

        # Pick the lag with the largest correlation
        _, delays = torch.max(cropped, 2)

        # Positions that landed in the tail correspond to negative lags:
        # subtracting the cropped length maps cropped index -> lag
        # (equivalent to undoing the crop offset and re-centering on 0)
        in_tail = delays >= head.shape[2]
        delays[in_tail] -= cropped.shape[2]

        return delays

    @staticmethod
    def _interpolate(xxs, delays):
        """Refine the integer delays by quadratic interpolation.

        The result has the format (batch, time_steps, n_mics + n_pairs).

        Arguments
        ---------
        xxs : tensor
            The correlation signals obtained after a gcc-phat operation,
            with shape (batch, time_steps, n_fft, n_mics + n_pairs).
        delays : tensor
            The rounded tdoas (sample of highest amplitude), with shape
            (batch, time_steps, n_mics + n_pairs).
        """
        n_fft = xxs.shape[2]

        def sample_at(offset):
            # Circular gather of the correlation value at delay + offset
            pos = torch.fmod((delays + offset) + n_fft, n_fft).unsqueeze(2)
            return torch.gather(xxs, 2, pos).squeeze(2)

        y1 = sample_at(-1)
        y2 = sample_at(0)
        y3 = sample_at(1)

        # Vertex of the parabola fitted through the three points
        return delays + (y1 - y3) / (2 * y1 - 4 * y2 + 2 * y3)
class SrpPhat(torch.nn.Module):
    """Steered-Response Power with Phase Transform Localization.
    Arguments
    ---------
    mics : tensor
        The cartesian coordinates (xyz) in meters of each microphone.
        The tensor must have the following format (n_mics, 3).
    space : string
        If this parameter is set to 'sphere', the localization will
        be done in 3D by searching in a sphere of possible doas. If
        it set to 'circle', the search will be done in 2D by searching
        in a circle. By default, this parameter is set to 'sphere'.
        Note: The 'circle' option isn't implemented yet.
    sample_rate : int
        The sample rate in Hertz of the signals to perform SRP-PHAT on.
        By default, this parameter is set to 16000 Hz.
    speed_sound : float
        The speed of sound in the medium. The speed is expressed in meters
        per second and the default value of this parameter is 343 m/s.
    eps : float
        A small value to avoid errors like division by 0. The default value
        of this parameter is 1e-20.
    Example
    -------
    >>> import torch
    >>> from speechbrain.dataio.dataio import read_audio
    >>> from speechbrain.processing.features import STFT
    >>> from speechbrain.processing.multi_mic import Covariance
    >>> from speechbrain.processing.multi_mic import SrpPhat
    >>> xs_speech = read_audio('tests/samples/multi-mic/speech_-0.82918_0.55279_-0.082918.flac')
    >>> xs_noise = read_audio('tests/samples/multi-mic/noise_diffuse.flac')
    >>> fs = 16000
    >>> xs_speech = xs_speech.unsqueeze(0) # [batch, time, channels]
    >>> xs_noise = xs_noise.unsqueeze(0)
    >>> ss1 = xs_speech
    >>> ns1 = 0.05 * xs_noise
    >>> xs1 = ss1 + ns1
    >>> ss2 = xs_speech
    >>> ns2 = 0.20 * xs_noise
    >>> xs2 = ss2 + ns2
    >>> ss = torch.cat((ss1,ss2), dim=0)
    >>> ns = torch.cat((ns1,ns2), dim=0)
    >>> xs = torch.cat((xs1,xs2), dim=0)
    >>> mics = torch.zeros((4,3), dtype=torch.float)
    >>> mics[0,:] = torch.FloatTensor([-0.05, -0.05, +0.00])
    >>> mics[1,:] = torch.FloatTensor([-0.05, +0.05, +0.00])
    >>> mics[2,:] = torch.FloatTensor([+0.05, +0.05, +0.00])
    >>> mics[3,:] = torch.FloatTensor([+0.05, +0.05, +0.00])
    >>> stft = STFT(sample_rate=fs)
    >>> cov = Covariance()
    >>> srpphat = SrpPhat(mics=mics)
    >>> Xs = stft(xs)
    >>> XXs = cov(Xs)
    >>> doas = srpphat(XXs)
    """
    def __init__(
        self,
        mics,
        space="sphere",
        sample_rate=16000,
        speed_sound=343.0,
        eps=1e-20,
    ):
        super().__init__()
        # Generate the doas
        if space == "sphere":
            self.doas = sphere()
        if space == "circle":
            # NOTE(review): the 'circle' search space is not implemented;
            # self.doas is left unset in this branch, so the doas2taus
            # call below would fail with an AttributeError.
            pass
        # Generate associated taus with the doas
        self.taus = doas2taus(
            self.doas, mics=mics, fs=sample_rate, c=speed_sound
        )
        # Save epsilon
        self.eps = eps
    def forward(self, XXs):
        """ Perform SRP-PHAT localization on a signal by computing a steering
        vector and then by using the utility function _srp_phat to extract the doas.
        The result is a tensor containing the directions of arrival (xyz coordinates
        (in meters) in the direction of the sound source). The output tensor
        has the format (batch, time_steps, 3).
        This localization method uses Global Coherence Field (GCF):
        https://www.researchgate.net/publication/221491705_Speaker_localization_based_on_oriented_global_coherence_field
        Arguments
        ---------
        XXs : tensor
            The covariance matrices of the input signal. The tensor must
            have the format (batch, time_steps, n_fft/2 + 1, 2, n_mics + n_pairs).
        """
        # Get useful dimensions
        n_fft = XXs.shape[2]
        # Generate the steering vector
        As = steering(self.taus.to(XXs.device), n_fft)
        # Perform srp-phat
        doas = SrpPhat._srp_phat(XXs=XXs, As=As, doas=self.doas, eps=self.eps)
        return doas
    @staticmethod
    def _srp_phat(XXs, As, doas, eps=1e-20):
        """Perform srp-phat to find the direction of arrival
        of the sound source. The result is a tensor containing the directions
        of arrival (xyz coordinates (in meters) in the direction of the sound source).
        The output tensor has the format: (batch, time_steps, 3).
        Arguments
        ---------
        XXs : tensor
            The covariance matrices of the input signal. The tensor must
            have the format (batch, time_steps, n_fft/2 + 1, 2, n_mics + n_pairs).
        As : tensor
            The steering vector that cover the all the potential directions
            of arrival. The tensor must have the format
            (n_doas, n_fft/2 + 1, 2, n_mics).
        doas : tensor
            All the possible directions of arrival that will be scanned. The
            tensor must have the format (n_doas, 3).
        """
        # Putting on the right device
        As = As.to(XXs.device)
        doas = doas.to(XXs.device)
        # Get useful dimensions
        n_mics = As.shape[3]
        # Get the indices for the pairs of microphones
        idx = torch.triu_indices(n_mics, n_mics)
        # Generate the demixing vector from the steering vector
        # For each pair (i, j), Ws = conj(As_i) * As_j
        As_1_re = As[:, :, 0, idx[0, :]]
        As_1_im = As[:, :, 1, idx[0, :]]
        As_2_re = As[:, :, 0, idx[1, :]]
        As_2_im = As[:, :, 1, idx[1, :]]
        Ws_re = As_1_re * As_2_re + As_1_im * As_2_im
        Ws_im = As_1_re * As_2_im - As_1_im * As_2_re
        # Flatten the (freq, pair) axes into one projection axis
        Ws_re = Ws_re.reshape(Ws_re.shape[0], -1)
        Ws_im = Ws_im.reshape(Ws_im.shape[0], -1)
        # Get unique covariance values to reduce the number of computations
        XXs_val, XXs_idx = torch.unique(XXs, return_inverse=True, dim=1)
        # Perform the phase transform
        # (each cross-spectrum bin is normalized to unit magnitude)
        XXs_re = XXs_val[:, :, :, 0, :]
        XXs_im = XXs_val[:, :, :, 1, :]
        XXs_re = XXs_re.reshape((XXs_re.shape[0], XXs_re.shape[1], -1))
        XXs_im = XXs_im.reshape((XXs_im.shape[0], XXs_im.shape[1], -1))
        XXs_abs = torch.sqrt(XXs_re ** 2 + XXs_im ** 2) + eps
        XXs_re_norm = XXs_re / XXs_abs
        XXs_im_norm = XXs_im / XXs_abs
        # Project on the demixing vectors, and keep only real part
        Ys_A = torch.matmul(XXs_re_norm, Ws_re.transpose(0, 1))
        Ys_B = torch.matmul(XXs_im_norm, Ws_im.transpose(0, 1))
        Ys = Ys_A - Ys_B
        # Get maximum points
        # (the candidate direction with the highest steered power wins)
        _, doas_idx = torch.max(Ys, dim=2)
        # Repeat for each frame
        # Map winning indices back to xyz doas, then expand the unique
        # frames back onto the full time axis
        doas = (doas[doas_idx, :])[:, XXs_idx, :]
        return doas
class Music(torch.nn.Module):
    """Multiple Signal Classification (MUSIC) localization.

    Scans a set of candidate directions of arrival (doas) and, for each
    frame, selects the direction whose steering vector is most orthogonal
    to the estimated noise subspace of the spatial covariance matrix.

    Arguments
    ---------
    mics : tensor
        The cartesian coordinates (xyz) in meters of each microphone.
        The tensor must have the following format (n_mics, 3).
    space : string
        If this parameter is set to 'sphere', the localization will
        be done in 3D by searching in a sphere of possible doas. If
        it set to 'circle', the search will be done in 2D by searching
        in a circle. By default, this parameter is set to 'sphere'.
        Note: The 'circle' option isn't implemented yet.
    sample_rate : int
        The sample rate in Hertz of the signals to perform MUSIC on.
        By default, this parameter is set to 16000 Hz.
    speed_sound : float
        The speed of sound in the medium. The speed is expressed in meters
        per second and the default value of this parameter is 343 m/s.
    eps : float
        A small value to avoid errors like division by 0. The default value
        of this parameter is 1e-20.
    n_sig : int
        An estimation of the number of sound sources. The default value is set
        to one source.

    Example
    -------
    >>> import torch
    >>> from speechbrain.dataio.dataio import read_audio
    >>> from speechbrain.processing.features import STFT
    >>> from speechbrain.processing.multi_mic import Covariance
    >>> from speechbrain.processing.multi_mic import SrpPhat
    >>> xs_speech = read_audio('tests/samples/multi-mic/speech_-0.82918_0.55279_-0.082918.flac')
    >>> xs_noise = read_audio('tests/samples/multi-mic/noise_diffuse.flac')
    >>> fs = 16000
    >>> xs_speech = xs_speech.unsqueeze(0) # [batch, time, channels]
    >>> xs_noise = xs_noise.unsqueeze(0)
    >>> ss1 = xs_speech
    >>> ns1 = 0.05 * xs_noise
    >>> xs1 = ss1 + ns1
    >>> ss2 = xs_speech
    >>> ns2 = 0.20 * xs_noise
    >>> xs2 = ss2 + ns2
    >>> ss = torch.cat((ss1,ss2), dim=0)
    >>> ns = torch.cat((ns1,ns2), dim=0)
    >>> xs = torch.cat((xs1,xs2), dim=0)
    >>> mics = torch.zeros((4,3), dtype=torch.float)
    >>> mics[0,:] = torch.FloatTensor([-0.05, -0.05, +0.00])
    >>> mics[1,:] = torch.FloatTensor([-0.05, +0.05, +0.00])
    >>> mics[2,:] = torch.FloatTensor([+0.05, +0.05, +0.00])
    >>> mics[3,:] = torch.FloatTensor([+0.05, +0.05, +0.00])
    >>> stft = STFT(sample_rate=fs)
    >>> cov = Covariance()
    >>> music = Music(mics=mics)
    >>> Xs = stft(xs)
    >>> XXs = cov(Xs)
    >>> doas = music(XXs)
    """
    def __init__(
        self,
        mics,
        space="sphere",
        sample_rate=16000,
        speed_sound=343.0,
        eps=1e-20,
        n_sig=1,
    ):
        super().__init__()
        # Generate the doas
        # NOTE(review): only 'sphere' sets self.doas; any other value of
        # `space` (including the unimplemented 'circle') leaves self.doas
        # unset and the doas2taus call below raises AttributeError — confirm
        # whether a fallback or explicit error is intended.
        if space == "sphere":
            self.doas = sphere()
        if space == "circle":
            pass
        # Generate associated taus with the doas
        self.taus = doas2taus(
            self.doas, mics=mics, fs=sample_rate, c=speed_sound
        )
        # Save epsilon
        self.eps = eps
        # Save number of signals
        self.n_sig = n_sig
    def forward(self, XXs):
        """Perform MUSIC localization on a signal by computing a steering
        vector and then by using the utility function _music to extract the doas.
        The result is a tensor containing the directions of arrival (xyz coordinates
        (in meters) in the direction of the sound source). The output tensor
        has the format (batch, time_steps, 3).

        Arguments
        ---------
        XXs : tensor
            The covariance matrices of the input signal. The tensor must
            have the format (batch, time_steps, n_fft/2 + 1, 2, n_mics + n_pairs).
        """
        # Get useful dimensions
        n_fft = XXs.shape[2]
        # Generate the steering vector
        As = steering(self.taus.to(XXs.device), n_fft)
        # Perform music
        doas = Music._music(
            XXs=XXs, As=As, doas=self.doas, n_sig=self.n_sig, eps=self.eps
        )
        return doas
    @staticmethod
    def _music(XXs, As, doas, n_sig, eps=1e-20):
        """Perform multiple signal classification to find the
        direction of arrival of the sound source. The result
        has the format: (batch, time_steps, 3).

        Arguments
        ---------
        XXs : tensor
            The covariance matrices of the input signal. The tensor must
            have the format (batch, time_steps, n_fft/2 + 1, 2, n_mics + n_pairs).
        As : tensor
            The steering vector that covers all the potential directions
            of arrival. The tensor must have the format
            (n_doas, n_fft/2 + 1, 2, n_mics).
        doas : tensor
            All the possible directions of arrival that will be scanned. The
            tensor must have the format (n_doas, 3).
        n_sig : int
            The number of signals in the signal + noise subspace (default is 1).
        """
        # Putting on the right device
        As = As.to(XXs.device)
        doas = doas.to(XXs.device)
        # Collecting data
        n_mics = As.shape[3]
        n_doas = As.shape[0]
        n_bins = As.shape[2]
        # Size of the noise subspace (total channels minus signal count).
        svd_range = n_mics - n_sig
        # Get unique values to reduce computations
        XXs_val, XXs_idx = torch.unique(XXs, return_inverse=True, dim=1)
        # Singular value decomposition
        # NOTE(review): `eig` is imported at module level (not visible in
        # this chunk); presumably speechbrain.processing.decomposition.
        Us, _ = eig.svdl(XXs_val)
        # Format for the projection
        # Replicate the singular vectors once per candidate direction, then
        # keep the first `svd_range` columns — with ascending eigenvalue
        # ordering these span the noise subspace.
        Us = Us.unsqueeze(2).repeat(1, 1, n_doas, 1, 1, 1, 1)
        Us_re = Us[..., range(0, svd_range), 0]
        Us_im = Us[..., range(0, svd_range), 1]
        # Fixing the format of the steering vector
        As = (
            As.unsqueeze(0)
            .unsqueeze(0)
            .unsqueeze(6)
            .permute(0, 1, 2, 3, 6, 5, 4)
        )
        As = As.repeat(Us.shape[0], Us.shape[1], 1, 1, 1, 1, 1)
        As_re = As[..., 0]
        As_im = As[..., 1]
        # Applying MUSIC's formula
        # Complex product a^H U split into real/imaginary parts.
        As_mm_Us_re = torch.matmul(As_re, Us_re) + torch.matmul(As_im, Us_im)
        As_mm_Us_im = torch.matmul(As_re, Us_im) - torch.matmul(As_im, Us_re)
        As_mm_Us_abs = torch.sqrt(As_mm_Us_re ** 2 + As_mm_Us_im ** 2)
        As_mm_Us_sum = torch.sum(As_mm_Us_abs, dim=5)
        As_As_abs = torch.sum(As_re ** 2, dim=5) + torch.sum(As_im ** 2, dim=5)
        # Pseudo-spectrum: large when the steering vector is (nearly)
        # orthogonal to the noise subspace.
        Ps = (As_As_abs / (As_mm_Us_sum + eps)).squeeze(4)
        # Average over frequency bins.
        Ys = torch.sum(Ps, dim=3) / n_bins
        # Get maximum points
        _, doas_idx = torch.max(Ys, dim=2)
        # Map best candidate per unique frame back to every original frame.
        doas = (doas[doas_idx, :])[:, XXs_idx, :]
        return doas
def doas2taus(doas, mics, fs, c=343.0):
    """Convert directions of arrival into time differences of arrival.

    Projects each doa (a unit xyz direction, in meters) onto the microphone
    positions and scales by fs / c, yielding per-channel delays expressed in
    samples. The result has the format (batch, time_steps, n_mics).

    Arguments
    ---------
    doas : tensor
        The directions of arrival expressed with cartesian coordinates (xyz)
        in meters. The tensor must have the format (batch, time_steps, 3).
    mics : tensor
        The cartesian position (xyz) in meters of each microphone.
        The tensor must have the format (n_mics, 3).
    fs : int
        The sample rate in Hertz of the signals.
    c : float
        The speed of sound in the medium, in meters per second
        (default: 343 m/s).

    Example
    -------
    >>> import torch
    >>> from speechbrain.processing.multi_mic import sphere, doas2taus
    >>> fs = 16000
    >>> mics = torch.zeros((4,3), dtype=torch.float)
    >>> mics[0,:] = torch.FloatTensor([-0.05, -0.05, +0.00])
    >>> mics[1,:] = torch.FloatTensor([-0.05, +0.05, +0.00])
    >>> mics[2,:] = torch.FloatTensor([+0.05, +0.05, +0.00])
    >>> mics[3,:] = torch.FloatTensor([+0.05, +0.05, +0.00])
    >>> doas = sphere()
    >>> taus = doas2taus(doas, mics, fs)
    """
    # Dot product of every doa with every mic position, then convert the
    # resulting path-length difference (meters) to samples.
    projections = torch.matmul(doas.to(mics.device), mics.transpose(0, 1))
    return projections * (fs / c)
def tdoas2taus(tdoas):
    """Select the per-channel TDOAs and gather them in one tensor.

    The first n_mics entries of the last dimension of ``tdoas`` hold the
    delay of each channel relative to the reference; only those are kept.
    The result has the format (batch, time_steps, n_mics).

    Arguments
    ---------
    tdoas : tensor
        The time difference of arrival (TDOA) (in samples) for
        each timestamp. The tensor has the format
        (batch, time_steps, n_mics + n_pairs).

    Example
    -------
    >>> import torch
    >>> from speechbrain.dataio.dataio import read_audio
    >>> from speechbrain.processing.features import STFT
    >>> from speechbrain.processing.multi_mic import Covariance
    >>> from speechbrain.processing.multi_mic import GccPhat, tdoas2taus
    >>>
    >>> xs_speech = read_audio(
    ...     'tests/samples/multi-mic/speech_-0.82918_0.55279_-0.082918.flac'
    ... )
    >>> xs_noise = read_audio('tests/samples/multi-mic/noise_diffuse.flac')
    >>> xs = xs_speech + 0.05 * xs_noise
    >>> xs = xs.unsqueeze(0)
    >>> fs = 16000
    >>>
    >>> stft = STFT(sample_rate=fs)
    >>> cov = Covariance()
    >>> gccphat = GccPhat()
    >>>
    >>> Xs = stft(xs)
    >>> XXs = cov(Xs)
    >>> tdoas = gccphat(XXs)
    >>> taus = tdoas2taus(tdoas)
    """
    # n_mics + n_pairs == n_mics * (n_mics + 1) / 2; invert that to
    # recover the channel count from the last-dimension size.
    pair_count = tdoas.shape[-1]
    channel_count = int(((1 + 8 * pair_count) ** 0.5 - 1) / 2)
    return tdoas[..., list(range(0, channel_count))]
def steering(taus, n_fft):
    """Compute a steering vector from per-channel delays.

    For every STFT bin k, builds exp(-j * omega_k * tau) for each channel,
    where omega_k is the angular frequency of bin k. The result has the
    format (batch, time_step, n_fft/2 + 1, 2, n_mics) with the real part
    at index 0 and the imaginary part at index 1 of the second-to-last
    dimension.

    Arguments
    ---------
    taus : tensor
        The time differences of arrival for each channel. The tensor must
        have the format (batch, time_steps, n_mics).
    n_fft : int
        The number of bins resulting from the STFT. It is assumed that the
        argument "onesided" was set to True for the STFT.

    Example
    -------
    >>> import torch
    >>> from speechbrain.dataio.dataio import read_audio
    >>> from speechbrain.processing.features import STFT
    >>> from speechbrain.processing.multi_mic import Covariance
    >>> from speechbrain.processing.multi_mic import GccPhat, tdoas2taus, steering
    >>>
    >>> xs_speech = read_audio(
    ...     'tests/samples/multi-mic/speech_-0.82918_0.55279_-0.082918.flac'
    ... )
    >>> xs_noise = read_audio('tests/samples/multi-mic/noise_diffuse.flac')
    >>> xs = xs_speech + 0.05 * xs_noise
    >>> xs = xs.unsqueeze(0) # [batch, time, channels]
    >>> fs = 16000
    >>> stft = STFT(sample_rate=fs)
    >>> cov = Covariance()
    >>> gccphat = GccPhat()
    >>>
    >>> Xs = stft(xs)
    >>> n_fft = Xs.shape[2]
    >>> XXs = cov(Xs)
    >>> tdoas = gccphat(XXs)
    >>> taus = tdoas2taus(tdoas)
    >>> As = steering(taus, n_fft)
    """
    pi = 3.141592653589793
    # One-sided spectrum: the analysis frame had (n_fft - 1) * 2 samples.
    frame_size = int((n_fft - 1) * 2)
    # Angular frequency of each bin.
    omegas = 2 * pi * torch.arange(0, n_fft, device=taus.device) / frame_size
    # Broadcast instead of repeat: phase(..., mic, bin) = -omega * tau.
    phases = -1.0 * omegas * taus.unsqueeze(-1)
    # Stack real/imaginary parts of exp(j * phase) as a trailing dimension.
    parts = torch.stack((torch.cos(phases), torch.sin(phases)), dim=-1)
    # Reorder the trailing dims from (n_mics, n_fft, 2) to (n_fft, 2, n_mics).
    return parts.transpose(-3, -1).transpose(-3, -2)
def sphere(levels_count=4):
    """ This function generates cartesian coordinates (xyz) for a set
    of points forming a 3D sphere. The coordinates are expressed in
    meters and can be used as doas. The result has the format:
    (n_points, 3).

    The points are obtained by repeatedly subdividing the triangles of an
    icosahedron and re-projecting the vertices onto the unit sphere
    (an "icosphere"). Each subdivision level multiplies the triangle
    count by 4, giving 10 * 4^levels_count + 2 points.

    Arguments
    ---------
    levels_count : int
        A number proportional to the number of points that the user
        wants to generate.
        - If levels_count = 1, then the sphere will have 42 points
        - If levels_count = 2, then the sphere will have 162 points
        - If levels_count = 3, then the sphere will have 642 points
        - If levels_count = 4, then the sphere will have 2562 points
        - If levels_count = 5, then the sphere will have 10242 points
        - ...
        By default, levels_count is set to 4.

    Example
    -------
    >>> import torch
    >>> from speechbrain.processing.multi_mic import sphere
    >>> doas = sphere()
    """
    # Generate points at level 0: the 12 vertices of a unit icosahedron
    # (two poles plus two staggered rings of 5 vertices at height +/- h).
    h = (5.0 ** 0.5) / 5.0
    r = (2.0 / 5.0) * (5.0 ** 0.5)
    pi = 3.141592654  # NOTE(review): truncated pi constant — precision is ample for vertex generation.
    pts = torch.zeros((12, 3), dtype=torch.float)
    pts[0, :] = torch.FloatTensor([0, 0, 1])
    pts[11, :] = torch.FloatTensor([0, 0, -1])
    pts[range(1, 6), 0] = r * torch.sin(2.0 * pi * torch.arange(0, 5) / 5.0)
    pts[range(1, 6), 1] = r * torch.cos(2.0 * pi * torch.arange(0, 5) / 5.0)
    pts[range(1, 6), 2] = h
    pts[range(6, 11), 0] = (
        -1.0 * r * torch.sin(2.0 * pi * torch.arange(0, 5) / 5.0)
    )
    pts[range(6, 11), 1] = (
        -1.0 * r * torch.cos(2.0 * pi * torch.arange(0, 5) / 5.0)
    )
    pts[range(6, 11), 2] = -1.0 * h
    # Generate triangles at level 0: the 20 faces of the icosahedron,
    # expressed as triples of vertex indices into `pts`.
    trs = torch.zeros((20, 3), dtype=torch.long)
    trs[0, :] = torch.LongTensor([0, 2, 1])
    trs[1, :] = torch.LongTensor([0, 3, 2])
    trs[2, :] = torch.LongTensor([0, 4, 3])
    trs[3, :] = torch.LongTensor([0, 5, 4])
    trs[4, :] = torch.LongTensor([0, 1, 5])
    trs[5, :] = torch.LongTensor([9, 1, 2])
    trs[6, :] = torch.LongTensor([10, 2, 3])
    trs[7, :] = torch.LongTensor([6, 3, 4])
    trs[8, :] = torch.LongTensor([7, 4, 5])
    trs[9, :] = torch.LongTensor([8, 5, 1])
    trs[10, :] = torch.LongTensor([4, 7, 6])
    trs[11, :] = torch.LongTensor([5, 8, 7])
    trs[12, :] = torch.LongTensor([1, 9, 8])
    trs[13, :] = torch.LongTensor([2, 10, 9])
    trs[14, :] = torch.LongTensor([3, 6, 10])
    trs[15, :] = torch.LongTensor([11, 6, 7])
    trs[16, :] = torch.LongTensor([11, 7, 8])
    trs[17, :] = torch.LongTensor([11, 8, 9])
    trs[18, :] = torch.LongTensor([11, 9, 10])
    trs[19, :] = torch.LongTensor([11, 10, 6])
    # Generate next levels: each triangle (0,1,2) is split into 4 by the
    # edge midpoints A=(0,1), B=(0,2), C=(1,2).
    for levels_index in range(0, levels_count):
        #      0
        #     / \
        #    A---B
        #   / \ / \
        #  1---C---2
        trs_count = trs.shape[0]
        subtrs_count = trs_count * 4
        # Each row of `subtrs` stores one sub-triangle as three vertex
        # *pairs* (6 indices); a pair (v, v) denotes the original vertex v
        # and a pair (u, v) denotes the midpoint of edge (u, v).
        subtrs = torch.zeros((subtrs_count, 6), dtype=torch.long)
        # Corner sub-triangle at vertex 0: (0, A, B).
        subtrs[0 * trs_count + torch.arange(0, trs_count), 0] = trs[:, 0]
        subtrs[0 * trs_count + torch.arange(0, trs_count), 1] = trs[:, 0]
        subtrs[0 * trs_count + torch.arange(0, trs_count), 2] = trs[:, 0]
        subtrs[0 * trs_count + torch.arange(0, trs_count), 3] = trs[:, 1]
        subtrs[0 * trs_count + torch.arange(0, trs_count), 4] = trs[:, 0]
        subtrs[0 * trs_count + torch.arange(0, trs_count), 5] = trs[:, 2]
        # Corner sub-triangle at vertex 1: (A, 1, C).
        subtrs[1 * trs_count + torch.arange(0, trs_count), 0] = trs[:, 0]
        subtrs[1 * trs_count + torch.arange(0, trs_count), 1] = trs[:, 1]
        subtrs[1 * trs_count + torch.arange(0, trs_count), 2] = trs[:, 1]
        subtrs[1 * trs_count + torch.arange(0, trs_count), 3] = trs[:, 1]
        subtrs[1 * trs_count + torch.arange(0, trs_count), 4] = trs[:, 1]
        subtrs[1 * trs_count + torch.arange(0, trs_count), 5] = trs[:, 2]
        # Corner sub-triangle at vertex 2: (B, C, 2).
        subtrs[2 * trs_count + torch.arange(0, trs_count), 0] = trs[:, 2]
        subtrs[2 * trs_count + torch.arange(0, trs_count), 1] = trs[:, 0]
        subtrs[2 * trs_count + torch.arange(0, trs_count), 2] = trs[:, 1]
        subtrs[2 * trs_count + torch.arange(0, trs_count), 3] = trs[:, 2]
        subtrs[2 * trs_count + torch.arange(0, trs_count), 4] = trs[:, 2]
        subtrs[2 * trs_count + torch.arange(0, trs_count), 5] = trs[:, 2]
        # Center sub-triangle: (A, C, B).
        subtrs[3 * trs_count + torch.arange(0, trs_count), 0] = trs[:, 0]
        subtrs[3 * trs_count + torch.arange(0, trs_count), 1] = trs[:, 1]
        subtrs[3 * trs_count + torch.arange(0, trs_count), 2] = trs[:, 1]
        subtrs[3 * trs_count + torch.arange(0, trs_count), 3] = trs[:, 2]
        subtrs[3 * trs_count + torch.arange(0, trs_count), 4] = trs[:, 2]
        subtrs[3 * trs_count + torch.arange(0, trs_count), 5] = trs[:, 0]
        # Flatten every (vertex, vertex) pair of every sub-triangle into a
        # single list, sort each pair so (u, v) and (v, u) coincide, and
        # encode it as one scalar so duplicates can be removed at once.
        subtrs_flatten = torch.cat(
            (subtrs[:, [0, 1]], subtrs[:, [2, 3]], subtrs[:, [4, 5]]), axis=0
        )
        subtrs_sorted, _ = torch.sort(subtrs_flatten, axis=1)
        index_max = torch.max(subtrs_sorted)
        subtrs_scalar = (
            subtrs_sorted[:, 0] * (index_max + 1) + subtrs_sorted[:, 1]
        )
        unique_scalar, unique_indices = torch.unique(
            subtrs_scalar, return_inverse=True
        )
        # Decode the unique scalars back into (u, v) index pairs.
        unique_values = torch.zeros(
            (unique_scalar.shape[0], 2), dtype=unique_scalar.dtype
        )
        unique_values[:, 0] = torch.div(
            unique_scalar, index_max + 1, rounding_mode="floor"
        )
        unique_values[:, 1] = unique_scalar - unique_values[:, 0] * (
            index_max + 1
        )
        # New triangle list: one index per unique (vertex or midpoint) pair.
        trs = torch.transpose(torch.reshape(unique_indices, (3, -1)), 0, 1)
        # New point set: sum of each pair's endpoints (a (v, v) pair yields
        # 2 * pts[v], a (u, v) pair yields the edge midpoint scaled by 2),
        # then normalized back onto the unit sphere.
        pts = pts[unique_values[:, 0], :] + pts[unique_values[:, 1], :]
        pts /= torch.repeat_interleave(
            torch.unsqueeze(torch.sum(pts ** 2, axis=1) ** 0.5, 1), 3, 1
        )
    return pts
| 53,438 | 33.836375 | 120 | py |
speechbrain | speechbrain-main/speechbrain/processing/decomposition.py | """
Generalized Eigenvalue Decomposition.
This library contains different methods to adjust the format of
complex Hermitian matrices and find their eigenvectors and
eigenvalues.
Authors
* William Aris 2020
* Francois Grondin 2020
"""
import torch
def gevd(a, b=None):
    """Generalized eigenvalue decomposition of complex Hermitian matrices.

    Finds V and D such that AV = BVD, where the eigenvectors ``vs`` and the
    diagonal eigenvalue matrices ``ds`` are both returned in tensors with
    the format (*,C,C,2).

    Arguments
    ---------
    a : tensor
        A first input matrix (the matrix A above), given as the upper
        triangular part of a complex Hermitian matrix in the format
        (*,2,C+P).
    b : tensor
        A second input matrix (the matrix B above) in the same format
        (*,2,C+P). Optional; when None, B is taken to be the identity.

    Example
    -------
    Suppose we would like to compute eigenvalues/eigenvectors on the
    following complex Hermitian matrix:

    A = [ 52        34 + 37j  16 + j28 ;
          34 - 37j  125       41 + j3  ;
          16 - 28j  41 - j3   62       ]

    >>> a = torch.FloatTensor([[52,34,16,125,41,62],[0,37,28,0,3,0]])
    >>> vs, ds = gevd(a)

    where A = VDV^-1.
    """
    n_dims = a.dim()
    n_elems = a.shape[n_dims - 1]
    n_channels = int(round(((1 + 8 * n_elems) ** 0.5 - 1) / 2))
    # Work in the real block-matrix representation of the complex inputs.
    a_block = f(a)
    if b is None:
        # Default B: identity (ones on the real part of the diagonal).
        b = torch.zeros_like(a)
        triu = torch.triu_indices(n_channels, n_channels)
        b[..., 0, triu[0] == triu[1]] = 1.0
    b_block = f(b)
    # Whiten with the Cholesky factor of B: B = L L^T.
    chol = torch.linalg.cholesky(b_block)
    chol_inv = torch.inverse(chol)
    chol_inv_t = chol_inv.transpose(n_dims - 2, n_dims - 1)
    # Reduce to a standard Hermitian eigenproblem on C = L^-1 A L^-T.
    c_block = torch.matmul(chol_inv, torch.matmul(a_block, chol_inv_t))
    evals, y_block = torch.linalg.eigh(c_block, UPLO="U")
    # Back-transform the eigenvectors and embed eigenvalues on a diagonal.
    v_block = torch.matmul(chol_inv_t, y_block)
    d_block = torch.diag_embed(evals)
    # Convert the block matrices back to full complex matrices.
    return ginv(v_block), ginv(d_block)
def svdl(a):
    """ Singular Value Decomposition (Left Singular Vectors).

    This function finds the eigenvalues and eigenvectors of the
    input multiplied by its transpose (a x a.T).

    The function will return (in this order):
    1. The left singular vectors in a tensor with the format (*,C,C,2)
    2. The singular values on diagonal matrices with the format (*,C,C,2)

    (Note: an earlier version of this docstring listed the eigenvalues
    first, which contradicted the actual return order ``us, ds``.)

    Arguments
    ---------
    a : tensor
        A complex input matrix to work with. The tensor must have
        the following format: (*,2,C+P).

    Example
    -------
    >>> import torch
    >>> from speechbrain.processing.features import STFT
    >>> from speechbrain.processing.multi_mic import Covariance
    >>> from speechbrain.processing.decomposition import svdl
    >>> from speechbrain.dataio.dataio import read_audio_multichannel
    >>> xs_speech = read_audio_multichannel(
    ...    'tests/samples/multi-mic/speech_-0.82918_0.55279_-0.082918.flac'
    ... )
    >>> xs_noise = read_audio_multichannel('tests/samples/multi-mic/noise_diffuse.flac')
    >>> xs = xs_speech + 0.05 * xs_noise
    >>> xs = xs.unsqueeze(0).float()
    >>>
    >>> stft = STFT(sample_rate=16000)
    >>> cov = Covariance()
    >>>
    >>> Xs = stft(xs)
    >>> XXs = cov(Xs)
    >>> us, ds = svdl(XXs)
    """
    # Work in the real block-matrix representation.
    a_block = f(a)
    gram = torch.matmul(a_block, a_block.transpose(-2, -1))
    # Eigendecomposition of the Gram matrix gives the left singular
    # vectors; its eigenvalues are the squared singular values.
    evals, u_block = torch.linalg.eigh(gram, UPLO="U")
    d_block = torch.diag_embed(torch.sqrt(evals))
    # Convert the block matrices back to full complex matrices.
    return ginv(u_block), ginv(d_block)
def f(ws):
    """Transform 1.

    Expands a complex Hermitian matrix, given as its upper triangular
    part, into the equivalent real block matrix: each complex entry
    x + jy becomes the 2x2 real block [[x, -y], [y, x]], interleaved so
    that even rows/columns carry real parts and odd ones imaginary parts.
    The output tensor has the format (*,2C,2C).

    Arguments
    ---------
    ws : tensor
        An input matrix. The tensor must have the following format:
        (*,2,C+P)
    """
    n_dims = ws.dim()
    # Move the (real, imag) axis last so ws[..., 0] / ws[..., 1] split it.
    ws = ws.transpose(n_dims - 2, n_dims - 1)
    n_elems = ws.shape[n_dims - 2]
    n_channels = int(round(((1 + 8 * n_elems) ** 0.5 - 1) / 2))
    wsh = torch.zeros(
        ws.shape[: n_dims - 2] + (2 * n_channels, 2 * n_channels),
        dtype=ws.dtype,
        device=ws.device,
    )
    triu = torch.triu_indices(n_channels, n_channels)
    re = ws[..., 0]
    im = ws[..., 1]
    row_e = triu[0] * 2  # even lane of the row channel
    col_e = triu[1] * 2  # even lane of the column channel
    row_o = row_e + 1
    col_o = col_e + 1
    # Assignment order matters: diagonal positions are written more than
    # once and the later writes must win, exactly as in the original.
    wsh[..., col_e, row_e] = re
    wsh[..., row_e, col_e] = re
    wsh[..., col_o, row_o] = re
    wsh[..., row_o, col_o] = re
    wsh[..., row_e, col_o] = -im
    wsh[..., col_o, row_e] = -im
    wsh[..., row_o, col_e] = im
    wsh[..., col_e, row_o] = im
    return wsh
def finv(wsh):
    """ Inverse transform 1.

    Collapses a real block matrix representing a complex Hermitian matrix
    back to the upper-triangular complex representation. The result has
    the format (*,2,C+P): real parts are read from the even rows/columns,
    imaginary parts (negated) from the adjacent odd columns.

    Arguments
    ---------
    wsh : tensor
        An input matrix. The tensor must have the following format:
        (*,2C,2C)
    """
    n_channels = wsh.shape[-1] // 2
    n_pairs = n_channels * (n_channels + 1) // 2
    ws = torch.zeros(
        wsh.shape[:-2] + (2, n_pairs), dtype=wsh.dtype, device=wsh.device
    )
    triu = torch.triu_indices(n_channels, n_channels)
    rows = triu[0] * 2
    cols = triu[1] * 2
    ws[..., 0, :] = wsh[..., rows, cols]
    ws[..., 1, :] = -wsh[..., rows, cols + 1]
    return ws
def g(ws):
    """Transform 2.

    Converts a full complex matrix (last dimension holding real and
    imaginary parts) into the interleaved real block matrix, where each
    complex entry x + jy becomes the 2x2 block [[x, -y], [y, x]].
    The result has the format (*,2C,2C).

    Arguments
    ---------
    ws : tensor
        An input matrix. The tensor must have the following format:
        (*,C,C,2)
    """
    n_channels = ws.shape[-2]
    size = 2 * n_channels
    wsh = torch.zeros(
        ws.shape[:-3] + (size, size), dtype=ws.dtype, device=ws.device
    )
    real = ws[..., 0]
    imag = ws[..., 1]
    wsh[..., 0::2, 0::2] = real
    wsh[..., 1::2, 1::2] = real
    wsh[..., 0::2, 1::2] = -imag
    wsh[..., 1::2, 0::2] = imag
    return wsh
def ginv(wsh):
    """Inverse transform 2.

    Converts an interleaved real block matrix (Hermitian representation)
    back into a full complex matrix: real parts are sampled from the
    even-row/even-column lattice, imaginary parts from the odd-row/
    even-column lattice. The result has the format (*,C,C,2).

    Arguments
    ---------
    wsh : tensor
        An input matrix. The tensor must have the following format:
        (*,2C,2C)
    """
    n_channels = wsh.shape[-1] // 2
    ws = torch.zeros(
        wsh.shape[:-2] + (n_channels, n_channels, 2),
        dtype=wsh.dtype,
        device=wsh.device,
    )
    ws[..., 0] = wsh[..., 0::2, 0::2]
    ws[..., 1] = wsh[..., 1::2, 0::2]
    return ws
def pos_def(ws, alpha=0.001, eps=1e-20):
    """Diagonal modification.

    Regularizes a complex Hermitian matrix (given as its upper triangular
    part) by adding ``alpha * trace + eps`` to the real part of every
    diagonal entry. The input is not modified; a new tensor in the same
    format (*,2,C+P) is returned.

    Arguments
    ---------
    ws : tensor
        An input matrix. The tensor must have the following format:
        (*,2,C+P)
    alpha : float
        A coefficient to multiply the trace. The default value is 0.001.
    eps : float
        A small value to increase the real part of the diagonal. The
        default value is 1e-20.
    """
    n_elems = ws.shape[-1]
    n_channels = int(round(((1 + 8 * n_elems) ** 0.5 - 1) / 2))
    # Boolean mask over the packed upper triangle selecting the diagonal.
    triu = torch.triu_indices(n_channels, n_channels)
    on_diag = triu[0, :] == triu[1, :]
    # Trace of the real part, kept with a trailing singleton dimension so
    # it broadcasts over all diagonal entries below.
    trace = torch.sum(ws[..., 0, on_diag], dim=-1, keepdim=True)
    ws_pf = ws.clone()
    ws_pf[..., 0, on_diag] += alpha * trace + eps
    return ws_pf
def inv(x):
    """Inverse Hermitian Matrix.

    Inverts a complex Hermitian matrix given as its upper triangular part.
    The matrix is first regularized (pos_def), inverted in the real
    block-matrix domain, and then unpacked into a full complex matrix
    with the format (*, C, C, 2).

    Arguments
    ---------
    x : tensor
        An input matrix to work with. The tensor must have the
        following format: (*, 2, C+P)

    Example
    -------
    >>> import torch
    >>>
    >>> from speechbrain.dataio.dataio import read_audio
    >>> from speechbrain.processing.features import STFT
    >>> from speechbrain.processing.multi_mic import Covariance
    >>> from speechbrain.processing.decomposition import inv
    >>>
    >>> xs_speech = read_audio(
    ...    'tests/samples/multi-mic/speech_-0.82918_0.55279_-0.082918.flac'
    ... )
    >>> xs_noise = read_audio('tests/samples/multi-mic/noise_0.70225_-0.70225_0.11704.flac')
    >>> xs = xs_speech + 0.05 * xs_noise
    >>> xs = xs.unsqueeze(0).float()
    >>>
    >>> stft = STFT(sample_rate=16000)
    >>> cov = Covariance()
    >>>
    >>> Xs = stft(xs)
    >>> XXs = cov(Xs)
    >>> XXs_inv = inv(XXs)
    """
    n_pairs = x.shape[-1]
    n_channels = int(round(((1 + 8 * n_pairs) ** 0.5 - 1) / 2))
    # Regularize, invert in the real block-matrix domain, and pack the
    # result back into the triangular complex representation.
    block_inv = torch.inverse(f(pos_def(x)))
    tri_inv = finv(block_inv)
    # Scatter the triangular result into a full Hermitian matrix: the
    # lower triangle gets the conjugate, the upper triangle the values
    # themselves (written last so the diagonal keeps the upper values,
    # matching the original assignment order).
    triu = torch.triu_indices(n_channels, n_channels)
    x_inv = torch.zeros(
        x.shape[:-2] + (n_channels, n_channels, 2),
        dtype=x.dtype,
        device=x.device,
    )
    re = tri_inv[..., 0, :]
    im = tri_inv[..., 1, :]
    x_inv[..., triu[1], triu[0], 0] = re
    x_inv[..., triu[1], triu[0], 1] = -im
    x_inv[..., triu[0], triu[1], 0] = re
    x_inv[..., triu[0], triu[1], 1] = im
    return x_inv
| 11,655 | 26.818616 | 92 | py |
speechbrain | speechbrain-main/speechbrain/lobes/features.py | """Basic feature pipelines.
Authors
* Mirco Ravanelli 2020
* Peter Plantinga 2020
* Sarthak Yadav 2020
"""
import torch
from speechbrain.processing.features import (
STFT,
spectral_magnitude,
Filterbank,
DCT,
Deltas,
ContextWindow,
)
from speechbrain.nnet.CNN import GaborConv1d
from speechbrain.nnet.normalization import PCEN
from speechbrain.nnet.pooling import GaussianLowpassPooling
class Fbank(torch.nn.Module):
    """Generate features for input to the speech pipeline.

    Computes an STFT, takes the spectral magnitude, and applies a Mel
    filterbank, optionally appending delta features and context windows.

    Arguments
    ---------
    deltas : bool (default: False)
        Whether or not to append derivatives and second derivatives
        to the features.
    context : bool (default: False)
        Whether or not to append forward and backward contexts to
        the features.
    requires_grad : bool (default: False)
        Whether to allow parameters (i.e. fbank centers and
        spreads) to update during training.
    sample_rate : int (default: 16000)
        Sampling rate for the input waveforms.
    f_min : int (default: 0)
        Lowest frequency for the Mel filters.
    f_max : int (default: None)
        Highest frequency for the Mel filters. Note that if f_max is not
        specified it will be set to sample_rate // 2.
    win_length : float (default: 25)
        Length (in ms) of the sliding window used to compute the STFT.
    hop_length : float (default: 10)
        Length (in ms) of the hop of the sliding window used to compute
        the STFT.
    n_fft : int (default: 400)
        Number of samples to use in each stft.
    n_mels : int (default: 40)
        Number of Mel filters.
    filter_shape : str (default: triangular)
        Shape of the filters ('triangular', 'rectangular', 'gaussian').
    param_change_factor : float (default: 1.0)
        If freeze=False, this parameter affects the speed at which the filter
        parameters (i.e., central_freqs and bands) can be changed. When high
        (e.g., param_change_factor=1) the filters change a lot during training.
        When low (e.g. param_change_factor=0.1) the filter parameters are more
        stable during training.
    param_rand_factor : float (default: 0.0)
        This parameter can be used to randomly change the filter parameters
        (i.e, central frequencies and bands) during training. It is thus a
        sort of regularization. param_rand_factor=0 does not affect, while
        param_rand_factor=0.15 allows random variations within +-15% of the
        standard values of the filter parameters (e.g., if the central freq
        is 100 Hz, we can randomly change it from 85 Hz to 115 Hz).
    left_frames : int (default: 5)
        Number of frames of left context to add.
    right_frames : int (default: 5)
        Number of frames of right context to add.

    Example
    -------
    >>> import torch
    >>> inputs = torch.randn([10, 16000])
    >>> feature_maker = Fbank()
    >>> feats = feature_maker(inputs)
    >>> feats.shape
    torch.Size([10, 101, 40])
    """
    def __init__(
        self,
        deltas=False,
        context=False,
        requires_grad=False,
        sample_rate=16000,
        f_min=0,
        f_max=None,
        n_fft=400,
        n_mels=40,
        filter_shape="triangular",
        param_change_factor=1.0,
        param_rand_factor=0.0,
        left_frames=5,
        right_frames=5,
        win_length=25,
        hop_length=10,
    ):
        super().__init__()
        self.deltas = deltas
        self.context = context
        self.requires_grad = requires_grad
        # Default the upper band edge to the Nyquist frequency.
        if f_max is None:
            f_max = sample_rate / 2
        self.compute_STFT = STFT(
            sample_rate=sample_rate,
            n_fft=n_fft,
            win_length=win_length,
            hop_length=hop_length,
        )
        self.compute_fbanks = Filterbank(
            sample_rate=sample_rate,
            n_fft=n_fft,
            n_mels=n_mels,
            f_min=f_min,
            f_max=f_max,
            freeze=not requires_grad,
            filter_shape=filter_shape,
            param_change_factor=param_change_factor,
            param_rand_factor=param_rand_factor,
        )
        self.compute_deltas = Deltas(input_size=n_mels)
        self.context_window = ContextWindow(
            left_frames=left_frames, right_frames=right_frames,
        )
    def forward(self, wav):
        """Returns a set of features generated from the input waveforms.

        Arguments
        ---------
        wav : tensor
            A batch of audio signals to transform to features.
        """
        STFT = self.compute_STFT(wav)
        mag = spectral_magnitude(STFT)
        fbanks = self.compute_fbanks(mag)
        if self.deltas:
            # Append first- and second-order derivatives along the feature dim.
            delta1 = self.compute_deltas(fbanks)
            delta2 = self.compute_deltas(delta1)
            fbanks = torch.cat([fbanks, delta1, delta2], dim=2)
        if self.context:
            fbanks = self.context_window(fbanks)
        return fbanks
class MFCC(torch.nn.Module):
    """Generate MFCC features for input to the speech pipeline.

    Computes an STFT, the spectral magnitude, a Mel filterbank, and a DCT
    to produce Mel-frequency cepstral coefficients, optionally augmented
    with delta coefficients and context windows.

    Arguments
    ---------
    deltas : bool (default: True)
        Whether or not to append derivatives and second derivatives
        to the features.
    context : bool (default: True)
        Whether or not to append forward and backward contexts to
        the features.
    requires_grad : bool (default: False)
        Whether to allow parameters (i.e. fbank centers and
        spreads) to update during training.
    sample_rate : int (default: 16000)
        Sampling rate for the input waveforms.
    f_min : int (default: 0)
        Lowest frequency for the Mel filters.
    f_max : int (default: None)
        Highest frequency for the Mel filters. If not specified, it is
        set to sample_rate / 2.
    n_fft : int (default: 400)
        Number of samples to use in each stft.
    n_mels : int (default: 23)
        Number of filters to use for creating filterbank.
    n_mfcc : int (default: 20)
        Number of output coefficients.
    filter_shape : str (default 'triangular')
        Shape of the filters ('triangular', 'rectangular', 'gaussian').
    param_change_factor : float (default 1.0)
        If freeze=False, this parameter affects the speed at which the
        filter parameters (central frequencies and bands) can change
        during training: high values allow large changes, low values
        keep the parameters more stable.
    param_rand_factor : float (default 0.0)
        Random perturbation of the filter parameters during training, as a
        form of regularization; e.g. 0.15 allows variations within +-15%
        of the standard parameter values, and 0 disables it.
    left_frames : int (default 5)
        Number of frames of left context to add.
    right_frames : int (default 5)
        Number of frames of right context to add.
    win_length : float (default: 25)
        Length (in ms) of the sliding window used to compute the STFT.
    hop_length : float (default: 10)
        Length (in ms) of the hop of the sliding window used to compute
        the STFT.

    Example
    -------
    >>> import torch
    >>> inputs = torch.randn([10, 16000])
    >>> feature_maker = MFCC()
    >>> feats = feature_maker(inputs)
    >>> feats.shape
    torch.Size([10, 101, 660])
    """

    def __init__(
        self,
        deltas=True,
        context=True,
        requires_grad=False,
        sample_rate=16000,
        f_min=0,
        f_max=None,
        n_fft=400,
        n_mels=23,
        n_mfcc=20,
        filter_shape="triangular",
        param_change_factor=1.0,
        param_rand_factor=0.0,
        left_frames=5,
        right_frames=5,
        win_length=25,
        hop_length=10,
    ):
        super().__init__()
        self.deltas = deltas
        self.context = context
        self.requires_grad = requires_grad

        # Default the upper band edge to the Nyquist frequency.
        f_max = sample_rate / 2 if f_max is None else f_max

        self.compute_STFT = STFT(
            sample_rate=sample_rate,
            n_fft=n_fft,
            win_length=win_length,
            hop_length=hop_length,
        )
        self.compute_fbanks = Filterbank(
            sample_rate=sample_rate,
            n_fft=n_fft,
            n_mels=n_mels,
            f_min=f_min,
            f_max=f_max,
            freeze=not requires_grad,
            filter_shape=filter_shape,
            param_change_factor=param_change_factor,
            param_rand_factor=param_rand_factor,
        )
        self.compute_dct = DCT(input_size=n_mels, n_out=n_mfcc)
        self.compute_deltas = Deltas(input_size=n_mfcc)
        self.context_window = ContextWindow(
            left_frames=left_frames, right_frames=right_frames,
        )

    def forward(self, wav):
        """Returns a set of mfccs generated from the input waveforms.

        Arguments
        ---------
        wav : tensor
            A batch of audio signals to transform to features.
        """
        # STFT -> magnitude -> Mel filterbank -> DCT.
        features = self.compute_STFT(wav)
        features = spectral_magnitude(features)
        features = self.compute_fbanks(features)
        features = self.compute_dct(features)
        if self.deltas:
            # Append first- and second-order derivatives along the feature dim.
            d1 = self.compute_deltas(features)
            d2 = self.compute_deltas(d1)
            features = torch.cat([features, d1, d2], dim=2)
        if self.context:
            features = self.context_window(features)
        return features
class Leaf(torch.nn.Module):
    """
    This class implements the LEAF audio frontend from
    Neil Zeghidour, Olivier Teboul, Félix de Chaumont Quitry & Marco Tagliasacchi, "LEAF: A LEARNABLE FRONTEND
    FOR AUDIO CLASSIFICATION", in Proc. of ICLR 2021 (https://arxiv.org/abs/2101.08596)
    Arguments
    ---------
    out_channels : int
        It is the number of output channels.
    window_len: float
        length of filter window in milliseconds
    window_stride : float
        Stride factor of the filters in milliseconds
    sample_rate : int,
        Sampling rate of the input signals. It is only used for sinc_conv.
    min_freq : float
        Lowest possible frequency (in Hz) for a filter
    max_freq : float
        Highest possible frequency (in Hz) for a filter
    use_pcen: bool
        If True (default), a per-channel energy normalization layer is used
    learnable_pcen: bool:
        If True (default), the per-channel energy normalization layer is learnable
    use_legacy_complex: bool
        If False, torch.complex64 data type is used for gabor impulse responses
        If True, computation is performed on two real-valued tensors
    skip_transpose: bool
        If False, uses batch x time x channel convention of speechbrain.
        If True, uses batch x channel x time convention.
    Example
    -------
    >>> inp_tensor = torch.rand([10, 8000])
    >>> leaf = Leaf(
    ...     out_channels=40, window_len=25., window_stride=10., in_channels=1
    ... )
    >>> out_tensor = leaf(inp_tensor)
    >>> out_tensor.shape
    torch.Size([10, 50, 40])
    """
    def __init__(
        self,
        out_channels,
        window_len: float = 25.0,
        window_stride: float = 10.0,
        sample_rate: int = 16000,
        input_shape=None,
        in_channels=None,
        min_freq=60.0,
        max_freq=None,
        use_pcen=True,
        learnable_pcen=True,
        use_legacy_complex=False,
        skip_transpose=False,
        n_fft=512,
    ):
        super(Leaf, self).__init__()
        self.out_channels = out_channels
        # Convert window length/stride from milliseconds to samples.
        window_size = int(sample_rate * window_len // 1000 + 1)
        window_stride = int(sample_rate * window_stride // 1000)
        if input_shape is None and in_channels is None:
            raise ValueError("Must provide one of input_shape or in_channels")
        if in_channels is None:
            in_channels = self._check_input_shape(input_shape)
        # Learnable Gabor filterbank. 2 * out_channels filters because each
        # output channel has a real/imaginary filter pair; the squared-modulus
        # activation below folds the pairs back into out_channels channels.
        self.complex_conv = GaborConv1d(
            out_channels=2 * out_channels,
            in_channels=in_channels,
            kernel_size=window_size,
            stride=1,
            padding="same",
            bias=False,
            n_fft=n_fft,
            sample_rate=sample_rate,
            min_freq=min_freq,
            max_freq=max_freq,
            use_legacy_complex=use_legacy_complex,
            skip_transpose=True,
        )
        # Learnable lowpass pooling performs the temporal downsampling
        # (stride = window_stride samples).
        self.pooling = GaussianLowpassPooling(
            in_channels=self.out_channels,
            kernel_size=window_size,
            stride=window_stride,
            skip_transpose=True,
        )
        # Optional per-channel energy normalization as compression stage.
        if use_pcen:
            self.compression = PCEN(
                self.out_channels,
                alpha=0.96,
                smooth_coef=0.04,
                delta=2.0,
                floor=1e-12,
                trainable=learnable_pcen,
                per_channel_smooth_coef=True,
                skip_transpose=True,
            )
        else:
            self.compression = None
        self.skip_transpose = skip_transpose
    def forward(self, x):
        """
        Returns the learned LEAF features
        Arguments
        ---------
        x : torch.Tensor of shape (batch, time, 1) or (batch, time)
            batch of input signals. 2d or 3d tensors are expected.
        """
        # Internal computation uses (batch, channel, time); transpose in/out
        # unless the caller already provides channel-first tensors.
        if not self.skip_transpose:
            x = x.transpose(1, -1)
        unsqueeze = x.ndim == 2
        if unsqueeze:
            x = x.unsqueeze(1)
        outputs = self.complex_conv(x)
        outputs = self._squared_modulus_activation(outputs)
        outputs = self.pooling(outputs)
        # Clamp to a small positive floor before compression (log/PCEN safety).
        outputs = torch.maximum(
            outputs, torch.tensor(1e-5, device=outputs.device)
        )
        if self.compression:
            outputs = self.compression(outputs)
        if not self.skip_transpose:
            outputs = outputs.transpose(1, -1)
        return outputs
    def _squared_modulus_activation(self, x):
        # Real/imaginary filter outputs are interleaved along the channel
        # axis; avg_pool1d(x**2, 2) * 2 sums each adjacent pair, i.e. computes
        # the squared modulus and halves the channel count to out_channels.
        x = x.transpose(1, 2)
        output = 2 * torch.nn.functional.avg_pool1d(
            x ** 2.0, kernel_size=2, stride=2
        )
        output = output.transpose(1, 2)
        return output
    def _check_input_shape(self, shape):
        """Checks the input shape and returns the number of input channels.
        """
        # NOTE(review): both 2d (batch, time) and 3d (batch, time, 1) inputs
        # map to a single input channel; multichannel audio is not supported.
        if len(shape) == 2:
            in_channels = 1
        elif len(shape) == 3:
            in_channels = 1
        else:
            raise ValueError(
                "Leaf expects 2d or 3d inputs. Got " + str(len(shape))
            )
        return in_channels
| 14,790 | 32.615909 | 114 | py |
speechbrain | speechbrain-main/speechbrain/lobes/augment.py | """
Combinations of processing algorithms to implement common augmentations.
Examples:
* SpecAugment
* Environmental corruption (noise, reverberation)
Authors
* Peter Plantinga 2020
* Jianyuan Zhong 2020
"""
import os
import torch
import torchaudio
import speechbrain as sb
from speechbrain.utils.data_utils import download_file
from speechbrain.processing.speech_augmentation import (
SpeedPerturb,
DropFreq,
DropChunk,
AddBabble,
AddNoise,
AddReverb,
)
from speechbrain.utils.torch_audio_backend import check_torchaudio_backend
check_torchaudio_backend()
OPENRIR_URL = "http://www.openslr.org/resources/28/rirs_noises.zip"
class SpecAugment(torch.nn.Module):
    """An implementation of the SpecAugment algorithm.
    Reference:
        https://arxiv.org/abs/1904.08779
    Arguments
    ---------
    time_warp : bool
        Whether applying time warping.
    time_warp_window : int
        Time warp window.
    time_warp_mode : str
        Interpolation mode for time warping (default "bicubic").
    freq_mask : bool
        Whether applying freq mask.
    freq_mask_width : int or tuple
        Freq mask width range.
    n_freq_mask : int
        Number of freq mask.
    time_mask : bool
        Whether applying time mask.
    time_mask_width : int or tuple
        Time mask width range.
    n_time_mask : int
        Number of time mask.
    replace_with_zero : bool
        If True, replace masked value with 0, else replace masked value with mean of the input tensor.
    Example
    -------
    >>> aug = SpecAugment()
    >>> a = torch.rand([8, 120, 80])
    >>> a = aug(a)
    >>> print(a.shape)
    torch.Size([8, 120, 80])
    """
    def __init__(
        self,
        time_warp=True,
        time_warp_window=5,
        time_warp_mode="bicubic",
        freq_mask=True,
        freq_mask_width=(0, 20),
        n_freq_mask=2,
        time_mask=True,
        time_mask_width=(0, 100),
        n_time_mask=2,
        replace_with_zero=True,
    ):
        super().__init__()
        assert (
            time_warp or freq_mask or time_mask
        ), "at least one of time_warp, time_mask, or freq_mask should be applied"
        self.apply_time_warp = time_warp
        self.time_warp_window = time_warp_window
        self.time_warp_mode = time_warp_mode
        self.freq_mask = freq_mask
        # A scalar width is shorthand for the range (0, width).
        if isinstance(freq_mask_width, int):
            freq_mask_width = (0, freq_mask_width)
        self.freq_mask_width = freq_mask_width
        self.n_freq_mask = n_freq_mask
        self.time_mask = time_mask
        if isinstance(time_mask_width, int):
            time_mask_width = (0, time_mask_width)
        self.time_mask_width = time_mask_width
        self.n_time_mask = n_time_mask
        self.replace_with_zero = replace_with_zero
    def forward(self, x):
        """Takes in input a tensors and returns an augmented one."""
        # Order matters: warp the (unmasked) features first, then mask.
        if self.apply_time_warp:
            x = self.time_warp(x)
        if self.freq_mask:
            x = self.mask_along_axis(x, dim=2)
        if self.time_mask:
            x = self.mask_along_axis(x, dim=1)
        return x
    def time_warp(self, x):
        """Time warping with torch.nn.functional.interpolate"""
        original_size = x.shape
        window = self.time_warp_window
        # 2d interpolation requires 4D or higher dimension tensors
        # x: (Batch, Time, Freq) -> (Batch, 1, Time, Freq)
        if x.dim() == 3:
            x = x.unsqueeze(1)
        time = x.shape[2]
        # Too few frames to pick a warp center: return the input unchanged.
        if time - window <= window:
            return x.view(*original_size)
        # compute center and corresponding window
        c = torch.randint(window, time - window, (1,))[0]
        w = torch.randint(c - window, c + window, (1,))[0] + 1
        # Stretch frames [0, c) to length w and frames [c, time) to the
        # remaining time - w, then write both halves back in place.
        left = torch.nn.functional.interpolate(
            x[:, :, :c],
            (w, x.shape[3]),
            mode=self.time_warp_mode,
            align_corners=True,
        )
        right = torch.nn.functional.interpolate(
            x[:, :, c:],
            (time - w, x.shape[3]),
            mode=self.time_warp_mode,
            align_corners=True,
        )
        x[:, :, :w] = left
        x[:, :, w:] = right
        return x.view(*original_size)
    def mask_along_axis(self, x, dim):
        """Mask along time or frequency axis.
        Arguments
        ---------
        x : tensor
            Input tensor.
        dim : int
            Corresponding dimension to mask.
        """
        original_size = x.shape
        # Fold a possible leading channel dim into the batch dim.
        if x.dim() == 4:
            x = x.view(-1, x.shape[2], x.shape[3])
        batch, time, fea = x.shape
        # dim == 1 masks time frames, otherwise frequency bins.
        if dim == 1:
            D = time
            n_mask = self.n_time_mask
            width_range = self.time_mask_width
        else:
            D = fea
            n_mask = self.n_freq_mask
            width_range = self.freq_mask_width
        # Per-sample random widths and start positions; starts are bounded so
        # that even the widest drawn mask fits inside the axis.
        mask_len = torch.randint(
            width_range[0], width_range[1], (batch, n_mask), device=x.device
        ).unsqueeze(2)
        mask_pos = torch.randint(
            0, max(1, D - mask_len.max()), (batch, n_mask), device=x.device
        ).unsqueeze(2)
        # compute masks
        arange = torch.arange(D, device=x.device).view(1, 1, -1)
        mask = (mask_pos <= arange) * (arange < (mask_pos + mask_len))
        mask = mask.any(dim=1)
        if dim == 1:
            mask = mask.unsqueeze(2)
        else:
            mask = mask.unsqueeze(1)
        # Fill with zero or with the global mean of the (whole) tensor.
        if self.replace_with_zero:
            val = 0.0
        else:
            with torch.no_grad():
                val = x.mean()
        x = x.masked_fill_(mask, val)
        return x.view(*original_size)
class TimeDomainSpecAugment(torch.nn.Module):
    """A time-domain approximation of the SpecAugment algorithm.
    This augmentation module implements three augmentations in
    the time-domain.
    1. Drop chunks of the audio (zero amplitude or white noise)
    2. Drop frequency bands (with band-drop filters)
    3. Speed peturbation (via resampling to slightly different rate)
    Arguments
    ---------
    perturb_prob : float from 0 to 1
        The probability that a batch will have speed perturbation applied.
    drop_freq_prob : float from 0 to 1
        The probability that a batch will have frequencies dropped.
    drop_chunk_prob : float from 0 to 1
        The probability that a batch will have chunks dropped.
    speeds : list of ints
        A set of different speeds to use to perturb each batch.
        If None (default), the equivalent of [95, 100, 105] is used.
        See ``speechbrain.processing.speech_augmentation.SpeedPerturb``
    sample_rate : int
        Sampling rate of the input waveforms.
    drop_freq_count_low : int
        Lowest number of frequencies that could be dropped.
    drop_freq_count_high : int
        Highest number of frequencies that could be dropped.
    drop_chunk_count_low : int
        Lowest number of chunks that could be dropped.
    drop_chunk_count_high : int
        Highest number of chunks that could be dropped.
    drop_chunk_length_low : int
        Lowest length of chunks that could be dropped.
    drop_chunk_length_high : int
        Highest length of chunks that could be dropped.
    drop_chunk_noise_factor : float
        The noise factor used to scale the white noise inserted, relative to
        the average amplitude of the utterance. Default 0 (no noise inserted).
    Example
    -------
    >>> inputs = torch.randn([10, 16000])
    >>> feature_maker = TimeDomainSpecAugment(speeds=[80])
    >>> feats = feature_maker(inputs, torch.ones(10))
    >>> feats.shape
    torch.Size([10, 12800])
    """
    def __init__(
        self,
        perturb_prob=1.0,
        drop_freq_prob=1.0,
        drop_chunk_prob=1.0,
        speeds=None,
        sample_rate=16000,
        drop_freq_count_low=0,
        drop_freq_count_high=3,
        drop_chunk_count_low=0,
        drop_chunk_count_high=5,
        drop_chunk_length_low=1000,
        drop_chunk_length_high=2000,
        drop_chunk_noise_factor=0,
    ):
        super().__init__()
        # Avoid a shared mutable default argument: None stands for the
        # standard +-5% speed perturbation of [95, 100, 105].
        if speeds is None:
            speeds = [95, 100, 105]
        self.speed_perturb = SpeedPerturb(
            perturb_prob=perturb_prob, orig_freq=sample_rate, speeds=speeds
        )
        self.drop_freq = DropFreq(
            drop_prob=drop_freq_prob,
            drop_count_low=drop_freq_count_low,
            drop_count_high=drop_freq_count_high,
        )
        self.drop_chunk = DropChunk(
            drop_prob=drop_chunk_prob,
            drop_count_low=drop_chunk_count_low,
            drop_count_high=drop_chunk_count_high,
            drop_length_low=drop_chunk_length_low,
            drop_length_high=drop_chunk_length_high,
            noise_factor=drop_chunk_noise_factor,
        )
    def forward(self, waveforms, lengths):
        """Returns the distorted waveforms.
        Arguments
        ---------
        waveforms : torch.Tensor
            The waveforms to distort
        lengths : torch.Tensor
            The relative lengths of each waveform in the batch
            (needed by the chunk-dropping stage).
        """
        # Augmentation is randomness only -- no gradients are needed.
        with torch.no_grad():
            waveforms = self.speed_perturb(waveforms)
            waveforms = self.drop_freq(waveforms)
            waveforms = self.drop_chunk(waveforms, lengths)
        return waveforms
class EnvCorrupt(torch.nn.Module):
    """Environmental Corruptions for speech signals: noise, reverb, babble.
    Arguments
    ---------
    reverb_prob : float from 0 to 1
        The probability that each batch will have reverberation applied.
    babble_prob : float from 0 to 1
        The probability that each batch will have babble added.
    noise_prob : float from 0 to 1
        The probability that each batch will have noise added.
    openrir_folder : str
        If provided, download and prepare openrir to this location. The
        reverberation csv and noise csv will come from here unless overridden
        by the ``reverb_csv`` or ``noise_csv`` arguments.
    openrir_max_noise_len : float
        The maximum length in seconds for a noise segment from openrir. Only
        takes effect if ``openrir_folder`` is used for noises. Cuts longer
        noises into segments equal to or less than this length.
    reverb_csv : str
        A prepared csv file for loading room impulse responses.
    noise_csv : str
        A prepared csv file for loading noise data.
    noise_num_workers : int
        Number of workers to use for loading noises.
    babble_speaker_count : int
        Number of speakers to use for babble. Must be less than batch size.
    babble_snr_low : int
        Lowest generated SNR of reverbed signal to babble.
    babble_snr_high : int
        Highest generated SNR of reverbed signal to babble.
    noise_snr_low : int
        Lowest generated SNR of babbled signal to noise.
    noise_snr_high : int
        Highest generated SNR of babbled signal to noise.
    rir_scale_factor : float
        It compresses or dilates the given impulse response.
        If ``0 < rir_scale_factor < 1``, the impulse response is compressed
        (less reverb), while if ``rir_scale_factor > 1`` it is dilated
        (more reverb).
    reverb_sample_rate : int
        Sample rate of input audio signals (rirs) used for reverberation.
    noise_sample_rate: int
        Sample rate of input audio signals used for adding noise.
    clean_sample_rate: int
        Sample rate of original (clean) audio signals.
    Example
    -------
    >>> inputs = torch.randn([10, 16000])
    >>> corrupter = EnvCorrupt(babble_speaker_count=9)
    >>> feats = corrupter(inputs, torch.ones(10))
    """
    def __init__(
        self,
        reverb_prob=1.0,
        babble_prob=1.0,
        noise_prob=1.0,
        openrir_folder=None,
        openrir_max_noise_len=None,
        reverb_csv=None,
        noise_csv=None,
        noise_num_workers=0,
        babble_speaker_count=0,
        babble_snr_low=0,
        babble_snr_high=0,
        noise_snr_low=0,
        noise_snr_high=0,
        rir_scale_factor=1.0,
        reverb_sample_rate=16000,
        noise_sample_rate=16000,
        clean_sample_rate=16000,
    ):
        super().__init__()
        # Download and prepare openrir
        if openrir_folder and (not reverb_csv or not noise_csv):
            open_reverb_csv = os.path.join(openrir_folder, "reverb.csv")
            open_noise_csv = os.path.join(openrir_folder, "noise.csv")
            _prepare_openrir(
                openrir_folder,
                open_reverb_csv,
                open_noise_csv,
                openrir_max_noise_len,
            )
            # Specify filepath and sample rate if not specified already
            # (openrir audio is distributed at 16 kHz, so any user-provided
            # sample rate for these sources is overridden here).
            if not reverb_csv:
                reverb_csv = open_reverb_csv
                reverb_sample_rate = 16000
            if not noise_csv:
                noise_csv = open_noise_csv
                noise_sample_rate = 16000
        # Initialize corrupters
        # Each stage is only instantiated when enabled; forward() uses
        # hasattr() to decide which stages to run.
        if reverb_csv is not None and reverb_prob > 0.0:
            self.add_reverb = AddReverb(
                reverb_prob=reverb_prob,
                csv_file=reverb_csv,
                rir_scale_factor=rir_scale_factor,
                reverb_sample_rate=reverb_sample_rate,
                clean_sample_rate=clean_sample_rate,
            )
        if babble_speaker_count > 0 and babble_prob > 0.0:
            self.add_babble = AddBabble(
                mix_prob=babble_prob,
                speaker_count=babble_speaker_count,
                snr_low=babble_snr_low,
                snr_high=babble_snr_high,
            )
        if noise_csv is not None and noise_prob > 0.0:
            self.add_noise = AddNoise(
                mix_prob=noise_prob,
                csv_file=noise_csv,
                num_workers=noise_num_workers,
                snr_low=noise_snr_low,
                snr_high=noise_snr_high,
                noise_sample_rate=noise_sample_rate,
                clean_sample_rate=clean_sample_rate,
            )
    def forward(self, waveforms, lengths):
        """Returns the distorted waveforms.
        Arguments
        ---------
        waveforms : torch.Tensor
            The waveforms to distort.
        lengths : torch.Tensor
            Relative lengths of the waveforms in the batch.
        """
        # Augmentation
        with torch.no_grad():
            if hasattr(self, "add_reverb"):
                # Best-effort: reverberation can fail (e.g. on very short
                # signals), in which case the batch passes through unchanged.
                try:
                    waveforms = self.add_reverb(waveforms, lengths)
                except Exception:
                    pass
            if hasattr(self, "add_babble"):
                waveforms = self.add_babble(waveforms, lengths)
            if hasattr(self, "add_noise"):
                waveforms = self.add_noise(waveforms, lengths)
        return waveforms
def _prepare_openrir(folder, reverb_csv, noise_csv, max_noise_len):
    """Prepare the openrir dataset for adding reverb and noises.
    Arguments
    ---------
    folder : str
        The location of the folder containing the dataset.
    reverb_csv : str
        Filename for storing the prepared reverb csv.
    noise_csv : str
        Filename for storing the prepared noise csv.
    max_noise_len : float
        The maximum noise length in seconds. Noises longer
        than this will be cut into pieces.
    """
    # Fetch the archive; unpack it only when the extracted folder is absent.
    archive_path = os.path.join(folder, "rirs_noises.zip")
    if os.path.isdir(os.path.join(folder, "RIRS_NOISES")):
        download_file(OPENRIR_URL, archive_path)
    else:
        download_file(OPENRIR_URL, archive_path, unpack=True)
    # Build each csv from its file list, unless it already exists. Only the
    # noise csv is subject to the maximum-length splitting.
    csv_specs = (
        (reverb_csv, ("real_rirs_isotropic_noises", "rir_list"), None),
        (noise_csv, ("pointsource_noises", "noise_list"), max_noise_len),
    )
    for csv_path, list_parts, max_len in csv_specs:
        if not os.path.isfile(csv_path):
            filelist = os.path.join(folder, "RIRS_NOISES", *list_parts)
            _prepare_csv(folder, filelist, csv_path, max_len)
def _prepare_csv(folder, filelist, csv_file, max_length=None):
    """Iterate a set of wavs and write the corresponding csv file.
    Arguments
    ---------
    folder : str
        The folder relative to which the files in the list are listed.
    filelist : str
        The location of a file listing the files to be used.
    csvfile : str
        The location to use for writing the csv file.
    max_length : float
        The maximum length in seconds. Waveforms longer
        than this will be cut into pieces.
    Side effects
    ------------
    Multi-channel wavs are overwritten in place with their first channel,
    and wavs longer than ``max_length`` are deleted and replaced by
    ``<name>_<i>.<ext>`` segment files.
    """
    try:
        # make sure all processing reached here before main preocess create csv_file
        sb.utils.distributed.ddp_barrier()
        # Only the main process writes; other ranks wait at the final barrier.
        if sb.utils.distributed.if_main_process():
            with open(csv_file, "w") as w:
                w.write("ID,duration,wav,wav_format,wav_opts\n\n")
                for line in open(filelist):
                    # Read file for duration/channel info
                    # The wav path is the last whitespace-separated token.
                    filename = os.path.join(folder, line.split()[-1])
                    signal, rate = torchaudio.load(filename)
                    # Ensure only one channel
                    if signal.shape[0] > 1:
                        signal = signal[0].unsqueeze(0)
                        torchaudio.save(filename, signal, rate)
                    ID, ext = os.path.basename(filename).split(".")
                    duration = signal.shape[1] / rate
                    # Handle long waveforms
                    if max_length is not None and duration > max_length:
                        # Delete old file
                        os.remove(filename)
                        for i in range(int(duration / max_length)):
                            start = int(max_length * i * rate)
                            stop = int(
                                min(max_length * (i + 1), duration) * rate
                            )
                            new_filename = (
                                filename[: -len(f".{ext}")] + f"_{i}.{ext}"
                            )
                            torchaudio.save(
                                new_filename, signal[:, start:stop], rate
                            )
                            # Trailing "\n" element makes join() end the row
                            # with ",\n" (original csv format quirk).
                            csv_row = (
                                f"{ID}_{i}",
                                str((stop - start) / rate),
                                new_filename,
                                ext,
                                "\n",
                            )
                            w.write(",".join(csv_row))
                    else:
                        w.write(
                            ",".join((ID, str(duration), filename, ext, "\n"))
                        )
    finally:
        # All ranks re-synchronize so no one reads a half-written csv.
        sb.utils.distributed.ddp_barrier()
| 18,577 | 32.473874 | 102 | py |
speechbrain | speechbrain-main/speechbrain/lobes/downsampling.py | """
Combinations of processing algorithms to implement downsampling methods.
Authors
* Salah Zaiem
"""
import torch
import torchaudio.transforms as T
from speechbrain.nnet.CNN import Conv1d
from speechbrain.nnet.pooling import Pooling1d
class Downsampler(torch.nn.Module):
    """Base wrapper for downsampling techniques.

    Subclasses are expected to assign a callable module to
    ``self.downsampler`` in their constructor.
    """
    def forward(self, x):
        """Apply the wrapped downsampling operation.

        Arguments
        ---------
        x : tensor
            Speech samples of shape [B,n_samples] with B the batch size
        """
        return self.downsampler(x)
class SignalDownsampler(Downsampler):
    """Signal downsampling (decimation) via resampling.

    Arguments
    ---------
    downsampling_factor : int
        Factor of downsampling (i.e. ratio (length before ds / length after ds))
    initial_sampling_rate : int
        Sampling_rate of the input audios

    Example
    -------
    >>> sd = SignalDownsampler(2,16000)
    >>> a = torch.rand([8,28000])
    >>> a = sd(a)
    >>> print(a.shape)
    torch.Size([8, 14000])
    """
    def __init__(self, downsampling_factor, initial_sampling_rate):
        super().__init__()
        # Target rate is the source rate divided by the decimation factor.
        target_rate = int(initial_sampling_rate / downsampling_factor)
        self.downsampling_factor = downsampling_factor
        self.target_ds_rate = target_rate
        self.downsampler = T.Resample(
            initial_sampling_rate, target_rate, dtype=torch.float32
        )
class Conv1DDownsampler(Downsampler):
    """1D convolutional downsampling with a learned filter.

    Arguments
    ---------
    downsampling_factor : int
        Factor of downsampling (i.e. ratio (length before ds / length after ds))
    kernel_size : int
        Kernel size of the 1D filter (must be an odd integer)

    Example
    -------
    >>> sd = Conv1DDownsampler(3,161)
    >>> a = torch.rand([8,33000])
    >>> a = sd(a)
    >>> print(a.shape)
    torch.Size([8, 10947])
    """
    def __init__(self, downsampling_factor, kernel_size):
        super().__init__()
        self.kernel_size = kernel_size
        self.downsampling_factor = downsampling_factor
        # A single learned channel with stride equal to the decimation
        # factor; "valid" padding shortens the output by kernel_size - 1.
        self.downsampler = Conv1d(
            stride=downsampling_factor,
            padding="valid",
            kernel_size=kernel_size,
            out_channels=1,
            input_shape=[None, None],
        )
class PoolingDownsampler(Downsampler):
    """1D pooling-based downsampling (non-learned).

    Arguments
    ---------
    downsampling_factor : int
        Factor of downsampling (i.e. ratio (length before ds / length after ds))
    kernel_size : int
        Kernel size of the 1D filter (must be an odd integer)
    padding : int
        The number of padding elements to apply.
    pool_type : string
        Pooling approach, must be within ["avg","max"]

    Example
    -------
    >>> sd = PoolingDownsampler(3,41)
    >>> a = torch.rand([8,33000])
    >>> a = sd(a)
    >>> print(a.shape)
    torch.Size([8, 10987])
    """
    def __init__(
        self, downsampling_factor, kernel_size, padding=0, pool_type="avg"
    ):
        super().__init__()
        self.kernel_size = kernel_size
        self.padding = padding
        self.pool_type = pool_type
        self.downsampling_factor = downsampling_factor
        # Fixed (non-learned) pooling; the stride performs the decimation.
        self.downsampler = Pooling1d(
            stride=downsampling_factor,
            padding=padding,
            kernel_size=kernel_size,
            input_dims=3,
            pool_type=pool_type,
        )
| 3,444 | 26.782258 | 80 | py |
speechbrain | speechbrain-main/speechbrain/lobes/beamform_multimic.py | """Beamformer for multi-mic processing.
Authors
* Nauman Dawalatabad
"""
import torch
from speechbrain.processing.features import (
STFT,
ISTFT,
)
from speechbrain.processing.multi_mic import (
Covariance,
GccPhat,
DelaySum,
)
class DelaySum_Beamformer(torch.nn.Module):
    """Generate beamformed signal from multi-mic data using DelaySum beamforming.

    Arguments
    ---------
    sampling_rate : int (default: 16000)
        Sampling rate of audio signals.
    """
    def __init__(self, sampling_rate=16000):
        super().__init__()
        self.fs = sampling_rate
        # Analysis transform, spatial statistics, steering, and synthesis.
        self.stft = STFT(sample_rate=self.fs)
        self.cov = Covariance()
        self.gccphat = GccPhat()
        self.delaysum = DelaySum()
        self.istft = ISTFT(sample_rate=self.fs)
    def forward(self, mics_signals):
        """Returns the beamformed signal computed from multi-mic data.

        Arguments
        ---------
        mics_signals : tensor
            Set of audio signals to be transformed.
        """
        # Pure signal processing, no gradients required.
        with torch.no_grad():
            spec = self.stft(mics_signals)
            spatial_cov = self.cov(spec)
            delays = self.gccphat(spatial_cov)
            steered_spec = self.delaysum(spec, delays)
            beamformed = self.istft(steered_spec)
        return beamformed
| 1,264 | 22.425926 | 81 | py |
speechbrain | speechbrain-main/speechbrain/lobes/models/wav2vec.py | """Components necessary to build a wav2vec 2.0 architecture following the
original paper: https://arxiv.org/abs/2006.11477.
Authors
* Rudolf A Braun 2022
* Guillermo Cambara 2022
* Titouan Parcollet 2022
"""
import logging
import torch
import torch.nn.functional as F
import torch.nn as nn
import random
import numpy as np
from speechbrain.lobes.models.transformer.Transformer import PositionalEncoding
from speechbrain.utils.data_utils import batch_pad_right
from speechbrain.dataio.dataio import length_to_mask
from speechbrain.lobes.models.convolution import ConvolutionFrontEnd
from speechbrain.nnet.CNN import Conv1d
from speechbrain.nnet.normalization import LayerNorm
from speechbrain.nnet.quantisers import GumbelVectorQuantizer
logger = logging.getLogger()
class W2VLatentExtractor(nn.Module):
    """Convolution based feature extractor from raw audio.
    Channel numbers increasing is based on https://arxiv.org/abs/2109.06870

    Arguments
    ---------
    out_channels : list of ints
        Out channels of convolutional layers.
    kernel_sizes : list of ints
        Kernels of convolutional layers.
    strides : list of ints
        Strides of convolutional layers.
    dropout : float
        Dropout of CNN.

    Example
    -------
    >>> extractor = W2VLatentExtractor()
    >>> inputs = torch.rand(10, 5000)
    >>> outputs = extractor(inputs)
    >>> outputs.shape
    torch.Size([10, 14, 512])
    """
    def __init__(
        self,
        out_channels=[512, 512, 512, 512, 512, 512, 512],
        kernel_sizes=[11, 3, 3, 3, 3, 3, 3],
        strides=[5, 2, 2, 2, 2, 2, 2],
        dropout=0.0,
        conv_init="kaiming",
    ):
        super().__init__()
        assert len(out_channels) == len(kernel_sizes) == len(strides)
        num_blocks = len(out_channels)
        # Kept for get_output_lengths().
        self.kernel_sizes = kernel_sizes
        self.strides = strides
        self.out_dim = out_channels[-1]
        # ! Note this does conv, norm, gelu, dropout. while fairseq does conv, dropout, norm, gelu
        # Also fairseq layernorm is forced to fp32
        self.extractor = ConvolutionFrontEnd(
            (None, 16000, 1,),
            num_blocks=num_blocks,
            num_layers_per_block=1,
            out_channels=out_channels,
            kernel_sizes=kernel_sizes,
            strides=strides,
            dilations=[1] * num_blocks,
            residuals=[False] * num_blocks,
            conv_module=Conv1d,
            activation=nn.GELU,
            norm=LayerNorm,
            dropout=dropout,
            conv_bias=False,
            padding="valid",
            conv_init=conv_init,
        )
        self.norm = nn.LayerNorm(out_channels[-1])
    def forward(self, x, normalize_signal=True):
        """ Calculates latents from audio input.
        """
        if normalize_signal:
            # Utterance-level normalization of the raw waveform.
            x = F.layer_norm(x, x.shape[1:])
        # Add a trailing channel dim expected by the conv front-end.
        latents = self.extractor(x.unsqueeze(2))
        return self.norm(latents)
    def get_output_lengths(self, input_lengths: torch.LongTensor):
        """ Calculates output lengths for given input lengths. """
        lengths = input_lengths
        for kernel_size, stride in zip(self.kernel_sizes, self.strides):
            # Standard "valid" convolution output-length formula.
            lengths = torch.floor((lengths - kernel_size) / stride + 1)
        return lengths.to(torch.long)
class W2VTargetQuantiser(nn.Module):
    """ Wraps ``nnet.quantiser.GumbelVectorQuantizer``, see for documentation on
    arguments.

    Example
    -------
    >>> quantiser = W2VTargetQuantiser()
    >>> inputs = torch.rand(10, 12, 512)
    >>> output, meta = quantiser(inputs)
    >>> output.shape
    torch.Size([10, 12, 256])
    """
    def __init__(
        self,
        in_dim=512,
        out_dim=256,
        quantiser=GumbelVectorQuantizer,
        num_vars=320,
        temperature_decay=(2.0, 0.25, 0.999995,),
    ):
        super().__init__()
        self.quantiser = quantiser(
            in_dim, num_vars, temperature_decay, 2, out_dim
        )
        self.proj = nn.Linear(out_dim, out_dim)
    def forward(self, x):
        """ Returns quantised targets plus meta information. """
        quantised = self.quantiser(x)
        targets = self.proj(quantised["x"])
        num_vars = quantised["num_vars"]
        # Diversity term encourages uniform codebook usage.
        meta = {
            "diversity_loss": (num_vars - quantised["prob_perplex"]) / num_vars,
            "code_perplex": quantised["code_perplexity"],
            "prob_perplex": quantised["prob_perplex"],
            "num_vars": num_vars,
            "temp": quantised["temp"],
        }
        return targets, meta
class EncoderWrapper(nn.Module):
    """A wrapper that adds positional information,
    masks the input and then runs the latent encoder.
    Arguments
    ---------
    in_dim : int
        Last dimension of input tensor.
    embedding_dim : int
        Dimension to project input to and that the latent encoder will use.
    latent_encoder : torch.nn.module
        Initialized latent encoder object.
    positional_encoding : torch.nn.module
        Uninitialized nn.module for adding positional information, will use ``embedding_dim``.
    dropout_encoder_input : float
        Dropout on encoder input.
    Example
    -------
    >>> from speechbrain.lobes.models.transformer.Transformer import TransformerEncoder
    >>> encoder = TransformerEncoder(d_model=768, num_layers=4, nhead=4, d_ffn=1024)
    >>> wrapper = EncoderWrapper(1024, 768, encoder)
    >>> inputs = torch.rand(10, 12, 1024)
    >>> outputs = wrapper(inputs)
    >>> outputs["embeddings"].shape
    torch.Size([10, 12, 768])
    """
    def __init__(
        self,
        in_dim,
        embedding_dim,
        latent_encoder,
        positional_encoding=PositionalEncoding,
        dropout_encoder_input=0.05,
    ):
        super().__init__()
        self.input_projector = nn.Linear(in_dim, embedding_dim)
        self.latent_encoder = latent_encoder
        self.positional_encoding = positional_encoding(embedding_dim)
        self.dropout_encoder_input = nn.Dropout(dropout_encoder_input)
        # Learned embedding substituted at masked positions.
        self.mask_emb = nn.Parameter(
            torch.FloatTensor(embedding_dim).uniform_(), requires_grad=True
        )
    def forward(
        self, latents, wav_lens=None, padding_mask=None, mask=None,
    ):
        """
        Arguments
        ---------
        latents : torch.Tensor, shape (B, T, C)
            Batch of latent representations (AKA frames) output from latent extractor.
        wav_lens : torch.Tensor, shape (B,)
            The actual (unpadded) relative lengths for each sample of the batch (0<wav_lens<1).
        padding_mask : Torch.Tensor, shape (B, T,)
            Can be provided instead of wav_lens.
        mask : torch.Tensor, shape (B, T)
            Boolean mask which decides which latent frames will be masked.
        Returns
        -------
        results : dict
            Contains "embeddings" and, when ``mask`` is given,
            "num_masked" / "ratio_masked" statistics.
        """
        results = {}
        T = latents.size(1)
        latents = self.input_projector(latents)
        latents = self.dropout_encoder_input(latents)
        # Masking happens BEFORE positional encoding is added, so masked
        # frames all start from the same learned embedding.
        if mask is not None:
            latents[mask] = self.mask_emb.to(latents.dtype)
            num_masked = mask.sum()
            results["num_masked"] = num_masked
            results["ratio_masked"] = num_masked / mask.numel()
        if wav_lens is not None:
            # Convert relative lengths to absolute frame counts, then invert
            # length_to_mask so padded positions become True.
            wav_lens = torch.round(wav_lens * T)
            padding_mask = ~length_to_mask(wav_lens, dtype=bool)
        latents = latents + self.positional_encoding(latents)
        feats, _ = self.latent_encoder(
            latents, src_key_padding_mask=padding_mask
        )
        results["embeddings"] = feats
        return results
def compute_mask(shape, sample_lens, mask_prob, mask_length):
    """ This creates the boolean mask for a target shape which respects
    the sample lengths and will have roughly ``mask_prob`` entries set to
    ``True``.

    Arguments
    ---------
    shape : list of ints, like (N, M)
        Shape of boolean mask to return.
    sample_lens: list of ints
        Absolute lengths of per sample lengths.
    mask_prob : float
        Percentage to mask.
    mask_length: int
        Length of contiguous subsequence to mask.

    Returns
    -------
    mask : numpy.ndarray
        Boolean mask with shape of input argument ``shape``.
    """
    bs, padded_sample_len = shape
    min_sample_len = min(sample_lens)
    # A single span count for the whole batch (based on the shortest sample)
    # keeps the number of masked frames identical across samples.
    num_mask = int(
        mask_prob * min_sample_len / float(mask_length) + random.random() + 1
    )
    mask_idcs = []
    for sample_len in sample_lens:
        # Draw span starts so every span fits inside the unpadded signal.
        starts = np.random.choice(
            sample_len - mask_length, num_mask, replace=False
        )
        # Expand each start into a contiguous run of `mask_length` indices.
        span_idcs = (starts[:, None] + np.arange(mask_length)).reshape(-1)
        # Overlapping spans repeat indices; keep each index only once.
        mask_idcs.append(np.unique(span_idcs[span_idcs < sample_len]))
    mask = np.full((bs, padded_sample_len), False)
    num_mask_total = num_mask * mask_length
    # De-duplication can leave a sample short of the target count; top it up
    # with fresh indices drawn from the still-unmasked frames.
    for i, mask_idc in enumerate(mask_idcs):
        if len(mask_idc) < num_mask_total:
            candidates = np.delete(np.arange(sample_lens[i]), mask_idc)
            extra_idcs = np.random.choice(
                candidates, num_mask_total - len(mask_idc), replace=False
            )
            mask[i, extra_idcs] = True
        mask[i, mask_idc] = True
    return mask
def sample_negatives(y, num_neg):
    """ Samples negatives from target tensor y.

    Each negative for position (b, t) is drawn uniformly from the other
    time steps of the SAME batch element b, and is never the target
    frame itself.

    Arguments
    ---------
    y : torch.Tensor
        Tensor of shape (B, T, C)
    num_neg : int
        Number of negatives to sample.

    Returns
    -------
    negs : torch.Tensor
        Negatives in shape (N, B, T, C)
    """
    B, T, C = y.shape
    high = T - 1
    with torch.no_grad():
        # Column j of neg_indcs corresponds to target time step j // num_neg.
        targets = torch.arange(T).unsqueeze(-1).expand(-1, num_neg).flatten()
        neg_indcs = torch.randint(low=0, high=high, size=(B, T * num_neg))
        # Shift draws >= target up by one so the target index is never
        # sampled while keeping the distribution uniform over the rest.
        neg_indcs[neg_indcs >= targets] += 1
    # Offset each batch element by T, the number of rows it occupies in the
    # flattened (B*T, C) view below. (Offsetting by `high` = T - 1, as done
    # previously, made negatives leak into the preceding batch element and
    # never sample each element's last frame.)
    neg_indcs = neg_indcs + torch.arange(B).unsqueeze(1) * T
    y = y.view(-1, C)
    negs = y[neg_indcs.view(-1)]
    negs = negs.view(B, T, num_neg, C).permute(2, 0, 1, 3)  # to N, B, T, C
    return negs
def w2v_mask_collate_fn(samples_lst, get_out_len_fn, mask_prob, mask_length):
    """ This creates a batch from a list of samples and also creates
    the boolean mask that will be used to mask the inputs of the latent
    encoder. To create the mask we need to know the output shape after the
    latent extractor, therefore the argument `get_out_len_fn`.
    One could also create masks per sample (when loading the audio file) and
    then collate them but at that time one doesn't know the length of the
    shortest sample in the batch (which determines the number of masked frames)
    so it's better this way.
    Arguments
    ---------
    samples_lst : list
        List of samples returned by the audio_pipeline.
    get_out_len_fn : function
        Function that calculates length of sample after it passes through feature extractor.
    mask_prob : float
        Approximate percentage of frames to mask.
    mask_length : int
        Number of contiguous frames that will be masked.
    Returns
    -------
    wavs_padded : torch.Tensor, shape (B, T)
        Audio arrays with right-sided padding.
    wav_lens : torch.Tensor, shape (B,)
        For each sample the percentage of the array that is not padding.
    mask : torch.Tensor, shape (B, T)
        Boolean mask to mask frames.
    """
    wav_lst, latent_length_lst = [], []
    # NOTE(review): `ids` is collected but never returned or used — presumably
    # kept for parity with other collate functions; confirm before removing.
    ids = []
    for sample in samples_lst:
        ids.append(sample["id"])
        sig = sample["sig"]
        wav_lst.append(sig)
        # Number of frames each waveform produces after the latent extractor.
        latent_length = get_out_len_fn(torch.as_tensor(sig.size(-1)))
        latent_length_lst.append(latent_length.item())
    bs = len(wav_lst)
    # Right-pad all waveforms to the longest one in the batch.
    wavs_padded, wav_lens = batch_pad_right(wav_lst)
    # Mask is computed in latent-frame space, sized to the longest sample.
    batch_time_len = max(latent_length_lst)
    mask = compute_mask(
        (bs, batch_time_len,), latent_length_lst, mask_prob, mask_length
    )
    return (
        torch.as_tensor(wavs_padded),
        torch.as_tensor(wav_lens),
        torch.as_tensor(mask, dtype=torch.bool),
    )
| 12,989 | 32.916449 | 98 | py |
speechbrain | speechbrain-main/speechbrain/lobes/models/conv_tasnet.py | """ Implementation of a popular speech separation model.
"""
import torch
import torch.nn as nn
import speechbrain as sb
import torch.nn.functional as F
from speechbrain.processing.signal_processing import overlap_and_add
EPS = 1e-8
class Encoder(nn.Module):
    """This class learns the adaptive frontend for the ConvTasnet model.
    Arguments
    ---------
    L : int
        The filter kernel size. Needs to be an odd number.
    N : int
        Number of dimensions at the output of the adaptive front end.
    Example
    -------
    >>> inp = torch.rand(10, 100)
    >>> encoder = Encoder(11, 20)
    >>> h = encoder(inp)
    >>> h.shape
    torch.Size([10, 20, 20])
    """
    def __init__(self, L, N):
        super(Encoder, self).__init__()
        # 50% overlap
        # A single learned 1-d conv acts as the analysis filterbank:
        # stride L//2 gives half-window hop.
        self.conv1d_U = sb.nnet.CNN.Conv1d(
            in_channels=1,
            out_channels=N,
            kernel_size=L,
            stride=L // 2,
            bias=False,
        )
    def forward(self, mixture):
        """
        Arguments
        ---------
        mixture : Tensor
            Tensor shape is [M, T]. M is batch size. T is #samples
        Returns
        -------
        mixture_w : Tensor
            Tensor shape is [M, K, N], where K = (T-L)/(L/2)+1 = 2T/L-1
        """
        mixture = torch.unsqueeze(mixture, -1)  # [M, T, 1]
        conv_out = self.conv1d_U(mixture)
        # ReLU keeps the learned representation non-negative.
        mixture_w = F.relu(conv_out)  # [M, K, N]
        return mixture_w
class Decoder(nn.Module):
    """This class implements the decoder for the ConvTasnet.
    The separated source embeddings are fed to the decoder to reconstruct
    the estimated sources in the time domain.
    Arguments
    ---------
    L : int
        Number of bases to use when reconstructing.
    Example
    -------
    >>> L, C, N = 8, 2, 8
    >>> mixture_w = torch.randn(10, 100, N)
    >>> est_mask = torch.randn(10, 100, C, N)
    >>> Decoder = Decoder(L, N)
    >>> mixture_hat = Decoder(mixture_w, est_mask)
    >>> mixture_hat.shape
    torch.Size([10, 404, 2])
    """
    def __init__(self, L, N):
        super(Decoder, self).__init__()
        # Hyper-parameter
        self.L = L
        # Components
        # Linear layer maps each N-dim embedding back to L waveform samples.
        self.basis_signals = sb.nnet.linear.Linear(
            input_size=N, n_neurons=L, bias=False
        )
    def forward(self, mixture_w, est_mask):
        """
        Arguments
        ---------
        mixture_w : Tensor
            Tensor shape is [M, K, N].
        est_mask : Tensor
            Tensor shape is [M, K, C, N].
        Returns
        -------
        est_source : Tensor
            Tensor shape is [M, T, C].
        """
        # D = W * M
        # Broadcast the mixture representation over the C sources and mask it.
        source_w = (
            torch.unsqueeze(mixture_w, 2).repeat(1, 1, est_mask.size(2), 1)
            * est_mask
        )  # [M, K, C, N]
        source_w = source_w.permute(0, 2, 1, 3)  # [M, C, K, N]
        # S = DV
        est_source = self.basis_signals(source_w)  # [M, C, K, L]
        # Overlap-add with 50% hop reconstructs the time-domain waveform.
        est_source = overlap_and_add(est_source, self.L // 2)  # M x C x T
        return est_source.permute(0, 2, 1)  # M x T x C
class TemporalBlocksSequential(sb.nnet.containers.Sequential):
    """
    A wrapper for the temporal-block layer to replicate it
    Arguments
    ---------
    input_shape : tuple
        Expected shape of the input.
    H : int
        The number of intermediate channels.
    P : int
        The kernel size in the convolutions.
    R : int
        The number of times to replicate the multilayer Temporal Blocks.
    X : int
        The number of layers of Temporal Blocks with different dilations.
    norm type : str
        The type of normalization, in ['gLN', 'cLN'].
    causal : bool
        To use causal or non-causal convolutions, in [True, False].
    Example
    -------
    >>> x = torch.randn(14, 100, 10)
    >>> H, P, R, X = 10, 5, 2, 3
    >>> TemporalBlocks = TemporalBlocksSequential(
    ...     x.shape, H, P, R, X, 'gLN', False
    ... )
    >>> y = TemporalBlocks(x)
    >>> y.shape
    torch.Size([14, 100, 10])
    """
    def __init__(self, input_shape, H, P, R, X, norm_type, causal):
        super().__init__(input_shape=input_shape)
        # R repeats of X blocks; within each repeat the dilation doubles
        # (1, 2, 4, ...) so the receptive field grows exponentially.
        for r in range(R):
            for x in range(X):
                dilation = 2 ** x
                self.append(
                    TemporalBlock,
                    out_channels=H,
                    kernel_size=P,
                    stride=1,
                    padding="same",
                    dilation=dilation,
                    norm_type=norm_type,
                    causal=causal,
                    layer_name=f"temporalblock_{r}_{x}",
                )
class MaskNet(nn.Module):
    """
    Arguments
    ---------
    N : int
        Number of filters in autoencoder.
    B : int
        Number of channels in bottleneck 1 × 1-conv block.
    H : int
        Number of channels in convolutional blocks.
    P : int
        Kernel size in convolutional blocks.
    X : int
        Number of convolutional blocks in each repeat.
    R : int
        Number of repeats.
    C : int
        Number of speakers.
    norm_type : str
        One of BN, gLN, cLN.
    causal : bool
        Causal or non-causal.
    mask_nonlinear : str
        Use which non-linear function to generate mask, in ['softmax', 'relu'].
    Example:
    ---------
    >>> N, B, H, P, X, R, C = 11, 12, 2, 5, 3, 1, 2
    >>> MaskNet = MaskNet(N, B, H, P, X, R, C)
    >>> mixture_w = torch.randn(10, 11, 100)
    >>> est_mask = MaskNet(mixture_w)
    >>> est_mask.shape
    torch.Size([2, 10, 11, 100])
    """
    def __init__(
        self,
        N,
        B,
        H,
        P,
        X,
        R,
        C,
        norm_type="gLN",
        causal=False,
        mask_nonlinear="relu",
    ):
        super(MaskNet, self).__init__()
        # Hyper-parameter
        self.C = C
        self.mask_nonlinear = mask_nonlinear
        # Components
        # [M, K, N] -> [M, K, N]
        self.layer_norm = ChannelwiseLayerNorm(N)
        # [M, K, N] -> [M, K, B]
        self.bottleneck_conv1x1 = sb.nnet.CNN.Conv1d(
            in_channels=N, out_channels=B, kernel_size=1, bias=False,
        )
        # [M, K, B] -> [M, K, B]
        in_shape = (None, None, B)
        self.temporal_conv_net = TemporalBlocksSequential(
            in_shape, H, P, R, X, norm_type, causal
        )
        # [M, K, B] -> [M, K, C*N]
        # One mask of width N is produced per speaker.
        self.mask_conv1x1 = sb.nnet.CNN.Conv1d(
            in_channels=B, out_channels=C * N, kernel_size=1, bias=False
        )
    def forward(self, mixture_w):
        """Keep this API same with TasNet.
        Arguments
        ---------
        mixture_w : Tensor
            Tensor shape is [M, K, N], M is batch size.
        Returns
        -------
        est_mask : Tensor
            Tensor shape is [M, K, C, N].
        """
        # NOTE(review): per the class example the input is actually [M, N, K]
        # (e.g. randn(10, 11, 100) with N=11); this permute turns it into
        # [M, K, N] — confirm and align the docstring with callers.
        mixture_w = mixture_w.permute(0, 2, 1)
        M, K, N = mixture_w.size()
        y = self.layer_norm(mixture_w)
        y = self.bottleneck_conv1x1(y)
        y = self.temporal_conv_net(y)
        score = self.mask_conv1x1(y)
        # score = self.network(mixture_w)  # [M, K, N] -> [M, K, C*N]
        score = score.contiguous().reshape(
            M, K, self.C, N
        )  # [M, K, C*N] -> [M, K, C, N]
        # [M, K, C, N] -> [C, M, N, K]
        score = score.permute(2, 0, 3, 1)
        if self.mask_nonlinear == "softmax":
            # NOTE(review): softmax is taken over dim=2 (the N axis of the
            # permuted [C, M, N, K] tensor); the original Conv-TasNet applies
            # softmax across speakers (dim 0 here) — confirm this is intended.
            est_mask = F.softmax(score, dim=2)
        elif self.mask_nonlinear == "relu":
            est_mask = F.relu(score)
        else:
            raise ValueError("Unsupported mask non-linear function")
        return est_mask
class TemporalBlock(torch.nn.Module):
    """The conv1d compound layers used in Masknet.
    Arguments
    ---------
    input_shape : tuple
        The expected shape of the input.
    out_channels : int
        The number of intermediate channels.
    kernel_size : int
        The kernel size in the convolutions.
    stride : int
        Convolution stride in convolutional layers.
    padding : str
        The type of padding in the convolutional layers,
        (same, valid, causal). If "valid", no padding is performed.
    dilation : int
        Amount of dilation in convolutional layers.
    norm type : str
        The type of normalization, in ['gLN', 'cLN'].
    causal : bool
        To use causal or non-causal convolutions, in [True, False].
    Example:
    ---------
    >>> x = torch.randn(14, 100, 10)
    >>> TemporalBlock = TemporalBlock(x.shape, 10, 11, 1, 'same', 1)
    >>> y = TemporalBlock(x)
    >>> y.shape
    torch.Size([14, 100, 10])
    """
    def __init__(
        self,
        input_shape,
        out_channels,
        kernel_size,
        stride,
        padding,
        dilation,
        norm_type="gLN",
        causal=False,
    ):
        super().__init__()
        # Only the channel count B is needed; M (batch) and K (time) may be None.
        M, K, B = input_shape
        self.layers = sb.nnet.containers.Sequential(input_shape=input_shape)
        # [M, K, B] -> [M, K, H]
        # 1x1 conv expands channels before the depthwise-separable stage.
        self.layers.append(
            sb.nnet.CNN.Conv1d,
            out_channels=out_channels,
            kernel_size=1,
            bias=False,
            layer_name="conv",
        )
        self.layers.append(nn.PReLU(), layer_name="act")
        self.layers.append(
            choose_norm(norm_type, out_channels), layer_name="norm"
        )
        # [M, K, H] -> [M, K, B]
        self.layers.append(
            DepthwiseSeparableConv,
            out_channels=B,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            norm_type=norm_type,
            causal=causal,
            layer_name="DSconv",
        )
    def forward(self, x):
        """
        Arguments
        ---------
        x : Tensor
            Tensor shape is [M, K, B].
        Returns
        -------
        x : Tensor
            Tensor shape is [M, K, B].
        """
        # Residual connection: the block learns a correction on top of its input.
        residual = x
        x = self.layers(x)
        return x + residual
class DepthwiseSeparableConv(sb.nnet.containers.Sequential):
    """Building block for the Temporal Blocks of Masknet in ConvTasNet.
    Arguments
    ---------
    input_shape : tuple
        Expected shape of the input.
    out_channels : int
        Number of output channels.
    kernel_size : int
        The kernel size in the convolutions.
    stride : int
        Convolution stride in convolutional layers.
    padding : str
        The type of padding in the convolutional layers,
        (same, valid, causal). If "valid", no padding is performed.
    dilation : int
        Amount of dilation in convolutional layers.
    norm type : str
        The type of normalization, in ['gLN', 'cLN'].
    causal : bool
        To use causal or non-causal convolutions, in [True, False].
    Example
    -------
    >>> x = torch.randn(14, 100, 10)
    >>> DSconv = DepthwiseSeparableConv(x.shape, 10, 11, 1, 'same', 1)
    >>> y = DSconv(x)
    >>> y.shape
    torch.Size([14, 100, 10])
    """
    def __init__(
        self,
        input_shape,
        out_channels,
        kernel_size,
        stride,
        padding,
        dilation,
        norm_type="gLN",
        causal=False,
    ):
        super().__init__(input_shape=input_shape)
        # batchsize and time are unpacked but unused; only in_channels matters.
        batchsize, time, in_channels = input_shape
        # [M, K, H] -> [M, K, H]
        if causal:
            # Causal mode left-pads by dilation*(kernel_size-1); the excess
            # trailing frames are removed by Chomp1d below.
            paddingval = dilation * (kernel_size - 1)
            padding = "causal"
            default_padding = "same"
        else:
            default_padding = 0
        # Depthwise conv: groups == in_channels, one filter per channel.
        self.append(
            sb.nnet.CNN.Conv1d,
            out_channels=in_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            groups=in_channels,
            bias=False,
            layer_name="conv_0",
            default_padding=default_padding,
        )
        if causal:
            self.append(Chomp1d(paddingval), layer_name="chomp")
        self.append(nn.PReLU(), layer_name="act")
        # NOTE(review): layer_name "act" duplicates the previous layer's name;
        # the container is expected to deduplicate it, but "norm" was likely
        # intended. Renaming would change state_dict keys, so left as-is.
        self.append(choose_norm(norm_type, in_channels), layer_name="act")
        # [M, K, H] -> [M, K, B]
        # Pointwise 1x1 conv mixes channels to the requested output width.
        self.append(
            sb.nnet.CNN.Conv1d,
            out_channels=out_channels,
            kernel_size=1,
            bias=False,
            layer_name="conv_1",
        )
class Chomp1d(nn.Module):
    """Trims trailing samples from a signal along the time axis.
    Implemented as a module so the trimming step can be placed inside a
    sequential container (e.g. after a causal convolution).
    Arguments
    ---------
    chomp_size : int
        The size of the portion to discard (in samples).
    Example
    -------
    >>> x = torch.randn(10, 110, 5)
    >>> chomp = Chomp1d(10)
    >>> x_chomped = chomp(x)
    >>> x_chomped.shape
    torch.Size([10, 100, 5])
    """
    def __init__(self, chomp_size):
        super().__init__()
        self.chomp_size = chomp_size
    def forward(self, x):
        """Removes the last ``chomp_size`` frames along the time axis.
        Arguments
        ---------
        x : Tensor
            Tensor shape is [M, Kpad, H].
        Returns
        -------
        x : Tensor
            Tensor shape is [M, K, H].
        """
        trimmed = x[:, : -self.chomp_size, :]
        return trimmed.contiguous()
def choose_norm(norm_type, channel_size):
    """Builds and returns the normalization module matching ``norm_type``.
    Arguments
    ---------
    norm_type : str
        One of ['gLN', 'cLN', 'batchnorm'].
    channel_size : int
        Number of channels.
    Example
    -------
    >>> choose_norm('gLN', 10)
    GlobalLayerNorm()
    """
    if norm_type == "gLN":
        return GlobalLayerNorm(channel_size)
    if norm_type == "cLN":
        return ChannelwiseLayerNorm(channel_size)
    # Any other value falls back to standard batch normalization.
    return nn.BatchNorm1d(channel_size)
class ChannelwiseLayerNorm(nn.Module):
    """Channel-wise Layer Normalization (cLN).
    Each time step is normalized across its channel dimension, then scaled
    and shifted by learnable per-channel parameters.
    Arguments
    ---------
    channel_size : int
        Number of channels in the normalization dimension (the third dimension).
    Example
    -------
    >>> x = torch.randn(2, 3, 3)
    >>> norm_func = ChannelwiseLayerNorm(3)
    >>> x_normalized = norm_func(x)
    >>> x.shape
    torch.Size([2, 3, 3])
    """
    def __init__(self, channel_size):
        super(ChannelwiseLayerNorm, self).__init__()
        # Learnable per-channel gain and bias, broadcast over [M, K].
        self.gamma = nn.Parameter(torch.Tensor(1, 1, channel_size))
        self.beta = nn.Parameter(torch.Tensor(1, 1, channel_size))
        self.reset_parameters()
    def reset_parameters(self):
        """Resets the parameters."""
        self.gamma.data.fill_(1)
        self.beta.data.zero_()
    def forward(self, y):
        """
        Args:
            y: [M, K, N], M is batch size, N is channel size, K is length
        Returns:
            cLN_y: [M, K, N]
        """
        # Biased variance (unbiased=False) matches the normalization math.
        mean = y.mean(dim=2, keepdim=True)  # [M, K, 1]
        var = y.var(dim=2, keepdim=True, unbiased=False)  # [M, K, 1]
        cLN_y = self.gamma * (y - mean) / torch.pow(var + EPS, 0.5) + self.beta
        return cLN_y
class GlobalLayerNorm(nn.Module):
    """Global Layer Normalization (gLN).
    Arguments
    ---------
    channel_size : int
        Number of channels in the third dimension.
    Example
    -------
    >>> x = torch.randn(2, 3, 3)
    >>> norm_func = GlobalLayerNorm(3)
    >>> x_normalized = norm_func(x)
    >>> x.shape
    torch.Size([2, 3, 3])
    """
    def __init__(self, channel_size):
        super(GlobalLayerNorm, self).__init__()
        self.gamma = nn.Parameter(torch.Tensor(1, 1, channel_size))  # [1, 1, N]
        self.beta = nn.Parameter(torch.Tensor(1, 1, channel_size))  # [1, 1, N]
        self.reset_parameters()
    def reset_parameters(self):
        """Resets the parameters."""
        self.gamma.data.fill_(1)
        self.beta.data.zero_()
    def forward(self, y):
        """
        Arguments
        ---------
        y : Tensor
            Tensor shape [M, K, N]. M is batch size, N is channel size, and K is length.
        Returns
        -------
        gLN_y : Tensor
            Tensor shape [M, K, N]
        """
        # Statistics are computed globally over both time (K) and channels (N).
        mean = y.mean(dim=1, keepdim=True).mean(
            dim=2, keepdim=True
        )  # [M, 1, 1]
        # Biased variance: mean of squared deviations (no Bessel correction).
        var = (
            (torch.pow(y - mean, 2))
            .mean(dim=1, keepdim=True)
            .mean(dim=2, keepdim=True)
        )
        gLN_y = self.gamma * (y - mean) / torch.pow(var + EPS, 0.5) + self.beta
        return gLN_y
| 16,379 | 25.721044 | 88 | py |
speechbrain | speechbrain-main/speechbrain/lobes/models/MetricGAN.py | """Generator and discriminator used in MetricGAN
Authors:
* Szu-Wei Fu 2020
"""
import torch
import speechbrain as sb
from torch import nn
from torch.nn.utils import spectral_norm
def xavier_init_layer(
    in_size, out_size=None, spec_norm=True, layer_type=nn.Linear, **kwargs
):
    "Create a layer with spectral norm, xavier uniform init and zero bias"
    # A single size argument builds a square layer.
    if out_size is None:
        out_size = in_size
    built = layer_type(in_size, out_size, **kwargs)
    if spec_norm:
        built = spectral_norm(built)
    # Initialization happens after the optional spectral-norm wrapping.
    nn.init.xavier_uniform_(built.weight, gain=1.0)
    nn.init.zeros_(built.bias)
    return built
def shifted_sigmoid(x):
    "Computes the shifted sigmoid."
    # 1.2 * sigmoid(x / 1.6): range (0, 1.2), gentler slope than sigmoid.
    slope = 1 / 1.6
    return 1.2 / (1 + torch.exp(-slope * x))
class Learnable_sigmoid(nn.Module):
    """Implementation of a learnable sigmoid.
    Computes ``1.2 * sigmoid(slope * x)`` where ``slope`` is a trainable
    per-feature parameter initialized to one.
    Arguments
    ---------
    in_features : int
        Input dimensionality
    """
    def __init__(self, in_features=257):
        super().__init__()
        # nn.Parameter is trainable by default (requires_grad=True); the
        # previous `self.slope.requiresGrad = True` was a typo'd no-op.
        self.slope = nn.Parameter(torch.ones(in_features))
    def forward(self, x):
        """ Processes the input tensor x and returns an output tensor."""
        return 1.2 * torch.sigmoid(self.slope * x)
class EnhancementGenerator(nn.Module):
    """Simple LSTM for enhancement with custom initialization.
    Arguments
    ---------
    input_size : int
        Size of the input tensor's last dimension.
    hidden_size : int
        Number of neurons to use in the LSTM layers.
    num_layers : int
        Number of layers to use in the LSTM.
    dropout : int
        Fraction of neurons to drop during training.
    """
    def __init__(
        self, input_size=257, hidden_size=200, num_layers=2, dropout=0,
    ):
        super().__init__()
        self.activation = nn.LeakyReLU(negative_slope=0.3)
        self.blstm = sb.nnet.RNN.LSTM(
            input_size=input_size,
            hidden_size=hidden_size,
            num_layers=num_layers,
            dropout=dropout,
            bidirectional=True,
        )
        """
        Use orthogonal init for recurrent layers, xavier uniform for input layers
        Bias is 0
        """
        for name, param in self.blstm.named_parameters():
            if "bias" in name:
                nn.init.zeros_(param)
            elif "weight_ih" in name:
                nn.init.xavier_uniform_(param)
            elif "weight_hh" in name:
                nn.init.orthogonal_(param)
        # NOTE(review): 400 = 2 * default hidden_size (bidirectional) and
        # 257 = default input_size are hard-coded; these layers break if the
        # constructor arguments are changed — confirm before reuse.
        self.linear1 = xavier_init_layer(400, 300, spec_norm=False)
        self.linear2 = xavier_init_layer(300, 257, spec_norm=False)
        self.Learnable_sigmoid = Learnable_sigmoid()
        # self.sigmoid is created but not used in forward
        # (Learnable_sigmoid is applied instead).
        self.sigmoid = nn.Sigmoid()
    def forward(self, x, lengths):
        """ Processes the input tensor x and returns an output tensor."""
        out, _ = self.blstm(x, lengths=lengths)
        out = self.linear1(out)
        out = self.activation(out)
        out = self.linear2(out)
        out = self.Learnable_sigmoid(out)
        return out
class MetricDiscriminator(nn.Module):
    """Metric estimator for enhancement training.
    Consists of:
     * four 2d conv layers
     * channel averaging
     * three linear layers
    Arguments
    ---------
    kernel_size : tuple
        The dimensions of the 2-d kernel used for convolution.
    base_channels : int
        Number of channels used in each conv layer.
    """
    def __init__(
        self, kernel_size=(5, 5), base_channels=15, activation=nn.LeakyReLU,
    ):
        super().__init__()
        self.activation = activation(negative_slope=0.3)
        # NOTE(review): num_features=2 implies a 2-channel input (presumably a
        # pair of spectrograms) — confirm with the training recipe.
        self.BN = nn.BatchNorm2d(num_features=2, momentum=0.01)
        self.conv1 = xavier_init_layer(
            2, base_channels, layer_type=nn.Conv2d, kernel_size=kernel_size
        )
        self.conv2 = xavier_init_layer(
            base_channels, layer_type=nn.Conv2d, kernel_size=kernel_size
        )
        self.conv3 = xavier_init_layer(
            base_channels, layer_type=nn.Conv2d, kernel_size=kernel_size
        )
        self.conv4 = xavier_init_layer(
            base_channels, layer_type=nn.Conv2d, kernel_size=kernel_size
        )
        self.Linear1 = xavier_init_layer(base_channels, out_size=50)
        self.Linear2 = xavier_init_layer(in_size=50, out_size=10)
        self.Linear3 = xavier_init_layer(in_size=10, out_size=1)
    def forward(self, x):
        """ Processes the input tensor x and returns an output tensor."""
        out = self.BN(x)
        out = self.conv1(out)
        out = self.activation(out)
        out = self.conv2(out)
        out = self.activation(out)
        out = self.conv3(out)
        out = self.activation(out)
        out = self.conv4(out)
        out = self.activation(out)
        # Global average over the spatial dims -> one value per channel.
        out = torch.mean(out, (2, 3))
        out = self.Linear1(out)
        out = self.activation(out)
        out = self.Linear2(out)
        out = self.activation(out)
        out = self.Linear3(out)
        return out
| 5,148 | 26.832432 | 81 | py |
speechbrain | speechbrain-main/speechbrain/lobes/models/MetricGAN_U.py | """Generator and discriminator used in MetricGAN-U
Authors:
* Szu-Wei Fu 2020
"""
import torch
import speechbrain as sb
from torch import nn
from torch.nn.utils import spectral_norm
def xavier_init_layer(
    in_size, out_size=None, spec_norm=True, layer_type=nn.Linear, **kwargs
):
    "Create a layer with spectral norm, xavier uniform init and zero bias"
    # Default to a square layer when no output size is given.
    out_size = in_size if out_size is None else out_size
    new_layer = layer_type(in_size, out_size, **kwargs)
    if spec_norm:
        new_layer = spectral_norm(new_layer)
    # Xavier-uniform weights, zero bias.
    nn.init.xavier_uniform_(new_layer.weight, gain=1.0)
    nn.init.zeros_(new_layer.bias)
    return new_layer
class EnhancementGenerator(nn.Module):
    """Simple LSTM for enhancement with custom initialization.
    Arguments
    ---------
    input_size : int
        Size of the input tensor's last dimension.
    hidden_size : int
        Number of neurons to use in the LSTM layers.
    num_layers : int
        Number of layers to use in the LSTM.
    lin_dim: int
        Number of neurons in the last two linear layers.
    dropout : int
        Fraction of neurons to drop during training.
    Example
    -------
    >>> inputs = torch.rand([10, 100, 40])
    >>> model = EnhancementGenerator(input_size=40, hidden_size=50)
    >>> outputs = model(inputs, lengths=torch.ones([10]))
    >>> outputs.shape
    torch.Size([10, 100, 40])
    """
    def __init__(
        self,
        input_size=257,
        hidden_size=200,
        num_layers=2,
        lin_dim=300,
        dropout=0,
    ):
        super().__init__()
        self.activation = nn.LeakyReLU(negative_slope=0.3)
        self.blstm = sb.nnet.RNN.LSTM(
            input_size=input_size,
            hidden_size=hidden_size,
            num_layers=num_layers,
            dropout=dropout,
            bidirectional=True,
        )
        """
        Use orthogonal init for recurrent layers, xavier uniform for input layers
        Bias is 0
        """
        for name, param in self.blstm.named_parameters():
            if "bias" in name:
                nn.init.zeros_(param)
            elif "weight_ih" in name:
                nn.init.xavier_uniform_(param)
            elif "weight_hh" in name:
                nn.init.orthogonal_(param)
        # Unlike the MetricGAN variant, these dims follow the constructor
        # arguments (2 * hidden_size for the bidirectional LSTM output).
        self.linear1 = xavier_init_layer(
            hidden_size * 2, lin_dim, spec_norm=False
        )
        self.linear2 = xavier_init_layer(lin_dim, input_size, spec_norm=False)
        self.sigmoid = nn.Sigmoid()
    def forward(self, x, lengths):
        """ Processes the input tensor x and returns an output tensor."""
        out, _ = self.blstm(x, lengths=lengths)
        out = self.linear1(out)
        out = self.activation(out)
        out = self.linear2(out)
        # Mask values are squashed to (0, 1).
        out = self.sigmoid(out)
        return out
class MetricDiscriminator(nn.Module):
    """Metric estimator for enhancement training.
    Consists of:
     * four 2d conv layers
     * channel averaging
     * three linear layers
    Arguments
    ---------
    kernel_size : tuple
        The dimensions of the 2-d kernel used for convolution.
    base_channels : int
        Number of channels used in each conv layer.
    lin_dim1: int
        Dimensionality of the first linear layer.
    lin_dim2: int
        Dimensionality of the second linear layer.
    Example
    -------
    >>> inputs = torch.rand([1, 1, 100, 257])
    >>> model = MetricDiscriminator()
    >>> outputs = model(inputs)
    >>> outputs.shape
    torch.Size([1, 1])
    """
    # FCN
    def __init__(
        self,
        kernel_size=(5, 5),
        base_channels=15,
        activation=nn.LeakyReLU,
        lin_dim1=50,
        lin_dim2=10,
    ):
        super().__init__()
        self.activation = activation(negative_slope=0.3)
        # NOTE(review): self.BN is created but never applied in forward
        # (unlike the MetricGAN discriminator) — confirm whether this is
        # intentional before removing it; removal would change state_dict keys.
        self.BN = nn.BatchNorm2d(num_features=1, momentum=0.01)
        self.conv1 = xavier_init_layer(
            1, base_channels, layer_type=nn.Conv2d, kernel_size=kernel_size
        )
        self.conv2 = xavier_init_layer(
            base_channels, layer_type=nn.Conv2d, kernel_size=kernel_size
        )
        self.conv3 = xavier_init_layer(
            base_channels, layer_type=nn.Conv2d, kernel_size=kernel_size
        )
        self.conv4 = xavier_init_layer(
            base_channels, layer_type=nn.Conv2d, kernel_size=kernel_size
        )
        self.Linear1 = xavier_init_layer(base_channels, out_size=lin_dim1)
        self.Linear2 = xavier_init_layer(in_size=lin_dim1, out_size=lin_dim2)
        self.Linear3 = xavier_init_layer(in_size=lin_dim2, out_size=1)
    def forward(self, x):
        """ Processes the input tensor x and returns an output tensor."""
        out = self.conv1(x)
        out = self.activation(out)
        out = self.conv2(out)
        out = self.activation(out)
        out = self.conv3(out)
        out = self.activation(out)
        out = self.conv4(out)
        out = self.activation(out)
        # Global average over the spatial dims -> one value per channel.
        out = torch.mean(out, (2, 3))
        out = self.Linear1(out)
        out = self.activation(out)
        out = self.Linear2(out)
        out = self.activation(out)
        out = self.Linear3(out)
        return out
| 5,154 | 25.989529 | 81 | py |
speechbrain | speechbrain-main/speechbrain/lobes/models/Tacotron2.py | """
Neural network modules for the Tacotron2 end-to-end neural
Text-to-Speech (TTS) model
Authors
* Georges Abous-Rjeili 2021
* Artem Ploujnikov 2021
"""
# This code uses a significant portion of the NVidia implementation, even though it
# has been modified and enhanced
# https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/SpeechSynthesis/Tacotron2/tacotron2/model.py
# *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
from math import sqrt
from speechbrain.nnet.loss.guidedattn_loss import GuidedAttentionLoss
import torch
from torch import nn
from torch.nn import functional as F
from collections import namedtuple
class LinearNorm(torch.nn.Module):
    """A linear layer whose weights are Xavier-uniform initialized.
    Arguments
    ---------
    in_dim: int
        the input dimension
    out_dim: int
        the output dimension
    bias: bool
        whether or not to use a bias
    w_init_gain: linear
        the weight initialization gain type (see torch.nn.init.calculate_gain)
    Example
    -------
    >>> import torch
    >>> from speechbrain.lobes.models.Tacotron2 import Tacotron2
    >>> layer = LinearNorm(in_dim=5, out_dim=3)
    >>> x = torch.randn(3, 5)
    >>> y = layer(x)
    >>> y.shape
    torch.Size([3, 3])
    """
    def __init__(self, in_dim, out_dim, bias=True, w_init_gain="linear"):
        super().__init__()
        self.linear_layer = torch.nn.Linear(in_dim, out_dim, bias=bias)
        # Gain is chosen per the nonlinearity that follows this layer.
        gain = torch.nn.init.calculate_gain(w_init_gain)
        torch.nn.init.xavier_uniform_(self.linear_layer.weight, gain=gain)
    def forward(self, x):
        """Computes the forward pass
        Arguments
        ---------
        x: torch.Tensor
            a (batch, features) input tensor
        Returns
        -------
        output: torch.Tensor
            the linear layer output
        """
        return self.linear_layer(x)
class ConvNorm(torch.nn.Module):
    """A 1D convolution layer whose weights are Xavier-uniform initialized.
    Arguments
    ---------
    in_channels: int
        the number of input channels
    out_channels: int
        the number of output channels
    kernel_size: int
        the kernel size
    stride: int
        the convolutional stride
    padding: int
        the amount of padding to include. If not provided, it will be calculated
        as dilation * (kernel_size - 1) / 2
    dilation: int
        the dilation of the convolution
    bias: bool
        whether or not to use a bias
    w_init_gain: linear
        the weight initialization gain type (see torch.nn.init.calculate_gain)
    Example
    -------
    >>> import torch
    >>> from speechbrain.lobes.models.Tacotron2 import ConvNorm
    >>> layer = ConvNorm(in_channels=10, out_channels=5, kernel_size=3)
    >>> x = torch.randn(3, 10, 5)
    >>> y = layer(x)
    >>> y.shape
    torch.Size([3, 5, 5])
    """
    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_size=1,
        stride=1,
        padding=None,
        dilation=1,
        bias=True,
        w_init_gain="linear",
    ):
        super().__init__()
        if padding is None:
            # "same"-style padding requires an odd kernel size.
            assert kernel_size % 2 == 1
            padding = int(dilation * (kernel_size - 1) / 2)
        self.conv = torch.nn.Conv1d(
            in_channels,
            out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            bias=bias,
        )
        gain = torch.nn.init.calculate_gain(w_init_gain)
        torch.nn.init.xavier_uniform_(self.conv.weight, gain=gain)
    def forward(self, signal):
        """Computes the forward pass
        Arguments
        ---------
        signal: torch.Tensor
            the input to the convolutional layer
        Returns
        -------
        output: torch.Tensor
            the output
        """
        return self.conv(signal)
class LocationLayer(nn.Module):
    """A location-based attention layer consisting of a Xavier-initialized
    convolutional layer followed by a dense layer
    Arguments
    ---------
    attention_n_filters: int
        the number of filters used in attention
    attention_kernel_size: int
        the kernel size of the attention layer
    attention_dim: int
        the dimension of linear attention layers
    Example
    -------
    >>> import torch
    >>> from speechbrain.lobes.models.Tacotron2 import LocationLayer
    >>> layer = LocationLayer()
    >>> attention_weights_cat = torch.randn(3, 2, 64)
    >>> processed_attention = layer(attention_weights_cat)
    >>> processed_attention.shape
    torch.Size([3, 64, 128])
    """
    def __init__(
        self,
        attention_n_filters=32,
        attention_kernel_size=31,
        attention_dim=128,
    ):
        super().__init__()
        # "same" padding so the time dimension is preserved.
        padding = int((attention_kernel_size - 1) / 2)
        # 2 input channels: previous and cumulative attention weights.
        self.location_conv = ConvNorm(
            2,
            attention_n_filters,
            kernel_size=attention_kernel_size,
            padding=padding,
            bias=False,
            stride=1,
            dilation=1,
        )
        self.location_dense = LinearNorm(
            attention_n_filters, attention_dim, bias=False, w_init_gain="tanh"
        )
    def forward(self, attention_weights_cat):
        """Performs the forward pass for the attention layer
        Arguments
        ---------
        attention_weights_cat: torch.Tensor
            the concatenating attention weights
        Results
        -------
        processed_attention: torch.Tensor
            the attention layer output
        """
        processed_attention = self.location_conv(attention_weights_cat)
        # (B, filters, T) -> (B, T, filters) so the dense layer acts per step.
        processed_attention = processed_attention.transpose(1, 2)
        processed_attention = self.location_dense(processed_attention)
        return processed_attention
class Attention(nn.Module):
"""The Tacotron attention layer. Location-based attention is used.
Arguments
---------
attention_rnn_dim: int
the dimension of the RNN to which the attention layer
is applied
embedding_dim: int
the embedding dimension
attention_dim: int
the dimension of the memory cell
attenion_location_n_filters: int
the number of location filters
attention_location_kernel_size: int
the kernel size of the location layer
Example
-------
>>> import torch
>>> from speechbrain.lobes.models.Tacotron2 import (
... Attention, get_mask_from_lengths)
>>> layer = Attention()
>>> attention_hidden_state = torch.randn(2, 1024)
>>> memory = torch.randn(2, 173, 512)
>>> processed_memory = torch.randn(2, 173, 128)
>>> attention_weights_cat = torch.randn(2, 2, 173)
>>> memory_lengths = torch.tensor([173, 91])
>>> mask = get_mask_from_lengths(memory_lengths)
>>> attention_context, attention_weights = layer(
... attention_hidden_state,
... memory,
... processed_memory,
... attention_weights_cat,
... mask
... )
>>> attention_context.shape, attention_weights.shape
(torch.Size([2, 512]), torch.Size([2, 173]))
"""
def __init__(
self,
attention_rnn_dim=1024,
embedding_dim=512,
attention_dim=128,
attention_location_n_filters=32,
attention_location_kernel_size=31,
):
super().__init__()
self.query_layer = LinearNorm(
attention_rnn_dim, attention_dim, bias=False, w_init_gain="tanh"
)
self.memory_layer = LinearNorm(
embedding_dim, attention_dim, bias=False, w_init_gain="tanh"
)
self.v = LinearNorm(attention_dim, 1, bias=False)
self.location_layer = LocationLayer(
attention_location_n_filters,
attention_location_kernel_size,
attention_dim,
)
self.score_mask_value = -float("inf")
def get_alignment_energies(
self, query, processed_memory, attention_weights_cat
):
"""Computes the alignment energies
Arguments
---------
query: torch.Tensor
decoder output (batch, n_mel_channels * n_frames_per_step)
processed_memory: torch.Tensor
processed encoder outputs (B, T_in, attention_dim)
attention_weights_cat: torch.Tensor
cumulative and prev. att weights (B, 2, max_time)
Returns
-------
alignment : torch.Tensor
(batch, max_time)
"""
processed_query = self.query_layer(query.unsqueeze(1))
processed_attention_weights = self.location_layer(attention_weights_cat)
energies = self.v(
torch.tanh(
processed_query + processed_attention_weights + processed_memory
)
)
energies = energies.squeeze(2)
return energies
def forward(
    self,
    attention_hidden_state,
    memory,
    processed_memory,
    attention_weights_cat,
    mask,
):
    """Computes the attention context and the attention weights

    Arguments
    ---------
    attention_hidden_state: torch.Tensor
        attention rnn last output
    memory: torch.Tensor
        encoder outputs
    processed_memory: torch.Tensor
        processed encoder outputs
    attention_weights_cat: torch.Tensor
        previous and cumulative attention weights
    mask: torch.Tensor
        binary mask for padded data

    Returns
    -------
    result: tuple
        a (attention_context, attention_weights) tuple
    """
    energies = self.get_alignment_energies(
        attention_hidden_state, processed_memory, attention_weights_cat
    )
    # Padded positions receive -inf energy so softmax zeroes them out.
    masked_energies = energies.masked_fill(mask, self.score_mask_value)
    attention_weights = F.softmax(masked_energies, dim=1)
    # Weighted sum over encoder timesteps: (B, 1, T) x (B, T, D).
    attention_context = torch.bmm(
        attention_weights.unsqueeze(1), memory
    ).squeeze(1)
    return attention_context, attention_weights
class Prenet(nn.Module):
    """The Tacotron pre-net module consisting of a specified number of
    normalized (Xavier-initialized) linear layers

    Arguments
    ---------
    in_dim: int
        the input dimensions
    sizes: list
        the dimensions of the hidden layers/output
    dropout: float
        the dropout probability

    Example
    -------
    >>> import torch
    >>> from speechbrain.lobes.models.Tacotron2 import Prenet
    >>> layer = Prenet()
    >>> x = torch.randn(862, 2, 80)
    >>> output = layer(x)
    >>> output.shape
    torch.Size([862, 2, 256])
    """

    def __init__(self, in_dim=80, sizes=None, dropout=0.5):
        super().__init__()
        # Avoid a shared mutable default argument; [256, 256] is the
        # standard Tacotron2 prenet configuration.
        if sizes is None:
            sizes = [256, 256]
        # Chain the layer dimensions: in_dim -> sizes[0] -> sizes[1] -> ...
        in_sizes = [in_dim] + sizes[:-1]
        self.layers = nn.ModuleList(
            [
                LinearNorm(in_size, out_size, bias=False)
                for (in_size, out_size) in zip(in_sizes, sizes)
            ]
        )
        self.dropout = dropout

    def forward(self, x):
        """Computes the forward pass for the prenet

        Arguments
        ---------
        x: torch.Tensor
            the prenet inputs

        Returns
        -------
        output: torch.Tensor
            the output
        """
        for linear in self.layers:
            # training=True is deliberate: Tacotron2 keeps prenet dropout
            # active at inference time as well (see the Tacotron2 paper).
            x = F.dropout(F.relu(linear(x)), p=self.dropout, training=True)
        return x
class Postnet(nn.Module):
    """The Tacotron postnet consists of a number of 1-d convolutional layers
    with Xavier initialization and a tanh activation, with batch normalization.
    Depending on configuration, the postnet may either refine the MEL spectrogram
    or upsample it to a linear spectrogram

    Arguments
    ---------
    n_mel_channels: int
        the number of MEL spectrogram channels
    postnet_embedding_dim: int
        the postnet embedding dimension
    postnet_kernel_size: int
        the kernel size of the convolutions within the decoders
    postnet_n_convolutions: int
        the number of convolutions in the postnet

    Example
    -------
    >>> import torch
    >>> from speechbrain.lobes.models.Tacotron2 import Postnet
    >>> layer = Postnet()
    >>> x = torch.randn(2, 80, 861)
    >>> output = layer(x)
    >>> output.shape
    torch.Size([2, 80, 861])
    """

    def __init__(
        self,
        n_mel_channels=80,
        postnet_embedding_dim=512,
        postnet_kernel_size=5,
        postnet_n_convolutions=5,
    ):
        super().__init__()
        self.convolutions = nn.ModuleList()
        # First convolution: n_mel_channels -> postnet_embedding_dim.
        self.convolutions.append(
            nn.Sequential(
                ConvNorm(
                    n_mel_channels,
                    postnet_embedding_dim,
                    kernel_size=postnet_kernel_size,
                    stride=1,
                    padding=int((postnet_kernel_size - 1) / 2),
                    dilation=1,
                    w_init_gain="tanh",
                ),
                nn.BatchNorm1d(postnet_embedding_dim),
            )
        )
        # Middle convolutions keep the embedding dimension.
        for i in range(1, postnet_n_convolutions - 1):
            self.convolutions.append(
                nn.Sequential(
                    ConvNorm(
                        postnet_embedding_dim,
                        postnet_embedding_dim,
                        kernel_size=postnet_kernel_size,
                        stride=1,
                        padding=int((postnet_kernel_size - 1) / 2),
                        dilation=1,
                        w_init_gain="tanh",
                    ),
                    nn.BatchNorm1d(postnet_embedding_dim),
                )
            )
        # Last convolution projects back to n_mel_channels, linear init
        # (no tanh is applied to the final layer in forward()).
        self.convolutions.append(
            nn.Sequential(
                ConvNorm(
                    postnet_embedding_dim,
                    n_mel_channels,
                    kernel_size=postnet_kernel_size,
                    stride=1,
                    padding=int((postnet_kernel_size - 1) / 2),
                    dilation=1,
                    w_init_gain="linear",
                ),
                nn.BatchNorm1d(n_mel_channels),
            )
        )
        self.n_convs = len(self.convolutions)

    def forward(self, x):
        """Computes the forward pass of the postnet

        Arguments
        ---------
        x: torch.Tensor
            the postnet input (usually a MEL spectrogram)

        Returns
        -------
        output: torch.Tensor
            the postnet output (a refined MEL spectrogram or a
            linear spectrogram depending on how the model is
            configured)
        """
        # Use enumerate instead of a manual counter: every layer but the
        # last is followed by tanh; the final projection stays linear.
        for i, conv in enumerate(self.convolutions):
            if i < self.n_convs - 1:
                x = F.dropout(torch.tanh(conv(x)), 0.5, training=self.training)
            else:
                x = F.dropout(conv(x), 0.5, training=self.training)
        return x
class Encoder(nn.Module):
    """The Tacotron2 encoder module, consisting of a sequence of 1-d convolution banks (3 by default)
    and a bidirectional LSTM

    Arguments
    ---------
    encoder_n_convolutions: int
        the number of encoder convolutions
    encoder_embedding_dim: int
        the dimension of the encoder embedding
    encoder_kernel_size: int
        the kernel size of the 1-D convolutional layers within
        the encoder

    Example
    -------
    >>> import torch
    >>> from speechbrain.lobes.models.Tacotron2 import Encoder
    >>> layer = Encoder()
    >>> x = torch.randn(2, 512, 128)
    >>> input_lengths = torch.tensor([128, 83])
    >>> outputs = layer(x, input_lengths)
    >>> outputs.shape
    torch.Size([2, 128, 512])
    """

    def __init__(
        self,
        encoder_n_convolutions=3,
        encoder_embedding_dim=512,
        encoder_kernel_size=5,
    ):
        super().__init__()
        convolutions = []
        for _ in range(encoder_n_convolutions):
            conv_layer = nn.Sequential(
                ConvNorm(
                    encoder_embedding_dim,
                    encoder_embedding_dim,
                    kernel_size=encoder_kernel_size,
                    stride=1,
                    padding=int((encoder_kernel_size - 1) / 2),
                    dilation=1,
                    w_init_gain="relu",
                ),
                nn.BatchNorm1d(encoder_embedding_dim),
            )
            convolutions.append(conv_layer)
        self.convolutions = nn.ModuleList(convolutions)
        # Bidirectional LSTM: each direction gets half the embedding dim
        # so the concatenated output keeps encoder_embedding_dim.
        self.lstm = nn.LSTM(
            encoder_embedding_dim,
            int(encoder_embedding_dim / 2),
            1,
            batch_first=True,
            bidirectional=True,
        )

    @torch.jit.ignore
    def forward(self, x, input_lengths):
        """Computes the encoder forward pass

        Arguments
        ---------
        x: torch.Tensor
            a batch of inputs (sequence embeddings)
        input_lengths: torch.Tensor
            a tensor of input lengths

        Returns
        -------
        outputs: torch.Tensor
            the encoder output
        """
        for conv in self.convolutions:
            x = F.dropout(F.relu(conv(x)), 0.5, self.training)
        x = x.transpose(1, 2)
        # pack_padded_sequence requires lengths on the CPU; convert once.
        input_lengths = input_lengths.cpu().numpy()
        x = nn.utils.rnn.pack_padded_sequence(
            x, input_lengths, batch_first=True
        )
        self.lstm.flatten_parameters()
        outputs, _ = self.lstm(x)
        outputs, _ = nn.utils.rnn.pad_packed_sequence(outputs, batch_first=True)
        return outputs

    @torch.jit.export
    def infer(self, x, input_lengths):
        """Performs a forward step in the inference context

        Arguments
        ---------
        x: torch.Tensor
            a batch of inputs (sequence embeddings)
        input_lengths: torch.Tensor
            a tensor of input lengths

        Returns
        -------
        outputs: torch.Tensor
            the encoder output
        """
        # The previous per-iteration x.to(x.device) transfer was a no-op
        # (same device) and has been removed.
        for conv in self.convolutions:
            x = F.dropout(F.relu(conv(x)), 0.5, self.training)
        x = x.transpose(1, 2)
        input_lengths = input_lengths.cpu()
        x = nn.utils.rnn.pack_padded_sequence(
            x, input_lengths, batch_first=True
        )
        outputs, _ = self.lstm(x)
        outputs, _ = nn.utils.rnn.pad_packed_sequence(outputs, batch_first=True)
        return outputs
class Decoder(nn.Module):
    """The Tacotron decoder

    Arguments
    ---------
    n_mel_channels: int
        the number of channels in the MEL spectrogram
    n_frames_per_step:
        the number of frames in the spectrogram for each
        time step of the decoder
    encoder_embedding_dim: int
        the dimension of the encoder embedding
    attention_location_n_filters: int
        the number of filters in location-based attention
    attention_location_kernel_size: int
        the kernel size of location-based attention
    attention_rnn_dim: int
        RNN dimension for the attention layer
    decoder_rnn_dim: int
        the encoder RNN dimension
    prenet_dim: int
        the dimension of the prenet (inner and output layers)
    max_decoder_steps: int
        the maximum number of decoder steps for the longest utterance
        expected for the model
    gate_threshold: float
        the fixed threshold to which the outputs of the decoders will be compared
    p_attention_dropout: float
        dropout probability for attention layers

    Example
    -------
    >>> import torch
    >>> from speechbrain.lobes.models.Tacotron2 import Decoder
    >>> layer = Decoder()
    >>> memory = torch.randn(2, 173, 512)
    >>> decoder_inputs = torch.randn(2, 80, 173)
    >>> memory_lengths = torch.tensor([173, 91])
    >>> mel_outputs, gate_outputs, alignments = layer(
    ...     memory, decoder_inputs, memory_lengths)
    >>> mel_outputs.shape, gate_outputs.shape, alignments.shape
    (torch.Size([2, 80, 173]), torch.Size([2, 173]), torch.Size([2, 173, 173]))
    """

    def __init__(
        self,
        n_mel_channels=80,
        n_frames_per_step=1,
        encoder_embedding_dim=512,
        attention_dim=128,
        attention_location_n_filters=32,
        attention_location_kernel_size=31,
        attention_rnn_dim=1024,
        decoder_rnn_dim=1024,
        prenet_dim=256,
        max_decoder_steps=1000,
        gate_threshold=0.5,
        p_attention_dropout=0.1,
        p_decoder_dropout=0.1,
        early_stopping=True,
    ):
        super().__init__()
        self.n_mel_channels = n_mel_channels
        self.n_frames_per_step = n_frames_per_step
        self.encoder_embedding_dim = encoder_embedding_dim
        self.attention_rnn_dim = attention_rnn_dim
        self.decoder_rnn_dim = decoder_rnn_dim
        self.prenet_dim = prenet_dim
        self.max_decoder_steps = max_decoder_steps
        self.gate_threshold = gate_threshold
        self.p_attention_dropout = p_attention_dropout
        self.p_decoder_dropout = p_decoder_dropout
        self.early_stopping = early_stopping
        # Prenet processes the previous mel frame before it is fed back.
        self.prenet = Prenet(
            n_mel_channels * n_frames_per_step, [prenet_dim, prenet_dim]
        )
        self.attention_rnn = nn.LSTMCell(
            prenet_dim + encoder_embedding_dim, attention_rnn_dim
        )
        self.attention_layer = Attention(
            attention_rnn_dim,
            encoder_embedding_dim,
            attention_dim,
            attention_location_n_filters,
            attention_location_kernel_size,
        )
        # NOTE(review): the third positional argument to nn.LSTMCell is
        # `bias`; passing 1 just means bias=True — confirm it is intended.
        self.decoder_rnn = nn.LSTMCell(
            attention_rnn_dim + encoder_embedding_dim, decoder_rnn_dim, 1
        )
        self.linear_projection = LinearNorm(
            decoder_rnn_dim + encoder_embedding_dim,
            n_mel_channels * n_frames_per_step,
        )
        self.gate_layer = LinearNorm(
            decoder_rnn_dim + encoder_embedding_dim,
            1,
            bias=True,
            w_init_gain="sigmoid",
        )

    def get_go_frame(self, memory):
        """Gets all zeros frames to use as first decoder input

        Arguments
        ---------
        memory: torch.Tensor
            decoder outputs

        Returns
        -------
        decoder_input: torch.Tensor
            all zeros frames
        """
        B = memory.size(0)
        dtype = memory.dtype
        device = memory.device
        decoder_input = torch.zeros(
            B,
            self.n_mel_channels * self.n_frames_per_step,
            dtype=dtype,
            device=device,
        )
        return decoder_input

    def initialize_decoder_states(self, memory):
        """ Initializes attention rnn states, decoder rnn states, attention
        weights, attention cumulative weights, attention context, stores memory
        and stores processed memory

        Arguments
        ---------
        memory: torch.Tensor
            Encoder outputs

        Returns
        -------
        result: tuple
            A tuple of tensors
            (
                attention_hidden,
                attention_cell,
                decoder_hidden,
                decoder_cell,
                attention_weights,
                attention_weights_cum,
                attention_context,
                processed_memory,
            )
        """
        B = memory.size(0)
        MAX_TIME = memory.size(1)
        dtype = memory.dtype
        device = memory.device
        # All recurrent states start at zero; shapes follow the configured
        # RNN dimensions and the encoder's time axis.
        attention_hidden = torch.zeros(
            B, self.attention_rnn_dim, dtype=dtype, device=device
        )
        attention_cell = torch.zeros(
            B, self.attention_rnn_dim, dtype=dtype, device=device
        )
        decoder_hidden = torch.zeros(
            B, self.decoder_rnn_dim, dtype=dtype, device=device
        )
        decoder_cell = torch.zeros(
            B, self.decoder_rnn_dim, dtype=dtype, device=device
        )
        attention_weights = torch.zeros(B, MAX_TIME, dtype=dtype, device=device)
        attention_weights_cum = torch.zeros(
            B, MAX_TIME, dtype=dtype, device=device
        )
        attention_context = torch.zeros(
            B, self.encoder_embedding_dim, dtype=dtype, device=device
        )
        # Pre-project the memory once; it is reused at every decode step.
        processed_memory = self.attention_layer.memory_layer(memory)
        return (
            attention_hidden,
            attention_cell,
            decoder_hidden,
            decoder_cell,
            attention_weights,
            attention_weights_cum,
            attention_context,
            processed_memory,
        )

    def parse_decoder_inputs(self, decoder_inputs):
        """Prepares decoder inputs, i.e. mel outputs

        Arguments
        ----------
        decoder_inputs: torch.Tensor
            inputs used for teacher-forced training, i.e. mel-specs

        Returns
        -------
        decoder_inputs: torch.Tensor
            processed decoder inputs
        """
        # (B, n_mel_channels, T_out) -> (B, T_out, n_mel_channels)
        decoder_inputs = decoder_inputs.transpose(1, 2)
        # Group n_frames_per_step frames into one decoder step.
        decoder_inputs = decoder_inputs.view(
            decoder_inputs.size(0),
            int(decoder_inputs.size(1) / self.n_frames_per_step),
            -1,
        )
        # (B, T_out, n_mel_channels) -> (T_out, B, n_mel_channels)
        decoder_inputs = decoder_inputs.transpose(0, 1)
        return decoder_inputs

    def parse_decoder_outputs(self, mel_outputs, gate_outputs, alignments):
        """Prepares decoder outputs for output

        Arguments
        ---------
        mel_outputs: torch.Tensor
            MEL-scale spectrogram outputs
        gate_outputs: torch.Tensor
            gate output energies
        alignments: torch.Tensor
            the alignment tensor

        Returns
        -------
        mel_outputs: torch.Tensor
            MEL-scale spectrogram outputs
        gate_outputs: torch.Tensor
            gate output energies
        alignments: torch.Tensor
            the alignment tensor
        """
        # (T_out, B) -> (B, T_out)
        alignments = alignments.transpose(0, 1).contiguous()
        # (T_out, B) -> (B, T_out)
        # A 1-D gate tensor occurs when batch size is 1 (squeeze() in the
        # decode loop collapsed the batch axis); restore it here.
        if gate_outputs.dim() == 1:
            gate_outputs = gate_outputs.unsqueeze(0)
        else:
            gate_outputs = gate_outputs.transpose(0, 1).contiguous()
        # (T_out, B, n_mel_channels) -> (B, T_out, n_mel_channels)
        mel_outputs = mel_outputs.transpose(0, 1).contiguous()
        # decouple frames per step
        shape = (mel_outputs.shape[0], -1, self.n_mel_channels)
        mel_outputs = mel_outputs.view(*shape)
        # (B, T_out, n_mel_channels) -> (B, n_mel_channels, T_out)
        mel_outputs = mel_outputs.transpose(1, 2)
        return mel_outputs, gate_outputs, alignments

    def decode(
        self,
        decoder_input,
        attention_hidden,
        attention_cell,
        decoder_hidden,
        decoder_cell,
        attention_weights,
        attention_weights_cum,
        attention_context,
        memory,
        processed_memory,
        mask,
    ):
        """Decoder step using stored states, attention and memory

        Arguments
        ---------
        decoder_input: torch.Tensor
            previous mel output
        attention_hidden: torch.Tensor
            the hidden state of the attention module
        attention_cell: torch.Tensor
            the attention cell state
        decoder_hidden: torch.Tensor
            the decoder hidden state
        decoder_cell: torch.Tensor
            the decoder cell state
        attention_weights: torch.Tensor
            the attention weights
        attention_weights_cum: torch.Tensor
            cumulative attention weights
        attention_context: torch.Tensor
            the attention context tensor
        memory: torch.Tensor
            the memory tensor
        processed_memory: torch.Tensor
            the processed memory tensor
        mask: torch.Tensor
            binary mask for padded encoder positions

        Returns
        -------
        mel_output: torch.Tensor
            the MEL-scale outputs
        gate_output: torch.Tensor
            gate output energies
        attention_weights: torch.Tensor
            attention weights
        """
        # Attention RNN consumes the prenet output plus the previous
        # attention context.
        cell_input = torch.cat((decoder_input, attention_context), -1)
        attention_hidden, attention_cell = self.attention_rnn(
            cell_input, (attention_hidden, attention_cell)
        )
        attention_hidden = F.dropout(
            attention_hidden, self.p_attention_dropout, self.training
        )
        # Stack previous and cumulative weights for the location layer.
        attention_weights_cat = torch.cat(
            (
                attention_weights.unsqueeze(1),
                attention_weights_cum.unsqueeze(1),
            ),
            dim=1,
        )
        attention_context, attention_weights = self.attention_layer(
            attention_hidden,
            memory,
            processed_memory,
            attention_weights_cat,
            mask,
        )
        attention_weights_cum += attention_weights
        # Decoder RNN consumes the attention RNN output plus the new context.
        decoder_input = torch.cat((attention_hidden, attention_context), -1)
        decoder_hidden, decoder_cell = self.decoder_rnn(
            decoder_input, (decoder_hidden, decoder_cell)
        )
        decoder_hidden = F.dropout(
            decoder_hidden, self.p_decoder_dropout, self.training
        )
        decoder_hidden_attention_context = torch.cat(
            (decoder_hidden, attention_context), dim=1
        )
        decoder_output = self.linear_projection(
            decoder_hidden_attention_context
        )
        gate_prediction = self.gate_layer(decoder_hidden_attention_context)
        return (
            decoder_output,
            gate_prediction,
            attention_hidden,
            attention_cell,
            decoder_hidden,
            decoder_cell,
            attention_weights,
            attention_weights_cum,
            attention_context,
        )

    @torch.jit.ignore
    def forward(self, memory, decoder_inputs, memory_lengths):
        """ Decoder forward pass for training

        Arguments
        ----------
        memory: torch.Tensor
            Encoder outputs
        decoder_inputs: torch.Tensor
            Decoder inputs for teacher forcing. i.e. mel-specs
        memory_lengths: torch.Tensor
            Encoder output lengths for attention masking.

        Returns
        -------
        mel_outputs: torch.Tensor
            mel outputs from the decoder
        gate_outputs: torch.Tensor
            gate outputs from the decoder
        alignments: torch.Tensor
            sequence of attention weights from the decoder
        """
        # Prepend an all-zero "go" frame to the teacher-forcing inputs.
        decoder_input = self.get_go_frame(memory).unsqueeze(0)
        decoder_inputs = self.parse_decoder_inputs(decoder_inputs)
        decoder_inputs = torch.cat((decoder_input, decoder_inputs), dim=0)
        decoder_inputs = self.prenet(decoder_inputs)
        mask = get_mask_from_lengths(memory_lengths)
        (
            attention_hidden,
            attention_cell,
            decoder_hidden,
            decoder_cell,
            attention_weights,
            attention_weights_cum,
            attention_context,
            processed_memory,
        ) = self.initialize_decoder_states(memory)
        mel_outputs, gate_outputs, alignments = [], [], []
        # Teacher forcing: feed ground-truth frame t to produce frame t+1.
        while len(mel_outputs) < decoder_inputs.size(0) - 1:
            decoder_input = decoder_inputs[len(mel_outputs)]
            (
                mel_output,
                gate_output,
                attention_hidden,
                attention_cell,
                decoder_hidden,
                decoder_cell,
                attention_weights,
                attention_weights_cum,
                attention_context,
            ) = self.decode(
                decoder_input,
                attention_hidden,
                attention_cell,
                decoder_hidden,
                decoder_cell,
                attention_weights,
                attention_weights_cum,
                attention_context,
                memory,
                processed_memory,
                mask,
            )
            mel_outputs += [mel_output.squeeze(1)]
            # NOTE(review): squeeze() drops the batch axis when B == 1;
            # parse_decoder_outputs compensates via its dim() == 1 branch.
            gate_outputs += [gate_output.squeeze()]
            alignments += [attention_weights]
        mel_outputs, gate_outputs, alignments = self.parse_decoder_outputs(
            torch.stack(mel_outputs),
            torch.stack(gate_outputs),
            torch.stack(alignments),
        )
        return mel_outputs, gate_outputs, alignments

    @torch.jit.export
    def infer(self, memory, memory_lengths):
        """ Decoder inference

        Arguments
        ---------
        memory: torch.Tensor
            Encoder outputs
        memory_lengths: torch.Tensor
            Encoder output lengths for attention masking.

        Returns
        -------
        mel_outputs: torch.Tensor
            mel outputs from the decoder
        gate_outputs: torch.Tensor
            gate outputs from the decoder
        alignments: torch.Tensor
            sequence of attention weights from the decoder
        mel_lengths: torch.Tensor
            the length of MEL spectrograms
        """
        decoder_input = self.get_go_frame(memory)
        mask = get_mask_from_lengths(memory_lengths)
        (
            attention_hidden,
            attention_cell,
            decoder_hidden,
            decoder_cell,
            attention_weights,
            attention_weights_cum,
            attention_context,
            processed_memory,
        ) = self.initialize_decoder_states(memory)
        # Per-utterance output lengths and "still decoding" flags.
        mel_lengths = torch.zeros(
            [memory.size(0)], dtype=torch.int32, device=memory.device
        )
        not_finished = torch.ones(
            [memory.size(0)], dtype=torch.int32, device=memory.device
        )
        # Placeholders overwritten on the first iteration (keeps TorchScript
        # happy about definite assignment).
        mel_outputs, gate_outputs, alignments = (
            torch.zeros(1),
            torch.zeros(1),
            torch.zeros(1),
        )
        first_iter = True
        while True:
            decoder_input = self.prenet(decoder_input)
            (
                mel_output,
                gate_output,
                attention_hidden,
                attention_cell,
                decoder_hidden,
                decoder_cell,
                attention_weights,
                attention_weights_cum,
                attention_context,
            ) = self.decode(
                decoder_input,
                attention_hidden,
                attention_cell,
                decoder_hidden,
                decoder_cell,
                attention_weights,
                attention_weights_cum,
                attention_context,
                memory,
                processed_memory,
                mask,
            )
            if first_iter:
                mel_outputs = mel_output.unsqueeze(0)
                gate_outputs = gate_output
                alignments = attention_weights
                first_iter = False
            else:
                mel_outputs = torch.cat(
                    (mel_outputs, mel_output.unsqueeze(0)), dim=0
                )
                gate_outputs = torch.cat((gate_outputs, gate_output), dim=0)
                # NOTE(review): alignments are concatenated along dim 0 and
                # later re-folded by Tacotron2.infer via unfold — confirm.
                alignments = torch.cat((alignments, attention_weights), dim=0)
            # An utterance keeps decoding while sigmoid(gate) stays at or
            # below the gate threshold.
            dec = (
                torch.le(torch.sigmoid(gate_output), self.gate_threshold)
                .to(torch.int32)
                .squeeze(1)
            )
            not_finished = not_finished * dec
            mel_lengths += not_finished
            if self.early_stopping and torch.sum(not_finished) == 0:
                break
            if len(mel_outputs) == self.max_decoder_steps:
                break
            # Feed the predicted frame back as the next input.
            decoder_input = mel_output
        mel_outputs, gate_outputs, alignments = self.parse_decoder_outputs(
            mel_outputs, gate_outputs, alignments
        )
        return mel_outputs, gate_outputs, alignments, mel_lengths
class Tacotron2(nn.Module):
    """The Tacotron2 text-to-speech model, based on the NVIDIA implementation.

    This class is the main entry point for the model, which is responsible
    for instantiating all submodules, which, in turn, manage the individual
    neural network layers

    Simplified STRUCTURE: input->word embedding ->encoder ->attention \
    ->decoder(+prenet) -> postnet ->output

    prenet(input is decoder previous time step) output is input to decoder
    concatenanted with the attention output

    Arguments
    ---------
    mask_padding: bool
        whether or not to mask pad-outputs of tacotron
    #mel generation parameter in data io
    n_mel_channels: int
        number of mel channels for constructing spectrogram
    #symbols
    n_symbols: int=128
        number of accepted char symbols defined in textToSequence
    symbols_embedding_dim: int
        number of embeding dimension for symbols fed to nn.Embedding
    # Encoder parameters
    encoder_kernel_size: int
        size of kernel processing the embeddings
    encoder_n_convolutions: int
        number of convolution layers in encoder
    encoder_embedding_dim: int
        number of kernels in encoder, this is also the dimension
        of the bidirectional LSTM in the encoder
    # Attention parameters
    attention_rnn_dim: int
        input dimension
    attention_dim: int
        number of hidden represetation in attention
    # Location Layer parameters
    attention_location_n_filters: int
        number of 1-D convulation filters in attention
    attention_location_kernel_size: int
        length of the 1-D convolution filters
    # Decoder parameters
    n_frames_per_step: int=1
        only 1 generated mel-frame per step is supported for the decoder as of now.
    decoder_rnn_dim: int
        number of 2 unidirectionnal stacked LSTM units
    prenet_dim: int
        dimension of linear prenet layers
    max_decoder_steps: int
        maximum number of steps/frames the decoder generates before stopping
    p_attention_dropout: float
        attention drop out probability
    p_decoder_dropout: float
        decoder drop out probability
    gate_threshold: int
        cut off level any output probabilty above that is considered
        complete and stops genration so we have variable length outputs
    decoder_no_early_stopping: bool
        determines early stopping of decoder
        along with gate_threshold . The logical inverse of this is fed to the decoder
    #Mel-post processing network parameters
    postnet_embedding_dim: int
        number os postnet dfilters
    postnet_kernel_size: int
        1d size of posnet kernel
    postnet_n_convolutions: int
        number of convolution layers in postnet

    Example
    -------
    >>> import torch
    >>> _ = torch.manual_seed(213312)
    >>> from speechbrain.lobes.models.Tacotron2 import Tacotron2
    >>> model = Tacotron2(
    ...    mask_padding=True,
    ...    n_mel_channels=80,
    ...    n_symbols=148,
    ...    symbols_embedding_dim=512,
    ...    encoder_kernel_size=5,
    ...    encoder_n_convolutions=3,
    ...    encoder_embedding_dim=512,
    ...    attention_rnn_dim=1024,
    ...    attention_dim=128,
    ...    attention_location_n_filters=32,
    ...    attention_location_kernel_size=31,
    ...    n_frames_per_step=1,
    ...    decoder_rnn_dim=1024,
    ...    prenet_dim=256,
    ...    max_decoder_steps=32,
    ...    gate_threshold=0.5,
    ...    p_attention_dropout=0.1,
    ...    p_decoder_dropout=0.1,
    ...    postnet_embedding_dim=512,
    ...    postnet_kernel_size=5,
    ...    postnet_n_convolutions=5,
    ...    decoder_no_early_stopping=False
    ... )
    >>> _ = model.eval()
    >>> inputs = torch.tensor([
    ...     [13, 12, 31, 14, 19],
    ...     [31, 16, 30, 31, 0],
    ... ])
    >>> input_lengths = torch.tensor([5, 4])
    >>> outputs, output_lengths, alignments = model.infer(inputs, input_lengths)
    >>> outputs.shape, output_lengths.shape, alignments.shape
    (torch.Size([2, 80, 1]), torch.Size([2]), torch.Size([2, 1, 5]))
    """

    def __init__(
        self,
        mask_padding=True,
        n_mel_channels=80,
        n_symbols=148,
        symbols_embedding_dim=512,
        encoder_kernel_size=5,
        encoder_n_convolutions=3,
        encoder_embedding_dim=512,
        attention_rnn_dim=1024,
        attention_dim=128,
        attention_location_n_filters=32,
        attention_location_kernel_size=31,
        n_frames_per_step=1,
        decoder_rnn_dim=1024,
        prenet_dim=256,
        max_decoder_steps=1000,
        gate_threshold=0.5,
        p_attention_dropout=0.1,
        p_decoder_dropout=0.1,
        postnet_embedding_dim=512,
        postnet_kernel_size=5,
        postnet_n_convolutions=5,
        decoder_no_early_stopping=False,
    ):
        super().__init__()
        self.mask_padding = mask_padding
        self.n_mel_channels = n_mel_channels
        self.n_frames_per_step = n_frames_per_step
        self.embedding = nn.Embedding(n_symbols, symbols_embedding_dim)
        # Xavier-style uniform initialization of the symbol embedding.
        std = sqrt(2.0 / (n_symbols + symbols_embedding_dim))
        val = sqrt(3.0) * std  # uniform bounds for std
        self.embedding.weight.data.uniform_(-val, val)
        self.encoder = Encoder(
            encoder_n_convolutions, encoder_embedding_dim, encoder_kernel_size
        )
        self.decoder = Decoder(
            n_mel_channels,
            n_frames_per_step,
            encoder_embedding_dim,
            attention_dim,
            attention_location_n_filters,
            attention_location_kernel_size,
            attention_rnn_dim,
            decoder_rnn_dim,
            prenet_dim,
            max_decoder_steps,
            gate_threshold,
            p_attention_dropout,
            p_decoder_dropout,
            not decoder_no_early_stopping,
        )
        self.postnet = Postnet(
            n_mel_channels,
            postnet_embedding_dim,
            postnet_kernel_size,
            postnet_n_convolutions,
        )

    def parse_output(self, outputs, output_lengths, alignments_dim=None):
        """
        Masks the padded part of output

        Arguments
        ---------
        outputs: list
            a list of tensors - raw outputs
        outputs_lengths: torch.Tensor
            a tensor representing the lengths of all outputs
        alignments_dim: int
            the desired dimension of the alignments along the last axis
            Optional but needed for data-parallel training

        Returns
        -------
        result: tuple
            a (mel_outputs, mel_outputs_postnet, gate_outputs, alignments) tuple with
            the original outputs - with the mask applied
        """
        mel_outputs, mel_outputs_postnet, gate_outputs, alignments = outputs
        if self.mask_padding and output_lengths is not None:
            mask = get_mask_from_lengths(
                output_lengths, max_len=mel_outputs.size(-1)
            )
            # Broadcast the (B, T) mask over the mel-channel axis.
            mask = mask.expand(self.n_mel_channels, mask.size(0), mask.size(1))
            mask = mask.permute(1, 0, 2)
            # BUGFIX: the masked clone was previously discarded, so
            # mel_outputs was returned unmasked while the postnet output
            # was masked. Keep the clone (autograd-safe, out-of-place)
            # and use the masked result.
            mel_outputs = mel_outputs.clone().masked_fill_(mask, 0.0)
            mel_outputs_postnet.masked_fill_(mask, 0.0)
            gate_outputs.masked_fill_(mask[:, 0, :], 1e3)  # gate energies
        if alignments_dim is not None:
            # Pad alignments so all replicas return the same width
            # (needed for data-parallel gather).
            alignments = F.pad(
                alignments, (0, alignments_dim - alignments.size(-1))
            )
        return mel_outputs, mel_outputs_postnet, gate_outputs, alignments

    def forward(self, inputs, alignments_dim=None):
        """Decoder forward pass for training

        Arguments
        ---------
        inputs: tuple
            batch object
        alignments_dim: int
            the desired dimension of the alignments along the last axis
            Optional but needed for data-parallel training

        Returns
        ---------
        mel_outputs: torch.Tensor
            mel outputs from the decoder
        mel_outputs_postnet: torch.Tensor
            mel outputs from postnet
        gate_outputs: torch.Tensor
            gate outputs from the decoder
        alignments: torch.Tensor
            sequence of attention weights from the decoder
        output_legnths: torch.Tensor
            length of the output without padding
        """
        inputs, input_lengths, targets, max_len, output_lengths = inputs
        input_lengths, output_lengths = input_lengths.data, output_lengths.data
        embedded_inputs = self.embedding(inputs).transpose(1, 2)
        encoder_outputs = self.encoder(embedded_inputs, input_lengths)
        mel_outputs, gate_outputs, alignments = self.decoder(
            encoder_outputs, targets, memory_lengths=input_lengths
        )
        # The postnet predicts a residual refinement of the spectrogram.
        mel_outputs_postnet = self.postnet(mel_outputs)
        mel_outputs_postnet = mel_outputs + mel_outputs_postnet
        return self.parse_output(
            [mel_outputs, mel_outputs_postnet, gate_outputs, alignments],
            output_lengths,
            alignments_dim,
        )

    def infer(self, inputs, input_lengths):
        """Produces outputs

        Arguments
        ---------
        inputs: torch.tensor
            text or phonemes converted
        input_lengths: torch.tensor
            the lengths of input parameters

        Returns
        -------
        mel_outputs_postnet: torch.Tensor
            final mel output of tacotron 2
        mel_lengths: torch.Tensor
            length of mels
        alignments: torch.Tensor
            sequence of attention weights
        """
        embedded_inputs = self.embedding(inputs).transpose(1, 2)
        encoder_outputs = self.encoder.infer(embedded_inputs, input_lengths)
        mel_outputs, gate_outputs, alignments, mel_lengths = self.decoder.infer(
            encoder_outputs, input_lengths
        )
        mel_outputs_postnet = self.postnet(mel_outputs)
        mel_outputs_postnet = mel_outputs + mel_outputs_postnet
        # Re-fold the flat alignment stack into (batch, steps, enc_time).
        BS = mel_outputs_postnet.size(0)
        alignments = alignments.unfold(1, BS, BS).transpose(0, 2)
        return mel_outputs_postnet, mel_lengths, alignments
def get_mask_from_lengths(lengths, max_len=None):
    """Creates a padding mask from a tensor of sequence lengths

    Arguments
    ---------
    lengths: torch.Tensor
        a tensor of sequence lengths
    max_len: int
        The maximum length, i.e. the last dimension of
        the mask tensor. If not provided, it will be
        calculated automatically

    Returns
    -------
    mask: torch.Tensor
        a (batch, max_len) boolean mask - True at padded positions
    """
    if max_len is None:
        max_len = torch.max(lengths).item()
    ids = torch.arange(0, max_len, device=lengths.device, dtype=lengths.dtype)
    # True where the position index reaches or exceeds the sequence length
    # (replaces the previous byte()/torch.le() round-trip with a direct
    # comparison that yields the same boolean result).
    return ids >= lengths.unsqueeze(1)
def infer(model, text_sequences, input_lengths):
    """
    An inference hook for pretrained synthesizers

    Arguments
    ---------
    model: Tacotron2
        the tacotron model
    text_sequences: torch.Tensor
        encoded text sequences
    input_lengths: torch.Tensor
        input lengths

    Returns
    -------
    result: tuple
        (mel_outputs_postnet, mel_lengths, alignments) - the exact
        model output
    """
    # Thin wrapper: delegate directly to the model's inference method.
    outputs = model.infer(text_sequences, input_lengths)
    return outputs
# Named container for the individual terms returned by the Tacotron loss:
# the total loss plus its mel, gate and guided-attention components.
LossStats = namedtuple(
    "TacotronLoss", "loss mel_loss gate_loss attn_loss attn_weight"
)
class Loss(nn.Module):
"""The Tacotron loss implementation
The loss consists of an MSE loss on the spectrogram, a BCE gate loss
and a guided attention loss (if enabled) that attempts to make the
attention matrix diagonal
The output of the moduel is a LossStats tuple, which includes both the
total loss
Arguments
---------
guided_attention_sigma: float
The guided attention sigma factor, controling the "width" of
the mask
gate_loss_weight: float
The constant by which the hate loss will be multiplied
guided_attention_weight: float
The weight for the guided attention
guided_attention_scheduler: callable
The scheduler class for the guided attention loss
guided_attention_hard_stop: int
The number of epochs after which guided attention will be compeltely
turned off
Example:
>>> import torch
>>> _ = torch.manual_seed(42)
>>> from speechbrain.lobes.models.Tacotron2 import Loss
>>> loss = Loss(guided_attention_sigma=0.2)
>>> mel_target = torch.randn(2, 80, 861)
>>> gate_target = torch.randn(1722, 1)
>>> mel_out = torch.randn(2, 80, 861)
>>> mel_out_postnet = torch.randn(2, 80, 861)
>>> gate_out = torch.randn(2, 861)
>>> alignments = torch.randn(2, 861, 173)
>>> targets = mel_target, gate_target
>>> model_outputs = mel_out, mel_out_postnet, gate_out, alignments
>>> input_lengths = torch.tensor([173, 91])
>>> target_lengths = torch.tensor([861, 438])
>>> loss(model_outputs, targets, input_lengths, target_lengths, 1)
TacotronLoss(loss=tensor(4.8566), mel_loss=tensor(4.0097), gate_loss=tensor(0.8460), attn_loss=tensor(0.0010), attn_weight=tensor(1.))
"""
def __init__(
self,
guided_attention_sigma=None,
gate_loss_weight=1.0,
guided_attention_weight=1.0,
guided_attention_scheduler=None,
guided_attention_hard_stop=None,
):
super().__init__()
if guided_attention_weight == 0:
guided_attention_weight = None
self.guided_attention_weight = guided_attention_weight
self.mse_loss = nn.MSELoss()
self.bce_loss = nn.BCEWithLogitsLoss()
self.guided_attention_loss = GuidedAttentionLoss(
sigma=guided_attention_sigma
)
self.gate_loss_weight = gate_loss_weight
self.guided_attention_weight = guided_attention_weight
self.guided_attention_scheduler = guided_attention_scheduler
self.guided_attention_hard_stop = guided_attention_hard_stop
def forward(
self, model_output, targets, input_lengths, target_lengths, epoch
):
"""Computes the loss
Arguments
---------
model_output: tuple
the output of the model's forward():
(mel_outputs, mel_outputs_postnet, gate_outputs, alignments)
targets: tuple
the targets
input_lengths: torch.Tensor
a (batch, length) tensor of input lengths
target_lengths: torch.Tensor
a (batch, length) tensor of target (spectrogram) lengths
epoch: int
the current epoch number (used for the scheduling of the guided attention
loss) A StepScheduler is typically used
Returns
-------
result: LossStats
the total loss - and individual losses (mel and gate)
"""
mel_target, gate_target = targets[0], targets[1]
mel_target.requires_grad = False
gate_target.requires_grad = False
gate_target = gate_target.view(-1, 1)
mel_out, mel_out_postnet, gate_out, alignments = model_output
gate_out = gate_out.view(-1, 1)
mel_loss = self.mse_loss(mel_out, mel_target) + self.mse_loss(
mel_out_postnet, mel_target
)
gate_loss = self.gate_loss_weight * self.bce_loss(gate_out, gate_target)
attn_loss, attn_weight = self.get_attention_loss(
alignments, input_lengths, target_lengths, epoch
)
total_loss = mel_loss + gate_loss + attn_loss
return LossStats(
total_loss, mel_loss, gate_loss, attn_loss, attn_weight
)
    def get_attention_loss(
        self, alignments, input_lengths, target_lengths, epoch
    ):
        """Computes the guided attention loss and its scheduled weight.

        Arguments
        ---------
        alignments: torch.Tensor
            the alignment matrix from the model
        input_lengths: torch.Tensor
            a (batch, length) tensor of input lengths
        target_lengths: torch.Tensor
            a (batch, length) tensor of target (spectrogram) lengths
        epoch: int
            the current epoch number (used for the scheduling of the guided attention
            loss) A StepScheduler is typically used

        Returns
        -------
        attn_loss: torch.Tensor
            the weighted attention loss value (zero when disabled)
        attn_weight: torch.Tensor
            the attention weight actually applied at this epoch
        """
        zero_tensor = torch.tensor(0.0, device=alignments.device)
        if (
            self.guided_attention_weight is None
            or self.guided_attention_weight == 0
        ):
            # Guided attention disabled at construction time.
            attn_weight, attn_loss = zero_tensor, zero_tensor
        else:
            # Past the hard stop epoch, guided attention is dropped entirely.
            hard_stop_reached = (
                self.guided_attention_hard_stop is not None
                and epoch > self.guided_attention_hard_stop
            )
            if hard_stop_reached:
                attn_weight, attn_loss = zero_tensor, zero_tensor
            else:
                attn_weight = self.guided_attention_weight
                # The scheduler, when provided, overrides the static weight.
                if self.guided_attention_scheduler is not None:
                    _, attn_weight = self.guided_attention_scheduler(epoch)
                attn_weight = torch.tensor(attn_weight, device=alignments.device)
                attn_loss = attn_weight * self.guided_attention_loss(
                    alignments, input_lengths, target_lengths
                )
        return attn_loss, attn_weight
class TextMelCollate:
    """ Zero-pads model inputs and targets based on number of frames per step

    Arguments
    ---------
    n_frames_per_step: int
        the number of output frames per step

    Returns
    -------
    result: tuple
        a tuple of tensors to be used as inputs/targets
        (
            text_padded,
            input_lengths,
            mel_padded,
            gate_padded,
            output_lengths,
            len_x,
            labels,
            wavs
        )
    """

    def __init__(self, n_frames_per_step=1):
        self.n_frames_per_step = n_frames_per_step

    # TODO: Make this more intuitive, use the pipeline
    def __call__(self, batch):
        """Collates a training batch from normalized text and mel-spectrogram

        Arguments
        ---------
        batch: list
            [text_normalized, mel_normalized]
        """
        # TODO: Remove for loops and this dirty hack
        raw_batch = list(batch)
        for i in range(
            len(batch)
        ):  # the pipeline returns a dictionary with one element
            batch[i] = batch[i]["mel_text_pair"]
        # Right zero-pad all one-hot text sequences to max input length,
        # sorting items by decreasing text length in the process.
        input_lengths, ids_sorted_decreasing = torch.sort(
            torch.LongTensor([len(x[0]) for x in batch]), dim=0, descending=True
        )
        max_input_len = input_lengths[0]
        text_padded = torch.LongTensor(len(batch), max_input_len)
        text_padded.zero_()
        for i in range(len(ids_sorted_decreasing)):
            text = batch[ids_sorted_decreasing[i]][0]
            text_padded[i, : text.size(0)] = text
        # Right zero-pad mel-spec
        num_mels = batch[0][1].size(0)
        max_target_len = max([x[1].size(1) for x in batch])
        # Round the padded length up to a multiple of n_frames_per_step.
        if max_target_len % self.n_frames_per_step != 0:
            max_target_len += (
                self.n_frames_per_step - max_target_len % self.n_frames_per_step
            )
        assert max_target_len % self.n_frames_per_step == 0
        # include mel padded and gate padded
        mel_padded = torch.FloatTensor(len(batch), num_mels, max_target_len)
        mel_padded.zero_()
        gate_padded = torch.FloatTensor(len(batch), max_target_len)
        gate_padded.zero_()
        output_lengths = torch.LongTensor(len(batch))
        labels, wavs = [], []
        for i in range(len(ids_sorted_decreasing)):
            idx = ids_sorted_decreasing[i]
            mel = batch[idx][1]
            mel_padded[i, :, : mel.size(1)] = mel
            # Every frame from the last real frame onward is a "stop" frame.
            gate_padded[i, mel.size(1) - 1 :] = 1
            output_lengths[i] = mel.size(1)
            labels.append(raw_batch[idx]["label"])
            wavs.append(raw_batch[idx]["wav"])
        # count number of items - characters in text
        len_x = [x[2] for x in batch]
        len_x = torch.Tensor(len_x)
        return (
            text_padded,
            input_lengths,
            mel_padded,
            gate_padded,
            output_lengths,
            len_x,
            labels,
            wavs,
        )
def dynamic_range_compression(x, C=1, clip_val=1e-5):
    """Dynamic range compression for audio signals.

    Computes ``log(clamp(x, min=clip_val) * C)``; the clamp keeps the
    logarithm numerically safe for (near-)zero magnitudes.

    Arguments
    ---------
    x : torch.Tensor
        the input magnitudes
    C : float
        multiplicative scaling factor applied before the log
    clip_val : float
        lower bound applied to the input before the log
    """
    clamped = torch.clamp(x, min=clip_val)
    return torch.log(clamped * C)
def mel_spectogram(
    sample_rate,
    hop_length,
    win_length,
    n_fft,
    n_mels,
    f_min,
    f_max,
    power,
    normalized,
    norm,
    mel_scale,
    compression,
    audio,
):
    """calculates MelSpectrogram for a raw audio signal

    Arguments
    ---------
    sample_rate : int
        Sample rate of audio signal.
    hop_length : int
        Length of hop between STFT windows.
    win_length : int
        Window size.
    n_fft : int
        Size of FFT.
    n_mels : int
        Number of mel filterbanks.
    f_min : float
        Minimum frequency.
    f_max : float
        Maximum frequency.
    power : float
        Exponent for the magnitude spectrogram.
    normalized : bool
        Whether to normalize by magnitude after stft.
    norm : str or None
        If "slaney", divide the triangular mel weights by the width of the mel band
    mel_scale : str
        Scale to use: "htk" or "slaney".
    compression : bool
        whether to do dynamic range compression
    audio : torch.tensor
        input audio signal

    Returns
    -------
    mel : torch.Tensor
        the mel spectrogram, log-compressed when ``compression`` is True
    """
    from torchaudio import transforms

    # The transform is constructed on the fly and moved to the input device;
    # NOTE(review): this rebuilds the filterbank on every call — consider
    # caching if it shows up in profiles.
    audio_to_mel = transforms.MelSpectrogram(
        sample_rate=sample_rate,
        hop_length=hop_length,
        win_length=win_length,
        n_fft=n_fft,
        n_mels=n_mels,
        f_min=f_min,
        f_max=f_max,
        power=power,
        normalized=normalized,
        norm=norm,
        mel_scale=mel_scale,
    ).to(audio.device)
    mel = audio_to_mel(audio)
    if compression:
        mel = dynamic_range_compression(mel)
    return mel
| 59,832 | 30.441408 | 138 | py |
speechbrain | speechbrain-main/speechbrain/lobes/models/segan_model.py | """
This file contains two PyTorch modules which together consist of the SEGAN model architecture
(based on the paper: Pascual et al. https://arxiv.org/pdf/1703.09452.pdf).
Modification of the initialization parameters allows the change of the model described in the class project,
such as turning the generator to a VAE, or removing the latent variable concatenation.
Loss functions for training SEGAN are also defined in this file.
Authors
* Francis Carter 2021
"""
import torch
import torch.nn as nn
import torch.utils.data
import torch.nn.functional as F
from math import floor
class Generator(torch.nn.Module):
    """CNN Autoencoder model to clean speech signals.

    Arguments
    ---------
    kernel_size : int
        Size of the convolutional kernel.
    latent_vae : bool
        Whether or not to convert the autoencoder to a vae
    z_prob : bool
        Whether to remove the latent variable concatenation. Is only applicable if latent_vae is False
    """

    def __init__(self, kernel_size, latent_vae, z_prob):
        super().__init__()
        self.EncodeLayers = torch.nn.ModuleList()
        self.DecodeLayers = torch.nn.ModuleList()
        # NOTE(review): this attribute hard-codes 5 and ignores the
        # `kernel_size` argument (the convolutions below DO use the
        # argument) — confirm whether the attribute is intentional.
        self.kernel_size = 5
        self.latent_vae = latent_vae
        self.z_prob = z_prob
        EncoderChannels = [1, 16, 32, 32, 64, 64, 128, 128, 256, 256, 512, 1024]
        DecoderChannels = [
            2048,
            1024,
            512,
            512,
            256,
            256,
            128,
            128,
            64,
            64,
            32,
            1,
        ]
        # Create encoder and decoder layers.
        for i in range(len(EncoderChannels) - 1):
            # In VAE mode the last encoder layer outputs twice the channels
            # so the result can be split into mean and log-variance.
            if i == len(EncoderChannels) - 2 and self.latent_vae:
                outs = EncoderChannels[i + 1] * 2
            else:
                outs = EncoderChannels[i + 1]
            self.EncodeLayers.append(
                nn.Conv1d(
                    in_channels=EncoderChannels[i],
                    out_channels=outs,
                    kernel_size=kernel_size,
                    stride=2,
                    padding=floor(kernel_size / 2),  # same
                )
            )
        for i in range(len(DecoderChannels) - 1):
            # In VAE mode the first decoder layer consumes the sampled latent
            # directly; otherwise the latent is concatenated with z, doubling
            # the input channel count.
            if i == 0 and self.latent_vae:
                ins = EncoderChannels[-1 * (i + 1)]
            else:
                ins = EncoderChannels[-1 * (i + 1)] * 2
            self.DecodeLayers.append(
                nn.ConvTranspose1d(
                    in_channels=ins,
                    out_channels=EncoderChannels[-1 * (i + 2)],
                    kernel_size=kernel_size
                    + 1,  # adding one to kernel size makes the dimensions match
                    stride=2,
                    padding=floor(kernel_size / 2),  # same
                )
            )

    def forward(self, x):
        """Forward pass through autoencoder"""
        # encode
        skips = []
        x = x.permute(0, 2, 1)
        for i, layer in enumerate(self.EncodeLayers):
            x = layer(x)
            skips.append(x.clone())
            # NOTE(review): compares against DecodeLayers here; both lists
            # hold 11 layers so the effect equals comparing to EncodeLayers.
            if i == len(self.DecodeLayers) - 1:
                continue
            else:
                x = F.leaky_relu(x, negative_slope=0.3)
        # fuse z
        if self.latent_vae:
            z_mean, z_logvar = x.chunk(2, dim=1)
            # Reparameterization trick: sample from N(z_mean, exp(z_logvar)).
            x = z_mean + torch.exp(z_logvar / 2.0) * torch.randn_like(
                z_logvar, device=x.device
            )  # sampling from latent var probability distribution
        elif self.z_prob:
            z = torch.normal(torch.zeros_like(x), torch.ones_like(x))
            x = torch.cat((x, z), 1)
        else:
            z = torch.zeros_like(x)
            x = torch.cat((x, z), 1)
        # decode, concatenating skip connections from the matching encoder
        # layer before each intermediate decoder stage.
        for i, layer in enumerate(self.DecodeLayers):
            x = layer(x)
            if i == len(self.DecodeLayers) - 1:
                continue
            else:
                x = torch.cat((x, skips[-1 * (i + 2)]), 1)
                x = F.leaky_relu(x, negative_slope=0.3)
        x = x.permute(0, 2, 1)
        if self.latent_vae:
            return x, z_mean, z_logvar
        else:
            return x
class Discriminator(torch.nn.Module):
    """CNN discriminator of SEGAN

    Arguments
    ---------
    kernel_size : int
        Size of the convolutional kernel.
    """

    def __init__(self, kernel_size):
        super().__init__()
        self.Layers = torch.nn.ModuleList()
        self.Norms = torch.nn.ModuleList()
        # 2 input channels — presumably the (noisy, candidate-clean)
        # waveform pair; TODO confirm against the training recipe.
        Channels = [2, 16, 32, 32, 64, 64, 128, 128, 256, 256, 512, 1024, 1]
        # Create encoder and decoder layers.
        for i in range(len(Channels) - 1):
            if i != len(Channels) - 2:
                self.Layers.append(
                    nn.Conv1d(
                        in_channels=Channels[i],
                        out_channels=Channels[i + 1],
                        kernel_size=kernel_size,
                        stride=2,
                        padding=floor(kernel_size / 2),  # same
                    )
                )
                self.Norms.append(
                    nn.BatchNorm1d(
                        num_features=Channels[
                            i + 1
                        ]  # not sure what the last dim should be here
                    )
                )
            # output convolution
            else:
                self.Layers.append(
                    nn.Conv1d(
                        in_channels=Channels[i],
                        out_channels=Channels[i + 1],
                        kernel_size=1,
                        stride=1,
                        padding=0,  # same
                    )
                )
        self.Layers.append(
            nn.Linear(in_features=8, out_features=1,)  # Channels[i+1],
        )

    def forward(self, x):
        """forward pass through the discriminator"""
        x = x.permute(0, 2, 1)
        # encode: each conv is followed by batch norm and leaky ReLU
        for i in range(len(self.Norms)):
            x = self.Layers[i](x)
            x = self.Norms[i](x)
            x = F.leaky_relu(x, negative_slope=0.3)
        # output: 1x1 conv then a final linear projection; no sigmoid here
        x = self.Layers[-2](x)
        x = self.Layers[-1](x)
        # x = F.sigmoid(x)
        x = x.permute(0, 2, 1)
        return x  # in logit format
def d1_loss(d_outputs, reduction="mean"):
    """Calculates the loss of the discriminator when the inputs are clean.

    Least-squares GAN objective with a target of 1: 0.5 * (D(x) - 1)^2.

    Arguments
    ---------
    d_outputs : torch.Tensor
        discriminator outputs for clean inputs
    reduction : str
        "mean" averages over all elements; "batch" returns one value
        per batch item

    Returns
    -------
    torch.Tensor
        the reduced loss

    Raises
    ------
    ValueError
        if ``reduction`` is not "mean" or "batch"
    """
    output = 0.5 * ((d_outputs - 1) ** 2)
    if reduction == "mean":
        return output.mean()
    elif reduction == "batch":
        return output.view(output.size(0), -1).mean(1)
    # Previously an unknown reduction silently returned None.
    raise ValueError(f"Unsupported reduction: {reduction}")
def d2_loss(d_outputs, reduction="mean"):
    """Calculates the loss of the discriminator when the inputs are not clean.

    Least-squares GAN objective with a target of 0: 0.5 * D(x)^2.

    Arguments
    ---------
    d_outputs : torch.Tensor
        discriminator outputs for enhanced (non-clean) inputs
    reduction : str
        "mean" averages over all elements; "batch" returns one value
        per batch item

    Returns
    -------
    torch.Tensor
        the reduced loss

    Raises
    ------
    ValueError
        if ``reduction`` is not "mean" or "batch"
    """
    output = 0.5 * ((d_outputs) ** 2)
    if reduction == "mean":
        return output.mean()
    elif reduction == "batch":
        return output.view(output.size(0), -1).mean(1)
    # Previously an unknown reduction silently returned None.
    raise ValueError(f"Unsupported reduction: {reduction}")
def g3_loss(
    d_outputs,
    predictions,
    targets,
    length,
    l1LossCoeff,
    klLossCoeff,
    z_mean=None,
    z_logvar=None,
    reduction="mean",
):
    """Calculates the loss of the generator given the discriminator outputs.

    Combines the LSGAN generator term 0.5 * (D(G(x)) - 1)^2 with an L1
    reconstruction penalty and, when ``z_mean``/``z_logvar`` are given
    (VAE training), a KL divergence from the unit Gaussian prior.

    Arguments
    ---------
    d_outputs : torch.Tensor
        discriminator outputs for the generated samples
    predictions : torch.Tensor
        generator outputs
    targets : torch.Tensor
        clean reference signals
    length : torch.Tensor
        relative lengths (currently unused, kept for API compatibility)
    l1LossCoeff : float
        weight of the L1 reconstruction term
    klLossCoeff : float
        weight of the KL term (VAE mode only)
    z_mean : torch.Tensor
        latent means; providing them switches on the KL term
    z_logvar : torch.Tensor
        latent log-variances (must be given together with ``z_mean``)
    reduction : str
        "mean" averages over all elements; "batch" returns one value
        per batch item

    Raises
    ------
    ValueError
        if ``reduction`` is not "mean" or "batch"
    """
    discrimloss = 0.5 * ((d_outputs - 1) ** 2)
    l1norm = torch.nn.functional.l1_loss(predictions, targets, reduction="none")
    if z_mean is not None:  # model is being trained as a VAE
        ZERO = torch.zeros_like(z_mean)
        distq = torch.distributions.normal.Normal(
            z_mean, torch.exp(z_logvar) ** (1 / 2)
        )
        distp = torch.distributions.normal.Normal(
            ZERO, torch.exp(ZERO) ** (1 / 2)
        )
        # KL(q || p) summed over latent dimensions, averaged over the batch.
        kl = torch.distributions.kl.kl_divergence(distq, distp)
        kl = kl.sum(axis=1).sum(axis=1).mean()
    else:
        kl = 0
    if reduction == "mean":
        return (
            discrimloss.mean() + l1LossCoeff * l1norm.mean() + klLossCoeff * kl
        )
    elif reduction == "batch":
        dloss = discrimloss.view(discrimloss.size(0), -1).mean(1)
        lloss = l1norm.view(l1norm.size(0), -1).mean(1)
        return dloss + l1LossCoeff * lloss + klLossCoeff * kl
    # Previously an unknown reduction silently returned None.
    raise ValueError(f"Unsupported reduction: {reduction}")
| 8,123 | 31.496 | 108 | py |
speechbrain | speechbrain-main/speechbrain/lobes/models/L2I.py | """This file implements the necessary classes and functions to implement Listen-to-Interpret (L2I) interpretation method from https://arxiv.org/abs/2202.11479v2
Authors
* Cem Subakan 2022
* Francesco Paissan 2022
"""
import torch.nn as nn
import torch.nn.functional as F
import torch
from speechbrain.lobes.models.PIQ import ResBlockAudio
class Psi(nn.Module):
    """Convolutional Layers to estimate NMF Activations from Classifier Representations

    Arguments
    ---------
    n_comp : int
        Number of NMF components (or equivalently number of neurons at the output per timestep)
    T: int
        The targeted length along the time dimension
    in_emb_dims: List with int elements
        A list with length 3 that contains the dimensionality of the input dimensions
        The list needs to match the number of channels in the input classifier representations
        The last entry should be the smallest entry

    Example
    -------
    >>> inp = [torch.ones(2, 150, 6, 2), torch.ones(2, 100, 6, 2), torch.ones(2, 50, 12, 5)]
    >>> psi = Psi(n_comp=100, T=120, in_emb_dims=[150, 100, 50])
    >>> h = psi(inp)
    >>> print(h.shape)
    torch.Size([2, 100, 120])
    """

    def __init__(self, n_comp=100, T=431, in_emb_dims=[2048, 1024, 512]):
        """
        Computes NMF activations given classifier hidden representations
        """
        super(Psi, self).__init__()
        self.in_emb_dims = in_emb_dims
        # Doubles both spatial axes of the two deepest feature maps.
        self.upsamp = nn.UpsamplingBilinear2d(scale_factor=(2, 2))
        # Resamples the time axis to exactly T steps and collapses frequency.
        self.upsamp_time = nn.UpsamplingBilinear2d(size=(T, 1))
        out_c = min(in_emb_dims)
        self.c1 = nn.Conv2d(
            in_emb_dims[0], out_c, kernel_size=3, padding="same"
        )
        self.c2 = nn.Conv2d(
            in_emb_dims[1], out_c, kernel_size=3, padding="same"
        )
        self.out_conv = nn.Conv2d(out_c, n_comp, kernel_size=3, padding="same")
        self.conv = nn.Sequential(
            nn.Conv2d(out_c * 3, out_c, kernel_size=3, padding="same"),
            nn.BatchNorm2d(out_c),
            nn.ReLU(),
        )
        self.act = nn.ReLU()

    def forward(self, inp):
        """This forward function returns the NMF time activations given classifier activations

        Arguments
        ---------
        inp: list
            A length 3 list of classifier input representations.

        Returns
        -------
        x: torch.Tensor
            NMF activations of shape (batch, n_comp, T)
        """
        error = "in PSI doesn't match. The embedding dimensions need to be consistent with the list self.in_emb_dims"
        for i, in_emb_dim in enumerate(self.in_emb_dims):
            # sanity check on shapes
            assert inp[i].shape[1] == self.in_emb_dims[i], (
                "Nr. of channels " + error
            )
        assert inp[0].shape[2] == inp[1].shape[2], "Spatial dimension " + error
        assert inp[0].shape[3] == inp[1].shape[3], "Spatial dimension " + error
        assert 2 * inp[0].shape[3] == (inp[2].shape[3] - 1), (
            "Spatial dimension "
            + error
            + f" 1st (idx 0) element has shape {inp[0].shape[3]} second element (idx 1) has shape {inp[2].shape[3]}"
        )
        x1, x2, x3 = inp
        # upsample inp[0] and inp[1] time and frequency axis once
        x1 = self.upsamp(x1)
        x2 = self.upsamp(x2)
        # compress feature number to the min among given hidden representations
        x1 = self.act(self.c1(x1))
        x2 = self.act(self.c2(x2))
        # for compatibility with cnn14 fixed frequency dimension
        x1 = F.pad(x1, (0, 1, 0, 0))
        x2 = F.pad(x2, (0, 1, 0, 0))
        x = torch.cat((x1, x2, x3), axis=1)
        # upsample time axis and collapse freq
        x = self.upsamp_time(x)
        # mix contribution for the three hidden layers -- work on this when fixing training
        x = self.conv(x)
        x = self.act(self.out_conv(x)).squeeze(3)
        return x
class NMFDecoderAudio(nn.Module):
    """This class implements an NMF decoder

    Arguments
    ---------
    n_comp : int
        Number of NMF components
    n_freq : int
        The number of frequency bins in the NMF dictionary
    device : str
        The device to run the model

    Example:
    --------
    >>> NMF_dec = NMFDecoderAudio(20, 210, device='cpu')
    >>> H = torch.rand(1, 20, 150)
    >>> Xhat = NMF_dec.forward(H)
    >>> print(Xhat.shape)
    torch.Size([1, 210, 150])
    """

    def __init__(self, n_comp=100, n_freq=513, device="cuda"):
        super().__init__()

        # Trainable dictionary; kept nonnegative via a ReLU at use time.
        initial_dictionary = 0.1 * torch.rand(n_freq, n_comp)
        self.W = nn.Parameter(initial_dictionary, requires_grad=True)
        self.activ = nn.ReLU()

    def forward(self, H):
        """Reconstructs the spectrogram as W @ H from the activations H.

        Arguments
        ---------
        H : torch.Tensor
            The activations Tensor with shape B x n_comp x T
            where B = Batchsize
                  n_comp = number of NMF components
                  T = number of timepoints
        """
        # Rectify both factors so the reconstruction is nonnegative.
        activations = self.activ(H)
        dictionary = self.activ(self.W).unsqueeze(0)

        # Batched product of (1, n_freq, n_comp) with (B, n_comp, T).
        return torch.einsum("bij, bjk -> bik", dictionary, activations)

    def return_W(self):
        """This function returns the (rectified) NMF dictionary"""
        return self.activ(self.W)
def weights_init(m):
    """
    Applies Xavier initialization to convolutional module weights.

    Non-convolutional modules are left untouched; convolutional modules
    without the expected weight/bias attributes are skipped with a notice.

    Arguments
    ---------
    m : nn.Module
        Module to initialize.
    """
    classname = m.__class__.__name__
    if "Conv" not in classname:
        return
    try:
        nn.init.xavier_uniform_(m.weight.data)
        m.bias.data.fill_(0)
    except AttributeError:
        print("Skipping initialization of ", classname)
class PsiOptimized(nn.Module):
    """Convolutional Layers to estimate NMF Activations from Classifier Representations, optimized for log-spectra.

    Arguments
    ---------
    dim: int
        Dimension of the hidden representations (input to the classifier).
    K : int
        Number of NMF components (or equivalently number of neurons at the output per timestep)
    numclasses : int
        Number of possible classes.
    use_adapter : bool
        `True` if you wish to learn an adapter for the latent representations.
    adapter_reduce_dim: bool
        `True` if the adapter should compress the latent representations.

    Example
    -------
    >>> inp = torch.randn(1, 256, 26, 32)
    >>> psi = PsiOptimized(dim=256, K=100, use_adapter=False, adapter_reduce_dim=False)
    >>> h, inp_ad= psi(inp)
    >>> print(h.shape, inp_ad.shape)
    torch.Size([1, 1, 417, 100]) torch.Size([1, 256, 26, 32])
    """

    def __init__(
        self,
        dim=128,
        K=100,
        numclasses=50,
        use_adapter=False,
        adapter_reduce_dim=True,
    ):
        """
        Computes NMF activations from hidden state.
        """
        super().__init__()
        self.use_adapter = use_adapter
        self.adapter_reduce_dim = adapter_reduce_dim
        # NOTE(review): self.down/self.up are only created when use_adapter
        # is True, but forward() consults only adapter_reduce_dim — calling
        # with use_adapter=False and adapter_reduce_dim=True would raise an
        # AttributeError. Confirm intended combinations.
        if use_adapter:
            self.adapter = ResBlockAudio(dim)

            if adapter_reduce_dim:
                self.down = nn.Conv2d(dim, dim, 4, (2, 2), 1)
                self.up = nn.ConvTranspose2d(dim, dim, 4, (2, 2), 1)
        # Transposed-conv stack that upsamples the latent map and projects
        # the 513 frequency bins down to K NMF components.
        self.decoder = nn.Sequential(
            nn.ConvTranspose2d(dim, dim, 3, (2, 2), 1),
            nn.ReLU(True),
            nn.BatchNorm2d(dim),
            nn.ConvTranspose2d(dim, dim, 4, (2, 2), 1),
            nn.ReLU(),
            nn.BatchNorm2d(dim),
            nn.ConvTranspose2d(dim, dim, 4, (2, 2), 1),
            nn.ReLU(),
            nn.BatchNorm2d(dim),
            nn.ConvTranspose2d(dim, dim, 4, (2, 2), 1),
            nn.ReLU(),
            nn.BatchNorm2d(dim),
            nn.ConvTranspose2d(dim, 1, 12, 1, 1),
            nn.ReLU(),
            nn.Linear(513, K),
            nn.ReLU(),
        )
        self.apply(weights_init)

    def forward(self, hs):
        """
        Computes forward step.

        Arguments
        -------
        hs : torch.Tensor
            Latent representations (input to the classifier). Expected shape `torch.Size([B, C, H, W])`.

        Returns
        -------
        NMF activations and adapted representations. Shape `torch.Size([B, 1, T, 100])`. : torch.Tensor
        """
        if self.use_adapter:
            hcat = self.adapter(hs)
        else:
            hcat = hs

        if self.adapter_reduce_dim:
            hcat = self.down(hcat)
            z_q_x_st = self.up(hcat)
            out = self.decoder(z_q_x_st)
        else:
            out = self.decoder(hcat)
        return out, hcat
class Theta(nn.Module):
    """This class implements a linear classifier on top of NMF activations

    Arguments
    ---------
    n_comp : int
        Number of NMF components
    T : int
        Number of Timepoints in the NMF activations
    num_classes : int
        Number of classes that the classifier works with

    Example:
    --------
    >>> theta = Theta(30, 120, 50)
    >>> H = torch.rand(1, 30, 120)
    >>> c_hat = theta.forward(H)
    >>> print(c_hat.shape)
    torch.Size([1, 50])
    """

    def __init__(self, n_comp=100, T=431, num_classes=50):
        super().__init__()
        # Attention-like pooling: a bias-free linear map collapsing the
        # T timepoints down to one value per component.
        self.hard_att = nn.Linear(T, 1, bias=False)
        # Bias-free classification head followed by a softmax over classes.
        class_proj = nn.Linear(n_comp, num_classes, bias=False)
        self.classifier = nn.Sequential(class_proj, nn.Softmax(dim=1))

    def forward(self, H):
        """Collapses the time axis, then classifies the pooled activations.

        Arguments
        ---------
        H : torch.Tensor
            The activations Tensor with shape B x n_comp x T
            where B = Batchsize
                  n_comp = number of NMF components
                  T = number of timepoints
        """
        pooled = self.hard_att(H).squeeze(2)
        return self.classifier(pooled)
class NMFEncoder(nn.Module):
    """This class implements an NMF encoder with a convolutional network

    Arguments
    ---------
    n_freq : int
        The number of frequency bins in the NMF dictionary
    n_comp : int
        Number of NMF components

    Example:
    --------
    >>> nmfencoder = NMFEncoder(513, 100)
    >>> X = torch.rand(1, 513, 240)
    >>> Hhat = nmfencoder(X)
    >>> print(Hhat.shape)
    torch.Size([1, 100, 240])
    """

    def __init__(self, n_freq, n_comp):
        super().__init__()
        # Three length-preserving 1d convolutions (padding="same"), each
        # followed by a ReLU so the estimated activations stay nonnegative.
        stages = [
            nn.Conv1d(n_freq, 256, kernel_size=8, padding="same"),
            nn.ReLU(),
            nn.Conv1d(256, 128, kernel_size=8, padding="same"),
            nn.ReLU(),
            nn.Conv1d(128, n_comp, kernel_size=8, padding="same"),
            nn.ReLU(),
        ]
        self.convenc = nn.Sequential(*stages)

    def forward(self, X):
        """Estimates the NMF activations for an input spectrogram.

        Arguments
        ---------
        X : torch.Tensor
            The input spectrogram Tensor with shape B x n_freq x T
            where B = Batchsize
                  n_freq = nfft for the input spectrogram
                  T = number of timepoints
        """
        return self.convenc(X)
| 11,147 | 29.376022 | 160 | py |
speechbrain | speechbrain-main/speechbrain/lobes/models/fairseq_wav2vec.py | """This lobe enables the integration of fairseq pretrained wav2vec models.
Reference: https://arxiv.org/abs/2006.11477
Reference: https://arxiv.org/abs/1904.05862
FairSeq >= 1.0.0 needs to be installed: https://fairseq.readthedocs.io/en/latest/
Authors
* Titouan Parcollet 2021
* Salima Mdhaffar 2021
"""
import torch
import logging
import torch.nn.functional as F
from torch import nn
from speechbrain.utils.data_utils import download_file
from speechbrain.dataio.dataio import length_to_mask
# We check if fairseq is installed.
try:
import fairseq
except ImportError:
MSG = "Please install Fairseq to use pretrained wav2vec\n"
MSG += "E.G. run: pip install fairseq"
raise ImportError(MSG)
logger = logging.getLogger(__name__)
class FairseqWav2Vec2(nn.Module):
    """This lobe enables the integration of fairseq pretrained wav2vec2.0 models.

    Source paper: https://arxiv.org/abs/2006.11477
    FairSeq >= 0.10.0 needs to be installed:
    https://fairseq.readthedocs.io/en/latest/

    The model can be used as a fixed features extractor or can be finetuned. It
    will download automatically the model if a url is given (e.g FairSeq
    repository from GitHub).

    Arguments
    ---------
    pretrained_path : str
        Path of the pretrained wav2vec2 model. It can be a url or a local path.
    save_path : str
        Path and filename of the downloaded model.
    input_norm : bool (default: None)
        If True, a layer_norm (affine) will be applied to the input waveform.
        By default, it is extracted from the checkpoint of the downloaded model
        in order to match the pretraining conditions. However, if this information
        is not given in the checkpoint, it has to be given manually.
    output_norm : bool (default: False)
        If True, a layer_norm (affine) will be applied to the output obtained
        from the wav2vec model.
    freeze : bool (default: False)
        If True, the model is frozen. If False, the model will be trained
        alongside with the rest of the pipeline.
    freeze_feature_extractor : bool (default: False)
        If True (and freeze is False), only the convolutional feature
        extractor is frozen; the transformer layers remain trainable.
    pretrain : bool (default: True)
        If True, the model is pretrained with the specified source.
        If False, the randomly-initialized model is instantiated.
    dropout : float (default: None)
        If different from None (0.0 to 1.0), it will override the given fairseq
        dropout rates. This is useful if the wav2vec2 model has been trained
        without dropout and one wants to reactivate it for downstream task
        fine-tuning (better performance observed).
    layer_drop : float (default: None)
        If different from None (0.0 to 1.0), it will override the given fairseq
        layer_drop rate. This is useful if the wav2vec2 model has been trained
        without layer_drop and one wants to reactivate it for downstream task
        fine-tuning.

    Example
    -------
    >>> inputs = torch.rand([10, 600])
    >>> model_url = "https://dl.fbaipublicfiles.com/fairseq/wav2vec/wav2vec_small.pt"
    >>> save_path = "models_checkpoints/wav2vec2.pt"
    >>> model = FairseqWav2Vec2(model_url, save_path)
    >>> outputs = model(inputs)
    >>> outputs.shape
    torch.Size([10, 100, 768])
    """

    def __init__(
        self,
        pretrained_path,
        save_path,
        input_norm=None,
        output_norm=False,
        freeze=False,
        freeze_feature_extractor=False,
        pretrain=True,
        dropout=None,
        layer_drop=None,
    ):
        super().__init__()

        # Download the pretrained wav2vec2 model. It can be local or online.
        download_file(pretrained_path, save_path)

        # During pretraining dropout might be set to 0. However, we might want
        # to apply dropout when fine-tuning on a downstream task. Hence we need
        # to modify the fairseq cfg to activate dropout (if requested).
        overrides = {}
        if not freeze and dropout is not None:
            overrides["model"] = {}
            if dropout is not None:
                overrides["model"]["dropout"] = dropout
                overrides["model"]["dropout_input"] = dropout
                overrides["model"]["attention_dropout"] = dropout
            # NOTE(review): layer_drop is only applied when dropout is also
            # set (and freeze is False); a layer_drop value given on its own
            # is silently ignored — confirm this is intended.
            if layer_drop is not None:
                overrides["model"]["layer_drop"] = layer_drop
        (
            model,
            cfg,
            task,
        ) = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [save_path], arg_overrides=overrides
        )

        # wav2vec pretrained models may need the input waveform to be normalized
        # Hence, we check if the model has been trained with or without it.
        # If the information isn't contained in the checkpoint IT HAS TO BE GIVEN
        # BY THE USER.
        if input_norm is None:
            if hasattr(cfg["task"], "normalize"):
                self.normalize = cfg["task"].normalize
            elif hasattr(cfg, "normalize"):
                self.normalize = cfg.normalize
            else:
                self.normalize = False
        else:
            self.normalize = input_norm
        model = model[0]
        self.model = model
        self.freeze = freeze
        self.output_norm = output_norm
        self.freeze_feature_extractor = freeze_feature_extractor
        if self.freeze:
            logger.warning(
                "speechbrain.lobes.models.fairseq_wav2vec - wav2vec 2.0 is frozen."
            )
            self.model.eval()
            # Freeze parameters
            for param in self.model.parameters():
                param.requires_grad = False
        else:
            self.model.train()
            if self.freeze_feature_extractor:
                logger.warning(
                    "speechbrain.lobes.models.fairseq_wav2vec - wav2vec 2.0 feature extractor is frozen."
                )
                self.model.feature_extractor.eval()
                for param in self.model.feature_extractor.parameters():
                    param.requires_grad = False

        # Randomly initialized layers if pretrain is False
        if not (pretrain):
            self.reset_layer(self.model)

        # Following the fairseq implementation of downstream training,
        # we remove some modules that are unnecessary.
        self.remove_pretraining_modules()

    def forward(self, wav, wav_lens):
        """Takes an input waveform and return its corresponding wav2vec encoding.

        Arguments
        ---------
        wav : torch.Tensor (signal)
            A batch of audio signals to transform to features.
        wav_lens : torch.Tensor
            Relative lengths of the signals (SpeechBrain format),
            used to build the padding mask.
        """
        padding_mask = self.make_masks(wav, wav_len=wav_lens)

        # If we freeze, we simply remove all grads and features from the graph.
        if self.freeze:
            with torch.no_grad():
                return self.extract_features(wav, padding_mask)

        return self.extract_features(wav, padding_mask)

    def extract_features(self, wav, padding_mask=None):
        """Extracts the wav2vect embeddings"""
        # We normalize the input signal if needed.
        if self.normalize:
            wav = F.layer_norm(wav, wav.shape[1:])

        # Extract wav2vec output
        out = self.model.extract_features(
            wav, padding_mask=padding_mask, mask=False
        )["x"]

        # We normalize the output if required
        if self.output_norm:
            out = F.layer_norm(out, out.shape[1:])

        return out

    def reset_layer(self, model):
        """Reinitializes the parameters of the network"""
        if hasattr(model, "reset_parameters"):
            model.reset_parameters()
        # Recurse into children (guard avoids self-recursion on leaves).
        for child_layer in model.children():
            if model != child_layer:
                self.reset_layer(child_layer)

    def remove_pretraining_modules(self):
        """ Remove unneeded modules. Inspired by the same fairseq function."""
        self.model.quantizer = None
        self.model.project_q = None
        self.model.target_glu = None
        self.model.final_proj = None

    def make_masks(self, src, wav_len=None, pad_idx=0):
        """This method generates the padding masks.

        Arguments
        ---------
        src : tensor
            The sequence to the encoder (required).
        wav_len : tensor
            The relative length of the wav given in SpeechBrain format.
        pad_idx : int
            The index for <pad> token (default=0).
        """
        src_key_padding_mask = None
        if wav_len is not None:
            # Convert relative lengths to absolute, then invert the valid
            # mask: True marks padded positions, as fairseq expects.
            abs_len = torch.round(wav_len * src.shape[1])
            src_key_padding_mask = ~length_to_mask(abs_len).bool()

        return src_key_padding_mask
class FairseqWav2Vec1(nn.Module):
    """This lobes enables the integration of fairseq pretrained wav2vec1.0 models.

    Arguments
    ---------
    pretrained_path : str
        Path of the pretrained wav2vec1 model. It can be a url or a local path.
    save_path : str
        Path and filename of the downloaded model.
    output_norm : bool (default: True)
        If True, a layer_norm (affine) will be applied to the output obtained
        from the wav2vec model.
    freeze : bool (default: True)
        If True, the model is frozen. If False, the model will be trained
        alongside with the rest of the pipeline.
    pretrain : bool (default: True)
        If True, the model is pretrained with the specified source.
        If False, the randomly-initialized model is instantiated.

    Example
    -------
    >>> inputs = torch.rand([10, 600])
    >>> model_url = ""
    >>> save_path = "models_checkpoints/wav2vec.pt"
    >>> model = FairseqWav2Vec1(model_url, save_path)
    >>> outputs = model(inputs)
    >>> outputs.shape
    torch.Size([10, 100, 512])
    """

    def __init__(
        self,
        pretrained_path,
        save_path,
        output_norm=True,
        freeze=True,
        pretrain=True,
    ):
        super().__init__()
        self.freeze = freeze
        self.output_norm = output_norm

        # Download the pretrained wav2vec1 model. It can be local or online.
        download_file(pretrained_path, save_path)

        # Load from the downloaded local copy: pretrained_path may be a URL,
        # which fairseq cannot open directly (this also matches the behavior
        # of FairseqWav2Vec2).
        (
            model,
            cfg,
            task,
        ) = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [save_path]
        )
        self.model = model
        self.model = self.model[0]
        if self.freeze:
            self.model.eval()

        # Randomly initialized layers if pretrain is False
        if not pretrain:
            self.reset_layer(self.model)

    def forward(self, wav):
        """Takes an input waveform and return its corresponding wav2vec encoding.

        Arguments
        ---------
        wav : torch.Tensor (signal)
            A batch of audio signals to transform to features.
        """
        # If we freeze, we simply remove all grads and features from the graph.
        if self.freeze:
            with torch.no_grad():
                return self.extract_features(wav).detach()

        return self.extract_features(wav)

    def extract_features(self, wav):
        """Extracts the wav2vect embeddings"""
        out = self.model.feature_extractor(wav)
        out = self.model.feature_aggregator(out).squeeze(0)
        out = out.transpose(2, 1)

        # We normalize the output if required
        if self.output_norm:
            out = F.layer_norm(out, out.shape)

        return out

    def reset_layer(self, model):
        """Reinitializes the parameters of the network"""
        if hasattr(model, "reset_parameters"):
            model.reset_parameters()
        for child_layer in model.children():
            if model != child_layer:
                self.reset_layer(child_layer)
| 11,652 | 33.785075 | 105 | py |
speechbrain | speechbrain-main/speechbrain/lobes/models/convolution.py | """This is a module to ensemble a convolution (depthwise) encoder with or without residule connection.
Authors
* Jianyuan Zhong 2020
"""
import torch
from speechbrain.nnet.CNN import Conv2d
from speechbrain.nnet.containers import Sequential
from speechbrain.nnet.normalization import LayerNorm
class ConvolutionFrontEnd(Sequential):
    """This is a module to ensemble a convolution (depthwise) encoder with or
    without residual connection.

    Arguments
    ----------
    input_shape : tuple
        Expected shape of the input tensor.
    num_blocks : int
        Number of blocks (default 3).
    num_layers_per_block : int
        Number of convolution layers in each block (default 5).
    out_channels : Optional(list[int])
        Number of output channels for each block (default [128, 256, 512]).
    kernel_sizes : Optional(list[int])
        Kernel size of the convolution layers in each block (default [3, 3, 3]).
    strides : Optional(list[int])
        Striding factor for each block; the stride is applied at the last
        convolution layer of each block (default [1, 2, 2]).
    dilations : Optional(list[int])
        Dilation factor for each block (default [1, 1, 1]).
    residuals : Optional(list[bool])
        Whether to apply a residual connection at each block
        (default [True, True, True]).
    conv_module : torch class
        Convolution module to use (default Conv2d).
    activation : torch class
        Activation function for each block (default LeakyReLU).
    norm : torch class
        Normalization to regularize the model (default LayerNorm).
    dropout : float
        Dropout (default 0.1).
    conv_bias : bool
        Whether to add a bias term to the convolution layers (default True).
    padding : str
        Padding mode passed to the convolution layers (default "same").
    conv_init : str
        Initialization scheme for the convolution layers (default None).

    Example
    -------
    >>> x = torch.rand((8, 30, 10))
    >>> conv = ConvolutionFrontEnd(input_shape=x.shape)
    >>> out = conv(x)
    >>> out.shape
    torch.Size([8, 8, 3, 512])
    """

    def __init__(
        self,
        input_shape,
        num_blocks=3,
        num_layers_per_block=5,
        out_channels=[128, 256, 512],
        kernel_sizes=[3, 3, 3],
        strides=[1, 2, 2],
        dilations=[1, 1, 1],
        residuals=[True, True, True],
        conv_module=Conv2d,
        activation=torch.nn.LeakyReLU,
        norm=LayerNorm,
        dropout=0.1,
        conv_bias=True,
        padding="same",
        conv_init=None,
    ):
        super().__init__(input_shape=input_shape)
        # Stack one ConvBlock per block, indexing the per-block settings.
        for i in range(num_blocks):
            self.append(
                ConvBlock,
                num_layers=num_layers_per_block,
                out_channels=out_channels[i],
                kernel_size=kernel_sizes[i],
                stride=strides[i],
                dilation=dilations[i],
                residual=residuals[i],
                conv_module=conv_module,
                activation=activation,
                norm=norm,
                dropout=dropout,
                layer_name=f"convblock_{i}",
                conv_bias=conv_bias,
                padding=padding,
                conv_init=conv_init,
            )
class ConvBlock(torch.nn.Module):
    """An implementation of convolution block with 1d or 2d convolutions (depthwise).

    Arguments
    ---------
    num_layers : int
        Number of convolution layers stacked in this block.
    out_channels : int
        Number of output channels of this block.
    input_shape : tuple
        Expected shape of an example input.
    kernel_size : int
        Kernel size of the convolution layers (default 3).
    stride : int
        Striding factor applied at the last convolution layer (default 1).
    dilation : int
        Dilation factor of the convolutions (default 1).
    residual : bool
        Whether to apply a residual connection in this block (default False).
    conv_module : torch class
        Convolution class used to build each layer (default Conv2d).
    activation : torch class
        Activation function for this block (default torch.nn.LeakyReLU).
    norm : torch class
        Normalization to regularize the model (default None, i.e. no norm).
    dropout : float
        Dropout rate (default 0.1).
    conv_bias : bool
        Whether the convolution layers include a bias term (default True).
    padding : str
        Padding mode forwarded to the convolution layers (default "same").
    conv_init : str or None
        Weight-initialization scheme forwarded to the convolutions (default None).

    Example
    -------
    >>> x = torch.rand((8, 30, 10))
    >>> conv = ConvBlock(2, 16, input_shape=x.shape)
    >>> out = conv(x)
    >>> x.shape
    torch.Size([8, 30, 10])
    """

    def __init__(
        self,
        num_layers,
        out_channels,
        input_shape,
        kernel_size=3,
        stride=1,
        dilation=1,
        residual=False,
        conv_module=Conv2d,
        activation=torch.nn.LeakyReLU,
        norm=None,
        dropout=0.1,
        conv_bias=True,
        padding="same",
        conv_init=None,
    ):
        super().__init__()
        self.convs = Sequential(input_shape=input_shape)

        # Each layer is conv -> (norm) -> activation -> dropout. The stride
        # (downsampling) is only applied at the last convolution layer.
        for i in range(num_layers):
            self.convs.append(
                conv_module,
                out_channels=out_channels,
                kernel_size=kernel_size,
                stride=stride if i == num_layers - 1 else 1,
                dilation=dilation,
                layer_name=f"conv_{i}",
                bias=conv_bias,
                padding=padding,
                conv_init=conv_init,
            )
            if norm is not None:
                self.convs.append(norm, layer_name=f"norm_{i}")
            self.convs.append(activation(), layer_name=f"act_{i}")
            self.convs.append(
                torch.nn.Dropout(dropout), layer_name=f"dropout_{i}"
            )

        self.reduce_conv = None
        self.drop = None
        if residual:
            # 1x1 conv matches the residual branch to the main branch's
            # channel count and stride.
            # NOTE(review): `norm` is appended unconditionally below, so
            # residual=True combined with norm=None would append None —
            # confirm callers always pass a norm for residual blocks.
            self.reduce_conv = Sequential(input_shape=input_shape)
            self.reduce_conv.append(
                conv_module,
                out_channels=out_channels,
                kernel_size=1,
                stride=stride,
                layer_name="conv",
            )
            self.reduce_conv.append(norm, layer_name="norm")
            self.drop = torch.nn.Dropout(dropout)

    def forward(self, x):
        """ Processes the input tensor x and returns an output tensor."""
        out = self.convs(x)
        # Residual path (only built when residual=True): add the projected
        # input, then apply dropout to the sum.
        if self.reduce_conv:
            out = out + self.reduce_conv(x)
            out = self.drop(out)
        return out
| 5,520 | 30.369318 | 107 | py |
speechbrain | speechbrain-main/speechbrain/lobes/models/ESPnetVGG.py | """This lobes replicate the encoder first introduced in ESPNET v1
source: https://github.com/espnet/espnet/blob/master/espnet/nets/pytorch_backend/rnn/encoders.py
Authors
* Titouan Parcollet 2020
"""
import torch
import speechbrain as sb
class ESPnetVGG(sb.nnet.containers.Sequential):
    """This model is a combination of CNNs and RNNs following
    the ESPnet encoder. (VGG+RNN+MLP+tanh())

    Arguments
    ---------
    input_shape : tuple
        The shape of an example expected input.
    activation : torch class
        A class used for constructing the activation layers. For CNN and DNN.
    dropout : float
        Neuron dropout rate, applied to RNN only.
    cnn_channels : list of ints
        A list of the number of output channels for each CNN block.
    rnn_class : torch class
        The type of RNN to use (LiGRU, LSTM, GRU, RNN)
    rnn_layers : int
        The number of recurrent layers to include.
    rnn_neurons : int
        Number of neurons in each layer of the RNN.
    rnn_bidirectional : bool
        Whether this model will process just forward or both directions.
    rnn_re_init : bool
        Forwarded to the RNN class as ``re_init`` (controls its weight
        re-initialization).
    projection_neurons : int
        The number of neurons in the last linear layer.

    Example
    -------
    >>> inputs = torch.rand([10, 40, 60])
    >>> model = ESPnetVGG(input_shape=inputs.shape)
    >>> outputs = model(inputs)
    >>> outputs.shape
    torch.Size([10, 10, 512])
    """

    def __init__(
        self,
        input_shape,
        activation=torch.nn.ReLU,
        dropout=0.15,
        cnn_channels=[64, 128],
        rnn_class=sb.nnet.RNN.LSTM,
        rnn_layers=4,
        rnn_neurons=512,
        rnn_bidirectional=True,
        rnn_re_init=False,
        projection_neurons=512,
    ):
        super().__init__(input_shape=input_shape)

        self.append(sb.nnet.containers.Sequential, layer_name="VGG")

        # First VGG stage: two 3x3 convolutions followed by 2x2 max-pooling.
        self.append(
            sb.nnet.CNN.Conv2d,
            out_channels=cnn_channels[0],
            kernel_size=(3, 3),
            layer_name="conv_1_1",
        )
        self.append(activation(), layer_name="act_1_1")
        self.append(
            sb.nnet.CNN.Conv2d,
            out_channels=cnn_channels[0],
            kernel_size=(3, 3),
            layer_name="conv_1_2",
        )
        self.append(activation(), layer_name="act_1_2")
        self.append(
            sb.nnet.pooling.Pooling2d(
                pool_type="max", kernel_size=(2, 2), pool_axis=(1, 2),
            ),
            layer_name="pooling_1",
        )

        # Second VGG stage: same structure with more channels.
        self.append(
            sb.nnet.CNN.Conv2d,
            out_channels=cnn_channels[1],
            kernel_size=(3, 3),
            layer_name="conv_2_1",
        )
        self.append(activation(), layer_name="act_2_1")
        self.append(
            sb.nnet.CNN.Conv2d,
            out_channels=cnn_channels[1],
            kernel_size=(3, 3),
            layer_name="conv_2_2",
        )
        self.append(activation(), layer_name="act_2_2")
        self.append(
            sb.nnet.pooling.Pooling2d(
                pool_type="max", kernel_size=(2, 2), pool_axis=(1, 2),
            ),
            layer_name="pooling_2",
        )

        # Optional recurrent stage on top of the CNN features.
        if rnn_layers > 0:
            self.append(
                rnn_class,
                layer_name="RNN",
                hidden_size=rnn_neurons,
                num_layers=rnn_layers,
                dropout=dropout,
                bidirectional=rnn_bidirectional,
                re_init=rnn_re_init,
            )

        # Final projection: linear + tanh, as in the ESPnet encoder.
        self.append(
            sb.nnet.linear.Linear,
            n_neurons=projection_neurons,
            layer_name="proj",
        )
        self.append(torch.nn.Tanh(), layer_name="proj_act")
| 3,675 | 29.131148 | 96 | py |
speechbrain | speechbrain-main/speechbrain/lobes/models/EnhanceResnet.py | """Wide ResNet for Speech Enhancement.
Author
* Peter Plantinga 2022
"""
import torch
import speechbrain as sb
from speechbrain.processing.features import STFT, ISTFT, spectral_magnitude
class EnhanceResnet(torch.nn.Module):
    """Model for enhancement based on Wide ResNet.

    Full model description at: https://arxiv.org/pdf/2112.06068.pdf

    Arguments
    ---------
    n_fft : int
        Number of points in the fourier transform, see ``speechbrain.processing.features.STFT``
    win_length : int
        Length of stft window in ms, see ``speechbrain.processing.features.STFT``
    hop_length : int
        Time between windows in ms, see ``speechbrain.processing.features.STFT``
    sample_rate : int
        Number of samples per second of input audio.
    channel_counts : list of ints
        Number of output channels in each CNN block. Determines number of blocks.
    dense_count : int
        Number of dense layers.
    dense_nodes : int
        Number of nodes in the dense layers.
    activation : function
        Function to apply before convolution layers.
        NOTE(review): defaults to a module *instance* shared across layers;
        GELU is stateless so the sharing is harmless here.
    normalization : class
        Name of class to use for constructing norm layers.
    dropout : float
        Portion of layer outputs to drop during training (between 0 and 1).
    mask_weight : float
        Amount of weight to give mask. 0 - no masking, 1 - full masking.

    Example
    -------
    >>> inputs = torch.rand([10, 16000])
    >>> model = EnhanceResnet()
    >>> outputs, feats = model(inputs)
    >>> outputs.shape
    torch.Size([10, 15872])
    >>> feats.shape
    torch.Size([10, 63, 257])
    """

    def __init__(
        self,
        n_fft=512,
        win_length=32,
        hop_length=16,
        sample_rate=16000,
        channel_counts=[128, 128, 256, 256, 512, 512],
        dense_count=2,
        dense_nodes=1024,
        activation=torch.nn.GELU(),
        normalization=sb.nnet.normalization.BatchNorm2d,
        dropout=0.1,
        mask_weight=0.99,
    ):
        super().__init__()
        self.mask_weight = mask_weight
        # First, convert time-domain to log spectral magnitude inputs
        self.stft = STFT(
            n_fft=n_fft,
            win_length=win_length,
            hop_length=hop_length,
            sample_rate=sample_rate,
        )

        # CNN takes log spectral mag inputs (n_fft // 2 + 1 frequency bins)
        self.CNN = sb.nnet.containers.Sequential(
            input_shape=[None, None, n_fft // 2 + 1]
        )
        for channel_count in channel_counts:
            self.CNN.append(
                ConvBlock,
                channels=channel_count,
                activation=activation,
                normalization=normalization,
                dropout=dropout,
            )

        # Fully connected layers
        self.DNN = sb.nnet.containers.Sequential(
            input_shape=self.CNN.get_output_shape()
        )
        for _ in range(dense_count):
            self.DNN.append(
                sb.nnet.linear.Linear, n_neurons=dense_nodes, combine_dims=True,
            )
            self.DNN.append(activation)
            self.DNN.append(sb.nnet.normalization.LayerNorm)
            self.DNN.append(torch.nn.Dropout(p=dropout))

        # Output layer produces real mask that is applied to complex inputs
        self.DNN.append(sb.nnet.linear.Linear, n_neurons=n_fft // 2 + 1)

        # Convert back to time domain
        self.istft = ISTFT(
            n_fft=n_fft,
            win_length=win_length,
            hop_length=hop_length,
            sample_rate=sample_rate,
        )

    def forward(self, x):
        """Processes the input tensor and outputs the enhanced speech."""
        # Generate features
        noisy_spec = self.stft(x)
        log_mag = self.extract_feats(noisy_spec)

        # Generate mask, clamped to [0, 1]
        mask = self.DNN(self.CNN(log_mag))
        mask = mask.clamp(min=0, max=1).unsqueeze(-1)

        # Apply mask; mask_weight blends between masked and unmasked spectrum
        masked_spec = self.mask_weight * mask * noisy_spec
        masked_spec += (1 - self.mask_weight) * noisy_spec

        # Extract feats for loss computation
        enhanced_features = self.extract_feats(masked_spec)

        # Return resynthesized waveform
        return self.istft(masked_spec), enhanced_features

    def extract_feats(self, x):
        """Takes the stft output and produces log-magnitude features."""
        return torch.log1p(spectral_magnitude(x, power=0.5))
class ConvBlock(torch.nn.Module):
    """Convolution block, including squeeze-and-excitation.

    Arguments
    ---------
    input_shape : tuple of ints
        The expected size of the inputs.
    channels : int
        Number of output channels.
    activation : function
        Function applied before each block.
    normalization : class
        Name of a class to use for constructing norm layers.
    dropout : float
        Portion of block outputs to drop during training.

    Example
    -------
    >>> inputs = torch.rand([10, 20, 30, 128])
    >>> block = ConvBlock(input_shape=inputs.shape, channels=256)
    >>> outputs = block(inputs)
    >>> outputs.shape
    torch.Size([10, 20, 15, 256])
    """

    def __init__(
        self,
        input_shape,
        channels,
        activation=torch.nn.GELU(),
        normalization=sb.nnet.normalization.LayerNorm,
        dropout=0.1,
    ):
        super().__init__()
        self.activation = activation
        # Stride (2, 1) downsamples one spatial axis by 2 (see the class
        # example: dim 2 goes from 30 to 15) while changing channel count.
        self.downsample = sb.nnet.CNN.Conv2d(
            input_shape=input_shape,
            out_channels=channels,
            kernel_size=3,
            stride=(2, 1),
        )
        self.conv1 = sb.nnet.CNN.Conv2d(
            in_channels=channels, out_channels=channels, kernel_size=3
        )
        self.norm1 = normalization(input_size=channels)
        self.conv2 = sb.nnet.CNN.Conv2d(
            in_channels=channels, out_channels=channels, kernel_size=3,
        )
        self.norm2 = normalization(input_size=channels)
        self.dropout = sb.nnet.dropout.Dropout2d(drop_rate=dropout)
        self.se_block = SEblock(input_size=channels)

    def forward(self, x):
        """Processes the input tensor with a convolutional block."""
        x = self.downsample(x)
        # Pre-activation residual branch: act -> norm -> drop -> conv, twice.
        residual = self.activation(x)
        residual = self.norm1(residual)
        residual = self.dropout(residual)
        residual = self.conv1(residual)
        residual = self.activation(residual)
        residual = self.norm2(residual)
        residual = self.dropout(residual)
        residual = self.conv2(residual)
        # Scale the residual by the squeeze-and-excitation gate (in place).
        residual *= self.se_block(residual)
        return x + residual
class SEblock(torch.nn.Module):
    """Squeeze-and-excitation block.

    Defined: https://arxiv.org/abs/1709.01507

    Computes a per-channel gate in (0, 1) from globally pooled features.

    Arguments
    ---------
    input_size : tuple of ints
        Expected size of the input tensor

    Example
    -------
    >>> inputs = torch.rand([10, 20, 30, 256])
    >>> se_block = SEblock(input_size=inputs.shape[-1])
    >>> outputs = se_block(inputs)
    >>> outputs.shape
    torch.Size([10, 1, 1, 256])
    """

    def __init__(self, input_size):
        super().__init__()
        self.linear1 = sb.nnet.linear.Linear(
            input_size=input_size, n_neurons=input_size
        )
        self.linear2 = sb.nnet.linear.Linear(
            input_size=input_size, n_neurons=input_size
        )

    def forward(self, x):
        """Processes the input tensor with a speech enhancement block."""
        # Squeeze: global average over the two middle (spatial) dims.
        pooled = torch.mean(x, dim=(1, 2), keepdim=True)
        # Excite: two linear layers with ReLU in between, sigmoid gate out.
        hidden = torch.nn.functional.relu(self.linear1(pooled))
        gate = self.linear2(hidden)
        return torch.sigmoid(gate)
| 7,571 | 30.160494 | 95 | py |
speechbrain | speechbrain-main/speechbrain/lobes/models/ContextNet.py | """The SpeechBrain implementation of ContextNet by
https://arxiv.org/pdf/2005.03191.pdf
Authors
* Jianyuan Zhong 2020
"""
import torch
from torch.nn import Dropout
from speechbrain.nnet.CNN import DepthwiseSeparableConv1d, Conv1d
from speechbrain.nnet.linear import Linear
from speechbrain.nnet.pooling import AdaptivePool
from speechbrain.nnet.containers import Sequential
from speechbrain.nnet.normalization import BatchNorm1d
from speechbrain.nnet.activations import Swish
class ContextNet(Sequential):
    """This class implements the ContextNet.

    Reference paper: https://arxiv.org/pdf/2005.03191.pdf

    Arguments
    ---------
    input_shape : tuple
        Expected shape of an example input.
    out_channels : int
        Number of output channels of this model (default 640).
    conv_channels : Optional (list[int])
        Number of output channels for each of the contextnet block. If not provided, it will be initialized as the default setting of above mentioned paper.
    kernel_size : int
        Kernel size of convolution layers (default 3).
    strides : Optional (list[int])
        Striding factor for each context block. This stride is applied at the last convolution layer at each context block. If not provided, it will be initialize as the default setting of above paper.
    num_blocks : int
        Number of context block (default 21).
    num_layers : int
        Number of depthwise convolution layers for each context block (default 5).
    inner_dim : int
        Inner dimension of bottle-neck network of the SE Module (default 12).
    alpha : float
        The factor to scale the output channel of the network (default 1).
    beta : float
        Beta to scale the Swish activation (default 1).
    dropout : float
        Dropout (default 0.15).
    activation : torch class
        Activation function for each context block (default Swish).
    se_activation : torch class
        Activation function for SE Module (default torch.nn.Sigmoid).
    norm : torch class
        Normalization to regularize the model (default BatchNorm1d).
    residuals : Optional (list[bool])
        Whether to apply residual connection at each context block (default None).

    Example
    -------
    >>> inp = torch.randn([8, 48, 40])
    >>> block = ContextNet(input_shape=inp.shape, num_blocks=14)
    >>> out = block(inp)
    >>> out.shape
    torch.Size([8, 6, 640])
    """

    def __init__(
        self,
        input_shape,
        out_channels=640,
        conv_channels=None,
        kernel_size=3,
        strides=None,
        num_blocks=21,
        num_layers=5,
        inner_dim=12,
        alpha=1,
        beta=1,
        dropout=0.15,
        activation=Swish,
        se_activation=torch.nn.Sigmoid,
        norm=BatchNorm1d,
        residuals=None,
    ):
        super().__init__(input_shape=input_shape)

        if conv_channels is None:
            conv_channels = [*[256] * 10, *[512] * 11]
        if strides is None:
            strides = [1] * num_blocks
            # Downsample (stride 2) at blocks 2, 6 and 13, per the paper.
            # Guard the indices so smaller num_blocks doesn't raise
            # IndexError (the original code crashed for num_blocks < 14).
            for ds_idx in (2, 6, 13):
                if ds_idx < num_blocks:
                    strides[ds_idx] = 2
        if residuals is None:
            residuals = [True] * num_blocks

        self.append(
            DepthwiseSeparableConv1d,
            conv_channels[0],
            kernel_size,
            layer_name="conv_start",
        )
        self.append(norm, layer_name="norm_start")
        # BUGFIX: `activation` is a *class*, so the original
        # `isinstance(activation, Swish)` check was always False and `beta`
        # was silently ignored. Swish is the only activation here that takes
        # the beta scaling factor at construction time.
        if isinstance(activation, type) and issubclass(activation, Swish):
            self.append(activation(beta), layer_name="act_start")
        else:
            self.append(activation(), layer_name="act_start")

        # Stack the context blocks; alpha scales each block's channel count.
        for i in range(num_blocks):
            channels = int(conv_channels[i] * alpha)
            self.append(
                ContextNetBlock,
                out_channels=channels,
                kernel_size=kernel_size,
                num_layers=num_layers,
                inner_dim=inner_dim,
                stride=strides[i],
                beta=beta,
                dropout=dropout,
                activation=activation,
                se_activation=se_activation,
                norm=norm,
                residual=residuals[i],
                layer_name=f"block_{i}",
            )

        self.append(
            DepthwiseSeparableConv1d,
            out_channels,
            kernel_size,
            layer_name="conv_end",
        )
        self.append(norm, layer_name="norm_end")
        if isinstance(activation, type) and issubclass(activation, Swish):
            self.append(activation(beta), layer_name="act_end")
        else:
            self.append(activation(), layer_name="act_end")
class SEmodule(torch.nn.Module):
    """This class implements the Squeeze-and-Excitation module.

    Arguments
    ---------
    input_shape : tuple
        Expected shape of an example input (batch, time, channels).
    inner_dim : int
        Inner dimension of bottle-neck network of the SE Module (default 12).
    activation : torch class
        Activation function for SE Module (default torch.nn.Sigmoid).
    norm : torch class
        Normalization to regularize the model (default BatchNorm1d).

    Example
    -------
    >>> inp = torch.randn([8, 120, 40])
    >>> net = SEmodule(input_shape=inp.shape, inner_dim=64)
    >>> out = net(inp)
    >>> out.shape
    torch.Size([8, 120, 40])
    """

    def __init__(
        self,
        input_shape,
        inner_dim,
        activation=torch.nn.Sigmoid,
        norm=BatchNorm1d,
    ):
        super().__init__()
        self.inner_dim = inner_dim
        self.norm = norm
        self.activation = activation

        # Only the channel count is used here; batch/time are ignored.
        bz, t, chn = input_shape

        # Pointwise depthwise-separable conv + norm + activation.
        self.conv = Sequential(input_shape=input_shape)
        self.conv.append(
            DepthwiseSeparableConv1d, out_channels=chn, kernel_size=1, stride=1,
        )
        self.conv.append(self.norm)
        self.conv.append(self.activation())

        # Squeeze: pool time down to length 1; excite: bottleneck MLP.
        self.avg_pool = AdaptivePool(1)
        self.bottleneck = Sequential(
            Linear(input_size=input_shape[-1], n_neurons=self.inner_dim),
            self.activation(),
            Linear(input_size=self.inner_dim, n_neurons=chn),
            self.activation(),
        )

    def forward(self, x):
        """ Processes the input tensor x and returns an output tensor."""
        bz, t, chn = x.shape

        x = self.conv(x)

        # Pool over time, compute channel-wise context, then broadcast it
        # back along the time axis and gate the features.
        avg = self.avg_pool(x)
        avg = self.bottleneck(avg)
        context = avg.repeat(1, t, 1)
        return x * context
class ContextNetBlock(torch.nn.Module):
    """This class implements a block in ContextNet.

    Arguments
    ---------
    out_channels : int
        Number of output channels of this model (default 640).
    kernel_size : int
        Kernel size of convolution layers (default 3).
    num_layers : int
        Number of depthwise convolution layers for this context block (default 5).
    inner_dim : int
        Inner dimension of bottle-neck network of the SE Module (default 12).
    input_shape : tuple
        Expected shape of an example input.
    stride : int
        Striding factor for this context block (default 1).
    beta : float
        Beta to scale the Swish activation (default 1).
    dropout : float
        Dropout (default 0.15).
    activation : torch class
        Activation function for this context block (default Swish).
    se_activation : torch class
        Activation function for SE Module (default torch.nn.Sigmoid).
    norm : torch class
        Normalization to regularize the model (default BatchNorm1d).
    residual : bool
        Whether to apply residual connection at this context block (default True).

    Example
    -------
    >>> inp = torch.randn([8, 120, 40])
    >>> block = ContextNetBlock(256, 3, 5, 12, input_shape=inp.shape, stride=2)
    >>> out = block(inp)
    >>> out.shape
    torch.Size([8, 60, 256])
    """

    def __init__(
        self,
        out_channels,
        kernel_size,
        num_layers,
        inner_dim,
        input_shape,
        stride=1,
        beta=1,
        dropout=0.15,
        activation=Swish,
        se_activation=torch.nn.Sigmoid,
        norm=BatchNorm1d,
        residual=True,
    ):
        super().__init__()
        self.residual = residual

        # Depthwise-separable convolutions; the stride is only applied at
        # the last layer of the block.
        self.Convs = Sequential(input_shape=input_shape)
        for i in range(num_layers):
            self.Convs.append(
                DepthwiseSeparableConv1d,
                out_channels,
                kernel_size,
                stride=stride if i == num_layers - 1 else 1,
            )
            self.Convs.append(norm)

        self.SE = SEmodule(
            input_shape=self.Convs.get_output_shape(),
            inner_dim=inner_dim,
            activation=se_activation,
            norm=norm,
        )
        self.drop = Dropout(dropout)

        # Residual branch: 1x1-style conv matching channels and stride.
        self.reduced_cov = None
        if residual:
            self.reduced_cov = Sequential(input_shape=input_shape)
            self.reduced_cov.append(
                Conv1d, out_channels, kernel_size=3, stride=stride,
            )
            self.reduced_cov.append(norm)

        # BUGFIX: `activation` is a *class*, so the original
        # `isinstance(activation, Swish)` check was always False and `beta`
        # was silently ignored. Swish is the only activation here that takes
        # the beta scaling factor at construction time.
        if isinstance(activation, type) and issubclass(activation, Swish):
            self.activation = activation(beta)
        else:
            self.activation = activation()

        self._reset_params()

    def forward(self, x):
        """ Processes the input tensor x and returns an output tensor."""
        out = self.Convs(x)
        out = self.SE(out)
        if self.reduced_cov:
            out = out + self.reduced_cov(x)
        out = self.activation(out)
        return self.drop(out)

    def _reset_params(self):
        # Kaiming init for all weight matrices/kernels (dim > 1).
        for p in self.parameters():
            if p.dim() > 1:
                torch.nn.init.kaiming_normal_(p)
| 9,388 | 30.612795 | 201 | py |
speechbrain | speechbrain-main/speechbrain/lobes/models/Xvector.py | """A popular speaker recognition and diarization model.
Authors
* Nauman Dawalatabad 2020
* Mirco Ravanelli 2020
"""
# import os
import torch # noqa: F401
import torch.nn as nn
import speechbrain as sb
from speechbrain.nnet.pooling import StatisticsPooling
from speechbrain.nnet.CNN import Conv1d
from speechbrain.nnet.linear import Linear
from speechbrain.nnet.normalization import BatchNorm1d
class Xvector(torch.nn.Module):
    """This model extracts X-vectors for speaker recognition and diarization.

    Arguments
    ---------
    device : str
        Device used e.g. "cpu" or "cuda".
    activation : torch class
        A class for constructing the activation layers.
    tdnn_blocks : int
        Number of time-delay neural (TDNN) layers.
    tdnn_channels : list of ints
        Output channels for TDNN layer.
    tdnn_kernel_sizes : list of ints
        List of kernel sizes for each TDNN layer.
    tdnn_dilations : list of ints
        List of dilations for kernels in each TDNN layer.
    lin_neurons : int
        Number of neurons in linear layers.
    in_channels : int
        Number of input feature channels (default 40).

    Example
    -------
    >>> compute_xvect = Xvector('cpu')
    >>> input_feats = torch.rand([5, 10, 40])
    >>> outputs = compute_xvect(input_feats)
    >>> outputs.shape
    torch.Size([5, 1, 512])
    """

    def __init__(
        self,
        device="cpu",
        activation=torch.nn.LeakyReLU,
        tdnn_blocks=5,
        tdnn_channels=[512, 512, 512, 512, 1500],
        tdnn_kernel_sizes=[5, 3, 3, 1, 1],
        tdnn_dilations=[1, 2, 3, 1, 1],
        lin_neurons=512,
        in_channels=40,
    ):
        super().__init__()
        self.blocks = nn.ModuleList()

        # TDNN layers: each block is conv -> activation -> batch norm, with
        # the previous block's channels feeding the next.
        for block_index in range(tdnn_blocks):
            out_channels = tdnn_channels[block_index]
            self.blocks.extend(
                [
                    Conv1d(
                        in_channels=in_channels,
                        out_channels=out_channels,
                        kernel_size=tdnn_kernel_sizes[block_index],
                        dilation=tdnn_dilations[block_index],
                    ),
                    activation(),
                    BatchNorm1d(input_size=out_channels),
                ]
            )
            in_channels = tdnn_channels[block_index]

        # Statistical pooling
        self.blocks.append(StatisticsPooling())

        # Final linear transformation. The *2 assumes StatisticsPooling
        # concatenates two statistics (mean/std) along the channel dim —
        # confirm against its implementation.
        self.blocks.append(
            Linear(
                input_size=out_channels * 2,
                n_neurons=lin_neurons,
                bias=True,
                combine_dims=False,
            )
        )

    def forward(self, x, lens=None):
        """Returns the x-vectors.

        Arguments
        ---------
        x : torch.Tensor
            Input feature tensor.
        lens : torch.Tensor
            Optional relative lengths, forwarded to layers that accept them.
        """
        for layer in self.blocks:
            try:
                # Some layers (e.g. pooling) accept lengths; for the rest
                # the TypeError fallback calls them with features only.
                x = layer(x, lengths=lens)
            except TypeError:
                x = layer(x)
        return x
class Classifier(sb.nnet.containers.Sequential):
    """This class implements the last MLP on the top of xvector features.

    Arguments
    ---------
    input_shape : tuple
        Expected shape of an example input.
    activation : torch class
        A class for constructing the activation layers.
    lin_blocks : int
        Number of linear layers.
    lin_neurons : int
        Number of neurons in linear layers.
    out_neurons : int
        Number of output neurons.

    Example
    -------
    >>> input_feats = torch.rand([5, 10, 40])
    >>> compute_xvect = Xvector()
    >>> xvects = compute_xvect(input_feats)
    >>> classify = Classifier(input_shape=xvects.shape)
    >>> output = classify(xvects)
    >>> output.shape
    torch.Size([5, 1, 1211])
    """

    def __init__(
        self,
        input_shape,
        activation=torch.nn.LeakyReLU,
        lin_blocks=1,
        lin_neurons=512,
        out_neurons=1211,
    ):
        super().__init__(input_shape=input_shape)
        self.append(activation(), layer_name="act")
        self.append(sb.nnet.normalization.BatchNorm1d, layer_name="norm")

        # Hidden DNN: each block is linear -> activation -> batch norm.
        if lin_blocks > 0:
            self.append(sb.nnet.containers.Sequential, layer_name="DNN")
            for block_index in range(lin_blocks):
                block_name = f"block_{block_index}"
                self.DNN.append(
                    sb.nnet.containers.Sequential, layer_name=block_name
                )
                self.DNN[block_name].append(
                    sb.nnet.linear.Linear,
                    n_neurons=lin_neurons,
                    bias=True,
                    layer_name="linear",
                )
                self.DNN[block_name].append(activation(), layer_name="act")
                self.DNN[block_name].append(
                    sb.nnet.normalization.BatchNorm1d, layer_name="norm"
                )

        # Final Softmax classifier (log-probabilities)
        self.append(
            sb.nnet.linear.Linear, n_neurons=out_neurons, layer_name="out"
        )
        self.append(
            sb.nnet.activations.Softmax(apply_log=True), layer_name="softmax"
        )
class Discriminator(sb.nnet.containers.Sequential):
    """This class implements a discriminator on the top of xvector features.

    Arguments
    ---------
    input_shape : tuple
        Expected shape of an example input.
    activation : torch class
        A class for constructing the activation layers.
    lin_blocks : int
        Number of linear layers.
    lin_neurons : int
        Number of neurons in linear layers.
    out_neurons : int
        Number of output neurons (default 1).

    Example
    -------
    >>> input_feats = torch.rand([5, 10, 40])
    >>> compute_xvect = Xvector()
    >>> xvects = compute_xvect(input_feats)
    >>> discriminate = Discriminator(xvects.shape)
    >>> output = discriminate(xvects)
    >>> output.shape
    torch.Size([5, 1, 1])
    """

    def __init__(
        self,
        input_shape,
        activation=torch.nn.LeakyReLU,
        lin_blocks=1,
        lin_neurons=512,
        out_neurons=1,
    ):
        super().__init__(input_shape=input_shape)

        # Hidden DNN: each block is linear -> batch norm -> activation.
        if lin_blocks > 0:
            self.append(sb.nnet.containers.Sequential, layer_name="DNN")
            for block_index in range(lin_blocks):
                block_name = f"block_{block_index}"
                self.DNN.append(
                    sb.nnet.containers.Sequential, layer_name=block_name
                )
                self.DNN[block_name].append(
                    sb.nnet.linear.Linear,
                    n_neurons=lin_neurons,
                    bias=True,
                    combine_dims=False,
                    layer_name="linear",
                )
                self.DNN[block_name].append(
                    sb.nnet.normalization.BatchNorm1d, layer_name="norm"
                )
                self.DNN[block_name].append(activation(), layer_name="act")

        # Final Layer (sigmoid not included)
        self.append(
            sb.nnet.linear.Linear, n_neurons=out_neurons, layer_name="out"
        )
| 6,854 | 28.170213 | 77 | py |
speechbrain | speechbrain-main/speechbrain/lobes/models/resepformer.py | """Library for the Reseource-Efficient Sepformer.
Authors
* Cem Subakan 2022
"""
import torch
import torch.nn as nn
from speechbrain.lobes.models.dual_path import select_norm
from speechbrain.lobes.models.transformer.Transformer import (
TransformerEncoder,
PositionalEncoding,
get_lookahead_mask,
)
import speechbrain.nnet.RNN as SBRNN
import copy
EPS = torch.finfo(torch.get_default_dtype()).eps
class MemLSTM(nn.Module):
    """the Mem-LSTM of SkiM --

    Note: This is taken from the SkiM implementation in ESPNet toolkit and modified for compatibility with SpeechBrain.

    Arguments:
    ---------
    hidden_size: int,
        Dimension of the hidden state.
    dropout: float,
        dropout ratio. Default is 0.
    bidirectional: bool,
        Whether the LSTM layers are bidirectional.
        Default is False.
    mem_type: 'hc', 'h', 'c', or 'id'.
        This controls whether the hidden (or cell) state of
        SegLSTM will be processed by MemLSTM.
        In 'id' mode, both the hidden and cell states will
        be identically returned.
    norm_type: 'gln', 'cln'
        This selects the type of normalization
        cln is for causal implementation

    Example
    ---------
    >>> x = (torch.randn(1, 5, 64), torch.randn(1, 5, 64))
    >>> block = MemLSTM(64)
    >>> x = block(x, 5)
    >>> x[0].shape
    torch.Size([1, 5, 64])
    """

    def __init__(
        self,
        hidden_size,
        dropout=0.0,
        bidirectional=False,
        mem_type="hc",
        norm_type="cln",
    ):
        super().__init__()
        self.hidden_size = hidden_size
        self.bidirectional = bidirectional
        # Bidirectional states are flattened to twice the hidden size.
        self.input_size = (int(bidirectional) + 1) * hidden_size
        self.mem_type = mem_type

        assert mem_type in [
            "hc",
            "h",
            "c",
            "id",
        ], f"only support 'hc', 'h', 'c' and 'id', current type: {mem_type}"

        # RNN (+norm) over the hidden-state sequence, built only when needed.
        if mem_type in ["hc", "h"]:
            self.h_net = SBRNNBlock(
                input_size=self.input_size,
                hidden_channels=self.hidden_size,
                num_layers=1,
                outsize=self.input_size,
                rnn_type="LSTM",
                dropout=dropout,
                bidirectional=bidirectional,
            )
            self.h_norm = select_norm(
                norm=norm_type, dim=self.input_size, shape=3, eps=EPS
            )
        # RNN (+norm) over the cell-state sequence, built only when needed.
        if mem_type in ["hc", "c"]:
            self.c_net = SBRNNBlock(
                input_size=self.input_size,
                hidden_channels=self.hidden_size,
                num_layers=1,
                outsize=self.input_size,
                rnn_type="LSTM",
                dropout=dropout,
                bidirectional=bidirectional,
            )
            self.c_norm = select_norm(
                norm=norm_type, dim=self.input_size, shape=3, eps=EPS
            )

    def forward(self, hc, S):
        """The forward function for the memory RNN

        Arguments
        ---------
        hc : torch.Tensor
            (h, c), tuple of hidden and cell states from SegLSTM
            shape of h and c: (d, B*S, H)
            where d is the number of directions
                  B is the batchsize
                  S is the number chunks
                  H is the latent dimensionality
        S : int
            S is the number of chunks
        """
        if self.mem_type == "id":
            ret_val = hc
        else:
            h, c = hc
            d, BS, H = h.shape
            B = BS // S
            # Reshape (d, B*S, H) -> (B, S, d*H) so the RNN runs over chunks.
            h = h.transpose(1, 0).contiguous().view(B, S, d * H)  # B, S, dH
            c = c.transpose(1, 0).contiguous().view(B, S, d * H)  # B, S, dH

            # Residual update of the selected states; the norms operate on
            # (B, C, S) hence the permute round-trips.
            if self.mem_type == "hc":
                h = h + self.h_norm(self.h_net(h).permute(0, 2, 1)).permute(
                    0, 2, 1
                )
                c = c + self.c_norm(self.c_net(c).permute(0, 2, 1)).permute(
                    0, 2, 1
                )
            elif self.mem_type == "h":
                h = h + self.h_norm(self.h_net(h).permute(0, 2, 1)).permute(
                    0, 2, 1
                )
                c = torch.zeros_like(c)
            elif self.mem_type == "c":
                h = torch.zeros_like(h)
                c = c + self.c_norm(self.c_net(c).permute(0, 2, 1)).permute(
                    0, 2, 1
                )

            # Back to the (d, B*S, H) layout expected by SegLSTM.
            h = h.view(B * S, d, H).transpose(1, 0).contiguous()
            c = c.view(B * S, d, H).transpose(1, 0).contiguous()
            ret_val = (h, c)

        if not self.bidirectional:
            # for causal setup: shift states one chunk to the right so chunk
            # k only sees memory from chunks < k.
            causal_ret_val = []
            for x in ret_val:
                x_ = torch.zeros_like(x)
                x_[:, 1:, :] = x[:, :-1, :]
                causal_ret_val.append(x_)
            ret_val = tuple(causal_ret_val)

        return ret_val
class SegLSTM(nn.Module):
    """the Segment-LSTM of SkiM

    Note: This is taken from the SkiM implementation in ESPNet toolkit and modified for compatibility with SpeechBrain.

    Arguments:
    ----------
    input_size: int,
        dimension of the input feature.
        The input should have shape (batch, seq_len, input_size).
    hidden_size: int,
        dimension of the hidden state.
    dropout: float,
        dropout ratio. Default is 0.
    bidirectional: bool,
        whether the LSTM layers are bidirectional.
        Default is False.
    norm_type: gln, cln.
        This selects the type of normalization
        cln is for causal implementation.

    Example
    ---------
    >>> x = torch.randn(3, 20, 64)
    >>> hc = None
    >>> seglstm = SegLSTM(64, 64)
    >>> y = seglstm(x, hc)
    >>> y[0].shape
    torch.Size([3, 20, 64])
    """

    def __init__(
        self,
        input_size,
        hidden_size,
        dropout=0.0,
        bidirectional=False,
        norm_type="cLN",
    ):
        super().__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_direction = int(bidirectional) + 1

        self.lstm = nn.LSTM(
            input_size,
            hidden_size,
            1,
            batch_first=True,
            bidirectional=bidirectional,
        )
        self.dropout = nn.Dropout(p=dropout)
        # Projection maps the (possibly bidirectional) LSTM output back to
        # input_size so a residual connection is possible.
        self.proj = nn.Linear(hidden_size * self.num_direction, input_size)
        self.norm = select_norm(
            norm=norm_type, dim=input_size, shape=3, eps=EPS
        )

    def forward(self, input, hc):
        """The forward function of the Segment LSTM

        Arguments
        ---------
        input : torch.Tensor of size [B*S, T, H]
            where B is the batchsize
                  S is the number of chunks
                  T is the chunks size
                  H is the latent dimensionality
        hc : tuple or None
            (h, c), tuple of hidden and cell states from SegLSTM
            shape of h and c: (d, B*S, H)
            where d is the number of directions
                  B is the batchsize
                  S is the number chunks
                  H is the latent dimensionality
        """

        B, T, H = input.shape

        if hc is None:
            # In first input SkiM block, h and c are not available
            d = self.num_direction
            h = torch.zeros(d, B, self.hidden_size).to(input.device)
            c = torch.zeros(d, B, self.hidden_size).to(input.device)
        else:
            h, c = hc

        output, (h, c) = self.lstm(input, (h, c))
        output = self.dropout(output)
        # Project back to input_size, normalize, and add residually.
        output = self.proj(output.contiguous().view(-1, output.shape[2])).view(
            input.shape
        )
        output_norm = self.norm(output.permute(0, 2, 1)).permute(0, 2, 1)
        output = input + output_norm

        return output, (h, c)
class SBRNNBlock(nn.Module):
    """RNNBlock with a linear output layer.

    Arguments
    ---------
    input_size : int
        Dimensionality of the input features.
    hidden_channels : int
        Dimensionality of the latent layer of the rnn.
    num_layers : int
        Number of the rnn layers.
    outsize : int
        Number of dimensions at the output of the linear layer.
    rnn_type : str
        Type of the rnn cell.
    dropout : float
        Dropout rate.
    bidirectional : bool
        If True, bidirectional.

    Example
    ---------
    >>> x = torch.randn(10, 100, 64)
    >>> rnn = SBRNNBlock(64, 100, 1, 128, bidirectional=True)
    >>> x = rnn(x)
    >>> x.shape
    torch.Size([10, 100, 128])
    """

    def __init__(
        self,
        input_size,
        hidden_channels,
        num_layers,
        outsize,
        rnn_type="LSTM",
        dropout=0,
        bidirectional=True,
    ):
        super(SBRNNBlock, self).__init__()
        # Resolve the requested recurrent cell from speechbrain.nnet.RNN.
        rnn_cls = getattr(SBRNN, rnn_type)
        self.mdl = rnn_cls(
            hidden_channels,
            input_size=input_size,
            num_layers=num_layers,
            dropout=dropout,
            bidirectional=bidirectional,
        )
        # A bidirectional RNN concatenates both directions on the feature
        # axis, doubling the feature dimension seen by the output layer.
        if bidirectional:
            rnn_feat_dim = 2 * hidden_channels
        else:
            rnn_feat_dim = hidden_channels
        self.out = nn.Linear(rnn_feat_dim, outsize)

    def forward(self, x):
        """Returns the transformed output.

        Arguments
        ---------
        x : torch.Tensor
            [B, L, N]
            where, B = Batchsize,
                   N = number of filters
                   L = time points
        """
        # The RNN returns (sequence, state); only the sequence is used.
        hidden_seq = self.mdl(x)[0]
        return self.out(hidden_seq)
class SBTransformerBlock_wnormandskip(nn.Module):
    """A wrapper for the SpeechBrain implementation of the transformer encoder.

    Arguments
    ---------
    num_layers : int
        Number of layers.
    d_model : int
        Dimensionality of the representation.
    nhead : int
        Number of attention heads.
    d_ffn : int
        Dimensionality of positional feed forward.
    input_shape : tuple
        Shape of input.
    kdim : int
        Dimension of the key (Optional).
    vdim : int
        Dimension of the value (Optional).
    dropout : float
        Dropout rate.
    activation : str
        Activation function, either "relu" or "gelu".
    use_positional_encoding : bool
        If true we use a positional encoding.
    norm_before: bool
        Use normalization before transformations.
    attention_type : str
        Attention type passed to the transformer encoder
        (default "regularMHA").
    causal : bool
        If True, apply a look-ahead mask so each step only attends to the past.
    use_norm : bool
        If True, normalize the encoder output.
    use_skip : bool
        If True, add a skip connection from input to output.
    norm_type : str
        Normalization type ('gln' or 'cln'), used when use_norm is True.

    Example
    ---------
    >>> x = torch.randn(10, 100, 64)
    >>> block = SBTransformerBlock_wnormandskip(1, 64, 8)
    >>> x = block(x)
    >>> x.shape
    torch.Size([10, 100, 64])
    """

    def __init__(
        self,
        num_layers,
        d_model,
        nhead,
        d_ffn=2048,
        input_shape=None,
        kdim=None,
        vdim=None,
        dropout=0.1,
        activation="relu",
        use_positional_encoding=False,
        norm_before=False,
        attention_type="regularMHA",
        causal=False,
        use_norm=True,
        use_skip=True,
        norm_type="gln",
    ):
        super(SBTransformerBlock_wnormandskip, self).__init__()
        self.use_positional_encoding = use_positional_encoding

        # Map the string to the corresponding activation class.
        if activation == "relu":
            activation = nn.ReLU
        elif activation == "gelu":
            activation = nn.GELU
        else:
            raise ValueError("unknown activation")

        self.causal = causal
        self.mdl = TransformerEncoder(
            num_layers=num_layers,
            nhead=nhead,
            d_ffn=d_ffn,
            input_shape=input_shape,
            d_model=d_model,
            kdim=kdim,
            vdim=vdim,
            dropout=dropout,
            activation=activation,
            normalize_before=norm_before,
            causal=causal,
            attention_type=attention_type,
        )

        self.use_norm = use_norm
        self.use_skip = use_skip

        if use_norm:
            self.norm = select_norm(
                norm=norm_type, dim=d_model, shape=3, eps=EPS
            )

        if use_positional_encoding:
            self.pos_enc = PositionalEncoding(
                input_size=d_model, max_len=100000
            )

    def forward(self, x):
        """Returns the transformed output.

        Arguments
        ---------
        x : torch.Tensor
            Tensor shape [B, L, N],
            where, B = Batchsize,
                   L = time points
                   N = number of filters
        """
        # Causal mode masks attention to future positions.
        src_mask = get_lookahead_mask(x) if self.causal else None

        if self.use_positional_encoding:
            pos_enc = self.pos_enc(x)
            out = self.mdl(x + pos_enc, src_mask=src_mask)[0]
        else:
            out = self.mdl(x, src_mask=src_mask)[0]

        # Optional output norm (expects channels-first, hence the permutes)
        # and residual skip connection.
        if self.use_norm:
            out = self.norm(out.permute(0, 2, 1)).permute(0, 2, 1)
        if self.use_skip:
            out = out + x

        return out
class ResourceEfficientSeparationPipeline(nn.Module):
    """Resource Efficient Separation Pipeline used for RE-SepFormer and SkiM.

    Note: This implementation is a generalization of the ESPNET implementation of SkiM.

    Arguments
    ---------
    input_size : int
        Dimension of the input feature.
        Input shape should be (batch, length, input_size).
    hidden_size : int
        Dimension of the hidden state.
    output_size : int
        Dimension of the output size.
    dropout : float
        Dropout ratio. Default is 0.
    num_blocks : int
        Number of basic SkiM blocks.
    segment_size : int
        Segmentation size for splitting long features.
    bidirectional : bool
        Whether the RNN layers are bidirectional.
    mem_type : 'hc', 'h', 'c', 'id', 'av' or None
        This controls whether the hidden (or cell) state of SegLSTM
        will be processed by MemLSTM.
        In 'id' mode, both the hidden and cell states will
        be identically returned.
        When mem_type is None, the MemLSTM will be removed.
    norm_type : 'gln' or 'cln'
        cln is for causal implementation.
    seg_model : class
        The model that processes the within segment elements.
    mem_model : class
        The memory model that ensures continuity between the segments.

    Example
    -------
    >>> x = torch.randn(10, 100, 64)
    >>> seg_mdl = SBTransformerBlock_wnormandskip(1, 64, 8)
    >>> mem_mdl = SBTransformerBlock_wnormandskip(1, 64, 8)
    >>> resepf_pipeline = ResourceEfficientSeparationPipeline(64, 64, 128, seg_model=seg_mdl, mem_model=mem_mdl)
    >>> out = resepf_pipeline.forward(x)
    >>> out.shape
    torch.Size([10, 100, 128])
    """

    def __init__(
        self,
        input_size,
        hidden_size,
        output_size,
        dropout=0.0,
        num_blocks=2,
        segment_size=20,
        bidirectional=True,
        mem_type="av",
        norm_type="gln",
        seg_model=None,
        mem_model=None,
    ):
        super().__init__()
        self.input_size = input_size
        self.output_size = output_size
        self.hidden_size = hidden_size
        self.segment_size = segment_size
        self.dropout = dropout
        self.num_blocks = num_blocks
        self.mem_type = mem_type
        self.norm_type = norm_type

        assert mem_type in [
            "hc",
            "h",
            "c",
            "id",
            "av",
            None,
        ], f"only support 'hc', 'h', 'c', 'id', 'av' and None, current type: {mem_type}"

        # Each block gets its own (deep-copied) instance of the segment model.
        self.seg_model = nn.ModuleList(
            [copy.deepcopy(seg_model) for _ in range(num_blocks)]
        )
        if self.mem_type is not None:
            # One memory model between each pair of consecutive blocks.
            self.mem_model = nn.ModuleList(
                [copy.deepcopy(mem_model) for _ in range(num_blocks - 1)]
            )

        self.output_fc = nn.Sequential(
            nn.PReLU(), nn.Conv1d(input_size, output_size, 1)
        )

    def forward(self, input):
        """The forward function of the ResourceEfficientSeparationPipeline.

        This takes in a tensor of size [B, (S*K), D].

        Arguments
        ---------
        input : torch.Tensor
            Tensor shape [B, (S*K), D],
            where, B = Batchsize,
            S = Number of chunks
            K = Chunksize
            D = number of features
        """
        B, T, D = input.shape
        input, rest = self._padfeature(input=input)
        input = input.view(B, -1, self.segment_size, D)  # B, S, K, D
        B, S, K, D = input.shape
        assert K == self.segment_size

        output = input.reshape(B * S, K, D)  # BS, K, D

        if self.mem_type == "av":
            # Summary state starts at zero; broadcast-added to each segment.
            hc = torch.zeros(
                output.shape[0], 1, output.shape[-1], device=output.device
            )
        else:
            hc = None

        if self.num_blocks > 0:
            # Hoisted out of the loop: all blocks are deep copies of the same
            # model, so the class name is identical for every block.
            seg_model_type = type(self.seg_model[0]).__name__

        for i in range(self.num_blocks):
            if seg_model_type == "SBTransformerBlock_wnormandskip":
                output = self.seg_model[i](output + hc)  # BS, K, D
            elif seg_model_type == "SegLSTM":
                output, hc = self.seg_model[i](output, hc)  # BS, K, D
            else:
                raise ValueError("Unsupported segment model class")

            if i < (self.num_blocks - 1):
                if self.mem_type == "av":
                    # Average over time yields one summary vector per segment.
                    hc = output.mean(1).unsqueeze(0)
                    hc = self.mem_model[i](hc).permute(1, 0, 2)
                else:
                    hc = self.mem_model[i](hc, S)

        # Undo the chunking and drop any padded frames.
        output = output.reshape(B, S * K, D)[:, :T, :]  # B, T, D
        output = self.output_fc(output.transpose(1, 2)).transpose(1, 2)

        return output

    def _padfeature(self, input):
        """Zero-pads the time axis up to a multiple of ``self.segment_size``.

        Arguments
        ---------
        input : torch.Tensor of size [B, T, D]
            where B is Batchsize
            T is the chunk length
            D is the feature dimensionality

        Returns
        -------
        input : torch.Tensor
            The (possibly padded) input.
        rest : int
            Number of padded frames; 0 when T is already a multiple of
            ``segment_size``.
        """
        B, T, D = input.shape
        # ``-T % segment_size`` is 0 for exact multiples. The previous
        # ``segment_size - T % segment_size`` padded a whole extra (useless)
        # segment when T was already divisible by segment_size.
        rest = -T % self.segment_size
        if rest > 0:
            input = torch.nn.functional.pad(input, (0, 0, 0, rest))
        return input, rest
class ResourceEfficientSeparator(nn.Module):
    """Resource Efficient Source Separator.

    This is the class that implements RE-SepFormer.

    Arguments
    ---------
    input_dim : int
        Input feature dimension.
    causal : bool
        Whether the system is causal.
    num_spk : int
        Number of target speakers.
    nonlinear : str
        The nonlinear function for mask estimation;
        one of 'relu', 'tanh', 'sigmoid'.
    layer : int
        Number of blocks. Default is 2 for RE-SepFormer.
    unit : int
        Dimensionality of the hidden state.
    segment_size : int
        Chunk size for splitting long features.
    dropout : float
        Dropout ratio. Default is 0.
    mem_type : 'hc', 'h', 'c', 'id', 'av' or None
        Controls whether a memory representation is used to ensure continuity
        between segments. In 'av' mode the summary state is computed by simply
        averaging over the time dimension of each segment. In 'id' mode, both
        the hidden and cell states will be identically returned.
        When mem_type is None, the memory model will be removed.
    seg_model : class
        The model that processes the within-segment elements.
    mem_model : class
        The memory model that ensures continuity between the segments.

    Example
    -------
    >>> x = torch.randn(10, 64, 100)
    >>> seg_mdl = SBTransformerBlock_wnormandskip(1, 64, 8)
    >>> mem_mdl = SBTransformerBlock_wnormandskip(1, 64, 8)
    >>> resepformer = ResourceEfficientSeparator(64, num_spk=3, mem_type='av', seg_model=seg_mdl, mem_model=mem_mdl)
    >>> out = resepformer.forward(x)
    >>> out.shape
    torch.Size([3, 10, 64, 100])
    """

    def __init__(
        self,
        input_dim: int,
        causal: bool = True,
        num_spk: int = 2,
        nonlinear: str = "relu",
        layer: int = 3,
        unit: int = 512,
        segment_size: int = 20,
        dropout: float = 0.0,
        mem_type: str = "hc",
        seg_model=None,
        mem_model=None,
    ):
        super().__init__()
        self.num_spk = num_spk
        self.segment_size = segment_size

        if mem_type not in ("hc", "h", "c", "id", "av", None):
            raise ValueError("Not supporting mem_type={}".format(mem_type))

        # The pipeline predicts one concatenated mask per speaker.
        self.model = ResourceEfficientSeparationPipeline(
            input_size=input_dim,
            hidden_size=unit,
            output_size=input_dim * num_spk,
            dropout=dropout,
            num_blocks=layer,
            bidirectional=(not causal),
            norm_type="cln" if causal else "gln",
            segment_size=segment_size,
            mem_type=mem_type,
            seg_model=seg_model,
            mem_model=mem_model,
        )

        activations = {
            "sigmoid": torch.nn.Sigmoid,
            "relu": torch.nn.ReLU,
            "tanh": torch.nn.Tanh,
        }
        if nonlinear not in activations:
            raise ValueError("Not supporting nonlinear={}".format(nonlinear))
        self.nonlinear = activations[nonlinear]()

    def forward(self, inpt: torch.Tensor):
        """Forward.

        Arguments
        ---------
        inpt : torch.Tensor
            Encoded feature [B, N, T].

        Returns
        -------
        torch.Tensor
            Estimated masks, stacked over speakers: [num_spk, B, N, T].
        """
        # Work in time-major layout: [B, T, N].
        feats = inpt.permute(0, 2, 1)
        batch, frames, feat_dim = feats.shape

        estimated = self.model(feats)  # B, T, N * num_spk
        estimated = estimated.reshape(batch, frames, feat_dim, self.num_spk)
        gated = self.nonlinear(estimated)

        # Split per speaker and restore the channel-first layout.
        per_spk = [m.permute(0, 2, 1) for m in gated.unbind(dim=3)]
        return torch.stack(per_spk)
| 21,609 | 29.013889 | 119 | py |
speechbrain | speechbrain-main/speechbrain/lobes/models/huggingface_wav2vec.py | """This lobe enables the integration of huggingface pretrained wav2vec2/hubert/wavlm models.
Reference: https://arxiv.org/abs/2006.11477
Reference: https://arxiv.org/abs/1904.05862
Reference: https://arxiv.org/abs/2110.13900
Transformer from HuggingFace needs to be installed:
https://huggingface.co/transformers/installation.html
Authors
* Titouan Parcollet 2021
* Boumadane Abdelmoumene 2021
"""
import os
import torch
import logging
import pathlib
import numpy as np
import torch.nn.functional as F
from torch import nn
from huggingface_hub import model_info
from speechbrain.pretrained.fetching import fetch
from speechbrain.dataio.dataio import length_to_mask
# We check if transformers is installed.
try:
import transformers
from transformers import Wav2Vec2Model, HubertModel, WavLMModel
from transformers import Wav2Vec2Config, HubertConfig, WavLMConfig
from transformers import Wav2Vec2FeatureExtractor
from transformers import Wav2Vec2ForPreTraining
from transformers.models.wav2vec2.modeling_wav2vec2 import (
_compute_mask_indices,
)
except ImportError:
MSG = "Please install transformers from HuggingFace to use wav2vec2 / Hubert\n"
MSG += "E.G. run: pip install transformers"
raise ImportError(MSG)
logger = logging.getLogger(__name__)
HF_models = {
"wav2vec2": Wav2Vec2Model,
"hubert": HubertModel,
"wavlm": WavLMModel,
}
HF_config = {
"wav2vec2": Wav2Vec2Config,
"hubert": HubertConfig,
"wavlm": WavLMConfig,
}
class HuggingFaceWav2Vec2(nn.Module):
    """This lobe enables the integration of HuggingFace and SpeechBrain
    pretrained wav2vec2.0/Hubert models.

    Source paper wav2vec2.0: https://arxiv.org/abs/2006.11477
    Source paper Hubert: https://arxiv.org/abs/2106.07447
    Transformer from HuggingFace needs to be installed:
    https://huggingface.co/transformers/installation.html

    The model can be used as a fixed feature extractor or can be finetuned. It
    will download automatically the model from HuggingFace or use a local path.

    Arguments
    ---------
    source : str
        HuggingFace hub name: e.g "facebook/wav2vec2-large-lv60"
    save_path : str
        Path (dir) of the downloaded model.
    output_norm : bool (default: True)
        If True, a layer_norm (affine) will be applied to the output obtained
        from the wav2vec model.
    freeze : bool (default: True)
        If True, the model is frozen. If False, the model will be trained
        alongside with the rest of the pipeline.
    freeze_feature_extractor : bool (default: False)
        When freeze = False and freeze_feature_extractor True, the featue_extractor module of the model is Frozen. If False
        all the wav2vec model will be trained including featue_extractor module.
    apply_spec_augment : bool (default: False)
        If True, the model will apply spec augment on the output of feature extractor
        (inside huggingface Wav2VecModel() class).
        If False, the model will not apply spec augment. We set this to false to prevent from doing it twice.
    output_all_hiddens : bool (default: False)
        If True, the forward function outputs the hidden states from all transformer layers.
        For example wav2vec2-base has 12 transformer layers and the output is of shape (13, B, T, C),
        where a projection of the CNN output is added to the beginning.
        If False, the forward function outputs the hidden states only from the last transformer layer.

    Example
    -------
    >>> inputs = torch.rand([10, 600])
    >>> model_hub = "facebook/wav2vec2-base-960h"
    >>> save_path = "savedir"
    >>> model = HuggingFaceWav2Vec2(model_hub, save_path)
    >>> outputs = model(inputs)
    """

    def __init__(
        self,
        source,
        save_path,
        output_norm=False,
        freeze=False,
        freeze_feature_extractor=False,
        apply_spec_augment=False,
        output_all_hiddens=False,
    ):
        super().__init__()

        # Download the extractor from HuggingFace.
        # The extractor is only used to retrieve the normalisation information
        self.feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
            source, cache_dir=save_path
        )

        # Select specific self-supervised loader (eg. Wav2Vec2, Hubert).
        # NOTE: the architecture is guessed from the hub name via substring
        # match; a hub id not containing "hubert"/"wavlm" falls back to wav2vec2.
        if "hubert" in source:
            config = HF_config.get("hubert")
            model = HF_models.get("hubert")
        elif "wavlm" in source:
            config = HF_config.get("wavlm")
            model = HF_models.get("wavlm")
        else:
            config = HF_config.get("wav2vec2")
            model = HF_models.get("wav2vec2")

        # Download and load the model
        self._from_pretrained(
            source, config=config, model=model, save_path=save_path
        )
        self.model.config.apply_spec_augment = apply_spec_augment

        # We check if inputs need to be normalized w.r.t pretrained wav2vec2
        self.normalize_wav = self.feature_extractor.do_normalize

        self.freeze = freeze
        self.freeze_feature_extractor = freeze_feature_extractor
        self.output_norm = output_norm
        if self.freeze:
            logger.warning(
                "speechbrain.lobes.models.huggingface_wav2vec - wav2vec 2.0 is frozen."
            )
            # Frozen model: eval mode and no gradients for any parameter.
            self.model.eval()
            for param in self.model.parameters():
                param.requires_grad = False
        else:
            self.model.train()
            if self.freeze_feature_extractor:
                logger.warning(
                    "speechbrain.lobes.models.huggingface_wav2vec - wav2vec 2.0 feature extractor is frozen."
                )
                # Only the CNN front-end is frozen; transformer layers train.
                self.model.feature_extractor.eval()
                for param in self.model.feature_extractor.parameters():
                    param.requires_grad = False

        self.output_all_hiddens = output_all_hiddens

    def _from_pretrained(self, source, config, model, save_path):
        """This function manages the source checking and loading of the params.

        # 1. Is the model from HF or a local path
        # 2. Is the model pretrained with HF or SpeechBrain
        # 3. Download (if appropriate) and load with respect to 1. and 2.
        """
        is_sb, ckpt_file, is_local = self._check_model_source(source, save_path)
        if is_sb:
            # SpeechBrain checkpoint: build the architecture from the HF
            # config, then load SB-saved weights into it.
            config = config.from_pretrained(source, cache_dir=save_path)
            self.model = model(config)
            self.model.gradient_checkpointing_disable()  # Required by DDP
            # fetch the checkpoint file
            ckpt_full_path = fetch(
                filename=ckpt_file, source=source, savedir=save_path
            )
            # We transfer the parameters from the checkpoint.
            self._load_sb_pretrained_w2v2_parameters(ckpt_full_path)
        else:
            self.model = model.from_pretrained(
                source, cache_dir=save_path, local_files_only=is_local
            )

    def _load_sb_pretrained_w2v2_parameters(self, path):
        """Loads the parameter of a w2v2 model pretrained with SpeechBrain and the
        HuggingFaceWav2Vec2Pretrain Object. It is necessary to perform a custom
        loading because HuggingFace adds a level to the checkpoint when storing
        the model breaking the compatibility between HuggingFaceWav2Vec2Pretrain
        and HuggingFaceWav2Vec2.

        In practice a typical HuggingFaceWav2Vec2 checkpoint for a given parameter
        would be: model.conv.weight.data while for HuggingFaceWav2Vec2Pretrain it
        is: model.wav2vec2.weight.data (wav2vec2 must be removed before loading).
        """
        modified_state_dict = {}
        orig_state_dict = torch.load(path, map_location="cpu")

        # We remove the .wav2vec2 in the state dict.
        for key, params in orig_state_dict.items():
            if "wav2vec2." in key:
                save_key = key.replace("model.wav2vec2.", "")
                modified_state_dict[save_key] = params

        # strict=False: quantizer/projection heads from pretraining have no
        # counterpart in the fine-tuning model; mismatches are only logged.
        incompatible_keys = self.model.load_state_dict(
            modified_state_dict, strict=False
        )
        for missing_key in incompatible_keys.missing_keys:
            logger.warning(
                f"During parameter transfer to {self.model} loading from "
                + f"{path}, the transferred parameters did not have "
                + f"parameters for the key: {missing_key}"
            )
        for unexpected_key in incompatible_keys.unexpected_keys:
            logger.warning(
                f"The param with the key: {unexpected_key} is discarded as it "
                + "is useless for wav2vec 2.0 finetuning."
            )

    def _check_model_source(self, path, save_path):
        """Checks if the pretrained model has been trained with SpeechBrain and
        is hosted locally or on a HuggingFace hub.

        Called as static function in HuggingFaceTransformer._from_pretrained.

        Arguments
        ---------
        path : str
            Used as "source"; local path or HuggingFace hub name: e.g "facebook/wav2vec2-large-lv60"
        save_path : str
            norm_output (dir) of the downloaded model.

        Returns
        -------
        is_sb : bool
            Whether/not the model is deserializable w/ SpeechBrain or not (then, model conversion is needed).
        checkpoint_filename : str
            as of HuggingFace documentation: file name relative to the repo root (guaranteed to be here).
        """
        checkpoint_filename = ""
        source = pathlib.Path(path)
        is_local = True

        # If path is a huggingface hub.
        if not source.exists():
            is_local = False

        # Check if source is downloaded already
        # (HF cache layout: <save_path>/models--<org>--<name>/snapshots/<hash>)
        sink = pathlib.Path(
            save_path + "/models--" + path.replace("/", "--") + "/snapshots"
        )
        if sink.exists():
            sink = (
                sink / os.listdir(str(sink))[0]
            )  # there's a hash-id subfolder
            if any(
                File.endswith(".bin") or File.endswith(".ckpt")
                for File in os.listdir(str(sink))
            ):
                is_local = True
                local_path = str(sink)
            else:
                local_path = path
        else:
            local_path = path

        if is_local:
            # Test for HuggingFace model (.bin => HF-format weights)
            if any(File.endswith(".bin") for File in os.listdir(local_path)):
                is_sb = False
                return is_sb, checkpoint_filename, is_local

            # Test for SpeechBrain model and get the filename (.ckpt => SB).
            for File in os.listdir(local_path):
                if File.endswith(".ckpt"):
                    checkpoint_filename = os.path.join(path, File)
                    is_sb = True
                    return is_sb, checkpoint_filename, is_local
        else:
            files = model_info(
                path
            ).siblings  # get the list of files of the Hub

            # Test if it's an HuggingFace model or a SB one
            # (.ckpt is checked first: an SB repo may also ship .bin files).
            for File in files:
                if File.rfilename.endswith(".ckpt"):
                    checkpoint_filename = File.rfilename
                    is_sb = True
                    return is_sb, checkpoint_filename, is_local

            for File in files:
                if File.rfilename.endswith(".bin"):
                    checkpoint_filename = File.rfilename
                    is_sb = False
                    return is_sb, checkpoint_filename, is_local

        err_msg = f"{path} does not contain a .bin or .ckpt checkpoint !"
        raise FileNotFoundError(err_msg)

    def forward(self, wav, wav_lens=None):
        """Takes an input waveform and return its corresponding wav2vec encoding.

        Arguments
        ---------
        wav : torch.Tensor (signal)
            A batch of audio signals to transform to features.
        wav_lens : tensor
            The relative length of the wav given in SpeechBrain format.
        """
        # If we freeze, we simply remove all grads from the graph.
        if self.freeze:
            with torch.no_grad():
                return self.extract_features(wav, wav_lens)

        return self.extract_features(wav, wav_lens)

    def extract_features(self, wav, wav_lens=None):
        """Takes an input waveform and return its corresponding wav2vec encoding.

        Arguments
        ---------
        wav : torch.Tensor (signal)
            A batch of audio signals to transform to features.
        wav_lens : tensor
            The relative length of the wav given in SpeechBrain format.
        """
        padding_mask = self.make_masks(wav, wav_len=wav_lens)

        if self.normalize_wav:
            wav = F.layer_norm(wav, wav.shape[1:])

        # Extract wav2vec output
        out = self.model(
            wav,
            attention_mask=padding_mask,
            output_hidden_states=self.output_all_hiddens,
        )

        if self.output_all_hiddens:
            # Stack all layers: (num_layers + 1, B, T, C); index 0 is the
            # projected CNN output.
            out = torch.stack(list(out.hidden_states), dim=0)
            norm_shape = out.shape[-3:]
        else:
            out = out.last_hidden_state
            norm_shape = out.shape

        # We normalize the output if required
        if self.output_norm:
            out = F.layer_norm(out, norm_shape[1:])

        return out

    def make_masks(self, src, wav_len=None, pad_idx=0):
        """This method generates the padding masks.

        Arguments
        ---------
        src : tensor
            The sequence to the encoder (required).
        wav_len : tensor
            The relative length of the wav given in SpeechBrain format.
        pad_idx : int
            The index for <pad> token (default=0).
        """
        src_key_padding_mask = None
        if wav_len is not None:
            # Relative lengths -> absolute sample counts -> boolean mask.
            abs_len = torch.round(wav_len * src.shape[1])
            src_key_padding_mask = length_to_mask(abs_len).bool()

        return src_key_padding_mask
class HuggingFaceWav2Vec2Pretrain(nn.Module):
    """This lobe enables the integration of HuggingFace
    wav2vec2.0 models to be pretrained.

    Source paper: https://arxiv.org/abs/2006.11477
    Transformer from HuggingFace needs to be installed:
    https://huggingface.co/transformers/installation.html

    The return is an HuggingFace format and the mask indices that contains:
    https://huggingface.co/transformers/model_doc/wav2vec2.html#wav2vec2forpretraining

    For instance, it returns the loss that can be accessed with .loss

    Arguments
    ---------
    source : str
        HuggingFace hub name: e.g "facebook/wav2vec2-large-lv60"
    save_path : str
        Path (dir) of the downloaded model.
    mask_prob : float (default: 0.65)
        Probability of masking a given frame. Default is taken from the paper.
    mask_length : float (default: 10)
        Length (i.e. number of consecutive masked frames). Default is taken from
        the paper.
    normalize_wav : bool (default: True)
        If True, the input waveform is layer-normalized before the model.

    Example
    -------
    >>> inputs = torch.rand([10, 32000])
    >>> model_hub = "facebook/wav2vec2-base-960h"
    >>> save_path = "savedir"
    >>> model = HuggingFaceWav2Vec2Pretrain(model_hub, save_path)
    >>> outputs, _ = model(inputs)
    """

    def __init__(
        self,
        source,
        save_path,
        mask_prob=0.65,
        mask_length=10,
        normalize_wav=True,
    ):
        super().__init__()
        self.mask_prob = mask_prob
        self.mask_length = mask_length
        self.normalize_wav = normalize_wav

        # Download the config of the model from HuggingFace.
        # NOTE: only the config is fetched; the weights are freshly
        # initialized, as expected for pretraining from scratch.
        self.config = Wav2Vec2Config.from_pretrained(
            source, cache_dir=save_path
        )
        self.config.output_hidden_states = (
            True  # We want the hidden states as well!
        )

        self.model = Wav2Vec2ForPreTraining(self.config)
        self.model.gradient_checkpointing_disable()  # Required by DDP
        self.model.train()

        # We check if inputs need to be normalized w.r.t pretrained wav2vec2

    def forward(self, wav, wav_lens):
        """Takes an input waveform and return its corresponding wav2vec encoding.

        Arguments
        ---------
        wav : torch.Tensor (signal)
            A batch of audio signals to transform to features.
        wav_lens : tensor
            The relative length of the wav given in SpeechBrain format.
        """
        batch_size, raw_sequence_length = wav.shape

        if self.normalize_wav:
            wav = F.layer_norm(wav, wav.shape)

        # Number of frames after the CNN feature extractor downsampling.
        sequence_length = self.model._get_feat_extract_output_lengths(
            raw_sequence_length
        ).item()

        # 1. Compute the indices that will be masked
        mask_time_indices = _compute_mask_indices(
            (batch_size, sequence_length),
            mask_prob=self.mask_prob,
            mask_length=self.mask_length,
        )
        torch_mask_time_indices = torch.tensor(
            mask_time_indices, device=wav.device, dtype=torch.long,
        )

        padding_mask = self.make_padding_masks(wav, wav_len=wav_lens)

        # 2. Sample the negative samples from the entire sequence.
        # Fairseq does it only on the masked indices, but this only works if
        # you have long sentences. For more versatility, we sample on the
        # entire sequence.
        full_sentence_indices = np.ones((batch_size, sequence_length))

        # print(np.sum(mask_time_indices, axis=1))
        negative_sample_indices = torch.tensor(
            transformers.models.wav2vec2.modeling_wav2vec2._sample_negative_indices(
                (batch_size, sequence_length),
                num_negatives=self.config.num_negatives,
                mask_time_indices=full_sentence_indices,
            ),
            device=wav.device,
            dtype=torch.long,
        )

        return (
            self.model(
                wav,
                mask_time_indices=torch_mask_time_indices,
                sampled_negative_indices=negative_sample_indices,
                attention_mask=padding_mask,
            ),
            torch_mask_time_indices,
        )

    def make_padding_masks(self, src, wav_len=None, pad_idx=0):
        """This method generates the padding masks.

        Arguments
        ---------
        src : tensor
            The sequence to the encoder (required).
        wav_len : tensor
            The relative length of the wav given in SpeechBrain format.
        pad_idx : int
            The index for <pad> token (default=0).
        """
        src_key_padding_mask = None
        if wav_len is not None:
            # Relative lengths -> absolute sample counts -> boolean mask.
            abs_len = torch.round(wav_len * src.shape[1])
            src_key_padding_mask = length_to_mask(abs_len).bool()

        return src_key_padding_mask
| 18,749 | 36.055336 | 123 | py |
speechbrain | speechbrain-main/speechbrain/lobes/models/Cnn14.py | """ This file implements the CNN14 model from https://arxiv.org/abs/1912.10211
Authors
* Cem Subakan 2022
* Francesco Paissan 2022
"""
import torch.nn as nn
import torch.nn.functional as F
import torch
def init_layer(layer):
    """Initialize a Linear or Convolutional layer.

    The weight is Xavier-uniform initialized; the bias, when present,
    is zeroed.
    """
    nn.init.xavier_uniform_(layer.weight)

    bias = getattr(layer, "bias", None)
    if bias is not None:
        bias.data.fill_(0.0)
def init_bn(bn):
    """Initialize a Batchnorm layer.

    Sets the affine parameters to the identity transform
    (weight = 1, bias = 0).
    """
    bn.weight.data.fill_(1.0)
    bn.bias.data.fill_(0.0)
class ConvBlock(nn.Module):
    """This class implements the convolutional block used in CNN14.

    Arguments
    ---------
    in_channels : int
        Number of input channels
    out_channels : int
        Number of output channels
    norm_type : str in ['bn', 'in', 'ln']
        The type of normalization

    Example:
    --------
    >>> convblock = ConvBlock(10, 20, 'ln')
    >>> x = torch.rand(5, 10, 20, 30)
    >>> y = convblock(x)
    >>> print(y.shape)
    torch.Size([5, 20, 10, 15])
    """

    def __init__(self, in_channels, out_channels, norm_type):
        super(ConvBlock, self).__init__()

        def _conv3x3(cin, cout):
            # Shared 3x3 "same" convolution used by both layers of the block.
            return nn.Conv2d(
                in_channels=cin,
                out_channels=cout,
                kernel_size=(3, 3),
                stride=(1, 1),
                padding=(1, 1),
                bias=False,
            )

        def _make_norm():
            # Build a fresh normalization module of the requested flavour.
            if norm_type == "bn":
                return nn.BatchNorm2d(out_channels)
            if norm_type == "in":
                return nn.InstanceNorm2d(
                    out_channels, affine=True, track_running_stats=True
                )
            if norm_type == "ln":
                return nn.GroupNorm(1, out_channels)
            raise ValueError("Unknown norm type {}".format(norm_type))

        self.conv1 = _conv3x3(in_channels, out_channels)
        self.conv2 = _conv3x3(out_channels, out_channels)
        self.norm_type = norm_type
        self.norm1 = _make_norm()
        self.norm2 = _make_norm()

        self.init_weight()

    def init_weight(self):
        """Initializes the model convolutional layers and the batchnorm layers."""
        for conv in (self.conv1, self.conv2):
            init_layer(conv)
        for norm in (self.norm1, self.norm2):
            init_bn(norm)

    def forward(self, x, pool_size=(2, 2), pool_type="avg"):
        """The forward pass for convblocks in CNN14.

        Arguments
        ---------
        x : torch.Tensor
            input tensor with shape B x C_in x D1 x D2
            where B = Batchsize
                  C_in = Number of input channel
                  D1 = Dimensionality of the first spatial dim
                  D2 = Dimensionality of the second spatial dim
        pool_size : tuple with integer values
            Amount of pooling at each layer
        pool_type : str in ['max', 'avg', 'avg+max']
            The type of pooling
        """
        out = F.relu_(self.norm1(self.conv1(x)))
        out = F.relu_(self.norm2(self.conv2(out)))

        if pool_type == "max":
            return F.max_pool2d(out, kernel_size=pool_size)
        if pool_type == "avg":
            return F.avg_pool2d(out, kernel_size=pool_size)
        if pool_type == "avg+max":
            return F.avg_pool2d(out, kernel_size=pool_size) + F.max_pool2d(
                out, kernel_size=pool_size
            )
        raise Exception("Incorrect pooling type!")
class Cnn14(nn.Module):
    """This class implements the Cnn14 model from https://arxiv.org/abs/1912.10211

    Arguments
    ---------
    mel_bins : int
        Number of mel frequency bins in the input
    emb_dim : int
        The dimensionality of the output embeddings
    norm_type : str in ['bn', 'in', 'ln']
        The type of normalization
    return_reps : bool (default=False)
        If True the model returns intermediate representations as well for interpretation

    Example:
    --------
    >>> cnn14 = Cnn14(120, 256)
    >>> x = torch.rand(3, 400, 120)
    >>> h = cnn14.forward(x)
    >>> print(h.shape)
    torch.Size([3, 1, 256])
    """

    def __init__(self, mel_bins, emb_dim, norm_type="bn", return_reps=False):
        super(Cnn14, self).__init__()

        self.return_reps = return_reps
        self.norm_type = norm_type

        # Normalization applied over the mel-bin axis of the input.
        if norm_type == "bn":
            self.norm0 = nn.BatchNorm2d(mel_bins)
        elif norm_type == "in":
            self.norm0 = nn.InstanceNorm2d(
                mel_bins, affine=True, track_running_stats=True
            )
        elif norm_type == "ln":
            self.norm0 = nn.GroupNorm(1, mel_bins)
        else:
            raise ValueError("Unknown norm type {}".format(norm_type))

        # Channel progression of the six convolutional stages.
        channel_plan = [1, 64, 128, 256, 512, 1024, emb_dim]
        for idx, (cin, cout) in enumerate(
            zip(channel_plan[:-1], channel_plan[1:]), start=1
        ):
            setattr(
                self,
                "conv_block{}".format(idx),
                ConvBlock(
                    in_channels=cin, out_channels=cout, norm_type=norm_type
                ),
            )

        self.init_weight()

    def init_weight(self):
        """Initializes the model batch norm layer."""
        init_bn(self.norm0)

    def forward(self, x):
        """The forward pass for the CNN14 encoder.

        Arguments
        ---------
        x : torch.Tensor
            input tensor with shape B x C_in x D1 x D2
            where B = Batchsize
                  C_in = Number of input channel
                  D1 = Dimensionality of the first spatial dim
                  D2 = Dimensionality of the second spatial dim
        """
        def _drop(t):
            # Shared dropout applied after every convolutional stage.
            return F.dropout(t, p=0.2, training=self.training)

        if x.dim() == 3:
            x = x.unsqueeze(1)

        # norm0 operates over the mel axis, hence the transposes around it.
        x = self.norm0(x.transpose(1, 3)).transpose(1, 3)

        x = _drop(self.conv_block1(x, pool_size=(2, 2), pool_type="avg"))
        x = _drop(self.conv_block2(x, pool_size=(2, 2), pool_type="avg"))
        x = _drop(self.conv_block3(x, pool_size=(2, 2), pool_type="avg"))

        # Keep the outputs of the last three stages as interpretable reps.
        x3_out = self.conv_block4(x, pool_size=(2, 2), pool_type="avg")
        x2_out = self.conv_block5(
            _drop(x3_out), pool_size=(2, 2), pool_type="avg"
        )
        x1_out = self.conv_block6(
            _drop(x2_out), pool_size=(1, 1), pool_type="avg"
        )
        x = _drop(x1_out)

        # Global pooling: mean over one spatial axis, then max + mean over
        # the other, producing a single embedding per example.
        x = torch.mean(x, dim=3)
        pooled = torch.max(x, dim=2)[0] + torch.mean(x, dim=2)

        # [B x 1 x emb_dim]
        if not self.return_reps:
            return pooled.unsqueeze(1)
        return pooled.unsqueeze(1), (x1_out, x2_out, x3_out)
| 7,429 | 30.483051 | 89 | py |
speechbrain | speechbrain-main/speechbrain/lobes/models/ECAPA_TDNN.py | """A popular speaker recognition and diarization model.
Authors
* Hwidong Na 2020
"""
# import os
import torch # noqa: F401
import torch.nn as nn
import torch.nn.functional as F
from speechbrain.dataio.dataio import length_to_mask
from speechbrain.nnet.CNN import Conv1d as _Conv1d
from speechbrain.nnet.normalization import BatchNorm1d as _BatchNorm1d
from speechbrain.nnet.linear import Linear
# Skip transpose as much as possible for efficiency
class Conv1d(_Conv1d):
    """1D convolution. Skip transpose is used to improve efficiency."""

    def __init__(self, *args, **kwargs):
        # Force skip_transpose=True so the surrounding blocks can work in
        # channel-first layout without repeated transposes.
        super().__init__(skip_transpose=True, *args, **kwargs)
class BatchNorm1d(_BatchNorm1d):
    """1D batch normalization. Skip transpose is used to improve efficiency."""

    def __init__(self, *args, **kwargs):
        # Force skip_transpose=True to match the channel-first Conv1d above.
        super().__init__(skip_transpose=True, *args, **kwargs)
class TDNNBlock(nn.Module):
    """An implementation of TDNN: a dilated 1D convolution followed by an
    activation and batch normalization.

    Arguments
    ---------
    in_channels : int
        Number of input channels.
    out_channels : int
        The number of output channels.
    kernel_size : int
        The kernel size of the TDNN blocks.
    dilation : int
        The dilation of the TDNN block.
    activation : torch class
        A class for constructing the activation layers.
    groups : int
        The groups size of the TDNN blocks.

    Example
    -------
    >>> inp_tensor = torch.rand([8, 120, 64]).transpose(1, 2)
    >>> layer = TDNNBlock(64, 64, kernel_size=3, dilation=1)
    >>> out_tensor = layer(inp_tensor).transpose(1, 2)
    >>> out_tensor.shape
    torch.Size([8, 120, 64])
    """

    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_size,
        dilation,
        activation=nn.ReLU,
        groups=1,
    ):
        super(TDNNBlock, self).__init__()
        self.conv = Conv1d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            dilation=dilation,
            groups=groups,
        )
        self.activation = activation()
        self.norm = BatchNorm1d(input_size=out_channels)

    def forward(self, x):
        """Processes the input tensor x and returns an output tensor."""
        conv_out = self.conv(x)
        activated = self.activation(conv_out)
        return self.norm(activated)
class Res2NetBlock(torch.nn.Module):
    """An implementation of Res2NetBlock w/ dilation.

    The input is split into ``scale`` channel groups; each group (except the
    first) is processed by a TDNN branch that also receives the previous
    branch's output, and the results are concatenated back together.

    Arguments
    ---------
    in_channels : int
        The number of channels expected in the input.
    out_channels : int
        The number of output channels.
    scale : int
        The scale of the Res2Net block.
    kernel_size : int
        The kernel size of the Res2Net block.
    dilation : int
        The dilation of the Res2Net block.

    Example
    -------
    >>> inp_tensor = torch.rand([8, 120, 64]).transpose(1, 2)
    >>> layer = Res2NetBlock(64, 64, scale=4, dilation=3)
    >>> out_tensor = layer(inp_tensor).transpose(1, 2)
    >>> out_tensor.shape
    torch.Size([8, 120, 64])
    """

    def __init__(
        self, in_channels, out_channels, scale=8, kernel_size=3, dilation=1
    ):
        super(Res2NetBlock, self).__init__()
        assert in_channels % scale == 0
        assert out_channels % scale == 0

        branch_in = in_channels // scale
        branch_out = out_channels // scale
        # scale - 1 branches: the first channel group is passed through as-is.
        self.blocks = nn.ModuleList(
            [
                TDNNBlock(
                    branch_in,
                    branch_out,
                    kernel_size=kernel_size,
                    dilation=dilation,
                )
                for _ in range(scale - 1)
            ]
        )
        self.scale = scale

    def forward(self, x):
        """Processes the input tensor x and returns an output tensor."""
        chunks = torch.chunk(x, self.scale, dim=1)
        outputs = [chunks[0]]
        prev = None
        for idx in range(1, self.scale):
            branch = self.blocks[idx - 1]
            if idx == 1:
                prev = branch(chunks[idx])
            else:
                # Hierarchical connection: feed the previous branch output in.
                prev = branch(chunks[idx] + prev)
            outputs.append(prev)
        return torch.cat(outputs, dim=1)
class SEBlock(nn.Module):
    """Squeeze-and-excitation block: rescale channels by a learned gate.
    Arguments
    ---------
    in_channels : int
        The number of input channels.
    se_channels : int
        The number of output channels after squeeze.
    out_channels : int
        The number of output channels.
    Example
    -------
    >>> inp_tensor = torch.rand([8, 120, 64]).transpose(1, 2)
    >>> se_layer = SEBlock(64, 16, 64)
    >>> lengths = torch.rand((8,))
    >>> out_tensor = se_layer(inp_tensor, lengths).transpose(1, 2)
    >>> out_tensor.shape
    torch.Size([8, 120, 64])
    """

    def __init__(self, in_channels, se_channels, out_channels):
        super().__init__()
        # Squeeze (bottleneck conv) -> ReLU -> excite (conv back) -> sigmoid.
        self.conv1 = Conv1d(
            in_channels=in_channels, out_channels=se_channels, kernel_size=1
        )
        self.relu = torch.nn.ReLU(inplace=True)
        self.conv2 = Conv1d(
            in_channels=se_channels, out_channels=out_channels, kernel_size=1
        )
        self.sigmoid = torch.nn.Sigmoid()

    def forward(self, x, lengths=None):
        """Processes the input tensor x and returns an output tensor."""
        n_frames = x.shape[-1]
        if lengths is None:
            # No padding info: plain temporal mean.
            context = x.mean(dim=2, keepdim=True)
        else:
            # Masked mean that ignores zero-padded frames.
            mask = length_to_mask(
                lengths * n_frames, max_len=n_frames, device=x.device
            )
            mask = mask.unsqueeze(1)
            denom = mask.sum(dim=2, keepdim=True)
            context = (x * mask).sum(dim=2, keepdim=True) / denom
        gate = self.relu(self.conv1(context))
        gate = self.sigmoid(self.conv2(gate))
        return gate * x
class AttentiveStatisticsPooling(nn.Module):
    """Attentive statistics pooling over the time axis.

    Returns the attention-weighted mean and standard deviation of the
    input, concatenated along the channel axis.
    Arguments
    ---------
    channels: int
        The number of input channels.
    attention_channels: int
        The number of attention channels.
    Example
    -------
    >>> inp_tensor = torch.rand([8, 120, 64]).transpose(1, 2)
    >>> asp_layer = AttentiveStatisticsPooling(64)
    >>> lengths = torch.rand((8,))
    >>> out_tensor = asp_layer(inp_tensor, lengths).transpose(1, 2)
    >>> out_tensor.shape
    torch.Size([8, 1, 128])
    """

    def __init__(self, channels, attention_channels=128, global_context=True):
        super().__init__()
        self.eps = 1e-12
        self.global_context = global_context
        # With global context the attention input is [x; mean; std],
        # i.e. three times the channel count.
        in_ch = channels * 3 if global_context else channels
        self.tdnn = TDNNBlock(in_ch, attention_channels, 1, 1)
        self.tanh = nn.Tanh()
        self.conv = Conv1d(
            in_channels=attention_channels,
            out_channels=channels,
            kernel_size=1,
        )

    def forward(self, x, lengths=None):
        """Calculates mean and std for a batch (input tensor).
        Arguments
        ---------
        x : torch.Tensor
            Tensor of shape [N, C, L].
        """
        n_frames = x.shape[-1]

        def _weighted_stats(values, weights, dim=2):
            # Weighted mean/std along `dim`; clamping keeps sqrt stable.
            mu = (weights * values).sum(dim)
            var = (weights * (values - mu.unsqueeze(dim)).pow(2)).sum(dim)
            return mu, torch.sqrt(var.clamp(self.eps))

        if lengths is None:
            lengths = torch.ones(x.shape[0], device=x.device)
        # Binary padding mask of shape [N, 1, L].
        mask = length_to_mask(
            lengths * n_frames, max_len=n_frames, device=x.device
        )
        mask = mask.unsqueeze(1)
        if self.global_context:
            # Feed per-utterance mean/std alongside x so the attention can
            # see global properties. torch.std is avoided because its
            # backward is unstable, see
            # https://github.com/pytorch/pytorch/issues/4320
            denom = mask.sum(dim=2, keepdim=True).float()
            glob_mean, glob_std = _weighted_stats(x, mask / denom)
            attn = torch.cat(
                [
                    x,
                    glob_mean.unsqueeze(2).repeat(1, 1, n_frames),
                    glob_std.unsqueeze(2).repeat(1, 1, n_frames),
                ],
                dim=1,
            )
        else:
            attn = x
        attn = self.conv(self.tanh(self.tdnn(attn)))
        # Padded frames get -inf so softmax assigns them zero weight.
        attn = attn.masked_fill(mask == 0, float("-inf"))
        attn = F.softmax(attn, dim=2)
        mean, std = _weighted_stats(x, attn)
        # Concatenate the statistics and restore a singleton time axis.
        return torch.cat((mean, std), dim=1).unsqueeze(2)
class SERes2NetBlock(nn.Module):
    """ECAPA-TDNN building block: TDNN -> Res2Net -> TDNN -> SE, with a
    residual connection around the whole stack.
    Arguments
    ---------
    in_channels : int
        The number of input channels.
    out_channels: int
        The number of output channels.
    res2net_scale: int
        The scale of the Res2Net block.
    se_channels : int
        The number of bottleneck channels in the SE block.
    kernel_size: int
        The kernel size of the TDNN blocks.
    dilation: int
        The dilation of the Res2Net block.
    activation : torch class
        A class for constructing the activation layers.
    groups: int
        Number of blocked connections from input channels to output channels.
    Example
    -------
    >>> x = torch.rand(8, 120, 64).transpose(1, 2)
    >>> conv = SERes2NetBlock(64, 64, res2net_scale=4)
    >>> out = conv(x).transpose(1, 2)
    >>> out.shape
    torch.Size([8, 120, 64])
    """

    def __init__(
        self,
        in_channels,
        out_channels,
        res2net_scale=8,
        se_channels=128,
        kernel_size=1,
        dilation=1,
        activation=torch.nn.ReLU,
        groups=1,
    ):
        super().__init__()
        self.out_channels = out_channels
        self.tdnn1 = TDNNBlock(
            in_channels,
            out_channels,
            kernel_size=1,
            dilation=1,
            activation=activation,
            groups=groups,
        )
        self.res2net_block = Res2NetBlock(
            out_channels, out_channels, res2net_scale, kernel_size, dilation
        )
        self.tdnn2 = TDNNBlock(
            out_channels,
            out_channels,
            kernel_size=1,
            dilation=1,
            activation=activation,
            groups=groups,
        )
        self.se_block = SEBlock(out_channels, se_channels, out_channels)
        # 1x1 conv to match channel counts on the residual path.
        self.shortcut = None
        if in_channels != out_channels:
            self.shortcut = Conv1d(
                in_channels=in_channels,
                out_channels=out_channels,
                kernel_size=1,
            )

    def forward(self, x, lengths=None):
        """Processes the input tensor x and returns an output tensor."""
        residual = self.shortcut(x) if self.shortcut else x
        out = self.tdnn1(x)
        out = self.res2net_block(out)
        out = self.tdnn2(out)
        out = self.se_block(out, lengths)
        return out + residual
class ECAPA_TDNN(torch.nn.Module):
    """An implementation of the speaker embedding model in a paper.
    "ECAPA-TDNN: Emphasized Channel Attention, Propagation and Aggregation in
    TDNN Based Speaker Verification" (https://arxiv.org/abs/2005.07143).
    Arguments
    ---------
    input_size : int
        Expected size of the input features (last dimension).
    device : str
        Device used, e.g., "cpu" or "cuda".
    activation : torch class
        A class for constructing the activation layers.
    channels : list of ints
        Output channels for TDNN/SERes2Net layer.
    kernel_sizes : list of ints
        List of kernel sizes for each layer.
    dilations : list of ints
        List of dilations for kernels in each layer.
    lin_neurons : int
        Number of neurons in linear layers.
    attention_channels : int
        Number of channels in the attentive statistics pooling.
    res2net_scale : int
        Scale of the Res2Net blocks.
    se_channels : int
        Bottleneck size of the squeeze-and-excitation blocks.
    global_context : bool
        Whether the pooling attention sees utterance-level statistics.
    groups : list of ints
        List of groups for kernels in each layer.
    Example
    -------
    >>> input_feats = torch.rand([5, 120, 80])
    >>> compute_embedding = ECAPA_TDNN(80, lin_neurons=192)
    >>> outputs = compute_embedding(input_feats)
    >>> outputs.shape
    torch.Size([5, 1, 192])
    """
    def __init__(
        self,
        input_size,
        device="cpu",
        lin_neurons=192,
        activation=torch.nn.ReLU,
        channels=[512, 512, 512, 512, 1536],
        kernel_sizes=[5, 3, 3, 3, 1],
        dilations=[1, 2, 3, 4, 1],
        attention_channels=128,
        res2net_scale=8,
        se_channels=128,
        global_context=True,
        groups=[1, 1, 1, 1, 1],
    ):
        super().__init__()
        # The per-layer configuration lists must line up one-to-one.
        assert len(channels) == len(kernel_sizes)
        assert len(channels) == len(dilations)
        self.channels = channels
        self.blocks = nn.ModuleList()
        # The initial TDNN layer
        self.blocks.append(
            TDNNBlock(
                input_size,
                channels[0],
                kernel_sizes[0],
                dilations[0],
                activation,
                groups[0],
            )
        )
        # SE-Res2Net layers
        for i in range(1, len(channels) - 1):
            self.blocks.append(
                SERes2NetBlock(
                    channels[i - 1],
                    channels[i],
                    res2net_scale=res2net_scale,
                    se_channels=se_channels,
                    kernel_size=kernel_sizes[i],
                    dilation=dilations[i],
                    activation=activation,
                    groups=groups[i],
                )
            )
        # Multi-layer feature aggregation
        # NOTE(review): channels[-1] is expected to equal the sum of the
        # SE-Res2Net output channels (default 3 * 512 = 1536), because
        # forward() concatenates those outputs before this layer — confirm
        # when passing custom `channels`.
        self.mfa = TDNNBlock(
            channels[-1],
            channels[-1],
            kernel_sizes[-1],
            dilations[-1],
            activation,
            groups=groups[-1],
        )
        # Attentive Statistical Pooling
        self.asp = AttentiveStatisticsPooling(
            channels[-1],
            attention_channels=attention_channels,
            global_context=global_context,
        )
        self.asp_bn = BatchNorm1d(input_size=channels[-1] * 2)
        # Final linear transformation
        self.fc = Conv1d(
            in_channels=channels[-1] * 2,
            out_channels=lin_neurons,
            kernel_size=1,
        )
    def forward(self, x, lengths=None):
        """Returns the embedding vector.
        Arguments
        ---------
        x : torch.Tensor
            Tensor of shape (batch, time, channel).
        lengths : torch.Tensor
            Relative lengths of the batch items (for masking padded frames).
        """
        # Minimize transpose for efficiency
        x = x.transpose(1, 2)
        xl = []
        for layer in self.blocks:
            # Some blocks accept a `lengths` kwarg (SERes2NetBlock) while
            # others do not (TDNNBlock); fall back when it is rejected.
            try:
                x = layer(x, lengths=lengths)
            except TypeError:
                x = layer(x)
            xl.append(x)
        # Multi-layer feature aggregation
        # (the initial TDNN output xl[0] is intentionally excluded)
        x = torch.cat(xl[1:], dim=1)
        x = self.mfa(x)
        # Attentive Statistical Pooling
        x = self.asp(x, lengths=lengths)
        x = self.asp_bn(x)
        # Final linear transformation
        x = self.fc(x)
        x = x.transpose(1, 2)
        return x
class Classifier(torch.nn.Module):
    """Cosine-similarity classification head on top of speaker embeddings.

    Optional BatchNorm+Linear blocks are followed by a cosine similarity
    between the L2-normalized input and L2-normalized class weights, so
    every output score lies in [-1, 1].
    Arguments
    ---------
    input_size : int
        Dimensionality of the input features.
    device : str
        Device used, e.g., "cpu" or "cuda".
    lin_blocks : int
        Number of linear layers.
    lin_neurons : int
        Number of neurons in linear layers.
    out_neurons : int
        Number of classes.
    Example
    -------
    >>> classify = Classifier(input_size=2, lin_neurons=2, out_neurons=2)
    >>> outputs = torch.tensor([ [1., -1.], [-9., 1.], [0.9, 0.1], [0.1, 0.9] ])
    >>> outputs = outputs.unsqueeze(1)
    >>> cos = classify(outputs)
    >>> (cos < -1.0).long().sum()
    tensor(0)
    >>> (cos > 1.0).long().sum()
    tensor(0)
    """
    def __init__(
        self,
        input_size,
        device="cpu",
        lin_blocks=0,
        lin_neurons=192,
        out_neurons=1211,
    ):
        super().__init__()
        self.blocks = nn.ModuleList()
        for block_index in range(lin_blocks):
            self.blocks.extend(
                [
                    _BatchNorm1d(input_size=input_size),
                    Linear(input_size=input_size, n_neurons=lin_neurons),
                ]
            )
            input_size = lin_neurons
        # Final layer: class weight matrix (out_neurons, input_size).
        # torch.empty replaces the legacy torch.FloatTensor(...) constructor,
        # which does not accept non-CPU `device` arguments and is deprecated;
        # the uninitialized values are overwritten by Xavier init below.
        self.weight = nn.Parameter(
            torch.empty(out_neurons, input_size, device=device)
        )
        nn.init.xavier_uniform_(self.weight)
    def forward(self, x):
        """Returns the cosine-similarity scores over speakers.
        Arguments
        ---------
        x : torch.Tensor
            Torch tensor, typically of shape (batch, 1, input_size).
        """
        for layer in self.blocks:
            x = layer(x)
        # Both embeddings and class weights are L2-normalized, so the
        # linear product is a cosine similarity bounded in [-1, 1].
        x = F.linear(F.normalize(x.squeeze(1)), F.normalize(self.weight))
        return x.unsqueeze(1)
| 16,703 | 28.050435 | 83 | py |
speechbrain | speechbrain-main/speechbrain/lobes/models/VanillaNN.py | """Vanilla Neural Network for simple tests.
Authors
* Elena Rastorgueva 2020
"""
import torch
import speechbrain as sb
class VanillaNN(sb.nnet.containers.Sequential):
    """A plain feed-forward network: a stack of Linear + activation blocks.
    Arguments
    ---------
    input_shape : tuple
        Expected shape of the input tensor.
    activation : torch class
        A class used for constructing the activation layers.
    dnn_blocks : int
        The number of linear neural blocks to include.
    dnn_neurons : int
        The number of neurons in the linear layers.
    Example
    -------
    >>> inputs = torch.rand([10, 120, 60])
    >>> model = VanillaNN(input_shape=inputs.shape)
    >>> outputs = model(inputs)
    >>> outputs.shape
    torch.Size([10, 120, 512])
    """

    def __init__(
        self,
        input_shape,
        activation=torch.nn.LeakyReLU,
        dnn_blocks=2,
        dnn_neurons=512,
    ):
        super().__init__(input_shape=input_shape)
        # Each block is a fully-connected layer followed by its activation.
        for _ in range(dnn_blocks):
            self.append(
                sb.nnet.linear.Linear,
                n_neurons=dnn_neurons,
                bias=True,
                layer_name="linear",
            )
            self.append(activation(), layer_name="act")
| 1,178 | 23.5625 | 60 | py |
speechbrain | speechbrain-main/speechbrain/lobes/models/CRDNN.py | """A combination of Convolutional, Recurrent, and Fully-connected networks.
Authors
* Mirco Ravanelli 2020
* Peter Plantinga 2020
* Ju-Chieh Chou 2020
* Titouan Parcollet 2020
* Abdel 2020
"""
import torch
import speechbrain as sb
class CRDNN(sb.nnet.containers.Sequential):
    """This model is a combination of CNNs, RNNs, and DNNs.
    This model expects 3-dimensional input [batch, time, feats] and
    by default produces output of the size [batch, time, dnn_neurons].
    One exception is if ``using_2d_pooling`` or ``time_pooling`` is True.
    In this case, the time dimension will be downsampled.
    Arguments
    ---------
    input_size : int
        The length of the expected input at the third dimension.
    input_shape : tuple
        While input_size will suffice, this option can allow putting
        CRDNN into a sequential with other classes.
    activation : torch class
        A class used for constructing the activation layers for CNN and DNN.
    dropout : float
        Neuron dropout rate as applied to CNN, RNN, and DNN.
    cnn_blocks : int
        The number of convolutional neural blocks to include.
    cnn_channels : list of ints
        A list of the number of output channels for each CNN block.
    cnn_kernelsize : tuple of ints
        The size of the convolutional kernels.
    time_pooling : bool
        Whether to pool the utterance on the time axis before the RNN.
    time_pooling_size : int
        The number of elements to pool on the time axis.
    time_pooling_stride : int
        The number of elements to increment by when iterating the time axis.
    using_2d_pooling: bool
        Whether using a 2D or 1D pooling after each CNN block.
    inter_layer_pooling_size : list of ints
        A list of the pooling sizes for each CNN block.
    rnn_class : torch class
        The type of RNN to use in CRDNN network (LiGRU, LSTM, GRU, RNN)
    rnn_layers : int
        The number of recurrent RNN layers to include.
    rnn_neurons : int
        Number of neurons in each layer of the RNN.
    rnn_bidirectional : bool
        Whether this model will process just forward or in both directions.
    rnn_re_init : bool,
        If True, an orthogonal initialization will be applied to the recurrent
        weights.
    dnn_blocks : int
        The number of linear neural blocks to include.
    dnn_neurons : int
        The number of neurons in the linear layers.
    use_rnnp: bool
        If True, a linear projection layer is added between RNN layers.
    projection_dim : int
        The number of neurons in the projection layer.
        This layer is used to reduce the size of the flattened
        representation obtained after the CNN blocks.
    Example
    -------
    >>> inputs = torch.rand([10, 15, 60])
    >>> model = CRDNN(input_shape=inputs.shape)
    >>> outputs = model(inputs)
    >>> outputs.shape
    torch.Size([10, 15, 512])
    """
    def __init__(
        self,
        input_size=None,
        input_shape=None,
        activation=torch.nn.LeakyReLU,
        dropout=0.15,
        cnn_blocks=2,
        cnn_channels=[128, 256],
        cnn_kernelsize=(3, 3),
        time_pooling=False,
        time_pooling_size=2,
        # NOTE(review): freq_pooling_size is accepted but never used below.
        freq_pooling_size=2,
        rnn_class=sb.nnet.RNN.LiGRU,
        inter_layer_pooling_size=[2, 2],
        using_2d_pooling=False,
        rnn_layers=4,
        rnn_neurons=512,
        rnn_bidirectional=True,
        rnn_re_init=False,
        dnn_blocks=2,
        dnn_neurons=512,
        projection_dim=-1,
        use_rnnp=False,
    ):
        if input_size is None and input_shape is None:
            raise ValueError("Must specify one of input_size or input_shape")
        if input_shape is None:
            # Batch and time dimensions are unknown at construction time.
            input_shape = [None, None, input_size]
        super().__init__(input_shape=input_shape)
        # Stack of VGG-style CNN blocks (only when cnn_blocks > 0).
        if cnn_blocks > 0:
            self.append(sb.nnet.containers.Sequential, layer_name="CNN")
        for block_index in range(cnn_blocks):
            self.CNN.append(
                CNN_Block,
                channels=cnn_channels[block_index],
                kernel_size=cnn_kernelsize,
                using_2d_pool=using_2d_pooling,
                pooling_size=inter_layer_pooling_size[block_index],
                activation=activation,
                dropout=dropout,
                layer_name=f"block_{block_index}",
            )
        # Optional downsampling of the time axis before the RNN.
        if time_pooling:
            self.append(
                sb.nnet.pooling.Pooling1d(
                    pool_type="max",
                    input_dims=4,
                    kernel_size=time_pooling_size,
                    pool_axis=1,
                ),
                layer_name="time_pooling",
            )
        # This projection helps reducing the number of parameters
        # when using large number of CNN filters.
        # Large numbers of CNN filters + large features
        # often lead to very large flattened layers.
        # This layer projects it back to something reasonable.
        if projection_dim != -1:
            self.append(sb.nnet.containers.Sequential, layer_name="projection")
            self.projection.append(
                sb.nnet.linear.Linear,
                n_neurons=projection_dim,
                bias=True,
                combine_dims=True,
                layer_name="linear",
            )
            self.projection.append(
                sb.nnet.normalization.LayerNorm, layer_name="norm"
            )
            self.projection.append(activation(), layer_name="act")
        if rnn_layers > 0:
            if use_rnnp:
                # Interleave single-layer RNNs with linear projections
                # and dropout (RNN-P architecture).
                self.append(sb.nnet.containers.Sequential, layer_name="RNN")
                for _ in range(rnn_layers):
                    self.append(
                        rnn_class,
                        hidden_size=rnn_neurons,
                        num_layers=1,
                        bidirectional=rnn_bidirectional,
                        re_init=rnn_re_init,
                    )
                    self.append(
                        sb.nnet.linear.Linear,
                        n_neurons=dnn_neurons,
                        bias=True,
                        combine_dims=True,
                    )
                    self.append(torch.nn.Dropout(p=dropout))
            else:
                # Single multi-layer RNN module.
                self.append(
                    rnn_class,
                    layer_name="RNN",
                    hidden_size=rnn_neurons,
                    num_layers=rnn_layers,
                    dropout=dropout,
                    bidirectional=rnn_bidirectional,
                    re_init=rnn_re_init,
                )
        # Fully-connected blocks on top of the recurrent output.
        if dnn_blocks > 0:
            self.append(sb.nnet.containers.Sequential, layer_name="DNN")
        for block_index in range(dnn_blocks):
            self.DNN.append(
                DNN_Block,
                neurons=dnn_neurons,
                activation=activation,
                dropout=dropout,
                layer_name=f"block_{block_index}",
            )
class CNN_Block(sb.nnet.containers.Sequential):
    """VGG-style convolutional block: two Conv2d + LayerNorm + activation
    stages followed by max pooling and 2D dropout.
    Arguments
    ---------
    input_shape : tuple
        Expected shape of the input.
    channels : int
        Number of convolutional channels for the block.
    kernel_size : tuple
        Size of the 2d convolutional kernel
    activation : torch.nn.Module class
        A class to be used for instantiating an activation layer.
    using_2d_pool : bool
        Whether to use 2d pooling or only 1d pooling.
    pooling_size : int
        Size of pooling kernel, duplicated for 2d pooling.
    dropout : float
        Rate to use for dropping channels.
    Example
    -------
    >>> inputs = torch.rand(10, 15, 60)
    >>> block = CNN_Block(input_shape=inputs.shape, channels=32)
    >>> outputs = block(inputs)
    >>> outputs.shape
    torch.Size([10, 15, 30, 32])
    """

    def __init__(
        self,
        input_shape,
        channels,
        kernel_size=[3, 3],
        activation=torch.nn.LeakyReLU,
        using_2d_pool=False,
        pooling_size=2,
        dropout=0.15,
    ):
        super().__init__(input_shape=input_shape)
        # Two identical conv -> norm -> activation stages.
        for stage in (1, 2):
            self.append(
                sb.nnet.CNN.Conv2d,
                out_channels=channels,
                kernel_size=kernel_size,
                layer_name=f"conv_{stage}",
            )
            self.append(
                sb.nnet.normalization.LayerNorm, layer_name=f"norm_{stage}"
            )
            self.append(activation(), layer_name=f"act_{stage}")
        # Pool either both spatial axes (2D) or the frequency axis only (1D).
        if using_2d_pool:
            pool = sb.nnet.pooling.Pooling2d(
                pool_type="max",
                kernel_size=(pooling_size, pooling_size),
                pool_axis=(1, 2),
            )
        else:
            pool = sb.nnet.pooling.Pooling1d(
                pool_type="max",
                input_dims=4,
                kernel_size=pooling_size,
                pool_axis=2,
            )
        self.append(pool, layer_name="pooling")
        self.append(
            sb.nnet.dropout.Dropout2d(drop_rate=dropout), layer_name="drop"
        )
class DNN_Block(sb.nnet.containers.Sequential):
    """Fully-connected block: Linear -> BatchNorm -> activation -> dropout.
    Arguments
    ---------
    input_shape : tuple
        Expected shape of the input.
    neurons : int
        Size of the linear layers.
    activation : torch.nn.Module class
        Class definition to use for constructing activation layers.
    dropout : float
        Rate to use for dropping neurons.
    Example
    -------
    >>> inputs = torch.rand(10, 15, 128)
    >>> block = DNN_Block(input_shape=inputs.shape, neurons=64)
    >>> outputs = block(inputs)
    >>> outputs.shape
    torch.Size([10, 15, 64])
    """

    def __init__(
        self, input_shape, neurons, activation=torch.nn.LeakyReLU, dropout=0.15
    ):
        super().__init__(input_shape=input_shape)
        # (layer, extra kwargs, layer name) — appended in order.
        stages = [
            (sb.nnet.linear.Linear, {"n_neurons": neurons}, "linear"),
            (sb.nnet.normalization.BatchNorm1d, {}, "norm"),
            (activation(), {}, "act"),
            (torch.nn.Dropout(p=dropout), {}, "dropout"),
        ]
        for layer, extra, name in stages:
            self.append(layer, layer_name=name, **extra)
| 10,521 | 32.724359 | 79 | py |
speechbrain | speechbrain-main/speechbrain/lobes/models/HifiGAN.py | """
Neural network modules for the HiFi-GAN: Generative Adversarial Networks for
Efficient and High Fidelity Speech Synthesis
For more details: https://arxiv.org/pdf/2010.05646.pdf
Authors
* Duret Jarod 2021
* Yingzhi WANG 2022
"""
# Adapted from https://github.com/jik876/hifi-gan/ and https://github.com/coqui-ai/TTS/
# MIT License
# Copyright (c) 2020 Jungil Kong
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import torch
import torch.nn.functional as F
import torch.nn as nn
from speechbrain.nnet.CNN import Conv1d, ConvTranspose1d, Conv2d
from torchaudio import transforms
# Negative-slope coefficient shared by the leaky-ReLU activations below.
LRELU_SLOPE = 0.1
def dynamic_range_compression(x, C=1, clip_val=1e-5):
    """Apply dynamic range compression to an audio signal.

    Values are clamped from below at ``clip_val`` (so the log is finite),
    scaled by ``C`` and log-compressed.
    """
    clamped = x.clamp(min=clip_val)
    return (clamped * C).log()
def mel_spectogram(
    sample_rate,
    hop_length,
    win_length,
    n_fft,
    n_mels,
    f_min,
    f_max,
    power,
    normalized,
    norm,
    mel_scale,
    compression,
    audio,
):
    """Compute the (optionally log-compressed) mel spectrogram of a waveform.
    Arguments
    ---------
    sample_rate : int
        Sample rate of audio signal.
    hop_length : int
        Length of hop between STFT windows.
    win_length : int
        Window size.
    n_fft : int
        Size of FFT.
    n_mels : int
        Number of mel filterbanks.
    f_min : float
        Minimum frequency.
    f_max : float
        Maximum frequency.
    power : float
        Exponent for the magnitude spectrogram.
    normalized : bool
        Whether to normalize by magnitude after stft.
    norm : str or None
        If "slaney", divide the triangular mel weights by the width of the mel band
    mel_scale : str
        Scale to use: "htk" or "slaney".
    compression : bool
        whether to do dynamic range compression
    audio : torch.tensor
        input audio signal
    """
    # Build the extractor on the same device as the input waveform.
    extractor = transforms.MelSpectrogram(
        sample_rate=sample_rate,
        hop_length=hop_length,
        win_length=win_length,
        n_fft=n_fft,
        n_mels=n_mels,
        f_min=f_min,
        f_max=f_max,
        power=power,
        normalized=normalized,
        norm=norm,
        mel_scale=mel_scale,
    ).to(audio.device)
    mel = extractor(audio)
    return dynamic_range_compression(mel) if compression else mel
##################################
# Generator
##################################
class ResBlock1(torch.nn.Module):
    """Residual block (type 1) with three conv pairs.

    Each pair applies leaky-relu -> dilated conv -> leaky-relu -> plain
    conv, with a residual connection around the pair.
    Arguments
    ---------
    channels : int
        number of hidden channels for the convolutional layers.
    kernel_size : int
        size of the convolution filter in each layer.
    dilation : list
        list of dilation value for each conv layer in a block.
    """

    def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):
        super().__init__()
        # Dilated convolutions, one per dilation value.
        self.convs1 = nn.ModuleList(
            Conv1d(
                in_channels=channels,
                out_channels=channels,
                kernel_size=kernel_size,
                stride=1,
                dilation=d,
                padding="same",
                skip_transpose=True,
                weight_norm=True,
            )
            for d in (dilation[0], dilation[1], dilation[2])
        )
        # Matching non-dilated convolutions.
        self.convs2 = nn.ModuleList(
            Conv1d(
                in_channels=channels,
                out_channels=channels,
                kernel_size=kernel_size,
                stride=1,
                dilation=1,
                padding="same",
                skip_transpose=True,
                weight_norm=True,
            )
            for _ in range(3)
        )

    def forward(self, x):
        """Returns the output of ResBlock1
        Arguments
        ---------
        x : torch.Tensor (batch, channel, time)
            input tensor.
        """
        for dilated_conv, plain_conv in zip(self.convs1, self.convs2):
            h = dilated_conv(F.leaky_relu(x, LRELU_SLOPE))
            h = plain_conv(F.leaky_relu(h, LRELU_SLOPE))
            x = x + h
        return x

    def remove_weight_norm(self):
        """This functions removes weight normalization during inference."""
        for layer in self.convs1:
            layer.remove_weight_norm()
        for layer in self.convs2:
            layer.remove_weight_norm()
class ResBlock2(torch.nn.Module):
    """Residual block (type 2) with two dilated convolutions.

    Each layer applies leaky-relu -> dilated conv, with a residual
    connection around it.
    Arguments
    ---------
    channels : int
        number of hidden channels for the convolutional layers.
    kernel_size : int
        size of the convolution filter in each layer.
    dilation : list
        list of dilation value for each conv layer in a block.
    """

    def __init__(self, channels, kernel_size=3, dilation=(1, 3)):
        super().__init__()
        self.convs = nn.ModuleList(
            Conv1d(
                in_channels=channels,
                out_channels=channels,
                kernel_size=kernel_size,
                stride=1,
                dilation=d,
                padding="same",
                skip_transpose=True,
                weight_norm=True,
            )
            for d in (dilation[0], dilation[1])
        )

    def forward(self, x):
        """Returns the output of ResBlock2
        Arguments
        ---------
        x : torch.Tensor (batch, channel, time)
            input tensor.
        """
        for conv in self.convs:
            x = x + conv(F.leaky_relu(x, LRELU_SLOPE))
        return x

    def remove_weight_norm(self):
        """This functions removes weight normalization during inference."""
        for layer in self.convs:
            layer.remove_weight_norm()
class HifiganGenerator(torch.nn.Module):
    """HiFiGAN Generator with Multi-Receptive Field Fusion (MRF)
    Arguments
    ---------
    in_channels : int
        number of input tensor channels.
    out_channels : int
        number of output tensor channels.
    resblock_type : str
        type of the `ResBlock`. '1' or '2'.
    resblock_dilation_sizes : List[List[int]]
        list of dilation values in each layer of a `ResBlock`.
    resblock_kernel_sizes : List[int]
        list of kernel sizes for each `ResBlock`.
    upsample_kernel_sizes : List[int]
        list of kernel sizes for each transposed convolution.
    upsample_initial_channel : int
        number of channels for the first upsampling layer. This is divided by 2
        for each consecutive upsampling layer.
    upsample_factors : List[int]
        upsampling factors (stride) for each upsampling layer.
    inference_padding : int
        constant padding applied to the input at inference time. Defaults to 5.
    cond_channels : int
        if positive, a 1x1 conv projects a conditioning tensor onto the
        pre-conv output (see `forward`'s `g` argument).
    conv_post_bias : bool
        whether the final convolution uses a bias term.
    Example
    -------
    >>> inp_tensor = torch.rand([4, 80, 33])
    >>> hifigan_generator= HifiganGenerator(
    ...     in_channels = 80,
    ...     out_channels = 1,
    ...     resblock_type = "1",
    ...     resblock_dilation_sizes = [[1, 3, 5], [1, 3, 5], [1, 3, 5]],
    ...     resblock_kernel_sizes = [3, 7, 11],
    ...     upsample_kernel_sizes = [16, 16, 4, 4],
    ...     upsample_initial_channel = 512,
    ...     upsample_factors = [8, 8, 2, 2],
    ... )
    >>> out_tensor = hifigan_generator(inp_tensor)
    >>> out_tensor.shape
    torch.Size([4, 1, 8448])
    """
    def __init__(
        self,
        in_channels,
        out_channels,
        resblock_type,
        resblock_dilation_sizes,
        resblock_kernel_sizes,
        upsample_kernel_sizes,
        upsample_initial_channel,
        upsample_factors,
        inference_padding=5,
        cond_channels=0,
        conv_post_bias=True,
    ):
        super().__init__()
        self.inference_padding = inference_padding
        self.num_kernels = len(resblock_kernel_sizes)
        self.num_upsamples = len(upsample_factors)
        # initial upsampling layers
        self.conv_pre = Conv1d(
            in_channels=in_channels,
            out_channels=upsample_initial_channel,
            kernel_size=7,
            stride=1,
            padding="same",
            skip_transpose=True,
            weight_norm=True,
        )
        resblock = ResBlock1 if resblock_type == "1" else ResBlock2
        # upsampling layers (channel count halves at every stage)
        self.ups = nn.ModuleList()
        for i, (u, k) in enumerate(
            zip(upsample_factors, upsample_kernel_sizes)
        ):
            self.ups.append(
                ConvTranspose1d(
                    in_channels=upsample_initial_channel // (2 ** i),
                    out_channels=upsample_initial_channel // (2 ** (i + 1)),
                    kernel_size=k,
                    stride=u,
                    padding=(k - u) // 2,
                    skip_transpose=True,
                    weight_norm=True,
                )
            )
        # MRF blocks: num_kernels resblocks per upsampling stage, stored
        # flat — forward() indexes them as i * num_kernels + j.
        self.resblocks = nn.ModuleList()
        for i in range(len(self.ups)):
            ch = upsample_initial_channel // (2 ** (i + 1))
            for _, (k, d) in enumerate(
                zip(resblock_kernel_sizes, resblock_dilation_sizes)
            ):
                self.resblocks.append(resblock(ch, k, d))
        # post convolution layer
        self.conv_post = Conv1d(
            in_channels=ch,
            out_channels=1,
            kernel_size=7,
            stride=1,
            padding="same",
            skip_transpose=True,
            bias=conv_post_bias,
            weight_norm=True,
        )
        if cond_channels > 0:
            self.cond_layer = Conv1d(
                in_channels=cond_channels,
                out_channels=upsample_initial_channel,
                kernel_size=1,
            )
    def forward(self, x, g=None):
        """
        Arguments
        ---------
        x : torch.Tensor (batch, channel, time)
            feature input tensor.
        g : torch.Tensor (batch, 1, time)
            global conditioning input tensor.
        """
        o = self.conv_pre(x)
        if hasattr(self, "cond_layer"):
            o = o + self.cond_layer(g)
        for i in range(self.num_upsamples):
            o = F.leaky_relu(o, LRELU_SLOPE)
            o = self.ups[i](o)
            # Multi-Receptive Field Fusion: average the outputs of this
            # stage's resblocks.
            z_sum = None
            for j in range(self.num_kernels):
                if z_sum is None:
                    z_sum = self.resblocks[i * self.num_kernels + j](o)
                else:
                    z_sum += self.resblocks[i * self.num_kernels + j](o)
            o = z_sum / self.num_kernels
        o = F.leaky_relu(o)
        o = self.conv_post(o)
        # tanh keeps the waveform in [-1, 1]
        o = torch.tanh(o)
        return o
    def remove_weight_norm(self):
        """This functions removes weight normalization during inference.
        """
        for l in self.ups:
            l.remove_weight_norm()
        for l in self.resblocks:
            l.remove_weight_norm()
        self.conv_pre.remove_weight_norm()
        self.conv_post.remove_weight_norm()
    @torch.no_grad()
    def inference(self, c):
        """The inference function performs a padding and runs the forward method.
        Arguments
        ---------
        c : torch.Tensor (batch, channel, time)
            feature input tensor.
        """
        # Replication padding reduces boundary artifacts at inference.
        c = torch.nn.functional.pad(
            c, (self.inference_padding, self.inference_padding), "replicate"
        )
        return self.forward(c)
##################################
# DISCRIMINATOR
##################################
class DiscriminatorP(torch.nn.Module):
    """HiFiGAN periodic discriminator.

    Reshapes the waveform so that every P-th sample lines up in one
    column, then applies a stack of 2D convolutions.
    Note:
        if period is 2
        waveform = [1, 2, 3, 4, 5, 6 ...] --> [1, 3, 5 ... ] --> convs -> score, feat
    Arguments
    ---------
    period : int
        sub-sampling period P.
    kernel_size : int
        kernel size (time axis) of each convolution.
    stride : int
        stride (time axis) of the strided convolutions.
    """

    def __init__(self, period, kernel_size=5, stride=3):
        super().__init__()
        self.period = period
        # Strided conv stack with growing channel counts.
        channel_plan = [(1, 32), (32, 128), (128, 512), (512, 1024)]
        layers = [
            Conv2d(
                in_channels=cin,
                out_channels=cout,
                kernel_size=(kernel_size, 1),
                stride=(stride, 1),
                padding="same",
                skip_transpose=True,
                weight_norm=True,
            )
            for cin, cout in channel_plan
        ]
        # Final conv keeps the temporal resolution (stride 1).
        layers.append(
            Conv2d(
                in_channels=1024,
                out_channels=1024,
                kernel_size=(kernel_size, 1),
                stride=1,
                padding="same",
                skip_transpose=True,
                weight_norm=True,
            )
        )
        self.convs = nn.ModuleList(layers)
        self.conv_post = Conv2d(
            in_channels=1024,
            out_channels=1,
            kernel_size=(3, 1),
            stride=1,
            padding="same",
            skip_transpose=True,
            weight_norm=True,
        )

    def forward(self, x):
        """Returns (score, feature maps) for the input waveform.
        Arguments
        ---------
        x : torch.Tensor (batch, 1, time)
            input waveform.
        """
        feat = []
        batch, ch, steps = x.shape
        # Right-pad with reflection so time is a multiple of the period.
        remainder = steps % self.period
        if remainder != 0:
            pad_amount = self.period - remainder
            x = F.pad(x, (0, pad_amount), "reflect")
            steps = steps + pad_amount
        # 1d signal -> 2d grid of shape (time // period, period).
        x = x.view(batch, ch, steps // self.period, self.period)
        for conv in self.convs:
            x = F.leaky_relu(conv(x), LRELU_SLOPE)
            feat.append(x)
        x = self.conv_post(x)
        feat.append(x)
        return torch.flatten(x, 1, -1), feat
class MultiPeriodDiscriminator(torch.nn.Module):
    """HiFiGAN Multi-Period Discriminator (MPD).

    Runs one `DiscriminatorP` per period. Prime periods keep the
    per-period views of the waveform mostly non-overlapping.
    """

    def __init__(self):
        super().__init__()
        self.discriminators = nn.ModuleList(
            DiscriminatorP(p) for p in (2, 3, 5, 7, 11)
        )

    def forward(self, x):
        """Returns Multi-Period Discriminator scores and features
        Arguments
        ---------
        x : torch.Tensor (batch, 1, time)
            input waveform.
        """
        scores, feats = [], []
        for disc in self.discriminators:
            score, feat = disc(x)
            scores.append(score)
            feats.append(feat)
        return scores, feats
class DiscriminatorS(torch.nn.Module):
    """HiFiGAN Scale Discriminator.
    It is similar to `MelganDiscriminator` but with a specific architecture explained in the paper.
    SpeechBrain CNN wrappers are not used here beacause spectral_norm is not often used
    Arguments
    ---------
    use_spectral_norm : bool
        if `True` switch to spectral norm instead of weight norm.
    """

    def __init__(self, use_spectral_norm=False):
        super().__init__()
        norm_f = (
            nn.utils.spectral_norm
            if use_spectral_norm
            else nn.utils.weight_norm
        )
        # (in, out, kernel, stride, groups, padding) for each conv layer.
        conv_specs = [
            (1, 128, 15, 1, 1, 7),
            (128, 128, 41, 2, 4, 20),
            (128, 256, 41, 2, 16, 20),
            (256, 512, 41, 4, 16, 20),
            (512, 1024, 41, 4, 16, 20),
            (1024, 1024, 41, 1, 16, 20),
            (1024, 1024, 5, 1, 1, 2),
        ]
        self.convs = nn.ModuleList(
            norm_f(
                nn.Conv1d(
                    cin, cout, kernel, stride, groups=groups, padding=pad
                )
            )
            for cin, cout, kernel, stride, groups, pad in conv_specs
        )
        self.conv_post = norm_f(nn.Conv1d(1024, 1, 3, 1, padding=1))

    def forward(self, x):
        """
        Arguments
        ---------
        x : torch.Tensor (batch, 1, time)
            input waveform.
        """
        features = []
        for layer in self.convs:
            x = F.leaky_relu(layer(x), LRELU_SLOPE)
            features.append(x)
        x = self.conv_post(x)
        features.append(x)
        return torch.flatten(x, 1, -1), features
class MultiScaleDiscriminator(torch.nn.Module):
    """HiFiGAN Multi-Scale Discriminator.
    Similar to MultiScaleMelganDiscriminator but specially tailored for HiFiGAN as in the paper.
    """

    def __init__(self):
        super().__init__()
        # First sub-discriminator sees raw audio and uses spectral norm;
        # the other two operate on average-pooled (downsampled) signals.
        self.discriminators = nn.ModuleList(
            [
                DiscriminatorS(use_spectral_norm=True),
                DiscriminatorS(),
                DiscriminatorS(),
            ]
        )
        self.meanpools = nn.ModuleList(
            nn.AvgPool1d(4, 2, padding=2) for _ in range(2)
        )

    def forward(self, x):
        """
        Arguments
        ---------
        x : torch.Tensor (batch, 1, time)
            input waveform.
        """
        scores, feats = [], []
        for idx, disc in enumerate(self.discriminators):
            if idx > 0:
                # Downsample once more for each successive scale.
                x = self.meanpools[idx - 1](x)
            score, feat = disc(x)
            scores.append(score)
            feats.append(feat)
        return scores, feats
class HifiganDiscriminator(nn.Module):
    """HiFiGAN discriminator wrapping MPD and MSD.
    Example
    -------
    >>> inp_tensor = torch.rand([4, 1, 8192])
    >>> hifigan_discriminator= HifiganDiscriminator()
    >>> scores, feats = hifigan_discriminator(inp_tensor)
    >>> len(scores)
    8
    >>> len(feats)
    8
    """

    def __init__(self):
        super().__init__()
        self.mpd = MultiPeriodDiscriminator()
        self.msd = MultiScaleDiscriminator()

    def forward(self, x):
        """Returns list of list of features from each layer of each discriminator.
        Arguments
        ---------
        x : torch.Tensor
            input waveform.
        """
        # 5 period-discriminator outputs followed by 3 scale outputs.
        mpd_scores, mpd_feats = self.mpd(x)
        msd_scores, msd_feats = self.msd(x)
        return mpd_scores + msd_scores, mpd_feats + msd_feats
#################################
# GENERATOR LOSSES
#################################
def stft(x, n_fft, hop_length, win_length, window_fn="hann_window"):
    """computes the Fourier transform of short overlapping windows of the input

    Arguments
    ---------
    x : torch.Tensor (batch, 1, time)
        input waveform.
    n_fft : int
        size of Fourier transform.
    hop_length : int
        the distance between neighboring sliding window frames.
    win_length : int
        the size of window frame and STFT filter.
    window_fn : str
        name of a window constructor in the ``torch`` namespace
        (e.g. "hann_window"). Previously this argument was accepted
        but ignored, so a rectangular window was silently used.

    Returns
    -------
    torch.Tensor
        Magnitude spectrogram of shape (batch, n_fft // 2 + 1, frames).
    """
    window = getattr(torch, window_fn)(win_length, device=x.device)
    # return_complex=True is required by recent torch versions;
    # view_as_real restores the trailing (real, imag) axis used below.
    o = torch.view_as_real(
        torch.stft(
            x.squeeze(1),
            n_fft,
            hop_length,
            win_length,
            window=window,
            return_complex=True,
        )
    )
    M = o[:, :, :, 0]
    P = o[:, :, :, 1]
    # Clamp before sqrt so the gradient stays finite for silent frames.
    S = torch.sqrt(torch.clamp(M ** 2 + P ** 2, min=1e-8))
    return S
class STFTLoss(nn.Module):
    """STFT loss. Input generate and real waveforms are converted
    to spectrograms compared with L1 and Spectral convergence losses.
    It is from ParallelWaveGAN paper https://arxiv.org/pdf/1910.11480.pdf
    Arguments
    ---------
    n_fft : int
        size of Fourier transform.
    hop_length : int
        the distance between neighboring sliding window frames.
    win_length : int
        the size of window frame and STFT filter.
    """

    def __init__(self, n_fft, hop_length, win_length):
        super().__init__()
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.win_length = win_length

    def forward(self, y_hat, y):
        """Returns magnitude loss and spectral convergence loss
        Arguments
        ---------
        y_hat : torch.tensor
            generated waveform tensor
        y : torch.tensor
            real waveform tensor
        """
        fake_mag = stft(y_hat, self.n_fft, self.hop_length, self.win_length)
        real_mag = stft(y, self.n_fft, self.hop_length, self.win_length)
        # L1 distance between log-magnitude spectrograms.
        loss_mag = F.l1_loss(torch.log(real_mag), torch.log(fake_mag))
        # Spectral convergence: relative Frobenius-norm error.
        loss_sc = torch.norm(real_mag - fake_mag, p="fro") / torch.norm(
            real_mag, p="fro"
        )
        return loss_mag, loss_sc
class MultiScaleSTFTLoss(torch.nn.Module):
    """Multi-scale STFT loss. Input generate and real waveforms are converted
    to spectrograms compared with L1 and Spectral convergence losses.
    It is from ParallelWaveGAN paper https://arxiv.org/pdf/1910.11480.pdf"""

    def __init__(
        self,
        n_ffts=(1024, 2048, 512),
        hop_lengths=(120, 240, 50),
        win_lengths=(600, 1200, 240),
    ):
        super().__init__()
        # One STFTLoss per resolution.
        self.loss_funcs = torch.nn.ModuleList(
            STFTLoss(n_fft, hop_length, win_length)
            for n_fft, hop_length, win_length in zip(
                n_ffts, hop_lengths, win_lengths
            )
        )

    def forward(self, y_hat, y):
        """Returns multi-scale magnitude loss and spectral convergence loss
        Arguments
        ---------
        y_hat : torch.tensor
            generated waveform tensor
        y : torch.tensor
            real waveform tensor
        """
        num_scales = len(self.loss_funcs)
        loss_mag = 0
        loss_sc = 0
        for loss_fn in self.loss_funcs:
            scale_mag, scale_sc = loss_fn(y_hat, y)
            loss_mag = loss_mag + scale_mag
            loss_sc = loss_sc + scale_sc
        # Average over the STFT resolutions.
        return loss_mag / num_scales, loss_sc / num_scales
class L1SpecLoss(nn.Module):
    """L1 Loss over Spectrograms as described in HiFiGAN paper https://arxiv.org/pdf/2010.05646.pdf
    Note : L1 loss helps leaning details compared with L2 loss
    Arguments
    ---------
    sample_rate : int
        Sample rate of audio signal.
    hop_length : int
        Length of hop between STFT windows.
    win_length : int
        Window size.
    n_mel_channels : int
        Number of mel filterbanks.
    n_fft : int
        Size of FFT.
    n_stft : int
        Number of STFT bins. NOTE(review): this argument is currently
        ignored — the value is recomputed as ``n_fft // 2 + 1`` in
        ``__init__``.
    mel_fmin : float
        Minimum frequency.
    mel_fmax : float
        Maximum frequency.
    mel_normalized : bool
        Whether to normalize by magnitude after stft.
    power : float
        Exponent for the magnitude spectrogram.
    norm : str or None
        If "slaney", divide the triangular mel weights by the width of the mel band
    mel_scale : str
        Scale to use: "htk" or "slaney".
    dynamic_range_compression : bool
        whether to do dynamic range compression
    """
    def __init__(
        self,
        sample_rate=22050,
        hop_length=256,
        win_length=24,
        n_mel_channels=80,
        n_fft=1024,
        n_stft=1024 // 2 + 1,
        mel_fmin=0.0,
        mel_fmax=8000.0,
        mel_normalized=False,
        power=1.0,
        norm="slaney",
        mel_scale="slaney",
        dynamic_range_compression=True,
    ):
        super().__init__()
        self.sample_rate = sample_rate
        self.hop_length = hop_length
        self.win_length = win_length
        self.n_mel_channels = n_mel_channels
        self.n_fft = n_fft
        # Derived from n_fft; the `n_stft` constructor argument is not used.
        self.n_stft = n_fft // 2 + 1
        self.mel_fmin = mel_fmin
        self.mel_fmax = mel_fmax
        self.mel_normalized = mel_normalized
        self.power = power
        self.norm = norm
        self.mel_scale = mel_scale
        self.dynamic_range_compression = dynamic_range_compression
    def forward(self, y_hat, y):
        """Returns L1 Loss over Spectrograms
        Arguments
        ---------
        y_hat : torch.tensor
            generated waveform tensor
        y : torch.tensor
            real waveform tensor
        """
        # Mel spectrogram of the generated waveform.
        y_hat_M = mel_spectogram(
            self.sample_rate,
            self.hop_length,
            self.win_length,
            self.n_fft,
            self.n_mel_channels,
            self.mel_fmin,
            self.mel_fmax,
            self.power,
            self.mel_normalized,
            self.norm,
            self.mel_scale,
            self.dynamic_range_compression,
            y_hat,
        )
        # y_M = mel_spectogram(self.mel_params, y)
        # Mel spectrogram of the reference waveform (same parameters).
        y_M = mel_spectogram(
            self.sample_rate,
            self.hop_length,
            self.win_length,
            self.n_fft,
            self.n_mel_channels,
            self.mel_fmin,
            self.mel_fmax,
            self.power,
            self.mel_normalized,
            self.norm,
            self.mel_scale,
            self.dynamic_range_compression,
            y,
        )
        # magnitude loss
        # loss_mag = F.l1_loss(torch.log(y_M), torch.log(y_hat_M))
        loss_mag = F.l1_loss(y_M, y_hat_M)
        return loss_mag
class MSEGLoss(nn.Module):
    """Mean Squared Generator Loss
    The generator is trained to fake the discriminator by updating the sample quality
    to be classified to a value almost equal to 1.
    """

    def forward(self, score_fake):
        """Returns Generator GAN loss
        Arguments
        ---------
        score_fake : list
            discriminator scores of generated waveforms D(G(s))
        """
        # Push the fake scores toward the "real" target of 1.
        target = score_fake.new_ones(score_fake.shape)
        return F.mse_loss(score_fake, target)
class MelganFeatureLoss(nn.Module):
    """Calculates the feature matching loss, which is a learned similarity metric measured by
    the difference in features of the discriminator between a ground truth sample and a generated
    sample (Larsen et al., 2016, Kumar et al., 2019).
    """

    def __init__(self):
        super().__init__()
        self.loss_func = nn.L1Loss()

    # pylint: disable=no-self-use
    def forward(self, fake_feats, real_feats):
        """Returns feature matching loss
        Arguments
        ---------
        fake_feats : list
            discriminator features of generated waveforms
        real_feats : list
            discriminator features of groundtruth waveforms
        """
        total = 0
        count = 0
        # Outer loop: one entry per discriminator; inner loop: per layer.
        for disc_idx in range(len(fake_feats)):
            for fake_feat, real_feat in zip(
                fake_feats[disc_idx], real_feats[disc_idx]
            ):
                total = total + self.loss_func(fake_feat, real_feat)
                count += 1
        # Average over all compared feature maps.
        return total / count
##################################
# DISCRIMINATOR LOSSES
##################################
class MSEDLoss(nn.Module):
    """Mean Squared Discriminator Loss
    The discriminator is trained to classify ground truth samples to 1,
    and the samples synthesized from the generator to 0.
    """

    def __init__(self):
        super().__init__()
        self.loss_func = nn.MSELoss()

    def forward(self, score_fake, score_real):
        """Returns Discriminator GAN losses
        Arguments
        ---------
        score_fake : list
            discriminator scores of generated waveforms
        score_real : list
            discriminator scores of groundtruth waveforms
        """
        # Real samples should score 1, generated samples 0.
        loss_real = self.loss_func(
            score_real, score_real.new_ones(score_real.shape)
        )
        loss_fake = self.loss_func(
            score_fake, score_fake.new_zeros(score_fake.shape)
        )
        return loss_real + loss_fake, loss_real, loss_fake
#####################################
# LOSS WRAPPERS
#####################################
def _apply_G_adv_loss(scores_fake, loss_func):
"""Compute Generator adversarial loss function
and normalize values
Arguments
---------
scores_fake : list
discriminator scores of generated waveforms
loss_func : object
object of target generator loss
"""
adv_loss = 0
if isinstance(scores_fake, list):
for score_fake in scores_fake:
fake_loss = loss_func(score_fake)
adv_loss += fake_loss
# adv_loss /= len(scores_fake)
else:
fake_loss = loss_func(scores_fake)
adv_loss = fake_loss
return adv_loss
def _apply_D_loss(scores_fake, scores_real, loss_func):
"""Compute Discriminator losses and normalize loss values
Arguments
---------
scores_fake : list
discriminator scores of generated waveforms
scores_real : list
discriminator scores of groundtruth waveforms
loss_func : object
object of target discriminator loss
"""
loss = 0
real_loss = 0
fake_loss = 0
if isinstance(scores_fake, list):
# multi-scale loss
for score_fake, score_real in zip(scores_fake, scores_real):
total_loss, real_loss, fake_loss = loss_func(
score_fake=score_fake, score_real=score_real
)
loss += total_loss
real_loss += real_loss
fake_loss += fake_loss
# normalize loss values with number of scales (discriminators)
# loss /= len(scores_fake)
# real_loss /= len(scores_real)
# fake_loss /= len(scores_fake)
else:
# single scale loss
total_loss, real_loss, fake_loss = loss_func(scores_fake, scores_real)
loss = total_loss
return loss, real_loss, fake_loss
##################################
# MODEL LOSSES
##################################
class GeneratorLoss(nn.Module):
    """Creates a summary of generator losses
    and applies weights for different losses
    Arguments
    ---------
    stft_loss : object
        object of stft loss
    stft_loss_weight : float
        weight of STFT loss
    mseg_loss : object
        object of mseg loss
    mseg_loss_weight : float
        weight of mseg loss
    feat_match_loss : object
        object of feature match loss
    feat_match_loss_weight : float
        weight of feature match loss
    l1_spec_loss : object
        object of L1 spectrogram loss
    l1_spec_loss_weight : float
        weight of L1 spectrogram loss
    """

    def __init__(
        self,
        stft_loss=None,
        stft_loss_weight=0,
        mseg_loss=None,
        mseg_loss_weight=0,
        feat_match_loss=None,
        feat_match_loss_weight=0,
        l1_spec_loss=None,
        l1_spec_loss_weight=0,
    ):
        super().__init__()
        self.stft_loss = stft_loss
        self.stft_loss_weight = stft_loss_weight
        self.mseg_loss = mseg_loss
        self.mseg_loss_weight = mseg_loss_weight
        self.feat_match_loss = feat_match_loss
        self.feat_match_loss_weight = feat_match_loss_weight
        self.l1_spec_loss = l1_spec_loss
        self.l1_spec_loss_weight = l1_spec_loss_weight

    def forward(
        self,
        y_hat=None,
        y=None,
        scores_fake=None,
        feats_fake=None,
        feats_real=None,
    ):
        """Returns a dictionary of generator losses and applies weights
        Arguments
        ---------
        y_hat : torch.tensor
            generated waveform tensor
        y : torch.tensor
            real waveform tensor
        scores_fake : list
            discriminator scores of generated waveforms
        feats_fake : list
            discriminator features of generated waveforms
        feats_real : list
            discriminator features of groundtruth waveforms
        """
        recon_total = 0  # weighted reconstruction-type losses
        adv_total = 0  # weighted adversarial-type losses
        losses = {}

        # Multi-resolution STFT loss (magnitude + spectral convergence).
        if self.stft_loss:
            mag_loss, sc_loss = self.stft_loss(
                y_hat[:, :, : y.size(2)].squeeze(1), y.squeeze(1)
            )
            losses["G_stft_loss_mg"] = mag_loss
            losses["G_stft_loss_sc"] = sc_loss
            recon_total = recon_total + self.stft_loss_weight * (
                mag_loss + sc_loss
            )

        # L1 loss on mel spectrograms.
        if self.l1_spec_loss:
            spec_loss = self.l1_spec_loss(y_hat, y)
            losses["G_l1_spec_loss"] = spec_loss
            recon_total = recon_total + self.l1_spec_loss_weight * spec_loss

        # Multi-scale MSE adversarial loss.
        if self.mseg_loss and scores_fake is not None:
            mse_fake_loss = _apply_G_adv_loss(scores_fake, self.mseg_loss)
            losses["G_mse_fake_loss"] = mse_fake_loss
            adv_total = adv_total + self.mseg_loss_weight * mse_fake_loss

        # Feature matching loss between real/fake discriminator features.
        if self.feat_match_loss and feats_fake is not None:
            fm_loss = self.feat_match_loss(feats_fake, feats_real)
            losses["G_feat_match_loss"] = fm_loss
            adv_total = adv_total + self.feat_match_loss_weight * fm_loss

        losses["G_loss"] = recon_total + adv_total
        losses["G_gen_loss"] = recon_total
        losses["G_adv_loss"] = adv_total
        return losses
class DiscriminatorLoss(nn.Module):
    """Creates a summary of discriminator losses
    Arguments
    ---------
    msed_loss : object
        object of MSE discriminator loss
    """

    def __init__(self, msed_loss=None):
        super().__init__()
        self.msed_loss = msed_loss

    def forward(self, scores_fake, scores_real):
        """Returns a dictionary of discriminator losses
        Arguments
        ---------
        scores_fake : list
            discriminator scores of generated waveforms
        scores_real : list
            discriminator scores of groundtruth waveforms
        """
        losses = {}
        total = 0
        if self.msed_loss:
            gan_loss, gan_real_loss, gan_fake_loss = _apply_D_loss(
                scores_fake=scores_fake,
                scores_real=scores_real,
                loss_func=self.msed_loss,
            )
            losses["D_mse_gan_loss"] = gan_loss
            losses["D_mse_gan_real_loss"] = gan_real_loss
            losses["D_mse_gan_fake_loss"] = gan_fake_loss
            total = total + gan_loss
        losses["D_loss"] = total
        return losses
| 37,244 | 28.748403 | 99 | py |
speechbrain | speechbrain-main/speechbrain/lobes/models/RNNLM.py | """Implementation of a Recurrent Language Model.
Authors
* Mirco Ravanelli 2020
* Peter Plantinga 2020
* Ju-Chieh Chou 2020
* Titouan Parcollet 2020
* Abdel 2020
"""
import torch
from torch import nn
import speechbrain as sb
class RNNLM(nn.Module):
    """This model is a combination of embedding layer, RNN, DNN.
    It can be used for RNNLM.
    Arguments
    ---------
    output_neurons : int
        Number of entries in embedding table, also the number of neurons in
        output layer.
    embedding_dim : int
        Size of embedding vectors (default 128).
    activation : torch class
        A class used for constructing the activation layers for DNN.
    dropout : float
        Neuron dropout rate applied to embedding, RNN, and DNN.
    rnn_class : torch class
        The type of RNN to use in RNNLM network (LiGRU, LSTM, GRU, RNN)
    rnn_layers : int
        The number of recurrent layers to include.
    rnn_neurons : int
        Number of neurons in each layer of the RNN.
    rnn_re_init : bool
        Whether to initialize rnn with orthogonal initialization.
    return_hidden : bool
        Whether to return hidden states (default False).
    dnn_blocks : int
        The number of linear neural blocks to include.
    dnn_neurons : int
        The number of neurons in the linear layers.
    Example
    -------
    >>> model = RNNLM(output_neurons=5)
    >>> inputs = torch.Tensor([[1, 2, 3]])
    >>> outputs = model(inputs)
    >>> outputs.shape
    torch.Size([1, 3, 5])
    """

    def __init__(
        self,
        output_neurons,
        embedding_dim=128,
        activation=torch.nn.LeakyReLU,
        dropout=0.15,
        rnn_class=sb.nnet.RNN.LSTM,
        rnn_layers=2,
        rnn_neurons=1024,
        rnn_re_init=False,
        return_hidden=False,
        dnn_blocks=1,
        dnn_neurons=512,
    ):
        super().__init__()
        self.embedding = sb.nnet.embedding.Embedding(
            num_embeddings=output_neurons, embedding_dim=embedding_dim
        )
        self.dropout = nn.Dropout(p=dropout)
        self.rnn = rnn_class(
            input_size=embedding_dim,
            hidden_size=rnn_neurons,
            num_layers=rnn_layers,
            dropout=dropout,
            re_init=rnn_re_init,
        )
        self.return_hidden = return_hidden
        self.reshape = False
        self.dnn = sb.nnet.containers.Sequential(
            input_shape=[None, None, rnn_neurons]
        )
        for block_index in range(dnn_blocks):
            self.dnn.append(
                sb.nnet.linear.Linear,
                n_neurons=dnn_neurons,
                bias=True,
                layer_name="linear",
            )
            self.dnn.append(sb.nnet.normalization.LayerNorm, layer_name="norm")
            self.dnn.append(activation(), layer_name="act")
            self.dnn.append(torch.nn.Dropout(p=dropout), layer_name="dropout")
        self.out = sb.nnet.linear.Linear(
            input_size=dnn_neurons, n_neurons=output_neurons
        )

    def forward(self, x, hx=None):
        """Processes the input tensor x and returns an output tensor."""
        x = self.embedding(x)
        x = self.dropout(x)

        # If the embedded input is 2d, add a time-axis; this is used at
        # inference time (single-step decoding).
        # BUGFIX: recompute the flag on every call. Previously the flag
        # was set to True once and never cleared, so a later call with a
        # full sequence could be squeezed incorrectly.
        self.reshape = len(x.shape) == 2
        if self.reshape:
            x = x.unsqueeze(dim=1)

        x, hidden = self.rnn(x, hx)
        x = self.dnn(x)
        out = self.out(x)

        # Remove the temporary time-axis added above.
        if self.reshape:
            out = out.squeeze(dim=1)

        if self.return_hidden:
            return out, hidden
        else:
            return out
| 3,628 | 28.504065 | 79 | py |
speechbrain | speechbrain-main/speechbrain/lobes/models/PIQ.py | """This file implements the necessary classes and functions to implement Posthoc Interpretations via Quantization.
Authors
* Cem Subakan 2023
* Francesco Paissan 2023
"""
import torch
import torch.nn as nn
from torch.autograd import Function
def get_irrelevant_regions(labels, K, num_classes, N_shared=5, stage="TRAIN"):
    """This class returns binary matrix that indicates the irrelevant regions in the VQ-dictionary given the labels array
    Arguments
    ---------
    labels : torch.tensor
        1 dimensional torch.tensor of size [B]
    K : int
        Number of keys in the dictionary
    num_classes : int
        Number of possible classes
    N_shared : int
        Number of shared keys
    stage : str
        "TRAIN" or else
    Example:
    --------
    >>> labels = torch.Tensor([1, 0, 2])
    >>> irrelevant_regions = get_irrelevant_regions(labels, 20, 3, 5)
    >>> print(irrelevant_regions.shape)
    torch.Size([3, 20])
    """
    num_private = K - N_shared
    # Assign each private key to a class by spreading the class ids
    # evenly over the private part of the codebook.
    key_classes = torch.round(
        torch.linspace(-0.5, num_classes - 0.51, num_private)
    ).to(labels.device)
    key_classes = key_classes.unsqueeze(0).repeat(labels.shape[0], 1)
    expanded_labels = labels.unsqueeze(1).repeat(1, num_private)
    # A private key is irrelevant when it belongs to a different class.
    private_mask = key_classes != expanded_labels

    # Shared keys: flagged irrelevant during training, relevant otherwise.
    if stage == "TRAIN":
        shared_part = torch.ones(private_mask.shape[0], N_shared)
    else:
        shared_part = torch.zeros(private_mask.shape[0], N_shared)
    shared_part = shared_part.to(labels.device)

    return torch.cat([private_mask, shared_part], dim=1) == 1
def weights_init(m):
    """
    Applies Xavier initialization to network weights.
    """
    # Only convolutional layers are (re-)initialized.
    if "Conv" in m.__class__.__name__:
        try:
            nn.init.xavier_uniform_(m.weight.data)
            m.bias.data.fill_(0)
        except AttributeError:
            print("Skipping initialization of ", m.__class__.__name__)
class VectorQuantization(Function):
    """This class defines the forward method for vector quantization. As VQ is not differentiable, it returns a RuntimeError in case `.grad()` is called. Refer to `VectorQuantizationStraightThrough` for a straight_through estimation of the gradient for the VQ operation."""

    @staticmethod
    def forward(
        ctx,
        inputs,
        codebook,
        labels=None,
        num_classes=10,
        activate_class_partitioning=True,
        shared_keys=10,
        training=True,
    ):
        """
        Applies VQ to vectors `input` with `codebook` as VQ dictionary.
        Arguments
        ---------
        inputs : torch.Tensor
            Hidden representations to quantize. Expected shape is `torch.Size([B, W, H, C])`.
        codebook : torch.Tensor
            VQ-dictionary for quantization. Expected shape of `torch.Size([K, C])` with K dictionary elements.
        labels : torch.Tensor or None
            Classification labels. Used to define irrelevant regions and divide the latent space based on predicted class. Shape should be `torch.Size([B])`. When `None`, no class partitioning is applied (plain nearest-neighbour lookup).
        num_classes : int
            Number of possible classes
        activate_class_partitioning : bool
            `True` if latent space should be quantized for different classes.
        shared_keys : int
            Number of shared keys among classes.
        training : bool
            `True` if stage is TRAIN.
        Returns
        --------
        Codebook's indices for quantized representation : torch.Tensor
        Example:
        --------
        >>> inputs = torch.ones(3, 14, 25, 256)
        >>> codebook = torch.randn(1024, 256)
        >>> labels = torch.Tensor([1, 0, 2])
        >>> print(VectorQuantization.apply(inputs, codebook, labels).shape)
        torch.Size([3, 14, 25])
        """
        with torch.no_grad():
            embedding_size = codebook.size(1)
            inputs_size = inputs.size()
            inputs_flatten = inputs.view(-1, embedding_size)

            # BUGFIX: the documented default labels=None used to crash on
            # `labels.reshape`; treat it as "no class partitioning".
            use_partitioning = (
                activate_class_partitioning and labels is not None
            )
            if use_partitioning:
                labels_expanded = labels.reshape(-1, 1, 1).repeat(
                    1, inputs_size[1], inputs_size[2]
                )
                labels_flatten = labels_expanded.reshape(-1)
                irrelevant_regions = get_irrelevant_regions(
                    labels_flatten,
                    codebook.shape[0],
                    num_classes,
                    N_shared=shared_keys,
                    stage="TRAIN" if training else "VALID",
                )

            codebook_sqr = torch.sum(codebook ** 2, dim=1)
            inputs_sqr = torch.sum(inputs_flatten ** 2, dim=1, keepdim=True)

            # Compute the squared distances to the codebook via the
            # ||a||^2 + ||b||^2 - 2 a.b expansion.
            distances = torch.addmm(
                codebook_sqr + inputs_sqr,
                inputs_flatten,
                codebook.t(),
                alpha=-2.0,
                beta=1.0,
            )

            # intervene and boost the distances for irrelevant codes
            if use_partitioning:
                distances[irrelevant_regions] = torch.inf

            _, indices_flatten = torch.min(distances, dim=1)
            indices = indices_flatten.view(*inputs_size[:-1])
            ctx.mark_non_differentiable(indices)

            return indices

    @staticmethod
    def backward(ctx, grad_output):
        """Handles error in case grad() is called on the VQ operation. """
        raise RuntimeError(
            "Trying to call `.grad()` on graph containing "
            "`VectorQuantization`. The function `VectorQuantization` "
            "is not differentiable. Use `VectorQuantizationStraightThrough` "
            "if you want a straight-through estimator of the gradient."
        )
class VectorQuantizationStraightThrough(Function):
    """This class defines the forward method for vector quantization. As VQ is not differentiable, it approximates the gradient of the VQ as in https://arxiv.org/abs/1711.00937."""
    @staticmethod
    def forward(
        ctx,
        inputs,
        codebook,
        labels=None,
        num_classes=10,
        activate_class_partitioning=True,
        shared_keys=10,
        training=True,
    ):
        """
        Applies VQ to vectors `input` with `codebook` as VQ dictionary and estimates gradients with a
        Straight-Through (id) approximation of the quantization steps.
        Arguments
        ---------
        inputs : torch.Tensor
            Hidden representations to quantize. Expected shape is `torch.Size([B, W, H, C])`.
        codebook : torch.Tensor
            VQ-dictionary for quantization. Expected shape of `torch.Size([K, C])` with K dictionary elements.
        labels : torch.Tensor
            Classification labels. Used to define irrelevant regions and divide the latent space based on predicted class. Shape should be `torch.Size([B])`.
        num_classes : int
            Number of possible classes
        activate_class_partitioning : bool
            `True` if latent space should be quantized for different classes.
        shared_keys : int
            Number of shared keys among classes.
        training : bool
            `True` if stage is TRAIN.
        Returns
        --------
        Quantized representation and codebook's indices for quantized representation : tuple
        Example:
        --------
        >>> inputs = torch.ones(3, 14, 25, 256)
        >>> codebook = torch.randn(1024, 256)
        >>> labels = torch.Tensor([1, 0, 2])
        >>> quant, quant_ind = VectorQuantizationStraightThrough.apply(inputs, codebook, labels)
        >>> print(quant.shape, quant_ind.shape)
        torch.Size([3, 14, 25, 256]) torch.Size([1050])
        """
        # Nearest-neighbour lookup (non-differentiable); the class
        # partitioning logic lives in VectorQuantization.
        indices = VectorQuantization.apply(
            inputs,
            codebook,
            labels,
            num_classes,
            activate_class_partitioning,
            shared_keys,
            training,
        )
        indices_flatten = indices.view(-1)
        # Saved for computing the codebook gradient in backward().
        ctx.save_for_backward(indices_flatten, codebook)
        ctx.mark_non_differentiable(indices_flatten)
        # Gather the selected codebook rows and restore the input shape.
        codes_flatten = torch.index_select(
            codebook, dim=0, index=indices_flatten
        )
        codes = codes_flatten.view_as(inputs)
        return (codes, indices_flatten)
    @staticmethod
    def backward(
        ctx,
        grad_output,
        grad_indices,
        labels=None,
        num_classes=None,
        activate_class_partitioning=True,
        shared_keys=10,
        training=True,
    ):
        """
        Estimates gradient assuming vector quantization as identity function. (https://arxiv.org/abs/1711.00937)
        """
        grad_inputs, grad_codebook = None, None
        if ctx.needs_input_grad[0]:
            # Straight-through estimator: pass the output gradient
            # unchanged to the (pre-quantization) inputs.
            grad_inputs = grad_output.clone()
        if ctx.needs_input_grad[1]:
            # Gradient wrt. the codebook: accumulate each output gradient
            # into the codebook row that was selected for it.
            indices, codebook = ctx.saved_tensors
            embedding_size = codebook.size(1)
            grad_output_flatten = grad_output.contiguous().view(
                -1, embedding_size
            )
            grad_codebook = torch.zeros_like(codebook)
            grad_codebook.index_add_(0, indices, grad_output_flatten)
        # One gradient slot per forward() argument; the non-tensor
        # arguments receive None.
        return (grad_inputs, grad_codebook, None, None, None, None, None)
class Conv2dEncoder_v2(nn.Module):
    """
    This class implements a convolutional encoder to extract classification embeddings from logspectra.
    Arguments
    ---------
    dim : int
        Number of channels of the extracted embeddings.
    Returns
    --------
    Latent representations to feed inside classifier and/or intepreter.
    Example:
    --------
    >>> inputs = torch.ones(3, 431, 513)
    >>> model = Conv2dEncoder_v2()
    >>> print(model(inputs).shape)
    torch.Size([3, 256, 26, 32])
    """

    def __init__(self, dim=256):
        """
        Extracts embeddings from logspectrograms.
        """
        super().__init__()
        # Four stride-2 downsampling stages (conv -> batchnorm), each
        # halving the spatial resolution.
        self.conv1 = nn.Conv2d(1, dim, 4, 2, 1)
        self.bn1 = nn.BatchNorm2d(dim)
        self.conv2 = nn.Conv2d(dim, dim, 4, 2, 1)
        self.bn2 = nn.BatchNorm2d(dim)
        self.conv3 = nn.Conv2d(dim, dim, 4, 2, 1)
        self.bn3 = nn.BatchNorm2d(dim)
        self.conv4 = nn.Conv2d(dim, dim, 4, 2, 1)
        self.bn4 = nn.BatchNorm2d(dim)
        self.resblock = ResBlockAudio(dim)
        self.nonl = nn.ReLU()

    def forward(self, x):
        """
        Computes forward pass.
        Arguments
        --------
        x : torch.Tensor
            Log-power spectrogram. Expected shape `torch.Size([B, T, F])`.
        Returns
        --------
        Embeddings : torch.Tensor
        """
        # Add the singleton channel axis expected by Conv2d.
        x = x.unsqueeze(1)
        stages = (
            (self.conv1, self.bn1),
            (self.conv2, self.bn2),
            (self.conv3, self.bn3),
            (self.conv4, self.bn4),
        )
        for conv, bn in stages:
            x = self.nonl(bn(conv(x)))
        return self.resblock(x)
class ResBlockAudio(nn.Module):
    """This class implements a residual block.
    Arguments
    --------
    dim : int
        Input channels of the tensor to process. Matches output channels of the residual block.
    Returns
    --------
    Residual block output : torch.Tensor
    Example
    --------
    >>> res = ResBlockAudio(128)
    >>> x = torch.randn(2, 128, 16, 16)
    >>> print(x.shape)
    torch.Size([2, 128, 16, 16])
    """

    def __init__(self, dim):
        """Implements a residual block."""
        super().__init__()
        # conv3x3 -> BN -> ReLU -> conv1x1 -> BN, added to the input.
        self.block = nn.Sequential(
            nn.Conv2d(dim, dim, 3, 1, 1),
            nn.BatchNorm2d(dim),
            nn.ReLU(True),
            nn.Conv2d(dim, dim, 1),
            nn.BatchNorm2d(dim),
        )

    def forward(self, x):
        """Forward step.
        Arguments
        --------
        x : torch.Tensor
            Tensor to process. Expected shape is `torch.Size([B, C, H, W])`.
        Returns
        --------
        Residual block output : torch.Tensor
        """
        residual = self.block(x)
        return x + residual
class VectorQuantizedPSI_Audio(nn.Module):
    """
    This class reconstructs log-power spectrograms from classifier's representations.
    Arguments
    ---------
    dim : int
        Dimensionality of VQ vectors.
    K : int
        Number of elements of VQ dictionary.
    numclasses : int
        Number of possible classes
    activate_class_partitioning : bool
        `True` if latent space should be quantized for different classes.
    shared_keys : int
        Number of shared keys among classes.
    use_adapter : bool
        `True` to learn an adapter for classifier's representations.
    adapter_reduce_dim : bool
        `True` if adapter should compress representations.
    Returns
    --------
    Reconstructed log-power spectrograms, adapted classifier's representations, quantized classifier's representations. : tuple
    Example:
    --------
    >>> psi = VectorQuantizedPSI_Audio(dim=256, K=1024)
    >>> x = torch.randn(2, 256, 16, 16)
    >>> labels = torch.Tensor([0, 2])
    >>> logspectra, hcat, z_q_x = psi(x, labels)
    >>> print(logspectra.shape, hcat.shape, z_q_x.shape)
    torch.Size([2, 1, 257, 257]) torch.Size([2, 256, 8, 8]) torch.Size([2, 256, 8, 8])
    """
    def __init__(
        self,
        dim=128,
        K=512,
        numclasses=50,
        activate_class_partitioning=True,
        shared_keys=0,
        use_adapter=True,
        adapter_reduce_dim=True,
    ):
        super().__init__()
        # Class-partitioned VQ dictionary used to quantize the adapter output.
        self.codebook = VQEmbedding(
            K,
            dim,
            numclasses=numclasses,
            activate_class_partitioning=activate_class_partitioning,
            shared_keys=shared_keys,
        )
        self.use_adapter = use_adapter
        self.adapter_reduce_dim = adapter_reduce_dim
        if use_adapter:
            # Residual adapter applied to the classifier representations.
            self.adapter = ResBlockAudio(dim)
            if adapter_reduce_dim:
                # Optional 2x spatial down/up-sampling around the quantizer.
                self.down = nn.Conv2d(dim, dim, 4, (2, 2), 1)
                self.up = nn.ConvTranspose2d(dim, dim, 4, (2, 2), 1)
        # Transposed-conv decoder mapping quantized features back to a
        # single-channel log-power spectrogram.
        self.decoder = nn.Sequential(
            nn.ConvTranspose2d(dim, dim, 3, (2, 2), 1),
            nn.ReLU(True),
            nn.BatchNorm2d(dim),
            nn.ConvTranspose2d(dim, dim, 4, (2, 2), 1),
            nn.ReLU(),
            nn.BatchNorm2d(dim),
            nn.ConvTranspose2d(dim, dim, 4, (2, 2), 1),
            nn.ReLU(),
            nn.BatchNorm2d(dim),
            nn.ConvTranspose2d(dim, dim, 4, (2, 2), 1),
            nn.ReLU(),
            nn.BatchNorm2d(dim),
            nn.ConvTranspose2d(dim, 1, 12, 1, 1),
        )
        # Xavier initialization for all conv layers (see weights_init).
        self.apply(weights_init)
    def forward(self, hs, labels):
        """
        Forward step. Reconstructs log-power based on provided label's keys in VQ dictionary.
        Arguments
        --------
        hs : torch.Tensor
            Classifier's representations.
        labels : torch.Tensor
            Predicted labels for classifier's representations.
        Returns
        --------
        Reconstructed log-power spectrogram, reduced classifier's representations and quantized classifier's representations. : tuple
        """
        if self.use_adapter:
            hcat = self.adapter(hs)
        else:
            hcat = hs
        if self.adapter_reduce_dim:
            # Downsample, quantize (straight-through), then upsample.
            hcat = self.down(hcat)
            z_q_x_st, z_q_x = self.codebook.straight_through(hcat, labels)
            z_q_x_st = self.up(z_q_x_st)
        else:
            z_q_x_st, z_q_x = self.codebook.straight_through(hcat, labels)
        x_tilde = self.decoder(z_q_x_st)
        return x_tilde, hcat, z_q_x
class VQEmbedding(nn.Module):
    """
    Implements VQ Dictionary. Wraps `VectorQuantization` and `VectorQuantizationStraightThrough`. For more details refer to the specific class.
    Arguments
    ---------
    K : int
        Number of elements of VQ dictionary.
    D : int
        Dimensionality of VQ vectors.
    numclasses : int
        Number of possible classes
    activate_class_partitioning : bool
        `True` if latent space should be quantized for different classes.
    shared_keys : int
        Number of shared keys among classes.
    """

    def __init__(
        self,
        K,
        D,
        numclasses=50,
        activate_class_partitioning=True,
        shared_keys=0,
    ):
        super().__init__()
        self.embedding = nn.Embedding(K, D)
        # Uniform init in [-1/K, 1/K] as in the VQ-VAE reference code.
        self.embedding.weight.data.uniform_(-1.0 / K, 1.0 / K)
        self.numclasses = numclasses
        self.activate_class_partitioning = activate_class_partitioning
        self.shared_keys = shared_keys

    def forward(self, z_e_x, labels=None):
        """
        Wraps VectorQuantization. Computes VQ-dictionary indices for input quantization. Note that this forward step is not differentiable.
        Arguments
        ---------
        z_e_x : torch.Tensor
            Input tensor to be quantized.
        Returns
        --------
        Codebook's indices for quantized representation : torch.Tensor
        Example:
        --------
        >>> inputs = torch.ones(3, 256, 14, 25)
        >>> codebook = VQEmbedding(1024, 256)
        >>> labels = torch.Tensor([1, 0, 2])
        >>> print(codebook(inputs, labels).shape)
        torch.Size([3, 14, 25])
        """
        # Move channels last, as expected by the quantization Function.
        channels_last = z_e_x.permute(0, 2, 3, 1).contiguous()
        return VectorQuantization.apply(
            channels_last, self.embedding.weight, labels
        )

    def straight_through(self, z_e_x, labels=None):
        """
        Implements the vector quantization with straight through approximation of the gradient.
        Arguments
        ---------
        z_e_x : torch.Tensor
            Input tensor to be quantized.
        labels : torch.Tensor
            Predicted class for input representations (used for latent space quantization).
        Returns
        --------
        Straigth through quantized representation and quantized representation : tuple
        Example:
        --------
        >>> inputs = torch.ones(3, 256, 14, 25)
        >>> codebook = VQEmbedding(1024, 256)
        >>> labels = torch.Tensor([1, 0, 2])
        >>> quant, quant_ind = codebook.straight_through(inputs, labels)
        >>> print(quant.shape, quant_ind.shape)
        torch.Size([3, 256, 14, 25]) torch.Size([3, 256, 14, 25])
        """
        channels_last = z_e_x.permute(0, 2, 3, 1).contiguous()
        quantized, indices = VectorQuantizationStraightThrough.apply(
            channels_last,
            self.embedding.weight.detach(),
            labels,
            self.numclasses,
            self.activate_class_partitioning,
            self.shared_keys,
            self.training,
        )
        z_q_x = quantized.permute(0, 3, 1, 2).contiguous()
        # Re-select the codes from the non-detached embedding so this
        # second path carries gradients to the codebook.
        selected = torch.index_select(
            self.embedding.weight, dim=0, index=indices
        )
        z_q_x_bar = (
            selected.view_as(channels_last).permute(0, 3, 1, 2).contiguous()
        )
        return z_q_x, z_q_x_bar
| 19,449 | 30.370968 | 273 | py |
speechbrain | speechbrain-main/speechbrain/lobes/models/huggingface_whisper.py | """This lobe enables the integration of huggingface pretrained whisper model.
Transformer from HuggingFace needs to be installed:
https://huggingface.co/transformers/installation.html
Authors
* Adel Moumen 2022
* Titouan Parcollet 2022
* Luca Della Libera 2022
"""
import torch
import logging
from torch import nn
try:
from transformers import WhisperModel
from transformers import WhisperFeatureExtractor
from transformers.models.whisper.tokenization_whisper import (
WhisperTokenizer,
)
except ImportError:
MSG = "Please install transformers from HuggingFace to use Whisper\n"
MSG += "E.G. run: pip install transformers"
raise ImportError(MSG)
logger = logging.getLogger(__name__)
class HuggingFaceWhisper(nn.Module):
    """This lobe enables the integration of HuggingFace pretrained Whisper model.
    Source paper whisper:
       https://cdn.openai.com/papers/whisper.pdf
    Transformer from HuggingFace needs to be installed:
    https://huggingface.co/transformers/installation.html
    Some part of the code also is adapted from the official OpenAI repository:
    https://github.com/openai/whisper
    The model can be finetuned. It will download automatically the model from
    HuggingFace or use a local path.
    Arguments
    ---------
    source : str
        HuggingFace hub name: e.g "openai/whisper-tiny"
    save_path : str
        Path (dir) of the downloaded model.
    sampling_rate : int (default: 16000)
        Sampling rate of the audio signal.
    encoder_only : bool (default: False)
        If True, the forward function outputs the hidden states from the last transformer layer of the encoder.
        If False, one step of the decoder is performed and returned.
    freeze : bool (default: False)
        If True, the model is frozen.
    freeze_encoder : bool (default: False)
        If True, the encoder is frozen.
    output_attentions : bool (default: True)
        If True, the forward function outputs the attention weights.
    output_all_hiddens: bool (default: False)
        If True, the forward function outputs the hidden states from all transformer layers of the encoder.
        For example whisper-base has 6 transformer layers and the output is of shape (7, B, T, C),
        where the output of the CNN output is added to the beginning.
        If False, the forward function outputs the hidden states only from the last transformer layer of the encoder.
    Example
    -------
    >>> model_hub = "openai/whisper-tiny"
    >>> save_path = "savedir"
    >>> sampling_rate = 16000
    >>> model = HuggingFaceWhisper(model_hub, save_path, sampling_rate)
    >>> tokens = torch.tensor([[1, 1]]) * model.model.config.decoder_start_token_id
    >>> inputs = torch.randn([1, 93680])
    >>> outputs = model(inputs, tokens)
    """
    def __init__(
        self,
        source,
        save_path,
        sampling_rate=16000,
        encoder_only=False,
        freeze=False,
        freeze_encoder=False,
        output_attentions=True,
        output_all_hiddens=False,
    ):
        super().__init__()
        self.sampling_rate = sampling_rate
        self.encoder_only = encoder_only
        self.freeze = freeze
        self.freeze_encoder = freeze_encoder
        self.output_attentions = output_attentions
        self.output_all_hiddens = output_all_hiddens
        self.tokenizer = None
        # Download the tokenizer only if we are going to use the Decoder.
        if not encoder_only:
            self.tokenizer = WhisperTokenizer.from_pretrained(source)
        # Download the extractor from HuggingFace. Only its STFT/mel
        # parameters are kept; feature extraction itself is done locally
        # in _log_mel_spectrogram.
        feature_extractor = WhisperFeatureExtractor.from_pretrained(
            source, cache_dir=save_path, sampling_rate=sampling_rate,
        )
        self._n_fft = feature_extractor.n_fft
        self._hop_length = feature_extractor.hop_length
        self._n_samples = feature_extractor.n_samples
        # Registered as a buffer so the mel filterbank follows the module's
        # device/dtype moves and is saved with the state dict.
        self.register_buffer(
            "_mel_filters", torch.as_tensor(feature_extractor.mel_filters)
        )
        self.model = WhisperModel.from_pretrained(source, cache_dir=save_path)
        if self.freeze:
            logger.warning(
                "speechbrain.lobes.models.huggingface_whisper - whisper encoder-decoder is frozen."
            )
            self.model.train()  # kept in train mode so dropout/LayerNorm behave as during training
            for param in self.model.parameters():
                param.requires_grad = False
        else:
            self.model.train()
        if self.freeze_encoder:
            logger.warning(
                "speechbrain.lobes.models.huggingface_whisper - whisper encoder is frozen."
            )
            for param in self.model.encoder.parameters():
                param.requires_grad = False
    def forward(self, wav, decoder_input_ids=None):
        """Perform mel transformation and one step of the whisper (encoder-decoder).
        Arguments
        ---------
        wav : torch.Tensor (signal)
            A batch of audio signals to transform to features.
        decoder_input_ids : torch.Tensor
            This is necessary if we want to use the decoder.
            A batch of decoder inputs tokens.
            The first tokens need to dictate the behavior of the decoder.
            It needs to start with the bos_token, the language token,
            the task token, and finally the timestamp token.
            Please refer to the whisper paper for more details or go to the
            seq2seq2.py file in SpeechBrain to see how to generate the tokens
            with Greedy Search and/or Beam Search.
        Returns
        -------
        Encoder states only when ``encoder_only`` is True; otherwise a tuple
        (encoder states, decoder logits, last-layer decoder attention).
        """
        if self.freeze:
            # Whole model frozen: run everything under no_grad.
            with torch.no_grad():
                out_encoder = self.forward_encoder(wav)
                if self.encoder_only:
                    return out_encoder
                if self.output_all_hiddens:
                    # Decoder attends to the last encoder layer only.
                    logits, attn = self.forward_decoder(
                        out_encoder[-1], decoder_input_ids
                    )
                else:
                    logits, attn = self.forward_decoder(
                        out_encoder, decoder_input_ids
                    )
                return out_encoder, logits, attn
        else:
            if self.encoder_only:
                return self.forward_encoder(wav)
            else:
                out_encoder = self.forward_encoder(wav)
                if self.output_all_hiddens:
                    logits, attn = self.forward_decoder(
                        out_encoder[-1], decoder_input_ids
                    )
                else:
                    logits, attn = self.forward_decoder(
                        out_encoder, decoder_input_ids
                    )
                return out_encoder, logits, attn
    def forward_encoder(self, wav):
        """Perform one step of the whisper encoder with Mel FBANKs as Input.
        Arguments
        ---------
        wav : torch.Tensor (FBANKs)
            A batch of Mel FBANK from HF to transform to features.
        """
        if self.freeze_encoder:
            with torch.no_grad():
                return self._get_encoder_states(wav)
        else:
            return self._get_encoder_states(wav)
    def _get_encoder_states(self, wav):
        """Takes an input waveform and return its corresponding encoder states.
        Returns the last hidden state of the encoder or all hidden states if
        output_all_hiddens is True.
        Arguments
        ---------
        wav : torch.Tensor (signal)
            A batch of audio signals to transform to features.
        """
        mel = self._get_mel(wav)
        if self.output_all_hiddens:
            states = self.model.encoder(mel, output_hidden_states=True)
            # Stacks all layer outputs into one tensor: (num_layers+1, B, T, C).
            return torch.stack(states.hidden_states)
        else:
            return self.model.encoder(mel).last_hidden_state
    def _get_mel(self, wav):
        """Takes an input waveform and return its corresponding mel spectrogram
        according to HuggingFace implementation. WARNING: it's slow! Better push this
        in the DataLoader.
        Arguments
        ---------
        wav : torch.Tensor (signal)
            A batch of audio signals to transform to features.
        """
        mels = self._pad_or_trim(wav)
        mels = self._log_mel_spectrogram(mels)
        return mels
    def _log_mel_spectrogram(self, audio):
        """Compute the Mel spectrogram of a batch of input waveforms.
        Reference: adapted from
        https://github.com/openai/whisper/blob/eff383b27b783e280c089475852ba83f20f64998/whisper/audio.py#L92
        Arguments
        ---------
        audio : torch.Tensor
            A batch of audio waveforms in 16 kHz.
        Returns
        -------
        torch.Tensor
            A tensor that contains the batch of Mel spectrograms.
        """
        window = torch.hann_window(self._n_fft, device=audio.device)
        stft = torch.stft(
            audio,
            self._n_fft,
            self._hop_length,
            window=window,
            return_complex=True,
        )
        # Drop the last frame to match OpenAI's reference implementation.
        magnitudes = stft[..., :-1].abs() ** 2
        filters = self._mel_filters
        mel_spec = filters @ magnitudes
        # Log compression with floor, then dynamic-range clamp to 80 dB
        # below the per-utterance peak, then affine rescaling (as in OpenAI).
        log_spec = torch.clamp(mel_spec, min=1e-10).log10()
        log_spec = torch.maximum(
            log_spec,
            (log_spec.flatten(start_dim=1).max(dim=-1)[0] - 8.0)[:, None, None],
        )
        log_spec = (log_spec + 4.0) / 4.0
        return log_spec
    def _pad_or_trim(self, array, axis=-1):
        """Pad or trim the Mel spectrograms as expected by the encoder.
        Reference: adapted from
        https://github.com/openai/whisper/blob/eff383b27b783e280c089475852ba83f20f64998/whisper/audio.py#L52
        Arguments
        ---------
        array : torch.Tensor
            A tensor that contains the batch of Mel spectrograms.
        axis : int
            The axis along which to pad.
        Returns
        -------
        torch.Tensor
            The padded tensor.
        """
        if array.shape[axis] > self._n_samples:
            # Trim to the fixed window expected by Whisper.
            array = array.index_select(
                dim=axis,
                index=torch.arange(self._n_samples, device=array.device),
            )
        if array.shape[axis] < self._n_samples:
            pad_widths = [(0, 0)] * array.ndim
            pad_widths[axis] = (
                0,
                self._n_samples - array.shape[axis],
            )
            # F.pad expects pairs in reverse dimension order; flatten them.
            array = nn.functional.pad(
                array, [pad for sizes in pad_widths[::-1] for pad in sizes]
            )
        return array
    def forward_decoder(self, audio_features, decoder_input_ids):
        """Perform one step of the whisper decoder.
        Arguments
        ---------
        audio_features : torch.Tensor
            A batch of audio features (mel + whisper encoding).
        decoder_input_ids : torch.Tensor
            A batch of decoder inputs tokens.
            The first tokens need to dictate the behavior of the decoder.
            It needs to start with the bos_token, the language token,
            the task token, and finally the timestamp token.
            Please refer to the whisper paper for more details or go to the
            seq2seq2.py file in SpeechBrain to see how to generate the tokens
            with Greedy Search and/or Beam Search.
        Returns
        -------
        tuple
            Decoder logits over the vocabulary and the last-layer attention
            weights (heads folded into the batch dimension).
        """
        output_states = self.model.decoder(
            encoder_hidden_states=audio_features,
            input_ids=decoder_input_ids,
            output_attentions=self.output_attentions,
        )
        attn = output_states.attentions[-1]
        # Fold attention heads into the batch dimension: [B*H, T, S].
        attn = attn.view(attn.shape[0] * attn.shape[1], *attn.shape[2:])
        output_states = output_states.last_hidden_state
        # Weight tying: project hidden states with the (transposed) token
        # embedding matrix to obtain vocabulary logits.
        logits = (
            output_states
            @ torch.transpose(
                self.model.decoder.embed_tokens.weight.to(output_states.dtype),
                0,
                1,
            )
        ).to(audio_features.dtype)
        return logits, attn
| 12,043 | 35.607903 | 117 | py |
speechbrain | speechbrain-main/speechbrain/lobes/models/dual_path.py | """Library to support dual-path speech separation.
Authors
* Cem Subakan 2020
* Mirco Ravanelli 2020
* Samuele Cornell 2020
* Mirko Bronzi 2020
* Jianyuan Zhong 2020
"""
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import copy
from speechbrain.nnet.linear import Linear
from speechbrain.lobes.models.transformer.Transformer import TransformerEncoder
from speechbrain.lobes.models.transformer.Transformer import PositionalEncoding
from speechbrain.lobes.models.transformer.Conformer import ConformerEncoder
import speechbrain.nnet.RNN as SBRNN
from speechbrain.nnet.activations import Swish
EPS = 1e-8
class GlobalLayerNorm(nn.Module):
    """Global Layer Normalization (gLN).

    Normalizes over every dimension except the batch dimension, optionally
    followed by a learnable per-channel affine transform.

    Arguments
    ---------
    dim : (int or list or torch.Size)
        Input shape from an expected input of size (number of channels).
    shape : int
        Expected input rank (3 for [N, C, L], 4 for [N, C, K, S]); selects
        the shape of the affine parameters.
    eps : float
        A value added to the denominator for numerical stability.
    elementwise_affine : bool
        A boolean value that when set to True,
        this module has learnable per-element affine parameters
        initialized to ones (for weights) and zeros (for biases).

    Example
    -------
    >>> x = torch.randn(5, 10, 20)
    >>> GLN = GlobalLayerNorm(10, 3)
    >>> x_norm = GLN(x)
    """

    def __init__(self, dim, shape, eps=1e-8, elementwise_affine=True):
        super(GlobalLayerNorm, self).__init__()
        self.dim = dim
        self.eps = eps
        self.elementwise_affine = elementwise_affine
        if self.elementwise_affine:
            # Parameter shape matches the expected rank so the affine
            # transform broadcasts over the time (and chunk) axes.
            param_shapes = {3: (self.dim, 1), 4: (self.dim, 1, 1)}
            if shape in param_shapes:
                self.weight = nn.Parameter(torch.ones(*param_shapes[shape]))
                self.bias = nn.Parameter(torch.zeros(*param_shapes[shape]))
        else:
            self.register_parameter("weight", None)
            self.register_parameter("bias", None)

    def forward(self, x):
        """Returns the normalized tensor.

        Arguments
        ---------
        x : torch.Tensor
            Tensor of size [N, C, K, S] or [N, C, L].
        """
        # Statistics are computed over all non-batch dimensions.
        if x.dim() == 3:
            reduce_dims = (1, 2)
        elif x.dim() == 4:
            reduce_dims = (1, 2, 3)
        else:
            # Ranks other than 3/4 pass through unchanged.
            return x
        mean = x.mean(reduce_dims, keepdim=True)
        var = ((x - mean) ** 2).mean(reduce_dims, keepdim=True)
        x_hat = (x - mean) / torch.sqrt(var + self.eps)
        if self.elementwise_affine:
            x_hat = self.weight * x_hat + self.bias
        return x_hat
class CumulativeLayerNorm(nn.LayerNorm):
    """Channel-wise layer normalization (cLN).

    Applies ``nn.LayerNorm`` over the channel dimension of inputs shaped
    [N, C, L] or [N, C, K, S].

    Arguments
    ---------
    dim : int
        Dimension that you want to normalize (number of channels).
    elementwise_affine : bool
        Learnable per-element affine parameters.
    eps : float
        Numerical-stability constant forwarded to ``nn.LayerNorm``.

    Example
    -------
    >>> x = torch.randn(5, 10, 20)
    >>> CLN = CumulativeLayerNorm(10)
    >>> x_norm = CLN(x)
    """

    def __init__(self, dim, elementwise_affine=True, eps=1e-8):
        super(CumulativeLayerNorm, self).__init__(
            dim, elementwise_affine=elementwise_affine, eps=eps
        )

    def forward(self, x):
        """Returns the channel-normalized tensor.

        Arguments
        ---------
        x : torch.Tensor
            Tensor size [N, C, K, S] or [N, C, L]
        """
        rank = x.dim()
        if rank == 4:
            # [N, C, K, S] -> [N, K, S, C]: put channels last so LayerNorm
            # normalizes over channels only, then restore the layout.
            normed = super().forward(x.permute(0, 2, 3, 1).contiguous())
            return normed.permute(0, 3, 1, 2).contiguous()
        if rank == 3:
            # [N, C, L] -> [N, L, C] and back.
            normed = super().forward(torch.transpose(x, 1, 2))
            return torch.transpose(normed, 1, 2)
        # Other ranks are passed through unchanged.
        return x
def select_norm(norm, dim, shape, eps=1e-8):
    """Return the normalization layer selected by ``norm``.

    Arguments
    ---------
    norm : str
        One of "gln", "cln", "ln"; any other value yields BatchNorm1d.
    dim : int
        Number of channels to normalize.
    shape : int
        Expected input rank (only used by "gln").
    eps : float
        Numerical-stability constant (ignored by the BatchNorm fallback).
    """
    # Lambdas defer construction so only the selected layer is built.
    builders = {
        "gln": lambda: GlobalLayerNorm(
            dim, shape, elementwise_affine=True, eps=eps
        ),
        "cln": lambda: CumulativeLayerNorm(
            dim, elementwise_affine=True, eps=eps
        ),
        "ln": lambda: nn.GroupNorm(1, dim, eps=eps),
    }
    if norm in builders:
        return builders[norm]()
    return nn.BatchNorm1d(dim)
class Encoder(nn.Module):
    """Convolutional Encoder Layer.

    Arguments
    ---------
    kernel_size : int
        Length of filters.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.

    Example
    -------
    >>> x = torch.randn(2, 1000)
    >>> encoder = Encoder(kernel_size=4, out_channels=64)
    >>> h = encoder(x)
    >>> h.shape
    torch.Size([2, 64, 499])
    """

    def __init__(self, kernel_size=2, out_channels=64, in_channels=1):
        super(Encoder, self).__init__()
        # Strided 1-D convolution (stride = kernel_size // 2, no bias)
        # acting as a learned filterbank over the raw waveform.
        self.conv1d = nn.Conv1d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=kernel_size // 2,
            groups=1,
            bias=False,
        )
        self.in_channels = in_channels

    def forward(self, x):
        """Return the encoded output.

        Arguments
        ---------
        x : torch.Tensor
            Input tensor with dimensionality [B, L].
        Return
        ------
        x : torch.Tensor
            Encoded tensor with dimensionality [B, N, T_out].
            where B = Batchsize
                  L = Number of timepoints
                  N = Number of filters
                  T_out = Number of timepoints at the output of the encoder
        """
        # Single-channel waveforms arrive as [B, L]; insert the channel axis.
        if self.in_channels == 1:
            x = x.unsqueeze(1)
        # [B, 1, L] -> [B, N, T_out], rectified.
        return F.relu(self.conv1d(x))
class Decoder(nn.ConvTranspose1d):
    """A decoder layer that consists of ConvTranspose1d.

    Arguments
    ---------
    kernel_size : int
        Length of filters.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.

    Example
    ---------
    >>> x = torch.randn(2, 100, 1000)
    >>> decoder = Decoder(kernel_size=4, in_channels=100, out_channels=1)
    >>> h = decoder(x)
    >>> h.shape
    torch.Size([2, 1003])
    """

    def __init__(self, *args, **kwargs):
        super(Decoder, self).__init__(*args, **kwargs)

    def forward(self, x):
        """Return the decoded output.

        Arguments
        ---------
        x : torch.Tensor
            Input tensor with dimensionality [B, N, L] (or [B, L] when the
            input has a single channel).
            where, B = Batchsize,
                   N = number of filters
                   L = time points

        Raises
        ------
        RuntimeError
            If the input is not a 2D or 3D tensor.
        """
        if x.dim() not in [2, 3]:
            # Fix: the original used `self.__name__`, which does not exist
            # on nn.Module instances — the intended RuntimeError turned into
            # an AttributeError. The message also claimed "3/4D" while the
            # check accepts 2/3D tensors.
            raise RuntimeError(
                "{} accepts 2/3D tensor as input".format(
                    self.__class__.__name__
                )
            )
        x = super().forward(x if x.dim() == 3 else torch.unsqueeze(x, 1))
        # Drop the singleton channel dimension of the reconstruction.
        # NOTE(review): the bare torch.squeeze also removes a batch dim of
        # size 1 — preserved as-is for backward compatibility.
        if torch.squeeze(x).dim() == 1:
            x = torch.squeeze(x, dim=1)
        else:
            x = torch.squeeze(x)
        return x
class IdentityBlock:
    """This block is used when we want to have identity transformation within the Dual_path block.

    Any keyword arguments are accepted and ignored, so the block can stand
    in for configurable inter/intra blocks.

    Example
    -------
    >>> x = torch.randn(10, 100)
    >>> IB = IdentityBlock()
    >>> xhat = IB(x)
    """

    def __init__(self, **kwargs):
        # Fix: the original defined `_init__` (missing a leading
        # underscore), so the constructor was never invoked and passing
        # keyword arguments raised TypeError.
        pass

    def __call__(self, x):
        """Return the input unchanged."""
        return x
class FastTransformerBlock(nn.Module):
    """This block is used to implement fast transformer models with efficient attention.
    The implementations are taken from https://fast-transformers.github.io/
    Arguments
    ---------
    attention_type : str
        Specifies the type of attention.
        Check https://fast-transformers.github.io/  for details.
    out_channels : int
        Dimensionality of the representation.
    num_layers : int
        Number of layers.
    nhead : int
        Number of attention heads.
    d_ffn : int
        Dimensionality of positional feed-forward.
    dropout : float
        Dropout drop rate.
    activation : str
        Activation function.
    reformer_bucket_size : int
        bucket size for reformer.
    Example
    -------
    # >>> x = torch.randn(10, 100, 64)
    # >>> block = FastTransformerBlock('linear', 64)
    # >>> x = block(x)
    # >>> x.shape
    # torch.Size([10, 100, 64])
    """
    def __init__(
        self,
        attention_type,
        out_channels,
        num_layers=6,
        nhead=8,
        d_ffn=1024,
        dropout=0,
        activation="relu",
        reformer_bucket_size=32,
    ):
        super(FastTransformerBlock, self).__init__()
        # Imported lazily so the (optional) fast-transformers dependency is
        # only required when this block is actually instantiated.
        from fast_transformers.builders import TransformerEncoderBuilder
        builder = TransformerEncoderBuilder.from_kwargs(
            attention_type=attention_type,
            n_layers=num_layers,
            n_heads=nhead,
            feed_forward_dimensions=d_ffn,
            query_dimensions=out_channels // nhead,
            value_dimensions=out_channels // nhead,
            dropout=dropout,
            attention_dropout=dropout,
            chunk_size=reformer_bucket_size,
        )
        self.mdl = builder.get()
        self.attention_type = attention_type
        self.reformer_bucket_size = reformer_bucket_size
    def forward(self, x):
        """Returns the transformed input.
        Arguments
        ---------
        x : torch.Tensor
            Tensor shaper [B, L, N].
            where, B = Batchsize,
                   N = number of filters
                   L = time points
        """
        if self.attention_type == "reformer":
            # Reformer needs the sequence length to be a multiple of
            # 2 * bucket_size, so pad zeros at the end.
            pad_size = (self.reformer_bucket_size * 2) - (
                x.shape[1] % (self.reformer_bucket_size * 2)
            )
            device = x.device
            x_padded = torch.cat(
                [x, torch.zeros(x.size(0), pad_size, x.size(-1)).to(device)],
                dim=1,
            )
            # apply the model
            x_padded = self.mdl(x_padded)
            # get rid of zeros at the end
            return x_padded[:, :-pad_size, :]
        else:
            return self.mdl(x)
class PyTorchPositionalEncoding(nn.Module):
    """Sinusoidal positional encoder for the pytorch transformer.

    Arguments
    ---------
    d_model : int
        Representation dimensionality.
    dropout : float
        Dropout drop prob.
    max_len : int
        Max sequence length.

    Example
    -------
    >>> x = torch.randn(10, 100, 64)
    >>> enc = PyTorchPositionalEncoding(64)
    >>> x = enc(x)
    """

    def __init__(self, d_model, dropout=0.1, max_len=5000):
        super(PyTorchPositionalEncoding, self).__init__()
        self.dropout = nn.Dropout(p=dropout)
        # Standard "Attention Is All You Need" sinusoid table:
        # even columns get sin, odd columns get cos of position * frequency.
        positions = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
        frequencies = torch.exp(
            torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model)
        )
        table = torch.zeros(max_len, d_model)
        table[:, 0::2] = torch.sin(positions * frequencies)
        table[:, 1::2] = torch.cos(positions * frequencies)
        # Stored as [max_len, 1, d_model] so it broadcasts over dim 1.
        self.register_buffer("pe", table.unsqueeze(0).transpose(0, 1))

    def forward(self, x):
        """Returns the encoded output.

        Arguments
        ---------
        x : torch.Tensor
            Tensor shape [B, L, N],
            where, B = Batchsize,
                   N = number of filters
                   L = time points
        """
        # NOTE(review): the table is sliced by x.size(0); with batch-first
        # inputs this indexes by batch size — behavior preserved as-is.
        return self.dropout(x + self.pe[: x.size(0), :])
class PytorchTransformerBlock(nn.Module):
    """A wrapper that uses the pytorch transformer block.

    Arguments
    ---------
    out_channels : int
        Dimensionality of the representation.
    num_layers : int
        Number of layers.
    nhead : int
        Number of attention heads.
    d_ffn : int
        Dimensionality of positional feed forward.
    dropout : float
        Dropout drop rate.
    activation : str
        Activation function.
    use_positional_encoding : bool
        If true we use a positional encoding.

    Example
    ---------
    >>> x = torch.randn(10, 100, 64)
    >>> block = PytorchTransformerBlock(64)
    >>> x = block(x)
    >>> x.shape
    torch.Size([10, 100, 64])
    """

    def __init__(
        self,
        out_channels,
        num_layers=6,
        nhead=8,
        d_ffn=2048,
        dropout=0.1,
        activation="relu",
        use_positional_encoding=True,
    ):
        super(PytorchTransformerBlock, self).__init__()
        layer = nn.TransformerEncoderLayer(
            d_model=out_channels,
            nhead=nhead,
            dim_feedforward=d_ffn,
            dropout=dropout,
            activation=activation,
        )
        # NOTE: nn.TransformerEncoder applies its own final normalization
        # only when a `norm` module is supplied; none is supplied here.
        self.mdl = nn.TransformerEncoder(layer, num_layers=num_layers)
        self.pos_encoder = (
            PyTorchPositionalEncoding(out_channels)
            if use_positional_encoding
            else None
        )

    def forward(self, x):
        """Returns the transformed output.

        Arguments
        ---------
        x : torch.Tensor
            Tensor shape [B, L, N]
            where, B = Batchsize,
                   N = number of filters
                   L = time points
        """
        if self.pos_encoder is not None:
            x = self.pos_encoder(x)
        return self.mdl(x)
class SBTransformerBlock(nn.Module):
    """A wrapper for the SpeechBrain implementation of the transformer encoder.

    Arguments
    ---------
    num_layers : int
        Number of layers.
    d_model : int
        Dimensionality of the representation.
    nhead : int
        Number of attention heads.
    d_ffn : int
        Dimensionality of positional feed forward.
    input_shape : tuple
        Shape of input.
    kdim : int
        Dimension of the key (Optional).
    vdim : int
        Dimension of the value (Optional).
    dropout : float
        Dropout rate.
    activation : str
        Activation function.
    use_positional_encoding : bool
        If true we use a positional encoding.
    norm_before: bool
        Use normalization before transformations.

    Example
    ---------
    >>> x = torch.randn(10, 100, 64)
    >>> block = SBTransformerBlock(1, 64, 8)
    >>> x = block(x)
    >>> x.shape
    torch.Size([10, 100, 64])
    """

    def __init__(
        self,
        num_layers,
        d_model,
        nhead,
        d_ffn=2048,
        input_shape=None,
        kdim=None,
        vdim=None,
        dropout=0.1,
        activation="relu",
        use_positional_encoding=False,
        norm_before=False,
        attention_type="regularMHA",
    ):
        super(SBTransformerBlock, self).__init__()
        self.use_positional_encoding = use_positional_encoding
        # Map the activation name to its module class; anything else is an
        # error.
        activation_classes = {"relu": nn.ReLU, "gelu": nn.GELU}
        if activation not in activation_classes:
            raise ValueError("unknown activation")
        self.mdl = TransformerEncoder(
            num_layers=num_layers,
            nhead=nhead,
            d_ffn=d_ffn,
            input_shape=input_shape,
            d_model=d_model,
            kdim=kdim,
            vdim=vdim,
            dropout=dropout,
            activation=activation_classes[activation],
            normalize_before=norm_before,
            attention_type=attention_type,
        )
        if use_positional_encoding:
            self.pos_enc = PositionalEncoding(input_size=d_model)

    def forward(self, x):
        """Returns the transformed output.

        Arguments
        ---------
        x : torch.Tensor
            Tensor shape [B, L, N],
            where, B = Batchsize,
                   L = time points
                   N = number of filters
        """
        # TransformerEncoder returns (output, attention); keep the output.
        if not self.use_positional_encoding:
            return self.mdl(x)[0]
        return self.mdl(x + self.pos_enc(x))[0]
class SBRNNBlock(nn.Module):
    """RNNBlock for the dual path pipeline.

    Arguments
    ---------
    input_size : int
        Dimensionality of the input features.
    hidden_channels : int
        Dimensionality of the latent layer of the rnn.
    num_layers : int
        Number of the rnn layers.
    rnn_type : str
        Type of the the rnn cell.
    dropout : float
        Dropout rate
    bidirectional : bool
        If True, bidirectional.

    Example
    ---------
    >>> x = torch.randn(10, 100, 64)
    >>> rnn = SBRNNBlock(64, 100, 1, bidirectional=True)
    >>> x = rnn(x)
    >>> x.shape
    torch.Size([10, 100, 200])
    """

    def __init__(
        self,
        input_size,
        hidden_channels,
        num_layers,
        rnn_type="LSTM",
        dropout=0,
        bidirectional=True,
    ):
        super(SBRNNBlock, self).__init__()
        # Resolve the cell class (LSTM/GRU/...) from speechbrain.nnet.RNN
        # by name.
        rnn_cls = getattr(SBRNN, rnn_type)
        self.mdl = rnn_cls(
            hidden_channels,
            input_size=input_size,
            num_layers=num_layers,
            dropout=dropout,
            bidirectional=bidirectional,
        )

    def forward(self, x):
        """Returns the transformed output.

        Arguments
        ---------
        x : torch.Tensor
            [B, L, N]
            where, B = Batchsize,
                   N = number of filters
                   L = time points
        """
        # The RNN returns (output, hidden); only the output sequence is used.
        out, _ = self.mdl(x)
        return out
class DPTNetBlock(nn.Module):
    """The DPT Net block.

    A transformer-style layer where the feed-forward sub-layer is replaced
    by a bidirectional LSTM, as in the Dual-Path Transformer Network.

    Arguments
    ---------
    d_model : int
        Number of expected features in the input (required).
    nhead : int
        Number of heads in the multiheadattention models (required).
    dim_feedforward : int
        Dimension of the feedforward network model (default=2048).
    dropout : float
        Dropout value (default=0.1).
    activation : str
        Activation function of intermediate layer, relu or gelu (default=relu).

    Examples
    --------
    >>> encoder_layer = DPTNetBlock(d_model=512, nhead=8)
    >>> src = torch.rand(10, 100, 512)
    >>> out = encoder_layer(src)
    >>> out.shape
    torch.Size([10, 100, 512])
    """

    def __init__(
        self, d_model, nhead, dim_feedforward=256, dropout=0, activation="relu"
    ):
        from torch.nn.modules.activation import MultiheadAttention
        from torch.nn.modules.normalization import LayerNorm
        from torch.nn.modules.dropout import Dropout
        from torch.nn.modules.rnn import LSTM
        from torch.nn.modules.linear import Linear

        super(DPTNetBlock, self).__init__()
        # Self-attention sub-layer.
        self.self_attn = MultiheadAttention(d_model, nhead, dropout=dropout)
        # RNN sub-layer replacing the usual position-wise feed-forward:
        # BiLSTM doubles the hidden size, and bidirectionality doubles it
        # again, hence the 4x projection back to d_model.
        self.rnn = LSTM(d_model, d_model * 2, 1, bidirectional=True)
        self.dropout = Dropout(dropout)
        self.linear2 = Linear(d_model * 2 * 2, d_model)
        self.norm1 = LayerNorm(d_model)
        self.norm2 = LayerNorm(d_model)
        self.dropout1 = Dropout(dropout)
        self.dropout2 = Dropout(dropout)
        self.activation = _get_activation_fn(activation)

    def __setstate__(self, state):
        # Older checkpoints may lack the activation attribute.
        if "activation" not in state:
            state["activation"] = F.relu
        super(DPTNetBlock, self).__setstate__(state)

    def forward(self, src):
        """Pass the input through the encoder layer.

        Arguments
        ---------
        src : torch.Tensor
            Tensor shape [B, L, N]
            where, B = Batchsize,
                   N = number of filters
                   L = time points
        """
        # Self-attention with residual connection and layer norm.
        attn_out = self.self_attn(
            src, src, src, attn_mask=None, key_padding_mask=None
        )[0]
        src = self.norm1(src + self.dropout1(attn_out))
        # BiLSTM "feed-forward" with residual connection and layer norm.
        rnn_out = self.rnn(src)[0]
        rnn_out = self.linear2(self.dropout(self.activation(rnn_out)))
        return self.norm2(src + self.dropout2(rnn_out))
def _get_activation_fn(activation):
"""Just a wrapper to get the activation functions.
"""
if activation == "relu":
return F.relu
elif activation == "gelu":
return F.gelu
class Dual_Computation_Block(nn.Module):
    """Computation block for dual-path processing.
    Arguments
    ---------
    intra_mdl : torch.nn.module
        Model to process within the chunks.
    inter_mdl : torch.nn.module
        Model to process across the chunks.
    out_channels : int
        Dimensionality of inter/intra model.
    norm : str
        Normalization type.
    skip_around_intra : bool
        Skip connection around the intra layer.
    linear_layer_after_inter_intra : bool
        Linear layer or not after inter or intra.
    Example
    ---------
        >>> intra_block = SBTransformerBlock(1, 64, 8)
        >>> inter_block = SBTransformerBlock(1, 64, 8)
        >>> dual_comp_block = Dual_Computation_Block(intra_block, inter_block, 64)
        >>> x = torch.randn(10, 64, 100, 10)
        >>> x = dual_comp_block(x)
        >>> x.shape
        torch.Size([10, 64, 100, 10])
    """
    def __init__(
        self,
        intra_mdl,
        inter_mdl,
        out_channels,
        norm="ln",
        skip_around_intra=True,
        linear_layer_after_inter_intra=True,
    ):
        super(Dual_Computation_Block, self).__init__()
        self.intra_mdl = intra_mdl
        self.inter_mdl = inter_mdl
        self.skip_around_intra = skip_around_intra
        self.linear_layer_after_inter_intra = linear_layer_after_inter_intra
        # Norm
        self.norm = norm
        if norm is not None:
            self.intra_norm = select_norm(norm, out_channels, 4)
            self.inter_norm = select_norm(norm, out_channels, 4)
        # Linear projections back to out_channels. RNN blocks output
        # 2 * hidden_size features (bidirectional), hence the larger
        # input_size in that case.
        if linear_layer_after_inter_intra:
            if isinstance(intra_mdl, SBRNNBlock):
                self.intra_linear = Linear(
                    out_channels, input_size=2 * intra_mdl.mdl.rnn.hidden_size
                )
            else:
                self.intra_linear = Linear(
                    out_channels, input_size=out_channels
                )
            if isinstance(inter_mdl, SBRNNBlock):
                # NOTE(review): this reads intra_mdl's hidden size even
                # though the branch checks inter_mdl — confirm whether
                # inter_mdl.mdl.rnn.hidden_size was intended.
                self.inter_linear = Linear(
                    out_channels, input_size=2 * intra_mdl.mdl.rnn.hidden_size
                )
            else:
                self.inter_linear = Linear(
                    out_channels, input_size=out_channels
                )
    def forward(self, x):
        """Returns the output tensor.
        Arguments
        ---------
        x : torch.Tensor
            Input tensor of dimension [B, N, K, S].
        Return
        ---------
        out: torch.Tensor
            Output tensor of dimension [B, N, K, S].
            where, B = Batchsize,
               N = number of filters
               K = time points in each chunk
               S = the number of chunks
        """
        B, N, K, S = x.shape
        # intra RNN: process each chunk independently (chunks folded into
        # the batch dimension).
        # [BS, K, N]
        intra = x.permute(0, 3, 2, 1).contiguous().view(B * S, K, N)
        # [BS, K, H]
        intra = self.intra_mdl(intra)
        # [BS, K, N]
        if self.linear_layer_after_inter_intra:
            intra = self.intra_linear(intra)
        # [B, S, K, N]
        intra = intra.view(B, S, K, N)
        # [B, N, K, S]
        intra = intra.permute(0, 3, 2, 1).contiguous()
        if self.norm is not None:
            intra = self.intra_norm(intra)
        # [B, N, K, S]
        if self.skip_around_intra:
            intra = intra + x
        # inter RNN: process across chunks (intra-chunk positions folded
        # into the batch dimension).
        # [BK, S, N]
        inter = intra.permute(0, 2, 3, 1).contiguous().view(B * K, S, N)
        # [BK, S, H]
        inter = self.inter_mdl(inter)
        # [BK, S, N]
        if self.linear_layer_after_inter_intra:
            inter = self.inter_linear(inter)
        # [B, K, S, N]
        inter = inter.view(B, K, S, N)
        # [B, N, K, S]
        inter = inter.permute(0, 3, 1, 2).contiguous()
        if self.norm is not None:
            inter = self.inter_norm(inter)
        # [B, N, K, S] residual around the inter path.
        out = inter + intra
        return out
class Dual_Path_Model(nn.Module):
"""The dual path model which is the basis for dualpathrnn, sepformer, dptnet.
Arguments
---------
in_channels : int
Number of channels at the output of the encoder.
out_channels : int
Number of channels that would be inputted to the intra and inter blocks.
intra_model : torch.nn.module
Model to process within the chunks.
inter_model : torch.nn.module
model to process across the chunks,
num_layers : int
Number of layers of Dual Computation Block.
norm : str
Normalization type.
K : int
Chunk length.
num_spks : int
Number of sources (speakers).
skip_around_intra : bool
Skip connection around intra.
linear_layer_after_inter_intra : bool
Linear layer after inter and intra.
use_global_pos_enc : bool
Global positional encodings.
max_length : int
Maximum sequence length.
Example
---------
>>> intra_block = SBTransformerBlock(1, 64, 8)
>>> inter_block = SBTransformerBlock(1, 64, 8)
>>> dual_path_model = Dual_Path_Model(64, 64, intra_block, inter_block, num_spks=2)
>>> x = torch.randn(10, 64, 2000)
>>> x = dual_path_model(x)
>>> x.shape
torch.Size([2, 10, 64, 2000])
"""
    def __init__(
        self,
        in_channels,
        out_channels,
        intra_model,
        inter_model,
        num_layers=1,
        norm="ln",
        K=200,
        num_spks=2,
        skip_around_intra=True,
        linear_layer_after_inter_intra=True,
        use_global_pos_enc=False,
        max_length=20000,
    ):
        super(Dual_Path_Model, self).__init__()
        self.K = K
        self.num_spks = num_spks
        self.num_layers = num_layers
        self.norm = select_norm(norm, in_channels, 3)
        # Bottleneck projection from encoder channels to block channels.
        self.conv1d = nn.Conv1d(in_channels, out_channels, 1, bias=False)
        self.use_global_pos_enc = use_global_pos_enc
        if self.use_global_pos_enc:
            # NOTE(review): PositionalEncoding receives max_length as its
            # first (input_size) argument — confirm against the
            # PositionalEncoding signature.
            self.pos_enc = PositionalEncoding(max_length)
        # Stack of dual-path blocks; deepcopy gives each layer its own
        # independent copy of the intra/inter models.
        self.dual_mdl = nn.ModuleList([])
        for i in range(num_layers):
            self.dual_mdl.append(
                copy.deepcopy(
                    Dual_Computation_Block(
                        intra_model,
                        inter_model,
                        out_channels,
                        norm,
                        skip_around_intra=skip_around_intra,
                        linear_layer_after_inter_intra=linear_layer_after_inter_intra,
                    )
                )
            )
        # Expands channels by num_spks to produce one mask per speaker.
        self.conv2d = nn.Conv2d(
            out_channels, out_channels * num_spks, kernel_size=1
        )
        self.end_conv1x1 = nn.Conv1d(out_channels, in_channels, 1, bias=False)
        self.prelu = nn.PReLU()
        self.activation = nn.ReLU()
        # gated output layer
        self.output = nn.Sequential(
            nn.Conv1d(out_channels, out_channels, 1), nn.Tanh()
        )
        self.output_gate = nn.Sequential(
            nn.Conv1d(out_channels, out_channels, 1), nn.Sigmoid()
        )
def forward(self, x):
"""Returns the output tensor.
Arguments
---------
x : torch.Tensor
Input tensor of dimension [B, N, L].
Returns
-------
out : torch.Tensor
Output tensor of dimension [spks, B, N, L]
where, spks = Number of speakers
B = Batchsize,
N = number of filters
L = the number of time points
"""
# before each line we indicate the shape after executing the line
# [B, N, L]
x = self.norm(x)
# [B, N, L]
x = self.conv1d(x)
if self.use_global_pos_enc:
x = self.pos_enc(x.transpose(1, -1)).transpose(1, -1) + x * (
x.size(1) ** 0.5
)
# [B, N, K, S]
x, gap = self._Segmentation(x, self.K)
# [B, N, K, S]
for i in range(self.num_layers):
x = self.dual_mdl[i](x)
x = self.prelu(x)
# [B, N*spks, K, S]
x = self.conv2d(x)
B, _, K, S = x.shape
# [B*spks, N, K, S]
x = x.view(B * self.num_spks, -1, K, S)
# [B*spks, N, L]
x = self._over_add(x, gap)
x = self.output(x) * self.output_gate(x)
# [B*spks, N, L]
x = self.end_conv1x1(x)
# [B, spks, N, L]
_, N, L = x.shape
x = x.view(B, self.num_spks, N, L)
x = self.activation(x)
# [spks, B, N, L]
x = x.transpose(0, 1)
return x
def _padding(self, input, K):
"""Padding the audio times.
Arguments
---------
K : int
Chunks of length.
P : int
Hop size.
input : torch.Tensor
Tensor of size [B, N, L].
where, B = Batchsize,
N = number of filters
L = time points
"""
B, N, L = input.shape
P = K // 2
gap = K - (P + L % K) % K
if gap > 0:
pad = (
torch.Tensor(torch.zeros(B, N, gap))
.type(input.dtype)
.to(input.device)
)
input = torch.cat([input, pad], dim=2)
_pad = (
torch.Tensor(torch.zeros(B, N, P))
.type(input.dtype)
.to(input.device)
)
input = torch.cat([_pad, input, _pad], dim=2)
return input, gap
def _Segmentation(self, input, K):
"""The segmentation stage splits
Arguments
---------
K : int
Length of the chunks.
input : torch.Tensor
Tensor with dim [B, N, L].
Return
-------
output : torch.tensor
Tensor with dim [B, N, K, S].
where, B = Batchsize,
N = number of filters
K = time points in each chunk
S = the number of chunks
L = the number of time points
"""
B, N, L = input.shape
P = K // 2
input, gap = self._padding(input, K)
# [B, N, K, S]
input1 = input[:, :, :-P].contiguous().view(B, N, -1, K)
input2 = input[:, :, P:].contiguous().view(B, N, -1, K)
input = (
torch.cat([input1, input2], dim=3).view(B, N, -1, K).transpose(2, 3)
)
return input.contiguous(), gap
def _over_add(self, input, gap):
"""Merge the sequence with the overlap-and-add method.
Arguments
---------
input : torch.tensor
Tensor with dim [B, N, K, S].
gap : int
Padding length.
Return
-------
output : torch.tensor
Tensor with dim [B, N, L].
where, B = Batchsize,
N = number of filters
K = time points in each chunk
S = the number of chunks
L = the number of time points
"""
B, N, K, S = input.shape
P = K // 2
# [B, N, S, K]
input = input.transpose(2, 3).contiguous().view(B, N, -1, K * 2)
input1 = input[:, :, :, :K].contiguous().view(B, N, -1)[:, :, P:]
input2 = input[:, :, :, K:].contiguous().view(B, N, -1)[:, :, :-P]
input = input1 + input2
# [B, N, L]
if gap > 0:
input = input[:, :, :-gap]
return input
class SepformerWrapper(nn.Module):
    """The wrapper for the sepformer model which combines the Encoder, Masknet and the decoder
    https://arxiv.org/abs/2010.13154
    Arguments
    ---------
    encoder_kernel_size: int,
        The kernel size used in the encoder
    encoder_in_nchannels: int,
        The number of channels of the input audio
    encoder_out_nchannels: int,
        The number of filters used in the encoder.
        Also, number of channels that would be inputted to the intra and inter blocks.
    masknet_chunksize: int,
        The chunk length that is to be processed by the intra blocks
    masknet_numlayers: int,
        The number of layers of combination of inter and intra blocks
    masknet_norm: str,
        The normalization type to be used in the masknet
        Should be one of 'ln' -- layernorm, 'gln' -- globallayernorm
                         'cln' -- cumulative layernorm, 'bn' -- batchnorm
        -- see the select_norm function above for more details
    masknet_useextralinearlayer: bool,
        Whether or not to use a linear layer at the output of intra and inter blocks
    masknet_extraskipconnection: bool,
        This introduces extra skip connections around the intra block
    masknet_numspks: int,
        This determines the number of speakers to estimate
    intra_numlayers: int,
        This determines the number of layers in the intra block
    inter_numlayers: int,
        This determines the number of layers in the inter block
    intra_nhead: int,
        This determines the number of parallel attention heads in the intra block
    inter_nhead: int,
        This determines the number of parallel attention heads in the inter block
    intra_dffn: int,
        The number of dimensions in the positional feedforward model in the inter block
    inter_dffn: int,
        The number of dimensions in the positional feedforward model in the intra block
    intra_use_positional: bool,
        Whether or not to use positional encodings in the intra block
    inter_use_positional: bool,
        Whether or not to use positional encodings in the inter block
    intra_norm_before: bool
        Whether or not we use normalization before the transformations in the intra block
    inter_norm_before: bool
        Whether or not we use normalization before the transformations in the inter block
    Example
    -----
    >>> model = SepformerWrapper()
    >>> inp = torch.rand(1, 160)
    >>> result = model.forward(inp)
    >>> result.shape
    torch.Size([1, 160, 2])
    """
    def __init__(
        self,
        encoder_kernel_size=16,
        encoder_in_nchannels=1,
        encoder_out_nchannels=256,
        masknet_chunksize=250,
        masknet_numlayers=2,
        masknet_norm="ln",
        masknet_useextralinearlayer=False,
        masknet_extraskipconnection=True,
        masknet_numspks=2,
        intra_numlayers=8,
        inter_numlayers=8,
        intra_nhead=8,
        inter_nhead=8,
        intra_dffn=1024,
        inter_dffn=1024,
        intra_use_positional=True,
        inter_use_positional=True,
        intra_norm_before=True,
        inter_norm_before=True,
    ):
        """Builds the encoder, the dual-path masknet and the decoder, then
        reinitializes all of their parameters."""
        super(SepformerWrapper, self).__init__()
        # Learned filterbank analysis: waveform -> [B, N, T] features.
        self.encoder = Encoder(
            kernel_size=encoder_kernel_size,
            out_channels=encoder_out_nchannels,
            in_channels=encoder_in_nchannels,
        )
        # Transformer processing *within* each chunk.
        intra_model = SBTransformerBlock(
            num_layers=intra_numlayers,
            d_model=encoder_out_nchannels,
            nhead=intra_nhead,
            d_ffn=intra_dffn,
            use_positional_encoding=intra_use_positional,
            norm_before=intra_norm_before,
        )
        # Transformer processing *across* chunks.
        inter_model = SBTransformerBlock(
            num_layers=inter_numlayers,
            d_model=encoder_out_nchannels,
            nhead=inter_nhead,
            d_ffn=inter_dffn,
            use_positional_encoding=inter_use_positional,
            norm_before=inter_norm_before,
        )
        self.masknet = Dual_Path_Model(
            in_channels=encoder_out_nchannels,
            out_channels=encoder_out_nchannels,
            intra_model=intra_model,
            inter_model=inter_model,
            num_layers=masknet_numlayers,
            norm=masknet_norm,
            K=masknet_chunksize,
            num_spks=masknet_numspks,
            skip_around_intra=masknet_extraskipconnection,
            linear_layer_after_inter_intra=masknet_useextralinearlayer,
        )
        # Learned filterbank synthesis back to the waveform domain; the
        # stride of half the kernel matches the encoder's implicit hop.
        self.decoder = Decoder(
            in_channels=encoder_out_nchannels,
            out_channels=encoder_in_nchannels,
            kernel_size=encoder_kernel_size,
            stride=encoder_kernel_size // 2,
            bias=False,
        )
        self.num_spks = masknet_numspks
        # reinitialize the parameters
        for module in [self.encoder, self.masknet, self.decoder]:
            self.reset_layer_recursively(module)
    def reset_layer_recursively(self, layer):
        """Reinitializes the parameters of the network"""
        if hasattr(layer, "reset_parameters"):
            layer.reset_parameters()
        # NOTE(review): `modules()` already yields all descendants, so deep
        # submodules get reset multiple times as the recursion revisits
        # them. Redundant but presumably harmless; confirm before changing,
        # since the number of resets affects RNG draw order.
        for child_layer in layer.modules():
            if layer != child_layer:
                self.reset_layer_recursively(child_layer)
    def forward(self, mix):
        """Separates the mixture waveform `mix` [B, T] and returns the
        estimated sources as a tensor of shape [B, T, num_spks]."""
        mix_w = self.encoder(mix)
        est_mask = self.masknet(mix_w)
        # Replicate the mixture features once per speaker and apply masks.
        mix_w = torch.stack([mix_w] * self.num_spks)
        sep_h = mix_w * est_mask
        # Decoding
        est_source = torch.cat(
            [
                self.decoder(sep_h[i]).unsqueeze(-1)
                for i in range(self.num_spks)
            ],
            dim=-1,
        )
        # T changed after conv1d in encoder, fix it here
        T_origin = mix.size(1)
        T_est = est_source.size(1)
        if T_origin > T_est:
            est_source = F.pad(est_source, (0, 0, 0, T_origin - T_est))
        else:
            est_source = est_source[:, :T_origin, :]
        return est_source
class SBConformerEncoderBlock(nn.Module):
    """A wrapper for the SpeechBrain implementation of the ConformerEncoder.

    Arguments
    ---------
    num_layers : int
        Number of layers.
    d_model : int
        Dimensionality of the representation.
    nhead : int
        Number of attention heads.
    d_ffn : int
        Dimensionality of positional feed forward.
    input_shape : tuple
        Shape of input.
    kdim : int
        Dimension of the key (Optional).
    vdim : int
        Dimension of the value (Optional).
    dropout : float
        Dropout rate.
    activation : str
        Activation function: one of "relu", "gelu" or "swish".
    kernel_size: int
        Kernel size in the conformer encoder.
    bias: bool
        Use bias or not in the convolution part of conformer encoder.
    use_positional_encoding : bool
        If true we use a positional encoding (only relevant for
        "regularMHA"; "RelPosMHAXL" always requires one).
    attention_type : str
        Either "RelPosMHAXL" or "regularMHA".

    Example
    ---------
    >>> x = torch.randn(10, 100, 64)
    >>> block = SBConformerEncoderBlock(1, 64, 8)
    >>> x = block(x)
    >>> x.shape
    torch.Size([10, 100, 64])
    """

    def __init__(
        self,
        num_layers,
        d_model,
        nhead,
        d_ffn=2048,
        input_shape=None,
        kdim=None,
        vdim=None,
        dropout=0.1,
        activation="swish",
        kernel_size=31,
        bias=True,
        use_positional_encoding=True,
        attention_type="RelPosMHAXL",
    ):
        super(SBConformerEncoderBlock, self).__init__()
        self.use_positional_encoding = use_positional_encoding
        self.attention_type = attention_type
        # Resolve the activation name to its module class up front.
        activations = {"relu": nn.ReLU, "gelu": nn.GELU, "swish": Swish}
        if activation not in activations:
            raise ValueError("unknown activation")
        self.mdl = ConformerEncoder(
            num_layers=num_layers,
            nhead=nhead,
            d_ffn=d_ffn,
            d_model=d_model,
            kdim=kdim,
            vdim=vdim,
            dropout=dropout,
            activation=activations[activation],
            kernel_size=kernel_size,
            bias=bias,
            attention_type=attention_type,
        )
        if self.attention_type == "RelPosMHAXL":
            # For RelPosMHAXL the positional encoding is mandatory.
            self.pos_enc = PositionalEncoding(input_size=d_model)
        elif self.attention_type == "regularMHA":
            if self.use_positional_encoding:
                self.pos_enc = PositionalEncoding(input_size=d_model)
        else:
            raise ValueError("Unsupported attention type")

    def forward(self, x):
        """Returns the transformed output.

        Arguments
        ---------
        x : torch.Tensor
            Tensor shape [B, L, N],
            where, B = Batchsize,
                   L = time points
                   N = number of filters
        """
        if self.attention_type == "RelPosMHAXL":
            # Relative positional embeddings span 2L - 1 positions.
            ones = torch.ones(
                x.shape[0], x.shape[1] * 2 - 1, x.shape[2], device=x.device
            )
            return self.mdl(x, pos_embs=self.pos_enc(ones))[0]
        if self.attention_type == "regularMHA":
            if self.use_positional_encoding:
                return self.mdl(x + self.pos_enc(x))[0]
            return self.mdl(x)[0]
        raise ValueError("Unsupported attention type")
| 42,269 | 28.313454 | 102 | py |
speechbrain | speechbrain-main/speechbrain/lobes/models/g2p/dataio.py | """
Data pipeline elements for the G2P pipeline
Authors
* Loren Lugosch 2020
* Mirco Ravanelli 2020
* Artem Ploujnikov 2021 (minor refactoring only)
"""
from functools import reduce
from speechbrain.wordemb.util import expand_to_chars
import speechbrain as sb
import torch
import re
RE_MULTI_SPACE = re.compile(r"\s{2,}")
def clean_pipeline(txt, graphemes):
    """
    Cleans incoming text: uppercases it, drops any character not on the
    accepted list of graphemes and collapses runs of whitespace into a
    single space.

    Arguments
    ---------
    txt: str
        the text to clean up
    graphemes: list
        a list of accepted graphemes

    Returns
    -------
    result: str
        the cleaned-up text
    """
    uppercased = txt.upper()
    kept = [char for char in uppercased if char in graphemes]
    return RE_MULTI_SPACE.sub(" ", "".join(kept))
def grapheme_pipeline(char, grapheme_encoder=None, uppercase=True):
    """Encodes a grapheme sequence.

    Arguments
    ---------
    char: str
        the raw text to encode
    grapheme_encoder: speechbrain.dataio.encoder.TextEncoder
        a text encoder for graphemes; only characters present in its
        label map are kept
    uppercase: bool
        whether or not to convert items to uppercase

    Yields
    ------
    grapheme_list: list
        a raw list of graphemes, excluding any non-matching labels
    grapheme_encoded_list: list
        a list of graphemes encoded as integers
    grapheme_encoded: torch.Tensor
        the encoded graphemes as a tensor
    """
    text = char.upper() if uppercase else char
    grapheme_list = [
        grapheme for grapheme in text if grapheme in grapheme_encoder.lab2ind
    ]
    yield grapheme_list
    encoded = grapheme_encoder.encode_sequence(grapheme_list)
    yield encoded
    yield torch.LongTensor(encoded)
def tokenizer_encode_pipeline(
    seq,
    tokenizer,
    tokens,
    wordwise=True,
    word_separator=" ",
    token_space_index=512,
    char_map=None,
):
    """A pipeline element that uses a pretrained tokenizer

    Arguments
    ---------
    seq: iterable
        the raw token sequence to encode
    tokenizer: callable
        a zero-argument callable returning a
        speechbrain.tokenizer.SentencePiece instance (lazily initialized)
    tokens: str
        available tokens; anything not in this collection is dropped
    wordwise: str
        whether tokenization is peformed on the whole sequence
        or one word at a time. Tokenization can produce token
        sequences in which a token may span multiple words
    word_separator: str
        the separator used between words in the input sequence
    token_space_index: int
        the index of the space token
    char_map: dict
        a mapping from characters to tokens. This is used when
        tokenizing sequences of phonemes rather than sequences
        of characters. A sequence of phonemes is typically a list
        of one or two-character tokens (e.g. ["DH", "UH", " ", "S", "AW",
        "N", "D"]). The character map makes it possible to map these
        to arbitrarily selected characters

    Yields
    ------
    token_list: list
        a list of raw tokens
    encoded_list: list
        a list of tokens, encoded as a list of integers
    encoded: torch.Tensor
        a list of tokens, encoded as a tensor
    """
    # Drop any tokens the tokenizer does not know about.
    token_list = [token for token in seq if token in tokens]
    yield token_list
    # Optionally remap multi-character tokens (e.g. phonemes) onto single
    # characters before handing them to SentencePiece.
    tokenizer_input = "".join(
        _map_tokens_item(token_list, char_map)
        if char_map is not None
        else token_list
    )
    if wordwise:
        # Encode word-by-word so tokens never span a word boundary.
        encoded_list = _wordwise_tokenize(
            tokenizer(), tokenizer_input, word_separator, token_space_index
        )
    else:
        encoded_list = tokenizer().sp.encode_as_ids(tokenizer_input)
    yield encoded_list
    encoded = torch.LongTensor(encoded_list)
    yield encoded
def _wordwise_tokenize(tokenizer, sequence, input_separator, token_separator):
    """Tokenizes a sequence one word at a time, joining the per-word
    encodings with the given token separator.

    Arguments
    ---------
    tokenizer: speechbrain.tokenizers.SentencePiece.SentencePiece
        a tokenizer instance
    sequence: iterable
        the original sequence
    input_separator: str
        the separator used in the input sequence
    token_separator: str
        the token separator used in the output sequence

    Returns
    -------
    result: list
        the encoded sequence
    """
    # Fast path: a single word needs no splitting.
    if input_separator not in sequence:
        return tokenizer.sp.encode_as_ids(sequence)
    result = []
    for idx, word in enumerate(_split_list(sequence, input_separator)):
        if idx > 0:
            result.append(token_separator)
        result.extend(tokenizer.sp.encode_as_ids(word))
    return result
def _wordwise_detokenize(tokenizer, sequence, output_separtor, token_separator):
"""Detokenizes a sequence wordwise
Arguments
---------
tokenizer: speechbrain.tokenizers.SentencePiece.SentencePiece
a tokenizer instance
sequence: iterable
the original sequence
output_separator: str
the separator used in the output seauence
token_separator: str
the token separator used in the output sequence
Returns
-------
result: torch.Tensor
the result
"""
if isinstance(sequence, str) and sequence == "":
return ""
if token_separator not in sequence:
sequence_list = (
sequence if isinstance(sequence, list) else sequence.tolist()
)
return tokenizer.sp.decode_ids(sequence_list)
words = list(_split_list(sequence, token_separator))
encoded_words = [
tokenizer.sp.decode_ids(word_tokens) for word_tokens in words
]
return output_separtor.join(encoded_words)
def _split_list(items, separator):
"""
Splits a sequence (such as a tensor) by the specified separator
Arguments
---------
items: sequence
any sequence that supports indexing
Results
-------
separator: str
the separator token
"""
if items is not None:
last_idx = -1
for idx, item in enumerate(items):
if item == separator:
yield items[last_idx + 1 : idx]
last_idx = idx
if last_idx < idx - 1:
yield items[last_idx + 1 :]
def enable_eos_bos(tokens, encoder, bos_index, eos_index):
    """
    Initializes the phoneme encoder with EOS/BOS sequences

    Arguments
    ---------
    tokens: list
        a list of tokens
    encoder: speechbrain.dataio.encoder.TextEncoder.
        a text encoder instance. If none is provided, a new one
        will be instantiated
    bos_index: int
        the position corresponding to the Beginning-of-Sentence
        token
    eos_index: int
        the position corresponding to the End-of-Sentence

    Returns
    -------
    encoder: speechbrain.dataio.encoder.TextEncoder
        an encoder
    """
    if encoder is None:
        encoder = sb.dataio.encoder.TextEncoder()
    if bos_index == eos_index:
        # Shared index: a single combined <eos-bos> marker is used.
        if "<eos-bos>" not in encoder.lab2ind:
            encoder.insert_bos_eos(
                bos_label="<eos-bos>",
                eos_label="<eos-bos>",
                bos_index=bos_index,
            )
    else:
        # Distinct indices: separate <bos> and <eos> markers.
        if "<bos>" not in encoder.lab2ind:
            encoder.insert_bos_eos(
                bos_label="<bos>",
                eos_label="<eos>",
                bos_index=bos_index,
                eos_index=eos_index,
            )
    # The membership checks above make repeated calls idempotent.
    if "<unk>" not in encoder.lab2ind:
        encoder.add_unk()
    encoder.update_from_iterable(tokens, sequence_input=False)
    return encoder
def phoneme_pipeline(phn, phoneme_encoder=None):
    """Encodes a sequence of phonemes using the encoder provided.

    Arguments
    ---------
    phn: list
        a list of phoneme labels
    phoneme_encoder: speechbrain.datio.encoder.TextEncoder
        a text encoder instance

    Yields
    ------
    phn: list
        the original list of phonemes
    phn_encoded_list: list
        encoded phonemes, as a list
    phn_encoded: torch.Tensor
        encoded phonemes, as a tensor
    """
    yield phn
    encoded = phoneme_encoder.encode_sequence(phn)
    yield encoded
    yield torch.LongTensor(encoded)
def add_bos_eos(seq=None, encoder=None):
    """Adds BOS and EOS tokens to the sequence provided.

    Arguments
    ---------
    seq: torch.Tensor
        the source sequence
    encoder: speechbrain.dataio.encoder.TextEncoder
        an encoder instance

    Yields
    ------
    seq_bos: torch.Tensor
        the sequence, with the BOS token prepended
    seq_bos_len: torch.Tensor
        the length of the BOS-prefixed sequence
    seq_eos: torch.Tensor
        the sequence, with the EOS token appended
    seq_eos_len: torch.Tensor
        the length of the EOS-suffixed sequence
    """

    def _as_long_tensor(value):
        """Coerces non-tensor values to tensors and casts to int64."""
        if not torch.is_tensor(value):
            value = torch.tensor(value)
        return value.long()

    seq_bos = _as_long_tensor(encoder.prepend_bos_index(seq))
    yield seq_bos
    yield torch.tensor(len(seq_bos))
    seq_eos = _as_long_tensor(encoder.append_eos_index(seq))
    yield seq_eos
    yield torch.tensor(len(seq_eos))
def beam_search_pipeline(char_lens, encoder_out, beam_searcher):
    """Runs a beam search over the phoneme predictions. This function is
    meant to be used as a component in a decoding pipeline.

    Arguments
    ---------
    char_lens: torch.Tensor
        the length of character inputs
    encoder_out: torch.Tensor
        raw encoder outputs
    beam_searcher: speechbrain.decoders.seq2seq.S2SBeamSearcher
        a SpeechBrain beam searcher instance

    Returns
    -------
    hyps: list
        hypotheses
    scores: list
        confidence scores associated with each hypotheses
    """
    hyps_and_scores = beam_searcher(encoder_out, char_lens)
    return hyps_and_scores
def phoneme_decoder_pipeline(hyps, phoneme_encoder):
    """Decodes a sequence of phoneme indexes back to labels.

    Arguments
    ---------
    hyps: list
        hypotheses, the output of a beam search
    phoneme_encoder: speechbrain.datio.encoder.TextEncoder
        a text encoder instance

    Returns
    -------
    phonemes: list
        the phoneme sequence
    """
    decoded = phoneme_encoder.decode_ndim(hyps)
    return decoded
def char_range(start_char, end_char):
    """Produces a list of consecutive characters, both endpoints included.

    Arguments
    ---------
    start_char: str
        the starting character
    end_char: str
        the ending character

    Returns
    -------
    char_range: list
        the list of characters from start_char to end_char
    """
    return list(map(chr, range(ord(start_char), ord(end_char) + 1)))
def build_token_char_map(tokens):
    """Builds a map that maps arbitrary tokens to arbitrarily chosen characters.
    This is required to overcome the limitations of SentencePiece.

    Arguments
    ---------
    tokens: list
        a list of tokens for which to produce the map

    Returns
    -------
    token_map: dict
        a dictionary with original tokens as keys and
        new mappings as values; the space token maps to itself
    """
    # Candidate characters: A-Z followed by a-z.
    alphabet = [chr(code) for code in range(ord("A"), ord("Z") + 1)]
    alphabet += [chr(code) for code in range(ord("a"), ord("z") + 1)]
    non_space = [token for token in tokens if token != " "]
    token_map = dict(zip(non_space, alphabet))
    token_map[" "] = " "
    return token_map
def flip_map(map_dict):
    """Exchanges keys and values in a dictionary.

    Arguments
    ---------
    map_dict: dict
        a dictionary

    Returns
    -------
    reverse_map_dict: dict
        a dictionary with keys and values flipped
    """
    return dict((value, key) for key, value in map_dict.items())
def text_decode(seq, encoder):
    """Decodes a sequence using a tokenizer.
    This function is meant to be used in hparam files.

    Arguments
    ---------
    seq: torch.Tensor
        token indexes
    encoder: sb.dataio.encoder.TextEncoder
        a text encoder instance

    Returns
    -------
    output_seq: list
        a list of lists of tokens
    """
    decoded = encoder.decode_ndim(seq)
    return decoded
def char_map_detokenize(
    char_map, tokenizer, token_space_index=None, wordwise=True
):
    """Returns a function that recovers the original sequence from one that has been
    tokenized using a character map

    Arguments
    ---------
    char_map: dict
        a character-to-output-token-map
    tokenizer: callable
        a zero-argument callable returning a
        speechbrain.tokenizers.SentencePiece.SentencePiece instance
    token_space_index: int
        the index of the "space" token
    wordwise: bool
        whether to detokenize one word at a time (True) or the whole
        sequence at once (False)

    Returns
    -------
    f: callable
        the detokenizer function (batch of token sequences -> lists of
        mapped characters)
    """

    def detokenize_wordwise(item):
        """Detokenizes the sequence one word at a time"""
        return _wordwise_detokenize(tokenizer(), item, " ", token_space_index)

    def detokenize_regular(item):
        """Detokenizes the entire sequence"""
        return tokenizer().sp.decode_ids(item)

    # Choose the detokenization strategy once, at closure-creation time.
    detokenize = detokenize_wordwise if wordwise else detokenize_regular

    def f(tokens):
        """The tokenizer function"""
        decoded_tokens = [detokenize(item) for item in tokens]
        # Map the decoded characters back through the character map.
        mapped_tokens = _map_tokens_batch(decoded_tokens, char_map)
        return mapped_tokens

    return f
def _map_tokens_batch(tokens, char_map):
"""Performs token mapping, in batch mode
Arguments
---------
tokens: iterable
a list of token sequences
char_map: dict
a token-to-character mapping
Returns
-------
result: list
a list of lists of characters
"""
return [[char_map[char] for char in item] for item in tokens]
def _map_tokens_item(tokens, char_map):
"""Maps tokens to characters, for a single item
Arguments
---------
tokens: iterable
a single token sequence
char_map: dict
a token-to-character mapping
Returns
-------
result: list
a list of tokens
"""
return [char_map[char] for char in tokens]
def lazy_init(init):
    """A wrapper ensuring the specified object is constructed only once
    (used mainly for tokenizers that train when the constructor is
    called).

    Arguments
    ---------
    init: callable
        a constructor or function that creates an object

    Returns
    -------
    f: callable
        a zero-argument function returning the (cached) instance
    """
    # Mutable cell holding the singleton; None means "not created yet".
    state = {"instance": None}

    def f():
        """Creates the instance on first call, then reuses it."""
        if state["instance"] is None:
            state["instance"] = init()
        return state["instance"]

    return f
def get_sequence_key(key, mode):
    """Determines the key to be used for sequences (e.g. graphemes/phonemes)
    based on the naming convention.

    Arguments
    ---------
    key: str
        the key (e.g. "graphemes", "phonemes")
    mode: str
        the mode/suffix (raw, eos/bos)

    Returns
    -------
    the key itself for "raw" mode, otherwise "<key>_<mode>"
    """
    if mode == "raw":
        return key
    return f"{key}_{mode}"
def phonemes_to_label(phns, decoder):
    """Converts a batch of phoneme sequences (a single tensor)
    to a list of space-separated phoneme label strings,
    (e.g. ["T AY B L", "B UH K"]), removing any special tokens.

    Arguments
    ---------
    phns: sequence
        a batch of phoneme sequences
    decoder: callable
        converts the batch into lists of phoneme labels

    Returns
    -------
    result: list
        a list of strings corresponding to the phonemes provided
    """
    labels = []
    for item in decoder(phns):
        labels.append(" ".join(remove_special(item)))
    return labels
def remove_special(phn):
    """Removes any special tokens from the sequence. Special tokens are
    recognized by containing an opening angle bracket (e.g. "<eos>").

    Arguments
    ---------
    phn: list
        a list of phoneme labels

    Returns
    -------
    result: list
        the original list, without any special tokens
    """
    return list(filter(lambda token: "<" not in token, phn))
def word_emb_pipeline(
    txt,
    grapheme_encoded,
    grapheme_encoded_len,
    grapheme_encoder=None,
    word_emb=None,
    use_word_emb=None,
):
    """Applies word embeddings, if applicable. This function is meant
    to be used as part of the encoding pipeline

    Arguments
    ---------
    txt: str
        the raw text
    grapheme_encoded: torch.tensor
        the encoded graphemes
    grapheme_encoded_len: torch.tensor
        encoded grapheme lengths
    grapheme_encoder: speechbrain.dataio.encoder.TextEncoder
        the text encoder used for graphemes
    word_emb: callable
        a zero-argument callable returning the model that produces word
        embeddings (lazily initialized)
    use_word_emb: bool
        a flag indicated if word embeddings are to be applied

    Returns
    -------
    char_word_emb: torch.tensor
        Word embeddings, expanded to the character dimension; None when
        word embeddings are disabled
    """
    char_word_emb = None
    if use_word_emb:
        raw_word_emb = word_emb().embeddings(txt)
        # The encoded space index marks word boundaries in the grapheme
        # sequence.
        word_separator_idx = grapheme_encoder.lab2ind[" "]
        # expand_to_chars operates on batches; wrap the single item in a
        # batch dimension and strip it afterwards.
        char_word_emb = expand_to_chars(
            emb=raw_word_emb.unsqueeze(0),
            seq=grapheme_encoded.unsqueeze(0),
            seq_len=grapheme_encoded_len.unsqueeze(0),
            word_separator=word_separator_idx,
        ).squeeze(0)
    return char_word_emb
| 16,894 | 25.153251 | 84 | py |
speechbrain | speechbrain-main/speechbrain/lobes/models/g2p/homograph.py | """Tools for homograph disambiguation
Authors
* Artem Ploujnikov 2021
"""
import torch
from torch import nn
class SubsequenceLoss(nn.Module):
    """
    A loss function for a specific word in the output, used in
    the homograph disambiguation task
    The approach is as follows:
    1. Arrange only the target words from the original batch into a
    single tensor
    2. Find the word index of each target word
    3. Compute the beginnings and endings of words in the predicted
    sequences. The assumption is that the model has been trained well
    enough to identify word boundaries with a simple argmax without
    having to perform a beam search.
    Important! This loss can be used for fine-tuning only
    The model is expected to be able to already be able
    to correctly predict word boundaries
    Arguments
    ---------
    seq_cost: callable
        the loss to be used on the extracted subsequences
    word_separator: int
        the index of the "space" character (in phonemes)
    word_separator_base: str
        the index of word separators used in unprocessed
        targets (if different, used with tokenizations)
    Example
    -------
    >>> import torch
    >>> from speechbrain.lobes.models.g2p.homograph import SubsequenceLoss
    >>> from speechbrain.nnet.losses import nll_loss
    >>> loss = SubsequenceLoss(
    ...     seq_cost=nll_loss
    ... )
    >>> phns = torch.Tensor(
    ...     [[1, 2, 0, 1, 3, 0, 2, 1, 0],
    ...      [2, 1, 3, 0, 1, 2, 0, 3, 2]]
    ... )
    >>> phn_lens = torch.IntTensor([8, 9])
    >>> subsequence_phn_start = torch.IntTensor([3, 4])
    >>> subsequence_phn_end = torch.IntTensor([5, 7])
    >>> p_seq = torch.Tensor([
    ...     [[0., 1., 0., 0.],
    ...      [0., 0., 1., 0.],
    ...      [1., 0., 0., 0.],
    ...      [0., 1., 0., 0.],
    ...      [0., 0., 0., 1.],
    ...      [1., 0., 0., 0.],
    ...      [0., 0., 1., 0.],
    ...      [0., 1., 0., 0.],
    ...      [1., 0., 0., 0.]],
    ...     [[0., 0., 1., 0.],
    ...      [0., 1., 0., 0.],
    ...      [0., 0., 0., 1.],
    ...      [1., 0., 0., 0.],
    ...      [0., 1., 0., 0.],
    ...      [0., 0., 1., 0.],
    ...      [1., 0., 0., 0.],
    ...      [0., 0., 0., 1.],
    ...      [0., 0., 1., 0.]]
    ... ])
    >>> loss_value = loss(
    ...    phns,
    ...    phn_lens,
    ...    p_seq,
    ...    subsequence_phn_start,
    ...    subsequence_phn_end
    ... )
    >>> loss_value
    tensor(-0.8000)
    """

    def __init__(self, seq_cost, word_separator=0, word_separator_base=0):
        super().__init__()
        self.seq_cost = seq_cost
        # The extractor does the heavy lifting of locating and slicing
        # out the target words; separators are delegated to it.
        self._subsequence_extractor = SubsequenceExtractor(
            word_separator, word_separator_base
        )

    @property
    def word_separator(self):
        """
        The word separator being used
        """
        return self._subsequence_extractor.word_separator

    @word_separator.setter
    def word_separator(self, value):
        """
        Sets the word separator
        """
        self._subsequence_extractor.word_separator = value

    @property
    def word_separator_base(self):
        """
        The base word separator being used
        """
        return self._subsequence_extractor.word_separator_base

    # BUGFIX: this was `@word_separator.setter`, which rebound the
    # `word_separator_base` property to word_separator's getter, so
    # reading `word_separator_base` returned the wrong attribute.
    @word_separator_base.setter
    def word_separator_base(self, value):
        """
        Sets the base word separator
        """
        self._subsequence_extractor.word_separator_base = value

    def forward(
        self,
        phns,
        phn_lens,
        p_seq,
        subsequence_phn_start,
        subsequence_phn_end,
        phns_base=None,
        phn_lens_base=None,
    ):
        """
        Evaluates the subsequence loss

        Arguments
        ---------
        phns: torch.Tensor
            the phoneme tensor (batch x length)
        phn_lens: torch.Tensor
            the phoneme length tensor
        p_seq: torch.Tensor
            the output phoneme probability tensor
            (batch x length x phns)
        subsequence_phn_start: torch.Tensor
            the beginning of the target subsequence
            (i.e. the homograph)
        subsequence_phn_end: torch.Tensor
            the end of the target subsequence
            (i.e. the homograph)
        phns_base: torch.Tensor
            the phoneme tensor (not preprocessed)
        phn_lens_base: torch.Tensor
            the phoneme lengths (not preprocessed)

        Returns
        -------
        loss: torch.Tensor
            the loss tensor
        """
        # Slice the target word out of both the predictions and the
        # targets, then apply the wrapped sequence cost to the slices.
        (
            p_seq_subsequence,
            phns_subsequence,
            subsequence_lengths,
        ) = self._subsequence_extractor(
            phns,
            phn_lens,
            p_seq,
            subsequence_phn_start,
            subsequence_phn_end,
            phns_base,
            phn_lens_base,
        )
        return self.seq_cost(
            p_seq_subsequence, phns_subsequence, subsequence_lengths
        )
class SubsequenceExtractor:
"""
A utility class to help extract subsequences out of a batch
of sequences
Arguments
---------
word_separator: int
the index of the word separator (used in p_seq)
word_separator_base int
the index of word separators used in unprocessed
targets (if different)
Example
-------
>>> import torch
>>> from speechbrain.lobes.models.g2p.homograph import SubsequenceExtractor
>>> extractor = SubsequenceExtractor()
>>> phns = torch.Tensor(
... [[1, 2, 0, 1, 3, 0, 2, 1, 0],
... [2, 1, 3, 0, 1, 2, 0, 3, 2]]
... )
>>> phn_lens = torch.IntTensor([8, 9])
>>> subsequence_phn_start = torch.IntTensor([3, 4])
>>> subsequence_phn_end = torch.IntTensor([5, 7])
>>> p_seq = torch.Tensor([
... [[0., 1., 0., 0.],
... [0., 0., 1., 0.],
... [1., 0., 0., 0.],
... [0., 1., 0., 0.],
... [0., 0., 0., 1.],
... [1., 0., 0., 0.],
... [0., 0., 1., 0.],
... [0., 1., 0., 0.],
... [1., 0., 0., 0.]],
... [[0., 0., 1., 0.],
... [0., 1., 0., 0.],
... [0., 0., 0., 1.],
... [1., 0., 0., 0.],
... [0., 1., 0., 0.],
... [0., 0., 1., 0.],
... [1., 0., 0., 0.],
... [0., 0., 0., 1.],
... [0., 0., 1., 0.]]
... ])
>>> extractor.extract_seq(
... phns,
... phn_lens,
... p_seq,
... subsequence_phn_start,
... subsequence_phn_end
... )
(tensor([[[0., 1., 0., 0.],
[0., 0., 0., 1.],
[0., 0., 0., 0.]],
<BLANKLINE>
[[0., 1., 0., 0.],
[0., 0., 1., 0.],
[0., 0., 0., 0.]]]), tensor([[1., 3., 0.],
[1., 2., 0.]]), tensor([0.6667, 1.0000]))
"""
def __init__(self, word_separator=0, word_separator_base=None):
self.word_separator = word_separator
if word_separator_base is None:
word_separator_base = word_separator
self.word_separator_base = word_separator_base
    def __call__(self, *args, **kwargs):
        # Convenience alias: calling the extractor delegates directly to
        # extract_seq with the same arguments.
        return self.extract_seq(*args, **kwargs)
    def extract_seq(
        self,
        phns,
        phn_lens,
        p_seq,
        subsequence_phn_start,
        subsequence_phn_end,
        phns_base=None,
        phn_base_lens=None,
    ):
        """
        Extracts the subsequence from the complete sequence

        Arguments
        ---------
        phns: torch.Tensor
            the phoneme tensor (batch x length)
        phn_lens: torch.Tensor
            the phoneme length tensor
        p_seq: torch.Tensor
            the output phoneme probability tensor
            (batch x length x phns)
        subsequence_phn_start: torch.Tensor
            the beginning of the target subsequence
            (i.e. the homograph)
        subsequence_phn_end: torch.Tensor
            the end of the target subsequence
            (i.e. the homograph)
        phns_base: torch.Tensor
            the phoneme tensor (not preprocessed)
        phn_base_lens: torch.Tensor
            the phoneme lengths (not preprocessed)

        Returns
        -------
        p_seq_subsequence: torch.Tensor
            the output subsequence (of probabilities)
        phns_subsequence: torch.Tensor
            the target subsequence
        subsequence_lengths: torch.Tensor
            subsequence lengths, expressed as a fraction
            of the tensor's last dimension
        """
        # When no "base" (unprocessed) targets are supplied, the processed
        # targets double as the base; both must be given together.
        has_base = False
        if phns_base is None and phn_base_lens is None:
            phns_base = phns
            phn_base_lens = phn_lens
        elif phns_base is None or phn_base_lens is None:
            raise ValueError(
                "phn_base and phn_lens_base, if provided, should be provided together"
            )
        else:
            has_base = True
        p_seq_edge = p_seq.size(1)
        # Absolute (unpadded) end position of each target sequence.
        phns_edge = (phns.size(1) * phn_lens).long().unsqueeze(-1)

        # Compute subsequence lengths and the longest length
        subsequence_lengths = subsequence_phn_end - subsequence_phn_start
        longest_subsequence = subsequence_lengths.max()

        # Pad the sequence axis to make sure the "distance" from the start of
        # each subsequence to the end of the sequence is at least as long
        # as the longest subsequence (e.g. subsequence = homograph)
        phns = self._pad_subsequence(phns, longest_subsequence)
        phns_base = self._pad_subsequence(phns_base, longest_subsequence)

        # p_seq_pad = (gap + longest_subsequence + 1).item()
        p_seq_pad = p_seq.size(1)
        p_seq = torch.nn.functional.pad(p_seq, (0, 0, 0, p_seq_pad))

        # Copy only the subsequences from the targets and inputs
        # into new tensors
        subsequence_phn_start_unsq = subsequence_phn_start.unsqueeze(-1)
        # Per-position index grids, used to build boolean masks below.
        range_phns_base = torch.arange(
            phns_base.size(1), device=phns_base.device
        ).expand_as(phns_base)
        range_phns_subsequence = torch.arange(
            longest_subsequence, device=phns.device
        ).expand(phns.size(0), longest_subsequence)

        # Count the words in predictions
        target_word_indexes = self._get_target_word_indexes(
            phns_base,
            range_phns_base,
            subsequence_phn_start_unsq,
            self.word_separator_base,
            phn_lens=phn_base_lens,
        )

        if has_base:
            # Needed if tokenization or any other transformation was used
            phns_subsequence, subsequence_lengths = self._get_phns_subsequence(
                phns, target_word_indexes, longest_subsequence, phns_edge
            )
        else:
            # If phns and phns_base are the same, there is no need to re-detect word boundaries
            match = (range_phns_base >= subsequence_phn_start_unsq) & (
                range_phns_base
                < subsequence_phn_start_unsq + longest_subsequence
            )
            phns_subsequence = phns[match].reshape(range_phns_subsequence.shape)
            # Zero out positions beyond each item's actual subsequence.
            phns_subsequence[
                range_phns_subsequence >= subsequence_lengths.unsqueeze(-1)
            ] = 0.0

        p_seq_subsequence = self._get_p_seq_subsequence(
            p_seq, target_word_indexes, longest_subsequence, p_seq_edge
        )
        # Lengths are returned as fractions of the padded subsequence axis.
        return (
            p_seq_subsequence,
            phns_subsequence,
            subsequence_lengths / longest_subsequence,
        )
def _pad_subsequence(self, sequence, longest_subsequence):
"""Pads a subsequence to the length of the longest subsequence
Arguments
---------
sequence: torch.tensor
the sequence to be padded
longest_subsequence: int
the length of the longest subsequence
"""
if longest_subsequence > 0:
sequence = torch.nn.functional.pad(
sequence, (0, longest_subsequence)
)
return sequence
    def _get_phns_subsequence(
        self, phns, target_word_indexes, longest_subsequence, edge
    ):
        """Extracts a subsequence (the target words) from a batch of
        phoneme sequences.

        Arguments
        ---------
        phns: torch.Tensor
            a tensor of phoneme indexes
        target_word_indexes: torch.Tensor
            a tensor of word indexes to extract, zero-based
            (e.g.) torch.IntTensor([2, 3]) means extracting
            the third word from the first sample and the
            fourth word from the second sample
        longest_subsequence: int
            the length of the longest subsequence
        edge: torch.Tensor
            the index of the "edge" (end) of each sequence

        Returns
        -------
        phn_subsequence: torch.Tensor
            a tensor with only the target words
        subsequence_lengths: torch.Tensor
            the lengths of the extracted words
        """
        # Locate the start/end positions of the target word in each sample
        word_start, word_end = self._get_word_boundaries(
            phns, target_word_indexes, edge
        )
        word_start_unsq = word_start.unsqueeze(-1)
        word_end_unsq = word_end.unsqueeze(-1)
        # Position index for every slot of the phoneme tensor
        phns_range = (
            torch.arange(phns.size(1), device=phns.device)
            .unsqueeze(0)
            .expand_as(phns)
        )
        # Fixed-width window of longest_subsequence slots from each word start;
        # this selects the same number of elements per sample, so the flat
        # result can be reshaped to (batch x longest_subsequence)
        phn_match = (phns_range >= word_start_unsq) & (
            phns_range < word_start_unsq + longest_subsequence
        )
        phns_subsequence = phns[phn_match].view(
            phns.size(0), longest_subsequence
        )
        phns_subsequence_range = (
            torch.arange(
                phns_subsequence.size(1), device=phns_subsequence.device
            )
            .unsqueeze(0)
            .expand_as(phns_subsequence)
        )
        # Zero out positions beyond each word's actual end (safe in-place:
        # the advanced indexing above produced a copy, not a view)
        phns_subsequence[
            phns_subsequence_range >= (word_end_unsq - word_start_unsq)
        ] = 0.0
        # Clamp lengths to the window width
        subsequence_lengths = torch.minimum(
            word_end - word_start, torch.tensor(phns_subsequence.size(1))
        )
        return phns_subsequence, subsequence_lengths
    def _get_p_seq_subsequence(
        self, p_seq, target_word_indexes, longest_subsequence, edge
    ):
        """Extracts a subsequence out of a tensor of probabilities.

        Arguments
        ---------
        p_seq: torch.Tensor
            a tensor of phoneme probabilities
            (batch x sequence index x phoneme index)
        target_word_indexes: torch.Tensor
            a tensor of word indexes to extract, zero-based
            (e.g.) torch.IntTensor([2, 3]) means extracting
            the third word from the first sample and the
            fourth word from the second sample
        longest_subsequence: int
            the length of the longest subsequence
        edge: torch.Tensor
            the index of the "edge" (end) of each sequence

        Returns
        -------
        p_seq_subsequence: torch.Tensor
            a probability tensor composed of the phoneme
            probabilities for target words only
        """
        # Determine where the predicted subsequences start and end
        # (word boundaries are found on argmax-decoded tokens)
        word_start, word_end = self._get_word_boundaries(
            p_seq, target_word_indexes, edge
        )
        # Position index broadcast over the probability dimension
        p_seq_range = (
            torch.arange(p_seq.size(1), device=p_seq.device)
            .unsqueeze(0)
            .unsqueeze(-1)
            .expand_as(p_seq)
        )
        word_start_unsq = word_start.unsqueeze(-1).unsqueeze(-1)
        word_end_unsq = word_end.unsqueeze(-1).unsqueeze(-1)
        # Fixed-width window of longest_subsequence steps from each word start,
        # selecting an equal number of elements per sample
        phn_match = (p_seq_range >= word_start_unsq) & (
            p_seq_range < word_start_unsq + longest_subsequence
        )
        p_seq_subsequence = p_seq[phn_match].view(
            p_seq.size(0), longest_subsequence, p_seq.size(-1)
        )
        p_seq_subsequence_range = (
            torch.arange(
                p_seq_subsequence.size(1), device=p_seq_subsequence.device
            )
            .unsqueeze(0)
            .unsqueeze(-1)
            .expand_as(p_seq_subsequence)
        )
        # Zero out steps beyond each word's actual end (in-place on a copy)
        p_seq_subsequence[
            p_seq_subsequence_range >= (word_end_unsq - word_start_unsq)
        ] = 0.0
        return p_seq_subsequence
def _get_target_word_indexes(
self, phns, range_phns, start, word_separator, phn_lens=None
):
"""Computes the target word indexes
Arguments
---------
phns: torch.Tensor
a phoneme batch tensor
range_phns: torch.Tensor
a range tensor over thephoneme sequence
start: torch.Tensor
the beginning of the subsequence
word_separator: int
the word separator being used
Returns
-------
word_indexes: torch.Tensor
the word index tensor
"""
end_of_sequence = (
(range_phns == ((phn_lens).unsqueeze(-1) * phns.size(1)).long())
if phn_lens is not None
else False
)
word_boundaries = (range_phns < start) & (
(phns == word_separator) | end_of_sequence
)
word_indexes = word_boundaries.sum(dim=-1)
return word_indexes
def _get_word_boundaries(
self, seq, word_indexes, edge, word_separator=None
):
"""Determines the word boundaries for the specified
word indexes within a sequence
Arguments
---------
seq: torch.Tensor
a sequence (phonemes or graphemes)
word_indexes:
the word indexes
edge: int
a tensor indicating the last position
word_separator: int
the word separator token
Returns
-------
start: torch.Tensor
word start indexes
end: torch.Tensor
word end indexes
"""
if word_separator is None:
word_separator = self.word_separator
# Find all spaces in the tensor
tokens = seq.argmax(-1) if seq.dim() == 3 else seq
# Compute an auxiliary range tensor to help determine
# word boundaries
words_range = torch.arange(
tokens.size(-1), device=tokens.device
).expand_as(tokens)
word_boundaries = (tokens == word_separator) | (words_range == edge)
# Find which word a given position in the tensor belongs in
words = word_boundaries.cumsum(dim=-1)
index_match = words == word_indexes.unsqueeze(-1)
start = self._get_positions(index_match, words_range, torch.min, edge)
end = self._get_positions(index_match, words_range, torch.max, 0)
return start, end
def _get_positions(
self, index_match, words_range, aggregation, no_match_value
):
"""A helper method to calculate start or end positions corresponding
to specific words
Arguments
---------
index_match: torch.Tensor
a mask where positions matching the word index are
indicated as a 1 and the remaining positions are 0
words_range: torch.Tensor
a range tensor over the tokens
aggregation: callable
the aggregation to use (torch.min or torch.max)
no_match_value: int
the value to output if no match is found (this could
happen when searching in model outputs rather than
in source data)
"""
positions = torch.where(index_match, words_range, no_match_value)
positions = aggregation(positions, dim=-1).values
return torch.where(positions == 0, 0, positions + 1)
def extract_hyps(
self, ref_seq, hyps, subsequence_phn_start, use_base=False
):
"""Extracts a subsequnce from hypotheses (e.g. the result of a beam
search) based on a refernece sequence, which can be either a sequence of phonemes (the target during training)
Arguments
---------
ref_seq: torch.Tensor
a reference sequence (e.g. phoneme targets)
hyps: list
a batch of hypotheses, a list of list of
integer indices (usually of phonemes)
subsequence_phn_start: torch.tensor
the index of the beginning of the subsequence to
use_base: bool
whether to use the raw (token) space for word separators
"""
range_phns = torch.arange(
ref_seq.size(1), device=ref_seq.device
).expand_as(ref_seq)
target_word_indexes = self._get_target_word_indexes(
ref_seq,
range_phns,
subsequence_phn_start.unsqueeze(-1),
self.word_separator_base if use_base else self.word_separator,
)
separator_indexes = [
[-1]
+ [
idx
for idx, phn in enumerate(item_hyps)
if phn == self.word_separator
]
+ [None]
for item_hyps in hyps
]
result = [
self._extract_hyp_word(
item_hyps, item_separtaor_indexes, word_index
)
for item_hyps, item_separtaor_indexes, word_index in zip(
hyps, separator_indexes, target_word_indexes
)
]
return result
def _extract_hyp_word(self, hyps, separator_indexes, word_index):
"""Extracts a single word out of a hypothesis sequence
Arguments
---------
hyps: list
a hypotheses list (or tensor)
separator_indexes: torch.Tensor
a tensor of word separators
word_index: int
the index of the word to eb retrieved
Returns
-------
result: list|str
the extracted word
"""
if word_index < len(separator_indexes):
left = separator_indexes[word_index]
if left is None:
return ""
left += 1
right = separator_indexes[word_index + 1]
result = hyps[left:right]
else:
result = []
return result
| 21,897 | 31.978916 | 118 | py |
speechbrain | speechbrain-main/speechbrain/lobes/models/g2p/model.py | """The Attentional RNN model for Grapheme-to-Phoneme
Authors
* Mirco Ravinelli 2021
* Artem Ploujnikov 2021
"""
from speechbrain.lobes.models.transformer.Transformer import (
TransformerInterface,
get_lookahead_mask,
get_key_padding_mask,
)
import torch
from torch import nn
from speechbrain.nnet.linear import Linear
from speechbrain.nnet import normalization
class AttentionSeq2Seq(nn.Module):
    """
    The Attentional RNN encoder-decoder model

    Arguments
    ---------
    enc: torch.nn.Module
        the encoder module
    encoder_emb: torch.nn.Module
        the encoder embedding module (graphemes -> embeddings)
    emb: torch.nn.Module
        the embedding module (phonemes -> embeddings)
    dec: torch.nn.Module
        the decoder module
    lin: torch.nn.Module
        the linear module projecting decoder states to logits
    out: torch.nn.Module
        the output layer (typically log_softmax)
    use_word_emb: bool
        whether or not to use word embedding
    bos_token: int
        the index of the Beginning-of-Sentence token
    word_emb_enc: nn.Module
        a module to encode word embeddings

    Returns
    -------
    result: tuple
        a (p_seq, char_lens) tuple
    """

    def __init__(
        self,
        enc,
        encoder_emb,
        emb,
        dec,
        lin,
        out,
        bos_token=0,
        use_word_emb=False,
        word_emb_enc=None,
    ):
        super().__init__()
        self.enc = enc
        self.encoder_emb = encoder_emb
        self.emb = emb
        self.dec = dec
        self.lin = lin
        self.out = out
        self.bos_token = bos_token
        self.use_word_emb = use_word_emb
        # The word-embedding encoder is only retained when word embeddings
        # are enabled
        self.word_emb_enc = word_emb_enc if use_word_emb else None

    def forward(
        self, grapheme_encoded, phn_encoded=None, word_emb=None, **kwargs
    ):
        """Computes the forward pass

        Arguments
        ---------
        grapheme_encoded: torch.Tensor
            graphemes encoded as a Torch tensor
        phn_encoded: torch.Tensor
            the encoded phonemes
        word_emb: torch.Tensor
            word embeddings (optional)

        Returns
        -------
        p_seq: torch.Tensor
            a (batch x position x token) tensor of token probabilities in each
            position
        char_lens: torch.Tensor
            a tensor of character sequence lengths
        encoder_out:
            the raw output of the encoder
        """
        chars, char_lens = grapheme_encoded
        # Without phoneme targets (inference), decode from a BOS-only sequence
        if phn_encoded is None:
            phn_bos = get_dummy_phonemes(chars.size(0), chars.device)
        else:
            phn_bos, _ = phn_encoded
        emb_char = self.encoder_emb(chars)
        if self.use_word_emb:
            # NOTE: this calls the module-level _apply_word_emb helper
            # (which detaches word_emb via .data), not the method below
            emb_char = _apply_word_emb(self.word_emb_enc, emb_char, word_emb)
        encoder_out, _ = self.enc(emb_char)
        e_in = self.emb(phn_bos)
        h, w = self.dec(e_in, encoder_out, char_lens)
        logits = self.lin(h)
        p_seq = self.out(logits)
        return p_seq, char_lens, encoder_out, w

    def _apply_word_emb(self, emb_char, word_emb):
        """Concatenate character embeddings with word embeddings,
        possibly encoding the word embeddings if an encoder
        is provided.

        NOTE(review): this method appears to be unused — forward() calls
        the module-level _apply_word_emb function (different signature,
        uses word_emb.data) instead; confirm before relying on it.

        Arguments
        ---------
        emb_char: torch.Tensor
            the character embedding tensor
        word_emb: torch.Tensor
            the word embedding tensor

        Returns
        -------
        result: torch.Tensor
            the concatenation of the tensors"""
        word_emb_enc = (
            self.word_emb_enc(word_emb)
            if self.word_emb_enc is not None
            else word_emb
        )
        return torch.cat([emb_char, word_emb_enc], dim=-1)
class WordEmbeddingEncoder(nn.Module):
    """A small encoder module that reduces the dimensionality
    and normalizes word embeddings

    Arguments
    ---------
    word_emb_dim: int
        the dimension of the original word embeddings
    word_emb_enc_dim: int
        the dimension of the encoded word embeddings
    norm: torch.nn.Module
        the normalization to be used (
        e.g. speechbrain.nnet.normalization.LayerNorm)
    norm_type: str
        the type of normalization to be used ("batch", "layer" or
        "instance"); when given, it takes precedence over `norm`
    """

    def __init__(
        self, word_emb_dim, word_emb_enc_dim, norm=None, norm_type=None
    ):
        super().__init__()
        self.word_emb_dim = word_emb_dim
        self.word_emb_enc_dim = word_emb_enc_dim
        # A norm_type string takes precedence over a pre-built norm module
        if norm_type:
            self.norm = self._get_norm(norm_type, word_emb_dim)
        else:
            self.norm = norm
        self.lin = Linear(n_neurons=word_emb_enc_dim, input_size=word_emb_dim)
        self.activation = nn.Tanh()

    def _get_norm(self, norm, dim):
        """Determines the type of normalizer

        Arguments
        ---------
        norm: str
            the normalization type: "batch", "layer" or "instance"
        dim: int
            the dimensionality of the inputs

        Raises
        ------
        ValueError
            if the normalization type is not recognized
        """
        norm_cls = self.NORMS.get(norm)
        if not norm_cls:
            raise ValueError(f"Invalid norm: {norm}")
        return norm_cls(input_size=dim)

    def forward(self, emb):
        """Computes the forward pass of the embedding

        Arguments
        ---------
        emb: torch.Tensor
            the original word embeddings

        Returns
        -------
        emb_enc: torch.Tensor
            encoded word embeddings
        """
        # Bug fix: previously `x` was undefined when self.norm was None,
        # raising a NameError; fall through with the raw embeddings instead.
        x = self.norm(emb) if self.norm is not None else emb
        x = self.lin(x)
        x = self.activation(x)
        return x

    NORMS = {
        "batch": normalization.BatchNorm1d,
        "layer": normalization.LayerNorm,
        "instance": normalization.InstanceNorm1d,
    }
class TransformerG2P(TransformerInterface):
    """
    A Transformer-based Grapheme-to-Phoneme model

    Arguments
    ----------
    emb: torch.nn.Module
        the embedding module
    encoder_emb: torch.nn.Module
        the encoder embedding module
    char_lin: torch.nn.Module
        a linear module connecting the inputs
        to the transformer
    phn_lin: torch.nn.Module
        a linear module connecting the outputs to
        the transformer
    out: torch.nn.Module
        the decoder module (usually Softmax)
    lin: torch.nn.Module
        the linear module for outputs
    d_model: int
        The number of expected features in the encoder/decoder inputs (default=512).
    nhead: int
        The number of heads in the multi-head attention models (default=8).
    num_encoder_layers: int, optional
        The number of encoder layers in the encoder.
    num_decoder_layers: int, optional
        The number of decoder layers in the decoder.
    dim_ffn: int, optional
        The dimension of the feedforward network model hidden layer.
    dropout: int, optional
        The dropout value.
    activation: torch.nn.Module, optional
        The activation function for Feed-Forward Network layer,
        e.g., relu or gelu or swish.
    custom_src_module: torch.nn.Module, optional
        Module that processes the src features to expected feature dim.
    custom_tgt_module: torch.nn.Module, optional
        Module that processes the src features to expected feature dim.
    positional_encoding: str, optional
        Type of positional encoding used. e.g. 'fixed_abs_sine' for fixed absolute positional encodings.
    normalize_before: bool, optional
        Whether normalization should be applied before or after MHA or FFN in Transformer layers.
        Defaults to True as this was shown to lead to better performance and training stability.
    kernel_size: int, optional
        Kernel size in convolutional layers when Conformer is used.
    bias: bool, optional
        Whether to use bias in Conformer convolutional layers.
    encoder_module: str, optional
        Choose between Conformer and Transformer for the encoder. The decoder is fixed to be a Transformer.
    conformer_activation: torch.nn.Module, optional
        Activation module used after Conformer convolutional layers. E.g. Swish, ReLU etc. it has to be a torch Module.
    attention_type: str, optional
        Type of attention layer used in all Transformer or Conformer layers.
        e.g. regularMHA or RelPosMHA.
    max_length: int, optional
        Max length for the target and source sequence in input.
        Used for positional encodings.
    causal: bool, optional
        Whether the encoder should be causal or not (the decoder is always causal).
        If causal the Conformer convolutional layer is causal.
    pad_idx: int
        the padding index (for masks)
    encoder_kdim: int, optional
        Dimension of the key for the encoder.
    encoder_vdim: int, optional
        Dimension of the value for the encoder.
    decoder_kdim: int, optional
        Dimension of the key for the decoder.
    decoder_vdim: int, optional
        Dimension of the value for the decoder.
    use_word_emb: bool, optional
        whether to concatenate word embeddings to character embeddings
    word_emb_enc: torch.nn.Module, optional
        a module used to encode word embeddings before concatenation
    """

    def __init__(
        self,
        emb,
        encoder_emb,
        char_lin,
        phn_lin,
        lin,
        out,
        d_model=512,
        nhead=8,
        num_encoder_layers=6,
        num_decoder_layers=6,
        d_ffn=2048,
        dropout=0.1,
        activation=nn.ReLU,
        custom_src_module=None,
        custom_tgt_module=None,
        positional_encoding="fixed_abs_sine",
        normalize_before=True,
        kernel_size=15,
        bias=True,
        encoder_module="transformer",
        attention_type="regularMHA",
        max_length=2500,
        causal=False,
        pad_idx=0,
        encoder_kdim=None,
        encoder_vdim=None,
        decoder_kdim=None,
        decoder_vdim=None,
        use_word_emb=False,
        word_emb_enc=None,
    ):
        super().__init__(
            d_model=d_model,
            nhead=nhead,
            num_encoder_layers=num_encoder_layers,
            num_decoder_layers=num_decoder_layers,
            d_ffn=d_ffn,
            dropout=dropout,
            activation=activation,
            custom_src_module=custom_src_module,
            custom_tgt_module=custom_tgt_module,
            positional_encoding=positional_encoding,
            normalize_before=normalize_before,
            kernel_size=kernel_size,
            bias=bias,
            encoder_module=encoder_module,
            attention_type=attention_type,
            max_length=max_length,
            causal=causal,
            encoder_kdim=encoder_kdim,
            encoder_vdim=encoder_vdim,
            decoder_kdim=decoder_kdim,
            decoder_vdim=decoder_vdim,
        )
        self.emb = emb
        self.encoder_emb = encoder_emb
        self.char_lin = char_lin
        self.phn_lin = phn_lin
        self.lin = lin
        self.out = out
        self.pad_idx = pad_idx
        self.use_word_emb = use_word_emb
        self.word_emb_enc = word_emb_enc
        self._reset_params()

    def forward(
        self, grapheme_encoded, phn_encoded=None, word_emb=None, **kwargs
    ):
        """Computes the forward pass

        Arguments
        ---------
        grapheme_encoded: torch.Tensor
            graphemes encoded as a Torch tensor
        phn_encoded: torch.Tensor
            the encoded phonemes
        word_emb: torch.Tensor
            word embeddings (if applicable)

        Returns
        -------
        p_seq: torch.Tensor
            the log-probabilities of individual tokens in a sequence
        char_lens: torch.Tensor
            the character length syntax
        encoder_out: torch.Tensor
            the encoder state
        attention: torch.Tensor
            the attention state
        """
        chars, char_lens = grapheme_encoded
        # Without phoneme targets (inference), decode from a BOS-only sequence
        if phn_encoded is None:
            phn = get_dummy_phonemes(chars.size(0), chars.device)
        else:
            phn, _ = phn_encoded
        emb_char = self.encoder_emb(chars)
        if self.use_word_emb:
            emb_char = _apply_word_emb(self.word_emb_enc, emb_char, word_emb)
        src = self.char_lin(emb_char)
        tgt = self.emb(phn)
        tgt = self.phn_lin(tgt)
        (
            src_key_padding_mask,
            tgt_key_padding_mask,
            src_mask,
            tgt_mask,
        ) = self.make_masks(src, tgt, char_lens, pad_idx=self.pad_idx)
        pos_embs_encoder = None
        if self.attention_type == "RelPosMHAXL":
            # relative positional embeddings are passed to the encoder
            # instead of being added to the inputs
            pos_embs_encoder = self.positional_encoding(src)
        elif self.positional_encoding_type == "fixed_abs_sine":
            src = src + self.positional_encoding(src)  # add the encodings here
            pos_embs_encoder = None
        encoder_out, _ = self.encoder(
            src=src,
            src_mask=src_mask,
            src_key_padding_mask=src_key_padding_mask,
            pos_embs=pos_embs_encoder,
        )
        # NOTE(review): when attention_type == "regularMHA" and
        # positional_encoding is None, pos_embs_target is never assigned
        # below — confirm that configuration is unsupported.
        if self.attention_type == "RelPosMHAXL":
            # use standard sinusoidal pos encoding in decoder
            tgt = tgt + self.positional_encoding_decoder(tgt)
            src = src + self.positional_encoding_decoder(src)
            pos_embs_encoder = None
            pos_embs_target = None
        elif self.positional_encoding_type == "fixed_abs_sine":
            tgt = tgt + self.positional_encoding(tgt)
            pos_embs_target = None
            pos_embs_encoder = None
        decoder_out, _, attention = self.decoder(
            tgt=tgt,
            memory=encoder_out,
            memory_mask=src_mask,
            tgt_mask=tgt_mask,
            tgt_key_padding_mask=tgt_key_padding_mask,
            memory_key_padding_mask=src_key_padding_mask,
            pos_embs_tgt=pos_embs_target,
            pos_embs_src=pos_embs_encoder,
        )
        logits = self.lin(decoder_out)
        p_seq = self.out(logits)
        return p_seq, char_lens, encoder_out, attention

    def _reset_params(self):
        """Resets the parameters of the model"""
        # Xavier init for all weight matrices (biases/1-D params untouched)
        for p in self.parameters():
            if p.dim() > 1:
                torch.nn.init.xavier_normal_(p)

    def make_masks(self, src, tgt, src_len=None, pad_idx=0):
        """This method generates the masks for training the transformer model.

        Arguments
        ---------
        src : tensor
            The sequence to the encoder (required).
        tgt : tensor
            The sequence to the decoder (required).
        src_len : tensor, optional
            Relative lengths of the source sequences; used to build
            the source key padding mask.
        pad_idx : int
            The index for <pad> token (default=0).

        Returns
        -------
        src_key_padding_mask: torch.Tensor
            the source key padding mask
        tgt_key_padding_mask: torch.Tensor
            the target key padding masks
        src_mask: torch.Tensor
            the source mask
        tgt_mask: torch.Tensor
            the target mask
        """
        # NOTE(review): src_key_padding_mask is only assigned when src_len
        # is provided; calling with src_len=None would raise a NameError at
        # the return statement — confirm callers always pass src_len.
        if src_len is not None:
            abs_len = torch.round(src_len * src.shape[1])
            src_key_padding_mask = (
                torch.arange(src.shape[1])[None, :].to(abs_len)
                > abs_len[:, None]
            )
        tgt_key_padding_mask = get_key_padding_mask(tgt, pad_idx=pad_idx)
        src_mask = None
        tgt_mask = get_lookahead_mask(tgt)
        return src_key_padding_mask, tgt_key_padding_mask, src_mask, tgt_mask

    def decode(self, tgt, encoder_out):
        """This method implements a decoding step for the transformer model.

        Arguments
        ---------
        tgt : torch.Tensor
            The sequence to the decoder.
        encoder_out : torch.Tensor
            Hidden output of the encoder.

        Returns
        -------
        prediction: torch.Tensor
            the predicted sequence
        attention: torch.Tensor
            the attention matrix corresponding to the last attention head
            (useful for plotting attention)
        """
        tgt_mask = get_lookahead_mask(tgt)
        tgt = self.emb(tgt)
        tgt = self.phn_lin(tgt)
        if self.attention_type == "RelPosMHAXL":
            # we use fixed positional encodings in the decoder
            tgt = tgt + self.positional_encoding_decoder(tgt)
            encoder_out = encoder_out + self.positional_encoding_decoder(
                encoder_out
            )
        elif self.positional_encoding_type == "fixed_abs_sine":
            tgt = tgt + self.positional_encoding(tgt)  # add the encodings here
        prediction, self_attns, multihead_attns = self.decoder(
            tgt,
            encoder_out,
            tgt_mask=tgt_mask,
            pos_embs_tgt=None,
            pos_embs_src=None,
        )
        # Keep only the attention of the last decoder layer for plotting
        attention = multihead_attns[-1]
        return prediction, attention
def input_dim(use_word_emb, embedding_dim, word_emb_enc_dim):
    """Computes the model input dimension (intended for hparam files).

    Arguments
    ---------
    use_word_emb: bool
        whether word embeddings are in use
    embedding_dim: int
        the embedding dimension
    word_emb_enc_dim: int
        the dimension of encoded word embeddings

    Returns
    -------
    input_dim: int
        the input dimension: the embedding dimension, plus the encoded
        word embedding dimension when word embeddings are enabled
    """
    extra_dim = word_emb_enc_dim if use_word_emb else 0
    return embedding_dim + extra_dim
def _apply_word_emb(word_emb_enc, emb_char, word_emb):
"""
Concatenates character and word embeddings together, possibly
applying a custom encoding/transformation
Arguments
---------
word_emb_enc: callable
an encoder to apply (typically, speechbrain.lobes.models.g2p.model.WordEmbeddingEncoder)
emb_char: torch.Tensor
character embeddings
word_emb: char
word embeddings
Returns
-------
result: torch.Tensor
the resulting (concatenated) tensor
"""
word_emb_enc = (
word_emb_enc(word_emb.data)
if word_emb_enc is not None
else word_emb.data
)
return torch.cat([emb_char, word_emb_enc], dim=-1)
def get_dummy_phonemes(batch_size, device):
    """
    Creates a dummy phoneme sequence: a single index-0 (BOS) token
    for every item in the batch.

    Arguments
    ---------
    batch_size: int
        the batch size
    device: str
        the target device

    Returns
    -------
    result: torch.Tensor
        a (batch_size x 1) integer tensor of zeros
    """
    bos = torch.zeros(1, dtype=torch.long, device=device)
    return bos.expand(batch_size, 1)
| 18,054 | 29.293624 | 119 | py |
speechbrain | speechbrain-main/speechbrain/lobes/models/transformer/Transformer.py | """Transformer implementaion in the SpeechBrain style.
Authors
* Jianyuan Zhong 2020
* Samuele Cornell 2021
"""
import math
import torch
import torch.nn as nn
import speechbrain as sb
from typing import Optional
import numpy as np
from .Conformer import ConformerEncoder
from speechbrain.nnet.activations import Swish
from speechbrain.nnet.attention import RelPosEncXL
class TransformerInterface(nn.Module):
    """This is an interface for transformer model.
    Users can modify the attributes and define the forward function as
    needed according to their own tasks.
    The architecture is based on the paper "Attention Is All You Need":
    https://arxiv.org/pdf/1706.03762.pdf

    Arguments
    ----------
    d_model: int
        The number of expected features in the encoder/decoder inputs (default=512).
    nhead: int
        The number of heads in the multi-head attention models (default=8).
    num_encoder_layers: int, optional
        The number of encoder layers in the encoder.
    num_decoder_layers: int, optional
        The number of decoder layers in the decoder.
    dim_ffn: int, optional
        The dimension of the feedforward network model hidden layer.
    dropout: int, optional
        The dropout value.
    activation: torch.nn.Module, optional
        The activation function for Feed-Forward Network layer,
        e.g., relu or gelu or swish.
    custom_src_module: torch.nn.Module, optional
        Module that processes the src features to expected feature dim.
    custom_tgt_module: torch.nn.Module, optional
        Module that processes the src features to expected feature dim.
    positional_encoding: str, optional
        Type of positional encoding used. e.g. 'fixed_abs_sine' for fixed absolute positional encodings.
    normalize_before: bool, optional
        Whether normalization should be applied before or after MHA or FFN in Transformer layers.
        Defaults to True as this was shown to lead to better performance and training stability.
    kernel_size: int, optional
        Kernel size in convolutional layers when Conformer is used.
    bias: bool, optional
        Whether to use bias in Conformer convolutional layers.
    encoder_module: str, optional
        Choose between Conformer and Transformer for the encoder. The decoder is fixed to be a Transformer.
    conformer_activation: torch.nn.Module, optional
        Activation module used after Conformer convolutional layers. E.g. Swish, ReLU etc. it has to be a torch Module.
    attention_type: str, optional
        Type of attention layer used in all Transformer or Conformer layers.
        e.g. regularMHA or RelPosMHA.
    max_length: int, optional
        Max length for the target and source sequence in input.
        Used for positional encodings.
    causal: bool, optional
        Whether the encoder should be causal or not (the decoder is always causal).
        If causal the Conformer convolutional layer is causal.
    encoder_kdim: int, optional
        Dimension of the key for the encoder.
    encoder_vdim: int, optional
        Dimension of the value for the encoder.
    decoder_kdim: int, optional
        Dimension of the key for the decoder.
    decoder_vdim: int, optional
        Dimension of the value for the decoder.
    """

    def __init__(
        self,
        d_model=512,
        nhead=8,
        num_encoder_layers=6,
        num_decoder_layers=6,
        d_ffn=2048,
        dropout=0.1,
        activation=nn.ReLU,
        custom_src_module=None,
        custom_tgt_module=None,
        positional_encoding="fixed_abs_sine",
        normalize_before=True,
        kernel_size: Optional[int] = 31,
        bias: Optional[bool] = True,
        encoder_module: Optional[str] = "transformer",
        conformer_activation: Optional[nn.Module] = Swish,
        attention_type: Optional[str] = "regularMHA",
        max_length: Optional[int] = 2500,
        causal: Optional[bool] = False,
        encoder_kdim: Optional[int] = None,
        encoder_vdim: Optional[int] = None,
        decoder_kdim: Optional[int] = None,
        decoder_vdim: Optional[int] = None,
    ):
        super().__init__()
        self.causal = causal
        self.attention_type = attention_type
        self.positional_encoding_type = positional_encoding
        self.encoder_kdim = encoder_kdim
        self.encoder_vdim = encoder_vdim
        self.decoder_kdim = decoder_kdim
        self.decoder_vdim = decoder_vdim

        assert attention_type in ["regularMHA", "RelPosMHAXL"]
        assert positional_encoding in ["fixed_abs_sine", None]

        assert (
            num_encoder_layers + num_decoder_layers > 0
        ), "number of encoder layers and number of decoder layers cannot both be 0!"

        if positional_encoding == "fixed_abs_sine":
            self.positional_encoding = PositionalEncoding(d_model, max_length)
        elif positional_encoding is None:
            pass
            # no positional encodings

        # overrides any other pos_embedding
        if attention_type == "RelPosMHAXL":
            # relative encodings for the encoder; the decoder still uses
            # absolute sinusoidal encodings (see positional_encoding_decoder)
            self.positional_encoding = RelPosEncXL(d_model)
            self.positional_encoding_decoder = PositionalEncoding(
                d_model, max_length
            )

        # initialize the encoder
        if num_encoder_layers > 0:
            if custom_src_module is not None:
                self.custom_src_module = custom_src_module(d_model)
            if encoder_module == "transformer":
                self.encoder = TransformerEncoder(
                    nhead=nhead,
                    num_layers=num_encoder_layers,
                    d_ffn=d_ffn,
                    d_model=d_model,
                    dropout=dropout,
                    activation=activation,
                    normalize_before=normalize_before,
                    causal=self.causal,
                    attention_type=self.attention_type,
                    kdim=self.encoder_kdim,
                    vdim=self.encoder_vdim,
                )
            elif encoder_module == "conformer":
                self.encoder = ConformerEncoder(
                    nhead=nhead,
                    num_layers=num_encoder_layers,
                    d_ffn=d_ffn,
                    d_model=d_model,
                    dropout=dropout,
                    activation=conformer_activation,
                    kernel_size=kernel_size,
                    bias=bias,
                    causal=self.causal,
                    attention_type=self.attention_type,
                )
                # Conformer-specific constraints, checked after construction
                assert (
                    normalize_before
                ), "normalize_before must be True for Conformer"

                assert (
                    conformer_activation is not None
                ), "conformer_activation must not be None"

        # initialize the decoder
        if num_decoder_layers > 0:
            if custom_tgt_module is not None:
                self.custom_tgt_module = custom_tgt_module(d_model)
            self.decoder = TransformerDecoder(
                num_layers=num_decoder_layers,
                nhead=nhead,
                d_ffn=d_ffn,
                d_model=d_model,
                dropout=dropout,
                activation=activation,
                normalize_before=normalize_before,
                causal=True,
                attention_type="regularMHA",  # always use regular attention in decoder
                kdim=self.decoder_kdim,
                vdim=self.decoder_vdim,
            )

    def forward(self, **kwags):
        """Users should modify this function according to their own tasks."""
        raise NotImplementedError
class PositionalEncoding(nn.Module):
    """Absolute sinusoidal positional encoding.

    PE(pos, 2i)   = sin(pos/(10000^(2i/dmodel)))
    PE(pos, 2i+1) = cos(pos/(10000^(2i/dmodel)))

    Arguments
    ---------
    input_size: int
        Embedding dimension.
    max_len : int, optional
        Max length of the input sequences (default 2500).

    Example
    -------
    >>> a = torch.rand((8, 120, 512))
    >>> enc = PositionalEncoding(input_size=a.shape[-1])
    >>> b = enc(a)
    >>> b.shape
    torch.Size([1, 120, 512])
    """

    def __init__(self, input_size, max_len=2500):
        super().__init__()
        self.max_len = max_len
        # Column vector of positions 0 .. max_len - 1
        positions = torch.arange(0, self.max_len).unsqueeze(1).float()
        # Inverse frequencies for the even feature indexes
        inv_freq = torch.exp(
            torch.arange(0, input_size, 2).float()
            * -(math.log(10000.0) / input_size)
        )
        pe = torch.zeros(self.max_len, input_size, requires_grad=False)
        pe[:, 0::2] = torch.sin(positions * inv_freq)
        pe[:, 1::2] = torch.cos(positions * inv_freq)
        # Stored as a non-trainable (1 x max_len x input_size) buffer
        self.register_buffer("pe", pe.unsqueeze(0))

    def forward(self, x):
        """Returns the encodings for the first x.size(1) positions.

        Arguments
        ---------
        x : tensor
            Input feature shape (batch, time, fea)
        """
        return self.pe[:, : x.size(1)].clone().detach()
class TransformerEncoderLayer(nn.Module):
    """This is an implementation of self-attention encoder layer.

    Arguments
    ----------
    d_ffn: int, optional
        The dimension of the feedforward network model hidden layer.
    nhead: int
        The number of heads in the multi-head attention models (default=8).
    d_model: int
        The number of expected features in the encoder/decoder inputs (default=512).
    kdim: int, optional
        Dimension of the key.
    vdim: int, optional
        Dimension of the value.
    dropout: int, optional
        The dropout value.
    activation: torch.nn.Module, optional
        The activation function for Feed-Forward Network layer,
        e.g., relu or gelu or swish.
    normalize_before: bool, optional
        Whether normalization should be applied before or after MHA or FFN in Transformer layers.
        Defaults to True as this was shown to lead to better performance and training stability.
    attention_type: str, optional
        Type of attention layer used in all Transformer or Conformer layers.
        e.g. regularMHA or RelPosMHA.
    causal: bool, optional
        Whether future positions are masked; only forwarded to the
        RelPosMHAXL attention here (as mask_pos_future).

    Example
    -------
    >>> import torch
    >>> x = torch.rand((8, 60, 512))
    >>> net = TransformerEncoderLayer(512, 8, d_model=512)
    >>> output = net(x)
    >>> output[0].shape
    torch.Size([8, 60, 512])
    """

    def __init__(
        self,
        d_ffn,
        nhead,
        d_model,
        kdim=None,
        vdim=None,
        dropout=0.0,
        activation=nn.ReLU,
        normalize_before=False,
        attention_type="regularMHA",
        causal=False,
    ):
        super().__init__()

        if attention_type == "regularMHA":
            self.self_att = sb.nnet.attention.MultiheadAttention(
                nhead=nhead,
                d_model=d_model,
                dropout=dropout,
                kdim=kdim,
                vdim=vdim,
            )

        elif attention_type == "RelPosMHAXL":
            self.self_att = sb.nnet.attention.RelPosMHAXL(
                d_model, nhead, dropout, mask_pos_future=causal
            )

        self.pos_ffn = sb.nnet.attention.PositionalwiseFeedForward(
            d_ffn=d_ffn,
            input_size=d_model,
            dropout=dropout,
            activation=activation,
        )

        self.norm1 = sb.nnet.normalization.LayerNorm(d_model, eps=1e-6)
        self.norm2 = sb.nnet.normalization.LayerNorm(d_model, eps=1e-6)
        self.dropout1 = torch.nn.Dropout(dropout)
        self.dropout2 = torch.nn.Dropout(dropout)

        self.normalize_before = normalize_before

    def forward(
        self,
        src,
        src_mask: Optional[torch.Tensor] = None,
        src_key_padding_mask: Optional[torch.Tensor] = None,
        pos_embs: Optional[torch.Tensor] = None,
    ):
        """
        Arguments
        ----------
        src : torch.Tensor
            The sequence to the encoder layer.
        src_mask : torch.Tensor
            The mask for the src query for each example in the batch.
        src_key_padding_mask : torch.Tensor, optional
            The mask for the src keys for each example in the batch.
        pos_embs : torch.Tensor, optional
            Positional embeddings passed to the attention module
            (used with RelPosMHAXL).
        """
        # Pre-norm: normalize before attention; post-norm: after the residual
        if self.normalize_before:
            src1 = self.norm1(src)
        else:
            src1 = src

        output, self_attn = self.self_att(
            src1,
            src1,
            src1,
            attn_mask=src_mask,
            key_padding_mask=src_key_padding_mask,
            pos_embs=pos_embs,
        )

        # add & norm
        src = src + self.dropout1(output)
        if not self.normalize_before:
            src = self.norm1(src)

        # Same pre-/post-norm convention for the feed-forward sublayer
        if self.normalize_before:
            src1 = self.norm2(src)
        else:
            src1 = src

        output = self.pos_ffn(src1)

        # add & norm
        output = src + self.dropout2(output)
        if not self.normalize_before:
            output = self.norm2(output)
        return output, self_attn
class TransformerEncoder(nn.Module):
    """A stack of Transformer encoder layers with a final LayerNorm.

    Optionally applies LayerDrop: during training each layer may be
    skipped with probability ``layerdrop_prob``.

    Arguments
    ---------
    num_layers : int
        Number of transformer layers to include.
    nhead : int
        Number of attention heads.
    d_ffn : int
        Hidden size of self-attention Feed Forward layer.
    d_model : int
        The dimension of the input embedding.
    kdim : int
        Dimension for key (Optional).
    vdim : int
        Dimension for value (Optional).
    dropout : float
        Dropout for the encoder (Optional).
    layerdrop_prob : float
        Probability of skipping a layer during training (Optional).

    Example
    -------
    >>> import torch
    >>> x = torch.rand((8, 60, 512))
    >>> net = TransformerEncoder(1, 8, 512, d_model=512)
    >>> output, _ = net(x)
    >>> output.shape
    torch.Size([8, 60, 512])
    """

    def __init__(
        self,
        num_layers,
        nhead,
        d_ffn,
        input_shape=None,
        d_model=None,
        kdim=None,
        vdim=None,
        dropout=0.0,
        activation=nn.ReLU,
        normalize_before=False,
        causal=False,
        layerdrop_prob=0.0,
        attention_type="regularMHA",
    ):
        super().__init__()

        self.layers = torch.nn.ModuleList(
            [
                TransformerEncoderLayer(
                    d_ffn=d_ffn,
                    nhead=nhead,
                    d_model=d_model,
                    kdim=kdim,
                    vdim=vdim,
                    dropout=dropout,
                    activation=activation,
                    normalize_before=normalize_before,
                    causal=causal,
                    attention_type=attention_type,
                )
                for _ in range(num_layers)
            ]
        )
        self.norm = sb.nnet.normalization.LayerNorm(d_model, eps=1e-6)
        self.layerdrop_prob = layerdrop_prob
        self.rng = np.random.default_rng()

    def forward(
        self,
        src,
        src_mask: Optional[torch.Tensor] = None,
        src_key_padding_mask: Optional[torch.Tensor] = None,
        pos_embs: Optional[torch.Tensor] = None,
    ):
        """Pass ``src`` through every (non-dropped) layer, then normalize.

        Arguments
        ----------
        src : tensor
            The sequence to the encoder layer (required).
        src_mask : tensor
            The mask for the src sequence (optional).
        src_key_padding_mask : tensor
            The mask for the src keys per batch (optional).
        """
        output = src

        # One random score per layer, drawn up front; layers are only
        # actually skipped during training.
        if self.layerdrop_prob > 0.0:
            drop_scores = self.rng.random(len(self.layers))
        else:
            drop_scores = None

        attention_lst = []
        for idx, enc_layer in enumerate(self.layers):
            skip_layer = (
                self.training
                and self.layerdrop_prob != 0.0
                and drop_scores[idx] <= self.layerdrop_prob
            )
            if skip_layer:
                continue

            output, attention = enc_layer(
                output,
                src_mask=src_mask,
                src_key_padding_mask=src_key_padding_mask,
                pos_embs=pos_embs,
            )
            attention_lst.append(attention)

        return self.norm(output), attention_lst
class TransformerDecoderLayer(nn.Module):
    """This class implements the self-attention decoder layer.

    Arguments
    ----------
    d_ffn : int
        Hidden size of self-attention Feed Forward layer.
    nhead : int
        Number of attention heads.
    d_model : int
        Dimension of the model.
    kdim : int
        Dimension for key (optional).
    vdim : int
        Dimension for value (optional).
    dropout : float
        Dropout for the decoder (optional).

    Example
    -------
    >>> src = torch.rand((8, 60, 512))
    >>> tgt = torch.rand((8, 60, 512))
    >>> net = TransformerDecoderLayer(1024, 8, d_model=512)
    >>> output, self_attn, multihead_attn = net(src, tgt)
    >>> output.shape
    torch.Size([8, 60, 512])
    """

    def __init__(
        self,
        d_ffn,
        nhead,
        d_model,
        kdim=None,
        vdim=None,
        dropout=0.0,
        activation=nn.ReLU,
        normalize_before=False,
        attention_type="regularMHA",
        causal=None,
    ):
        super().__init__()
        self.nhead = nhead

        # Self-attention over the target plus cross-attention over the
        # encoder output; both use the same attention implementation.
        # NOTE(review): the attribute name "mutihead_attn" (sic) is kept
        # as-is — renaming it would break loading of saved state dicts.
        if attention_type == "regularMHA":
            self.self_attn = sb.nnet.attention.MultiheadAttention(
                nhead=nhead,
                d_model=d_model,
                kdim=kdim,
                vdim=vdim,
                dropout=dropout,
            )
            self.mutihead_attn = sb.nnet.attention.MultiheadAttention(
                nhead=nhead,
                d_model=d_model,
                kdim=kdim,
                vdim=vdim,
                dropout=dropout,
            )
        elif attention_type == "RelPosMHAXL":
            self.self_attn = sb.nnet.attention.RelPosMHAXL(
                d_model, nhead, dropout, mask_pos_future=causal
            )
            self.mutihead_attn = sb.nnet.attention.RelPosMHAXL(
                d_model, nhead, dropout, mask_pos_future=causal
            )

        self.pos_ffn = sb.nnet.attention.PositionalwiseFeedForward(
            d_ffn=d_ffn,
            input_size=d_model,
            dropout=dropout,
            activation=activation,
        )

        # normalization layers
        self.norm1 = sb.nnet.normalization.LayerNorm(d_model, eps=1e-6)
        self.norm2 = sb.nnet.normalization.LayerNorm(d_model, eps=1e-6)
        self.norm3 = sb.nnet.normalization.LayerNorm(d_model, eps=1e-6)
        self.dropout1 = torch.nn.Dropout(dropout)
        self.dropout2 = torch.nn.Dropout(dropout)
        self.dropout3 = torch.nn.Dropout(dropout)

        self.normalize_before = normalize_before

    def forward(
        self,
        tgt,
        memory,
        tgt_mask=None,
        memory_mask=None,
        tgt_key_padding_mask=None,
        memory_key_padding_mask=None,
        pos_embs_tgt=None,
        pos_embs_src=None,
    ):
        """
        Arguments
        ----------
        tgt: tensor
            The sequence to the decoder layer (required).
        memory: tensor
            The sequence from the last layer of the encoder (required).
        tgt_mask: tensor
            The mask for the tgt sequence (optional).
        memory_mask: tensor
            The mask for the memory sequence (optional).
        tgt_key_padding_mask: tensor
            The mask for the tgt keys per batch (optional).
        memory_key_padding_mask: tensor
            The mask for the memory keys per batch (optional).
        """
        # Pre-norm (normalize_before=True) applies LayerNorm before each
        # sub-block; post-norm applies it after the residual addition.
        if self.normalize_before:
            tgt1 = self.norm1(tgt)
        else:
            tgt1 = tgt

        # self-attention over the target sequence
        tgt2, self_attn = self.self_attn(
            query=tgt1,
            key=tgt1,
            value=tgt1,
            attn_mask=tgt_mask,
            key_padding_mask=tgt_key_padding_mask,
            pos_embs=pos_embs_tgt,
        )

        # add & norm
        tgt = tgt + self.dropout1(tgt2)
        if not self.normalize_before:
            tgt = self.norm1(tgt)

        if self.normalize_before:
            tgt1 = self.norm2(tgt)
        else:
            tgt1 = tgt

        # multi-head attention over the target sequence and encoder states
        tgt2, multihead_attention = self.mutihead_attn(
            query=tgt1,
            key=memory,
            value=memory,
            attn_mask=memory_mask,
            key_padding_mask=memory_key_padding_mask,
            pos_embs=pos_embs_src,
        )

        # add & norm
        tgt = tgt + self.dropout2(tgt2)
        if not self.normalize_before:
            tgt = self.norm2(tgt)

        if self.normalize_before:
            tgt1 = self.norm3(tgt)
        else:
            tgt1 = tgt

        # position-wise feed forward
        tgt2 = self.pos_ffn(tgt1)

        # add & norm
        tgt = tgt + self.dropout3(tgt2)
        if not self.normalize_before:
            tgt = self.norm3(tgt)

        return tgt, self_attn, multihead_attention
class TransformerDecoder(nn.Module):
    """A stack of Transformer decoder layers with a final LayerNorm.

    Arguments
    ----------
    nhead : int
        Number of attention heads.
    d_ffn : int
        Hidden size of self-attention Feed Forward layer.
    d_model : int
        Dimension of the model.
    kdim : int, optional
        Dimension for key (Optional).
    vdim : int, optional
        Dimension for value (Optional).
    dropout : float, optional
        Dropout for the decoder (Optional).

    Example
    -------
    >>> src = torch.rand((8, 60, 512))
    >>> tgt = torch.rand((8, 60, 512))
    >>> net = TransformerDecoder(1, 8, 1024, d_model=512)
    >>> output, _, _ = net(src, tgt)
    >>> output.shape
    torch.Size([8, 60, 512])
    """

    def __init__(
        self,
        num_layers,
        nhead,
        d_ffn,
        d_model,
        kdim=None,
        vdim=None,
        dropout=0.0,
        activation=nn.ReLU,
        normalize_before=False,
        causal=False,
        attention_type="regularMHA",
    ):
        super().__init__()
        self.layers = torch.nn.ModuleList(
            TransformerDecoderLayer(
                d_ffn=d_ffn,
                nhead=nhead,
                d_model=d_model,
                kdim=kdim,
                vdim=vdim,
                dropout=dropout,
                activation=activation,
                normalize_before=normalize_before,
                causal=causal,
                attention_type=attention_type,
            )
            for _ in range(num_layers)
        )
        self.norm = sb.nnet.normalization.LayerNorm(d_model, eps=1e-6)

    def forward(
        self,
        tgt,
        memory,
        tgt_mask=None,
        memory_mask=None,
        tgt_key_padding_mask=None,
        memory_key_padding_mask=None,
        pos_embs_tgt=None,
        pos_embs_src=None,
    ):
        """Run ``tgt`` through every decoder layer, then normalize.

        Arguments
        ----------
        tgt : tensor
            The sequence to the decoder layer (required).
        memory : tensor
            The sequence from the last layer of the encoder (required).
        tgt_mask : tensor
            The mask for the tgt sequence (optional).
        memory_mask : tensor
            The mask for the memory sequence (optional).
        tgt_key_padding_mask : tensor
            The mask for the tgt keys per batch (optional).
        memory_key_padding_mask : tensor
            The mask for the memory keys per batch (optional).
        """
        self_attns = []
        multihead_attns = []
        output = tgt
        for dec_layer in self.layers:
            output, sa, mha = dec_layer(
                output,
                memory,
                tgt_mask=tgt_mask,
                memory_mask=memory_mask,
                tgt_key_padding_mask=tgt_key_padding_mask,
                memory_key_padding_mask=memory_key_padding_mask,
                pos_embs_tgt=pos_embs_tgt,
                pos_embs_src=pos_embs_src,
            )
            self_attns.append(sa)
            multihead_attns.append(mha)

        return self.norm(output), self_attns, multihead_attns
class NormalizedEmbedding(nn.Module):
    """Embedding layer whose output is scaled by sqrt(d_model).

    Since the dot product in self-attention is normalized by sqrt(d_model)
    and the final linear projection for prediction may share weights with
    this embedding, the embedded tokens are multiplied by sqrt(d_model).

    Arguments
    ---------
    d_model: int
        The number of expected features in the encoder/decoder inputs (default=512).
    vocab: int
        The vocab size.

    Example
    -------
    >>> emb = NormalizedEmbedding(512, 1000)
    >>> trg = torch.randint(0, 999, (8, 50))
    >>> emb_fea = emb(trg)
    """

    def __init__(self, d_model, vocab):
        super().__init__()
        self.emb = sb.nnet.embedding.Embedding(
            num_embeddings=vocab, embedding_dim=d_model, blank_id=0
        )
        self.d_model = d_model

    def forward(self, x):
        """Embed token indices ``x`` and scale them by sqrt(d_model)."""
        scale = math.sqrt(self.d_model)
        return self.emb(x) * scale
def get_key_padding_mask(padded_input, pad_idx):
    """Creates a binary mask to prevent attention to padded locations.

    Arguments
    ----------
    padded_input: torch.Tensor
        Padded input of shape [batch, time], [batch, time, ch] or
        [batch, time, ch1, ch2].
    pad_idx:
        idx for padding element.

    Returns
    -------
    torch.Tensor
        Boolean mask of shape [batch, time], True at padded positions.

    Example
    -------
    >>> a = torch.LongTensor([[1,1,0], [2,3,0], [4,5,0]])
    >>> get_key_padding_mask(a, pad_idx=0)
    tensor([[False, False, True],
            [False, False, True],
            [False, False, True]])
    """
    # Flatten trailing channel dimensions so masking is done per time step.
    if len(padded_input.shape) == 4:
        bz, time, ch1, ch2 = padded_input.shape
        padded_input = padded_input.reshape(bz, time, ch1 * ch2)

    key_padded_mask = padded_input.eq(pad_idx).to(padded_input.device)

    # For multi-channel input, mask a step only when *all* channels equal
    # the padding value (logical AND via a float product).
    # Fix: the original had an identical, redundant return inside this
    # branch duplicating the one below.
    if len(padded_input.shape) > 2:
        key_padded_mask = key_padded_mask.float().prod(dim=-1).bool()

    return key_padded_mask.detach()
def get_lookahead_mask(padded_input):
    """Creates a binary mask for each sequence which maskes future frames.

    Entry (i, j) is 0.0 when position j is visible from position i
    (j <= i) and -inf otherwise, the additive-mask convention used by
    attention modules.

    Arguments
    ---------
    padded_input: torch.Tensor
        Padded input tensor.

    Example
    -------
    >>> a = torch.LongTensor([[1,1,0], [2,3,0], [4,5,0]])
    >>> get_lookahead_mask(a)
    tensor([[0., -inf, -inf],
            [0., 0., -inf],
            [0., 0., 0.]])
    """
    num_steps = padded_input.shape[1]
    # Lower-triangular True entries mark the allowed (past + present) keys.
    allowed = torch.tril(
        torch.ones((num_steps, num_steps), device=padded_input.device)
    ).bool()
    mask = torch.zeros(
        (num_steps, num_steps), device=padded_input.device
    ).masked_fill(~allowed, float("-inf"))
    return mask.detach().to(padded_input.device)
| 27,179 | 30.641444 | 119 | py |
speechbrain | speechbrain-main/speechbrain/lobes/models/transformer/TransformerSE.py | """CNN Transformer model for SE in the SpeechBrain style.
Authors
* Chien-Feng Liao 2020
"""
import torch # noqa E402
from torch import nn
from speechbrain.nnet.linear import Linear
from speechbrain.lobes.models.transformer.Transformer import (
TransformerInterface,
get_lookahead_mask,
)
class CNNTransformerSE(TransformerInterface):
    """This is an implementation of transformer model with CNN pre-encoder for SE.

    Arguments
    ---------
    d_model : int
        The number of expected features in the encoder inputs.
    output_size : int
        The number of neurons in the output layer.
    output_activation : torch class
        The activation function of the output layer (default=ReLU).
    nhead : int
        The number of heads in the multi-head attention models (default=8).
    num_layers : int
        The number of sub-layers in the transformer (default=8).
    d_ffn : int
        The number of expected features in the encoder layers (default=512).
    dropout : int
        The dropout value (default=0.1).
    activation : torch class
        The activation function of intermediate layers (default=LeakyReLU).
    causal : bool
        True for causal setting, the model is forbidden to see future frames (default=True).
    custom_emb_module : torch class
        Module that processes the input features before the transformer model.

    Example
    -------
    >>> src = torch.rand([8, 120, 256])
    >>> net = CNNTransformerSE(d_model=256, output_size=257)
    >>> out = net(src)
    >>> out.shape
    torch.Size([8, 120, 257])
    """

    def __init__(
        self,
        d_model,
        output_size,
        output_activation=nn.ReLU,
        nhead=8,
        num_layers=8,
        d_ffn=512,
        dropout=0.1,
        activation=nn.LeakyReLU,
        causal=True,
        custom_emb_module=None,
        normalize_before=False,
    ):
        # Encoder-only configuration: num_decoder_layers=0 and no
        # positional encoding (positional_encoding=None).
        super().__init__(
            d_model=d_model,
            nhead=nhead,
            num_encoder_layers=num_layers,
            num_decoder_layers=0,
            d_ffn=d_ffn,
            dropout=dropout,
            activation=activation,
            positional_encoding=None,
            normalize_before=normalize_before,
            causal=causal,
        )

        self.custom_emb_module = custom_emb_module
        # Final projection from d_model to the spectral output size.
        self.output_layer = Linear(output_size, input_size=d_model, bias=False)
        self.output_activation = output_activation()

    def forward(self, x, src_key_padding_mask=None):
        """ Processes the input tensor x and returns an output tensor."""
        # NOTE(review): the attention mask is stored on self rather than in
        # a local — this makes forward() stateful; confirm no concurrent
        # use of the same module instance relies on it.
        if self.causal:
            self.attn_mask = get_lookahead_mask(x)
        else:
            self.attn_mask = None

        if self.custom_emb_module is not None:
            x = self.custom_emb_module(x)

        encoder_output, _ = self.encoder(
            src=x,
            src_mask=self.attn_mask,
            src_key_padding_mask=src_key_padding_mask,
        )

        output = self.output_layer(encoder_output)
        output = self.output_activation(output)

        return output
| 3,074 | 29.445545 | 92 | py |
speechbrain | speechbrain-main/speechbrain/lobes/models/transformer/TransformerLM.py | """An implementation of Transformer Language model.
Authors
* Jianyuan Zhong
* Samuele Cornell
"""
import torch # noqa 42
from torch import nn
from speechbrain.nnet.linear import Linear
from speechbrain.nnet.normalization import LayerNorm
from speechbrain.nnet.containers import ModuleList
from speechbrain.lobes.models.transformer.Transformer import (
TransformerInterface,
get_lookahead_mask,
get_key_padding_mask,
NormalizedEmbedding,
)
class TransformerLM(TransformerInterface):
    """This is an implementation of transformer language model.

    The architecture is based on the paper "Attention Is All You Need": https://arxiv.org/pdf/1706.03762.pdf

    Arguments
    ----------
    d_model : int
        The number of expected features in the encoder/decoder inputs (default=512).
    nhead : int
        The number of heads in the multiheadattention models (default=8).
    num_encoder_layers : int
        The number of sub-encoder-layers in the encoder (default=6).
    num_decoder_layers : int
        The number of sub-decoder-layers in the decoder (default=6).
    dim_ffn : int
        The dimension of the feedforward network model (default=2048).
    dropout : int
        The dropout value (default=0.1).
    activation: torch class
        The activation function of encoder/decoder intermediate layer, relu or gelu (default=relu).
    decoder_use_memory: bool
        whether to use the hidden state in the decoder

    Example
    -------
    >>> src = torch.randint(0, 720, [8, 120])
    >>> net = TransformerLM(720, 512, 8, 1, 0, 1024, activation=torch.nn.GELU)
    >>> enc_out = net.forward(src)
    >>> print(enc_out.shape)
    torch.Size([8, 120, 720])
    """

    def __init__(
        self,
        vocab,
        d_model=512,
        nhead=8,
        num_encoder_layers=12,
        num_decoder_layers=0,
        d_ffn=2048,
        dropout=0.1,
        activation=nn.ReLU,
        positional_encoding="fixed_abs_sine",
        normalize_before=False,
        d_embedding=None,
        max_length=2500,
        causal=True,
        attention_type="regularMHA",
        decoder_use_memory=False,
    ):
        super().__init__(
            d_model=d_model,
            nhead=nhead,
            num_encoder_layers=num_encoder_layers,
            num_decoder_layers=num_decoder_layers,
            d_ffn=d_ffn,
            dropout=dropout,
            activation=activation,
            positional_encoding=positional_encoding,
            normalize_before=normalize_before,
            max_length=max_length,
            causal=causal,
            attention_type=attention_type,
        )
        # If no separate embedding size is given, embed directly at d_model.
        self.d_embedding = d_embedding
        if d_embedding is None:
            self.d_embedding = d_model

        self.custom_src_module = NormalizedEmbedding(self.d_embedding, vocab)

        # Projection from embedding size to model size, only needed when
        # the two differ.
        self.embedding_proj = None
        if d_embedding is not None:
            self.embedding_proj = Linear(
                input_size=self.d_embedding, n_neurons=d_model
            )

        # Output head: linear -> LayerNorm -> linear to vocab logits.
        self.output_proj = ModuleList(
            Linear(input_size=d_model, n_neurons=d_model),
            LayerNorm(d_model, eps=1e-6),
            Linear(input_size=d_model, n_neurons=vocab),
        )

        self.num_encoder_layers = num_encoder_layers
        self.num_decoder_layers = num_decoder_layers

        self.decoder_use_memory = decoder_use_memory

        # reset the params of the transformer model
        self._reset_params()

    def forward(self, src, hx=None):
        """
        Arguments
        ---------
        src : tensor
            The sequence to the encoder (required).
        hx : unused
            Accepted but not used; present in the signature only.
        """
        src_mask, src_key_padding_mask = self.make_masks(src)

        src = self.custom_src_module(src)
        if self.embedding_proj is not None:
            src = self.embedding_proj(src)
        src = src + self.positional_encoding(src)

        # NOTE(review): if both num_encoder_layers and num_decoder_layers
        # are 0, 'encoder_out' is never assigned and the projection below
        # raises — confirm which layer configurations are supported.
        if self.num_encoder_layers > 0:
            encoder_out, _ = self.encoder(
                src=src,
                src_mask=src_mask,
                src_key_padding_mask=src_key_padding_mask,
            )

        if self.num_decoder_layers > 0:
            if self.decoder_use_memory:
                encoder_out, _, _ = self.decoder(
                    tgt=src,
                    memory=encoder_out,
                    tgt_mask=src_mask,
                    tgt_key_padding_mask=src_key_padding_mask,
                )
            else:
                # NOTE(review): this branch passes 'src=' to the decoder and
                # unpacks two return values; TransformerDecoder.forward in
                # this codebase takes (tgt, memory, ...) and returns three
                # values — verify this code path is exercised/correct.
                encoder_out, _ = self.decoder(
                    src=src,
                    tgt=src,
                    tgt_mask=src_mask,
                    tgt_key_padding_mask=src_key_padding_mask,
                )

        pred = self.output_proj(encoder_out)
        return pred

    def _reset_params(self):
        # Xavier-normal init for all weight matrices (dim > 1); 1-D
        # parameters (biases, norms) keep their default initialization.
        for p in self.parameters():
            if p.dim() > 1:
                torch.nn.init.xavier_normal_(p)

    def make_masks(
        self, src, pad_idx=0, look_ahead_mask=True, padding_mask=True
    ):
        """Build the causal and padding masks for a source batch.

        Arguments
        ---------
        src : tensor
            The input token batch.
        pad_idx : int
            Index of the padding token (default=0).
        look_ahead_mask : bool
            Whether to build the causal (future-masking) mask.
        padding_mask : bool
            Whether to build the key padding mask.
        """
        src_mask = None
        if look_ahead_mask:
            src_mask = get_lookahead_mask(src)

        src_key_padding_mask = None
        if padding_mask:
            src_key_padding_mask = get_key_padding_mask(src, pad_idx)

        return src_mask, src_key_padding_mask
| 5,248 | 29.876471 | 108 | py |
speechbrain | speechbrain-main/speechbrain/lobes/models/transformer/TransformerASR.py | """Transformer for ASR in the SpeechBrain style.
Authors
* Jianyuan Zhong 2020
"""
import torch # noqa 42
from torch import nn
from typing import Optional
from speechbrain.nnet.linear import Linear
from speechbrain.nnet.containers import ModuleList
from speechbrain.lobes.models.transformer.Transformer import (
TransformerInterface,
get_lookahead_mask,
get_key_padding_mask,
NormalizedEmbedding,
)
from speechbrain.nnet.activations import Swish
from speechbrain.dataio.dataio import length_to_mask
class TransformerASR(TransformerInterface):
    """This is an implementation of transformer model for ASR.

    The architecture is based on the paper "Attention Is All You Need":
    https://arxiv.org/pdf/1706.03762.pdf

    Arguments
    ----------
    tgt_vocab: int
        Size of vocabulary.
    input_size: int
        Input feature size.
    d_model : int, optional
        Embedding dimension size.
        (default=512).
    nhead : int, optional
        The number of heads in the multi-head attention models (default=8).
    num_encoder_layers : int, optional
        The number of sub-encoder-layers in the encoder (default=6).
    num_decoder_layers : int, optional
        The number of sub-decoder-layers in the decoder (default=6).
    dim_ffn : int, optional
        The dimension of the feedforward network model (default=2048).
    dropout : int, optional
        The dropout value (default=0.1).
    activation : torch.nn.Module, optional
        The activation function of FFN layers.
        Recommended: relu or gelu (default=relu).
    positional_encoding: str, optional
        Type of positional encoding used. e.g. 'fixed_abs_sine' for fixed absolute positional encodings.
    normalize_before: bool, optional
        Whether normalization should be applied before or after MHA or FFN in Transformer layers.
        Defaults to True as this was shown to lead to better performance and training stability.
    kernel_size: int, optional
        Kernel size in convolutional layers when Conformer is used.
    bias: bool, optional
        Whether to use bias in Conformer convolutional layers.
    encoder_module: str, optional
        Choose between Conformer and Transformer for the encoder. The decoder is fixed to be a Transformer.
    conformer_activation: torch.nn.Module, optional
        Activation module used after Conformer convolutional layers. E.g. Swish, ReLU etc. it has to be a torch Module.
    attention_type: str, optional
        Type of attention layer used in all Transformer or Conformer layers.
        e.g. regularMHA or RelPosMHA.
    max_length: int, optional
        Max length for the target and source sequence in input.
        Used for positional encodings.
    causal: bool, optional
        Whether the encoder should be causal or not (the decoder is always causal).
        If causal the Conformer convolutional layer is causal.

    Example
    -------
    >>> src = torch.rand([8, 120, 512])
    >>> tgt = torch.randint(0, 720, [8, 120])
    >>> net = TransformerASR(
    ...     720, 512, 512, 8, 1, 1, 1024, activation=torch.nn.GELU
    ... )
    >>> enc_out, dec_out = net.forward(src, tgt)
    >>> enc_out.shape
    torch.Size([8, 120, 512])
    >>> dec_out.shape
    torch.Size([8, 120, 512])
    """

    def __init__(
        self,
        tgt_vocab,
        input_size,
        d_model=512,
        nhead=8,
        num_encoder_layers=6,
        num_decoder_layers=6,
        d_ffn=2048,
        dropout=0.1,
        activation=nn.ReLU,
        positional_encoding="fixed_abs_sine",
        normalize_before=False,
        kernel_size: Optional[int] = 31,
        bias: Optional[bool] = True,
        encoder_module: Optional[str] = "transformer",
        conformer_activation: Optional[nn.Module] = Swish,
        attention_type: Optional[str] = "regularMHA",
        max_length: Optional[int] = 2500,
        causal: Optional[bool] = True,
    ):
        super().__init__(
            d_model=d_model,
            nhead=nhead,
            num_encoder_layers=num_encoder_layers,
            num_decoder_layers=num_decoder_layers,
            d_ffn=d_ffn,
            dropout=dropout,
            activation=activation,
            positional_encoding=positional_encoding,
            normalize_before=normalize_before,
            kernel_size=kernel_size,
            bias=bias,
            encoder_module=encoder_module,
            conformer_activation=conformer_activation,
            attention_type=attention_type,
            max_length=max_length,
            causal=causal,
        )

        # Project acoustic features (input_size) to the model dimension.
        self.custom_src_module = ModuleList(
            Linear(
                input_size=input_size,
                n_neurons=d_model,
                bias=True,
                combine_dims=False,
            ),
            torch.nn.Dropout(dropout),
        )
        # Token embedding for the decoder target, scaled by sqrt(d_model).
        self.custom_tgt_module = ModuleList(
            NormalizedEmbedding(d_model, tgt_vocab)
        )

        # reset parameters using xavier_normal_
        self._init_params()

    def forward(self, src, tgt, wav_len=None, pad_idx=0):
        """
        Arguments
        ----------
        src : torch.Tensor
            The sequence to the encoder.
        tgt : torch.Tensor
            The sequence to the decoder.
        wav_len: torch.Tensor, optional
            Torch Tensor of shape (batch, ) containing the relative length to padded length for each example.
        pad_idx : int, optional
            The index for <pad> token (default=0).
        """
        # reshape the src vector to [Batch, Time, Fea] if a 4d vector is given
        if src.ndim == 4:
            bz, t, ch1, ch2 = src.shape
            src = src.reshape(bz, t, ch1 * ch2)

        (
            src_key_padding_mask,
            tgt_key_padding_mask,
            src_mask,
            tgt_mask,
        ) = self.make_masks(src, tgt, wav_len, pad_idx=pad_idx)

        src = self.custom_src_module(src)
        # RelPosMHAXL consumes positional embeddings inside attention;
        # fixed_abs_sine adds them directly to the input instead.
        if self.attention_type == "RelPosMHAXL":
            pos_embs_encoder = self.positional_encoding(src)
        elif self.positional_encoding_type == "fixed_abs_sine":
            src = src + self.positional_encoding(src)  # add the encodings here
            pos_embs_encoder = None

        encoder_out, _ = self.encoder(
            src=src,
            src_mask=src_mask,
            src_key_padding_mask=src_key_padding_mask,
            pos_embs=pos_embs_encoder,
        )

        tgt = self.custom_tgt_module(tgt)

        # Add positional encoding to the target before feeding the decoder.
        if self.attention_type == "RelPosMHAXL":
            # use standard sinusoidal pos encoding in decoder
            tgt = tgt + self.positional_encoding_decoder(tgt)
            pos_embs_encoder = None  # self.positional_encoding(src)
            pos_embs_target = None
        elif self.positional_encoding_type == "fixed_abs_sine":
            tgt = tgt + self.positional_encoding(tgt)
            pos_embs_target = None
            pos_embs_encoder = None

        decoder_out, _, _ = self.decoder(
            tgt=tgt,
            memory=encoder_out,
            memory_mask=src_mask,
            tgt_mask=tgt_mask,
            tgt_key_padding_mask=tgt_key_padding_mask,
            memory_key_padding_mask=src_key_padding_mask,
            pos_embs_tgt=pos_embs_target,
            pos_embs_src=pos_embs_encoder,
        )

        return encoder_out, decoder_out

    def make_masks(self, src, tgt, wav_len=None, pad_idx=0):
        """This method generates the masks for training the transformer model.

        Arguments
        ---------
        src : tensor
            The sequence to the encoder (required).
        tgt : tensor
            The sequence to the decoder (required).
        wav_len : tensor, optional
            Relative lengths (batch, ) used to build the src padding mask.
        pad_idx : int
            The index for <pad> token (default=0).
        """
        src_key_padding_mask = None
        if wav_len is not None:
            # Convert relative lengths to absolute frame counts.
            abs_len = torch.round(wav_len * src.shape[1])
            src_key_padding_mask = ~length_to_mask(abs_len).bool()

        tgt_key_padding_mask = get_key_padding_mask(tgt, pad_idx=pad_idx)

        src_mask = None
        tgt_mask = get_lookahead_mask(tgt)
        return src_key_padding_mask, tgt_key_padding_mask, src_mask, tgt_mask

    @torch.no_grad()
    def decode(self, tgt, encoder_out, enc_len=None):
        """This method implements a decoding step for the transformer model.

        Arguments
        ---------
        tgt : torch.Tensor
            The sequence to the decoder.
        encoder_out : torch.Tensor
            Hidden output of the encoder.
        enc_len : torch.LongTensor
            The actual length of encoder states.
        """
        tgt_mask = get_lookahead_mask(tgt)
        src_key_padding_mask = None
        if enc_len is not None:
            src_key_padding_mask = (1 - length_to_mask(enc_len)).bool()

        tgt = self.custom_tgt_module(tgt)
        if self.attention_type == "RelPosMHAXL":
            # use standard sinusoidal pos encoding in decoder
            tgt = tgt + self.positional_encoding_decoder(tgt)
            pos_embs_encoder = None  # self.positional_encoding(src)
            pos_embs_target = None
        elif self.positional_encoding_type == "fixed_abs_sine":
            tgt = tgt + self.positional_encoding(tgt)  # add the encodings here
            pos_embs_target = None
            pos_embs_encoder = None

        prediction, self_attns, multihead_attns = self.decoder(
            tgt,
            encoder_out,
            tgt_mask=tgt_mask,
            memory_key_padding_mask=src_key_padding_mask,
            pos_embs_tgt=pos_embs_target,
            pos_embs_src=pos_embs_encoder,
        )
        # Return the logits and the last layer's cross-attention weights.
        return prediction, multihead_attns[-1]

    def encode(self, src, wav_len=None):
        """
        Encoder forward pass

        Arguments
        ----------
        src : torch.Tensor
            The sequence to the encoder.
        wav_len: torch.Tensor, optional
            Torch Tensor of shape (batch, ) containing the relative length to padded length for each example.
        """
        # reshape the src vector to [Batch, Time, Fea] if a 4d vector is given
        if src.dim() == 4:
            bz, t, ch1, ch2 = src.shape
            src = src.reshape(bz, t, ch1 * ch2)

        src_key_padding_mask = None
        if wav_len is not None:
            # NOTE(review): uses torch.floor here while make_masks uses
            # torch.round for the same conversion — confirm whether the
            # one-frame difference at boundaries is intentional.
            abs_len = torch.floor(wav_len * src.shape[1])
            src_key_padding_mask = (
                torch.arange(src.shape[1])[None, :].to(abs_len)
                > abs_len[:, None]
            )

        src = self.custom_src_module(src)
        if self.attention_type == "RelPosMHAXL":
            pos_embs_source = self.positional_encoding(src)
        elif self.positional_encoding_type == "fixed_abs_sine":
            src = src + self.positional_encoding(src)
            pos_embs_source = None

        encoder_out, _ = self.encoder(
            src=src,
            src_key_padding_mask=src_key_padding_mask,
            pos_embs=pos_embs_source,
        )
        return encoder_out

    def _init_params(self):
        # Xavier-normal init for all weight matrices (dim > 1); 1-D
        # parameters keep their default initialization.
        for p in self.parameters():
            if p.dim() > 1:
                torch.nn.init.xavier_normal_(p)
class EncoderWrapper(nn.Module):
    """Exposes a transformer's ``.encode()`` as its ``forward()``.

    By default, TransformerASR's ``.forward()`` encodes and decodes. With
    this wrapper the module's ``.forward()`` performs encoding only.

    Important: The wrapped object must provide an ``.encode()`` method.

    Arguments
    ----------
    transformer : sb.lobes.models.TransformerInterface
        A Transformer instance that contains a .encode() function.

    Example
    -------
    >>> src = torch.rand([8, 120, 512])
    >>> tgt = torch.randint(0, 720, [8, 120])
    >>> net = TransformerASR(
    ...     720, 512, 512, 8, 1, 1, 1024, activation=torch.nn.GELU
    ... )
    >>> encoder = EncoderWrapper(net)
    >>> enc_out = encoder(src)
    >>> enc_out.shape
    torch.Size([8, 120, 512])
    """

    def __init__(self, transformer, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.transformer = transformer

    def forward(self, x, wav_lens=None):
        """Encode ``x`` with the wrapped transformer's ``.encode()``."""
        return self.transformer.encode(x, wav_lens)
| 12,371 | 34.348571 | 119 | py |
speechbrain | speechbrain-main/speechbrain/lobes/models/transformer/Conformer.py | """Conformer implementation.
Authors
* Jianyuan Zhong 2020
* Samuele Cornell 2021
"""
import torch
import torch.nn as nn
from typing import Optional
import speechbrain as sb
import warnings
from speechbrain.nnet.attention import (
RelPosMHAXL,
MultiheadAttention,
PositionalwiseFeedForward,
)
from speechbrain.nnet.normalization import LayerNorm
from speechbrain.nnet.activations import Swish
class ConvolutionModule(nn.Module):
    """Conformer convolution module: LayerNorm, pointwise bottleneck with
    GLU, depthwise convolution, then LayerNorm/activation/pointwise linear
    with dropout.

    Arguments
    ----------
    input_size : int
        The expected size of the input embedding dimension.
    kernel_size: int, optional
        Kernel size of non-bottleneck convolutional layer.
    bias: bool, optional
        Whether to use bias in the non-bottleneck conv layer.
    activation: torch.nn.Module
        Activation function used after non-bottleneck conv layer.
    dropout: float, optional
        Dropout rate.
    causal: bool, optional
        Whether the convolution should be causal or not.
    dilation: int, optional
        Dilation factor for the non bottleneck conv layer.

    Example
    -------
    >>> import torch
    >>> x = torch.rand((8, 60, 512))
    >>> net = ConvolutionModule(512, 3)
    >>> output = net(x)
    >>> output.shape
    torch.Size([8, 60, 512])
    """

    def __init__(
        self,
        input_size,
        kernel_size=31,
        bias=True,
        activation=Swish,
        dropout=0.0,
        causal=False,
        dilation=1,
    ):
        super().__init__()

        self.causal = causal
        # Receptive-field padding for the depthwise conv; the causal case
        # pads fully on the left (excess is chomped in forward), the
        # non-causal case pads half on each side.
        full_pad = (kernel_size - 1) * 2 ** (dilation - 1)
        self.padding = full_pad if self.causal else full_pad // 2

        self.layer_norm = nn.LayerNorm(input_size)
        self.bottleneck = nn.Sequential(
            # pointwise projection to 2x channels, halved again by GLU
            nn.Conv1d(
                input_size, 2 * input_size, kernel_size=1, stride=1, bias=bias
            ),
            nn.GLU(dim=1),
        )
        # depthwise (one conv filter per channel)
        self.conv = nn.Conv1d(
            input_size,
            input_size,
            kernel_size=kernel_size,
            stride=1,
            padding=self.padding,
            dilation=dilation,
            groups=input_size,
            bias=bias,
        )

        self.after_conv = nn.Sequential(
            nn.LayerNorm(input_size),
            activation(),
            # pointwise
            nn.Linear(input_size, input_size, bias=bias),
            nn.Dropout(dropout),
        )

    def forward(self, x, mask=None):
        """ Processes the input tensor x and returns the output an output tensor"""
        hidden = self.layer_norm(x)
        # Conv1d expects [batch, channels, time].
        hidden = hidden.transpose(1, 2)
        hidden = self.conv(self.bottleneck(hidden))

        if self.causal:
            # chomp the right-side overhang introduced by left padding
            hidden = hidden[..., : -self.padding]

        hidden = hidden.transpose(1, 2)
        hidden = self.after_conv(hidden)
        if mask is not None:
            hidden.masked_fill_(mask, 0.0)

        return hidden
class ConformerEncoderLayer(nn.Module):
    """This is an implementation of Conformer encoder layer.

    Arguments
    ----------
    d_model : int
        The expected size of the input embedding.
    d_ffn : int
        Hidden size of self-attention Feed Forward layer.
    nhead : int
        Number of attention heads.
    kernel_size : int, optional
        Kernel size of convolution model.
    kdim : int, optional
        Dimension of the key.
    vdim : int, optional
        Dimension of the value.
    activation: torch.nn.Module
         Activation function used in each Conformer layer.
    bias : bool, optional
        Whether to use bias in the convolution module.
    dropout : int, optional
        Dropout for the encoder.
    causal: bool, optional
        Whether the convolutions should be causal or not.
    attention_type: str, optional
        type of attention layer, e.g. regulaMHA for regular MultiHeadAttention.

    Example
    -------
    >>> import torch
    >>> x = torch.rand((8, 60, 512))
    >>> pos_embs = torch.rand((1, 2*60-1, 512))
    >>> net = ConformerEncoderLayer(d_ffn=512, nhead=8, d_model=512, kernel_size=3)
    >>> output = net(x, pos_embs=pos_embs)
    >>> output[0].shape
    torch.Size([8, 60, 512])
    """

    def __init__(
        self,
        d_model,
        d_ffn,
        nhead,
        kernel_size=31,
        kdim=None,
        vdim=None,
        activation=Swish,
        bias=True,
        dropout=0.0,
        causal=False,
        attention_type="RelPosMHAXL",
    ):
        super().__init__()

        if attention_type == "regularMHA":
            self.mha_layer = MultiheadAttention(
                nhead=nhead,
                d_model=d_model,
                dropout=dropout,
                kdim=kdim,
                vdim=vdim,
            )
        elif attention_type == "RelPosMHAXL":
            # transformerXL style positional encoding
            self.mha_layer = RelPosMHAXL(
                num_heads=nhead,
                embed_dim=d_model,
                dropout=dropout,
                mask_pos_future=causal,
            )

        self.convolution_module = ConvolutionModule(
            d_model, kernel_size, bias, activation, dropout, causal=causal
        )

        # Two feed-forward modules; each contributes with a 0.5 factor in
        # forward() (half-step FFNs around attention and convolution).
        self.ffn_module1 = nn.Sequential(
            nn.LayerNorm(d_model),
            PositionalwiseFeedForward(
                d_ffn=d_ffn,
                input_size=d_model,
                dropout=dropout,
                activation=activation,
            ),
            nn.Dropout(dropout),
        )

        self.ffn_module2 = nn.Sequential(
            nn.LayerNorm(d_model),
            PositionalwiseFeedForward(
                d_ffn=d_ffn,
                input_size=d_model,
                dropout=dropout,
                activation=activation,
            ),
            nn.Dropout(dropout),
        )

        self.norm1 = LayerNorm(d_model)
        self.norm2 = LayerNorm(d_model)
        # NOTE(review): self.drop appears unused in forward() — confirm
        # whether it is intentional (e.g. kept for checkpoint compat).
        self.drop = nn.Dropout(dropout)

    def forward(
        self,
        x,
        src_mask: Optional[torch.Tensor] = None,
        src_key_padding_mask: Optional[torch.Tensor] = None,
        pos_embs: Optional[torch.Tensor] = None,
    ):
        """
        Arguments
        ----------
        src : torch.Tensor
            The sequence to the encoder layer.
        src_mask : torch.Tensor, optional
            The mask for the src sequence.
        src_key_padding_mask : torch.Tensor, optional
            The mask for the src keys per batch.
        pos_embs: torch.Tensor, torch.nn.Module, optional
            Module or tensor containing the input sequence positional embeddings
        """
        # Expand padding mask so it can zero out conv-module outputs
        # per time step.
        conv_mask = None
        if src_key_padding_mask is not None:
            conv_mask = src_key_padding_mask.unsqueeze(-1)
        # first half-step ffn module (residual, scaled by 0.5)
        x = x + 0.5 * self.ffn_module1(x)
        # multi-head attention module (pre-norm, residual)
        skip = x
        x = self.norm1(x)
        x, self_attn = self.mha_layer(
            x,
            x,
            x,
            attn_mask=src_mask,
            key_padding_mask=src_key_padding_mask,
            pos_embs=pos_embs,
        )
        x = x + skip
        # convolution module (residual)
        x = x + self.convolution_module(x, conv_mask)
        # second half-step ffn module, then final norm
        x = self.norm2(x + 0.5 * self.ffn_module2(x))
        return x, self_attn
class ConformerEncoder(nn.Module):
    """Conformer encoder: a stack of ``ConformerEncoderLayer`` modules
    followed by a final layer normalization.

    Arguments
    ---------
    num_layers : int
        Number of layers.
    d_model : int
        Embedding dimension size.
    d_ffn : int
        Hidden size of self-attention Feed Forward layer.
    nhead : int
        Number of attention heads.
    kernel_size : int, optional
        Kernel size of convolution model.
    kdim : int, optional
        Dimension of the key.
    vdim : int, optional
        Dimension of the value.
    activation: torch.nn.Module
        Activation function used in each Conformer layer.
    bias : bool, optional
        Whether the convolution module uses a bias.
    dropout : int, optional
        Dropout for the encoder.
    causal: bool, optional
        Whether the convolutions should be causal or not.
    attention_type: str, optional
        type of attention layer, e.g. regularMHA for regular MultiHeadAttention.

    Example
    -------
    >>> import torch
    >>> x = torch.rand((8, 60, 512))
    >>> pos_emb = torch.rand((1, 2*60-1, 512))
    >>> net = ConformerEncoder(1, 512, 512, 8)
    >>> output, _ = net(x, pos_embs=pos_emb)
    >>> output.shape
    torch.Size([8, 60, 512])
    """

    def __init__(
        self,
        num_layers,
        d_model,
        d_ffn,
        nhead,
        kernel_size=31,
        kdim=None,
        vdim=None,
        activation=Swish,
        bias=True,
        dropout=0.0,
        causal=False,
        attention_type="RelPosMHAXL",
    ):
        super().__init__()
        # All layers share the same hyper-parameters.
        self.layers = torch.nn.ModuleList(
            [
                ConformerEncoderLayer(
                    d_ffn=d_ffn,
                    nhead=nhead,
                    d_model=d_model,
                    kdim=kdim,
                    vdim=vdim,
                    dropout=dropout,
                    activation=activation,
                    kernel_size=kernel_size,
                    bias=bias,
                    causal=causal,
                    attention_type=attention_type,
                )
                for _ in range(num_layers)
            ]
        )
        self.norm = LayerNorm(d_model, eps=1e-6)
        self.attention_type = attention_type

    def forward(
        self,
        src,
        src_mask: Optional[torch.Tensor] = None,
        src_key_padding_mask: Optional[torch.Tensor] = None,
        pos_embs: Optional[torch.Tensor] = None,
    ):
        """Applies every Conformer layer in turn, then the final norm.

        Arguments
        ----------
        src : torch.Tensor
            The sequence to the encoder layer.
        src_mask : torch.Tensor, optional
            The mask for the src sequence.
        src_key_padding_mask : torch.Tensor, optional
            The mask for the src keys per batch.
        pos_embs: torch.Tensor, torch.nn.Module,
            Module or tensor containing the input sequence positional embeddings
            If custom pos_embs are given it needs to have the shape (1, 2*S-1, E)
            where S is the sequence length, and E is the embedding dimension.
        """
        # Relative positional attention cannot work without the embeddings.
        if self.attention_type == "RelPosMHAXL" and pos_embs is None:
            raise ValueError(
                "The chosen attention type for the Conformer is RelPosMHAXL. For this attention type, the positional embeddings are mandatory"
            )

        hidden = src
        attn_maps = []
        for layer in self.layers:
            hidden, attn = layer(
                hidden,
                src_mask=src_mask,
                src_key_padding_mask=src_key_padding_mask,
                pos_embs=pos_embs,
            )
            attn_maps.append(attn)

        return self.norm(hidden), attn_maps
class ConformerDecoderLayer(nn.Module):
    """This is an implementation of Conformer decoder layer.
    Arguments
    ----------
    d_model : int
        The expected size of the input embedding.
    d_ffn : int
        Hidden size of self-attention Feed Forward layer.
    nhead : int
        Number of attention heads.
    kernel_size : int, optional
        Kernel size of convolution model.
    kdim : int, optional
        Dimension of the key.
    vdim : int, optional
        Dimension of the value.
    activation: torch.nn.Module, optional
        Activation function used in each Conformer layer.
    bias : bool, optional
        Whether convolution module.
    dropout : int, optional
        Dropout for the encoder.
    causal: bool, optional
        Whether the convolutions should be causal or not
        (default True, as expected for autoregressive decoding).
    attention_type: str, optional
        type of attention layer, e.g. regularMHA for regular MultiHeadAttention.
    Example
    -------
    >>> import torch
    >>> x = torch.rand((8, 60, 512))
    >>> pos_embs = torch.rand((1, 2*60-1, 512))
    >>> net = ConformerEncoderLayer(d_ffn=512, nhead=8, d_model=512, kernel_size=3)
    >>> output = net(x, pos_embs=pos_embs)
    >>> output[0].shape
    torch.Size([8, 60, 512])
    """
    def __init__(
        self,
        d_model,
        d_ffn,
        nhead,
        kernel_size,
        kdim=None,
        vdim=None,
        activation=Swish,
        bias=True,
        dropout=0.0,
        causal=True,
        attention_type="RelPosMHAXL",
    ):
        super().__init__()
        # A non-causal decoder can attend to future positions; warn because
        # that is almost never what is wanted for autoregressive decoding.
        if not causal:
            warnings.warn(
                "Decoder is not causal, in most applications it should be causal, you have been warned !"
            )
        # Select the attention flavour requested by the caller.
        if attention_type == "regularMHA":
            self.mha_layer = MultiheadAttention(
                nhead=nhead,
                d_model=d_model,
                dropout=dropout,
                kdim=kdim,
                vdim=vdim,
            )
        elif attention_type == "RelPosMHAXL":
            # transformerXL style positional encoding
            self.mha_layer = RelPosMHAXL(
                num_heads=nhead,
                embed_dim=d_model,
                dropout=dropout,
                mask_pos_future=causal,
            )
        self.convolution_module = ConvolutionModule(
            d_model, kernel_size, bias, activation, dropout, causal=causal
        )
        # Two identical feed-forward branches (pre-norm -> FFN -> dropout),
        # applied with a 0.5 scale in forward.
        self.ffn_module1 = nn.Sequential(
            nn.LayerNorm(d_model),
            PositionalwiseFeedForward(
                d_ffn=d_ffn,
                input_size=d_model,
                dropout=dropout,
                activation=activation,
            ),
            nn.Dropout(dropout),
        )
        self.ffn_module2 = nn.Sequential(
            nn.LayerNorm(d_model),
            PositionalwiseFeedForward(
                d_ffn=d_ffn,
                input_size=d_model,
                dropout=dropout,
                activation=activation,
            ),
            nn.Dropout(dropout),
        )
        self.norm1 = LayerNorm(d_model)
        self.norm2 = LayerNorm(d_model)
        self.drop = nn.Dropout(dropout)
    def forward(
        self,
        tgt,
        memory,
        tgt_mask=None,
        memory_mask=None,
        tgt_key_padding_mask=None,
        memory_key_padding_mask=None,
        pos_embs_tgt=None,
        pos_embs_src=None,
    ):
        """
        Arguments
        ----------
        tgt: torch.Tensor
            The sequence to the decoder layer.
        memory: torch.Tensor
            The sequence from the last layer of the encoder.
        tgt_mask: torch.Tensor, optional
            The mask for the tgt sequence.
        memory_mask: torch.Tensor, optional
            The mask for the memory sequence.
        tgt_key_padding_mask : torch.Tensor, optional
            The mask for the tgt keys per batch.
        memory_key_padding_mask : torch.Tensor, optional
            The mask for the memory keys per batch.
        pos_embs_tgt: torch.Tensor, torch.nn.Module, optional
            Module or tensor containing the target sequence positional embeddings for each attention layer.
        pos_embs_src: torch.Tensor, torch.nn.Module, optional
            Module or tensor containing the source sequence positional embeddings for each attention layer.
        """
        # ffn module
        tgt = tgt + 0.5 * self.ffn_module1(tgt)
        # muti-head attention module
        skip = tgt
        x = self.norm1(tgt)
        # Cross-attention: queries come from the target stream, keys/values
        # from the encoder memory. NOTE(review): tgt_mask,
        # tgt_key_padding_mask and pos_embs_tgt are accepted but never used
        # by this layer — there is no target self-attention branch here.
        x, self_attn = self.mha_layer(
            x,
            memory,
            memory,
            attn_mask=memory_mask,
            key_padding_mask=memory_key_padding_mask,
            pos_embs=pos_embs_src,
        )
        x = x + skip
        # convolution module (unlike the encoder layer, no padding mask
        # is passed to the convolution here)
        x = x + self.convolution_module(x)
        # ffn module
        x = self.norm2(x + 0.5 * self.ffn_module2(x))
        # The same attention map is returned twice so that the layer matches
        # the (output, self_attn, multihead_attn) triple that
        # ConformerDecoder expects; only cross-attention is computed here.
        return x, self_attn, self_attn
class ConformerDecoder(nn.Module):
    """Conformer decoder: a stack of ``ConformerDecoderLayer`` modules
    followed by a final layer normalization.

    Arguments
    ----------
    num_layers: int
        Number of layers.
    nhead: int
        Number of attention heads.
    d_ffn: int
        Hidden size of self-attention Feed Forward layer.
    d_model: int
        Embedding dimension size.
    kdim: int, optional
        Dimension for key.
    vdim: int, optional
        Dimension for value.
    dropout: float, optional
        Dropout rate.
    activation: torch.nn.Module, optional
        Activation function used after non-bottleneck conv layer.
    kernel_size : int, optional
        Kernel size of convolutional layer.
    bias : bool, optional
        Whether the convolution module uses a bias.
    causal: bool, optional
        Whether the convolutions should be causal or not.
    attention_type: str, optional
        type of attention layer, e.g. regularMHA for regular MultiHeadAttention.

    Example
    -------
    >>> src = torch.rand((8, 60, 512))
    >>> tgt = torch.rand((8, 60, 512))
    >>> net = ConformerDecoder(1, 8, 1024, 512, attention_type="regularMHA")
    >>> output, _, _ = net(tgt, src)
    >>> output.shape
    torch.Size([8, 60, 512])
    """

    def __init__(
        self,
        num_layers,
        nhead,
        d_ffn,
        d_model,
        kdim=None,
        vdim=None,
        dropout=0.0,
        activation=Swish,
        kernel_size=3,
        bias=True,
        causal=True,
        attention_type="RelPosMHAXL",
    ):
        super().__init__()
        # Every layer is configured identically.
        self.layers = torch.nn.ModuleList(
            [
                ConformerDecoderLayer(
                    d_ffn=d_ffn,
                    nhead=nhead,
                    d_model=d_model,
                    kdim=kdim,
                    vdim=vdim,
                    dropout=dropout,
                    activation=activation,
                    kernel_size=kernel_size,
                    bias=bias,
                    causal=causal,
                    attention_type=attention_type,
                )
                for _ in range(num_layers)
            ]
        )
        self.norm = sb.nnet.normalization.LayerNorm(d_model, eps=1e-6)

    def forward(
        self,
        tgt,
        memory,
        tgt_mask=None,
        memory_mask=None,
        tgt_key_padding_mask=None,
        memory_key_padding_mask=None,
        pos_embs_tgt=None,
        pos_embs_src=None,
    ):
        """Runs the stacked decoder layers, then the final normalization.

        Arguments
        ----------
        tgt: torch.Tensor
            The sequence to the decoder layer.
        memory: torch.Tensor
            The sequence from the last layer of the encoder.
        tgt_mask: torch.Tensor, optional
            The mask for the tgt sequence.
        memory_mask: torch.Tensor, optional
            The mask for the memory sequence.
        tgt_key_padding_mask : torch.Tensor, optional
            The mask for the tgt keys per batch.
        memory_key_padding_mask : torch.Tensor, optional
            The mask for the memory keys per batch.
        pos_embs_tgt: torch.Tensor, torch.nn.Module, optional
            Module or tensor containing the target sequence positional embeddings for each attention layer.
        pos_embs_src: torch.Tensor, torch.nn.Module, optional
            Module or tensor containing the source sequence positional embeddings for each attention layer.
        """
        hidden = tgt
        self_attn_maps, cross_attn_maps = [], []
        for layer in self.layers:
            hidden, self_attn, cross_attn = layer(
                hidden,
                memory,
                tgt_mask=tgt_mask,
                memory_mask=memory_mask,
                tgt_key_padding_mask=tgt_key_padding_mask,
                memory_key_padding_mask=memory_key_padding_mask,
                pos_embs_tgt=pos_embs_tgt,
                pos_embs_src=pos_embs_src,
            )
            self_attn_maps.append(self_attn)
            cross_attn_maps.append(cross_attn)
        return self.norm(hidden), self_attn_maps, cross_attn_maps
| 20,245 | 29.127976 | 146 | py |
speechbrain | speechbrain-main/speechbrain/lobes/models/transformer/TransformerST.py | """Transformer for ST in the SpeechBrain sytle.
Authors
* YAO FEI, CHENG 2021
"""
import torch # noqa 42
import logging
from torch import nn
from typing import Optional
from speechbrain.nnet.containers import ModuleList
from speechbrain.lobes.models.transformer.Transformer import (
get_lookahead_mask,
get_key_padding_mask,
NormalizedEmbedding,
TransformerDecoder,
TransformerEncoder,
)
from speechbrain.lobes.models.transformer.Conformer import ConformerEncoder
from speechbrain.lobes.models.transformer.TransformerASR import TransformerASR
from speechbrain.nnet.activations import Swish
logger = logging.getLogger(__name__)
class TransformerST(TransformerASR):
"""This is an implementation of transformer model for ST.
The architecture is based on the paper "Attention Is All You Need":
https://arxiv.org/pdf/1706.03762.pdf
Arguments
----------
tgt_vocab: int
Size of vocabulary.
input_size: int
Input feature size.
d_model : int, optional
Embedding dimension size.
(default=512).
nhead : int, optional
The number of heads in the multi-head attention models (default=8).
num_encoder_layers : int, optional
The number of sub-encoder-layers in the encoder (default=6).
num_decoder_layers : int, optional
The number of sub-decoder-layers in the decoder (default=6).
dim_ffn : int, optional
The dimension of the feedforward network model (default=2048).
dropout : int, optional
The dropout value (default=0.1).
activation : torch.nn.Module, optional
The activation function of FFN layers.
Recommended: relu or gelu (default=relu).
positional_encoding: str, optional
Type of positional encoding used. e.g. 'fixed_abs_sine' for fixed absolute positional encodings.
normalize_before: bool, optional
Whether normalization should be applied before or after MHA or FFN in Transformer layers.
Defaults to True as this was shown to lead to better performance and training stability.
kernel_size: int, optional
Kernel size in convolutional layers when Conformer is used.
bias: bool, optional
Whether to use bias in Conformer convolutional layers.
encoder_module: str, optional
Choose between Conformer and Transformer for the encoder. The decoder is fixed to be a Transformer.
conformer_activation: torch.nn.Module, optional
Activation module used after Conformer convolutional layers. E.g. Swish, ReLU etc. it has to be a torch Module.
attention_type: str, optional
Type of attention layer used in all Transformer or Conformer layers.
e.g. regularMHA or RelPosMHA.
max_length: int, optional
Max length for the target and source sequence in input.
Used for positional encodings.
causal: bool, optional
Whether the encoder should be causal or not (the decoder is always causal).
If causal the Conformer convolutional layer is causal.
ctc_weight: float
The weight of ctc for asr task
asr_weight: float
The weight of asr task for calculating loss
mt_weight: float
The weight of mt task for calculating loss
asr_tgt_vocab: int
The size of the asr target language
mt_src_vocab: int
The size of the mt source language
Example
-------
>>> src = torch.rand([8, 120, 512])
>>> tgt = torch.randint(0, 720, [8, 120])
>>> net = TransformerST(
... 720, 512, 512, 8, 1, 1, 1024, activation=torch.nn.GELU,
... ctc_weight=1, asr_weight=0.3,
... )
>>> enc_out, dec_out = net.forward(src, tgt)
>>> enc_out.shape
torch.Size([8, 120, 512])
>>> dec_out.shape
torch.Size([8, 120, 512])
"""
def __init__(
self,
tgt_vocab,
input_size,
d_model=512,
nhead=8,
num_encoder_layers=6,
num_decoder_layers=6,
d_ffn=2048,
dropout=0.1,
activation=nn.ReLU,
positional_encoding="fixed_abs_sine",
normalize_before=False,
kernel_size: Optional[int] = 31,
bias: Optional[bool] = True,
encoder_module: Optional[str] = "transformer",
conformer_activation: Optional[nn.Module] = Swish,
attention_type: Optional[str] = "regularMHA",
max_length: Optional[int] = 2500,
causal: Optional[bool] = True,
ctc_weight: float = 0.0,
asr_weight: float = 0.0,
mt_weight: float = 0.0,
asr_tgt_vocab: int = 0,
mt_src_vocab: int = 0,
):
super().__init__(
tgt_vocab=tgt_vocab,
input_size=input_size,
d_model=d_model,
nhead=nhead,
num_encoder_layers=num_encoder_layers,
num_decoder_layers=num_decoder_layers,
d_ffn=d_ffn,
dropout=dropout,
activation=activation,
positional_encoding=positional_encoding,
normalize_before=normalize_before,
kernel_size=kernel_size,
bias=bias,
encoder_module=encoder_module,
conformer_activation=conformer_activation,
attention_type=attention_type,
max_length=max_length,
causal=causal,
)
if ctc_weight < 1 and asr_weight > 0:
self.asr_decoder = TransformerDecoder(
num_layers=num_decoder_layers,
nhead=nhead,
d_ffn=d_ffn,
d_model=d_model,
dropout=dropout,
activation=activation,
normalize_before=normalize_before,
causal=True,
attention_type="regularMHA", # always use regular attention in decoder
)
self.custom_asr_tgt_module = ModuleList(
NormalizedEmbedding(d_model, asr_tgt_vocab)
)
if mt_weight > 0:
self.custom_mt_src_module = ModuleList(
NormalizedEmbedding(d_model, mt_src_vocab)
)
if encoder_module == "transformer":
self.mt_encoder = TransformerEncoder(
nhead=nhead,
num_layers=num_encoder_layers,
d_ffn=d_ffn,
d_model=d_model,
dropout=dropout,
activation=activation,
normalize_before=normalize_before,
causal=self.causal,
attention_type=self.attention_type,
)
elif encoder_module == "conformer":
self.mt_encoder = ConformerEncoder(
nhead=nhead,
num_layers=num_encoder_layers,
d_ffn=d_ffn,
d_model=d_model,
dropout=dropout,
activation=conformer_activation,
kernel_size=kernel_size,
bias=bias,
causal=self.causal,
attention_type=self.attention_type,
)
assert (
normalize_before
), "normalize_before must be True for Conformer"
assert (
conformer_activation is not None
), "conformer_activation must not be None"
# reset parameters using xavier_normal_
self._init_params()
def forward_asr(self, encoder_out, src, tgt, wav_len, pad_idx=0):
"""This method implements a decoding step for asr task
Arguments
----------
encoder_out : tensor
The representation of the encoder (required).
tgt (transcription): tensor
The sequence to the decoder (required).
pad_idx : int
The index for <pad> token (default=0).
"""
# reshpae the src vector to [Batch, Time, Fea] is a 4d vector is given
if src.dim() == 4:
bz, t, ch1, ch2 = src.shape
src = src.reshape(bz, t, ch1 * ch2)
(
src_key_padding_mask,
tgt_key_padding_mask,
src_mask,
tgt_mask,
) = self.make_masks(src, tgt, wav_len, pad_idx=pad_idx)
transcription = self.custom_asr_tgt_module(tgt)
if self.attention_type == "RelPosMHAXL":
transcription = transcription + self.positional_encoding_decoder(
transcription
)
elif self.attention_type == "fixed_abs_sine":
transcription = transcription + self.positional_encoding(
transcription
)
asr_decoder_out, _, _ = self.asr_decoder(
tgt=transcription,
memory=encoder_out,
memory_mask=src_mask,
tgt_mask=tgt_mask,
tgt_key_padding_mask=tgt_key_padding_mask,
memory_key_padding_mask=src_key_padding_mask,
)
return asr_decoder_out
def forward_mt(self, src, tgt, pad_idx=0):
"""This method implements a forward step for mt task
Arguments
----------
src (transcription): tensor
The sequence to the encoder (required).
tgt (translation): tensor
The sequence to the decoder (required).
pad_idx : int
The index for <pad> token (default=0).
"""
(
src_key_padding_mask,
tgt_key_padding_mask,
src_mask,
tgt_mask,
) = self.make_masks_for_mt(src, tgt, pad_idx=pad_idx)
src = self.custom_mt_src_module(src)
if self.attention_type == "RelPosMHAXL":
pos_embs_encoder = self.positional_encoding(src)
elif self.positional_encoding_type == "fixed_abs_sine":
src = src + self.positional_encoding(src)
pos_embs_encoder = None
encoder_out, _ = self.mt_encoder(
src=src,
src_mask=src_mask,
src_key_padding_mask=src_key_padding_mask,
pos_embs=pos_embs_encoder,
)
tgt = self.custom_tgt_module(tgt)
if self.attention_type == "RelPosMHAXL":
# use standard sinusoidal pos encoding in decoder
tgt = tgt + self.positional_encoding_decoder(tgt)
src = src + self.positional_encoding_decoder(src)
elif self.positional_encoding_type == "fixed_abs_sine":
tgt = tgt + self.positional_encoding(tgt)
decoder_out, _, _ = self.decoder(
tgt=tgt,
memory=encoder_out,
memory_mask=src_mask,
tgt_mask=tgt_mask,
tgt_key_padding_mask=tgt_key_padding_mask,
memory_key_padding_mask=src_key_padding_mask,
)
return encoder_out, decoder_out
def forward_mt_decoder_only(self, src, tgt, pad_idx=0):
"""This method implements a forward step for mt task using a wav2vec encoder
(same than above, but without the encoder stack)
Arguments
----------
src (transcription): tensor
output features from the w2v2 encoder
tgt (translation): tensor
The sequence to the decoder (required).
pad_idx : int
The index for <pad> token (default=0).
"""
(
src_key_padding_mask,
tgt_key_padding_mask,
src_mask,
tgt_mask,
) = self.make_masks_for_mt(src, tgt, pad_idx=pad_idx)
tgt = self.custom_tgt_module(tgt)
if self.attention_type == "RelPosMHAXL":
# use standard sinusoidal pos encoding in decoder
tgt = tgt + self.positional_encoding_decoder(tgt)
elif self.positional_encoding_type == "fixed_abs_sine":
tgt = tgt + self.positional_encoding(tgt)
decoder_out, _, multihead = self.decoder(
tgt=tgt,
memory=src,
memory_mask=src_mask,
tgt_mask=tgt_mask,
tgt_key_padding_mask=tgt_key_padding_mask,
memory_key_padding_mask=src_key_padding_mask,
)
return decoder_out
def decode_asr(self, tgt, encoder_out):
"""This method implements a decoding step for the transformer model.
Arguments
---------
tgt : torch.Tensor
The sequence to the decoder.
encoder_out : torch.Tensor
Hidden output of the encoder.
"""
tgt_mask = get_lookahead_mask(tgt)
tgt = self.custom_tgt_module(tgt)
if self.attention_type == "RelPosMHAXL":
# we use fixed positional encodings in the decoder
tgt = tgt + self.positional_encoding_decoder(tgt)
encoder_out = encoder_out + self.positional_encoding_decoder(
encoder_out
)
elif self.positional_encoding_type == "fixed_abs_sine":
tgt = tgt + self.positional_encoding(tgt) # add the encodings here
prediction, _, multihead_attns = self.asr_decoder(
tgt, encoder_out, tgt_mask=tgt_mask,
)
return prediction, multihead_attns[-1]
def make_masks_for_mt(self, src, tgt, pad_idx=0):
"""This method generates the masks for training the transformer model.
Arguments
---------
src : tensor
The sequence to the encoder (required).
tgt : tensor
The sequence to the decoder (required).
pad_idx : int
The index for <pad> token (default=0).
"""
src_key_padding_mask = None
if self.training:
src_key_padding_mask = get_key_padding_mask(src, pad_idx=pad_idx)
tgt_key_padding_mask = get_key_padding_mask(tgt, pad_idx=pad_idx)
src_mask = None
tgt_mask = get_lookahead_mask(tgt)
return src_key_padding_mask, tgt_key_padding_mask, src_mask, tgt_mask
| 13,931 | 34.360406 | 119 | py |
speechbrain | speechbrain-main/templates/hyperparameter_optimization_speaker_id/train.py | #!/usr/bin/env python3
"""Recipe for training a speaker-id system, with hyperparameter optimization support.
For a tutorial on hyperparameter optimization, refer to this tutorial:
https://colab.research.google.com/drive/1b-5EOjZC7M9RvfWZ0Pq0HMV0KmQKu730#scrollTo=lJup9mNnYw_0
The template can use used as a
basic example for any signal classification task such as language_id,
emotion recognition, command classification, etc. The proposed task classifies
28 speakers using Mini Librispeech. This task is very easy. In a real
scenario, you need to use datasets with a larger number of speakers such as
the voxceleb one (see recipes/VoxCeleb). Speechbrain has already some built-in
models for signal classifications (see the ECAPA one in
speechbrain.lobes.models.ECAPA_TDNN.py or the xvector in
speechbrain/lobes/models/Xvector.py)
To run this recipe, do the following:
> python train.py train.yaml
To read the code, first scroll to the bottom to see the "main" code.
This gives a high-level overview of what is going on, while the
Brain class definition provides the details of what happens
for each batch during training.
The first time you run it, this script should automatically download
and prepare the Mini Librispeech dataset for computation. Noise and
reverberation are automatically added to each sample from OpenRIR.
Authors
* Mirco Ravanelli 2021
* Artem Ploujnikov 2021
"""
import os
import sys
import torch
import speechbrain as sb
from hyperpyyaml import load_hyperpyyaml
from mini_librispeech_prepare import prepare_mini_librispeech
from speechbrain.utils import hpopt as hp
# Brain class for speech enhancement training
class SpkIdBrain(sb.Brain):
    """Class that manages the training loop. See speechbrain.core.Brain."""
    def compute_forward(self, batch, stage):
        """Runs all the computation that transforms the input into the
        output probabilities over the N classes.
        Arguments
        ---------
        batch : PaddedBatch
            This batch object contains all the relevant tensors for computation.
        stage : sb.Stage
            One of sb.Stage.TRAIN, sb.Stage.VALID, or sb.Stage.TEST.
        Returns
        -------
        predictions : Tensor
            Tensor that contains the posterior probabilities over the N classes.
        """
        # We first move the batch to the appropriate device.
        batch = batch.to(self.device)
        # Compute features, embeddings, and predictions
        # (embedding_model and classifier come from the hparams YAML).
        feats, lens = self.prepare_features(batch.sig, stage)
        embeddings = self.modules.embedding_model(feats, lens)
        predictions = self.modules.classifier(embeddings)
        return predictions
    def prepare_features(self, wavs, stage):
        """Prepare the features for computation, including augmentation.
        Arguments
        ---------
        wavs : tuple
            Input signals (tensor) and their relative lengths (tensor).
        stage : sb.Stage
            The current stage of training.
        """
        wavs, lens = wavs
        # Add augmentation if specified. In this version of augmentation, we
        # concatenate the original and the augment batches in a single bigger
        # batch. This is more memory-demanding, but helps to improve the
        # performance. Change it if you run OOM.
        # NOTE: compute_objectives doubles the labels the same way, so the
        # two must stay in sync.
        if stage == sb.Stage.TRAIN:
            if hasattr(self.modules, "env_corrupt"):
                wavs_noise = self.modules.env_corrupt(wavs, lens)
                wavs = torch.cat([wavs, wavs_noise], dim=0)
                lens = torch.cat([lens, lens])
            if hasattr(self.hparams, "augmentation"):
                wavs = self.hparams.augmentation(wavs, lens)
        # Feature extraction and normalization
        feats = self.modules.compute_features(wavs)
        feats = self.modules.mean_var_norm(feats, lens)
        return feats, lens
    def compute_objectives(self, predictions, batch, stage):
        """Computes the loss given the predicted and targeted outputs.
        Arguments
        ---------
        predictions : tensor
            The output tensor from `compute_forward`.
        batch : PaddedBatch
            This batch object contains all the relevant tensors for computation.
        stage : sb.Stage
            One of sb.Stage.TRAIN, sb.Stage.VALID, or sb.Stage.TEST.
        Returns
        -------
        loss : torch.Tensor
            A one-element tensor used for backpropagating the gradient.
        """
        _, lens = batch.sig
        spkid, _ = batch.spk_id_encoded
        # Concatenate labels (due to data augmentation)
        # — mirrors the wav doubling done in prepare_features.
        if stage == sb.Stage.TRAIN and hasattr(self.modules, "env_corrupt"):
            spkid = torch.cat([spkid, spkid], dim=0)
            lens = torch.cat([lens, lens])
        # Compute the cost function
        loss = sb.nnet.losses.nll_loss(predictions, spkid, lens)
        # Append this batch of losses to the loss metric for easy
        # summarization at the end of the stage.
        self.loss_metric.append(
            batch.id, predictions, spkid, lens, reduction="batch"
        )
        # Compute classification error at test time
        if stage != sb.Stage.TRAIN:
            self.error_metrics.append(batch.id, predictions, spkid, lens)
        return loss
    def on_stage_start(self, stage, epoch=None):
        """Gets called at the beginning of each epoch.
        Arguments
        ---------
        stage : sb.Stage
            One of sb.Stage.TRAIN, sb.Stage.VALID, or sb.Stage.TEST.
        epoch : int
            The currently-starting epoch. This is passed
            `None` during the test stage.
        """
        # Set up statistics trackers for this stage
        self.loss_metric = sb.utils.metric_stats.MetricStats(
            metric=sb.nnet.losses.nll_loss
        )
        # Set up evaluation-only statistics trackers
        if stage != sb.Stage.TRAIN:
            self.error_metrics = self.hparams.error_stats()
    def on_stage_end(self, stage, stage_loss, epoch=None):
        """Gets called at the end of an epoch.
        Arguments
        ---------
        stage : sb.Stage
            One of sb.Stage.TRAIN, sb.Stage.VALID, sb.Stage.TEST
        stage_loss : float
            The average loss for all of the data processed in this stage.
        epoch : int
            The currently-starting epoch. This is passed
            `None` during the test stage.
        """
        # Store the train loss until the validation stage.
        if stage == sb.Stage.TRAIN:
            self.train_loss = stage_loss
        # Summarize the statistics from the stage for record-keeping.
        else:
            stats = {
                "loss": stage_loss,
                "error": self.error_metrics.summarize("average"),
            }
        # At the end of validation...
        if stage == sb.Stage.VALID:
            old_lr, new_lr = self.hparams.lr_annealing(epoch)
            sb.nnet.schedulers.update_learning_rate(self.optimizer, new_lr)
            # The train_logger writes a summary to stdout and to the logfile.
            self.hparams.train_logger.log_stats(
                {"Epoch": epoch, "lr": old_lr},
                train_stats={"loss": self.train_loss},
                valid_stats=stats,
            )
            # Save the current checkpoint and delete previous checkpoints,
            # unless checkpointing is disabled (e.g. during hpopt trials).
            if self.hparams.ckpt_enable:
                self.checkpointer.save_and_keep_only(
                    meta=stats, min_keys=["error"]
                )
            # Report the validation stats to the hyperparameter-optimization
            # context (no-op when hpopt is not active).
            hp.report_result(stats)
        # We also write statistics about test data to stdout and to the logfile.
        if stage == sb.Stage.TEST:
            self.hparams.train_logger.log_stats(
                {"Epoch loaded": self.hparams.epoch_counter.current},
                test_stats=stats,
            )
def dataio_prep(hparams):
    """Prepares the datasets to be used in the brain class and defines the
    data-processing pipeline through user-defined functions.

    We expect `prepare_mini_librispeech` to have been called before this,
    so that the `train.json`, `valid.json`, and `test.json` manifest files
    are available.

    Arguments
    ---------
    hparams : dict
        This dictionary is loaded from the `train.yaml` file, and it includes
        all the hyperparameters needed for dataset construction and loading.

    Returns
    -------
    datasets : dict
        Contains the keys "train", "valid" and "test", each mapping to the
        corresponding DynamicItemDataset object.
    """
    # The label encoder assigns a unique integer index to every observed
    # speaker label (e.g, 'spk01': 0, 'spk02': 1, ...).
    label_encoder = sb.dataio.encoder.CategoricalEncoder()

    @sb.utils.data_pipeline.takes("wav")
    @sb.utils.data_pipeline.provides("sig")
    def audio_pipeline(wav):
        """Load the signal, and pass it and its length to the corruption class.
        This is done on the CPU in the `collate_fn`."""
        return sb.dataio.dataio.read_audio(wav)

    @sb.utils.data_pipeline.takes("spk_id")
    @sb.utils.data_pipeline.provides("spk_id", "spk_id_encoded")
    def label_pipeline(spk_id):
        """Defines the pipeline to process the input speaker label."""
        yield spk_id
        yield label_encoder.encode_label_torch(spk_id)

    # Shuffling is handled elsewhere, so it is disabled in the loader here.
    hparams["dataloader_options"]["shuffle"] = False

    # Build one dataset per split, wired to the processing functions above.
    datasets = {
        split: sb.dataio.dataset.DynamicItemDataset.from_json(
            json_path=json_path,
            replacements={"data_root": hparams["data_folder"]},
            dynamic_items=[audio_pipeline, label_pipeline],
            output_keys=["id", "sig", "spk_id_encoded"],
        )
        for split, json_path in (
            ("train", hparams["train_annotation"]),
            ("valid", hparams["valid_annotation"]),
            ("test", hparams["test_annotation"]),
        )
    }

    # Load or compute the label encoder (with multi-GPU DDP support).
    # Please, take a look into the lab_enc_file to see the label to index
    # mapping.
    lab_enc_file = os.path.join(hparams["save_folder"], "label_encoder.txt")
    label_encoder.load_or_create(
        path=lab_enc_file,
        from_didatasets=[datasets["train"]],
        output_key="spk_id",
    )

    return datasets
# Recipe begins!
if __name__ == "__main__":
    # Wrap the whole recipe in a hyperparameter-optimization context: when
    # hpopt is active, parse_arguments injects trial overrides, and
    # hp.report_result (called from on_stage_end) reports the "error"
    # objective back to the orchestrator.
    with hp.hyperparameter_optimization(objective_key="error") as hp_ctx:
        # Reading command line arguments
        hparams_file, run_opts, overrides = hp_ctx.parse_arguments(sys.argv[1:])
        # Initialize ddp (useful only for multi-GPU DDP training).
        sb.utils.distributed.ddp_init_group(run_opts)
        # Load hyperparameters file with command-line overrides.
        with open(hparams_file) as fin:
            hparams = load_hyperpyyaml(fin, overrides)
        # Create experiment directory
        sb.create_experiment_directory(
            experiment_directory=hparams["output_folder"],
            hyperparams_to_save=hparams_file,
            overrides=overrides,
        )
        # Data preparation, to be run on only one process.
        if not hparams["skip_prep"]:
            sb.utils.distributed.run_on_main(
                prepare_mini_librispeech,
                kwargs={
                    "data_folder": hparams["data_folder"],
                    "save_json_train": hparams["train_annotation"],
                    "save_json_valid": hparams["valid_annotation"],
                    "save_json_test": hparams["test_annotation"],
                    "split_ratio": hparams["split_ratio"],
                },
            )
        # Create dataset objects "train", "valid", and "test".
        datasets = dataio_prep(hparams)
        # Initialize the Brain object to prepare for mask training.
        spk_id_brain = SpkIdBrain(
            modules=hparams["modules"],
            opt_class=hparams["opt_class"],
            hparams=hparams,
            run_opts=run_opts,
            checkpointer=hparams["checkpointer"],
        )
        # The `fit()` method iterates the training loop, calling the methods
        # necessary to update the parameters of the model. Since all objects
        # with changing state are managed by the Checkpointer, training can be
        # stopped at any point, and will be resumed on next call.
        spk_id_brain.fit(
            epoch_counter=spk_id_brain.hparams.epoch_counter,
            train_set=datasets["train"],
            valid_set=datasets["valid"],
            train_loader_kwargs=hparams["dataloader_options"],
            valid_loader_kwargs=hparams["dataloader_options"],
        )
        # Skip the final evaluation during a hyperparameter search: each
        # trial is judged on validation error only, keeping the test set
        # unseen until the final run.
        if not hp_ctx.enabled:
            # Load the best checkpoint for evaluation
            test_stats = spk_id_brain.evaluate(
                test_set=datasets["test"],
                min_key="error",
                test_loader_kwargs=hparams["dataloader_options"],
            )
| 13,148 | 35.935393 | 95 | py |
speechbrain | speechbrain-main/templates/speech_recognition/LM/custom_model.py | """
This file contains a very simple PyTorch module to use for language modeling.
To replace this model, change the `!new:` tag in the hyperparameter file
to refer to a built-in SpeechBrain model or another file containing
a custom PyTorch module. Instead of this simple model, we suggest using one
of the following built-in neural models:
RNN-LM: speechbrain.lobes.models.RNNLM.RNNLM
transformer: speechbrain.lobes.models.transformers.TransformerLM.TransformerLM
Authors
* Mirco Ravanelli 2021
"""
import torch
import speechbrain as sb
class CustomModel(torch.nn.Module):
    """Basic LSTM model for language modeling.

    Arguments
    ---------
    embedding_dim : int
        The dimension of the embeddings. The input indexes are transformed
        into a latent space with this dimensionality.
    rnn_size : int
        Number of neurons to use in rnn (for each direction -> and <-).
    layers : int
        Number of RNN layers to use.
    output_dim : int
        Dimensionality of the output (vocabulary size).
    return_hidden : bool
        If True, returns the hidden state of the RNN as well.
    """

    def __init__(
        self,
        embedding_dim=128,
        rnn_size=256,
        layers=2,
        output_dim=1000,
        return_hidden=False,
    ):
        super().__init__()
        self.return_hidden = return_hidden
        # Tracks whether the current batch needed a time-axis added.
        # Kept as an attribute for backward compatibility; it is now
        # recomputed on every forward call (see fix below).
        self.reshape = False
        # Embedding model
        self.embedding = sb.nnet.embedding.Embedding(
            num_embeddings=output_dim, embedding_dim=embedding_dim
        )
        # LSTM
        self.rnn = torch.nn.LSTM(
            input_size=embedding_dim,
            hidden_size=rnn_size,
            bidirectional=False,
            num_layers=layers,
        )
        # Final output transformation + softmax
        self.out = sb.nnet.linear.Linear(
            input_size=rnn_size, n_neurons=output_dim
        )
        self.log_softmax = sb.nnet.activations.Softmax(apply_log=True)

    def forward(self, x, hx=None):
        """List of computations from input to output predictions.

        Arguments
        ---------
        x : torch.Tensor
            Token indexes, shape (batch, time) or (batch,) at inference.
        hx : tuple, optional
            Initial LSTM hidden/cell state (passed straight to the LSTM).
        """
        # BUGFIX: reset the flag on every call. Previously it was only
        # ever set to True, so after one 2-d batch (inference/beamsearch)
        # every later 3-d batch with a singleton time axis would be
        # squeezed incorrectly.
        self.reshape = False
        x = self.embedding(x)
        # If 2d tensor, add a time-axis.
        # This is used at inference time (during beamforming).
        if len(x.shape) == 2:
            x = x.unsqueeze(dim=1)
            self.reshape = True
        # LSTM expects (time, batch, feats); transpose in and back out.
        x = x.transpose(0, 1)
        x, hidden = self.rnn(x, hx)
        x = x.transpose(0, 1)
        x = self.out(x)
        x = self.log_softmax(x)
        # Remove the time-axis we added for this batch, if any.
        if self.reshape:
            x = x.squeeze(dim=1)
        if self.return_hidden:
            return x, hidden
        else:
            return x
| 2,590 | 27.163043 | 78 | py |
speechbrain | speechbrain-main/templates/speech_recognition/LM/train.py | #!/usr/bin/env python3
"""Recipe for training a language model with a given text corpus.
> python train.py RNNLM.yaml
To run this recipe, you need to first install the Huggingface dataset:
> pip install datasets
Authors
* Ju-Chieh Chou 2020
* Jianyuan Zhong 2021
* Mirco Ravanelli 2021
"""
import sys
import logging
import torch
from datasets import load_dataset
from hyperpyyaml import load_hyperpyyaml
import speechbrain as sb
from speechbrain.utils.distributed import run_on_main
logger = logging.getLogger(__name__)
# Brain class for language model training
class LM(sb.core.Brain):
    """Class that manages the training loop. See speechbrain.core.Brain."""

    def compute_forward(self, batch, stage):
        """Predicts the next word given the previous ones.

        Arguments
        ---------
        batch : PaddedBatch
            This batch object contains all the relevant tensors for computation.
        stage : sb.Stage
            One of sb.Stage.TRAIN, sb.Stage.VALID, or sb.Stage.TEST.

        Returns
        -------
        predictions : torch.Tensor
            A tensor containing the posterior probabilities (predictions).
        """
        batch = batch.to(self.device)
        # Feed the BOS-prefixed token sequence to the LM.
        tokens_bos, _ = batch.tokens_bos
        pred = self.hparams.model(tokens_bos)
        return pred

    def compute_objectives(self, predictions, batch, stage):
        """Computes the loss given the predicted and targeted outputs.

        Arguments
        ---------
        predictions : torch.Tensor
            The posterior probabilities from `compute_forward`.
        batch : PaddedBatch
            This batch object contains all the relevant tensors for computation.
        stage : sb.Stage
            One of sb.Stage.TRAIN, sb.Stage.VALID, or sb.Stage.TEST.

        Returns
        -------
        loss : torch.Tensor
            A one-element tensor used for backpropagating the gradient.
        """
        batch = batch.to(self.device)
        # Targets are the sequence shifted by one, terminated with EOS;
        # lengths mask out the padded positions in the loss.
        tokens_eos, tokens_len = batch.tokens_eos
        loss = self.hparams.compute_cost(
            predictions, tokens_eos, length=tokens_len
        )
        return loss

    def fit_batch(self, batch):
        """Runs all the steps needed to train the model on a single batch.

        Arguments
        ---------
        batch : PaddedBatch
            This batch object contains all the relevant tensors for computation.

        Returns
        -------
        Loss : torch.Tensor
            A tensor containing the loss (single real number).
        """
        predictions = self.compute_forward(batch, sb.Stage.TRAIN)
        loss = self.compute_objectives(predictions, batch, sb.Stage.TRAIN)
        # Loss backpropagation (gradient computation). Dividing by
        # accu_steps keeps the accumulated gradient comparable to one
        # big batch.
        (loss / self.hparams.accu_steps).backward()
        # Manage gradient accumulation: update only every accu_steps batches.
        if self.step % self.hparams.accu_steps == 0:
            # Gradient clipping & early stop if loss is not finite
            self.check_gradients(loss)
            # Update the parameters
            self.optimizer.step()
            # Reset the gradient
            self.optimizer.zero_grad()
            # Noam / CyclicCosine schedulers anneal per optimizer step;
            # other schedulers are updated once per epoch in on_stage_end.
            if isinstance(
                self.hparams.lr_annealing, sb.nnet.schedulers.NoamScheduler
            ) or isinstance(
                self.hparams.lr_annealing,
                sb.nnet.schedulers.CyclicCosineScheduler,
            ):
                self.hparams.lr_annealing(self.optimizer)
        return loss

    def on_stage_end(self, stage, stage_loss, epoch):
        """Gets called at the end of an epoch.

        Arguments
        ---------
        stage : sb.Stage
            One of sb.Stage.TRAIN, sb.Stage.VALID, sb.Stage.TEST
        stage_loss : float
            The average loss for all of the data processed in this stage.
        epoch : int
            The currently-starting epoch. This is passed
            `None` during the test stage.
        """
        # Store the train loss until the validation stage.
        if stage == sb.Stage.TRAIN:
            self.train_loss = stage_loss
        # Summarize the statistics from the stage for record-keeping.
        else:
            stats = {
                "loss": stage_loss,
            }
        # At the end of validation, anneal the LR, log, and checkpoint.
        if stage == sb.Stage.VALID:
            # Update learning rate
            old_lr, new_lr = self.hparams.lr_annealing(stage_loss)
            sb.nnet.schedulers.update_learning_rate(self.optimizer, new_lr)
            # The train_logger writes a summary to stdout and to the logfile.
            self.hparams.train_logger.log_stats(
                {"Epoch": epoch},
                train_stats={"loss": self.train_loss},
                valid_stats=stats,
            )
            # Save the current checkpoint and delete previous checkpoints.
            self.checkpointer.save_and_keep_only(meta=stats, min_keys=["loss"])
        # We also write statistics about test data to stdout and to the logfile.
        if stage == sb.Stage.TEST:
            self.hparams.train_logger.log_stats(
                {"Epoch loaded": self.hparams.epoch_counter.current},
                test_stats=stats,
            )
def dataio_prepare(hparams):
    """This function prepares the datasets to be used in the brain class.
    It also defines the data processing pipeline through user-defined functions.
    The language model is trained with the text files specified by the user in
    the hyperparameter file.

    Arguments
    ---------
    hparams : dict
        This dictionary is loaded from the `train.yaml` file, and it includes
        all the hyperparameters needed for dataset construction and loading.

    Returns
    -------
    (train_data, valid_data, test_data) : tuple of DynamicItemDataset
        The "train", "valid", and "test" DynamicItemDataset objects,
        returned as a 3-tuple in that order.
    """
    logging.info("generating datasets...")
    # Prepare datasets
    datasets = load_dataset(
        "text",
        data_files={
            "train": hparams["lm_train_data"],
            "valid": hparams["lm_valid_data"],
            "test": hparams["lm_test_data"],
        },
    )
    # Convert huggingface's dataset to DynamicItemDataset via a magical function
    train_data = sb.dataio.dataset.DynamicItemDataset.from_arrow_dataset(
        datasets["train"]
    )
    valid_data = sb.dataio.dataset.DynamicItemDataset.from_arrow_dataset(
        datasets["valid"]
    )
    test_data = sb.dataio.dataset.DynamicItemDataset.from_arrow_dataset(
        datasets["test"]
    )
    datasets = [train_data, valid_data, test_data]
    tokenizer = hparams["tokenizer"]

    # Define text processing pipeline. We start from the raw text and then
    # encode it using the tokenizer. The tokens with bos are used for feeding
    # the neural network, the tokens with eos for computing the cost function.
    @sb.utils.data_pipeline.takes("text")
    @sb.utils.data_pipeline.provides("text", "tokens_bos", "tokens_eos")
    def text_pipeline(text):
        """Defines the pipeline that processes the input text."""
        yield text
        tokens_list = tokenizer.encode_as_ids(text)
        tokens_bos = torch.LongTensor([hparams["bos_index"]] + (tokens_list))
        yield tokens_bos
        tokens_eos = torch.LongTensor(tokens_list + [hparams["eos_index"]])
        yield tokens_eos

    sb.dataio.dataset.add_dynamic_item(datasets, text_pipeline)
    # 4. Set outputs to add into the batch. The batch variable will contain
    # all these fields (e.g, batch.id, batch.text, batch.tokens.bos,..)
    sb.dataio.dataset.set_output_keys(
        datasets, ["id", "text", "tokens_bos", "tokens_eos"],
    )
    return train_data, valid_data, test_data
# Recipe begins!
if __name__ == "__main__":
    # Reading command line arguments
    hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])
    # Initialize ddp (useful only for multi-GPU DDP training)
    sb.utils.distributed.ddp_init_group(run_opts)
    # Load hyperparameters file with command-line overrides
    with open(hparams_file) as fin:
        hparams = load_hyperpyyaml(fin, overrides)
    # Create experiment directory
    sb.create_experiment_directory(
        experiment_directory=hparams["output_folder"],
        hyperparams_to_save=hparams_file,
        overrides=overrides,
    )
    # We download the tokenizer from HuggingFace (or elsewhere depending on
    # the path given in the YAML file). Collection runs on the main process
    # only; loading happens on every process.
    run_on_main(hparams["pretrainer"].collect_files)
    hparams["pretrainer"].load_collected(device=run_opts["device"])
    # Create dataset objects "train", "valid", and "test"
    train_data, valid_data, test_data = dataio_prepare(hparams)
    # Initialize the Brain object to prepare for LM training.
    lm_brain = LM(
        modules=hparams["modules"],
        opt_class=hparams["optimizer"],
        hparams=hparams,
        run_opts=run_opts,
        checkpointer=hparams["checkpointer"],
    )
    # The `fit()` method iterates the training loop, calling the methods
    # necessary to update the parameters of the model. Since all objects
    # with changing state are managed by the Checkpointer, training can be
    # stopped at any point, and will be resumed on next call.
    lm_brain.fit(
        lm_brain.hparams.epoch_counter,
        train_data,
        valid_data,
        train_loader_kwargs=hparams["train_dataloader_opts"],
        valid_loader_kwargs=hparams["valid_dataloader_opts"],
    )
    # Load best checkpoint (lowest validation loss) for evaluation.
    test_stats = lm_brain.evaluate(
        test_data,
        min_key="loss",
        test_loader_kwargs=hparams["test_dataloader_opts"],
    )
| 9,669 | 32.344828 | 80 | py |
speechbrain | speechbrain-main/templates/speech_recognition/ASR/train.py | #!/usr/bin/env/python3
"""Recipe for training a sequence-to-sequence ASR system with mini-librispeech.
The system employs an encoder, a decoder, and an attention mechanism
between them. Decoding is performed with beam search coupled with a neural
language model.
To run this recipe, do the following:
> python train.py train.yaml
With the default hyperparameters, the system employs an LSTM encoder.
The decoder is based on a standard GRU. Beam search coupled with an RNN language
model is used on the top of decoder probabilities.
The neural network is trained on both CTC and negative-log likelihood
targets and sub-word units estimated with Byte Pairwise Encoding (BPE)
are used as basic recognition tokens. Training is performed on the mini-librispeech
dataset. Note that this is a tiny dataset used here just to
provide a working example. To achieve a better performance you have to train with
larger datasets, such as the full LibriSpeech one. In this case, to allow the
model to converge, we pre-train it with a bigger one (trained on the full librispeech
with the seq2seq 1k BPE recipe).
The experiment file is flexible enough to support a large variety of
different systems. By properly changing the parameter files, you can try
different encoders, decoders, tokens (e.g, characters instead of BPE).
This recipe assumes that the tokenizer and the LM are already trained.
To avoid token mismatches, the tokenizer used for the acoustic model is
the same use for the LM. The recipe downloads the pre-trained tokenizer
and LM.
If you would like to train a full system from scratch do the following:
1- Train a tokenizer (see ../Tokenizer)
2- Train a language model (see ../LM)
3- Train the speech recognizer (with this code).
Authors
* Mirco Ravanelli 2020
* Ju-Chieh Chou 2020
* Abdel Heba 2020
* Peter Plantinga 2020
* Samuele Cornell 2020
"""
import sys
import torch
import logging
import speechbrain as sb
from hyperpyyaml import load_hyperpyyaml
from mini_librispeech_prepare import prepare_mini_librispeech
from speechbrain.utils.distributed import run_on_main
logger = logging.getLogger(__name__)
# Brain class for speech recognition training
class ASR(sb.Brain):
    """Class that manages the training loop. See speechbrain.core.Brain."""

    def compute_forward(self, batch, stage):
        """Runs all the computation of the CTC + seq2seq ASR. It returns the
        posterior probabilities of the CTC and seq2seq networks.

        Arguments
        ---------
        batch : PaddedBatch
            This batch object contains all the relevant tensors for computation.
        stage : sb.Stage
            One of sb.Stage.TRAIN, sb.Stage.VALID, or sb.Stage.TEST.

        Returns
        -------
        predictions : dict
            At training time it returns predicted seq2seq log probabilities.
            If needed it also returns the ctc output log probabilities.
            At validation/test time, it returns the predicted tokens as well.
        """
        # We first move the batch to the appropriate device.
        batch = batch.to(self.device)
        # feat_lens is kept on self because compute_objectives needs it
        # for the CTC loss.
        feats, self.feat_lens = self.prepare_features(stage, batch.sig)
        tokens_bos, _ = self.prepare_tokens(stage, batch.tokens_bos)
        # Running the encoder (prevent propagation to feature extraction)
        encoded_signal = self.modules.encoder(feats.detach())
        # Embed tokens and pass tokens & encoded signal to decoder
        embedded_tokens = self.modules.embedding(tokens_bos)
        decoder_outputs, _ = self.modules.decoder(
            embedded_tokens, encoded_signal, self.feat_lens
        )
        # Output layer for seq2seq log-probabilities
        logits = self.modules.seq_lin(decoder_outputs)
        predictions = {"seq_logprobs": self.hparams.log_softmax(logits)}
        if self.is_ctc_active(stage):
            # Output layer for ctc log-probabilities
            ctc_logits = self.modules.ctc_lin(encoded_signal)
            predictions["ctc_logprobs"] = self.hparams.log_softmax(ctc_logits)
        elif stage == sb.Stage.VALID:
            predictions["tokens"], _ = self.hparams.valid_search(
                encoded_signal, self.feat_lens
            )
        elif stage == sb.Stage.TEST:
            predictions["tokens"], _ = self.hparams.test_search(
                encoded_signal, self.feat_lens
            )
        return predictions

    def is_ctc_active(self, stage):
        """Check if CTC is currently active. CTC is used only during
        training and only for the first `number_of_ctc_epochs` epochs.

        Arguments
        ---------
        stage : sb.Stage
            Currently executing stage.
        """
        if stage != sb.Stage.TRAIN:
            return False
        current_epoch = self.hparams.epoch_counter.current
        return current_epoch <= self.hparams.number_of_ctc_epochs

    def prepare_features(self, stage, wavs):
        """Prepare features for computation on-the-fly

        Arguments
        ---------
        stage : sb.Stage
            Currently executing stage.
        wavs : tuple
            The input signals (tensor) and their lengths (tensor).
        """
        wavs, wav_lens = wavs
        # Add augmentation if specified. In this version of augmentation, we
        # concatenate the original and the augment batches in a single bigger
        # batch. This is more memory-demanding, but helps to improve the
        # performance. Change it if you run OOM.
        if stage == sb.Stage.TRAIN:
            if hasattr(self.modules, "env_corrupt"):
                wavs_noise = self.modules.env_corrupt(wavs, wav_lens)
                wavs = torch.cat([wavs, wavs_noise], dim=0)
                wav_lens = torch.cat([wav_lens, wav_lens])
            if hasattr(self.hparams, "augmentation"):
                wavs = self.hparams.augmentation(wavs, wav_lens)
        # Feature computation and normalization
        feats = self.hparams.compute_features(wavs)
        feats = self.modules.normalize(feats, wav_lens)
        return feats, wav_lens

    def prepare_tokens(self, stage, tokens):
        """Double the tokens batch if features are doubled
        (i.e., when env_corrupt concatenated clean + noisy signals).

        Arguments
        ---------
        stage : sb.Stage
            Currently executing stage.
        tokens : tuple
            The tokens (tensor) and their lengths (tensor).
        """
        tokens, token_lens = tokens
        if hasattr(self.modules, "env_corrupt") and stage == sb.Stage.TRAIN:
            tokens = torch.cat([tokens, tokens], dim=0)
            token_lens = torch.cat([token_lens, token_lens], dim=0)
        return tokens, token_lens

    def compute_objectives(self, predictions, batch, stage):
        """Computes the loss given the predicted and targeted outputs. We here
        do multi-task learning and the loss is a weighted sum of the ctc + seq2seq
        costs.

        Arguments
        ---------
        predictions : dict
            The output dict from `compute_forward`.
        batch : PaddedBatch
            This batch object contains all the relevant tensors for computation.
        stage : sb.Stage
            One of sb.Stage.TRAIN, sb.Stage.VALID, or sb.Stage.TEST.

        Returns
        -------
        loss : torch.Tensor
            A one-element tensor used for backpropagating the gradient.
        """
        # Compute sequence loss against targets with EOS
        tokens_eos, tokens_eos_lens = self.prepare_tokens(
            stage, batch.tokens_eos
        )
        loss = sb.nnet.losses.nll_loss(
            log_probabilities=predictions["seq_logprobs"],
            targets=tokens_eos,
            length=tokens_eos_lens,
            label_smoothing=self.hparams.label_smoothing,
        )
        # Add ctc loss if necessary. The total cost is a weighted sum of
        # ctc loss + seq2seq loss
        if self.is_ctc_active(stage):
            # Load tokens without EOS as CTC targets
            tokens, tokens_lens = self.prepare_tokens(stage, batch.tokens)
            loss_ctc = self.hparams.ctc_cost(
                predictions["ctc_logprobs"], tokens, self.feat_lens, tokens_lens
            )
            loss *= 1 - self.hparams.ctc_weight
            loss += self.hparams.ctc_weight * loss_ctc
        if stage != sb.Stage.TRAIN:
            # Convert predicted tokens from indexes to words
            predicted_words = [
                self.hparams.tokenizer.decode_ids(prediction).split(" ")
                for prediction in predictions["tokens"]
            ]
            target_words = [words.split(" ") for words in batch.words]
            # Monitor word error rate and character error rate at
            # valid and test time.
            self.wer_metric.append(batch.id, predicted_words, target_words)
            self.cer_metric.append(batch.id, predicted_words, target_words)
        return loss

    def on_stage_start(self, stage, epoch):
        """Gets called at the beginning of each epoch.

        Arguments
        ---------
        stage : sb.Stage
            One of sb.Stage.TRAIN, sb.Stage.VALID, or sb.Stage.TEST.
        epoch : int
            The currently-starting epoch. This is passed
            `None` during the test stage.
        """
        # Set up statistics trackers for this stage
        # In this case, we would like to keep track of the word error rate (wer)
        # and the character error rate (cer)
        if stage != sb.Stage.TRAIN:
            self.cer_metric = self.hparams.cer_computer()
            self.wer_metric = self.hparams.error_rate_computer()

    def on_stage_end(self, stage, stage_loss, epoch):
        """Gets called at the end of an epoch.

        Arguments
        ---------
        stage : sb.Stage
            One of sb.Stage.TRAIN, sb.Stage.VALID, sb.Stage.TEST
        stage_loss : float
            The average loss for all of the data processed in this stage.
        epoch : int
            The currently-starting epoch. This is passed
            `None` during the test stage.
        """
        # Store the train loss until the validation stage.
        stage_stats = {"loss": stage_loss}
        if stage == sb.Stage.TRAIN:
            self.train_stats = stage_stats
        # Summarize the statistics from the stage for record-keeping.
        else:
            stage_stats["CER"] = self.cer_metric.summarize("error_rate")
            stage_stats["WER"] = self.wer_metric.summarize("error_rate")
        # Perform end-of-iteration things, like annealing, logging, etc.
        if stage == sb.Stage.VALID:
            # Update learning rate
            old_lr, new_lr = self.hparams.lr_annealing(stage_stats["WER"])
            sb.nnet.schedulers.update_learning_rate(self.optimizer, new_lr)
            # The train_logger writes a summary to stdout and to the logfile.
            self.hparams.train_logger.log_stats(
                stats_meta={"epoch": epoch, "lr": old_lr},
                train_stats=self.train_stats,
                valid_stats=stage_stats,
            )
            # Save the current checkpoint and delete previous checkpoints.
            self.checkpointer.save_and_keep_only(
                meta={"WER": stage_stats["WER"]}, min_keys=["WER"],
            )
        # We also write statistics about test data to stdout and to the logfile.
        elif stage == sb.Stage.TEST:
            self.hparams.train_logger.log_stats(
                stats_meta={"Epoch loaded": self.hparams.epoch_counter.current},
                test_stats=stage_stats,
            )
            with open(self.hparams.wer_file, "w") as w:
                self.wer_metric.write_stats(w)
def dataio_prepare(hparams):
    """This function prepares the datasets to be used in the brain class.
    It also defines the data processing pipeline through user-defined functions.

    Arguments
    ---------
    hparams : dict
        This dictionary is loaded from the `train.yaml` file, and it includes
        all the hyperparameters needed for dataset construction and loading.

    Returns
    -------
    datasets : dict
        Dictionary containing "train", "valid", and "test" keys that correspond
        to the DynamicItemDataset objects.
    """
    # Define audio pipeline. In this case, we simply read the path contained
    # in the variable wav with the audio reader.
    @sb.utils.data_pipeline.takes("wav")
    @sb.utils.data_pipeline.provides("sig")
    def audio_pipeline(wav):
        """Load the audio signal. This is done on the CPU in the `collate_fn`."""
        sig = sb.dataio.dataio.read_audio(wav)
        return sig

    # Define text processing pipeline. We start from the raw text and then
    # encode it using the tokenizer. The tokens with BOS are used for feeding
    # decoder during training, the tokens with EOS for computing the cost function.
    # The tokens without BOS or EOS is for computing CTC loss.
    @sb.utils.data_pipeline.takes("words")
    @sb.utils.data_pipeline.provides(
        "words", "tokens_list", "tokens_bos", "tokens_eos", "tokens"
    )
    def text_pipeline(words):
        """Processes the transcriptions to generate proper labels"""
        yield words
        tokens_list = hparams["tokenizer"].encode_as_ids(words)
        yield tokens_list
        tokens_bos = torch.LongTensor([hparams["bos_index"]] + tokens_list)
        yield tokens_bos
        tokens_eos = torch.LongTensor(tokens_list + [hparams["eos_index"]])
        yield tokens_eos
        tokens = torch.LongTensor(tokens_list)
        yield tokens

    # Define datasets from json data manifest file
    # Define datasets sorted by ascending lengths for efficiency
    datasets = {}
    data_folder = hparams["data_folder"]
    data_info = {
        "train": hparams["train_annotation"],
        "valid": hparams["valid_annotation"],
        "test": hparams["test_annotation"],
    }
    for dataset in data_info:
        datasets[dataset] = sb.dataio.dataset.DynamicItemDataset.from_json(
            json_path=data_info[dataset],
            replacements={"data_root": data_folder},
            dynamic_items=[audio_pipeline, text_pipeline],
            output_keys=[
                "id",
                "sig",
                "words",
                "tokens_bos",
                "tokens_eos",
                "tokens",
            ],
        )
        # Shuffling is disabled by default; the "random" sorting option
        # below re-enables it for the training set only.
        hparams[f"{dataset}_dataloader_opts"]["shuffle"] = False
    # Sorting training data with ascending order makes the code much
    # faster because we minimize zero-padding. In most of the cases, this
    # does not harm the performance.
    if hparams["sorting"] == "ascending":
        datasets["train"] = datasets["train"].filtered_sorted(sort_key="length")
        hparams["train_dataloader_opts"]["shuffle"] = False
    elif hparams["sorting"] == "descending":
        datasets["train"] = datasets["train"].filtered_sorted(
            sort_key="length", reverse=True
        )
        hparams["train_dataloader_opts"]["shuffle"] = False
    elif hparams["sorting"] == "random":
        hparams["train_dataloader_opts"]["shuffle"] = True
    else:
        raise NotImplementedError(
            "sorting must be random, ascending or descending"
        )
    return datasets
if __name__ == "__main__":
    # Reading command line arguments
    hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])
    # Initialize ddp (useful only for multi-GPU DDP training)
    sb.utils.distributed.ddp_init_group(run_opts)
    # Load hyperparameters file with command-line overrides
    with open(hparams_file) as fin:
        hparams = load_hyperpyyaml(fin, overrides)
    # Create experiment directory
    sb.create_experiment_directory(
        experiment_directory=hparams["output_folder"],
        hyperparams_to_save=hparams_file,
        overrides=overrides,
    )
    # Data preparation, to be run on only one process.
    if not hparams["skip_prep"]:
        sb.utils.distributed.run_on_main(
            prepare_mini_librispeech,
            kwargs={
                "data_folder": hparams["data_folder"],
                "save_json_train": hparams["train_annotation"],
                "save_json_valid": hparams["valid_annotation"],
                "save_json_test": hparams["test_annotation"],
            },
        )
    # We can now directly create the datasets for training, valid, and test
    datasets = dataio_prepare(hparams)
    # In this case, pre-training is essential because mini-librispeech is not
    # big enough to train an end-to-end model from scratch. With bigger dataset
    # you can train from scratch and avoid this step.
    # We download the pretrained LM from HuggingFace (or elsewhere depending on
    # the path given in the YAML file). The tokenizer is loaded at the same time.
    run_on_main(hparams["pretrainer"].collect_files)
    hparams["pretrainer"].load_collected(device=run_opts["device"])
    # Trainer initialization
    asr_brain = ASR(
        modules=hparams["modules"],
        opt_class=hparams["opt_class"],
        hparams=hparams,
        run_opts=run_opts,
        checkpointer=hparams["checkpointer"],
    )
    # The `fit()` method iterates the training loop, calling the methods
    # necessary to update the parameters of the model. Since all objects
    # with changing state are managed by the Checkpointer, training can be
    # stopped at any point, and will be resumed on next call.
    asr_brain.fit(
        asr_brain.hparams.epoch_counter,
        datasets["train"],
        datasets["valid"],
        train_loader_kwargs=hparams["train_dataloader_opts"],
        valid_loader_kwargs=hparams["valid_dataloader_opts"],
    )
    # Load best checkpoint (lowest validation WER) for evaluation.
    test_stats = asr_brain.evaluate(
        test_set=datasets["test"],
        min_key="WER",
        test_loader_kwargs=hparams["test_dataloader_opts"],
    )
| 17,800 | 37.364224 | 85 | py |
speechbrain | speechbrain-main/templates/speaker_id/custom_model.py | """
This file contains a very simple TDNN module to use for speaker-id.
To replace this model, change the `!new:` tag in the hyperparameter file
to refer to a built-in SpeechBrain model or another file containing
a custom PyTorch module.
Authors
* Nauman Dawalatabad 2020
* Mirco Ravanelli 2020
"""
import torch # noqa: F401
import torch.nn as nn
import speechbrain as sb
from speechbrain.nnet.pooling import StatisticsPooling
from speechbrain.nnet.CNN import Conv1d
from speechbrain.nnet.linear import Linear
from speechbrain.nnet.normalization import BatchNorm1d
class Xvector(torch.nn.Module):
    """This model extracts X-vectors for speaker recognition

    Arguments
    ---------
    device : str
        Unused; kept for backward compatibility with existing configs.
    activation : torch class
        A class for constructing the activation layers.
    tdnn_blocks : int
        Number of time-delay neural (TDNN) layers. Must be >= 1.
    tdnn_channels : sequence of ints
        Output channels for TDNN layer.
    tdnn_kernel_sizes : sequence of ints
        List of kernel sizes for each TDNN layer.
    tdnn_dilations : sequence of ints
        List of dilations for kernels in each TDNN layer.
    lin_neurons : int
        Number of neurons in linear layers.
    in_channels : int
        Expected feature dimension of the input.

    Example
    -------
    >>> compute_xvect = Xvector()
    >>> input_feats = torch.rand([5, 10, 40])
    >>> outputs = compute_xvect(input_feats)
    >>> outputs.shape
    torch.Size([5, 1, 512])
    """

    def __init__(
        self,
        device="cpu",
        activation=torch.nn.LeakyReLU,
        # BUGFIX: default arguments are now immutable tuples instead of
        # lists (the classic mutable-default-argument pitfall). They are
        # only indexed, so lists passed by callers still work.
        tdnn_blocks=5,
        tdnn_channels=(512, 512, 512, 512, 1500),
        tdnn_kernel_sizes=(5, 3, 3, 1, 1),
        tdnn_dilations=(1, 2, 3, 1, 1),
        lin_neurons=512,
        in_channels=40,
    ):
        super().__init__()
        self.blocks = nn.ModuleList()
        # TDNN has convolutional layers with the given dilation factors
        # and kernel sizes. We here loop over all the convolutional layers
        # that we wanna add. Note that batch normalization is used after
        # the activations function in this case. This improves the
        # speaker-id performance a bit.
        for block_index in range(tdnn_blocks):
            out_channels = tdnn_channels[block_index]
            self.blocks.extend(
                [
                    Conv1d(
                        in_channels=in_channels,
                        out_channels=out_channels,
                        kernel_size=tdnn_kernel_sizes[block_index],
                        dilation=tdnn_dilations[block_index],
                    ),
                    activation(),
                    BatchNorm1d(input_size=out_channels),
                ]
            )
            # Next layer's input channels = this layer's output channels.
            in_channels = tdnn_channels[block_index]
        # Statistical pooling. It converts a tensor of variable length
        # into a fixed-length tensor. The statistical pooling returns the
        # mean and the standard deviation.
        self.blocks.append(StatisticsPooling())
        # Final linear transformation.
        self.blocks.append(
            Linear(
                input_size=out_channels * 2,  # mean + std
                n_neurons=lin_neurons,
                bias=True,
                combine_dims=False,
            )
        )

    def forward(self, x, lens=None):
        """Returns the x-vectors.

        Arguments
        ---------
        x : torch.Tensor
            Input features; presumably (batch, time, in_channels) given the
            doctest above — TODO confirm against callers.
        lens : torch.Tensor, optional
            Relative lengths, forwarded to layers that accept them.
        """
        for layer in self.blocks:
            # Some layers (e.g. normalization) accept lengths; others don't.
            try:
                x = layer(x, lengths=lens)
            except TypeError:
                x = layer(x)
        return x
class Classifier(sb.nnet.containers.Sequential):
    """This class implements the last MLP on the top of xvector features.

    Arguments
    ---------
    input_shape : tuple
        Expected shape of an example input.
    activation : torch class
        A class for constructing the activation layers.
    lin_blocks : int
        Number of linear layers.
    lin_neurons : int
        Number of neurons in linear layers.
    out_neurons : int
        Number of output neurons.

    Example
    -------
    >>> input_feats = torch.rand([5, 10, 40])
    >>> compute_xvect = Xvector()
    >>> xvects = compute_xvect(input_feats)
    >>> classify = Classifier(input_shape=xvects.shape)
    >>> output = classify(xvects)
    >>> output.shape
    torch.Size([5, 1, 1211])
    """

    def __init__(
        self,
        input_shape,
        activation=torch.nn.LeakyReLU,
        lin_blocks=1,
        lin_neurons=512,
        out_neurons=1211,
    ):
        super().__init__(input_shape=input_shape)
        # Activation + batch-norm applied directly on the input embeddings.
        self.append(activation(), layer_name="act")
        self.append(sb.nnet.normalization.BatchNorm1d, layer_name="norm")
        if lin_blocks > 0:
            self.append(sb.nnet.containers.Sequential, layer_name="DNN")
        # Adding fully-connected layers (linear -> activation -> batch-norm).
        for block_index in range(lin_blocks):
            block_name = f"block_{block_index}"
            self.DNN.append(
                sb.nnet.containers.Sequential, layer_name=block_name
            )
            self.DNN[block_name].append(
                sb.nnet.linear.Linear,
                n_neurons=lin_neurons,
                bias=True,
                layer_name="linear",
            )
            self.DNN[block_name].append(activation(), layer_name="act")
            self.DNN[block_name].append(
                sb.nnet.normalization.BatchNorm1d, layer_name="norm"
            )
        # Final Softmax classifier (log-probabilities over the classes).
        self.append(
            sb.nnet.linear.Linear, n_neurons=out_neurons, layer_name="out"
        )
        self.append(
            sb.nnet.activations.Softmax(apply_log=True), layer_name="softmax"
        )
| 5,638 | 29.814208 | 77 | py |
speechbrain | speechbrain-main/templates/speaker_id/train.py | #!/usr/bin/env python3
"""Recipe for training a speaker-id system. The template can use used as a
basic example for any signal classification task such as language_id,
emotion recognition, command classification, etc. The proposed task classifies
28 speakers using Mini Librispeech. This task is very easy. In a real
scenario, you need to use datasets with a larger number of speakers such as
the voxceleb one (see recipes/VoxCeleb). Speechbrain has already some built-in
models for signal classifications (see the ECAPA one in
speechbrain.lobes.models.ECAPA_TDNN.py or the xvector in
speechbrain/lobes/models/Xvector.py)
To run this recipe, do the following:
> python train.py train.yaml
To read the code, first scroll to the bottom to see the "main" code.
This gives a high-level overview of what is going on, while the
Brain class definition provides the details of what happens
for each batch during training.
The first time you run it, this script should automatically download
and prepare the Mini Librispeech dataset for computation. Noise and
reverberation are automatically added to each sample from OpenRIR.
Authors
* Mirco Ravanelli 2021
"""
import os
import sys
import torch
import speechbrain as sb
from hyperpyyaml import load_hyperpyyaml
from mini_librispeech_prepare import prepare_mini_librispeech
# Brain class for speech enhancement training
class SpkIdBrain(sb.Brain):
"""Class that manages the training loop. See speechbrain.core.Brain."""
    def compute_forward(self, batch, stage):
        """Runs the computations that transform the input into the
        output probabilities over the N classes.

        Arguments
        ---------
        batch : PaddedBatch
            This batch object contains all the relevant tensors for computation.
        stage : sb.Stage
            One of sb.Stage.TRAIN, sb.Stage.VALID, or sb.Stage.TEST.

        Returns
        -------
        predictions : Tensor
            Tensor that contains the posterior probabilities over the N classes.
        """
        # We first move the batch to the appropriate device.
        batch = batch.to(self.device)
        # Compute features, embeddings, and predictions
        feats, lens = self.prepare_features(batch.sig, stage)
        embeddings = self.modules.embedding_model(feats, lens)
        predictions = self.modules.classifier(embeddings)
        return predictions
    def prepare_features(self, wavs, stage):
        """Prepare the features for computation, including augmentation.

        Arguments
        ---------
        wavs : tuple
            Input signals (tensor) and their relative lengths (tensor).
        stage : sb.Stage
            The current stage of training.
        """
        wavs, lens = wavs
        # Add augmentation if specified. In this version of augmentation, we
        # concatenate the original and the augment batches in a single bigger
        # batch. This is more memory-demanding, but helps to improve the
        # performance. Change it if you run OOM.
        if stage == sb.Stage.TRAIN:
            if hasattr(self.modules, "env_corrupt"):
                wavs_noise = self.modules.env_corrupt(wavs, lens)
                wavs = torch.cat([wavs, wavs_noise], dim=0)
                lens = torch.cat([lens, lens])
            if hasattr(self.hparams, "augmentation"):
                wavs = self.hparams.augmentation(wavs, lens)
        # Feature extraction and normalization
        feats = self.modules.compute_features(wavs)
        feats = self.modules.mean_var_norm(feats, lens)
        return feats, lens
def compute_objectives(self, predictions, batch, stage):
"""Computes the loss given the predicted and targeted outputs.
Arguments
---------
predictions : tensor
The output tensor from `compute_forward`.
batch : PaddedBatch
This batch object contains all the relevant tensors for computation.
stage : sb.Stage
One of sb.Stage.TRAIN, sb.Stage.VALID, or sb.Stage.TEST.
Returns
-------
loss : torch.Tensor
A one-element tensor used for backpropagating the gradient.
"""
_, lens = batch.sig
spkid, _ = batch.spk_id_encoded
# Concatenate labels (due to data augmentation)
if stage == sb.Stage.TRAIN and hasattr(self.modules, "env_corrupt"):
spkid = torch.cat([spkid, spkid], dim=0)
lens = torch.cat([lens, lens])
# Compute the cost function
loss = sb.nnet.losses.nll_loss(predictions, spkid, lens)
# Append this batch of losses to the loss metric for easy
self.loss_metric.append(
batch.id, predictions, spkid, lens, reduction="batch"
)
# Compute classification error at test time
if stage != sb.Stage.TRAIN:
self.error_metrics.append(batch.id, predictions, spkid, lens)
return loss
def on_stage_start(self, stage, epoch=None):
"""Gets called at the beginning of each epoch.
Arguments
---------
stage : sb.Stage
One of sb.Stage.TRAIN, sb.Stage.VALID, or sb.Stage.TEST.
epoch : int
The currently-starting epoch. This is passed
`None` during the test stage.
"""
# Set up statistics trackers for this stage
self.loss_metric = sb.utils.metric_stats.MetricStats(
metric=sb.nnet.losses.nll_loss
)
# Set up evaluation-only statistics trackers
if stage != sb.Stage.TRAIN:
self.error_metrics = self.hparams.error_stats()
def on_stage_end(self, stage, stage_loss, epoch=None):
"""Gets called at the end of an epoch.
Arguments
---------
stage : sb.Stage
One of sb.Stage.TRAIN, sb.Stage.VALID, sb.Stage.TEST
stage_loss : float
The average loss for all of the data processed in this stage.
epoch : int
The currently-starting epoch. This is passed
`None` during the test stage.
"""
# Store the train loss until the validation stage.
if stage == sb.Stage.TRAIN:
self.train_loss = stage_loss
# Summarize the statistics from the stage for record-keeping.
else:
stats = {
"loss": stage_loss,
"error": self.error_metrics.summarize("average"),
}
# At the end of validation...
if stage == sb.Stage.VALID:
old_lr, new_lr = self.hparams.lr_annealing(epoch)
sb.nnet.schedulers.update_learning_rate(self.optimizer, new_lr)
# The train_logger writes a summary to stdout and to the logfile.
self.hparams.train_logger.log_stats(
{"Epoch": epoch, "lr": old_lr},
train_stats={"loss": self.train_loss},
valid_stats=stats,
)
# Save the current checkpoint and delete previous checkpoints,
self.checkpointer.save_and_keep_only(meta=stats, min_keys=["error"])
# We also write statistics about test data to stdout and to the logfile.
if stage == sb.Stage.TEST:
self.hparams.train_logger.log_stats(
{"Epoch loaded": self.hparams.epoch_counter.current},
test_stats=stats,
)
def dataio_prep(hparams):
    """This function prepares the datasets to be used in the brain class.
    It also defines the data processing pipeline through user-defined functions.
    We expect `prepare_mini_librispeech` to have been called before this,
    so that the `train.json`, `valid.json`, and `test.json` manifest files
    are available.
    Arguments
    ---------
    hparams : dict
        This dictionary is loaded from the `train.yaml` file, and it includes
        all the hyperparameters needed for dataset construction and loading.
    Returns
    -------
    datasets : dict
        Contains three keys, "train", "valid", and "test", that correspond
        to the appropriate DynamicItemDataset object.
    """
    # Initialization of the label encoder. The label encoder assigns to each
    # of the observed label a unique index (e.g, 'spk01': 0, 'spk02': 1, ..)
    label_encoder = sb.dataio.encoder.CategoricalEncoder()
    # Define audio pipeline
    @sb.utils.data_pipeline.takes("wav")
    @sb.utils.data_pipeline.provides("sig")
    def audio_pipeline(wav):
        """Load the signal, and pass it and its length to the corruption class.
        This is done on the CPU in the `collate_fn`."""
        sig = sb.dataio.dataio.read_audio(wav)
        return sig
    # Define label pipeline:
    @sb.utils.data_pipeline.takes("spk_id")
    @sb.utils.data_pipeline.provides("spk_id", "spk_id_encoded")
    def label_pipeline(spk_id):
        """Defines the pipeline to process the input speaker label."""
        yield spk_id
        spk_id_encoded = label_encoder.encode_label_torch(spk_id)
        yield spk_id_encoded
    # Define datasets. We also connect the dataset with the data processing
    # functions defined above.
    datasets = {}
    data_info = {
        "train": hparams["train_annotation"],
        "valid": hparams["valid_annotation"],
        "test": hparams["test_annotation"],
    }
    # NOTE(review): shuffling is force-disabled for all three loaders here —
    # presumably ordering/shuffling is handled elsewhere; confirm intent.
    hparams["dataloader_options"]["shuffle"] = False
    for dataset in data_info:
        datasets[dataset] = sb.dataio.dataset.DynamicItemDataset.from_json(
            json_path=data_info[dataset],
            replacements={"data_root": hparams["data_folder"]},
            dynamic_items=[audio_pipeline, label_pipeline],
            output_keys=["id", "sig", "spk_id_encoded"],
        )
    # Load or compute the label encoder (with multi-GPU DDP support)
    # Please, take a look into the lab_enc_file to see the label to index
    # mapping.
    lab_enc_file = os.path.join(hparams["save_folder"], "label_encoder.txt")
    label_encoder.load_or_create(
        path=lab_enc_file,
        from_didatasets=[datasets["train"]],
        output_key="spk_id",
    )
    return datasets
# Recipe begins!
if __name__ == "__main__":
    # Reading command line arguments.
    hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])
    # Initialize ddp (useful only for multi-GPU DDP training).
    sb.utils.distributed.ddp_init_group(run_opts)
    # Load hyperparameters file with command-line overrides.
    with open(hparams_file) as fin:
        hparams = load_hyperpyyaml(fin, overrides)
    # Create experiment directory
    sb.create_experiment_directory(
        experiment_directory=hparams["output_folder"],
        hyperparams_to_save=hparams_file,
        overrides=overrides,
    )
    # Data preparation, to be run on only one process.
    if not hparams["skip_prep"]:
        sb.utils.distributed.run_on_main(
            prepare_mini_librispeech,
            kwargs={
                "data_folder": hparams["data_folder"],
                "save_json_train": hparams["train_annotation"],
                "save_json_valid": hparams["valid_annotation"],
                "save_json_test": hparams["test_annotation"],
                "split_ratio": hparams["split_ratio"],
            },
        )
    # Create dataset objects "train", "valid", and "test".
    datasets = dataio_prep(hparams)
    # Initialize the Brain object to prepare for speaker-id training.
    spk_id_brain = SpkIdBrain(
        modules=hparams["modules"],
        opt_class=hparams["opt_class"],
        hparams=hparams,
        run_opts=run_opts,
        checkpointer=hparams["checkpointer"],
    )
    # The `fit()` method iterates the training loop, calling the methods
    # necessary to update the parameters of the model. Since all objects
    # with changing state are managed by the Checkpointer, training can be
    # stopped at any point, and will be resumed on next call.
    spk_id_brain.fit(
        epoch_counter=spk_id_brain.hparams.epoch_counter,
        train_set=datasets["train"],
        valid_set=datasets["valid"],
        train_loader_kwargs=hparams["dataloader_options"],
        valid_loader_kwargs=hparams["dataloader_options"],
    )
    # Load the best checkpoint (lowest classification error) for evaluation
    test_stats = spk_id_brain.evaluate(
        test_set=datasets["test"],
        min_key="error",
        test_loader_kwargs=hparams["dataloader_options"],
    )
| 12,410 | 35.289474 | 80 | py |
speechbrain | speechbrain-main/templates/enhancement/custom_model.py | """
This file contains a very simple PyTorch module to use for enhancement.
To replace this model, change the `!new:` tag in the hyperparameter file
to refer to a built-in SpeechBrain model or another file containing
a custom PyTorch module.
Authors
* Peter Plantinga 2021
"""
import torch
class CustomModel(torch.nn.Module):
    """Basic RNN model alternating bidirectional LSTMs and linear projections.

    The final projection maps back to ``input_size`` and a ReLU keeps the
    output non-negative, so it can be used directly as a spectral mask.

    Arguments
    ---------
    input_size : int
        Size of the expected input in the 3rd dimension.
    rnn_size : int
        Number of neurons to use in rnn (for each direction -> and <-).
    projection : int
        Number of neurons in projection layer.
    layers : int
        Number of RNN layers to use.
    """
    def __init__(self, input_size, rnn_size=256, projection=128, layers=2):
        super().__init__()
        modules = []
        for layer_index in range(layers):
            # First LSTM reads raw features; later ones read projections.
            lstm_in = input_size if layer_index == 0 else projection
            modules.append(
                torch.nn.LSTM(
                    input_size=lstm_in,
                    hidden_size=rnn_size,
                    bidirectional=True,
                )
            )
            # Projection layers shrink the representation, except the final
            # one, which restores input size so the output can act as a mask.
            proj_out = input_size if layer_index == layers - 1 else projection
            modules.append(
                torch.nn.Linear(in_features=rnn_size * 2, out_features=proj_out)
            )
        # ReLU guarantees non-negative outputs (required for masking).
        modules.append(torch.nn.ReLU())
        self.layers = torch.nn.ModuleList(modules)
    def forward(self, x):
        """Shift to time-first, pass layers, then back to batch-first."""
        hidden = x.transpose(0, 1)
        for module in self.layers:
            hidden = module(hidden)
            # LSTM modules return (output, state); keep only the output.
            if isinstance(hidden, tuple):
                hidden = hidden[0]
        return hidden.transpose(0, 1)
| 1,992 | 30.140625 | 79 | py |
speechbrain | speechbrain-main/templates/enhancement/train.py | #!/usr/bin/env/python3
"""Recipe for training a speech enhancement system with spectral masking.
To run this recipe, do the following:
> python train.py train.yaml --data_folder /path/to/save/mini_librispeech
To read the code, first scroll to the bottom to see the "main" code.
This gives a high-level overview of what is going on, while the
Brain class definition provides the details of what happens
for each batch during training.
The first time you run it, this script should automatically download
and prepare the Mini Librispeech dataset for computation. Noise and
reverberation are automatically added to each sample from OpenRIR.
Authors
* Szu-Wei Fu 2020
* Chien-Feng Liao 2020
* Peter Plantinga 2021
"""
import sys
import torch
import speechbrain as sb
from hyperpyyaml import load_hyperpyyaml
from mini_librispeech_prepare import prepare_mini_librispeech
# Brain class for speech enhancement training
class SEBrain(sb.Brain):
    """Class that manages the training loop. See speechbrain.core.Brain."""
    def compute_forward(self, batch, stage):
        """Apply masking to convert from noisy waveforms to enhanced signals.
        Arguments
        ---------
        batch : PaddedBatch
            This batch object contains all the relevant tensors for computation.
        stage : sb.Stage
            One of sb.Stage.TRAIN, sb.Stage.VALID, or sb.Stage.TEST.
        Returns
        -------
        predictions : dict
            A dictionary with keys {"spec", "wav"} with predicted features.
        """
        # We first move the batch to the appropriate device, and
        # compute the features necessary for masking.
        batch = batch.to(self.device)
        noisy_wavs, lens = batch.noisy_sig
        noisy_feats = self.compute_feats(noisy_wavs)
        # Masking is done here with the "signal approximation (SA)" algorithm.
        # The masked input is compared directly with clean speech targets.
        mask = self.modules.model(noisy_feats)
        predict_spec = torch.mul(mask, noisy_feats)
        # Also return predicted wav, for evaluation. Note that this could
        # also be used for a time-domain loss term.
        # expm1 undoes the log1p compression applied in compute_feats.
        predict_wav = self.hparams.resynth(
            torch.expm1(predict_spec), noisy_wavs
        )
        # Return a dictionary so we don't have to remember the order
        return {"spec": predict_spec, "wav": predict_wav}
    def compute_feats(self, wavs):
        """Returns corresponding log-spectral features of the input waveforms.
        Arguments
        ---------
        wavs : torch.Tensor
            The batch of waveforms to convert to log-spectral features.
        """
        # Log-spectral features
        feats = self.hparams.compute_STFT(wavs)
        feats = sb.processing.features.spectral_magnitude(feats, power=0.5)
        # Log1p reduces the emphasis on small differences
        feats = torch.log1p(feats)
        return feats
    def compute_objectives(self, predictions, batch, stage):
        """Computes the loss given the predicted and targeted outputs.
        Arguments
        ---------
        predictions : dict
            The output dict from `compute_forward`.
        batch : PaddedBatch
            This batch object contains all the relevant tensors for computation.
        stage : sb.Stage
            One of sb.Stage.TRAIN, sb.Stage.VALID, or sb.Stage.TEST.
        Returns
        -------
        loss : torch.Tensor
            A one-element tensor used for backpropagating the gradient.
        """
        # Prepare clean targets for comparison
        clean_wavs, lens = batch.clean_sig
        clean_spec = self.compute_feats(clean_wavs)
        # Directly compare the masked spectrograms with the clean targets
        loss = sb.nnet.losses.mse_loss(predictions["spec"], clean_spec, lens)
        # Append this batch of losses to the loss metric for easy
        # summarization at the end of the stage.
        self.loss_metric.append(
            batch.id, predictions["spec"], clean_spec, lens, reduction="batch"
        )
        # Some evaluations are slower, and we only want to perform them
        # on the validation set.
        if stage != sb.Stage.TRAIN:
            # Evaluate speech intelligibility as an additional metric
            self.stoi_metric.append(
                batch.id,
                predictions["wav"],
                clean_wavs,
                lens,
                reduction="batch",
            )
        return loss
    def on_stage_start(self, stage, epoch=None):
        """Gets called at the beginning of each epoch.
        Arguments
        ---------
        stage : sb.Stage
            One of sb.Stage.TRAIN, sb.Stage.VALID, or sb.Stage.TEST.
        epoch : int
            The currently-starting epoch. This is passed
            `None` during the test stage.
        """
        # Set up statistics trackers for this stage
        self.loss_metric = sb.utils.metric_stats.MetricStats(
            metric=sb.nnet.losses.mse_loss
        )
        # Set up evaluation-only statistics trackers
        if stage != sb.Stage.TRAIN:
            self.stoi_metric = sb.utils.metric_stats.MetricStats(
                metric=sb.nnet.loss.stoi_loss.stoi_loss
            )
    def on_stage_end(self, stage, stage_loss, epoch=None):
        """Gets called at the end of an epoch.
        Arguments
        ---------
        stage : sb.Stage
            One of sb.Stage.TRAIN, sb.Stage.VALID, sb.Stage.TEST
        stage_loss : float
            The average loss for all of the data processed in this stage.
        epoch : int
            The currently-ending epoch. This is passed
            `None` during the test stage.
        """
        # Store the train loss until the validation stage.
        if stage == sb.Stage.TRAIN:
            self.train_loss = stage_loss
        # Summarize the statistics from the stage for record-keeping.
        else:
            stats = {
                "loss": stage_loss,
                # stoi_loss is a loss (lower is better); negate so that the
                # reported "stoi" is higher-is-better, matching max_keys below.
                "stoi": -self.stoi_metric.summarize("average"),
            }
        # At the end of validation, we can write stats and checkpoints
        if stage == sb.Stage.VALID:
            # The train_logger writes a summary to stdout and to the logfile.
            self.hparams.train_logger.log_stats(
                {"Epoch": epoch},
                train_stats={"loss": self.train_loss},
                valid_stats=stats,
            )
            # Save the current checkpoint and delete previous checkpoints,
            # unless they have the current best STOI score.
            self.checkpointer.save_and_keep_only(meta=stats, max_keys=["stoi"])
        # We also write statistics about test data to stdout and to the logfile.
        if stage == sb.Stage.TEST:
            self.hparams.train_logger.log_stats(
                {"Epoch loaded": self.hparams.epoch_counter.current},
                test_stats=stats,
            )
def dataio_prep(hparams):
    """This function prepares the datasets to be used in the brain class.
    It also defines the data processing pipeline through user-defined functions.
    We expect `prepare_mini_librispeech` to have been called before this,
    so that the `train.json`, `valid.json`, and `test.json` manifest files
    are available.
    Arguments
    ---------
    hparams : dict
        This dictionary is loaded from the `train.yaml` file, and it includes
        all the hyperparameters needed for dataset construction and loading.
    Returns
    -------
    datasets : dict
        Contains three keys, "train", "valid", and "test", that correspond
        to the appropriate DynamicItemDataset object.
    """
    # Define audio pipeline. Adds noise, reverb, and babble on-the-fly.
    # Of course for a real enhancement dataset, you'd want a fixed valid set.
    @sb.utils.data_pipeline.takes("wav")
    @sb.utils.data_pipeline.provides("noisy_sig", "clean_sig")
    def audio_pipeline(wav):
        """Load the signal, and pass it and its length to the corruption class.
        This is done on the CPU in the `collate_fn`."""
        clean_sig = sb.dataio.dataio.read_audio(wav)
        # env_corruption expects a batch dim; add it, corrupt, then remove it.
        noisy_sig = hparams["env_corruption"](
            clean_sig.unsqueeze(0), torch.ones(1)
        ).squeeze(0)
        return noisy_sig, clean_sig
    # Define datasets sorted by ascending lengths for efficiency
    datasets = {}
    data_info = {
        "train": hparams["train_annotation"],
        "valid": hparams["valid_annotation"],
        "test": hparams["test_annotation"],
    }
    # Shuffling is disabled because the datasets are length-sorted below.
    hparams["dataloader_options"]["shuffle"] = False
    for dataset in data_info:
        datasets[dataset] = sb.dataio.dataset.DynamicItemDataset.from_json(
            json_path=data_info[dataset],
            replacements={"data_root": hparams["data_folder"]},
            dynamic_items=[audio_pipeline],
            output_keys=["id", "noisy_sig", "clean_sig"],
        ).filtered_sorted(sort_key="length")
    return datasets
# Recipe begins!
if __name__ == "__main__":
    # Reading command line arguments
    hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])
    # Initialize ddp (useful only for multi-GPU DDP training)
    sb.utils.distributed.ddp_init_group(run_opts)
    # Load hyperparameters file with command-line overrides
    with open(hparams_file) as fin:
        hparams = load_hyperpyyaml(fin, overrides)
    # Create experiment directory
    sb.create_experiment_directory(
        experiment_directory=hparams["output_folder"],
        hyperparams_to_save=hparams_file,
        overrides=overrides,
    )
    # Data preparation, to be run on only one process.
    if not hparams["skip_prep"]:
        sb.utils.distributed.run_on_main(
            prepare_mini_librispeech,
            kwargs={
                "data_folder": hparams["data_folder"],
                "save_json_train": hparams["train_annotation"],
                "save_json_valid": hparams["valid_annotation"],
                "save_json_test": hparams["test_annotation"],
            },
        )
    # Create dataset objects "train", "valid", and "test"
    datasets = dataio_prep(hparams)
    # Initialize the Brain object to prepare for mask training.
    se_brain = SEBrain(
        modules=hparams["modules"],
        opt_class=hparams["opt_class"],
        hparams=hparams,
        run_opts=run_opts,
        checkpointer=hparams["checkpointer"],
    )
    # The `fit()` method iterates the training loop, calling the methods
    # necessary to update the parameters of the model. Since all objects
    # with changing state are managed by the Checkpointer, training can be
    # stopped at any point, and will be resumed on next call.
    # The same dataloader options are reused for train/valid/test loaders.
    se_brain.fit(
        epoch_counter=se_brain.hparams.epoch_counter,
        train_set=datasets["train"],
        valid_set=datasets["valid"],
        train_loader_kwargs=hparams["dataloader_options"],
        valid_loader_kwargs=hparams["dataloader_options"],
    )
    # Load best checkpoint (highest STOI) for evaluation
    test_stats = se_brain.evaluate(
        test_set=datasets["test"],
        max_key="stoi",
        test_loader_kwargs=hparams["dataloader_options"],
    )
| 11,127 | 34.552716 | 80 | py |
speechbrain | speechbrain-main/recipes/BinauralWSJ0Mix/separation/dynamic_mixing.py | import speechbrain as sb
import numpy as np
import torch
import torchaudio
import glob
import os
import random
from speechbrain.processing.signal_processing import rescale
from speechbrain.dataio.batch import PaddedBatch
from scipy.signal import fftconvolve
"""
The functions to implement Dynamic Mixing For SpeechSeparation
Authors
* Samuele Cornell 2021
* Cem Subakan 2021
* Zijian Huang 2022
"""
def dynamic_mix_data_prep(hparams):
    """
    Dynamic mixing for the binaural WSJ0-2/3Mix recipe (with optional
    WHAM! noise and reverberation, selected via ``experiment_name``).

    Builds a DynamicItemDataset whose audio pipeline synthesizes a fresh
    binaural mixture on every access, then wraps it in a DataLoader.
    """
    # 1. Define datasets
    train_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["train_data"],
        replacements={"data_root": hparams["data_folder"]},
    )
    # we build a dictionary where keys are speaker ids and entries are lists
    # of utterance files of that speaker
    from recipes.WSJ0Mix.separation.dynamic_mixing import (
        build_spk_hashtable,
        get_wham_noise_filenames,
    )
    spk_hashtable, spk_weights = build_spk_hashtable(hparams)
    spk_list = [x for x in spk_hashtable.keys()]
    # Normalize weights so they form a probability distribution for sampling.
    spk_weights = [x / sum(spk_weights) for x in spk_weights]
    if "noise" in hparams["experiment_name"]:
        noise_files = get_wham_noise_filenames(hparams)
    @sb.utils.data_pipeline.takes("mix_wav")
    @sb.utils.data_pipeline.provides(
        "mix_sig", "s1_sig", "s2_sig", "s3_sig", "noise_sig"
    )
    def audio_pipeline(
        mix_wav,
    ): # this is dummy --> it means one epoch will be same as without dynamic mixing
        """
        This audio pipeline defines the compute graph for dynamic mixing.
        Yields: mixture, one source per speaker (None filler for the 3rd
        source in the 2-speaker case), and the noise signal (or None).
        """
        speakers = np.random.choice(
            spk_list, hparams["num_spks"], replace=False, p=spk_weights
        )
        if "noise" in hparams["experiment_name"]:
            noise_file = np.random.choice(noise_files, 1, replace=False)
            noise, fs_read = torchaudio.load(noise_file[0])
            noise = noise.squeeze()
        # select two speakers randomly
        sources = []
        first_lvl = None
        spk_files = [
            np.random.choice(spk_hashtable[spk], 1, False)[0]
            for spk in speakers
        ]
        # Common length: shortest utterance, capped at training_signal_len.
        minlen = min(
            *[torchaudio.info(x).num_frames for x in spk_files],
            hparams["training_signal_len"],
        )
        for i, spk_file in enumerate(spk_files):
            # select random offset
            length = torchaudio.info(spk_file).num_frames
            start = 0
            stop = length
            if length > minlen: # take a random window
                start = np.random.randint(0, length - minlen)
                stop = start + minlen
            tmp, fs_read = torchaudio.load(
                spk_file, frame_offset=start, num_frames=stop - start,
            )
            tmp = tmp[0] # * peak # remove channel dim and normalize
            # First source sets the reference level; later sources are
            # rescaled relative to it (levels drawn from fitted normals).
            if i == 0:
                gain = np.clip(random.normalvariate(-27.43, 2.57), -45, 0)
                tmp = rescale(tmp, torch.tensor(len(tmp)), gain, scale="dB")
                # assert not torch.all(torch.isnan(tmp))
                first_lvl = gain
            else:
                gain = np.clip(
                    first_lvl + random.normalvariate(-2.51, 2.66), -45, 0
                )
                tmp = rescale(tmp, torch.tensor(len(tmp)), gain, scale="dB")
            if "reverb" in hparams["experiment_name"]:
                tmp = torch.stack((tmp, tmp), 1)
                reverb_time = np.random.choice(
                    ["0_1s", "0_2s", "0_4s", "0_7s", "0_8s"]
                )
                azimuth = np.random.choice(list(range(-90, 91, 5)))
                hrtf_file = os.path.join(
                    hparams["hrtf_wav_path"],
                    reverb_time,
                    "CATT_{}_{}.wav".format(reverb_time, azimuth),
                )
                hrtf, sr = torchaudio.load(hrtf_file)
                transform = torchaudio.transforms.Resample(sr, fs_read)
                hrtf = transform(hrtf)
                tmp_bi = torch.from_numpy(
                    fftconvolve(tmp.numpy(), hrtf.numpy(), mode="same")
                )
            else:
                tmp_bi = torch.FloatTensor(len(tmp), 2) # binaural
                subject_path_list = glob.glob(
                    os.path.join(hparams["hrtf_wav_path"], "subject*")
                )
                subject_path = np.random.choice(subject_path_list)
                azimuth_list = (
                    [-80, -65, -55] + list(range(-45, 46, 5)) + [55, 65, 80]
                )
                azimuth = np.random.choice(azimuth_list)
                # NOTE(review): this inner loop shadows the outer index `i`.
                # Harmless today (outer `i` is only read before this point),
                # but fragile — consider renaming the inner variable.
                for i, loc in enumerate(["left", "right"]):
                    hrtf_file = os.path.join(
                        subject_path,
                        "{}az{}.wav".format(
                            azimuth.astype("str").replace("-", "neg"), loc
                        ),
                    )
                    hrtf, sr = torchaudio.load(hrtf_file)
                    transform = torchaudio.transforms.Resample(sr, fs_read)
                    # NOTE(review): `hrtf[:, np.random.randint(50)]` indexes a
                    # single frame across channels before resampling — verify
                    # the stored HRIR layout; this looks like it may intend to
                    # select a random elevation/column instead.
                    hrtf = transform(hrtf[:, np.random.randint(50)])
                    tmp_bi[:, i] = torch.from_numpy(
                        fftconvolve(tmp.numpy(), hrtf.numpy(), mode="same")
                    )
            # Make relative source energy same with original
            spatial_scaling = torch.sqrt(
                torch.sum(tmp ** 2) * 2 / torch.sum(tmp_bi ** 2)
            )
            sources.append(tmp_bi * spatial_scaling)
        # we mix the sources together
        # here we can also use augmentations ! -> runs on cpu and for each
        # mixture parameters will be different rather than for whole batch.
        # no difference however for bsz=1 :)
        # padding left
        # sources, _ = batch_pad_right(sources)
        sources = torch.stack(sources)
        mixture = torch.sum(sources, 0)
        if "noise" in hparams["experiment_name"]:
            len_noise = len(noise)
            len_mix = len(mixture)
            min_len = min(len_noise, len_mix)
            noise = torch.swapaxes(noise, 0, 1)
            mixture = mixture[:min_len] + noise[:min_len]
        # Peak-normalize mixture and sources jointly to 0.9 full scale.
        max_amp = max(
            torch.abs(mixture).max().item(),
            *[
                x.item()
                for x in torch.abs(sources).max(dim=-1)[0].max(dim=-1)[0]
            ],
        )
        mix_scaling = 1 / max_amp * 0.9
        sources = mix_scaling * sources
        mixture = mix_scaling * mixture
        yield mixture
        for i in range(hparams["num_spks"]):
            yield sources[i]
        # If the number of speakers is 2, yield None for the 3rd speaker
        if hparams["num_spks"] == 2:
            yield None
        if "noise" in hparams["experiment_name"]:
            mean_source_lvl = sources.abs().mean()
            mean_noise_lvl = noise.abs().mean()
            noise = (mean_source_lvl / mean_noise_lvl) * noise
            yield noise
        else:
            yield None
    sb.dataio.dataset.add_dynamic_item([train_data], audio_pipeline)
    sb.dataio.dataset.set_output_keys(
        [train_data],
        ["id", "mix_sig", "s1_sig", "s2_sig", "s3_sig", "noise_sig"],
    )
    # Re-seed numpy per worker so parallel workers draw different mixtures.
    train_data = torch.utils.data.DataLoader(
        train_data,
        batch_size=hparams["dataloader_opts"]["batch_size"],
        num_workers=hparams["dataloader_opts"]["num_workers"],
        collate_fn=PaddedBatch,
        worker_init_fn=lambda x: np.random.seed(
            int.from_bytes(os.urandom(4), "little") + x
        ),
    )
    return train_data
| 7,597 | 33.694064 | 85 | py |
speechbrain | speechbrain-main/recipes/BinauralWSJ0Mix/separation/train.py | #!/usr/bin/env/python3
"""Recipe for training a neural speech separation system on binaural wsjmix the
dataset. The system employs an encoder, a decoder, and a masking network.
To run this recipe, do the following:
> python train.py hparams/convtasnet-parallel.yaml
--data_folder yourpath/binaural-wsj0mix/2speakers
--wsj_root yourpath/to/wsj/
The experiment file is flexible enough to support different neural
networks. By properly changing the parameter files, you can try
different architectures.
Authors
* Cem Subakan 2020
* Mirco Ravanelli 2020
* Samuele Cornell 2020
* Mirko Bronzi 2020
* Jianyuan Zhong 2020
* Zijian Huang 2022
"""
import os
import sys
import torch
import torch.nn.functional as F
import torchaudio
import speechbrain as sb
import speechbrain.nnet.schedulers as schedulers
from speechbrain.utils.distributed import run_on_main
from torch.cuda.amp import autocast
from hyperpyyaml import load_hyperpyyaml
import numpy as np
from tqdm import tqdm
import csv
import logging
from pyroomacoustics.experimental.localization import tdoa
from speechbrain.processing.features import STFT, spectral_magnitude
from torch.nn import Conv1d
from speechbrain.pretrained.fetching import fetch
import zipfile
logger = logging.getLogger(__name__)
# Define training procedure
class Separation(sb.Brain):
def compute_forward(self, mix, targets, stage, noise=None):
"""Forward computations from the mixture to the separated signals."""
# Unpack lists and put tensors in the right device
mix, mix_lens = mix
mix, mix_lens = mix.to(self.device), mix_lens.to(self.device)
# Convert targets to tensor
targets = torch.cat(
[targets[i][0].unsqueeze(-1) for i in range(self.hparams.num_spks)],
dim=-1,
).to(self.device)
# [1,t,2,2] dim = -1 is num_speakers
# Add speech distortions
if stage == sb.Stage.TRAIN:
with torch.no_grad():
if self.hparams.use_speedperturb or self.hparams.use_rand_shift:
mix, targets = self.add_speed_perturb(targets, mix_lens)
mix = targets.sum(-1)
if "noise" in self.hparams.experiment_name:
noise = noise.to(self.device)
len_noise = noise.shape[1]
len_mix = mix.shape[1]
min_len = min(len_noise, len_mix)
# add the noise
mix = mix[:, :min_len] + noise[:, :min_len]
# fix the length of targets also
targets = targets[:, :min_len, :]
if self.hparams.use_wavedrop:
mix = self.hparams.wavedrop(mix, mix_lens)
if self.hparams.limit_training_signal_len:
mix, targets = self.cut_signals(mix, targets)
# Separation
if "independent" in self.hparams.experiment_name:
mix_wl = self.hparams.EncoderL(mix[:, :, 0])
est_maskl = self.hparams.MaskNetL(mix_wl)
mix_wl = torch.stack([mix_wl] * self.hparams.num_spks)
sep_hl = mix_wl * est_maskl
mix_wr = self.hparams.EncoderR(mix[:, :, 1])
est_maskr = self.hparams.MaskNetR(mix_wr)
mix_wr = torch.stack([mix_wr] * self.hparams.num_spks)
sep_hr = mix_wr * est_maskr
elif "cross" in self.hparams.experiment_name:
EPS = 1e-8
compute_stft = STFT(
sample_rate=self.hparams.sample_rate,
win_length=256 * 1000.0 / self.hparams.sample_rate,
hop_length=128 * 1000.0 / self.hparams.sample_rate,
n_fft=256,
).to(self.device)
mix_stft = compute_stft(mix).permute(-1, 0, 2, 1, 3)
# IPD = torch.atan2(mix_stft[:, :, :, :, 1], mix_stft[:, :, :, :, 0])
# sinIPD = torch.sin(IPD[0] - IPD[1])
# cosIPD = torch.cos(IPD[0] - IPD[1])
ILD_beforelog = spectral_magnitude(mix_stft[0], power=0.5) / (
spectral_magnitude(mix_stft[1], power=0.5) + EPS
)
ILD = 10 * torch.log10(ILD_beforelog + EPS)
# print(ILD.shape) # [1,129,t/win]
# Separation
mix_wl = self.hparams.EncoderL(mix[:, :, 0])
# [1,64,t/k]
n_samples = mix_wl.shape[-1]
ILD_upsample = F.interpolate(ILD, size=n_samples)
conv1 = Conv1d(
ILD_upsample.shape[1], mix_wl.shape[1], kernel_size=1
).to(self.device)
ILD_cat = conv1(ILD_upsample)
mix_catl = torch.cat((mix_wl, ILD_cat), dim=1)
est_maskl = self.hparams.MaskNetL(mix_catl)
mix_wl = torch.stack([mix_wl] * self.hparams.num_spks)
sep_hl = mix_wl * torch.chunk(est_maskl, 2, dim=2)[0]
mix_wr = self.hparams.EncoderR(mix[:, :, 1])
mix_catr = torch.cat((mix_wr, -ILD_cat), dim=1)
est_maskr = self.hparams.MaskNetR(mix_catr)
mix_wr = torch.stack([mix_wr] * self.hparams.num_spks)
sep_hr = mix_wr * torch.chunk(est_maskr, 2, dim=2)[0]
elif "parallel" in self.hparams.experiment_name:
mix_wl1 = self.hparams.EncoderL(mix[:, :, 0])
mix_wr2 = self.hparams.EncoderR(mix[:, :, 1])
mix_wl = torch.cat((mix_wl1, mix_wr2), dim=1)
est_maskl = self.hparams.MaskNetL(mix_wl)
mix_wl1 = torch.stack([mix_wl1] * self.hparams.num_spks)
mix_wr2 = torch.stack([mix_wr2] * self.hparams.num_spks)
sep_hl1 = mix_wl1 * torch.chunk(est_maskl, 2, dim=2)[0]
sep_hr2 = mix_wr2 * torch.chunk(est_maskl, 2, dim=2)[1]
mix_wl2 = self.hparams.EncoderR(mix[:, :, 0])
mix_wr1 = self.hparams.EncoderL(mix[:, :, 1])
mix_wr = torch.cat((mix_wl2, mix_wr1), dim=1)
est_maskr = self.hparams.MaskNetR(mix_wr)
mix_wl2 = torch.stack([mix_wl2] * self.hparams.num_spks)
mix_wr1 = torch.stack([mix_wr1] * self.hparams.num_spks)
sep_hl2 = mix_wl2 * torch.chunk(est_maskr, 2, dim=2)[0]
sep_hr1 = mix_wr1 * torch.chunk(est_maskr, 2, dim=2)[1]
sep_hl = sep_hl1 + sep_hr2
sep_hr = sep_hl2 + sep_hr1
else:
raise ValueError(
"Experiment name in hparams should contain one of these--'independent', 'cross', and 'parallel'."
)
# Decoding
est_sourcel = torch.cat(
[
self.hparams.DecoderL(sep_hl[i]).unsqueeze(-1)
for i in range(self.hparams.num_spks)
],
dim=-1,
)
est_sourcer = torch.cat(
[
self.hparams.DecoderR(sep_hr[i]).unsqueeze(-1)
for i in range(self.hparams.num_spks)
],
dim=-1,
)
est_source = torch.cat(
[est_sourcel.unsqueeze(-2), est_sourcer.unsqueeze(-2)], dim=-2
)
# T changed after conv1d in encoder, fix it here
T_origin = mix.size(1)
T_est = est_source.size(1)
if T_origin > T_est:
est_source = F.pad(est_source, (0, 0, 0, 0, 0, T_origin - T_est))
else:
est_source = est_source[:, :T_origin, :]
return est_source, targets
def compute_objectives(self, predictions, targets):
"""Computes the snr loss"""
return self.hparams.loss(targets, predictions)
def fit_batch(self, batch):
"""Trains one batch"""
# Unpacking batch list
mixture = batch.mix_sig
targets = [batch.s1_sig, batch.s2_sig]
if self.hparams.num_spks == 3:
targets.append(batch.s3_sig)
noise = None
if "noise" in self.hparams.experiment_name:
noise = batch.noise_sig[0]
if self.auto_mix_prec:
with autocast():
predictions, targets = self.compute_forward(
mixture, targets, sb.Stage.TRAIN, noise
)
loss = self.compute_objectives(predictions, targets)
# hard threshold the easy dataitems
if self.hparams.threshold_byloss:
th = self.hparams.threshold
loss_to_keep = loss[loss > th]
if loss_to_keep.nelement() > 0:
loss = loss_to_keep.mean()
else:
loss = loss.mean()
if (
loss < self.hparams.loss_upper_lim and loss.nelement() > 0
): # the fix for computational problems
self.scaler.scale(loss).backward()
if self.hparams.clip_grad_norm >= 0:
self.scaler.unscale_(self.optimizer)
torch.nn.utils.clip_grad_norm_(
self.modules.parameters(), self.hparams.clip_grad_norm,
)
self.scaler.step(self.optimizer)
self.scaler.update()
else:
self.nonfinite_count += 1
logger.info(
"infinite loss or empty loss! it happened {} times so far - skipping this batch".format(
self.nonfinite_count
)
)
loss.data = torch.tensor(0).to(self.device)
else:
predictions, targets = self.compute_forward(
mixture, targets, sb.Stage.TRAIN, noise
)
loss = self.compute_objectives(predictions, targets)
if self.hparams.threshold_byloss:
th = self.hparams.threshold
loss_to_keep = loss[loss > th]
if loss_to_keep.nelement() > 0:
loss = loss_to_keep.mean()
else:
loss = loss.mean()
if (
loss < self.hparams.loss_upper_lim and loss.nelement() > 0
): # the fix for computational problems
loss.backward()
if self.hparams.clip_grad_norm >= 0:
torch.nn.utils.clip_grad_norm_(
self.modules.parameters(), self.hparams.clip_grad_norm
)
self.optimizer.step()
else:
self.nonfinite_count += 1
logger.info(
"infinite loss or empty loss! it happened {} times so far - skipping this batch".format(
self.nonfinite_count
)
)
loss.data = torch.tensor(0).to(self.device)
self.optimizer.zero_grad()
return loss.detach().cpu()
def evaluate_batch(self, batch, stage):
    """Run the forward pass without gradients on a validation/test batch.

    Returns the detached mean loss. At test time, optionally dumps the
    mixture, targets, and estimated sources to disk (``save_audio``).
    """
    snt_id = batch.id
    mixture = batch.mix_sig
    sources = [batch.s1_sig, batch.s2_sig]
    if self.hparams.num_spks == 3:
        sources.append(batch.s3_sig)
    with torch.no_grad():
        predictions, targets = self.compute_forward(mixture, sources, stage)
        loss = self.compute_objectives(predictions, targets)
    # Manage audio file saving: an optional `n_audio_to_save` budget
    # limits how many test items are written.
    if stage == sb.Stage.TEST and self.hparams.save_audio:
        if not hasattr(self.hparams, "n_audio_to_save"):
            self.save_audio(snt_id[0], mixture, targets, predictions)
        elif self.hparams.n_audio_to_save > 0:
            self.save_audio(snt_id[0], mixture, targets, predictions)
            self.hparams.n_audio_to_save -= 1
    return loss.mean().detach()
def on_stage_end(self, stage, stage_loss, epoch):
    """Gets called at the end of a epoch.

    TRAIN: stores the epoch stats. VALID: anneals the learning rate
    (ReduceLROnPlateau only), logs stats, and checkpoints keeping the
    best model by lowest "snr" value. TEST: logs the final stats.
    """
    # Compute/store important stats
    stage_stats = {"snr": stage_loss}
    if stage == sb.Stage.TRAIN:
        self.train_stats = stage_stats
    # Perform end-of-iteration things, like annealing, logging, etc.
    if stage == sb.Stage.VALID:
        # Learning rate annealing
        if isinstance(
            self.hparams.lr_scheduler, schedulers.ReduceLROnPlateau
        ):
            current_lr, next_lr = self.hparams.lr_scheduler(
                [self.optimizer], epoch, stage_loss
            )
            schedulers.update_learning_rate(self.optimizer, next_lr)
        else:
            # if we do not use the reducelronplateau, we do not change the lr
            current_lr = self.hparams.optimizer.optim.param_groups[0]["lr"]
        self.hparams.train_logger.log_stats(
            stats_meta={"epoch": epoch, "lr": current_lr},
            train_stats=self.train_stats,
            valid_stats=stage_stats,
        )
        # Keep only the checkpoint with the minimum "snr" meta value.
        self.checkpointer.save_and_keep_only(
            meta={"snr": stage_stats["snr"]}, min_keys=["snr"],
        )
    elif stage == sb.Stage.TEST:
        self.hparams.train_logger.log_stats(
            stats_meta={"Epoch loaded": self.hparams.epoch_counter.current},
            test_stats=stage_stats,
        )
def add_speed_perturb(self, targets, targ_lens):
    """Adds speed perturbation and random_shift to the input signals.

    Arguments
    ---------
    targets : torch.Tensor
        Sources indexed as ``targets[..., i]`` over the last axis —
        assumes a [batch, time, channel, n_src] layout (TODO confirm
        with caller).
    targ_lens : torch.Tensor
        Relative lengths forwarded to the speed perturbator.

    Returns
    -------
    mix : torch.Tensor
        Sum of the (possibly perturbed) sources over the source axis.
    targets : torch.Tensor
        The (possibly perturbed) sources.
    """
    min_len = -1
    recombine = False
    if self.hparams.use_speedperturb:
        # Performing speed change (independently on each source)
        new_targets = []
        recombine = True
        for i in range(targets.shape[-1]):
            new_target = self.hparams.speedperturb(
                targets[:, :, :, i], targ_lens
            )
            new_targets.append(new_target)
            # Track the shortest perturbed length so all sources can be
            # cropped to a common size before recombination.
            if i == 0:
                min_len = new_target.shape[1]
            else:
                if new_target.shape[1] < min_len:
                    min_len = new_target.shape[1]
    if self.hparams.use_rand_shift:
        # Performing random_shift (independently on each source)
        # NOTE(review): this reads `new_targets`, which is only defined
        # when use_speedperturb is also enabled — confirm configs never
        # enable rand_shift alone.
        recombine = True
        for i in range(targets.shape[-1]):
            rand_shift = torch.randint(
                self.hparams.min_shift, self.hparams.max_shift, (1,)
            )
            new_targets[i] = new_targets[i].to(self.device)
            new_targets[i] = torch.roll(
                new_targets[i], shifts=(rand_shift[0],), dims=1
            )
    # Re-combination
    if recombine:
        if self.hparams.use_speedperturb:
            # Fresh buffer cropped to the common minimum length.
            targets = torch.zeros(
                targets.shape[0],
                min_len,
                targets.shape[-2],
                targets.shape[-1],
                device=targets.device,
                dtype=torch.float,
            )
        for i, new_target in enumerate(new_targets):
            targets[:, :, :, i] = new_targets[i][:, 0:min_len]
    mix = targets.sum(-1)
    return mix, targets
def cut_signals(self, mixture, targets):
    """Select a random segment of ``training_signal_len`` samples from
    the mixture; the targets are sliced over the same time interval."""
    seg_len = self.hparams.training_signal_len
    max_start = max(0, mixture.shape[1] - seg_len)
    start = torch.randint(0, max_start + 1, (1,)).item()
    stop = start + seg_len
    return mixture[:, start:stop], targets[:, start:stop, :]
def reset_layer_recursively(self, layer):
    """Reinitializes the parameters of ``layer`` and every submodule
    that exposes a ``reset_parameters`` method."""
    reset = getattr(layer, "reset_parameters", None)
    if reset is not None:
        reset()
    for submodule in layer.modules():
        if submodule is not layer:
            self.reset_layer_recursively(submodule)
def cal_interaural_error(self, predictions, targets):
    """Compute ITD and ILD errors.

    Both metrics are computed per source, for the direct and the
    order-reversed source assignment, and the smaller error is kept
    (permutation-invariant for two sources).

    Arguments
    ---------
    predictions, targets : list
        First element is a tensor indexed as [T, E, C] (time, ear,
        source), per the inline comments.

    Returns
    -------
    (float, float)
        Mean ITD error (scaled by 1e6 — presumably seconds to
        microseconds, TODO confirm ``tdoa`` units) and mean ILD
        error in dB.
    """
    EPS = 1e-8
    s_target = targets[0]  # [T,E,C]
    s_prediction = predictions[0]  # [T,E,C]
    # ITD is computed with generalized cross-correlation phase transform (GCC-PHAT)
    ITD_target = [
        tdoa(
            s_target[:, 0, i].cpu().numpy(),
            s_target[:, 1, i].cpu().numpy(),
            fs=self.hparams.sample_rate,
        )
        * 10 ** 6
        for i in range(s_target.shape[-1])
    ]
    ITD_prediction = [
        tdoa(
            s_prediction[:, 0, i].cpu().numpy(),
            s_prediction[:, 1, i].cpu().numpy(),
            fs=self.hparams.sample_rate,
        )
        * 10 ** 6
        for i in range(s_prediction.shape[-1])
    ]
    ITD_error1 = np.mean(
        np.abs(np.array(ITD_target) - np.array(ITD_prediction))
    )
    # Score the reversed source order as well; keep the better match.
    ITD_error2 = np.mean(
        np.abs(np.array(ITD_target) - np.array(ITD_prediction)[::-1])
    )
    ITD_error = min(ITD_error1, ITD_error2)
    # ILD = 10 * log_10(||s_left||^2 / ||s_right||^2)
    ILD_target_beforelog = torch.sum(s_target[:, 0] ** 2, dim=0) / (
        torch.sum(s_target[:, 1] ** 2, dim=0) + EPS
    )
    ILD_target = 10 * torch.log10(ILD_target_beforelog + EPS)  # [C]
    ILD_prediction_beforelog = torch.sum(s_prediction[:, 0] ** 2, dim=0) / (
        torch.sum(s_prediction[:, 1] ** 2, dim=0) + EPS
    )
    ILD_prediction = 10 * torch.log10(ILD_prediction_beforelog + EPS)  # [C]
    ILD_error1 = torch.mean(torch.abs(ILD_target - ILD_prediction))
    ILD_error2 = torch.mean(torch.abs(ILD_target - ILD_prediction.flip(0)))
    ILD_error = min(ILD_error1.item(), ILD_error2.item())
    return ITD_error, ILD_error
def save_results(self, test_data):
    """Computes SNR, SNR improvement, and interaural (ITD/ILD) errors on
    the test set and saves per-utterance results plus an "avg" row into
    ``test_results.csv`` in the output folder."""
    # Create folders where to store audio
    save_file = os.path.join(self.hparams.output_folder, "test_results.csv")
    # Variable init
    all_snrs = []
    all_snrs_i = []
    all_delta_ITDs = []
    all_delta_ILDs = []
    csv_columns = ["snt_id", "snr", "snr_i", "delta_ITD", "delta_ILD"]
    test_loader = sb.dataio.dataloader.make_dataloader(
        test_data, **self.hparams.dataloader_opts
    )
    with open(save_file, "w") as results_csv:
        writer = csv.DictWriter(results_csv, fieldnames=csv_columns)
        writer.writeheader()
        # Loop over all test sentence
        with tqdm(test_loader, dynamic_ncols=True) as t:
            for i, batch in enumerate(t):
                # Apply Separation
                mixture, mix_len = batch.mix_sig
                snt_id = batch.id
                targets = [batch.s1_sig, batch.s2_sig]
                if self.hparams.num_spks == 3:
                    targets.append(batch.s3_sig)
                with torch.no_grad():
                    predictions, targets = self.compute_forward(
                        batch.mix_sig, targets, sb.Stage.TEST
                    )
                # Compute SNR
                snr = self.compute_objectives(predictions, targets)
                # Compute SNR improvement: baseline scores the raw
                # mixture (replicated per speaker) against the targets.
                mixture_signal = torch.stack(
                    [mixture] * self.hparams.num_spks, dim=-1
                )
                mixture_signal = mixture_signal.to(targets.device)
                snr_baseline = self.compute_objectives(
                    mixture_signal, targets
                )
                snr_i = snr - snr_baseline
                # Compute ITD and ILD
                delta_ITD, delta_ILD = self.cal_interaural_error(
                    predictions, targets
                )
                # Saving on a csv file (losses are negated so higher
                # csv values mean better separation)
                row = {
                    "snt_id": snt_id[0],
                    "snr": -snr.item(),
                    "snr_i": -snr_i.item(),
                    "delta_ITD": delta_ITD,
                    "delta_ILD": delta_ILD,
                }
                writer.writerow(row)
                # Metric Accumulation
                all_snrs.append(-snr.item())
                all_snrs_i.append(-snr_i.item())
                all_delta_ITDs.append(delta_ITD)
                all_delta_ILDs.append(delta_ILD)
            # Summary row with dataset-level means.
            row = {
                "snt_id": "avg",
                "snr": np.array(all_snrs).mean(),
                "snr_i": np.array(all_snrs_i).mean(),
                "delta_ITD": np.array(all_delta_ITDs).mean(),
                "delta_ILD": np.array(all_delta_ILDs).mean(),
            }
            writer.writerow(row)
    logger.info("Mean SNR is {}".format(np.array(all_snrs).mean()))
    logger.info("Mean SNRi is {}".format(np.array(all_snrs_i).mean()))
    logger.info(
        "Mean Delta ITD is {}".format(np.array(all_delta_ITDs).mean())
    )
    logger.info(
        "Mean Delta ILD is {}".format(np.array(all_delta_ILDs).mean())
    )
def save_audio(self, snt_id, mixture, targets, predictions):
    """Saves the test audio (mixture, targets, and estimated sources) on disk.

    Each signal is peak-normalized per channel before writing. Files are
    named ``item{id}_source{n}hat.wav`` (estimates), ``item{id}_source{n}.wav``
    (references), and ``item{id}_mix.wav``.
    """
    # Create output folder. makedirs(exist_ok=True) also creates missing
    # parents and avoids the exists()/mkdir() race of the original code.
    save_path = os.path.join(self.hparams.save_folder, "audio_results")
    os.makedirs(save_path, exist_ok=True)
    for ns in range(self.hparams.num_spks):
        # Estimated source
        signal = predictions[0, :, :, ns]
        signal = signal / signal.abs().max(0).values
        save_file = os.path.join(
            save_path, "item{}_source{}hat.wav".format(snt_id, ns + 1)
        )
        torchaudio.save(
            save_file, signal.permute(1, 0).cpu(), self.hparams.sample_rate
        )
        # Original source
        signal = targets[0, :, :, ns]
        signal = signal / signal.abs().max(0).values
        save_file = os.path.join(
            save_path, "item{}_source{}.wav".format(snt_id, ns + 1)
        )
        torchaudio.save(
            save_file, signal.permute(1, 0).cpu(), self.hparams.sample_rate
        )
    # Mixture
    signal = mixture[0][0, :]
    signal = signal / signal.abs().max(0).values
    save_file = os.path.join(save_path, "item{}_mix.wav".format(snt_id))
    torchaudio.save(
        save_file, signal.permute(1, 0).cpu(), self.hparams.sample_rate
    )
def dataio_prep(hparams):
    """Creates data processing pipeline.

    Builds train/valid/test ``DynamicItemDataset`` objects from the csv
    manifests and registers audio-loading pipelines for the mixture,
    each source, and (for noise experiments) the noise signal.
    """
    # 1. Define datasets
    train_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["train_data"],
        replacements={"data_root": hparams["data_folder"]},
    )
    valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["valid_data"],
        replacements={"data_root": hparams["data_folder"]},
    )
    test_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["test_data"],
        replacements={"data_root": hparams["data_folder"]},
    )
    datasets = [train_data, valid_data, test_data]

    # 2. Provide audio pipelines
    @sb.utils.data_pipeline.takes("mix_wav")
    @sb.utils.data_pipeline.provides("mix_sig")
    def audio_pipeline_mix(mix_wav):
        mix_sig = sb.dataio.dataio.read_audio(mix_wav)
        return mix_sig

    @sb.utils.data_pipeline.takes("s1_wav")
    @sb.utils.data_pipeline.provides("s1_sig")
    def audio_pipeline_s1(s1_wav):
        s1_sig = sb.dataio.dataio.read_audio(s1_wav)
        return s1_sig

    @sb.utils.data_pipeline.takes("s2_wav")
    @sb.utils.data_pipeline.provides("s2_sig")
    def audio_pipeline_s2(s2_wav):
        s2_sig = sb.dataio.dataio.read_audio(s2_wav)
        return s2_sig

    # Third source only exists for the 3-speaker configuration.
    if hparams["num_spks"] == 3:

        @sb.utils.data_pipeline.takes("s3_wav")
        @sb.utils.data_pipeline.provides("s3_sig")
        def audio_pipeline_s3(s3_wav):
            s3_sig = sb.dataio.dataio.read_audio(s3_wav)
            return s3_sig

    # The noise signal only exists for noise experiments.
    if "noise" in hparams["experiment_name"]:

        @sb.utils.data_pipeline.takes("noise_wav")
        @sb.utils.data_pipeline.provides("noise_sig")
        def audio_pipeline_noise(noise_wav):
            noise_sig = sb.dataio.dataio.read_audio(noise_wav)
            return noise_sig

    sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline_mix)
    sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline_s1)
    sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline_s2)
    if hparams["num_spks"] == 3:
        sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline_s3)
        sb.dataio.dataset.set_output_keys(
            datasets, ["id", "mix_sig", "s1_sig", "s2_sig", "s3_sig"]
        )
    elif "noise" in hparams["experiment_name"]:
        sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline_noise)
        sb.dataio.dataset.set_output_keys(
            datasets, ["id", "mix_sig", "s1_sig", "s2_sig", "noise_sig"]
        )
    else:
        sb.dataio.dataset.set_output_keys(
            datasets, ["id", "mix_sig", "s1_sig", "s2_sig"]
        )
    return train_data, valid_data, test_data
if __name__ == "__main__":
    # Load hyperparameters file with command-line overrides
    hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])
    with open(hparams_file) as fin:
        hparams = load_hyperpyyaml(fin, overrides)
    # Initialize ddp (useful only for multi-GPU DDP training)
    sb.utils.distributed.ddp_init_group(run_opts)
    # Create experiment directory
    sb.create_experiment_directory(
        experiment_directory=hparams["output_folder"],
        hyperparams_to_save=hparams_file,
        overrides=overrides,
    )
    # Check if wsj0_tr is set with dynamic mixing
    if hparams["dynamic_mixing"] and not os.path.exists(
        hparams["base_folder_dm"]
    ):
        print(
            "Please, specify a valid base_folder_dm folder when using dynamic mixing"
        )
        sys.exit(1)
    if not hparams["skip_prep"]:
        # Fetch and unzip the dataset-generation scripts when missing.
        if not os.path.exists(hparams["datasets_generation"]):
            print("Download Datasets Generation scripts")
            fetch(
                filename="main.zip",
                source="https://github.com/huangzj421/Binaural-WSJ0Mix/archive/refs/heads",
                savedir=hparams["data_folder"],
                save_filename="Binaural-WSJ0Mix-main.zip",
            )
            file = zipfile.ZipFile(
                os.path.join(
                    hparams["data_folder"], "Binaural-WSJ0Mix-main.zip"
                )
            )
            file.extractall(path=hparams["data_folder"])
        sys.path.append(hparams["datasets_generation"])
        # Select the generation script matching the experiment variant
        # and point data_folder at the matching sub-folder.
        if "noise" in hparams["experiment_name"]:
            from create_wav_2speakers_noise import create_binaural_wsj0mix

            hparams["data_folder"] = os.path.join(
                hparams["data_folder"], "noise"
            )
        elif "reverb" in hparams["experiment_name"]:
            from create_wav_2speakers_reverb import create_binaural_wsj0mix

            hparams["data_folder"] = os.path.join(
                hparams["data_folder"], "reverb"
            )
        elif hparams["num_spks"] == 2:
            from create_wav_2speakers import create_binaural_wsj0mix

            hparams["data_folder"] = os.path.join(
                hparams["data_folder"], "2speakers"
            )
        else:
            from create_wav_3speakers import create_binaural_wsj0mix

            hparams["data_folder"] = os.path.join(
                hparams["data_folder"], "3speakers"
            )
        # Generate the dataset only if it is not already there.
        if not os.path.exists(os.path.join(hparams["data_folder"], "wav8k")):
            print("Generate Binaural WSJ0Mix dataset automatically")
            run_on_main(
                create_binaural_wsj0mix,
                kwargs={
                    "wsj_root": hparams["wsj_root"],
                    "output_root": hparams["data_folder"],
                    "datafreqs": hparams["data_freqs"],
                    "datamodes": hparams["data_modes"],
                },
            )
    # Data preparation
    from prepare_data import prepare_binaural_wsj0mix  # noqa

    run_on_main(
        prepare_binaural_wsj0mix,
        kwargs={
            "experiment_name": hparams["experiment_name"],
            "datapath": hparams["data_folder"],
            "savepath": hparams["save_folder"],
            "n_spks": hparams["num_spks"],
            "skip_prep": hparams["skip_prep"],
            "fs": hparams["sample_rate"],
        },
    )
    # Create dataset objects
    if hparams["dynamic_mixing"]:
        from dynamic_mixing import dynamic_mix_data_prep

        # if the base_folder for dm is not processed, preprocess them
        if "processed" not in hparams["base_folder_dm"]:
            # if the processed folder already exists we just use it otherwise we do the preprocessing
            if not os.path.exists(
                os.path.normpath(hparams["base_folder_dm"]) + "_processed"
            ):
                from recipes.WSJ0Mix.meta.preprocess_dynamic_mixing import (
                    resample_folder,
                )

                print("Resampling the base folder")
                run_on_main(
                    resample_folder,
                    kwargs={
                        "input_folder": hparams["base_folder_dm"],
                        "output_folder": os.path.normpath(
                            hparams["base_folder_dm"]
                        )
                        + "_processed",
                        "fs": hparams["sample_rate"],
                        "regex": "**/*.wav",
                    },
                )
                # adjust the base_folder_dm path
                hparams["base_folder_dm"] = (
                    os.path.normpath(hparams["base_folder_dm"]) + "_processed"
                )
            else:
                print(
                    "Using the existing processed folder on the same directory as base_folder_dm"
                )
                hparams["base_folder_dm"] = (
                    os.path.normpath(hparams["base_folder_dm"]) + "_processed"
                )
        # Prepare dictionary with hparams for dynamic mixing
        dm_hparams = {
            "train_data": hparams["train_data"],
            "data_folder": hparams["data_folder"],
            "base_folder_dm": hparams["base_folder_dm"],
            "sample_rate": hparams["sample_rate"],
            "num_spks": hparams["num_spks"],
            "training_signal_len": hparams["training_signal_len"],
            "dataloader_opts": hparams["dataloader_opts"],
            "hrtf_wav_path": hparams["hrtf_wav_path"],
        }
        train_data = dynamic_mix_data_prep(dm_hparams)
        _, valid_data, test_data = dataio_prep(hparams)
    else:
        train_data, valid_data, test_data = dataio_prep(hparams)
    # Load pretrained model if pretrained_separator is present in the yaml
    if "pretrained_separator" in hparams:
        run_on_main(hparams["pretrained_separator"].collect_files)
        hparams["pretrained_separator"].load_collected()
    # Brain class initialization
    separator = Separation(
        modules=hparams["modules"],
        opt_class=hparams["optimizer"],
        hparams=hparams,
        run_opts=run_opts,
        checkpointer=hparams["checkpointer"],
    )
    # re-initialize the parameters if we don't use a pretrained model
    if "pretrained_separator" not in hparams:
        for module in separator.modules.values():
            separator.reset_layer_recursively(module)
    if not hparams["test_only"]:
        # Training
        separator.fit(
            separator.hparams.epoch_counter,
            train_data,
            valid_data,
            train_loader_kwargs=hparams["dataloader_opts"],
            valid_loader_kwargs=hparams["dataloader_opts"],
        )
    # Eval
    separator.evaluate(test_data, min_key="snr")
    separator.save_results(test_data)
| 32,509 | 37.023392 | 113 | py |
speechbrain | speechbrain-main/recipes/KsponSpeech/ksponspeech_prepare.py | """
Data preparation.
Download: https://aihub.or.kr/aidata/105/download
Author
------
Dongwon Kim, Dongwoo Kim 2021
"""
import csv
import logging
import os
import re
import torchaudio
from speechbrain.dataio.dataio import load_pkl, merge_csvs, save_pkl
from speechbrain.utils.data_utils import get_all_files
logger = logging.getLogger(__name__)
OPT_FILE = "opt_ksponspeech_prepare.pkl"
SAMPLERATE = 16000
def prepare_ksponspeech(
    data_folder,
    save_folder,
    tr_splits=[],
    dev_splits=[],
    te_splits=[],
    select_n_sentences=None,
    merge_lst=[],
    merge_name=None,
    skip_prep=False,
):
    """
    This class prepares the csv files for the KsponSpeech dataset.
    Download link: https://aihub.or.kr/aidata/105/download

    Arguments
    ---------
    data_folder : str
        Path to the folder where the original KsponSpeech dataset is stored.
    save_folder : str
        The directory where to store the csv files.
    tr_splits : list
        List of train splits to prepare from ['train', 'dev', 'eval_clean',
        'eval_other'].
    dev_splits : list
        List of dev splits to prepare from ['dev'].
    te_splits : list
        List of test splits to prepare from ['eval_clean','eval_other'].
    select_n_sentences : int
        Default : None
        If not None, only pick this many sentences.
    merge_lst : list
        List of KsponSpeech splits (e.g, eval_clean, eval_other) to
        merge in a singe csv file.
    merge_name: str
        Name of the merged csv file.
    skip_prep: bool
        If True, data preparation is skipped.

    Example
    -------
    >>> data_folder = 'datasets/KsponSpeech'
    >>> tr_splits = ['train']
    >>> dev_splits = ['dev']
    >>> te_splits = ['eval_clean']
    >>> save_folder = 'KsponSpeech_prepared'
    >>> prepare_ksponspeech(data_folder, save_folder, tr_splits, dev_splits, \
                            te_splits)
    """
    # NOTE: the mutable list defaults are safe here because they are
    # only read, never mutated.
    if skip_prep:
        return
    splits = tr_splits + dev_splits + te_splits
    conf = {
        "select_n_sentences": select_n_sentences,
    }
    # Saving folder
    if not os.path.exists(save_folder):
        os.makedirs(save_folder)
    save_opt = os.path.join(save_folder, OPT_FILE)
    # Check if this phase is already done (if so, skip it)
    if skip(splits, save_folder, conf):
        logger.info("Skipping preparation, completed in previous run.")
        return
    else:
        logger.info("Data_preparation...")
    # Additional checks to make sure the data folder contains ksponspeech
    check_ksponspeech_folders(data_folder, splits)
    # Parse the trn transcription file of each split and build its csv.
    all_texts = {}
    for split_index, split in enumerate(splits):
        dirlist = split2dirs(split)
        wav_lst = []
        for dir in dirlist:
            wav_lst += get_all_files(
                os.path.join(data_folder, dir), match_and=[".wav"]
            )
        trnpath = os.path.join(data_folder, split + ".trn")
        text_dict = text_to_dict(trnpath)
        all_texts.update(text_dict)
        if select_n_sentences is not None:
            n_sentences = select_n_sentences[split_index]
        else:
            n_sentences = len(wav_lst)
        create_csv(
            save_folder, wav_lst, text_dict, split, n_sentences,
        )
    # Merging csv file if needed
    if merge_lst and merge_name is not None:
        merge_files = [split_kspon + ".csv" for split_kspon in merge_lst]
        merge_csvs(
            data_folder=save_folder, csv_lst=merge_files, merged_csv=merge_name,
        )
    # saving options
    save_pkl(conf, save_opt)
def create_csv(
    save_folder, wav_lst, text_dict, split, select_n_sentences,
):
    """
    Create the dataset csv file given a list of wav files.

    Arguments
    ---------
    save_folder : str
        Location of the folder for storing the csv.
    wav_lst : list
        The list of wav files of a given data split.
    text_dict : dict
        The dictionary containing the text of each sentence.
    split : str
        The name of the current data split.
    select_n_sentences : int, optional
        The number of sentences to select.

    Returns
    -------
    None
    """
    # Setting path for the csv file
    csv_file = os.path.join(save_folder, split + ".csv")
    # Preliminary prints
    msg = "Creating csv lists in %s..." % (csv_file)
    logger.info(msg)
    csv_lines = [["ID", "duration", "wav", "spk_id", "wrd"]]
    snt_cnt = 0
    # Processing all the wav files in wav_lst
    for wav_file in wav_lst:
        # basename (instead of split("/")) keeps the ID correct on any OS.
        snt_id = os.path.basename(wav_file).replace(".wav", "")
        spk_id = snt_id.split("_")[-1]
        wrds = text_dict[snt_id]
        duration = torchaudio.info(wav_file).num_frames / SAMPLERATE
        csv_line = [
            snt_id,
            str(duration),
            wav_file,
            spk_id,
            str(" ".join(wrds.split())),
        ]
        # Appending current file to the csv_lines list
        csv_lines.append(csv_line)
        snt_cnt = snt_cnt + 1
        if snt_cnt == select_n_sentences:
            break
    # Writing the csv_lines. newline="" is required by the csv module
    # (avoids blank rows on Windows) and utf-8 keeps the Korean
    # transcriptions portable across platforms.
    with open(csv_file, mode="w", newline="", encoding="utf-8") as csv_f:
        csv_writer = csv.writer(
            csv_f, delimiter=",", quotechar='"', quoting=csv.QUOTE_MINIMAL
        )
        csv_writer.writerows(csv_lines)
    # Final print
    msg = "%s successfully created!" % (csv_file)
    logger.info(msg)
def skip(splits, save_folder, conf):
    """
    Detect when the ksponspeech data prep can be skipped.

    Arguments
    ---------
    splits : list
        A list of the splits expected in the preparation.
    save_folder : str
        The location of the save directory.
    conf : dict
        The configuration options to ensure they haven't changed.

    Returns
    -------
    bool
        if True, the preparation phase can be skipped.
        if False, it must be done.
    """
    # Every per-split csv must already exist.
    csvs_ready = all(
        os.path.isfile(os.path.join(save_folder, split + ".csv"))
        for split in splits
    )
    if not csvs_ready:
        return False
    # The saved options must exist and match the current configuration.
    save_opt = os.path.join(save_folder, OPT_FILE)
    if not os.path.isfile(save_opt):
        return False
    return load_pkl(save_opt) == conf
def text_to_dict(trnpath):
    """
    This converts lines of text into a dictionary.

    Arguments
    ---------
    trnpath : str
        Path to the file containing the ksponspeech text transcription.
        Each line is expected as ``<pcm path> :: <raw script>``.

    Returns
    -------
    dict
        The dictionary containing the normalized text transcriptions,
        keyed by utterance id (pcm filename without extension).
    """
    # Initialization of the text dictionary
    text_dict = {}
    # Reading all the transcription files is text_lst
    with open(trnpath, "r") as f:
        # Reading all line of the transcription file
        for line in f:
            filename, raw_script = line.split(" :: ")
            file_id = filename.split("/")[-1].replace(".pcm", "")
            script = normalize(raw_script)
            text_dict[file_id] = script
    return text_dict
def normalize(string):
    """
    Normalize a raw KsponSpeech transcription.

    The rules: keep the orthographic form of ``(ortho)/(phonetic)``
    pairs, drop non-speech symbols (n/, b/, o/, l/, u/), drop
    punctuation marks (and the +/* repetition/filler markers), and
    squeeze whitespace.

    Arguments
    ---------
    string : str
        The string to be normalized.

    Returns
    -------
    str
        The string normalized according to the rules.
    """
    rules = (
        (r"\(([^)]*)\)\/\(([^)]*)\)", r"\1"),  # orthographic transcription
        (r"n/|b/|o/|l/|u/", ""),  # non-speech symbols
        (r"[+*/.?!,]", ""),  # punctuation marks
        (r"\s+", " "),  # extra spaces
    )
    for pattern, repl in rules:
        string = re.sub(pattern, repl, string)
    return string.strip()
def split2dirs(split):
    """
    Give the directory names for a given data split.

    Arguments
    ---------
    split : str
        The split of ksponspeech data.

    Returns
    -------
    list
        A list containing directories of the given data split.
    """
    if split not in ("eval_other", "eval_clean", "train", "dev"):
        raise ValueError("Unsupported data split")
    if "eval" in split:
        return ["test/" + split]
    # Training shards 01..05 each hold 124 numbered folders; dev is the
    # tail of shard 05.
    template = "train/KsponSpeech_0{0}/KsponSpeech_{1:>04d}"
    if split == "dev":
        return [template.format(5, num) for num in range(621, 624)]
    dirs = []
    for shard in range(1, 6):
        first = 1 + (shard - 1) * 124
        dirs += [
            template.format(shard, num) for num in range(first, first + 124)
        ]
    return dirs
def check_ksponspeech_folders(data_folder, splits):
    """
    Check if the data folder actually contains the ksponspeech dataset.
    If it does not, an error is raised.

    Returns
    -------
    None

    Raises
    ------
    ValueError
        If an unsupported split name is requested.
    OSError
        If ksponspeech is not found at the specified path.
    """
    for split in splits:
        if split not in ["eval_other", "eval_clean", "train", "dev"]:
            raise ValueError("Unsupported data split")
        # eval splits ship their own trn file; the train and dev splits
        # both rely on train.trn (deduplicates the former copy-pasted
        # branches).
        trn_name = split + ".trn" if "eval" in split else "train.trn"
        trn_folder = os.path.join(data_folder, trn_name)
        if not os.path.exists(trn_folder):
            err_msg = (
                "the file %s does not exist (it is expected in the "
                "ksponspeech dataset)" % trn_folder
            )
            raise OSError(err_msg)
        if split == "train":
            # The train split additionally needs every shard directory.
            for dir in split2dirs(split):
                dir_folder = os.path.join(data_folder, dir)
                if not os.path.exists(dir_folder):
                    err_msg = (
                        "the file %s does not exist (it is expected in the "
                        "ksponspeech dataset)" % dir_folder
                    )
                    raise OSError(err_msg)
| 11,619 | 26.213115 | 80 | py |
speechbrain | speechbrain-main/recipes/KsponSpeech/LM/train.py | #!/usr/bin/env python3
"""Recipe for training a Language Model with ksponspeech train-965.2
transcript and lm_corpus.
To run this recipe, do the following:
> pip install datasets
> python train.py hparams/<hparam_file>.yaml \
--data_folder <local_path_to_librispeech_dataset>
Authors
* Jianyuan Zhong 2021
* Ju-Chieh Chou 2020
* Dongwon Kim, Dongwoo Kim 2021
"""
import sys
import logging
from pathlib import Path
import torch
from hyperpyyaml import load_hyperpyyaml
import speechbrain as sb
from speechbrain.utils.distributed import run_on_main
logger = logging.getLogger(__name__)
# Define training procedure
class LM(sb.core.Brain):
    """Brain subclass implementing the language-model training loop
    (next-token prediction with gradient accumulation)."""

    def compute_forward(self, batch, stage):
        """Forward computations from the sentence batches
        to the output probabilities."""
        batch = batch.to(self.device)
        tokens_bos, _ = batch.tokens_bos
        logits = self.hparams.model(tokens_bos)
        pred = self.hparams.log_softmax(logits)
        return pred

    def compute_objectives(self, predictions, batch, stage):
        """Computes the loss given predictions and targets."""
        batch = batch.to(self.device)
        tokens_eos, tokens_len = batch.tokens_eos
        loss = self.hparams.compute_cost(
            predictions, tokens_eos, length=tokens_len
        )
        return loss

    def fit_batch(self, batch):
        """Train the parameters given a single batch in input"""
        predictions = self.compute_forward(batch, sb.Stage.TRAIN)
        loss = self.compute_objectives(predictions, batch, sb.Stage.TRAIN)

        # Scale the loss so gradients accumulated over `accu_steps`
        # micro-batches average out.
        (loss / self.hparams.accu_steps).backward()

        if self.step % self.hparams.accu_steps == 0:
            # gradient clipping & early stop if loss is not finite
            self.check_gradients(loss)
            self.optimizer.step()
            self.optimizer.zero_grad()

            # Step-wise schedulers advance once per optimizer step.
            if isinstance(
                self.hparams.lr_annealing, sb.nnet.schedulers.NoamScheduler
            ) or isinstance(
                self.hparams.lr_annealing,
                sb.nnet.schedulers.CyclicCosineScheduler,
            ):
                self.hparams.lr_annealing(self.optimizer)

        if isinstance(
            self.hparams.train_logger, sb.utils.train_logger.TensorboardLogger
        ):
            self.hparams.train_logger.log_stats(
                stats_meta={"step": self.step}, train_stats={"loss": loss},
            )
        return loss

    def on_stage_end(self, stage, stage_loss, epoch):
        """Gets called at the end of a epoch."""
        stage_stats = {"loss": stage_loss}
        if stage == sb.Stage.TRAIN:
            self.train_stats = stage_stats

        if stage == sb.Stage.VALID and sb.utils.distributed.if_main_process():
            # Loss-driven schedulers anneal here; step-wise schedulers
            # (Noam / cyclic-cosine) were already updated in fit_batch.
            if not (
                isinstance(
                    self.hparams.lr_annealing, sb.nnet.schedulers.NoamScheduler
                )
                or isinstance(
                    self.hparams.lr_annealing,
                    sb.nnet.schedulers.CyclicCosineScheduler,
                )
            ):
                old_lr, new_lr = self.hparams.lr_annealing(stage_loss)
                sb.nnet.schedulers.update_learning_rate(self.optimizer, new_lr)
            else:
                old_lr = self.hparams.lr_annealing.current_lr

            self.hparams.train_logger.log_stats(
                stats_meta={"epoch": epoch, "lr": old_lr},
                train_stats=self.train_stats,
                valid_stats=stage_stats,
            )
            # Keep only the checkpoint with the lowest validation loss.
            self.checkpointer.save_and_keep_only(
                meta=stage_stats, min_keys=["loss"],
            )
        elif stage == sb.Stage.TEST:
            self.hparams.train_logger.log_stats(
                stats_meta={"Epoch loaded": self.hparams.epoch_counter.current},
                test_stats=stage_stats,
            )
def dataio_prepare(hparams):
    """This function prepares the datasets to be used in the brain class.
    It also defines the data processing pipeline through user-defined
    functions.

    Returns the train dataset, valid dataset, and a dict of test
    datasets keyed by csv file stem.
    """
    data_folder = hparams["data_folder"]

    train_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["train_csv"], replacements={"data_root": data_folder},
    )

    valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["valid_csv"], replacements={"data_root": data_folder},
    )

    # test is separate
    test_datasets = {}
    for csv_file in hparams["test_csv"]:
        name = Path(csv_file).stem
        test_datasets[name] = sb.dataio.dataset.DynamicItemDataset.from_csv(
            csv_path=csv_file, replacements={"data_root": data_folder}
        )

    datasets = [train_data, valid_data] + [i for k, i in test_datasets.items()]

    # We get the tokenizer as we need it to encode the labels when creating
    # mini-batches.
    tokenizer = hparams["tokenizer"]

    """Define text pipeline"""
    # TODO: implement text augmentations pipelines
    @sb.utils.data_pipeline.takes("wrd")
    @sb.utils.data_pipeline.provides("wrd", "tokens_bos", "tokens_eos")
    def text_pipeline(wrd):
        # Multi-output pipeline: raw words, then BOS-prefixed and
        # EOS-suffixed token id tensors.
        yield wrd
        tokens_list = tokenizer.encode_as_ids(wrd)
        tokens_bos = torch.LongTensor([hparams["bos_index"]] + (tokens_list))
        yield tokens_bos
        tokens_eos = torch.LongTensor(tokens_list + [hparams["eos_index"]])
        yield tokens_eos

    sb.dataio.dataset.add_dynamic_item(datasets, text_pipeline)

    # 4. Set output:
    sb.dataio.dataset.set_output_keys(
        datasets, ["id", "wrd", "tokens_bos", "tokens_eos"],
    )
    return train_data, valid_data, test_datasets
if __name__ == "__main__":
    # CLI:
    hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])
    with open(hparams_file) as fin:
        hparams = load_hyperpyyaml(fin, overrides)

    # If --distributed_launch then
    # create ddp_group with the right communication protocol
    sb.utils.distributed.ddp_init_group(run_opts)

    # Create experiment directory
    sb.create_experiment_directory(
        experiment_directory=hparams["output_folder"],
        hyperparams_to_save=hparams_file,
        overrides=overrides,
    )

    # here we create the dataloader objects as well as
    # tokenization and encoding
    train_data, valid_data, test_datasets = dataio_prepare(hparams)

    # We download the tokenizer from HuggingFace (or elsewhere depending on
    # the path given in the YAML file).
    run_on_main(hparams["pretrainer"].collect_files)
    hparams["pretrainer"].load_collected(device=run_opts["device"])

    lm_brain = LM(
        modules=hparams["modules"],
        opt_class=hparams["optimizer"],
        hparams=hparams,
        run_opts=run_opts,
        checkpointer=hparams["checkpointer"],
    )

    lm_brain.fit(
        lm_brain.hparams.epoch_counter,
        train_data,
        valid_data,
        train_loader_kwargs=hparams["train_dataloader_opts"],
        valid_loader_kwargs=hparams["valid_dataloader_opts"],
    )

    # evaluation: best checkpoint (lowest loss) on each test set
    for k in test_datasets.keys():  # keys are eval_clean, eval_other etc
        lm_brain.evaluate(
            test_datasets[k],
            min_key="loss",
            test_loader_kwargs=hparams["test_dataloader_opts"],
        )
| 7,234 | 32.967136 | 80 | py |
speechbrain | speechbrain-main/recipes/KsponSpeech/ASR/transformer/train.py | #!/usr/bin/env python3
"""Recipe for training a Transformer ASR system with KsponSpeech.
The system employs an encoder, a decoder, and an attention mechanism
between them. Decoding is performed with (CTC/Att joint) beamsearch
coupled with a neural language model.
To run this recipe, do the following:
> python train.py hparams/transformer.yaml
> python train.py hparams/conformer.yaml
With the default hyperparameters, the system employs
a convolutional frontend and a transformer.
The decoder is based on a Transformer decoder.
Beamsearch coupled with a Transformer language model is used
on the top of decoder probabilities.
The neural network is trained on both CTC and negative-log likelihood
targets and sub-word units estimated with Byte Pairwise Encoding (BPE)
are used as basic recognition tokens. Training is performed on the full
KsponSpeech dataset (965.2 h).
The best model is the average of the checkpoints from last 5 epochs.
The experiment file is flexible enough to support a large variety of
different systems. By properly changing the parameter files, you can try
different encoders, decoders, tokens (e.g, characters instead of BPE),
training split (e.g, train-clean 100 rather than the full one), and many
other possible variations.
Authors
* Jianyuan Zhong 2020
* Mirco Ravanelli 2020
* Peter Plantinga 2020
* Samuele Cornell 2020
* Titouan Parcollet 2021
* Dongwon Kim, Dongwoo Kim 2021
"""
import os
import sys
import torch
import logging
from pathlib import Path
import speechbrain as sb
from hyperpyyaml import load_hyperpyyaml
from speechbrain.utils.distributed import run_on_main
logger = logging.getLogger(__name__)
# Define training procedure
class ASR(sb.core.Brain):
    """Transformer seq2seq ASR trainer with an auxiliary CTC head.

    Training runs in two stages: Adam with Noam annealing up to
    ``hparams.stage_one_epochs``, then a switch to SGD (see
    ``check_and_reset_optimizer`` / ``on_fit_start``). Evaluation averages
    the best checkpoints (``on_evaluate_start``).
    """

    def compute_forward(self, batch, stage):
        """Forward computations from the waveform batches
        to the output probabilities.

        Returns ``(p_ctc, p_seq, wav_lens, hyps)``; ``hyps`` is ``None``
        during TRAIN and on VALID epochs without beam search.
        """
        batch = batch.to(self.device)
        wavs, wav_lens = batch.sig
        tokens_bos, _ = batch.tokens_bos
        # Add augmentation if specified
        if stage == sb.Stage.TRAIN:
            if hasattr(self.modules, "env_corrupt"):
                # Train on clean + corrupted copies: this doubles the
                # effective batch, so targets are duplicated to match
                # (see compute_objectives).
                wavs_noise = self.modules.env_corrupt(wavs, wav_lens)
                wavs = torch.cat([wavs, wavs_noise], dim=0)
                wav_lens = torch.cat([wav_lens, wav_lens])
                tokens_bos = torch.cat([tokens_bos, tokens_bos], dim=0)
        # compute features
        feats = self.hparams.compute_features(wavs)
        current_epoch = self.hparams.epoch_counter.current
        feats = self.modules.normalize(feats, wav_lens, epoch=current_epoch)
        if stage == sb.Stage.TRAIN:
            if hasattr(self.hparams, "augmentation"):
                # Feature-level augmentation, applied only during training.
                feats = self.hparams.augmentation(feats)
        # forward modules
        src = self.modules.CNN(feats)
        enc_out, pred = self.modules.Transformer(
            src, tokens_bos, wav_lens, pad_idx=self.hparams.pad_index
        )
        # output layer for ctc log-probabilities
        logits = self.modules.ctc_lin(enc_out)
        p_ctc = self.hparams.log_softmax(logits)
        # output layer for seq2seq log-probabilities
        pred = self.modules.seq_lin(pred)
        p_seq = self.hparams.log_softmax(pred)
        # Compute outputs
        hyps = None
        if stage == sb.Stage.TRAIN:
            hyps = None
        elif stage == sb.Stage.VALID:
            hyps = None
            current_epoch = self.hparams.epoch_counter.current
            if current_epoch % self.hparams.valid_search_interval == 0:
                # for the sake of efficiency, we only perform beamsearch with
                # limited capacity and no LM to give user some idea of
                # how the AM is doing
                hyps, _ = self.hparams.valid_search(enc_out.detach(), wav_lens)
        elif stage == sb.Stage.TEST:
            hyps, _ = self.hparams.test_search(enc_out.detach(), wav_lens)
        return p_ctc, p_seq, wav_lens, hyps

    def compute_objectives(self, predictions, batch, stage):
        """Computes the loss (CTC+NLL) given predictions and targets."""
        (p_ctc, p_seq, wav_lens, hyps,) = predictions
        ids = batch.id
        tokens_eos, tokens_eos_lens = batch.tokens_eos
        tokens, tokens_lens = batch.tokens
        if hasattr(self.modules, "env_corrupt") and stage == sb.Stage.TRAIN:
            # Duplicate targets to match the doubled (clean + corrupted)
            # batch built in compute_forward.
            tokens_eos = torch.cat([tokens_eos, tokens_eos], dim=0)
            tokens_eos_lens = torch.cat(
                [tokens_eos_lens, tokens_eos_lens], dim=0
            )
            tokens = torch.cat([tokens, tokens], dim=0)
            tokens_lens = torch.cat([tokens_lens, tokens_lens], dim=0)
        loss_seq = self.hparams.seq_cost(
            p_seq, tokens_eos, length=tokens_eos_lens
        )
        loss_ctc = self.hparams.ctc_cost(p_ctc, tokens, wav_lens, tokens_lens)
        # Convex combination of the CTC and seq2seq objectives.
        loss = (
            self.hparams.ctc_weight * loss_ctc
            + (1 - self.hparams.ctc_weight) * loss_seq
        )
        if stage != sb.Stage.TRAIN:
            current_epoch = self.hparams.epoch_counter.current
            valid_search_interval = self.hparams.valid_search_interval
            if current_epoch % valid_search_interval == 0 or (
                stage == sb.Stage.TEST
            ):
                # Decode token terms to words
                # NOTE(review): uses the module-level ``tokenizer`` created
                # in __main__, not ``self.tokenizer``.
                predicted_words = [
                    tokenizer.decode_ids(utt_seq).split(" ") for utt_seq in hyps
                ]
                target_words = [wrd.split(" ") for wrd in batch.wrd]
                # Whitespace-stripped character sequences for CER.
                predicted_chars = [
                    list("".join(utt_seq)) for utt_seq in predicted_words
                ]
                target_chars = [list("".join(wrd.split())) for wrd in batch.wrd]
                self.wer_metric.append(ids, predicted_words, target_words)
                self.cer_metric.append(ids, predicted_chars, target_chars)
            # compute the accuracy of the one-step-forward prediction
            self.acc_metric.append(p_seq, tokens_eos, tokens_eos_lens)
        return loss

    def fit_batch(self, batch):
        """Train the parameters given a single batch in input"""
        # check if we need to switch optimizer
        # if so change the optimizer from Adam to SGD
        self.check_and_reset_optimizer()
        predictions = self.compute_forward(batch, sb.Stage.TRAIN)
        loss = self.compute_objectives(predictions, batch, sb.Stage.TRAIN)
        # normalize the loss by gradient_accumulation step
        (loss / self.hparams.gradient_accumulation).backward()
        if self.step % self.hparams.gradient_accumulation == 0:
            # gradient clipping & early stop if loss is not finite
            self.check_gradients(loss)
            self.optimizer.step()
            self.optimizer.zero_grad()
            # anneal lr every update
            self.hparams.noam_annealing(self.optimizer)
        if isinstance(
            self.hparams.train_logger,
            sb.utils.train_logger.TensorboardLogger,
        ):
            self.hparams.train_logger.log_stats(
                stats_meta={"step": self.step}, train_stats={"loss": loss},
            )
        return loss.detach()

    def evaluate_batch(self, batch, stage):
        """Computations needed for validation/test batches"""
        with torch.no_grad():
            predictions = self.compute_forward(batch, stage=stage)
            loss = self.compute_objectives(predictions, batch, stage=stage)
        return loss.detach()

    def on_stage_start(self, stage, epoch):
        """Gets called at the beginning of each epoch"""
        if stage != sb.Stage.TRAIN:
            # Fresh metric trackers for each VALID/TEST pass.
            self.acc_metric = self.hparams.acc_computer()
            self.wer_metric = self.hparams.error_rate_computer()
            self.cer_metric = self.hparams.error_rate_computer()

    def on_stage_end(self, stage, stage_loss, epoch):
        """Gets called at the end of an epoch."""
        # Compute/store important stats
        stage_stats = {"loss": stage_loss}
        if stage == sb.Stage.TRAIN:
            self.train_stats = stage_stats
        else:
            stage_stats["ACC"] = self.acc_metric.summarize()
            current_epoch = self.hparams.epoch_counter.current
            valid_search_interval = self.hparams.valid_search_interval
            if (
                current_epoch % valid_search_interval == 0
                or stage == sb.Stage.TEST
            ):
                # WER/CER only exist on epochs where beam search ran.
                stage_stats["WER"] = self.wer_metric.summarize("error_rate")
                stage_stats["CER"] = self.cer_metric.summarize("error_rate")
        # log stats and save checkpoint at end-of-epoch
        if stage == sb.Stage.VALID and sb.utils.distributed.if_main_process():
            # report different epoch stages according current stage
            current_epoch = self.hparams.epoch_counter.current
            if current_epoch <= self.hparams.stage_one_epochs:
                lr = self.hparams.noam_annealing.current_lr
                steps = self.hparams.noam_annealing.n_steps
            else:
                # Stage two (SGD): fixed lr, no step counter.
                lr = self.hparams.lr_sgd
                steps = -1
            epoch_stats = {"epoch": epoch, "lr": lr, "steps": steps}
            self.hparams.train_logger.log_stats(
                stats_meta=epoch_stats,
                train_stats=self.train_stats,
                valid_stats=stage_stats,
            )
            self.checkpointer.save_and_keep_only(
                meta={"ACC": stage_stats["ACC"], "epoch": epoch},
                max_keys=["ACC"],
                num_to_keep=5,
            )
        elif stage == sb.Stage.TEST:
            self.hparams.train_logger.log_stats(
                stats_meta={"Epoch loaded": self.hparams.epoch_counter.current},
                test_stats=stage_stats,
            )
            # WER and CER reports go to the same file, one after the other.
            with open(self.hparams.wer_file, "w") as w:
                self.wer_metric.write_stats(w)
                self.cer_metric.write_stats(w)
            # save the averaged checkpoint at the end of the evaluation stage
            # delete the rest of the intermediate checkpoints
            # ACC is set to 1.1 so checkpointer
            # only keeps the averaged checkpoint
            self.checkpointer.save_and_keep_only(
                meta={"ACC": 1.1, "epoch": epoch},
                max_keys=["ACC"],
                num_to_keep=1,
            )

    def check_and_reset_optimizer(self):
        """Reset the optimizer (Adam -> SGD) if training enters stage 2."""
        current_epoch = self.hparams.epoch_counter.current
        if not hasattr(self, "switched"):
            self.switched = False
            # Already on SGD (e.g. resumed in stage two): nothing to do.
            if isinstance(self.optimizer, torch.optim.SGD):
                self.switched = True
        if self.switched is True:
            return
        if current_epoch > self.hparams.stage_one_epochs:
            self.optimizer = self.hparams.SGD(self.modules.parameters())
            if self.checkpointer is not None:
                self.checkpointer.add_recoverable("optimizer", self.optimizer)
            self.switched = True

    def on_fit_start(self):
        """Initialize the right optimizer on the training start"""
        super().on_fit_start()
        # if the model is resumed from stage two, reinitialize the optimizer
        current_epoch = self.hparams.epoch_counter.current
        current_optimizer = self.optimizer
        if current_epoch > self.hparams.stage_one_epochs:
            del self.optimizer
            self.optimizer = self.hparams.SGD(self.modules.parameters())
            # Load latest checkpoint to resume training if interrupted
            if self.checkpointer is not None:
                # do not reload the weights if training is interrupted
                # right before stage 2
                # (an Adam param group has no "momentum" key, so a missing
                # key means the saved optimizer state is still Adam's)
                group = current_optimizer.param_groups[0]
                if "momentum" not in group:
                    return
                self.checkpointer.recover_if_possible(
                    device=torch.device(self.device)
                )

    def on_evaluate_start(self, max_key=None, min_key=None):
        """Perform checkpoint averaging if needed before evaluation."""
        super().on_evaluate_start()
        ckpts = self.checkpointer.find_checkpoints(
            max_key=max_key, min_key=min_key
        )
        # Average the model weights across the selected checkpoints.
        ckpt = sb.utils.checkpoints.average_checkpoints(
            ckpts, recoverable_name="model", device=self.device
        )
        self.hparams.model.load_state_dict(ckpt, strict=True)
        self.hparams.model.eval()
def dataio_prepare(hparams):
    """Prepare the datasets used by the Brain class.

    Builds the train/valid/test DynamicItemDatasets from their csv files,
    applies duration sorting, and attaches the audio and text processing
    pipelines. Returns (train_data, valid_data, test_datasets, tokenizer).
    """
    data_folder = hparams["data_folder"]

    def _load_csv(csv_path):
        # Dataset from a csv file, resolving the data_root placeholder.
        return sb.dataio.dataset.DynamicItemDataset.from_csv(
            csv_path=csv_path, replacements={"data_root": data_folder},
        )

    train_data = _load_csv(hparams["train_csv"])
    sorting = hparams["sorting"]
    if sorting in ("ascending", "descending"):
        # Sorting by duration speeds up training and improves results;
        # shuffling in the dataloader would defeat the purpose.
        train_data = train_data.filtered_sorted(
            sort_key="duration", reverse=(sorting == "descending")
        )
        hparams["train_dataloader_opts"]["shuffle"] = False
    elif sorting != "random":
        raise NotImplementedError(
            "sorting must be random, ascending or descending"
        )

    valid_data = _load_csv(hparams["valid_csv"]).filtered_sorted(
        sort_key="duration"
    )

    # Each test csv becomes its own dataset, keyed by file stem.
    test_datasets = {}
    for csv_file in hparams["test_csv"]:
        test_datasets[Path(csv_file).stem] = _load_csv(
            csv_file
        ).filtered_sorted(sort_key="duration")

    datasets = [train_data, valid_data] + list(test_datasets.values())

    # The tokenizer is needed to encode the labels when creating
    # mini-batches.
    tokenizer = hparams["tokenizer"]

    # 2. Audio pipeline: wav path -> waveform tensor.
    @sb.utils.data_pipeline.takes("wav")
    @sb.utils.data_pipeline.provides("sig")
    def audio_pipeline(wav):
        return sb.dataio.dataio.read_audio(wav)

    sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline)

    # 3. Text pipeline: words -> token ids (+ BOS/EOS variants).
    @sb.utils.data_pipeline.takes("wrd")
    @sb.utils.data_pipeline.provides(
        "wrd", "tokens_list", "tokens_bos", "tokens_eos", "tokens"
    )
    def text_pipeline(wrd):
        yield wrd
        tokens_list = tokenizer.encode_as_ids(wrd)
        yield tokens_list
        yield torch.LongTensor([hparams["bos_index"]] + tokens_list)
        yield torch.LongTensor(tokens_list + [hparams["eos_index"]])
        yield torch.LongTensor(tokens_list)

    sb.dataio.dataset.add_dynamic_item(datasets, text_pipeline)

    # 4. Declare which keys every batch exposes.
    sb.dataio.dataset.set_output_keys(
        datasets, ["id", "sig", "wrd", "tokens_bos", "tokens_eos", "tokens"],
    )
    return train_data, valid_data, test_datasets, tokenizer
if __name__ == "__main__":
    # CLI:
    hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])
    with open(hparams_file) as fin:
        hparams = load_hyperpyyaml(fin, overrides)
    # If --distributed_launch then
    # create ddp_group with the right communication protocol
    sb.utils.distributed.ddp_init_group(run_opts)
    # 1. # Dataset prep (parsing KsponSpeech)
    from ksponspeech_prepare import prepare_ksponspeech # noqa
    # Create experiment directory
    sb.create_experiment_directory(
        experiment_directory=hparams["output_folder"],
        hyperparams_to_save=hparams_file,
        overrides=overrides,
    )
    # multi-gpu (ddp) save data preparation: run once on the main process
    run_on_main(
        prepare_ksponspeech,
        kwargs={
            "data_folder": hparams["data_folder"],
            "tr_splits": hparams["train_splits"],
            "dev_splits": hparams["dev_splits"],
            "te_splits": hparams["test_splits"],
            "save_folder": hparams["data_folder"],
            "merge_lst": hparams["train_splits"],
            "merge_name": hparams["train_csv"],
            "skip_prep": hparams["skip_prep"],
        },
    )
    # here we create the datasets objects as well as tokenization and encoding
    # NOTE: ``tokenizer`` is also read as a module-level global by
    # ASR.compute_objectives.
    train_data, valid_data, test_datasets, tokenizer = dataio_prepare(hparams)
    # Download pretrained components (tokenizer/LM) declared in the yaml.
    run_on_main(hparams["pretrainer"].collect_files)
    hparams["pretrainer"].load_collected(device=run_opts["device"])
    # Trainer initialization
    asr_brain = ASR(
        modules=hparams["modules"],
        opt_class=hparams["Adam"],
        hparams=hparams,
        run_opts=run_opts,
        checkpointer=hparams["checkpointer"],
    )
    # adding objects to trainer:
    asr_brain.tokenizer = hparams["tokenizer"]
    # Training
    asr_brain.fit(
        asr_brain.hparams.epoch_counter,
        train_data,
        valid_data,
        train_loader_kwargs=hparams["train_dataloader_opts"],
        valid_loader_kwargs=hparams["valid_dataloader_opts"],
    )
    # Testing: one WER report per test split
    for k in test_datasets.keys(): # keys are eval_clean, eval_other etc
        asr_brain.hparams.wer_file = os.path.join(
            hparams["output_folder"], "wer_{}.txt".format(k)
        )
        asr_brain.evaluate(
            test_datasets[k],
            max_key="ACC",
            test_loader_kwargs=hparams["test_dataloader_opts"],
        )
| 17,980 | 36.696017 | 80 | py |
speechbrain | speechbrain-main/recipes/timers-and-such/LM/train.py | #!/usr/bin/env/python3
"""
Recipe for Timers and Such LM training.
Run using:
> python train.py hparams/train.yaml
Authors
* Loren Lugosch 2020
"""
import sys
import torch
import speechbrain as sb
from hyperpyyaml import load_hyperpyyaml
from speechbrain.utils.distributed import run_on_main
# Define training procedure
class LM(sb.Brain):
    """Next-token language-model trainer for Timers-and-Such transcripts."""

    def compute_forward(self, batch, stage):
        """Computations from input text to next_token prob"""
        batch = batch.to(self.device)
        tokens_bos, _ = batch.tokens_bos
        tokens_eos, tokens_eos_lens = batch.tokens_eos
        # Run the LM and normalize its output into log-probabilities.
        log_probs = self.hparams.log_softmax(self.hparams.net(tokens_bos))
        # Hand the EOS targets along so compute_objectives has them.
        return log_probs, tokens_eos, tokens_eos_lens

    def compute_objectives(self, predictions, batch, stage):
        """Computes the loss (NLL)."""
        log_probs, targets, target_lens = predictions
        return self.hparams.seq_cost(log_probs, targets, length=target_lens)

    def fit_batch(self, batch):
        """Train the parameters given a single batch in input"""
        outputs = self.compute_forward(batch, sb.Stage.TRAIN)
        loss = self.compute_objectives(outputs, batch, sb.Stage.TRAIN)
        loss.backward()
        # Step only when the gradients pass the sanity check.
        if self.check_gradients(loss):
            self.optimizer.step()
        self.optimizer.zero_grad()
        self.batch_count += 1
        return loss.detach()

    def evaluate_batch(self, batch, stage):
        """Computations needed for validation/test batches"""
        outputs = self.compute_forward(batch, stage=stage)
        return self.compute_objectives(outputs, batch, stage=stage).detach()

    def on_stage_start(self, stage, epoch):
        """Gets called at the beginning of each epoch"""
        self.batch_count = 0

    def on_stage_end(self, stage, stage_loss, epoch):
        """Gets called at the end of an epoch."""
        stage_stats = {"loss": stage_loss}
        if stage == sb.Stage.TRAIN:
            self.train_stats = stage_stats
        # End-of-iteration bookkeeping: annealing, logging, checkpointing.
        if stage == sb.Stage.VALID:
            old_lr, new_lr = self.hparams.lr_annealing(stage_stats["loss"])
            sb.nnet.schedulers.update_learning_rate(self.optimizer, new_lr)
            self.hparams.train_logger.log_stats(
                stats_meta={"epoch": epoch, "lr": old_lr},
                train_stats=self.train_stats,
                valid_stats=stage_stats,
            )
            # Keep only the lowest-loss checkpoint.
            self.checkpointer.save_and_keep_only(
                meta={"loss": stage_stats["loss"]}, min_keys=["loss"],
            )
        elif stage == sb.Stage.TEST:
            self.hparams.train_logger.log_stats(
                stats_meta={"Epoch loaded": self.hparams.epoch_counter.current},
                test_stats=stage_stats,
            )
def dataio_prepare(hparams):
    """Prepare the datasets used by the brain class.

    Loads the train/valid/test csv files into DynamicItemDatasets, applies
    duration sorting, and installs the text tokenization pipeline. Returns
    (train, valid, test_real, test_synth, tokenizer).
    """
    data_folder = hparams["data_folder"]

    def _from_csv(path):
        # Dataset from a csv file, resolving the data_root placeholder.
        return sb.dataio.dataset.DynamicItemDataset.from_csv(
            csv_path=path, replacements={"data_root": data_folder},
        )

    train_data = _from_csv(hparams["csv_train"])
    sorting = hparams["sorting"]
    if sorting in ("ascending", "descending"):
        # Sorting by duration speeds up training; shuffling in the
        # dataloader would undo it, so it is disabled.
        train_data = train_data.filtered_sorted(
            sort_key="duration", reverse=(sorting == "descending")
        )
        hparams["dataloader_opts"]["shuffle"] = False
    elif sorting != "random":
        raise NotImplementedError(
            "sorting must be random, ascending or descending"
        )

    valid_data = _from_csv(hparams["csv_valid"]).filtered_sorted(
        sort_key="duration"
    )
    test_real_data = _from_csv(hparams["csv_test_real"]).filtered_sorted(
        sort_key="duration"
    )
    test_synth_data = _from_csv(hparams["csv_test_synth"]).filtered_sorted(
        sort_key="duration"
    )

    datasets = [train_data, valid_data, test_real_data, test_synth_data]
    tokenizer = hparams["tokenizer"]

    # Text pipeline: transcript -> token ids (+ BOS/EOS variants).
    @sb.utils.data_pipeline.takes("transcript")
    @sb.utils.data_pipeline.provides(
        "transcript", "token_list", "tokens_bos", "tokens_eos", "tokens"
    )
    def text_pipeline(transcript):
        yield transcript
        tokens_list = tokenizer.encode_as_ids(transcript)
        yield tokens_list
        yield torch.LongTensor([hparams["bos_index"]] + tokens_list)
        yield torch.LongTensor(tokens_list + [hparams["eos_index"]])
        yield torch.LongTensor(tokens_list)

    sb.dataio.dataset.add_dynamic_item(datasets, text_pipeline)
    sb.dataio.dataset.set_output_keys(
        datasets, ["id", "transcript", "tokens_bos", "tokens_eos", "tokens"],
    )
    return train_data, valid_data, test_real_data, test_synth_data, tokenizer
if __name__ == "__main__":
    # Load hyperparameters file with command-line overrides
    hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])
    with open(hparams_file) as fin:
        hparams = load_hyperpyyaml(fin, overrides)
    # NOTE(review): unused in this LM recipe (leftover from the SLU scripts).
    show_results_every = 100  # plots results every N iterations
    # If --distributed_launch then
    # create ddp_group with the right communication protocol
    sb.utils.distributed.ddp_init_group(run_opts)
    # 1. Dataset prep (parsing Timers and Such)
    from prepare import prepare_TAS # noqa
    # multi-gpu (ddp) save data preparation: run once on the main process
    run_on_main(
        prepare_TAS,
        kwargs={
            "data_folder": hparams["data_folder"],
            "save_folder": hparams["output_folder"],
            "train_splits": hparams["train_splits"],
            "type": "decoupled",
            "skip_prep": hparams["skip_prep"],
        },
    )
    # We download and pretrain the tokenizer
    run_on_main(hparams["pretrainer"].collect_files)
    hparams["pretrainer"].load_collected(device=run_opts["device"])
    # Create experiment directory
    sb.create_experiment_directory(
        experiment_directory=hparams["output_folder"],
        hyperparams_to_save=hparams_file,
        overrides=overrides,
    )
    # here we create the datasets objects as well as tokenization and encoding
    (
        train_set,
        valid_set,
        test_real_set,
        test_synth_set,
        tokenizer,
    ) = dataio_prepare(hparams)
    # Brain class initialization
    lm_brain = LM(
        modules=hparams["modules"],
        opt_class=hparams["opt_class"],
        hparams=hparams,
        run_opts=run_opts,
        checkpointer=hparams["checkpointer"],
    )
    # adding objects to trainer:
    lm_brain.tokenizer = tokenizer
    # Training
    lm_brain.fit(
        lm_brain.hparams.epoch_counter,
        train_set,
        valid_set,
        train_loader_kwargs=hparams["dataloader_opts"],
        valid_loader_kwargs=hparams["dataloader_opts"],
    )
    # Test on both the real and the synthetic test splits
    lm_brain.evaluate(
        test_real_set, test_loader_kwargs=hparams["dataloader_opts"]
    )
    lm_brain.evaluate(
        test_synth_set, test_loader_kwargs=hparams["dataloader_opts"]
    )
| 8,041 | 32.648536 | 83 | py |
speechbrain | speechbrain-main/recipes/timers-and-such/decoupled/train.py | #!/usr/bin/env/python3
"""
Recipe for "decoupled" (speech -> ASR -> text -> NLU -> semantics) SLU.
The NLU part is trained on the ground truth transcripts, and at test time
we use the ASR to transcribe the audio and use that transcript as the input to the NLU.
Run using:
> python train.py hparams/train.yaml
Authors
* Loren Lugosch, Mirco Ravanelli 2020
"""
import sys
import torch
import speechbrain as sb
from hyperpyyaml import load_hyperpyyaml
from speechbrain.utils.distributed import run_on_main
# Define training procedure
class SLU(sb.Brain):
    """Decoupled SLU trainer: NLU is trained on ground-truth transcripts;
    at test time the ASR transcribes the audio and its output feeds the NLU.

    NOTE(review): this class reads the module-level globals
    ``show_results_every`` and ``tokenizer`` set in ``__main__``.
    """

    def compute_forward(self, batch, stage):
        """Forward computations from the waveform batches to the output probabilities."""
        batch = batch.to(self.device)
        if stage == sb.Stage.TRAIN or stage == sb.Stage.VALID:
            # true_transcripts, true_transcript_lens = batch.transcript
            true_transcripts = batch.transcript
        else:
            # TEST: start from audio and let the ASR transcribe it.
            wavs, wav_lens = batch.sig
        tokens_bos, tokens_bos_lens = batch.tokens_bos
        # Forward pass
        if stage == sb.Stage.TRAIN or stage == sb.Stage.VALID:
            # Tokenize the ground-truth transcripts with the ASR tokenizer.
            asr_tokens = [
                self.hparams.asr_model.tokenizer.encode_as_ids(t)
                for t in true_transcripts
            ]
        else:
            words, asr_tokens = self.hparams.asr_model.transcribe_batch(
                wavs.detach(), wav_lens
            )
        # Pad examples to have same length.
        # max(len(t), 1) guards against empty transcriptions.
        asr_tokens_lens = torch.tensor([max(len(t), 1) for t in asr_tokens])
        max_length = asr_tokens_lens.max().item()
        for t in asr_tokens:
            t += [0] * (max_length - len(t))
        asr_tokens = torch.tensor([t for t in asr_tokens])
        # Convert absolute lengths to relative (fraction of max).
        asr_tokens_lens = asr_tokens_lens.float()
        asr_tokens_lens = asr_tokens_lens / asr_tokens_lens.max()
        asr_tokens, asr_tokens_lens = (
            asr_tokens.to(self.device),
            asr_tokens_lens.to(self.device),
        )
        embedded_transcripts = self.hparams.input_emb(asr_tokens)
        # SLU forward pass
        encoder_out = self.hparams.slu_enc(embedded_transcripts)
        e_in = self.hparams.output_emb(tokens_bos)
        h, _ = self.hparams.dec(e_in, encoder_out, asr_tokens_lens)
        # Output layer for seq2seq log-probabilities
        logits = self.hparams.seq_lin(h)
        p_seq = self.hparams.log_softmax(logits)
        # Compute outputs: beam search only outside TRAIN or every
        # ``show_results_every`` batches.
        if (
            stage == sb.Stage.TRAIN
            and self.batch_count % show_results_every != 0
        ):
            return p_seq, asr_tokens_lens
        else:
            p_tokens, scores = self.hparams.beam_searcher(
                encoder_out, asr_tokens_lens
            )
            return p_seq, asr_tokens_lens, p_tokens

    def compute_objectives(self, predictions, batch, stage):
        """Computes the loss (NLL) given predictions and targets."""
        # The prediction tuple has a third element (decoded tokens) only
        # when compute_forward ran beam search.
        if (
            stage == sb.Stage.TRAIN
            and self.batch_count % show_results_every != 0
        ):
            p_seq, asr_tokens_lens = predictions
        else:
            p_seq, asr_tokens_lens, predicted_tokens = predictions
        ids = batch.id
        tokens_eos, tokens_eos_lens = batch.tokens_eos
        tokens, tokens_lens = batch.tokens
        loss_seq = self.hparams.seq_cost(
            p_seq, tokens_eos, length=tokens_eos_lens
        )
        # (No ctc loss)
        loss = loss_seq
        if (stage != sb.Stage.TRAIN) or (
            self.batch_count % show_results_every == 0
        ):
            # Decode token terms to words
            # NOTE(review): uses the module-level ``tokenizer`` from __main__.
            predicted_semantics = [
                tokenizer.decode_ids(utt_seq).split(" ")
                for utt_seq in predicted_tokens
            ]
            target_semantics = [wrd.split(" ") for wrd in batch.semantics]
            # Print predicted vs. target semantics for quick inspection.
            for i in range(len(target_semantics)):
                print(" ".join(predicted_semantics[i]))
                print(" ".join(target_semantics[i]))
                print("")
            if stage != sb.Stage.TRAIN:
                self.wer_metric.append(
                    ids, predicted_semantics, target_semantics
                )
                self.cer_metric.append(
                    ids, predicted_semantics, target_semantics
                )
        return loss

    def fit_batch(self, batch):
        """Train the parameters given a single batch in input"""
        predictions = self.compute_forward(batch, sb.Stage.TRAIN)
        loss = self.compute_objectives(predictions, batch, sb.Stage.TRAIN)
        loss.backward()
        # Step only when the gradients pass the sanity check.
        if self.check_gradients(loss):
            self.optimizer.step()
        self.optimizer.zero_grad()
        self.batch_count += 1
        return loss.detach()

    def evaluate_batch(self, batch, stage):
        """Computations needed for validation/test batches"""
        predictions = self.compute_forward(batch, stage=stage)
        loss = self.compute_objectives(predictions, batch, stage=stage)
        return loss.detach()

    def on_stage_start(self, stage, epoch):
        """Gets called at the beginning of each epoch"""
        self.batch_count = 0
        if stage != sb.Stage.TRAIN:
            # Fresh metric trackers for each VALID/TEST pass.
            self.cer_metric = self.hparams.cer_computer()
            self.wer_metric = self.hparams.error_rate_computer()

    def on_stage_end(self, stage, stage_loss, epoch):
        """Gets called at the end of an epoch."""
        # Compute/store important stats
        stage_stats = {"loss": stage_loss}
        if stage == sb.Stage.TRAIN:
            self.train_stats = stage_stats
        else:
            stage_stats["CER"] = self.cer_metric.summarize("error_rate")
            stage_stats["WER"] = self.wer_metric.summarize("error_rate")
            stage_stats["SER"] = self.wer_metric.summarize("SER")
        # Perform end-of-iteration things, like annealing, logging, etc.
        if stage == sb.Stage.VALID:
            # Anneal the learning rate on the sentence error rate.
            old_lr, new_lr = self.hparams.lr_annealing(stage_stats["SER"])
            sb.nnet.schedulers.update_learning_rate(self.optimizer, new_lr)
            self.hparams.train_logger.log_stats(
                stats_meta={"epoch": epoch, "lr": old_lr},
                train_stats=self.train_stats,
                valid_stats=stage_stats,
            )
            # Keep only the best (lowest-SER) checkpoint.
            self.checkpointer.save_and_keep_only(
                meta={"SER": stage_stats["SER"]}, min_keys=["SER"],
            )
        elif stage == sb.Stage.TEST:
            self.hparams.train_logger.log_stats(
                stats_meta={"Epoch loaded": self.hparams.epoch_counter.current},
                test_stats=stage_stats,
            )
            with open(self.hparams.wer_file, "w") as w:
                self.wer_metric.write_stats(w)
def data_io_prepare(hparams):
    """Prepare the datasets used by the brain class.

    Loads all Timers-and-Such splits into DynamicItemDatasets, applies
    duration sorting, and installs the audio and semantics pipelines.
    Returns (train, valid, test_real, test_synth, all_real, tokenizer).
    """
    data_folder = hparams["data_folder"]

    def _load(csv_path):
        # Dataset from a csv file, resolving the data_root placeholder.
        return sb.dataio.dataset.DynamicItemDataset.from_csv(
            csv_path=csv_path, replacements={"data_root": data_folder},
        )

    train_data = _load(hparams["csv_train"])
    sorting = hparams["sorting"]
    if sorting in ("ascending", "descending"):
        # Sorting by duration speeds up training; shuffling in the
        # dataloader would undo it, so it is disabled.
        train_data = train_data.filtered_sorted(
            sort_key="duration", reverse=(sorting == "descending")
        )
        hparams["dataloader_opts"]["shuffle"] = False
    elif sorting != "random":
        raise NotImplementedError(
            "sorting must be random, ascending or descending"
        )

    # If we are testing on all the real data, including dev-real,
    # we shouldn't use dev-real as the validation set.
    if hparams["test_on_all_real"]:
        valid_path = hparams["csv_dev_synth"]
    else:
        valid_path = hparams["csv_dev_real"]
    valid_data = _load(valid_path).filtered_sorted(sort_key="duration")
    test_real_data = _load(hparams["csv_test_real"]).filtered_sorted(
        sort_key="duration"
    )
    test_synth_data = _load(hparams["csv_test_synth"]).filtered_sorted(
        sort_key="duration"
    )
    all_real_data = _load(hparams["csv_all_real"]).filtered_sorted(
        sort_key="duration"
    )

    datasets = [
        train_data,
        valid_data,
        test_real_data,
        test_synth_data,
        all_real_data,
    ]
    tokenizer = hparams["tokenizer"]

    # 2. Audio pipeline: wav path -> waveform tensor.
    @sb.utils.data_pipeline.takes("wav")
    @sb.utils.data_pipeline.provides("sig")
    def audio_pipeline(wav):
        return sb.dataio.dataio.read_audio(wav)

    sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline)

    # 3. Text pipeline: semantics string -> token ids (+ BOS/EOS variants).
    @sb.utils.data_pipeline.takes("semantics")
    @sb.utils.data_pipeline.provides(
        "semantics", "token_list", "tokens_bos", "tokens_eos", "tokens"
    )
    def text_pipeline(semantics):
        yield semantics
        tokens_list = tokenizer.encode_as_ids(semantics)
        yield tokens_list
        yield torch.LongTensor([hparams["bos_index"]] + tokens_list)
        yield torch.LongTensor(tokens_list + [hparams["eos_index"]])
        yield torch.LongTensor(tokens_list)

    sb.dataio.dataset.add_dynamic_item(datasets, text_pipeline)

    # 4. Declare which keys every batch exposes.
    sb.dataio.dataset.set_output_keys(
        datasets,
        [
            "id",
            "sig",
            "transcript",
            "semantics",
            "tokens_bos",
            "tokens_eos",
            "tokens",
        ],
    )
    return (
        train_data,
        valid_data,
        test_real_data,
        test_synth_data,
        all_real_data,
        tokenizer,
    )
if __name__ == "__main__":
    # Load hyperparameters file with command-line overrides
    hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])
    with open(hparams_file) as fin:
        hparams = load_hyperpyyaml(fin, overrides)
    # Module-level global read by SLU.compute_forward/compute_objectives.
    show_results_every = 100  # plots results every N iterations
    # If --distributed_launch then
    # create ddp_group with the right communication protocol
    sb.utils.distributed.ddp_init_group(run_opts)
    # Create experiment directory
    sb.create_experiment_directory(
        experiment_directory=hparams["output_folder"],
        hyperparams_to_save=hparams_file,
        overrides=overrides,
    )
    # Dataset prep (parsing TAS)
    from prepare import prepare_TAS # noqa
    # multi-gpu (ddp) save data preparation: run once on the main process
    run_on_main(
        prepare_TAS,
        kwargs={
            "data_folder": hparams["data_folder"],
            "save_folder": hparams["output_folder"],
            "train_splits": hparams["train_splits"],
            "type": "decoupled",
            "skip_prep": hparams["skip_prep"],
        },
    )
    # here we create the datasets objects as well as tokenization and encoding
    # NOTE: ``tokenizer`` is also read as a module-level global by
    # SLU.compute_objectives.
    (
        train_set,
        valid_set,
        test_real_set,
        test_synth_set,
        all_real_set,
        tokenizer,
    ) = data_io_prepare(hparams)
    # We download and pretrain the tokenizer
    run_on_main(hparams["pretrainer"].collect_files)
    hparams["pretrainer"].load_collected(device=run_opts["device"])
    # Brain class initialization
    slu_brain = SLU(
        modules=hparams["modules"],
        opt_class=hparams["opt_class"],
        hparams=hparams,
        run_opts=run_opts,
        checkpointer=hparams["checkpointer"],
    )
    # adding objects to trainer:
    slu_brain.tokenizer = tokenizer
    # Training
    slu_brain.fit(
        slu_brain.hparams.epoch_counter,
        train_set,
        valid_set,
        train_loader_kwargs=hparams["dataloader_opts"],
        valid_loader_kwargs=hparams["dataloader_opts"],
    )
    # Test (ALL real data)
    if slu_brain.hparams.test_on_all_real:
        slu_brain.hparams.wer_file = (
            hparams["output_folder"] + "/wer_all_real.txt"
        )
        slu_brain.evaluate(
            all_real_set,
            test_loader_kwargs=hparams["dataloader_opts"],
            min_key="SER",
        )
    # Test (real data)
    slu_brain.hparams.wer_file = hparams["output_folder"] + "/wer_test_real.txt"
    slu_brain.evaluate(
        test_real_set,
        test_loader_kwargs=hparams["dataloader_opts"],
        min_key="SER",
    )
    # Test (synth data)
    slu_brain.hparams.wer_file = (
        hparams["output_folder"] + "/wer_test_synth.txt"
    )
    slu_brain.evaluate(
        test_synth_set,
        test_loader_kwargs=hparams["dataloader_opts"],
        min_key="SER",
    )
| 13,448 | 32.125616 | 89 | py |
speechbrain | speechbrain-main/recipes/timers-and-such/direct/train_with_wav2vec2.py | #!/usr/bin/env/python3
"""
Recipe for "direct" (speech -> semantics) SLU with wav2vec2.0_based transfer learning.
We encode input waveforms into features using a wav2vec2.0 model pretrained on ASR from HuggingFace (facebook/wav2vec2-base-960h),
then feed the features into a seq2seq model to map them to semantics.
(Adapted from the LibriSpeech seq2seq ASR recipe written by Ju-Chieh Chou, Mirco Ravanelli, Abdel Heba, and Peter Plantinga.)
Run using:
> python train_with_wav2vec2.py hparams/train_with_wav2vec2.yaml
Authors
* Boumadane Abdelmoumene 2021
* Heba Abdelwahab 2021
* Lugosch Loren 2020
"""
import sys
import torch
import speechbrain as sb
import logging
from hyperpyyaml import load_hyperpyyaml
from speechbrain.utils.distributed import run_on_main
logger = logging.getLogger(__name__)
# Define training procedure
class SLU(sb.Brain):
    """Direct SLU model: a wav2vec2.0 encoder feeds an attention-based
    seq2seq decoder that emits semantic token sequences.

    Two optimizers are used (one for the wav2vec2 encoder, one for the rest
    of the model). NOTE(review): this class reads the module-level globals
    ``show_results_every`` and ``tokenizer`` defined in the ``__main__``
    section of this script.
    """
    def compute_forward(self, batch, stage):
        """Forward computations from the waveform batches to the output probabilities."""
        batch = batch.to(self.device)
        wavs, wav_lens = batch.sig
        tokens_bos, tokens_bos_lens = batch.tokens_bos
        # Add augmentation if specified
        if stage == sb.Stage.TRAIN:
            if hasattr(self.hparams, "augmentation"):
                wavs = self.hparams.augmentation(wavs, wav_lens)
        # wav2vec forward pass
        wav2vec2_out = self.modules.wav2vec2(wavs, wav_lens)
        # SLU forward pass
        encoder_out = self.hparams.slu_enc(wav2vec2_out)
        e_in = self.hparams.output_emb(tokens_bos)
        h, _ = self.hparams.dec(e_in, encoder_out, wav_lens)
        # Output layer for seq2seq log-probabilities
        logits = self.hparams.seq_lin(h)
        p_seq = self.hparams.log_softmax(logits)
        # Compute outputs
        # During training the (expensive) beam search is skipped except every
        # `show_results_every`-th batch, where decoding is run for monitoring.
        if (
            stage == sb.Stage.TRAIN
            and self.batch_count % show_results_every != 0
        ):
            return p_seq, wav_lens
        else:
            p_tokens, scores = self.hparams.beam_searcher(encoder_out, wav_lens)
            return p_seq, wav_lens, p_tokens
    def compute_objectives(self, predictions, batch, stage):
        """Computes the loss (NLL) given predictions and targets."""
        # Predictions include decoded hypotheses only when beam search was
        # run in compute_forward (see the matching condition there).
        if (
            stage == sb.Stage.TRAIN
            and self.batch_count % show_results_every != 0
        ):
            p_seq, wav_lens = predictions
        else:
            p_seq, wav_lens, predicted_tokens = predictions
        ids = batch.id
        tokens_eos, tokens_eos_lens = batch.tokens_eos
        tokens, tokens_lens = batch.tokens
        loss_seq = self.hparams.seq_cost(
            p_seq, tokens_eos, length=tokens_eos_lens
        )
        loss = loss_seq
        if (stage != sb.Stage.TRAIN) or (
            self.batch_count % show_results_every == 0
        ):
            # Decode token terms to words
            predicted_semantics = [
                tokenizer.decode_ids(utt_seq).split(" ")
                for utt_seq in predicted_tokens
            ]
            target_semantics = [wrd.split(" ") for wrd in batch.semantics]
            # Print hypothesis/reference pairs for quick visual inspection.
            for i in range(len(target_semantics)):
                print(" ".join(predicted_semantics[i]))
                print(" ".join(target_semantics[i]))
                print("")
            if stage != sb.Stage.TRAIN:
                self.wer_metric.append(
                    ids, predicted_semantics, target_semantics
                )
                self.cer_metric.append(
                    ids, predicted_semantics, target_semantics
                )
        return loss
    def fit_batch(self, batch):
        """Train the parameters given a single batch in input"""
        predictions = self.compute_forward(batch, sb.Stage.TRAIN)
        loss = self.compute_objectives(predictions, batch, sb.Stage.TRAIN)
        loss.backward()
        # Step both optimizers only if gradients are finite/sane; gradients
        # are always cleared afterwards, even when the step is skipped.
        if self.check_gradients(loss):
            self.wav2vec2_optimizer.step()
            self.optimizer.step()
        self.wav2vec2_optimizer.zero_grad()
        self.optimizer.zero_grad()
        self.batch_count += 1
        return loss.detach()
    def evaluate_batch(self, batch, stage):
        """Computations needed for validation/test batches"""
        predictions = self.compute_forward(batch, stage=stage)
        loss = self.compute_objectives(predictions, batch, stage=stage)
        return loss.detach()
    def on_stage_start(self, stage, epoch):
        """Gets called at the beginning of each epoch"""
        self.batch_count = 0
        if stage != sb.Stage.TRAIN:
            self.cer_metric = self.hparams.cer_computer()
            self.wer_metric = self.hparams.error_rate_computer()
    def on_stage_end(self, stage, stage_loss, epoch):
        """Gets called at the end of a epoch."""
        # Compute/store important stats
        stage_stats = {"loss": stage_loss}
        if stage == sb.Stage.TRAIN:
            self.train_stats = stage_stats
        else:
            stage_stats["CER"] = self.cer_metric.summarize("error_rate")
            stage_stats["WER"] = self.wer_metric.summarize("error_rate")
            stage_stats["SER"] = self.wer_metric.summarize("SER")
        # Perform end-of-iteration things, like annealing, logging, etc.
        if stage == sb.Stage.VALID:
            # Both learning rates are annealed on the sentence error rate.
            old_lr, new_lr = self.hparams.lr_annealing(stage_stats["SER"])
            (
                old_lr_wav2vec2,
                new_lr_wav2vec2,
            ) = self.hparams.lr_annealing_wav2vec2(stage_stats["SER"])
            sb.nnet.schedulers.update_learning_rate(self.optimizer, new_lr)
            sb.nnet.schedulers.update_learning_rate(
                self.wav2vec2_optimizer, new_lr_wav2vec2
            )
            self.hparams.train_logger.log_stats(
                stats_meta={
                    "epoch": epoch,
                    "lr": old_lr,
                    "wave2vec2_lr": old_lr_wav2vec2,
                },
                train_stats=self.train_stats,
                valid_stats=stage_stats,
            )
            # Keep only the checkpoint with the lowest SER.
            self.checkpointer.save_and_keep_only(
                meta={"SER": stage_stats["SER"]}, min_keys=["SER"],
            )
        elif stage == sb.Stage.TEST:
            self.hparams.train_logger.log_stats(
                stats_meta={"Epoch loaded": self.hparams.epoch_counter.current},
                test_stats=stage_stats,
            )
            with open(self.hparams.wer_file, "w") as w:
                self.wer_metric.write_stats(w)
    def init_optimizers(self):
        "Initializes the wav2vec2 optimizer and model optimizer"
        self.wav2vec2_optimizer = self.hparams.wav2vec2_opt_class(
            self.modules.wav2vec2.parameters()
        )
        self.optimizer = self.hparams.opt_class(self.hparams.model.parameters())
        # Register both optimizers so their state is saved/restored with
        # the checkpointer.
        if self.checkpointer is not None:
            self.checkpointer.add_recoverable(
                "wav2vec2_opt", self.wav2vec2_optimizer
            )
            self.checkpointer.add_recoverable("optimizer", self.optimizer)
    def zero_grad(self, set_to_none=False):
        """Clears the gradients of both optimizers."""
        self.wav2vec2_optimizer.zero_grad(set_to_none)
        self.optimizer.zero_grad(set_to_none)
def dataio_prepare(hparams):
    """Create the train/valid/test datasets and their processing pipelines.

    Builds one ``DynamicItemDataset`` per CSV manifest referenced in
    ``hparams``, applies duration-based sorting, and attaches the
    audio-loading and semantics-tokenization pipelines.

    Arguments
    ---------
    hparams : dict
        Loaded hyperparameters (CSV paths, sorting mode, tokenizer, ...).

    Returns
    -------
    tuple
        (train, valid, test_real, test_synth, all_real, tokenizer)
    """
    data_folder = hparams["data_folder"]
    replacements = {"data_root": data_folder}

    def _from_csv(csv_path):
        # Helper: build one dataset from a CSV manifest.
        return sb.dataio.dataset.DynamicItemDataset.from_csv(
            csv_path=csv_path, replacements=replacements,
        )

    train_data = _from_csv(hparams["csv_train"])
    sorting = hparams["sorting"]
    if sorting in ("ascending", "descending"):
        # Sorting the training data speeds up training and improves results;
        # shuffling in the dataloader would defeat the purpose, so turn it off.
        train_data = train_data.filtered_sorted(
            sort_key="duration", reverse=(sorting == "descending")
        )
        hparams["dataloader_opts"]["shuffle"] = False
    elif sorting != "random":
        raise NotImplementedError(
            "sorting must be random, ascending or descending"
        )

    # When evaluating on ALL real data (dev-real included), dev-real cannot
    # serve as the validation set; validate on the synthetic dev set instead.
    if hparams["test_on_all_real"]:
        valid_csv = hparams["csv_dev_synth"]
    else:
        valid_csv = hparams["csv_dev_real"]
    valid_data = _from_csv(valid_csv).filtered_sorted(sort_key="duration")
    test_real_data = _from_csv(hparams["csv_test_real"]).filtered_sorted(
        sort_key="duration"
    )
    test_synth_data = _from_csv(hparams["csv_test_synth"]).filtered_sorted(
        sort_key="duration"
    )
    all_real_data = _from_csv(hparams["csv_all_real"]).filtered_sorted(
        sort_key="duration"
    )
    datasets = [
        train_data,
        valid_data,
        test_real_data,
        test_synth_data,
        all_real_data,
    ]
    tokenizer = hparams["tokenizer"]

    # 2. Audio pipeline: read the waveform from disk.
    @sb.utils.data_pipeline.takes("wav")
    @sb.utils.data_pipeline.provides("sig")
    def audio_pipeline(wav):
        return sb.dataio.dataio.read_audio(wav)

    sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline)

    # 3. Text pipeline: tokenize the semantics string and derive the
    # BOS/EOS-augmented token tensors consumed by the seq2seq loss.
    @sb.utils.data_pipeline.takes("semantics")
    @sb.utils.data_pipeline.provides(
        "semantics", "token_list", "tokens_bos", "tokens_eos", "tokens"
    )
    def text_pipeline(semantics):
        yield semantics
        tokens_list = tokenizer.encode_as_ids(semantics)
        yield tokens_list
        yield torch.LongTensor([hparams["bos_index"]] + tokens_list)
        yield torch.LongTensor(tokens_list + [hparams["eos_index"]])
        yield torch.LongTensor(tokens_list)

    sb.dataio.dataset.add_dynamic_item(datasets, text_pipeline)

    # 4. Declare which keys each batch exposes.
    sb.dataio.dataset.set_output_keys(
        datasets,
        ["id", "sig", "semantics", "tokens_bos", "tokens_eos", "tokens"],
    )
    return (
        train_data,
        valid_data,
        test_real_data,
        test_synth_data,
        all_real_data,
        tokenizer,
    )
if __name__ == "__main__":
    # Load hyperparameters file with command-line overrides
    hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])
    with open(hparams_file) as fin:
        hparams = load_hyperpyyaml(fin, overrides)
    # Module-level global read by the SLU class above: decoding/printing of
    # hypotheses happens every N training batches.
    show_results_every = 100 # plots results every N iterations
    # If --distributed_launch then
    # create ddp_group with the right communication protocol
    sb.utils.distributed.ddp_init_group(run_opts)
    # Create experiment directory
    sb.create_experiment_directory(
        experiment_directory=hparams["output_folder"],
        hyperparams_to_save=hparams_file,
        overrides=overrides,
    )
    # Dataset prep (parsing TAS)
    from prepare import prepare_TAS # noqa
    # multi-gpu (ddp) save data preparation
    run_on_main(
        prepare_TAS,
        kwargs={
            "data_folder": hparams["data_folder"],
            "save_folder": hparams["output_folder"],
            "train_splits": hparams["train_splits"],
            "type": "direct",
            "skip_prep": hparams["skip_prep"],
        },
    )
    # here we create the datasets objects as well as tokenization and encoding
    (
        train_set,
        valid_set,
        test_real_set,
        test_synth_set,
        all_real_set,
        tokenizer,
    ) = dataio_prepare(hparams)
    # We download and pretrain the tokenizer
    run_on_main(hparams["pretrainer"].collect_files)
    hparams["pretrainer"].load_collected(device=run_opts["device"])
    # Move the wav2vec2 encoder to the target device before the Brain is built.
    hparams["wav2vec2"] = hparams["wav2vec2"].to(run_opts["device"])
    # Brain class initialization
    slu_brain = SLU(
        modules=hparams["modules"],
        opt_class=hparams["opt_class"],
        hparams=hparams,
        run_opts=run_opts,
        checkpointer=hparams["checkpointer"],
    )
    # adding objects to trainer:
    slu_brain.tokenizer = tokenizer
    # Training
    slu_brain.fit(
        slu_brain.hparams.epoch_counter,
        train_set,
        valid_set,
        train_loader_kwargs=hparams["dataloader_opts"],
        valid_loader_kwargs=hparams["dataloader_opts"],
    )
    # Test (ALL real data)
    # Each evaluation first points wer_file at a split-specific output path,
    # then loads the best (min SER) checkpoint.
    if slu_brain.hparams.test_on_all_real:
        slu_brain.hparams.wer_file = (
            hparams["output_folder"] + "/wer_all_real.txt"
        )
        slu_brain.evaluate(
            all_real_set,
            test_loader_kwargs=hparams["dataloader_opts"],
            min_key="SER",
        )
    # Test (real data)
    slu_brain.hparams.wer_file = hparams["output_folder"] + "/wer_test_real.txt"
    slu_brain.evaluate(
        test_real_set,
        test_loader_kwargs=hparams["dataloader_opts"],
        min_key="SER",
    )
    # Test (synth data)
    slu_brain.hparams.wer_file = (
        hparams["output_folder"] + "/wer_test_synth.txt"
    )
    slu_brain.evaluate(
        test_synth_set,
        test_loader_kwargs=hparams["dataloader_opts"],
        min_key="SER",
    )
| 13,849 | 32.373494 | 130 | py |
speechbrain | speechbrain-main/recipes/timers-and-such/direct/train.py | #!/usr/bin/env/python3
"""
Recipe for "direct" (speech -> semantics) SLU with ASR-based transfer learning.
We encode input waveforms into features using a model trained on LibriSpeech,
then feed the features into a seq2seq model to map them to semantics.
(Adapted from the LibriSpeech seq2seq ASR recipe written by Ju-Chieh Chou, Mirco Ravanelli, Abdel Heba, and Peter Plantinga.)
Run using:
> python train.py hparams/train.yaml
Authors
* Loren Lugosch 2020
* Mirco Ravanelli 2020
"""
import sys
import torch
import speechbrain as sb
import logging
from hyperpyyaml import load_hyperpyyaml
from speechbrain.utils.distributed import run_on_main
logger = logging.getLogger(__name__)
# Define training procedure
class SLU(sb.Brain):
    """Direct SLU model: a frozen LibriSpeech ASR encoder provides features
    for an attention-based seq2seq decoder that emits semantic tokens.

    NOTE(review): this class reads the module-level globals
    ``show_results_every`` and ``tokenizer`` defined in the ``__main__``
    section of this script.
    """
    def compute_forward(self, batch, stage):
        """Forward computations from the waveform batches to the output probabilities."""
        batch = batch.to(self.device)
        wavs, wav_lens = batch.sig
        tokens_bos, tokens_bos_lens = batch.tokens_bos
        # Add augmentation if specified
        if stage == sb.Stage.TRAIN:
            if hasattr(self.hparams, "env_corrupt"):
                # Environmental corruption doubles the batch: clean + noisy
                # copies, so targets must be doubled to match (see
                # compute_objectives).
                wavs_noise = self.hparams.env_corrupt(wavs, wav_lens)
                wavs = torch.cat([wavs, wavs_noise], dim=0)
                wav_lens = torch.cat([wav_lens, wav_lens])
                tokens_bos = torch.cat([tokens_bos, tokens_bos], dim=0)
                tokens_bos_lens = torch.cat([tokens_bos_lens, tokens_bos_lens])
            if hasattr(self.hparams, "augmentation"):
                wavs = self.hparams.augmentation(wavs, wav_lens)
        # ASR encoder forward pass
        # The ASR encoder is frozen: no gradients flow into it.
        with torch.no_grad():
            ASR_encoder_out = self.hparams.asr_model.encode_batch(
                wavs.detach(), wav_lens
            )
        # SLU forward pass
        encoder_out = self.hparams.slu_enc(ASR_encoder_out)
        e_in = self.hparams.output_emb(tokens_bos)
        h, _ = self.hparams.dec(e_in, encoder_out, wav_lens)
        # Output layer for seq2seq log-probabilities
        logits = self.hparams.seq_lin(h)
        p_seq = self.hparams.log_softmax(logits)
        # Compute outputs
        # Beam search is skipped during training except every
        # `show_results_every`-th batch (monitoring only).
        if (
            stage == sb.Stage.TRAIN
            and self.batch_count % show_results_every != 0
        ):
            return p_seq, wav_lens
        else:
            p_tokens, scores = self.hparams.beam_searcher(encoder_out, wav_lens)
            return p_seq, wav_lens, p_tokens
    def compute_objectives(self, predictions, batch, stage):
        """Computes the loss (NLL) given predictions and targets."""
        if (
            stage == sb.Stage.TRAIN
            and self.batch_count % show_results_every != 0
        ):
            p_seq, wav_lens = predictions
        else:
            p_seq, wav_lens, predicted_tokens = predictions
        ids = batch.id
        tokens_eos, tokens_eos_lens = batch.tokens_eos
        tokens, tokens_lens = batch.tokens
        # Mirror the batch doubling done in compute_forward for env_corrupt.
        if hasattr(self.hparams, "env_corrupt") and stage == sb.Stage.TRAIN:
            tokens_eos = torch.cat([tokens_eos, tokens_eos], dim=0)
            tokens_eos_lens = torch.cat(
                [tokens_eos_lens, tokens_eos_lens], dim=0
            )
        loss_seq = self.hparams.seq_cost(
            p_seq, tokens_eos, length=tokens_eos_lens
        )
        # (No ctc loss)
        loss = loss_seq
        if (stage != sb.Stage.TRAIN) or (
            self.batch_count % show_results_every == 0
        ):
            # Decode token terms to words
            predicted_semantics = [
                tokenizer.decode_ids(utt_seq).split(" ")
                for utt_seq in predicted_tokens
            ]
            target_semantics = [wrd.split(" ") for wrd in batch.semantics]
            # Print hypothesis/reference pairs for quick visual inspection.
            for i in range(len(target_semantics)):
                print(" ".join(predicted_semantics[i]))
                print(" ".join(target_semantics[i]))
                print("")
            if stage != sb.Stage.TRAIN:
                self.wer_metric.append(
                    ids, predicted_semantics, target_semantics
                )
                self.cer_metric.append(
                    ids, predicted_semantics, target_semantics
                )
        return loss
    def fit_batch(self, batch):
        """Train the parameters given a single batch in input"""
        predictions = self.compute_forward(batch, sb.Stage.TRAIN)
        loss = self.compute_objectives(predictions, batch, sb.Stage.TRAIN)
        loss.backward()
        # Step only if gradients are finite/sane; always clear them after.
        if self.check_gradients(loss):
            self.optimizer.step()
        self.optimizer.zero_grad()
        self.batch_count += 1
        return loss.detach()
    def evaluate_batch(self, batch, stage):
        """Computations needed for validation/test batches"""
        predictions = self.compute_forward(batch, stage=stage)
        loss = self.compute_objectives(predictions, batch, stage=stage)
        return loss.detach()
    def on_stage_start(self, stage, epoch):
        """Gets called at the beginning of each epoch"""
        self.batch_count = 0
        if stage != sb.Stage.TRAIN:
            self.cer_metric = self.hparams.cer_computer()
            self.wer_metric = self.hparams.error_rate_computer()
    def on_stage_end(self, stage, stage_loss, epoch):
        """Gets called at the end of a epoch."""
        # Compute/store important stats
        stage_stats = {"loss": stage_loss}
        if stage == sb.Stage.TRAIN:
            self.train_stats = stage_stats
        else:
            stage_stats["CER"] = self.cer_metric.summarize("error_rate")
            stage_stats["WER"] = self.wer_metric.summarize("error_rate")
            stage_stats["SER"] = self.wer_metric.summarize("SER")
        # Perform end-of-iteration things, like annealing, logging, etc.
        if stage == sb.Stage.VALID:
            # Learning rate is annealed on the sentence error rate.
            old_lr, new_lr = self.hparams.lr_annealing(stage_stats["SER"])
            sb.nnet.schedulers.update_learning_rate(self.optimizer, new_lr)
            self.hparams.train_logger.log_stats(
                stats_meta={"epoch": epoch, "lr": old_lr},
                train_stats=self.train_stats,
                valid_stats=stage_stats,
            )
            # Keep only the checkpoint with the lowest SER.
            self.checkpointer.save_and_keep_only(
                meta={"SER": stage_stats["SER"]}, min_keys=["SER"],
            )
        elif stage == sb.Stage.TEST:
            self.hparams.train_logger.log_stats(
                stats_meta={"Epoch loaded": self.hparams.epoch_counter.current},
                test_stats=stage_stats,
            )
            with open(self.hparams.wer_file, "w") as w:
                self.wer_metric.write_stats(w)
def dataio_prepare(hparams):
    """Create the train/valid/test datasets and their processing pipelines.

    Builds one ``DynamicItemDataset`` per CSV manifest referenced in
    ``hparams``, applies duration-based sorting, and attaches the
    audio-loading and semantics-tokenization pipelines.

    Arguments
    ---------
    hparams : dict
        Loaded hyperparameters (CSV paths, sorting mode, tokenizer, ...).

    Returns
    -------
    tuple
        (train, valid, test_real, test_synth, all_real, tokenizer)
    """
    data_folder = hparams["data_folder"]
    replacements = {"data_root": data_folder}

    def _from_csv(csv_path):
        # Helper: build one dataset from a CSV manifest.
        return sb.dataio.dataset.DynamicItemDataset.from_csv(
            csv_path=csv_path, replacements=replacements,
        )

    train_data = _from_csv(hparams["csv_train"])
    sorting = hparams["sorting"]
    if sorting in ("ascending", "descending"):
        # Sorting the training data speeds up training and improves results;
        # shuffling in the dataloader would defeat the purpose, so turn it off.
        train_data = train_data.filtered_sorted(
            sort_key="duration", reverse=(sorting == "descending")
        )
        hparams["dataloader_opts"]["shuffle"] = False
    elif sorting != "random":
        raise NotImplementedError(
            "sorting must be random, ascending or descending"
        )

    # When evaluating on ALL real data (dev-real included), dev-real cannot
    # serve as the validation set; validate on the synthetic dev set instead.
    if hparams["test_on_all_real"]:
        valid_csv = hparams["csv_dev_synth"]
    else:
        valid_csv = hparams["csv_dev_real"]
    valid_data = _from_csv(valid_csv).filtered_sorted(sort_key="duration")
    test_real_data = _from_csv(hparams["csv_test_real"]).filtered_sorted(
        sort_key="duration"
    )
    test_synth_data = _from_csv(hparams["csv_test_synth"]).filtered_sorted(
        sort_key="duration"
    )
    all_real_data = _from_csv(hparams["csv_all_real"]).filtered_sorted(
        sort_key="duration"
    )
    datasets = [
        train_data,
        valid_data,
        test_real_data,
        test_synth_data,
        all_real_data,
    ]
    tokenizer = hparams["tokenizer"]

    # 2. Audio pipeline: read the waveform from disk.
    @sb.utils.data_pipeline.takes("wav")
    @sb.utils.data_pipeline.provides("sig")
    def audio_pipeline(wav):
        return sb.dataio.dataio.read_audio(wav)

    sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline)

    # 3. Text pipeline: tokenize the semantics string and derive the
    # BOS/EOS-augmented token tensors consumed by the seq2seq loss.
    @sb.utils.data_pipeline.takes("semantics")
    @sb.utils.data_pipeline.provides(
        "semantics", "token_list", "tokens_bos", "tokens_eos", "tokens"
    )
    def text_pipeline(semantics):
        yield semantics
        tokens_list = tokenizer.encode_as_ids(semantics)
        yield tokens_list
        yield torch.LongTensor([hparams["bos_index"]] + tokens_list)
        yield torch.LongTensor(tokens_list + [hparams["eos_index"]])
        yield torch.LongTensor(tokens_list)

    sb.dataio.dataset.add_dynamic_item(datasets, text_pipeline)

    # 4. Declare which keys each batch exposes.
    sb.dataio.dataset.set_output_keys(
        datasets,
        ["id", "sig", "semantics", "tokens_bos", "tokens_eos", "tokens"],
    )
    return (
        train_data,
        valid_data,
        test_real_data,
        test_synth_data,
        all_real_data,
        tokenizer,
    )
if __name__ == "__main__":
    # Load hyperparameters file with command-line overrides
    hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])
    with open(hparams_file) as fin:
        hparams = load_hyperpyyaml(fin, overrides)
    # Module-level global read by the SLU class above: decoding/printing of
    # hypotheses happens every N training batches.
    show_results_every = 100 # plots results every N iterations
    # If --distributed_launch then
    # create ddp_group with the right communication protocol
    sb.utils.distributed.ddp_init_group(run_opts)
    # Create experiment directory
    sb.create_experiment_directory(
        experiment_directory=hparams["output_folder"],
        hyperparams_to_save=hparams_file,
        overrides=overrides,
    )
    # Dataset prep (parsing TAS)
    from prepare import prepare_TAS # noqa
    # multi-gpu (ddp) save data preparation
    run_on_main(
        prepare_TAS,
        kwargs={
            "data_folder": hparams["data_folder"],
            "save_folder": hparams["output_folder"],
            "train_splits": hparams["train_splits"],
            "type": "direct",
            "skip_prep": hparams["skip_prep"],
        },
    )
    # here we create the datasets objects as well as tokenization and encoding
    (
        train_set,
        valid_set,
        test_real_set,
        test_synth_set,
        all_real_set,
        tokenizer,
    ) = dataio_prepare(hparams)
    # We download and pretrain the tokenizer
    run_on_main(hparams["pretrainer"].collect_files)
    hparams["pretrainer"].load_collected(device=run_opts["device"])
    # Brain class initialization
    slu_brain = SLU(
        modules=hparams["modules"],
        opt_class=hparams["opt_class"],
        hparams=hparams,
        run_opts=run_opts,
        checkpointer=hparams["checkpointer"],
    )
    # adding objects to trainer:
    slu_brain.tokenizer = tokenizer
    # Training
    slu_brain.fit(
        slu_brain.hparams.epoch_counter,
        train_set,
        valid_set,
        train_loader_kwargs=hparams["dataloader_opts"],
        valid_loader_kwargs=hparams["dataloader_opts"],
    )
    # Test (ALL real data)
    # Each evaluation first points wer_file at a split-specific output path,
    # then loads the best (min SER) checkpoint.
    if slu_brain.hparams.test_on_all_real:
        slu_brain.hparams.wer_file = (
            hparams["output_folder"] + "/wer_all_real.txt"
        )
        slu_brain.evaluate(
            all_real_set,
            test_loader_kwargs=hparams["dataloader_opts"],
            min_key="SER",
        )
    # Test (real data)
    slu_brain.hparams.wer_file = hparams["output_folder"] + "/wer_test_real.txt"
    slu_brain.evaluate(
        test_real_set,
        test_loader_kwargs=hparams["dataloader_opts"],
        min_key="SER",
    )
    # Test (synth data)
    slu_brain.hparams.wer_file = (
        hparams["output_folder"] + "/wer_test_synth.txt"
    )
    slu_brain.evaluate(
        test_synth_set,
        test_loader_kwargs=hparams["dataloader_opts"],
        min_key="SER",
    )
| 13,273 | 32.605063 | 125 | py |
speechbrain | speechbrain-main/recipes/timers-and-such/multistage/train.py | #!/usr/bin/env/python3
"""
Recipe for "multistage" (speech -> ASR -> text -> NLU -> semantics) SLU.
We transcribe each minibatch using a model trained on LibriSpeech,
then feed the transcriptions into a seq2seq model to map them to semantics.
(The transcriptions could be done offline to make training faster;
the benefit of doing it online is that we can use augmentation
and sample many possible transcriptions.)
(Adapted from the LibriSpeech seq2seq ASR recipe written by Ju-Chieh Chou, Mirco Ravanelli, Abdel Heba, and Peter Plantinga.)
Run using:
> python train.py hparams/train.yaml
Authors
* Loren Lugosch, Mirco Ravanelli 2020
"""
import sys
import torch
import speechbrain as sb
from hyperpyyaml import load_hyperpyyaml
from speechbrain.utils.distributed import run_on_main
# Define training procedure
class SLU(sb.Brain):
    """Multistage SLU model: waveforms are first transcribed online by a
    pretrained ASR model, then the transcriptions are embedded and mapped to
    semantic token sequences by an attention-based seq2seq model.

    NOTE(review): this class reads the module-level globals
    ``show_results_every`` and ``tokenizer`` defined in the ``__main__``
    section of this script.
    """
    def compute_forward(self, batch, stage):
        """Forward computations from the waveform batches to the output probabilities."""
        batch = batch.to(self.device)
        wavs, wav_lens = batch.sig
        tokens_bos, tokens_bos_lens = batch.tokens_bos
        # Add augmentation if specified
        if stage == sb.Stage.TRAIN:
            if hasattr(self.hparams, "env_corrupt"):
                # Environmental corruption doubles the batch: clean + noisy
                # copies, so targets must be doubled to match (see
                # compute_objectives).
                wavs_noise = self.hparams.env_corrupt(wavs, wav_lens)
                wavs = torch.cat([wavs, wavs_noise], dim=0)
                wav_lens = torch.cat([wav_lens, wav_lens])
                tokens_bos = torch.cat([tokens_bos, tokens_bos], dim=0)
                tokens_bos_lens = torch.cat([tokens_bos_lens, tokens_bos_lens])
            if hasattr(self.hparams, "augmentation"):
                wavs = self.hparams.augmentation(wavs, wav_lens)
        # ASR forward pass
        words, asr_tokens = self.hparams.asr_model.transcribe_batch(
            wavs.detach(), wav_lens
        )
        # Pad examples to have same length.
        # Empty transcriptions are given length 1 so the relative-length
        # normalization below never divides by zero.
        asr_tokens_lens = torch.tensor([max(len(t), 1) for t in asr_tokens])
        max_length = asr_tokens_lens.max().item()
        for t in asr_tokens:
            t += [0] * (max_length - len(t))
        asr_tokens = torch.tensor([t for t in asr_tokens])
        # Convert absolute lengths to relative lengths in (0, 1].
        asr_tokens_lens = asr_tokens_lens.float()
        asr_tokens_lens = asr_tokens_lens / asr_tokens_lens.max()
        asr_tokens, asr_tokens_lens = (
            asr_tokens.to(self.device),
            asr_tokens_lens.to(self.device),
        )
        embedded_transcripts = self.hparams.input_emb(asr_tokens)
        # SLU forward pass
        encoder_out = self.hparams.slu_enc(embedded_transcripts)
        e_in = self.hparams.output_emb(tokens_bos)
        h, _ = self.hparams.dec(e_in, encoder_out, asr_tokens_lens)
        # Output layer for seq2seq log-probabilities
        logits = self.hparams.seq_lin(h)
        p_seq = self.hparams.log_softmax(logits)
        # Compute outputs
        # Beam search is skipped during training except every
        # `show_results_every`-th batch (monitoring only).
        if (
            stage == sb.Stage.TRAIN
            and self.batch_count % show_results_every != 0
        ):
            return p_seq, asr_tokens_lens
        else:
            p_tokens, scores = self.hparams.beam_searcher(
                encoder_out, asr_tokens_lens
            )
            return p_seq, asr_tokens_lens, p_tokens
    def compute_objectives(self, predictions, batch, stage):
        """Computes the loss (NLL) given predictions and targets."""
        if (
            stage == sb.Stage.TRAIN
            and self.batch_count % show_results_every != 0
        ):
            p_seq, asr_tokens_lens = predictions
        else:
            p_seq, asr_tokens_lens, predicted_tokens = predictions
        ids = batch.id
        tokens_eos, tokens_eos_lens = batch.tokens_eos
        tokens, tokens_lens = batch.tokens
        # Mirror the batch doubling done in compute_forward for env_corrupt.
        if hasattr(self.hparams, "env_corrupt") and stage == sb.Stage.TRAIN:
            tokens_eos = torch.cat([tokens_eos, tokens_eos], dim=0)
            tokens_eos_lens = torch.cat(
                [tokens_eos_lens, tokens_eos_lens], dim=0
            )
        loss_seq = self.hparams.seq_cost(
            p_seq, tokens_eos, length=tokens_eos_lens
        )
        # (No ctc loss)
        loss = loss_seq
        if (stage != sb.Stage.TRAIN) or (
            self.batch_count % show_results_every == 0
        ):
            # Decode token terms to words
            predicted_semantics = [
                tokenizer.decode_ids(utt_seq).split(" ")
                for utt_seq in predicted_tokens
            ]
            target_semantics = [wrd.split(" ") for wrd in batch.semantics]
            # Print hypothesis/reference pairs for quick visual inspection.
            for i in range(len(target_semantics)):
                print(" ".join(predicted_semantics[i]))
                print(" ".join(target_semantics[i]))
                print("")
            if stage != sb.Stage.TRAIN:
                self.wer_metric.append(
                    ids, predicted_semantics, target_semantics
                )
                self.cer_metric.append(
                    ids, predicted_semantics, target_semantics
                )
        return loss
    def fit_batch(self, batch):
        """Train the parameters given a single batch in input"""
        predictions = self.compute_forward(batch, sb.Stage.TRAIN)
        loss = self.compute_objectives(predictions, batch, sb.Stage.TRAIN)
        loss.backward()
        # Step only if gradients are finite/sane; always clear them after.
        if self.check_gradients(loss):
            self.optimizer.step()
        self.optimizer.zero_grad()
        self.batch_count += 1
        return loss.detach()
    def evaluate_batch(self, batch, stage):
        """Computations needed for validation/test batches"""
        predictions = self.compute_forward(batch, stage=stage)
        loss = self.compute_objectives(predictions, batch, stage=stage)
        return loss.detach()
    def on_stage_start(self, stage, epoch):
        """Gets called at the beginning of each epoch"""
        self.batch_count = 0
        if stage != sb.Stage.TRAIN:
            self.cer_metric = self.hparams.cer_computer()
            self.wer_metric = self.hparams.error_rate_computer()
    def on_stage_end(self, stage, stage_loss, epoch):
        """Gets called at the end of a epoch."""
        # Compute/store important stats
        stage_stats = {"loss": stage_loss}
        if stage == sb.Stage.TRAIN:
            self.train_stats = stage_stats
        else:
            stage_stats["CER"] = self.cer_metric.summarize("error_rate")
            stage_stats["WER"] = self.wer_metric.summarize("error_rate")
            stage_stats["SER"] = self.wer_metric.summarize("SER")
        # Perform end-of-iteration things, like annealing, logging, etc.
        if stage == sb.Stage.VALID:
            # Learning rate is annealed on the sentence error rate.
            old_lr, new_lr = self.hparams.lr_annealing(stage_stats["SER"])
            sb.nnet.schedulers.update_learning_rate(self.optimizer, new_lr)
            self.hparams.train_logger.log_stats(
                stats_meta={"epoch": epoch, "lr": old_lr},
                train_stats=self.train_stats,
                valid_stats=stage_stats,
            )
            # Keep only the checkpoint with the lowest SER.
            self.checkpointer.save_and_keep_only(
                meta={"SER": stage_stats["SER"]}, min_keys=["SER"],
            )
        elif stage == sb.Stage.TEST:
            self.hparams.train_logger.log_stats(
                stats_meta={"Epoch loaded": self.hparams.epoch_counter.current},
                test_stats=stage_stats,
            )
            with open(self.hparams.wer_file, "w") as w:
                self.wer_metric.write_stats(w)
def dataio_prepare(hparams):
    """Create the train/valid/test datasets and their processing pipelines.

    Builds one ``DynamicItemDataset`` per CSV manifest referenced in
    ``hparams``, applies duration-based sorting, and attaches the
    audio-loading and semantics-tokenization pipelines.

    Arguments
    ---------
    hparams : dict
        Loaded hyperparameters (CSV paths, sorting mode, tokenizer, ...).

    Returns
    -------
    tuple
        (train, valid, test_real, test_synth, all_real, tokenizer)
    """
    data_folder = hparams["data_folder"]
    replacements = {"data_root": data_folder}

    def _from_csv(csv_path):
        # Helper: build one dataset from a CSV manifest.
        return sb.dataio.dataset.DynamicItemDataset.from_csv(
            csv_path=csv_path, replacements=replacements,
        )

    train_data = _from_csv(hparams["csv_train"])
    sorting = hparams["sorting"]
    if sorting in ("ascending", "descending"):
        # Sorting the training data speeds up training and improves results;
        # shuffling in the dataloader would defeat the purpose, so turn it off.
        train_data = train_data.filtered_sorted(
            sort_key="duration", reverse=(sorting == "descending")
        )
        hparams["dataloader_opts"]["shuffle"] = False
    elif sorting != "random":
        raise NotImplementedError(
            "sorting must be random, ascending or descending"
        )

    # When evaluating on ALL real data (dev-real included), dev-real cannot
    # serve as the validation set; validate on the synthetic dev set instead.
    if hparams["test_on_all_real"]:
        valid_csv = hparams["csv_dev_synth"]
    else:
        valid_csv = hparams["csv_dev_real"]
    valid_data = _from_csv(valid_csv).filtered_sorted(sort_key="duration")
    test_real_data = _from_csv(hparams["csv_test_real"]).filtered_sorted(
        sort_key="duration"
    )
    test_synth_data = _from_csv(hparams["csv_test_synth"]).filtered_sorted(
        sort_key="duration"
    )
    all_real_data = _from_csv(hparams["csv_all_real"]).filtered_sorted(
        sort_key="duration"
    )
    datasets = [
        train_data,
        valid_data,
        test_real_data,
        test_synth_data,
        all_real_data,
    ]
    tokenizer = hparams["tokenizer"]

    # 2. Audio pipeline: read the waveform from disk.
    @sb.utils.data_pipeline.takes("wav")
    @sb.utils.data_pipeline.provides("sig")
    def audio_pipeline(wav):
        return sb.dataio.dataio.read_audio(wav)

    sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline)

    # 3. Text pipeline: tokenize the semantics string and derive the
    # BOS/EOS-augmented token tensors consumed by the seq2seq loss.
    @sb.utils.data_pipeline.takes("semantics")
    @sb.utils.data_pipeline.provides(
        "semantics", "token_list", "tokens_bos", "tokens_eos", "tokens"
    )
    def text_pipeline(semantics):
        yield semantics
        tokens_list = tokenizer.encode_as_ids(semantics)
        yield tokens_list
        yield torch.LongTensor([hparams["bos_index"]] + tokens_list)
        yield torch.LongTensor(tokens_list + [hparams["eos_index"]])
        yield torch.LongTensor(tokens_list)

    sb.dataio.dataset.add_dynamic_item(datasets, text_pipeline)

    # 4. Declare which keys each batch exposes.
    sb.dataio.dataset.set_output_keys(
        datasets,
        ["id", "sig", "semantics", "tokens_bos", "tokens_eos", "tokens"],
    )
    return (
        train_data,
        valid_data,
        test_real_data,
        test_synth_data,
        all_real_data,
        tokenizer,
    )
if __name__ == "__main__":
    # Load hyperparameters file with command-line overrides
    hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])
    with open(hparams_file) as fin:
        hparams = load_hyperpyyaml(fin, overrides)
    # Module-level global read by the SLU class above: decoding/printing of
    # hypotheses happens every N training batches.
    show_results_every = 100 # plots results every N iterations
    # If --distributed_launch then
    # create ddp_group with the right communication protocol
    sb.utils.distributed.ddp_init_group(run_opts)
    # Create experiment directory
    sb.create_experiment_directory(
        experiment_directory=hparams["output_folder"],
        hyperparams_to_save=hparams_file,
        overrides=overrides,
    )
    # Dataset prep (parsing TAS)
    from prepare import prepare_TAS # noqa
    # multi-gpu (ddp) save data preparation
    run_on_main(
        prepare_TAS,
        kwargs={
            "data_folder": hparams["data_folder"],
            "save_folder": hparams["output_folder"],
            "train_splits": hparams["train_splits"],
            "type": "multistage",
            "skip_prep": hparams["skip_prep"],
        },
    )
    # here we create the datasets objects as well as tokenization and encoding
    (
        train_set,
        valid_set,
        test_real_set,
        test_synth_set,
        all_real_set,
        tokenizer,
    ) = dataio_prepare(hparams)
    # We download and pretrain the tokenizer
    run_on_main(hparams["pretrainer"].collect_files)
    hparams["pretrainer"].load_collected(device=run_opts["device"])
    # Brain class initialization
    slu_brain = SLU(
        modules=hparams["modules"],
        opt_class=hparams["opt_class"],
        hparams=hparams,
        run_opts=run_opts,
        checkpointer=hparams["checkpointer"],
    )
    # adding objects to trainer:
    slu_brain.tokenizer = tokenizer
    # Training
    slu_brain.fit(
        slu_brain.hparams.epoch_counter,
        train_set,
        valid_set,
        train_loader_kwargs=hparams["dataloader_opts"],
        valid_loader_kwargs=hparams["dataloader_opts"],
    )
    # Test (ALL real data)
    # Each evaluation first points wer_file at a split-specific output path,
    # then loads the best (min SER) checkpoint.
    if slu_brain.hparams.test_on_all_real:
        slu_brain.hparams.wer_file = (
            hparams["output_folder"] + "/wer_all_real.txt"
        )
        slu_brain.evaluate(
            all_real_set,
            test_loader_kwargs=hparams["dataloader_opts"],
            min_key="SER",
        )
    # Test (real data)
    slu_brain.hparams.wer_file = hparams["output_folder"] + "/wer_test_real.txt"
    slu_brain.evaluate(
        test_real_set,
        test_loader_kwargs=hparams["dataloader_opts"],
        min_key="SER",
    )
    # Test (synth data)
    slu_brain.hparams.wer_file = (
        hparams["output_folder"] + "/wer_test_synth.txt"
    )
    slu_brain.evaluate(
        test_synth_set,
        test_loader_kwargs=hparams["dataloader_opts"],
        min_key="SER",
    )
| 14,030 | 33.138686 | 117 | py |
speechbrain | speechbrain-main/recipes/VoxLingua107/lang_id/create_wds_shards.py | ################################################################################
#
# Converts the unzipped <LANG_ID>/<VIDEO---0000.000-0000.000.wav> folder
# structure of VoxLingua107 into a WebDataset format
#
# Author(s): Tanel Alumäe, Nik Vaessen
################################################################################
import json
import pathlib
import argparse
import random
import re
from collections import defaultdict
import torch
import torchaudio
import webdataset as wds
################################################################################
# methods for writing the shards
ID_SEPARATOR = "&"
def load_audio(
    audio_file_path: pathlib.Path, expected_sr: int = 16000
) -> torch.Tensor:
    """Load a waveform from disk and verify its sampling rate.

    Arguments
    ---------
    audio_file_path : pathlib.Path
        Path of the audio file to read.
    expected_sr : int
        Sampling rate (Hz) the file is required to have. Defaults to 16000,
        matching the original hard-coded behaviour.

    Returns
    -------
    torch.Tensor
        The waveform tensor as returned by ``torchaudio.load``.

    Raises
    ------
    ValueError
        If the file's sampling rate differs from ``expected_sr``.
    """
    t, sr = torchaudio.load(audio_file_path)

    if sr != expected_sr:
        # Include both rates in the message to ease debugging of bad inputs.
        raise ValueError(
            f"expected sampling rate of {expected_sr} Hz, got {sr} Hz"
        )

    return t
def write_shards(
    voxlingua_folder_path: pathlib.Path,
    shards_path: pathlib.Path,
    seed: int,
    samples_per_shard: int,
    min_dur: float,
):
    """Convert the extracted VoxLingua107 folder tree into WebDataset shards.

    Expects files laid out as
    ``<voxlingua_folder_path>/<LANG_ID>/<VIDEO---0000.000-0000.000.wav>``.
    Also writes a ``meta.json`` with per-language statistics next to the shards.

    Parameters
    ----------
    voxlingua_folder_path: folder where extracted voxceleb data is located
    shards_path: folder to write shards of data to
    seed: random seed used to initially shuffle data into shards
    samples_per_shard: number of data samples to store in each shards.
    min_dur: minimum clip duration (seconds); shorter clips are dropped.

    Raises
    ------
    ValueError
        If a wav path does not match the expected VoxLingua107 naming scheme.
    """
    # make sure output folder exist
    shards_path.mkdir(parents=True, exist_ok=True)

    # find all audio files
    audio_files = sorted(voxlingua_folder_path.rglob("*.wav"))

    # create tuples (unique_sample_id, language_id, path_to_audio_file, duration)
    data_tuples = []

    # track statistics on data
    all_language_ids = set()
    sample_keys_per_language = defaultdict(list)

    for f in audio_files:
        # path should be
        # voxlingua107_folder_path/<LANG_ID>/<VIDEO---0000.000-0000.000.wav>
        m = re.match(
            r"(.*/((.+)/.+---(\d\d\d\d\.\d\d\d)-(\d\d\d\d\.\d\d\d))\.wav)",
            f.as_posix(),
        )
        if not m:
            # BUGFIX: the original raised `Exception("..." + f)`, which is a
            # TypeError (str + pathlib.Path); format the path instead.
            raise ValueError(f"Unexpected wav name: {f}")

        loc = m.group(1)
        key = m.group(2)
        lang = m.group(3)
        start = float(m.group(4))
        end = float(m.group(5))
        dur = end - start

        # Period is not allowed in a WebDataset key name
        key = key.replace(".", "_")

        if dur > min_dur:
            # store statistics
            all_language_ids.add(lang)
            sample_keys_per_language[lang].append(key)

            data_tuples.append((key, lang, loc, dur))

    all_language_ids = sorted(all_language_ids)

    # write a meta.json file which contains statistics on the data
    # which will be written to shards
    meta_dict = {
        "language_ids": all_language_ids,
        "sample_keys_per_language": sample_keys_per_language,
        "num_data_samples": len(data_tuples),
    }

    with (shards_path / "meta.json").open("w") as f:
        json.dump(meta_dict, f)

    # shuffle the tuples so that each shard has a large variety in languages
    random.seed(seed)
    random.shuffle(data_tuples)

    # write shards
    all_keys = set()
    pattern = str(shards_path / "shard") + "-%06d.tar"

    with wds.ShardWriter(pattern, maxcount=samples_per_shard) as sink:
        for key, language_id, f, duration in data_tuples:
            # load the audio tensor
            tensor = load_audio(f)

            # verify key is unique
            assert key not in all_keys
            all_keys.add(key)

            # create sample to write
            sample = {
                "__key__": key,
                "audio.pth": tensor,
                "language_id": language_id,
            }

            # write sample to sink
            sink.write(sample)
################################################################################
# define CLI
# Positional arguments: input dataset folder and output shard folder.
# Optional arguments control shuffling seed, shard size and duration filter.

parser = argparse.ArgumentParser(
    description="Convert VoxLingua107 to WebDataset shards"
)

parser.add_argument(
    "voxlingua107_path",
    type=pathlib.Path,
    help="directory containing the (unzipped) VoxLingua107 dataset",
)

parser.add_argument(
    "shards_path", type=pathlib.Path, help="directory to write shards to"
)

parser.add_argument(
    "--seed",
    type=int,
    default=12345,
    help="random seed used for shuffling data before writing to shard",
)

parser.add_argument(
    "--samples_per_shard",
    type=int,
    default=5000,
    help="the maximum amount of samples placed in each shard. The last shard "
    "will most likely contain fewer samples.",
)

parser.add_argument(
    "--min-duration",
    type=float,
    default=3.0,
    help="Minimum duration of the audio",
)

################################################################################
# execute script

if __name__ == "__main__":
    args = parser.parse_args()

    # Delegate all the work to write_shards with the parsed CLI options.
    write_shards(
        args.voxlingua107_path,
        args.shards_path,
        args.seed,
        args.samples_per_shard,
        args.min_duration,
    )
| 5,210 | 27.47541 | 81 | py |
speechbrain | speechbrain-main/recipes/VoxLingua107/lang_id/train.py | #!/usr/bin/python3
"""Recipe for training language embeddings using the VoxLingua107 Dataset.
This recipe is heavily inspired by this: https://github.com/nikvaessen/speechbrain/tree/sharded-voxceleb/my-recipes/SpeakerRec
To run this recipe, use the following command:
> python train_lang_embeddings_wds.py {hyperparameter_file}
Using your own hyperparameter file or one of the following:
hparams/train_epaca_tdnn_wds.yaml (for the ecapa+tdnn system)
Author
* Mirco Ravanelli 2020
* Hwidong Na 2020
* Nauman Dawalatabad 2020
* Tanel Alumäe 2021
* @nikvaessen
"""
import os
import sys
import random
from typing import Dict
import json
from functools import partial
import webdataset as wds
import logging
import torch
import speechbrain as sb
from hyperpyyaml import load_hyperpyyaml
from speechbrain.dataio.batch import PaddedBatch
logger = logging.getLogger(__name__)
class LanguageBrain(sb.core.Brain):
    """Brain subclass for language-ID training on VoxLingua107 shards."""

    def compute_forward(self, batch, stage):
        """Computation pipeline based on a encoder + speaker classifier.
        Data augmentation and environmental corruption are applied to the
        input speech.

        Returns the classifier posteriors and the relative lengths.
        """
        batch = batch.to(self.device)
        wavs, lens = batch.sig

        if stage == sb.Stage.TRAIN:

            # Applying the augmentation pipeline; wavs_aug_tot collects the
            # clean signal plus each augmented copy (when concat_augment).
            wavs_aug_tot = []
            wavs_aug_tot.append(wavs)
            for count, augment in enumerate(self.hparams.augment_pipeline):

                # Apply augment
                wavs_aug = augment(wavs, lens)

                # Managing speed change: trim or zero-pad the augmented
                # signal back to the original length so tensors can be
                # concatenated below.
                if wavs_aug.shape[1] > wavs.shape[1]:
                    wavs_aug = wavs_aug[:, 0 : wavs.shape[1]]
                else:
                    zero_sig = torch.zeros_like(wavs)
                    zero_sig[:, 0 : wavs_aug.shape[1]] = wavs_aug
                    wavs_aug = zero_sig

                if self.hparams.concat_augment:
                    wavs_aug_tot.append(wavs_aug)
                else:
                    # Replace the signal in-place instead of concatenating.
                    wavs = wavs_aug
                    wavs_aug_tot[0] = wavs

            wavs = torch.cat(wavs_aug_tot, dim=0)
            # n_augment is reused in compute_objectives to replicate labels.
            self.n_augment = len(wavs_aug_tot)
            lens = torch.cat([lens] * self.n_augment)

        # Feature extraction and normalization
        feats = self.modules.compute_features(wavs)
        feats = self.modules.mean_var_norm(feats, lens)

        # Embeddings + speaker classifier
        embeddings = self.modules.embedding_model(feats, lens)
        outputs = self.modules.classifier(embeddings)
        return outputs, lens

    def compute_objectives(self, predictions, batch, stage):
        """Computes the loss using speaker-id as label.
        """
        predictions, lens = predictions
        uttid = batch.id
        langid = batch.lang_id_encoded

        # Concatenate labels (due to data augmentation)
        if stage == sb.Stage.TRAIN:
            langid = torch.cat([langid] * self.n_augment, dim=0)

        loss = self.hparams.compute_cost(predictions, langid.unsqueeze(1), lens)

        # Some schedulers (e.g. cyclic) step per-batch rather than per-epoch.
        if hasattr(self.hparams.lr_annealing, "on_batch_end"):
            self.hparams.lr_annealing.on_batch_end(self.optimizer)

        if stage != sb.Stage.TRAIN:
            self.error_metrics.append(
                uttid, predictions, langid.unsqueeze(1), lens
            )

        return loss

    def on_stage_start(self, stage, epoch=None):
        """Gets called at the beginning of an epoch."""
        if stage != sb.Stage.TRAIN:
            # Fresh error tracker for each valid/test pass.
            self.error_metrics = self.hparams.error_stats()

    def on_stage_end(self, stage, stage_loss, epoch=None):
        """Gets called at the end of an epoch."""
        # Compute/store important stats
        stage_stats = {"loss": stage_loss}
        if stage == sb.Stage.TRAIN:
            self.train_stats = stage_stats
        else:
            stage_stats["ErrorRate"] = self.error_metrics.summarize("average")

        # Perform end-of-iteration things, like annealing, logging, etc.
        if stage == sb.Stage.VALID:
            old_lr, new_lr = self.hparams.lr_annealing(epoch)
            sb.nnet.schedulers.update_learning_rate(self.optimizer, new_lr)

            self.hparams.train_logger.log_stats(
                stats_meta={"epoch": epoch, "lr": old_lr},
                train_stats=self.train_stats,
                valid_stats=stage_stats,
            )
            # Keep only the best checkpoint according to validation ErrorRate.
            self.checkpointer.save_and_keep_only(
                meta={"ErrorRate": stage_stats["ErrorRate"]},
                min_keys=["ErrorRate"],
            )
def dataio_prep_shards(hparams):
    """Build WebDataset train/valid pipelines from pre-written shards.

    Returns ``(train_data, valid_data, num_train_samples, num_valid_samples)``
    where the datasets yield dicts with keys ``sig``, ``lang_id_encoded``
    and ``id``.
    """
    # load the meta info json file (written by create_wds_shards.py)
    with wds.gopen(hparams["train_meta"], "rb") as f:
        train_meta = json.load(f)

    with wds.gopen(hparams["val_meta"], "rb") as f:
        val_meta = json.load(f)

    # define the mapping functions in the data pipeline
    # Number of samples in each training chunk (rate * seconds).
    snt_len_sample = int(hparams["sample_rate"] * hparams["sentence_len"])

    label_encoder = sb.dataio.encoder.CategoricalEncoder()
    lab_enc_file = os.path.join(hparams["save_folder"], "label_encoder.txt")
    # Label set comes from the shard metadata; encoder is persisted on disk.
    label_encoder.load_or_create(
        path=lab_enc_file,
        from_iterables=[train_meta["language_ids"]],
        output_key="lang_id",
    )

    def audio_pipeline(sample_dict: Dict, random_chunk=True):
        # Maps one raw WebDataset sample to the dict consumed by the Brain.
        key = sample_dict["__key__"]
        language_id = sample_dict["language_id"].decode("ascii")
        audio_tensor = sample_dict["audio.pth"]

        # determine what part of audio sample to use: a random fixed-length
        # chunk for training, the full signal for validation.
        audio_tensor = audio_tensor.squeeze()

        if random_chunk:
            if len(audio_tensor) - snt_len_sample - 1 <= 0:
                start = 0
            else:
                start = random.randint(
                    0, len(audio_tensor) - snt_len_sample - 1
                )

            stop = start + snt_len_sample
        else:
            start = 0
            stop = len(audio_tensor)

        sig = audio_tensor[start:stop]

        # determine the language ID of the sample
        lang_id_idx = label_encoder.encode_label(language_id)

        return {
            "sig": sig,
            "lang_id_encoded": lang_id_idx,
            "id": key,
        }

    # Training stream: infinite (repeat) + shuffled with a 1000-sample buffer.
    train_data = (
        wds.WebDataset(
            hparams["train_shards"], cache_dir=hparams["shard_cache_dir"],
        )
        .repeat()
        .shuffle(1000)
        .decode("pil")
        .map(partial(audio_pipeline, random_chunk=True))
    )
    logger.info(
        f"Training data consist of {train_meta['num_data_samples']} samples"
    )

    # Validation stream: single deterministic pass over full signals.
    valid_data = (
        wds.WebDataset(
            hparams["val_shards"], cache_dir=hparams["shard_cache_dir"],
        )
        .decode("pil")
        .map(partial(audio_pipeline, random_chunk=False))
    )
    logger.info(
        f"Validation data consist of {val_meta['num_data_samples']} samples"
    )

    return (
        train_data,
        valid_data,
        train_meta["num_data_samples"],
        val_meta["num_data_samples"],
    )
if __name__ == "__main__":
    logger.info("Starting training...")

    # This flag enables the inbuilt cudnn auto-tuner
    torch.backends.cudnn.benchmark = True

    # CLI:
    hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])

    # Initialize ddp (useful only for multi-GPU DDP training)
    sb.utils.distributed.ddp_init_group(run_opts)

    # Load hyperparameters file with command-line overrides
    with open(hparams_file) as fin:
        hparams = load_hyperpyyaml(fin, overrides)

    # Build the WebDataset pipelines and get the split sizes.
    (
        train_data,
        valid_data,
        num_train_samples,
        num_valid_samples,
    ) = dataio_prep_shards(hparams)

    # add collate_fn to dataloader options
    hparams["train_dataloader_options"]["collate_fn"] = PaddedBatch
    hparams["val_dataloader_options"]["collate_fn"] = PaddedBatch

    # WebDataset streams are infinite; define a nominal epoch length in
    # batches so that one "epoch" covers the dataset once.
    hparams["train_dataloader_options"]["looped_nominal_epoch"] = (
        num_train_samples // hparams["train_dataloader_options"]["batch_size"]
    )

    # Create experiment directory
    sb.core.create_experiment_directory(
        experiment_directory=hparams["output_folder"],
        hyperparams_to_save=hparams_file,
        overrides=overrides,
    )

    # Brain class initialization
    language_brain = LanguageBrain(
        modules=hparams["modules"],
        opt_class=hparams["opt_class"],
        hparams=hparams,
        run_opts=run_opts,
        checkpointer=hparams["checkpointer"],
    )

    # Training
    language_brain.fit(
        language_brain.hparams.epoch_counter,
        train_data,
        valid_data,
        train_loader_kwargs=hparams["train_dataloader_options"],
        valid_loader_kwargs=hparams["val_dataloader_options"],
    )
| 8,757 | 30.390681 | 126 | py |
speechbrain | speechbrain-main/recipes/SLURP/NLU/train.py | #!/usr/bin/env/python3
"""
Text-only NLU recipe. This recipes takes the golden ASR
transcriptions and tries to estimate the semantics on
the top of that.
Authors
* Loren Lugosch, Mirco Ravanelli 2020
"""
import sys
import torch
import speechbrain as sb
from hyperpyyaml import load_hyperpyyaml
from speechbrain.utils.distributed import run_on_main
import jsonlines
import ast
import pandas as pd
# Define training procedure
class SLU(sb.Brain):
    """Seq2seq NLU trainer: maps gold ASR transcripts to SLURP semantics
    (scenario / action / entities) with an encoder-decoder model.
    """

    def compute_forward(self, batch, stage):
        """Computations from input to semantic outputs"""
        batch = batch.to(self.device)
        transcript_tokens, transcript_tokens_lens = batch.transcript_tokens
        (
            semantics_tokens_bos,
            semantics_tokens_bos_lens,
        ) = batch.semantics_tokens_bos

        # Encode the (gold) transcript tokens.
        embedded_transcripts = self.hparams.input_emb(transcript_tokens)
        encoder_out = self.hparams.slu_enc(embedded_transcripts)

        # Decode semantics with teacher forcing on BOS-prefixed targets.
        e_in = self.hparams.output_emb(semantics_tokens_bos)
        h, _ = self.hparams.dec(e_in, encoder_out, transcript_tokens_lens)

        # Output layer for seq2seq log-probabilities
        logits = self.hparams.seq_lin(h)
        p_seq = self.hparams.log_softmax(logits)

        # Beam search is expensive, so during training it only runs every
        # `show_results_every` batches; it always runs at valid/test time.
        if (
            stage == sb.Stage.TRAIN
            and self.batch_count % show_results_every != 0
        ):
            return p_seq, transcript_tokens_lens
        else:
            p_tokens, scores = self.hparams.beam_searcher(
                encoder_out, transcript_tokens_lens
            )
            return p_seq, transcript_tokens_lens, p_tokens

    def compute_objectives(self, predictions, batch, stage):
        """Computes the loss (NLL) given predictions and targets."""
        if (
            stage == sb.Stage.TRAIN
            and self.batch_count % show_results_every != 0
        ):
            p_seq, transcript_tokens_lens = predictions
        else:
            p_seq, transcript_tokens_lens, predicted_tokens = predictions

        ids = batch.id
        (
            semantics_tokens_eos,
            semantics_tokens_eos_lens,
        ) = batch.semantics_tokens_eos
        semantics_tokens, semantics_tokens_lens = batch.semantics_tokens

        loss_seq = self.hparams.seq_cost(
            p_seq, semantics_tokens_eos, length=semantics_tokens_eos_lens
        )

        # (No ctc loss)
        loss = loss_seq

        if (stage != sb.Stage.TRAIN) or (
            self.batch_count % show_results_every == 0
        ):
            # Decode token terms to words
            predicted_semantics = [
                slu_tokenizer.decode_ids(utt_seq).split(" ")
                for utt_seq in predicted_tokens
            ]

            target_semantics = [wrd.split(" ") for wrd in batch.semantics]

            self.log_outputs(predicted_semantics, target_semantics)

            if stage != sb.Stage.TRAIN:
                self.wer_metric.append(
                    ids, predicted_semantics, target_semantics
                )
                self.cer_metric.append(
                    ids, predicted_semantics, target_semantics
                )

            if stage == sb.Stage.TEST:
                # write to "predictions.jsonl"
                with jsonlines.open(
                    hparams["output_folder"] + "/predictions.jsonl", mode="a"
                ) as writer:
                    # Empty prediction used when the decoded string is not a
                    # valid Python dict literal.
                    fallback = {
                        "scenario": "none",
                        "action": "none",
                        "entities": [],
                    }
                    for i in range(len(predicted_semantics)):
                        # BUGFIX: ast.literal_eval raises ValueError (not only
                        # SyntaxError) on malformed input, and can return a
                        # non-dict literal; both cases previously crashed on
                        # the item assignment below. Also avoid shadowing the
                        # builtin `dict` (the original bound it as a local).
                        try:
                            parsed = ast.literal_eval(
                                " ".join(predicted_semantics[i]).replace(
                                    "|", ","
                                )
                            )
                            if not isinstance(parsed, dict):
                                parsed = dict(fallback)
                        except (SyntaxError, ValueError):
                            parsed = dict(fallback)
                        parsed["file"] = id_to_file[ids[i]]
                        writer.write(parsed)

        return loss

    def log_outputs(self, predicted_semantics, target_semantics):
        """ TODO: log these to a file instead of stdout """
        for i in range(len(target_semantics)):
            print(" ".join(predicted_semantics[i]).replace("|", ","))
            print(" ".join(target_semantics[i]).replace("|", ","))
            print("")

    def fit_batch(self, batch):
        """Train the parameters given a single batch in input"""
        predictions = self.compute_forward(batch, sb.Stage.TRAIN)
        loss = self.compute_objectives(predictions, batch, sb.Stage.TRAIN)
        loss.backward()
        # Only step when gradients are finite / within the clip threshold.
        if self.check_gradients(loss):
            self.optimizer.step()
        self.optimizer.zero_grad()
        self.batch_count += 1
        return loss.detach()

    def evaluate_batch(self, batch, stage):
        """Computations needed for validation/test batches"""
        predictions = self.compute_forward(batch, stage=stage)
        loss = self.compute_objectives(predictions, batch, stage=stage)
        return loss.detach()

    def on_stage_start(self, stage, epoch):
        """Gets called at the beginning of each epoch"""
        self.batch_count = 0

        if stage != sb.Stage.TRAIN:
            # Fresh metric trackers for each valid/test pass.
            self.cer_metric = self.hparams.cer_computer()
            self.wer_metric = self.hparams.error_rate_computer()

    def on_stage_end(self, stage, stage_loss, epoch):
        """Gets called at the end of a epoch."""
        # Compute/store important stats
        stage_stats = {"loss": stage_loss}
        if stage == sb.Stage.TRAIN:
            self.train_stats = stage_stats
        else:
            stage_stats["CER"] = self.cer_metric.summarize("error_rate")
            stage_stats["WER"] = self.wer_metric.summarize("error_rate")

        # Perform end-of-iteration things, like annealing, logging, etc.
        if stage == sb.Stage.VALID:
            old_lr, new_lr = self.hparams.lr_annealing(stage_stats["WER"])
            sb.nnet.schedulers.update_learning_rate(self.optimizer, new_lr)
            self.hparams.train_logger.log_stats(
                stats_meta={"epoch": epoch, "lr": old_lr},
                train_stats=self.train_stats,
                valid_stats=stage_stats,
            )
            # Keep only the checkpoint with the best (lowest) WER.
            self.checkpointer.save_and_keep_only(
                meta={"WER": stage_stats["WER"]}, min_keys=["WER"],
            )
        elif stage == sb.Stage.TEST:
            self.hparams.train_logger.log_stats(
                stats_meta={"Epoch loaded": self.hparams.epoch_counter.current},
                test_stats=stage_stats,
            )
            with open(self.hparams.wer_file, "w") as w:
                self.wer_metric.write_stats(w)
def dataio_prepare(hparams):
    """This function prepares the datasets to be used in the brain class.
    It also defines the data processing pipeline through user-defined
    functions.

    Returns ``(train_data, valid_data, test_data, asr_tokenizer,
    slu_tokenizer)``.
    """
    data_folder = hparams["data_folder"]

    train_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["csv_train"], replacements={"data_root": data_folder},
    )

    if hparams["sorting"] == "ascending":
        # we sort training data to speed up training and get better results.
        train_data = train_data.filtered_sorted(sort_key="duration")
        # when sorting do not shuffle in dataloader ! otherwise is pointless
        hparams["dataloader_opts"]["shuffle"] = False

    elif hparams["sorting"] == "descending":
        train_data = train_data.filtered_sorted(
            sort_key="duration", reverse=True
        )
        # when sorting do not shuffle in dataloader ! otherwise is pointless
        hparams["dataloader_opts"]["shuffle"] = False

    elif hparams["sorting"] == "random":
        pass

    else:
        raise NotImplementedError(
            "sorting must be random, ascending or descending"
        )

    # Valid/test are always sorted ascending by duration.
    valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["csv_valid"], replacements={"data_root": data_folder},
    )
    valid_data = valid_data.filtered_sorted(sort_key="duration")

    test_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["csv_test"], replacements={"data_root": data_folder},
    )
    test_data = test_data.filtered_sorted(sort_key="duration")

    datasets = [train_data, valid_data, test_data]

    # Separate tokenizers: one for input transcripts, one for output semantics.
    asr_tokenizer = hparams["asr_tokenizer"]
    slu_tokenizer = hparams["slu_tokenizer"]

    # 2. Define input pipeline: transcript string -> token-id tensor.
    @sb.utils.data_pipeline.takes("transcript")
    @sb.utils.data_pipeline.provides("transcript", "transcript_tokens")
    def transcript_pipeline(transcript):
        yield transcript
        transcript_tokens_list = asr_tokenizer.encode_as_ids(transcript)
        transcript_tokens = torch.LongTensor(transcript_tokens_list)
        yield transcript_tokens

    sb.dataio.dataset.add_dynamic_item(datasets, transcript_pipeline)

    # 3. Define output pipeline: semantics string -> token ids plus
    # BOS-prefixed and EOS-suffixed variants for teacher forcing / loss.
    @sb.utils.data_pipeline.takes("semantics")
    @sb.utils.data_pipeline.provides(
        "semantics",
        "semantics_token_list",
        "semantics_tokens_bos",
        "semantics_tokens_eos",
        "semantics_tokens",
    )
    def semantics_pipeline(semantics):
        yield semantics
        semantics_tokens_list = slu_tokenizer.encode_as_ids(semantics)
        yield semantics_tokens_list
        semantics_tokens_bos = torch.LongTensor(
            [hparams["bos_index"]] + (semantics_tokens_list)
        )
        yield semantics_tokens_bos
        semantics_tokens_eos = torch.LongTensor(
            semantics_tokens_list + [hparams["eos_index"]]
        )
        yield semantics_tokens_eos
        semantics_tokens = torch.LongTensor(semantics_tokens_list)
        yield semantics_tokens

    sb.dataio.dataset.add_dynamic_item(datasets, semantics_pipeline)

    # 4. Set output: keys each batch exposes to the Brain.
    sb.dataio.dataset.set_output_keys(
        datasets,
        [
            "id",
            "transcript",
            "transcript_tokens",
            "semantics",
            "semantics_tokens_bos",
            "semantics_tokens_eos",
            "semantics_tokens",
        ],
    )
    return train_data, valid_data, test_data, asr_tokenizer, slu_tokenizer
if __name__ == "__main__":
    # Load hyperparameters file with command-line overrides
    hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])
    with open(hparams_file) as fin:
        hparams = load_hyperpyyaml(fin, overrides)

    show_results_every = 100  # plots results every N iterations

    # If --distributed_launch then
    # create ddp_group with the right communication protocol
    sb.utils.distributed.ddp_init_group(run_opts)

    # Create experiment directory
    sb.create_experiment_directory(
        experiment_directory=hparams["output_folder"],
        hyperparams_to_save=hparams_file,
        overrides=overrides,
    )

    # Dataset prep (parsing SLURP)
    from prepare import prepare_SLURP  # noqa

    # multi-gpu (ddp) save data preparation
    run_on_main(
        prepare_SLURP,
        kwargs={
            "data_folder": hparams["data_folder"],
            "save_folder": hparams["output_folder"],
            "train_splits": hparams["train_splits"],
            "slu_type": "decoupled",
            "skip_prep": hparams["skip_prep"],
        },
    )

    # here we create the datasets objects as well as tokenization and encoding
    (
        train_set,
        valid_set,
        test_set,
        asr_tokenizer,
        slu_tokenizer,
    ) = dataio_prepare(hparams)

    # We download and pretrain the tokenizer
    run_on_main(hparams["pretrainer"].collect_files)
    hparams["pretrainer"].load_collected(device=run_opts["device"])

    # Brain class initialization
    slu_brain = SLU(
        modules=hparams["modules"],
        opt_class=hparams["opt_class"],
        hparams=hparams,
        run_opts=run_opts,
        checkpointer=hparams["checkpointer"],
    )

    # adding objects to trainer:
    # slu_brain.tokenizer = tokenizer

    # Training
    slu_brain.fit(
        slu_brain.hparams.epoch_counter,
        train_set,
        valid_set,
        train_loader_kwargs=hparams["dataloader_opts"],
        valid_loader_kwargs=hparams["dataloader_opts"],
    )

    # Test
    # Map each utterance id back to its wav filename so predictions.jsonl can
    # reference the original file (consumed by compute_objectives at TEST).
    print("Creating id_to_file mapping...")
    id_to_file = {}
    df = pd.read_csv(hparams["csv_test"])
    for i in range(len(df)):
        id_to_file[str(df.ID[i])] = df.wav[i].split("/")[-1]

    slu_brain.hparams.wer_file = hparams["output_folder"] + "/wer_test_real.txt"
    slu_brain.evaluate(test_set, test_loader_kwargs=hparams["dataloader_opts"])
| 12,701 | 33.895604 | 96 | py |
speechbrain | speechbrain-main/recipes/SLURP/direct/train_with_wav2vec2.py | #!/usr/bin/env/python3
"""
Recipe for "direct" (speech -> semantics) SLU.
We encode input waveforms into features using the wav2vec2/HuBert model,
then feed the features into a seq2seq model to map them to semantics.
(Adapted from the LibriSpeech seq2seq ASR recipe written by Ju-Chieh Chou, Mirco Ravanelli, Abdel Heba, and Peter Plantinga.)
Run using:
> python train_with_wav2vec2.py hparams/train_with_wav2vec2.yaml
Authors
* Loren Lugosch 2020
* Mirco Ravanelli 2020
* Boumadane Abdelmoumene 2021
* AbdelWahab Heba 2021
* Yingzhi Wang 2021
For more wav2vec2/HuBERT results, please see https://arxiv.org/pdf/2111.02735.pdf
"""
import sys
import torch
import speechbrain as sb
from hyperpyyaml import load_hyperpyyaml
from speechbrain.utils.distributed import run_on_main
import jsonlines
import ast
import pandas as pd
class SLU(sb.Brain):
    """Direct SLU trainer: wav2vec2/HuBERT encoder + attentional seq2seq
    decoder mapping speech straight to SLURP semantics.
    """

    def compute_forward(self, batch, stage):
        """Forward computations from the waveform batches to the output probabilities."""
        batch = batch.to(self.device)
        wavs, wav_lens = batch.sig
        tokens_bos, tokens_bos_lens = batch.tokens_bos

        # Add augmentation if specified
        if stage == sb.Stage.TRAIN:
            if hasattr(self.hparams, "augmentation"):
                wavs = self.hparams.augmentation(wavs, wav_lens)

        # encoder forward pass
        wav2vec2_out = self.modules.wav2vec2(wavs, wav_lens)

        # SLU forward pass (teacher forcing with BOS-prefixed targets)
        e_in = self.hparams.output_emb(tokens_bos)
        h, _ = self.hparams.dec(e_in, wav2vec2_out, wav_lens)

        # Output layer for seq2seq log-probabilities
        logits = self.hparams.seq_lin(h)
        p_seq = self.hparams.log_softmax(logits)

        # Beam search is expensive, so during training it only runs every
        # `show_results_every` batches; it always runs at valid/test time.
        if (
            stage == sb.Stage.TRAIN
            and self.batch_count % show_results_every != 0
        ):
            return p_seq, wav_lens
        else:
            p_tokens, scores = self.hparams.beam_searcher(
                wav2vec2_out, wav_lens
            )
            return p_seq, wav_lens, p_tokens

    def compute_objectives(self, predictions, batch, stage):
        """Computes the loss (NLL) given predictions and targets."""
        if (
            stage == sb.Stage.TRAIN
            and self.batch_count % show_results_every != 0
        ):
            p_seq, wav_lens = predictions
        else:
            p_seq, wav_lens, predicted_tokens = predictions

        ids = batch.id
        tokens_eos, tokens_eos_lens = batch.tokens_eos

        loss_seq = self.hparams.seq_cost(
            p_seq, tokens_eos, length=tokens_eos_lens
        )
        loss = loss_seq

        if (stage != sb.Stage.TRAIN) or (
            self.batch_count % show_results_every == 0
        ):
            # Decode token terms to words
            predicted_semantics = [
                tokenizer.decode_ids(utt_seq).split(" ")
                for utt_seq in predicted_tokens
            ]

            target_semantics = [wrd.split(" ") for wrd in batch.semantics]

            self.log_outputs(predicted_semantics, target_semantics)

            if stage != sb.Stage.TRAIN:
                self.wer_metric.append(
                    ids, predicted_semantics, target_semantics
                )
                self.cer_metric.append(
                    ids, predicted_semantics, target_semantics
                )

            if stage == sb.Stage.TEST:
                # write to "predictions.jsonl"
                with jsonlines.open(
                    hparams["output_folder"] + "/predictions.jsonl", mode="a"
                ) as writer:
                    for i in range(len(predicted_semantics)):
                        try:
                            _dict = ast.literal_eval(
                                " ".join(predicted_semantics[i]).replace(
                                    "|", ","
                                )
                            )
                            if not isinstance(_dict, dict):
                                _dict = {
                                    "scenario": "none",
                                    "action": "none",
                                    "entities": [],
                                }
                        # BUGFIX: ast.literal_eval raises ValueError (not
                        # only SyntaxError) when the decoded string is not a
                        # valid Python literal; catch both.
                        except (SyntaxError, ValueError):
                            _dict = {
                                "scenario": "none",
                                "action": "none",
                                "entities": [],
                            }
                        _dict["file"] = id_to_file[ids[i]]
                        writer.write(_dict)

        return loss

    def log_outputs(self, predicted_semantics, target_semantics):
        """ TODO: log these to a file instead of stdout """
        for i in range(len(target_semantics)):
            print(" ".join(predicted_semantics[i]).replace("|", ","))
            print(" ".join(target_semantics[i]).replace("|", ","))
            print("")

    def fit_batch(self, batch):
        """Train the parameters given a single batch in input"""
        predictions = self.compute_forward(batch, sb.Stage.TRAIN)
        loss = self.compute_objectives(predictions, batch, sb.Stage.TRAIN)
        loss.backward()
        # Step both optimizers (wav2vec2 fine-tuning + main model) only when
        # gradients are finite / within the clip threshold.
        if self.check_gradients(loss):
            self.wav2vec2_optimizer.step()
            self.optimizer.step()
        self.wav2vec2_optimizer.zero_grad()
        self.optimizer.zero_grad()
        self.batch_count += 1
        return loss.detach()

    def evaluate_batch(self, batch, stage):
        """Computations needed for validation/test batches"""
        predictions = self.compute_forward(batch, stage=stage)
        loss = self.compute_objectives(predictions, batch, stage=stage)
        return loss.detach()

    def on_stage_start(self, stage, epoch):
        """Gets called at the beginning of each epoch"""
        self.batch_count = 0

        if stage != sb.Stage.TRAIN:
            # Fresh metric trackers for each valid/test pass.
            self.cer_metric = self.hparams.cer_computer()
            self.wer_metric = self.hparams.error_rate_computer()

    def on_stage_end(self, stage, stage_loss, epoch):
        """Gets called at the end of a epoch."""
        # Compute/store important stats
        stage_stats = {"loss": stage_loss}
        if stage == sb.Stage.TRAIN:
            self.train_stats = stage_stats
        else:
            stage_stats["CER"] = self.cer_metric.summarize("error_rate")
            stage_stats["WER"] = self.wer_metric.summarize("error_rate")

        # Perform end-of-iteration things, like annealing, logging, etc.
        if stage == sb.Stage.VALID:
            # Separate LR schedules for the main model and wav2vec2.
            old_lr, new_lr = self.hparams.lr_annealing(stage_stats["WER"])
            (
                old_lr_wav2vec2,
                new_lr_wav2vec2,
            ) = self.hparams.lr_annealing_wav2vec2(stage_stats["WER"])
            sb.nnet.schedulers.update_learning_rate(self.optimizer, new_lr)
            sb.nnet.schedulers.update_learning_rate(
                self.wav2vec2_optimizer, new_lr_wav2vec2
            )
            self.hparams.train_logger.log_stats(
                stats_meta={
                    "epoch": epoch,
                    "lr": old_lr,
                    "wave2vec_lr": old_lr_wav2vec2,
                },
                train_stats=self.train_stats,
                valid_stats=stage_stats,
            )
            # Keep only the checkpoint with the best (lowest) WER.
            self.checkpointer.save_and_keep_only(
                meta={"WER": stage_stats["WER"]}, min_keys=["WER"],
            )
        elif stage == sb.Stage.TEST:
            self.hparams.train_logger.log_stats(
                stats_meta={"Epoch loaded": self.hparams.epoch_counter.current},
                test_stats=stage_stats,
            )
            with open(self.hparams.wer_file, "w") as w:
                self.wer_metric.write_stats(w)

    def init_optimizers(self):
        "Initializes the wav2vec2 optimizer and model optimizer"
        self.wav2vec2_optimizer = self.hparams.wav2vec2_opt_class(
            self.modules.wav2vec2.parameters()
        )
        self.optimizer = self.hparams.opt_class(self.hparams.model.parameters())

        # Register both optimizers so their state is checkpointed/restored.
        if self.checkpointer is not None:
            self.checkpointer.add_recoverable(
                "wav2vec2_opt", self.wav2vec2_optimizer
            )
            self.checkpointer.add_recoverable("optimizer", self.optimizer)

    def zero_grad(self, set_to_none=False):
        """Clear gradients of both optimizers."""
        self.wav2vec2_optimizer.zero_grad(set_to_none)
        self.optimizer.zero_grad(set_to_none)
def dataio_prepare(hparams):
    """Create the train/valid/test datasets and their processing pipelines.

    Arguments
    ---------
    hparams : dict
        Loaded hyperparameters; must provide the csv paths, the data folder,
        the sorting mode, dataloader options, the tokenizer and the
        bos/eos indices.

    Returns
    -------
    tuple
        ``(train_data, valid_data, test_data, tokenizer)``.
    """
    root = hparams["data_folder"]

    def _from_csv(csv_key):
        # All three splits are loaded identically; only the csv path differs.
        return sb.dataio.dataset.DynamicItemDataset.from_csv(
            csv_path=hparams[csv_key], replacements={"data_root": root},
        )

    train_data = _from_csv("csv_train")

    sorting = hparams["sorting"]
    if sorting in ("ascending", "descending"):
        # Sorting by duration speeds up training and improves results;
        # shuffling in the dataloader would undo the sort, so disable it.
        train_data = train_data.filtered_sorted(
            sort_key="duration", reverse=(sorting == "descending")
        )
        hparams["dataloader_opts"]["shuffle"] = False
    elif sorting != "random":
        raise NotImplementedError(
            "sorting must be random, ascending or descending"
        )

    # Valid/test are always sorted ascending by duration.
    valid_data = _from_csv("csv_valid").filtered_sorted(sort_key="duration")
    test_data = _from_csv("csv_test").filtered_sorted(sort_key="duration")

    datasets = [train_data, valid_data, test_data]
    tokenizer = hparams["tokenizer"]

    # Audio pipeline: csv "wav" entry -> waveform tensor under key "sig".
    @sb.utils.data_pipeline.takes("wav")
    @sb.utils.data_pipeline.provides("sig")
    def audio_pipeline(wav):
        return sb.dataio.dataio.read_audio(wav)

    sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline)

    # Text pipeline: semantics string -> token ids plus BOS/EOS variants.
    @sb.utils.data_pipeline.takes("semantics")
    @sb.utils.data_pipeline.provides(
        "semantics", "token_list", "tokens_bos", "tokens_eos", "tokens"
    )
    def text_pipeline(semantics):
        yield semantics
        ids = tokenizer.encode_as_ids(semantics)
        yield ids
        yield torch.LongTensor([hparams["bos_index"]] + ids)
        yield torch.LongTensor(ids + [hparams["eos_index"]])
        yield torch.LongTensor(ids)

    sb.dataio.dataset.add_dynamic_item(datasets, text_pipeline)

    # Keys each batch exposes to the Brain.
    sb.dataio.dataset.set_output_keys(
        datasets,
        ["id", "sig", "semantics", "tokens_bos", "tokens_eos", "tokens"],
    )

    return train_data, valid_data, test_data, tokenizer
if __name__ == "__main__":
    # Load hyperparameters file with command-line overrides
    hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])
    with open(hparams_file) as fin:
        hparams = load_hyperpyyaml(fin, overrides)

    show_results_every = 100  # plots results every N iterations

    # If --distributed_launch then
    # create ddp_group with the right communication protocol
    sb.utils.distributed.ddp_init_group(run_opts)

    # Create experiment directory
    sb.create_experiment_directory(
        experiment_directory=hparams["output_folder"],
        hyperparams_to_save=hparams_file,
        overrides=overrides,
    )

    # Dataset prep (parsing SLURP)
    from prepare import prepare_SLURP  # noqa

    # multi-gpu (ddp) save data preparation
    run_on_main(
        prepare_SLURP,
        kwargs={
            "data_folder": hparams["data_folder"],
            "save_folder": hparams["output_folder"],
            "train_splits": hparams["train_splits"],
            "slu_type": "direct",
            "skip_prep": hparams["skip_prep"],
        },
    )

    # here we create the datasets objects as well as tokenization and encoding
    (train_set, valid_set, test_set, tokenizer,) = dataio_prepare(hparams)

    # We download and pretrain the tokenizer
    run_on_main(hparams["pretrainer"].collect_files)
    hparams["pretrainer"].load_collected(device=run_opts["device"])

    # Move the wav2vec2
    hparams["wav2vec2"] = hparams["wav2vec2"].to(run_opts["device"])

    # freeze the feature extractor part when unfreezing
    if not hparams["freeze_wav2vec2"] and hparams["freeze_wav2vec2_conv"]:
        hparams["wav2vec2"].model.feature_extractor._freeze_parameters()

    # Brain class initialization
    slu_brain = SLU(
        modules=hparams["modules"],
        opt_class=hparams["opt_class"],
        hparams=hparams,
        run_opts=run_opts,
        checkpointer=hparams["checkpointer"],
    )

    # adding objects to trainer:
    slu_brain.tokenizer = tokenizer

    # Training
    slu_brain.fit(
        slu_brain.hparams.epoch_counter,
        train_set,
        valid_set,
        train_loader_kwargs=hparams["dataloader_opts"],
        valid_loader_kwargs=hparams["dataloader_opts"],
    )

    # Test
    # Map each utterance id back to its wav filename so predictions.jsonl can
    # reference the original file (consumed by compute_objectives at TEST).
    print("Creating id_to_file mapping...")
    id_to_file = {}
    df = pd.read_csv(hparams["csv_test"])
    for i in range(len(df)):
        id_to_file[str(df.ID[i])] = df.wav[i].split("/")[-1]

    slu_brain.hparams.wer_file = hparams["output_folder"] + "/wer_test_real.txt"
    slu_brain.evaluate(test_set, test_loader_kwargs=hparams["dataloader_opts"])
| 13,958 | 35.163212 | 125 | py |
speechbrain | speechbrain-main/recipes/SLURP/direct/train.py | #!/usr/bin/env/python3
"""
Recipe for "direct" (speech -> semantics) SLU with ASR-based transfer learning.
We encode input waveforms into features using a model trained on LibriSpeech,
then feed the features into a seq2seq model to map them to semantics.
(Adapted from the LibriSpeech seq2seq ASR recipe written by Ju-Chieh Chou, Mirco Ravanelli, Abdel Heba, and Peter Plantinga.)
Run using:
> python train.py hparams/train.yaml
Authors
* Loren Lugosch 2020
* Mirco Ravanelli 2020
"""
import sys
import torch
import speechbrain as sb
from hyperpyyaml import load_hyperpyyaml
from speechbrain.utils.distributed import run_on_main
import jsonlines
import ast
import pandas as pd
# Define training procedure
class SLU(sb.Brain):
    """Direct SLU model: frozen ASR encoder + attentional seq2seq decoder.

    NOTE(review): this class reads the module-level globals
    ``show_results_every``, ``tokenizer``, ``hparams`` and (at test time)
    ``id_to_file`` that are defined in the ``__main__`` section below.
    """
    def compute_forward(self, batch, stage):
        """Forward computations from the waveform batches to the output probabilities."""
        batch = batch.to(self.device)
        wavs, wav_lens = batch.sig
        tokens_bos, tokens_bos_lens = batch.tokens_bos
        # Add augmentation if specified
        if stage == sb.Stage.TRAIN:
            if hasattr(self.hparams, "env_corrupt"):
                # Double the batch with noise-corrupted copies; the labels
                # are duplicated accordingly (see compute_objectives).
                wavs_noise = self.hparams.env_corrupt(wavs, wav_lens)
                wavs = torch.cat([wavs, wavs_noise], dim=0)
                wav_lens = torch.cat([wav_lens, wav_lens])
                tokens_bos = torch.cat([tokens_bos, tokens_bos], dim=0)
                tokens_bos_lens = torch.cat([tokens_bos_lens, tokens_bos_lens])
            if hasattr(self.hparams, "augmentation"):
                wavs = self.hparams.augmentation(wavs, wav_lens)
        # ASR encoder forward pass (kept frozen: no_grad + detached inputs)
        with torch.no_grad():
            ASR_encoder_out = self.hparams.asr_model.encode_batch(
                wavs.detach(), wav_lens
            )
        # SLU forward pass
        encoder_out = self.hparams.slu_enc(ASR_encoder_out)
        e_in = self.hparams.output_emb(tokens_bos)
        h, _ = self.hparams.dec(e_in, encoder_out, wav_lens)
        # Output layer for seq2seq log-probabilities
        logits = self.hparams.seq_lin(h)
        p_seq = self.hparams.log_softmax(logits)
        # Compute outputs: beam search is expensive, so during training it is
        # only run every `show_results_every` batches.
        if (
            stage == sb.Stage.TRAIN
            and self.batch_count % show_results_every != 0
        ):
            return p_seq, wav_lens
        else:
            p_tokens, scores = self.hparams.beam_searcher(encoder_out, wav_lens)
            return p_seq, wav_lens, p_tokens
    def compute_objectives(self, predictions, batch, stage):
        """Computes the loss (NLL) given predictions and targets."""
        if (
            stage == sb.Stage.TRAIN
            and self.batch_count % show_results_every != 0
        ):
            p_seq, wav_lens = predictions
        else:
            p_seq, wav_lens, predicted_tokens = predictions
        ids = batch.id
        tokens_eos, tokens_eos_lens = batch.tokens_eos
        tokens, tokens_lens = batch.tokens
        if hasattr(self.hparams, "env_corrupt") and stage == sb.Stage.TRAIN:
            # Mirror the batch doubling performed in compute_forward.
            tokens_eos = torch.cat([tokens_eos, tokens_eos], dim=0)
            tokens_eos_lens = torch.cat(
                [tokens_eos_lens, tokens_eos_lens], dim=0
            )
        loss_seq = self.hparams.seq_cost(
            p_seq, tokens_eos, length=tokens_eos_lens
        )
        # (No ctc loss)
        loss = loss_seq
        if (stage != sb.Stage.TRAIN) or (
            self.batch_count % show_results_every == 0
        ):
            # Decode token terms to words
            predicted_semantics = [
                tokenizer.decode_ids(utt_seq).split(" ")
                for utt_seq in predicted_tokens
            ]
            target_semantics = [wrd.split(" ") for wrd in batch.semantics]
            self.log_outputs(predicted_semantics, target_semantics)
            if stage != sb.Stage.TRAIN:
                self.wer_metric.append(
                    ids, predicted_semantics, target_semantics
                )
                self.cer_metric.append(
                    ids, predicted_semantics, target_semantics
                )
            if stage == sb.Stage.TEST:
                # write to "predictions.jsonl"
                with jsonlines.open(
                    hparams["output_folder"] + "/predictions.jsonl", mode="a"
                ) as writer:
                    for i in range(len(predicted_semantics)):
                        try:
                            # "|" stands in for "," in the serialized
                            # semantics; restore it before parsing.
                            _dict = ast.literal_eval(
                                " ".join(predicted_semantics[i]).replace(
                                    "|", ","
                                )
                            )
                            if not isinstance(_dict, dict):
                                _dict = {
                                    "scenario": "none",
                                    "action": "none",
                                    "entities": [],
                                }
                        # ast.literal_eval raises SyntaxError for malformed
                        # syntax and ValueError for invalid literals; both
                        # mean "not a valid dictionary", so catch both.
                        except (SyntaxError, ValueError):
                            _dict = {
                                "scenario": "none",
                                "action": "none",
                                "entities": [],
                            }
                        _dict["file"] = id_to_file[ids[i]]
                        writer.write(_dict)
        return loss
    def log_outputs(self, predicted_semantics, target_semantics):
        """Print predicted/target semantics. TODO: log to a file instead of stdout."""
        for i in range(len(target_semantics)):
            print(" ".join(predicted_semantics[i]).replace("|", ","))
            print(" ".join(target_semantics[i]).replace("|", ","))
            print("")
    def fit_batch(self, batch):
        """Train the parameters given a single batch in input"""
        predictions = self.compute_forward(batch, sb.Stage.TRAIN)
        loss = self.compute_objectives(predictions, batch, sb.Stage.TRAIN)
        loss.backward()
        # Only step when the gradients are finite (check_gradients also clips).
        if self.check_gradients(loss):
            self.optimizer.step()
        self.optimizer.zero_grad()
        self.batch_count += 1
        return loss.detach()
    def evaluate_batch(self, batch, stage):
        """Computations needed for validation/test batches"""
        predictions = self.compute_forward(batch, stage=stage)
        loss = self.compute_objectives(predictions, batch, stage=stage)
        return loss.detach()
    def on_stage_start(self, stage, epoch):
        """Gets called at the beginning of each epoch"""
        self.batch_count = 0
        if stage != sb.Stage.TRAIN:
            self.cer_metric = self.hparams.cer_computer()
            self.wer_metric = self.hparams.error_rate_computer()
    def on_stage_end(self, stage, stage_loss, epoch):
        """Gets called at the end of an epoch: annealing, logging, checkpointing."""
        # Compute/store important stats
        stage_stats = {"loss": stage_loss}
        if stage == sb.Stage.TRAIN:
            self.train_stats = stage_stats
        else:
            stage_stats["CER"] = self.cer_metric.summarize("error_rate")
            stage_stats["WER"] = self.wer_metric.summarize("error_rate")
        # Perform end-of-iteration things, like annealing, logging, etc.
        if stage == sb.Stage.VALID:
            old_lr, new_lr = self.hparams.lr_annealing(stage_stats["WER"])
            sb.nnet.schedulers.update_learning_rate(self.optimizer, new_lr)
            self.hparams.train_logger.log_stats(
                stats_meta={"epoch": epoch, "lr": old_lr},
                train_stats=self.train_stats,
                valid_stats=stage_stats,
            )
            # Keep only the checkpoint with the best (lowest) WER.
            self.checkpointer.save_and_keep_only(
                meta={"WER": stage_stats["WER"]}, min_keys=["WER"],
            )
        elif stage == sb.Stage.TEST:
            self.hparams.train_logger.log_stats(
                stats_meta={"Epoch loaded": self.hparams.epoch_counter.current},
                test_stats=stage_stats,
            )
            with open(self.hparams.wer_file, "w") as w:
                self.wer_metric.write_stats(w)
def dataio_prepare(hparams):
    """This function prepares the datasets to be used in the brain class.
    It also defines the data processing pipeline through user-defined functions.

    Arguments
    ---------
    hparams : dict
        Loaded hyperparameters. Must provide the csv paths, the "sorting"
        strategy, "dataloader_opts", the "tokenizer" and bos/eos indexes.

    Returns
    -------
    train_data, valid_data, test_data : DynamicItemDataset
        Datasets with the audio and text pipelines attached.
    tokenizer : object
        The tokenizer taken from ``hparams["tokenizer"]``.
    """
    data_folder = hparams["data_folder"]
    train_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["csv_train"], replacements={"data_root": data_folder},
    )
    if hparams["sorting"] == "ascending":
        # we sort training data to speed up training and get better results.
        train_data = train_data.filtered_sorted(sort_key="duration")
        # when sorting do not shuffle in dataloader ! otherwise is pointless
        hparams["dataloader_opts"]["shuffle"] = False
    elif hparams["sorting"] == "descending":
        train_data = train_data.filtered_sorted(
            sort_key="duration", reverse=True
        )
        # when sorting do not shuffle in dataloader ! otherwise is pointless
        hparams["dataloader_opts"]["shuffle"] = False
    elif hparams["sorting"] == "random":
        pass
    else:
        raise NotImplementedError(
            "sorting must be random, ascending or descending"
        )
    # Validation and test sets are always sorted by duration.
    valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["csv_valid"], replacements={"data_root": data_folder},
    )
    valid_data = valid_data.filtered_sorted(sort_key="duration")
    test_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["csv_test"], replacements={"data_root": data_folder},
    )
    test_data = test_data.filtered_sorted(sort_key="duration")
    datasets = [train_data, valid_data, test_data]
    tokenizer = hparams["tokenizer"]
    # 2. Define audio pipeline:
    @sb.utils.data_pipeline.takes("wav")
    @sb.utils.data_pipeline.provides("sig")
    def audio_pipeline(wav):
        # Read the waveform from disk; returns the signal tensor.
        sig = sb.dataio.dataio.read_audio(wav)
        return sig
    sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline)
    # 3. Define text pipeline:
    @sb.utils.data_pipeline.takes("semantics")
    @sb.utils.data_pipeline.provides(
        "semantics", "token_list", "tokens_bos", "tokens_eos", "tokens"
    )
    def text_pipeline(semantics):
        # The yield order must match the "provides" order above.
        yield semantics
        tokens_list = tokenizer.encode_as_ids(semantics)
        yield tokens_list
        tokens_bos = torch.LongTensor([hparams["bos_index"]] + (tokens_list))
        yield tokens_bos
        tokens_eos = torch.LongTensor(tokens_list + [hparams["eos_index"]])
        yield tokens_eos
        tokens = torch.LongTensor(tokens_list)
        yield tokens
    sb.dataio.dataset.add_dynamic_item(datasets, text_pipeline)
    # 4. Set output:
    sb.dataio.dataset.set_output_keys(
        datasets,
        ["id", "sig", "semantics", "tokens_bos", "tokens_eos", "tokens"],
    )
    return train_data, valid_data, test_data, tokenizer
if __name__ == "__main__":
    # Load hyperparameters file with command-line overrides
    hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])
    with open(hparams_file) as fin:
        hparams = load_hyperpyyaml(fin, overrides)
    # Module-level global read by the SLU Brain class above.
    show_results_every = 100  # plots results every N iterations
    # If --distributed_launch then
    # create ddp_group with the right communication protocol
    sb.utils.distributed.ddp_init_group(run_opts)
    # Create experiment directory
    sb.create_experiment_directory(
        experiment_directory=hparams["output_folder"],
        hyperparams_to_save=hparams_file,
        overrides=overrides,
    )
    # Dataset prep (parsing SLURP)
    from prepare import prepare_SLURP  # noqa
    # multi-gpu (ddp) save data preparation
    run_on_main(
        prepare_SLURP,
        kwargs={
            "data_folder": hparams["data_folder"],
            "save_folder": hparams["output_folder"],
            "train_splits": hparams["train_splits"],
            "slu_type": "direct",
            "skip_prep": hparams["skip_prep"],
        },
    )
    # here we create the datasets objects as well as tokenization and encoding
    (train_set, valid_set, test_set, tokenizer,) = dataio_prepare(hparams)
    # We download and pretrain the tokenizer
    run_on_main(hparams["pretrainer"].collect_files)
    hparams["pretrainer"].load_collected(device=run_opts["device"])
    # Brain class initialization
    slu_brain = SLU(
        modules=hparams["modules"],
        opt_class=hparams["opt_class"],
        hparams=hparams,
        run_opts=run_opts,
        checkpointer=hparams["checkpointer"],
    )
    # adding objects to trainer:
    slu_brain.tokenizer = tokenizer
    # Training
    slu_brain.fit(
        slu_brain.hparams.epoch_counter,
        train_set,
        valid_set,
        train_loader_kwargs=hparams["dataloader_opts"],
        valid_loader_kwargs=hparams["dataloader_opts"],
    )
    # Test: build the utterance-id -> wav-filename map used when writing
    # predictions.jsonl (read as a global by SLU.compute_objectives).
    print("Creating id_to_file mapping...")
    id_to_file = {}
    df = pd.read_csv(hparams["csv_test"])
    for i in range(len(df)):
        id_to_file[str(df.ID[i])] = df.wav[i].split("/")[-1]
    slu_brain.hparams.wer_file = hparams["output_folder"] + "/wer_test_real.txt"
    slu_brain.evaluate(test_set, test_loader_kwargs=hparams["dataloader_opts"])
| 13,228 | 35.144809 | 125 | py |
speechbrain | speechbrain-main/recipes/IEMOCAP/emotion_recognition/train_with_wav2vec2.py | #!/usr/bin/env python3
"""Recipe for training an emotion recognition system from speech data only using IEMOCAP.
The system classifies 4 emotions ( anger, happiness, sadness, neutrality) with wav2vec2.
To run this recipe, do the following:
> python train_with_wav2vec2.py hparams/train_with_wav2vec2.yaml --data_folder /path/to/IEMOCAP_full_release
For more wav2vec2/HuBERT results, please see https://arxiv.org/pdf/2111.02735.pdf
Authors
* Yingzhi WANG 2021
"""
import os
import sys
import speechbrain as sb
from hyperpyyaml import load_hyperpyyaml
class EmoIdBrain(sb.Brain):
    """Brain class for IEMOCAP emotion recognition with a wav2vec2 encoder
    followed by pooling and an MLP classifier."""
    def compute_forward(self, batch, stage):
        """Computation pipeline based on an encoder + emotion classifier.
        """
        batch = batch.to(self.device)
        wavs, lens = batch.sig
        outputs = self.modules.wav2vec2(wavs, lens)
        # last dim will be used for AdaptativeAVG pool
        outputs = self.hparams.avg_pool(outputs, lens)
        # Flatten to (batch, features) before the classifier.
        outputs = outputs.view(outputs.shape[0], -1)
        outputs = self.modules.output_mlp(outputs)
        outputs = self.hparams.log_softmax(outputs)
        return outputs
    def compute_objectives(self, predictions, batch, stage):
        """Computes the NLL loss using the encoded emotion id as label.
        """
        emoid, _ = batch.emo_encoded
        """to meet the input form of nll loss"""
        emoid = emoid.squeeze(1)
        loss = self.hparams.compute_cost(predictions, emoid)
        if stage != sb.Stage.TRAIN:
            self.error_metrics.append(batch.id, predictions, emoid)
        return loss
    def fit_batch(self, batch):
        """Trains the parameters given a single batch in input"""
        predictions = self.compute_forward(batch, sb.Stage.TRAIN)
        loss = self.compute_objectives(predictions, batch, sb.Stage.TRAIN)
        loss.backward()
        # Step both optimizers only when gradients are finite
        # (check_gradients also clips them).
        if self.check_gradients(loss):
            self.wav2vec2_optimizer.step()
            self.optimizer.step()
        self.wav2vec2_optimizer.zero_grad()
        self.optimizer.zero_grad()
        return loss.detach()
    def on_stage_start(self, stage, epoch=None):
        """Gets called at the beginning of each epoch.
        Arguments
        ---------
        stage : sb.Stage
            One of sb.Stage.TRAIN, sb.Stage.VALID, or sb.Stage.TEST.
        epoch : int
            The currently-starting epoch. This is passed
            `None` during the test stage.
        """
        # Set up statistics trackers for this stage
        self.loss_metric = sb.utils.metric_stats.MetricStats(
            metric=sb.nnet.losses.nll_loss
        )
        # Set up evaluation-only statistics trackers
        if stage != sb.Stage.TRAIN:
            self.error_metrics = self.hparams.error_stats()
    def on_stage_end(self, stage, stage_loss, epoch=None):
        """Gets called at the end of an epoch.
        Arguments
        ---------
        stage : sb.Stage
            One of sb.Stage.TRAIN, sb.Stage.VALID, sb.Stage.TEST
        stage_loss : float
            The average loss for all of the data processed in this stage.
        epoch : int
            The currently-starting epoch. This is passed
            `None` during the test stage.
        """
        # Store the train loss until the validation stage.
        if stage == sb.Stage.TRAIN:
            self.train_loss = stage_loss
        # Summarize the statistics from the stage for record-keeping.
        else:
            stats = {
                "loss": stage_loss,
                "error_rate": self.error_metrics.summarize("average"),
            }
        # At the end of validation...
        if stage == sb.Stage.VALID:
            # Anneal both learning rates based on the validation error rate.
            old_lr, new_lr = self.hparams.lr_annealing(stats["error_rate"])
            sb.nnet.schedulers.update_learning_rate(self.optimizer, new_lr)
            (
                old_lr_wav2vec2,
                new_lr_wav2vec2,
            ) = self.hparams.lr_annealing_wav2vec2(stats["error_rate"])
            sb.nnet.schedulers.update_learning_rate(
                self.wav2vec2_optimizer, new_lr_wav2vec2
            )
            # The train_logger writes a summary to stdout and to the logfile.
            self.hparams.train_logger.log_stats(
                {"Epoch": epoch, "lr": old_lr, "wave2vec_lr": old_lr_wav2vec2},
                train_stats={"loss": self.train_loss},
                valid_stats=stats,
            )
            # Save the current checkpoint and delete previous checkpoints,
            self.checkpointer.save_and_keep_only(
                meta=stats, min_keys=["error_rate"]
            )
        # We also write statistics about test data to stdout and to logfile.
        if stage == sb.Stage.TEST:
            self.hparams.train_logger.log_stats(
                {"Epoch loaded": self.hparams.epoch_counter.current},
                test_stats=stats,
            )
    def init_optimizers(self):
        "Initializes the wav2vec2 optimizer and model optimizer"
        self.wav2vec2_optimizer = self.hparams.wav2vec2_opt_class(
            self.modules.wav2vec2.parameters()
        )
        self.optimizer = self.hparams.opt_class(self.hparams.model.parameters())
        if self.checkpointer is not None:
            # Register both optimizers so training can resume seamlessly.
            self.checkpointer.add_recoverable(
                "wav2vec2_opt", self.wav2vec2_optimizer
            )
            self.checkpointer.add_recoverable("optimizer", self.optimizer)
    def zero_grad(self, set_to_none=False):
        """Zeroes the gradients of both optimizers."""
        self.wav2vec2_optimizer.zero_grad(set_to_none)
        self.optimizer.zero_grad(set_to_none)
def dataio_prep(hparams):
    """This function prepares the datasets to be used in the brain class.
    It also defines the data processing pipeline through user-defined
    functions. The data-preparation step is expected to have been run before
    this, so that the train/valid/test json manifest files are available.
    Arguments
    ---------
    hparams : dict
        This dictionary is loaded from the `train.yaml` file, and it includes
        all the hyperparameters needed for dataset construction and loading.
    Returns
    -------
    datasets : dict
        Contains the keys "train", "valid" and "test" that correspond
        to the appropriate DynamicItemDataset object.
    """
    # Define audio pipeline
    @sb.utils.data_pipeline.takes("wav")
    @sb.utils.data_pipeline.provides("sig")
    def audio_pipeline(wav):
        """Load the signal, and pass it and its length to the corruption class.
        This is done on the CPU in the `collate_fn`."""
        sig = sb.dataio.dataio.read_audio(wav)
        return sig
    # Initialization of the label encoder. The label encoder assigns to each
    # of the observed labels a unique index (e.g, 'spk01': 0, 'spk02': 1, ..)
    label_encoder = sb.dataio.encoder.CategoricalEncoder()
    # Define label pipeline:
    @sb.utils.data_pipeline.takes("emo")
    @sb.utils.data_pipeline.provides("emo", "emo_encoded")
    def label_pipeline(emo):
        # The yield order must match the "provides" order above.
        yield emo
        emo_encoded = label_encoder.encode_label_torch(emo)
        yield emo_encoded
    # Define datasets. We also connect the dataset with the data processing
    # functions defined above.
    datasets = {}
    data_info = {
        "train": hparams["train_annotation"],
        "valid": hparams["valid_annotation"],
        "test": hparams["test_annotation"],
    }
    for dataset in data_info:
        datasets[dataset] = sb.dataio.dataset.DynamicItemDataset.from_json(
            json_path=data_info[dataset],
            replacements={"data_root": hparams["data_folder"]},
            dynamic_items=[audio_pipeline, label_pipeline],
            output_keys=["id", "sig", "emo_encoded"],
        )
    # Load or compute the label encoder (with multi-GPU DDP support)
    # Please, take a look into the lab_enc_file to see the label to index
    # mapping.
    lab_enc_file = os.path.join(hparams["save_folder"], "label_encoder.txt")
    label_encoder.load_or_create(
        path=lab_enc_file,
        from_didatasets=[datasets["train"]],
        output_key="emo",
    )
    return datasets
# RECIPE BEGINS!
if __name__ == "__main__":
    # Reading command line arguments.
    hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])
    # Initialize ddp (useful only for multi-GPU DDP training).
    sb.utils.distributed.ddp_init_group(run_opts)
    # Load hyperparameters file with command-line overrides.
    with open(hparams_file) as fin:
        hparams = load_hyperpyyaml(fin, overrides)
    # Create experiment directory
    sb.create_experiment_directory(
        experiment_directory=hparams["output_folder"],
        hyperparams_to_save=hparams_file,
        overrides=overrides,
    )
    from iemocap_prepare import prepare_data  # noqa E402
    # Data preparation, to be run on only one process.
    if not hparams["skip_prep"]:
        sb.utils.distributed.run_on_main(
            prepare_data,
            kwargs={
                "data_original": hparams["data_folder"],
                "save_json_train": hparams["train_annotation"],
                "save_json_valid": hparams["valid_annotation"],
                "save_json_test": hparams["test_annotation"],
                "split_ratio": hparams["split_ratio"],
                "different_speakers": hparams["different_speakers"],
                "test_spk_id": hparams["test_spk_id"],
                "seed": hparams["seed"],
            },
        )
    # Create dataset objects "train", "valid", and "test".
    datasets = dataio_prep(hparams)
    # Move the wav2vec2 encoder to the training device.
    hparams["wav2vec2"] = hparams["wav2vec2"].to(device=run_opts["device"])
    # freeze the feature extractor part when unfreezing
    if not hparams["freeze_wav2vec2"] and hparams["freeze_wav2vec2_conv"]:
        hparams["wav2vec2"].model.feature_extractor._freeze_parameters()
    # Initialize the Brain object to prepare for mask training.
    emo_id_brain = EmoIdBrain(
        modules=hparams["modules"],
        opt_class=hparams["opt_class"],
        hparams=hparams,
        run_opts=run_opts,
        checkpointer=hparams["checkpointer"],
    )
    # The `fit()` method iterates the training loop, calling the methods
    # necessary to update the parameters of the model. Since all objects
    # with changing state are managed by the Checkpointer, training can be
    # stopped at any point, and will be resumed on next call.
    emo_id_brain.fit(
        epoch_counter=emo_id_brain.hparams.epoch_counter,
        train_set=datasets["train"],
        valid_set=datasets["valid"],
        train_loader_kwargs=hparams["dataloader_options"],
        valid_loader_kwargs=hparams["dataloader_options"],
    )
    # Load the best checkpoint for evaluation
    test_stats = emo_id_brain.evaluate(
        test_set=datasets["test"],
        min_key="error_rate",
        test_loader_kwargs=hparams["dataloader_options"],
    )
| 10,890 | 35.182724 | 108 | py |
speechbrain | speechbrain-main/recipes/IEMOCAP/emotion_recognition/train.py | #!/usr/bin/env python3
"""Recipe for training an emotion recognition system from speech data only using IEMOCAP.
The system classifies 4 emotions ( anger, happiness, sadness, neutrality)
with an ECAPA-TDNN model.
To run this recipe, do the following:
> python train.py hparams/train.yaml --data_folder /path/to/IEMOCAP
Authors
* Pierre-Yves Yanni 2021
"""
import os
import sys
import csv
import speechbrain as sb
import torch
from torch.utils.data import DataLoader
from enum import Enum, auto
from tqdm.contrib import tqdm
from hyperpyyaml import load_hyperpyyaml
class EmoIdBrain(sb.Brain):
    """Brain class for IEMOCAP emotion recognition with an ECAPA-TDNN
    embedding model followed by a classifier."""
    def compute_forward(self, batch, stage):
        """Computation pipeline based on an encoder + emotion classifier.
        """
        batch = batch.to(self.device)
        wavs, lens = batch.sig
        # Feature extraction and normalization
        feats = self.modules.compute_features(wavs)
        feats = self.modules.mean_var_norm(feats, lens)
        # Embeddings + speaker classifier
        embeddings = self.modules.embedding_model(feats, lens)
        outputs = self.modules.classifier(embeddings)
        return outputs
    def fit_batch(self, batch):
        """Trains the parameters given a single batch in input"""
        predictions = self.compute_forward(batch, sb.Stage.TRAIN)
        loss = self.compute_objectives(predictions, batch, sb.Stage.TRAIN)
        # normalize the loss by gradient_accumulation step
        (loss / self.hparams.gradient_accumulation).backward()
        if self.step % self.hparams.gradient_accumulation == 0:
            # gradient clipping & early stop if loss is not finite
            self.check_gradients(loss)
            self.optimizer.step()
            self.optimizer.zero_grad()
        return loss.detach()
    def compute_objectives(self, predictions, batch, stage):
        """Computes the loss using the encoded emotion id as label.
        """
        _, lens = batch.sig
        emoid, _ = batch.emo_encoded
        # Concatenate labels (due to data augmentation)
        if stage == sb.Stage.TRAIN:
            if hasattr(self.hparams.lr_annealing, "on_batch_end"):
                self.hparams.lr_annealing.on_batch_end(self.optimizer)
        loss = self.hparams.compute_cost(predictions, emoid, lens)
        if stage != sb.Stage.TRAIN:
            self.error_metrics.append(batch.id, predictions, emoid, lens)
        return loss
    def on_stage_start(self, stage, epoch=None):
        """Gets called at the beginning of each epoch.
        Arguments
        ---------
        stage : sb.Stage
            One of sb.Stage.TRAIN, sb.Stage.VALID, or sb.Stage.TEST.
        epoch : int
            The currently-starting epoch. This is passed
            `None` during the test stage.
        """
        # Set up statistics trackers for this stage
        self.loss_metric = sb.utils.metric_stats.MetricStats(
            metric=sb.nnet.losses.nll_loss
        )
        # Set up evaluation-only statistics trackers
        if stage != sb.Stage.TRAIN:
            self.error_metrics = self.hparams.error_stats()
    def on_stage_end(self, stage, stage_loss, epoch=None):
        """Gets called at the end of an epoch.
        Arguments
        ---------
        stage : sb.Stage
            One of sb.Stage.TRAIN, sb.Stage.VALID, sb.Stage.TEST
        stage_loss : float
            The average loss for all of the data processed in this stage.
        epoch : int
            The currently-starting epoch. This is passed
            `None` during the test stage.
        """
        # Store the train loss until the validation stage.
        if stage == sb.Stage.TRAIN:
            self.train_loss = stage_loss
        # Summarize the statistics from the stage for record-keeping.
        else:
            stats = {
                "loss": stage_loss,
                "error": self.error_metrics.summarize("average"),
            }
        # At the end of validation...
        if stage == sb.Stage.VALID:
            old_lr, new_lr = self.hparams.lr_annealing(epoch)
            sb.nnet.schedulers.update_learning_rate(self.optimizer, new_lr)
            # The train_logger writes a summary to stdout and to the logfile.
            self.hparams.train_logger.log_stats(
                {"Epoch": epoch, "lr": old_lr},
                train_stats={"loss": self.train_loss},
                valid_stats=stats,
            )
            # Save the current checkpoint and delete previous checkpoints,
            self.checkpointer.save_and_keep_only(meta=stats, min_keys=["error"])
        # We also write statistics about test data to stdout and to logfile.
        if stage == sb.Stage.TEST:
            self.hparams.train_logger.log_stats(
                {"Epoch loaded": self.hparams.epoch_counter.current},
                test_stats=stats,
            )
    def output_predictions_test_set(
        self,
        test_set,
        max_key=None,
        min_key=None,
        progressbar=None,
        test_loader_kwargs=None,
    ):
        """Iterate test_set and create output file (id, predictions, true values).
        Arguments
        ---------
        test_set : Dataset, DataLoader
            If a DataLoader is given, it is iterated directly. Otherwise passed
            to ``self.make_dataloader()``.
        max_key : str
            Key to use for finding best checkpoint, passed to
            ``on_evaluate_start()``.
        min_key : str
            Key to use for finding best checkpoint, passed to
            ``on_evaluate_start()``.
        progressbar : bool
            Whether to display the progress in a progressbar.
        test_loader_kwargs : dict
            Kwargs passed to ``make_dataloader()`` if ``test_set`` is not a
            DataLoader. NOTE: ``loader_kwargs["ckpt_prefix"]`` gets
            automatically overwritten to ``None`` (so that the test DataLoader
            is not added to the checkpointer).
        """
        # A mutable default ({}) would be shared across calls and mutated
        # below; use None and copy the caller's dict instead.
        test_loader_kwargs = dict(test_loader_kwargs or {})
        if progressbar is None:
            progressbar = not self.noprogressbar
        if not isinstance(test_set, DataLoader):
            test_loader_kwargs["ckpt_prefix"] = None
            test_set = self.make_dataloader(
                test_set, Stage.TEST, **test_loader_kwargs
            )
        save_file = os.path.join(
            self.hparams.output_folder, "predictions.csv"
        )
        # Write the CSV header first; rows are appended batch by batch below.
        with open(save_file, "w", newline="") as csvfile:
            outwriter = csv.writer(csvfile, delimiter=",")
            outwriter.writerow(["id", "prediction", "true_value"])
        self.on_evaluate_start(max_key=max_key, min_key=min_key)  # done before
        self.modules.eval()
        with torch.no_grad():
            for batch in tqdm(
                test_set, dynamic_ncols=True, disable=not progressbar
            ):
                self.step += 1
                emo_ids = batch.id
                true_vals = batch.emo_encoded.data.squeeze(dim=1).tolist()
                output = self.compute_forward(batch, stage=Stage.TEST)
                predictions = (
                    torch.argmax(output, dim=-1).squeeze(dim=1).tolist()
                )
                with open(save_file, "a", newline="") as csvfile:
                    outwriter = csv.writer(csvfile, delimiter=",")
                    for emo_id, prediction, true_val in zip(
                        emo_ids, predictions, true_vals
                    ):
                        outwriter.writerow([emo_id, prediction, true_val])
                # Debug mode only runs a few batches
                if self.debug and self.step == self.debug_batches:
                    break
        self.step = 0
class Stage(Enum):
    """Enumeration of the experiment phases (train / validation / test)."""
    # Explicit values mirror exactly what ``enum.auto()`` would assign.
    TRAIN = 1
    VALID = 2
    TEST = 3
def dataio_prep(hparams):
    """This function prepares the datasets to be used in the brain class.
    It also defines the data processing pipeline through user-defined
    functions. The data-preparation step is expected to have been run before
    this, so that the train/valid/test json manifest files are available.
    Arguments
    ---------
    hparams : dict
        This dictionary is loaded from the `train.yaml` file, and it includes
        all the hyperparameters needed for dataset construction and loading.
    Returns
    -------
    datasets : dict
        Contains the keys "train", "valid" and "test" that correspond
        to the appropriate DynamicItemDataset object.
    """
    # Define audio pipeline
    @sb.utils.data_pipeline.takes("wav")
    @sb.utils.data_pipeline.provides("sig")
    def audio_pipeline(wav):
        """Load the signal, and pass it and its length to the corruption class.
        This is done on the CPU in the `collate_fn`."""
        sig = sb.dataio.dataio.read_audio(wav)
        return sig
    # Initialization of the label encoder. The label encoder assigns to each
    # of the observed labels a unique index (e.g, 'spk01': 0, 'spk02': 1, ..)
    label_encoder = sb.dataio.encoder.CategoricalEncoder()
    # Define label pipeline:
    @sb.utils.data_pipeline.takes("emo")
    @sb.utils.data_pipeline.provides("emo", "emo_encoded")
    def label_pipeline(emo):
        # The yield order must match the "provides" order above.
        yield emo
        emo_encoded = label_encoder.encode_label_torch(emo)
        yield emo_encoded
    # Define datasets. We also connect the dataset with the data processing
    # functions defined above.
    datasets = {}
    data_info = {
        "train": hparams["train_annotation"],
        "valid": hparams["valid_annotation"],
        "test": hparams["test_annotation"],
    }
    for dataset in data_info:
        datasets[dataset] = sb.dataio.dataset.DynamicItemDataset.from_json(
            json_path=data_info[dataset],
            replacements={"data_root": hparams["data_folder"]},
            dynamic_items=[audio_pipeline, label_pipeline],
            output_keys=["id", "sig", "emo_encoded"],
        )
    # Load or compute the label encoder (with multi-GPU DDP support)
    # Please, take a look into the lab_enc_file to see the label to index
    # mapping.
    lab_enc_file = os.path.join(hparams["save_folder"], "label_encoder.txt")
    label_encoder.load_or_create(
        path=lab_enc_file,
        from_didatasets=[datasets["train"]],
        output_key="emo",
    )
    return datasets
# RECIPE BEGINS!
if __name__ == "__main__":
    # Reading command line arguments.
    hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])
    # Initialize ddp (useful only for multi-GPU DDP training).
    sb.utils.distributed.ddp_init_group(run_opts)
    # Load hyperparameters file with command-line overrides.
    with open(hparams_file) as fin:
        hparams = load_hyperpyyaml(fin, overrides)
    # Create experiment directory
    sb.create_experiment_directory(
        experiment_directory=hparams["output_folder"],
        hyperparams_to_save=hparams_file,
        overrides=overrides,
    )
    from iemocap_prepare import prepare_data  # noqa E402
    # Data preparation, to be run on only one process.
    if not hparams["skip_prep"]:
        sb.utils.distributed.run_on_main(
            prepare_data,
            kwargs={
                "data_original": hparams["data_folder"],
                "save_json_train": hparams["train_annotation"],
                "save_json_valid": hparams["valid_annotation"],
                "save_json_test": hparams["test_annotation"],
                "split_ratio": hparams["split_ratio"],
                "different_speakers": hparams["different_speakers"],
                "test_spk_id": hparams["test_spk_id"],
                "seed": hparams["seed"],
            },
        )
    # Create dataset objects "train", "valid", and "test".
    datasets = dataio_prep(hparams)
    # Initialize the Brain object to prepare for mask training.
    emo_id_brain = EmoIdBrain(
        modules=hparams["modules"],
        opt_class=hparams["opt_class"],
        hparams=hparams,
        run_opts=run_opts,
        checkpointer=hparams["checkpointer"],
    )
    # The `fit()` method iterates the training loop, calling the methods
    # necessary to update the parameters of the model. Since all objects
    # with changing state are managed by the Checkpointer, training can be
    # stopped at any point, and will be resumed on next call.
    emo_id_brain.fit(
        epoch_counter=emo_id_brain.hparams.epoch_counter,
        train_set=datasets["train"],
        valid_set=datasets["valid"],
        train_loader_kwargs=hparams["dataloader_options"],
        valid_loader_kwargs=hparams["dataloader_options"],
    )
    # Load the best checkpoint for evaluation
    test_stats = emo_id_brain.evaluate(
        test_set=datasets["test"],
        min_key="error",
        test_loader_kwargs=hparams["dataloader_options"],
    )
    # Create output file with predictions
    emo_id_brain.output_predictions_test_set(
        test_set=datasets["test"],
        min_key="error",
        test_loader_kwargs=hparams["dataloader_options"],
    )
| 13,084 | 34.080429 | 89 | py |
speechbrain | speechbrain-main/recipes/LibriMix/separation/dynamic_mixing.py | import speechbrain as sb
import numpy as np
import torch
import torchaudio
import glob
import os
from speechbrain.dataio.batch import PaddedBatch
from tqdm import tqdm
import warnings
import pyloudnorm
import random
"""
The functions to implement Dynamic Mixing For SpeechSeparation
Authors
* Samuele Cornell 2021
* Cem Subakan 2021
"""
def build_spk_hashtable_librimix(hparams):
    """Build a dictionary of speaker -> utterance files for dynamic mixing.

    Arguments
    ---------
    hparams : dict
        Must contain "base_folder_dm" (folder scanned recursively for *.wav
        files) and "sample_rate" (used to sanity-check the first file).

    Returns
    -------
    spk_hashtable : dict
        Maps speaker id (str) to the list of that speaker's utterance paths.
    spk_weights : list
        Number of utterances per speaker, aligned with the dict's key order
        (used later as sampling weights).
    """
    libri_utterances = glob.glob(
        os.path.join(hparams["base_folder_dm"], "**/*.wav"), recursive=True
    )
    # just for one file check if the sample rate is correct
    assert (
        torchaudio.info(libri_utterances[0]).sample_rate
        == hparams["sample_rate"]
    )
    spk_hashtable = {}
    for utt in tqdm(libri_utterances):
        # e.g. LibriSpeech/train-clean-100/441/128988/441-128988-0014.flac
        # the speaker id (441) is the third path component from the end.
        spk_id = os.path.normpath(utt).split(os.sep)[-3]
        spk_hashtable.setdefault(spk_id, []).append(utt)
    # calculate weights for each speaker (len of list of utterances)
    spk_weights = [len(utts) for utts in spk_hashtable.values()]
    return spk_hashtable, spk_weights
def get_wham_noise_filenames(hparams):
    """List the WHAM! noise wav files to be used in dynamic mixing.

    The subfolder containing the noise depends on the dataset family
    (LibriMix vs. WSJ-style) and on the sampling rate.

    Arguments
    ---------
    hparams : dict
        Must contain "data_folder" and "sample_rate" (8000 or 16000).

    Returns
    -------
    list
        Paths of all noise wav files found under the noise folder.

    Raises
    ------
    ValueError
        If the sampling rate is neither 8000 nor 16000 Hz.
    """
    rate_dirs = {8000: "wav8k", 16000: "wav16k"}
    sample_rate = hparams["sample_rate"]
    if sample_rate not in rate_dirs:
        raise ValueError("Unsupported Sampling Rate")
    # For LibriMix, data_folder should point to the Libri2Mix folder.
    split_dir = "train-360" if "Libri" in hparams["data_folder"] else "tr"
    noise_path = "{}/min/{}/noise/".format(rate_dirs[sample_rate], split_dir)
    pattern = os.path.join(hparams["data_folder"], noise_path, "*.wav")
    return glob.glob(pattern)
def dynamic_mix_data_prep_librimix(hparams):
    """
    Dynamic mixing for LibriMix.

    Builds the training DynamicItemDataset from the csv manifest, attaches a
    pipeline that synthesizes a brand-new mixture for every item on the fly
    (random speakers, random utterance windows, optional WHAM! noise), and
    wraps the dataset in a DataLoader.

    Arguments
    ---------
    hparams : dict
        Must contain "train_data", "data_folder", "num_spks", "sample_rate",
        "training_signal_len", "use_wham_noise" and "dataloader_opts".

    Returns
    -------
    torch.utils.data.DataLoader
        Loader yielding dynamically mixed PaddedBatch objects.
    """
    # 1. Define datasets
    train_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["train_data"],
        replacements={"data_root": hparams["data_folder"]},
    )
    # We build a dictionary where keys are speaker ids and entries are lists
    # of utterance files of that speaker.
    print("Building the speaker hashtable for dynamic mixing")
    spk_hashtable, spk_weights = build_spk_hashtable_librimix(hparams)
    spk_list = [x for x in spk_hashtable.keys()]
    # Normalize per-speaker utterance counts into sampling probabilities.
    spk_weights = [x / sum(spk_weights) for x in spk_weights]
    if hparams["use_wham_noise"]:
        noise_files = get_wham_noise_filenames(hparams)
    @sb.utils.data_pipeline.takes("mix_wav")
    @sb.utils.data_pipeline.provides(
        "mix_sig", "s1_sig", "s2_sig", "s3_sig", "noise_sig"
    )
    def audio_pipeline(
        mix_wav,
    ):  # "mix_wav" is a dummy input --> one epoch has the same length as without dynamic mixing
        """
        This audio pipeline defines the compute graph for dynamic mixing:
        sample speakers, load random utterance windows (plus an optional
        WHAM! noise file), loudness-normalize them, and yield the mixture
        followed by the individual sources.
        """
        # Sample num_spks distinct speakers, weighted by utterance count.
        speakers = np.random.choice(
            spk_list, hparams["num_spks"], replace=False, p=spk_weights
        )
        if hparams["use_wham_noise"]:
            noise_file = np.random.choice(noise_files, 1, replace=False)
            noise, fs_read = torchaudio.load(noise_file[0])
            noise = noise.squeeze()
        # Container for the per-speaker source signals.
        sources = []
        # Pick one random utterance for each sampled speaker.
        spk_files = [
            np.random.choice(spk_hashtable[spk], 1, False)[0]
            for spk in speakers
        ]
        # Mixture length: shortest utterance, capped by training_signal_len.
        minlen = min(
            *[torchaudio.info(x).num_frames for x in spk_files],
            hparams["training_signal_len"],
        )
        meter = pyloudnorm.Meter(hparams["sample_rate"])
        # Loudness-normalization constants: peak cap and target LUFS range.
        MAX_AMP = 0.9
        MIN_LOUDNESS = -33
        MAX_LOUDNESS = -25
        def normalize(signal, is_noise=False):
            """
            Normalizes an audio signal (numpy array) for loudness and
            returns it as a torch tensor. Noise gets a target loudness
            5 dB lower than the speech sources.
            """
            with warnings.catch_warnings():
                # pyloudnorm warns on clipped output; we re-scale below anyway.
                warnings.simplefilter("ignore")
                c_loudness = meter.integrated_loudness(signal)
                if is_noise:
                    target_loudness = random.uniform(
                        MIN_LOUDNESS - 5, MAX_LOUDNESS - 5
                    )
                else:
                    target_loudness = random.uniform(MIN_LOUDNESS, MAX_LOUDNESS)
                signal = pyloudnorm.normalize.loudness(
                    signal, c_loudness, target_loudness
                )
                # check for clipping
                if np.max(np.abs(signal)) >= 1:
                    signal = signal * MAX_AMP / np.max(np.abs(signal))
            return torch.from_numpy(signal)
        for i, spk_file in enumerate(spk_files):
            # select random offset
            length = torchaudio.info(spk_file).num_frames
            start = 0
            stop = length
            if length > minlen:  # take a random window
                start = np.random.randint(0, length - minlen)
                stop = start + minlen
            tmp, fs_read = torchaudio.load(
                spk_file, frame_offset=start, num_frames=stop - start,
            )
            tmp = tmp[0].numpy()
            tmp = normalize(tmp)
            sources.append(tmp)
        sources = torch.stack(sources)
        mixture = torch.sum(sources, 0)
        if hparams["use_wham_noise"]:
            len_noise = len(noise)
            len_mix = len(mixture)
            min_len = min(len_noise, len_mix)
            noise = normalize(noise.numpy(), is_noise=True)
            mixture = mixture[:min_len] + noise[:min_len]
        # check for clipping
        max_amp_insig = mixture.abs().max().item()
        if max_amp_insig > MAX_AMP:
            weight = MAX_AMP / max_amp_insig
        else:
            weight = 1
        # Rescale sources with the same weight so the mixture stays their sum.
        sources = weight * sources
        mixture = weight * mixture
        yield mixture
        for i in range(hparams["num_spks"]):
            yield sources[i]
        # If the number of speakers is 2, yield None for the 3rd speaker
        if hparams["num_spks"] == 2:
            yield None
        if hparams["use_wham_noise"]:
            noise = noise * weight
            yield noise
        else:
            yield None
    sb.dataio.dataset.add_dynamic_item([train_data], audio_pipeline)
    sb.dataio.dataset.set_output_keys(
        [train_data],
        ["id", "mix_sig", "s1_sig", "s2_sig", "s3_sig", "noise_sig"],
    )
    # Re-seed numpy per worker so workers do not produce identical mixtures.
    train_data = torch.utils.data.DataLoader(
        train_data,
        batch_size=hparams["dataloader_opts"]["batch_size"],
        num_workers=hparams["dataloader_opts"]["num_workers"],
        collate_fn=PaddedBatch,
        worker_init_fn=lambda x: np.random.seed(
            int.from_bytes(os.urandom(4), "little") + x
        ),
    )
    return train_data
| 7,257 | 30.284483 | 93 | py |
speechbrain | speechbrain-main/recipes/LibriMix/separation/train.py | #!/usr/bin/env/python3
"""Recipe for training a neural speech separation system on Libri2/3Mix datasets.
The system employs an encoder, a decoder, and a masking network.
To run this recipe, do the following:
> python train.py hparams/sepformer-libri2mix.yaml
> python train.py hparams/sepformer-libri3mix.yaml
The experiment file is flexible enough to support different neural
networks. By properly changing the parameter files, you can try
different architectures. The script supports both libri2mix and
libri3mix.
Authors
* Cem Subakan 2020
* Mirco Ravanelli 2020
* Samuele Cornell 2020
* Mirko Bronzi 2020
* Jianyuan Zhong 2020
"""
import os
import sys
import torch
import torch.nn.functional as F
import torchaudio
import speechbrain as sb
import speechbrain.nnet.schedulers as schedulers
from speechbrain.utils.distributed import run_on_main
from torch.cuda.amp import autocast
from hyperpyyaml import load_hyperpyyaml
import numpy as np
from tqdm import tqdm
import csv
import logging
logger = logging.getLogger(__name__)
# Define training procedure
class Separation(sb.Brain):
    """Brain subclass implementing training and evaluation of the
    encoder/masker/decoder source-separation model."""

    def compute_forward(self, mix, targets, stage, noise=None):
        """Forward computations from the mixture to the separated signals.

        Arguments
        ---------
        mix : tuple
            (mixture tensor, relative lengths) as produced by PaddedBatch.
        targets : list
            One (signal, lengths) pair per speaker.
        stage : sb.Stage
            Current stage; augmentations run only in TRAIN.
        noise : tensor, optional
            WHAM! noise batch (used only when use_wham_noise is set).

        Returns
        -------
        tuple
            (estimated sources, possibly-augmented targets).
        """
        # Unpack lists and put tensors in the right device
        mix, mix_lens = mix
        mix, mix_lens = mix.to(self.device), mix_lens.to(self.device)
        # Convert targets to a single tensor with sources on the last dim
        targets = torch.cat(
            [targets[i][0].unsqueeze(-1) for i in range(self.hparams.num_spks)],
            dim=-1,
        ).to(self.device)
        # Add speech distortions (train-time only, no gradients needed)
        if stage == sb.Stage.TRAIN:
            with torch.no_grad():
                if self.hparams.use_speedperturb or self.hparams.use_rand_shift:
                    mix, targets = self.add_speed_perturb(targets, mix_lens)
                    # Perturbed mixture is rebuilt as the sum of sources
                    mix = targets.sum(-1)
                if self.hparams.use_wham_noise:
                    noise = noise.to(self.device)
                    len_noise = noise.shape[1]
                    len_mix = mix.shape[1]
                    min_len = min(len_noise, len_mix)
                    # add the noise
                    mix = mix[:, :min_len] + noise[:, :min_len]
                    # fix the length of targets also
                    targets = targets[:, :min_len, :]
                if self.hparams.use_wavedrop:
                    mix = self.hparams.wavedrop(mix, mix_lens)
                if self.hparams.limit_training_signal_len:
                    mix, targets = self.cut_signals(mix, targets)
        # Separation
        mix_w = self.hparams.Encoder(mix)
        est_mask = self.hparams.MaskNet(mix_w)
        # Replicate encoded mixture once per speaker and apply the masks
        mix_w = torch.stack([mix_w] * self.hparams.num_spks)
        sep_h = mix_w * est_mask
        # Decoding
        est_source = torch.cat(
            [
                self.hparams.Decoder(sep_h[i]).unsqueeze(-1)
                for i in range(self.hparams.num_spks)
            ],
            dim=-1,
        )
        # T changed after conv1d in encoder, fix it here
        T_origin = mix.size(1)
        T_est = est_source.size(1)
        if T_origin > T_est:
            est_source = F.pad(est_source, (0, 0, 0, T_origin - T_est))
        else:
            est_source = est_source[:, :T_origin, :]
        return est_source, targets

    def compute_objectives(self, predictions, targets):
        """Computes the si-snr loss between targets and predictions."""
        return self.hparams.loss(targets, predictions)

    def fit_batch(self, batch):
        """Trains one batch and returns the detached loss.

        Handles both mixed-precision (auto_mix_prec) and full-precision
        paths; both paths share the loss-thresholding and the
        non-finite-loss skip logic.
        """
        # Unpacking batch list
        mixture = batch.mix_sig
        targets = [batch.s1_sig, batch.s2_sig]
        if self.hparams.use_wham_noise:
            noise = batch.noise_sig[0]
        else:
            noise = None
        if self.hparams.num_spks == 3:
            targets.append(batch.s3_sig)
        if self.auto_mix_prec:
            with autocast():
                predictions, targets = self.compute_forward(
                    mixture, targets, sb.Stage.TRAIN, noise
                )
                loss = self.compute_objectives(predictions, targets)
                # hard threshold the easy dataitems
                if self.hparams.threshold_byloss:
                    th = self.hparams.threshold
                    loss_to_keep = loss[loss > th]
                    if loss_to_keep.nelement() > 0:
                        loss = loss_to_keep.mean()
                else:
                    loss = loss.mean()
            if (
                loss < self.hparams.loss_upper_lim and loss.nelement() > 0
            ):  # the fix for computational problems
                self.scaler.scale(loss).backward()
                if self.hparams.clip_grad_norm >= 0:
                    # Unscale before clipping so the norm is in true units
                    self.scaler.unscale_(self.optimizer)
                    torch.nn.utils.clip_grad_norm_(
                        self.modules.parameters(), self.hparams.clip_grad_norm,
                    )
                self.scaler.step(self.optimizer)
                self.scaler.update()
            else:
                self.nonfinite_count += 1
                logger.info(
                    "infinite loss or empty loss! it happened {} times so far - skipping this batch".format(
                        self.nonfinite_count
                    )
                )
                loss.data = torch.tensor(0).to(self.device)
        else:
            predictions, targets = self.compute_forward(
                mixture, targets, sb.Stage.TRAIN, noise
            )
            loss = self.compute_objectives(predictions, targets)
            # hard threshold the easy dataitems
            if self.hparams.threshold_byloss:
                th = self.hparams.threshold
                loss_to_keep = loss[loss > th]
                if loss_to_keep.nelement() > 0:
                    loss = loss_to_keep.mean()
            else:
                loss = loss.mean()
            if (
                loss < self.hparams.loss_upper_lim and loss.nelement() > 0
            ):  # the fix for computational problems
                loss.backward()
                if self.hparams.clip_grad_norm >= 0:
                    torch.nn.utils.clip_grad_norm_(
                        self.modules.parameters(), self.hparams.clip_grad_norm
                    )
                self.optimizer.step()
            else:
                self.nonfinite_count += 1
                logger.info(
                    "infinite loss or empty loss! it happened {} times so far - skipping this batch".format(
                        self.nonfinite_count
                    )
                )
                loss.data = torch.tensor(0).to(self.device)
        self.optimizer.zero_grad()
        return loss.detach().cpu()

    def evaluate_batch(self, batch, stage):
        """Computations needed for validation/test batches."""
        snt_id = batch.id
        mixture = batch.mix_sig
        targets = [batch.s1_sig, batch.s2_sig]
        if self.hparams.num_spks == 3:
            targets.append(batch.s3_sig)
        with torch.no_grad():
            predictions, targets = self.compute_forward(mixture, targets, stage)
            loss = self.compute_objectives(predictions, targets)
        # Manage audio file saving (optionally capped by n_audio_to_save)
        if stage == sb.Stage.TEST and self.hparams.save_audio:
            if hasattr(self.hparams, "n_audio_to_save"):
                if self.hparams.n_audio_to_save > 0:
                    self.save_audio(snt_id[0], mixture, targets, predictions)
                    self.hparams.n_audio_to_save += -1
            else:
                self.save_audio(snt_id[0], mixture, targets, predictions)
        return loss.mean().detach()

    def on_stage_end(self, stage, stage_loss, epoch):
        """Gets called at the end of an epoch: logging, LR annealing,
        checkpointing on the validation si-snr."""
        # Compute/store important stats
        stage_stats = {"si-snr": stage_loss}
        if stage == sb.Stage.TRAIN:
            self.train_stats = stage_stats
        # Perform end-of-iteration things, like annealing, logging, etc.
        if stage == sb.Stage.VALID:
            # Learning rate annealing
            if isinstance(
                self.hparams.lr_scheduler, schedulers.ReduceLROnPlateau
            ):
                current_lr, next_lr = self.hparams.lr_scheduler(
                    [self.optimizer], epoch, stage_loss
                )
                schedulers.update_learning_rate(self.optimizer, next_lr)
            else:
                # if we do not use the reducelronplateau, we do not change the lr
                current_lr = self.hparams.optimizer.optim.param_groups[0]["lr"]
            self.hparams.train_logger.log_stats(
                stats_meta={"epoch": epoch, "lr": current_lr},
                train_stats=self.train_stats,
                valid_stats=stage_stats,
            )
            # Keep only the checkpoint with the lowest (best) si-snr loss
            self.checkpointer.save_and_keep_only(
                meta={"si-snr": stage_stats["si-snr"]}, min_keys=["si-snr"],
            )
        elif stage == sb.Stage.TEST:
            self.hparams.train_logger.log_stats(
                stats_meta={"Epoch loaded": self.hparams.epoch_counter.current},
                test_stats=stage_stats,
            )

    def add_speed_perturb(self, targets, targ_lens):
        """Adds speed perturbation and random_shift to the input signals.

        Returns the recombined mixture and the (possibly shortened)
        targets tensor.
        """
        min_len = -1
        recombine = False
        if self.hparams.use_speedperturb:
            # Performing speed change (independently on each source)
            new_targets = []
            recombine = True
            for i in range(targets.shape[-1]):
                new_target = self.hparams.speedperturb(
                    targets[:, :, i], targ_lens
                )
                new_targets.append(new_target)
                # Track the shortest perturbed source length
                if i == 0:
                    min_len = new_target.shape[-1]
                else:
                    if new_target.shape[-1] < min_len:
                        min_len = new_target.shape[-1]
        if self.hparams.use_rand_shift:
            # Performing random_shift (independently on each source)
            recombine = True
            for i in range(targets.shape[-1]):
                rand_shift = torch.randint(
                    self.hparams.min_shift, self.hparams.max_shift, (1,)
                )
                new_targets[i] = new_targets[i].to(self.device)
                new_targets[i] = torch.roll(
                    new_targets[i], shifts=(rand_shift[0],), dims=1
                )
        # Re-combination: truncate every source to min_len and restack
        if recombine:
            if self.hparams.use_speedperturb:
                targets = torch.zeros(
                    targets.shape[0],
                    min_len,
                    targets.shape[-1],
                    device=targets.device,
                    dtype=torch.float,
                )
            for i, new_target in enumerate(new_targets):
                targets[:, :, i] = new_targets[i][:, 0:min_len]
        mix = targets.sum(-1)
        return mix, targets

    def cut_signals(self, mixture, targets):
        """This function selects a random segment of a given length within the mixture.
        The corresponding targets are selected accordingly."""
        randstart = torch.randint(
            0,
            1 + max(0, mixture.shape[1] - self.hparams.training_signal_len),
            (1,),
        ).item()
        targets = targets[
            :, randstart : randstart + self.hparams.training_signal_len, :
        ]
        mixture = mixture[
            :, randstart : randstart + self.hparams.training_signal_len
        ]
        return mixture, targets

    def reset_layer_recursively(self, layer):
        """Reinitializes the parameters of the neural networks
        (depth-first over all sub-modules)."""
        if hasattr(layer, "reset_parameters"):
            layer.reset_parameters()
        for child_layer in layer.modules():
            if layer != child_layer:
                self.reset_layer_recursively(child_layer)

    def save_results(self, test_data):
        """This script computes the SDR and SI-SNR metrics and saves
        them into a csv file (one row per test utterance plus an "avg" row)."""
        # This package is required for SDR computation
        from mir_eval.separation import bss_eval_sources
        # Create folders where to store audio
        save_file = os.path.join(self.hparams.output_folder, "test_results.csv")
        # Variable init
        all_sdrs = []
        all_sdrs_i = []
        all_sisnrs = []
        all_sisnrs_i = []
        csv_columns = ["snt_id", "sdr", "sdr_i", "si-snr", "si-snr_i"]
        test_loader = sb.dataio.dataloader.make_dataloader(
            test_data, **self.hparams.dataloader_opts
        )
        with open(save_file, "w") as results_csv:
            writer = csv.DictWriter(results_csv, fieldnames=csv_columns)
            writer.writeheader()
            # Loop over all test sentence
            with tqdm(test_loader, dynamic_ncols=True) as t:
                for i, batch in enumerate(t):
                    # Apply Separation
                    mixture, mix_len = batch.mix_sig
                    snt_id = batch.id
                    targets = [batch.s1_sig, batch.s2_sig]
                    if self.hparams.num_spks == 3:
                        targets.append(batch.s3_sig)
                    with torch.no_grad():
                        predictions, targets = self.compute_forward(
                            batch.mix_sig, targets, sb.Stage.TEST
                        )
                    # Compute SI-SNR
                    sisnr = self.compute_objectives(predictions, targets)
                    # Compute SI-SNR improvement: baseline uses the raw
                    # mixture replicated once per speaker
                    mixture_signal = torch.stack(
                        [mixture] * self.hparams.num_spks, dim=-1
                    )
                    mixture_signal = mixture_signal.to(targets.device)
                    sisnr_baseline = self.compute_objectives(
                        mixture_signal, targets
                    )
                    sisnr_i = sisnr - sisnr_baseline
                    # Compute SDR
                    sdr, _, _, _ = bss_eval_sources(
                        targets[0].t().cpu().numpy(),
                        predictions[0].t().detach().cpu().numpy(),
                    )
                    sdr_baseline, _, _, _ = bss_eval_sources(
                        targets[0].t().cpu().numpy(),
                        mixture_signal[0].t().detach().cpu().numpy(),
                    )
                    sdr_i = sdr.mean() - sdr_baseline.mean()
                    # Saving on a csv file (losses are negated: lower
                    # si-snr loss corresponds to higher reported si-snr)
                    row = {
                        "snt_id": snt_id[0],
                        "sdr": sdr.mean(),
                        "sdr_i": sdr_i,
                        "si-snr": -sisnr.item(),
                        "si-snr_i": -sisnr_i.item(),
                    }
                    writer.writerow(row)
                    # Metric Accumulation
                    all_sdrs.append(sdr.mean())
                    all_sdrs_i.append(sdr_i.mean())
                    all_sisnrs.append(-sisnr.item())
                    all_sisnrs_i.append(-sisnr_i.item())
                row = {
                    "snt_id": "avg",
                    "sdr": np.array(all_sdrs).mean(),
                    "sdr_i": np.array(all_sdrs_i).mean(),
                    "si-snr": np.array(all_sisnrs).mean(),
                    "si-snr_i": np.array(all_sisnrs_i).mean(),
                }
                writer.writerow(row)
        logger.info("Mean SISNR is {}".format(np.array(all_sisnrs).mean()))
        logger.info("Mean SISNRi is {}".format(np.array(all_sisnrs_i).mean()))
        logger.info("Mean SDR is {}".format(np.array(all_sdrs).mean()))
        logger.info("Mean SDRi is {}".format(np.array(all_sdrs_i).mean()))

    def save_audio(self, snt_id, mixture, targets, predictions):
        "Saves the test audio (mixture, targets, and estimated sources) on disk."
        # Create output folder
        save_path = os.path.join(self.hparams.save_folder, "audio_results")
        if not os.path.exists(save_path):
            os.mkdir(save_path)
        for ns in range(self.hparams.num_spks):
            # Estimated source (peak-normalized before writing)
            signal = predictions[0, :, ns]
            signal = signal / signal.abs().max()
            save_file = os.path.join(
                save_path, "item{}_source{}hat.wav".format(snt_id, ns + 1)
            )
            torchaudio.save(
                save_file, signal.unsqueeze(0).cpu(), self.hparams.sample_rate
            )
            # Original source
            signal = targets[0, :, ns]
            signal = signal / signal.abs().max()
            save_file = os.path.join(
                save_path, "item{}_source{}.wav".format(snt_id, ns + 1)
            )
            torchaudio.save(
                save_file, signal.unsqueeze(0).cpu(), self.hparams.sample_rate
            )
        # Mixture
        signal = mixture[0][0, :]
        signal = signal / signal.abs().max()
        save_file = os.path.join(save_path, "item{}_mix.wav".format(snt_id))
        torchaudio.save(
            save_file, signal.unsqueeze(0).cpu(), self.hparams.sample_rate
        )
def dataio_prep(hparams):
    """Creates the datasets and their audio-loading pipelines.

    Arguments
    ---------
    hparams : dict
        Experiment hyperparameters; must provide the csv manifests
        ("train_data", "valid_data", "test_data"), "data_folder",
        "num_spks" and "use_wham_noise".

    Returns
    -------
    tuple
        (train, valid, test) DynamicItemDataset objects.
    """
    # 1. One dataset per split, all sharing the same csv replacements.
    def _dataset_from_csv(csv_path):
        return sb.dataio.dataset.DynamicItemDataset.from_csv(
            csv_path=csv_path,
            replacements={"data_root": hparams["data_folder"]},
        )

    train_data = _dataset_from_csv(hparams["train_data"])
    valid_data = _dataset_from_csv(hparams["valid_data"])
    test_data = _dataset_from_csv(hparams["test_data"])
    datasets = [train_data, valid_data, test_data]

    # 2. Audio pipelines: one reader per signal column in the csv.
    @sb.utils.data_pipeline.takes("mix_wav")
    @sb.utils.data_pipeline.provides("mix_sig")
    def audio_pipeline_mix(mix_wav):
        return sb.dataio.dataio.read_audio(mix_wav)

    @sb.utils.data_pipeline.takes("s1_wav")
    @sb.utils.data_pipeline.provides("s1_sig")
    def audio_pipeline_s1(s1_wav):
        return sb.dataio.dataio.read_audio(s1_wav)

    @sb.utils.data_pipeline.takes("s2_wav")
    @sb.utils.data_pipeline.provides("s2_sig")
    def audio_pipeline_s2(s2_wav):
        return sb.dataio.dataio.read_audio(s2_wav)

    if hparams["num_spks"] == 3:

        @sb.utils.data_pipeline.takes("s3_wav")
        @sb.utils.data_pipeline.provides("s3_sig")
        def audio_pipeline_s3(s3_wav):
            return sb.dataio.dataio.read_audio(s3_wav)

    if hparams["use_wham_noise"]:

        @sb.utils.data_pipeline.takes("noise_wav")
        @sb.utils.data_pipeline.provides("noise_sig")
        def audio_pipeline_noise(noise_wav):
            return sb.dataio.dataio.read_audio(noise_wav)

    sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline_mix)
    sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline_s1)
    sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline_s2)
    if hparams["num_spks"] == 3:
        sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline_s3)
    if hparams["use_wham_noise"]:
        print("Using the WHAM! noise in the data pipeline")
        sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline_noise)

    # 3. Output keys follow the configured number of speakers and noise.
    output_keys = ["id", "mix_sig", "s1_sig", "s2_sig"]
    if hparams["num_spks"] == 3:
        output_keys.append("s3_sig")
    if hparams["use_wham_noise"]:
        output_keys.append("noise_sig")
    sb.dataio.dataset.set_output_keys(datasets, output_keys)
    return train_data, valid_data, test_data
if __name__ == "__main__":
    # Load hyperparameters file with command-line overrides
    hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])
    with open(hparams_file) as fin:
        hparams = load_hyperpyyaml(fin, overrides)
    # Initialize ddp (useful only for multi-GPU DDP training)
    sb.utils.distributed.ddp_init_group(run_opts)
    # Create experiment directory
    sb.create_experiment_directory(
        experiment_directory=hparams["output_folder"],
        hyperparams_to_save=hparams_file,
        overrides=overrides,
    )
    # Dynamic mixing needs an existing base folder of source utterances
    if hparams["dynamic_mixing"] and not os.path.exists(
        hparams["base_folder_dm"]
    ):
        print(
            "Please, specify a valid base_folder_dm folder when using dynamic mixing"
        )
        sys.exit(1)
    # Data preparation (run only on the main process under DDP)
    from prepare_data import prepare_librimix
    run_on_main(
        prepare_librimix,
        kwargs={
            "datapath": hparams["data_folder"],
            "savepath": hparams["save_folder"],
            "n_spks": hparams["num_spks"],
            "skip_prep": hparams["skip_prep"],
            "librimix_addnoise": hparams["use_wham_noise"],
            "fs": hparams["sample_rate"],
        },
    )
    # Create dataset objects
    if hparams["dynamic_mixing"]:
        from dynamic_mixing import (
            dynamic_mix_data_prep_librimix as dynamic_mix_data_prep,
        )
        # if the base_folder for dm is not processed, preprocess them
        if "processed" not in hparams["base_folder_dm"]:
            # if the processed folder already exists we just use it otherwise we do the preprocessing
            if not os.path.exists(
                os.path.normpath(hparams["base_folder_dm"]) + "_processed"
            ):
                from recipes.LibriMix.meta.preprocess_dynamic_mixing import (
                    resample_folder,
                )
                print("Resampling the base folder")
                run_on_main(
                    resample_folder,
                    kwargs={
                        "input_folder": hparams["base_folder_dm"],
                        "output_folder": os.path.normpath(
                            hparams["base_folder_dm"]
                        )
                        + "_processed",
                        "fs": hparams["sample_rate"],
                        "regex": "**/*.flac",
                    },
                )
                # adjust the base_folder_dm path
                hparams["base_folder_dm"] = (
                    os.path.normpath(hparams["base_folder_dm"]) + "_processed"
                )
            else:
                print(
                    "Using the existing processed folder on the same directory as base_folder_dm"
                )
                hparams["base_folder_dm"] = (
                    os.path.normpath(hparams["base_folder_dm"]) + "_processed"
                )
        # Subset of hparams consumed by the dynamic-mixing data pipeline
        dm_hparams = {
            "train_data": hparams["train_data"],
            "data_folder": hparams["data_folder"],
            "base_folder_dm": hparams["base_folder_dm"],
            "sample_rate": hparams["sample_rate"],
            "num_spks": hparams["num_spks"],
            "training_signal_len": hparams["training_signal_len"],
            "dataloader_opts": hparams["dataloader_opts"],
        }
        train_data = dynamic_mix_data_prep(dm_hparams)
        # valid/test splits always use the static (non-mixed) pipeline
        _, valid_data, test_data = dataio_prep(hparams)
    else:
        train_data, valid_data, test_data = dataio_prep(hparams)
    # Load pretrained model if pretrained_separator is present in the yaml
    if "pretrained_separator" in hparams:
        run_on_main(hparams["pretrained_separator"].collect_files)
        hparams["pretrained_separator"].load_collected(
            device=run_opts["device"]
        )
    # Brain class initialization
    separator = Separation(
        modules=hparams["modules"],
        opt_class=hparams["optimizer"],
        hparams=hparams,
        run_opts=run_opts,
        checkpointer=hparams["checkpointer"],
    )
    # re-initialize the parameters if we don't use a pretrained model
    if "pretrained_separator" not in hparams:
        for module in separator.modules.values():
            separator.reset_layer_recursively(module)
    if not hparams["test_only"]:
        # Training
        separator.fit(
            separator.hparams.epoch_counter,
            train_data,
            valid_data,
            train_loader_kwargs=hparams["dataloader_opts"],
            valid_loader_kwargs=hparams["dataloader_opts"],
        )
    # Eval: load the best (lowest si-snr loss) checkpoint and report metrics
    separator.evaluate(test_data, min_key="si-snr")
    separator.save_results(test_data)
speechbrain | speechbrain-main/recipes/LibriMix/meta/preprocess_dynamic_mixing.py | """
This script allows to resample a folder which contains audio files.
The files are parsed recursively. An exact copy of the folder is created,
with same structure but contained resampled audio files.
Resampling is performed by using sox through torchaudio.
Author
------
Samuele Cornell, 2020
"""
import os
import argparse
from pathlib import Path
import tqdm
import torchaudio
import glob
# from oct2py import octave
from scipy import signal
import numpy as np
import torch
# Command-line interface used when this file is run as a script. The first
# positional argument of ArgumentParser is `prog`, so this description string
# is displayed as the program name in the --help output.
parser = argparse.ArgumentParser(
    "utility for resampling all audio files in a folder recursively"
    "It --input_folder to --output_folder and "
    "resamples all audio files with specified format to --fs."
)
# Source tree, scanned recursively for audio files matching --regex.
parser.add_argument("--input_folder", type=str, required=True)
# Destination tree mirroring the structure of --input_folder.
parser.add_argument("--output_folder", type=str, required=True)
# NOTE(review): declared type=str with an int default; the value is cast with
# int(args.fs) before use in the __main__ block — confirm this is intended.
parser.add_argument(
    "--fs", type=str, default=8000, help="this is the target sampling frequency"
)
# Glob pattern (relative to --input_folder) selecting the files to resample.
parser.add_argument("--regex", type=str, default="**/*.wav")
def resample_folder(input_folder, output_folder, fs, regex):
    """Resamples the audio files within an input folder.

    Every matched file is resampled to ``fs``, peak-normalized, and written
    to ``output_folder`` under the same relative path, with the original
    peak value recorded in the output filename.

    Arguments
    ---------
    input_folder : path
        Path of the folder to resample.
    output_folder : path
        Path of the output folder with the resampled data.
    fs : int
        Target sampling frequency.
    regex : str
        Glob pattern (relative to ``input_folder``) for the file search.
    """
    matched_files = glob.glob(os.path.join(input_folder, regex), recursive=True)
    for filepath in tqdm.tqdm(matched_files):
        waveform, orig_fs = torchaudio.load(filepath)
        # Work on the first channel only, as a numpy array.
        samples = waveform[0].numpy()
        samples = signal.resample_poly(samples, fs, orig_fs)
        # Peak-normalize; the peak is kept in the filename so it can be
        # recovered later.
        peak = np.max(np.abs(samples))
        samples = torch.from_numpy(samples / peak).float()
        rel_path = Path(filepath).relative_to(Path(input_folder))
        out_rel_path = os.path.join(
            rel_path.parent, rel_path.stem + "_peak_{}.wav".format(peak)
        )
        os.makedirs(
            Path(os.path.join(output_folder, rel_path)).parent,
            exist_ok=True,
        )
        torchaudio.save(
            os.path.join(output_folder, out_rel_path),
            samples.reshape(1, -1),
            fs,
        )
if __name__ == "__main__":
    # Parse the CLI options and launch the recursive resampling.
    cli_args = parser.parse_args()
    resample_folder(
        cli_args.input_folder,
        cli_args.output_folder,
        int(cli_args.fs),
        cli_args.regex,
    )
| 2,732 | 27.175258 | 80 | py |
speechbrain | speechbrain-main/recipes/ESC50/esc50_prepare.py | """
Creates data manifest files for ESC50
If the data does not exist in the specified --data_folder, we download the data automatically.
https://urbansounddataset.weebly.com/urbansound8k.htm://github.com/karolpiczak/ESC-50
Authors:
* Cem Subakan 2022, 2023
* Francesco Paissan 2022, 2023
Adapted from the Urbansound8k recipe.
"""
import os
import shutil
import json
import logging
import torchaudio
from speechbrain.dataio.dataio import read_audio
from speechbrain.dataio.dataio import load_data_csv
from speechbrain.pretrained import fetch
logger = logging.getLogger(__name__)
ESC50_DOWNLOAD_URL = "https://github.com/karoldvl/ESC-50/archive/master.zip"
MODIFIED_METADATA_FILE_NAME = "esc50_speechbrain.csv"
ACCEPTABLE_FOLD_NUMS = [1, 2, 3, 4, 5]
def download_esc50(data_path):
    """
    Downloads the ESC50 dataset into ``data_path`` unless it is already there.

    The presence of the ``meta`` sub-folder is used as the marker that the
    dataset has already been downloaded.

    Arguments
    ---------
    data_path: str or Path
        Directory used to save the dataset.
    """
    if os.path.exists(os.path.join(data_path, "meta")):
        # Dataset already present: nothing to do.
        return
    print(
        f"ESC50 is missing. We are now downloading it. Be patient it's a 600M file. You can check {data_path}/temp_download to see the download progression"
    )
    download_dir = os.path.join(data_path, "temp_download")
    # Download the archive into a temporary sub-folder.
    fetch(
        "master.zip",
        "https://github.com/karoldvl/ESC-50/archive/",
        savedir=download_dir,
    )
    # Unpack the .zip file next to the temporary folder.
    shutil.unpack_archive(os.path.join(download_dir, "master.zip"), data_path)
    # Move the extracted files up to data_path.
    extracted_dir = os.path.join(data_path, "ESC-50-master")
    for entry in os.listdir(extracted_dir):
        shutil.move(os.path.join(extracted_dir, entry), data_path)
    # Remove the now-empty intermediate folders.
    shutil.rmtree(os.path.join(data_path, "temp_download"))
    shutil.rmtree(extracted_dir)
    print(f"ESC50 is downloaded in {data_path}")
def prepare_esc50(
    data_folder,
    audio_data_folder,
    save_json_train,
    save_json_valid,
    save_json_test,
    train_fold_nums=[1, 2, 3],
    valid_fold_nums=[4],
    test_fold_nums=[5],
    skip_manifest_creation=False,
):
    """
    Prepares the json files for the ESC50 dataset.
    Downloads the dataset if it is not found in the `data_folder`.

    Arguments
    ---------
    data_folder : str
        Path to the folder where the ESC50 dataset (including the metadata) is stored.
    audio_data_folder: str
        Path to the folder where the ESC50 dataset audio files are stored.
    save_json_train : str
        Path where the train data specification file will be saved.
    save_json_valid : str
        Path where the validation data specification file will be saved.
    save_json_test : str
        Path where the test data specification file will be saved.
    train_fold_nums : list or int (integers [1,5])
        A list of integers defining which pre-defined "folds" to use for
        training. Must be exclusive of valid_fold_nums and test_fold_nums.
    valid_fold_nums : list or int (integers [1,5])
        A list of integers defining which pre-defined "folds" to use for
        validation. Must be exclusive of train_fold_nums and test_fold_nums.
    test_fold_nums : list or int (integers [1,5])
        A list of integers defining which pre-defined "folds" to use for
        test. Must be exclusive of train_fold_nums and valid_fold_nums.
    skip_manifest_creation : bool
        If True, return right after the download check without (re)creating
        the json manifests.

    Example
    -------
    >>> data_folder = '/path/to/ESC-50-master'
    >>> prepare_esc50(data_folder, 'audio', 'train.json', 'valid.json', 'test.json', [1,2,3], [4], [5])
    """

    def _report_error(message):
        """Print *message* and also record it on the module logger."""
        print(message)
        logger.info(message)

    download_esc50(data_folder)
    # Accept a bare int for any fold argument for convenience.
    if isinstance(train_fold_nums, int):
        train_fold_nums = [train_fold_nums]
    if isinstance(valid_fold_nums, int):
        valid_fold_nums = [valid_fold_nums]
    if isinstance(test_fold_nums, int):
        test_fold_nums = [test_fold_nums]
    # Validate passed fold params: every fold must be one of the five
    # pre-defined ESC-50 folds.
    fold_groups = (
        ("Train", train_fold_nums),
        ("Validation", valid_fold_nums),
        ("Test", test_fold_nums),
    )
    for group_name, fold_nums in fold_groups:
        for fold_num in fold_nums:
            if fold_num not in ACCEPTABLE_FOLD_NUMS:
                _report_error(
                    f"{group_name} fold numbers {fold_nums}, contains an invalid value. Must be in {ACCEPTABLE_FOLD_NUMS}"
                )
                return
    # Check if train and valid, and train and test folds are exclusive
    if folds_overlap(train_fold_nums, valid_fold_nums):
        _report_error(
            f"Train {train_fold_nums}, and Valid {valid_fold_nums} folds must be mutually exclusive!"
        )
        return
    if folds_overlap(train_fold_nums, test_fold_nums):
        _report_error(
            f"Train {train_fold_nums} and Test {test_fold_nums} folds must be mutually exclusive!"
        )
        return
    # Don't need to create the manifests every single time
    if skip_manifest_creation is True:
        return
    # If our modified metadata file does not exist, create it.
    # NOTE: create_metadata_speechbrain_file writes the file under "meta/"
    # (the ESC-50 layout); the check previously looked under "metadata/"
    # and therefore regenerated the file on every call.
    esc50_speechbrain_metadata_csv_path = os.path.join(
        os.path.abspath(data_folder), "meta/", MODIFIED_METADATA_FILE_NAME
    )
    if not os.path.exists(esc50_speechbrain_metadata_csv_path):
        esc50_speechbrain_metadata_csv_path = create_metadata_speechbrain_file(
            data_folder
        )
    # Read the metadata into a dictionary
    # Every key of this dictionary is now one of the sound filenames, without the ".wav" suffix
    metadata = load_data_csv(esc50_speechbrain_metadata_csv_path)
    # List files and create manifest from list
    logger.info(
        f"Creating {save_json_train}, {save_json_valid}, and {save_json_test}"
    )
    # Creating json files
    create_json(metadata, audio_data_folder, train_fold_nums, save_json_train)
    create_json(metadata, audio_data_folder, valid_fold_nums, save_json_valid)
    create_json(metadata, audio_data_folder, test_fold_nums, save_json_test)
def create_json(metadata, audio_data_folder, folds_list, json_file):
    """
    Creates the json file given a list of wav files.

    Arguments
    ---------
    metadata: dict
        A dictionary containing the ESC50 metadata file modified for the
        SpeechBrain, such that keys are IDs (which are the .wav file names without the file extension).
    audio_data_folder : str or Path
        Data folder that stores ESC50 samples.
    folds_list : list of int
        The list of folds [1,5] to include in this batch
    json_file : str
        The path of the output json file
    """
    # Processing all the wav files in the list
    json_dict = {}
    for ID, sample_metadata in metadata.items():
        fold_num = int(sample_metadata["fold"])
        if fold_num in folds_list:
            # Reading the signal (to retrieve duration in seconds)
            wav_file = os.path.join(
                os.path.abspath(audio_data_folder),
                sample_metadata["filename"],
            )
            try:
                signal = read_audio(wav_file)
                file_info = torchaudio.info(wav_file)
                # If we're using sox/soundfile backend, file_info will have the old type
                if isinstance(
                    file_info, torchaudio.backend.common.AudioMetaData
                ):
                    duration = signal.shape[0] / file_info.sample_rate
                else:
                    duration = signal.shape[0] / file_info[0].rate
                # Create entry for this sample ONLY if we have successfully read-in the file using SpeechBrain/torchaudio
                json_dict[ID] = {
                    "wav": sample_metadata["filename"],
                    "classID": int(sample_metadata["target"]),
                    "class_string": sample_metadata["class_string"],
                    "fold": sample_metadata["fold"],
                    "duration": duration,
                }
            except Exception:
                print(
                    f"There was a problem reading the file:{wav_file}. Skipping duration field for it."
                )
                logger.exception(
                    f"There was a problem reading the file:{wav_file}. Skipping it."
                )
    # Writing the dictionary to the json file.
    # Use makedirs rather than mkdir: the output directory may be nested
    # more than one level deep, and os.mkdir("") raises when json_file has
    # no directory component at all.
    parent_dir = os.path.dirname(json_file)
    if parent_dir:
        os.makedirs(parent_dir, exist_ok=True)
    with open(json_file, mode="w") as json_f:
        json.dump(json_dict, json_f, indent=2)
    logger.info(f"{json_file} successfully created!")
def folds_overlap(list1, list2):
    """Returns True if any passed list has an incorrect type OR the lists share items.

    Arguments
    ---------
    list1 : list
        First list for comparison.
    list2 : list
        Second list for comparison.

    Returns
    -------
    bool
        True when either argument is not a list, or when the two lists have
        at least one element in common; False otherwise.
    """
    # isinstance (unlike a strict type() comparison) also accepts list
    # subclasses, which is backward-compatible for all plain-list callers.
    if not isinstance(list1, list) or not isinstance(list2, list):
        return True
    return any(item in list1 for item in list2)
def check_folders(*folders):
    """Returns False if any passed folder does not exist.

    Arguments
    ---------
    folders: list
        Folders to check.
    """
    # True only when every given path exists on disk (vacuously True for none).
    return all(os.path.exists(folder) for folder in folders)
def full_path_to_audio_file(data_folder, slice_file_name, fold_num):
    """Get path to file given slice file name and fold number

    Arguments
    ---------
    data_foder : str
        Folder that contains the dataset.
    slice_file_name : str
        Filename.
    fold_num : int
        Fold number.

    Returns
    ------
    string containing absolute path to corresponding file
    """
    # Layout is <abs data folder>/audio/fold<N>/<file>.
    dataset_root = os.path.abspath(data_folder)
    fold_dir = "fold" + str(fold_num) + "/"
    return os.path.join(dataset_root, "audio/", fold_dir, slice_file_name)
def create_metadata_speechbrain_file(data_folder):
    """Builds the SpeechBrain-ready metadata csv from the original ESC50 csv.

    Arguments
    ---------
    data_folder : str
        ESC50 data folder.

    Returns
    ------
    string containing absolute path to metadata csv file modified for SpeechBrain or None if source file not found
    """
    import pandas as pd

    dataset_root = os.path.abspath(data_folder)
    source_csv_path = os.path.join(dataset_root, "meta/esc50.csv")
    if not os.path.exists(source_csv_path):
        return None
    metadata_df = pd.read_csv(source_csv_path)
    # SpeechBrain wants an ID column: the filename without its ".wav" suffix.
    metadata_df["ID"] = metadata_df.apply(
        lambda row: removesuffix(row["filename"], ".wav"), axis=1
    )
    # "category" is renamed to the column name the recipe expects.
    metadata_df = metadata_df.rename(columns={"category": "class_string"})
    target_csv_path = os.path.join(
        dataset_root, "meta/", MODIFIED_METADATA_FILE_NAME
    )
    metadata_df.to_csv(target_csv_path, index=False)
    return target_csv_path
def removesuffix(somestring, suffix):
    """Removes a suffix from a string.

    Arguments
    ---------
    somestring : str
        Any string.
    suffix : str
        Suffix to be removed from somestring.

    Returns
    ------
    string resulting from suffix removed from somestring, if found, unchanged otherwise
    """
    # Guard against an empty suffix: ``somestring[:-0]`` is ``somestring[:0]``
    # i.e. "", which would wrongly erase the whole string. With the guard this
    # matches the semantics of str.removesuffix (Python 3.9+, PEP 616).
    if suffix and somestring.endswith(suffix):
        return somestring[: -len(suffix)]
    return somestring
| 12,776 | 33.814714 | 160 | py |
speechbrain | speechbrain-main/recipes/ESC50/classification/train_classifier.py | #!/usr/bin/python3
"""Recipe to train a classifier on ESC50 data
We employ an encoder followed by a sound classifier.
To run this recipe, use the following command:
> python train_classifier.py hparams/cnn14.yaml --data_folder yourpath/ESC-50-master
Authors
* Cem Subakan 2022, 2023
* Francesco Paissan 2022, 2023
Based on the Urban8k recipe by
* David Whipps 2021
* Ala Eddine Limame 2021
"""
import os
import sys
import torch
import torchaudio
import speechbrain as sb
from hyperpyyaml import load_hyperpyyaml
from speechbrain.utils.distributed import run_on_main
from esc50_prepare import prepare_esc50
from sklearn.metrics import confusion_matrix
import numpy as np
from confusion_matrix_fig import create_cm_fig
class ESC50Brain(sb.core.Brain):
    """Brain subclass implementing the train/valid/test loops for the ESC50
    sound classifier (spectral front-end -> embedding model -> classifier).
    """
    def compute_forward(self, batch, stage):
        """Computation pipeline based on a encoder + sound classifier.
        Data augmentation and environmental corruption are applied to the
        input sound.

        Arguments
        ---------
        batch : PaddedBatch
            Batch providing ``sig`` (waveforms and relative lengths).
        stage : sb.Stage
            Current stage; not used by this forward pass.

        Returns
        -------
        tuple
            Classifier outputs and the relative lengths.
        """
        batch = batch.to(self.device)
        wavs, lens = batch.sig
        X_stft = self.modules.compute_stft(wavs)
        # Magnitude spectrogram raised to the configured power.
        X_stft_power = sb.processing.features.spectral_magnitude(
            X_stft, power=self.hparams.spec_mag_power
        )
        # Front-end: mel filterbanks or log1p-compressed linear spectra.
        if self.hparams.use_melspectra:
            net_input = self.modules.compute_fbank(X_stft_power)
        else:
            net_input = torch.log1p(X_stft_power)
        # Embeddings + sound classifier
        embeddings = self.modules.embedding_model(net_input)
        # 4-D encoder outputs are averaged over their last two dimensions
        # to obtain one embedding vector per utterance.
        if embeddings.ndim == 4:
            embeddings = embeddings.mean((-1, -2))
        outputs = self.modules.classifier(embeddings)
        return outputs, lens
    def compute_objectives(self, predictions, batch, stage):
        """Computes the loss using class-id as label.

        Also updates the running loss/accuracy metrics and, outside of
        training, the confusion matrices and error metrics.
        """
        predictions, lens = predictions
        uttid = batch.id
        classid, _ = batch.class_string_encoded
        loss = self.hparams.compute_cost(predictions, classid, lens)
        # Some schedulers anneal per batch rather than per epoch.
        if stage != sb.Stage.TEST:
            if hasattr(self.hparams.lr_annealing, "on_batch_end"):
                self.hparams.lr_annealing.on_batch_end(self.optimizer)
        # Append this batch of losses to the loss metric for easy summarization
        self.loss_metric.append(
            uttid, predictions, classid, lens, reduction="batch"
        )
        # Confusion matrices (accumulated per stage outside of training).
        if stage != sb.Stage.TRAIN:
            y_true = classid.cpu().detach().numpy().squeeze(-1)
            y_pred = predictions.cpu().detach().numpy().argmax(-1).squeeze(-1)
            if stage == sb.Stage.VALID:
                confusion_matix = confusion_matrix(
                    y_true,
                    y_pred,
                    labels=sorted(self.hparams.label_encoder.ind2lab.keys()),
                )
                self.valid_confusion_matrix += confusion_matix
            if stage == sb.Stage.TEST:
                confusion_matix = confusion_matrix(
                    y_true,
                    y_pred,
                    labels=sorted(self.hparams.label_encoder.ind2lab.keys()),
                )
                self.test_confusion_matrix += confusion_matix
        # Compute Accuracy using MetricStats
        self.acc_metric.append(
            uttid, predict=predictions, target=classid, lengths=lens
        )
        if stage != sb.Stage.TRAIN:
            self.error_metrics.append(uttid, predictions, classid, lens)
        return loss
    def on_stage_start(self, stage, epoch=None):
        """Gets called at the beginning of each epoch.

        Resets the per-stage metric trackers (loss, accuracy, and — outside
        of training — confusion matrix and error metrics).

        Arguments
        ---------
        stage : sb.Stage
            One of sb.Stage.TRAIN, sb.Stage.VALID, or sb.Stage.TEST.
        epoch : int
            The currently-starting epoch. This is passed
            `None` during the test stage.
        """
        # Set up statistics trackers for this stage
        self.loss_metric = sb.utils.metric_stats.MetricStats(
            metric=sb.nnet.losses.nll_loss
        )
        # Compute Accuracy using MetricStats
        # Define function taking (prediction, target, length) for eval
        def accuracy_value(predict, target, lengths):
            """Computes Accuracy"""
            nbr_correct, nbr_total = sb.utils.Accuracy.Accuracy(
                predict, target, lengths
            )
            acc = torch.tensor([nbr_correct / nbr_total])
            return acc
        self.acc_metric = sb.utils.metric_stats.MetricStats(
            metric=accuracy_value, n_jobs=1
        )
        # Confusion matrices (square, one row/column per class).
        if stage == sb.Stage.VALID:
            self.valid_confusion_matrix = np.zeros(
                shape=(self.hparams.out_n_neurons, self.hparams.out_n_neurons),
                dtype=int,
            )
        if stage == sb.Stage.TEST:
            self.test_confusion_matrix = np.zeros(
                shape=(self.hparams.out_n_neurons, self.hparams.out_n_neurons),
                dtype=int,
            )
        # Set up evaluation-only statistics trackers
        if stage != sb.Stage.TRAIN:
            self.error_metrics = self.hparams.error_stats()
    def on_stage_end(self, stage, stage_loss, epoch=None):
        """Gets called at the end of an epoch.

        Summarizes the stage metrics, anneals the learning rate (VALID),
        logs to stdout/logfile (and optionally tensorboard), and saves
        checkpoints keyed on the validation error.

        Arguments
        ---------
        stage : sb.Stage
            One of sb.Stage.TRAIN, sb.Stage.VALID, sb.Stage.TEST
        stage_loss : float
            The average loss for all of the data processed in this stage.
        epoch : int
            The currently-starting epoch. This is passed
            `None` during the test stage.
        """
        # Compute/store important stats
        if stage == sb.Stage.TRAIN:
            self.train_loss = stage_loss
            self.train_stats = {
                "loss": self.train_loss,
                "acc": self.acc_metric.summarize("average"),
            }
        # Summarize Valid statistics from the stage for record-keeping.
        elif stage == sb.Stage.VALID:
            valid_stats = {
                "loss": stage_loss,
                "acc": self.acc_metric.summarize(
                    "average"
                ),  # "acc": self.valid_acc_metric.summarize(),
                "error": self.error_metrics.summarize("average"),
            }
        # Summarize Test statistics from the stage for record-keeping.
        else:
            test_stats = {
                "loss": stage_loss,
                "acc": self.acc_metric.summarize("average"),
                "error": self.error_metrics.summarize("average"),
            }
        # Perform end-of-iteration things, like annealing, logging, etc.
        if stage == sb.Stage.VALID:
            old_lr, new_lr = self.hparams.lr_annealing(epoch)
            sb.nnet.schedulers.update_learning_rate(self.optimizer, new_lr)
            # Tensorboard logging
            if self.hparams.use_tensorboard:
                self.hparams.tensorboard_train_logger.log_stats(
                    stats_meta={"Epoch": epoch},
                    train_stats=self.train_stats,
                    valid_stats=valid_stats,
                )
                # Log confusion matrix fig to tensorboard
                cm_fig = create_cm_fig(
                    self.valid_confusion_matrix,
                    display_labels=list(
                        self.hparams.label_encoder.ind2lab.values()
                    ),
                )
                self.hparams.tensorboard_train_logger.writer.add_figure(
                    "Validation Confusion Matrix", cm_fig, epoch
                )
            # Per class accuracy from Validation confusion matrix
            # NOTE(review): per_class_acc_arr_str is computed here but not
            # passed to any logger in the VALID branch — confirm whether it
            # should be included in log_stats or removed.
            per_class_acc_arr = np.diag(self.valid_confusion_matrix) / np.sum(
                self.valid_confusion_matrix, axis=1
            )
            per_class_acc_arr_str = "\n" + "\n".join(
                "{:}: {:.3f}".format(
                    self.hparams.label_encoder.decode_ndim(class_id), class_acc
                )
                for class_id, class_acc in enumerate(per_class_acc_arr)
            )
            # The train_logger writes a summary to stdout and to the logfile.
            self.hparams.train_logger.log_stats(
                stats_meta={"epoch": epoch, "lr": old_lr},
                train_stats=self.train_stats,
                valid_stats=valid_stats,
            )
            # Save the current checkpoint and delete previous checkpoints,
            self.checkpointer.save_and_keep_only(
                meta=valid_stats, min_keys=["error"]
            )
        # We also write statistics about test data to stdout and to the logfile.
        if stage == sb.Stage.TEST:
            # Per class accuracy from Test confusion matrix
            per_class_acc_arr = np.diag(self.test_confusion_matrix) / np.sum(
                self.test_confusion_matrix, axis=1
            )
            per_class_acc_arr_str = "\n" + "\n".join(
                "{:}: {:.3f}".format(class_id, class_acc)
                for class_id, class_acc in enumerate(per_class_acc_arr)
            )
            self.hparams.train_logger.log_stats(
                {
                    "Epoch loaded": self.hparams.epoch_counter.current,
                    "\n Per Class Accuracy": per_class_acc_arr_str,
                    "\n Confusion Matrix": "\n{:}\n".format(
                        self.test_confusion_matrix
                    ),
                },
                test_stats=test_stats,
            )
def dataio_prep(hparams):
    """Creates the datasets and their data processing pipelines.

    Arguments
    ---------
    hparams : dict
        Loaded hyperparameters; must provide the annotation paths, data
        folders, and target ``sample_rate``. A ``resampler`` entry is added
        (and replaced lazily when a source file's rate differs).

    Returns
    -------
    tuple
        (dict of train/valid/test DynamicItemDataset, label encoder).
    """
    data_audio_folder = hparams["audio_data_folder"]
    config_sample_rate = hparams["sample_rate"]
    label_encoder = sb.dataio.encoder.CategoricalEncoder()
    hparams["resampler"] = torchaudio.transforms.Resample(
        new_freq=config_sample_rate
    )
    # 2. Define audio pipeline:
    @sb.utils.data_pipeline.takes("wav")
    @sb.utils.data_pipeline.provides("sig")
    def audio_pipeline(wav):
        """Load the signal, and pass it and its length to the corruption class.
        This is done on the CPU in the `collate_fn`."""
        wave_file = data_audio_folder + "/{:}".format(wav)
        sig, read_sr = torchaudio.load(wave_file)
        # If multi-channels, downmix it to a mono channel
        sig = torch.squeeze(sig)
        if len(sig.shape) > 1:
            sig = torch.mean(sig, dim=0)
        # Convert sample rate to required config_sample_rate
        if read_sr != config_sample_rate:
            # Re-initialize sampler if source file sample rate changed compared to last file
            if read_sr != hparams["resampler"].orig_freq:
                hparams["resampler"] = torchaudio.transforms.Resample(
                    orig_freq=read_sr, new_freq=config_sample_rate
                )
            # Resample audio
            sig = hparams["resampler"].forward(sig)
        sig = sig.float()
        # NOTE(review): peak normalization divides by the signed max; an
        # all-zero (silent) signal would produce NaNs and a negative-peaked
        # signal is scaled by its positive max — confirm inputs make this safe.
        sig = sig / sig.max()
        return sig
    # 3. Define label pipeline:
    @sb.utils.data_pipeline.takes("class_string")
    @sb.utils.data_pipeline.provides("class_string", "class_string_encoded")
    def label_pipeline(class_string):
        yield class_string
        class_string_encoded = label_encoder.encode_label_torch(class_string)
        yield class_string_encoded
    # Define datasets. We also connect the dataset with the data processing
    # functions defined above.
    datasets = {}
    data_info = {
        "train": hparams["train_annotation"],
        "valid": hparams["valid_annotation"],
        "test": hparams["test_annotation"],
    }
    for dataset in data_info:
        datasets[dataset] = sb.dataio.dataset.DynamicItemDataset.from_json(
            json_path=data_info[dataset],
            replacements={"data_root": hparams["data_folder"]},
            dynamic_items=[audio_pipeline, label_pipeline],
            output_keys=["id", "sig", "class_string_encoded"],
        )
    # Load or compute the label encoder (with multi-GPU DDP support)
    # Please, take a look into the lab_enc_file to see the label to index
    # mappinng.
    lab_enc_file = os.path.join(hparams["save_folder"], "label_encoder.txt")
    label_encoder.load_or_create(
        path=lab_enc_file,
        from_didatasets=[datasets["train"]],
        output_key="class_string",
    )
    return datasets, label_encoder
if __name__ == "__main__":
    # This flag enables the inbuilt cudnn auto-tuner
    torch.backends.cudnn.benchmark = True
    # CLI:
    hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])
    # Initialize ddp (useful only for multi-GPU DDP training)
    sb.utils.distributed.ddp_init_group(run_opts)
    # Load hyperparameters file with command-line overrides
    with open(hparams_file) as fin:
        hparams = load_hyperpyyaml(fin, overrides)
    # Create experiment directory
    sb.create_experiment_directory(
        experiment_directory=hparams["output_folder"],
        hyperparams_to_save=hparams_file,
        overrides=overrides,
    )
    # Tensorboard logging
    if hparams["use_tensorboard"]:
        from speechbrain.utils.train_logger import TensorboardLogger
        hparams["tensorboard_train_logger"] = TensorboardLogger(
            hparams["tensorboard_logs_folder"]
        )
    # Manifest (json annotation) creation runs only on the main process
    # so DDP workers do not race on the output files.
    run_on_main(
        prepare_esc50,
        kwargs={
            "data_folder": hparams["data_folder"],
            "audio_data_folder": hparams["audio_data_folder"],
            "save_json_train": hparams["train_annotation"],
            "save_json_valid": hparams["valid_annotation"],
            "save_json_test": hparams["test_annotation"],
            "train_fold_nums": hparams["train_fold_nums"],
            "valid_fold_nums": hparams["valid_fold_nums"],
            "test_fold_nums": hparams["test_fold_nums"],
            "skip_manifest_creation": hparams["skip_manifest_creation"],
        },
    )
    # Dataset IO prep: creating Dataset objects and proper encodings for phones
    datasets, label_encoder = dataio_prep(hparams)
    hparams["label_encoder"] = label_encoder
    class_labels = list(label_encoder.ind2lab.values())
    print("Class Labels:", class_labels)
    ESC50_brain = ESC50Brain(
        modules=hparams["modules"],
        opt_class=hparams["opt_class"],
        hparams=hparams,
        run_opts=run_opts,
        checkpointer=hparams["checkpointer"],
    )
    # Load pretrained model if pretrained_separator is present in the yaml
    if "pretrained_encoder" in hparams and hparams["use_pretrained"]:
        run_on_main(hparams["pretrained_encoder"].collect_files)
        hparams["pretrained_encoder"].load_collected()
    if not hparams["test_only"]:
        ESC50_brain.fit(
            epoch_counter=ESC50_brain.hparams.epoch_counter,
            train_set=datasets["train"],
            valid_set=datasets["valid"],
            train_loader_kwargs=hparams["dataloader_options"],
            valid_loader_kwargs=hparams["dataloader_options"],
        )
    # Load the best checkpoint for evaluation
    # (evaluation runs both after training and in test_only mode).
    test_stats = ESC50_brain.evaluate(
        test_set=datasets["test"],
        min_key="error",
        progressbar=True,
        test_loader_kwargs=hparams["dataloader_options"],
    )
| 15,164 | 35.454327 | 92 | py |
speechbrain | speechbrain-main/recipes/ESC50/interpret/train_l2i.py | #!/usr/bin/python3
"""This recipe to train L2I (https://arxiv.org/abs/2202.11479) to interepret audio classifiers.
Authors
* Cem Subakan 2022, 2023
* Francesco Paissan 2022, 2023
"""
import os
import sys
import torch
import torchaudio
import speechbrain as sb
from hyperpyyaml import load_hyperpyyaml
from speechbrain.utils.distributed import run_on_main
from esc50_prepare import prepare_esc50
from speechbrain.utils.metric_stats import MetricStats
from os import makedirs
import torch.nn.functional as F
from speechbrain.processing.NMF import spectral_phase
eps = 1e-10
def dataio_prep(hparams):
    """Creates the datasets and their data processing pipelines.

    Arguments
    ---------
    hparams : dict
        Loaded hyperparameters; must provide the annotation paths, data
        folders, and target ``sample_rate``. A ``resampler`` entry is added
        (and replaced lazily when a source file's rate differs).

    Returns
    -------
    tuple
        (dict of train/valid/test DynamicItemDataset, label encoder).
    """
    data_audio_folder = hparams["audio_data_folder"]
    config_sample_rate = hparams["sample_rate"]
    label_encoder = sb.dataio.encoder.CategoricalEncoder()
    hparams["resampler"] = torchaudio.transforms.Resample(
        new_freq=config_sample_rate
    )
    # 2. Define audio pipeline:
    @sb.utils.data_pipeline.takes("wav")
    @sb.utils.data_pipeline.provides("sig")
    def audio_pipeline(wav):
        """Load the signal, and pass it and its length to the corruption class.
        This is done on the CPU in the `collate_fn`."""
        wave_file = data_audio_folder + "/{:}".format(wav)
        sig, read_sr = torchaudio.load(wave_file)
        # If multi-channels, downmix it to a mono channel
        sig = torch.squeeze(sig)
        if len(sig.shape) > 1:
            sig = torch.mean(sig, dim=0)
        # Convert sample rate to required config_sample_rate
        if read_sr != config_sample_rate:
            # Re-initialize sampler if source file sample rate changed compared to last file
            if read_sr != hparams["resampler"].orig_freq:
                hparams["resampler"] = torchaudio.transforms.Resample(
                    orig_freq=read_sr, new_freq=config_sample_rate
                )
            # Resample audio
            sig = hparams["resampler"].forward(sig)
        sig = sig.float()
        # NOTE(review): peak normalization divides by the signed max; an
        # all-zero (silent) signal would produce NaNs and a negative-peaked
        # signal is scaled by its positive max — confirm inputs make this safe.
        sig = sig / sig.max()
        return sig
    # 3. Define label pipeline:
    @sb.utils.data_pipeline.takes("class_string")
    @sb.utils.data_pipeline.provides("class_string", "class_string_encoded")
    def label_pipeline(class_string):
        yield class_string
        class_string_encoded = label_encoder.encode_label_torch(class_string)
        yield class_string_encoded
    # Define datasets. We also connect the dataset with the data processing
    # functions defined above.
    datasets = {}
    data_info = {
        "train": hparams["train_annotation"],
        "valid": hparams["valid_annotation"],
        "test": hparams["test_annotation"],
    }
    for dataset in data_info:
        datasets[dataset] = sb.dataio.dataset.DynamicItemDataset.from_json(
            json_path=data_info[dataset],
            replacements={"data_root": hparams["data_folder"]},
            dynamic_items=[audio_pipeline, label_pipeline],
            output_keys=["id", "sig", "class_string_encoded"],
        )
    # Load or compute the label encoder (with multi-GPU DDP support)
    # Please, take a look into the lab_enc_file to see the label to index
    # mappinng.
    lab_enc_file = os.path.join(hparams["save_folder"], "label_encoder.txt")
    label_encoder.load_or_create(
        path=lab_enc_file,
        from_didatasets=[datasets["train"]],
        output_key="class_string",
    )
    return datasets, label_encoder
class InterpreterESC50Brain(sb.core.Brain):
    """Brain subclass that trains the L2I interpreter (psi/theta modules +
    NMF decoder) on top of a frozen ESC50 sound classifier.
    """
    def interpret_computation_steps(self, wavs):
        """Computation steps to get the interpretation spectrogram.

        Returns the interpretation magnitude spectrogram, the STFT phase of
        the input, and the classifier's predicted class (of the first
        utterance in the batch).
        """
        # compute stft and logmel, and phase
        X_stft = self.modules.compute_stft(wavs)
        X_stft_phase = spectral_phase(X_stft)
        X_stft_power = sb.processing.features.spectral_magnitude(
            X_stft, power=self.hparams.spec_mag_power
        )
        if self.hparams.use_melspectra:
            net_input = self.modules.compute_fbank(X_stft_power)
        else:
            net_input = torch.log1p(X_stft_power)
        # get the classifier embeddings
        temp = self.hparams.embedding_model(net_input)
        if isinstance(
            temp, tuple
        ):  # if embeddings are not used for interpretation
            embeddings, f_I = temp
        else:
            embeddings, f_I = temp, temp
        # get the nmf activations
        psi_out = self.modules.psi(f_I)
        if isinstance(psi_out, tuple):
            psi_out = psi_out[0]
        psi_out = psi_out.squeeze(1).permute(0, 2, 1)
        # cut the length of psi in case necessary
        psi_out = psi_out[:, :, : X_stft_power.shape[1]]
        # get the classifier output
        if embeddings.ndim == 4:
            embeddings = embeddings.mean((-1, -2))
        predictions = self.hparams.classifier(embeddings).squeeze(1)
        pred_cl = torch.argmax(predictions, dim=1)[0].item()
        nmf_dictionary = self.hparams.nmf_decoder.return_W()
        # computes time activations per component
        # FROM NOW ON WE FOLLOW THE PAPER'S NOTATION
        psi_out = psi_out.squeeze()
        z = self.modules.theta.hard_att(psi_out).squeeze()
        theta_c_w = self.modules.theta.classifier[0].weight[pred_cl]
        # some might be negative, relevance of component
        r_c_x = theta_c_w * z / torch.abs(theta_c_w * z).max()
        # define selected components by thresholding
        L = (
            torch.arange(r_c_x.shape[0])
            .to(r_c_x.device)[r_c_x > self.hparams.relevance_th]
            .tolist()
        )
        # get the log power spectra, this is needed as NMF is trained on log-power spectra
        X_stft_power_log = (
            torch.log(X_stft_power + 1).transpose(1, 2).squeeze(0)
        )
        X_withselected = nmf_dictionary[:, L] @ psi_out[L, :]
        Xhat = nmf_dictionary @ psi_out
        X_stft_power_log = X_stft_power_log[..., : Xhat.shape[1]]
        # need the eps for the denominator
        eps = 1e-10
        # Soft mask: ratio of the selected components' reconstruction to the
        # full reconstruction, applied to the log-power spectrogram.
        X_int = (X_withselected / (Xhat + eps)) * X_stft_power_log
        # get back to the standard stft
        X_int = torch.exp(X_int) - 1
        return X_int, X_stft_phase, pred_cl
    def interpret_sample(self, wavs, batch=None):
        """Get the interpretation for a given wav file.

        When ``batch`` is given, the interpretation is also inverted back to
        a waveform (using the input's phase) and both the original and the
        interpretation audio are saved under the experiment folder.
        """
        # get the interpretation spectrogram, phase, and the predicted class
        X_int, X_stft_phase, pred_cl = self.interpret_computation_steps(wavs)
        if not (batch is None):
            # Re-attach the original phase (as cos/sin in the last dim,
            # the layout compute_istft expects).
            X_stft_phase_sb = torch.cat(
                (
                    torch.cos(X_stft_phase).unsqueeze(-1),
                    torch.sin(X_stft_phase).unsqueeze(-1),
                ),
                dim=-1,
            )
            temp = X_int.transpose(0, 1).unsqueeze(0).unsqueeze(-1)
            X_stft_phase_sb = X_stft_phase_sb[:, : temp.shape[1], ...]
            X_wpsb = temp * X_stft_phase_sb
            x_int_sb = self.modules.compute_istft(X_wpsb)
            # save reconstructed and original spectrograms
            makedirs(
                os.path.join(
                    self.hparams.output_folder, "audios_from_interpretation",
                ),
                exist_ok=True,
            )
            current_class_ind = batch.class_string_encoded.data[0].item()
            current_class_name = self.hparams.label_encoder.ind2lab[
                current_class_ind
            ]
            predicted_class_name = self.hparams.label_encoder.ind2lab[pred_cl]
            # File names encode the true class (tc) and predicted class (pc).
            torchaudio.save(
                os.path.join(
                    self.hparams.output_folder,
                    "audios_from_interpretation",
                    f"original_tc_{current_class_name}_pc_{predicted_class_name}.wav",
                ),
                wavs[0].unsqueeze(0).cpu(),
                self.hparams.sample_rate,
            )
            torchaudio.save(
                os.path.join(
                    self.hparams.output_folder,
                    "audios_from_interpretation",
                    f"interpretation_tc_{current_class_name}_pc_{predicted_class_name}.wav",
                ),
                x_int_sb.cpu(),
                self.hparams.sample_rate,
            )
        return X_int
    def overlap_test(self, batch):
        """Interpretation test with overlapped audio.

        Mixes the first two utterances of the batch (second one attenuated
        as "noise"), interprets the mixture, and saves mixture/source/noise/
        interpretation waveforms to the experiment folder.
        """
        wavs, _ = batch.sig
        wavs = wavs.to(self.device)
        s1 = wavs[0]
        s2 = wavs[1]
        # create the mixture with s2 being the noise (lower gain)
        mix = (s1 + (s2 * 0.2)).unsqueeze(0)
        # get the interpretation spectrogram, phase, and the predicted class
        X_int, X_stft_phase, pred_cl = self.interpret_computation_steps(mix)
        # Re-attach the mixture's phase and invert back to a waveform.
        X_stft_phase_sb = torch.cat(
            (
                torch.cos(X_stft_phase).unsqueeze(-1),
                torch.sin(X_stft_phase).unsqueeze(-1),
            ),
            dim=-1,
        )
        temp = X_int.transpose(0, 1).unsqueeze(0).unsqueeze(-1)
        X_stft_phase_sb = X_stft_phase_sb[:, : temp.shape[1], :, :]
        X_wpsb = temp * X_stft_phase_sb
        x_int_sb = self.modules.compute_istft(X_wpsb)
        # save reconstructed and original spectrograms
        # epoch = self.hparams.epoch_counter.current
        current_class_ind = batch.class_string_encoded.data[0].item()
        current_class_name = self.hparams.label_encoder.ind2lab[
            current_class_ind
        ]
        predicted_class_name = self.hparams.label_encoder.ind2lab[pred_cl]
        noise_class_ind = batch.class_string_encoded.data[1].item()
        noise_class_name = self.hparams.label_encoder.ind2lab[noise_class_ind]
        # Folder name encodes true class (tc), noise class (nc) and
        # predicted class (pc).
        out_folder = os.path.join(
            self.hparams.output_folder,
            "overlap_test",
            f"tc_{current_class_name}_nc_{noise_class_name}_pc_{predicted_class_name}",
        )
        makedirs(
            out_folder, exist_ok=True,
        )
        torchaudio.save(
            os.path.join(out_folder, "mixture.wav"),
            mix.cpu(),
            self.hparams.sample_rate,
        )
        torchaudio.save(
            os.path.join(out_folder, "source.wav"),
            s1.unsqueeze(0).cpu(),
            self.hparams.sample_rate,
        )
        torchaudio.save(
            os.path.join(out_folder, "noise.wav"),
            s2.unsqueeze(0).cpu(),
            self.hparams.sample_rate,
        )
        torchaudio.save(
            os.path.join(out_folder, "interpretation.wav"),
            x_int_sb.cpu(),
            self.hparams.sample_rate,
        )
    def compute_forward(self, batch, stage):
        """Computation pipeline based on a encoder + sound classifier.
        Data augmentation and environmental corruption are applied to the
        input sound.

        Returns
        -------
        tuple
            ((NMF reconstruction, time activations),
             (classifier predictions, theta predictions)).
        """
        batch = batch.to(self.device)
        wavs, lens = batch.sig
        X_stft = self.modules.compute_stft(wavs)
        X_stft_power = sb.processing.features.spectral_magnitude(
            X_stft, power=self.hparams.spec_mag_power
        )
        if self.hparams.use_melspectra:
            net_input = self.modules.compute_fbank(X_stft_power)
        else:
            net_input = torch.log1p(X_stft_power)
        # Embeddings + sound classifier
        temp = self.hparams.embedding_model(net_input)
        if isinstance(temp, tuple):
            embeddings, f_I = temp
        else:
            embeddings, f_I = temp, temp
        if embeddings.ndim == 4:
            embeddings = embeddings.mean((-1, -2))
        predictions = self.hparams.classifier(embeddings).squeeze(1)
        psi_out = self.modules.psi(f_I)  # generate nmf activations
        if isinstance(psi_out, tuple):
            psi_out = psi_out[0]
        psi_out = psi_out.squeeze(1).permute(0, 2, 1)
        # cut the length of psi
        psi_out = psi_out[:, :, : X_stft_power.shape[1]]
        # generate log-mag spectrogram
        reconstructed = self.hparams.nmf_decoder(psi_out).transpose(1, 2)
        # generate classifications from time activations
        theta_out = self.modules.theta(psi_out)
        if stage == sb.Stage.VALID:
            # save some samples
            # (only every ``interpret_period`` epochs and when enabled).
            if (
                self.hparams.epoch_counter.current
                % self.hparams.interpret_period
            ) == 0 and self.hparams.save_interpretations:
                wavs = wavs[0].unsqueeze(0)
                self.interpret_sample(wavs, batch)
                self.overlap_test(batch)
        return (reconstructed, psi_out), (predictions, theta_out)
    def compute_objectives(self, pred, batch, stage):
        """Computes the loss using class-id as label.

        Loss = alpha * NMF reconstruction MSE
             + beta  * L1 sparsity of the time activations
             + cross-entropy-style fidelity term between the classifier's
               softmax and theta's output.
        """
        (
            (reconstructions, time_activations),
            (classification_out, theta_out),
        ) = pred
        uttid = batch.id
        classid, _ = batch.class_string_encoded
        batch = batch.to(self.device)
        wavs, lens = batch.sig
        X_stft = self.modules.compute_stft(wavs).to(self.device)
        X_stft_power = sb.processing.features.spectral_magnitude(
            X_stft, power=self.hparams.spec_mag_power
        )
        X_stft_logpower = torch.log1p(X_stft_power)
        # Concatenate labels (due to data augmentation)
        if stage == sb.Stage.VALID or stage == sb.Stage.TEST:
            self.top_3_fidelity.append(batch.id, theta_out, classification_out)
            self.input_fidelity.append(batch.id, wavs, classification_out)
            self.faithfulness.append(batch.id, wavs, classification_out)
        self.acc_metric.append(
            uttid, predict=classification_out, target=classid, length=lens
        )
        # Trim the target spectrogram to the reconstruction's length.
        X_stft_logpower = X_stft_logpower[:, : reconstructions.shape[1], :]
        loss_nmf = ((reconstructions - X_stft_logpower) ** 2).mean()
        loss_nmf = self.hparams.alpha * loss_nmf
        loss_nmf += self.hparams.beta * (time_activations).abs().mean()
        if stage != sb.Stage.TEST:
            if hasattr(self.hparams.lr_annealing, "on_batch_end"):
                self.hparams.lr_annealing.on_batch_end(self.optimizer)
        self.last_batch = batch
        self.batch_to_plot = (reconstructions.clone(), X_stft_logpower.clone())
        theta_out = -torch.log(theta_out)
        loss_fdi = (F.softmax(classification_out, dim=1) * theta_out).mean()
        return loss_nmf + loss_fdi
    def on_stage_start(self, stage, epoch=None):
        """Gets called at the beginning of each epoch.

        Resets the interpretation metrics (top-k fidelity, input fidelity,
        faithfulness) and the accuracy tracker for the coming stage.
        """
        def accuracy_value(predict, target, length):
            """Computes Accuracy"""
            nbr_correct, nbr_total = sb.utils.Accuracy.Accuracy(
                predict.unsqueeze(1), target, length
            )
            acc = torch.tensor([nbr_correct / nbr_total])
            return acc
        @torch.no_grad()
        def compute_fidelity(theta_out, predictions):
            """Computes top-`k` fidelity of interpreter."""
            predictions = F.softmax(predictions, dim=1)
            pred_cl = torch.argmax(predictions, dim=1)
            k_top = torch.topk(theta_out, k=self.hparams.k_fidelity, dim=1)[1]
            # 1 element for each sample in batch, is 0 if pred_cl is in top k
            temp = (k_top - pred_cl.unsqueeze(1) == 0).sum(1)
            return temp
        @torch.no_grad()
        def compute_inp_fidelity(wavs, predictions):
            """Computes top-1 input fidelity of interpreter."""
            # Interpret each utterance separately and stack the results.
            X2 = self.interpret_sample(wavs[0].unsqueeze(0)).unsqueeze(0)
            for (i, wav) in enumerate(wavs[1:, ...]):
                X2 = torch.cat(
                    (X2, self.interpret_sample(wav.unsqueeze(0)).unsqueeze(0)),
                    axis=0,
                )
            if self.hparams.use_melspectra:
                net_input = self.modules.compute_fbank(X2.transpose(1, 2))
            else:
                net_input = torch.log1p(X2.transpose(1, 2))
            temp = self.hparams.embedding_model(net_input)
            if isinstance(temp, tuple):
                embeddings = temp[0]
            else:
                embeddings = temp
            if embeddings.ndim == 4:
                embeddings = embeddings.mean((-1, -2))
            predictions_interpret = self.hparams.classifier(embeddings).squeeze(
                1
            )
            predictions = F.softmax(predictions, dim=1)
            predictions_interpret = F.softmax(predictions_interpret, dim=1)
            # Fidelity = does classifying the interpretation give the same
            # class as classifying the original input.
            pred_cl = torch.argmax(predictions, dim=1)
            k_top = torch.argmax(predictions_interpret, dim=1)
            return (pred_cl == k_top).float()
        @torch.no_grad()
        def compute_faithfulness(wavs, predictions):
            # Interpret each utterance separately and stack the results.
            X2 = self.interpret_sample(wavs[0].unsqueeze(0)).unsqueeze(0)
            for (i, wav) in enumerate(wavs[1:, ...]):
                X2 = torch.cat(
                    (X2, self.interpret_sample(wav.unsqueeze(0)).unsqueeze(0)),
                    axis=0,
                )
            if self.hparams.use_melspectra:
                net_input = self.modules.compute_fbank(X2.transpose(1, 2))
            else:
                net_input = torch.log1p(X2.transpose(1, 2))
            temp = self.hparams.embedding_model(net_input)
            if isinstance(temp, tuple):
                embeddings = temp[0]
            else:
                embeddings = temp
            if embeddings.ndim == 4:
                embeddings = embeddings.mean((-1, -2))
            predictions_masked = self.hparams.classifier(embeddings).squeeze(1)
            predictions = F.softmax(predictions, dim=1)
            predictions_masked = F.softmax(predictions_masked, dim=1)
            # get the prediction indices
            pred_cl = predictions.argmax(dim=1, keepdim=True)
            # get the corresponding output probabilities
            predictions_selected = torch.gather(
                predictions, dim=1, index=pred_cl
            )
            predictions_masked_selected = torch.gather(
                predictions_masked, dim=1, index=pred_cl
            )
            # Drop in the predicted class probability when the input is
            # replaced by its interpretation.
            faithfulness = (
                predictions_selected - predictions_masked_selected
            ).squeeze(1)
            return faithfulness
        self.top_3_fidelity = MetricStats(metric=compute_fidelity)
        self.input_fidelity = MetricStats(metric=compute_inp_fidelity)
        self.faithfulness = MetricStats(metric=compute_faithfulness)
        self.acc_metric = sb.utils.metric_stats.MetricStats(
            metric=accuracy_value, n_jobs=1
        )
        return super().on_stage_start(stage, epoch)
    def on_stage_end(self, stage, stage_loss, epoch=None):
        """Gets called at the end of an epoch.

        Summarizes stage metrics, anneals the learning rate on VALID, logs
        stats, and keeps only the checkpoints with the best top-3 fidelity.
        """
        if stage == sb.Stage.TRAIN:
            self.train_loss = stage_loss
            self.train_stats = {
                "loss": self.train_loss,
                "acc": self.acc_metric.summarize("average"),
            }
        if stage == sb.Stage.VALID:
            current_fid = self.top_3_fidelity.summarize("average")
            current_inpfid = self.input_fidelity.summarize("average")
            # Scheduler maximizes fidelity, hence the negated metric.
            old_lr, new_lr = self.hparams.lr_annealing(
                [self.optimizer], epoch, -current_fid
            )
            sb.nnet.schedulers.update_learning_rate(self.optimizer, new_lr)
            valid_stats = {
                "loss": stage_loss,
                "acc": self.acc_metric.summarize("average"),
                "top-3_fid": current_fid,
                "input-fidelity": current_inpfid,
                "faithfulness_median": torch.Tensor(
                    self.faithfulness.scores
                ).median(),
                "faithfulness_mean": torch.Tensor(
                    self.faithfulness.scores
                ).mean(),
            }
            # The train_logger writes a summary to stdout and to the logfile.
            self.hparams.train_logger.log_stats(
                stats_meta={"epoch": epoch, "lr": old_lr},
                train_stats=self.train_stats,
                valid_stats=valid_stats,
            )
            # Save the current checkpoint and delete previous checkpoints,
            self.checkpointer.save_and_keep_only(
                meta=valid_stats, max_keys=["top-3_fid"]
            )
        if stage == sb.Stage.TEST:
            current_fid = self.top_3_fidelity.summarize("average")
            current_inpfid = self.input_fidelity.summarize("average")
            test_stats = {
                "loss": stage_loss,
                "acc": self.acc_metric.summarize("average"),
                "top-3_fid": current_fid,
                "input-fidelity": current_inpfid,
                "faithfulness_median": torch.Tensor(
                    self.faithfulness.scores
                ).median(),
                "faithfulness_mean": torch.Tensor(
                    self.faithfulness.scores
                ).mean(),
            }
            # The train_logger writes a summary to stdout and to the logfile.
            self.hparams.train_logger.log_stats(
                stats_meta={"epoch": epoch}, test_stats=test_stats
            )
if __name__ == "__main__":
    # # This flag enables the inbuilt cudnn auto-tuner
    # torch.backends.cudnn.benchmark = True
    # CLI:
    hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])
    # Initialize ddp (useful only for multi-GPU DDP training)
    sb.utils.distributed.ddp_init_group(run_opts)
    # Load hyperparameters file with command-line overrides
    with open(hparams_file) as fin:
        hparams = load_hyperpyyaml(fin, overrides)
    # classifier is fixed here
    hparams["embedding_model"].eval()
    hparams["classifier"].eval()
    # Create experiment directory
    sb.create_experiment_directory(
        experiment_directory=hparams["output_folder"],
        hyperparams_to_save=hparams_file,
        overrides=overrides,
    )
    # Tensorboard logging
    if hparams["use_tensorboard"]:
        from speechbrain.utils.train_logger import TensorboardLogger
        hparams["tensorboard_train_logger"] = TensorboardLogger(
            hparams["tensorboard_logs_folder"]
        )
    # Manifest (json annotation) creation runs only on the main process
    # so DDP workers do not race on the output files.
    run_on_main(
        prepare_esc50,
        kwargs={
            "data_folder": hparams["data_folder"],
            "audio_data_folder": hparams["audio_data_folder"],
            "save_json_train": hparams["train_annotation"],
            "save_json_valid": hparams["valid_annotation"],
            "save_json_test": hparams["test_annotation"],
            "train_fold_nums": hparams["train_fold_nums"],
            "valid_fold_nums": hparams["valid_fold_nums"],
            "test_fold_nums": hparams["test_fold_nums"],
            "skip_manifest_creation": hparams["skip_manifest_creation"],
        },
    )
    # Dataset IO prep: creating Dataset objects and proper encodings for phones
    datasets, label_encoder = dataio_prep(hparams)
    hparams["label_encoder"] = label_encoder
    class_labels = list(label_encoder.ind2lab.values())
    print("Class Labels:", class_labels)
    Interpreter_brain = InterpreterESC50Brain(
        modules=hparams["modules"],
        opt_class=hparams["opt_class"],
        hparams=hparams,
        run_opts=run_opts,
        checkpointer=hparams["checkpointer"],
    )
    if "pretrained_esc50" in hparams and hparams["use_pretrained"]:
        run_on_main(hparams["pretrained_esc50"].collect_files)
        hparams["pretrained_esc50"].load_collected()
    # transfer the frozen parts to the model to the device
    hparams["embedding_model"].to(hparams["device"])
    hparams["classifier"].to(hparams["device"])
    hparams["nmf_decoder"].to(hparams["device"])
    hparams["embedding_model"].eval()
    if not hparams["test_only"]:
        Interpreter_brain.fit(
            epoch_counter=Interpreter_brain.hparams.epoch_counter,
            train_set=datasets["train"],
            valid_set=datasets["valid"],
            train_loader_kwargs=hparams["dataloader_options"],
            valid_loader_kwargs=hparams["dataloader_options"],
        )
    else:
        # Load the best checkpoint for evaluation
        # NOTE(review): checkpoints are saved with max_keys=["top-3_fid"]
        # (see on_stage_end) but recovered here with min_key="error", a key
        # the valid_stats meta does not contain — confirm the intended key.
        test_stats = Interpreter_brain.evaluate(
            test_set=datasets["test"],
            min_key="error",
            progressbar=True,
            test_loader_kwargs=hparams["dataloader_options"],
        )
| 24,425 | 35.026549 | 95 | py |
speechbrain | speechbrain-main/recipes/ESC50/interpret/train_piq.py | #!/usr/bin/python3
"""This recipe to train PIQ to interepret audio classifiers.
Authors
* Cem Subakan 2022, 2023
* Francesco Paissan 2022, 2023
"""
import os
import sys
import torch
import torchaudio
import speechbrain as sb
from hyperpyyaml import load_hyperpyyaml
from speechbrain.utils.distributed import run_on_main
from esc50_prepare import prepare_esc50
from speechbrain.utils.metric_stats import MetricStats
from os import makedirs
import torch.nn.functional as F
from speechbrain.processing.NMF import spectral_phase
import matplotlib.pyplot as plt
eps = 1e-10
class InterpreterESC50Brain(sb.core.Brain):
"""Class for sound class embedding training" """
def invert_stft_with_phase(self, X_int, X_stft_phase):
"""Inverts STFT spectra given phase."""
X_stft_phase_sb = torch.cat(
(
torch.cos(X_stft_phase).unsqueeze(-1),
torch.sin(X_stft_phase).unsqueeze(-1),
),
dim=-1,
)
X_stft_phase_sb = X_stft_phase_sb[:, : X_int.shape[1], :, :]
if X_int.ndim == 3:
X_int = X_int.unsqueeze(-1)
X_wpsb = X_int * X_stft_phase_sb
x_int_sb = self.modules.compute_istft(X_wpsb)
return x_int_sb
def preprocess(self, wavs):
"""Pre-process wavs."""
X_stft = self.modules.compute_stft(wavs)
X_stft_power = sb.processing.features.spectral_magnitude(
X_stft, power=self.hparams.spec_mag_power
)
X_stft_logpower = torch.log1p(X_stft_power)
return X_stft_logpower, X_stft, X_stft_power
def classifier_forward(self, X_stft_logpower):
hcat = self.hparams.embedding_model(X_stft_logpower)
embeddings = hcat.mean((-1, -2))
predictions = self.hparams.classifier(embeddings).squeeze(1)
class_pred = predictions.argmax(1)
return hcat, embeddings, predictions, class_pred
    def interpret_computation_steps(self, wavs, print_probability=False):
        """Compute the interpretation spectrogram for a batch of waveforms.

        Runs the frozen classifier, decodes the PSI representation into an
        estimate ``xhat``, and applies it to the input log-power spectrogram
        either as a soft mask (sigmoid) or as a hard threshold (softplus +
        relative threshold), depending on ``hparams.use_mask_output``.

        Returns (interpretation spectrogram, STFT phase, predicted class,
        input log-power spectrogram, raw mask/estimate).
        """
        X_stft_logpower, X_stft, X_stft_power = self.preprocess(wavs)
        X_stft_phase = spectral_phase(X_stft)
        # Frozen classifier: features, embeddings, logits, predicted class.
        hcat, embeddings, predictions, class_pred = self.classifier_forward(
            X_stft_logpower
        )
        if print_probability:
            # Debug aid: report the softmax probability of the top class
            # for the first item in the batch.
            predictions = F.softmax(predictions, dim=1)
            class_prob = predictions[0, class_pred].item()
            print(f"classifier_prob: {class_prob}")
        if self.hparams.use_vq:
            # Vector-quantized path: PSI returns the decoded estimate too.
            xhat, hcat, _ = self.modules.psi(hcat, class_pred)
        else:
            xhat = self.modules.psi.decoder(hcat)
        xhat = xhat.squeeze(1)
        Tmax = xhat.shape[1]
        if self.hparams.use_mask_output:
            # Soft mask in [0, 1] applied to the input spectrogram.
            xhat = F.sigmoid(xhat)
            X_int = xhat * X_stft_logpower[:, :Tmax, :]
        else:
            # Non-negative estimate; keep only bins above a relative threshold.
            xhat = F.softplus(xhat)
            th = xhat.max() * self.hparams.mask_th
            X_int = (xhat > th) * X_stft_logpower[:, :Tmax, :]
        return X_int, X_stft_phase, class_pred, X_stft_logpower, xhat
def interpret_sample(self, wavs, batch=None):
"""get the interpratation for a given wav file."""
# get the interpretation spectrogram, phase, and the predicted class
X_int, X_stft_phase, pred_cl, _, _ = self.interpret_computation_steps(
wavs
)
X_stft_phase = X_stft_phase[:, : X_int.shape[1], :]
if not (batch is None):
x_int_sb = self.invert_stft_with_phase(X_int, X_stft_phase)
# save reconstructed and original spectrograms
makedirs(
os.path.join(
self.hparams.output_folder, "audios_from_interpretation",
),
exist_ok=True,
)
current_class_ind = batch.class_string_encoded.data[0].item()
current_class_name = self.hparams.label_encoder.ind2lab[
current_class_ind
]
predicted_class_name = self.hparams.label_encoder.ind2lab[
pred_cl.item()
]
torchaudio.save(
os.path.join(
self.hparams.output_folder,
"audios_from_interpretation",
f"original_tc_{current_class_name}_pc_{predicted_class_name}.wav",
),
wavs[0].unsqueeze(0).cpu(),
self.hparams.sample_rate,
)
torchaudio.save(
os.path.join(
self.hparams.output_folder,
"audios_from_interpretation",
f"interpretation_tc_{current_class_name}_pc_{predicted_class_name}.wav",
),
x_int_sb.cpu(),
self.hparams.sample_rate,
)
return X_int
    def overlap_test(self, batch):
        """Interpretation test on artificially overlapped audio.

        Mixes the first two items of the batch (first at gain 0.8 as the
        source, second at gain 0.2 as the noise), interprets the mixture,
        and saves the mixture/source/noise/interpretation waveforms plus a
        figure of the spectrogram, estimated mask, and masked spectrogram
        under ``<output_folder>/overlap_test``.
        """
        wavs, _ = batch.sig
        wavs = wavs.to(self.device)
        # Need at least two utterances to build a mixture.
        if wavs.shape[0] <= 1:
            return
        s1 = wavs[0]
        s1 = s1 / s1.max()
        s2 = wavs[1]
        s2 = s2 / s2.max()
        # create the mixture with s2 being the noise (lower gain)
        mix = (s1 * 0.8 + (s2 * 0.2)).unsqueeze(0)
        mix = mix / mix.max()
        # get the interpretation spectrogram, phase, and the predicted class
        (
            X_int,
            X_stft_phase,
            pred_cl,
            X_mix,
            mask,
        ) = self.interpret_computation_steps(mix)
        # Keep only the first (and only) batch element of each output.
        X_int = X_int[0, ...]
        X_stft_phase = X_stft_phase[0, : X_int.shape[0], ...].unsqueeze(0)
        pred_cl = pred_cl[0, ...]
        mask = mask[0, ...]
        # expm1 undoes the log1p compression before inverting to time domain.
        temp = torch.expm1(X_int).unsqueeze(0).unsqueeze(-1)
        x_int_sb = self.invert_stft_with_phase(temp, X_stft_phase)
        # save reconstructed and original spectrograms
        current_class_ind = batch.class_string_encoded.data[0].item()
        current_class_name = self.hparams.label_encoder.ind2lab[
            current_class_ind
        ]
        predicted_class_name = self.hparams.label_encoder.ind2lab[
            pred_cl.item()
        ]
        noise_class_ind = batch.class_string_encoded.data[1].item()
        noise_class_name = self.hparams.label_encoder.ind2lab[noise_class_ind]
        # Folder name encodes true class (tc), noise class (nc) and
        # predicted class (pc).
        out_folder = os.path.join(
            self.hparams.output_folder,
            "overlap_test",
            f"tc_{current_class_name}_nc_{noise_class_name}_pc_{predicted_class_name}",
        )
        makedirs(
            out_folder, exist_ok=True,
        )
        torchaudio.save(
            os.path.join(out_folder, "mixture.wav"),
            mix.data.cpu(),
            self.hparams.sample_rate,
        )
        torchaudio.save(
            os.path.join(out_folder, "source.wav"),
            s1.unsqueeze(0).data.cpu(),
            self.hparams.sample_rate,
        )
        torchaudio.save(
            os.path.join(out_folder, "noise.wav"),
            s2.unsqueeze(0).data.cpu(),
            self.hparams.sample_rate,
        )
        torchaudio.save(
            os.path.join(out_folder, "interpretation.wav"),
            x_int_sb.data.cpu(),
            self.hparams.sample_rate,
        )
        # Diagnostic figure: mixture spectrogram, estimated mask, masked spec.
        plt.figure(figsize=(10, 5), dpi=100)
        plt.subplot(141)
        X_target = X_mix[0].permute(1, 0)[:, : X_int.shape[1]].cpu()
        plt.imshow(X_target)
        plt.colorbar()
        plt.subplot(142)
        plt.imshow(mask.data.cpu().permute(1, 0))
        plt.title("Estimated Mask")
        plt.colorbar()
        plt.subplot(143)
        plt.imshow(X_int.data.cpu().permute(1, 0).data.cpu())
        plt.colorbar()
        plt.title("masked")
        plt.savefig(os.path.join(out_folder, "specs.png"))
        plt.close()
    def debug_files(self, X_stft, xhat, X_stft_logpower, batch, wavs):
        """Save debug reconstructions and figures for the first batch item.

        Writes the raw reconstruction, the masked reconstruction, and the
        original waveform, plus a figure comparing the target spectrogram,
        the thresholded input, the masked spectrogram, and the mask, under
        ``<output_folder>/reconstructions/<utterance id>``.
        """
        X_stft_phase = spectral_phase(X_stft)
        temp = xhat[0].transpose(0, 1).unsqueeze(0).unsqueeze(-1)
        # expm1 undoes the log1p compression before inverting to time domain.
        Xspec_est = torch.expm1(temp.permute(0, 2, 1, 3))
        xhat_tm = self.invert_stft_with_phase(Xspec_est, X_stft_phase)
        Tmax = Xspec_est.shape[1]
        if self.hparams.use_mask_output:
            # xhat is a soft mask; apply it directly.
            X_masked = xhat[0] * X_stft_logpower[0, :Tmax, :]
        else:
            # Hard threshold at 15% of the estimate's maximum.
            th = xhat[0].max() * 0.15
            X_masked = (xhat[0] > th) * X_stft_logpower[0, :Tmax, :]
        X_est_masked = torch.expm1(X_masked).unsqueeze(0).unsqueeze(-1)
        xhat_tm_masked = self.invert_stft_with_phase(X_est_masked, X_stft_phase)
        plt.figure(figsize=(10, 5), dpi=100)
        plt.subplot(141)
        X_target = X_stft_logpower[0].permute(1, 0)[:, : xhat.shape[1]].cpu()
        plt.imshow(X_target)
        plt.colorbar()
        plt.subplot(142)
        # Threshold the input at mask_th times its per-item maximum.
        input_masked = X_target > (
            X_target.max(keepdim=True, dim=-1)[0].max(keepdim=True, dim=-2)[0]
            * self.hparams.mask_th
        )
        plt.imshow(input_masked)
        plt.title("input masked")
        plt.colorbar()
        plt.subplot(143)
        if self.hparams.use_mask_output:
            mask = xhat[0]
        else:
            mask = xhat[0] > th  # `th` is set in the matching branch above
        X_masked = mask * X_stft_logpower[0, :Tmax, :]
        plt.imshow(X_masked.permute(1, 0).data.cpu())
        plt.colorbar()
        plt.title("masked")
        plt.subplot(144)
        plt.imshow(mask.permute(1, 0).data.cpu())
        plt.colorbar()
        plt.title("mask")
        out_folder = os.path.join(
            self.hparams.output_folder, "reconstructions/" f"{batch.id[0]}",
        )
        makedirs(
            out_folder, exist_ok=True,
        )
        plt.savefig(
            os.path.join(out_folder, "reconstructions.png"), format="png",
        )
        plt.close()
        torchaudio.save(
            os.path.join(out_folder, "reconstruction.wav"),
            xhat_tm.data.cpu(),
            self.hparams.sample_rate,
        )
        torchaudio.save(
            os.path.join(out_folder, "reconstruction_masked.wav"),
            xhat_tm_masked.data.cpu(),
            self.hparams.sample_rate,
        )
        torchaudio.save(
            os.path.join(out_folder, "true.wav"),
            wavs[0:1].data.cpu(),
            self.hparams.sample_rate,
        )
    def compute_forward(self, batch, stage):
        """Forward pass: frozen classifier + PSI interpreter.

        Computes classifier logits on the input spectrogram, decodes the
        interpreter estimate ``xhat`` (soft mask via sigmoid, or non-negative
        estimate via softplus), and — periodically during validation —
        dumps interpretation samples and debug files to disk.

        Returns (predictions, xhat, hcat, z_q_x, garbage).
        """
        batch = batch.to(self.device)
        wavs, lens = batch.sig
        X_stft_logpower, X_stft, X_stft_power = self.preprocess(wavs)
        # Embeddings + sound classifier
        hcat, embeddings, predictions, class_pred = self.classifier_forward(
            X_stft_logpower
        )
        if self.hparams.use_vq:
            # Vector-quantized PSI: also returns the quantized latent z_q_x.
            xhat, hcat, z_q_x = self.modules.psi(hcat, class_pred)
        else:
            xhat = self.modules.psi.decoder(hcat)
            z_q_x = None
        xhat = xhat.squeeze(1)
        if self.hparams.use_mask_output:
            xhat = F.sigmoid(xhat)
        else:
            xhat = F.softplus(xhat)
        # Placeholder kept for interface compatibility with compute_objectives.
        garbage = 0
        if stage == sb.Stage.VALID:
            # save some samples every `interpret_period` epochs
            if (
                self.hparams.epoch_counter.current
                % self.hparams.interpret_period
            ) == 0 and self.hparams.save_interpretations:
                wavs = wavs[0].unsqueeze(0)
                self.interpret_sample(wavs, batch)
                self.overlap_test(batch)
                self.debug_files(X_stft, xhat, X_stft_logpower, batch, wavs)
        return predictions, xhat, hcat, z_q_x, garbage
    def compute_objectives(self, pred, batch, stage):
        """Compute the training loss and update the tracked metrics.

        The loss is a weighted reconstruction term (binary cross-entropy on
        a thresholded target mask when ``use_mask_output``, else MSE on the
        log-power spectrogram) plus, when ``use_vq``, the VQ codebook and
        commitment losses.
        """
        predictions, xhat, hcat, z_q_x, garbage = pred
        batch = batch.to(self.device)
        wavs, lens = batch.sig
        uttid = batch.id
        classid, _ = batch.class_string_encoded
        X_stft_logpower, X_stft, X_stft_power = self.preprocess(wavs)
        Tmax = xhat.shape[1]
        # Classifier output on the masked input, used for fidelity metrics.
        hcat_theta, embeddings, theta_out, _ = self.classifier_forward(
            xhat * X_stft_logpower[:, :Tmax, :]
        )
        # Fidelity term is currently disabled (kept at 0).
        loss_fid = 0
        if self.hparams.use_mask_output:
            eps = 1e-10
            target_spec = X_stft_logpower[:, : xhat.shape[1], :]
            # Binary target: bins above mask_th of the per-item maximum
            # (max over both trailing axes, keeping dims for broadcasting).
            target_mask = target_spec > (
                target_spec.max(keepdim=True, dim=-1)[0].max(
                    keepdim=True, dim=-2
                )[0]
                * self.hparams.mask_th
            )
            target_mask = target_mask.float()
            # Bernoulli negative log-likelihood of the soft mask.
            rec_loss = (
                -target_mask * torch.log(xhat + eps)
                - (1 - target_mask) * torch.log(1 - xhat + eps)
            ).mean()
        else:
            # Plain MSE reconstruction of the log-power spectrogram.
            rec_loss = (
                (X_stft_logpower[:, : xhat.shape[1], :] - xhat).pow(2).mean()
            )
        if self.hparams.use_vq:
            # Codebook loss (moves codes) and commitment loss (moves encoder).
            loss_vq = F.mse_loss(z_q_x, hcat.detach())
            loss_commit = F.mse_loss(hcat, z_q_x.detach())
        else:
            loss_vq = 0
            loss_commit = 0
        self.acc_metric.append(
            uttid, predict=predictions, target=classid, length=lens
        )
        self.recons_err.append(
            uttid, xhat, X_stft_logpower[:, : xhat.shape[1], :]
        )
        if self.hparams.use_mask_output:
            self.mask_ll.append(uttid, xhat, target_mask)
        if stage == sb.Stage.VALID or stage == sb.Stage.TEST:
            # Interpretation-quality metrics are only tracked at eval time.
            self.top_3_fidelity.append(
                [batch.id] * theta_out.shape[0], theta_out, predictions
            )
            self.faithfulness.append(batch.id, wavs, predictions)
        if stage != sb.Stage.TEST:
            if hasattr(self.hparams.lr_annealing, "on_batch_end"):
                self.hparams.lr_annealing.on_batch_end(self.optimizer)
        return (
            self.hparams.rec_loss_coef * rec_loss
            + loss_vq
            + loss_commit
            + loss_fid
        )
    def on_stage_start(self, stage, epoch=None):
        """Instantiate the per-stage metric trackers.

        The metric functions are defined as closures (wrapped in
        ``torch.no_grad``) and registered into fresh ``MetricStats``
        instances at the start of every stage.
        """
        @torch.no_grad()
        def accuracy_value(predict, target, length):
            """Computes Accuracy"""
            nbr_correct, nbr_total = sb.utils.Accuracy.Accuracy(
                predict.unsqueeze(1), target, length
            )
            acc = torch.tensor([nbr_correct / nbr_total])
            return acc
        @torch.no_grad()
        def compute_fidelity(theta_out, predictions):
            """Computes top-`k` fidelity of interpreter."""
            predictions = F.softmax(predictions, dim=1)
            theta_out = F.softmax(theta_out, dim=1)
            pred_cl = torch.argmax(predictions, dim=1)
            k_top = torch.argmax(theta_out, dim=1)
            # One value per batch item: 1 when the interpreter's top class
            # agrees with the classifier's prediction, else 0.
            temp = (k_top == pred_cl).float()
            return temp
        @torch.no_grad()
        def compute_faithfulness(wavs, predictions):
            """Drop in top-class probability after masking the input."""
            X_stft_logpower, X_stft, X_stft_power = self.preprocess(wavs)
            X2 = self.interpret_computation_steps(wavs)[0]
            _, _, predictions_masked, _ = self.classifier_forward(X2)
            predictions = F.softmax(predictions, dim=1)
            predictions_masked = F.softmax(predictions_masked, dim=1)
            # get the prediction indices
            pred_cl = predictions.argmax(dim=1, keepdim=True)
            # get the corresponding output probabilities
            predictions_selected = torch.gather(
                predictions, dim=1, index=pred_cl
            )
            predictions_masked_selected = torch.gather(
                predictions_masked, dim=1, index=pred_cl
            )
            faithfulness = (
                predictions_selected - predictions_masked_selected
            ).squeeze(1)
            return faithfulness
        @torch.no_grad()
        def compute_rec_error(preds, specs, length=None):
            """Per-item MSE between (masked) prediction and spectrogram."""
            if self.hparams.use_mask_output:
                preds = specs * preds
            return (specs - preds).pow(2).mean((-2, -1))
        @torch.no_grad()
        def compute_bern_ll(xhat, target_mask, length=None):
            """Per-item Bernoulli negative log-likelihood of the mask."""
            eps = 1e-10
            rec_loss = (
                -target_mask * torch.log(xhat + eps)
                - (1 - target_mask) * torch.log(1 - xhat + eps)
            ).mean((-2, -1))
            return rec_loss
        self.top_3_fidelity = MetricStats(metric=compute_fidelity)
        self.faithfulness = MetricStats(metric=compute_faithfulness)
        self.acc_metric = sb.utils.metric_stats.MetricStats(
            metric=accuracy_value, n_jobs=1
        )
        self.recons_err = sb.utils.metric_stats.MetricStats(
            metric=compute_rec_error
        )
        if self.hparams.use_mask_output:
            self.mask_ll = sb.utils.metric_stats.MetricStats(
                metric=compute_bern_ll
            )
        return super().on_stage_start(stage, epoch)
def on_stage_end(self, stage, stage_loss, epoch=None):
"""Gets called at the end of an epoch.
Plots in subplots the values of `self.batch_to_plot` and saves the
plot to the experiment folder. `self.hparams.output_folder`"""
if stage == sb.Stage.TRAIN:
self.train_loss = stage_loss
self.train_stats = {
"loss": self.train_loss,
"acc": self.acc_metric.summarize("average"),
"rec_error": self.recons_err.summarize("average"),
}
if self.hparams.use_mask_output:
self.train_stats["mask_ll"] = self.mask_ll.summarize("average")
if stage == sb.Stage.VALID:
current_fid = self.top_3_fidelity.summarize("average")
old_lr, new_lr = self.hparams.lr_annealing(
[self.optimizer], epoch, -current_fid
)
sb.nnet.schedulers.update_learning_rate(self.optimizer, new_lr)
valid_stats = {
"loss": stage_loss,
"acc": self.acc_metric.summarize("average"),
"input_fidelity": current_fid,
"rec_error": self.recons_err.summarize("average"),
"faithfulness_median": torch.Tensor(
self.faithfulness.scores
).median(),
"faithfulness_mean": torch.Tensor(
self.faithfulness.scores
).mean(),
}
if self.hparams.use_mask_output:
valid_stats["mask_ll"] = self.mask_ll.summarize("average")
# The train_logger writes a summary to stdout and to the logfile.
self.hparams.train_logger.log_stats(
stats_meta={"epoch": epoch, "lr": old_lr},
train_stats=self.train_stats,
valid_stats=valid_stats,
)
# Save the current checkpoint and delete previous checkpoints,
self.checkpointer.save_and_keep_only(
meta=valid_stats, max_keys=["top-3_fid"]
)
if stage == sb.Stage.TEST:
current_fid = self.top_3_fidelity.summarize("average")
test_stats = {
"loss": stage_loss,
"acc": self.acc_metric.summarize("average"),
"input_fidelity": current_fid,
"faithfulness_median": torch.Tensor(
self.faithfulness.scores
).median(),
"faithfulness_mean": torch.Tensor(
self.faithfulness.scores
).mean(),
}
# The train_logger writes a summary to stdout and to the logfile.
self.hparams.train_logger.log_stats(
stats_meta={"epoch": epoch}, test_stats=test_stats
)
def dataio_prep(hparams):
    """Create the train/valid/test datasets and their processing pipelines.

    Builds the audio pipeline (load, downmix to mono, resample to the
    configured rate, peak-normalize) and the label pipeline, attaches them
    to DynamicItemDatasets built from the JSON manifests, and loads or
    creates the label encoder.

    Returns (datasets dict, label_encoder).
    """
    data_audio_folder = hparams["audio_data_folder"]
    config_sample_rate = hparams["sample_rate"]
    label_encoder = sb.dataio.encoder.CategoricalEncoder()
    hparams["resampler"] = torchaudio.transforms.Resample(
        new_freq=config_sample_rate
    )
    # 2. Define audio pipeline:
    @sb.utils.data_pipeline.takes("wav")
    @sb.utils.data_pipeline.provides("sig")
    def audio_pipeline(wav):
        """Load the signal, and pass it and its length to the corruption class.
        This is done on the CPU in the `collate_fn`."""
        wave_file = data_audio_folder + "/{:}".format(wav)
        sig, read_sr = torchaudio.load(wave_file)
        # If multi-channels, downmix it to a mono channel
        sig = torch.squeeze(sig)
        if len(sig.shape) > 1:
            sig = torch.mean(sig, dim=0)
        # Convert sample rate to required config_sample_rate
        if read_sr != config_sample_rate:
            # Re-initialize sampler if source file sample rate changed compared to last file
            if read_sr != hparams["resampler"].orig_freq:
                hparams["resampler"] = torchaudio.transforms.Resample(
                    orig_freq=read_sr, new_freq=config_sample_rate
                )
            # Resample audio
            sig = hparams["resampler"].forward(sig)
        sig = sig.float()
        # Peak normalization (NOTE: assumes non-silent audio; sig.max() == 0
        # would divide by zero).
        sig = sig / sig.max()
        return sig
    # 3. Define label pipeline:
    @sb.utils.data_pipeline.takes("class_string")
    @sb.utils.data_pipeline.provides("class_string", "class_string_encoded")
    def label_pipeline(class_string):
        yield class_string
        class_string_encoded = label_encoder.encode_label_torch(class_string)
        yield class_string_encoded
    # Define datasets. We also connect the dataset with the data processing
    # functions defined above.
    datasets = {}
    data_info = {
        "train": hparams["train_annotation"],
        "valid": hparams["valid_annotation"],
        "test": hparams["test_annotation"],
    }
    for dataset in data_info:
        datasets[dataset] = sb.dataio.dataset.DynamicItemDataset.from_json(
            json_path=data_info[dataset],
            replacements={"data_root": hparams["data_folder"]},
            dynamic_items=[audio_pipeline, label_pipeline],
            output_keys=["id", "sig", "class_string_encoded"],
        )
    # Load or compute the label encoder (with multi-GPU DDP support)
    # Please, take a look into the lab_enc_file to see the label to index
    # mapping.
    lab_enc_file = os.path.join(hparams["save_folder"], "label_encoder.txt")
    label_encoder.load_or_create(
        path=lab_enc_file,
        from_didatasets=[datasets["train"]],
        output_key="class_string",
    )
    return datasets, label_encoder
if __name__ == "__main__":
    # # This flag enables the inbuilt cudnn auto-tuner
    # torch.backends.cudnn.benchmark = True
    # CLI:
    hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])
    # Initialize ddp (useful only for multi-GPU DDP training)
    sb.utils.distributed.ddp_init_group(run_opts)
    # Load hyperparameters file with command-line overrides
    with open(hparams_file) as fin:
        hparams = load_hyperpyyaml(fin, overrides)
    # The classifier is frozen here; only the interpreter is trained.
    hparams["embedding_model"].eval()
    hparams["classifier"].eval()
    # Create experiment directory
    sb.create_experiment_directory(
        experiment_directory=hparams["output_folder"],
        hyperparams_to_save=hparams_file,
        overrides=overrides,
    )
    # Tensorboard logging (optional, controlled by the YAML flag)
    if hparams["use_tensorboard"]:
        from speechbrain.utils.train_logger import TensorboardLogger
        hparams["tensorboard_train_logger"] = TensorboardLogger(
            hparams["tensorboard_logs_folder"]
        )
    # Prepare the ESC50 manifests on the main process only (DDP-safe).
    run_on_main(
        prepare_esc50,
        kwargs={
            "data_folder": hparams["data_folder"],
            "audio_data_folder": hparams["audio_data_folder"],
            "save_json_train": hparams["train_annotation"],
            "save_json_valid": hparams["valid_annotation"],
            "save_json_test": hparams["test_annotation"],
            "train_fold_nums": hparams["train_fold_nums"],
            "valid_fold_nums": hparams["valid_fold_nums"],
            "test_fold_nums": hparams["test_fold_nums"],
            "skip_manifest_creation": hparams["skip_manifest_creation"],
        },
    )
    # Dataset IO prep: creating Dataset objects and proper encodings for phones
    datasets, label_encoder = dataio_prep(hparams)
    hparams["label_encoder"] = label_encoder
    class_labels = list(label_encoder.ind2lab.values())
    print("Class Labels:", class_labels)
    Interpreter_brain = InterpreterESC50Brain(
        modules=hparams["modules"],
        opt_class=hparams["opt_class"],
        hparams=hparams,
        run_opts=run_opts,
        checkpointer=hparams["checkpointer"],
    )
    # Optionally load pretrained classifier weights.
    if "pretrained_esc50" in hparams and hparams["use_pretrained"]:
        print("Loading model...")
        run_on_main(hparams["pretrained_esc50"].collect_files)
        hparams["pretrained_esc50"].load_collected()
    # Transfer the frozen parts of the model to the device.
    hparams["embedding_model"].to(hparams["device"])
    hparams["classifier"].to(hparams["device"])
    hparams["embedding_model"].eval()
    if not hparams["test_only"]:
        Interpreter_brain.fit(
            epoch_counter=Interpreter_brain.hparams.epoch_counter,
            train_set=datasets["train"],
            valid_set=datasets["valid"],
            train_loader_kwargs=hparams["dataloader_options"],
            valid_loader_kwargs=hparams["dataloader_options"],
        )
    else:
        # Load the best checkpoint for evaluation
        # NOTE(review): "valid_top-3_fid" does not match any key saved in
        # on_stage_end's valid_stats — confirm which meta key checkpoints
        # actually carry before relying on this recovery path.
        Interpreter_brain.checkpointer.recover_if_possible(
            max_key="valid_top-3_fid",
            device=torch.device(Interpreter_brain.device),
        )
        test_stats = Interpreter_brain.evaluate(
            test_set=datasets["test"],
            min_key="loss",
            progressbar=True,
            test_loader_kwargs=hparams["dataloader_options"],
        )
| 26,143 | 33.627815 | 92 | py |
speechbrain | speechbrain-main/recipes/ESC50/interpret/train_nmf.py | #!/usr/bin/python3
"""The recipe to train an NMF model with amortized inference on ESC50 data.
To run this recipe, use the following command:
> python train_nmf.py hparams/nmf.yaml --data_folder /yourpath/ESC-50-master
Authors
* Cem Subakan 2022, 2023
* Francesco Paissan 2022, 2023
"""
import sys
import torch
import speechbrain as sb
from hyperpyyaml import load_hyperpyyaml
from speechbrain.utils.distributed import run_on_main
from esc50_prepare import prepare_esc50
from train_l2i import dataio_prep
class NMFBrain(sb.core.Brain):
    """
    The SpeechBrain class to train Non-Negative Matrix Factorization with
    amortized inference: an encoder predicts the activations and a decoder
    (the NMF dictionary) reconstructs the log-power spectrogram.
    """
    def compute_forward(self, batch, stage=sb.Stage.TRAIN):
        """
        Forward pass for NMF: encode the log1p-compressed power spectrogram
        into activations and decode them back into a spectrogram estimate.
        """
        batch = batch.to(self.device)
        wavs, lens = batch.sig
        X_stft = self.hparams.compute_stft(wavs)
        X_stft_power = self.hparams.compute_stft_mag(X_stft)
        X_stft_tf = torch.log1p(X_stft_power)
        # The encoder consumes (batch, freq, time); permute accordingly.
        z = self.hparams.nmf_encoder(X_stft_tf.permute(0, 2, 1))
        Xhat = self.hparams.nmf_decoder(z)
        return Xhat
    def compute_objectives(self, predictions, batch, stage=sb.Stage.TRAIN):
        """
        Compute the mean squared reconstruction error against the same
        log1p-compressed spectrogram used as the encoder input.
        """
        batch = batch.to(self.device)
        wavs, lens = batch.sig
        X_stft = self.hparams.compute_stft(wavs)
        X_stft_power = self.hparams.compute_stft_mag(X_stft)
        target = torch.log1p(X_stft_power).permute(0, 2, 1)
        loss = ((target.squeeze() - predictions) ** 2).mean()
        return loss
    def on_stage_end(self, stage, stage_loss, epoch=None):
        """Log stats at the end of an epoch and checkpoint on valid loss."""
        # Compute/store important stats
        if stage == sb.Stage.TRAIN:
            self.train_loss = stage_loss
            self.train_stats = {
                "loss": self.train_loss,
            }
        # Summarize Valid statistics from the stage for record-keeping.
        elif stage == sb.Stage.VALID:
            valid_stats = {
                "loss": stage_loss,
            }
        # Perform end-of-iteration things, like annealing, logging, etc.
        if stage == sb.Stage.VALID:
            # The train_logger writes a summary to stdout and to the logfile.
            self.hparams.train_logger.log_stats(
                stats_meta={"epoch": epoch},
                train_stats=self.train_stats,
                valid_stats=valid_stats,
            )
            # Save the current checkpoint and delete previous checkpoints,
            self.checkpointer.save_and_keep_only(
                meta=valid_stats, min_keys=["loss"]
            )
if __name__ == "__main__":
    # CLI:
    hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])
    # Initialize ddp (useful only for multi-GPU DDP training)
    sb.utils.distributed.ddp_init_group(run_opts)
    # Load hyperparameters file with command-line overrides
    with open(hparams_file) as fin:
        hparams = load_hyperpyyaml(fin, overrides)
    # Create experiment directory
    sb.create_experiment_directory(
        experiment_directory=hparams["output_folder"],
        hyperparams_to_save=hparams_file,
        overrides=overrides,
    )
    # Prepare the ESC50 manifests on the main process only (DDP-safe).
    run_on_main(
        prepare_esc50,
        kwargs={
            "data_folder": hparams["data_folder"],
            "audio_data_folder": hparams["audio_data_folder"],
            "save_json_train": hparams["train_annotation"],
            "save_json_valid": hparams["valid_annotation"],
            "save_json_test": hparams["test_annotation"],
            "train_fold_nums": hparams["train_fold_nums"],
            "valid_fold_nums": hparams["valid_fold_nums"],
            "test_fold_nums": hparams["test_fold_nums"],
            "skip_manifest_creation": hparams["skip_manifest_creation"],
        },
    )
    # Reuse the dataio pipeline from the L2I recipe (label encoder unused).
    datasets, _ = dataio_prep(hparams)
    nmfbrain = NMFBrain(
        modules=hparams["modules"],
        opt_class=hparams["opt_class"],
        hparams=hparams,
        run_opts=run_opts,
        checkpointer=hparams["checkpointer"],
    )
    if not hparams["test_only"]:
        nmfbrain.fit(
            epoch_counter=nmfbrain.hparams.epoch_counter,
            train_set=datasets["train"],
            valid_set=datasets["valid"],
            train_loader_kwargs=hparams["dataloader_options"],
            valid_loader_kwargs=hparams["dataloader_options"],
        )
    # Evaluate with the best (lowest-loss) checkpoint.
    test_stats = nmfbrain.evaluate(
        test_set=datasets["test"],
        min_key="loss",
        progressbar=True,
        test_loader_kwargs=hparams["dataloader_options"],
    )
    # Optionally export the learned NMF dictionary for the interpreters.
    if hparams["save_nmfdictionary"]:
        torch.save(hparams["nmf_decoder"].return_W(), hparams["nmf_savepath"])
| 4,783 | 32.222222 | 86 | py |
speechbrain | speechbrain-main/recipes/AISHELL-1/ASR/seq2seq/train.py | #!/usr/bin/env/python3
"""
AISHELL-1 seq2seq model recipe. (Adapted from the LibriSpeech recipe.)
"""
import sys
import torch
import logging
import speechbrain as sb
from speechbrain.utils.distributed import run_on_main
from hyperpyyaml import load_hyperpyyaml
logger = logging.getLogger(__name__)
# Define training procedure
class ASR(sb.Brain):
    def compute_forward(self, batch, stage):
        """Forward computations from the waveform batches to the output probabilities.

        During training, optionally doubles the batch with environmentally
        corrupted copies and applies augmentation, then runs the
        encoder/decoder. Returns (p_ctc, p_seq, wav_lens) while the CTC
        branch is active, (p_seq, wav_lens) afterwards, and
        (p_seq, wav_lens, hypotheses) at evaluation time.
        """
        batch = batch.to(self.device)
        wavs, wav_lens = batch.sig
        tokens_bos, _ = batch.tokens_bos
        wavs, wav_lens = wavs.to(self.device), wav_lens.to(self.device)
        # Add augmentation if specified
        if stage == sb.Stage.TRAIN:
            if hasattr(self.modules, "env_corrupt"):
                # Append corrupted copies, duplicating lengths and targets.
                wavs_noise = self.modules.env_corrupt(wavs, wav_lens)
                wavs = torch.cat([wavs, wavs_noise], dim=0)
                wav_lens = torch.cat([wav_lens, wav_lens])
                tokens_bos = torch.cat([tokens_bos, tokens_bos], dim=0)
            if hasattr(self.hparams, "augmentation"):
                wavs = self.hparams.augmentation(wavs, wav_lens)
        # Forward pass
        feats = self.hparams.compute_features(wavs)
        feats = self.modules.normalize(feats, wav_lens)
        # detach(): gradients do not flow into feature normalization stats.
        x = self.modules.enc(feats.detach())
        e_in = self.modules.emb(tokens_bos)  # y_in bos + tokens
        h, _ = self.modules.dec(e_in, x, wav_lens)
        # Output layer for seq2seq log-probabilities
        logits = self.modules.seq_lin(h)
        p_seq = self.hparams.log_softmax(logits)
        # Compute outputs
        if stage == sb.Stage.TRAIN:
            current_epoch = self.hparams.epoch_counter.current
            if current_epoch <= self.hparams.number_of_ctc_epochs:
                # Output layer for ctc log-probabilities
                logits = self.modules.ctc_lin(x)
                p_ctc = self.hparams.log_softmax(logits)
                return p_ctc, p_seq, wav_lens
            else:
                return p_seq, wav_lens
        else:
            # Beam search decoding at validation/test time.
            p_tokens, scores = self.hparams.beam_search(x, wav_lens)
            return p_seq, wav_lens, p_tokens
    def compute_objectives(self, predictions, batch, stage):
        """Computes the loss (CTC+NLL) given predictions and targets.

        The prediction tuple's shape depends on the stage and the CTC
        schedule (see compute_forward). Outside training, decoded
        hypotheses are also scored into the CER metric.
        """
        current_epoch = self.hparams.epoch_counter.current
        # Unpack according to the shape produced by compute_forward.
        if stage == sb.Stage.TRAIN:
            if current_epoch <= self.hparams.number_of_ctc_epochs:
                p_ctc, p_seq, wav_lens = predictions
            else:
                p_seq, wav_lens = predictions
        else:
            p_seq, wav_lens, predicted_tokens = predictions
        ids = batch.id
        tokens_eos, tokens_eos_lens = batch.tokens_eos
        tokens, tokens_lens = batch.tokens
        # Mirror the batch doubling done by env_corrupt in compute_forward.
        if hasattr(self.modules, "env_corrupt") and stage == sb.Stage.TRAIN:
            tokens_eos = torch.cat([tokens_eos, tokens_eos], dim=0)
            tokens_eos_lens = torch.cat(
                [tokens_eos_lens, tokens_eos_lens], dim=0
            )
            tokens = torch.cat([tokens, tokens], dim=0)
            tokens_lens = torch.cat([tokens_lens, tokens_lens], dim=0)
        loss_seq = self.hparams.seq_cost(
            p_seq, tokens_eos, length=tokens_eos_lens
        )
        # Add ctc loss if necessary
        if (
            stage == sb.Stage.TRAIN
            and current_epoch <= self.hparams.number_of_ctc_epochs
        ):
            loss_ctc = self.hparams.ctc_cost(
                p_ctc, tokens, wav_lens, tokens_lens
            )
            loss = self.hparams.ctc_weight * loss_ctc
            loss += (1 - self.hparams.ctc_weight) * loss_seq
        else:
            loss = loss_seq
        if stage != sb.Stage.TRAIN:
            # Decode token terms to words
            # NOTE: `tokenizer` is a module-level name defined outside this
            # class (set up elsewhere in this file).
            predicted_words = [
                tokenizer.decode_ids(utt_seq).split(" ")
                for utt_seq in predicted_tokens
            ]
            target_words = [wrd.split(" ") for wrd in batch.wrd]
            if self.hparams.remove_spaces:
                # Character-level scoring for Mandarin: drop spaces.
                predicted_words = ["".join(p) for p in predicted_words]
                target_words = ["".join(t) for t in target_words]
        if stage != sb.Stage.TRAIN:
            self.cer_metric.append(ids, predicted_words, target_words)
        return loss
def fit_batch(self, batch):
"""Train the parameters given a single batch in input"""
predictions = self.compute_forward(batch, sb.Stage.TRAIN)
loss = self.compute_objectives(predictions, batch, sb.Stage.TRAIN)
loss.backward()
if self.check_gradients(loss):
self.optimizer.step()
self.optimizer.zero_grad()
self.batch_idx += 1
return loss.detach()
def evaluate_batch(self, batch, stage):
"""Computations needed for validation/test batches"""
predictions = self.compute_forward(batch, stage=stage)
with torch.no_grad():
loss = self.compute_objectives(predictions, batch, stage=stage)
return loss.detach()
def on_stage_start(self, stage, epoch):
"""Gets called at the beginning of each epoch"""
self.batch_idx = 0
if stage != sb.Stage.TRAIN:
self.cer_metric = self.hparams.cer_computer()
    def on_stage_end(self, stage, stage_loss, epoch):
        """Gets called at the end of an epoch.

        Summarizes the CER outside training, anneals the learning rate and
        checkpoints on validation, and writes the detailed CER report to
        file after the test stage.
        """
        # Compute/store important stats
        stage_stats = {"loss": stage_loss}
        if stage == sb.Stage.TRAIN:
            self.train_stats = stage_stats
        else:
            stage_stats["CER"] = self.cer_metric.summarize("error_rate")
        # Perform end-of-iteration things, like annealing, logging, etc.
        if stage == sb.Stage.VALID:
            old_lr, new_lr = self.hparams.lr_annealing(stage_stats["CER"])
            sb.nnet.schedulers.update_learning_rate(self.optimizer, new_lr)
            self.hparams.train_logger.log_stats(
                stats_meta={"epoch": epoch, "lr": old_lr},
                train_stats=self.train_stats,
                valid_stats=stage_stats,
            )
            # Keep only the best (lowest-CER) checkpoint.
            self.checkpointer.save_and_keep_only(
                meta={"CER": stage_stats["CER"]}, min_keys=["CER"],
            )
        elif stage == sb.Stage.TEST:
            self.hparams.train_logger.log_stats(
                stats_meta={"Epoch loaded": self.hparams.epoch_counter.current},
                test_stats=stage_stats,
            )
            # Write per-utterance CER details to the configured report file.
            with open(self.hparams.cer_file, "w") as w:
                self.cer_metric.write_stats(w)
def dataio_prepare(hparams):
    """This function prepares the datasets to be used in the brain class.
    It also defines the data processing pipeline through user-defined functions.

    Returns train/valid/test DynamicItemDatasets, the tokenizer, and the
    (possibly None) dynamic-batching samplers for train and valid.
    """
    data_folder = hparams["data_folder"]
    # 1. Create datasets from the prepared CSV manifests.
    train_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["train_data"], replacements={"data_root": data_folder},
    )
    if hparams["sorting"] == "ascending":
        # we sort training data to speed up training and get better results.
        train_data = train_data.filtered_sorted(sort_key="duration")
        # when sorting do not shuffle in dataloader ! otherwise is pointless
        hparams["train_dataloader_opts"]["shuffle"] = False
    elif hparams["sorting"] == "descending":
        train_data = train_data.filtered_sorted(
            sort_key="duration", reverse=True
        )
        # when sorting do not shuffle in dataloader ! otherwise is pointless
        hparams["train_dataloader_opts"]["shuffle"] = False
    elif hparams["sorting"] == "random":
        pass
    else:
        raise NotImplementedError(
            "sorting must be random, ascending or descending"
        )
    valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["valid_data"], replacements={"data_root": data_folder},
    )
    valid_data = valid_data.filtered_sorted(sort_key="duration")
    test_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["test_data"], replacements={"data_root": data_folder},
    )
    test_data = test_data.filtered_sorted(sort_key="duration")
    datasets = [train_data, valid_data, test_data]
    # Defining tokenizer and loading it
    tokenizer = hparams["tokenizer"]
    # 2. Define audio pipeline:
    @sb.utils.data_pipeline.takes("wav")
    @sb.utils.data_pipeline.provides("sig")
    def audio_pipeline(wav):
        sig = sb.dataio.dataio.read_audio(wav)
        return sig
    sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline)
    # 3. Define text pipeline: yields the raw text plus BOS/EOS-decorated
    # token tensors used by the seq2seq decoder and the CTC loss.
    @sb.utils.data_pipeline.takes("transcript")
    @sb.utils.data_pipeline.provides(
        "wrd", "tokens_list", "tokens_bos", "tokens_eos", "tokens"
    )
    def text_pipeline(wrd):
        yield wrd
        tokens_list = tokenizer.encode_as_ids(wrd)
        yield tokens_list
        tokens_bos = torch.LongTensor([hparams["bos_index"]] + (tokens_list))
        yield tokens_bos
        tokens_eos = torch.LongTensor(tokens_list + [hparams["eos_index"]])
        yield tokens_eos
        tokens = torch.LongTensor(tokens_list)
        yield tokens
    sb.dataio.dataset.add_dynamic_item(datasets, text_pipeline)
    # 4. Set output:
    sb.dataio.dataset.set_output_keys(
        datasets, ["id", "sig", "wrd", "tokens_bos", "tokens_eos", "tokens"],
    )
    # 5. If Dynamic Batching is used, we instantiate the needed samplers.
    train_batch_sampler = None
    valid_batch_sampler = None
    if hparams["dynamic_batching"]:
        from speechbrain.dataio.sampler import DynamicBatchSampler  # noqa
        dynamic_hparams = hparams["dynamic_batch_sampler"]
        num_buckets = dynamic_hparams["num_buckets"]
        # Buckets batches by duration so each batch stays near max_batch_len.
        train_batch_sampler = DynamicBatchSampler(
            train_data,
            dynamic_hparams["max_batch_len"],
            num_buckets=num_buckets,
            length_func=lambda x: x["duration"],
            shuffle=dynamic_hparams["shuffle_ex"],
            batch_ordering=dynamic_hparams["batch_ordering"],
        )
        valid_batch_sampler = DynamicBatchSampler(
            valid_data,
            dynamic_hparams["max_batch_len"],
            num_buckets=num_buckets,
            length_func=lambda x: x["duration"],
            shuffle=dynamic_hparams["shuffle_ex"],
            batch_ordering=dynamic_hparams["batch_ordering"],
        )
    return (
        train_data,
        valid_data,
        test_data,
        tokenizer,
        train_batch_sampler,
        valid_batch_sampler,
    )
if __name__ == "__main__":
    # CLI:
    hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])
    with open(hparams_file) as fin:
        hparams = load_hyperpyyaml(fin, overrides)
    # If --distributed_launch then
    # create ddp_group with the right communication protocol
    sb.utils.distributed.ddp_init_group(run_opts)
    # Create experiment directory
    sb.create_experiment_directory(
        experiment_directory=hparams["output_folder"],
        hyperparams_to_save=hparams_file,
        overrides=overrides,
    )
    # 1. Dataset prep (parsing AISHELL-1)
    from aishell_prepare import prepare_aishell  # noqa
    # multi-gpu (ddp) save data preparation: run_on_main ensures the CSVs
    # are written once, by the main process only.
    run_on_main(
        prepare_aishell,
        kwargs={
            "data_folder": hparams["data_folder"],
            "save_folder": hparams["output_folder"],
            "skip_prep": hparams["skip_prep"],
        },
    )
    # here we create the datasets objects as well as tokenization and encoding
    (
        train_data,
        valid_data,
        test_data,
        tokenizer,
        train_bsampler,
        valid_bsampler,
    ) = dataio_prepare(hparams)
    # We download and pretrain the tokenizer
    run_on_main(hparams["pretrainer"].collect_files)
    hparams["pretrainer"].load_collected(device=run_opts["device"])
    # Trainer initialization
    asr_brain = ASR(
        modules=hparams["modules"],
        opt_class=hparams["opt_class"],
        hparams=hparams,
        run_opts=run_opts,
        checkpointer=hparams["checkpointer"],
    )
    # adding objects to trainer:
    asr_brain.tokenizer = tokenizer
    # Changing the samplers if dynamic batching is activated
    train_dataloader_opts = hparams["train_dataloader_opts"]
    valid_dataloader_opts = hparams["valid_dataloader_opts"]
    if train_bsampler is not None:
        train_dataloader_opts = {
            "batch_sampler": train_bsampler,
            "num_workers": hparams["num_workers"],
        }
    if valid_bsampler is not None:
        valid_dataloader_opts = {"batch_sampler": valid_bsampler}
    # Training
    asr_brain.fit(
        asr_brain.hparams.epoch_counter,
        train_data,
        valid_data,
        train_loader_kwargs=train_dataloader_opts,
        valid_loader_kwargs=valid_dataloader_opts,
    )
    # Testing (loads the checkpoint selected by the checkpointer)
    asr_brain.evaluate(
        test_data, test_loader_kwargs=hparams["test_dataloader_opts"]
    )
| 13,009 | 34.162162 | 89 | py |
speechbrain | speechbrain-main/recipes/AISHELL-1/ASR/CTC/train_with_wav2vec.py | #!/usr/bin/env/python3
"""AISHELL-1 CTC recipe.
The system employs a wav2vec2 encoder and a CTC decoder.
Decoding is performed with greedy decoding.
To run this recipe, do the following:
> python train_with_wav2vec2.py hparams/train_with_wav2vec2.yaml
With the default hyperparameters, the system employs a pretrained wav2vec2 encoder.
The wav2vec2 model is pretrained on 10k hours Chinese data
The network is trained with CTC on characters extracted from a pretrained tokenizer.
Authors
* Yingzhi WANG 2022
"""
import sys
import torch
import logging
import speechbrain as sb
from speechbrain.utils.distributed import run_on_main
from hyperpyyaml import load_hyperpyyaml
logger = logging.getLogger(__name__)
# Define training procedure
class ASR(sb.Brain):
    """CTC ASR trainer: wav2vec2 encoder + linear CTC head.

    Uses separate optimizers for the wav2vec2 encoder and the rest of
    the model; the encoder optimizer is only created/stepped when the
    encoder is not frozen.
    """
    def compute_forward(self, batch, stage):
        """Forward computations from the waveform batches to the output probabilities."""
        batch = batch.to(self.device)
        wavs, wav_lens = batch.sig
        wavs, wav_lens = wavs.to(self.device), wav_lens.to(self.device)
        # Add augmentation if specified
        if stage == sb.Stage.TRAIN:
            if hasattr(self.hparams, "SpeedPerturb"):
                wavs = self.hparams.SpeedPerturb(wavs, wav_lens)
            if hasattr(self.modules, "env_corrupt"):
                # Concatenate clean and corrupted waveforms (doubles batch).
                wavs_noise = self.modules.env_corrupt(wavs, wav_lens)
                wavs = torch.cat([wavs, wavs_noise], dim=0)
                wav_lens = torch.cat([wav_lens, wav_lens])
        # Forward pass
        feats = self.modules.wav2vec2(wavs, wav_lens)
        if stage == sb.Stage.TRAIN:
            if hasattr(self.hparams, "SpecAugment"):
                feats = self.hparams.SpecAugment(feats)
        x = self.modules.enc(feats)
        logits = self.modules.ctc_lin(x)
        p_ctc = self.hparams.log_softmax(logits)
        return p_ctc, wav_lens
    def compute_objectives(self, predictions, batch, stage):
        """Computes the CTC loss given predictions and targets."""
        p_ctc, wav_lens = predictions
        ids = batch.id
        tokens, tokens_lens = batch.tokens
        if hasattr(self.modules, "env_corrupt") and stage == sb.Stage.TRAIN:
            # Targets must be doubled to match the augmented batch.
            tokens = torch.cat([tokens, tokens], dim=0)
            tokens_lens = torch.cat([tokens_lens, tokens_lens], dim=0)
        loss = self.hparams.ctc_cost(p_ctc, tokens, wav_lens, tokens_lens)
        if stage != sb.Stage.TRAIN:
            # Decode token terms to words
            sequences = sb.decoders.ctc_greedy_decode(
                p_ctc, wav_lens, blank_id=self.hparams.blank_index
            )
            predicted_words_list = []
            # CER target: each Chinese character is one "word".
            target_words_list = [list(wrd) for wrd in batch.wrd]
            for sequence in sequences:
                # Decode token terms to words
                predicted_tokens = self.tokenizer.convert_ids_to_tokens(
                    sequence
                )
                predicted_words = []
                # Strip BERT-style special tokens from the hypothesis.
                for c in predicted_tokens:
                    if c == "[CLS]":
                        continue
                    elif c == "[SEP]" or c == "[PAD]":
                        break
                    else:
                        predicted_words.append(c)
                predicted_words_list.append(predicted_words)
            self.cer_metric.append(
                ids=ids, predict=predicted_words_list, target=target_words_list,
            )
        return loss
    def fit_batch(self, batch):
        """Train the parameters given a single batch in input"""
        predictions = self.compute_forward(batch, sb.Stage.TRAIN)
        loss = self.compute_objectives(predictions, batch, sb.Stage.TRAIN)
        loss.backward()
        if self.check_gradients(loss):
            # Only step the encoder optimizer when the encoder is trainable.
            if not self.hparams.wav2vec2.freeze:
                self.wav2vec_optimizer.step()
            self.model_optimizer.step()
        if not self.hparams.wav2vec2.freeze:
            self.wav2vec_optimizer.zero_grad()
        self.model_optimizer.zero_grad()
        return loss.detach()
    def evaluate_batch(self, batch, stage):
        """Computations needed for validation/test batches"""
        predictions = self.compute_forward(batch, stage=stage)
        with torch.no_grad():
            loss = self.compute_objectives(predictions, batch, stage=stage)
        return loss.detach()
    def on_stage_start(self, stage, epoch):
        """Gets called at the beginning of each epoch"""
        self.batch_idx = 0
        if stage != sb.Stage.TRAIN:
            self.cer_metric = self.hparams.cer_computer()
    def on_stage_end(self, stage, stage_loss, epoch):
        """Gets called at the end of a epoch."""
        # Compute/store important stats
        stage_stats = {"loss": stage_loss}
        if stage == sb.Stage.TRAIN:
            self.train_stats = stage_stats
        else:
            stage_stats["CER"] = self.cer_metric.summarize("error_rate")
        # Perform end-of-iteration things, like annealing, logging, etc.
        if stage == sb.Stage.VALID:
            # Both schedulers are driven by validation loss.
            old_lr_model, new_lr_model = self.hparams.lr_annealing_model(
                stage_stats["loss"]
            )
            old_lr_wav2vec, new_lr_wav2vec = self.hparams.lr_annealing_wav2vec(
                stage_stats["loss"]
            )
            sb.nnet.schedulers.update_learning_rate(
                self.model_optimizer, new_lr_model
            )
            if not self.hparams.wav2vec2.freeze:
                sb.nnet.schedulers.update_learning_rate(
                    self.wav2vec_optimizer, new_lr_wav2vec
                )
            self.hparams.train_logger.log_stats(
                stats_meta={
                    "epoch": epoch,
                    "lr_model": old_lr_model,
                    "lr_wav2vec": old_lr_wav2vec,
                },
                train_stats=self.train_stats,
                valid_stats=stage_stats,
            )
            self.checkpointer.save_and_keep_only(
                meta={"CER": stage_stats["CER"]}, min_keys=["CER"],
            )
        elif stage == sb.Stage.TEST:
            self.hparams.train_logger.log_stats(
                stats_meta={"Epoch loaded": self.hparams.epoch_counter.current},
                test_stats=stage_stats,
            )
            with open(self.hparams.cer_file, "w") as w:
                self.cer_metric.write_stats(w)
    def init_optimizers(self):
        "Initializes the wav2vec2 optimizer and model optimizer"
        # If the wav2vec encoder is unfrozen, we create the optimizer
        if not self.hparams.wav2vec2.freeze:
            self.wav2vec_optimizer = self.hparams.wav2vec_opt_class(
                self.modules.wav2vec2.parameters()
            )
            if self.checkpointer is not None:
                self.checkpointer.add_recoverable(
                    "wav2vec_opt", self.wav2vec_optimizer
                )
        self.model_optimizer = self.hparams.model_opt_class(
            self.hparams.model.parameters()
        )
        if self.checkpointer is not None:
            self.checkpointer.add_recoverable("modelopt", self.model_optimizer)
    def zero_grad(self, set_to_none=False):
        # Clear gradients on both optimizers (encoder only if trainable).
        if not self.hparams.wav2vec2.freeze:
            self.wav2vec_optimizer.zero_grad(set_to_none)
        self.model_optimizer.zero_grad(set_to_none)
def dataio_prepare(hparams):
    """This function prepares the datasets to be used in the brain class.
    It also defines the data processing pipeline through user-defined functions.

    Returns train/valid/test DynamicItemDatasets, the (HuggingFace-style)
    tokenizer, and the (possibly None) dynamic-batching samplers.
    """
    data_folder = hparams["data_folder"]
    train_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["train_data"], replacements={"data_root": data_folder},
    )
    if hparams["sorting"] == "ascending":
        # we sort training data to speed up training and get better results.
        train_data = train_data.filtered_sorted(sort_key="duration")
        # when sorting do not shuffle in dataloader ! otherwise is pointless
        hparams["train_dataloader_opts"]["shuffle"] = False
    elif hparams["sorting"] == "descending":
        train_data = train_data.filtered_sorted(
            sort_key="duration", reverse=True
        )
        # when sorting do not shuffle in dataloader ! otherwise is pointless
        hparams["train_dataloader_opts"]["shuffle"] = False
    elif hparams["sorting"] == "random":
        pass
    else:
        raise NotImplementedError(
            "sorting must be random, ascending or descending"
        )
    valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["valid_data"], replacements={"data_root": data_folder},
    )
    valid_data = valid_data.filtered_sorted(sort_key="duration")
    test_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["test_data"], replacements={"data_root": data_folder},
    )
    test_data = test_data.filtered_sorted(sort_key="duration")
    datasets = [train_data, valid_data, test_data]
    # Defining tokenizer and loading it
    tokenizer = hparams["tokenizer"]
    # 2. Define audio pipeline:
    @sb.utils.data_pipeline.takes("wav")
    @sb.utils.data_pipeline.provides("sig")
    def audio_pipeline(wav):
        sig = sb.dataio.dataio.read_audio(wav)
        return sig
    sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline)
    # 3. Define text pipeline: spaces are removed before tokenization
    # (Chinese transcripts; CTC targets are character-level tokens).
    @sb.utils.data_pipeline.takes("transcript")
    @sb.utils.data_pipeline.provides("wrd", "tokens_list", "tokens")
    def text_pipeline(wrd):
        wrd = "".join(wrd.split(" "))
        yield wrd
        tokens_list = tokenizer(wrd)["input_ids"]
        yield tokens_list
        tokens = torch.LongTensor(tokens_list)
        yield tokens
    sb.dataio.dataset.add_dynamic_item(datasets, text_pipeline)
    # 4. Set output:
    sb.dataio.dataset.set_output_keys(
        datasets, ["id", "sig", "wrd", "tokens"],
    )
    # 5. If Dynamic Batching is used, we instantiate the needed samplers.
    train_batch_sampler = None
    valid_batch_sampler = None
    if hparams["dynamic_batching"]:
        from speechbrain.dataio.sampler import DynamicBatchSampler  # noqa
        dynamic_hparams = hparams["dynamic_batch_sampler"]
        num_buckets = dynamic_hparams["num_buckets"]
        train_batch_sampler = DynamicBatchSampler(
            train_data,
            dynamic_hparams["max_batch_len"],
            num_buckets=num_buckets,
            length_func=lambda x: x["duration"],
            shuffle=dynamic_hparams["shuffle_ex"],
            batch_ordering=dynamic_hparams["batch_ordering"],
        )
        valid_batch_sampler = DynamicBatchSampler(
            valid_data,
            dynamic_hparams["max_batch_len"],
            num_buckets=num_buckets,
            length_func=lambda x: x["duration"],
            shuffle=dynamic_hparams["shuffle_ex"],
            batch_ordering=dynamic_hparams["batch_ordering"],
        )
    return (
        train_data,
        valid_data,
        test_data,
        tokenizer,
        train_batch_sampler,
        valid_batch_sampler,
    )
if __name__ == "__main__":
    # CLI:
    hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])
    with open(hparams_file) as fin:
        hparams = load_hyperpyyaml(fin, overrides)
    # If --distributed_launch then
    # create ddp_group with the right communication protocol
    sb.utils.distributed.ddp_init_group(run_opts)
    # Create experiment directory
    sb.create_experiment_directory(
        experiment_directory=hparams["output_folder"],
        hyperparams_to_save=hparams_file,
        overrides=overrides,
    )
    # 1. Dataset prep (parsing AISHELL-1)
    from aishell_prepare import prepare_aishell  # noqa
    # multi-gpu (ddp) save data preparation: run once on the main process.
    run_on_main(
        prepare_aishell,
        kwargs={
            "data_folder": hparams["data_folder"],
            "save_folder": hparams["output_folder"],
            "skip_prep": hparams["skip_prep"],
        },
    )
    # here we create the datasets objects as well as tokenization and encoding
    (
        train_data,
        valid_data,
        test_data,
        tokenizer,
        train_bsampler,
        valid_bsampler,
    ) = dataio_prepare(hparams)
    # Trainer initialization (optimizers come from ASR.init_optimizers,
    # so no opt_class is passed here).
    asr_brain = ASR(
        modules=hparams["modules"],
        hparams=hparams,
        run_opts=run_opts,
        checkpointer=hparams["checkpointer"],
    )
    # adding objects to trainer:
    asr_brain.tokenizer = tokenizer
    # Changing the samplers if dynamic batching is activated
    train_dataloader_opts = hparams["train_dataloader_opts"]
    valid_dataloader_opts = hparams["valid_dataloader_opts"]
    if train_bsampler is not None:
        train_dataloader_opts = {
            "batch_sampler": train_bsampler,
            "num_workers": hparams["num_workers"],
        }
    if valid_bsampler is not None:
        valid_dataloader_opts = {"batch_sampler": valid_bsampler}
    # Training
    asr_brain.fit(
        asr_brain.hparams.epoch_counter,
        train_data,
        valid_data,
        train_loader_kwargs=train_dataloader_opts,
        valid_loader_kwargs=valid_dataloader_opts,
    )
    # Testing with the best-CER checkpoint
    asr_brain.evaluate(
        test_data,
        min_key="CER",
        test_loader_kwargs=hparams["test_dataloader_opts"],
    )
| 13,335 | 33.282776 | 89 | py |
speechbrain | speechbrain-main/recipes/AISHELL-1/ASR/transformer/train_with_wav2vect.py | #!/usr/bin/env/python3
"""
AISHELL-1 transformer model recipe. (Adapted from the LibriSpeech recipe.).
It is designed to work with wav2vec2 pre-training.
"""
import sys
import torch
import logging
import speechbrain as sb
from speechbrain.utils.distributed import run_on_main
from hyperpyyaml import load_hyperpyyaml
logger = logging.getLogger(__name__)
# Define training procedure
class ASR(sb.core.Brain):
    """Joint CTC/attention transformer ASR trainer on wav2vec2 features.

    Training has two phases: Adam with Noam annealing for the first
    ``stage_one_epochs``, then a switch to SGD. A separate optimizer is
    kept for the wav2vec2 encoder throughout.
    """
    def compute_forward(self, batch, stage):
        """Forward computations from the waveform batches to the output probabilities."""
        batch = batch.to(self.device)
        wavs, wav_lens = batch.sig
        tokens_bos, _ = batch.tokens_bos
        # Add augmentation if specified
        if stage == sb.Stage.TRAIN:
            if hasattr(self.modules, "env_corrupt"):
                # Doubles the batch with env-corrupted copies; BOS targets
                # must be doubled too for the decoder input.
                wavs_noise = self.modules.env_corrupt(wavs, wav_lens)
                wavs = torch.cat([wavs, wavs_noise], dim=0)
                wav_lens = torch.cat([wav_lens, wav_lens])
                tokens_bos = torch.cat([tokens_bos, tokens_bos], dim=0)
        # compute features
        feats = self.modules.wav2vec2(wavs, wav_lens)
        current_epoch = self.hparams.epoch_counter.current
        if stage == sb.Stage.TRAIN:
            if hasattr(self.hparams, "augmentation"):
                feats = self.hparams.augmentation(feats)
        # forward modules
        enc_out, pred = self.hparams.Transformer(
            feats, tokens_bos, wav_lens, pad_idx=self.hparams.pad_index
        )
        # output layer for ctc log-probabilities
        logits = self.hparams.ctc_lin(enc_out)
        p_ctc = self.hparams.log_softmax(logits)
        # output layer for seq2seq log-probabilities
        pred = self.hparams.seq_lin(pred)
        p_seq = self.hparams.log_softmax(pred)
        # Compute outputs
        hyps = None
        if stage == sb.Stage.TRAIN:
            hyps = None
        elif stage == sb.Stage.VALID:
            hyps = None
            current_epoch = self.hparams.epoch_counter.current
            if current_epoch % self.hparams.valid_search_interval == 0:
                # for the sake of efficiency, we only perform beamsearch with limited capacity
                # and no LM to give user some idea of how the AM is doing
                hyps, _ = self.hparams.valid_search(enc_out.detach(), wav_lens)
        elif stage == sb.Stage.TEST:
            hyps, _ = self.hparams.test_search(enc_out.detach(), wav_lens)
        return p_ctc, p_seq, wav_lens, hyps
    def compute_objectives(self, predictions, batch, stage):
        """Computes the loss (CTC+NLL) given predictions and targets."""
        (p_ctc, p_seq, wav_lens, hyps,) = predictions
        ids = batch.id
        tokens_eos, tokens_eos_lens = batch.tokens_eos
        tokens, tokens_lens = batch.tokens
        if hasattr(self.modules, "env_corrupt") and stage == sb.Stage.TRAIN:
            tokens_eos = torch.cat([tokens_eos, tokens_eos], dim=0)
            tokens_eos_lens = torch.cat(
                [tokens_eos_lens, tokens_eos_lens], dim=0
            )
            tokens = torch.cat([tokens, tokens], dim=0)
            tokens_lens = torch.cat([tokens_lens, tokens_lens], dim=0)
        loss_seq = self.hparams.seq_cost(
            p_seq, tokens_eos, length=tokens_eos_lens
        )
        loss_ctc = self.hparams.ctc_cost(p_ctc, tokens, wav_lens, tokens_lens)
        # Interpolated CTC/attention loss.
        loss = (
            self.hparams.ctc_weight * loss_ctc
            + (1 - self.hparams.ctc_weight) * loss_seq
        )
        if stage != sb.Stage.TRAIN:
            current_epoch = self.hparams.epoch_counter.current
            valid_search_interval = self.hparams.valid_search_interval
            if current_epoch % valid_search_interval == 0 or (
                stage == sb.Stage.TEST
            ):
                # Decode token terms to words
                # NOTE(review): `tokenizer` here is the module-level global
                # set in __main__, not self.tokenizer — confirm intended.
                predicted_words = [
                    tokenizer.decode_ids(utt_seq).split(" ") for utt_seq in hyps
                ]
                target_words = [wrd.split(" ") for wrd in batch.wrd]
                if self.hparams.remove_spaces:
                    predicted_words = ["".join(p) for p in predicted_words]
                    target_words = ["".join(t) for t in target_words]
                self.cer_metric.append(ids, predicted_words, target_words)
            # compute the accuracy of the one-step-forward prediction
            self.acc_metric.append(p_seq, tokens_eos, tokens_eos_lens)
        return loss
    def fit_batch(self, batch):
        """Train the parameters given a single batch in input"""
        # check if we need to switch optimizer
        # if so change the optimizer from Adam to SGD
        self.check_and_reset_optimizer()
        predictions = self.compute_forward(batch, sb.Stage.TRAIN)
        loss = self.compute_objectives(predictions, batch, sb.Stage.TRAIN)
        # normalize the loss by gradient_accumulation step
        (loss / self.hparams.gradient_accumulation).backward()
        if self.step % self.hparams.gradient_accumulation == 0:
            # gradient clipping & early stop if loss is not fini
            self.check_gradients(loss)
            self.optimizer.step()
            self.optimizer_wav2vect.step()
            self.optimizer.zero_grad()
            self.optimizer_wav2vect.zero_grad()
            # anneal lr every update
            self.hparams.noam_annealing(self.optimizer)
            self.hparams.noam_annealing_wav2vect(self.optimizer_wav2vect)
        return loss.detach()
    def evaluate_batch(self, batch, stage):
        """Computations needed for validation/test batches"""
        with torch.no_grad():
            predictions = self.compute_forward(batch, stage=stage)
            loss = self.compute_objectives(predictions, batch, stage=stage)
        return loss.detach()
    def on_stage_start(self, stage, epoch):
        """Gets called at the beginning of each epoch"""
        if stage != sb.Stage.TRAIN:
            self.acc_metric = self.hparams.acc_computer()
            self.cer_metric = self.hparams.cer_computer()
    def on_stage_end(self, stage, stage_loss, epoch):
        """Gets called at the end of a epoch."""
        # Compute/store important stats
        stage_stats = {"loss": stage_loss}
        if stage == sb.Stage.TRAIN:
            self.train_stats = stage_stats
        else:
            stage_stats["ACC"] = self.acc_metric.summarize()
            current_epoch = self.hparams.epoch_counter.current
            valid_search_interval = self.hparams.valid_search_interval
            # CER is only available on epochs where beamsearch ran.
            if (
                current_epoch % valid_search_interval == 0
                or stage == sb.Stage.TEST
            ):
                stage_stats["CER"] = self.cer_metric.summarize("error_rate")
        # log stats and save checkpoint at end-of-epoch
        if stage == sb.Stage.VALID and sb.utils.distributed.if_main_process():
            # report different epoch stages according current stage
            current_epoch = self.hparams.epoch_counter.current
            if current_epoch <= self.hparams.stage_one_epochs:
                lr = self.hparams.noam_annealing.current_lr
                steps = self.hparams.noam_annealing.n_steps
                optimizer = self.optimizer.__class__.__name__
            else:
                lr = self.hparams.lr_sgd
                steps = -1
                optimizer = self.optimizer.__class__.__name__
            epoch_stats = {
                "epoch": epoch,
                "lr": lr,
                "steps": steps,
                "optimizer": optimizer,
            }
            self.hparams.train_logger.log_stats(
                stats_meta=epoch_stats,
                train_stats=self.train_stats,
                valid_stats=stage_stats,
            )
            self.checkpointer.save_and_keep_only(
                meta={"ACC": stage_stats["ACC"], "epoch": epoch},
                max_keys=["ACC"],
                num_to_keep=10,
            )
        elif stage == sb.Stage.TEST:
            self.hparams.train_logger.log_stats(
                stats_meta={"Epoch loaded": self.hparams.epoch_counter.current},
                test_stats=stage_stats,
            )
            with open(self.hparams.cer_file, "w") as w:
                self.cer_metric.write_stats(w)
            # save the averaged checkpoint at the end of the evaluation stage
            # delete the rest of the intermediate checkpoints
            # ACC is set to 1.1 so checkpointer only keeps the averaged checkpoint
            self.checkpointer.save_and_keep_only(
                meta={"ACC": 1.1, "epoch": epoch},
                max_keys=["ACC"],
                num_to_keep=1,
            )
    def check_and_reset_optimizer(self):
        """reset the optimizer if training enters stage 2"""
        current_epoch = self.hparams.epoch_counter.current
        if not hasattr(self, "switched"):
            self.switched = False
            if isinstance(self.optimizer, torch.optim.SGD):
                self.switched = True
        if self.switched is True:
            return
        if current_epoch > self.hparams.stage_one_epochs:
            self.optimizer = self.hparams.SGD(self.modules.parameters())
            if self.checkpointer is not None:
                self.checkpointer.add_recoverable("optimizer", self.optimizer)
            self.switched = True
    def on_fit_start(self):
        """Initialize the right optimizer on the training start"""
        super().on_fit_start()
        # if the model is resumed from stage two, reinitialize the optimizer
        current_epoch = self.hparams.epoch_counter.current
        current_optimizer = self.optimizer
        if current_epoch > self.hparams.stage_one_epochs:
            del self.optimizer
            self.optimizer = self.hparams.SGD(self.modules.parameters())
            # Load latest checkpoint to resume training if interrupted
            if self.checkpointer is not None:
                # do not reload the weights if training is interrupted right before stage 2
                group = current_optimizer.param_groups[0]
                if "momentum" not in group:
                    return
                self.checkpointer.recover_if_possible(
                    device=torch.device(self.device)
                )
    def on_evaluate_start(self, max_key=None, min_key=None):
        """perform checkpoint averge if needed"""
        super().on_evaluate_start()
        ckpts = self.checkpointer.find_checkpoints(
            max_key=max_key, min_key=min_key
        )
        # Average the top checkpoints' weights for evaluation.
        ckpt = sb.utils.checkpoints.average_checkpoints(
            ckpts, recoverable_name="model", device=self.device
        )
        self.hparams.model.load_state_dict(ckpt, strict=True)
        self.hparams.model.eval()
    def init_optimizers(self):
        "Initializes the wav2vec2 optimizer and model optimizer"
        self.optimizer_wav2vect = self.hparams.wav2vec_opt_class(
            self.modules.wav2vec2.parameters()
        )
        self.optimizer = self.hparams.Adam(self.hparams.model.parameters())
        if self.checkpointer is not None:
            self.checkpointer.add_recoverable(
                "wav2vec_opt", self.optimizer_wav2vect
            )
            self.checkpointer.add_recoverable("modelopt", self.optimizer)
    def zero_grad(self, set_to_none=False):
        # Clear gradients on both optimizers.
        self.optimizer_wav2vect.zero_grad(set_to_none)
        self.optimizer.zero_grad(set_to_none)
def dataio_prepare(hparams):
    """This function prepares the datasets to be used in the brain class.
    It also defines the data processing pipeline through user-defined functions.

    Returns train/valid/test DynamicItemDatasets, the SentencePiece
    tokenizer, and the (possibly None) dynamic-batching samplers.
    """
    data_folder = hparams["data_folder"]
    train_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["train_data"], replacements={"data_root": data_folder},
    )
    if hparams["sorting"] == "ascending":
        # we sort training data to speed up training and get better results.
        train_data = train_data.filtered_sorted(sort_key="duration")
        # when sorting do not shuffle in dataloader ! otherwise is pointless
        hparams["train_dataloader_opts"]["shuffle"] = False
    elif hparams["sorting"] == "descending":
        train_data = train_data.filtered_sorted(
            sort_key="duration", reverse=True
        )
        # when sorting do not shuffle in dataloader ! otherwise is pointless
        hparams["train_dataloader_opts"]["shuffle"] = False
    elif hparams["sorting"] == "random":
        pass
    else:
        raise NotImplementedError(
            "sorting must be random, ascending or descending"
        )
    valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["valid_data"], replacements={"data_root": data_folder},
    )
    valid_data = valid_data.filtered_sorted(sort_key="duration")
    test_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["test_data"], replacements={"data_root": data_folder},
    )
    test_data = test_data.filtered_sorted(sort_key="duration")
    datasets = [train_data, valid_data, test_data]
    # Defining tokenizer and loading it
    tokenizer = hparams["tokenizer"]
    # 2. Define audio pipeline:
    @sb.utils.data_pipeline.takes("wav")
    @sb.utils.data_pipeline.provides("sig")
    def audio_pipeline(wav):
        sig = sb.dataio.dataio.read_audio(wav)
        return sig
    sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline)
    # 3. Define text pipeline: yields raw text plus BOS/EOS-decorated
    # token tensors for the seq2seq decoder and plain tokens for CTC.
    @sb.utils.data_pipeline.takes("transcript")
    @sb.utils.data_pipeline.provides(
        "wrd", "tokens_list", "tokens_bos", "tokens_eos", "tokens"
    )
    def text_pipeline(wrd):
        yield wrd
        tokens_list = tokenizer.encode_as_ids(wrd)
        yield tokens_list
        tokens_bos = torch.LongTensor([hparams["bos_index"]] + (tokens_list))
        yield tokens_bos
        tokens_eos = torch.LongTensor(tokens_list + [hparams["eos_index"]])
        yield tokens_eos
        tokens = torch.LongTensor(tokens_list)
        yield tokens
    sb.dataio.dataset.add_dynamic_item(datasets, text_pipeline)
    # 4. Set output:
    sb.dataio.dataset.set_output_keys(
        datasets, ["id", "sig", "wrd", "tokens_bos", "tokens_eos", "tokens"],
    )
    # 5. If Dynamic Batching is used, we instantiate the needed samplers.
    train_batch_sampler = None
    valid_batch_sampler = None
    if hparams["dynamic_batching"]:
        from speechbrain.dataio.sampler import DynamicBatchSampler  # noqa
        dynamic_hparams = hparams["dynamic_batch_sampler"]
        num_buckets = dynamic_hparams["num_buckets"]
        train_batch_sampler = DynamicBatchSampler(
            train_data,
            dynamic_hparams["max_batch_len"],
            num_buckets=num_buckets,
            length_func=lambda x: x["duration"],
            shuffle=dynamic_hparams["shuffle_ex"],
            batch_ordering=dynamic_hparams["batch_ordering"],
        )
        valid_batch_sampler = DynamicBatchSampler(
            valid_data,
            dynamic_hparams["max_batch_len"],
            num_buckets=num_buckets,
            length_func=lambda x: x["duration"],
            shuffle=dynamic_hparams["shuffle_ex"],
            batch_ordering=dynamic_hparams["batch_ordering"],
        )
    return (
        train_data,
        valid_data,
        test_data,
        tokenizer,
        train_batch_sampler,
        valid_batch_sampler,
    )
if __name__ == "__main__":
    # CLI:
    hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])
    with open(hparams_file) as fin:
        hparams = load_hyperpyyaml(fin, overrides)
    # If --distributed_launch then
    # create ddp_group with the right communication protocol
    sb.utils.distributed.ddp_init_group(run_opts)
    # Create experiment directory
    sb.create_experiment_directory(
        experiment_directory=hparams["output_folder"],
        hyperparams_to_save=hparams_file,
        overrides=overrides,
    )
    # 1. Dataset prep (parsing AISHELL-1)
    from aishell_prepare import prepare_aishell  # noqa
    # multi-gpu (ddp) save data preparation: run once on the main process.
    run_on_main(
        prepare_aishell,
        kwargs={
            "data_folder": hparams["data_folder"],
            "save_folder": hparams["output_folder"],
            "skip_prep": hparams["skip_prep"],
        },
    )
    # here we create the datasets objects as well as tokenization and encoding
    (
        train_data,
        valid_data,
        test_data,
        tokenizer,
        train_bsampler,
        valid_bsampler,
    ) = dataio_prepare(hparams)
    # We download and pretrain the tokenizer
    run_on_main(hparams["pretrainer"].collect_files)
    hparams["pretrainer"].load_collected(device=run_opts["device"])
    # Trainer initialization (optimizers come from ASR.init_optimizers)
    asr_brain = ASR(
        modules=hparams["modules"],
        hparams=hparams,
        run_opts=run_opts,
        checkpointer=hparams["checkpointer"],
    )
    # adding objects to trainer:
    asr_brain.tokenizer = tokenizer
    # Changing the samplers if dynamic batching is activated
    train_dataloader_opts = hparams["train_dataloader_opts"]
    valid_dataloader_opts = hparams["valid_dataloader_opts"]
    if train_bsampler is not None:
        train_dataloader_opts = {
            "batch_sampler": train_bsampler,
            "num_workers": hparams["num_workers"],
        }
    if valid_bsampler is not None:
        valid_dataloader_opts = {"batch_sampler": valid_bsampler}
    # Training
    asr_brain.fit(
        asr_brain.hparams.epoch_counter,
        train_data,
        valid_data,
        train_loader_kwargs=train_dataloader_opts,
        valid_loader_kwargs=valid_dataloader_opts,
    )
    # Testing (uses checkpoint averaging via ASR.on_evaluate_start)
    asr_brain.evaluate(
        test_data, test_loader_kwargs=hparams["test_dataloader_opts"]
    )
| 17,879 | 35.341463 | 94 | py |
speechbrain | speechbrain-main/recipes/AISHELL-1/ASR/transformer/train.py | #!/usr/bin/env/python3
"""
AISHELL-1 transformer model recipe. (Adapted from the LibriSpeech recipe.)
"""
import sys
import torch
import logging
import speechbrain as sb
from speechbrain.utils.distributed import run_on_main
from hyperpyyaml import load_hyperpyyaml
logger = logging.getLogger(__name__)
# Define training procedure
class ASR(sb.core.Brain):
    def compute_forward(self, batch, stage):
        """Forward computations from the waveform batches to the output probabilities.

        Returns (p_ctc, p_seq, wav_lens, hyps); hyps is None during
        training and on validation epochs where no beamsearch runs.
        """
        batch = batch.to(self.device)
        wavs, wav_lens = batch.sig
        tokens_bos, _ = batch.tokens_bos
        # Add augmentation if specified
        if stage == sb.Stage.TRAIN:
            if hasattr(self.modules, "env_corrupt"):
                # Doubles the batch with env-corrupted copies; decoder
                # BOS targets must be doubled too.
                wavs_noise = self.modules.env_corrupt(wavs, wav_lens)
                wavs = torch.cat([wavs, wavs_noise], dim=0)
                wav_lens = torch.cat([wav_lens, wav_lens])
                tokens_bos = torch.cat([tokens_bos, tokens_bos], dim=0)
        # compute features
        feats = self.hparams.compute_features(wavs)
        current_epoch = self.hparams.epoch_counter.current
        feats = self.hparams.normalize(feats, wav_lens, epoch=current_epoch)
        if stage == sb.Stage.TRAIN:
            if hasattr(self.hparams, "augmentation"):
                feats = self.hparams.augmentation(feats)
        # forward modules
        src = self.modules.CNN(feats)
        enc_out, pred = self.modules.Transformer(
            src, tokens_bos, wav_lens, pad_idx=self.hparams.pad_index
        )
        # output layer for ctc log-probabilities
        logits = self.modules.ctc_lin(enc_out)
        p_ctc = self.hparams.log_softmax(logits)
        # output layer for seq2seq log-probabilities
        pred = self.modules.seq_lin(pred)
        p_seq = self.hparams.log_softmax(pred)
        # Compute outputs
        hyps = None
        if stage == sb.Stage.TRAIN:
            hyps = None
        elif stage == sb.Stage.VALID:
            hyps = None
            current_epoch = self.hparams.epoch_counter.current
            if current_epoch % self.hparams.valid_search_interval == 0:
                # for the sake of efficiency, we only perform beamsearch with limited capacity
                # and no LM to give user some idea of how the AM is doing
                hyps, _ = self.hparams.valid_search(enc_out.detach(), wav_lens)
        elif stage == sb.Stage.TEST:
            hyps, _ = self.hparams.test_search(enc_out.detach(), wav_lens)
        return p_ctc, p_seq, wav_lens, hyps
def compute_objectives(self, predictions, batch, stage):
"""Computes the loss (CTC+NLL) given predictions and targets."""
(p_ctc, p_seq, wav_lens, hyps,) = predictions
ids = batch.id
tokens_eos, tokens_eos_lens = batch.tokens_eos
tokens, tokens_lens = batch.tokens
if hasattr(self.modules, "env_corrupt") and stage == sb.Stage.TRAIN:
tokens_eos = torch.cat([tokens_eos, tokens_eos], dim=0)
tokens_eos_lens = torch.cat(
[tokens_eos_lens, tokens_eos_lens], dim=0
)
tokens = torch.cat([tokens, tokens], dim=0)
tokens_lens = torch.cat([tokens_lens, tokens_lens], dim=0)
loss_seq = self.hparams.seq_cost(
p_seq, tokens_eos, length=tokens_eos_lens
)
loss_ctc = self.hparams.ctc_cost(p_ctc, tokens, wav_lens, tokens_lens)
loss = (
self.hparams.ctc_weight * loss_ctc
+ (1 - self.hparams.ctc_weight) * loss_seq
)
if stage != sb.Stage.TRAIN:
current_epoch = self.hparams.epoch_counter.current
valid_search_interval = self.hparams.valid_search_interval
if current_epoch % valid_search_interval == 0 or (
stage == sb.Stage.TEST
):
# Decode token terms to words
predicted_words = [
tokenizer.decode_ids(utt_seq).split(" ") for utt_seq in hyps
]
target_words = [wrd.split(" ") for wrd in batch.wrd]
if self.hparams.remove_spaces:
predicted_words = ["".join(p) for p in predicted_words]
target_words = ["".join(t) for t in target_words]
self.cer_metric.append(ids, predicted_words, target_words)
# compute the accuracy of the one-step-forward prediction
self.acc_metric.append(p_seq, tokens_eos, tokens_eos_lens)
return loss
def fit_batch(self, batch):
"""Train the parameters given a single batch in input"""
# check if we need to switch optimizer
# if so change the optimizer from Adam to SGD
self.check_and_reset_optimizer()
predictions = self.compute_forward(batch, sb.Stage.TRAIN)
loss = self.compute_objectives(predictions, batch, sb.Stage.TRAIN)
# normalize the loss by gradient_accumulation step
(loss / self.hparams.gradient_accumulation).backward()
if self.step % self.hparams.gradient_accumulation == 0:
# gradient clipping & early stop if loss is not fini
self.check_gradients(loss)
self.optimizer.step()
self.optimizer.zero_grad()
# anneal lr every update
self.hparams.noam_annealing(self.optimizer)
return loss.detach()
def evaluate_batch(self, batch, stage):
"""Computations needed for validation/test batches"""
with torch.no_grad():
predictions = self.compute_forward(batch, stage=stage)
loss = self.compute_objectives(predictions, batch, stage=stage)
return loss.detach()
def on_stage_start(self, stage, epoch):
"""Gets called at the beginning of each epoch"""
if stage != sb.Stage.TRAIN:
self.acc_metric = self.hparams.acc_computer()
self.cer_metric = self.hparams.cer_computer()
def on_stage_end(self, stage, stage_loss, epoch):
"""Gets called at the end of a epoch."""
# Compute/store important stats
stage_stats = {"loss": stage_loss}
if stage == sb.Stage.TRAIN:
self.train_stats = stage_stats
else:
stage_stats["ACC"] = self.acc_metric.summarize()
current_epoch = self.hparams.epoch_counter.current
valid_search_interval = self.hparams.valid_search_interval
if (
current_epoch % valid_search_interval == 0
or stage == sb.Stage.TEST
):
stage_stats["CER"] = self.cer_metric.summarize("error_rate")
# log stats and save checkpoint at end-of-epoch
if stage == sb.Stage.VALID and sb.utils.distributed.if_main_process():
# report different epoch stages according current stage
current_epoch = self.hparams.epoch_counter.current
if current_epoch <= self.hparams.stage_one_epochs:
lr = self.hparams.noam_annealing.current_lr
steps = self.hparams.noam_annealing.n_steps
optimizer = self.optimizer.__class__.__name__
else:
lr = self.hparams.lr_sgd
steps = -1
optimizer = self.optimizer.__class__.__name__
epoch_stats = {
"epoch": epoch,
"lr": lr,
"steps": steps,
"optimizer": optimizer,
}
self.hparams.train_logger.log_stats(
stats_meta=epoch_stats,
train_stats=self.train_stats,
valid_stats=stage_stats,
)
self.checkpointer.save_and_keep_only(
meta={"ACC": stage_stats["ACC"], "epoch": epoch},
max_keys=["ACC"],
num_to_keep=10,
)
elif stage == sb.Stage.TEST:
self.hparams.train_logger.log_stats(
stats_meta={"Epoch loaded": self.hparams.epoch_counter.current},
test_stats=stage_stats,
)
with open(self.hparams.cer_file, "w") as w:
self.cer_metric.write_stats(w)
# save the averaged checkpoint at the end of the evaluation stage
# delete the rest of the intermediate checkpoints
# ACC is set to 1.1 so checkpointer only keeps the averaged checkpoint
self.checkpointer.save_and_keep_only(
meta={"ACC": 1.1, "epoch": epoch},
max_keys=["ACC"],
num_to_keep=1,
)
def check_and_reset_optimizer(self):
"""reset the optimizer if training enters stage 2"""
current_epoch = self.hparams.epoch_counter.current
if not hasattr(self, "switched"):
self.switched = False
if isinstance(self.optimizer, torch.optim.SGD):
self.switched = True
if self.switched is True:
return
if current_epoch > self.hparams.stage_one_epochs:
self.optimizer = self.hparams.SGD(self.modules.parameters())
if self.checkpointer is not None:
self.checkpointer.add_recoverable("optimizer", self.optimizer)
self.switched = True
def on_fit_start(self):
"""Initialize the right optimizer on the training start"""
super().on_fit_start()
# if the model is resumed from stage two, reinitialize the optimizer
current_epoch = self.hparams.epoch_counter.current
current_optimizer = self.optimizer
if current_epoch > self.hparams.stage_one_epochs:
del self.optimizer
self.optimizer = self.hparams.SGD(self.modules.parameters())
# Load latest checkpoint to resume training if interrupted
if self.checkpointer is not None:
# do not reload the weights if training is interrupted right before stage 2
group = current_optimizer.param_groups[0]
if "momentum" not in group:
return
self.checkpointer.recover_if_possible(
device=torch.device(self.device)
)
def on_evaluate_start(self, max_key=None, min_key=None):
"""perform checkpoint averge if needed"""
super().on_evaluate_start()
ckpts = self.checkpointer.find_checkpoints(
max_key=max_key, min_key=min_key
)
ckpt = sb.utils.checkpoints.average_checkpoints(
ckpts, recoverable_name="model", device=self.device
)
self.hparams.model.load_state_dict(ckpt, strict=True)
self.hparams.model.eval()
def dataio_prepare(hparams):
    """Build the train/valid/test datasets plus their processing pipelines.

    Reads the CSV manifests referenced by ``hparams``, applies the requested
    duration-based sorting, registers the audio and text dynamic items, and
    (optionally) builds dynamic batch samplers. Returns
    ``(train_data, valid_data, test_data, tokenizer,
    train_batch_sampler, valid_batch_sampler)``.
    """
    data_folder = hparams["data_folder"]

    train_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["train_data"], replacements={"data_root": data_folder},
    )

    sorting = hparams["sorting"]
    if sorting in ("ascending", "descending"):
        # Sorting by duration speeds up training and improves results;
        # shuffling afterwards in the dataloader would defeat the purpose.
        train_data = train_data.filtered_sorted(
            sort_key="duration", reverse=(sorting == "descending")
        )
        hparams["train_dataloader_opts"]["shuffle"] = False
    elif sorting != "random":
        raise NotImplementedError(
            "sorting must be random, ascending or descending"
        )

    valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["valid_data"], replacements={"data_root": data_folder},
    )
    valid_data = valid_data.filtered_sorted(sort_key="duration")

    test_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["test_data"], replacements={"data_root": data_folder},
    )
    test_data = test_data.filtered_sorted(sort_key="duration")

    datasets = [train_data, valid_data, test_data]

    # Defining tokenizer and loading it
    tokenizer = hparams["tokenizer"]

    # 2. Define audio pipeline:
    @sb.utils.data_pipeline.takes("wav")
    @sb.utils.data_pipeline.provides("sig")
    def audio_pipeline(wav):
        return sb.dataio.dataio.read_audio(wav)

    sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline)

    # 3. Define text pipeline:
    @sb.utils.data_pipeline.takes("transcript")
    @sb.utils.data_pipeline.provides(
        "wrd", "tokens_list", "tokens_bos", "tokens_eos", "tokens"
    )
    def text_pipeline(transcript):
        yield transcript
        token_ids = tokenizer.encode_as_ids(transcript)
        yield token_ids
        yield torch.LongTensor([hparams["bos_index"]] + token_ids)
        yield torch.LongTensor(token_ids + [hparams["eos_index"]])
        yield torch.LongTensor(token_ids)

    sb.dataio.dataset.add_dynamic_item(datasets, text_pipeline)

    # 4. Set output:
    sb.dataio.dataset.set_output_keys(
        datasets, ["id", "sig", "wrd", "tokens_bos", "tokens_eos", "tokens"],
    )

    # 5. If Dynamic Batching is used, we instantiate the needed samplers.
    train_batch_sampler = None
    valid_batch_sampler = None
    if hparams["dynamic_batching"]:
        from speechbrain.dataio.sampler import DynamicBatchSampler  # noqa

        dyn_conf = hparams["dynamic_batch_sampler"]

        def _make_sampler(dataset):
            # Same sampler configuration for train and valid.
            return DynamicBatchSampler(
                dataset,
                dyn_conf["max_batch_len"],
                num_buckets=dyn_conf["num_buckets"],
                length_func=lambda x: x["duration"],
                shuffle=dyn_conf["shuffle_ex"],
                batch_ordering=dyn_conf["batch_ordering"],
            )

        train_batch_sampler = _make_sampler(train_data)
        valid_batch_sampler = _make_sampler(valid_data)

    return (
        train_data,
        valid_data,
        test_data,
        tokenizer,
        train_batch_sampler,
        valid_batch_sampler,
    )
if __name__ == "__main__":
    # CLI: parse the hparams file path and any command-line overrides.
    hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])
    with open(hparams_file) as fin:
        hparams = load_hyperpyyaml(fin, overrides)
    # If --distributed_launch then
    # create ddp_group with the right communication protocol
    sb.utils.distributed.ddp_init_group(run_opts)
    # Create experiment directory
    sb.create_experiment_directory(
        experiment_directory=hparams["output_folder"],
        hyperparams_to_save=hparams_file,
        overrides=overrides,
    )
    # 1. # Dataset prep (parsing Librispeech)
    from aishell_prepare import prepare_aishell  # noqa
    # multi-gpu (ddp) save data preparation
    # (run_on_main ensures only rank 0 writes the manifests)
    run_on_main(
        prepare_aishell,
        kwargs={
            "data_folder": hparams["data_folder"],
            "save_folder": hparams["output_folder"],
            "skip_prep": hparams["skip_prep"],
        },
    )
    # here we create the datasets objects as well as tokenization and encoding
    (
        train_data,
        valid_data,
        test_data,
        tokenizer,
        train_bsampler,
        valid_bsampler,
    ) = dataio_prepare(hparams)
    # We download and pretrain the tokenizer
    run_on_main(hparams["pretrainer"].collect_files)
    hparams["pretrainer"].load_collected(device=run_opts["device"])
    # Trainer initialization
    asr_brain = ASR(
        modules=hparams["modules"],
        opt_class=hparams["Adam"],
        hparams=hparams,
        run_opts=run_opts,
        checkpointer=hparams["checkpointer"],
    )
    # adding objects to trainer:
    # (compute_objectives relies on this tokenizer to decode hypotheses)
    asr_brain.tokenizer = tokenizer
    # Changing the samplers if dynamic batching is activated
    train_dataloader_opts = hparams["train_dataloader_opts"]
    valid_dataloader_opts = hparams["valid_dataloader_opts"]
    if train_bsampler is not None:
        # batch_sampler replaces batch_size/shuffle in the dataloader opts
        train_dataloader_opts = {
            "batch_sampler": train_bsampler,
            "num_workers": hparams["num_workers"],
        }
    if valid_bsampler is not None:
        valid_dataloader_opts = {"batch_sampler": valid_bsampler}
    # Training
    asr_brain.fit(
        asr_brain.hparams.epoch_counter,
        train_data,
        valid_data,
        train_loader_kwargs=train_dataloader_opts,
        valid_loader_kwargs=valid_dataloader_opts,
    )
    # Testing (loads/averages the best checkpoints via on_evaluate_start)
    asr_brain.evaluate(
        test_data, test_loader_kwargs=hparams["test_dataloader_opts"]
    )
| 17,133 | 35.147679 | 94 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.