id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
17,506 | import re
from unidecode import unidecode
from .numbers import normalize_numbers
def expand_abbreviations(text):
    """Expand every abbreviation listed in the module-level ``_abbreviations``
    table (pairs of compiled regex and replacement string) inside *text*."""
    result = text
    for pattern, expansion in _abbreviations:
        result = re.sub(pattern, expansion, result)
    return result
def expand_numbers(text):
    """Spell out numeric tokens in *text* (delegates to ``normalize_numbers``)."""
    expanded = normalize_numbers(text)
    return expanded
def lowercase(text):
    """Return *text* with every character lower-cased."""
    lowered = text.lower()
    return lowered
def collapse_whitespace(text):
    """Collapse every run matched by the module-level ``_whitespace_re``
    pattern in *text* into a single space."""
    collapsed = re.sub(_whitespace_re, " ", text)
    return collapsed
def convert_to_ascii(text):
    """Transliterate *text* to its closest ASCII representation via ``unidecode``."""
    ascii_text = unidecode(text)
    return ascii_text
The provided code snippet includes necessary dependencies for implementing the `english_cleaners` function. Write a Python function `def english_cleaners(text)` to solve the following problem:
Pipeline for English text, including number and abbreviation expansion.
Here is the function:
def english_cleaners(text):
    """Pipeline for English text, including number and abbreviation expansion."""
    # Apply each cleaning stage in order; each stage takes and returns a string.
    for stage in (
        convert_to_ascii,
        lowercase,
        expand_numbers,
        expand_abbreviations,
        collapse_whitespace,
    ):
        text = stage(text)
    return text
17,507 | from pathlib import Path
from typing import List, Tuple
import os
import numpy as np
import torch
from text.symbol_table import SymbolTable
from text import text_to_sequence
class TextTokenCollator:
    """Map token strings to integer ids and batch token sequences.

    The vocabulary is built as ``[pad] (+ [bos]) (+ [eos]) + sorted(text_tokens)``,
    so the pad symbol always receives id 0.

    Args:
        text_tokens: Vocabulary of token strings (sorted internally).
        add_eos: Append ``eos_symbol`` to every sequence.
        add_bos: Prepend ``bos_symbol`` to every sequence.
        pad_symbol: Symbol used to right-pad batched sequences.
        bos_symbol: Beginning-of-sequence symbol.
        eos_symbol: End-of-sequence symbol.
    """

    def __init__(
        self,
        text_tokens: List[str],
        add_eos: bool = True,
        add_bos: bool = True,
        pad_symbol: str = "<pad>",
        bos_symbol: str = "<bos>",
        eos_symbol: str = "<eos>",
    ):
        self.pad_symbol = pad_symbol
        self.add_eos = add_eos
        self.add_bos = add_bos
        self.bos_symbol = bos_symbol
        self.eos_symbol = eos_symbol
        # Pad first (id 0), then the special symbols, then the sorted vocabulary.
        unique_tokens = [pad_symbol]
        if add_bos:
            unique_tokens.append(bos_symbol)
        if add_eos:
            unique_tokens.append(eos_symbol)
        unique_tokens.extend(sorted(text_tokens))
        self.token2idx = {token: idx for idx, token in enumerate(unique_tokens)}
        self.idx2token = unique_tokens

    def index(self, tokens_list: List[str]) -> Tuple[torch.Tensor, torch.Tensor]:
        """Convert a batch of token sequences into a padded id tensor.

        Returns:
            tokens: int64 tensor of shape (batch, max_len), right-padded with
                the pad id (0).
            tokens_lens: IntTensor of the unpadded lengths (incl. bos/eos).

        Raises:
            AssertionError: If any token is not in the vocabulary.
        """
        seqs, seq_lens = [], []
        for tokens in tokens_list:
            # Fail fast on out-of-vocabulary tokens.
            assert all(
                s in self.token2idx for s in tokens
            ), f"Unknown token in {tokens!r}"
            seq = (
                ([self.bos_symbol] if self.add_bos else [])
                + list(tokens)
                + ([self.eos_symbol] if self.add_eos else [])
            )
            seqs.append(seq)
            seq_lens.append(len(seq))
        max_len = max(seq_lens)
        for seq, seq_len in zip(seqs, seq_lens):
            seq.extend([self.pad_symbol] * (max_len - seq_len))
        tokens = torch.from_numpy(
            np.array(
                [[self.token2idx[token] for token in seq] for seq in seqs],
                dtype=np.int64,
            )
        )
        tokens_lens = torch.IntTensor(seq_lens)
        return tokens, tokens_lens

    def __call__(self, text):
        """Convert a single iterable of tokens (e.g. a string of phonemes) into
        (token_ids, token_len), with bos/eos applied per the constructor flags."""
        tokens_seq = [p for p in text]
        seq = (
            ([self.bos_symbol] if self.add_bos else [])
            + tokens_seq
            + ([self.eos_symbol] if self.add_eos else [])
        )
        token_ids = [self.token2idx[token] for token in seq]
        # bool add_eos/add_bos count as 0/1 in the arithmetic below.
        token_lens = len(tokens_seq) + self.add_eos + self.add_bos
        return token_ids, token_lens
@dataclass
class SymbolTable(Generic[Symbol]):
    """SymbolTable that maps symbol IDs, found on the FSA arcs to
    actual objects. These objects can be arbitrary Python objects
    that can serve as keys in a dictionary (i.e. they need to be
    hashable and immutable).

    The SymbolTable can only be read to/written from disk if the
    symbols are strings.

    NOTE(review): the ``@dataclass``, ``@staticmethod`` and ``@property``
    decorators are restored here — ``field(default_factory=...)`` requires a
    dataclass, ``SymbolTable.from_file(...)`` is called on the class, and
    ``.symbols`` is accessed as an attribute elsewhere in this file.
    """

    # Map an integer to a symbol.
    _id2sym: Dict[int, Symbol] = field(default_factory=dict)
    # Map a symbol to an integer.
    _sym2id: Dict[Symbol, int] = field(default_factory=dict)
    # Helper field for efficiently assigning ids to newly added symbols.
    _next_available_id: int = 1
    # Null symbol, always mapped to index 0.
    eps: Symbol = "<eps>"

    def __post_init__(self):
        # Sanity-check that the two maps are mutual inverses and that id 0,
        # if present, is the epsilon symbol.
        assert all(self._sym2id[sym] == idx for idx, sym in self._id2sym.items())
        assert all(self._id2sym[idx] == sym for sym, idx in self._sym2id.items())
        assert 0 not in self._id2sym or self._id2sym[0] == self.eps
        self._next_available_id = max(self._id2sym, default=0) + 1
        self._id2sym.setdefault(0, self.eps)
        self._sym2id.setdefault(self.eps, 0)

    @staticmethod
    def from_str(s: str) -> "SymbolTable":
        """Build a symbol table from a string.

        The string consists of lines. Every line has two fields separated
        by space(s), tab(s) or both. The first field is the symbol and the
        second the integer id of the symbol.

        Args:
          s:
            The input string with the format described above.
        Returns:
          An instance of :class:`SymbolTable`.
        """
        id2sym: Dict[int, str] = dict()
        sym2id: Dict[str, int] = dict()
        for line in s.split("\n"):
            fields = line.split()
            if len(fields) == 0:
                continue  # skip empty lines
            assert (
                len(fields) == 2
            ), f"Expect a line with 2 fields. Given: {len(fields)}"
            sym, idx = fields[0], int(fields[1])
            assert sym not in sym2id, f"Duplicated symbol {sym}"
            assert idx not in id2sym, f"Duplicated id {idx}"
            id2sym[idx] = sym
            sym2id[sym] = idx
        eps = id2sym.get(0, "<eps>")
        return SymbolTable(_id2sym=id2sym, _sym2id=sym2id, eps=eps)

    @staticmethod
    def from_file(filename: str) -> "SymbolTable":
        """Build a symbol table from file.

        Every line in the symbol table file has two fields separated by
        space(s), tab(s) or both. The following is an example file:

        .. code-block::

            <eps> 0
            a 1
            b 2
            c 3

        Args:
          filename:
            Name of the symbol table file. Its format is documented above.
        Returns:
          An instance of :class:`SymbolTable`.
        """
        with open(filename, "r", encoding="utf-8") as f:
            return SymbolTable.from_str(f.read().strip())

    def to_str(self) -> str:
        """
        Returns:
          Return a string representation of this object. You can pass
          it to the method ``from_str`` to recreate an identical object.
        """
        s = ""
        for idx, symbol in sorted(self._id2sym.items()):
            s += f"{symbol} {idx}\n"
        return s

    def to_file(self, filename: str):
        """Serialize the SymbolTable to a file.

        Every line in the symbol table file has two fields separated by
        space(s), tab(s) or both. The following is an example file:

        .. code-block::

            <eps> 0
            a 1
            b 2
            c 3

        Args:
          filename:
            Name of the symbol table file. Its format is documented above.
        """
        with open(filename, "w") as f:
            for idx, symbol in sorted(self._id2sym.items()):
                print(symbol, idx, file=f)

    def add(self, symbol: Symbol, index: Optional[int] = None) -> int:
        """Add a new symbol to the SymbolTable.

        Args:
          symbol:
            The symbol to be added.
          index:
            Optional int id to which the symbol should be assigned.
            If it is not available, a ValueError will be raised.
        Returns:
          The int id to which the symbol has been assigned.
        """
        # Already in the table? Return its ID.
        if symbol in self._sym2id:
            return self._sym2id[symbol]
        # Specific ID not provided - use next available.
        if index is None:
            index = self._next_available_id
        # Specific ID provided but not available.
        if index in self._id2sym:
            raise ValueError(
                f"Cannot assign id '{index}' to '{symbol}' - "
                f"already occupied by {self._id2sym[index]}"
            )
        self._sym2id[symbol] = index
        self._id2sym[index] = symbol
        # Update next available ID if needed
        if self._next_available_id <= index:
            self._next_available_id = index + 1
        return index

    def get(self, k: Union[int, Symbol]) -> Union[Symbol, int]:
        """Get a symbol for an id or get an id for a symbol

        Args:
          k:
            If it is an id, it tries to find the symbol corresponding
            to the id; if it is a symbol, it tries to find the id
            corresponding to the symbol.
        Returns:
          An id or a symbol depending on the given `k`.
        """
        if isinstance(k, int):
            return self._id2sym[k]
        else:
            return self._sym2id[k]

    def merge(self, other: "SymbolTable") -> "SymbolTable":
        """Create a union of two SymbolTables.

        Raises an AssertionError if the same IDs are occupied by
        different symbols.

        Args:
          other:
            A symbol table to merge with ``self``.
        Returns:
          A new symbol table.
        """
        self._check_compatible(other)
        return SymbolTable(
            _id2sym={**self._id2sym, **other._id2sym},
            _sym2id={**self._sym2id, **other._sym2id},
            eps=self.eps,
        )

    def _check_compatible(self, other: "SymbolTable") -> None:
        # Epsilon compatibility
        assert self.eps == other.eps, (
            f"Mismatched epsilon symbol: " f"{self.eps} != {other.eps}"
        )
        # IDs compatibility
        common_ids = set(self._id2sym).intersection(other._id2sym)
        for idx in common_ids:
            assert self[idx] == other[idx], (
                f"ID conflict for id: {idx}, "
                f'self[idx] = "{self[idx]}", '
                f'other[idx] = "{other[idx]}"'
            )
        # Symbols compatibility
        common_symbols = set(self._sym2id).intersection(other._sym2id)
        for sym in common_symbols:
            assert self[sym] == other[sym], (
                f"ID conflict for id: {sym}, "
                f'self[sym] = "{self[sym]}", '
                f'other[sym] = "{other[sym]}"'
            )

    def __getitem__(self, item: Union[int, Symbol]) -> Union[Symbol, int]:
        return self.get(item)

    def __contains__(self, item: Union[int, Symbol]) -> bool:
        if isinstance(item, int):
            return item in self._id2sym
        else:
            return item in self._sym2id

    def __len__(self) -> int:
        return len(self._id2sym)

    def __eq__(self, other: "SymbolTable") -> bool:
        if len(self) != len(other):
            return False
        for s in self.symbols:
            if self[s] != other[s]:
                return False
        return True

    @property
    def ids(self) -> List[int]:
        """Returns a list of integer IDs corresponding to the symbols."""
        ans = list(self._id2sym.keys())
        ans.sort()
        return ans

    @property
    def symbols(self) -> List[Symbol]:
        """Returns a list of symbols (e.g., strings) corresponding to
        the integer IDs.
        """
        ans = list(self._sym2id.keys())
        ans.sort()
        return ans
def get_text_token_collater(text_tokens_file: str) -> Tuple[TextTokenCollator, dict]:
    """Build a :class:`TextTokenCollator` from a symbol-table file.

    Args:
        text_tokens_file: Path to a symbol table readable by
            ``SymbolTable.from_file`` (one "symbol id" pair per line).

    Returns:
        A 2-tuple ``(collater, token2idx)`` — the collator and its
        token-to-id mapping.  (The original annotation claimed a bare
        ``TextTokenCollator``; the function has always returned a tuple.)
    """
    text_tokens_path = Path(text_tokens_file)
    unique_tokens = SymbolTable.from_file(text_tokens_path)
    collater = TextTokenCollator(unique_tokens.symbols, add_bos=True, add_eos=True)
    return collater, collater.token2idx
17,508 | import re
_alt_re = re.compile(r"\([0-9]+\)")
def _get_pronunciation(s):
    """Normalize a pronunciation field into a space-joined symbol string.

    Returns None when any symbol is not in the module-level
    ``_valid_symbol_set``.
    """
    symbols = s.strip().split(" ")
    if any(symbol not in _valid_symbol_set for symbol in symbols):
        return None
    return " ".join(symbols)
def _parse_cmudict(file):
    """Parse a CMUdict-style file object into {word: [pronunciations]}.

    Only lines starting with an uppercase letter or an apostrophe are
    considered entries; alternate-pronunciation markers like ``(1)`` are
    stripped from the word via ``_alt_re``.

    NOTE(review): the line is split on a single space, so ``parts[1]`` holds
    everything after the first space only if the word/pronunciation delimiter
    is exactly one space. Upstream CMUdict files separate the word from the
    pronunciation with two spaces — confirm the expected input format.
    """
    cmudict = {}
    for line in file:
        # Entry lines begin with A-Z or an apostrophe; everything else
        # (comments, blank lines) is skipped.
        if len(line) and (line[0] >= "A" and line[0] <= "Z" or line[0] == "'"):
            parts = line.split(" ")
            word = re.sub(_alt_re, "", parts[0])
            # _get_pronunciation returns None for invalid symbols;
            # such entries are dropped.
            pronunciation = _get_pronunciation(parts[1])
            if pronunciation:
                if word in cmudict:
                    cmudict[word].append(pronunciation)
                else:
                    cmudict[word] = [pronunciation]
    return cmudict
17,509 | import argparse
import torch
from models.vocoders.gan.gan_vocoder_trainer import GANVocoderTrainer
from models.vocoders.diffusion.diffusion_vocoder_trainer import DiffusionVocoderTrainer
from utils.util import load_config
class GANVocoderTrainer(VocoderTrainer):
def __init__(self, args, cfg):
    """Initialize the GAN vocoder trainer.

    Sets up, in order: the accelerator, logging, checkpoint directory,
    training counters, dataloaders, generator/discriminator models,
    optimizers and schedulers, accelerator preparation, criterions, and
    (optionally) checkpoint resumption.

    Args:
        args: Parsed CLI arguments (exp_name, log_level, resume_type,
            checkpoint, ...).
        cfg: Experiment configuration object.
    """
    super().__init__()
    self.args = args
    self.cfg = cfg
    cfg.exp_name = args.exp_name
    # Init accelerator
    self._init_accelerator()
    self.accelerator.wait_for_everyone()
    # Init logger (main process creates it first to avoid races)
    with self.accelerator.main_process_first():
        self.logger = get_logger(args.exp_name, log_level=args.log_level)
    self.logger.info("=" * 56)
    self.logger.info("||\t\t" + "New training process started." + "\t\t||")
    self.logger.info("=" * 56)
    self.logger.info("\n")
    self.logger.debug(f"Using {args.log_level.upper()} logging level.")
    self.logger.info(f"Experiment name: {args.exp_name}")
    self.logger.info(f"Experiment directory: {self.exp_dir}")
    self.checkpoint_dir = os.path.join(self.exp_dir, "checkpoint")
    if self.accelerator.is_main_process:
        os.makedirs(self.checkpoint_dir, exist_ok=True)
    self.logger.debug(f"Checkpoint directory: {self.checkpoint_dir}")
    # Init training status counters
    self.batch_count: int = 0
    self.step: int = 0
    self.epoch: int = 0
    # max_epoch <= 0 means "train forever"
    self.max_epoch = (
        self.cfg.train.max_epoch if self.cfg.train.max_epoch > 0 else float("inf")
    )
    self.logger.info(
        "Max epoch: {}".format(
            self.max_epoch if self.max_epoch < float("inf") else "Unlimited"
        )
    )
    # Check potential errors
    # NOTE(review): save_checkpoint_stride / checkpoints_path / run_eval are
    # only set on the main process here — non-main processes must not read
    # them (train_loop short-circuits on is_main_process before doing so).
    if self.accelerator.is_main_process:
        self._check_basic_configs()
        self.save_checkpoint_stride = self.cfg.train.save_checkpoint_stride
        self.checkpoints_path = [
            [] for _ in range(len(self.save_checkpoint_stride))
        ]
        self.run_eval = self.cfg.train.run_eval
    # Set random seed
    with self.accelerator.main_process_first():
        start = time.monotonic_ns()
        self._set_random_seed(self.cfg.train.random_seed)
        end = time.monotonic_ns()
        self.logger.debug(
            f"Setting random seed done in {(end - start) / 1e6:.2f}ms"
        )
        self.logger.debug(f"Random seed: {self.cfg.train.random_seed}")
    # Build dataloader
    with self.accelerator.main_process_first():
        self.logger.info("Building dataset...")
        start = time.monotonic_ns()
        self.train_dataloader, self.valid_dataloader = self._build_dataloader()
        end = time.monotonic_ns()
        self.logger.info(f"Building dataset done in {(end - start) / 1e6:.2f}ms")
    # Build model (one generator, several named discriminators)
    with self.accelerator.main_process_first():
        self.logger.info("Building model...")
        start = time.monotonic_ns()
        self.generator, self.discriminators = self._build_model()
        end = time.monotonic_ns()
        self.logger.debug(self.generator)
        for _, discriminator in self.discriminators.items():
            self.logger.debug(discriminator)
        self.logger.info(f"Building model done in {(end - start) / 1e6:.2f}ms")
        self.logger.info(f"Model parameters: {self._count_parameters()/1e6:.2f}M")
    # Build optimizers and schedulers
    with self.accelerator.main_process_first():
        self.logger.info("Building optimizer and scheduler...")
        start = time.monotonic_ns()
        (
            self.generator_optimizer,
            self.discriminator_optimizer,
        ) = self._build_optimizer()
        (
            self.generator_scheduler,
            self.discriminator_scheduler,
        ) = self._build_scheduler()
        end = time.monotonic_ns()
        self.logger.info(
            f"Building optimizer and scheduler done in {(end - start) / 1e6:.2f}ms"
        )
    # Accelerator preparing (wraps dataloaders, generator, optimizers,
    # schedulers; discriminators are prepared individually below)
    self.logger.info("Initializing accelerate...")
    start = time.monotonic_ns()
    (
        self.train_dataloader,
        self.valid_dataloader,
        self.generator,
        self.generator_optimizer,
        self.discriminator_optimizer,
        self.generator_scheduler,
        self.discriminator_scheduler,
    ) = self.accelerator.prepare(
        self.train_dataloader,
        self.valid_dataloader,
        self.generator,
        self.generator_optimizer,
        self.discriminator_optimizer,
        self.generator_scheduler,
        self.discriminator_scheduler,
    )
    for key, discriminator in self.discriminators.items():
        self.discriminators[key] = self.accelerator.prepare_model(discriminator)
    end = time.monotonic_ns()
    self.logger.info(f"Initializing accelerate done in {(end - start) / 1e6:.2f}ms")
    # Build criterions
    with self.accelerator.main_process_first():
        self.logger.info("Building criterion...")
        start = time.monotonic_ns()
        self.criterions = self._build_criterion()
        end = time.monotonic_ns()
        self.logger.info(f"Building criterion done in {(end - start) / 1e6:.2f}ms")
    # Resume checkpoints
    with self.accelerator.main_process_first():
        if args.resume_type:
            self.logger.info("Resuming from checkpoint...")
            start = time.monotonic_ns()
            ckpt_path = Path(args.checkpoint)
            # A path whose last component matches the checkpoint naming
            # pattern is loaded directly; otherwise it is treated as a
            # directory to search.
            if self._is_valid_pattern(ckpt_path.parts[-1]):
                ckpt_path = self._load_model(
                    None, args.checkpoint, args.resume_type
                )
            else:
                ckpt_path = self._load_model(
                    args.checkpoint, resume_type=args.resume_type
                )
            end = time.monotonic_ns()
            self.logger.info(
                f"Resuming from checkpoint done in {(end - start) / 1e6:.2f}ms"
            )
            self.checkpoints_path = json.load(
                open(os.path.join(ckpt_path, "ckpts.json"), "r")
            )
    self.checkpoint_dir = os.path.join(self.exp_dir, "checkpoint")
    if self.accelerator.is_main_process:
        os.makedirs(self.checkpoint_dir, exist_ok=True)
    self.logger.debug(f"Checkpoint directory: {self.checkpoint_dir}")
    # Save config
    self.config_save_path = os.path.join(self.exp_dir, "args.json")
def _build_dataset(self):
    """Return the dataset and collator classes used to build the dataloaders."""
    return GANVocoderDataset, GANVocoderCollator
def _build_criterion(self):
    """Construct the loss modules requested in ``cfg.train.criterions``.

    Defines one nn.Module per loss family (feature matching, discriminator,
    generator adversarial, mel reconstruction, waveform, phase, amplitude,
    STFT consistency), each of which branches on ``cfg.model.generator`` to
    pick the weighting scheme of that vocoder family.

    Returns:
        dict mapping criterion name -> instantiated loss module.

    Raises:
        NotImplementedError: For an unknown criterion key or generator type.
    """

    class feature_criterion(torch.nn.Module):
        """Feature-matching loss over discriminator intermediate feature maps."""

        def __init__(self, cfg):
            super(feature_criterion, self).__init__()
            self.cfg = cfg
            self.l1Loss = torch.nn.L1Loss(reduction="mean")
            self.l2Loss = torch.nn.MSELoss(reduction="mean")
            self.relu = torch.nn.ReLU()

        def __call__(self, fmap_r, fmap_g):
            # fmap_r / fmap_g: per-discriminator lists of feature maps for
            # real and generated audio.
            loss = 0
            if self.cfg.model.generator in [
                "hifigan",
                "nsfhifigan",
                "bigvgan",
                "apnet",
            ]:
                for dr, dg in zip(fmap_r, fmap_g):
                    for rl, gl in zip(dr, dg):
                        loss += torch.mean(torch.abs(rl - gl))
                loss = loss * 2
            elif self.cfg.model.generator in ["melgan"]:
                for dr, dg in zip(fmap_r, fmap_g):
                    for rl, gl in zip(dr, dg):
                        loss += self.l1Loss(rl, gl)
                loss = loss * 10
            elif self.cfg.model.generator in ["codec"]:
                # Relative L1, normalized by the mean magnitude of the real
                # feature map, then averaged over all maps.
                for dr, dg in zip(fmap_r, fmap_g):
                    for rl, gl in zip(dr, dg):
                        loss = loss + self.l1Loss(rl, gl) / torch.mean(
                            torch.abs(rl)
                        )
                KL_scale = len(fmap_r) * len(fmap_r[0])
                loss = 3 * loss / KL_scale
            else:
                raise NotImplementedError
            return loss

    class discriminator_criterion(torch.nn.Module):
        """Discriminator loss (LSGAN for hifigan-family, hinge for melgan/codec)."""

        def __init__(self, cfg):
            super(discriminator_criterion, self).__init__()
            self.cfg = cfg
            self.l1Loss = torch.nn.L1Loss(reduction="mean")
            self.l2Loss = torch.nn.MSELoss(reduction="mean")
            self.relu = torch.nn.ReLU()

        def __call__(self, disc_real_outputs, disc_generated_outputs):
            loss = 0
            r_losses = []
            g_losses = []
            if self.cfg.model.generator in [
                "hifigan",
                "nsfhifigan",
                "bigvgan",
                "apnet",
            ]:
                # Least-squares GAN: real -> 1, fake -> 0.
                for dr, dg in zip(disc_real_outputs, disc_generated_outputs):
                    r_loss = torch.mean((1 - dr) ** 2)
                    g_loss = torch.mean(dg**2)
                    loss += r_loss + g_loss
                    r_losses.append(r_loss.item())
                    g_losses.append(g_loss.item())
            elif self.cfg.model.generator in ["melgan"]:
                # Hinge loss.
                for dr, dg in zip(disc_real_outputs, disc_generated_outputs):
                    r_loss = torch.mean(self.relu(1 - dr))
                    g_loss = torch.mean(self.relu(1 + dg))
                    loss = loss + r_loss + g_loss
                    r_losses.append(r_loss.item())
                    g_losses.append(g_loss.item())
            elif self.cfg.model.generator in ["codec"]:
                # Hinge loss, averaged over the number of discriminator outputs.
                for dr, dg in zip(disc_real_outputs, disc_generated_outputs):
                    r_loss = torch.mean(self.relu(1 - dr))
                    g_loss = torch.mean(self.relu(1 + dg))
                    loss = loss + r_loss + g_loss
                    r_losses.append(r_loss.item())
                    g_losses.append(g_loss.item())
                loss = loss / len(disc_real_outputs)
            else:
                raise NotImplementedError
            return loss, r_losses, g_losses

    class generator_criterion(torch.nn.Module):
        """Adversarial loss for the generator, per generator family."""

        def __init__(self, cfg):
            super(generator_criterion, self).__init__()
            self.cfg = cfg
            self.l1Loss = torch.nn.L1Loss(reduction="mean")
            self.l2Loss = torch.nn.MSELoss(reduction="mean")
            self.relu = torch.nn.ReLU()

        def __call__(self, disc_outputs):
            loss = 0
            gen_losses = []
            if self.cfg.model.generator in [
                "hifigan",
                "nsfhifigan",
                "bigvgan",
                "apnet",
            ]:
                for dg in disc_outputs:
                    l = torch.mean((1 - dg) ** 2)
                    gen_losses.append(l)
                    loss += l
            elif self.cfg.model.generator in ["melgan"]:
                for dg in disc_outputs:
                    l = -torch.mean(dg)
                    gen_losses.append(l)
                    loss += l
            elif self.cfg.model.generator in ["codec"]:
                for dg in disc_outputs:
                    l = torch.mean(self.relu(1 - dg)) / len(disc_outputs)
                    gen_losses.append(l)
                    loss += l
            else:
                raise NotImplementedError
            return loss, gen_losses

    class mel_criterion(torch.nn.Module):
        """L1 mel-spectrogram reconstruction loss (weight 45)."""

        def __init__(self, cfg):
            super(mel_criterion, self).__init__()
            self.cfg = cfg
            self.l1Loss = torch.nn.L1Loss(reduction="mean")
            self.l2Loss = torch.nn.MSELoss(reduction="mean")
            self.relu = torch.nn.ReLU()

        def __call__(self, y_gt, y_pred):
            loss = 0
            if self.cfg.model.generator in [
                "hifigan",
                "nsfhifigan",
                "bigvgan",
                "melgan",
                "codec",
                "apnet",
            ]:
                y_gt_mel = extract_mel_features(y_gt, self.cfg.preprocess)
                y_pred_mel = extract_mel_features(
                    y_pred.squeeze(1), self.cfg.preprocess
                )
                loss = self.l1Loss(y_gt_mel, y_pred_mel) * 45
            else:
                raise NotImplementedError
            return loss

    class wav_criterion(torch.nn.Module):
        """Time-domain waveform loss; norm and weight depend on the generator."""

        def __init__(self, cfg):
            super(wav_criterion, self).__init__()
            self.cfg = cfg
            self.l1Loss = torch.nn.L1Loss(reduction="mean")
            self.l2Loss = torch.nn.MSELoss(reduction="mean")
            self.relu = torch.nn.ReLU()

        def __call__(self, y_gt, y_pred):
            loss = 0
            if self.cfg.model.generator in [
                "hifigan",
                "nsfhifigan",
                "bigvgan",
                "apnet",
            ]:
                loss = self.l2Loss(y_gt, y_pred.squeeze(1)) * 100
            elif self.cfg.model.generator in ["melgan"]:
                loss = self.l1Loss(y_gt, y_pred.squeeze(1)) / 10
            elif self.cfg.model.generator in ["codec"]:
                loss = self.l1Loss(y_gt, y_pred.squeeze(1)) + self.l2Loss(
                    y_gt, y_pred.squeeze(1)
                )
                loss /= 10
            else:
                raise NotImplementedError
            return loss

    class phase_criterion(torch.nn.Module):
        """Anti-wrapping phase loss: instantaneous phase + group delay +
        phase time difference terms (weight 100 each, summed)."""

        def __init__(self, cfg):
            super(phase_criterion, self).__init__()
            self.cfg = cfg
            self.l1Loss = torch.nn.L1Loss(reduction="mean")
            self.l2Loss = torch.nn.MSELoss(reduction="mean")
            self.relu = torch.nn.ReLU()

        def __call__(self, phase_gt, phase_pred):
            n_fft = self.cfg.preprocess.n_fft
            frames = phase_gt.size()[-1]
            # First-difference operator along the frequency axis
            # (superdiagonal minus diagonal).
            GD_matrix = (
                torch.triu(torch.ones(n_fft // 2 + 1, n_fft // 2 + 1), diagonal=1)
                - torch.triu(torch.ones(n_fft // 2 + 1, n_fft // 2 + 1), diagonal=2)
                - torch.eye(n_fft // 2 + 1)
            )
            GD_matrix = GD_matrix.to(phase_pred.device)
            GD_r = torch.matmul(phase_gt.permute(0, 2, 1), GD_matrix)
            GD_g = torch.matmul(phase_pred.permute(0, 2, 1), GD_matrix)
            # First-difference operator along the time (frame) axis.
            PTD_matrix = (
                torch.triu(torch.ones(frames, frames), diagonal=1)
                - torch.triu(torch.ones(frames, frames), diagonal=2)
                - torch.eye(frames)
            )
            PTD_matrix = PTD_matrix.to(phase_pred.device)
            PTD_r = torch.matmul(phase_gt, PTD_matrix)
            PTD_g = torch.matmul(phase_pred, PTD_matrix)
            # -cos(delta) is minimized when the phases agree modulo 2*pi.
            IP_loss = torch.mean(-torch.cos(phase_gt - phase_pred))
            GD_loss = torch.mean(-torch.cos(GD_r - GD_g))
            PTD_loss = torch.mean(-torch.cos(PTD_r - PTD_g))
            return 100 * (IP_loss + GD_loss + PTD_loss)

    class amplitude_criterion(torch.nn.Module):
        """MSE on log-amplitude spectra (weight 45)."""

        def __init__(self, cfg):
            super(amplitude_criterion, self).__init__()
            self.cfg = cfg
            self.l1Loss = torch.nn.L1Loss(reduction="mean")
            self.l2Loss = torch.nn.MSELoss(reduction="mean")
            self.relu = torch.nn.ReLU()

        def __call__(self, log_amplitude_gt, log_amplitude_pred):
            amplitude_loss = self.l2Loss(log_amplitude_gt, log_amplitude_pred)
            return 45 * amplitude_loss

    class consistency_criterion(torch.nn.Module):
        """STFT consistency loss between predicted and re-analyzed
        real/imaginary spectra, plus L1 to ground truth."""

        def __init__(self, cfg):
            super(consistency_criterion, self).__init__()
            self.cfg = cfg
            self.l1Loss = torch.nn.L1Loss(reduction="mean")
            self.l2Loss = torch.nn.MSELoss(reduction="mean")
            self.relu = torch.nn.ReLU()

        def __call__(
            self,
            rea_gt,
            rea_pred,
            rea_pred_final,
            imag_gt,
            imag_pred,
            imag_pred_final,
        ):
            # Consistency between the predicted spectrum and the spectrum of
            # the synthesized waveform.
            C_loss = torch.mean(
                torch.mean(
                    (rea_pred - rea_pred_final) ** 2
                    + (imag_pred - imag_pred_final) ** 2,
                    (1, 2),
                )
            )
            L_R = self.l1Loss(rea_gt, rea_pred)
            L_I = self.l1Loss(imag_gt, imag_pred)
            return 20 * (C_loss + 2.25 * (L_R + L_I))

    # Instantiate only the criterions requested by the config.
    criterions = dict()
    for key in self.cfg.train.criterions:
        if key == "feature":
            criterions["feature"] = feature_criterion(self.cfg)
        elif key == "discriminator":
            criterions["discriminator"] = discriminator_criterion(self.cfg)
        elif key == "generator":
            criterions["generator"] = generator_criterion(self.cfg)
        elif key == "mel":
            criterions["mel"] = mel_criterion(self.cfg)
        elif key == "wav":
            criterions["wav"] = wav_criterion(self.cfg)
        elif key == "phase":
            criterions["phase"] = phase_criterion(self.cfg)
        elif key == "amplitude":
            criterions["amplitude"] = amplitude_criterion(self.cfg)
        elif key == "consistency":
            criterions["consistency"] = consistency_criterion(self.cfg)
        else:
            raise NotImplementedError
    return criterions
def _build_model(self):
    """Instantiate the generator selected by the config and one
    discriminator per configured discriminator key."""
    generator = supported_generators[self.cfg.model.generator](self.cfg)
    discriminators = {
        name: supported_discriminators[name](self.cfg)
        for name in self.cfg.model.discriminators
    }
    return generator, discriminators
def _build_optimizer(self):
    """Create AdamW optimizers: one for the generator, one shared across
    all discriminators, both using the config's lr and betas."""
    adamw_cfg = self.cfg.train.adamw
    betas = (adamw_cfg.adam_b1, adamw_cfg.adam_b2)
    generator_optimizer = AdamW(
        [dict(params=self.generator.parameters())],
        lr=adamw_cfg.lr,
        betas=betas,
    )
    # One param group per discriminator, all driven by a single optimizer.
    discriminator_param_groups = [
        dict(params=module.parameters())
        for module in self.discriminators.values()
    ]
    discriminator_optimizer = AdamW(
        discriminator_param_groups,
        lr=adamw_cfg.lr,
        betas=betas,
    )
    return generator_optimizer, discriminator_optimizer
def _build_scheduler(self):
    """Create exponential-decay LR schedulers for both optimizers,
    resuming from the current epoch."""
    decay = self.cfg.train.exponential_lr.lr_decay
    resume_epoch = self.epoch - 1
    generator_scheduler = ExponentialLR(
        self.generator_optimizer,
        gamma=decay,
        last_epoch=resume_epoch,
    )
    discriminator_scheduler = ExponentialLR(
        self.discriminator_optimizer,
        gamma=decay,
        last_epoch=resume_epoch,
    )
    return generator_scheduler, discriminator_scheduler
def train_loop(self):
    """Training process: per epoch, train + validate, step schedulers,
    checkpoint on the configured strides, and optionally render eval audio.

    NOTE(review): ``save_checkpoint`` and ``self.save_checkpoint_stride`` are
    only defined on the main process (see __init__); the
    ``is_main_process and ...`` short-circuits below keep other ranks from
    touching them. ``valid_total_loss`` in the final save assumes at least
    one epoch ran — confirm ``max_epoch >= 1``.
    """
    self.accelerator.wait_for_everyone()
    # Dump config
    if self.accelerator.is_main_process:
        self._dump_cfg(self.config_save_path)
    self.generator.train()
    for key in self.discriminators.keys():
        self.discriminators[key].train()
    self.generator_optimizer.zero_grad()
    self.discriminator_optimizer.zero_grad()
    # Sync and start training
    self.accelerator.wait_for_everyone()
    while self.epoch < self.max_epoch:
        self.logger.info("\n")
        self.logger.info("-" * 32)
        self.logger.info("Epoch {}: ".format(self.epoch))
        # Train and Validate
        train_total_loss, train_losses = self._train_epoch()
        for key, loss in train_losses.items():
            self.logger.info(" |- Train/{} Loss: {:.6f}".format(key, loss))
            self.accelerator.log(
                {"Epoch/Train {} Loss".format(key): loss},
                step=self.epoch,
            )
        valid_total_loss, valid_losses = self._valid_epoch()
        for key, loss in valid_losses.items():
            self.logger.info(" |- Valid/{} Loss: {:.6f}".format(key, loss))
            self.accelerator.log(
                {"Epoch/Valid {} Loss".format(key): loss},
                step=self.epoch,
            )
        self.accelerator.log(
            {
                "Epoch/Train Total Loss": train_total_loss,
                "Epoch/Valid Total Loss": valid_total_loss,
            },
            step=self.epoch,
        )
        # Update scheduler (once per epoch, after validation)
        self.accelerator.wait_for_everyone()
        self.generator_scheduler.step()
        self.discriminator_scheduler.step()
        # Check save checkpoint interval; run_eval is OR-ed across all
        # strides that fire this epoch.
        run_eval = False
        if self.accelerator.is_main_process:
            save_checkpoint = False
            for i, num in enumerate(self.save_checkpoint_stride):
                if self.epoch % num == 0:
                    save_checkpoint = True
                    run_eval |= self.run_eval[i]
        # Save checkpoints (main process only)
        self.accelerator.wait_for_everyone()
        if self.accelerator.is_main_process and save_checkpoint:
            path = os.path.join(
                self.checkpoint_dir,
                "epoch-{:04d}_step-{:07d}_loss-{:.6f}".format(
                    self.epoch, self.step, valid_total_loss
                ),
            )
            self.accelerator.save_state(path)
            json.dump(
                self.checkpoints_path,
                open(os.path.join(path, "ckpts.json"), "w"),
                ensure_ascii=False,
                indent=4,
            )
        # Save eval audios: vocode each held-out mel (with pitch if the
        # config uses frame-level pitch) and write prediction + ground truth.
        self.accelerator.wait_for_everyone()
        if self.accelerator.is_main_process and run_eval:
            for i in range(len(self.valid_dataloader.dataset.eval_audios)):
                if self.cfg.preprocess.use_frame_pitch:
                    eval_audio = self._inference(
                        self.valid_dataloader.dataset.eval_mels[i],
                        eval_pitch=self.valid_dataloader.dataset.eval_pitchs[i],
                        use_pitch=True,
                    )
                else:
                    eval_audio = self._inference(
                        self.valid_dataloader.dataset.eval_mels[i]
                    )
                path = os.path.join(
                    self.checkpoint_dir,
                    "epoch-{:04d}_step-{:07d}_loss-{:.6f}_eval_audio_{}.wav".format(
                        self.epoch,
                        self.step,
                        valid_total_loss,
                        self.valid_dataloader.dataset.eval_dataset_names[i],
                    ),
                )
                path_gt = os.path.join(
                    self.checkpoint_dir,
                    "epoch-{:04d}_step-{:07d}_loss-{:.6f}_eval_audio_{}_gt.wav".format(
                        self.epoch,
                        self.step,
                        valid_total_loss,
                        self.valid_dataloader.dataset.eval_dataset_names[i],
                    ),
                )
                save_audio(path, eval_audio, self.cfg.preprocess.sample_rate)
                save_audio(
                    path_gt,
                    self.valid_dataloader.dataset.eval_audios[i],
                    self.cfg.preprocess.sample_rate,
                )
        self.accelerator.wait_for_everyone()
        self.epoch += 1
    # Finish training: save one final state
    self.accelerator.wait_for_everyone()
    path = os.path.join(
        self.checkpoint_dir,
        "epoch-{:04d}_step-{:07d}_loss-{:.6f}".format(
            self.epoch, self.step, valid_total_loss
        ),
    )
    self.accelerator.save_state(path)
def _train_epoch(self):
    """Training epoch. Should return average loss of a batch (sample) over
    one epoch. See ``train_loop`` for usage.

    Returns:
        (epoch_total_loss, epoch_losses): scalar average total loss and a
        dict of per-criterion averages, both scaled by the gradient
        accumulation step count.
    """
    self.generator.train()
    for key, _ in self.discriminators.items():
        self.discriminators[key].train()
    epoch_losses: dict = {}
    epoch_total_loss: int = 0
    for batch in tqdm(
        self.train_dataloader,
        desc=f"Training Epoch {self.epoch}",
        unit="batch",
        colour="GREEN",
        leave=False,
        dynamic_ncols=True,
        smoothing=0.04,
        disable=not self.accelerator.is_main_process,
    ):
        # Get losses
        total_loss, losses = self._train_step(batch)
        self.batch_count += 1
        # Log learning rates once per effective optimizer step
        if self.batch_count % self.cfg.train.gradient_accumulation_step == 0:
            self.accelerator.log(
                {
                    "Step/Generator Learning Rate": self.generator_optimizer.param_groups[
                        0
                    ][
                        "lr"
                    ],
                    "Step/Discriminator Learning Rate": self.discriminator_optimizer.param_groups[
                        0
                    ][
                        "lr"
                    ],
                },
                step=self.step,
            )
            for key, _ in losses.items():
                self.accelerator.log(
                    {
                        "Step/Train {} Loss".format(key): losses[key],
                    },
                    step=self.step,
                )
            # Accumulate per-criterion losses across the epoch
            if not epoch_losses:
                epoch_losses = losses
            else:
                for key, value in losses.items():
                    epoch_losses[key] += value
            epoch_total_loss += total_loss
            self.step += 1
    # Get and log total losses
    self.accelerator.wait_for_everyone()
    epoch_total_loss = (
        epoch_total_loss
        / len(self.train_dataloader)
        * self.cfg.train.gradient_accumulation_step
    )
    for key in epoch_losses.keys():
        epoch_losses[key] = (
            epoch_losses[key]
            / len(self.train_dataloader)
            * self.cfg.train.gradient_accumulation_step
        )
    return epoch_total_loss, epoch_losses
def _train_step(self, data):
    """Training forward step. Should return average loss of a sample over
    one batch. Provoke ``_forward_step`` is recommended except for special case.
    See ``_train_epoch`` for usage.

    Runs the standard two-phase GAN update: (1) discriminator backward/step
    on detached generator output, then (2) generator backward/step with
    adversarial, feature-matching and reconstruction losses.

    NOTE(review): assumes ``use_frame_pitch`` and ``extract_amplitude_phase``
    are mutually exclusive config flags — confirm; the amplitude/phase
    tensors are only bound in the ``extract_amplitude_phase`` branch.
    """
    # Init losses
    train_losses = {}
    total_loss = 0
    generator_losses = {}
    generator_total_loss = 0
    discriminator_losses = {}
    discriminator_total_loss = 0
    # Use input feature to get predictions
    mel_input = data["mel"]
    audio_gt = data["audio"]
    if self.cfg.preprocess.extract_amplitude_phase:
        logamp_gt = data["logamp"]
        pha_gt = data["pha"]
        rea_gt = data["rea"]
        imag_gt = data["imag"]
    if self.cfg.preprocess.use_frame_pitch:
        pitch_input = data["frame_pitch"]
    if self.cfg.preprocess.use_frame_pitch:
        pitch_input = pitch_input.float()
        audio_pred = self.generator.forward(mel_input, pitch_input)
    elif self.cfg.preprocess.extract_amplitude_phase:
        # APNet-style generator returns spectra plus the waveform; the
        # waveform is re-analyzed for the consistency loss.
        (
            logamp_pred,
            pha_pred,
            rea_pred,
            imag_pred,
            audio_pred,
        ) = self.generator.forward(mel_input)
        from utils.mel import amplitude_phase_spectrum
        _, _, rea_pred_final, imag_pred_final = amplitude_phase_spectrum(
            audio_pred.squeeze(1), self.cfg.preprocess
        )
    else:
        audio_pred = self.generator.forward(mel_input)
    # Calculate and BP Discriminator losses
    # (generator output detached so gradients stop at the discriminators)
    self.discriminator_optimizer.zero_grad()
    for key, _ in self.discriminators.items():
        y_r, y_g, _, _ = self.discriminators[key].forward(
            audio_gt.unsqueeze(1), audio_pred.detach()
        )
        (
            discriminator_losses["{}_discriminator".format(key)],
            _,
            _,
        ) = self.criterions["discriminator"](y_r, y_g)
        discriminator_total_loss += discriminator_losses[
            "{}_discriminator".format(key)
        ]
    self.accelerator.backward(discriminator_total_loss)
    self.discriminator_optimizer.step()
    # Calculate and BP Generator losses (fresh discriminator forward pass,
    # this time without detaching the generator output)
    self.generator_optimizer.zero_grad()
    for key, _ in self.discriminators.items():
        y_r, y_g, f_r, f_g = self.discriminators[key].forward(
            audio_gt.unsqueeze(1), audio_pred
        )
        generator_losses["{}_feature".format(key)] = self.criterions["feature"](
            f_r, f_g
        )
        generator_losses["{}_generator".format(key)], _ = self.criterions[
            "generator"
        ](y_g)
        generator_total_loss += generator_losses["{}_feature".format(key)]
        generator_total_loss += generator_losses["{}_generator".format(key)]
    # Optional reconstruction losses, driven by the configured criterions.
    if "mel" in self.criterions.keys():
        generator_losses["mel"] = self.criterions["mel"](audio_gt, audio_pred)
        generator_total_loss += generator_losses["mel"]
    if "wav" in self.criterions.keys():
        generator_losses["wav"] = self.criterions["wav"](audio_gt, audio_pred)
        generator_total_loss += generator_losses["wav"]
    if "amplitude" in self.criterions.keys():
        generator_losses["amplitude"] = self.criterions["amplitude"](
            logamp_gt, logamp_pred
        )
        generator_total_loss += generator_losses["amplitude"]
    if "phase" in self.criterions.keys():
        generator_losses["phase"] = self.criterions["phase"](pha_gt, pha_pred)
        generator_total_loss += generator_losses["phase"]
    if "consistency" in self.criterions.keys():
        generator_losses["consistency"] = self.criterions["consistency"](
            rea_gt, rea_pred, rea_pred_final, imag_gt, imag_pred, imag_pred_final
        )
        generator_total_loss += generator_losses["consistency"]
    self.accelerator.backward(generator_total_loss)
    self.generator_optimizer.step()
    # Get the total losses as plain Python floats for logging
    total_loss = discriminator_total_loss + generator_total_loss
    train_losses.update(discriminator_losses)
    train_losses.update(generator_losses)
    for key, _ in train_losses.items():
        train_losses[key] = train_losses[key].item()
    return total_loss.item(), train_losses
def _valid_epoch(self):
"""Testing epoch. Should return average loss of a batch (sample) over
one epoch. See ``train_loop`` for usage.
"""
self.generator.eval()
for key, _ in self.discriminators.items():
self.discriminators[key].eval()
epoch_losses: dict = {}
epoch_total_loss: int = 0
for batch in tqdm(
self.valid_dataloader,
desc=f"Validating Epoch {self.epoch}",
unit="batch",
colour="GREEN",
leave=False,
dynamic_ncols=True,
smoothing=0.04,
disable=not self.accelerator.is_main_process,
):
# Get losses
total_loss, losses = self._valid_step(batch)
# Log info
for key, _ in losses.items():
self.accelerator.log(
{
"Step/Valid {} Loss".format(key): losses[key],
},
step=self.step,
)
if not epoch_losses:
epoch_losses = losses
else:
for key, value in losses.items():
epoch_losses[key] += value
epoch_total_loss += total_loss
# Get and log total losses
self.accelerator.wait_for_everyone()
epoch_total_loss = epoch_total_loss / len(self.valid_dataloader)
for key in epoch_losses.keys():
epoch_losses[key] = epoch_losses[key] / len(self.valid_dataloader)
return epoch_total_loss, epoch_losses
def _valid_step(self, data):
"""Testing forward step. Should return average loss of a sample over
one batch. Provoke ``_forward_step`` is recommended except for special case.
See ``_test_epoch`` for usage.
"""
# Init losses
valid_losses = {}
total_loss = 0
generator_losses = {}
generator_total_loss = 0
discriminator_losses = {}
discriminator_total_loss = 0
# Use feature inputs to get the predicted audio
mel_input = data["mel"]
audio_gt = data["audio"]
if self.cfg.preprocess.extract_amplitude_phase:
logamp_gt = data["logamp"]
pha_gt = data["pha"]
rea_gt = data["rea"]
imag_gt = data["imag"]
if self.cfg.preprocess.use_frame_pitch:
pitch_input = data["frame_pitch"]
if self.cfg.preprocess.use_frame_pitch:
pitch_input = pitch_input.float()
audio_pred = self.generator.forward(mel_input, pitch_input)
elif self.cfg.preprocess.extract_amplitude_phase:
(
logamp_pred,
pha_pred,
rea_pred,
imag_pred,
audio_pred,
) = self.generator.forward(mel_input)
from utils.mel import amplitude_phase_spectrum
_, _, rea_pred_final, imag_pred_final = amplitude_phase_spectrum(
audio_pred.squeeze(1), self.cfg.preprocess
)
else:
audio_pred = self.generator.forward(mel_input)
# Get Discriminator losses
for key, _ in self.discriminators.items():
y_r, y_g, _, _ = self.discriminators[key].forward(
audio_gt.unsqueeze(1), audio_pred
)
(
discriminator_losses["{}_discriminator".format(key)],
_,
_,
) = self.criterions["discriminator"](y_r, y_g)
discriminator_total_loss += discriminator_losses[
"{}_discriminator".format(key)
]
for key, _ in self.discriminators.items():
y_r, y_g, f_r, f_g = self.discriminators[key].forward(
audio_gt.unsqueeze(1), audio_pred
)
generator_losses["{}_feature".format(key)] = self.criterions["feature"](
f_r, f_g
)
generator_losses["{}_generator".format(key)], _ = self.criterions[
"generator"
](y_g)
generator_total_loss += generator_losses["{}_feature".format(key)]
generator_total_loss += generator_losses["{}_generator".format(key)]
if "mel" in self.criterions.keys():
generator_losses["mel"] = self.criterions["mel"](audio_gt, audio_pred)
generator_total_loss += generator_losses["mel"]
if "mel" in self.criterions.keys():
generator_losses["mel"] = self.criterions["mel"](audio_gt, audio_pred)
generator_total_loss += generator_losses["mel"]
if "wav" in self.criterions.keys():
generator_losses["wav"] = self.criterions["wav"](audio_gt, audio_pred)
generator_total_loss += generator_losses["wav"]
if "wav" in self.criterions.keys():
generator_losses["wav"] = self.criterions["wav"](audio_gt, audio_pred)
generator_total_loss += generator_losses["wav"]
if "amplitude" in self.criterions.keys():
generator_losses["amplitude"] = self.criterions["amplitude"](
logamp_gt, logamp_pred
)
generator_total_loss += generator_losses["amplitude"]
if "phase" in self.criterions.keys():
generator_losses["phase"] = self.criterions["phase"](pha_gt, pha_pred)
generator_total_loss += generator_losses["phase"]
if "consistency" in self.criterions.keys():
generator_losses["consistency"] = self.criterions["consistency"](
rea_gt,
rea_pred,
rea_pred_final,
imag_gt,
imag_pred,
imag_pred_final,
)
generator_total_loss += generator_losses["consistency"]
total_loss = discriminator_total_loss + generator_total_loss
valid_losses.update(discriminator_losses)
valid_losses.update(generator_losses)
for item in valid_losses:
valid_losses[item] = valid_losses[item].item()
return total_loss.item(), valid_losses
def _inference(self, eval_mel, eval_pitch=None, use_pitch=False):
"""Inference during training for test audios."""
if use_pitch:
eval_pitch = align_length(eval_pitch, eval_mel.shape[1])
eval_audio = vocoder_inference(
self.cfg,
self.generator,
torch.from_numpy(eval_mel).unsqueeze(0),
f0s=torch.from_numpy(eval_pitch).unsqueeze(0).float(),
device=next(self.generator.parameters()).device,
).squeeze(0)
else:
eval_audio = vocoder_inference(
self.cfg,
self.generator,
torch.from_numpy(eval_mel).unsqueeze(0),
device=next(self.generator.parameters()).device,
).squeeze(0)
return eval_audio
    def _load_model(self, checkpoint_dir, checkpoint_path=None, resume_type="resume"):
        """Load model from checkpoint. If checkpoint_path is None, it will
        load the latest checkpoint in checkpoint_dir. If checkpoint_path is not
        None, it will load the checkpoint specified by checkpoint_path. **Only use this
        method after** ``accelerator.prepare()``.
        """
        if checkpoint_path is None:
            # Checkpoint folders are named "epoch-XXXX_step-XXXXXXX_loss-...";
            # pick the one with the highest epoch number.
            ls = [str(i) for i in Path(checkpoint_dir).glob("*")]
            ls.sort(key=lambda x: int(x.split("_")[-3].split("-")[-1]), reverse=True)
            checkpoint_path = ls[0]
        if resume_type == "resume":
            # Restore full training state, then continue from the epoch/step
            # encoded in the checkpoint folder name.
            self.accelerator.load_state(checkpoint_path)
            self.epoch = int(checkpoint_path.split("_")[-3].split("-")[-1]) + 1
            self.step = int(checkpoint_path.split("_")[-2].split("-")[-1]) + 1
        elif resume_type == "finetune":
            # Load only model weights (no optimizer/scheduler state).
            accelerate.load_checkpoint_and_dispatch(
                self.accelerator.unwrap_model(self.generator),
                os.path.join(checkpoint_path, "pytorch_model.bin"),
            )
            # NOTE(review): every discriminator is loaded from the SAME
            # "pytorch_model.bin" file as the generator. Accelerate typically
            # saves extra models as "pytorch_model_1.bin", etc., so this looks
            # suspicious — confirm the intended checkpoint layout.
            for key, _ in self.discriminators.items():
                accelerate.load_checkpoint_and_dispatch(
                    self.accelerator.unwrap_model(self.discriminators[key]),
                    os.path.join(checkpoint_path, "pytorch_model.bin"),
                )
            self.logger.info("Load model weights for finetune SUCCESS!")
        else:
            raise ValueError("Unsupported resume type: {}".format(resume_type))
        return checkpoint_path
def _count_parameters(self):
result = sum(p.numel() for p in self.generator.parameters())
for _, discriminator in self.discriminators.items():
result += sum(p.numel() for p in discriminator.parameters())
return result
class DiffusionVocoderTrainer(VocoderTrainer):
    """Trainer for diffusion-based vocoders (e.g. DiffWave).

    Training objective: Gaussian noise is mixed into the ground-truth
    waveform at a random diffusion timestep and the model is trained with an
    L1 loss to predict that noise, conditioned on the mel spectrogram.
    """

    def __init__(self, args, cfg):
        """Set up accelerator, logging, data, model, optimizer, scheduler,
        criterion and (optionally) resume from a checkpoint.
        """
        super().__init__()
        self.args = args
        self.cfg = cfg
        cfg.exp_name = args.exp_name
        # Diffusion
        # Build the linear beta schedule from (start, stop, num) factors, then
        # precompute the cumulative products used to scale signal vs. noise.
        self.cfg.model.diffwave.noise_schedule = np.linspace(
            self.cfg.model.diffwave.noise_schedule_factors[0],
            self.cfg.model.diffwave.noise_schedule_factors[1],
            self.cfg.model.diffwave.noise_schedule_factors[2],
        )
        beta = np.array(self.cfg.model.diffwave.noise_schedule)
        noise_level = np.cumprod(1 - beta)
        self.noise_level = torch.tensor(noise_level.astype(np.float32))
        # Init accelerator
        self._init_accelerator()
        self.accelerator.wait_for_everyone()
        # Init logger
        with self.accelerator.main_process_first():
            self.logger = get_logger(args.exp_name, log_level=args.log_level)
        self.logger.info("=" * 56)
        self.logger.info("||\t\t" + "New training process started." + "\t\t||")
        self.logger.info("=" * 56)
        self.logger.info("\n")
        self.logger.debug(f"Using {args.log_level.upper()} logging level.")
        self.logger.info(f"Experiment name: {args.exp_name}")
        self.logger.info(f"Experiment directory: {self.exp_dir}")
        self.checkpoint_dir = os.path.join(self.exp_dir, "checkpoint")
        if self.accelerator.is_main_process:
            os.makedirs(self.checkpoint_dir, exist_ok=True)
        self.logger.debug(f"Checkpoint directory: {self.checkpoint_dir}")
        # Init training status
        self.batch_count: int = 0
        self.step: int = 0
        self.epoch: int = 0
        # A non-positive max_epoch means "train forever".
        self.max_epoch = (
            self.cfg.train.max_epoch if self.cfg.train.max_epoch > 0 else float("inf")
        )
        self.logger.info(
            "Max epoch: {}".format(
                self.max_epoch if self.max_epoch < float("inf") else "Unlimited"
            )
        )
        # Check potential errors
        if self.accelerator.is_main_process:
            self._check_basic_configs()
            self.save_checkpoint_stride = self.cfg.train.save_checkpoint_stride
            # One checkpoint-path list per configured save stride.
            self.checkpoints_path = [
                [] for _ in range(len(self.save_checkpoint_stride))
            ]
            self.run_eval = self.cfg.train.run_eval
        # Set random seed
        with self.accelerator.main_process_first():
            start = time.monotonic_ns()
            self._set_random_seed(self.cfg.train.random_seed)
            end = time.monotonic_ns()
            self.logger.debug(
                f"Setting random seed done in {(end - start) / 1e6:.2f}ms"
            )
            self.logger.debug(f"Random seed: {self.cfg.train.random_seed}")
        # Build dataloader
        with self.accelerator.main_process_first():
            self.logger.info("Building dataset...")
            start = time.monotonic_ns()
            self.train_dataloader, self.valid_dataloader = self._build_dataloader()
            end = time.monotonic_ns()
            self.logger.info(f"Building dataset done in {(end - start) / 1e6:.2f}ms")
        # Build model
        with self.accelerator.main_process_first():
            self.logger.info("Building model...")
            start = time.monotonic_ns()
            self.model = self._build_model()
            end = time.monotonic_ns()
            self.logger.debug(self.model)
            self.logger.info(f"Building model done in {(end - start) / 1e6:.2f}ms")
            self.logger.info(f"Model parameters: {self._count_parameters()/1e6:.2f}M")
        # Build optimizers and schedulers
        with self.accelerator.main_process_first():
            self.logger.info("Building optimizer and scheduler...")
            start = time.monotonic_ns()
            self.optimizer = self._build_optimizer()
            self.scheduler = self._build_scheduler()
            end = time.monotonic_ns()
            self.logger.info(
                f"Building optimizer and scheduler done in {(end - start) / 1e6:.2f}ms"
            )
        # Accelerator preparing
        self.logger.info("Initializing accelerate...")
        start = time.monotonic_ns()
        (
            self.train_dataloader,
            self.valid_dataloader,
            self.model,
            self.optimizer,
            self.scheduler,
        ) = self.accelerator.prepare(
            self.train_dataloader,
            self.valid_dataloader,
            self.model,
            self.optimizer,
            self.scheduler,
        )
        end = time.monotonic_ns()
        self.logger.info(f"Initializing accelerate done in {(end - start) / 1e6:.2f}ms")
        # Build criterions
        with self.accelerator.main_process_first():
            self.logger.info("Building criterion...")
            start = time.monotonic_ns()
            self.criterion = self._build_criterion()
            end = time.monotonic_ns()
            self.logger.info(f"Building criterion done in {(end - start) / 1e6:.2f}ms")
        # Resume checkpoints
        with self.accelerator.main_process_first():
            if args.resume_type:
                self.logger.info("Resuming from checkpoint...")
                start = time.monotonic_ns()
                ckpt_path = Path(args.checkpoint)
                # Accept either a concrete checkpoint folder or a directory
                # containing several of them.
                if self._is_valid_pattern(ckpt_path.parts[-1]):
                    ckpt_path = self._load_model(
                        None, args.checkpoint, args.resume_type
                    )
                else:
                    ckpt_path = self._load_model(
                        args.checkpoint, resume_type=args.resume_type
                    )
                end = time.monotonic_ns()
                self.logger.info(
                    f"Resuming from checkpoint done in {(end - start) / 1e6:.2f}ms"
                )
                self.checkpoints_path = json.load(
                    open(os.path.join(ckpt_path, "ckpts.json"), "r")
                )
            self.checkpoint_dir = os.path.join(self.exp_dir, "checkpoint")
            if self.accelerator.is_main_process:
                os.makedirs(self.checkpoint_dir, exist_ok=True)
            self.logger.debug(f"Checkpoint directory: {self.checkpoint_dir}")
        # Save config
        self.config_save_path = os.path.join(self.exp_dir, "args.json")
        # Device
        self.device = next(self.model.parameters()).device
        self.noise_level = self.noise_level.to(self.device)

    def _build_dataset(self):
        """Return the (Dataset, Collator) classes for diffusion vocoders."""
        return DiffusionVocoderDataset, DiffusionVocoderCollator

    def _build_criterion(self):
        """L1 loss between true and predicted noise."""
        criterion = nn.L1Loss()
        return criterion

    def _build_model(self):
        """Instantiate the generator named by ``cfg.model.generator``."""
        model = supported_models[self.cfg.model.generator](self.cfg)
        return model

    def _build_optimizer(self):
        """AdamW over all model parameters with configured lr/betas."""
        optimizer = AdamW(
            self.model.parameters(),
            lr=self.cfg.train.adamw.lr,
            betas=(self.cfg.train.adamw.adam_b1, self.cfg.train.adamw.adam_b2),
        )
        return optimizer

    def _build_scheduler(self):
        """Per-epoch exponential learning-rate decay."""
        scheduler = ExponentialLR(
            self.optimizer,
            gamma=self.cfg.train.exponential_lr.lr_decay,
            last_epoch=self.epoch - 1,
        )
        return scheduler

    def train_loop(self):
        """Training process"""
        self.accelerator.wait_for_everyone()
        # Dump config
        if self.accelerator.is_main_process:
            self._dump_cfg(self.config_save_path)
        self.model.train()
        self.optimizer.zero_grad()
        # Sync and start training
        self.accelerator.wait_for_everyone()
        while self.epoch < self.max_epoch:
            self.logger.info("\n")
            self.logger.info("-" * 32)
            self.logger.info("Epoch {}: ".format(self.epoch))
            # Train and Validate
            train_total_loss = self._train_epoch()
            valid_total_loss = self._valid_epoch()
            self.accelerator.log(
                {
                    "Epoch/Train Total Loss": train_total_loss,
                    "Epoch/Valid Total Loss": valid_total_loss,
                },
                step=self.epoch,
            )
            # Update scheduler
            self.accelerator.wait_for_everyone()
            self.scheduler.step()
            # Check save checkpoint interval
            run_eval = False
            if self.accelerator.is_main_process:
                save_checkpoint = False
                for i, num in enumerate(self.save_checkpoint_stride):
                    if self.epoch % num == 0:
                        save_checkpoint = True
                        run_eval |= self.run_eval[i]
            # Save checkpoints
            self.accelerator.wait_for_everyone()
            if self.accelerator.is_main_process and save_checkpoint:
                path = os.path.join(
                    self.checkpoint_dir,
                    "epoch-{:04d}_step-{:07d}_loss-{:.6f}".format(
                        self.epoch, self.step, valid_total_loss
                    ),
                )
                self.accelerator.save_state(path)
                json.dump(
                    self.checkpoints_path,
                    open(os.path.join(path, "ckpts.json"), "w"),
                    ensure_ascii=False,
                    indent=4,
                )
            # Save eval audios
            self.accelerator.wait_for_everyone()
            if self.accelerator.is_main_process and run_eval:
                for i in range(len(self.valid_dataloader.dataset.eval_audios)):
                    if self.cfg.preprocess.use_frame_pitch:
                        eval_audio = self._inference(
                            self.valid_dataloader.dataset.eval_mels[i],
                            eval_pitch=self.valid_dataloader.dataset.eval_pitchs[i],
                            use_pitch=True,
                        )
                    else:
                        eval_audio = self._inference(
                            self.valid_dataloader.dataset.eval_mels[i]
                        )
                    path = os.path.join(
                        self.checkpoint_dir,
                        "epoch-{:04d}_step-{:07d}_loss-{:.6f}_eval_audio_{}.wav".format(
                            self.epoch,
                            self.step,
                            valid_total_loss,
                            self.valid_dataloader.dataset.eval_dataset_names[i],
                        ),
                    )
                    path_gt = os.path.join(
                        self.checkpoint_dir,
                        "epoch-{:04d}_step-{:07d}_loss-{:.6f}_eval_audio_{}_gt.wav".format(
                            self.epoch,
                            self.step,
                            valid_total_loss,
                            self.valid_dataloader.dataset.eval_dataset_names[i],
                        ),
                    )
                    save_audio(path, eval_audio, self.cfg.preprocess.sample_rate)
                    save_audio(
                        path_gt,
                        self.valid_dataloader.dataset.eval_audios[i],
                        self.cfg.preprocess.sample_rate,
                    )
            self.accelerator.wait_for_everyone()
            self.epoch += 1
        # Finish training
        self.accelerator.wait_for_everyone()
        path = os.path.join(
            self.checkpoint_dir,
            "epoch-{:04d}_step-{:07d}_loss-{:.6f}".format(
                self.epoch, self.step, valid_total_loss
            ),
        )
        self.accelerator.save_state(path)

    def _train_epoch(self):
        """Training epoch. Should return average loss of a batch (sample) over
        one epoch. See ``train_loop`` for usage.
        """
        self.model.train()
        epoch_total_loss: int = 0
        for batch in tqdm(
            self.train_dataloader,
            desc=f"Training Epoch {self.epoch}",
            unit="batch",
            colour="GREEN",
            leave=False,
            dynamic_ncols=True,
            smoothing=0.04,
            disable=not self.accelerator.is_main_process,
        ):
            # Get losses
            total_loss = self._train_step(batch)
            self.batch_count += 1
            # Log info
            if self.batch_count % self.cfg.train.gradient_accumulation_step == 0:
                self.accelerator.log(
                    {
                        "Step/Learning Rate": self.optimizer.param_groups[0]["lr"],
                    },
                    step=self.step,
                )
                epoch_total_loss += total_loss
                self.step += 1
        # Get and log total losses
        self.accelerator.wait_for_everyone()
        epoch_total_loss = (
            epoch_total_loss
            / len(self.train_dataloader)
            * self.cfg.train.gradient_accumulation_step
        )
        return epoch_total_loss

    def _train_step(self, data):
        """Training forward step. Should return average loss of a sample over
        one batch. Provoke ``_forward_step`` is recommended except for special case.
        See ``_train_epoch`` for usage.
        """
        # Init losses
        total_loss = 0
        # Use input feature to get predictions
        mel_input = data["mel"]
        audio_gt = data["audio"]
        if self.cfg.preprocess.use_frame_pitch:
            # NOTE(review): pitch_input is read but never used below — confirm
            # whether the diffusion model is meant to be pitch-conditioned.
            pitch_input = data["frame_pitch"]
        self.optimizer.zero_grad()
        # Sample a random diffusion timestep per utterance.
        N = audio_gt.shape[0]
        t = torch.randint(
            0, len(self.cfg.model.diffwave.noise_schedule), [N], device=self.device
        )
        noise_scale = self.noise_level[t].unsqueeze(1)
        noise_scale_sqrt = noise_scale**0.5
        noise = torch.randn_like(audio_gt).to(self.device)
        # Forward diffusion: mix clean audio with Gaussian noise at step t.
        noisy_audio = noise_scale_sqrt * audio_gt + (1.0 - noise_scale) ** 0.5 * noise
        audio_pred = self.model(noisy_audio, t, mel_input)
        # Model is trained to predict the injected noise.
        total_loss = self.criterion(noise, audio_pred.squeeze(1))
        self.accelerator.backward(total_loss)
        self.optimizer.step()
        return total_loss.item()

    def _valid_epoch(self):
        """Testing epoch. Should return average loss of a batch (sample) over
        one epoch. See ``train_loop`` for usage.
        """
        self.model.eval()
        epoch_total_loss: int = 0
        for batch in tqdm(
            self.valid_dataloader,
            desc=f"Validating Epoch {self.epoch}",
            unit="batch",
            colour="GREEN",
            leave=False,
            dynamic_ncols=True,
            smoothing=0.04,
            disable=not self.accelerator.is_main_process,
        ):
            # Get losses
            total_loss = self._valid_step(batch)
            # Log info
            epoch_total_loss += total_loss
        # Get and log total losses
        self.accelerator.wait_for_everyone()
        epoch_total_loss = epoch_total_loss / len(self.valid_dataloader)
        return epoch_total_loss

    def _valid_step(self, data):
        """Testing forward step. Should return average loss of a sample over
        one batch. Provoke ``_forward_step`` is recommended except for special case.
        See ``_test_epoch`` for usage.
        """
        # Init losses
        total_loss = 0
        # Use feature inputs to get the predicted audio
        mel_input = data["mel"]
        audio_gt = data["audio"]
        if self.cfg.preprocess.use_frame_pitch:
            # NOTE(review): pitch_input is unused here as well (see _train_step).
            pitch_input = data["frame_pitch"]
        # Same forward-diffusion objective as training, without backprop.
        N = audio_gt.shape[0]
        t = torch.randint(
            0, len(self.cfg.model.diffwave.noise_schedule), [N], device=self.device
        )
        noise_scale = self.noise_level[t].unsqueeze(1)
        noise_scale_sqrt = noise_scale**0.5
        noise = torch.randn_like(audio_gt)
        noisy_audio = noise_scale_sqrt * audio_gt + (1.0 - noise_scale) ** 0.5 * noise
        audio_pred = self.model(noisy_audio, t, mel_input)
        total_loss = self.criterion(noise, audio_pred.squeeze(1))
        return total_loss.item()

    def _inference(self, eval_mel, eval_pitch=None, use_pitch=False):
        """Inference during training for test audios."""
        if use_pitch:
            # Pitch contour must match the number of mel frames.
            eval_pitch = align_length(eval_pitch, eval_mel.shape[1])
            eval_audio = vocoder_inference(
                self.cfg,
                self.model,
                torch.from_numpy(eval_mel).unsqueeze(0),
                f0s=torch.from_numpy(eval_pitch).unsqueeze(0).float(),
                device=next(self.model.parameters()).device,
            ).squeeze(0)
        else:
            eval_audio = vocoder_inference(
                self.cfg,
                self.model,
                torch.from_numpy(eval_mel).unsqueeze(0),
                device=next(self.model.parameters()).device,
            ).squeeze(0)
        return eval_audio

    def _load_model(self, checkpoint_dir, checkpoint_path=None, resume_type="resume"):
        """Load model from checkpoint. If checkpoint_path is None, it will
        load the latest checkpoint in checkpoint_dir. If checkpoint_path is not
        None, it will load the checkpoint specified by checkpoint_path. **Only use this
        method after** ``accelerator.prepare()``.
        """
        if checkpoint_path is None:
            # Folders are named "epoch-XXXX_step-XXXXXXX_loss-..."; pick the
            # highest epoch.
            ls = [str(i) for i in Path(checkpoint_dir).glob("*")]
            ls.sort(key=lambda x: int(x.split("_")[-3].split("-")[-1]), reverse=True)
            checkpoint_path = ls[0]
        if resume_type == "resume":
            self.accelerator.load_state(checkpoint_path)
            self.epoch = int(checkpoint_path.split("_")[-3].split("-")[-1]) + 1
            self.step = int(checkpoint_path.split("_")[-2].split("-")[-1]) + 1
        elif resume_type == "finetune":
            # Weights only, no optimizer/scheduler state.
            accelerate.load_checkpoint_and_dispatch(
                self.accelerator.unwrap_model(self.model),
                os.path.join(checkpoint_path, "pytorch_model.bin"),
            )
            self.logger.info("Load model weights for finetune SUCCESS!")
        else:
            raise ValueError("Unsupported resume type: {}".format(resume_type))
        return checkpoint_path

    def _count_parameters(self):
        """Total number of model parameters."""
        result = sum(p.numel() for p in self.model.parameters())
        return result
def build_trainer(args, cfg):
    """Instantiate the trainer class matching ``cfg.model_type``.

    Raises KeyError for unsupported model types.
    """
    trainer_classes = {
        "GANVocoder": GANVocoderTrainer,
        "DiffusionVocoder": DiffusionVocoderTrainer,
    }
    return trainer_classes[cfg.model_type](args, cfg)
17,510 | import argparse
import torch
from models.vocoders.gan.gan_vocoder_trainer import GANVocoderTrainer
from models.vocoders.diffusion.diffusion_vocoder_trainer import DiffusionVocoderTrainer
from utils.util import load_config
def cuda_relevant(deterministic=False):
    """Configure global CUDA/cuDNN backend flags for training.

    Frees cached GPU memory, enables TF32 kernels, and toggles
    deterministic execution (reproducibility) vs. cuDNN autotuning (speed).
    """
    torch.cuda.empty_cache()
    cudnn = torch.backends.cudnn
    # TF32 on Ampere and above
    torch.backends.cuda.matmul.allow_tf32 = True
    cudnn.enabled = True
    cudnn.allow_tf32 = True
    # Deterministic mode disables benchmark autotuning and vice versa.
    cudnn.deterministic = deterministic
    cudnn.benchmark = not deterministic
    torch.use_deterministic_algorithms(deterministic)
17,511 | import argparse
import os
import torch
from models.vocoders.vocoder_inference import VocoderInference
from utils.util import load_config
class VocoderInference(object):
    """Batch inference pipeline for trained vocoders.

    Supports three modes: inference over a preprocessed dataset, over a folder
    of extracted features, or over a folder of raw audio (features are
    extracted into a temporary "tmp" dataset first). Predicted and
    ground-truth audio are written to ``<output_dir>/pred`` and
    ``<output_dir>/gt``.
    """

    def __init__(self, args=None, cfg=None, infer_type="from_dataset"):
        """Build dataloader and model, then load vocoder weights.

        NOTE(review): the default ``infer_type`` is "from_dataset" but the
        branches below test for "infer_from_dataset" etc. — confirm callers
        always pass one of the "infer_from_*" values (``build_inference``
        does).
        """
        super().__init__()
        start = time.monotonic_ns()
        self.args = args
        self.cfg = cfg
        self.infer_type = infer_type
        # Init accelerator
        self.accelerator = accelerate.Accelerator()
        self.accelerator.wait_for_everyone()
        # Get logger
        with self.accelerator.main_process_first():
            self.logger = get_logger("inference", log_level=args.log_level)
        # Log some info
        self.logger.info("=" * 56)
        self.logger.info("||\t\t" + "New inference process started." + "\t\t||")
        self.logger.info("=" * 56)
        self.logger.info("\n")
        self.vocoder_dir = args.vocoder_dir
        self.logger.debug(f"Vocoder dir: {args.vocoder_dir}")
        # Start from clean pred/gt output folders.
        os.makedirs(args.output_dir, exist_ok=True)
        if os.path.exists(os.path.join(args.output_dir, "pred")):
            shutil.rmtree(os.path.join(args.output_dir, "pred"))
        if os.path.exists(os.path.join(args.output_dir, "gt")):
            shutil.rmtree(os.path.join(args.output_dir, "gt"))
        os.makedirs(os.path.join(args.output_dir, "pred"), exist_ok=True)
        os.makedirs(os.path.join(args.output_dir, "gt"), exist_ok=True)
        # Set random seed
        with self.accelerator.main_process_first():
            start = time.monotonic_ns()
            self._set_random_seed(self.cfg.train.random_seed)
            end = time.monotonic_ns()
            self.logger.debug(
                f"Setting random seed done in {(end - start) / 1e6:.2f}ms"
            )
            self.logger.debug(f"Random seed: {self.cfg.train.random_seed}")
        # Setup inference mode: feature/audio modes build a temporary dataset.
        if self.infer_type == "infer_from_dataset":
            self.cfg.dataset = self.args.infer_datasets
        elif self.infer_type == "infer_from_feature":
            self._build_tmp_dataset_from_feature()
            self.cfg.dataset = ["tmp"]
        elif self.infer_type == "infer_from_audio":
            self._build_tmp_dataset_from_audio()
            self.cfg.dataset = ["tmp"]
        # Setup data loader
        with self.accelerator.main_process_first():
            self.logger.info("Building dataset...")
            start = time.monotonic_ns()
            self.test_dataloader = self._build_dataloader()
            end = time.monotonic_ns()
            self.logger.info(f"Building dataset done in {(end - start) / 1e6:.2f}ms")
        # Build model
        with self.accelerator.main_process_first():
            self.logger.info("Building model...")
            start = time.monotonic_ns()
            self.model = self._build_model()
            end = time.monotonic_ns()
            self.logger.info(f"Building model done in {(end - start) / 1e6:.3f}ms")
        # Init with accelerate
        self.logger.info("Initializing accelerate...")
        start = time.monotonic_ns()
        self.accelerator = accelerate.Accelerator()
        (self.model, self.test_dataloader) = self.accelerator.prepare(
            self.model, self.test_dataloader
        )
        end = time.monotonic_ns()
        self.accelerator.wait_for_everyone()
        self.logger.info(f"Initializing accelerate done in {(end - start) / 1e6:.3f}ms")
        with self.accelerator.main_process_first():
            self.logger.info("Loading checkpoint...")
            start = time.monotonic_ns()
            # Prefer a "checkpoint" subfolder when present.
            if os.path.isdir(args.vocoder_dir):
                if os.path.isdir(os.path.join(args.vocoder_dir, "checkpoint")):
                    self._load_model(os.path.join(args.vocoder_dir, "checkpoint"))
                else:
                    self._load_model(os.path.join(args.vocoder_dir))
            else:
                self._load_model(os.path.join(args.vocoder_dir))
            end = time.monotonic_ns()
            self.logger.info(f"Loading checkpoint done in {(end - start) / 1e6:.3f}ms")
        self.model.eval()
        self.accelerator.wait_for_everyone()

    def _build_tmp_dataset_from_feature(self):
        """Create a temporary "tmp" dataset from pre-extracted feature folders.

        Uses the .npy files in ``<feature_folder>/mels`` to enumerate
        utterances, then copies every feature subfolder into the processed
        directory.
        """
        if os.path.exists(os.path.join(self.cfg.preprocess.processed_dir, "tmp")):
            shutil.rmtree(os.path.join(self.cfg.preprocess.processed_dir, "tmp"))
        utts = []
        mels = glob(os.path.join(self.args.feature_folder, "mels", "*.npy"))
        for i, mel in enumerate(mels):
            uid = mel.split("/")[-1].split(".")[0]
            utt = {"Dataset": "tmp", "Uid": uid, "index": i}
            utts.append(utt)
        os.makedirs(os.path.join(self.cfg.preprocess.processed_dir, "tmp"))
        with open(
            os.path.join(self.cfg.preprocess.processed_dir, "tmp", "test.json"), "w"
        ) as f:
            json.dump(utts, f)
        meta_info = {"dataset": "tmp", "test": {"size": len(utts)}}
        with open(
            os.path.join(self.cfg.preprocess.processed_dir, "tmp", "meta_info.json"),
            "w",
        ) as f:
            json.dump(meta_info, f)
        # Copy each feature subdirectory (skip plain files).
        features = glob(os.path.join(self.args.feature_folder, "*"))
        for feature in features:
            feature_name = feature.split("/")[-1]
            if os.path.isfile(feature):
                continue
            shutil.copytree(
                os.path.join(self.args.feature_folder, feature_name),
                os.path.join(self.cfg.preprocess.processed_dir, "tmp", feature_name),
            )

    def _build_tmp_dataset_from_audio(self):
        """Create a temporary "tmp" dataset directly from raw audio files,
        extracting acoustic features on the fly.
        """
        if os.path.exists(os.path.join(self.cfg.preprocess.processed_dir, "tmp")):
            shutil.rmtree(os.path.join(self.cfg.preprocess.processed_dir, "tmp"))
        utts = []
        audios = glob(os.path.join(self.args.audio_folder, "*"))
        for i, audio in enumerate(audios):
            uid = audio.split("/")[-1].split(".")[0]
            utt = {"Dataset": "tmp", "Uid": uid, "index": i, "Path": audio}
            utts.append(utt)
        os.makedirs(os.path.join(self.cfg.preprocess.processed_dir, "tmp"))
        with open(
            os.path.join(self.cfg.preprocess.processed_dir, "tmp", "test.json"), "w"
        ) as f:
            json.dump(utts, f)
        meta_info = {"dataset": "tmp", "test": {"size": len(utts)}}
        with open(
            os.path.join(self.cfg.preprocess.processed_dir, "tmp", "meta_info.json"),
            "w",
        ) as f:
            json.dump(meta_info, f)
        from processors import acoustic_extractor

        acoustic_extractor.extract_utt_acoustic_features_serial(
            utts, os.path.join(self.cfg.preprocess.processed_dir, "tmp"), self.cfg
        )

    def _build_test_dataset(self):
        """Return the (Dataset, Collator) classes used for inference."""
        return VocoderDataset, VocoderCollator

    def _build_model(self):
        """Instantiate the generator named by ``cfg.model.generator``."""
        model = _vocoders[self.cfg.model.generator](self.cfg)
        return model

    def _build_dataloader(self):
        """Build dataloader which merges a series of datasets."""
        Dataset, Collator = self._build_test_dataset()
        datasets_list = []
        for dataset in self.cfg.dataset:
            subdataset = Dataset(self.cfg, dataset, is_valid=True)
            datasets_list.append(subdataset)
        test_dataset = VocoderConcatDataset(datasets_list, full_audio_inference=False)
        test_collate = Collator(self.cfg)
        # Don't request more samples per batch than the dataset holds.
        test_batch_size = min(self.cfg.inference.batch_size, len(test_dataset))
        test_dataloader = DataLoader(
            test_dataset,
            collate_fn=test_collate,
            num_workers=1,
            batch_size=test_batch_size,
            shuffle=False,
        )
        self.test_batch_size = test_batch_size
        self.test_dataset = test_dataset
        return test_dataloader

    def _load_model(self, checkpoint_dir, from_multi_gpu=False):
        """Load model from checkpoint. If a folder is given, it will
        load the latest checkpoint in checkpoint_dir. If a path is given
        it will load the checkpoint specified by checkpoint_path.
        **Only use this method after** ``accelerator.prepare()``.
        """
        if os.path.isdir(checkpoint_dir):
            if "epoch" in checkpoint_dir and "step" in checkpoint_dir:
                checkpoint_path = checkpoint_dir
            else:
                # Load the latest accelerator state dicts
                ls = [
                    str(i)
                    for i in Path(checkpoint_dir).glob("*")
                    if not "audio" in str(i)
                ]
                ls.sort(
                    key=lambda x: int(x.split("/")[-1].split("_")[0].split("-")[-1]),
                    reverse=True,
                )
                checkpoint_path = ls[0]
            accelerate.load_checkpoint_and_dispatch(
                self.accelerator.unwrap_model(self.model),
                os.path.join(checkpoint_path, "pytorch_model.bin"),
            )
            return str(checkpoint_path)
        else:
            # Load old .pt checkpoints
            if self.cfg.model.generator in [
                "bigvgan",
                "hifigan",
                "melgan",
                "nsfhifigan",
            ]:
                ckpt = torch.load(
                    checkpoint_dir,
                    map_location=(
                        torch.device("cuda")
                        if torch.cuda.is_available()
                        else torch.device("cpu")
                    ),
                )
                if from_multi_gpu:
                    # Strip DataParallel/DDP "module." prefixes and keep only
                    # keys whose shapes match the current model.
                    pretrained_generator_dict = ckpt["generator_state_dict"]
                    generator_dict = self.model.state_dict()
                    new_generator_dict = {
                        k.split("module.")[-1]: v
                        for k, v in pretrained_generator_dict.items()
                        if (
                            k.split("module.")[-1] in generator_dict
                            and v.shape == generator_dict[k.split("module.")[-1]].shape
                        )
                    }
                    generator_dict.update(new_generator_dict)
                    self.model.load_state_dict(generator_dict)
                else:
                    self.model.load_state_dict(ckpt["generator_state_dict"])
            else:
                self.model.load_state_dict(torch.load(checkpoint_dir)["state_dict"])
            return str(checkpoint_dir)

    def inference(self):
        """Inference via batches"""
        for i, batch in tqdm(enumerate(self.test_dataloader)):
            if self.cfg.preprocess.use_frame_pitch:
                audio_pred = _vocoder_forward_funcs[self.cfg.model.generator](
                    self.cfg,
                    self.model,
                    batch["mel"].transpose(-1, -2),
                    f0s=batch["frame_pitch"].float(),
                    device=next(self.model.parameters()).device,
                )
            else:
                audio_pred = _vocoder_forward_funcs[self.cfg.model.generator](
                    self.cfg,
                    self.model,
                    batch["mel"].transpose(-1, -2),
                    device=next(self.model.parameters()).device,
                )
            # Split batched output back into per-utterance audio.
            audio_ls = audio_pred.chunk(self.test_batch_size)
            audio_gt_ls = batch["audio"].cpu().chunk(self.test_batch_size)
            length_ls = batch["target_len"].cpu().chunk(self.test_batch_size)
            j = 0
            for it, it_gt, l in zip(audio_ls, audio_gt_ls, length_ls):
                l = l.item()
                # Trim padding: target_len is in frames, so scale by hop size.
                it = it.squeeze(0).squeeze(0)[: l * self.cfg.preprocess.hop_size]
                it_gt = it_gt.squeeze(0)[: l * self.cfg.preprocess.hop_size]
                uid = self.test_dataset.metadata[i * self.test_batch_size + j]["Uid"]
                save_audio(
                    os.path.join(self.args.output_dir, "pred", "{}.wav").format(uid),
                    it,
                    self.cfg.preprocess.sample_rate,
                )
                save_audio(
                    os.path.join(self.args.output_dir, "gt", "{}.wav").format(uid),
                    it_gt,
                    self.cfg.preprocess.sample_rate,
                )
                j += 1
        # Clean up the temporary dataset (if any was created).
        if os.path.exists(os.path.join(self.cfg.preprocess.processed_dir, "tmp")):
            shutil.rmtree(os.path.join(self.cfg.preprocess.processed_dir, "tmp"))

    def _set_random_seed(self, seed):
        """Set random seed for all possible random modules."""
        random.seed(seed)
        np.random.seed(seed)
        torch.random.manual_seed(seed)

    def _count_parameters(self, model):
        """Total number of parameters in ``model``."""
        return sum(p.numel() for p in model.parameters())

    def _dump_cfg(self, path):
        """Serialize the active configuration to ``path`` as JSON5."""
        os.makedirs(os.path.dirname(path), exist_ok=True)
        json5.dump(
            self.cfg,
            open(path, "w"),
            indent=4,
            sort_keys=True,
            ensure_ascii=False,
            quote_keys=True,
        )
def build_inference(args, cfg, infer_type="infer_from_dataset"):
    """Create the inference pipeline matching ``cfg.model_type``.

    Raises KeyError for unsupported model types.
    """
    # Both supported model types currently share the same pipeline class, but
    # the lookup keeps unknown model types failing loudly.
    inference_classes = {
        "GANVocoder": VocoderInference,
        "DiffusionVocoder": VocoderInference,
    }
    return inference_classes[cfg.model_type](args, cfg, infer_type)
17,512 | import argparse
import os
import torch
from models.vocoders.vocoder_inference import VocoderInference
from utils.util import load_config
def cuda_relevant(deterministic=False):
    """Configure global CUDA/cuDNN backend flags for inference.

    Frees cached GPU memory, enables TF32 kernels, and toggles
    deterministic execution (reproducibility) vs. cuDNN autotuning (speed).
    """
    torch.cuda.empty_cache()
    cudnn = torch.backends.cudnn
    # TF32 on Ampere and above
    torch.backends.cuda.matmul.allow_tf32 = True
    cudnn.enabled = True
    cudnn.allow_tf32 = True
    # Deterministic mode disables benchmark autotuning and vice versa.
    cudnn.deterministic = deterministic
    cudnn.benchmark = not deterministic
    torch.use_deterministic_algorithms(deterministic)
17,513 | import argparse
import os
import torch
from models.vocoders.vocoder_inference import VocoderInference
from utils.util import load_config
The provided code snippet includes the necessary dependencies for implementing the `build_parser` function. Write a Python function `def build_parser()` to solve the following problem:
Build the argument parser for inference.py; anything else should be put in an extra config YAML file.
Here is the function:
def build_parser():
    r"""Build argument parser for inference.py.

    Anything else should be put in an extra config YAML file.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--config",
        type=str,
        required=True,
        help="JSON/YAML file for configurations.",
    )
    parser.add_argument(
        "--infer_mode",
        type=str,
        # Bug fix: argparse's `required` expects a bool; the original passed
        # None. The option remains optional and defaults to None.
        required=False,
        default=None,
        help="Inference mode.",
    )
    parser.add_argument(
        "--infer_datasets",
        nargs="+",
        default=None,
        help="Dataset names to run inference on.",
    )
    parser.add_argument(
        "--feature_folder",
        type=str,
        default=None,
        help="Folder containing pre-extracted features.",
    )
    parser.add_argument(
        "--audio_folder",
        type=str,
        default=None,
        help="Folder containing input audio files.",
    )
    parser.add_argument(
        "--vocoder_dir",
        type=str,
        required=True,
        help="Vocoder checkpoint directory. Searching behavior is the same as "
        "the acoustics one.",
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default="result",
        help="Output directory. Default: ./result",
    )
    parser.add_argument(
        "--log_level",
        type=str,
        default="warning",
        help="Logging level. Default: warning",
    )
    parser.add_argument(
        "--keep_cache",
        action="store_true",
        default=False,
        help="Keep cache files. Only applicable to inference from files.",
    )
    return parser
17,514 | import faulthandler
import os
import argparse
import json
import pyworld as pw
from multiprocessing import cpu_count
from utils.util import load_config
from preprocessors.processor import preprocess_dataset, prepare_align
from preprocessors.metadata import cal_metadata
from processors import acoustic_extractor, content_extractor, data_augment
def extract_acoustic_features(dataset, output_path, cfg, n_workers=1):
    """Extract acoustic features for every utterance of *dataset*.

    Args:
        dataset (str): name of dataset, e.g. opencpop
        output_path (str): directory that stores train, test and feature files of datasets
        cfg (dict): dictionary that stores configurations
        n_workers (int, optional): num of processes to extract features in parallel. Defaults to 1.
    """
    dataset_output = os.path.join(output_path, dataset)
    # Evaluation-only datasets carry no training split.
    splits = ["test"] if "eval" in dataset else ["train", "test"]
    metadata = []
    for split in splits:
        with open(os.path.join(dataset_output, "{}.json".format(split)), "r") as f:
            metadata.extend(json.load(f))
    acoustic_extractor.extract_utt_acoustic_features_serial(
        metadata, dataset_output, cfg
    )
def preprocess_dataset(
    dataset, dataset_path, output_path, cfg, task_type, is_custom_dataset=False
):
    """Call specific function to handle specific dataset

    Args:
        dataset (str): name of a dataset, e.g. opencpop, m4singer
        dataset_path (str): path to dataset
        output_path (str): path to store preprocessing result files
        cfg (dict): preprocessing configuration (the ``preprocess`` sub-config)
        task_type (str): task name, e.g. "svc"
        is_custom_dataset (bool, optional): whether *dataset* is user-provided
            rather than built-in. Defaults to False.
    """
    if is_custom_dataset:
        if task_type == "svc":
            customsvcdataset.main(output_path, dataset_path, dataset_name=dataset)
        else:
            # Bug fix: the original formatted this message with cfg.task_type,
            # but `cfg` here is the preprocess sub-config; use the task_type
            # parameter instead.
            raise NotImplementedError(
                "Custom dataset for {} task not implemented!".format(task_type)
            )
        # Bug fix: custom datasets are fully handled above; return so a custom
        # dataset whose name collides with a built-in prefix is not processed
        # twice by the dispatch chain below.
        return
    # NOTE(review): this regex matches "opencpo" plus zero-or-more "p"; it is
    # presumably meant as a prefix check for "opencpop*" dataset variants —
    # confirm against the dataset naming convention before tightening it.
    if re.match("opencpop*", dataset):
        opencpop.main(dataset, output_path, dataset_path)
    if dataset == "m4singer":
        m4singer.main(output_path, dataset_path)
    if dataset == "svcc":
        svcc.main(output_path, dataset_path)
    if dataset == "pjs":
        pjs.main(output_path, dataset_path)
    if dataset == "popbutfy":
        popbutfy.main(output_path, dataset_path)
    if dataset == "opensinger":
        opensinger.main(output_path, dataset_path)
    if dataset == "popcs":
        popcs.main(output_path, dataset_path)
    if dataset == "kising":
        kising.main(output_path, dataset_path)
    if dataset == "csd":
        csd.main(output_path, dataset_path)
    if dataset == "opera":
        opera.main(output_path, dataset_path)
    if dataset == "nus48e":
        nus48e.main(output_path, dataset_path)
    if dataset == "vctk":
        vctk.main(output_path, dataset_path)
    if dataset == "svcceval":
        svcceval.main(output_path, dataset_path)
    if dataset == "libritts":
        libritts.main(output_path, dataset_path)
    if dataset == "lijian":
        lijian.main(output_path, dataset_path)
    if dataset == "cdmusiceval":
        cdmusiceval.main(output_path, dataset_path)
    if dataset == "LJSpeech":
        ljspeech.main(output_path, dataset_path, cfg)
    if dataset == "ljspeech":
        ljspeech_vocoder.main(output_path, dataset_path)
    if dataset == "coco":
        coco.main(output_path, dataset_path)
    if dataset == "cocoeval":
        cocoeval.main(output_path, dataset_path)
    if dataset == "vocalist":
        vocalist.main(output_path, dataset_path)
    if dataset == "librilight":
        librilight.main(output_path, dataset_path, cfg)
    if dataset == "hifitts":
        hifitts.main(output_path, dataset_path)
def cal_metadata(cfg, dataset_types=("train", "test")):
    """
    Dump metadata (singers.json, meta_info.json, utt2singer) for singer dataset or multi-datasets.

    Args:
        cfg: configuration object; reads ``cfg.dataset`` and
            ``cfg.preprocess.{processed_dir,spk2id,utt2spk}``.
        dataset_types: metadata splits to process. Changed from a mutable
            list default to a tuple (read-only here, so behavior-identical).
    """
    from collections import Counter

    datasets = cfg.dataset
    print("-" * 10)
    print("Preparing metadata...")
    print("Including: \n{}\n".format("\n".join(datasets)))

    datasets.sort()

    for dataset in tqdm(datasets):
        save_dir = os.path.join(cfg.preprocess.processed_dir, dataset)
        assert os.path.exists(save_dir)

        # 'train.json' and 'test.json' and 'valid.json' of target dataset
        meta_info = dict()
        utterances_dict = dict()
        all_utterances = list()
        duration = dict()
        total_duration = 0.0
        for dataset_type in dataset_types:
            metadata = os.path.join(save_dir, "{}.json".format(dataset_type))

            # Sort the metadata as the duration order
            with open(metadata, "r", encoding="utf-8") as f:
                utterances = json.load(f)
            utterances = sorted(utterances, key=lambda x: x["Duration"])
            utterances_dict[dataset_type] = utterances
            all_utterances.extend(utterances)

            # Write back the sorted metadata
            with open(metadata, "w") as f:
                json.dump(utterances, f, indent=4, ensure_ascii=False)

            # Get the total duration and singer names for train and test utterances
            duration[dataset_type] = sum(utt["Duration"] for utt in utterances)
            total_duration += duration[dataset_type]

        # Paths of metadata needed to be generated
        singer_dict_file = os.path.join(save_dir, cfg.preprocess.spk2id)
        utt2singer_file = os.path.join(save_dir, cfg.preprocess.utt2spk)

        singer_names = set(
            f"{replace_augment_name(utt['Dataset'])}_{utt['Singer']}"
            for utt in all_utterances
        )

        # Write the utt2singer file and sort the singer names
        with open(utt2singer_file, "w", encoding="utf-8") as f:
            for utt in all_utterances:
                f.write(
                    f"{utt['Dataset']}_{utt['Uid']}\t{replace_augment_name(utt['Dataset'])}_{utt['Singer']}\n"
                )
        singer_names = sorted(singer_names)
        singer_lut = {name: i for i, name in enumerate(singer_names)}

        # dump singers.json
        with open(singer_dict_file, "w", encoding="utf-8") as f:
            json.dump(singer_lut, f, indent=4, ensure_ascii=False)

        meta_info = {
            "dataset": dataset,
            "statistics": {
                "size": len(all_utterances),
                "hours": round(total_duration / 3600, 4),
            },
        }

        for dataset_type in dataset_types:
            meta_info[dataset_type] = {
                "size": len(utterances_dict[dataset_type]),
                "hours": round(duration[dataset_type] / 3600, 4),
            }

        meta_info["singers"] = {"size": len(singer_lut)}

        # Use Counter to count the minutes for each singer
        total_singer2mins = Counter()
        training_singer2mins = Counter()
        for dataset_type in dataset_types:
            for utt in utterances_dict[dataset_type]:
                k = f"{replace_augment_name(utt['Dataset'])}_{utt['Singer']}"
                if dataset_type == "train":
                    training_singer2mins[k] += utt["Duration"] / 60
                total_singer2mins[k] += utt["Duration"] / 60

        training_singer2mins = dict(
            sorted(training_singer2mins.items(), key=lambda x: x[1], reverse=True)
        )
        training_singer2mins = {k: round(v, 2) for k, v in training_singer2mins.items()}
        meta_info["singers"]["training_minutes"] = training_singer2mins

        total_singer2mins = dict(
            sorted(total_singer2mins.items(), key=lambda x: x[1], reverse=True)
        )
        total_singer2mins = {k: round(v, 2) for k, v in total_singer2mins.items()}
        meta_info["singers"]["minutes"] = total_singer2mins

        with open(os.path.join(save_dir, "meta_info.json"), "w") as f:
            json.dump(meta_info, f, indent=4, ensure_ascii=False)

        # Bug fix: the loop variable was named `min`, shadowing the builtin.
        for singer, mins in training_singer2mins.items():
            print(f"Speaker/Singer {singer}: {mins} mins for training")

    print("-" * 10, "\n")
The provided code snippet includes necessary dependencies for implementing the `preprocess` function. Write a Python function `def preprocess(cfg, args)` to solve the following problem:
Preprocess raw data of single or multiple datasets (in cfg.dataset). Args: cfg (dict): dictionary that stores configurations; args (ArgumentParser): specifies the configuration file and num_workers.
Here is the function:
def preprocess(cfg, args):
    """Preprocess raw data of single or multiple datasets (in cfg.dataset)

    Args:
        cfg (dict): dictionary that stores configurations
        args (ArgumentParser): specify the configuration file and num_workers
    """
    # Specify the output root path to save the processed data
    output_path = cfg.preprocess.processed_dir
    os.makedirs(output_path, exist_ok=True)

    ## Split train and test sets
    for dataset in cfg.dataset:
        print("Preprocess {}...".format(dataset))
        preprocess_dataset(
            dataset,
            cfg.dataset_path[dataset],
            output_path,
            cfg.preprocess,
            cfg.task_type,
            is_custom_dataset=dataset in cfg.use_custom_dataset,
        )

    # Data augmentation: create new wav files with pitch shift, formant shift, equalizer, time stretch
    try:
        assert isinstance(
            cfg.preprocess.data_augment, list
        ), "Please provide a list of datasets need to be augmented."
        if len(cfg.preprocess.data_augment) > 0:
            new_datasets_list = []
            for dataset in cfg.preprocess.data_augment:
                new_datasets = data_augment.augment_dataset(cfg, dataset)
                new_datasets_list.extend(new_datasets)
            cfg.dataset.extend(new_datasets_list)
            print("Augmentation datasets: ", cfg.dataset)
    except (AttributeError, AssertionError):
        # data_augment is absent or malformed in the config: treat as "no
        # augmentation". (The original used a bare `except:`, which also
        # silently hid real errors raised inside augment_dataset.)
        print("No Data Augmentation.")

    # Dump metadata of datasets (singers, train/test durations, etc.)
    cal_metadata(cfg)

    ## Prepare the acoustic features
    for dataset in cfg.dataset:
        # Skip augmented datasets which do not need to extract acoustic features
        # We will copy acoustic features from the original dataset later
        # (Bug fix: the original tested `"equalizer" in dataset in dataset`,
        # a chained comparison that only worked by accident because every
        # string contains itself.)
        if (
            "pitch_shift" in dataset
            or "formant_shift" in dataset
            or "equalizer" in dataset
        ):
            continue
        print(
            "Extracting acoustic features for {} using {} workers ...".format(
                dataset, args.num_workers
            )
        )
        extract_acoustic_features(dataset, output_path, cfg, args.num_workers)
        # Calculate the statistics of acoustic features
        if cfg.preprocess.mel_min_max_norm:
            acoustic_extractor.cal_mel_min_max(dataset, output_path, cfg)

    # Copy acoustic features for augmented datasets by creating soft-links
    for dataset in cfg.dataset:
        if "pitch_shift" in dataset:
            src_dataset = dataset.replace("_pitch_shift", "")
            src_dataset_dir = os.path.join(output_path, src_dataset)
        elif "formant_shift" in dataset:
            src_dataset = dataset.replace("_formant_shift", "")
            src_dataset_dir = os.path.join(output_path, src_dataset)
        elif "equalizer" in dataset:
            src_dataset = dataset.replace("_equalizer", "")
            src_dataset_dir = os.path.join(output_path, src_dataset)
        else:
            continue
        dataset_dir = os.path.join(output_path, dataset)
        metadata = []
        for split in ["train", "test"] if not "eval" in dataset else ["test"]:
            metadata_file_path = os.path.join(src_dataset_dir, "{}.json".format(split))
            with open(metadata_file_path, "r") as f:
                metadata.extend(json.load(f))
        print("Copying acoustic features for {}...".format(dataset))
        acoustic_extractor.copy_acoustic_features(
            metadata, dataset_dir, src_dataset_dir, cfg
        )
        if cfg.preprocess.mel_min_max_norm:
            acoustic_extractor.cal_mel_min_max(dataset, output_path, cfg)

        if cfg.preprocess.extract_pitch:
            acoustic_extractor.cal_pitch_statistics(dataset, output_path, cfg)
17,515 | import argparse
import os
import torch
from models.tta.autoencoder.autoencoder_trainer import AutoencoderKLTrainer
from models.tta.ldm.audioldm_trainer import AudioLDMTrainer
from utils.util import load_config
class AutoencoderKLTrainer(BaseTrainer):
    """Trainer for the AutoencoderKL (VAE) stage of the latent-diffusion TTA model.

    Generator (autoencoder) and discriminator updates alternate per global
    step: even steps optimize the autoencoder, odd steps the discriminator.
    """

    def __init__(self, args, cfg):
        BaseTrainer.__init__(self, args, cfg)
        self.cfg = cfg
        self.save_config_file()

    def build_dataset(self):
        # Returns (dataset class, collator class) consumed by build_data_loader().
        return AutoencoderKLDataset, AutoencoderKLCollator

    def build_optimizer(self):
        # Two independent AdamW optimizers sharing the same hyper-parameters:
        # one for the autoencoder, one for the criterion's discriminator.
        opt_ae = torch.optim.AdamW(self.model.parameters(), **self.cfg.train.adam)
        opt_disc = torch.optim.AdamW(
            self.criterion.discriminator.parameters(), **self.cfg.train.adam
        )
        optimizer = {"opt_ae": opt_ae, "opt_disc": opt_disc}
        return optimizer

    def build_data_loader(self):
        """Build train/valid DataLoaders over the concatenation of all configured datasets."""
        Dataset, Collator = self.build_dataset()
        # build dataset instance for each dataset and combine them by ConcatDataset
        datasets_list = []
        for dataset in self.cfg.dataset:
            subdataset = Dataset(self.cfg, dataset, is_valid=False)
            datasets_list.append(subdataset)
        train_dataset = ConcatDataset(datasets_list)
        train_collate = Collator(self.cfg)
        # use batch_sampler argument instead of (sampler, shuffle, drop_last, batch_size)
        train_loader = DataLoader(
            train_dataset,
            collate_fn=train_collate,
            num_workers=self.args.num_workers,
            batch_size=self.cfg.train.batch_size,
            pin_memory=False,
        )
        # Only the rank-0 process (or non-DDP runs) builds the validation loader.
        if not self.cfg.train.ddp or self.args.local_rank == 0:
            datasets_list = []
            for dataset in self.cfg.dataset:
                subdataset = Dataset(self.cfg, dataset, is_valid=True)
                datasets_list.append(subdataset)
            valid_dataset = ConcatDataset(datasets_list)
            valid_collate = Collator(self.cfg)
            valid_loader = DataLoader(
                valid_dataset,
                collate_fn=valid_collate,
                num_workers=1,
                batch_size=self.cfg.train.batch_size,
            )
        else:
            raise NotImplementedError("DDP is not supported yet.")
        # valid_loader = None
        data_loader = {"train": train_loader, "valid": valid_loader}
        return data_loader

    # TODO: check it...
    def build_scheduler(self):
        # No LR scheduler is currently used for this trainer.
        return None
        # return ReduceLROnPlateau(self.optimizer["opt_ae"], **self.cfg.train.lronPlateau)

    def write_summary(self, losses, stats):
        # Log each training loss to TensorBoard at the current step.
        for key, value in losses.items():
            self.sw.add_scalar(key, value, self.step)

    def write_valid_summary(self, losses, stats):
        # Log each validation loss to TensorBoard at the current step.
        for key, value in losses.items():
            self.sw.add_scalar(key, value, self.step)

    def build_criterion(self):
        # VAE reconstruction/KL loss combined with an adversarial discriminator.
        return AutoencoderLossWithDiscriminator(self.cfg.model.loss)

    def get_state_dict(self):
        """Assemble the checkpoint payload (model, both optimizers, progress counters)."""
        if self.scheduler != None:
            state_dict = {
                "model": self.model.state_dict(),
                "optimizer_ae": self.optimizer["opt_ae"].state_dict(),
                "optimizer_disc": self.optimizer["opt_disc"].state_dict(),
                "scheduler": self.scheduler.state_dict(),
                "step": self.step,
                "epoch": self.epoch,
                "batch_size": self.cfg.train.batch_size,
            }
        else:
            state_dict = {
                "model": self.model.state_dict(),
                "optimizer_ae": self.optimizer["opt_ae"].state_dict(),
                "optimizer_disc": self.optimizer["opt_disc"].state_dict(),
                "step": self.step,
                "epoch": self.epoch,
                "batch_size": self.cfg.train.batch_size,
            }
        return state_dict

    def load_model(self, checkpoint):
        """Restore model, optimizers, progress counters (and scheduler if any) from a checkpoint dict."""
        self.step = checkpoint["step"]
        self.epoch = checkpoint["epoch"]
        self.model.load_state_dict(checkpoint["model"])
        self.optimizer["opt_ae"].load_state_dict(checkpoint["optimizer_ae"])
        self.optimizer["opt_disc"].load_state_dict(checkpoint["optimizer_disc"])
        if self.scheduler != None:
            self.scheduler.load_state_dict(checkpoint["scheduler"])

    def build_model(self):
        self.model = AutoencoderKL(self.cfg.model.autoencoderkl)
        return self.model

    # TODO: train step
    def train_step(self, data):
        """Run one optimization step.

        Even global steps back-propagate the autoencoder loss ("loss");
        odd steps back-propagate the discriminator loss ("d_loss").
        Returns (per-loss scalars, stats dict, total loss scalar).
        """
        global_step = self.step
        optimizer_idx = global_step % 2

        train_losses = {}
        total_loss = 0
        train_states = {}

        inputs = data["melspec"].unsqueeze(1)  # (B, 80, T) -> (B, 1, 80, T)
        reconstructions, posterior = self.model(inputs)
        # train_stats.update(stat)

        train_losses = self.criterion(
            inputs=inputs,
            reconstructions=reconstructions,
            posteriors=posterior,
            optimizer_idx=optimizer_idx,
            global_step=global_step,
            last_layer=self.model.get_last_layer(),
            split="train",
        )

        if optimizer_idx == 0:
            total_loss = train_losses["loss"]
            self.optimizer["opt_ae"].zero_grad()
            total_loss.backward()
            self.optimizer["opt_ae"].step()

        else:
            total_loss = train_losses["d_loss"]
            self.optimizer["opt_disc"].zero_grad()
            total_loss.backward()
            self.optimizer["opt_disc"].step()

        # Detach tensors to plain floats for logging.
        for item in train_losses:
            train_losses[item] = train_losses[item].item()

        return train_losses, train_states, total_loss.item()

    # TODO: eval step
    def eval_step(self, data, index):
        """Evaluate one batch using L1 reconstruction error only (no GAN terms)."""
        valid_loss = {}
        total_valid_loss = 0
        valid_stats = {}

        inputs = data["melspec"].unsqueeze(1)  # (B, 80, T) -> (B, 1, 80, T)
        reconstructions, posterior = self.model(inputs)
        loss = F.l1_loss(inputs, reconstructions)
        valid_loss["loss"] = loss

        total_valid_loss += loss

        for item in valid_loss:
            valid_loss[item] = valid_loss[item].item()

        return valid_loss, valid_stats, total_valid_loss.item()
class AudioLDMTrainer(BaseTrainer):
    """Trainer for the AudioLDM latent diffusion model (text-to-audio).

    The mel VAE (AutoencoderKL) and the T5 text encoder are loaded frozen;
    only the diffusion U-Net is optimized, with an MSE loss on the predicted
    noise.
    """

    def __init__(self, args, cfg):
        BaseTrainer.__init__(self, args, cfg)
        self.cfg = cfg

        self.build_autoencoderkl()
        self.build_textencoder()
        # NOTE(review): "nosie" is a typo for "noise", but the attribute name
        # is used consistently below, so it is kept for compatibility.
        self.nosie_scheduler = self.build_noise_scheduler()

        self.save_config_file()

    def build_autoencoderkl(self):
        """Load the pretrained mel VAE from cfg.model.autoencoder_path and freeze it."""
        self.autoencoderkl = AutoencoderKL(self.cfg.model.autoencoderkl)
        self.autoencoder_path = self.cfg.model.autoencoder_path
        checkpoint = torch.load(self.autoencoder_path, map_location="cpu")
        self.autoencoderkl.load_state_dict(checkpoint["model"])
        self.autoencoderkl.cuda(self.args.local_rank)
        self.autoencoderkl.requires_grad_(requires_grad=False)
        self.autoencoderkl.eval()

    def build_textencoder(self):
        """Load a frozen pretrained T5-base encoder for text conditioning."""
        self.text_encoder = T5EncoderModel.from_pretrained("t5-base")
        self.text_encoder.cuda(self.args.local_rank)
        self.text_encoder.requires_grad_(requires_grad=False)
        self.text_encoder.eval()

    def build_noise_scheduler(self):
        """Construct the DDPM noise scheduler from cfg.model.noise_scheduler."""
        nosie_scheduler = DDPMScheduler(
            num_train_timesteps=self.cfg.model.noise_scheduler.num_train_timesteps,
            beta_start=self.cfg.model.noise_scheduler.beta_start,
            beta_end=self.cfg.model.noise_scheduler.beta_end,
            beta_schedule=self.cfg.model.noise_scheduler.beta_schedule,
            clip_sample=self.cfg.model.noise_scheduler.clip_sample,
            # steps_offset=self.cfg.model.noise_scheduler.steps_offset,
            # set_alpha_to_one=self.cfg.model.noise_scheduler.set_alpha_to_one,
            # skip_prk_steps=self.cfg.model.noise_scheduler.skip_prk_steps,
            prediction_type=self.cfg.model.noise_scheduler.prediction_type,
        )
        return nosie_scheduler

    def build_dataset(self):
        # Returns (dataset class, collator class) consumed by build_data_loader().
        return AudioLDMDataset, AudioLDMCollator

    def build_data_loader(self):
        """Build train/valid DataLoaders over the concatenation of all configured datasets."""
        Dataset, Collator = self.build_dataset()
        # build dataset instance for each dataset and combine them by ConcatDataset
        datasets_list = []
        for dataset in self.cfg.dataset:
            subdataset = Dataset(self.cfg, dataset, is_valid=False)
            datasets_list.append(subdataset)
        train_dataset = ConcatDataset(datasets_list)
        train_collate = Collator(self.cfg)
        # use batch_sampler argument instead of (sampler, shuffle, drop_last, batch_size)
        train_loader = DataLoader(
            train_dataset,
            collate_fn=train_collate,
            num_workers=self.args.num_workers,
            batch_size=self.cfg.train.batch_size,
            pin_memory=False,
        )
        # Only the rank-0 process (or non-DDP runs) builds the validation loader.
        if not self.cfg.train.ddp or self.args.local_rank == 0:
            datasets_list = []
            for dataset in self.cfg.dataset:
                subdataset = Dataset(self.cfg, dataset, is_valid=True)
                datasets_list.append(subdataset)
            valid_dataset = ConcatDataset(datasets_list)
            valid_collate = Collator(self.cfg)
            valid_loader = DataLoader(
                valid_dataset,
                collate_fn=valid_collate,
                num_workers=1,
                batch_size=self.cfg.train.batch_size,
            )
        else:
            raise NotImplementedError("DDP is not supported yet.")
        # valid_loader = None
        data_loader = {"train": train_loader, "valid": valid_loader}
        return data_loader

    def build_optimizer(self):
        # Single AdamW optimizer over the diffusion U-Net parameters.
        optimizer = torch.optim.AdamW(self.model.parameters(), **self.cfg.train.adam)
        return optimizer

    # TODO: check it...
    def build_scheduler(self):
        # No LR scheduler is currently used for this trainer.
        return None
        # return ReduceLROnPlateau(self.optimizer["opt_ae"], **self.cfg.train.lronPlateau)

    def write_summary(self, losses, stats):
        # Log each training loss to TensorBoard at the current step.
        for key, value in losses.items():
            self.sw.add_scalar(key, value, self.step)

    def write_valid_summary(self, losses, stats):
        # Log each validation loss to TensorBoard at the current step.
        for key, value in losses.items():
            self.sw.add_scalar(key, value, self.step)

    def build_criterion(self):
        # Standard diffusion objective: mean MSE between predicted and true noise.
        criterion = nn.MSELoss(reduction="mean")
        return criterion

    def get_state_dict(self):
        """Assemble the checkpoint payload (model, optimizer, progress counters)."""
        if self.scheduler != None:
            state_dict = {
                "model": self.model.state_dict(),
                "optimizer": self.optimizer.state_dict(),
                "scheduler": self.scheduler.state_dict(),
                "step": self.step,
                "epoch": self.epoch,
                "batch_size": self.cfg.train.batch_size,
            }
        else:
            state_dict = {
                "model": self.model.state_dict(),
                "optimizer": self.optimizer.state_dict(),
                "step": self.step,
                "epoch": self.epoch,
                "batch_size": self.cfg.train.batch_size,
            }
        return state_dict

    def load_model(self, checkpoint):
        """Restore model, optimizer, progress counters (and scheduler if any) from a checkpoint dict."""
        self.step = checkpoint["step"]
        self.epoch = checkpoint["epoch"]
        self.model.load_state_dict(checkpoint["model"])
        self.optimizer.load_state_dict(checkpoint["optimizer"])
        if self.scheduler != None:
            self.scheduler.load_state_dict(checkpoint["scheduler"])

    def build_model(self):
        self.model = AudioLDM(self.cfg.model.audioldm)
        return self.model

    def mel_to_latent(self, melspec):
        """Encode a mel spectrogram into a sampled VAE latent."""
        posterior = self.autoencoderkl.encode(melspec)
        latent = posterior.sample()  # (B, 4, 5, 78)
        return latent

    def get_text_embedding(self, text_input_ids, text_attention_mask):
        """Run the frozen T5 encoder over tokenized text."""
        text_embedding = self.text_encoder(
            input_ids=text_input_ids, attention_mask=text_attention_mask
        ).last_hidden_state
        return text_embedding  # (B, T, 768)

    def train_step(self, data):
        """One diffusion training step: noise the latent at a random timestep
        and regress the added noise with the U-Net, conditioned on text."""
        train_losses = {}
        total_loss = 0
        train_stats = {}

        melspec = data["melspec"].unsqueeze(1)  # (B, 80, T) -> (B, 1, 80, T)
        latents = self.mel_to_latent(melspec)

        text_embedding = self.get_text_embedding(
            data["text_input_ids"], data["text_attention_mask"]
        )

        noise = torch.randn_like(latents).float()

        bsz = latents.shape[0]
        # One uniformly random diffusion timestep per sample in the batch.
        timesteps = torch.randint(
            0,
            self.cfg.model.noise_scheduler.num_train_timesteps,
            (bsz,),
            device=latents.device,
        )
        timesteps = timesteps.long()

        with torch.no_grad():
            noisy_latents = self.nosie_scheduler.add_noise(latents, noise, timesteps)

        model_pred = self.model(
            noisy_latents, timesteps=timesteps, context=text_embedding
        )

        loss = self.criterion(model_pred, noise)
        train_losses["loss"] = loss
        total_loss += loss

        self.optimizer.zero_grad()
        total_loss.backward()
        self.optimizer.step()

        # Detach tensors to plain floats for logging.
        for item in train_losses:
            train_losses[item] = train_losses[item].item()

        return train_losses, train_stats, total_loss.item()

    # TODO: eval step
    def eval_step(self, data, index):
        """Validation step mirroring train_step without the optimizer update."""
        valid_loss = {}
        total_valid_loss = 0
        valid_stats = {}

        melspec = data["melspec"].unsqueeze(1)  # (B, 80, T) -> (B, 1, 80, T)
        latents = self.mel_to_latent(melspec)

        text_embedding = self.get_text_embedding(
            data["text_input_ids"], data["text_attention_mask"]
        )

        noise = torch.randn_like(latents).float()

        bsz = latents.shape[0]
        timesteps = torch.randint(
            0,
            self.cfg.model.noise_scheduler.num_train_timesteps,
            (bsz,),
            device=latents.device,
        )
        timesteps = timesteps.long()

        noisy_latents = self.nosie_scheduler.add_noise(latents, noise, timesteps)

        model_pred = self.model(noisy_latents, timesteps, text_embedding)

        loss = self.criterion(model_pred, noise)
        valid_loss["loss"] = loss
        total_valid_loss += loss

        for item in valid_loss:
            valid_loss[item] = valid_loss[item].item()

        return valid_loss, valid_stats, total_valid_loss.item()
def build_trainer(args, cfg):
    """Construct the trainer registered for ``cfg.model_type``.

    An unknown model type raises KeyError.
    """
    trainer_by_type = {
        "AutoencoderKL": AutoencoderKLTrainer,
        "AudioLDM": AudioLDMTrainer,
    }
    return trainer_by_type[cfg.model_type](args, cfg)
17,516 | import argparse
from argparse import ArgumentParser
import os
from models.tta.ldm.audioldm_inference import AudioLDMInference
from utils.util import save_config, load_model_config, load_config
import numpy as np
import torch
class AudioLDMInference:
    """Inference wrapper for the AudioLDM text-to-audio model.

    NOTE(review): the method bodies are elided in this excerpt — the defs
    below are signatures only, so this block is not executable as shown.
    Presumably the full implementation lives in
    models/tta/ldm/audioldm_inference.py; confirm against that module.
    """

    def __init__(self, args, cfg):
    def build_autoencoderkl(self):
    def build_textencoder(self):
    def build_vocoder(self):
    def build_model(self):
    def load_state_dict(self):
    def get_text_embedding(self):
    def inference(self):
def build_inference(args, cfg):
    """Look up and construct the inference wrapper for ``cfg.model_type``.

    An unknown model type raises KeyError.
    """
    inference_by_type = {
        "AudioLDM": AudioLDMInference,
    }
    return inference_by_type[cfg.model_type](args, cfg)
17,517 | import argparse
from argparse import ArgumentParser
import os
from models.tta.ldm.audioldm_inference import AudioLDMInference
from utils.util import save_config, load_model_config, load_config
import numpy as np
import torch
def build_parser():
    """Create the command-line argument parser for text-to-audio inference."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--config", type=str, required=True,
        help="JSON/YAML file for configurations.",
    )
    parser.add_argument(
        "--text", type=str, default="Text to be synthesized.",
        help="Text to be synthesized",
    )
    parser.add_argument("--checkpoint_path", type=str)
    parser.add_argument(
        "--vocoder_path", type=str, help="Checkpoint path of the vocoder"
    )
    parser.add_argument(
        "--vocoder_config_path", type=str, help="Config path of the vocoder"
    )
    parser.add_argument(
        "--output_dir", type=str, default=None,
        help="Output dir for saving generated results",
    )
    parser.add_argument(
        "--num_steps", type=int, default=200,
        help="The total number of denosing steps",
    )
    parser.add_argument(
        "--guidance_scale", type=float, default=4.0,
        help="The scale of classifer free guidance",
    )
    parser.add_argument("--local_rank", default=-1, type=int)
    return parser
17,518 | import faulthandler
import os
import argparse
import json
import pyworld as pw
from multiprocessing import cpu_count
from utils.util import load_config
from preprocessors.processor import preprocess_dataset, prepare_align
from preprocessors.metadata import cal_metadata
from processors import acoustic_extractor, content_extractor, data_augment
def extract_acoustic_features(dataset, output_path, cfg, n_workers=1):
    """Extract acoustic features for every utterance of *dataset*.

    Args:
        dataset (str): name of dataset, e.g. opencpop
        output_path (str): directory that stores train, test and feature files of datasets
        cfg (dict): dictionary that stores configurations
        n_workers (int, optional): num of processes to extract features in parallel. Defaults to 1.
    """
    dataset_output = os.path.join(output_path, dataset)
    # Evaluation-only datasets carry no training split.
    splits = ["test"] if "eval" in dataset else ["train", "test"]
    metadata = []
    for split in splits:
        with open(os.path.join(dataset_output, "{}.json".format(split)), "r") as f:
            metadata.extend(json.load(f))
    # A parallel variant (extract_utt_acoustic_features_parallel) exists but
    # is intentionally not used here; extraction runs serially.
    acoustic_extractor.extract_utt_acoustic_features_serial(
        metadata, dataset_output, cfg
    )
def extract_content_features(dataset, output_path, cfg, num_workers=1):
    """Extract content features for every utterance of *dataset*.

    Args:
        dataset (str): name of dataset, e.g. opencpop
        output_path (str): directory that stores train, test and feature files of datasets
        cfg (dict): dictionary that stores configurations
        num_workers (int, optional): dataloader worker count. Defaults to 1.
    """
    dataset_output = os.path.join(output_path, dataset)
    # Evaluation-only datasets carry no training split.
    splits = ["test"] if "eval" in dataset else ["train", "test"]
    metadata = []
    for split in splits:
        with open(os.path.join(dataset_output, "{}.json".format(split)), "r") as f:
            metadata.extend(json.load(f))
    content_extractor.extract_utt_content_features_dataloader(
        cfg, metadata, num_workers
    )
def preprocess_dataset(
    dataset, dataset_path, output_path, cfg, task_type, is_custom_dataset=False
):
    """Call specific function to handle specific dataset

    Args:
        dataset (str): name of a dataset, e.g. opencpop, m4singer
        dataset_path (str): path to dataset
        output_path (str): path to store preprocessing result files
        cfg (dict): preprocessing configuration (the ``preprocess`` sub-config)
        task_type (str): task name, e.g. "svc"
        is_custom_dataset (bool, optional): whether *dataset* is user-provided
            rather than built-in. Defaults to False.
    """
    if is_custom_dataset:
        if task_type == "svc":
            customsvcdataset.main(output_path, dataset_path, dataset_name=dataset)
        else:
            # Bug fix: the original formatted this message with cfg.task_type,
            # but `cfg` here is the preprocess sub-config; use the task_type
            # parameter instead.
            raise NotImplementedError(
                "Custom dataset for {} task not implemented!".format(task_type)
            )
        # Bug fix: custom datasets are fully handled above; return so a custom
        # dataset whose name collides with a built-in prefix is not processed
        # twice by the dispatch chain below.
        return
    # NOTE(review): this regex matches "opencpo" plus zero-or-more "p"; it is
    # presumably meant as a prefix check for "opencpop*" dataset variants —
    # confirm against the dataset naming convention before tightening it.
    if re.match("opencpop*", dataset):
        opencpop.main(dataset, output_path, dataset_path)
    if dataset == "m4singer":
        m4singer.main(output_path, dataset_path)
    if dataset == "svcc":
        svcc.main(output_path, dataset_path)
    if dataset == "pjs":
        pjs.main(output_path, dataset_path)
    if dataset == "popbutfy":
        popbutfy.main(output_path, dataset_path)
    if dataset == "opensinger":
        opensinger.main(output_path, dataset_path)
    if dataset == "popcs":
        popcs.main(output_path, dataset_path)
    if dataset == "kising":
        kising.main(output_path, dataset_path)
    if dataset == "csd":
        csd.main(output_path, dataset_path)
    if dataset == "opera":
        opera.main(output_path, dataset_path)
    if dataset == "nus48e":
        nus48e.main(output_path, dataset_path)
    if dataset == "vctk":
        vctk.main(output_path, dataset_path)
    if dataset == "svcceval":
        svcceval.main(output_path, dataset_path)
    if dataset == "libritts":
        libritts.main(output_path, dataset_path)
    if dataset == "lijian":
        lijian.main(output_path, dataset_path)
    if dataset == "cdmusiceval":
        cdmusiceval.main(output_path, dataset_path)
    if dataset == "LJSpeech":
        ljspeech.main(output_path, dataset_path, cfg)
    if dataset == "ljspeech":
        ljspeech_vocoder.main(output_path, dataset_path)
    if dataset == "coco":
        coco.main(output_path, dataset_path)
    if dataset == "cocoeval":
        cocoeval.main(output_path, dataset_path)
    if dataset == "vocalist":
        vocalist.main(output_path, dataset_path)
    if dataset == "librilight":
        librilight.main(output_path, dataset_path, cfg)
    if dataset == "hifitts":
        hifitts.main(output_path, dataset_path)
def prepare_align(dataset, dataset_path, cfg, output_path):
    """Run forced-alignment preparation for *dataset*.

    Args:
        dataset (str): name of a dataset, e.g. ljspeech
        dataset_path (str): path to dataset
        output_path (str): path to store preprocessing result files

    Only LJSpeech is currently supported; any other dataset is a no-op.
    """
    if dataset != "LJSpeech":
        return
    ljspeech.prepare_align(dataset, dataset_path, cfg, output_path)
def cal_metadata(cfg, dataset_types=("train", "test")):
    """
    Dump metadata (singers.json, meta_info.json, utt2singer) for singer dataset or multi-datasets.

    Args:
        cfg: configuration object; reads ``cfg.dataset`` and
            ``cfg.preprocess.{processed_dir,spk2id,utt2spk}``.
        dataset_types: metadata splits to process. Changed from a mutable
            list default to a tuple (read-only here, so behavior-identical).
    """
    from collections import Counter

    datasets = cfg.dataset
    print("-" * 10)
    print("Preparing metadata...")
    print("Including: \n{}\n".format("\n".join(datasets)))

    datasets.sort()

    for dataset in tqdm(datasets):
        save_dir = os.path.join(cfg.preprocess.processed_dir, dataset)
        assert os.path.exists(save_dir)

        # 'train.json' and 'test.json' and 'valid.json' of target dataset
        meta_info = dict()
        utterances_dict = dict()
        all_utterances = list()
        duration = dict()
        total_duration = 0.0
        for dataset_type in dataset_types:
            metadata = os.path.join(save_dir, "{}.json".format(dataset_type))

            # Sort the metadata as the duration order
            with open(metadata, "r", encoding="utf-8") as f:
                utterances = json.load(f)
            utterances = sorted(utterances, key=lambda x: x["Duration"])
            utterances_dict[dataset_type] = utterances
            all_utterances.extend(utterances)

            # Write back the sorted metadata
            with open(metadata, "w") as f:
                json.dump(utterances, f, indent=4, ensure_ascii=False)

            # Get the total duration and singer names for train and test utterances
            duration[dataset_type] = sum(utt["Duration"] for utt in utterances)
            total_duration += duration[dataset_type]

        # Paths of metadata needed to be generated
        singer_dict_file = os.path.join(save_dir, cfg.preprocess.spk2id)
        utt2singer_file = os.path.join(save_dir, cfg.preprocess.utt2spk)

        singer_names = set(
            f"{replace_augment_name(utt['Dataset'])}_{utt['Singer']}"
            for utt in all_utterances
        )

        # Write the utt2singer file and sort the singer names
        with open(utt2singer_file, "w", encoding="utf-8") as f:
            for utt in all_utterances:
                f.write(
                    f"{utt['Dataset']}_{utt['Uid']}\t{replace_augment_name(utt['Dataset'])}_{utt['Singer']}\n"
                )
        singer_names = sorted(singer_names)
        singer_lut = {name: i for i, name in enumerate(singer_names)}

        # dump singers.json
        with open(singer_dict_file, "w", encoding="utf-8") as f:
            json.dump(singer_lut, f, indent=4, ensure_ascii=False)

        meta_info = {
            "dataset": dataset,
            "statistics": {
                "size": len(all_utterances),
                "hours": round(total_duration / 3600, 4),
            },
        }

        for dataset_type in dataset_types:
            meta_info[dataset_type] = {
                "size": len(utterances_dict[dataset_type]),
                "hours": round(duration[dataset_type] / 3600, 4),
            }

        meta_info["singers"] = {"size": len(singer_lut)}

        # Use Counter to count the minutes for each singer
        total_singer2mins = Counter()
        training_singer2mins = Counter()
        for dataset_type in dataset_types:
            for utt in utterances_dict[dataset_type]:
                k = f"{replace_augment_name(utt['Dataset'])}_{utt['Singer']}"
                if dataset_type == "train":
                    training_singer2mins[k] += utt["Duration"] / 60
                total_singer2mins[k] += utt["Duration"] / 60

        training_singer2mins = dict(
            sorted(training_singer2mins.items(), key=lambda x: x[1], reverse=True)
        )
        training_singer2mins = {k: round(v, 2) for k, v in training_singer2mins.items()}
        meta_info["singers"]["training_minutes"] = training_singer2mins

        total_singer2mins = dict(
            sorted(total_singer2mins.items(), key=lambda x: x[1], reverse=True)
        )
        total_singer2mins = {k: round(v, 2) for k, v in total_singer2mins.items()}
        meta_info["singers"]["minutes"] = total_singer2mins

        with open(os.path.join(save_dir, "meta_info.json"), "w") as f:
            json.dump(meta_info, f, indent=4, ensure_ascii=False)

        # Bug fix: the loop variable was named `min`, shadowing the builtin.
        for singer, mins in training_singer2mins.items():
            print(f"Speaker/Singer {singer}: {mins} mins for training")

    print("-" * 10, "\n")
The provided code snippet includes necessary dependencies for implementing the `preprocess` function. Write a Python function `def preprocess(cfg, args)` to solve the following problem:
Preprocess raw data of single or multiple datasets (in cfg.dataset). Args: cfg (dict): dictionary that stores configurations; args (ArgumentParser): specifies the configuration file and num_workers.
Here is the function:
def preprocess(cfg, args):
    """Preprocess raw data of single or multiple datasets (in cfg.dataset).

    Args:
        cfg (dict): dictionary that stores configurations
        args (ArgumentParser): specify the configuration file and num_workers
    """
    # Specify the output root path to save the processed data
    output_path = cfg.preprocess.processed_dir
    os.makedirs(output_path, exist_ok=True)

    ## Split train and test sets
    for dataset in cfg.dataset:
        print("Preprocess {}...".format(dataset))

        if args.prepare_alignment:
            ## Prepare alignment with MFA
            print("Prepare alignment {}...".format(dataset))
            prepare_align(
                dataset, cfg.dataset_path[dataset], cfg.preprocess, output_path
            )

        preprocess_dataset(
            dataset,
            cfg.dataset_path[dataset],
            output_path,
            cfg.preprocess,
            cfg.task_type,
            is_custom_dataset=dataset in cfg.use_custom_dataset,
        )

    # Data augmentation: create new wav files with pitch shift, formant shift,
    # equalizer, time stretch
    try:
        assert isinstance(
            cfg.preprocess.data_augment, list
        ), "Please provide a list of datasets need to be augmented."
        if len(cfg.preprocess.data_augment) > 0:
            new_datasets_list = []
            for dataset in cfg.preprocess.data_augment:
                new_datasets = data_augment.augment_dataset(cfg, dataset)
                new_datasets_list.extend(new_datasets)
            cfg.dataset.extend(new_datasets_list)
            print("Augmentation datasets: ", cfg.dataset)
    except (AttributeError, AssertionError):
        # cfg.preprocess.data_augment is missing or not a list: augmentation is
        # simply skipped. (The former bare `except:` also hid genuine errors
        # raised inside augment_dataset; those now propagate.)
        print("No Data Augmentation.")

    # Dump metadata of datasets (singers, train/test durations, etc.)
    cal_metadata(cfg)

    ## Prepare the acoustic features
    for dataset in cfg.dataset:
        # Skip augmented datasets which do not need to extract acoustic features
        # We will copy acoustic features from the original dataset later
        # (fixed: the old condition read `"equalizer" in dataset in dataset`,
        # a chained comparison that only worked by accident)
        if (
            "pitch_shift" in dataset
            or "formant_shift" in dataset
            or "equalizer" in dataset
        ):
            continue
        print(
            "Extracting acoustic features for {} using {} workers ...".format(
                dataset, args.num_workers
            )
        )
        extract_acoustic_features(dataset, output_path, cfg, args.num_workers)
        # Calculate the statistics of acoustic features
        if cfg.preprocess.mel_min_max_norm:
            acoustic_extractor.cal_mel_min_max(dataset, output_path, cfg)
        if cfg.preprocess.extract_pitch:
            acoustic_extractor.cal_pitch_statistics(dataset, output_path, cfg)
        if cfg.preprocess.extract_energy:
            acoustic_extractor.cal_energy_statistics(dataset, output_path, cfg)
        if cfg.preprocess.align_mel_duration:
            acoustic_extractor.align_duration_mel(dataset, output_path, cfg)

    # Copy acoustic features for augmented datasets by creating soft-links
    for dataset in cfg.dataset:
        if "pitch_shift" in dataset:
            src_dataset = dataset.replace("_pitch_shift", "")
            src_dataset_dir = os.path.join(output_path, src_dataset)
        elif "formant_shift" in dataset:
            src_dataset = dataset.replace("_formant_shift", "")
            src_dataset_dir = os.path.join(output_path, src_dataset)
        elif "equalizer" in dataset:
            src_dataset = dataset.replace("_equalizer", "")
            src_dataset_dir = os.path.join(output_path, src_dataset)
        else:
            continue
        dataset_dir = os.path.join(output_path, dataset)
        metadata = []
        for split in ["train", "test"] if not "eval" in dataset else ["test"]:
            metadata_file_path = os.path.join(src_dataset_dir, "{}.json".format(split))
            with open(metadata_file_path, "r") as f:
                metadata.extend(json.load(f))
        print("Copying acoustic features for {}...".format(dataset))
        acoustic_extractor.copy_acoustic_features(
            metadata, dataset_dir, src_dataset_dir, cfg
        )
        if cfg.preprocess.mel_min_max_norm:
            acoustic_extractor.cal_mel_min_max(dataset, output_path, cfg)
        if cfg.preprocess.extract_pitch:
            acoustic_extractor.cal_pitch_statistics(dataset, output_path, cfg)

    # Prepare the content features
    for dataset in cfg.dataset:
        print("Extracting content features for {}...".format(dataset))
        extract_content_features(dataset, output_path, cfg, args.num_workers)
17,519 | import os
import sys
import numpy as np
import json
import argparse
import whisper
import torch
from glob import glob
from tqdm import tqdm
from collections import defaultdict
from evaluation.metrics.energy.energy_rmse import extract_energy_rmse
from evaluation.metrics.energy.energy_pearson_coefficients import (
extract_energy_pearson_coeffcients,
)
from evaluation.metrics.f0.f0_pearson_coefficients import extract_fpc
from evaluation.metrics.f0.f0_periodicity_rmse import extract_f0_periodicity_rmse
from evaluation.metrics.f0.f0_rmse import extract_f0rmse
from evaluation.metrics.f0.v_uv_f1 import extract_f1_v_uv
from evaluation.metrics.intelligibility.character_error_rate import extract_cer
from evaluation.metrics.intelligibility.word_error_rate import extract_wer
from evaluation.metrics.similarity.speaker_similarity import extract_similarity
from evaluation.metrics.spectrogram.frechet_distance import extract_fad
from evaluation.metrics.spectrogram.mel_cepstral_distortion import extract_mcd
from evaluation.metrics.spectrogram.multi_resolution_stft_distance import extract_mstft
from evaluation.metrics.spectrogram.pesq import extract_pesq
from evaluation.metrics.spectrogram.scale_invariant_signal_to_distortion_ratio import (
extract_si_sdr,
)
from evaluation.metrics.spectrogram.scale_invariant_signal_to_noise_ratio import (
extract_si_snr,
)
from evaluation.metrics.spectrogram.short_time_objective_intelligibility import (
extract_stoi,
)
# Registry mapping metric names (as requested via `metrics`) to their extractor
# functions. "fad" and "similarity" operate on whole directories; all other
# entries score one (reference, degraded) audio pair per call.
METRIC_FUNC = {
    "energy_rmse": extract_energy_rmse,
    "energy_pc": extract_energy_pearson_coeffcients,
    "fpc": extract_fpc,
    "f0_periodicity_rmse": extract_f0_periodicity_rmse,
    "f0rmse": extract_f0rmse,
    "v_uv_f1": extract_f1_v_uv,
    "cer": extract_cer,
    "wer": extract_wer,
    "similarity": extract_similarity,
    "fad": extract_fad,
    "mcd": extract_mcd,
    "mstft": extract_mstft,
    "pesq": extract_pesq,
    "si_sdr": extract_si_sdr,
    "si_snr": extract_si_snr,
    "stoi": extract_stoi,
}
def calc_metric(
    ref_dir,
    deg_dir,
    dump_dir,
    metrics,
    **kwargs,
):
    """Compute the requested objective metrics over paired wav directories.

    Args:
        ref_dir: directory holding reference (ground-truth) ``*.wav`` files.
        deg_dir: directory holding degraded/synthesized ``*.wav`` files,
            matched to references by file stem (uid).
        dump_dir: directory where ``result.json`` is written.
        metrics: iterable of metric names; each must be a key of METRIC_FUNC.
        **kwargs: forwarded to the individual extractors (e.g.
            ``intelligibility_mode``, ``ltr_path``).
    """
    result = {}  # plain dict is enough; the old defaultdict() had no factory

    for metric in tqdm(metrics):
        # Directory-level metrics are computed in a single call.
        if metric in ["fad", "similarity"]:
            result[metric] = str(METRIC_FUNC[metric](ref_dir, deg_dir, kwargs=kwargs))
            continue

        # Pair each degraded file with its reference by uid.
        audios_ref = []
        audios_deg = []
        files = glob(deg_dir + "/*.wav")
        for file in files:
            audios_deg.append(file)
            uid = file.split("/")[-1].split(".wav")[0]
            file_gt = ref_dir + "/{}.wav".format(uid)
            audios_ref.append(file_gt)

        if metric in ["wer", "cer"] and kwargs["intelligibility_mode"] == "gt_content":
            # Load ground-truth transcriptions ("uid|text" per line) and strip
            # punctuation/spacing so they compare with recognizer output.
            ltr_path = kwargs["ltr_path"]
            tmpltrs = {}
            with open(ltr_path, "r") as f:
                for line in f:
                    paras = line.replace("\n", "").split("|")
                    paras[1] = paras[1].replace(" ", "")
                    paras[1] = paras[1].replace(".", "")
                    paras[1] = paras[1].replace("'", "")
                    paras[1] = paras[1].replace("-", "")
                    paras[1] = paras[1].replace(",", "")
                    paras[1] = paras[1].replace("!", "")
                    paras[1] = paras[1].lower()
                    tmpltrs[paras[0]] = paras[1]
            ltrs = []
            files = glob(ref_dir + "/*.wav")
            for file in files:
                # NOTE(review): lookup key is the basename (includes ".wav")
                # and this glob order may differ from the audios_ref order
                # built above — verify transcripts align with the audio pairs.
                ltrs.append(tmpltrs[os.path.basename(file)])

        if metric in ["v_uv_f1"]:
            # Voiced/unvoiced F1 is pooled across all files before dividing.
            tp_total = 0
            fp_total = 0
            fn_total = 0
            for i in tqdm(range(len(audios_ref))):
                audio_ref = audios_ref[i]
                audio_deg = audios_deg[i]
                tp, fp, fn = METRIC_FUNC[metric](audio_ref, audio_deg, kwargs=kwargs)
                tp_total += tp
                fp_total += fp
                fn_total += fn
            result[metric] = str(tp_total / (tp_total + (fp_total + fn_total) / 2))
        else:
            # Load the ASR model once per metric run — the legacy code
            # reloaded whisper-large inside the per-file loop, which dominated
            # the whole evaluation time.
            model = None
            if metric in ["wer", "cer"]:
                model = whisper.load_model("large")
                if torch.cuda.is_available():
                    model = model.to(torch.device("cuda"))

            scores = []
            for i in tqdm(range(len(audios_ref))):
                audio_ref = audios_ref[i]
                audio_deg = audios_deg[i]

                if metric in ["wer", "cer"]:
                    mode = kwargs["intelligibility_mode"]
                    if mode == "gt_audio":
                        kwargs["audio_ref"] = audio_ref
                        kwargs["audio_deg"] = audio_deg
                        score = METRIC_FUNC[metric](
                            model,
                            kwargs=kwargs,
                        )
                    elif mode == "gt_content":
                        kwargs["content_gt"] = ltrs[i]
                        kwargs["audio_deg"] = audio_deg
                        score = METRIC_FUNC[metric](
                            model,
                            kwargs=kwargs,
                        )
                    else:
                        # Previously `score` stayed unbound (NameError) or kept
                        # a stale value; fail loudly instead.
                        raise ValueError(
                            "Unknown intelligibility_mode: {}".format(mode)
                        )
                else:
                    score = METRIC_FUNC[metric](
                        audio_ref,
                        audio_deg,
                        kwargs=kwargs,
                    )
                if not np.isnan(score):
                    scores.append(score)

            scores = np.array(scores)
            result["{}".format(metric)] = str(np.mean(scores))

    data = json.dumps(result, indent=4)

    with open(os.path.join(dump_dir, "result.json"), "w", newline="\n") as f:
        f.write(data)
17,520 | import argparse
import torch
from models.svc.diffusion.diffusion_trainer import DiffusionTrainer
from models.svc.comosvc.comosvc_trainer import ComoSVCTrainer
from models.svc.transformer.transformer_trainer import TransformerTrainer
from models.svc.vits.vits_trainer import VitsSVCTrainer
from utils.util import load_config
class DiffusionTrainer(SVCTrainer):
    r"""Trainer for diffusion-based SVC models. Inherits from SVCTrainer and
    overrides ``_build_model`` and ``_forward_step``.
    """

    def __init__(self, args=None, cfg=None):
        SVCTrainer.__init__(self, args, cfg)

        # The DDPM noise scheduler is only needed for diffusion-based SVC.
        self.noise_scheduler = DDPMScheduler(
            **self.cfg.model.diffusion.scheduler_settings,
        )
        self.diffusion_timesteps = (
            self.cfg.model.diffusion.scheduler_settings.num_train_timesteps
        )

    ### Following are methods only for diffusion models ###

    def _build_model(self):
        r"""Build the model for training. Called from ``__init__``."""
        # TODO: sort out the config
        self.cfg.model.condition_encoder.f0_min = self.cfg.preprocess.f0_min
        self.cfg.model.condition_encoder.f0_max = self.cfg.preprocess.f0_max
        self.condition_encoder = ConditionEncoder(self.cfg.model.condition_encoder)
        self.acoustic_mapper = DiffusionWrapper(self.cfg)
        model = torch.nn.ModuleList([self.condition_encoder, self.acoustic_mapper])

        enc_params = self.count_parameters(self.condition_encoder)
        mapper_params = self.count_parameters(self.acoustic_mapper)
        self.logger.info(
            "Diffusion Model's Parameters: #Encoder is {:.2f}M, #Diffusion is {:.2f}M. The total is {:.2f}M".format(
                enc_params / 1e6,
                mapper_params / 1e6,
                (enc_params + mapper_params) / 1e6,
            )
        )
        return model

    def count_parameters(self, model):
        """Total parameter count of ``model`` (a module or a dict of modules)."""
        if isinstance(model, dict):
            # Float accumulator kept for compatibility with the legacy return type.
            total = 0.0
            for sub_model in model.values():
                total += sum(p.numel() for p in sub_model.parameters())
            return total
        return sum(p.numel() for p in model.parameters())

    def _check_nan(self, batch, loss, y_pred, y_gt):
        if torch.any(torch.isnan(loss)):
            # Dump the whole batch before the base class handles the NaN.
            for key, value in batch.items():
                self.logger.info(key)
                self.logger.info(value)
        super()._check_nan(loss, y_pred, y_gt)

    def _forward_step(self, batch):
        r"""Shared forward step, called by ``_train_step`` and ``_test_step``."""
        device = self.accelerator.device

        if self.online_features_extraction:
            # On-the-fly features extraction
            batch = self._extract_svc_features(batch)

        mel = batch["mel"]
        # Sample target noise first, then timesteps — order preserved so RNG
        # consumption matches the original implementation exactly.
        noise = torch.randn_like(mel, device=device, dtype=torch.float32)
        timesteps = torch.randint(
            0,
            self.diffusion_timesteps,
            (mel.size(0),),
            device=device,
            dtype=torch.long,
        )

        noisy_mel = self.noise_scheduler.add_noise(mel, noise, timesteps)
        conditioner = self.condition_encoder(batch)
        y_pred = self.acoustic_mapper(noisy_mel, timesteps, conditioner)

        loss = self._compute_loss(self.criterion, y_pred, noise, batch["mask"])
        self._check_nan(batch, loss, y_pred, noise)
        return loss
class ComoSVCTrainer(SVCTrainer):
    r"""Trainer for CoMoSVC (consistency-model-based SVC). Inherits from
    SVCTrainer and implements ``_build_model`` and ``_forward_step``; it also
    carries its own training/validation loops because the two-stage
    (teacher then distillation) schedule differs from the base trainer.
    """

    def __init__(self, args=None, cfg=None):
        SVCTrainer.__init__(self, args, cfg)
        # distill=True -> consistency distillation stage with a frozen teacher.
        self.distill = cfg.model.comosvc.distill
        # Diffusion decoder loss is skipped until cfg.train.fast_steps is
        # reached (see _forward_step).
        self.skip_diff = True

    ### Following are methods only for comoSVC models ###
    def _load_teacher_model(self, model):
        r"""Load the (whole ModuleList) teacher weights from ``self.teacher_model_path``."""
        self.checkpoint_file = self.teacher_model_path
        self.logger.info(
            "Load teacher acoustic model from {}".format(self.checkpoint_file)
        )
        raw_dict = torch.load(self.checkpoint_file)
        model.load_state_dict(raw_dict)

    def _build_model(self):
        r"""Build the model for training. This function is called in ``__init__`` function."""
        # TODO: sort out the config
        self.cfg.model.condition_encoder.f0_min = self.cfg.preprocess.f0_min
        self.cfg.model.condition_encoder.f0_max = self.cfg.preprocess.f0_max
        self.condition_encoder = ConditionEncoder(self.cfg.model.condition_encoder)
        self.acoustic_mapper = ComoSVC(self.cfg)
        model = torch.nn.ModuleList([self.condition_encoder, self.acoustic_mapper])
        if self.cfg.model.comosvc.distill:
            if not self.args.resume:
                # do not load teacher model when resume
                self.teacher_model_path = self.cfg.model.teacher_model_path
                self._load_teacher_model(model)
            # build teacher & target decoder and freeze teacher
            self.acoustic_mapper.decoder.init_consistency_training()
            self.freeze_net(self.condition_encoder)
            self.freeze_net(self.acoustic_mapper.encoder)
            self.freeze_net(self.acoustic_mapper.decoder.denoise_fn_pretrained)
            self.freeze_net(self.acoustic_mapper.decoder.denoise_fn_ema)
        return model

    def freeze_net(self, model):
        r"""Disable gradients for every parameter of ``model``."""
        for name, param in model.named_parameters():
            param.requires_grad = False

    def __build_optimizer(self):
        r"""Build an AdamW optimizer over trainable parameters only.

        NOTE(review): name-mangled (``_ComoSVCTrainer__build_optimizer``), so
        the base class cannot call it as ``_build_optimizer`` — verify it is
        actually reached.
        """
        if self.cfg.train.optimizer.lower() == "adamw":
            optimizer = torch.optim.AdamW(
                params=filter(lambda p: p.requires_grad, self.model.parameters()),
                **self.cfg.train.adamw,
            )
        else:
            raise NotImplementedError(
                "Not support optimizer: {}".format(self.cfg.train.optimizer)
            )
        return optimizer

    def _forward_step(self, batch):
        r"""Forward step for training and inference. This function is called
        in ``_train_step`` & ``_test_step`` function.

        Returns a dict of named loss tensors (distillation stage: one entry;
        teacher stage: ssim/prior/diffusion entries).
        """
        loss = {}
        mask = batch["mask"]
        mel_input = batch["mel"]
        cond = self.condition_encoder(batch)
        if self.distill:
            # Encoder is frozen during distillation; cut the graph explicitly.
            cond = cond.detach()
        self.skip_diff = True if self.step < self.cfg.train.fast_steps else False
        ssim_loss, prior_loss, diff_loss = self.acoustic_mapper.compute_loss(
            mask, cond, mel_input, skip_diff=self.skip_diff
        )
        if self.distill:
            loss["distil_loss"] = diff_loss
        else:
            loss["ssim_loss_encoder"] = ssim_loss
            loss["prior_loss_encoder"] = prior_loss
            loss["diffusion_loss_decoder"] = diff_loss

        return loss

    def _train_epoch(self):
        r"""Training epoch. Should return average loss of a batch (sample) over
        one epoch. See ``train_loop`` for usage.
        """
        self.model.train()
        epoch_sum_loss: float = 0.0
        epoch_step: int = 0
        for batch in tqdm(
            self.train_dataloader,
            desc=f"Training Epoch {self.epoch}",
            unit="batch",
            colour="GREEN",
            leave=False,
            dynamic_ncols=True,
            smoothing=0.04,
            disable=not self.accelerator.is_main_process,
        ):
            # Do training step and BP
            with self.accelerator.accumulate(self.model):
                loss = self._train_step(batch)
                total_loss = 0
                for k, v in loss.items():
                    total_loss += v
                self.accelerator.backward(total_loss)
                # Gradient norms are computed for clipping; return values unused.
                enc_grad_norm = torch.nn.utils.clip_grad_norm_(
                    self.acoustic_mapper.encoder.parameters(), max_norm=1
                )
                dec_grad_norm = torch.nn.utils.clip_grad_norm_(
                    self.acoustic_mapper.decoder.parameters(), max_norm=1
                )
                self.optimizer.step()
                self.optimizer.zero_grad()
            self.batch_count += 1

            # Update info for each step
            # TODO: step means BP counts or batch counts?
            if self.batch_count % self.cfg.train.gradient_accumulation_step == 0:
                epoch_sum_loss += total_loss
                log_info = {}
                for k, v in loss.items():
                    key = "Step/Train Loss/{}".format(k)
                    log_info[key] = v
                log_info["Step/Learning Rate"] = self.optimizer.param_groups[0]["lr"]
                self.accelerator.log(
                    log_info,
                    step=self.step,
                )
                self.step += 1
                epoch_step += 1

        self.accelerator.wait_for_everyone()
        # NOTE(review): epoch_sum_loss only accumulates on accumulation
        # boundaries, hence the re-scaling by gradient_accumulation_step.
        return (
            epoch_sum_loss
            / len(self.train_dataloader)
            * self.cfg.train.gradient_accumulation_step,
            loss,
        )

    def train_loop(self):
        r"""Training loop. The public entry of training process."""
        # Wait everyone to prepare before we move on
        self.accelerator.wait_for_everyone()
        # dump config file
        if self.accelerator.is_main_process:
            self.__dump_cfg(self.config_save_path)
        self.model.train()
        self.optimizer.zero_grad()
        # Wait to ensure good to go
        self.accelerator.wait_for_everyone()
        while self.epoch < self.max_epoch:
            self.logger.info("\n")
            self.logger.info("-" * 32)
            self.logger.info("Epoch {}: ".format(self.epoch))

            ### TODO: change the return values of _train_epoch() to a loss dict, or (total_loss, loss_dict)
            ### It's inconvenient for the model with multiple losses
            # Do training & validating epoch
            train_loss, loss = self._train_epoch()
            self.logger.info(" |- Train/Loss: {:.6f}".format(train_loss))
            for k, v in loss.items():
                self.logger.info(" |- Train/Loss/{}: {:.6f}".format(k, v))
            valid_loss = self._valid_epoch()
            self.logger.info(" |- Valid/Loss: {:.6f}".format(valid_loss))
            self.accelerator.log(
                {"Epoch/Train Loss": train_loss, "Epoch/Valid Loss": valid_loss},
                step=self.epoch,
            )

            self.accelerator.wait_for_everyone()
            # TODO: what is scheduler?
            self.scheduler.step(valid_loss)  # FIXME: use epoch track correct?

            # Check if hit save_checkpoint_stride and run_eval
            run_eval = False
            if self.accelerator.is_main_process:
                save_checkpoint = False
                hit_dix = []
                for i, num in enumerate(self.save_checkpoint_stride):
                    if self.epoch % num == 0:
                        save_checkpoint = True
                        hit_dix.append(i)
                        run_eval |= self.run_eval[i]

            self.accelerator.wait_for_everyone()
            # Checkpoints are only meaningful once the decoder is trained
            # (distillation stage, or after fast_steps in the teacher stage).
            if (
                self.accelerator.is_main_process
                and save_checkpoint
                and (self.distill or not self.skip_diff)
            ):
                path = os.path.join(
                    self.checkpoint_dir,
                    "epoch-{:04d}_step-{:07d}_loss-{:.6f}".format(
                        self.epoch, self.step, train_loss
                    ),
                )
                self.tmp_checkpoint_save_path = path
                self.accelerator.save_state(path)
                print(f"save checkpoint in {path}")
                json.dump(
                    self.checkpoints_path,
                    open(os.path.join(path, "ckpts.json"), "w"),
                    ensure_ascii=False,
                    indent=4,
                )
                self._save_auxiliary_states()

                # Remove old checkpoints
                to_remove = []
                for idx in hit_dix:
                    self.checkpoints_path[idx].append(path)
                    while len(self.checkpoints_path[idx]) > self.keep_last[idx]:
                        to_remove.append((idx, self.checkpoints_path[idx].pop(0)))

                # Search conflicts: a path still referenced by another stride
                # list must not be deleted.
                total = set()
                for i in self.checkpoints_path:
                    total |= set(i)
                do_remove = set()
                for idx, path in to_remove[::-1]:
                    if path in total:
                        self.checkpoints_path[idx].insert(0, path)
                    else:
                        do_remove.add(path)

                # Remove old checkpoints
                for path in do_remove:
                    shutil.rmtree(path, ignore_errors=True)
                    self.logger.debug(f"Remove old checkpoint: {path}")

            self.accelerator.wait_for_everyone()
            if run_eval:
                # TODO: run evaluation
                pass

            # Update info for each epoch
            self.epoch += 1

        # Finish training and save final checkpoint
        self.accelerator.wait_for_everyone()
        if self.accelerator.is_main_process:
            self.accelerator.save_state(
                os.path.join(
                    self.checkpoint_dir,
                    "final_epoch-{:04d}_step-{:07d}_loss-{:.6f}".format(
                        self.epoch, self.step, valid_loss
                    ),
                )
            )
            self._save_auxiliary_states()
        self.accelerator.end_training()

    def _valid_epoch(self):
        r"""Validation epoch: returns the average (summed-over-terms) loss of a
        batch over one epoch. See ``train_loop`` for usage.
        """
        self.model.eval()
        epoch_sum_loss = 0.0
        for batch in tqdm(
            self.valid_dataloader,
            desc=f"Validating Epoch {self.epoch}",
            unit="batch",
            colour="GREEN",
            leave=False,
            dynamic_ncols=True,
            smoothing=0.04,
            disable=not self.accelerator.is_main_process,
        ):
            batch_loss = self._valid_step(batch)
            for k, v in batch_loss.items():
                epoch_sum_loss += v

        self.accelerator.wait_for_everyone()
        return epoch_sum_loss / len(self.valid_dataloader)

    def __count_parameters(model):
        # NOTE(review): defined without ``self`` — when invoked as an instance
        # method, ``model`` receives the trainer instance. Appears unused in
        # this class; verify before relying on it.
        model_param = 0.0
        if isinstance(model, dict):
            for key, value in model.items():
                model_param += sum(p.numel() for p in model[key].parameters())
        else:
            model_param = sum(p.numel() for p in model.parameters())
        return model_param

    def __dump_cfg(self, path):
        # Serialize the config (json5 keeps the project's relaxed-JSON style).
        os.makedirs(os.path.dirname(path), exist_ok=True)
        json5.dump(
            self.cfg,
            open(path, "w"),
            indent=4,
            sort_keys=True,
            ensure_ascii=False,
            quote_keys=True,
        )
class TransformerTrainer(SVCTrainer):
    """Trainer for Transformer/Conformer acoustic models for SVC.

    Optimizes a masked L1 + masked SSIM loss between the predicted and the
    ground-truth mel spectrograms.
    """

    def __init__(self, args, cfg):
        SVCTrainer.__init__(self, args, cfg)
        self.ssim_loss = SSIM()

    def _build_model(self):
        """Build the condition encoder and the acoustic mapper selected by
        ``cfg.model.transformer.type`` ("transformer" or "conformer")."""
        self.cfg.model.condition_encoder.f0_min = self.cfg.preprocess.f0_min
        self.cfg.model.condition_encoder.f0_max = self.cfg.preprocess.f0_max
        self.condition_encoder = ConditionEncoder(self.cfg.model.condition_encoder)
        if self.cfg.model.transformer.type == "transformer":
            self.acoustic_mapper = Transformer(self.cfg.model.transformer)
        elif self.cfg.model.transformer.type == "conformer":
            self.acoustic_mapper = Conformer(self.cfg.model.transformer)
        else:
            raise NotImplementedError
        model = torch.nn.ModuleList([self.condition_encoder, self.acoustic_mapper])
        return model

    def _forward_step(self, batch):
        """Compute masked L1 + SSIM loss between predicted and target mel.

        (Cleaned up: the unused ``device`` local was removed and the ``mask``
        local is now used consistently instead of re-indexing ``batch``.)
        """
        total_loss = 0
        mel = batch["mel"]
        mask = batch["mask"]

        condition = self.condition_encoder(batch)
        mel_pred = self.acoustic_mapper(condition, mask)

        # Masked mean absolute error.
        l1_loss = torch.sum(torch.abs(mel_pred - mel) * mask) / torch.sum(mask)
        self._check_nan(l1_loss, mel_pred, mel)
        total_loss += l1_loss

        # Masked SSIM term — assumes SSIM() returns an element-wise map that
        # can be masked like the mel tensor (TODO confirm against SSIM impl).
        ssim_loss = self.ssim_loss(mel_pred, mel)
        ssim_loss = torch.sum(ssim_loss * mask) / torch.sum(mask)
        self._check_nan(ssim_loss, mel_pred, mel)
        total_loss += ssim_loss

        return total_loss
class VitsSVCTrainer(SVCTrainer):
def __init__(self, args, cfg):
self.args = args
self.cfg = cfg
SVCTrainer.__init__(self, args, cfg)
def _accelerator_prepare(self):
(
self.train_dataloader,
self.valid_dataloader,
) = self.accelerator.prepare(
self.train_dataloader,
self.valid_dataloader,
)
if isinstance(self.model, dict):
for key in self.model.keys():
self.model[key] = self.accelerator.prepare(self.model[key])
else:
self.model = self.accelerator.prepare(self.model)
if isinstance(self.optimizer, dict):
for key in self.optimizer.keys():
self.optimizer[key] = self.accelerator.prepare(self.optimizer[key])
else:
self.optimizer = self.accelerator.prepare(self.optimizer)
if isinstance(self.scheduler, dict):
for key in self.scheduler.keys():
self.scheduler[key] = self.accelerator.prepare(self.scheduler[key])
else:
self.scheduler = self.accelerator.prepare(self.scheduler)
    def _load_model(
        self,
        checkpoint_dir: str = None,
        checkpoint_path: str = None,
        resume_type: str = "",
    ):
        r"""Load model from checkpoint. If checkpoint_path is None, it will
        load the latest checkpoint in checkpoint_dir. If checkpoint_path is not
        None, it will load the checkpoint specified by checkpoint_path. **Only use this
        method after** ``accelerator.prepare()``.

        Returns the resolved checkpoint path.
        """
        if checkpoint_path is None:
            # Checkpoint dirs are named "epoch-XXXX_step-XXXXXXX_loss-...",
            # so splitting on "_" and "-" recovers the epoch number for sorting.
            ls = [str(i) for i in Path(checkpoint_dir).glob("*")]
            ls.sort(key=lambda x: int(x.split("_")[-3].split("-")[-1]), reverse=True)
            checkpoint_path = ls[0]
        self.logger.info("Resume from {}...".format(checkpoint_path))

        if resume_type in ["resume", ""]:
            # Load all the things, including model weights, optimizer, scheduler, and random states.
            self.accelerator.load_state(input_dir=checkpoint_path)

            # set epoch and step (parsed back out of the directory name)
            self.epoch = int(checkpoint_path.split("_")[-3].split("-")[-1]) + 1
            self.step = int(checkpoint_path.split("_")[-2].split("-")[-1]) + 1

        elif resume_type == "finetune":
            # Load only the model weights.
            # NOTE(review): the same "pytorch_model.bin" is dispatched into both
            # generator and discriminator — verify the checkpoint layout
            # actually contains both sub-models.
            accelerate.load_checkpoint_and_dispatch(
                self.accelerator.unwrap_model(self.model["generator"]),
                os.path.join(checkpoint_path, "pytorch_model.bin"),
            )
            accelerate.load_checkpoint_and_dispatch(
                self.accelerator.unwrap_model(self.model["discriminator"]),
                os.path.join(checkpoint_path, "pytorch_model.bin"),
            )
            self.logger.info("Load model weights for finetune...")

        else:
            raise ValueError("Resume_type must be `resume` or `finetune`.")

        return checkpoint_path
def _build_model(self):
net_g = SynthesizerTrn(
self.cfg.preprocess.n_fft // 2 + 1,
self.cfg.preprocess.segment_size // self.cfg.preprocess.hop_size,
# directly use cfg
self.cfg,
)
net_d = MultiPeriodDiscriminator(self.cfg.model.vits.use_spectral_norm)
model = {"generator": net_g, "discriminator": net_d}
return model
def _build_dataset(self):
return SVCOfflineDataset, SVCOfflineCollator
def _build_optimizer(self):
optimizer_g = torch.optim.AdamW(
self.model["generator"].parameters(),
self.cfg.train.learning_rate,
betas=self.cfg.train.AdamW.betas,
eps=self.cfg.train.AdamW.eps,
)
optimizer_d = torch.optim.AdamW(
self.model["discriminator"].parameters(),
self.cfg.train.learning_rate,
betas=self.cfg.train.AdamW.betas,
eps=self.cfg.train.AdamW.eps,
)
optimizer = {"optimizer_g": optimizer_g, "optimizer_d": optimizer_d}
return optimizer
def _build_scheduler(self):
scheduler_g = ExponentialLR(
self.optimizer["optimizer_g"],
gamma=self.cfg.train.lr_decay,
last_epoch=self.epoch - 1,
)
scheduler_d = ExponentialLR(
self.optimizer["optimizer_d"],
gamma=self.cfg.train.lr_decay,
last_epoch=self.epoch - 1,
)
scheduler = {"scheduler_g": scheduler_g, "scheduler_d": scheduler_d}
return scheduler
    def _build_criterion(self):
        """Build the VITS GAN criteria: a generator loss (mel L1 + KL +
        feature-matching + adversarial) and an LSGAN discriminator loss.
        Returned as a name->module dict."""

        class GeneratorLoss(nn.Module):
            def __init__(self, cfg):
                super(GeneratorLoss, self).__init__()
                self.cfg = cfg
                self.l1_loss = nn.L1Loss()

            def generator_loss(self, disc_outputs):
                # LSGAN generator term: mean((1 - D(G(x)))^2) per discriminator.
                loss = 0
                gen_losses = []
                for dg in disc_outputs:
                    dg = dg.float()
                    l = torch.mean((1 - dg) ** 2)
                    gen_losses.append(l)
                    loss += l
                return loss, gen_losses

            def feature_loss(self, fmap_r, fmap_g):
                # Feature matching: L1 over discriminator feature maps;
                # real-side features are detached so only G is trained.
                loss = 0
                for dr, dg in zip(fmap_r, fmap_g):
                    for rl, gl in zip(dr, dg):
                        rl = rl.float().detach()
                        gl = gl.float()
                        loss += torch.mean(torch.abs(rl - gl))
                return loss * 2

            def kl_loss(self, z_p, logs_q, m_p, logs_p, z_mask):
                """Masked KL between posterior q and prior p.

                z_p, logs_q: [b, h, t_t]
                m_p, logs_p: [b, h, t_t]
                """
                z_p = z_p.float()
                logs_q = logs_q.float()
                m_p = m_p.float()
                logs_p = logs_p.float()
                z_mask = z_mask.float()

                kl = logs_p - logs_q - 0.5
                kl += 0.5 * ((z_p - m_p) ** 2) * torch.exp(-2.0 * logs_p)
                kl = torch.sum(kl * z_mask)
                l = kl / torch.sum(z_mask)
                return l

            def forward(
                self,
                outputs_g,
                outputs_d,
                y_mel,
                y_hat_mel,
            ):
                loss_g = {}

                # mel loss (weighted by cfg.train.c_mel)
                loss_mel = self.l1_loss(y_mel, y_hat_mel) * self.cfg.train.c_mel
                loss_g["loss_mel"] = loss_mel

                # kl loss (weighted by cfg.train.c_kl)
                loss_kl = (
                    self.kl_loss(
                        outputs_g["z_p"],
                        outputs_g["logs_q"],
                        outputs_g["m_p"],
                        outputs_g["logs_p"],
                        outputs_g["z_mask"],
                    )
                    * self.cfg.train.c_kl
                )
                loss_g["loss_kl"] = loss_kl

                # feature loss
                loss_fm = self.feature_loss(outputs_d["fmap_rs"], outputs_d["fmap_gs"])
                loss_g["loss_fm"] = loss_fm

                # gan loss
                loss_gen, losses_gen = self.generator_loss(outputs_d["y_d_hat_g"])
                loss_g["loss_gen"] = loss_gen
                loss_g["loss_gen_all"] = loss_mel + loss_kl + loss_fm + loss_gen

                return loss_g

        class DiscriminatorLoss(nn.Module):
            def __init__(self, cfg):
                super(DiscriminatorLoss, self).__init__()
                self.cfg = cfg
                # NOTE(review): l1Loss is never used below.
                self.l1Loss = torch.nn.L1Loss(reduction="mean")

            def __call__(self, disc_real_outputs, disc_generated_outputs):
                # LSGAN discriminator term: real -> 1, generated -> 0.
                loss_d = {}

                loss = 0
                r_losses = []
                g_losses = []
                for dr, dg in zip(disc_real_outputs, disc_generated_outputs):
                    dr = dr.float()
                    dg = dg.float()
                    r_loss = torch.mean((1 - dr) ** 2)
                    g_loss = torch.mean(dg**2)
                    loss += r_loss + g_loss
                    r_losses.append(r_loss.item())
                    g_losses.append(g_loss.item())

                loss_d["loss_disc_all"] = loss

                return loss_d

        criterion = {
            "generator": GeneratorLoss(self.cfg),
            "discriminator": DiscriminatorLoss(self.cfg),
        }
        return criterion
# Keep legacy unchanged
def write_summary(
self,
losses,
stats,
images={},
audios={},
audio_sampling_rate=24000,
tag="train",
):
for key, value in losses.items():
self.sw.add_scalar(tag + "/" + key, value, self.step)
self.sw.add_scalar(
"learning_rate",
self.optimizer["optimizer_g"].param_groups[0]["lr"],
self.step,
)
if len(images) != 0:
for key, value in images.items():
self.sw.add_image(key, value, self.global_step, batchformats="HWC")
if len(audios) != 0:
for key, value in audios.items():
self.sw.add_audio(key, value, self.global_step, audio_sampling_rate)
def write_valid_summary(
self, losses, stats, images={}, audios={}, audio_sampling_rate=24000, tag="val"
):
for key, value in losses.items():
self.sw.add_scalar(tag + "/" + key, value, self.step)
if len(images) != 0:
for key, value in images.items():
self.sw.add_image(key, value, self.global_step, batchformats="HWC")
if len(audios) != 0:
for key, value in audios.items():
self.sw.add_audio(key, value, self.global_step, audio_sampling_rate)
    def _get_state_dict(self):
        """Collect a resumable snapshot: all sub-model / optimizer / scheduler
        state dicts plus training progress (step, epoch, batch size)."""
        state_dict = {
            "generator": self.model["generator"].state_dict(),
            "discriminator": self.model["discriminator"].state_dict(),
            "optimizer_g": self.optimizer["optimizer_g"].state_dict(),
            "optimizer_d": self.optimizer["optimizer_d"].state_dict(),
            "scheduler_g": self.scheduler["scheduler_g"].state_dict(),
            "scheduler_d": self.scheduler["scheduler_d"].state_dict(),
            "step": self.step,
            "epoch": self.epoch,
            "batch_size": self.cfg.train.batch_size,
        }
        return state_dict
def get_state_dict(self):
state_dict = {
"generator": self.model["generator"].state_dict(),
"discriminator": self.model["discriminator"].state_dict(),
"optimizer_g": self.optimizer["optimizer_g"].state_dict(),
"optimizer_d": self.optimizer["optimizer_d"].state_dict(),
"scheduler_g": self.scheduler["scheduler_g"].state_dict(),
"scheduler_d": self.scheduler["scheduler_d"].state_dict(),
"step": self.step,
"epoch": self.epoch,
"batch_size": self.cfg.train.batch_size,
}
return state_dict
def load_model(self, checkpoint):
self.step = checkpoint["step"]
self.epoch = checkpoint["epoch"]
self.model["generator"].load_state_dict(checkpoint["generator"])
self.model["discriminator"].load_state_dict(checkpoint["discriminator"])
self.optimizer["optimizer_g"].load_state_dict(checkpoint["optimizer_g"])
self.optimizer["optimizer_d"].load_state_dict(checkpoint["optimizer_d"])
self.scheduler["scheduler_g"].load_state_dict(checkpoint["scheduler_g"])
self.scheduler["scheduler_d"].load_state_dict(checkpoint["scheduler_d"])
    def _valid_step(self, batch):
        r"""Validation forward step for one batch.

        Runs generator -> discriminator (detached, for the D loss) ->
        discriminator again (attached, for the G loss) and returns
        ``(total_loss: float, per-term losses: dict[str, float], stats: dict)``.
        """
        valid_losses = {}
        total_loss = 0
        valid_stats = {}

        # Discriminator
        # Generator output
        outputs_g = self.model["generator"](batch)

        # Slice ground-truth mel / waveform to the same random segment the
        # generator synthesized (ids_slice is in frames; audio in samples).
        y_mel = slice_segments(
            batch["mel"].transpose(1, 2),
            outputs_g["ids_slice"],
            self.cfg.preprocess.segment_size // self.cfg.preprocess.hop_size,
        )
        y_hat_mel = mel_spectrogram_torch(
            outputs_g["y_hat"].squeeze(1), self.cfg.preprocess
        )
        y = slice_segments(
            batch["audio"].unsqueeze(1),
            outputs_g["ids_slice"] * self.cfg.preprocess.hop_size,
            self.cfg.preprocess.segment_size,
        )

        # Discriminator output (generated branch detached for the D loss)
        outputs_d = self.model["discriminator"](y, outputs_g["y_hat"].detach())
        ## Discriminator loss
        loss_d = self.criterion["discriminator"](
            outputs_d["y_d_hat_r"], outputs_d["y_d_hat_g"]
        )
        valid_losses.update(loss_d)

        ## Generator
        outputs_d = self.model["discriminator"](y, outputs_g["y_hat"])
        loss_g = self.criterion["generator"](outputs_g, outputs_d, y_mel, y_hat_mel)
        valid_losses.update(loss_g)

        # Convert the reported per-term losses to Python floats; loss_g/loss_d
        # still hold the tensors used for the total below.
        for item in valid_losses:
            valid_losses[item] = valid_losses[item].item()

        total_loss = loss_g["loss_gen_all"] + loss_d["loss_disc_all"]

        return (
            total_loss.item(),
            valid_losses,
            valid_stats,
        )
def _valid_epoch(self):
r"""Testing epoch. Should return average loss of a batch (sample) over
one epoch. See ``train_loop`` for usage.
"""
if isinstance(self.model, dict):
for key in self.model.keys():
self.model[key].eval()
else:
self.model.eval()
epoch_sum_loss = 0.0
epoch_losses = dict()
for batch in tqdm(
self.valid_dataloader,
desc=f"Validating Epoch {self.epoch}",
unit="batch",
colour="GREEN",
leave=False,
dynamic_ncols=True,
smoothing=0.04,
disable=not self.accelerator.is_main_process,
):
total_loss, valid_losses, valid_stats = self._valid_step(batch)
epoch_sum_loss += total_loss
if isinstance(valid_losses, dict):
for key, value in valid_losses.items():
if key not in epoch_losses.keys():
epoch_losses[key] = value
else:
epoch_losses[key] += value
epoch_sum_loss = epoch_sum_loss / len(self.valid_dataloader)
for key in epoch_losses.keys():
epoch_losses[key] = epoch_losses[key] / len(self.valid_dataloader)
self.accelerator.wait_for_everyone()
return epoch_sum_loss, epoch_losses
### THIS IS MAIN ENTRY ###
def train_loop(self):
r"""Training loop. The public entry of training process."""
# Wait everyone to prepare before we move on
self.accelerator.wait_for_everyone()
# dump config file
if self.accelerator.is_main_process:
self.__dump_cfg(self.config_save_path)
# self.optimizer.zero_grad()
# Wait to ensure good to go
self.accelerator.wait_for_everyone()
while self.epoch < self.max_epoch:
self.logger.info("\n")
self.logger.info("-" * 32)
self.logger.info("Epoch {}: ".format(self.epoch))
# Do training & validating epoch
train_total_loss, train_losses = self._train_epoch()
if isinstance(train_losses, dict):
for key, loss in train_losses.items():
self.logger.info(" |- Train/{} Loss: {:.6f}".format(key, loss))
self.accelerator.log(
{"Epoch/Train {} Loss".format(key): loss},
step=self.epoch,
)
valid_total_loss, valid_losses = self._valid_epoch()
if isinstance(valid_losses, dict):
for key, loss in valid_losses.items():
self.logger.info(" |- Valid/{} Loss: {:.6f}".format(key, loss))
self.accelerator.log(
{"Epoch/Train {} Loss".format(key): loss},
step=self.epoch,
)
self.logger.info(" |- Train/Loss: {:.6f}".format(train_total_loss))
self.logger.info(" |- Valid/Loss: {:.6f}".format(valid_total_loss))
self.accelerator.log(
{
"Epoch/Train Loss": train_total_loss,
"Epoch/Valid Loss": valid_total_loss,
},
step=self.epoch,
)
self.accelerator.wait_for_everyone()
# Check if hit save_checkpoint_stride and run_eval
run_eval = False
if self.accelerator.is_main_process:
save_checkpoint = False
hit_dix = []
for i, num in enumerate(self.save_checkpoint_stride):
if self.epoch % num == 0:
save_checkpoint = True
hit_dix.append(i)
run_eval |= self.run_eval[i]
self.accelerator.wait_for_everyone()
if self.accelerator.is_main_process and save_checkpoint:
path = os.path.join(
self.checkpoint_dir,
"epoch-{:04d}_step-{:07d}_loss-{:.6f}".format(
self.epoch, self.step, train_total_loss
),
)
self.tmp_checkpoint_save_path = path
self.accelerator.save_state(path)
json.dump(
self.checkpoints_path,
open(os.path.join(path, "ckpts.json"), "w"),
ensure_ascii=False,
indent=4,
)
self._save_auxiliary_states()
# Remove old checkpoints
to_remove = []
for idx in hit_dix:
self.checkpoints_path[idx].append(path)
while len(self.checkpoints_path[idx]) > self.keep_last[idx]:
to_remove.append((idx, self.checkpoints_path[idx].pop(0)))
# Search conflicts
total = set()
for i in self.checkpoints_path:
total |= set(i)
do_remove = set()
for idx, path in to_remove[::-1]:
if path in total:
self.checkpoints_path[idx].insert(0, path)
else:
do_remove.add(path)
# Remove old checkpoints
for path in do_remove:
shutil.rmtree(path, ignore_errors=True)
self.logger.debug(f"Remove old checkpoint: {path}")
self.accelerator.wait_for_everyone()
if run_eval:
# TODO: run evaluation
pass
# Update info for each epoch
self.epoch += 1
# Finish training and save final checkpoint
self.accelerator.wait_for_everyone()
if self.accelerator.is_main_process:
path = os.path.join(
self.checkpoint_dir,
"final_epoch-{:04d}_step-{:07d}_loss-{:.6f}".format(
self.epoch, self.step, valid_total_loss
),
)
self.tmp_checkpoint_save_path = path
self.accelerator.save_state(
os.path.join(
self.checkpoint_dir,
"final_epoch-{:04d}_step-{:07d}_loss-{:.6f}".format(
self.epoch, self.step, valid_total_loss
),
)
)
json.dump(
self.checkpoints_path,
open(os.path.join(path, "ckpts.json"), "w"),
ensure_ascii=False,
indent=4,
)
self._save_auxiliary_states()
self.accelerator.end_training()
    def _train_step(self, batch):
        r"""Run one adversarial training step: update the discriminator first,
        then the generator, on the same sliced segment.

        Returns:
            tuple: (total_loss, train_losses, training_stats) where
            ``total_loss`` is a float (generator + discriminator loss) and
            ``train_losses`` maps loss names to floats.
        """
        train_losses = {}
        total_loss = 0
        training_stats = {}

        ## Train Discriminator
        # Generator output
        outputs_g = self.model["generator"](batch)

        # Ground-truth mel for the randomly sliced segment; ids_slice is in
        # frame units here.
        y_mel = slice_segments(
            batch["mel"].transpose(1, 2),
            outputs_g["ids_slice"],
            self.cfg.preprocess.segment_size // self.cfg.preprocess.hop_size,
        )
        y_hat_mel = mel_spectrogram_torch(
            outputs_g["y_hat"].squeeze(1), self.cfg.preprocess
        )

        # Slice the matching raw-audio segment (sample units: frames * hop).
        y = slice_segments(
            # [1, 168418] -> [1, 1, 168418]
            batch["audio"].unsqueeze(1),
            outputs_g["ids_slice"] * self.cfg.preprocess.hop_size,
            self.cfg.preprocess.segment_size,
        )

        # Discriminator output
        # detach() stops discriminator gradients from reaching the generator.
        outputs_d = self.model["discriminator"](y, outputs_g["y_hat"].detach())
        # Discriminator loss
        loss_d = self.criterion["discriminator"](
            outputs_d["y_d_hat_r"], outputs_d["y_d_hat_g"]
        )
        train_losses.update(loss_d)

        # BP and Grad Updated
        self.optimizer["optimizer_d"].zero_grad()
        self.accelerator.backward(loss_d["loss_disc_all"])
        self.optimizer["optimizer_d"].step()

        ## Train Generator
        # Fresh discriminator forward (no detach) so generator gets gradients.
        outputs_d = self.model["discriminator"](y, outputs_g["y_hat"])
        loss_g = self.criterion["generator"](outputs_g, outputs_d, y_mel, y_hat_mel)
        train_losses.update(loss_g)

        # BP and Grad Updated
        self.optimizer["optimizer_g"].zero_grad()
        self.accelerator.backward(loss_g["loss_gen_all"])
        self.optimizer["optimizer_g"].step()

        # Convert tensors to plain floats for logging.
        for item in train_losses:
            train_losses[item] = train_losses[item].item()

        total_loss = loss_g["loss_gen_all"] + loss_d["loss_disc_all"]

        return (
            total_loss.item(),
            train_losses,
            training_stats,
        )
def _train_epoch(self):
r"""Training epoch. Should return average loss of a batch (sample) over
one epoch. See ``train_loop`` for usage.
"""
epoch_sum_loss: float = 0.0
epoch_losses: dict = {}
epoch_step: int = 0
for batch in tqdm(
self.train_dataloader,
desc=f"Training Epoch {self.epoch}",
unit="batch",
colour="GREEN",
leave=False,
dynamic_ncols=True,
smoothing=0.04,
disable=not self.accelerator.is_main_process,
):
# Do training step and BP
with self.accelerator.accumulate(self.model):
total_loss, train_losses, training_stats = self._train_step(batch)
self.batch_count += 1
# Update info for each step
if self.batch_count % self.cfg.train.gradient_accumulation_step == 0:
epoch_sum_loss += total_loss
for key, value in train_losses.items():
if key not in epoch_losses.keys():
epoch_losses[key] = value
else:
epoch_losses[key] += value
self.accelerator.log(
{
"Step/Generator Loss": train_losses["loss_gen_all"],
"Step/Discriminator Loss": train_losses["loss_disc_all"],
"Step/Generator Learning Rate": self.optimizer[
"optimizer_d"
].param_groups[0]["lr"],
"Step/Discriminator Learning Rate": self.optimizer[
"optimizer_g"
].param_groups[0]["lr"],
},
step=self.step,
)
self.step += 1
epoch_step += 1
self.accelerator.wait_for_everyone()
epoch_sum_loss = (
epoch_sum_loss
/ len(self.train_dataloader)
* self.cfg.train.gradient_accumulation_step
)
for key in epoch_losses.keys():
epoch_losses[key] = (
epoch_losses[key]
/ len(self.train_dataloader)
* self.cfg.train.gradient_accumulation_step
)
return epoch_sum_loss, epoch_losses
def __dump_cfg(self, path):
os.makedirs(os.path.dirname(path), exist_ok=True)
json5.dump(
self.cfg,
open(path, "w"),
indent=4,
sort_keys=True,
ensure_ascii=False,
quote_keys=True,
)
def build_trainer(args, cfg):
    """Instantiate the SVC trainer class that matches ``cfg.model_type``.

    Raises:
        KeyError: if ``cfg.model_type`` is not a supported model type.
    """
    supported_trainer = {
        "DiffWaveNetSVC": DiffusionTrainer,
        "DiffComoSVC": ComoSVCTrainer,
        "TransformerSVC": TransformerTrainer,
        "VitsSVC": VitsSVCTrainer,
    }
    # Look up the trainer class and construct it in one expression.
    return supported_trainer[cfg.model_type](args, cfg)
import argparse
import torch
from models.svc.diffusion.diffusion_trainer import DiffusionTrainer
from models.svc.comosvc.comosvc_trainer import ComoSVCTrainer
from models.svc.transformer.transformer_trainer import TransformerTrainer
from models.svc.vits.vits_trainer import VitsSVCTrainer
from utils.util import load_config
def cuda_relevant(deterministic=False):
    """Configure CUDA/cuDNN backend flags for training.

    Args:
        deterministic (bool): when True, force deterministic kernels (and
            disable cudnn benchmarking); when False, favour throughput.
    """
    torch.cuda.empty_cache()

    # Enable TF32 matmul/conv paths (effective on Ampere and newer GPUs).
    cuda_backend = torch.backends.cuda
    cudnn_backend = torch.backends.cudnn
    cuda_backend.matmul.allow_tf32 = True
    cudnn_backend.enabled = True
    cudnn_backend.allow_tf32 = True

    # Determinism vs. speed: benchmark mode is the complement of determinism.
    cudnn_backend.deterministic = deterministic
    cudnn_backend.benchmark = not deterministic
    torch.use_deterministic_algorithms(deterministic)
import argparse
import os
import glob
from tqdm import tqdm
import json
import torch
import time
from models.svc.diffusion.diffusion_inference import DiffusionInference
from models.svc.comosvc.comosvc_inference import ComoSVCInference
from models.svc.transformer.transformer_inference import TransformerInference
from models.svc.vits.vits_inference import VitsInference
from utils.util import load_config
from utils.audio_slicer import split_audio, merge_segments_encodec
from processors import acoustic_extractor, content_extractor
def prepare_source_eval_file(cfg, temp_audio_dir, audio_name):
    """
    Prepare the eval file (json) for an audio

    Splits the source audio into overlapping segments, tags each segment
    with dataset/singer metadata, and dumps the list to ``eval.json``.

    Returns:
        str: path of the written eval metadata file.
    """
    audio_chunks_results = split_audio(
        wav_file=cfg.inference.source_audio_path,
        target_sr=cfg.preprocess.sample_rate,
        output_dir=os.path.join(temp_audio_dir, "wavs"),
        max_duration_of_segment=cfg.inference.segments_max_duration,
        overlap_duration=cfg.inference.segments_overlap_duration,
    )

    metadata = []
    for position, segment in enumerate(audio_chunks_results):
        # Tag each chunk so downstream extractors treat the source audio
        # as its own single-singer "dataset".
        segment["index"] = position
        segment["Dataset"] = audio_name
        segment["Singer"] = audio_name
        segment["Uid"] = "{}_{}".format(audio_name, segment["Uid"])
        metadata.append(segment)

    eval_file = os.path.join(temp_audio_dir, "eval.json")
    with open(eval_file, "w") as f:
        json.dump(metadata, f, indent=4, ensure_ascii=False, sort_keys=True)

    return eval_file
def prepare_for_audio_file(args, cfg, num_workers=1):
    """Preprocess a single source audio file for voice conversion.

    Splits the audio, writes the eval metadata file (and points
    ``args.source`` at it), then extracts acoustic and content features.

    Args:
        args: CLI namespace; ``args.source`` is overwritten in place.
        cfg: global config; ``cfg.inference.source_audio_name`` must be set.
        num_workers (int, optional): workers for content-feature extraction.

    Returns:
        tuple: (args, cfg, temp_audio_dir) with ``temp_audio_dir`` holding
        all intermediate files for this audio.
    """
    preprocess_path = cfg.preprocess.processed_dir
    audio_name = cfg.inference.source_audio_name
    temp_audio_dir = os.path.join(preprocess_path, audio_name)

    ### eval file
    t = time.time()
    eval_file = prepare_source_eval_file(cfg, temp_audio_dir, audio_name)
    args.source = eval_file
    with open(eval_file, "r") as f:
        metadata = json.load(f)
    print("Prepare for meta eval data: {:.1f}s".format(time.time() - t))

    ### acoustic features
    t = time.time()
    acoustic_extractor.extract_utt_acoustic_features_serial(
        metadata, temp_audio_dir, cfg
    )
    # FIX: idiomatic truthiness test instead of `== True`.
    if cfg.preprocess.use_min_max_norm_mel:
        acoustic_extractor.cal_mel_min_max(
            dataset=audio_name, output_path=preprocess_path, cfg=cfg, metadata=metadata
        )
    acoustic_extractor.cal_pitch_statistics_svc(
        dataset=audio_name, output_path=preprocess_path, cfg=cfg, metadata=metadata
    )
    print("Prepare for acoustic features: {:.1f}s".format(time.time() - t))

    ### content features
    t = time.time()
    content_extractor.extract_utt_content_features_dataloader(
        cfg, metadata, num_workers
    )
    print("Prepare for content features: {:.1f}s".format(time.time() - t))

    return args, cfg, temp_audio_dir
import argparse
import os
import glob
from tqdm import tqdm
import json
import torch
import time
from models.svc.diffusion.diffusion_inference import DiffusionInference
from models.svc.comosvc.comosvc_inference import ComoSVCInference
from models.svc.transformer.transformer_inference import TransformerInference
from models.svc.vits.vits_inference import VitsInference
from utils.util import load_config
from utils.audio_slicer import split_audio, merge_segments_encodec
from processors import acoustic_extractor, content_extractor
def merge_segments_encodec(wav_files, fs, output_path, overlap_duration=1.0):
    """Merge the given wav_files (may have overlaps) into a long audio

    fs:
        The sampling rate of the wav files.
    output_path:
        The output path to save the merged audio.
    overlap_duration (float, optional):
        Each segment has "overlap duration" (second) overlap with its previous and next segment. Defaults to 1.0.

    Uses weighted overlap-add: each segment is multiplied by a triangular
    window and accumulated, then the sum is normalized by the summed
    window weights so overlapping regions cross-fade smoothly.
    """
    waveforms = []
    for file in wav_files:
        # (T,)
        waveform, _ = load_audio_torch(file, fs)
        waveforms.append(waveform)

    # Single segment: nothing to merge, write it out directly.
    if len(waveforms) == 1:
        save_audio(output_path, waveforms[0], fs, add_silence=False, turn_up=False)
        return

    device = waveforms[0].device
    dtype = waveforms[0].dtype
    shape = waveforms[0].shape[:-1]

    # Total output length: sum of segments minus one overlap per junction.
    overlap_len = int(overlap_duration * fs)
    segments_lens = [len(wav) for wav in waveforms]
    merged_waveform_len = sum(segments_lens) - overlap_len * (len(waveforms) - 1)

    sum_weight = torch.zeros(merged_waveform_len, device=device, dtype=dtype)
    out = torch.zeros(*shape, merged_waveform_len, device=device, dtype=dtype)
    offset = 0

    for frame in waveforms:
        frame_length = frame.size(-1)
        # Triangular weight in (0, 0.5]: linspace excludes the endpoints so
        # no sample gets exactly zero weight (keeps sum_weight > 0).
        t = torch.linspace(0, 1, frame_length + 2, device=device, dtype=torch.float32)[
            1:-1
        ]
        weight = 0.5 - (t - 0.5).abs()

        weighted_frame = frame * weight

        # Accumulate the windowed segment into the output buffer.
        cur = out[..., offset : offset + frame_length]
        cur += weighted_frame[..., : cur.size(-1)]
        out[..., offset : offset + frame_length] = cur

        # Track the total window weight per sample for normalization.
        cur = sum_weight[offset : offset + frame_length]
        cur += weight[..., : cur.size(-1)]
        sum_weight[offset : offset + frame_length] = cur

        # Advance by the non-overlapping part of the segment.
        offset += frame_length - overlap_len

    assert sum_weight.min() > 0
    merged_waveform = out / sum_weight
    save_audio(output_path, merged_waveform, fs, add_silence=False, turn_up=True)
def merge_for_audio_segments(audio_files, args, cfg):
    """Merge converted segment wavs into one output file, then delete the
    intermediate segment files."""
    merged_path = os.path.join(
        args.output_dir,
        "{}_{}.wav".format(cfg.inference.source_audio_name, args.target_singer),
    )

    merge_segments_encodec(
        wav_files=audio_files,
        fs=cfg.preprocess.sample_rate,
        output_path=merged_path,
        overlap_duration=cfg.inference.segments_overlap_duration,
    )

    # The per-segment wavs are intermediate artifacts; clean them up.
    for segment_file in audio_files:
        os.remove(segment_file)
import argparse
import os
import glob
from tqdm import tqdm
import json
import torch
import time
from models.svc.diffusion.diffusion_inference import DiffusionInference
from models.svc.comosvc.comosvc_inference import ComoSVCInference
from models.svc.transformer.transformer_inference import TransformerInference
from models.svc.vits.vits_inference import VitsInference
from utils.util import load_config
from utils.audio_slicer import split_audio, merge_segments_encodec
from processors import acoustic_extractor, content_extractor
def cuda_relevant(deterministic=False):
    """Configure CUDA/cuDNN backend flags for inference.

    Args:
        deterministic (bool): when True, force deterministic kernels; when
            False, allow cudnn benchmarking for speed.
    """
    torch.cuda.empty_cache()
    # TF32 on Ampere and above
    torch.backends.cuda.matmul.allow_tf32 = True
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.allow_tf32 = True
    # Deterministic
    torch.backends.cudnn.deterministic = deterministic
    # Benchmarking picks fastest kernels but is non-deterministic, so it is
    # the complement of the `deterministic` flag.
    torch.backends.cudnn.benchmark = not deterministic
    torch.use_deterministic_algorithms(deterministic)
import argparse
import os
import glob
from tqdm import tqdm
import json
import torch
import time
from models.svc.diffusion.diffusion_inference import DiffusionInference
from models.svc.comosvc.comosvc_inference import ComoSVCInference
from models.svc.transformer.transformer_inference import TransformerInference
from models.svc.vits.vits_inference import VitsInference
from utils.util import load_config
from utils.audio_slicer import split_audio, merge_segments_encodec
from processors import acoustic_extractor, content_extractor
def build_inference(args, cfg, infer_type="from_dataset"):
    """Instantiate the inference pipeline matching ``cfg.model_type``.

    Raises:
        KeyError: if ``cfg.model_type`` is not a supported model type.
    """
    supported_inference = {
        "DiffWaveNetSVC": DiffusionInference,
        "DiffComoSVC": ComoSVCInference,
        "TransformerSVC": TransformerInference,
        "VitsSVC": VitsInference,
    }
    # Resolve the class and construct it in one step.
    return supported_inference[cfg.model_type](args, cfg, infer_type)
def infer(args, cfg, infer_type):
    """Build the inference pipeline and run it, timing both phases.

    Returns:
        list: paths of the generated audio files.
    """
    # Build inference
    t = time.time()
    pipeline = build_inference(args, cfg, infer_type)
    print("Model Init: {:.1f}s".format(time.time() - t))

    # Run inference
    t = time.time()
    output_audio_files = pipeline.inference()
    print("Model inference: {:.1f}s".format(time.time() - t))
    return output_audio_files
import argparse
import os
import glob
from tqdm import tqdm
import json
import torch
import time
from models.svc.diffusion.diffusion_inference import DiffusionInference
from models.svc.comosvc.comosvc_inference import ComoSVCInference
from models.svc.transformer.transformer_inference import TransformerInference
from models.svc.vits.vits_inference import VitsInference
from utils.util import load_config
from utils.audio_slicer import split_audio, merge_segments_encodec
from processors import acoustic_extractor, content_extractor
The provided code snippet includes necessary dependencies for implementing the `build_parser` function. Write a Python function `def build_parser()` to solve the following problem:
Build an argument parser for inference.py. Anything else should be put in an extra config YAML file.
Here is the function:
def build_parser():
    r"""Build argument parser for inference.py.
    Anything else should be put in an extra config YAML file.
    """
    parser = argparse.ArgumentParser()
    add = parser.add_argument

    add(
        "--config",
        type=str,
        required=True,
        help="JSON/YAML file for configurations.",
    )
    add(
        "--acoustics_dir",
        type=str,
        help="Acoustics model checkpoint directory. If a directory is given, "
        "search for the latest checkpoint dir in the directory. If a specific "
        "checkpoint dir is given, directly load the checkpoint.",
    )
    add(
        "--vocoder_dir",
        type=str,
        required=True,
        help="Vocoder checkpoint directory. Searching behavior is the same as "
        "the acoustics one.",
    )
    add(
        "--target_singer",
        type=str,
        required=True,
        help="convert to a specific singer (e.g. --target_singers singer_id).",
    )
    # NOTE(review): no `type=` here, so a value passed on the CLI arrives as
    # str while the default stays int 0 — confirm downstream handles both.
    add(
        "--trans_key",
        default=0,
        help="0: no pitch shift; autoshift: pitch shift; int: key shift.",
    )
    add(
        "--source",
        type=str,
        default="source_audio",
        help="Source audio file or directory. If a JSON file is given, "
        "inference from dataset is applied. If a directory is given, "
        "inference from all wav/flac/mp3 audio files in the directory is applied. "
        "Default: inference from all wav/flac/mp3 audio files in ./source_audio",
    )
    add(
        "--output_dir",
        type=str,
        default="conversion_results",
        help="Output directory. Default: ./conversion_results",
    )
    add(
        "--log_level",
        type=str,
        default="warning",
        help="Logging level. Default: warning",
    )
    # NOTE(review): store_true with default=True means this flag can never be
    # turned off from the CLI — confirm whether default should be False.
    add(
        "--keep_cache",
        action="store_true",
        default=True,
        help="Keep cache files. Only applicable to inference from files.",
    )
    add(
        "--diffusion_inference_steps",
        type=int,
        default=1000,
        help="Number of inference steps. Only applicable to diffusion inference.",
    )
    return parser
import faulthandler
import os
import argparse
import json
from multiprocessing import cpu_count
from utils.util import load_config
from preprocessors.processor import preprocess_dataset
from preprocessors.metadata import cal_metadata
from processors import acoustic_extractor, content_extractor, data_augment
def extract_acoustic_features(dataset, output_path, cfg, n_workers=1):
    """Extract acoustic features of utterances in the dataset

    Args:
        dataset (str): name of dataset, e.g. opencpop
        output_path (str): directory that stores train, test and feature files of datasets
        cfg (dict): dictionary that stores configurations
        n_workers (int, optional): kept for API compatibility; extraction is
            currently serial. Defaults to 1.
    """
    dataset_output = os.path.join(output_path, dataset)
    # Evaluation-only datasets ship no training split.
    splits = ["test"] if "eval" in dataset else ["train", "test"]

    metadata = []
    for split in splits:
        split_file = os.path.join(dataset_output, "{}.json".format(split))
        with open(split_file, "r") as f:
            metadata.extend(json.load(f))

    # acoustic_extractor.extract_utt_acoustic_features_parallel(
    #     metadata, dataset_output, cfg, n_workers=n_workers
    # )
    acoustic_extractor.extract_utt_acoustic_features_serial(
        metadata, dataset_output, cfg
    )
def extract_content_features(dataset, output_path, cfg, num_workers=1):
    """Extract content features of utterances in the dataset

    Args:
        dataset (str): name of dataset, e.g. opencpop
        output_path (str): directory that stores train, test and feature files of datasets
        cfg (dict): dictionary that stores configurations
        num_workers (int, optional): dataloader workers. Defaults to 1.
    """
    # The per-dataset directory is loop-invariant; compute it once.
    dataset_output = os.path.join(output_path, dataset)
    splits = ["test"] if "eval" in dataset else ["train", "test"]

    metadata = []
    for split in splits:
        split_file = os.path.join(dataset_output, "{}.json".format(split))
        with open(split_file, "r") as f:
            metadata.extend(json.load(f))

    content_extractor.extract_utt_content_features_dataloader(
        cfg, metadata, num_workers
    )
def preprocess_dataset(
    dataset, dataset_path, output_path, cfg, task_type, is_custom_dataset=False
):
    """Call specific function to handle specific dataset

    Args:
        dataset (str): name of a dataset, e.g. opencpop, m4singer
        dataset_path (str): path to dataset
        output_path (str): path to store preprocessing result files
        cfg: preprocessing configuration (only some handlers consume it)
        task_type (str): task kind (e.g. "svc"); only used for custom datasets
        is_custom_dataset (bool): route to the custom-dataset handler instead
            of a named built-in one

    NOTE(review): the dispatch below is a chain of independent `if`s, not
    `elif`s — it relies on dataset names being mutually exclusive. Also note
    there is no early return after the custom-dataset branch, so a custom
    dataset whose name matches a built-in one would be processed twice —
    confirm whether that is intended.
    """
    if is_custom_dataset:
        if task_type == "svc":
            customsvcdataset.main(output_path, dataset_path, dataset_name=dataset)
        else:
            raise NotImplementedError(
                "Custom dataset for {} task not implemented!".format(cfg.task_type)
            )

    # Datasets for SVC (singing voice conversion) and related tasks.
    if re.match("opencpop*", dataset):
        opencpop.main(dataset, output_path, dataset_path)
    if dataset == "m4singer":
        m4singer.main(output_path, dataset_path)
    if dataset == "svcc":
        svcc.main(output_path, dataset_path)
    if dataset == "pjs":
        pjs.main(output_path, dataset_path)
    if dataset == "popbutfy":
        popbutfy.main(output_path, dataset_path)
    if dataset == "opensinger":
        opensinger.main(output_path, dataset_path)
    if dataset == "popcs":
        popcs.main(output_path, dataset_path)
    if dataset == "kising":
        kising.main(output_path, dataset_path)
    if dataset == "csd":
        csd.main(output_path, dataset_path)
    if dataset == "opera":
        opera.main(output_path, dataset_path)
    if dataset == "nus48e":
        nus48e.main(output_path, dataset_path)
    if dataset == "vctk":
        vctk.main(output_path, dataset_path)
    if dataset == "svcceval":
        svcceval.main(output_path, dataset_path)
    if dataset == "libritts":
        libritts.main(output_path, dataset_path)
    if dataset == "lijian":
        lijian.main(output_path, dataset_path)
    if dataset == "cdmusiceval":
        cdmusiceval.main(output_path, dataset_path)
    # Note: "LJSpeech" (TTS) and "ljspeech" (vocoder) are distinct handlers.
    if dataset == "LJSpeech":
        ljspeech.main(output_path, dataset_path, cfg)
    if dataset == "ljspeech":
        ljspeech_vocoder.main(output_path, dataset_path)
    if dataset == "coco":
        coco.main(output_path, dataset_path)
    if dataset == "cocoeval":
        cocoeval.main(output_path, dataset_path)
    if dataset == "vocalist":
        vocalist.main(output_path, dataset_path)
    if dataset == "librilight":
        librilight.main(output_path, dataset_path, cfg)
    if dataset == "hifitts":
        hifitts.main(output_path, dataset_path)
def cal_metadata(cfg, dataset_types=["train", "test"]):
    """
    Dump metadata (singers.json, meta_info.json, utt2singer) for singer dataset or multi-datasets.

    For every dataset in ``cfg.dataset`` this sorts the split metadata by
    duration (rewriting the json files), builds the singer lookup table and
    utt2singer mapping, and writes per-dataset statistics to meta_info.json.

    NOTE(review): the mutable default for ``dataset_types`` is safe here only
    because it is never mutated — confirm before modifying it in place.
    """
    from collections import Counter

    datasets = cfg.dataset
    print("-" * 10)
    print("Preparing metadata...")
    print("Including: \n{}\n".format("\n".join(datasets)))

    datasets.sort()

    for dataset in tqdm(datasets):
        save_dir = os.path.join(cfg.preprocess.processed_dir, dataset)
        assert os.path.exists(save_dir)

        # 'train.json' and 'test.json' and 'valid.json' of target dataset
        meta_info = dict()
        utterances_dict = dict()
        all_utterances = list()
        duration = dict()
        total_duration = 0.0
        for dataset_type in dataset_types:
            metadata = os.path.join(save_dir, "{}.json".format(dataset_type))

            # Sort the metadata as the duration order
            with open(metadata, "r", encoding="utf-8") as f:
                utterances = json.load(f)
            utterances = sorted(utterances, key=lambda x: x["Duration"])
            utterances_dict[dataset_type] = utterances
            all_utterances.extend(utterances)

            # Write back the sorted metadata
            with open(metadata, "w") as f:
                json.dump(utterances, f, indent=4, ensure_ascii=False)

            # Get the total duration and singer names for train and test utterances
            duration[dataset_type] = sum(utt["Duration"] for utt in utterances)
            total_duration += duration[dataset_type]

        # Paths of metadata needed to be generated
        singer_dict_file = os.path.join(save_dir, cfg.preprocess.spk2id)
        utt2singer_file = os.path.join(save_dir, cfg.preprocess.utt2spk)

        singer_names = set(
            f"{replace_augment_name(utt['Dataset'])}_{utt['Singer']}"
            for utt in all_utterances
        )

        # Write the utt2singer file and sort the singer names
        with open(utt2singer_file, "w", encoding="utf-8") as f:
            for utt in all_utterances:
                f.write(
                    f"{utt['Dataset']}_{utt['Uid']}\t{replace_augment_name(utt['Dataset'])}_{utt['Singer']}\n"
                )

        singer_names = sorted(singer_names)
        singer_lut = {name: i for i, name in enumerate(singer_names)}

        # dump singers.json
        with open(singer_dict_file, "w", encoding="utf-8") as f:
            json.dump(singer_lut, f, indent=4, ensure_ascii=False)

        meta_info = {
            "dataset": dataset,
            "statistics": {
                "size": len(all_utterances),
                "hours": round(total_duration / 3600, 4),
            },
        }

        for dataset_type in dataset_types:
            meta_info[dataset_type] = {
                "size": len(utterances_dict[dataset_type]),
                "hours": round(duration[dataset_type] / 3600, 4),
            }

        meta_info["singers"] = {"size": len(singer_lut)}

        # Use Counter to count the minutes for each singer
        total_singer2mins = Counter()
        training_singer2mins = Counter()
        for dataset_type in dataset_types:
            for utt in utterances_dict[dataset_type]:
                k = f"{replace_augment_name(utt['Dataset'])}_{utt['Singer']}"
                if dataset_type == "train":
                    training_singer2mins[k] += utt["Duration"] / 60
                total_singer2mins[k] += utt["Duration"] / 60

        training_singer2mins = dict(
            sorted(training_singer2mins.items(), key=lambda x: x[1], reverse=True)
        )
        training_singer2mins = {k: round(v, 2) for k, v in training_singer2mins.items()}
        meta_info["singers"]["training_minutes"] = training_singer2mins

        total_singer2mins = dict(
            sorted(total_singer2mins.items(), key=lambda x: x[1], reverse=True)
        )
        total_singer2mins = {k: round(v, 2) for k, v in total_singer2mins.items()}
        meta_info["singers"]["minutes"] = total_singer2mins

        with open(os.path.join(save_dir, "meta_info.json"), "w") as f:
            json.dump(meta_info, f, indent=4, ensure_ascii=False)

        # FIX: renamed loop variable `min` -> `mins` (was shadowing the
        # builtin min()); printed output is unchanged.
        for singer, mins in training_singer2mins.items():
            print(f"Speaker/Singer {singer}: {mins} mins for training")

    print("-" * 10, "\n")
The provided code snippet includes necessary dependencies for implementing the `preprocess` function. Write a Python function `def preprocess(cfg, args)` to solve the following problem:
Preprocess raw data of single or multiple datasets (in cfg.dataset). Args: cfg (dict): dictionary that stores configurations; args (ArgumentParser): specify the configuration file and num_workers.
Here is the function:
def preprocess(cfg, args):
    """Preprocess raw data of single or multiple datasets (in cfg.dataset)

    Args:
        cfg (dict): dictionary that stores configurations
        args (ArgumentParser): specify the configuration file and num_workers
    """
    # Specify the output root path to save the processed data
    output_path = cfg.preprocess.processed_dir
    os.makedirs(output_path, exist_ok=True)

    ## Split train and test sets
    for dataset in cfg.dataset:
        print("Preprocess {}...".format(dataset))
        preprocess_dataset(
            dataset,
            cfg.dataset_path[dataset],
            output_path,
            cfg.preprocess,
            cfg.task_type,
            is_custom_dataset=dataset in cfg.use_custom_dataset,
        )

    # Data augmentation: create new wav files with pitch shift, formant shift, equalizer, time stretch
    try:
        assert isinstance(
            cfg.preprocess.data_augment, list
        ), "Please provide a list of datasets need to be augmented."
        if len(cfg.preprocess.data_augment) > 0:
            new_datasets_list = []
            for dataset in cfg.preprocess.data_augment:
                new_datasets = data_augment.augment_dataset(cfg, dataset)
                new_datasets_list.extend(new_datasets)
            cfg.dataset.extend(new_datasets_list)
            print("Augmentation datasets: ", cfg.dataset)
    # FIX: was a bare `except:` — keep the best-effort "skip augmentation"
    # behavior but stop swallowing KeyboardInterrupt/SystemExit.
    except Exception:
        print("No Data Augmentation.")

    # Dump metadata of datasets (singers, train/test durations, etc.)
    cal_metadata(cfg)

    ## Prepare the acoustic features
    for dataset in cfg.dataset:
        # Skip augmented datasets which do not need to extract acoustic features
        # We will copy acoustic features from the original dataset later
        # FIX: the original condition read `"equalizer" in dataset in dataset`,
        # a chained-comparison typo; `dataset in dataset` is always True for a
        # str so the result was the same, but the intent is the plain test.
        if (
            "pitch_shift" in dataset
            or "formant_shift" in dataset
            or "equalizer" in dataset
        ):
            continue
        print(
            "Extracting acoustic features for {} using {} workers ...".format(
                dataset, args.num_workers
            )
        )
        extract_acoustic_features(dataset, output_path, cfg, args.num_workers)
        # Calculate the statistics of acoustic features
        if cfg.preprocess.mel_min_max_norm:
            acoustic_extractor.cal_mel_min_max(dataset, output_path, cfg)

        if cfg.preprocess.extract_pitch:
            acoustic_extractor.cal_pitch_statistics_svc(dataset, output_path, cfg)

    # Copy acoustic features for augmented datasets by creating soft-links
    for dataset in cfg.dataset:
        if "pitch_shift" in dataset:
            src_dataset = dataset.replace("_pitch_shift", "")
            src_dataset_dir = os.path.join(output_path, src_dataset)
        elif "formant_shift" in dataset:
            src_dataset = dataset.replace("_formant_shift", "")
            src_dataset_dir = os.path.join(output_path, src_dataset)
        elif "equalizer" in dataset:
            src_dataset = dataset.replace("_equalizer", "")
            src_dataset_dir = os.path.join(output_path, src_dataset)
        else:
            continue
        dataset_dir = os.path.join(output_path, dataset)
        metadata = []
        for split in ["train", "test"] if not "eval" in dataset else ["test"]:
            metadata_file_path = os.path.join(src_dataset_dir, "{}.json".format(split))
            with open(metadata_file_path, "r") as f:
                metadata.extend(json.load(f))
        print("Copying acoustic features for {}...".format(dataset))
        acoustic_extractor.copy_acoustic_features(
            metadata, dataset_dir, src_dataset_dir, cfg
        )
        if cfg.preprocess.mel_min_max_norm:
            acoustic_extractor.cal_mel_min_max(dataset, output_path, cfg)

        if cfg.preprocess.extract_pitch:
            # NOTE(review): the non-augmented branch above calls
            # cal_pitch_statistics_svc while this one calls
            # cal_pitch_statistics — confirm the asymmetry is intentional.
            acoustic_extractor.cal_pitch_statistics(dataset, output_path, cfg)

    # Prepare the content features
    for dataset in cfg.dataset:
        print("Extracting content features for {}...".format(dataset))
        extract_content_features(dataset, output_path, cfg, args.num_workers)
import argparse
import torch
from models.tts.fastspeech2.fs2_trainer import FastSpeech2Trainer
from models.tts.vits.vits_trainer import VITSTrainer
from models.tts.valle.valle_trainer import VALLETrainer
from models.tts.naturalspeech2.ns2_trainer import NS2Trainer
from utils.util import load_config
class FastSpeech2Trainer(TTSTrainer):
    """Trainer for the FastSpeech2 acoustic model.

    Specializes the generic ``TTSTrainer`` loop with the FS2 dataset/collator,
    Adam + Noam LR schedule, and the FastSpeech2 multi-term loss.
    """

    def __init__(self, args, cfg):
        TTSTrainer.__init__(self, args, cfg)
        self.cfg = cfg

    def _build_dataset(self):
        # Dataset / collator pair consumed by the base trainer's dataloaders.
        return FS2Dataset, FS2Collator

    def __build_scheduler(self):
        # NOTE(review): name-mangled duplicate of _build_scheduler below;
        # appears to be dead code — confirm before removing.
        return NoamLR(self.optimizer, **self.cfg.train.lr_scheduler)

    def _write_summary(self, losses, stats):
        """Write per-step training scalars (losses + current LR) to TensorBoard."""
        for key, value in losses.items():
            self.sw.add_scalar("train/" + key, value, self.step)
        lr = self.optimizer.state_dict()["param_groups"][0]["lr"]
        self.sw.add_scalar("learning_rate", lr, self.step)

    def _write_valid_summary(self, losses, stats):
        """Write validation scalars to TensorBoard."""
        for key, value in losses.items():
            self.sw.add_scalar("val/" + key, value, self.step)

    def _build_criterion(self):
        return FastSpeech2Loss(self.cfg)

    def get_state_dict(self):
        """Collect everything needed to resume training from a checkpoint."""
        state_dict = {
            "model": self.model.state_dict(),
            "optimizer": self.optimizer.state_dict(),
            "scheduler": self.scheduler.state_dict(),
            "step": self.step,
            "epoch": self.epoch,
            "batch_size": self.cfg.train.batch_size,
        }
        return state_dict

    def _build_optimizer(self):
        optimizer = torch.optim.Adam(self.model.parameters(), **self.cfg.train.adam)
        return optimizer

    def _build_scheduler(self):
        # Noam warmup/decay schedule, stepped once per optimizer update.
        scheduler = NoamLR(self.optimizer, **self.cfg.train.lr_scheduler)
        return scheduler

    def _build_model(self):
        self.model = FastSpeech2(self.cfg)
        return self.model

    def _train_epoch(self):
        r"""Training epoch. Should return average loss of a batch (sample) over
        one epoch. See ``train_loop`` for usage.
        """
        self.model.train()
        epoch_sum_loss: float = 0.0
        epoch_step: int = 0
        epoch_losses: dict = {}
        for batch in tqdm(
            self.train_dataloader,
            desc=f"Training Epoch {self.epoch}",
            unit="batch",
            colour="GREEN",
            leave=False,
            dynamic_ncols=True,
            smoothing=0.04,
            disable=not self.accelerator.is_main_process,
        ):
            # Do training step and BP
            with self.accelerator.accumulate(self.model):
                loss, train_losses = self._train_step(batch)
                self.accelerator.backward(loss)
                # Clip gradients before the optimizer step to stabilize training.
                grad_clip_thresh = self.cfg.train.grad_clip_thresh
                nn.utils.clip_grad_norm_(self.model.parameters(), grad_clip_thresh)
                self.optimizer.step()
                self.scheduler.step()
                self.optimizer.zero_grad()
            self.batch_count += 1

            # Update info for each step
            if self.batch_count % self.cfg.train.gradient_accumulation_step == 0:
                epoch_sum_loss += loss
                for key, value in train_losses.items():
                    if key not in epoch_losses.keys():
                        epoch_losses[key] = value
                    else:
                        epoch_losses[key] += value

                self.accelerator.log(
                    {
                        "Step/Train Loss": loss,
                        "Step/Learning Rate": self.optimizer.param_groups[0]["lr"],
                    },
                    step=self.step,
                )
                self.step += 1
                epoch_step += 1

        self.accelerator.wait_for_everyone()
        # Average over effective optimizer updates.
        epoch_sum_loss = (
            epoch_sum_loss
            / len(self.train_dataloader)
            * self.cfg.train.gradient_accumulation_step
        )
        for key in epoch_losses.keys():
            epoch_losses[key] = (
                epoch_losses[key]
                / len(self.train_dataloader)
                * self.cfg.train.gradient_accumulation_step
            )
        return epoch_sum_loss, epoch_losses

    def _train_step(self, data):
        """One forward pass + loss computation (backprop happens in the caller)."""
        train_losses = {}
        total_loss = 0
        train_stats = {}

        preds = self.model(data)

        train_losses = self.criterion(data, preds)

        total_loss = train_losses["loss"]
        # Convert loss tensors to floats for logging; the total stays a tensor
        # so the caller can backpropagate through it.
        for key, value in train_losses.items():
            train_losses[key] = value.item()

        return total_loss, train_losses

    def _valid_step(self, data):
        """One validation forward pass; returns total loss, per-loss floats, stats."""
        valid_loss = {}
        total_valid_loss = 0
        valid_stats = {}

        preds = self.model(data)

        valid_losses = self.criterion(data, preds)

        total_valid_loss = valid_losses["loss"]
        for key, value in valid_losses.items():
            valid_losses[key] = value.item()

        return total_valid_loss, valid_losses, valid_stats
class VITSTrainer(TTSTrainer):
    """Trainer for the VITS end-to-end TTS model.

    Trains a ``SynthesizerTrn`` generator adversarially against a
    ``MultiPeriodDiscriminator`` with the standard VITS objective
    (duration + mel + KL + feature-matching + LSGAN losses).
    """

    def __init__(self, args, cfg):
        TTSTrainer.__init__(self, args, cfg)

        if cfg.preprocess.use_spkid and cfg.train.multi_speaker_training:
            # Infer the speaker count from the dataset when not set explicitly.
            # Fix: the original wrote ``cfg.model.n_speaker`` (no trailing "s"),
            # which the guard above never reads and which would leave
            # ``n_speakers`` at 0 when expanded into SynthesizerTrn.
            if cfg.model.n_speakers == 0:
                cfg.model.n_speakers = len(self.speakers)

    def _build_model(self):
        """Build the generator/discriminator pair.

        Returns:
            dict: ``{"generator": ..., "discriminator": ...}``.
        """
        net_g = SynthesizerTrn(
            self.cfg.model.text_token_num,
            self.cfg.preprocess.n_fft // 2 + 1,
            self.cfg.preprocess.segment_size // self.cfg.preprocess.hop_size,
            **self.cfg.model,
        )
        net_d = MultiPeriodDiscriminator(self.cfg.model.use_spectral_norm)
        model = {"generator": net_g, "discriminator": net_d}
        return model

    def _build_dataset(self):
        """Return the dataset/collator classes used by the dataloaders."""
        return VITSDataset, VITSCollator

    def _build_optimizer(self):
        """One AdamW optimizer per network, sharing the same hyperparameters."""
        optimizer_g = torch.optim.AdamW(
            self.model["generator"].parameters(),
            self.cfg.train.learning_rate,
            betas=self.cfg.train.AdamW.betas,
            eps=self.cfg.train.AdamW.eps,
        )
        optimizer_d = torch.optim.AdamW(
            self.model["discriminator"].parameters(),
            self.cfg.train.learning_rate,
            betas=self.cfg.train.AdamW.betas,
            eps=self.cfg.train.AdamW.eps,
        )
        optimizer = {"optimizer_g": optimizer_g, "optimizer_d": optimizer_d}
        return optimizer

    def _build_scheduler(self):
        """Exponential LR decay for both optimizers, resumed at the current epoch."""
        scheduler_g = ExponentialLR(
            self.optimizer["optimizer_g"],
            gamma=self.cfg.train.lr_decay,
            last_epoch=self.epoch - 1,
        )
        scheduler_d = ExponentialLR(
            self.optimizer["optimizer_d"],
            gamma=self.cfg.train.lr_decay,
            last_epoch=self.epoch - 1,
        )
        scheduler = {"scheduler_g": scheduler_g, "scheduler_d": scheduler_d}
        return scheduler

    def _build_criterion(self):
        """Build the VITS losses.

        Returns:
            dict: ``{"generator": GeneratorLoss, "discriminator": DiscriminatorLoss}``.
        """

        class GeneratorLoss(nn.Module):
            """Generator-side objective: duration + mel + KL + FM + GAN terms."""

            def __init__(self, cfg):
                super(GeneratorLoss, self).__init__()
                self.cfg = cfg
                self.l1_loss = nn.L1Loss()

            def generator_loss(self, disc_outputs):
                # LSGAN generator loss: push D(fake) towards 1.
                loss = 0
                gen_losses = []
                for dg in disc_outputs:
                    dg = dg.float()
                    l = torch.mean((1 - dg) ** 2)
                    gen_losses.append(l)
                    loss += l
                return loss, gen_losses

            def feature_loss(self, fmap_r, fmap_g):
                # L1 feature matching between real/generated discriminator features.
                loss = 0
                for dr, dg in zip(fmap_r, fmap_g):
                    for rl, gl in zip(dr, dg):
                        rl = rl.float().detach()
                        gl = gl.float()
                        loss += torch.mean(torch.abs(rl - gl))
                return loss * 2

            def kl_loss(self, z_p, logs_q, m_p, logs_p, z_mask):
                """
                KL divergence between posterior and prior, masked and averaged.

                z_p, logs_q: [b, h, t_t]
                m_p, logs_p: [b, h, t_t]
                """
                z_p = z_p.float()
                logs_q = logs_q.float()
                m_p = m_p.float()
                logs_p = logs_p.float()
                z_mask = z_mask.float()

                kl = logs_p - logs_q - 0.5
                kl += 0.5 * ((z_p - m_p) ** 2) * torch.exp(-2.0 * logs_p)
                kl = torch.sum(kl * z_mask)
                l = kl / torch.sum(z_mask)
                return l

            def forward(
                self,
                outputs_g,
                outputs_d,
                y_mel,
                y_hat_mel,
            ):
                """Return a dict of generator losses plus their sum (``loss_gen_all``)."""
                loss_g = {}

                # duration loss
                loss_dur = torch.sum(outputs_g["l_length"].float())
                loss_g["loss_dur"] = loss_dur

                # mel loss
                loss_mel = self.l1_loss(y_mel, y_hat_mel) * self.cfg.train.c_mel
                loss_g["loss_mel"] = loss_mel

                # kl loss
                loss_kl = (
                    self.kl_loss(
                        outputs_g["z_p"],
                        outputs_g["logs_q"],
                        outputs_g["m_p"],
                        outputs_g["logs_p"],
                        outputs_g["z_mask"],
                    )
                    * self.cfg.train.c_kl
                )
                loss_g["loss_kl"] = loss_kl

                # feature loss
                loss_fm = self.feature_loss(outputs_d["fmap_rs"], outputs_d["fmap_gs"])
                loss_g["loss_fm"] = loss_fm

                # gan loss
                loss_gen, losses_gen = self.generator_loss(outputs_d["y_d_hat_g"])
                loss_g["loss_gen"] = loss_gen
                loss_g["loss_gen_all"] = (
                    loss_dur + loss_mel + loss_kl + loss_fm + loss_gen
                )

                return loss_g

        class DiscriminatorLoss(nn.Module):
            """LSGAN discriminator loss: real outputs -> 1, generated -> 0."""

            def __init__(self, cfg):
                super(DiscriminatorLoss, self).__init__()
                self.cfg = cfg
                self.l1Loss = torch.nn.L1Loss(reduction="mean")

            def __call__(self, disc_real_outputs, disc_generated_outputs):
                loss_d = {}

                loss = 0
                r_losses = []
                g_losses = []
                for dr, dg in zip(disc_real_outputs, disc_generated_outputs):
                    dr = dr.float()
                    dg = dg.float()
                    r_loss = torch.mean((1 - dr) ** 2)
                    g_loss = torch.mean(dg**2)
                    loss += r_loss + g_loss
                    r_losses.append(r_loss.item())
                    g_losses.append(g_loss.item())

                loss_d["loss_disc_all"] = loss
                return loss_d

        criterion = {
            "generator": GeneratorLoss(self.cfg),
            "discriminator": DiscriminatorLoss(self.cfg),
        }
        return criterion

    def write_summary(
        self,
        losses,
        stats,
        images={},
        audios={},
        audio_sampling_rate=24000,
        tag="train",
    ):
        """Write training scalars (and optional images/audios) to TensorBoard.

        The ``images``/``audios`` defaults are never mutated, so the mutable
        defaults are safe here.
        """
        for key, value in losses.items():
            self.sw.add_scalar(tag + "/" + key, value, self.step)
        self.sw.add_scalar(
            "learning_rate",
            self.optimizer["optimizer_g"].param_groups[0]["lr"],
            self.step,
        )

        if len(images) != 0:
            for key, value in images.items():
                self.sw.add_image(key, value, self.global_step, batchformats="HWC")
        if len(audios) != 0:
            for key, value in audios.items():
                self.sw.add_audio(key, value, self.global_step, audio_sampling_rate)

    def write_valid_summary(
        self, losses, stats, images={}, audios={}, audio_sampling_rate=24000, tag="val"
    ):
        """Write validation scalars (and optional images/audios) to TensorBoard."""
        for key, value in losses.items():
            self.sw.add_scalar(tag + "/" + key, value, self.step)

        if len(images) != 0:
            for key, value in images.items():
                self.sw.add_image(key, value, self.global_step, batchformats="HWC")
        if len(audios) != 0:
            for key, value in audios.items():
                self.sw.add_audio(key, value, self.global_step, audio_sampling_rate)

    def get_state_dict(self):
        """Collect everything needed to resume training into a single dict."""
        state_dict = {
            "generator": self.model["generator"].state_dict(),
            "discriminator": self.model["discriminator"].state_dict(),
            "optimizer_g": self.optimizer["optimizer_g"].state_dict(),
            "optimizer_d": self.optimizer["optimizer_d"].state_dict(),
            "scheduler_g": self.scheduler["scheduler_g"].state_dict(),
            "scheduler_d": self.scheduler["scheduler_d"].state_dict(),
            "step": self.step,
            "epoch": self.epoch,
            "batch_size": self.cfg.train.batch_size,
        }
        return state_dict

    def load_model(self, checkpoint):
        """Restore networks, optimizers, schedulers and counters from ``checkpoint``."""
        self.step = checkpoint["step"]
        self.epoch = checkpoint["epoch"]

        self.model["generator"].load_state_dict(checkpoint["generator"])
        self.model["discriminator"].load_state_dict(checkpoint["discriminator"])
        self.optimizer["optimizer_g"].load_state_dict(checkpoint["optimizer_g"])
        self.optimizer["optimizer_d"].load_state_dict(checkpoint["optimizer_d"])
        self.scheduler["scheduler_g"].load_state_dict(checkpoint["scheduler_g"])
        self.scheduler["scheduler_d"].load_state_dict(checkpoint["scheduler_d"])

    def _valid_step(self, batch):
        r"""Testing forward step. Should return average loss of a sample over
        one batch. Provoke ``_forward_step`` is recommended except for special case.
        See ``_test_epoch`` for usage.
        """
        valid_losses = {}
        total_loss = 0
        valid_stats = {}

        batch["linear"] = batch["linear"].transpose(2, 1)  # [b, d, t]
        batch["mel"] = batch["mel"].transpose(2, 1)  # [b, d, t]
        batch["audio"] = batch["audio"].unsqueeze(1)  # [b, d, t]

        # Generator output
        outputs_g = self.model["generator"](batch)

        # Slice ground-truth mel/audio to the random window the generator used.
        y_mel = slice_segments(
            batch["mel"],
            outputs_g["ids_slice"],
            self.cfg.preprocess.segment_size // self.cfg.preprocess.hop_size,
        )
        y_hat_mel = mel_spectrogram_torch(
            outputs_g["y_hat"].squeeze(1), self.cfg.preprocess
        )
        y = slice_segments(
            batch["audio"],
            outputs_g["ids_slice"] * self.cfg.preprocess.hop_size,
            self.cfg.preprocess.segment_size,
        )

        # Discriminator loss on detached fake audio.
        outputs_d = self.model["discriminator"](y, outputs_g["y_hat"].detach())
        loss_d = self.criterion["discriminator"](
            outputs_d["y_d_hat_r"], outputs_d["y_d_hat_g"]
        )
        valid_losses.update(loss_d)

        # Generator losses (discriminator re-run without detach).
        outputs_d = self.model["discriminator"](y, outputs_g["y_hat"])
        loss_g = self.criterion["generator"](outputs_g, outputs_d, y_mel, y_hat_mel)
        valid_losses.update(loss_g)

        for item in valid_losses:
            valid_losses[item] = valid_losses[item].item()

        total_loss = loss_g["loss_gen_all"] + loss_d["loss_disc_all"]
        return (
            total_loss.item(),
            valid_losses,
            valid_stats,
        )

    def _train_step(self, batch):
        r"""Forward step for training and inference. This function is called
        in ``_train_step`` & ``_test_step`` function.

        Runs one full GAN step: discriminator update on detached fake audio,
        then generator update.
        """
        train_losses = {}
        total_loss = 0
        training_stats = {}

        batch["linear"] = batch["linear"].transpose(2, 1)  # [b, d, t]
        batch["mel"] = batch["mel"].transpose(2, 1)  # [b, d, t]
        batch["audio"] = batch["audio"].unsqueeze(1)  # [b, d, t]

        # Generator output
        outputs_g = self.model["generator"](batch)

        y_mel = slice_segments(
            batch["mel"],
            outputs_g["ids_slice"],
            self.cfg.preprocess.segment_size // self.cfg.preprocess.hop_size,
        )
        y_hat_mel = mel_spectrogram_torch(
            outputs_g["y_hat"].squeeze(1), self.cfg.preprocess
        )
        y = slice_segments(
            batch["audio"],
            outputs_g["ids_slice"] * self.cfg.preprocess.hop_size,
            self.cfg.preprocess.segment_size,
        )

        # Train Discriminator (fake audio detached so G is not updated here).
        outputs_d = self.model["discriminator"](y, outputs_g["y_hat"].detach())
        loss_d = self.criterion["discriminator"](
            outputs_d["y_d_hat_r"], outputs_d["y_d_hat_g"]
        )
        train_losses.update(loss_d)

        # BP and Grad Updated
        self.optimizer["optimizer_d"].zero_grad()
        self.accelerator.backward(loss_d["loss_disc_all"])
        self.optimizer["optimizer_d"].step()

        # Train Generator
        outputs_d = self.model["discriminator"](y, outputs_g["y_hat"])
        loss_g = self.criterion["generator"](outputs_g, outputs_d, y_mel, y_hat_mel)
        train_losses.update(loss_g)

        # BP and Grad Updated
        self.optimizer["optimizer_g"].zero_grad()
        self.accelerator.backward(loss_g["loss_gen_all"])
        self.optimizer["optimizer_g"].step()

        for item in train_losses:
            train_losses[item] = train_losses[item].item()

        total_loss = loss_g["loss_gen_all"] + loss_d["loss_disc_all"]
        return (
            total_loss.item(),
            train_losses,
            training_stats,
        )

    def _train_epoch(self):
        r"""Training epoch. Should return average loss of a batch (sample) over
        one epoch. See ``train_loop`` for usage.
        """
        epoch_sum_loss: float = 0.0
        epoch_losses: dict = {}
        epoch_step: int = 0
        for batch in tqdm(
            self.train_dataloader,
            desc=f"Training Epoch {self.epoch}",
            unit="batch",
            colour="GREEN",
            leave=False,
            dynamic_ncols=True,
            smoothing=0.04,
            disable=not self.accelerator.is_main_process,
        ):
            with self.accelerator.accumulate(self.model):
                total_loss, train_losses, training_stats = self._train_step(batch)
            self.batch_count += 1

            if self.batch_count % self.cfg.train.gradient_accumulation_step == 0:
                epoch_sum_loss += total_loss
                for key, value in train_losses.items():
                    if key not in epoch_losses.keys():
                        epoch_losses[key] = value
                    else:
                        epoch_losses[key] += value

                # Fix: the LR labels were swapped — "Generator Learning Rate"
                # read optimizer_d and vice versa.
                self.accelerator.log(
                    {
                        "Step/Generator Loss": train_losses["loss_gen_all"],
                        "Step/Discriminator Loss": train_losses["loss_disc_all"],
                        "Step/Generator Learning Rate": self.optimizer[
                            "optimizer_g"
                        ].param_groups[0]["lr"],
                        "Step/Discriminator Learning Rate": self.optimizer[
                            "optimizer_d"
                        ].param_groups[0]["lr"],
                    },
                    step=self.step,
                )
                self.step += 1
                epoch_step += 1

        self.accelerator.wait_for_everyone()

        epoch_sum_loss = (
            epoch_sum_loss
            / len(self.train_dataloader)
            * self.cfg.train.gradient_accumulation_step
        )
        for key in epoch_losses.keys():
            epoch_losses[key] = (
                epoch_losses[key]
                / len(self.train_dataloader)
                * self.cfg.train.gradient_accumulation_step
            )
        return epoch_sum_loss, epoch_losses
class VALLETrainer(TTSTrainer):
    """Trainer for VALL-E.

    VALL-E is trained in stages selected by ``--train_stage``:
    0 trains all modules, 1 the AR decoder, 2 the NAR decoder.
    Supports dynamic batch sizes via frame-count-based batching.
    """

    def __init__(self, args, cfg):
        TTSTrainer.__init__(self, args, cfg)

    def _build_model(self):
        """Instantiate the VALL-E model from the model config."""
        model = VALLE(self.cfg.model)
        return model

    def _build_dataset(self):
        """Return the dataset/collator classes used by ``_build_dataloader``."""
        return VALLEDataset, VALLECollator

    def _build_optimizer(self):
        """Build the optimizer named in ``cfg.train.optimizer``.

        When a training stage is selected, only that stage's parameters are
        optimized; otherwise all model parameters are.

        Raises:
            NotImplementedError: for an unknown optimizer name.
        """
        if self.args.train_stage:
            if isinstance(self.model, DistributedDataParallel):
                model = self.model.module
            else:
                model = self.model
            model_parameters = model.stage_parameters(self.args.train_stage)
        else:
            model_parameters = self.model.parameters()

        if self.cfg.train.optimizer == "ScaledAdam":
            # ScaledAdam needs parameter names for its per-parameter scaling.
            parameters_names = []
            if self.args.train_stage != 0:
                parameters_names.append(
                    [
                        name_param_pair[0]
                        for name_param_pair in model.stage_named_parameters(
                            self.args.train_stage
                        )
                    ]
                )
            else:
                # Fix: must use self.model — the local ``model`` is only bound
                # when a training stage was selected above (NameError otherwise).
                parameters_names.append(
                    [
                        name_param_pair[0]
                        for name_param_pair in self.model.named_parameters()
                    ]
                )
            optimizer = ScaledAdam(
                model_parameters,
                lr=self.cfg.train.base_lr,
                betas=(0.9, 0.95),
                clipping_scale=2.0,
                parameters_names=parameters_names,
                show_dominant_parameters=False,
                clipping_update_period=1000,
            )
        elif self.cfg.train.optimizer == "Eve":
            optimizer = Eve(
                model_parameters,
                lr=self.cfg.train.base_lr,
                betas=(0.9, 0.98),
                target_rms=0.1,
            )
        elif self.cfg.train.optimizer == "AdamW":
            optimizer = torch.optim.AdamW(
                model_parameters,
                lr=self.cfg.train.base_lr,
                betas=(0.9, 0.95),
                weight_decay=1e-2,
                eps=1e-8,
            )
        elif self.cfg.train.optimizer == "Adam":
            optimizer = torch.optim.Adam(
                model_parameters,
                lr=self.cfg.train.base_lr,
                betas=(0.9, 0.95),
                eps=1e-8,
            )
        else:
            raise NotImplementedError()

        return optimizer

    def _build_scheduler(self):
        """Build the LR scheduler named in ``cfg.train.scheduler``.

        Raises:
            NotImplementedError: for an unknown scheduler name.
        """
        if self.cfg.train.scheduler.lower() == "eden":
            scheduler = Eden(
                self.optimizer, 5000, 4, warmup_batches=self.cfg.train.warmup_steps
            )
        elif self.cfg.train.scheduler.lower() == "noam":
            scheduler = NoamScheduler(
                self.cfg.train.base_lr,
                self.optimizer,
                self.cfg.model.decoder_dim,
                warmup_steps=self.cfg.train.warmup_steps,
            )
        elif self.cfg.train.scheduler.lower() == "cosine":
            from diffusers.optimization import get_cosine_schedule_with_warmup

            scheduler = get_cosine_schedule_with_warmup(
                self.optimizer,
                num_warmup_steps=self.cfg.train.warmup_steps
                * self.accelerator.num_processes,
                num_training_steps=self.cfg.train.total_training_steps
                * self.accelerator.num_processes,
            )
        else:
            raise NotImplementedError(f"{self.cfg.train.scheduler}")

        return scheduler

    def _train_epoch(self):
        r"""Training epoch. Should return average loss of a batch (sample) over
        one epoch. See ``train_loop`` for usage.
        """
        if isinstance(self.model, dict):
            for key in self.model.keys():
                self.model[key].train()
        else:
            self.model.train()

        epoch_sum_loss: float = 0.0
        epoch_losses: dict = {}
        epoch_step: int = 0
        for batch in tqdm(
            self.train_dataloader,
            desc=f"Training Epoch {self.epoch}",
            unit="batch",
            colour="GREEN",
            leave=False,
            dynamic_ncols=True,
            smoothing=0.04,
            disable=not self.accelerator.is_main_process,
        ):
            # Do training step and BP
            with self.accelerator.accumulate(self.model):
                total_loss, train_losses = self._train_step(batch)
                self.accelerator.backward(total_loss)
                self.optimizer.step()
                self.optimizer.zero_grad()
            self.batch_count += 1

            if self.batch_count % self.cfg.train.gradient_accumulation_step == 0:
                # ScaledAdam/Eve clip gradients internally.
                if self.cfg.train.optimizer not in ["ScaledAdam", "Eve"]:
                    torch.nn.utils.clip_grad_norm_(self.model.parameters(), 1.0)
                # Advance the scheduler once per accumulated micro-batch.
                for k in range(self.cfg.train.gradient_accumulation_step):
                    if isinstance(self.scheduler, Eden):
                        self.scheduler.step_batch(self.step)
                    else:
                        self.scheduler.step()

                epoch_sum_loss += total_loss.detach().cpu().item()
                if isinstance(train_losses, dict):
                    for key, value in train_losses.items():
                        if key not in epoch_losses.keys():
                            epoch_losses[key] = value
                        else:
                            epoch_losses[key] += value

                if isinstance(train_losses, dict):
                    for key, loss in train_losses.items():
                        self.accelerator.log(
                            {"Step/Train {}".format(key): "{:.6f}".format(loss)},
                            step=self.step,
                        )
                else:
                    # Fix: this branch referenced an undefined name ``loss``
                    # (NameError); log the step's total loss instead.
                    self.accelerator.log(
                        {"Step/Train Loss": total_loss.detach().cpu().item()},
                        step=self.step,
                    )

                self.accelerator.log(
                    {"Step/lr": self.scheduler.get_last_lr()[0]},
                    step=self.step,
                )
                self.step += 1
                epoch_step += 1

        self.accelerator.wait_for_everyone()

        epoch_sum_loss = (
            epoch_sum_loss
            / len(self.train_dataloader)
            * self.cfg.train.gradient_accumulation_step
        )
        for key in epoch_losses.keys():
            epoch_losses[key] = (
                epoch_losses[key]
                / len(self.train_dataloader)
                * self.cfg.train.gradient_accumulation_step
            )
        return epoch_sum_loss, epoch_losses

    def _train_step(self, batch, is_training=True):
        """Run one forward pass; also used for validation with ``is_training=False``.

        Returns:
            tuple: ``(avg_loss, loss_dict)`` — loss normalized by the number of
            acoustic frames in the batch, and a dict of per-term float losses.
        """
        text_tokens = batch["phone_seq"].to(self.device)
        text_tokens_lens = batch["phone_len"].to(self.device)
        assert text_tokens.ndim == 2

        audio_features = batch["acoustic_token"].to(self.device)
        audio_features_lens = batch["target_len"].to(self.device)
        assert audio_features.ndim == 3

        with torch.set_grad_enabled(is_training):
            loss, losses = self.model(
                x=text_tokens,
                x_lens=text_tokens_lens,
                y=audio_features,
                y_lens=audio_features_lens,
                train_stage=self.args.train_stage,
            )

        assert loss.requires_grad == is_training

        loss_dict = {}
        # Normalize by total frame count so losses are comparable across batches.
        frames_sum = (audio_features_lens).sum()
        avg_loss = loss / frames_sum

        loss_dict["loss"] = avg_loss.detach().cpu().item()
        for l in losses:
            loss_dict[l] = losses[l].detach().cpu().item() / frames_sum.item()

        return avg_loss, loss_dict

    def _valid_step(self, batch):
        """Run one validation step (no gradients)."""
        valid_losses = {}
        total_loss = 0
        valid_stats = {}

        total_loss, valid_losses = self._train_step(
            batch=batch,
            is_training=False,
        )
        assert total_loss.requires_grad is False

        total_loss = total_loss.detach().cpu().item()
        return total_loss, valid_losses, valid_stats

    @staticmethod
    def add_arguments(parser: argparse.ArgumentParser):
        """Register VALL-E-specific CLI arguments on ``parser``."""
        # Fix: decorated as @staticmethod — the original plain function only
        # worked when called on the class, not on an instance.
        parser.add_argument(
            "--train_stage",
            type=int,
            default=1,
            help="0: train all modules, 1: AR Decoder, 2: NAR Decoder",
        )

    def _build_dataloader(self):
        """Build train/valid dataloaders; frame-count-batched when dynamic batching is on."""
        if not self.cfg.train.use_dynamic_batchsize:
            return super()._build_dataloader()
        if len(self.cfg.dataset) > 1:
            raise Exception("use_dynamic_batchsize only supports single dataset now.")

        Dataset, Collator = self._build_dataset()
        train_dataset = Dataset(
            self.cfg, self.cfg.dataset[0], is_valid=False
        )  # TODO: support use_dynamic_batchsize for more than one datasets.
        train_collate = Collator(self.cfg)
        batch_sampler = batch_by_size(
            train_dataset.num_frame_indices,
            train_dataset.get_num_frames,
            max_tokens=self.cfg.train.max_tokens * self.accelerator.num_processes,
            max_sentences=self.cfg.train.max_sentences * self.accelerator.num_processes,
            required_batch_size_multiple=self.accelerator.num_processes,
        )
        np.random.seed(1234)
        np.random.shuffle(batch_sampler)
        # Keep only batches divisible across processes; each process takes its slice.
        batches = [
            x[self.accelerator.local_process_index :: self.accelerator.num_processes]
            for x in batch_sampler
            if len(x) % self.accelerator.num_processes == 0
        ]
        train_loader = DataLoader(
            train_dataset,
            collate_fn=train_collate,
            num_workers=self.cfg.train.dataloader.num_worker,
            batch_sampler=VariableSampler(
                batches, drop_last=False, use_random_sampler=True
            ),
            pin_memory=False,
        )
        self.accelerator.wait_for_everyone()

        valid_dataset = Dataset(self.cfg, self.cfg.dataset[0], is_valid=True)
        valid_collate = Collator(self.cfg)
        batch_sampler = batch_by_size(
            valid_dataset.num_frame_indices,
            valid_dataset.get_num_frames,
            max_tokens=self.cfg.train.max_tokens * self.accelerator.num_processes,
            max_sentences=self.cfg.train.max_sentences * self.accelerator.num_processes,
            required_batch_size_multiple=self.accelerator.num_processes,
        )
        batches = [
            x[self.accelerator.local_process_index :: self.accelerator.num_processes]
            for x in batch_sampler
            if len(x) % self.accelerator.num_processes == 0
        ]
        valid_loader = DataLoader(
            valid_dataset,
            collate_fn=valid_collate,
            num_workers=self.cfg.train.dataloader.num_worker,
            batch_sampler=VariableSampler(batches, drop_last=False),
            pin_memory=False,
        )
        self.accelerator.wait_for_everyone()

        return train_loader, valid_loader

    def _accelerator_prepare(self):
        """Wrap model/optimizer/scheduler (and, unless dynamic batching is on,
        the dataloaders) with ``accelerator.prepare``."""
        if not self.cfg.train.use_dynamic_batchsize:
            (
                self.train_dataloader,
                self.valid_dataloader,
            ) = self.accelerator.prepare(
                self.train_dataloader,
                self.valid_dataloader,
            )

        if isinstance(self.model, dict):
            for key in self.model.keys():
                self.model[key] = self.accelerator.prepare(self.model[key])
        else:
            self.model = self.accelerator.prepare(self.model)

        if isinstance(self.optimizer, dict):
            for key in self.optimizer.keys():
                self.optimizer[key] = self.accelerator.prepare(self.optimizer[key])
        else:
            self.optimizer = self.accelerator.prepare(self.optimizer)

        if isinstance(self.scheduler, dict):
            for key in self.scheduler.keys():
                self.scheduler[key] = self.accelerator.prepare(self.scheduler[key])
        else:
            self.scheduler = self.accelerator.prepare(self.scheduler)
class NS2Trainer(TTSTrainer):
    def __init__(self, args, cfg):
        """Set up the full NaturalSpeech2 training environment.

        Unlike the base ``TTSTrainer``, this trainer wires everything up
        itself so it can support dynamic batch sizes (in which case the
        dataloaders are NOT wrapped by ``accelerator.prepare``).
        """
        self.args = args
        self.cfg = cfg

        cfg.exp_name = args.exp_name

        self._init_accelerator()
        self.accelerator.wait_for_everyone()

        # Init logger (main process only; other ranks never touch self.logger
        # thanks to the is_main_process guards below)
        with self.accelerator.main_process_first():
            if self.accelerator.is_main_process:
                os.makedirs(os.path.join(self.exp_dir, "checkpoint"), exist_ok=True)
                self.log_file = os.path.join(
                    os.path.join(self.exp_dir, "checkpoint"), "train.log"
                )
                self.logger = Logger(self.log_file, level=self.args.log_level).logger

        self.time_window = ValueWindow(50)

        if self.accelerator.is_main_process:
            # Log some info
            self.logger.info("=" * 56)
            self.logger.info("||\t\t" + "New training process started." + "\t\t||")
            self.logger.info("=" * 56)
            self.logger.info("\n")
            self.logger.debug(f"Using {args.log_level.upper()} logging level.")
            self.logger.info(f"Experiment name: {args.exp_name}")
            self.logger.info(f"Experiment directory: {self.exp_dir}")

        self.checkpoint_dir = os.path.join(self.exp_dir, "checkpoint")
        if self.accelerator.is_main_process:
            os.makedirs(self.checkpoint_dir, exist_ok=True)
        if self.accelerator.is_main_process:
            self.logger.debug(f"Checkpoint directory: {self.checkpoint_dir}")

        # init counts
        self.batch_count: int = 0
        self.step: int = 0
        self.epoch: int = 0
        # max_epoch <= 0 means "train forever".
        self.max_epoch = (
            self.cfg.train.max_epoch if self.cfg.train.max_epoch > 0 else float("inf")
        )
        if self.accelerator.is_main_process:
            self.logger.info(
                "Max epoch: {}".format(
                    self.max_epoch if self.max_epoch < float("inf") else "Unlimited"
                )
            )

        # Check values
        if self.accelerator.is_main_process:
            self._check_basic_configs()
            # Set runtime configs
            # NOTE(review): these attributes are only set on the main process —
            # verify that non-main ranks never read them.
            self.save_checkpoint_stride = self.cfg.train.save_checkpoint_stride
            self.checkpoints_path = [
                [] for _ in range(len(self.save_checkpoint_stride))
            ]
            self.keep_last = [
                i if i > 0 else float("inf") for i in self.cfg.train.keep_last
            ]
            self.run_eval = self.cfg.train.run_eval

        # set random seed
        with self.accelerator.main_process_first():
            start = time.monotonic_ns()
            self._set_random_seed(self.cfg.train.random_seed)
            end = time.monotonic_ns()
            if self.accelerator.is_main_process:
                self.logger.debug(
                    f"Setting random seed done in {(end - start) / 1e6:.2f}ms"
                )
                self.logger.debug(f"Random seed: {self.cfg.train.random_seed}")

        # setup data_loader
        with self.accelerator.main_process_first():
            if self.accelerator.is_main_process:
                self.logger.info("Building dataset...")
            start = time.monotonic_ns()
            self.train_dataloader, self.valid_dataloader = self._build_dataloader()
            end = time.monotonic_ns()
            if self.accelerator.is_main_process:
                self.logger.info(
                    f"Building dataset done in {(end - start) / 1e6:.2f}ms"
                )

        # setup model
        with self.accelerator.main_process_first():
            if self.accelerator.is_main_process:
                self.logger.info("Building model...")
            start = time.monotonic_ns()
            self.model = self._build_model()
            end = time.monotonic_ns()
            if self.accelerator.is_main_process:
                self.logger.debug(self.model)
                self.logger.info(f"Building model done in {(end - start) / 1e6:.2f}ms")
                self.logger.info(
                    f"Model parameters: {self._count_parameters(self.model)/1e6:.2f}M"
                )

        # optimizer & scheduler
        with self.accelerator.main_process_first():
            if self.accelerator.is_main_process:
                self.logger.info("Building optimizer and scheduler...")
            start = time.monotonic_ns()
            self.optimizer = self._build_optimizer()
            self.scheduler = self._build_scheduler()
            end = time.monotonic_ns()
            if self.accelerator.is_main_process:
                self.logger.info(
                    f"Building optimizer and scheduler done in {(end - start) / 1e6:.2f}ms"
                )

        # accelerate prepare
        # Dataloaders must stay unwrapped under dynamic batching (the custom
        # VariableSampler already shards batches across processes).
        if not self.cfg.train.use_dynamic_batchsize:
            if self.accelerator.is_main_process:
                self.logger.info("Initializing accelerate...")
            start = time.monotonic_ns()
            (
                self.train_dataloader,
                self.valid_dataloader,
            ) = self.accelerator.prepare(
                self.train_dataloader,
                self.valid_dataloader,
            )

        if isinstance(self.model, dict):
            for key in self.model.keys():
                self.model[key] = self.accelerator.prepare(self.model[key])
        else:
            self.model = self.accelerator.prepare(self.model)

        if isinstance(self.optimizer, dict):
            for key in self.optimizer.keys():
                self.optimizer[key] = self.accelerator.prepare(self.optimizer[key])
        else:
            self.optimizer = self.accelerator.prepare(self.optimizer)

        if isinstance(self.scheduler, dict):
            for key in self.scheduler.keys():
                self.scheduler[key] = self.accelerator.prepare(self.scheduler[key])
        else:
            self.scheduler = self.accelerator.prepare(self.scheduler)

        end = time.monotonic_ns()
        if self.accelerator.is_main_process:
            self.logger.info(
                f"Initializing accelerate done in {(end - start) / 1e6:.2f}ms"
            )

        # create criterion
        with self.accelerator.main_process_first():
            if self.accelerator.is_main_process:
                self.logger.info("Building criterion...")
            start = time.monotonic_ns()
            self.criterion = self._build_criterion()
            end = time.monotonic_ns()
            if self.accelerator.is_main_process:
                self.logger.info(
                    f"Building criterion done in {(end - start) / 1e6:.2f}ms"
                )

        # TODO: Resume from ckpt need test/debug
        with self.accelerator.main_process_first():
            if args.resume:
                if self.accelerator.is_main_process:
                    self.logger.info("Resuming from checkpoint...")
                start = time.monotonic_ns()
                ckpt_path = self._load_model(
                    self.checkpoint_dir,
                    args.checkpoint_path,
                    resume_type=args.resume_type,
                )
                end = time.monotonic_ns()
                if self.accelerator.is_main_process:
                    self.logger.info(
                        f"Resuming from checkpoint done in {(end - start) / 1e6:.2f}ms"
                    )
                self.checkpoints_path = json.load(
                    open(os.path.join(ckpt_path, "ckpts.json"), "r")
                )

        self.checkpoint_dir = os.path.join(self.exp_dir, "checkpoint")
        if self.accelerator.is_main_process:
            os.makedirs(self.checkpoint_dir, exist_ok=True)
        if self.accelerator.is_main_process:
            self.logger.debug(f"Checkpoint directory: {self.checkpoint_dir}")

        # save config file path
        self.config_save_path = os.path.join(self.exp_dir, "args.json")

        # Only for TTS tasks
        self.task_type = "TTS"
        if self.accelerator.is_main_process:
            self.logger.info("Task type: {}".format(self.task_type))
def _init_accelerator(self):
self.exp_dir = os.path.join(
os.path.abspath(self.cfg.log_dir), self.args.exp_name
)
project_config = ProjectConfiguration(
project_dir=self.exp_dir,
logging_dir=os.path.join(self.exp_dir, "log"),
)
# ddp_kwargs = DistributedDataParallelKwargs(find_unused_parameters=True)
self.accelerator = accelerate.Accelerator(
gradient_accumulation_steps=self.cfg.train.gradient_accumulation_step,
log_with=self.cfg.train.tracker,
project_config=project_config,
# kwargs_handlers=[ddp_kwargs]
)
if self.accelerator.is_main_process:
os.makedirs(project_config.project_dir, exist_ok=True)
os.makedirs(project_config.logging_dir, exist_ok=True)
with self.accelerator.main_process_first():
self.accelerator.init_trackers(self.args.exp_name)
def _build_model(self):
model = NaturalSpeech2(cfg=self.cfg.model)
return model
def _build_dataset(self):
return NS2Dataset, NS2Collator
    def _build_dataloader(self):
        """Build train/valid dataloaders.

        Two modes:
          * dynamic batch size — batches are grouped by total frame count via
            ``batch_by_size`` and manually sharded across processes (these
            loaders must NOT be wrapped by ``accelerator.prepare``);
          * normal — fixed ``cfg.train.batch_size`` with shuffling.

        Returns:
            tuple: ``(train_loader, valid_loader)``.
        """
        if self.cfg.train.use_dynamic_batchsize:
            print("Use Dynamic Batchsize......")
            Dataset, Collator = self._build_dataset()
            train_dataset = Dataset(self.cfg, self.cfg.dataset[0], is_valid=False)
            train_collate = Collator(self.cfg)
            batch_sampler = batch_by_size(
                train_dataset.num_frame_indices,
                train_dataset.get_num_frames,
                max_tokens=self.cfg.train.max_tokens * self.accelerator.num_processes,
                max_sentences=self.cfg.train.max_sentences
                * self.accelerator.num_processes,
                required_batch_size_multiple=self.accelerator.num_processes,
            )
            # Fixed seed so every process shuffles the batch list identically.
            np.random.seed(980205)
            np.random.shuffle(batch_sampler)
            print(batch_sampler[:1])
            # Keep only batches divisible across processes; each rank takes
            # its strided slice of every batch.
            batches = [
                x[
                    self.accelerator.local_process_index :: self.accelerator.num_processes
                ]
                for x in batch_sampler
                if len(x) % self.accelerator.num_processes == 0
            ]

            train_loader = DataLoader(
                train_dataset,
                collate_fn=train_collate,
                num_workers=self.cfg.train.dataloader.num_worker,
                batch_sampler=VariableSampler(
                    batches, drop_last=False, use_random_sampler=True
                ),
                pin_memory=self.cfg.train.dataloader.pin_memory,
            )
            self.accelerator.wait_for_everyone()

            valid_dataset = Dataset(self.cfg, self.cfg.dataset[0], is_valid=True)
            valid_collate = Collator(self.cfg)
            batch_sampler = batch_by_size(
                valid_dataset.num_frame_indices,
                valid_dataset.get_num_frames,
                max_tokens=self.cfg.train.max_tokens * self.accelerator.num_processes,
                max_sentences=self.cfg.train.max_sentences
                * self.accelerator.num_processes,
                required_batch_size_multiple=self.accelerator.num_processes,
            )
            batches = [
                x[
                    self.accelerator.local_process_index :: self.accelerator.num_processes
                ]
                for x in batch_sampler
                if len(x) % self.accelerator.num_processes == 0
            ]
            valid_loader = DataLoader(
                valid_dataset,
                collate_fn=valid_collate,
                num_workers=self.cfg.train.dataloader.num_worker,
                batch_sampler=VariableSampler(batches, drop_last=False),
                pin_memory=self.cfg.train.dataloader.pin_memory,
            )
            self.accelerator.wait_for_everyone()

        else:
            print("Use Normal Batchsize......")
            Dataset, Collator = self._build_dataset()
            train_dataset = Dataset(self.cfg, self.cfg.dataset[0], is_valid=False)
            train_collate = Collator(self.cfg)

            train_loader = DataLoader(
                train_dataset,
                shuffle=True,
                collate_fn=train_collate,
                batch_size=self.cfg.train.batch_size,
                num_workers=self.cfg.train.dataloader.num_worker,
                pin_memory=self.cfg.train.dataloader.pin_memory,
            )

            valid_dataset = Dataset(self.cfg, self.cfg.dataset[0], is_valid=True)
            valid_collate = Collator(self.cfg)

            valid_loader = DataLoader(
                valid_dataset,
                shuffle=True,
                collate_fn=valid_collate,
                batch_size=self.cfg.train.batch_size,
                num_workers=self.cfg.train.dataloader.num_worker,
                pin_memory=self.cfg.train.dataloader.pin_memory,
            )
            self.accelerator.wait_for_everyone()

        return train_loader, valid_loader
def _build_optimizer(self):
optimizer = torch.optim.AdamW(
filter(lambda p: p.requires_grad, self.model.parameters()),
**self.cfg.train.adam,
)
return optimizer
def _build_scheduler(self):
lr_scheduler = get_scheduler(
self.cfg.train.lr_scheduler,
optimizer=self.optimizer,
num_warmup_steps=self.cfg.train.lr_warmup_steps,
num_training_steps=self.cfg.train.num_train_steps,
)
return lr_scheduler
def _build_criterion(self):
criterion = torch.nn.L1Loss(reduction="mean")
return criterion
def write_summary(self, losses, stats):
for key, value in losses.items():
self.sw.add_scalar(key, value, self.step)
def write_valid_summary(self, losses, stats):
for key, value in losses.items():
self.sw.add_scalar(key, value, self.step)
def get_state_dict(self):
state_dict = {
"model": self.model.state_dict(),
"optimizer": self.optimizer.state_dict(),
"scheduler": self.scheduler.state_dict(),
"step": self.step,
"epoch": self.epoch,
"batch_size": self.cfg.train.batch_size,
}
return state_dict
def load_model(self, checkpoint):
self.step = checkpoint["step"]
self.epoch = checkpoint["epoch"]
self.model.load_state_dict(checkpoint["model"])
self.optimizer.load_state_dict(checkpoint["optimizer"])
self.scheduler.load_state_dict(checkpoint["scheduler"])
    def _train_step(self, batch):
        """Run one full NS2 training step: forward, losses, BP and optimizer update.

        The objective combines pitch/duration prior losses with diffusion
        losses (x0/noise for "diffusion", flow-matching for "flow") and an
        optional cross-entropy term on the predicted codec indices.

        Returns:
            tuple: ``(total_loss, train_losses, train_stats)`` with losses as floats.
        """
        train_losses = {}
        total_loss = 0
        train_stats = {}

        code = batch["code"]  # (B, 16, T)
        pitch = batch["pitch"]  # (B, T)
        duration = batch["duration"]  # (B, N)
        phone_id = batch["phone_id"]  # (B, N)
        ref_code = batch["ref_code"]  # (B, 16, T')
        phone_mask = batch["phone_mask"]  # (B, N)
        mask = batch["mask"]  # (B, T)
        ref_mask = batch["ref_mask"]  # (B, T')

        diff_out, prior_out = self.model(
            code=code,
            pitch=pitch,
            duration=duration,
            phone_id=phone_id,
            ref_code=ref_code,
            phone_mask=phone_mask,
            mask=mask,
            ref_mask=ref_mask,
        )

        # pitch loss
        pitch_loss = log_pitch_loss(prior_out["pitch_pred_log"], pitch, mask=mask)
        total_loss += pitch_loss
        train_losses["pitch_loss"] = pitch_loss

        # duration loss
        dur_loss = log_dur_loss(prior_out["dur_pred_log"], duration, mask=phone_mask)
        total_loss += dur_loss
        train_losses["dur_loss"] = dur_loss

        # NOTE(review): ``self.model.module`` assumes the model is wrapped
        # (e.g. DDP via accelerator.prepare) — confirm for single-process runs.
        x0 = self.model.module.code_to_latent(code)
        if self.cfg.model.diffusion.diffusion_type == "diffusion":
            # diff loss x0
            diff_loss_x0 = diff_loss(diff_out["x0_pred"], x0, mask=mask)
            total_loss += diff_loss_x0
            train_losses["diff_loss_x0"] = diff_loss_x0

            # diff loss noise
            diff_loss_noise = diff_loss(
                diff_out["noise_pred"], diff_out["noise"], mask=mask
            )
            total_loss += diff_loss_noise * self.cfg.train.diff_noise_loss_lambda
            train_losses["diff_loss_noise"] = diff_loss_noise

        elif self.cfg.model.diffusion.diffusion_type == "flow":
            # diff flow matching loss
            flow_gt = diff_out["noise"] - x0
            diff_loss_flow = diff_loss(diff_out["flow_pred"], flow_gt, mask=mask)
            total_loss += diff_loss_flow
            train_losses["diff_loss_flow"] = diff_loss_flow

        # diff loss ce
        # (nq, B, T); (nq, B, T, 1024)
        if self.cfg.train.diff_ce_loss_lambda > 0:
            pred_indices, pred_dist = self.model.module.latent_to_code(
                diff_out["x0_pred"], nq=code.shape[1]
            )
            gt_indices, _ = self.model.module.latent_to_code(x0, nq=code.shape[1])
            diff_loss_ce = diff_ce_loss(pred_dist, gt_indices, mask=mask)
            total_loss += diff_loss_ce * self.cfg.train.diff_ce_loss_lambda
            train_losses["diff_loss_ce"] = diff_loss_ce

        # Backprop and update (this step does its own optimizer handling, so
        # the epoch loop must not step the optimizer again).
        self.optimizer.zero_grad()
        self.accelerator.backward(total_loss)
        if self.accelerator.sync_gradients:
            self.accelerator.clip_grad_norm_(
                filter(lambda p: p.requires_grad, self.model.parameters()), 0.5
            )
        self.optimizer.step()
        self.scheduler.step()

        for item in train_losses:
            train_losses[item] = train_losses[item].item()

        if self.cfg.train.diff_ce_loss_lambda > 0:
            # Per-quantizer codebook prediction accuracy, masked to valid frames.
            pred_indices_list = pred_indices.long().detach().cpu().numpy()
            gt_indices_list = gt_indices.long().detach().cpu().numpy()
            mask_list = batch["mask"].detach().cpu().numpy()

            for i in range(pred_indices_list.shape[0]):
                pred_acc = np.sum(
                    (pred_indices_list[i] == gt_indices_list[i]) * mask_list
                ) / np.sum(mask_list)
                train_losses["pred_acc_{}".format(str(i))] = pred_acc

        train_losses["batch_size"] = code.shape[0]
        train_losses["max_frame_nums"] = np.max(
            batch["frame_nums"].detach().cpu().numpy()
        )

        return (total_loss.item(), train_losses, train_stats)
    def _valid_step(self, batch):
        """Run one validation forward pass and compute all losses for *batch*.

        Mirrors the training step (pitch/duration prior losses plus the
        diffusion or flow-matching losses) but performs no backward pass and
        no optimizer update.

        Args:
            batch (dict): collated validation batch; expected keys and shapes
                are annotated inline below.

        Returns:
            tuple: ``(total_loss_value, valid_losses, valid_stats)`` where
            ``total_loss_value`` is a Python float, ``valid_losses`` maps each
            sub-loss name to a float, and ``valid_stats`` is currently empty.
        """
        valid_losses = {}
        total_loss = 0
        valid_stats = {}

        code = batch["code"]  # (B, 16, T)
        pitch = batch["pitch"]  # (B, T)
        duration = batch["duration"]  # (B, N)
        phone_id = batch["phone_id"]  # (B, N)
        ref_code = batch["ref_code"]  # (B, 16, T')
        phone_mask = batch["phone_mask"]  # (B, N)
        mask = batch["mask"]  # (B, T)
        ref_mask = batch["ref_mask"]  # (B, T')

        diff_out, prior_out = self.model(
            code=code,
            pitch=pitch,
            duration=duration,
            phone_id=phone_id,
            ref_code=ref_code,
            phone_mask=phone_mask,
            mask=mask,
            ref_mask=ref_mask,
        )

        # pitch loss (prediction is in log domain, masked to valid frames)
        pitch_loss = log_pitch_loss(prior_out["pitch_pred_log"], pitch, mask=mask)
        total_loss += pitch_loss
        valid_losses["pitch_loss"] = pitch_loss

        # duration loss (per-phone, masked by phone_mask)
        dur_loss = log_dur_loss(prior_out["dur_pred_log"], duration, mask=phone_mask)
        total_loss += dur_loss
        valid_losses["dur_loss"] = dur_loss

        # Ground-truth continuous latent recovered from the discrete codes.
        # NOTE: `.module` access assumes the model is wrapped (e.g. DDP).
        x0 = self.model.module.code_to_latent(code)
        if self.cfg.model.diffusion.diffusion_type == "diffusion":
            # diff loss x0
            diff_loss_x0 = diff_loss(diff_out["x0_pred"], x0, mask=mask)
            total_loss += diff_loss_x0
            valid_losses["diff_loss_x0"] = diff_loss_x0

            # diff loss noise
            diff_loss_noise = diff_loss(
                diff_out["noise_pred"], diff_out["noise"], mask=mask
            )
            total_loss += diff_loss_noise * self.cfg.train.diff_noise_loss_lambda
            valid_losses["diff_loss_noise"] = diff_loss_noise
        elif self.cfg.model.diffusion.diffusion_type == "flow":
            # diff flow matching loss: regression target is (noise - x0)
            flow_gt = diff_out["noise"] - x0
            diff_loss_flow = diff_loss(diff_out["flow_pred"], flow_gt, mask=mask)
            total_loss += diff_loss_flow
            valid_losses["diff_loss_flow"] = diff_loss_flow

        # diff loss ce
        # (nq, B, T); (nq, B, T, 1024)
        if self.cfg.train.diff_ce_loss_lambda > 0:
            pred_indices, pred_dist = self.model.module.latent_to_code(
                diff_out["x0_pred"], nq=code.shape[1]
            )
            gt_indices, _ = self.model.module.latent_to_code(x0, nq=code.shape[1])
            diff_loss_ce = diff_ce_loss(pred_dist, gt_indices, mask=mask)
            total_loss += diff_loss_ce * self.cfg.train.diff_ce_loss_lambda
            valid_losses["diff_loss_ce"] = diff_loss_ce

        # Convert tensor losses to plain floats for logging.
        for item in valid_losses:
            valid_losses[item] = valid_losses[item].item()

        if self.cfg.train.diff_ce_loss_lambda > 0:
            # Per-quantizer codebook prediction accuracy over masked frames.
            pred_indices_list = pred_indices.long().detach().cpu().numpy()
            gt_indices_list = gt_indices.long().detach().cpu().numpy()
            mask_list = batch["mask"].detach().cpu().numpy()
            for i in range(pred_indices_list.shape[0]):
                pred_acc = np.sum(
                    (pred_indices_list[i] == gt_indices_list[i]) * mask_list
                ) / np.sum(mask_list)
                valid_losses["pred_acc_{}".format(str(i))] = pred_acc

        return (total_loss.item(), valid_losses, valid_stats)
def _valid_epoch(self):
r"""Testing epoch. Should return average loss of a batch (sample) over
one epoch. See ``train_loop`` for usage.
"""
if isinstance(self.model, dict):
for key in self.model.keys():
self.model[key].eval()
else:
self.model.eval()
epoch_sum_loss = 0.0
epoch_losses = dict()
for batch in self.valid_dataloader:
# Put the data to cuda device
device = self.accelerator.device
for k, v in batch.items():
if isinstance(v, torch.Tensor):
batch[k] = v.to(device)
total_loss, valid_losses, valid_stats = self._valid_step(batch)
epoch_sum_loss = total_loss
for key, value in valid_losses.items():
epoch_losses[key] = value
self.accelerator.wait_for_everyone()
return epoch_sum_loss, epoch_losses
def _train_epoch(self):
r"""Training epoch. Should return average loss of a batch (sample) over
one epoch. See ``train_loop`` for usage.
"""
if isinstance(self.model, dict):
for key in self.model.keys():
self.model[key].train()
else:
self.model.train()
epoch_sum_loss: float = 0.0
epoch_losses: dict = {}
epoch_step: int = 0
for batch in self.train_dataloader:
# Put the data to cuda device
device = self.accelerator.device
for k, v in batch.items():
if isinstance(v, torch.Tensor):
batch[k] = v.to(device)
# Do training step and BP
with self.accelerator.accumulate(self.model):
total_loss, train_losses, training_stats = self._train_step(batch)
self.batch_count += 1
# Update info for each step
# TODO: step means BP counts or batch counts?
if self.batch_count % self.cfg.train.gradient_accumulation_step == 0:
epoch_sum_loss = total_loss
for key, value in train_losses.items():
epoch_losses[key] = value
if isinstance(train_losses, dict):
for key, loss in train_losses.items():
self.accelerator.log(
{"Epoch/Train {} Loss".format(key): loss},
step=self.step,
)
if (
self.accelerator.is_main_process
and self.batch_count
% (1 * self.cfg.train.gradient_accumulation_step)
== 0
):
self.echo_log(train_losses, mode="Training")
self.step += 1
epoch_step += 1
self.accelerator.wait_for_everyone()
return epoch_sum_loss, epoch_losses
def train_loop(self):
r"""Training loop. The public entry of training process."""
# Wait everyone to prepare before we move on
self.accelerator.wait_for_everyone()
# dump config file
if self.accelerator.is_main_process:
self._dump_cfg(self.config_save_path)
# self.optimizer.zero_grad()
# Wait to ensure good to go
self.accelerator.wait_for_everyone()
while self.epoch < self.max_epoch:
if self.accelerator.is_main_process:
self.logger.info("\n")
self.logger.info("-" * 32)
self.logger.info("Epoch {}: ".format(self.epoch))
# Do training & validating epoch
train_total_loss, train_losses = self._train_epoch()
if isinstance(train_losses, dict):
for key, loss in train_losses.items():
if self.accelerator.is_main_process:
self.logger.info(" |- Train/{} Loss: {:.6f}".format(key, loss))
self.accelerator.log(
{"Epoch/Train {} Loss".format(key): loss},
step=self.epoch,
)
valid_total_loss, valid_losses = self._valid_epoch()
if isinstance(valid_losses, dict):
for key, loss in valid_losses.items():
if self.accelerator.is_main_process:
self.logger.info(" |- Valid/{} Loss: {:.6f}".format(key, loss))
self.accelerator.log(
{"Epoch/Train {} Loss".format(key): loss},
step=self.epoch,
)
if self.accelerator.is_main_process:
self.logger.info(" |- Train/Loss: {:.6f}".format(train_total_loss))
self.logger.info(" |- Valid/Loss: {:.6f}".format(valid_total_loss))
self.accelerator.log(
{
"Epoch/Train Loss": train_total_loss,
"Epoch/Valid Loss": valid_total_loss,
},
step=self.epoch,
)
self.accelerator.wait_for_everyone()
if isinstance(self.scheduler, dict):
for key in self.scheduler.keys():
self.scheduler[key].step()
else:
self.scheduler.step()
# Check if hit save_checkpoint_stride and run_eval
run_eval = False
if self.accelerator.is_main_process:
save_checkpoint = False
hit_dix = []
for i, num in enumerate(self.save_checkpoint_stride):
if self.epoch % num == 0:
save_checkpoint = True
hit_dix.append(i)
run_eval |= self.run_eval[i]
self.accelerator.wait_for_everyone()
if self.accelerator.is_main_process and save_checkpoint:
path = os.path.join(
self.checkpoint_dir,
"epoch-{:04d}_step-{:07d}_loss-{:.6f}".format(
self.epoch, self.step, train_total_loss
),
)
print("save state......")
self.accelerator.save_state(path)
print("finish saving state......")
json.dump(
self.checkpoints_path,
open(os.path.join(path, "ckpts.json"), "w"),
ensure_ascii=False,
indent=4,
)
# Remove old checkpoints
to_remove = []
for idx in hit_dix:
self.checkpoints_path[idx].append(path)
while len(self.checkpoints_path[idx]) > self.keep_last[idx]:
to_remove.append((idx, self.checkpoints_path[idx].pop(0)))
# Search conflicts
total = set()
for i in self.checkpoints_path:
total |= set(i)
do_remove = set()
for idx, path in to_remove[::-1]:
if path in total:
self.checkpoints_path[idx].insert(0, path)
else:
do_remove.add(path)
# Remove old checkpoints
for path in do_remove:
shutil.rmtree(path, ignore_errors=True)
if self.accelerator.is_main_process:
self.logger.debug(f"Remove old checkpoint: {path}")
self.accelerator.wait_for_everyone()
if run_eval:
# TODO: run evaluation
pass
# Update info for each epoch
self.epoch += 1
# Finish training and save final checkpoint
self.accelerator.wait_for_everyone()
if self.accelerator.is_main_process:
self.accelerator.save_state(
os.path.join(
self.checkpoint_dir,
"final_epoch-{:04d}_step-{:07d}_loss-{:.6f}".format(
self.epoch, self.step, valid_total_loss
),
)
)
self.accelerator.end_training()
def build_trainer(args, cfg):
    """Look up and instantiate the trainer registered for ``cfg.model_type``.

    Raises:
        KeyError: if ``cfg.model_type`` has no registered trainer class.
    """
    supported_trainer = {
        "FastSpeech2": FastSpeech2Trainer,
        "VITS": VITSTrainer,
        "VALLE": VALLETrainer,
        "NaturalSpeech2": NS2Trainer,
    }
    return supported_trainer[cfg.model_type](args, cfg)
17,529 | import argparse
import torch
from models.tts.fastspeech2.fs2_trainer import FastSpeech2Trainer
from models.tts.vits.vits_trainer import VITSTrainer
from models.tts.valle.valle_trainer import VALLETrainer
from models.tts.naturalspeech2.ns2_trainer import NS2Trainer
from utils.util import load_config
def cuda_relevant(deterministic=False):
    """Configure global CUDA/cuDNN flags.

    Always enables TF32 matmul/cuDNN kernels (Ampere+); when *deterministic*
    is True, forces deterministic algorithm selection and disables cuDNN
    autotuning for reproducibility at some speed cost.
    """
    torch.cuda.empty_cache()

    # TF32 on Ampere and above
    cudnn = torch.backends.cudnn
    torch.backends.cuda.matmul.allow_tf32 = True
    cudnn.enabled = True
    cudnn.allow_tf32 = True

    # Deterministic: reproducibility vs. benchmark autotuning are exclusive.
    cudnn.deterministic = deterministic
    cudnn.benchmark = not deterministic
    torch.use_deterministic_algorithms(deterministic)
17,530 | import argparse
from argparse import ArgumentParser
import os
from models.tts.fastspeech2.fs2_inference import FastSpeech2Inference
from models.tts.vits.vits_inference import VitsInference
from models.tts.valle.valle_inference import VALLEInference
from models.tts.naturalspeech2.ns2_inference import NS2Inference
from utils.util import load_config
import torch
class FastSpeech2Inference(TTSInference):
    """Batch and single-utterance inference for FastSpeech2 acoustic models."""

    def __init__(self, args, cfg):
        TTSInference.__init__(self, args, cfg)
        self.args = args
        self.cfg = cfg
        self.infer_type = args.mode

    def _build_model(self):
        """Instantiate the FastSpeech2 acoustic model."""
        self.model = FastSpeech2(self.cfg)
        return self.model

    def load_model(self, state_dict):
        """Load model weights, stripping any DDP ``module.`` key prefix."""
        raw_dict = state_dict["model"]
        clean_dict = OrderedDict()
        for k, v in raw_dict.items():
            if k.startswith("module."):
                clean_dict[k[7:]] = v
            else:
                clean_dict[k] = v

        self.model.load_state_dict(clean_dict)

    def _build_test_dataset(self):
        return FS2TestDataset, FS2TestCollator

    @staticmethod
    def _parse_vocoder(vocoder_dir):
        r"""Parse vocoder config and locate the latest-step checkpoint.

        BUGFIX: this helper takes no ``self`` but was invoked as
        ``self._parse_vocoder(...)``; without ``@staticmethod`` that call
        passed the instance as ``vocoder_dir`` and raised TypeError.
        """
        vocoder_dir = os.path.abspath(vocoder_dir)
        ckpt_list = [ckpt for ckpt in Path(vocoder_dir).glob("*.pt")]
        # last step (different from the base *int(x.stem)*)
        ckpt_list.sort(
            key=lambda x: int(x.stem.split("_")[-2].split("-")[-1]), reverse=True
        )
        ckpt_path = str(ckpt_list[0])
        vocoder_cfg = load_config(
            os.path.join(vocoder_dir, "args.json"), lowercase=True
        )
        return vocoder_cfg, ckpt_path

    def inference_for_batches(self):
        """Predict mels for the whole test set, vocode them, and write wavs.

        Intermediate per-utterance mel tensors are staged as ``<uid>.pt``
        files in the output dir and removed after vocoding.
        """
        y_pred = []
        for i, batch in tqdm(enumerate(self.test_dataloader)):
            y_pred, mel_lens, _ = self._inference_each_batch(batch)
            y_ls = y_pred.chunk(self.test_batch_size)
            tgt_ls = mel_lens.chunk(self.test_batch_size)
            j = 0
            for it, l in zip(y_ls, tgt_ls):
                l = l.item()
                # Trim padding frames before saving.
                it = it.squeeze(0)[:l].detach().cpu()
                uid = self.test_dataset.metadata[i * self.test_batch_size + j]["Uid"]
                torch.save(it, os.path.join(self.args.output_dir, f"{uid}.pt"))
                j += 1

        vocoder_cfg, vocoder_ckpt = self._parse_vocoder(self.args.vocoder_dir)
        res = synthesis(
            cfg=vocoder_cfg,
            vocoder_weight_file=vocoder_ckpt,
            n_samples=None,
            pred=[
                torch.load(
                    os.path.join(self.args.output_dir, "{}.pt".format(item["Uid"]))
                ).numpy()
                for item in self.test_dataset.metadata
            ],
        )
        for it, wav in zip(self.test_dataset.metadata, res):
            uid = it["Uid"]
            save_audio(
                os.path.join(self.args.output_dir, f"{uid}.wav"),
                wav.numpy(),
                self.cfg.preprocess.sample_rate,
                add_silence=True,
                turn_up=True,
            )
            os.remove(os.path.join(self.args.output_dir, f"{uid}.pt"))

    def _inference_each_batch(self, batch_data):
        """Forward one batch; returns (postnet mels, mel lengths, 0)."""
        device = self.accelerator.device
        control_values = (
            self.args.pitch_control,
            self.args.energy_control,
            self.args.duration_control,
        )
        for k, v in batch_data.items():
            batch_data[k] = v.to(device)

        pitch_control, energy_control, duration_control = control_values
        output = self.model(
            batch_data,
            p_control=pitch_control,
            e_control=energy_control,
            d_control=duration_control,
        )
        pred_res = output["postnet_output"]
        mel_lens = output["mel_lens"].cpu()
        return pred_res, mel_lens, 0

    def inference_for_single_utterance(self):
        """Synthesize ``self.args.text`` and return the vocoded waveform."""
        text = self.args.text

        control_values = (
            self.args.pitch_control,
            self.args.energy_control,
            self.args.duration_control,
        )
        pitch_control, energy_control, duration_control = control_values

        # get phone symbol file
        phone_symbol_file = None
        if self.cfg.preprocess.phone_extractor != "lexicon":
            phone_symbol_file = os.path.join(
                self.exp_dir, self.cfg.preprocess.symbols_dict
            )
            assert os.path.exists(phone_symbol_file)

        # convert text to phone sequence
        phone_extractor = phoneExtractor(self.cfg)
        phone_seq = phone_extractor.extract_phone(text)  # phone_seq: list

        # convert phone sequence to phone id sequence
        phon_id_collator = phoneIDCollation(
            self.cfg, symbols_dict_file=phone_symbol_file
        )
        # Wrap with curly-brace boundary symbols expected by the collator.
        phone_seq = ["{"] + phone_seq + ["}"]
        phone_id_seq = phon_id_collator.get_phone_id_sequence(self.cfg, phone_seq)

        # convert phone sequence to phone id sequence
        phone_id_seq = np.array(phone_id_seq)
        phone_id_seq = torch.from_numpy(phone_id_seq)

        # get speaker id if multi-speaker training and use speaker id
        speaker_id = None
        if self.cfg.preprocess.use_spkid and self.cfg.train.multi_speaker_training:
            spk2id_file = os.path.join(self.exp_dir, self.cfg.preprocess.spk2id)
            with open(spk2id_file, "r") as f:
                spk2id = json.load(f)
            speaker_id = spk2id[self.args.speaker_name]
            speaker_id = torch.from_numpy(np.array([speaker_id], dtype=np.int32))
        else:
            # NOTE(review): torch.Tensor(0) builds an *empty* float tensor,
            # not the scalar id 0 — presumably a placeholder; confirm usage.
            speaker_id = torch.Tensor(0).view(-1)

        with torch.no_grad():
            x_tst = phone_id_seq.to(self.device).unsqueeze(0)
            x_tst_lengths = torch.LongTensor([phone_id_seq.size(0)]).to(self.device)
            if speaker_id is not None:
                speaker_id = speaker_id.to(self.device)
            data = {}
            data["texts"] = x_tst
            data["text_len"] = x_tst_lengths
            data["spk_id"] = speaker_id

            output = self.model(
                data,
                p_control=pitch_control,
                e_control=energy_control,
                d_control=duration_control,
            )
            pred_res = output["postnet_output"]

        vocoder_cfg, vocoder_ckpt = self._parse_vocoder(self.args.vocoder_dir)

        audio = synthesis(
            cfg=vocoder_cfg,
            vocoder_weight_file=vocoder_ckpt,
            n_samples=None,
            pred=pred_res,
        )
        return audio[0]
class VitsInference(TTSInference):
    """Inference wrapper for the VITS end-to-end TTS model."""

    def __init__(self, args=None, cfg=None):
        TTSInference.__init__(self, args, cfg)

    def _build_model(self):
        """Build the VITS synthesizer from the preprocessing/model config."""
        net_g = SynthesizerTrn(
            self.cfg.model.text_token_num,
            self.cfg.preprocess.n_fft // 2 + 1,
            self.cfg.preprocess.segment_size // self.cfg.preprocess.hop_size,
            **self.cfg.model,
        )
        return net_g

    def _build_test_dataset(self):
        # BUGFIX: parameter was misspelled "sefl".
        return VITSTestDataset, VITSTestCollator

    def build_save_dir(self, dataset, speaker):
        """Compose (and create) the output directory for generated audio."""
        save_dir = os.path.join(
            self.args.output_dir,
            "tts_am_step-{}_{}".format(self.am_restore_step, self.args.mode),
        )
        if dataset is not None:
            save_dir = os.path.join(save_dir, "data_{}".format(dataset))
        if speaker != -1:
            save_dir = os.path.join(
                save_dir,
                "spk_{}".format(speaker),
            )
        os.makedirs(save_dir, exist_ok=True)
        print("Saving to ", save_dir)
        return save_dir

    def inference_for_batches(
        self, noise_scale=0.667, noise_scale_w=0.8, length_scale=1
    ):
        """Synthesize every utterance in the test dataloader.

        Returns:
            list: one trimmed CPU float waveform tensor per utterance.
        """
        ###### Construct test_batch ######
        n_batch = len(self.test_dataloader)
        now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))
        print(
            "Model eval time: {}, batch_size = {}, n_batch = {}".format(
                now, self.test_batch_size, n_batch
            )
        )
        self.model.eval()

        ###### Inference for each batch ######
        pred_res = []
        with torch.no_grad():
            for i, batch_data in enumerate(
                self.test_dataloader if n_batch == 1 else tqdm(self.test_dataloader)
            ):
                spk_id = None
                if (
                    self.cfg.preprocess.use_spkid
                    and self.cfg.train.multi_speaker_training
                ):
                    spk_id = batch_data["spk_id"]
                outputs = self.model.infer(
                    batch_data["phone_seq"],
                    batch_data["phone_len"],
                    spk_id,
                    noise_scale=noise_scale,
                    noise_scale_w=noise_scale_w,
                    length_scale=length_scale,
                )

                audios = outputs["y_hat"]
                masks = outputs["mask"]

                for idx in range(audios.size(0)):
                    audio = audios[idx, 0, :].data.cpu().float()
                    mask = masks[idx, :, :]
                    # Trim padding: valid samples = mask frames * hop size.
                    audio_length = (
                        mask.sum([0, 1]).long() * self.cfg.preprocess.hop_size
                    )
                    audio_length = audio_length.cpu().numpy()
                    audio = audio[:audio_length]
                    pred_res.append(audio)

        return pred_res

    def inference_for_single_utterance(
        self, noise_scale=0.667, noise_scale_w=0.8, length_scale=1
    ):
        """Synthesize ``self.args.text`` and return a float numpy waveform."""
        text = self.args.text

        # get phone symbol file
        phone_symbol_file = None
        if self.cfg.preprocess.phone_extractor != "lexicon":
            phone_symbol_file = os.path.join(
                self.exp_dir, self.cfg.preprocess.symbols_dict
            )
            assert os.path.exists(phone_symbol_file)

        # convert text to phone sequence
        phone_extractor = phoneExtractor(self.cfg)
        phone_seq = phone_extractor.extract_phone(text)  # phone_seq: list

        # convert phone sequence to phone id sequence
        phon_id_collator = phoneIDCollation(
            self.cfg, symbols_dict_file=phone_symbol_file
        )
        phone_id_seq = phon_id_collator.get_phone_id_sequence(self.cfg, phone_seq)

        if self.cfg.preprocess.add_blank:
            phone_id_seq = intersperse(phone_id_seq, 0)

        # convert phone sequence to phone id sequence
        phone_id_seq = np.array(phone_id_seq)
        phone_id_seq = torch.from_numpy(phone_id_seq)

        # get speaker id if multi-speaker training and use speaker id
        speaker_id = None
        if self.cfg.preprocess.use_spkid and self.cfg.train.multi_speaker_training:
            spk2id_file = os.path.join(self.exp_dir, self.cfg.preprocess.spk2id)
            with open(spk2id_file, "r") as f:
                spk2id = json.load(f)
            speaker_name = self.args.speaker_name
            assert (
                speaker_name in spk2id
            ), f"Speaker {speaker_name} not found in the spk2id keys. \
                Please make sure you've specified the correct speaker name in infer_speaker_name."
            speaker_id = spk2id[speaker_name]
            speaker_id = torch.from_numpy(
                np.array([speaker_id], dtype=np.int32)
            ).unsqueeze(0)

        with torch.no_grad():
            x_tst = phone_id_seq.to(self.device).unsqueeze(0)
            x_tst_lengths = torch.LongTensor([phone_id_seq.size(0)]).to(self.device)
            if speaker_id is not None:
                speaker_id = speaker_id.to(self.device)

            outputs = self.model.infer(
                x_tst,
                x_tst_lengths,
                sid=speaker_id,
                noise_scale=noise_scale,
                noise_scale_w=noise_scale_w,
                length_scale=length_scale,
            )

            audio = outputs["y_hat"][0, 0].data.cpu().float().numpy()

        return audio
class VALLEInference(TTSInference):
    """Zero-shot TTS inference for VALL-E with text/audio prompting."""

    def __init__(self, args=None, cfg=None):
        TTSInference.__init__(self, args, cfg)

        self.g2p_module = G2PModule(backend=self.cfg.preprocess.phone_extractor)
        # (removed an unused ``text_token_path`` computation here)
        self.audio_tokenizer = AudioTokenizer()

    def _build_model(self):
        model = VALLE(self.cfg.model)
        return model

    def _build_test_dataset(self):
        return VALLETestDataset, VALLETestCollator

    def inference_one_clip(self, text, text_prompt, audio_file, save_name="pred"):
        """Synthesize *text* conditioned on a (text_prompt, audio_file) pair.

        Returns:
            torch.Tensor: the generated waveform on CPU.
        """
        # get phone symbol file
        phone_symbol_file = None
        if self.cfg.preprocess.phone_extractor != "lexicon":
            phone_symbol_file = os.path.join(
                self.exp_dir, self.cfg.preprocess.symbols_dict
            )
            assert os.path.exists(phone_symbol_file)

        # convert text to phone sequence
        phone_extractor = phoneExtractor(self.cfg)
        # convert phone sequence to phone id sequence
        phon_id_collator = phoneIDCollation(
            self.cfg, symbols_dict_file=phone_symbol_file
        )

        # Prompt text and target text are phonemized together so the model
        # sees one contiguous token sequence.
        text = f"{text_prompt} {text}".strip()
        phone_seq = phone_extractor.extract_phone(text)  # phone_seq: list
        phone_id_seq = phon_id_collator.get_phone_id_sequence(self.cfg, phone_seq)
        phone_id_seq_len = torch.IntTensor([len(phone_id_seq)]).to(self.device)

        # convert phone sequence to phone id sequence
        phone_id_seq = np.array([phone_id_seq])
        phone_id_seq = torch.from_numpy(phone_id_seq).to(self.device)

        # extract acoustic token
        encoded_frames = tokenize_audio(self.audio_tokenizer, audio_file)
        audio_prompt_token = encoded_frames[0][0].transpose(2, 1).to(self.device)

        # copysyn: resynthesize the prompt itself through the codec decoder
        if self.args.copysyn:
            samples = self.audio_tokenizer.decode(encoded_frames)
            audio_copysyn = samples[0].cpu().detach()

            out_path = os.path.join(
                self.args.output_dir, self.infer_type, f"{save_name}_copysyn.wav"
            )
            torchaudio.save(out_path, audio_copysyn, self.cfg.preprocess.sampling_rate)

        if self.args.continual:
            encoded_frames = self.model.continual(
                phone_id_seq,
                phone_id_seq_len,
                audio_prompt_token,
            )
        else:
            # BUGFIX: default to None so an empty text_prompt no longer hits
            # a NameError when the length is passed to inference() below.
            prompt_phone_id_seq_len = None
            if text_prompt:
                # prompt_phone_seq = tokenize_text(self.g2p_module, text=f"{text_prompt}".strip())
                # _, enroll_x_lens = self.text_tokenizer.get_token_id_seq(prompt_phone_seq)

                text = f"{text_prompt}".strip()
                prompt_phone_seq = phone_extractor.extract_phone(
                    text
                )  # phone_seq: list
                prompt_phone_id_seq = phon_id_collator.get_phone_id_sequence(
                    self.cfg, prompt_phone_seq
                )
                prompt_phone_id_seq_len = torch.IntTensor(
                    [len(prompt_phone_id_seq)]
                ).to(self.device)

            encoded_frames = self.model.inference(
                phone_id_seq,
                phone_id_seq_len,
                audio_prompt_token,
                enroll_x_lens=prompt_phone_id_seq_len,
                top_k=self.args.top_k,
                temperature=self.args.temperature,
            )

        samples = self.audio_tokenizer.decode([(encoded_frames.transpose(2, 1), None)])

        audio = samples[0].squeeze(0).cpu().detach()
        return audio

    def inference_for_single_utterance(self):
        """Synthesize the single utterance described by the CLI args."""
        text = self.args.text
        text_prompt = self.args.text_prompt
        audio_file = self.args.audio_prompt

        if not self.args.continual:
            assert text != ""
        else:
            text = ""
        assert text_prompt != ""
        assert audio_file != ""

        audio = self.inference_one_clip(text, text_prompt, audio_file)
        return audio

    def inference_for_batches(self):
        """Synthesize each line of ``--test_list_file``.

        Line format is "text_prompt|audio_prompt_path" in continual mode and
        "text_prompt|audio_prompt_path|text" otherwise.
        """
        test_list_file = self.args.test_list_file
        assert test_list_file is not None

        pred_res = []
        with open(test_list_file, "r") as fin:
            for idx, line in enumerate(fin.readlines()):
                fields = line.strip().split("|")
                if self.args.continual:
                    assert len(fields) == 2
                    text_prompt, audio_prompt_path = fields
                    text = ""
                else:
                    assert len(fields) == 3
                    text_prompt, audio_prompt_path, text = fields

                audio = self.inference_one_clip(
                    text, text_prompt, audio_prompt_path, str(idx)
                )
                pred_res.append(audio)

        return pred_res

    """
    TODO: batch inference

    ###### Construct test_batch ######
    n_batch = len(self.test_dataloader)
    now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))
    print(
        "Model eval time: {}, batch_size = {}, n_batch = {}".format(
            now, self.test_batch_size, n_batch
        )
    )

    ###### Inference for each batch ######
    pred_res = []
    with torch.no_grad():
        for i, batch_data in enumerate(
            self.test_dataloader if n_batch == 1 else tqdm(self.test_dataloader)
        ):
            if self.args.continual:
                encoded_frames = self.model.continual(
                    batch_data["phone_seq"],
                    batch_data["phone_len"],
                    batch_data["acoustic_token"],
                )
            else:
                encoded_frames = self.model.inference(
                    batch_data["phone_seq"],
                    batch_data["phone_len"],
                    batch_data["acoustic_token"],
                    enroll_x_lens=batch_data["pmt_phone_len"],
                    top_k=self.args.top_k,
                    temperature=self.args.temperature
                )

            samples = self.audio_tokenizer.decode(
                [(encoded_frames.transpose(2, 1), None)]
            )

            for idx in range(samples.size(0)):
                audio = samples[idx].cpu()
                pred_res.append(audio)

    return pred_res
    """

    @staticmethod
    def add_arguments(parser: argparse.ArgumentParser):
        """Register VALL-E-specific CLI arguments on *parser*.

        Marked ``@staticmethod`` so it can be called via the class or an
        instance without binding ``self``.
        """
        parser.add_argument(
            "--text_prompt",
            type=str,
            default="",
            help="Text prompt that should be aligned with --audio_prompt.",
        )
        parser.add_argument(
            "--audio_prompt",
            type=str,
            default="",
            help="Audio prompt that should be aligned with --text_prompt.",
        )
        parser.add_argument(
            "--top-k",
            type=int,
            default=-100,
            help="Whether AR Decoder do top_k(if > 0) sampling.",
        )
        parser.add_argument(
            "--temperature",
            type=float,
            default=1.0,
            help="The temperature of AR Decoder top_k sampling.",
        )
        parser.add_argument(
            "--continual",
            action="store_true",
            help="Inference for continual task.",
        )
        parser.add_argument(
            "--copysyn",
            action="store_true",
            help="Copysyn: generate audio by decoder of the original audio tokenizer.",
        )
class NS2Inference:
    """Inference pipeline for NaturalSpeech2 (latent diffusion + EnCodec)."""

    def __init__(self, args, cfg):
        self.cfg = cfg
        self.args = args

        self.model = self.build_model()
        self.codec = self.build_codec()

        # Phone vocabulary: CMU-style symbols plus silence/boundary tokens.
        self.symbols = valid_symbols + ["sp", "spn", "sil"] + ["<s>", "</s>"]
        self.phone2id = {s: i for i, s in enumerate(self.symbols)}
        self.id2phone = {i: s for s, i in self.phone2id.items()}

    def build_model(self):
        """Load NaturalSpeech2 weights from ``args.checkpoint_path``."""
        model = NaturalSpeech2(self.cfg.model)
        model.load_state_dict(
            torch.load(
                os.path.join(self.args.checkpoint_path, "pytorch_model.bin"),
                map_location="cpu",
            )
        )
        model = model.to(self.args.device)
        return model

    def build_codec(self):
        """Build the 24 kHz EnCodec codec at 12 kbps target bandwidth."""
        encodec_model = EncodecModel.encodec_model_24khz()
        encodec_model = encodec_model.to(device=self.args.device)
        encodec_model.set_target_bandwidth(12.0)
        return encodec_model

    def get_ref_code(self):
        """Encode the reference audio into codec tokens plus an all-ones mask."""
        ref_wav_path = self.args.ref_audio
        ref_wav, sr = torchaudio.load(ref_wav_path)
        ref_wav = convert_audio(
            ref_wav, sr, self.codec.sample_rate, self.codec.channels
        )
        ref_wav = ref_wav.unsqueeze(0).to(device=self.args.device)

        with torch.no_grad():
            encoded_frames = self.codec.encode(ref_wav)
            ref_code = torch.cat([encoded[0] for encoded in encoded_frames], dim=-1)
        # print(ref_code.shape)

        ref_mask = torch.ones(ref_code.shape[0], ref_code.shape[-1]).to(ref_code.device)
        # print(ref_mask.shape)

        return ref_code, ref_mask

    def inference(self):
        """Synthesize ``args.text`` and write a 24 kHz wav to ``args.output_dir``."""
        ref_code, ref_mask = self.get_ref_code()

        lexicon = read_lexicon(self.cfg.preprocess.lexicon_path)
        phone_seq = preprocess_english(self.args.text, lexicon)
        print(phone_seq)

        phone_id = np.array(
            [
                *map(
                    self.phone2id.get,
                    phone_seq.replace("{", "").replace("}", "").split(),
                )
            ]
        )
        phone_id = torch.from_numpy(phone_id).unsqueeze(0).to(device=self.args.device)
        print(phone_id)

        x0, prior_out = self.model.inference(
            ref_code, phone_id, ref_mask, self.args.inference_step
        )
        print(prior_out["dur_pred"])
        print(prior_out["dur_pred_round"])
        print(torch.sum(prior_out["dur_pred_round"]))

        # Dead code removed: ``latent_ref`` only fed the commented-out
        # reference resynthesis below, wasting a quantizer decode per call.
        # latent_ref = self.codec.quantizer.vq.decode(ref_code.transpose(0, 1))
        # ref_wav = self.codec.decoder(latent_ref)
        rec_wav = self.codec.decoder(x0)

        os.makedirs(self.args.output_dir, exist_ok=True)

        sf.write(
            "{}/{}.wav".format(
                self.args.output_dir, self.args.text.replace(" ", "_", 100)
            ),
            rec_wav[0, 0].detach().cpu().numpy(),
            samplerate=24000,
        )

    @staticmethod
    def add_arguments(parser: argparse.ArgumentParser):
        """Register NaturalSpeech2-specific CLI arguments on *parser*."""
        parser.add_argument(
            "--ref_audio",
            type=str,
            default="",
            help="Reference audio path",
        )
        parser.add_argument(
            "--device",
            type=str,
            default="cuda",
        )
        parser.add_argument(
            "--inference_step",
            type=int,
            default=200,
            help="Total inference steps for the diffusion model",
        )
def build_inference(args, cfg):
    """Look up and instantiate the inference class for ``cfg.model_type``.

    Raises:
        KeyError: if ``cfg.model_type`` has no registered inference class.
    """
    supported_inference = {
        "FastSpeech2": FastSpeech2Inference,
        "VITS": VitsInference,
        "VALLE": VALLEInference,
        "NaturalSpeech2": NS2Inference,
    }
    return supported_inference[cfg.model_type](args, cfg)
17,531 | import argparse
from argparse import ArgumentParser
import os
from models.tts.fastspeech2.fs2_inference import FastSpeech2Inference
from models.tts.vits.vits_inference import VitsInference
from models.tts.valle.valle_inference import VALLEInference
from models.tts.naturalspeech2.ns2_inference import NS2Inference
from utils.util import load_config
import torch
def cuda_relevant(deterministic=False):
    """Set global CUDA/cuDNN behaviour flags before training/inference.

    TF32 kernels are enabled unconditionally (effective on Ampere+);
    *deterministic* toggles reproducible-but-slower algorithm selection.
    """
    torch.cuda.empty_cache()

    # TF32 on Ampere and above
    for backend in (torch.backends.cuda.matmul, torch.backends.cudnn):
        backend.allow_tf32 = True
    torch.backends.cudnn.enabled = True

    # Deterministic
    torch.backends.cudnn.deterministic = deterministic
    torch.backends.cudnn.benchmark = not deterministic
    torch.use_deterministic_algorithms(deterministic)
17,532 | import argparse
from argparse import ArgumentParser
import os
from models.tts.fastspeech2.fs2_inference import FastSpeech2Inference
from models.tts.vits.vits_inference import VitsInference
from models.tts.valle.valle_inference import VALLEInference
from models.tts.naturalspeech2.ns2_inference import NS2Inference
from utils.util import load_config
import torch
def build_parser():
    """Build the command-line parser for the TTS inference entry point.

    Covers dataset/batch selection, model checkpoints, prosody controls and
    output options; ``--config`` and ``--mode`` are the only required flags.
    """
    p = argparse.ArgumentParser()

    # Configuration and input selection
    p.add_argument(
        "--config", type=str, required=True, help="JSON/YAML file for configurations."
    )
    p.add_argument(
        "--dataset", type=str, help="convert from the source data", default=None
    )
    p.add_argument(
        "--testing_set", type=str, help="train, test, golden_test", default="test"
    )
    p.add_argument(
        "--test_list_file",
        type=str,
        help="convert from the test list file",
        default=None,
    )
    p.add_argument(
        "--speaker_name",
        type=str,
        default=None,
        help="speaker name for multi-speaker synthesis, for single-sentence mode only",
    )
    p.add_argument("--text", help="Text to be synthesized.", type=str, default="")

    # Checkpoint locations
    p.add_argument(
        "--vocoder_dir",
        type=str,
        default=None,
        help="Vocoder checkpoint directory. Searching behavior is the same as "
        "the acoustics one.",
    )
    p.add_argument(
        "--acoustics_dir",
        type=str,
        default=None,
        help="Acoustic model checkpoint directory. If a directory is given, "
        "search for the latest checkpoint dir in the directory. If a specific "
        "checkpoint dir is given, directly load the checkpoint.",
    )
    p.add_argument(
        "--checkpoint_path",
        type=str,
        default=None,
        help="Acoustic model checkpoint directory. If a directory is given, "
        "search for the latest checkpoint dir in the directory. If a specific "
        "checkpoint dir is given, directly load the checkpoint.",
    )

    # Run mode and logging
    p.add_argument(
        "--mode",
        type=str,
        choices=["batch", "single"],
        required=True,
        help="Synthesize a whole dataset or a single sentence",
    )
    p.add_argument(
        "--log_level",
        type=str,
        default="warning",
        help="Logging level. Default: warning",
    )

    # Prosody controls (all multiplicative, 1.0 = unchanged)
    p.add_argument(
        "--pitch_control",
        type=float,
        default=1.0,
        help="control the pitch of the whole utterance, larger value for higher pitch",
    )
    p.add_argument(
        "--energy_control",
        type=float,
        default=1.0,
        help="control the energy of the whole utterance, larger value for larger volume",
    )
    p.add_argument(
        "--duration_control",
        type=float,
        default=1.0,
        help="control the speed of the whole utterance, larger value for slower speaking rate",
    )

    # Output
    p.add_argument(
        "--output_dir",
        type=str,
        default=None,
        help="Output dir for saving generated results",
    )
    return p
17,533 | import faulthandler
import os
import argparse
import json
import pyworld as pw
from multiprocessing import cpu_count
from utils.util import load_config
from preprocessors.processor import preprocess_dataset, prepare_align
from preprocessors.metadata import cal_metadata
from processors import (
acoustic_extractor,
content_extractor,
data_augment,
phone_extractor,
)
def extract_acoustic_features(dataset, output_path, cfg, dataset_types, n_workers=1):
    """Extract acoustic features of utterances in the dataset

    Args:
        dataset (str): name of dataset, e.g. opencpop
        output_path (str): directory that stores train, test and feature files of datasets
        cfg (dict): dictionary that stores configurations
        dataset_types (list): split names ("train", "test", ...) whose metadata
            JSON files are merged before extraction.
        n_workers (int, optional): num of processes to extract features in parallel. Defaults to 1.
            Currently unused by the serial extraction path below.
    """
    # The per-dataset output dir does not depend on the split; hoisted out of
    # the loop instead of being recomputed per split.
    dataset_output = os.path.join(output_path, dataset)

    metadata = []
    for dataset_type in dataset_types:
        dataset_file = os.path.join(dataset_output, "{}.json".format(dataset_type))
        with open(dataset_file, "r") as f:
            metadata.extend(json.load(f))

    # Parallel variant kept for reference:
    # acoustic_extractor.extract_utt_acoustic_features_parallel(
    #     metadata, dataset_output, cfg, n_workers=n_workers
    # )
    acoustic_extractor.extract_utt_acoustic_features_serial(
        metadata, dataset_output, cfg
    )
def extract_content_features(dataset, output_path, cfg, dataset_types, num_workers=1):
    """Extract content features of utterances in the dataset

    Args:
        dataset (str): name of dataset, e.g. opencpop
        output_path (str): directory that stores train, test and feature files of datasets
        cfg (dict): dictionary that stores configurations
        dataset_types (list): split names whose metadata JSON files are merged.
        num_workers (int, optional): dataloader workers used during extraction.
    """
    # Split-independent path; hoisted out of the loop (was recomputed — and a
    # duplicated commented line removed — per split).
    dataset_output = os.path.join(output_path, dataset)

    metadata = []
    for dataset_type in dataset_types:
        dataset_file = os.path.join(dataset_output, "{}.json".format(dataset_type))
        with open(dataset_file, "r") as f:
            metadata.extend(json.load(f))

    content_extractor.extract_utt_content_features_dataloader(
        cfg, metadata, num_workers
    )
def extract_phonme_sequences(dataset, output_path, cfg, dataset_types):
    """Extract phoneme features of utterances in the dataset

    Args:
        dataset (str): name of dataset, e.g. opencpop
        output_path (str): directory that stores train, test and feature files of datasets
        cfg (dict): dictionary that stores configurations
        dataset_types (list): split names whose metadata JSON files are merged.
    """
    # NOTE(review): the function name has a typo ("phonme") but is kept —
    # renaming would break callers.
    # Split-independent path; hoisted out of the loop.
    dataset_output = os.path.join(output_path, dataset)

    metadata = []
    for dataset_type in dataset_types:
        dataset_file = os.path.join(dataset_output, "{}.json".format(dataset_type))
        with open(dataset_file, "r") as f:
            metadata.extend(json.load(f))

    phone_extractor.extract_utt_phone_sequence(dataset, cfg, metadata)
def preprocess_dataset(
    dataset, dataset_path, output_path, cfg, task_type, is_custom_dataset=False
):
    """Call specific function to handle specific dataset

    Args:
        dataset (str): name of a dataset, e.g. opencpop, m4singer
        dataset_path (str): path to dataset
        output_path (str): path to store preprocessing result files
        cfg (dict): preprocess configurations
        task_type (str): task name, e.g. "svc"; only consulted for custom datasets
        is_custom_dataset (bool): whether `dataset` is a user-provided dataset

    Raises:
        NotImplementedError: for a custom dataset whose task is not supported.
    """
    if is_custom_dataset:
        if task_type == "svc":
            customsvcdataset.main(output_path, dataset_path, dataset_name=dataset)
        else:
            # Fixed: report the `task_type` parameter instead of `cfg.task_type`
            # (`cfg` here is the preprocess sub-config, which has no task_type).
            raise NotImplementedError(
                "Custom dataset for {} task not implemented!".format(task_type)
            )

    # Dispatch to the dataset-specific preprocessor. The conditions are
    # mutually exclusive in practice, so independent `if`s preserve behavior.
    if re.match("opencpop*", dataset):
        opencpop.main(dataset, output_path, dataset_path)
    if dataset == "m4singer":
        m4singer.main(output_path, dataset_path)
    if dataset == "svcc":
        svcc.main(output_path, dataset_path)
    if dataset == "pjs":
        pjs.main(output_path, dataset_path)
    if dataset == "popbutfy":
        popbutfy.main(output_path, dataset_path)
    if dataset == "opensinger":
        opensinger.main(output_path, dataset_path)
    if dataset == "popcs":
        popcs.main(output_path, dataset_path)
    if dataset == "kising":
        kising.main(output_path, dataset_path)
    if dataset == "csd":
        csd.main(output_path, dataset_path)
    if dataset == "opera":
        opera.main(output_path, dataset_path)
    if dataset == "nus48e":
        nus48e.main(output_path, dataset_path)
    if dataset == "vctk":
        vctk.main(output_path, dataset_path)
    if dataset == "svcceval":
        svcceval.main(output_path, dataset_path)
    if dataset == "libritts":
        libritts.main(output_path, dataset_path)
    if dataset == "lijian":
        lijian.main(output_path, dataset_path)
    if dataset == "cdmusiceval":
        cdmusiceval.main(output_path, dataset_path)
    if dataset == "LJSpeech":
        ljspeech.main(output_path, dataset_path, cfg)
    if dataset == "ljspeech":
        ljspeech_vocoder.main(output_path, dataset_path)
    if dataset == "coco":
        coco.main(output_path, dataset_path)
    if dataset == "cocoeval":
        cocoeval.main(output_path, dataset_path)
    if dataset == "vocalist":
        vocalist.main(output_path, dataset_path)
    if dataset == "librilight":
        librilight.main(output_path, dataset_path, cfg)
    if dataset == "hifitts":
        hifitts.main(output_path, dataset_path)
def prepare_align(dataset, dataset_path, cfg, output_path):
    """Call specific function to handle specific dataset

    Args:
        dataset (str): name of a dataset, e.g. ljspeech
        dataset_path (str): path to dataset
        output_path (str): path to store preprocessing result files
    """
    # Only LJSpeech currently supports MFA alignment preparation.
    if dataset != "LJSpeech":
        return
    ljspeech.prepare_align(dataset, dataset_path, cfg, output_path)
def cal_metadata(cfg, dataset_types=None):
    """
    Dump metadata (singers.json, meta_info.json, utt2singer) for singer dataset or multi-datasets.

    Args:
        cfg: global config; reads cfg.dataset and cfg.preprocess.* paths.
        dataset_types (list, optional): splits to process. Defaults to ["train", "test"].
    """
    from collections import Counter

    # Fixed: avoid a mutable default argument.
    if dataset_types is None:
        dataset_types = ["train", "test"]

    datasets = cfg.dataset
    print("-" * 10)
    print("Preparing metadata...")
    print("Including: \n{}\n".format("\n".join(datasets)))

    datasets.sort()

    for dataset in tqdm(datasets):
        save_dir = os.path.join(cfg.preprocess.processed_dir, dataset)
        assert os.path.exists(save_dir)

        # 'train.json' and 'test.json' and 'valid.json' of target dataset
        meta_info = dict()
        utterances_dict = dict()
        all_utterances = list()
        duration = dict()
        total_duration = 0.0
        for dataset_type in dataset_types:
            metadata = os.path.join(save_dir, "{}.json".format(dataset_type))

            # Sort the metadata as the duration order
            with open(metadata, "r", encoding="utf-8") as f:
                utterances = json.load(f)
            utterances = sorted(utterances, key=lambda x: x["Duration"])
            utterances_dict[dataset_type] = utterances
            all_utterances.extend(utterances)

            # Write back the sorted metadata
            with open(metadata, "w") as f:
                json.dump(utterances, f, indent=4, ensure_ascii=False)

            # Get the total duration and singer names for train and test utterances
            duration[dataset_type] = sum(utt["Duration"] for utt in utterances)
            total_duration += duration[dataset_type]

        # Paths of metadata needed to be generated
        singer_dict_file = os.path.join(save_dir, cfg.preprocess.spk2id)
        utt2singer_file = os.path.join(save_dir, cfg.preprocess.utt2spk)

        singer_names = set(
            f"{replace_augment_name(utt['Dataset'])}_{utt['Singer']}"
            for utt in all_utterances
        )

        # Write the utt2singer file and sort the singer names
        with open(utt2singer_file, "w", encoding="utf-8") as f:
            for utt in all_utterances:
                f.write(
                    f"{utt['Dataset']}_{utt['Uid']}\t{replace_augment_name(utt['Dataset'])}_{utt['Singer']}\n"
                )
        singer_names = sorted(singer_names)
        singer_lut = {name: i for i, name in enumerate(singer_names)}

        # dump singers.json
        with open(singer_dict_file, "w", encoding="utf-8") as f:
            json.dump(singer_lut, f, indent=4, ensure_ascii=False)

        meta_info = {
            "dataset": dataset,
            "statistics": {
                "size": len(all_utterances),
                "hours": round(total_duration / 3600, 4),
            },
        }
        for dataset_type in dataset_types:
            meta_info[dataset_type] = {
                "size": len(utterances_dict[dataset_type]),
                "hours": round(duration[dataset_type] / 3600, 4),
            }
        meta_info["singers"] = {"size": len(singer_lut)}

        # Use Counter to count the minutes for each singer
        total_singer2mins = Counter()
        training_singer2mins = Counter()
        for dataset_type in dataset_types:
            for utt in utterances_dict[dataset_type]:
                k = f"{replace_augment_name(utt['Dataset'])}_{utt['Singer']}"
                if dataset_type == "train":
                    training_singer2mins[k] += utt["Duration"] / 60
                total_singer2mins[k] += utt["Duration"] / 60

        training_singer2mins = dict(
            sorted(training_singer2mins.items(), key=lambda x: x[1], reverse=True)
        )
        training_singer2mins = {k: round(v, 2) for k, v in training_singer2mins.items()}
        meta_info["singers"]["training_minutes"] = training_singer2mins

        total_singer2mins = dict(
            sorted(total_singer2mins.items(), key=lambda x: x[1], reverse=True)
        )
        total_singer2mins = {k: round(v, 2) for k, v in total_singer2mins.items()}
        meta_info["singers"]["minutes"] = total_singer2mins

        with open(os.path.join(save_dir, "meta_info.json"), "w") as f:
            json.dump(meta_info, f, indent=4, ensure_ascii=False)

        # Fixed: loop variable renamed from `min`, which shadowed the builtin.
        for singer, mins in training_singer2mins.items():
            print(f"Speaker/Singer {singer}: {mins} mins for training")

    print("-" * 10, "\n")
The provided code snippet includes necessary dependencies for implementing the `preprocess` function. Write a Python function `def preprocess(cfg, args)` to solve the following problem:
Preprocess raw data of single or multiple datasets (in cfg.dataset) Args: cfg (dict): dictionary that stores configurations args (ArgumentParser): specify the configuration file and num_workers
Here is the function:
def preprocess(cfg, args):
    """Preprocess raw data of single or multiple datasets (in cfg.dataset)

    Args:
        cfg (dict): dictionary that stores configurations
        args (ArgumentParser): specify the configuration file and num_workers
    """
    # Specify the output root path to save the processed data
    output_path = cfg.preprocess.processed_dir
    os.makedirs(output_path, exist_ok=True)

    # Split train and test sets
    for dataset in cfg.dataset:
        print("Preprocess {}...".format(dataset))

        if args.prepare_alignment:
            # Prepare alignment with MFA
            print("Prepare alignment {}...".format(dataset))
            prepare_align(
                dataset, cfg.dataset_path[dataset], cfg.preprocess, output_path
            )

        preprocess_dataset(
            dataset,
            cfg.dataset_path[dataset],
            output_path,
            cfg.preprocess,
            cfg.task_type,
            is_custom_dataset=dataset in cfg.use_custom_dataset,
        )

    # Data augmentation: create new wav files with pitch shift, formant shift, equalizer, time stretch
    try:
        assert isinstance(
            cfg.preprocess.data_augment, list
        ), "Please provide a list of datasets need to be augmented."
        if len(cfg.preprocess.data_augment) > 0:
            new_datasets_list = []
            for dataset in cfg.preprocess.data_augment:
                new_datasets = data_augment.augment_dataset(cfg, dataset)
                new_datasets_list.extend(new_datasets)
            cfg.dataset.extend(new_datasets_list)
            print("Augmentation datasets: ", cfg.dataset)
    except Exception:
        # Fixed: narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # propagate; any augmentation failure is still treated as "no augmentation".
        print("No Data Augmentation.")

    # json files
    dataset_types = list()
    dataset_types.append((cfg.preprocess.train_file).split(".")[0])
    dataset_types.append((cfg.preprocess.valid_file).split(".")[0])
    if "test" not in dataset_types:
        dataset_types.append("test")
    # NOTE(review): `dataset` here is the leftover loop variable from the loops
    # above, i.e. the LAST processed dataset decides the eval-only mode — confirm
    # this is intended.
    if "eval" in dataset:
        dataset_types = ["test"]

    # Dump metadata of datasets (singers, train/test durations, etc.)
    cal_metadata(cfg, dataset_types)

    # Prepare the acoustic features
    for dataset in cfg.dataset:
        # Skip augmented datasets which do not need to extract acoustic features
        # We will copy acoustic features from the original dataset later.
        # Fixed: the original chained comparison `"equalizer" in dataset in dataset`
        # only worked by accident because `dataset in dataset` is always True.
        if (
            "pitch_shift" in dataset
            or "formant_shift" in dataset
            or "equalizer" in dataset
        ):
            continue
        print(
            "Extracting acoustic features for {} using {} workers ...".format(
                dataset, args.num_workers
            )
        )
        extract_acoustic_features(
            dataset, output_path, cfg, dataset_types, args.num_workers
        )
        # Calculate the statistics of acoustic features
        if cfg.preprocess.mel_min_max_norm:
            acoustic_extractor.cal_mel_min_max(dataset, output_path, cfg)

        if cfg.preprocess.extract_pitch:
            acoustic_extractor.cal_pitch_statistics(dataset, output_path, cfg)

        if cfg.preprocess.extract_energy:
            acoustic_extractor.cal_energy_statistics(dataset, output_path, cfg)

        if cfg.preprocess.pitch_norm:
            acoustic_extractor.normalize(dataset, cfg.preprocess.pitch_dir, cfg)

        if cfg.preprocess.energy_norm:
            acoustic_extractor.normalize(dataset, cfg.preprocess.energy_dir, cfg)

    # Copy acoustic features for augmented datasets by creating soft-links
    for dataset in cfg.dataset:
        if "pitch_shift" in dataset:
            src_dataset = dataset.replace("_pitch_shift", "")
            src_dataset_dir = os.path.join(output_path, src_dataset)
        elif "formant_shift" in dataset:
            src_dataset = dataset.replace("_formant_shift", "")
            src_dataset_dir = os.path.join(output_path, src_dataset)
        elif "equalizer" in dataset:
            src_dataset = dataset.replace("_equalizer", "")
            src_dataset_dir = os.path.join(output_path, src_dataset)
        else:
            continue
        dataset_dir = os.path.join(output_path, dataset)
        metadata = []
        for split in ["train", "test"] if not "eval" in dataset else ["test"]:
            metadata_file_path = os.path.join(src_dataset_dir, "{}.json".format(split))
            with open(metadata_file_path, "r") as f:
                metadata.extend(json.load(f))
        print("Copying acoustic features for {}...".format(dataset))
        acoustic_extractor.copy_acoustic_features(
            metadata, dataset_dir, src_dataset_dir, cfg
        )
        if cfg.preprocess.mel_min_max_norm:
            acoustic_extractor.cal_mel_min_max(dataset, output_path, cfg)

        if cfg.preprocess.extract_pitch:
            acoustic_extractor.cal_pitch_statistics(dataset, output_path, cfg)

    # Prepare the content features
    for dataset in cfg.dataset:
        print("Extracting content features for {}...".format(dataset))
        extract_content_features(
            dataset, output_path, cfg, dataset_types, args.num_workers
        )

    # Prepare the phoneme sequences
    if cfg.preprocess.extract_phone:
        for dataset in cfg.dataset:
            print("Extracting phoneme sequence for {}...".format(dataset))
            extract_phonme_sequences(dataset, output_path, cfg, dataset_types)
17,534 | import random
import os
import json
import torchaudio
from tqdm import tqdm
from glob import glob
from collections import defaultdict
from utils.util import has_existed
from preprocessors import GOLDEN_TEST_SAMPLES
GOLDEN_TEST_SAMPLES = defaultdict(list)
GOLDEN_TEST_SAMPLES["m4singer"] = [
"Alto-1_美错_0014",
"Bass-1_十年_0008",
"Soprano-2_同桌的你_0018",
"Tenor-5_爱笑的眼睛_0010",
]
GOLDEN_TEST_SAMPLES["svcc"] = [
# IDF1
"IDF1_10030",
"IDF1_10120",
"IDF1_10140",
# IDM1
"IDM1_10001",
"IDM1_10030",
"IDM1_10120",
# CDF1
"CDF1_10030",
"CDF1_10120",
"CDF1_10140",
# CDM1
"CDM1_10001",
"CDM1_10030",
"CDM1_10120",
]
GOLDEN_TEST_SAMPLES["svcceval"] = [
# SF1
"SF1_30001",
"SF1_30002",
"SF1_30003",
# SM1
"SM1_30001",
"SM1_30002",
"SM1_30003",
]
GOLDEN_TEST_SAMPLES["popbutfy"] = [
"Female1#you_are_my_sunshine_Professional#0",
"Female4#Someone_Like_You_Professional#10",
"Male2#Lemon_Tree_Professional#12",
"Male5#can_you_feel_the_love_tonight_Professional#20",
]
GOLDEN_TEST_SAMPLES["opensinger"] = [
"Man_0_大鱼_10",
"Man_21_丑八怪_14",
"Woman_39_mojito_22",
"Woman_40_易燃易爆炸_12",
]
GOLDEN_TEST_SAMPLES["nus48e"] = [
"ADIZ_read#01#0000",
"MCUR_sing#10#0000",
"JLEE_read#08#0001",
"SAMF_sing#18#0001",
]
GOLDEN_TEST_SAMPLES["popcs"] = [
"明天会更好_0004",
"欧若拉_0005",
"虫儿飞_0006",
"隐形的翅膀_0008",
]
GOLDEN_TEST_SAMPLES["kising"] = [
"421_0040",
"424_0013",
"431_0026",
]
GOLDEN_TEST_SAMPLES["csd"] = [
"en_004a_0001",
"en_042b_0006",
"kr_013a_0006",
"kr_045b_0004",
]
GOLDEN_TEST_SAMPLES["opera"] = [
"fem_01#neg_1#0000",
"fem_12#pos_3#0003",
"male_02#neg_1#0002",
"male_11#pos_2#0001",
]
GOLDEN_TEST_SAMPLES["lijian"] = [
"058矜持_0000",
"079绒花_0000",
"120遥远的天空底下_0000",
]
GOLDEN_TEST_SAMPLES["cdmusiceval"] = ["陶喆_普通朋友", "蔡琴_给电影人的情书"]
def get_test_folders():
    # Each golden sample id looks like "<folder>_<uid>"; keep the folder part
    # for every golden sample. Note `split("_")[:1]` yields a one-element
    # list, e.g. ["421"], matching the original behavior.
    return [sample.split("_")[:1] for sample in GOLDEN_TEST_SAMPLES["kising"]]
17,535 | import random
import os
import json
import torchaudio
from tqdm import tqdm
from glob import glob
from collections import defaultdict
from utils.util import has_existed
from preprocessors import GOLDEN_TEST_SAMPLES
def KiSing_statistics(data_dir):
    """Scan the KiSing layout ``<data_dir>/<song>/<uid>.wav``.

    Returns:
        defaultdict: mapping from song folder name to the list of utterance ids.
    """
    folder_names = []
    folders2utts = defaultdict(list)
    for folder_path in glob(data_dir + "/*"):
        name = folder_path.split("/")[-1]
        folder_names.append(name)
        for wav_path in glob(folder_path + "/*.wav"):
            folders2utts[name].append(wav_path.split("/")[-1].split(".")[0])

    unique_folders = sorted(set(folder_names))
    print("KiSing: {} unique songs".format(len(unique_folders)))
    return folders2utts
17,536 | import os
import json
import os
from collections import defaultdict
from tqdm import tqdm
def get_uids_and_wav_paths(cfg, dataset, dataset_type):
    """Load the Uids and wav paths for one split of the preprocessed bigdata set.

    Args:
        cfg: config providing OUTPUT_PATH / PREPROCESS_VERSION / BIGDATA_VERSION.
        dataset (str): must be "bigdata".
        dataset_type (str): split name; the suffix after the last "_" selects
            the json file, e.g. "golden_train" -> "train.json".

    Returns:
        tuple: (list of Uids, list of wav paths)
    """
    assert dataset == "bigdata"
    split_name = dataset_type.split("_")[-1]
    dataset_dir = os.path.join(
        cfg.OUTPUT_PATH,
        "preprocess/{}_version".format(cfg.PREPROCESS_VERSION),
        "bigdata/{}".format(cfg.BIGDATA_VERSION),
    )
    with open(os.path.join(dataset_dir, "{}.json".format(split_name)), "r") as f:
        utterances = json.load(f)
    uids = [u["Uid"] for u in utterances]
    wav_paths = [u["Path"] for u in utterances]
    return uids, wav_paths
17,537 | import os
import json
import os
from collections import defaultdict
from tqdm import tqdm
def take_duration(utt):
    """Return the "Duration" field of an utterance dict (used as a sort key)."""
    duration = utt["Duration"]
    return duration
17,538 | import os
import json
import os
from tqdm import tqdm
import torchaudio
from glob import glob
from collections import defaultdict
from utils.util import has_existed
from utils.io import save_audio
from utils.audio_slicer import Slicer
from preprocessors import GOLDEN_TEST_SAMPLES
def split_to_utterances(language_dir, output_dir):
    """Slice every ``<language_dir>/<singer>/<song>`` audio file into utterances.

    Each clip is written to ``<output_dir>/<singer>/<song>/<index>.wav``.
    Slicing is silence-based via ``Slicer`` (threshold -30 dB, up to 3000 ms of
    kept silence) — see utils.audio_slicer for the exact semantics.
    """
    print("Splitting to utterances for {}...".format(language_dir))
    for wav_file in tqdm(glob("{}/*/*".format(language_dir))):
        # Load waveform; singer and song names come from the path layout.
        singer_name, song_name = wav_file.split("/")[-2:]
        song_name = song_name.split(".")[0]
        waveform, fs = torchaudio.load(wav_file)
        # Split on silence; one output file per detected chunk.
        slicer = Slicer(sr=fs, threshold=-30.0, max_sil_kept=3000)
        chunks = slicer.slice(waveform)
        for i, chunk in enumerate(chunks):
            save_dir = os.path.join(output_dir, singer_name, song_name)
            os.makedirs(save_dir, exist_ok=True)
            output_file = os.path.join(save_dir, "{:04d}.wav".format(i))
            save_audio(output_file, chunk, fs)
The provided code snippet includes necessary dependencies for implementing the `_main` function. Write a Python function `def _main(dataset_path)` to solve the following problem:
Split to utterances
Here is the function:
def _main(dataset_path):
    """
    Split to utterances
    """
    out_dir = os.path.join(dataset_path, "utterances")
    for language in ("chinese", "western"):
        split_to_utterances(os.path.join(dataset_path, language), out_dir)
17,539 | import os
import json
import os
from tqdm import tqdm
import torchaudio
from glob import glob
from collections import defaultdict
from utils.util import has_existed
from utils.io import save_audio
from utils.audio_slicer import Slicer
from preprocessors import GOLDEN_TEST_SAMPLES
GOLDEN_TEST_SAMPLES = defaultdict(list)
GOLDEN_TEST_SAMPLES["m4singer"] = [
"Alto-1_美错_0014",
"Bass-1_十年_0008",
"Soprano-2_同桌的你_0018",
"Tenor-5_爱笑的眼睛_0010",
]
GOLDEN_TEST_SAMPLES["svcc"] = [
# IDF1
"IDF1_10030",
"IDF1_10120",
"IDF1_10140",
# IDM1
"IDM1_10001",
"IDM1_10030",
"IDM1_10120",
# CDF1
"CDF1_10030",
"CDF1_10120",
"CDF1_10140",
# CDM1
"CDM1_10001",
"CDM1_10030",
"CDM1_10120",
]
GOLDEN_TEST_SAMPLES["svcceval"] = [
# SF1
"SF1_30001",
"SF1_30002",
"SF1_30003",
# SM1
"SM1_30001",
"SM1_30002",
"SM1_30003",
]
GOLDEN_TEST_SAMPLES["popbutfy"] = [
"Female1#you_are_my_sunshine_Professional#0",
"Female4#Someone_Like_You_Professional#10",
"Male2#Lemon_Tree_Professional#12",
"Male5#can_you_feel_the_love_tonight_Professional#20",
]
GOLDEN_TEST_SAMPLES["opensinger"] = [
"Man_0_大鱼_10",
"Man_21_丑八怪_14",
"Woman_39_mojito_22",
"Woman_40_易燃易爆炸_12",
]
GOLDEN_TEST_SAMPLES["nus48e"] = [
"ADIZ_read#01#0000",
"MCUR_sing#10#0000",
"JLEE_read#08#0001",
"SAMF_sing#18#0001",
]
GOLDEN_TEST_SAMPLES["popcs"] = [
"明天会更好_0004",
"欧若拉_0005",
"虫儿飞_0006",
"隐形的翅膀_0008",
]
GOLDEN_TEST_SAMPLES["kising"] = [
"421_0040",
"424_0013",
"431_0026",
]
GOLDEN_TEST_SAMPLES["csd"] = [
"en_004a_0001",
"en_042b_0006",
"kr_013a_0006",
"kr_045b_0004",
]
GOLDEN_TEST_SAMPLES["opera"] = [
"fem_01#neg_1#0000",
"fem_12#pos_3#0003",
"male_02#neg_1#0002",
"male_11#pos_2#0001",
]
GOLDEN_TEST_SAMPLES["lijian"] = [
"058矜持_0000",
"079绒花_0000",
"120遥远的天空底下_0000",
]
GOLDEN_TEST_SAMPLES["cdmusiceval"] = ["陶喆_普通朋友", "蔡琴_给电影人的情书"]
def get_test_songs():
    # Golden ids look like "<singer>#<song>#<uid>"; keep [singer, song] pairs,
    # e.g. ["fem_01", "neg_1"].
    return [sample.split("#")[:2] for sample in GOLDEN_TEST_SAMPLES["opera"]]
17,540 | import os
import json
import os
from tqdm import tqdm
import torchaudio
from glob import glob
from collections import defaultdict
from utils.util import has_existed
from utils.io import save_audio
from utils.audio_slicer import Slicer
from preprocessors import GOLDEN_TEST_SAMPLES
def opera_statistics(data_dir):
    """Scan ``<data_dir>/<singer>/<song>/<uid>.wav`` and collect per-singer stats.

    Returns:
        tuple: (nested dict singer -> song -> [uid], sorted unique singer names)
    """
    singer_names = []
    song_names = []
    singers2songs = defaultdict(lambda: defaultdict(list))
    for singer_path in glob(data_dir + "/*"):
        singer = singer_path.split("/")[-1]
        for song_path in glob(singer_path + "/*"):
            song = song_path.split("/")[-1]
            singer_names.append(singer)
            song_names.append(song)
            for wav_path in glob(song_path + "/*.wav"):
                singers2songs[singer][song].append(
                    wav_path.split("/")[-1].split(".")[0]
                )

    unique_singers = sorted(set(singer_names))
    unique_songs = sorted(set(song_names))
    print(
        "opera: {} singers, {} utterances ({} unique songs)".format(
            len(unique_singers), len(song_names), len(unique_songs)
        )
    )
    print("Singers: \n{}".format("\t".join(unique_singers)))
    return singers2songs, unique_singers
17,541 | import random
import os
import json
import torchaudio
from tqdm import tqdm
from glob import glob
from collections import defaultdict
from utils.util import has_existed
from utils.audio_slicer import split_utterances_from_audio
from preprocessors import GOLDEN_TEST_SAMPLES
def split_utterances_from_audio(
    wav_file,
    output_dir,
    max_duration_of_utterance=10.0,
    min_interval=300,
    db_threshold=-40,
):
    """
    Split a long audio into utterances according to the silence (VAD).

    max_duration_of_utterance (second):
        The maximum duration of every utterance (seconds)
    min_interval (millisecond):
        The smaller min_interval is, the more sliced audio clips this script is likely to generate.

    Returns a dict mapping "{index}.wav" filenames to [begin, end] sample
    positions (plus the "fs" sample rate); the same dict is also dumped to
    ``<output_dir>/positions.json``.
    """
    print("File:", wav_file.split("/")[-1])
    waveform, fs = torchaudio.load(wav_file)

    # Silence-based slicing; chunk/position semantics come from
    # utils.audio_slicer.Slicer.
    slicer = Slicer(sr=fs, min_interval=min_interval, threshold=db_threshold)
    chunks, positions = slicer.slice(waveform, return_chunks_positions=True)

    durations = [(end - begin) / fs for begin, end in positions]
    print(
        "Slicer's min silence part is {}ms, min and max duration of sliced utterances is {}s and {}s".format(
            min_interval, min(durations), max(durations)
        )
    )

    res_chunks, res_positions = [], []
    for i, chunk in enumerate(chunks):
        # Ensure a (channels, samples) layout for mono chunks.
        if len(chunk.shape) == 1:
            chunk = chunk[None, :]

        begin, end = positions[i]
        assert end - begin == chunk.shape[-1]

        max_wav_len = max_duration_of_utterance * fs
        if chunk.shape[-1] <= max_wav_len:
            res_chunks.append(chunk)
            res_positions.append(positions[i])
        else:
            # TODO: to reserve overlapping and conduct fade-in, fade-out

            # Get segments number: smallest count whose equal-length segments
            # all fit under max_wav_len.
            number = 2
            while chunk.shape[-1] // number >= max_wav_len:
                number += 1
            seg_len = chunk.shape[-1] // number

            # Split. NOTE: the trailing `len % number` samples are dropped
            # because every segment is exactly seg_len long.
            for num in range(number):
                s = seg_len * num
                t = min(s + seg_len, chunk.shape[-1])
                seg_begin = begin + s
                seg_end = begin + t

                res_chunks.append(chunk[:, s:t])
                res_positions.append((seg_begin, seg_end))

    # Save utterances
    os.makedirs(output_dir, exist_ok=True)
    res = {"fs": int(fs)}
    for i, chunk in enumerate(res_chunks):
        filename = "{:04d}.wav".format(i)
        res[filename] = [int(p) for p in res_positions[i]]
        save_audio(os.path.join(output_dir, filename), chunk, fs)

    # Save positions
    with open(os.path.join(output_dir, "positions.json"), "w") as f:
        json.dump(res, f, indent=4, ensure_ascii=False)

    return res
def _split_utts():
    """One-off helper: re-split every ``*/vocal.wav`` under a hard-coded raw
    directory into utterances under a sibling ``utterances`` root."""
    # NOTE(review): hard-coded absolute paths; this helper is machine-specific.
    raw_dir = "/mnt/chongqinggeminiceph1fs/geminicephfs/wx-mm-spr-xxxx/xueyaozhang/dataset/李玟/cocoeval/raw"
    output_root = "/mnt/chongqinggeminiceph1fs/geminicephfs/wx-mm-spr-xxxx/xueyaozhang/dataset/李玟/cocoeval/utterances"

    # WARNING: destructive — recursively deletes any previous output via a
    # shell `rm -rf` before regenerating it.
    if os.path.exists(output_root):
        os.system("rm -rf {}".format(output_root))

    vocal_files = glob(os.path.join(raw_dir, "*/vocal.wav"))
    for vocal_f in tqdm(vocal_files):
        # The parent directory name is the song name.
        song_name = vocal_f.split("/")[-2]
        output_dir = os.path.join(output_root, song_name)
        os.makedirs(output_dir, exist_ok=True)
        split_utterances_from_audio(vocal_f, output_dir, min_interval=300)
17,542 | import random
import os
import json
import torchaudio
from tqdm import tqdm
from glob import glob
from collections import defaultdict
from utils.util import has_existed
from utils.audio_slicer import split_utterances_from_audio
from preprocessors import GOLDEN_TEST_SAMPLES
def cocoeval_statistics(data_dir):
    """Scan ``<data_dir>/<song>/<uid>.wav`` and map each song to its utterance ids."""
    song2utts = defaultdict(list)
    song_paths = glob(data_dir + "/*")
    for song_path in song_paths:
        name = song_path.split("/")[-1]
        for wav_path in glob(song_path + "/*.wav"):
            song2utts[name].append(wav_path.split("/")[-1].split(".")[0])
    print("Cocoeval: {} songs".format(len(song_paths)))
    return song2utts
17,543 | import os
import json
import torchaudio
import librosa
from tqdm import tqdm
from glob import glob
from collections import defaultdict
from utils.util import has_existed
from preprocessors import GOLDEN_TEST_SAMPLES
GOLDEN_TEST_SAMPLES = defaultdict(list)
GOLDEN_TEST_SAMPLES["m4singer"] = [
"Alto-1_美错_0014",
"Bass-1_十年_0008",
"Soprano-2_同桌的你_0018",
"Tenor-5_爱笑的眼睛_0010",
]
GOLDEN_TEST_SAMPLES["svcc"] = [
# IDF1
"IDF1_10030",
"IDF1_10120",
"IDF1_10140",
# IDM1
"IDM1_10001",
"IDM1_10030",
"IDM1_10120",
# CDF1
"CDF1_10030",
"CDF1_10120",
"CDF1_10140",
# CDM1
"CDM1_10001",
"CDM1_10030",
"CDM1_10120",
]
GOLDEN_TEST_SAMPLES["svcceval"] = [
# SF1
"SF1_30001",
"SF1_30002",
"SF1_30003",
# SM1
"SM1_30001",
"SM1_30002",
"SM1_30003",
]
GOLDEN_TEST_SAMPLES["popbutfy"] = [
"Female1#you_are_my_sunshine_Professional#0",
"Female4#Someone_Like_You_Professional#10",
"Male2#Lemon_Tree_Professional#12",
"Male5#can_you_feel_the_love_tonight_Professional#20",
]
GOLDEN_TEST_SAMPLES["opensinger"] = [
"Man_0_大鱼_10",
"Man_21_丑八怪_14",
"Woman_39_mojito_22",
"Woman_40_易燃易爆炸_12",
]
GOLDEN_TEST_SAMPLES["nus48e"] = [
"ADIZ_read#01#0000",
"MCUR_sing#10#0000",
"JLEE_read#08#0001",
"SAMF_sing#18#0001",
]
GOLDEN_TEST_SAMPLES["popcs"] = [
"明天会更好_0004",
"欧若拉_0005",
"虫儿飞_0006",
"隐形的翅膀_0008",
]
GOLDEN_TEST_SAMPLES["kising"] = [
"421_0040",
"424_0013",
"431_0026",
]
GOLDEN_TEST_SAMPLES["csd"] = [
"en_004a_0001",
"en_042b_0006",
"kr_013a_0006",
"kr_045b_0004",
]
GOLDEN_TEST_SAMPLES["opera"] = [
"fem_01#neg_1#0000",
"fem_12#pos_3#0003",
"male_02#neg_1#0002",
"male_11#pos_2#0001",
]
GOLDEN_TEST_SAMPLES["lijian"] = [
"058矜持_0000",
"079绒花_0000",
"120遥远的天空底下_0000",
]
GOLDEN_TEST_SAMPLES["cdmusiceval"] = ["陶喆_普通朋友", "蔡琴_给电影人的情书"]
def get_test_songs():
    # Golden ids look like "<singer>#<song>#<uid>"; keep [singer, song] pairs,
    # e.g. ["Female1", "you_are_my_sunshine_Professional"].
    return [sample.split("#")[:2] for sample in GOLDEN_TEST_SAMPLES["popbutfy"]]
17,544 | import os
import json
import torchaudio
import librosa
from tqdm import tqdm
from glob import glob
from collections import defaultdict
from utils.util import has_existed
from preprocessors import GOLDEN_TEST_SAMPLES
def popbutfy_statistics(data_dir):
    """Scan ``<data_dir>/<singer>#...#<song>/`` folders and collect per-singer stats.

    Utterance ids come from the trailing "_<uid>.<ext>" part of each file name.

    Returns:
        tuple: (nested dict singer -> song -> [uid], sorted unique singer names)
    """
    singer_names = []
    song_names = []
    singer2songs = defaultdict(lambda: defaultdict(list))
    for entry in glob(data_dir + "/*"):
        parts = entry.split("/")[-1].split("#")
        singer, song = parts[0], parts[-1]
        singer_names.append(singer)
        song_names.append(song)
        for utt_path in glob(entry + "/*"):
            uid = utt_path.split("/")[-1].split("_")[-1].split(".")[0]
            singer2songs[singer][song].append(uid)

    unique_singers = sorted(set(singer_names))
    unique_songs = sorted(set(song_names))
    print(
        "PopBuTFy: {} singers, {} utterances ({} unique songs)".format(
            len(unique_singers), len(song_names), len(unique_songs)
        )
    )
    print("Singers: \n{}".format("\t".join(unique_singers)))
    return singer2songs, unique_singers
17,545 | import os
import json
import torchaudio
from glob import glob
from collections import defaultdict
from utils.util import has_existed
from preprocessors import GOLDEN_TEST_SAMPLES
GOLDEN_TEST_SAMPLES = defaultdict(list)
GOLDEN_TEST_SAMPLES["m4singer"] = [
"Alto-1_美错_0014",
"Bass-1_十年_0008",
"Soprano-2_同桌的你_0018",
"Tenor-5_爱笑的眼睛_0010",
]
GOLDEN_TEST_SAMPLES["svcc"] = [
# IDF1
"IDF1_10030",
"IDF1_10120",
"IDF1_10140",
# IDM1
"IDM1_10001",
"IDM1_10030",
"IDM1_10120",
# CDF1
"CDF1_10030",
"CDF1_10120",
"CDF1_10140",
# CDM1
"CDM1_10001",
"CDM1_10030",
"CDM1_10120",
]
GOLDEN_TEST_SAMPLES["svcceval"] = [
# SF1
"SF1_30001",
"SF1_30002",
"SF1_30003",
# SM1
"SM1_30001",
"SM1_30002",
"SM1_30003",
]
GOLDEN_TEST_SAMPLES["popbutfy"] = [
"Female1#you_are_my_sunshine_Professional#0",
"Female4#Someone_Like_You_Professional#10",
"Male2#Lemon_Tree_Professional#12",
"Male5#can_you_feel_the_love_tonight_Professional#20",
]
GOLDEN_TEST_SAMPLES["opensinger"] = [
"Man_0_大鱼_10",
"Man_21_丑八怪_14",
"Woman_39_mojito_22",
"Woman_40_易燃易爆炸_12",
]
GOLDEN_TEST_SAMPLES["nus48e"] = [
"ADIZ_read#01#0000",
"MCUR_sing#10#0000",
"JLEE_read#08#0001",
"SAMF_sing#18#0001",
]
GOLDEN_TEST_SAMPLES["popcs"] = [
"明天会更好_0004",
"欧若拉_0005",
"虫儿飞_0006",
"隐形的翅膀_0008",
]
GOLDEN_TEST_SAMPLES["kising"] = [
"421_0040",
"424_0013",
"431_0026",
]
GOLDEN_TEST_SAMPLES["csd"] = [
"en_004a_0001",
"en_042b_0006",
"kr_013a_0006",
"kr_045b_0004",
]
GOLDEN_TEST_SAMPLES["opera"] = [
"fem_01#neg_1#0000",
"fem_12#pos_3#0003",
"male_02#neg_1#0002",
"male_11#pos_2#0001",
]
GOLDEN_TEST_SAMPLES["lijian"] = [
"058矜持_0000",
"079绒花_0000",
"120遥远的天空底下_0000",
]
GOLDEN_TEST_SAMPLES["cdmusiceval"] = ["陶喆_普通朋友", "蔡琴_给电影人的情书"]
def get_test_songs():
    # Golden ids look like "<song>_<uid>"; keep the song part for every golden
    # sample. Note `split("_")[:1]` yields a one-element list, matching the
    # original behavior.
    return [sample.split("_")[:1] for sample in GOLDEN_TEST_SAMPLES["popcs"]]
17,546 | import os
import json
import torchaudio
from glob import glob
from collections import defaultdict
from utils.util import has_existed
from preprocessors import GOLDEN_TEST_SAMPLES
def popcs_statistics(data_dir):
    """Scan ``<data_dir>/<prefix>-<song>/<uid>_*.wav`` and map songs to utterance ids."""
    song_names = []
    songs2utts = defaultdict(list)
    for song_path in glob(data_dir + "/*"):
        song = song_path.split("/")[-1].split("-")[-1]
        song_names.append(song)
        for wav_path in glob(song_path + "/*.wav"):
            songs2utts[song].append(wav_path.split("/")[-1].split("_")[0])

    unique_songs = sorted(set(song_names))
    print(
        "popcs: {} utterances ({} unique songs)".format(
            len(song_names), len(unique_songs)
        )
    )
    print("Songs: \n{}".format("\t".join(unique_songs)))
    return songs2utts
17,547 | import os
import json
import pickle
import glob
from collections import defaultdict
from tqdm import tqdm
from preprocessors import get_golden_samples_indexes
TRAIN_MAX_NUM_EVERY_PERSON = 250
TEST_MAX_NUM_EVERY_PERSON = 25
def get_golden_samples_indexes(
    dataset_name,
    dataset_dir=None,
    cfg=None,
    split=None,
    min_samples=5,
):
    """Get the indexes of the "golden" (benchmark) samples within a split.

    Args:
        dataset_name: key into the module-level GOLDEN_TRAIN_SAMPLES /
            GOLDEN_TEST_SAMPLES tables.
        dataset_dir: directory containing "<split>.json"; derived from `cfg`
            when omitted.
        cfg: config providing OUTPUT_PATH / PREPROCESS_VERSION; required when
            `dataset_dir` is None.
        split: split name; must contain "train" or "test" (otherwise
            `golden_samples` is never bound and a NameError is raised).
        min_samples: fallback — when no golden sample matches, the first
            `min_samples` indexes are returned instead.

    Returns:
        list: indexes into the split's utterance list.
    """
    if dataset_dir is None:
        assert cfg is not None
        dataset_dir = os.path.join(
            cfg.OUTPUT_PATH,
            "preprocess/{}_version".format(cfg.PREPROCESS_VERSION),
            dataset_name,
        )
    assert split is not None
    utt_file = os.path.join(dataset_dir, "{}.json".format(split))
    with open(utt_file, "r", encoding="utf-8") as f:
        samples = json.load(f)
    # Select the golden-sample table by split kind.
    if "train" in split:
        golden_samples = GOLDEN_TRAIN_SAMPLES[dataset_name]
    if "test" in split:
        golden_samples = GOLDEN_TEST_SAMPLES[dataset_name]
    res = []
    for idx, utt in enumerate(samples):
        if utt["Uid"] in golden_samples:
            res.append(idx)
        # cdmusiceval golden ids only cover the "<singer>_<song>" prefix.
        # NOTE(review): an index can be appended twice if both conditions
        # match for the same utterance — confirm that is intended.
        if dataset_name == "cdmusiceval":
            if "_".join(utt["Uid"].split("_")[:2]) in golden_samples:
                res.append(idx)
    if len(res) == 0:
        res = [i for i in range(min_samples)]
    return res
def select_sample_idxs():
    """Pick capped train/test sample indexes per singer for VCTK.

    Every singer is capped at TRAIN_MAX_NUM_EVERY_PERSON /
    TEST_MAX_NUM_EVERY_PERSON utterances; golden test samples are always
    included first and count toward the cap.
    NOTE(review): relies on a module-level `vctk_dir` path being defined.

    Returns:
        tuple: (sorted train indexes, sorted test indexes,
                raw train metadata list, raw test metadata list)
    """
    # =========== Train ===========
    with open(os.path.join(vctk_dir, "train.json"), "r") as f:
        raw_train = json.load(f)
    train_idxs = []
    train_nums = defaultdict(int)
    for utt in tqdm(raw_train):
        idx = utt["index"]
        singer = utt["Singer"]
        # Keep utterances for a singer only up to the per-singer cap.
        if train_nums[singer] < TRAIN_MAX_NUM_EVERY_PERSON:
            train_idxs.append(idx)
            train_nums[singer] += 1
    # =========== Test ===========
    with open(os.path.join(vctk_dir, "test.json"), "r") as f:
        raw_test = json.load(f)
    # golden test
    test_idxs = get_golden_samples_indexes(
        dataset_name="vctk", split="test", dataset_dir=vctk_dir
    )
    # Pre-count the golden samples so they consume part of each singer's cap.
    test_nums = defaultdict(int)
    for idx in test_idxs:
        singer = raw_test[idx]["Singer"]
        test_nums[singer] += 1
    for utt in tqdm(raw_test):
        idx = utt["index"]
        singer = utt["Singer"]
        if test_nums[singer] < TEST_MAX_NUM_EVERY_PERSON:
            test_idxs.append(idx)
            test_nums[singer] += 1
    train_idxs.sort()
    test_idxs.sort()
    return train_idxs, test_idxs, raw_train, raw_test
17,548 | import os
import json
import os
import glob
from tqdm import tqdm
import torchaudio
import pandas as pd
from glob import glob
from collections import defaultdict
from utils.io import save_audio
from utils.util import has_existed
from preprocessors import GOLDEN_TEST_SAMPLES
def split_to_utterances(language_dir, output_dir):
    """Split every CSD song under ``<language_dir>/wav`` into utterances.

    Utterance boundaries come from the per-song phoneme text file
    (``txt/<song>.txt``: one whitespace-separated utterance per line) aligned
    against the note-level csv annotation (``csv/<song>.csv`` with start/end/
    syllable/pitch columns). Output: ``<output_dir>/<song>/<index>.wav``.
    """
    print("Splitting to utterances for {}...".format(language_dir))

    wav_dir = os.path.join(language_dir, "wav")
    phoneme_dir = os.path.join(language_dir, "txt")
    annot_dir = os.path.join(language_dir, "csv")

    # Accumulated across songs but never used after the loop — presumably a
    # leftover from dataset exploration.
    pitches = set()
    for wav_file in tqdm(glob("{}/*.wav".format(wav_dir))):
        # Load waveform
        song_name = wav_file.split("/")[-1].split(".")[0]
        waveform, fs = torchaudio.load(wav_file)

        # Load utterances: one non-empty line per utterance, split into phones.
        phoneme_file = os.path.join(phoneme_dir, "{}.txt".format(song_name))
        with open(phoneme_file, "r") as f:
            lines = f.readlines()
            utterances = [l.strip().split() for l in lines]
            utterances = [utt for utt in utterances if len(utt) > 0]

        # Load annotation
        annot_file = os.path.join(annot_dir, "{}.csv".format(song_name))
        annot_df = pd.read_csv(annot_file)
        pitches = pitches.union(set(annot_df["pitch"]))
        starts = annot_df["start"].tolist()
        ends = annot_df["end"].tolist()
        syllables = annot_df["syllable"].tolist()

        # Split: walk the csv rows in lockstep with the utterance phones;
        # the asserts check first/last phones line up with the annotation.
        curr = 0
        for i, phones in enumerate(utterances):
            sz = len(phones)
            assert phones[0] == syllables[curr]
            assert phones[-1] == syllables[curr + sz - 1]
            s = starts[curr]
            e = ends[curr + sz - 1]
            curr += sz

            save_dir = os.path.join(output_dir, song_name)
            os.makedirs(save_dir, exist_ok=True)
            output_file = os.path.join(save_dir, "{:04d}.wav".format(i))
            # NOTE(review): `save_utterance` is not among the visible imports
            # (only `save_audio` is) — confirm it is defined in this module.
            save_utterance(output_file, waveform, fs, start=s, end=e)
The provided code snippet includes necessary dependencies for implementing the `_main` function. Write a Python function `def _main(dataset_path)` to solve the following problem:
Split to utterances
Here is the function:
def _main(dataset_path):
    """
    Split to utterances
    """
    out_dir = os.path.join(dataset_path, "utterances")
    for language in ("english", "korean"):
        split_to_utterances(os.path.join(dataset_path, language), out_dir)
17,549 | import os
import json
import os
import glob
from tqdm import tqdm
import torchaudio
import pandas as pd
from glob import glob
from collections import defaultdict
from utils.io import save_audio
from utils.util import has_existed
from preprocessors import GOLDEN_TEST_SAMPLES
# Hand-picked "golden" evaluation samples for each supported dataset, keyed by
# dataset name. Each id follows the owning dataset's own naming scheme (singer,
# song and utterance id joined with "_" or "#"); unknown datasets yield [].
GOLDEN_TEST_SAMPLES = defaultdict(list)
GOLDEN_TEST_SAMPLES["m4singer"] = [
    "Alto-1_美错_0014",
    "Bass-1_十年_0008",
    "Soprano-2_同桌的你_0018",
    "Tenor-5_爱笑的眼睛_0010",
]
GOLDEN_TEST_SAMPLES["svcc"] = [
    # IDF1
    "IDF1_10030",
    "IDF1_10120",
    "IDF1_10140",
    # IDM1
    "IDM1_10001",
    "IDM1_10030",
    "IDM1_10120",
    # CDF1
    "CDF1_10030",
    "CDF1_10120",
    "CDF1_10140",
    # CDM1
    "CDM1_10001",
    "CDM1_10030",
    "CDM1_10120",
]
GOLDEN_TEST_SAMPLES["svcceval"] = [
    # SF1
    "SF1_30001",
    "SF1_30002",
    "SF1_30003",
    # SM1
    "SM1_30001",
    "SM1_30002",
    "SM1_30003",
]
GOLDEN_TEST_SAMPLES["popbutfy"] = [
    "Female1#you_are_my_sunshine_Professional#0",
    "Female4#Someone_Like_You_Professional#10",
    "Male2#Lemon_Tree_Professional#12",
    "Male5#can_you_feel_the_love_tonight_Professional#20",
]
GOLDEN_TEST_SAMPLES["opensinger"] = [
    "Man_0_大鱼_10",
    "Man_21_丑八怪_14",
    "Woman_39_mojito_22",
    "Woman_40_易燃易爆炸_12",
]
GOLDEN_TEST_SAMPLES["nus48e"] = [
    "ADIZ_read#01#0000",
    "MCUR_sing#10#0000",
    "JLEE_read#08#0001",
    "SAMF_sing#18#0001",
]
GOLDEN_TEST_SAMPLES["popcs"] = [
    "明天会更好_0004",
    "欧若拉_0005",
    "虫儿飞_0006",
    "隐形的翅膀_0008",
]
GOLDEN_TEST_SAMPLES["kising"] = [
    "421_0040",
    "424_0013",
    "431_0026",
]
GOLDEN_TEST_SAMPLES["csd"] = [
    "en_004a_0001",
    "en_042b_0006",
    "kr_013a_0006",
    "kr_045b_0004",
]
GOLDEN_TEST_SAMPLES["opera"] = [
    "fem_01#neg_1#0000",
    "fem_12#pos_3#0003",
    "male_02#neg_1#0002",
    "male_11#pos_2#0001",
]
GOLDEN_TEST_SAMPLES["lijian"] = [
    "058矜持_0000",
    "079绒花_0000",
    "120遥远的天空底下_0000",
]
GOLDEN_TEST_SAMPLES["cdmusiceval"] = ["陶喆_普通朋友", "蔡琴_给电影人的情书"]
def get_test_songs():
    """Return (language, song) pairs for the CSD golden test samples."""
    samples = GOLDEN_TEST_SAMPLES["csd"]
    # Ids look like "en_004a_0001" -> keep the (language, song) prefix.
    return [name.split("_")[:2] for name in samples]
17,550 | import os
import json
import os
import glob
from tqdm import tqdm
import torchaudio
import pandas as pd
from glob import glob
from collections import defaultdict
from utils.io import save_audio
from utils.util import has_existed
from preprocessors import GOLDEN_TEST_SAMPLES
def csd_statistics(data_dir):
    """Scan the CSD utterance tree and group uids by language and song.

    Folder names like "en001a" are split into language ("en") and song
    ("001a"). Prints a summary and returns the nested mapping.
    """
    languages, songs = [], []
    languages2songs = defaultdict(lambda: defaultdict(list))
    for folder in glob(data_dir + "/*"):
        name = folder.split("/")[-1]
        language, song = name[:2], name[2:]
        languages.append(language)
        songs.append(song)
        for utt in glob(folder + "/*"):
            uid = utt.split("/")[-1].split(".")[0]
            languages2songs[language][song].append(uid)
    unique_languages = sorted(set(languages))
    unique_songs = sorted(set(songs))
    print(
        "csd: {} languages, {} utterances ({} unique songs)".format(
            len(unique_languages), len(songs), len(unique_songs)
        )
    )
    print("Languages: \n{}".format("\t".join(unique_languages)))
    return languages2songs
17,551 | import os
import json
import torchaudio
from tqdm import tqdm
from glob import glob
from collections import defaultdict
from utils.io import save_audio
from utils.util import has_existed
from utils.audio_slicer import Slicer
from preprocessors import GOLDEN_TEST_SAMPLES
def split_to_utterances(dataset_path, singer, style, output_dir):
    """Slice every song of (singer, style) into silence-separated clips."""
    data_dir = os.path.join(dataset_path, singer, style)
    print("Splitting to utterances for {}...".format(data_dir))
    for wav_file in tqdm(glob(data_dir + "/*.wav")):
        song_name = wav_file.split("/")[-1].split(".")[0]
        waveform, fs = torchaudio.load(wav_file)
        # Cut on silence: -40 dB threshold, keep at most 4 s of silence.
        slicer = Slicer(sr=fs, threshold=-40.0, max_sil_kept=4000)
        for idx, chunk in enumerate(slicer.slice(waveform)):
            save_dir = os.path.join(output_dir, singer, style, song_name)
            os.makedirs(save_dir, exist_ok=True)
            save_audio(os.path.join(save_dir, "{:04d}.wav".format(idx)), chunk, fs)
The provided code snippet includes necessary dependencies for implementing the `_main` function. Write a Python function `def _main(dataset_path)` to solve the following problem:
Split to utterances
Here is the function:
def _main(dataset_path):
    """Split every singer's read/sing recordings into utterances."""
    utterance_dir = os.path.join(dataset_path, "utterances")
    for singer_dir in glob(dataset_path + "/*"):
        singer = singer_dir.split("/")[-1]
        for style in ("read", "sing"):
            split_to_utterances(dataset_path, singer, style, utterance_dir)
17,552 | import os
import json
import torchaudio
from tqdm import tqdm
from glob import glob
from collections import defaultdict
from utils.io import save_audio
from utils.util import has_existed
from utils.audio_slicer import Slicer
from preprocessors import GOLDEN_TEST_SAMPLES
# Hand-picked "golden" evaluation samples for each supported dataset, keyed by
# dataset name. Each id follows the owning dataset's own naming scheme (singer,
# song and utterance id joined with "_" or "#"); unknown datasets yield [].
GOLDEN_TEST_SAMPLES = defaultdict(list)
GOLDEN_TEST_SAMPLES["m4singer"] = [
    "Alto-1_美错_0014",
    "Bass-1_十年_0008",
    "Soprano-2_同桌的你_0018",
    "Tenor-5_爱笑的眼睛_0010",
]
GOLDEN_TEST_SAMPLES["svcc"] = [
    # IDF1
    "IDF1_10030",
    "IDF1_10120",
    "IDF1_10140",
    # IDM1
    "IDM1_10001",
    "IDM1_10030",
    "IDM1_10120",
    # CDF1
    "CDF1_10030",
    "CDF1_10120",
    "CDF1_10140",
    # CDM1
    "CDM1_10001",
    "CDM1_10030",
    "CDM1_10120",
]
GOLDEN_TEST_SAMPLES["svcceval"] = [
    # SF1
    "SF1_30001",
    "SF1_30002",
    "SF1_30003",
    # SM1
    "SM1_30001",
    "SM1_30002",
    "SM1_30003",
]
GOLDEN_TEST_SAMPLES["popbutfy"] = [
    "Female1#you_are_my_sunshine_Professional#0",
    "Female4#Someone_Like_You_Professional#10",
    "Male2#Lemon_Tree_Professional#12",
    "Male5#can_you_feel_the_love_tonight_Professional#20",
]
GOLDEN_TEST_SAMPLES["opensinger"] = [
    "Man_0_大鱼_10",
    "Man_21_丑八怪_14",
    "Woman_39_mojito_22",
    "Woman_40_易燃易爆炸_12",
]
GOLDEN_TEST_SAMPLES["nus48e"] = [
    "ADIZ_read#01#0000",
    "MCUR_sing#10#0000",
    "JLEE_read#08#0001",
    "SAMF_sing#18#0001",
]
GOLDEN_TEST_SAMPLES["popcs"] = [
    "明天会更好_0004",
    "欧若拉_0005",
    "虫儿飞_0006",
    "隐形的翅膀_0008",
]
GOLDEN_TEST_SAMPLES["kising"] = [
    "421_0040",
    "424_0013",
    "431_0026",
]
GOLDEN_TEST_SAMPLES["csd"] = [
    "en_004a_0001",
    "en_042b_0006",
    "kr_013a_0006",
    "kr_045b_0004",
]
GOLDEN_TEST_SAMPLES["opera"] = [
    "fem_01#neg_1#0000",
    "fem_12#pos_3#0003",
    "male_02#neg_1#0002",
    "male_11#pos_2#0001",
]
GOLDEN_TEST_SAMPLES["lijian"] = [
    "058矜持_0000",
    "079绒花_0000",
    "120遥远的天空底下_0000",
]
GOLDEN_TEST_SAMPLES["cdmusiceval"] = ["陶喆_普通朋友", "蔡琴_给电影人的情书"]
def get_test_songs():
    """Return (singer_style, song) pairs for the NUS-48E golden test set."""
    samples = GOLDEN_TEST_SAMPLES["nus48e"]
    # Ids look like "ADIZ_read#01#0000" -> keep (singer_style, song).
    return [name.split("#")[:2] for name in samples]
17,553 | import os
import json
import torchaudio
from tqdm import tqdm
from glob import glob
from collections import defaultdict
from utils.io import save_audio
from utils.util import has_existed
from utils.audio_slicer import Slicer
from preprocessors import GOLDEN_TEST_SAMPLES
def nus48e_statistics(data_dir):
    """Group NUS-48E utterance uids by "<singer>_<style>" and song.

    Expects ``data_dir/<singer>/<style>/<song>/*.wav``. Prints a summary and
    returns (nested mapping, sorted unique singer names).
    """
    singers, songs = [], []
    singer2songs = defaultdict(lambda: defaultdict(list))
    for singer_dir in glob(data_dir + "/*"):
        singer_name = singer_dir.split("/")[-1]
        for style_dir in glob(singer_dir + "/*"):
            style_name = style_dir.split("/")[-1]
            singer = singer_name + "_" + style_name
            singers.append(singer)
            for song_dir in glob(style_dir + "/*"):
                song = song_dir.split("/")[-1]
                songs.append(song)
                for utt in glob(song_dir + "/*.wav"):
                    uid = utt.split("/")[-1].split(".")[0]
                    singer2songs[singer][song].append(uid)
    unique_singers = sorted(set(singers))
    unique_songs = sorted(set(songs))
    print(
        "nus_48_e: {} singers, {} utterances ({} unique songs)".format(
            len(unique_singers), len(songs), len(unique_songs)
        )
    )
    print("Singers: \n{}".format("\t".join(unique_singers)))
    return singer2songs, unique_singers
17,554 | import os
import json
import librosa
from tqdm import tqdm
from glob import glob
from collections import defaultdict
from utils.util import has_existed
def vctk_statistics(data_dir):
    """Collect per-speaker utterance ids from the trimmed VCTK wav tree.

    Files named like "p225_001_mic1.flac" are mapped to uid "001_mic1".
    Returns (speaker -> uid list, sorted unique speaker names).
    """
    speakers = []
    speakers2utts = defaultdict(list)
    for speaker_dir in glob(data_dir + "/wav48_silence_trimmed" + "/*"):
        speaker = speaker_dir.split("/")[-1]
        # The wav folder also ships a stray log file; skip it.
        if speaker == "log.txt":
            continue
        speakers.append(speaker)
        for utt in glob(speaker_dir + "/*"):
            parts = utt.split("/")[-1].split("_")
            uid = parts[1] + "_" + parts[2].split(".")[0]
            speakers2utts[speaker].append(uid)
    unique_speakers = sorted(set(speakers))
    print("Speakers: \n{}".format("\t".join(unique_speakers)))
    return speakers2utts, unique_speakers
17,555 | import os
import json
import librosa
from tqdm import tqdm
from glob import glob
from collections import defaultdict
from utils.util import has_existed
def get_lines(file):
    """Read *file* and return its lines with surrounding whitespace stripped."""
    with open(file, "r") as f:
        return [line.strip() for line in f.readlines()]
def vctk_speaker_infos(data_dir):
    """Parse VCTK's speaker-info.txt into {id: "female_<id>"/"male_<id>"}.

    Parsing is positional on the whitespace-collapsed line: "pXXX" ids are 4
    chars with gender at index 6, "sN" ids are 2 chars with gender at index 4.
    NOTE(review): if a line's gender is neither "F" nor "M", `speaker` keeps
    its value from the previous iteration (or is unbound on the first) --
    presumably the file never contains such lines; confirm against the data.
    """
    file = os.path.join(data_dir, "speaker-info.txt")
    lines = get_lines(file)
    ID2speakers = defaultdict()
    for l in tqdm(lines):
        # Collapse spaces so the fields sit at fixed character offsets.
        items = l.replace(" ", "")
        if items[:2] == "ID":
            # The header line
            continue
        if items[0] == "p":
            id = items[:4]
            gender = items[6]
        elif items[0] == "s":
            id = items[:2]
            gender = items[4]
        if gender == "F":
            speaker = "female_{}".format(id)
        elif gender == "M":
            speaker = "male_{}".format(id)
        ID2speakers[id] = speaker
    return ID2speakers
17,556 | import glob
import os
import json
import torchaudio
from tqdm import tqdm
from collections import defaultdict
from utils.io import save_audio
from utils.util import has_existed, remove_and_create
from utils.audio_slicer import Slicer
from preprocessors import GOLDEN_TEST_SAMPLES
def split_to_utterances(input_dir, output_dir):
    """Slice each Li Jian flac track into utterances under its song folder.

    File names are expected to look like "<id>_...李健-<song>...", e.g.
    "058_李健-矜持.flac"; the numeric id and the song title are recovered from
    the name and combined into folders like "058矜持".
    """
    print("Splitting to utterances for {}...".format(input_dir))
    files_list = glob.glob("*.flac", root_dir=input_dir)  # root_dir: Py 3.10+
    files_list.sort()
    for wav_file in tqdm(files_list):
        # Load waveform
        waveform, fs = torchaudio.load(os.path.join(input_dir, wav_file))
        # Derive the song name: strip spaces and "(Live)", split on the
        # artist marker "李健-", then zero-pad the numeric track id.
        filename = wav_file.replace(" ", "")
        filename = filename.replace("(Live)", "")
        song_id, filename = filename.split("李健-")
        song_id = song_id.split("_")[0]
        song_name = "{:03d}".format(int(song_id)) + filename.split("_")[0].split("-")[0]
        # Split on silence (-30 dB threshold, keep at most 3 s of silence).
        slicer = Slicer(sr=fs, threshold=-30.0, max_sil_kept=3000)
        chunks = slicer.slice(waveform)
        save_dir = os.path.join(output_dir, song_name)
        remove_and_create(save_dir)
        for i, chunk in enumerate(chunks):
            output_file = os.path.join(save_dir, "{:04d}.wav".format(i))
            save_audio(output_file, chunk, fs)
The provided code snippet includes necessary dependencies for implementing the `_main` function. Write a Python function `def _main(dataset_path)` to solve the following problem:
Split to utterances
Here is the function:
def _main(dataset_path):
    """Split the raw Li Jian vocal tracks into utterance-level clips."""
    utterance_dir = os.path.join(dataset_path, "utterances")
    split_to_utterances(os.path.join(dataset_path, "vocal_v2"), utterance_dir)
17,557 | import glob
import os
import json
import torchaudio
from tqdm import tqdm
from collections import defaultdict
from utils.io import save_audio
from utils.util import has_existed, remove_and_create
from utils.audio_slicer import Slicer
from preprocessors import GOLDEN_TEST_SAMPLES
# Hand-picked "golden" evaluation samples for each supported dataset, keyed by
# dataset name. Each id follows the owning dataset's own naming scheme (singer,
# song and utterance id joined with "_" or "#"); unknown datasets yield [].
GOLDEN_TEST_SAMPLES = defaultdict(list)
GOLDEN_TEST_SAMPLES["m4singer"] = [
    "Alto-1_美错_0014",
    "Bass-1_十年_0008",
    "Soprano-2_同桌的你_0018",
    "Tenor-5_爱笑的眼睛_0010",
]
GOLDEN_TEST_SAMPLES["svcc"] = [
    # IDF1
    "IDF1_10030",
    "IDF1_10120",
    "IDF1_10140",
    # IDM1
    "IDM1_10001",
    "IDM1_10030",
    "IDM1_10120",
    # CDF1
    "CDF1_10030",
    "CDF1_10120",
    "CDF1_10140",
    # CDM1
    "CDM1_10001",
    "CDM1_10030",
    "CDM1_10120",
]
GOLDEN_TEST_SAMPLES["svcceval"] = [
    # SF1
    "SF1_30001",
    "SF1_30002",
    "SF1_30003",
    # SM1
    "SM1_30001",
    "SM1_30002",
    "SM1_30003",
]
GOLDEN_TEST_SAMPLES["popbutfy"] = [
    "Female1#you_are_my_sunshine_Professional#0",
    "Female4#Someone_Like_You_Professional#10",
    "Male2#Lemon_Tree_Professional#12",
    "Male5#can_you_feel_the_love_tonight_Professional#20",
]
GOLDEN_TEST_SAMPLES["opensinger"] = [
    "Man_0_大鱼_10",
    "Man_21_丑八怪_14",
    "Woman_39_mojito_22",
    "Woman_40_易燃易爆炸_12",
]
GOLDEN_TEST_SAMPLES["nus48e"] = [
    "ADIZ_read#01#0000",
    "MCUR_sing#10#0000",
    "JLEE_read#08#0001",
    "SAMF_sing#18#0001",
]
GOLDEN_TEST_SAMPLES["popcs"] = [
    "明天会更好_0004",
    "欧若拉_0005",
    "虫儿飞_0006",
    "隐形的翅膀_0008",
]
GOLDEN_TEST_SAMPLES["kising"] = [
    "421_0040",
    "424_0013",
    "431_0026",
]
GOLDEN_TEST_SAMPLES["csd"] = [
    "en_004a_0001",
    "en_042b_0006",
    "kr_013a_0006",
    "kr_045b_0004",
]
GOLDEN_TEST_SAMPLES["opera"] = [
    "fem_01#neg_1#0000",
    "fem_12#pos_3#0003",
    "male_02#neg_1#0002",
    "male_11#pos_2#0001",
]
GOLDEN_TEST_SAMPLES["lijian"] = [
    "058矜持_0000",
    "079绒花_0000",
    "120遥远的天空底下_0000",
]
GOLDEN_TEST_SAMPLES["cdmusiceval"] = ["陶喆_普通朋友", "蔡琴_给电影人的情书"]
def get_test_songs():
    """Return the golden test song names for the Li Jian dataset."""
    # Ids look like "058矜持_0000"; the song name is the part before "_".
    return [name.split("_")[0] for name in GOLDEN_TEST_SAMPLES["lijian"]]
17,558 | import glob
import os
import json
import torchaudio
from tqdm import tqdm
from collections import defaultdict
from utils.io import save_audio
from utils.util import has_existed, remove_and_create
from utils.audio_slicer import Slicer
from preprocessors import GOLDEN_TEST_SAMPLES
def statistics(utt_dir):
    """Map each Li Jian song folder to its sorted utterance uids."""
    song2utts = defaultdict(list)
    for song_dir in sorted(glob.glob(utt_dir + "/*")):
        song_name = song_dir.split("/")[-1]
        for utt in sorted(glob.glob(song_dir + "/*.wav")):
            song2utts[song_name].append(utt.split("/")[-1].split(".")[0])
    utt_sum = sum(len(utts) for utts in song2utts.values())
    print("Li Jian: {} unique songs, {} utterances".format(len(song2utts), utt_sum))
    return song2utts
17,559 | import random
import os
import json
import librosa
from tqdm import tqdm
from glob import glob
from collections import defaultdict
from utils.util import has_existed
from preprocessors import GOLDEN_TEST_SAMPLES
# Hand-picked "golden" evaluation samples for each supported dataset, keyed by
# dataset name. Each id follows the owning dataset's own naming scheme (singer,
# song and utterance id joined with "_" or "#"); unknown datasets yield [].
GOLDEN_TEST_SAMPLES = defaultdict(list)
GOLDEN_TEST_SAMPLES["m4singer"] = [
    "Alto-1_美错_0014",
    "Bass-1_十年_0008",
    "Soprano-2_同桌的你_0018",
    "Tenor-5_爱笑的眼睛_0010",
]
GOLDEN_TEST_SAMPLES["svcc"] = [
    # IDF1
    "IDF1_10030",
    "IDF1_10120",
    "IDF1_10140",
    # IDM1
    "IDM1_10001",
    "IDM1_10030",
    "IDM1_10120",
    # CDF1
    "CDF1_10030",
    "CDF1_10120",
    "CDF1_10140",
    # CDM1
    "CDM1_10001",
    "CDM1_10030",
    "CDM1_10120",
]
GOLDEN_TEST_SAMPLES["svcceval"] = [
    # SF1
    "SF1_30001",
    "SF1_30002",
    "SF1_30003",
    # SM1
    "SM1_30001",
    "SM1_30002",
    "SM1_30003",
]
GOLDEN_TEST_SAMPLES["popbutfy"] = [
    "Female1#you_are_my_sunshine_Professional#0",
    "Female4#Someone_Like_You_Professional#10",
    "Male2#Lemon_Tree_Professional#12",
    "Male5#can_you_feel_the_love_tonight_Professional#20",
]
GOLDEN_TEST_SAMPLES["opensinger"] = [
    "Man_0_大鱼_10",
    "Man_21_丑八怪_14",
    "Woman_39_mojito_22",
    "Woman_40_易燃易爆炸_12",
]
GOLDEN_TEST_SAMPLES["nus48e"] = [
    "ADIZ_read#01#0000",
    "MCUR_sing#10#0000",
    "JLEE_read#08#0001",
    "SAMF_sing#18#0001",
]
GOLDEN_TEST_SAMPLES["popcs"] = [
    "明天会更好_0004",
    "欧若拉_0005",
    "虫儿飞_0006",
    "隐形的翅膀_0008",
]
GOLDEN_TEST_SAMPLES["kising"] = [
    "421_0040",
    "424_0013",
    "431_0026",
]
GOLDEN_TEST_SAMPLES["csd"] = [
    "en_004a_0001",
    "en_042b_0006",
    "kr_013a_0006",
    "kr_045b_0004",
]
GOLDEN_TEST_SAMPLES["opera"] = [
    "fem_01#neg_1#0000",
    "fem_12#pos_3#0003",
    "male_02#neg_1#0002",
    "male_11#pos_2#0001",
]
GOLDEN_TEST_SAMPLES["lijian"] = [
    "058矜持_0000",
    "079绒花_0000",
    "120遥远的天空底下_0000",
]
GOLDEN_TEST_SAMPLES["cdmusiceval"] = ["陶喆_普通朋友", "蔡琴_给电影人的情书"]
def get_test_songs():
    """Return (gender, singer_id, song) triples for the OpenSinger golden set."""
    samples = GOLDEN_TEST_SAMPLES["opensinger"]
    # Ids look like "Man_0_大鱼_10" -> keep the first three fields.
    return [name.split("_")[:3] for name in samples]
17,560 | import random
import os
import json
import librosa
from tqdm import tqdm
from glob import glob
from collections import defaultdict
from utils.util import has_existed
from preprocessors import GOLDEN_TEST_SAMPLES
def opensinger_statistics(data_dir):
    """Group OpenSinger utterance uids by "<gender>_<singer_id>" and song.

    Expects ``data_dir/<Gender>Raw/<singer_id>_<song>/<...>_<uid>.wav``.
    Prints a summary and returns (nested mapping, sorted unique singers).
    """
    singers, songs = [], []
    singer2songs = defaultdict(lambda: defaultdict(list))
    for gender_dir in glob(data_dir + "/*"):
        # Top-level folders end with "Raw"; strip the suffix to get the gender.
        gender = gender_dir.split("/")[-1][:-3]
        for singer_song_dir in glob(gender_dir + "/*"):
            fields = singer_song_dir.split("/")[-1].split("_")
            singer_id, song = fields[0], fields[1]
            singer = gender + "_" + singer_id
            singers.append(singer)
            songs.append(song)
            for utt in glob(singer_song_dir + "/*.wav"):
                uid = utt.split("/")[-1].split("_")[-1].split(".")[0]
                singer2songs[singer][song].append(uid)
    unique_singers = sorted(set(singers))
    unique_songs = sorted(set(songs))
    print(
        "opensinger: {} singers, {} songs ({} unique songs)".format(
            len(unique_singers), len(songs), len(unique_songs)
        )
    )
    print("Singers: \n{}".format("\t".join(unique_singers)))
    return singer2songs, unique_singers
17,561 | import json
from tqdm import tqdm
import os
import torchaudio
from utils import audio
import csv
import random
from utils.util import has_existed
from text import _clean_text
import librosa
import soundfile as sf
from scipy.io import wavfile
from pathlib import Path
import numpy as np
def get_lines(file):
    """Read *file* (UTF-8) and return stripped lines, with a progress bar."""
    with open(file, encoding="utf-8") as f:
        lines = [line.strip() for line in tqdm(f)]
    return lines
17,562 | import json
from tqdm import tqdm
import os
import torchaudio
from utils import audio
import csv
import random
from utils.util import has_existed
from text import _clean_text
import librosa
import soundfile as sf
from scipy.io import wavfile
from pathlib import Path
import numpy as np
def get_uid2utt(ljspeech_path, dataset, cfg):
    """Build LJSpeech utterance metadata from metadata.csv lines.

    Each line is "uid|raw|normalized text"; duration is measured from the
    wav. Returns (list of metadata dicts, total duration in hours).
    """
    uid2utt = []
    total_duration = 0
    for index, line in enumerate(tqdm(dataset)):
        fields = line.split("|")
        uid, text = fields[0], fields[2]
        audio_file = os.path.join(ljspeech_path, "wavs/{}.wav".format(uid))
        waveform, sample_rate = torchaudio.load(audio_file)
        duration = waveform.size(-1) / sample_rate
        uid2utt.append(
            {
                "Dataset": "LJSpeech",
                "index": index,
                "Singer": "LJSpeech",
                "Uid": uid,
                "Text": text,
                "Path": audio_file,
                "Duration": duration,
            }
        )
        total_duration += duration
    return uid2utt, total_duration / 3600
17,563 | import json
from tqdm import tqdm
import os
import torchaudio
from utils import audio
import csv
import random
from utils.util import has_existed
from text import _clean_text
import librosa
import soundfile as sf
from scipy.io import wavfile
from pathlib import Path
import numpy as np
def split_dataset(
    lines, test_rate=0.05, valid_rate=0.05, test_size=None, valid_size=None
):
    """Shuffle *lines* in place and split into (train, test, valid).

    Args:
        lines: list of items to split; NOTE: shuffled in place.
        test_rate / valid_rate: fractions used when explicit sizes are absent.
        test_size / valid_size: explicit split sizes; override the rates.

    Returns:
        (train_set, test_set, valid_set) lists covering all of *lines*.
    """
    # `is None` (identity), not `== None`: explicit sizes of 0 must also win.
    if test_size is None:
        test_size = int(len(lines) * test_rate)
    if valid_size is None:
        valid_size = int(len(lines) * valid_rate)
    random.shuffle(lines)
    test_set = lines[:test_size]
    valid_set = lines[test_size : test_size + valid_size]
    train_set = lines[test_size + valid_size :]
    return train_set, test_set, valid_set
17,564 | import json
from tqdm import tqdm
import os
import torchaudio
from utils import audio
import csv
import random
from utils.util import has_existed
from text import _clean_text
import librosa
import soundfile as sf
from scipy.io import wavfile
from pathlib import Path
import numpy as np
def textgird_extract(
    corpus_directory,
    output_directory,
    mfa_path=os.path.join(
        "pretrained", "mfa", "montreal-forced-aligner", "bin", "mfa_align"
    ),
    lexicon=os.path.join("text", "lexicon", "librispeech-lexicon.txt"),
    acoustic_model_path=os.path.join(
        "pretrained",
        "mfa",
        "montreal-forced-aligner",
        "pretrained_models",
        "english.zip",
    ),
    jobs="8",
):
    """Run Montreal Forced Aligner on a prepared corpus of *.wav/*.lab pairs.

    Writes TextGrid alignments into *output_directory*. Requires the MFA
    binary, lexicon and acoustic model to exist at the given default paths.
    NOTE(review): the command is built by string interpolation and run via
    os.system -- paths containing spaces or shell metacharacters will break it.
    """
    assert os.path.exists(
        corpus_directory
    ), "Please check the directionary contains *.wav, *.lab"
    assert (
        os.path.exists(mfa_path)
        and os.path.exists(lexicon)
        and os.path.exists(acoustic_model_path)
    ), f"Please download the MFA tools to {mfa_path} firstly"
    Path(output_directory).mkdir(parents=True, exist_ok=True)
    print(f"MFA results are save in {output_directory}")
    # Invoke the mfa_align CLI: corpus, lexicon, model, output dir, -j jobs.
    os.system(
        f".{os.path.sep}{mfa_path} {corpus_directory} {lexicon} {acoustic_model_path} {output_directory} -j {jobs} --clean"
    )
# Full-scale amplitude of 16-bit PCM; used to rescale [-1, 1] floats to int16.
max_wav_value = 32768.0
def _clean_text(text, cleaner_names):
    """Apply the named text cleaners to *text*, in order.

    NOTE(review): `cleaners` must be a text-cleaners module imported at file
    level; the import is not visible in this excerpt -- confirm it exists.
    """
    for name in cleaner_names:
        cleaner = getattr(cleaners, name)
        if not cleaner:
            raise Exception("Unknown cleaner: %s" % name)
        text = cleaner(text)
    return text
def prepare_align(dataset, dataset_path, cfg, output_path):
    """Prepare LJSpeech wav/lab pairs for MFA and run the alignment.

    For each metadata.csv row, writes a peak-normalized int16 wav and a .lab
    file with the cleaned transcript into the raw-data folder, skipping pairs
    that already exist, then calls `textgird_extract` on the whole corpus.
    """
    in_dir = dataset_path
    out_dir = os.path.join(output_path, dataset, cfg.raw_data)
    sampling_rate = cfg.sample_rate
    # Local name shadows the module-level `cleaners` inside this function only.
    cleaners = cfg.text_cleaners
    speaker = "LJSpeech"
    with open(os.path.join(dataset_path, "metadata.csv"), encoding="utf-8") as f:
        for line in tqdm(f):
            # metadata.csv rows: "uid|raw text|normalized text".
            parts = line.strip().split("|")
            base_name = parts[0]
            text = parts[2]
            text = _clean_text(text, cleaners)
            output_wav_path = os.path.join(out_dir, speaker, "{}.wav".format(base_name))
            output_lab_path = os.path.join(out_dir, speaker, "{}.lab".format(base_name))
            # Resume support: skip utterances that were already exported.
            if os.path.exists(output_wav_path) and os.path.exists(output_lab_path):
                continue
            wav_path = os.path.join(in_dir, "wavs", "{}.wav".format(base_name))
            if os.path.exists(wav_path):
                os.makedirs(os.path.join(out_dir, speaker), exist_ok=True)
                # NOTE(review): positional sr argument to librosa.load is a
                # deprecated calling convention in newer librosa -- confirm
                # the pinned librosa version accepts it.
                wav, _ = librosa.load(wav_path, sampling_rate)
                # Peak-normalize, then scale to int16 full range.
                wav = wav / max(abs(wav)) * max_wav_value
                wavfile.write(
                    os.path.join(out_dir, speaker, "{}.wav".format(base_name)),
                    sampling_rate,
                    wav.astype(np.int16),
                )
                with open(
                    os.path.join(out_dir, speaker, "{}.lab".format(base_name)),
                    "w",
                ) as f1:
                    f1.write(text)
    # Extract textgird with MFA
    textgird_extract(
        corpus_directory=out_dir,
        output_directory=os.path.join(output_path, dataset, "TextGrid"),
    )
17,565 | import os
import json
import librosa
from tqdm import tqdm
from collections import defaultdict
from utils.util import has_existed
from preprocessors import GOLDEN_TEST_SAMPLES
# Hand-picked "golden" evaluation samples for each supported dataset, keyed by
# dataset name. Each id follows the owning dataset's own naming scheme (singer,
# song and utterance id joined with "_" or "#"); unknown datasets yield [].
GOLDEN_TEST_SAMPLES = defaultdict(list)
GOLDEN_TEST_SAMPLES["m4singer"] = [
    "Alto-1_美错_0014",
    "Bass-1_十年_0008",
    "Soprano-2_同桌的你_0018",
    "Tenor-5_爱笑的眼睛_0010",
]
GOLDEN_TEST_SAMPLES["svcc"] = [
    # IDF1
    "IDF1_10030",
    "IDF1_10120",
    "IDF1_10140",
    # IDM1
    "IDM1_10001",
    "IDM1_10030",
    "IDM1_10120",
    # CDF1
    "CDF1_10030",
    "CDF1_10120",
    "CDF1_10140",
    # CDM1
    "CDM1_10001",
    "CDM1_10030",
    "CDM1_10120",
]
GOLDEN_TEST_SAMPLES["svcceval"] = [
    # SF1
    "SF1_30001",
    "SF1_30002",
    "SF1_30003",
    # SM1
    "SM1_30001",
    "SM1_30002",
    "SM1_30003",
]
GOLDEN_TEST_SAMPLES["popbutfy"] = [
    "Female1#you_are_my_sunshine_Professional#0",
    "Female4#Someone_Like_You_Professional#10",
    "Male2#Lemon_Tree_Professional#12",
    "Male5#can_you_feel_the_love_tonight_Professional#20",
]
GOLDEN_TEST_SAMPLES["opensinger"] = [
    "Man_0_大鱼_10",
    "Man_21_丑八怪_14",
    "Woman_39_mojito_22",
    "Woman_40_易燃易爆炸_12",
]
GOLDEN_TEST_SAMPLES["nus48e"] = [
    "ADIZ_read#01#0000",
    "MCUR_sing#10#0000",
    "JLEE_read#08#0001",
    "SAMF_sing#18#0001",
]
GOLDEN_TEST_SAMPLES["popcs"] = [
    "明天会更好_0004",
    "欧若拉_0005",
    "虫儿飞_0006",
    "隐形的翅膀_0008",
]
GOLDEN_TEST_SAMPLES["kising"] = [
    "421_0040",
    "424_0013",
    "431_0026",
]
GOLDEN_TEST_SAMPLES["csd"] = [
    "en_004a_0001",
    "en_042b_0006",
    "kr_013a_0006",
    "kr_045b_0004",
]
GOLDEN_TEST_SAMPLES["opera"] = [
    "fem_01#neg_1#0000",
    "fem_12#pos_3#0003",
    "male_02#neg_1#0002",
    "male_11#pos_2#0001",
]
GOLDEN_TEST_SAMPLES["lijian"] = [
    "058矜持_0000",
    "079绒花_0000",
    "120遥远的天空底下_0000",
]
GOLDEN_TEST_SAMPLES["cdmusiceval"] = ["陶喆_普通朋友", "蔡琴_给电影人的情书"]
def get_test_songs():
    """Return "<singer>_<song>" names for the M4Singer golden test set."""
    samples = GOLDEN_TEST_SAMPLES["m4singer"]
    # "Alto-1_美错_0014" -> "Alto-1_美错" (drop the utterance id).
    return ["_".join(name.split("_")[:2]) for name in samples]
17,566 | import os
import json
import librosa
from tqdm import tqdm
from collections import defaultdict
from utils.util import has_existed
from preprocessors import GOLDEN_TEST_SAMPLES
def m4singer_statistics(meta):
    """Group M4Singer utterance uids by singer and song.

    *meta* is a list of dicts whose "item_name" is "singer#song#uid".
    Prints a summary and returns (nested mapping, sorted unique singers).
    """
    singers, songs = [], []
    singer2songs = defaultdict(lambda: defaultdict(list))
    for utt in meta:
        singer, song, uid = utt["item_name"].split("#")
        singers.append(singer)
        songs.append(song)
        singer2songs[singer][song].append(uid)
    unique_singers = sorted(set(singers))
    unique_songs = sorted(set(songs))
    print(
        "M4Singer: {} singers, {} utterances ({} unique songs)".format(
            len(unique_singers), len(songs), len(unique_songs)
        )
    )
    print("Singers: \n{}".format("\t".join(unique_singers)))
    return singer2songs, unique_singers
17,567 | import os
import json
import torchaudio
from tqdm import tqdm
from glob import glob
from collections import defaultdict
from utils.util import has_existed
from preprocessors import GOLDEN_TEST_SAMPLES
def get_test_songs():
    """Return the fixed golden test song list for the Coco dataset."""
    return ["007Di Da Di"]
17,568 | import os
import json
import torchaudio
from tqdm import tqdm
from glob import glob
from collections import defaultdict
from utils.util import has_existed
from preprocessors import GOLDEN_TEST_SAMPLES
def coco_statistics(data_dir):
    """Map each Coco song folder to its utterance uids."""
    song2utts = defaultdict(list)
    song_dirs = glob(data_dir + "/*")
    for song_dir in song_dirs:
        song_name = song_dir.split("/")[-1]
        for utt in glob(song_dir + "/*.wav"):
            song2utts[song_name].append(utt.split("/")[-1].split(".")[0])
    print("Coco: {} songs".format(len(song_dirs)))
    return song2utts
17,569 | import os
import json
import pickle
import glob
from collections import defaultdict
from tqdm import tqdm
# Cap on how many utterances of one speaker may enter the test split.
TEST_MAX_NUM_EVERY_PERSON = 5
# NOTE(review): the body of get_chosen_speakers appears to have been lost in
# extraction (the def is immediately followed by another def, which is not
# valid Python) -- recover it from the original source.
def get_chosen_speakers():
def select_sample_idxs():
    """Split VCTK utterances of the chosen speakers into train/test indices.

    Reads train.json/test.json from `vctk_dir` (a module-level path not
    visible in this excerpt -- confirm where it is defined). Returns sorted
    "train_<idx>"/"test_<idx>" id lists plus the raw metadata.
    """
    chosen_speakers = get_chosen_speakers()
    with open(os.path.join(vctk_dir, "train.json"), "r") as f:
        raw_train = json.load(f)
    with open(os.path.join(vctk_dir, "test.json"), "r") as f:
        raw_test = json.load(f)
    train_idxs, test_idxs = [], []
    # =========== Test ===========
    # Take up to TEST_MAX_NUM_EVERY_PERSON utterances per chosen speaker.
    test_nums = defaultdict(int)
    for utt in tqdm(raw_train):
        idx = utt["index"]
        singer = utt["Singer"]
        if singer in chosen_speakers and test_nums[singer] < TEST_MAX_NUM_EVERY_PERSON:
            test_nums[singer] += 1
            test_idxs.append("train_{}".format(idx))
    for utt in tqdm(raw_test):
        idx = utt["index"]
        singer = utt["Singer"]
        if singer in chosen_speakers and test_nums[singer] < TEST_MAX_NUM_EVERY_PERSON:
            test_nums[singer] += 1
            test_idxs.append("test_{}".format(idx))
    # =========== Train ===========
    # Everything else from the chosen speakers goes to train.
    for utt in tqdm(raw_train):
        idx = utt["index"]
        singer = utt["Singer"]
        if singer in chosen_speakers and "train_{}".format(idx) not in test_idxs:
            train_idxs.append("train_{}".format(idx))
    for utt in tqdm(raw_test):
        idx = utt["index"]
        singer = utt["Singer"]
        if singer in chosen_speakers and "test_{}".format(idx) not in test_idxs:
            train_idxs.append("test_{}".format(idx))
    train_idxs.sort()
    test_idxs.sort()
    return train_idxs, test_idxs, raw_train, raw_test
17,570 | import os
import json
import torchaudio
from tqdm import tqdm
from glob import glob
from collections import defaultdict
from utils.util import has_existed
def vocalist_statistics(data_dir):
    """Group vocalist utterance uids by top-level split, singer and song.

    Expects ``data_dir/<split>/<singer>/<song>/*.wav``. Prints a summary and
    returns (3-level nested mapping, sorted unique singer names).
    """
    singers, songs = [], []
    global2singer2songs = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))
    for global_dir in glob(data_dir + "/*"):
        global_split = global_dir.split("/")[-1]
        for singer_dir in glob(global_dir + "/*"):
            singer = singer_dir.split("/")[-1]
            singers.append(singer)
            for song_dir in glob(singer_dir + "/*"):
                song = song_dir.split("/")[-1]
                songs.append(song)
                for utt in glob(song_dir + "/*.wav"):
                    uid = utt.split("/")[-1].split(".")[0]
                    global2singer2songs[global_split][singer][song].append(uid)
    unique_singers = sorted(set(singers))
    unique_songs = sorted(set(songs))
    print(
        "vocalist: {} singers, {} songs ({} unique songs)".format(
            len(unique_singers), len(songs), len(unique_songs)
        )
    )
    print("Singers: \n{}".format("\t".join(unique_singers)))
    return global2singer2songs, unique_singers
17,571 | from glob import glob
import os
import json
import torchaudio
from tqdm import tqdm
from collections import defaultdict
from utils.util import has_existed, remove_and_create
from utils.audio_slicer import split_utterances_from_audio
def split_to_utterances(input_dir, output_dir):
    """Split every vocal track in *input_dir* into utterances of <= 10 s."""
    print("Splitting to utterances for {}...".format(input_dir))
    files_list = glob("*", root_dir=input_dir)
    files_list.sort()
    for wav_file in tqdm(files_list):
        # File names carry "<song>-<singer>" as their third "_"-field.
        song_name, singer_name = wav_file.split("_")[2].split("-")
        save_dir = os.path.join(output_dir, singer_name, song_name)
        split_utterances_from_audio(
            os.path.join(input_dir, wav_file), save_dir, max_duration_of_utterance=10
        )
def remove_and_create(dir):
    """Delete *dir* (and its contents) if it exists, then recreate it empty.

    Uses shutil.rmtree instead of ``os.system("rm -r …")``: the shell form is
    non-portable and breaks (or worse, misbehaves) on paths with spaces or
    shell metacharacters.
    """
    import shutil

    if os.path.exists(dir):
        shutil.rmtree(dir)
    os.makedirs(dir, exist_ok=True)
The provided code snippet includes necessary dependencies for implementing the `_main` function. Write a Python function `def _main(dataset_path)` to solve the following problem:
Split to utterances
Here is the function:
def _main(dataset_path):
    """Rebuild the utterance directory from the raw vocal tracks."""
    utterance_dir = os.path.join(dataset_path, "utterances")
    remove_and_create(utterance_dir)
    split_to_utterances(os.path.join(dataset_path, "vocal"), utterance_dir)
17,572 | from glob import glob
import os
import json
import torchaudio
from tqdm import tqdm
from collections import defaultdict
from utils.util import has_existed, remove_and_create
from utils.audio_slicer import split_utterances_from_audio
def statistics(utterance_dir):
    """Group utterance uids by singer and song under *utterance_dir*.

    Expects ``utterance_dir/<singer>/<song>/*.wav``. Prints a summary and
    returns (nested mapping, sorted unique singer names).
    """
    singers, songs = [], []
    singers2songs = defaultdict(lambda: defaultdict(list))
    for singer_dir in glob(utterance_dir + "/*"):
        singer = singer_dir.split("/")[-1]
        for song_dir in glob(singer_dir + "/*"):
            song = song_dir.split("/")[-1]
            singers.append(singer)
            songs.append(song)
            for utt in glob(song_dir + "/*.wav"):
                uid = utt.split("/")[-1].split(".")[0]
                singers2songs[singer][song].append(uid)
    unique_singers = sorted(set(singers))
    unique_songs = sorted(set(songs))
    print(
        "Statistics: {} singers, {} utterances ({} unique songs)".format(
            len(unique_singers), len(songs), len(unique_songs)
        )
    )
    print("Singers: \n{}".format("\t".join(unique_singers)))
    return singers2songs, unique_singers
17,573 | import json
from tqdm import tqdm
import os
import librosa
from utils.util import has_existed
def get_lines(file):
    """Return the whitespace-stripped lines of *file*."""
    with open(file, "r") as f:
        lines = [l.strip() for l in f]
    return lines
def get_uid2utt(opencpop_path, dataset, dataset_type):
    """Build utterance metadata for one Opencpop split (e.g. "train"/"test").

    Reads ``segments/<dataset_type>.txt`` ("uid|..." per line), measures each
    wav's duration, and returns (metadata dicts, total duration in hours).
    """
    segments_file = os.path.join(opencpop_path, "segments", "{}.txt".format(dataset_type))
    uid2utt = []
    total_duration = 0
    for index, line in enumerate(tqdm(get_lines(segments_file))):
        uid = line.split("|")[0]
        audio_file = os.path.join(opencpop_path, "segments/wavs/{}.wav".format(uid))
        duration = librosa.get_duration(filename=audio_file)
        uid2utt.append(
            {
                "Dataset": dataset,
                "index": index,
                "Singer": "female1",
                "Uid": uid,
                "Path": audio_file,
                "Duration": duration,
            }
        )
        total_duration += duration
    return uid2utt, total_duration / 3600
17,574 | import os
from tqdm import tqdm
import glob
import json
import torchaudio
from utils.util import has_existed
from utils.io import save_audio
def save_audio(path, waveform, fs, add_silence=False, turn_up=False, volume_peak=0.9):
    """Save audio to path with processing (turn up volume, add silence)

    Args:
        path (str): path to save audio
        waveform (numpy array): waveform to save
        fs (int): sampling rate
        add_silence (bool, optional): whether to add silence to beginning and end. Defaults to False.
        turn_up (bool, optional): whether to turn up volume. Defaults to False.
        volume_peak (float, optional): volume peak. Defaults to 0.9.
    """
    if turn_up:
        peak = max(waveform.max(), abs(waveform.min()))
        # Fix: guard against silent input. Scaling by volume_peak / 0 filled
        # the signal with NaN/inf; leave an all-zero waveform untouched.
        if peak > 0:
            waveform = waveform * (volume_peak / peak)

    if add_silence:
        # Pad 1/20 s of silence on each side.
        silence_len = fs // 20
        silence = np.zeros((silence_len,), dtype=waveform.dtype)
        waveform = np.concatenate([silence, waveform, silence])

    waveform = torch.as_tensor(waveform, dtype=torch.float32, device="cpu")
    if len(waveform.size()) == 1:
        # torchaudio.save expects (channels, samples).
        waveform = waveform[None, :]
    elif waveform.size(0) != 1:
        # Stereo to mono: average the channels.
        waveform = torch.mean(waveform, dim=0, keepdim=True)

    torchaudio.save(path, waveform, fs, encoding="PCM_S", bits_per_sample=16)
def get_splitted_utterances(
    raw_wav_dir, trimed_wav_dir, n_utterance_splits, overlapping
):
    """Split each raw PJS song into ``n_utterance_splits`` overlapping utterances.

    Fix: the split arithmetic hard-coded ``// 3`` even though the number of
    splits is a parameter (the "already done" check above already used it);
    any ``n_utterance_splits != 3`` produced wrong segment boundaries.

    Args:
        raw_wav_dir: directory containing the raw ``pjs*_song.wav`` files.
        trimed_wav_dir: output directory for the trimmed, split wavs.
        n_utterance_splits: number of segments per song.
        overlapping: overlap between consecutive segments, in seconds.
            (Assumes an integer value — slice indices must be ints; confirm.)

    Returns:
        List of utterance dicts (Dataset/Singer/Uid/Path/Duration), sorted by Uid.
    """
    res = []
    raw_song_files = glob.glob(
        os.path.join(raw_wav_dir, "**/pjs*_song.wav"), recursive=True
    )
    trimed_song_files = glob.glob(
        os.path.join(trimed_wav_dir, "**/*.wav"), recursive=True
    )

    if len(raw_song_files) * n_utterance_splits == len(trimed_song_files):
        # Splitting already happened on a previous run: just rebuild metadata.
        print("Splitted done...")
        for wav_file in tqdm(trimed_song_files):
            uid = wav_file.split("/")[-1].split(".")[0]
            utt = {"Dataset": "pjs", "Singer": "male1", "Uid": uid, "Path": wav_file}

            waveform, sample_rate = torchaudio.load(wav_file)
            duration = waveform.size(-1) / sample_rate
            utt["Duration"] = duration

            res.append(utt)
    else:
        for wav_file in tqdm(raw_song_files):
            song_id = wav_file.split("/")[-1].split(".")[0]

            waveform, sample_rate = torchaudio.load(wav_file)
            # Trim leading silence, then trailing silence (flip + VAD + flip back).
            trimed_waveform = torchaudio.functional.vad(waveform, sample_rate)
            trimed_waveform = torchaudio.functional.vad(
                trimed_waveform.flip(dims=[1]), sample_rate
            ).flip(dims=[1])

            audio_len = trimed_waveform.size(-1)
            lapping_len = overlapping * sample_rate

            for i in range(n_utterance_splits):
                # Segment i covers [i/n, (i+1)/n) of the song plus the overlap tail.
                start = i * audio_len // n_utterance_splits
                end = start + audio_len // n_utterance_splits + lapping_len
                splitted_waveform = trimed_waveform[:, start:end]

                utt = {
                    "Dataset": "pjs",
                    "Singer": "male1",
                    "Uid": "{}_{}".format(song_id, i),
                }

                # Duration
                duration = splitted_waveform.size(-1) / sample_rate
                utt["Duration"] = duration

                # Save trimed wav
                splitted_waveform_file = os.path.join(
                    trimed_wav_dir, "{}.wav".format(utt["Uid"])
                )
                save_audio(splitted_waveform_file, splitted_waveform, sample_rate)

                # Path
                utt["Path"] = splitted_waveform_file

                res.append(utt)

    res = sorted(res, key=lambda x: x["Uid"])
    return res
17,575 | import json
from tqdm import tqdm
import os
import torchaudio
import torch
from utils.mfa_prepare import (
process_wav_files,
get_wav_files,
filter_wav_files_by_length,
)
from utils.cut_by_vad import cut_segments
from utils.whisper_transcription import asr_main
from utils.util import has_existed
import subprocess
import random
from collections import defaultdict
from glob import glob
import shutil
The provided code snippet includes necessary dependencies for implementing the `librilight_statistics` function. Write a Python function `def librilight_statistics(data_dir)` to solve the following problem:
Get statistics for librilight dataset
Here is the function:
def librilight_statistics(data_dir):
    """Collect Libri-light corpus statistics.

    Walks ``data_dir/<distribution>/<speaker>/*.wav`` and returns a mapping
    distribution -> speaker -> list of utterance ids.
    """
    distribution2speakers2utts = defaultdict(lambda: defaultdict(list))

    for distribution_path in glob(data_dir + "/*"):
        distribution = distribution_path.split("/")[-1]
        print(distribution)

        speaker_paths = glob(distribution_path + "/*")
        if not speaker_paths:
            continue

        for speaker_path in speaker_paths:
            speaker = speaker_path.split("/")[-1]
            for wav_path in glob(speaker_path + "/*.wav"):
                uid = wav_path.split("/")[-1].split(".")[0]
                distribution2speakers2utts[distribution][speaker].append(uid)

    return distribution2speakers2utts
17,576 | import json
from tqdm import tqdm
import os
import torchaudio
import torch
from utils.mfa_prepare import (
process_wav_files,
get_wav_files,
filter_wav_files_by_length,
)
from utils.cut_by_vad import cut_segments
from utils.whisper_transcription import asr_main
from utils.util import has_existed
import subprocess
import random
from collections import defaultdict
from glob import glob
import shutil
def get_speakers_from_directory(directory):
def has_existed(path, warning=False):
def split_dataset_by_speaker(base_dir, train_ratio=0.8, dev_ratio=0.1):
    """Split ``base_dir/<speaker>`` folders into train/dev/eval subsets by speaker.

    Moves each speaker directory into ``base_dir/{train,dev,eval}``. If the
    split already exists, nothing is moved and the speaker list is recomputed
    from the existing subsets.

    Args:
        base_dir: directory whose immediate subdirectories are speakers.
        train_ratio: fraction of speakers assigned to train.
        dev_ratio: fraction of speakers assigned to dev (the remainder is eval).

    Returns:
        Sorted list of all speaker names.
    """
    train_dir = os.path.join(base_dir, "train")
    dev_dir = os.path.join(base_dir, "dev")
    eval_dir = os.path.join(base_dir, "eval")

    # Check if dataset is already split
    if has_existed(train_dir) or has_existed(dev_dir) or has_existed(eval_dir):
        print("Dataset already split. Calculating speakers...")
        train_speakers = get_speakers_from_directory(train_dir)
        dev_speakers = get_speakers_from_directory(dev_dir)
        eval_speakers = get_speakers_from_directory(eval_dir)
        all_speakers = train_speakers + dev_speakers + eval_speakers
        unique_speakers = list(set(all_speakers))
        unique_speakers.sort()
        return unique_speakers

    # List all directories in the base directory
    all_speakers = [
        d for d in os.listdir(base_dir) if os.path.isdir(os.path.join(base_dir, d))
    ]
    random.shuffle(all_speakers)

    # Calculate split sizes
    total_speakers = len(all_speakers)
    train_size = int(total_speakers * train_ratio)
    dev_size = int(total_speakers * dev_ratio)
    eval_size = total_speakers - train_size - dev_size
    print("Total speakers:", total_speakers)
    print("Train speakers:", train_size)
    print("Dev speakers:", dev_size)
    print("Eval speakers:", eval_size)

    # Split directories
    train_speakers = all_speakers[:train_size]
    dev_speakers = all_speakers[train_size : train_size + dev_size]
    eval_speakers = all_speakers[train_size + dev_size :]

    # Function to move directories
    def move_speakers(speakers, target_dir):
        # Fix: create the target directory first -- shutil.move raises
        # FileNotFoundError when the destination's parent does not exist,
        # which is always the case on a fresh split.
        os.makedirs(target_dir, exist_ok=True)
        for speaker in speakers:
            shutil.move(
                os.path.join(base_dir, speaker), os.path.join(target_dir, speaker)
            )

    # Move directories
    print("Moving directories...")
    print("Moving Train speakers...")
    move_speakers(train_speakers, train_dir)
    print("Moving Dev speakers...")
    move_speakers(dev_speakers, dev_dir)
    print("Moving Eval speakers...")
    move_speakers(eval_speakers, eval_dir)

    unique_speakers = list(set(all_speakers))
    unique_speakers.sort()
    return unique_speakers
17,577 | import json
from tqdm import tqdm
import os
import torchaudio
import torch
from utils.mfa_prepare import (
process_wav_files,
get_wav_files,
filter_wav_files_by_length,
)
from utils.cut_by_vad import cut_segments
from utils.whisper_transcription import asr_main
from utils.util import has_existed
import subprocess
import random
from collections import defaultdict
from glob import glob
import shutil
def has_existed(path, warning=False):
    """Return True if *path* exists.

    With ``warning=True`` and an existing path, interactively ask the user
    whether to keep it: any answer other than 'n' keeps it (returns True).
    """
    if not warning:
        return os.path.exists(path)

    if not os.path.exists(path):
        return False

    answer = input(
        "The path {} has existed. \nInput 'y' (or hit Enter) to skip it, and input 'n' to re-write it [y/n]\n".format(
            path
        )
    )
    return answer != "n"
The provided code snippet includes necessary dependencies for implementing the `save_meta_data` function. Write a Python function `def save_meta_data(save_dir, processed_dir, distribution2speakers2utts, speakers)` to solve the following problem:
Save metadata for librilight dataset
Here is the function:
def save_meta_data(save_dir, processed_dir, distribution2speakers2utts, speakers):
    """Save metadata for librilight dataset.

    Writes train/dev/eval JSON manifests, an ``utt2singer`` mapping, and a
    singer-name -> id lookup (``singers.json``) into *save_dir*.

    Fix: ``utt2singer`` used to be opened with mode "w" *before* the
    ``has_existed`` early return, so a skipped rewrite still truncated an
    existing utt2singer file (and leaked the handle). It is now opened only
    after the check.

    Args:
        save_dir: output directory for the metadata files.
        processed_dir: root of the processed data, laid out as
            ``<distribution>/<speaker>/<uid>.{wav,txt}`` with a one-line
            transcript next to each wav.
        distribution2speakers2utts: mapping distribution -> speaker -> [uid].
        speakers: all speaker names, used to build the singer id lookup.
    """
    os.makedirs(save_dir, exist_ok=True)
    train_output_file = os.path.join(save_dir, "train.json")
    valid_output_file = os.path.join(save_dir, "dev.json")
    test_output_file = os.path.join(save_dir, "eval.json")
    singer_dict_file = os.path.join(save_dir, "singers.json")
    utt2singer_file = os.path.join(save_dir, "utt2singer")

    if has_existed(train_output_file):
        print("Metadata already exists. Skipping...")
        return

    utt2singer = open(utt2singer_file, "w")

    train = []
    test = []
    valid = []

    train_index_count = 0
    test_index_count = 0
    valid_index_count = 0

    train_total_duration = 0
    test_total_duration = 0
    valid_total_duration = 0

    # Save metadata
    for distribution, speakers2utts in tqdm(distribution2speakers2utts.items()):
        for speaker, utts in tqdm(speakers2utts.items()):
            for chosen_uid in utts:
                res = {
                    "Dataset": "librilight",
                    "Singer": speaker,
                    "Uid": "{}#{}#{}".format(distribution, speaker, chosen_uid),
                }
                res["Path"] = "{}/{}/{}.wav".format(distribution, speaker, chosen_uid)
                res["Path"] = os.path.join(processed_dir, res["Path"])
                assert os.path.exists(res["Path"])

                # One-line transcript lives next to the wav.
                text_file_path = os.path.join(
                    processed_dir,
                    distribution,
                    speaker,
                    chosen_uid + ".txt",
                )
                with open(text_file_path, "r") as f:
                    lines = f.readlines()
                    assert len(lines) == 1
                    text = lines[0].strip()
                    res["Text"] = text

                waveform, sample_rate = torchaudio.load(res["Path"])
                duration = waveform.size(-1) / sample_rate
                res["Duration"] = duration

                # Route by the split name embedded in the distribution folder
                # name ("train"/"dev"/"eval").
                if "train" in distribution:
                    res["index"] = train_index_count
                    train_total_duration += duration
                    train.append(res)
                    train_index_count += 1
                elif "dev" in distribution:
                    res["index"] = valid_index_count
                    valid_total_duration += duration
                    valid.append(res)
                    valid_index_count += 1
                elif "eval" in distribution:
                    res["index"] = test_index_count
                    test_total_duration += duration
                    test.append(res)
                    test_index_count += 1

                utt2singer.write("{}\t{}\n".format(res["Uid"], res["Singer"]))

    print("Done!")
    print(
        "Utterance count: train = {}, dev = {}, eval = {}".format(
            len(train), len(valid), len(test)
        )
    )
    print(
        "#Train duration= {}, #Dev duration= {}, #Eval duration= {}".format(
            train_total_duration / 3600,
            valid_total_duration / 3600,
            test_total_duration / 3600,
        )
    )

    with open(train_output_file, "w") as f:
        json.dump(train, f, indent=4, ensure_ascii=False)
    with open(test_output_file, "w") as f:
        json.dump(test, f, indent=4, ensure_ascii=False)
    with open(valid_output_file, "w") as f:
        json.dump(valid, f, indent=4, ensure_ascii=False)
    utt2singer.close()

    singer_lut = {name: i for i, name in enumerate(speakers)}
    with open(singer_dict_file, "w") as f:
        json.dump(singer_lut, f, indent=4, ensure_ascii=False)
    print("Metadata saved to", save_dir)
17,578 | from glob import glob
import os
import json
import torchaudio
from tqdm import tqdm
from collections import defaultdict
from utils.util import has_existed
def statistics(utterance_dir):
    """Scan ``utterance_dir/<singer>/<song>/*.wav`` and summarize the corpus.

    Returns:
        (singers2songs, unique_singers):
            singers2songs: dict singer -> dict song -> list of uid strings.
            unique_singers: sorted list of distinct singer names.
    """
    singers, songs, all_utts = [], [], []
    singers2songs = defaultdict(lambda: defaultdict(list))

    for singer_path in glob(utterance_dir + "/*"):
        singer = singer_path.split("/")[-1]
        for song_path in glob(singer_path + "/*"):
            song = song_path.split("/")[-1]
            singers.append(singer)
            songs.append(song)

            utts = glob(song_path + "/*.wav")
            all_utts.extend(utts)
            for utt_path in utts:
                uid = utt_path.split("/")[-1].split(".")[0]
                singers2songs[singer][song].append(uid)

    unique_singers = sorted(set(singers))
    unique_songs = sorted(set(songs))

    print(
        "Statistics: {} singers, {} utterances ({} unique songs)".format(
            len(unique_singers), len(all_utts), len(unique_songs)
        )
    )
    print("Singers: \n{}".format("\t".join(unique_singers)))
    return singers2songs, unique_singers
17,579 | import os
import json
import torchaudio
from tqdm import tqdm
from glob import glob
from collections import defaultdict
from utils.util import has_existed
def libritts_statistics(data_dir):
    """Collect LibriTTS-style corpus statistics.

    Walks ``data_dir/<distribution>/<speaker>/<phrase>/*.wav``.

    Returns:
        (stats, unique_speakers):
            stats: dict distribution -> speaker -> phrase -> list of uids.
            unique_speakers: sorted list of distinct speaker names.
    """
    speakers = []
    stats = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))

    for distribution_path in glob(data_dir + "/*"):
        distribution = distribution_path.split("/")[-1]
        print(distribution)

        speaker_paths = glob(distribution_path + "/*")
        if not speaker_paths:
            continue

        for speaker_path in speaker_paths:
            speaker = speaker_path.split("/")[-1]
            speakers.append(speaker)

            for phrase_path in glob(speaker_path + "/*"):
                phrase = phrase_path.split("/")[-1]
                for wav_path in glob(phrase_path + "/*.wav"):
                    uid = wav_path.split("/")[-1].split(".")[0]
                    stats[distribution][speaker][phrase].append(uid)

    unique_speakers = sorted(set(speakers))
    print("Speakers: \n{}".format("\t".join(unique_speakers)))
    return stats, unique_speakers
17,580 | import torchcrepe
import math
import librosa
import torch
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `extract_f0_periodicity_rmse` function. Write a Python function `def extract_f0_periodicity_rmse( audio_ref, audio_deg, hop_length=256, **kwargs, )` to solve the following problem:
Compute f0 periodicity Root Mean Square Error (RMSE) between the predicted and the ground truth audio. audio_ref: path to the ground truth audio. audio_deg: path to the predicted audio. fs: sampling rate. hop_length: hop length. method: "dtw" will use dtw algorithm to align the length of the ground truth and predicted audio. "cut" will cut both audios into a same length according to the one with the shorter length.
Here is the function:
def extract_f0_periodicity_rmse(
    audio_ref,
    audio_deg,
    hop_length=256,
    **kwargs,
):
    """Compute f0 periodicity Root Mean Square Error (RMSE) between the predicted and the ground truth audio.
    audio_ref: path to the ground truth audio.
    audio_deg: path to the predicted audio.
    fs: sampling rate.
    hop_length: hop length.
    method: "dtw" will use dtw algorithm to align the length of the ground truth and predicted audio.
            "cut" will cut both audios into a same length according to the one with the shorter length.
    """
    # Load hyperparameters -- the caller packs all options under one "kwargs" key.
    kwargs = kwargs["kwargs"]
    fs = kwargs["fs"]
    method = kwargs["method"]

    # Load audio; fs=None falls back to librosa's default sampling rate.
    if fs != None:
        audio_ref, _ = librosa.load(audio_ref, sr=fs)
        audio_deg, _ = librosa.load(audio_deg, sr=fs)
    else:
        audio_ref, fs = librosa.load(audio_ref)
        audio_deg, fs = librosa.load(audio_deg)

    # Convert to torch -- torchcrepe expects a (batch, samples) tensor.
    audio_ref = torch.from_numpy(audio_ref).unsqueeze(0)
    audio_deg = torch.from_numpy(audio_deg).unsqueeze(0)

    if torch.cuda.is_available():
        device = torch.device("cuda")
    else:
        device = torch.device("cpu")

    # Get periodicity (confidence of voicing) from torchcrepe's full model;
    # the pitch track itself is discarded.
    _, periodicity_ref = torchcrepe.predict(
        audio_ref,
        sample_rate=fs,
        hop_length=hop_length,
        fmin=0,
        fmax=1500,
        model="full",
        return_periodicity=True,
        device=device,
    )
    _, periodicity_deg = torchcrepe.predict(
        audio_deg,
        sample_rate=fs,
        hop_length=hop_length,
        fmin=0,
        fmax=1500,
        model="full",
        return_periodicity=True,
        device=device,
    )

    # Cut silence: suppress periodicity in frames torchcrepe classifies as
    # silent, then drop the batch dim and move to numpy.
    periodicity_ref = (
        torchcrepe.threshold.Silence()(
            periodicity_ref,
            audio_ref,
            fs,
            hop_length=hop_length,
        )
        .squeeze(0)
        .numpy()
    )
    periodicity_deg = (
        torchcrepe.threshold.Silence()(
            periodicity_deg,
            audio_deg,
            fs,
            hop_length=hop_length,
        )
        .squeeze(0)
        .numpy()
    )

    # Avoid silence audio: degenerate inputs yield zero error instead of
    # crashing the alignment below.
    min_length = min(len(periodicity_ref), len(periodicity_deg))
    if min_length <= 1:
        return 0

    # Periodicity length alignment
    if method == "cut":
        # Truncate both tracks to the shorter one.
        length = min(len(periodicity_ref), len(periodicity_deg))
        periodicity_ref = periodicity_ref[:length]
        periodicity_deg = periodicity_deg[:length]
    elif method == "dtw":
        # Align with dynamic time warping and compare the warped frame pairs.
        _, wp = librosa.sequence.dtw(periodicity_ref, periodicity_deg, backtrack=True)
        periodicity_ref_new = []
        periodicity_deg_new = []
        for i in range(wp.shape[0]):
            ref_index = wp[i][0]
            deg_index = wp[i][1]
            periodicity_ref_new.append(periodicity_ref[ref_index])
            periodicity_deg_new.append(periodicity_deg[deg_index])
        periodicity_ref = np.array(periodicity_ref_new)
        periodicity_deg = np.array(periodicity_deg_new)
    assert len(periodicity_ref) == len(periodicity_deg)

    # Compute RMSE over the aligned frames.
    periodicity_mse = np.square(np.subtract(periodicity_ref, periodicity_deg)).mean()
    periodicity_rmse = math.sqrt(periodicity_mse)

    return periodicity_rmse
17,581 | import math
import librosa
import torch
import numpy as np
from utils.util import JsonHParams
from utils.f0 import get_f0_features_using_parselmouth
class JsonHParams:
    """Attribute-style view over a (possibly nested) hyperparameter dict.

    Plain-dict values are recursively wrapped, so options are readable both
    as ``cfg.section.option`` and as ``cfg["section"]["option"]``.
    """

    def __init__(self, **kwargs):
        for name, value in kwargs.items():
            # Recursively wrap nested dicts so attribute access works all
            # the way down.
            self[name] = JsonHParams(**value) if type(value) == dict else value

    def keys(self):
        """Parameter names, as a dict view."""
        return self.__dict__.keys()

    def items(self):
        """(name, value) pairs, as a dict view."""
        return self.__dict__.items()

    def values(self):
        """Parameter values, as a dict view."""
        return self.__dict__.values()

    def __len__(self):
        return len(self.__dict__)

    def __getitem__(self, key):
        # Delegate to getattr so h["x"] and h.x are interchangeable.
        return getattr(self, key)

    def __setitem__(self, key, value):
        return setattr(self, key, value)

    def __contains__(self, key):
        return key in self.__dict__

    def __repr__(self):
        return self.__dict__.__repr__()
def get_f0_features_using_parselmouth(audio, cfg, speed=1):
    """Using parselmouth to extract the f0 feature.

    Args:
        audio: 1-D waveform samples at cfg.sample_rate.
        cfg: hyperparameter object providing hop_size, sample_rate,
            f0_min and f0_max.
        speed: time-scaling factor applied to the hop size (default 1).

    Returns:
        f0: numpy array of shape (frame_len,). Frames Praat deems unvoiced
        are reported as 0 -- confirm against parselmouth docs.
    """
    # NOTE(review): `parselmouth` is not imported in this snippet; the
    # enclosing module must provide it -- confirm.
    hop_size = int(np.round(cfg.hop_size * speed))

    # Calculate the time step for pitch extraction
    time_step = hop_size / cfg.sample_rate * 1000
    # to_pitch_ac takes seconds, hence the extra /1000 below; candidates
    # outside [f0_min, f0_max] are excluded via floor/ceiling.
    f0 = (
        parselmouth.Sound(audio, cfg.sample_rate)
        .to_pitch_ac(
            time_step=time_step / 1000,
            voicing_threshold=0.6,
            pitch_floor=cfg.f0_min,
            pitch_ceiling=cfg.f0_max,
        )
        .selected_array["frequency"]
    )
    return f0
The provided code snippet includes necessary dependencies for implementing the `extract_f1_v_uv` function. Write a Python function `def extract_f1_v_uv( audio_ref, audio_deg, hop_length=256, f0_min=50, f0_max=1100, **kwargs, )` to solve the following problem:
Compute F1 score of voiced/unvoiced accuracy between the predicted and the ground truth audio. audio_ref: path to the ground truth audio. audio_deg: path to the predicted audio. fs: sampling rate. hop_length: hop length. f0_min: lower limit for f0. f0_max: upper limit for f0. pitch_bin: number of bins for f0 quantization. pitch_max: upper limit for f0 quantization. pitch_min: lower limit for f0 quantization. need_mean: subtract the mean value from f0 if "True". method: "dtw" will use dtw algorithm to align the length of the ground truth and predicted audio. "cut" will cut both audios into a same length according to the one with the shorter length.
Here is the function:
def extract_f1_v_uv(
    audio_ref,
    audio_deg,
    hop_length=256,
    f0_min=50,
    f0_max=1100,
    **kwargs,
):
    """Compute F1 score of voiced/unvoiced accuracy between the predicted and the ground truth audio.
    audio_ref: path to the ground truth audio.
    audio_deg: path to the predicted audio.
    fs: sampling rate.
    hop_length: hop length.
    f0_min: lower limit for f0.
    f0_max: upper limit for f0.
    pitch_bin: number of bins for f0 quantization.
    pitch_max: upper limit for f0 quantization.
    pitch_min: lower limit for f0 quantization.
    need_mean: subtract the mean value from f0 if "True".
    method: "dtw" will use dtw algorithm to align the length of the ground truth and predicted audio.
            "cut" will cut both audios into a same length according to the one with the shorter length.

    Returns (TP, FP, FN) as plain ints so the caller can accumulate them
    across utterances before computing the final F1.
    """
    # Load hyperparameters -- the caller packs all options under one "kwargs" key.
    kwargs = kwargs["kwargs"]
    fs = kwargs["fs"]
    method = kwargs["method"]

    # Load audio; fs=None falls back to librosa's default sampling rate.
    if fs != None:
        audio_ref, _ = librosa.load(audio_ref, sr=fs)
        audio_deg, _ = librosa.load(audio_deg, sr=fs)
    else:
        audio_ref, fs = librosa.load(audio_ref)
        audio_deg, fs = librosa.load(audio_deg)

    # Initialize config for the parselmouth-based f0 extractor.
    cfg = JsonHParams()
    cfg.sample_rate = fs
    cfg.hop_size = hop_length
    cfg.f0_min = f0_min
    cfg.f0_max = f0_max
    cfg.pitch_bin = 256
    cfg.pitch_max = f0_max
    cfg.pitch_min = f0_min

    # Compute f0
    f0_ref = get_f0_features_using_parselmouth(
        audio_ref,
        cfg,
    )
    f0_deg = get_f0_features_using_parselmouth(
        audio_deg,
        cfg,
    )

    # Avoid silence: degenerate signals contribute nothing (0 TP/FP/FN).
    min_length = min(len(f0_ref), len(f0_deg))
    if min_length <= 1:
        return 0, 0, 0

    # F0 length alignment
    if method == "cut":
        # Truncate both tracks to the shorter one.
        length = min(len(f0_ref), len(f0_deg))
        f0_ref = f0_ref[:length]
        f0_deg = f0_deg[:length]
    elif method == "dtw":
        # Align with dynamic time warping and compare the warped frame pairs.
        _, wp = librosa.sequence.dtw(f0_ref, f0_deg, backtrack=True)
        f0_gt_new = []
        f0_pred_new = []
        for i in range(wp.shape[0]):
            gt_index = wp[i][0]
            pred_index = wp[i][1]
            f0_gt_new.append(f0_ref[gt_index])
            f0_pred_new.append(f0_deg[pred_index])
        f0_ref = np.array(f0_gt_new)
        f0_deg = np.array(f0_pred_new)
    assert len(f0_ref) == len(f0_deg)

    # Get voiced/unvoiced parts: f0 == 0 marks an unvoiced frame.
    ref_voiced = torch.Tensor([f0_ref != 0]).bool()
    deg_voiced = torch.Tensor([f0_deg != 0]).bool()

    # Compute TP, FP, FN on the voiced masks.
    true_postives = (ref_voiced & deg_voiced).sum()
    false_postives = (~ref_voiced & deg_voiced).sum()
    false_negatives = (ref_voiced & ~deg_voiced).sum()

    return (
        true_postives.detach().cpu().numpy().tolist(),
        false_postives.detach().cpu().numpy().tolist(),
        false_negatives.detach().cpu().numpy().tolist(),
    )
17,582 | import torch
import librosa
import numpy as np
from torchmetrics import PearsonCorrCoef
from utils.util import JsonHParams
from utils.f0 import get_f0_features_using_parselmouth, get_pitch_sub_median
class JsonHParams:
    """Attribute-style container for (possibly nested) hyperparameter dicts.

    Dict values are recursively wrapped in JsonHParams, so nested options can
    be read as ``cfg.section.option`` as well as ``cfg["section"]["option"]``.
    """

    def __init__(self, **kwargs):
        for k, v in kwargs.items():
            if type(v) == dict:
                v = JsonHParams(**v)
            self[k] = v

    def keys(self):
        """Return the stored parameter names (dict view)."""
        return self.__dict__.keys()

    def items(self):
        """Return (name, value) pairs (dict view)."""
        return self.__dict__.items()

    def values(self):
        """Return the stored values (dict view)."""
        return self.__dict__.values()

    def __len__(self):
        return len(self.__dict__)

    def __getitem__(self, key):
        # Delegates to getattr, so h["x"] and h.x are interchangeable.
        return getattr(self, key)

    def __setitem__(self, key, value):
        return setattr(self, key, value)

    def __contains__(self, key):
        return key in self.__dict__

    def __repr__(self):
        return self.__dict__.__repr__()
def get_f0_features_using_parselmouth(audio, cfg, speed=1):
    """Using parselmouth to extract the f0 feature.

    Args:
        audio: 1-D waveform samples at cfg.sample_rate.
        cfg: hyperparameter object providing hop_size, sample_rate,
            f0_min and f0_max.
        speed: time-scaling factor applied to the hop size (default 1).

    Returns:
        f0: numpy array of shape (frame_len,). Frames Praat deems unvoiced
        are reported as 0 -- confirm against parselmouth docs.
    """
    # NOTE(review): `parselmouth` is not imported in this snippet; the
    # enclosing module must provide it -- confirm.
    hop_size = int(np.round(cfg.hop_size * speed))

    # Calculate the time step for pitch extraction
    time_step = hop_size / cfg.sample_rate * 1000
    # to_pitch_ac takes seconds, hence the extra /1000 below; candidates
    # outside [f0_min, f0_max] are excluded via floor/ceiling.
    f0 = (
        parselmouth.Sound(audio, cfg.sample_rate)
        .to_pitch_ac(
            time_step=time_step / 1000,
            voicing_threshold=0.6,
            pitch_floor=cfg.f0_min,
            pitch_ceiling=cfg.f0_max,
        )
        .selected_array["frequency"]
    )
    return f0
def get_pitch_sub_median(f0_hz):
    """Convert an f0 contour (Hz, shape (,T)) to median-centered cents.

    Subtracting the median makes pitch comparisons invariant to register/key.
    """
    # `get_cents` is defined elsewhere in the module (not visible in this
    # snippet) -- presumably Hz -> cents conversion; confirm.
    f0_cent = get_cents(f0_hz)
    return f0_cent - np.median(f0_cent)
The provided code snippet includes necessary dependencies for implementing the `extract_fpc` function. Write a Python function `def extract_fpc( audio_ref, audio_deg, hop_length=256, f0_min=50, f0_max=1100, **kwargs, )` to solve the following problem:
Compute F0 Pearson Distance (FPC) between the predicted and the ground truth audio. audio_ref: path to the ground truth audio. audio_deg: path to the predicted audio. fs: sampling rate. hop_length: hop length. f0_min: lower limit for f0. f0_max: upper limit for f0. pitch_bin: number of bins for f0 quantization. pitch_max: upper limit for f0 quantization. pitch_min: lower limit for f0 quantization. need_mean: subtract the mean value from f0 if "True". method: "dtw" will use dtw algorithm to align the length of the ground truth and predicted audio. "cut" will cut both audios into a same length according to the one with the shorter length.
Here is the function:
def extract_fpc(
    audio_ref,
    audio_deg,
    hop_length=256,
    f0_min=50,
    f0_max=1100,
    **kwargs,
):
    """Compute F0 Pearson Distance (FPC) between the predicted and the ground truth audio.
    audio_ref: path to the ground truth audio.
    audio_deg: path to the predicted audio.
    fs: sampling rate.
    hop_length: hop length.
    f0_min: lower limit for f0.
    f0_max: upper limit for f0.
    pitch_bin: number of bins for f0 quantization.
    pitch_max: upper limit for f0 quantization.
    pitch_min: lower limit for f0 quantization.
    need_mean: subtract the mean value from f0 if "True".
    method: "dtw" will use dtw algorithm to align the length of the ground truth and predicted audio.
            "cut" will cut both audios into a same length according to the one with the shorter length.
    """
    # Load hyperparameters -- the caller packs all options under one "kwargs" key.
    kwargs = kwargs["kwargs"]
    fs = kwargs["fs"]
    method = kwargs["method"]
    need_mean = kwargs["need_mean"]

    # Initialize method: torchmetrics Pearson correlation (imported at module level).
    pearson = PearsonCorrCoef()

    # Load audio; fs=None falls back to librosa's default sampling rate.
    if fs != None:
        audio_ref, _ = librosa.load(audio_ref, sr=fs)
        audio_deg, _ = librosa.load(audio_deg, sr=fs)
    else:
        audio_ref, fs = librosa.load(audio_ref)
        audio_deg, fs = librosa.load(audio_deg)

    # Initialize config for the parselmouth-based f0 extractor.
    cfg = JsonHParams()
    cfg.sample_rate = fs
    cfg.hop_size = hop_length
    cfg.f0_min = f0_min
    cfg.f0_max = f0_max
    cfg.pitch_bin = 256
    cfg.pitch_max = f0_max
    cfg.pitch_min = f0_min

    # Compute f0
    f0_ref = get_f0_features_using_parselmouth(
        audio_ref,
        cfg,
    )
    f0_deg = get_f0_features_using_parselmouth(
        audio_deg,
        cfg,
    )

    # Subtract mean value from f0: median-centered cents, so the correlation
    # measures contour shape rather than absolute register.
    if need_mean:
        f0_ref = torch.from_numpy(f0_ref)
        f0_deg = torch.from_numpy(f0_deg)

        f0_ref = get_pitch_sub_median(f0_ref).numpy()
        f0_deg = get_pitch_sub_median(f0_deg).numpy()

    # Avoid silence: degenerate signals are scored as perfect correlation (1)
    # -- presumably so empty/silent pairs don't drag the average down; confirm.
    min_length = min(len(f0_ref), len(f0_deg))
    if min_length <= 1:
        return 1

    # F0 length alignment
    if method == "cut":
        # Truncate both tracks to the shorter one.
        length = min(len(f0_ref), len(f0_deg))
        f0_ref = f0_ref[:length]
        f0_deg = f0_deg[:length]
    elif method == "dtw":
        # Align with dynamic time warping and compare the warped frame pairs.
        _, wp = librosa.sequence.dtw(f0_ref, f0_deg, backtrack=True)
        f0_gt_new = []
        f0_pred_new = []
        for i in range(wp.shape[0]):
            gt_index = wp[i][0]
            pred_index = wp[i][1]
            f0_gt_new.append(f0_ref[gt_index])
            f0_pred_new.append(f0_deg[pred_index])
        f0_ref = np.array(f0_gt_new)
        f0_deg = np.array(f0_pred_new)
    assert len(f0_ref) == len(f0_deg)

    # Convert to tensor and run the metric on GPU when available.
    f0_ref = torch.from_numpy(f0_ref)
    f0_deg = torch.from_numpy(f0_deg)

    if torch.cuda.is_available():
        device = torch.device("cuda")
        f0_ref = f0_ref.to(device)
        f0_deg = f0_deg.to(device)
        pearson = pearson.to(device)

    return pearson(f0_ref, f0_deg).detach().cpu().numpy().tolist()
17,583 | import math
import librosa
import torch
import numpy as np
from utils.util import JsonHParams
from utils.f0 import get_f0_features_using_parselmouth, get_pitch_sub_median
class JsonHParams:
    """Attribute-style container for (possibly nested) hyperparameter dicts.

    Dict values are recursively wrapped in JsonHParams, so nested options can
    be read as ``cfg.section.option`` as well as ``cfg["section"]["option"]``.
    """

    def __init__(self, **kwargs):
        for k, v in kwargs.items():
            if type(v) == dict:
                v = JsonHParams(**v)
            self[k] = v

    def keys(self):
        """Return the stored parameter names (dict view)."""
        return self.__dict__.keys()

    def items(self):
        """Return (name, value) pairs (dict view)."""
        return self.__dict__.items()

    def values(self):
        """Return the stored values (dict view)."""
        return self.__dict__.values()

    def __len__(self):
        return len(self.__dict__)

    def __getitem__(self, key):
        # Delegates to getattr, so h["x"] and h.x are interchangeable.
        return getattr(self, key)

    def __setitem__(self, key, value):
        return setattr(self, key, value)

    def __contains__(self, key):
        return key in self.__dict__

    def __repr__(self):
        return self.__dict__.__repr__()
def get_f0_features_using_parselmouth(audio, cfg, speed=1):
    """Using parselmouth to extract the f0 feature.

    Args:
        audio: 1-D waveform samples at cfg.sample_rate.
        cfg: hyperparameter object providing hop_size, sample_rate,
            f0_min and f0_max.
        speed: time-scaling factor applied to the hop size (default 1).

    Returns:
        f0: numpy array of shape (frame_len,). Frames Praat deems unvoiced
        are reported as 0 -- confirm against parselmouth docs.
    """
    # NOTE(review): `parselmouth` is not imported in this snippet; the
    # enclosing module must provide it -- confirm.
    hop_size = int(np.round(cfg.hop_size * speed))

    # Calculate the time step for pitch extraction
    time_step = hop_size / cfg.sample_rate * 1000
    # to_pitch_ac takes seconds, hence the extra /1000 below; candidates
    # outside [f0_min, f0_max] are excluded via floor/ceiling.
    f0 = (
        parselmouth.Sound(audio, cfg.sample_rate)
        .to_pitch_ac(
            time_step=time_step / 1000,
            voicing_threshold=0.6,
            pitch_floor=cfg.f0_min,
            pitch_ceiling=cfg.f0_max,
        )
        .selected_array["frequency"]
    )
    return f0
def get_pitch_sub_median(f0_hz):
    """Convert an f0 contour (Hz, shape (,T)) to median-centered cents.

    Subtracting the median makes pitch comparisons invariant to register/key.
    """
    # `get_cents` is defined elsewhere in the module (not visible in this
    # snippet) -- presumably Hz -> cents conversion; confirm.
    f0_cent = get_cents(f0_hz)
    return f0_cent - np.median(f0_cent)
The provided code snippet includes necessary dependencies for implementing the `extract_f0rmse` function. Write a Python function `def extract_f0rmse( audio_ref, audio_deg, hop_length=256, f0_min=50, f0_max=1100, **kwargs, )` to solve the following problem:
Compute F0 Root Mean Square Error (RMSE) between the predicted and the ground truth audio. audio_ref: path to the ground truth audio. audio_deg: path to the predicted audio. fs: sampling rate. hop_length: hop length. f0_min: lower limit for f0. f0_max: upper limit for f0. pitch_bin: number of bins for f0 quantization. pitch_max: upper limit for f0 quantization. pitch_min: lower limit for f0 quantization. need_mean: subtract the mean value from f0 if "True". method: "dtw" will use dtw algorithm to align the length of the ground truth and predicted audio. "cut" will cut both audios into a same length according to the one with the shorter length.
Here is the function:
def extract_f0rmse(
    audio_ref,
    audio_deg,
    hop_length=256,
    f0_min=50,
    f0_max=1100,
    **kwargs,
):
    """Compute F0 Root Mean Square Error (RMSE) between the predicted and the ground truth audio.
    audio_ref: path to the ground truth audio.
    audio_deg: path to the predicted audio.
    fs: sampling rate.
    hop_length: hop length.
    f0_min: lower limit for f0.
    f0_max: upper limit for f0.
    pitch_bin: number of bins for f0 quantization.
    pitch_max: upper limit for f0 quantization.
    pitch_min: lower limit for f0 quantization.
    need_mean: subtract the mean value from f0 if "True".
    method: "dtw" will use dtw algorithm to align the length of the ground truth and predicted audio.
            "cut" will cut both audios into a same length according to the one with the shorter length.
    """
    # Load hyperparameters -- the caller packs all options under one "kwargs" key.
    kwargs = kwargs["kwargs"]
    fs = kwargs["fs"]
    method = kwargs["method"]
    need_mean = kwargs["need_mean"]

    # Load audio; fs=None falls back to librosa's default sampling rate.
    if fs != None:
        audio_ref, _ = librosa.load(audio_ref, sr=fs)
        audio_deg, _ = librosa.load(audio_deg, sr=fs)
    else:
        audio_ref, fs = librosa.load(audio_ref)
        audio_deg, fs = librosa.load(audio_deg)

    # Initialize config for f0 extraction
    cfg = JsonHParams()
    cfg.sample_rate = fs
    cfg.hop_size = hop_length
    cfg.f0_min = f0_min
    cfg.f0_max = f0_max
    cfg.pitch_bin = 256
    cfg.pitch_max = f0_max
    cfg.pitch_min = f0_min

    # Extract f0
    f0_ref = get_f0_features_using_parselmouth(
        audio_ref,
        cfg,
    )
    f0_deg = get_f0_features_using_parselmouth(
        audio_deg,
        cfg,
    )

    # Subtract mean value from f0: median-centered cents, so the error
    # measures contour shape rather than absolute register.
    if need_mean:
        f0_ref = torch.from_numpy(f0_ref)
        f0_deg = torch.from_numpy(f0_deg)

        f0_ref = get_pitch_sub_median(f0_ref).numpy()
        f0_deg = get_pitch_sub_median(f0_deg).numpy()

    # Avoid silence: degenerate signals yield zero error rather than
    # crashing the alignment below.
    min_length = min(len(f0_ref), len(f0_deg))
    if min_length <= 1:
        return 0

    # F0 length alignment
    if method == "cut":
        # Truncate both tracks to the shorter one.
        length = min(len(f0_ref), len(f0_deg))
        f0_ref = f0_ref[:length]
        f0_deg = f0_deg[:length]
    elif method == "dtw":
        # Align with dynamic time warping and compare the warped frame pairs.
        _, wp = librosa.sequence.dtw(f0_ref, f0_deg, backtrack=True)
        f0_gt_new = []
        f0_pred_new = []
        for i in range(wp.shape[0]):
            gt_index = wp[i][0]
            pred_index = wp[i][1]
            f0_gt_new.append(f0_ref[gt_index])
            f0_pred_new.append(f0_deg[pred_index])
        f0_ref = np.array(f0_gt_new)
        f0_deg = np.array(f0_pred_new)
    assert len(f0_ref) == len(f0_deg)

    # Compute RMSE over the aligned frames.
    f0_mse = np.square(np.subtract(f0_ref, f0_deg)).mean()
    f0_rmse = math.sqrt(f0_mse)

    return f0_rmse
17,584 | import math
import librosa
import torch
import numpy as np
from numpy import linalg as LA
from torchmetrics import PearsonCorrCoef
The provided code snippet includes necessary dependencies for implementing the `extract_energy_pearson_coeffcients` function. Write a Python function `def extract_energy_pearson_coeffcients( audio_ref, audio_deg, n_fft=1024, hop_length=256, win_length=1024, **kwargs, )` to solve the following problem:
Compute Energy Pearson Coefficients between the predicted and the ground truth audio. audio_ref: path to the ground truth audio. audio_deg: path to the predicted audio. fs: sampling rate. n_fft: fft size. hop_length: hop length. win_length: window length. method: "dtw" will use dtw algorithm to align the length of the ground truth and predicted audio. "cut" will cut both audios into a same length according to the one with the shorter length. db_scale: the ground truth and predicted audio will be converted to db_scale if "True".
Here is the function:
def extract_energy_pearson_coeffcients(
    audio_ref,
    audio_deg,
    n_fft=1024,
    hop_length=256,
    win_length=1024,
    **kwargs,
):
    """Pearson correlation between frame-level energies of two audio files.

    audio_ref: path to the ground truth audio.
    audio_deg: path to the predicted audio.
    n_fft / hop_length / win_length: STFT parameters.
    kwargs["kwargs"] must provide:
        fs: target sampling rate (None -> librosa's default).
        method: "cut" truncates both sequences to the shorter length;
                "dtw" aligns them along a dynamic-time-warping path.
        db_scale: if True, energies are compared on a dB scale.
    Returns the Pearson coefficient as a plain Python float.
    """
    opts = kwargs["kwargs"]
    fs, method, db_scale = opts["fs"], opts["method"], opts["db_scale"]

    pearson = PearsonCorrCoef()

    # Load both waveforms, resampling when a target rate is given.
    if fs is not None:
        wav_ref, _ = librosa.load(audio_ref, sr=fs)
        wav_deg, _ = librosa.load(audio_deg, sr=fs)
    else:
        wav_ref, fs = librosa.load(audio_ref)
        wav_deg, fs = librosa.load(audio_deg)

    def frame_energy(wav):
        # Frame-level energy = L2 norm of each STFT magnitude frame.
        spec = librosa.stft(
            y=wav, n_fft=n_fft, hop_length=hop_length, win_length=win_length
        )
        return LA.norm(np.abs(spec).T, axis=1)

    energy_ref = frame_energy(wav_ref)
    energy_deg = frame_energy(wav_deg)

    if db_scale:
        energy_ref = 20 * np.log10(energy_ref)
        energy_deg = 20 * np.log10(energy_deg)

    # Length alignment between the two energy sequences.
    if method == "cut":
        n = min(len(energy_ref), len(energy_deg))
        energy_ref, energy_deg = energy_ref[:n], energy_deg[:n]
    elif method == "dtw":
        _, wp = librosa.sequence.dtw(energy_ref, energy_deg, backtrack=True)
        # Each warping-path row is (ref_index, deg_index).
        energy_ref = np.array([energy_ref[i] for i, _ in wp])
        energy_deg = np.array([energy_deg[j] for _, j in wp])
    assert len(energy_ref) == len(energy_deg)

    energy_ref = torch.from_numpy(energy_ref)
    energy_deg = torch.from_numpy(energy_deg)
    if torch.cuda.is_available():
        device = torch.device("cuda")
        energy_ref = energy_ref.to(device)
        energy_deg = energy_deg.to(device)
        pearson = pearson.to(device)

    return pearson(energy_ref, energy_deg).detach().cpu().numpy().tolist()
17,585 | import math
import librosa
import torch
import numpy as np
from numpy import linalg as LA
The provided code snippet includes necessary dependencies for implementing the `extract_energy_rmse` function. Write a Python function `def extract_energy_rmse( audio_ref, audio_deg, n_fft=1024, hop_length=256, win_length=1024, **kwargs, )` to solve the following problem:
Compute Energy Root Mean Square Error (RMSE) between the predicted and the ground truth audio. audio_ref: path to the ground truth audio. audio_deg: path to the predicted audio. fs: sampling rate. n_fft: fft size. hop_length: hop length. win_length: window length. method: "dtw" will use dtw algorithm to align the length of the ground truth and predicted audio. "cut" will cut both audios into a same length according to the one with the shorter length. db_scale: the ground truth and predicted audio will be converted to db_scale if "True".
Here is the function:
def extract_energy_rmse(
    audio_ref,
    audio_deg,
    n_fft=1024,
    hop_length=256,
    win_length=1024,
    **kwargs,
):
    """Root-mean-square error between frame-level energies of two audio files.

    audio_ref: path to the ground truth audio.
    audio_deg: path to the predicted audio.
    n_fft / hop_length / win_length: STFT parameters.
    kwargs["kwargs"] must provide:
        fs: target sampling rate (None -> librosa's default).
        method: "cut" truncates both sequences to the shorter length;
                "dtw" aligns them along a dynamic-time-warping path.
        db_scale: if True, energies are compared on a dB scale.
    Returns the RMSE as a plain Python float.
    """
    opts = kwargs["kwargs"]
    fs, method, db_scale = opts["fs"], opts["method"], opts["db_scale"]

    # Load both waveforms, resampling when a target rate is given.
    if fs is not None:
        wav_ref, _ = librosa.load(audio_ref, sr=fs)
        wav_deg, _ = librosa.load(audio_deg, sr=fs)
    else:
        wav_ref, fs = librosa.load(audio_ref)
        wav_deg, fs = librosa.load(audio_deg)

    def frame_energy(wav):
        # Frame-level energy = L2 norm of each STFT magnitude frame.
        spec = librosa.stft(
            y=wav, n_fft=n_fft, hop_length=hop_length, win_length=win_length
        )
        return LA.norm(np.abs(spec).T, axis=1)

    energy_ref = frame_energy(wav_ref)
    energy_deg = frame_energy(wav_deg)

    if db_scale:
        energy_ref = 20 * np.log10(energy_ref)
        energy_deg = 20 * np.log10(energy_deg)

    # Length alignment between the two energy sequences.
    if method == "cut":
        n = min(len(energy_ref), len(energy_deg))
        energy_ref, energy_deg = energy_ref[:n], energy_deg[:n]
    elif method == "dtw":
        _, wp = librosa.sequence.dtw(energy_ref, energy_deg, backtrack=True)
        # Each warping-path row is (ref_index, deg_index).
        energy_ref = np.array([energy_ref[i] for i, _ in wp])
        energy_deg = np.array([energy_deg[j] for _, j in wp])
    assert len(energy_ref) == len(energy_deg)

    return math.sqrt(np.square(np.subtract(energy_ref, energy_deg)).mean())
17,586 | import os
import numpy as np
import soundfile as sf
import torch
import torch.nn.functional as F
from tqdm import tqdm
import librosa
from evaluation.metrics.similarity.models.RawNetModel import RawNet3
from evaluation.metrics.similarity.models.RawNetBasicBlock import Bottle2neck
from transformers import Wav2Vec2FeatureExtractor, WavLMForXVector
from resemblyzer import VoiceEncoder, preprocess_wav
def extract_rawnet_speaker_embd(
    model, fn: str, n_samples: int, n_segments: int = 10, gpu: bool = False
) -> np.ndarray:
    """Embed one utterance with RawNet3 over evenly spaced fixed-length crops.

    model: a RawNet3 instance in eval mode.
    fn: path to a mono audio file.
    n_samples: crop length in samples (at 16 kHz).
    n_segments: number of crops taken across the utterance.
    gpu: move the crop batch to CUDA before the forward pass.
    Returns the (n_segments, embed_dim) batch of segment embeddings.
    """
    audio, sample_rate = sf.read(fn)
    if len(audio.shape) > 1:
        raise ValueError(
            f"RawNet3 supports mono input only. Input data has a shape of {audio.shape}."
        )

    if sample_rate != 16000:
        audio = librosa.resample(audio, orig_sr=sample_rate, target_sr=16000)

    # Wrap-pad short clips so at least one full window fits.
    if len(audio) < n_samples:
        audio = np.pad(audio, (0, n_samples - len(audio) + 1), "wrap")

    # Evenly spaced crop start positions across the utterance.
    offsets = np.linspace(0, len(audio) - n_samples, num=n_segments)
    batch = np.stack(
        [audio[int(off) : int(off) + n_samples] for off in offsets], axis=0
    ).astype(np.float32)

    batch = torch.from_numpy(batch)
    if gpu:
        batch = batch.to("cuda")
    with torch.no_grad():
        return model(batch)
class RawNet3(nn.Module):
    """RawNet3 speaker-embedding extractor operating on raw waveforms.

    Pipeline: pre-emphasis -> learnable sinc filterbank -> three Bottle2neck
    stages -> 1x1 conv -> attentive statistics pooling -> linear projection
    to an nOut-dimensional embedding.
    """

    def __init__(self, block, model_scale, context, summed, C=1024, **kwargs):
        # block: residual block class (Bottle2neck); model_scale: Res2Net scale;
        # context: give the attention global mean/std context; summed: add
        # inter-stage skip connections; C: base channel width.
        super().__init__()
        nOut = kwargs["nOut"]  # output embedding dimension
        self.context = context
        self.encoder_type = kwargs["encoder_type"]  # "ECA" or "ASP" pooling
        self.log_sinc = kwargs["log_sinc"]  # log-compress sinc outputs
        self.norm_sinc = kwargs["norm_sinc"]  # "mean" or "mean_std" normalization
        self.out_bn = kwargs["out_bn"]  # batch-norm on the final embedding
        self.summed = summed
        self.preprocess = nn.Sequential(
            PreEmphasis(), nn.InstanceNorm1d(1, eps=1e-4, affine=True)
        )
        # Learnable parameterized sinc filterbank front-end (filter length 251).
        self.conv1 = Encoder(
            ParamSincFB(
                C // 4,
                251,
                stride=kwargs["sinc_stride"],
            )
        )
        self.relu = nn.ReLU()
        self.bn1 = nn.BatchNorm1d(C // 4)
        self.layer1 = block(
            C // 4, C, kernel_size=3, dilation=2, scale=model_scale, pool=5
        )
        self.layer2 = block(C, C, kernel_size=3, dilation=3, scale=model_scale, pool=3)
        self.layer3 = block(C, C, kernel_size=3, dilation=4, scale=model_scale)
        self.layer4 = nn.Conv1d(3 * C, 1536, kernel_size=1)
        if self.context:
            attn_input = 1536 * 3  # features + global mean + global std
        else:
            attn_input = 1536
        print("self.encoder_type", self.encoder_type)
        if self.encoder_type == "ECA":
            attn_output = 1536  # per-channel attention weights
        elif self.encoder_type == "ASP":
            attn_output = 1  # one weight shared across channels
        else:
            raise ValueError("Undefined encoder")
        # Attention over time producing pooling weights (softmax over frames).
        self.attention = nn.Sequential(
            nn.Conv1d(attn_input, 128, kernel_size=1),
            nn.ReLU(),
            nn.BatchNorm1d(128),
            nn.Conv1d(128, attn_output, kernel_size=1),
            nn.Softmax(dim=2),
        )
        self.bn5 = nn.BatchNorm1d(3072)
        self.fc6 = nn.Linear(3072, nOut)
        self.bn6 = nn.BatchNorm1d(nOut)
        self.mp3 = nn.MaxPool1d(3)

    def forward(self, x):
        """
        :param x: input mini-batch (bs, samp)
        """
        # The sinc front-end is numerically sensitive; keep it in fp32 even
        # under autocast.
        with torch.cuda.amp.autocast(enabled=False):
            x = self.preprocess(x)
            x = torch.abs(self.conv1(x))
            if self.log_sinc:
                x = torch.log(x + 1e-6)
            if self.norm_sinc == "mean":
                x = x - torch.mean(x, dim=-1, keepdim=True)
            elif self.norm_sinc == "mean_std":
                m = torch.mean(x, dim=-1, keepdim=True)
                s = torch.std(x, dim=-1, keepdim=True)
                s[s < 0.001] = 0.001  # floor the std to avoid blow-up
                x = (x - m) / s
        if self.summed:
            x1 = self.layer1(x)
            x2 = self.layer2(x1)
            # Skip connection: pooled stage-1 output added into stage 3.
            x3 = self.layer3(self.mp3(x1) + x2)
        else:
            x1 = self.layer1(x)
            x2 = self.layer2(x1)
            x3 = self.layer3(x2)
        # Multi-scale aggregation of all three stages (x1 pooled to match).
        x = self.layer4(torch.cat((self.mp3(x1), x2, x3), dim=1))
        x = self.relu(x)
        t = x.size()[-1]
        if self.context:
            # Append frame-invariant global statistics as attention context.
            global_x = torch.cat(
                (
                    x,
                    torch.mean(x, dim=2, keepdim=True).repeat(1, 1, t),
                    torch.sqrt(
                        torch.var(x, dim=2, keepdim=True).clamp(min=1e-4, max=1e4)
                    ).repeat(1, 1, t),
                ),
                dim=1,
            )
        else:
            global_x = x
        # Attentive statistics pooling: weighted mean and std over time.
        w = self.attention(global_x)
        mu = torch.sum(x * w, dim=2)
        sg = torch.sqrt((torch.sum((x**2) * w, dim=2) - mu**2).clamp(min=1e-4, max=1e4))
        x = torch.cat((mu, sg), 1)
        x = self.bn5(x)
        x = self.fc6(x)
        if self.out_bn:
            x = self.bn6(x)
        return x
class Bottle2neck(nn.Module):
    """Res2Net-style bottleneck block with dilated convolutions, optional
    temporal max-pooling, and AFMS feature-map scaling (RawNet3 backbone)."""

    def __init__(
        self,
        inplanes,
        planes,
        kernel_size=None,
        dilation=None,
        scale=4,
        pool=False,
    ):
        super().__init__()
        # Channel width of each Res2Net sub-branch.
        width = int(math.floor(planes / scale))
        self.conv1 = nn.Conv1d(inplanes, width * scale, kernel_size=1)
        self.bn1 = nn.BatchNorm1d(width * scale)
        # One branch passes through untouched, hence scale - 1 convolutions.
        self.nums = scale - 1
        convs = []
        bns = []
        # "Same" padding for the dilated convolutions.
        num_pad = math.floor(kernel_size / 2) * dilation
        for i in range(self.nums):
            convs.append(
                nn.Conv1d(
                    width,
                    width,
                    kernel_size=kernel_size,
                    dilation=dilation,
                    padding=num_pad,
                )
            )
            bns.append(nn.BatchNorm1d(width))
        self.convs = nn.ModuleList(convs)
        self.bns = nn.ModuleList(bns)
        self.conv3 = nn.Conv1d(width * scale, planes, kernel_size=1)
        self.bn3 = nn.BatchNorm1d(planes)
        self.relu = nn.ReLU()
        self.width = width
        # Optional temporal down-sampling applied after the residual sum.
        self.mp = nn.MaxPool1d(pool) if pool else False
        # AFMS attention; presumably defined elsewhere in this module — TODO confirm.
        self.afms = AFMS(planes)
        if inplanes != planes:  # if change in number of filters
            self.residual = nn.Sequential(
                nn.Conv1d(inplanes, planes, kernel_size=1, stride=1, bias=False)
            )
        else:
            self.residual = nn.Identity()

    def forward(self, x):
        residual = self.residual(x)
        out = self.conv1(x)
        out = self.relu(out)
        out = self.bn1(out)
        # Split channels into `scale` groups; process hierarchically so each
        # branch also sees the previous branch's output (Res2Net).
        spx = torch.split(out, self.width, 1)
        for i in range(self.nums):
            if i == 0:
                sp = spx[i]
            else:
                sp = sp + spx[i]
            sp = self.convs[i](sp)
            sp = self.relu(sp)
            sp = self.bns[i](sp)
            if i == 0:
                out = sp
            else:
                out = torch.cat((out, sp), 1)
        # Re-append the untouched last group.
        out = torch.cat((out, spx[self.nums]), 1)
        out = self.conv3(out)
        out = self.relu(out)
        out = self.bn3(out)
        out += residual
        if self.mp:
            out = self.mp(out)
        out = self.afms(out)
        return out
def extract_similarity(path_ref, path_deg, **kwargs):
    """Average speaker-embedding cosine similarity between two audio folders.

    path_ref / path_deg: directories of reference and generated utterances.
    kwargs["kwargs"] must provide:
        model_name: "rawnet" | "wavlm" | "resemblyzer" embedding backend.
        similarity_mode: "pairwith" compares files pairwise in os.listdir
            order; "overall" averages over the full cross product.
    Returns the mean cosine similarity as a numpy scalar.
    """
    kwargs = kwargs["kwargs"]
    model_name = kwargs["model_name"]

    ref_embds = []
    deg_embds = []

    if torch.cuda.is_available():
        device = torch.device("cuda")
    else:
        device = torch.device("cpu")

    if model_name == "rawnet":
        # RawNet3 with its published hyper-parameters; weights are loaded
        # from a fixed local checkpoint path.
        model = RawNet3(
            Bottle2neck,
            model_scale=8,
            context=True,
            summed=True,
            encoder_type="ECA",
            nOut=256,
            out_bn=False,
            sinc_stride=10,
            log_sinc=True,
            norm_sinc="mean",
            grad_mult=1,
        )
        model.load_state_dict(
            torch.load(
                "pretrained/rawnet3/model.pt",
                map_location=lambda storage, loc: storage,
            )["model"]
        )
        model.eval()
        model = model.to(device)

        for file in tqdm(os.listdir(path_ref)):
            # Mean over 10 segment embeddings -> one utterance embedding.
            output = extract_rawnet_speaker_embd(
                model,
                fn=os.path.join(path_ref, file),
                n_samples=48000,
                n_segments=10,
                gpu=torch.cuda.is_available(),
            ).mean(0)
            ref_embds.append(output)

        for file in tqdm(os.listdir(path_deg)):
            output = extract_rawnet_speaker_embd(
                model,
                fn=os.path.join(path_deg, file),
                n_samples=48000,
                n_segments=10,
                gpu=torch.cuda.is_available(),
            ).mean(0)
            deg_embds.append(output)
    elif model_name == "wavlm":
        # Prefer the hub checkpoint; fall back to a local copy (e.g. offline).
        try:
            feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
                "microsoft/wavlm-base-plus-sv"
            )
            model = WavLMForXVector.from_pretrained("microsoft/wavlm-base-plus-sv")
        except:
            feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
                "pretrained/wavlm", sampling_rate=16000
            )
            model = WavLMForXVector.from_pretrained("pretrained/wavlm")
        model = model.to(device)

        for file in tqdm(os.listdir(path_ref)):
            wav_path = os.path.join(path_ref, file)
            wav, _ = librosa.load(wav_path, sr=16000)
            inputs = feature_extractor(
                [wav], padding=True, return_tensors="pt", sampling_rate=16000
            )
            if torch.cuda.is_available():
                for key in inputs.keys():
                    inputs[key] = inputs[key].to(device)
            with torch.no_grad():
                embds = model(**inputs).embeddings
                embds = embds  # no-op assignment, kept as-is
            ref_embds.append(embds[0])

        for file in tqdm(os.listdir(path_deg)):
            wav_path = os.path.join(path_deg, file)
            wav, _ = librosa.load(wav_path, sr=16000)
            inputs = feature_extractor(
                [wav], padding=True, return_tensors="pt", sampling_rate=16000
            )
            if torch.cuda.is_available():
                for key in inputs.keys():
                    inputs[key] = inputs[key].to(device)
            with torch.no_grad():
                embds = model(**inputs).embeddings
                embds = embds  # no-op assignment, kept as-is
            deg_embds.append(embds[0])
    elif model_name == "resemblyzer":
        encoder = VoiceEncoder().to(device)

        for file in tqdm(os.listdir(path_ref)):
            wav_path = os.path.join(path_ref, file)
            wav = preprocess_wav(wav_path)
            output = encoder.embed_utterance(wav)
            ref_embds.append(torch.from_numpy(output).to(device))

        for file in tqdm(os.listdir(path_deg)):
            wav_path = os.path.join(path_deg, file)
            wav = preprocess_wav(wav_path)
            output = encoder.embed_utterance(wav)
            deg_embds.append(torch.from_numpy(output).to(device))

    similarity_mode = kwargs["similarity_mode"]
    scores = []

    if similarity_mode == "pairwith":
        # One score per (ref, deg) pair, matched by listing order.
        for ref_embd, deg_embd in zip(ref_embds, deg_embds):
            scores.append(
                F.cosine_similarity(ref_embd, deg_embd, dim=-1).detach().cpu().numpy()
            )
    elif similarity_mode == "overall":
        # Full cross product of references against degraded utterances.
        for ref_embd in ref_embds:
            for deg_embd in deg_embds:
                scores.append(
                    F.cosine_similarity(ref_embd, deg_embd, dim=-1)
                    .detach()
                    .cpu()
                    .numpy()
                )

    return np.mean(scores)
17,587 | import torch
import torch.nn as nn
from asteroid_filterbanks import Encoder, ParamSincFB
from .RawNetBasicBlock import Bottle2neck, PreEmphasis
class RawNet3(nn.Module):
    """RawNet3 speaker-embedding extractor operating on raw waveforms.

    Pipeline: pre-emphasis -> learnable sinc filterbank -> three Bottle2neck
    stages -> 1x1 conv -> attentive statistics pooling -> linear projection
    to an nOut-dimensional embedding.
    """

    def __init__(self, block, model_scale, context, summed, C=1024, **kwargs):
        # block: residual block class (Bottle2neck); model_scale: Res2Net scale;
        # context: give the attention global mean/std context; summed: add
        # inter-stage skip connections; C: base channel width.
        super().__init__()
        nOut = kwargs["nOut"]  # output embedding dimension
        self.context = context
        self.encoder_type = kwargs["encoder_type"]  # "ECA" or "ASP" pooling
        self.log_sinc = kwargs["log_sinc"]  # log-compress sinc outputs
        self.norm_sinc = kwargs["norm_sinc"]  # "mean" or "mean_std" normalization
        self.out_bn = kwargs["out_bn"]  # batch-norm on the final embedding
        self.summed = summed
        self.preprocess = nn.Sequential(
            PreEmphasis(), nn.InstanceNorm1d(1, eps=1e-4, affine=True)
        )
        # Learnable parameterized sinc filterbank front-end (filter length 251).
        self.conv1 = Encoder(
            ParamSincFB(
                C // 4,
                251,
                stride=kwargs["sinc_stride"],
            )
        )
        self.relu = nn.ReLU()
        self.bn1 = nn.BatchNorm1d(C // 4)
        self.layer1 = block(
            C // 4, C, kernel_size=3, dilation=2, scale=model_scale, pool=5
        )
        self.layer2 = block(C, C, kernel_size=3, dilation=3, scale=model_scale, pool=3)
        self.layer3 = block(C, C, kernel_size=3, dilation=4, scale=model_scale)
        self.layer4 = nn.Conv1d(3 * C, 1536, kernel_size=1)
        if self.context:
            attn_input = 1536 * 3  # features + global mean + global std
        else:
            attn_input = 1536
        print("self.encoder_type", self.encoder_type)
        if self.encoder_type == "ECA":
            attn_output = 1536  # per-channel attention weights
        elif self.encoder_type == "ASP":
            attn_output = 1  # one weight shared across channels
        else:
            raise ValueError("Undefined encoder")
        # Attention over time producing pooling weights (softmax over frames).
        self.attention = nn.Sequential(
            nn.Conv1d(attn_input, 128, kernel_size=1),
            nn.ReLU(),
            nn.BatchNorm1d(128),
            nn.Conv1d(128, attn_output, kernel_size=1),
            nn.Softmax(dim=2),
        )
        self.bn5 = nn.BatchNorm1d(3072)
        self.fc6 = nn.Linear(3072, nOut)
        self.bn6 = nn.BatchNorm1d(nOut)
        self.mp3 = nn.MaxPool1d(3)

    def forward(self, x):
        """
        :param x: input mini-batch (bs, samp)
        """
        # The sinc front-end is numerically sensitive; keep it in fp32 even
        # under autocast.
        with torch.cuda.amp.autocast(enabled=False):
            x = self.preprocess(x)
            x = torch.abs(self.conv1(x))
            if self.log_sinc:
                x = torch.log(x + 1e-6)
            if self.norm_sinc == "mean":
                x = x - torch.mean(x, dim=-1, keepdim=True)
            elif self.norm_sinc == "mean_std":
                m = torch.mean(x, dim=-1, keepdim=True)
                s = torch.std(x, dim=-1, keepdim=True)
                s[s < 0.001] = 0.001  # floor the std to avoid blow-up
                x = (x - m) / s
        if self.summed:
            x1 = self.layer1(x)
            x2 = self.layer2(x1)
            # Skip connection: pooled stage-1 output added into stage 3.
            x3 = self.layer3(self.mp3(x1) + x2)
        else:
            x1 = self.layer1(x)
            x2 = self.layer2(x1)
            x3 = self.layer3(x2)
        # Multi-scale aggregation of all three stages (x1 pooled to match).
        x = self.layer4(torch.cat((self.mp3(x1), x2, x3), dim=1))
        x = self.relu(x)
        t = x.size()[-1]
        if self.context:
            # Append frame-invariant global statistics as attention context.
            global_x = torch.cat(
                (
                    x,
                    torch.mean(x, dim=2, keepdim=True).repeat(1, 1, t),
                    torch.sqrt(
                        torch.var(x, dim=2, keepdim=True).clamp(min=1e-4, max=1e4)
                    ).repeat(1, 1, t),
                ),
                dim=1,
            )
        else:
            global_x = x
        # Attentive statistics pooling: weighted mean and std over time.
        w = self.attention(global_x)
        mu = torch.sum(x * w, dim=2)
        sg = torch.sqrt((torch.sum((x**2) * w, dim=2) - mu**2).clamp(min=1e-4, max=1e4))
        x = torch.cat((mu, sg), 1)
        x = self.bn5(x)
        x = self.fc6(x)
        if self.out_bn:
            x = self.bn6(x)
        return x
class Bottle2neck(nn.Module):
    """Res2Net-style bottleneck block with dilated convolutions, optional
    temporal max-pooling, and AFMS feature-map scaling (RawNet3 backbone)."""

    def __init__(
        self,
        inplanes,
        planes,
        kernel_size=None,
        dilation=None,
        scale=4,
        pool=False,
    ):
        super().__init__()
        # Channel width of each Res2Net sub-branch.
        width = int(math.floor(planes / scale))
        self.conv1 = nn.Conv1d(inplanes, width * scale, kernel_size=1)
        self.bn1 = nn.BatchNorm1d(width * scale)
        # One branch passes through untouched, hence scale - 1 convolutions.
        self.nums = scale - 1
        convs = []
        bns = []
        # "Same" padding for the dilated convolutions.
        num_pad = math.floor(kernel_size / 2) * dilation
        for i in range(self.nums):
            convs.append(
                nn.Conv1d(
                    width,
                    width,
                    kernel_size=kernel_size,
                    dilation=dilation,
                    padding=num_pad,
                )
            )
            bns.append(nn.BatchNorm1d(width))
        self.convs = nn.ModuleList(convs)
        self.bns = nn.ModuleList(bns)
        self.conv3 = nn.Conv1d(width * scale, planes, kernel_size=1)
        self.bn3 = nn.BatchNorm1d(planes)
        self.relu = nn.ReLU()
        self.width = width
        # Optional temporal down-sampling applied after the residual sum.
        self.mp = nn.MaxPool1d(pool) if pool else False
        # AFMS attention; presumably defined elsewhere in this module — TODO confirm.
        self.afms = AFMS(planes)
        if inplanes != planes:  # if change in number of filters
            self.residual = nn.Sequential(
                nn.Conv1d(inplanes, planes, kernel_size=1, stride=1, bias=False)
            )
        else:
            self.residual = nn.Identity()

    def forward(self, x):
        residual = self.residual(x)
        out = self.conv1(x)
        out = self.relu(out)
        out = self.bn1(out)
        # Split channels into `scale` groups; process hierarchically so each
        # branch also sees the previous branch's output (Res2Net).
        spx = torch.split(out, self.width, 1)
        for i in range(self.nums):
            if i == 0:
                sp = spx[i]
            else:
                sp = sp + spx[i]
            sp = self.convs[i](sp)
            sp = self.relu(sp)
            sp = self.bns[i](sp)
            if i == 0:
                out = sp
            else:
                out = torch.cat((out, sp), 1)
        # Re-append the untouched last group.
        out = torch.cat((out, spx[self.nums]), 1)
        out = self.conv3(out)
        out = self.relu(out)
        out = self.bn3(out)
        out += residual
        if self.mp:
            out = self.mp(out)
        out = self.afms(out)
        return out
def MainModel(**kwargs):
    """Factory returning the default RawNet3 configuration (scale-8
    Bottle2neck blocks, context-aware attention, summed skip connections)."""
    return RawNet3(Bottle2neck, model_scale=8, context=True, summed=True, **kwargs)
17,588 | import whisper
import torch
from torchmetrics import WordErrorRate
The provided code snippet includes necessary dependencies for implementing the `extract_wer` function. Write a Python function `def extract_wer( model, **kwargs, )` to solve the following problem:
Compute Word Error Rate (WER) between the predicted and the ground truth audio. content_gt: the ground truth content. audio_ref: path to the ground truth audio. audio_deg: path to the predicted audio. mode: "gt_content" computes the WER between the predicted content obtained from the whisper model and the ground truth content. both content_gt and audio_deg are needed. "gt_audio" computes the WER between the extracted ground truth and predicted contents obtained from the whisper model. both audio_ref and audio_deg are needed.
Here is the function:
def extract_wer(
    model,
    **kwargs,
):
    """Compute Word Error Rate (WER) between predicted and ground-truth speech.

    model: a loaded whisper model used for transcription.
    kwargs["kwargs"] must provide:
        intelligibility_mode:
            "gt_content" — compare the transcription of audio_deg against the
                given ground-truth text (needs content_gt and audio_deg).
            "gt_audio"  — compare the transcriptions of audio_ref and
                audio_deg (needs audio_ref and audio_deg).
        language: "chinese" enables a Mandarin transcription prompt.
    Returns the WER as a plain Python float.

    Bug fix: in the non-Chinese "gt_audio" branch the original transcribed
    audio_deg twice, so the reference text was never taken from audio_ref
    and the WER was trivially 0.
    """
    kwargs = kwargs["kwargs"]
    mode = kwargs["intelligibility_mode"]
    language = kwargs["language"]

    wer = WordErrorRate()
    if torch.cuda.is_available():
        device = torch.device("cuda")
        wer = wer.to(device)

    def clean(text):
        # Strip spaces and punctuation so only spoken content is scored.
        for ch in (" ", ".", "'", "-", ",", "!"):
            text = text.replace(ch, "")
        return text.lower()

    # Get ground truth content
    if mode == "gt_content":
        content_gt = kwargs["content_gt"]
        audio_deg = kwargs["audio_deg"]

        if language == "chinese":
            prompt = "以下是普通话的句子"
            result_deg = model.transcribe(
                audio_deg, language="zh", verbose=True, initial_prompt=prompt
            )
        else:
            result_deg = model.transcribe(audio_deg, verbose=True)
    elif mode == "gt_audio":
        audio_ref = kwargs["audio_ref"]
        audio_deg = kwargs["audio_deg"]

        if language == "chinese":
            prompt = "以下是普通话的句子"
            result_ref = model.transcribe(
                audio_ref, language="zh", verbose=True, initial_prompt=prompt
            )
            result_deg = model.transcribe(
                audio_deg, language="zh", verbose=True, initial_prompt=prompt
            )
        else:
            # Fixed: transcribe the reference audio, not audio_deg again.
            result_ref = model.transcribe(audio_ref, verbose=True)
            result_deg = model.transcribe(audio_deg, verbose=True)

        content_gt = result_ref["text"]

    content_gt = clean(content_gt)

    # Get predicted content
    content_pred = clean(result_deg["text"])

    return wer(content_pred, content_gt).detach().cpu().numpy().tolist()
17,589 | import whisper
import torch
from torchmetrics import CharErrorRate
The provided code snippet includes necessary dependencies for implementing the `extract_cer` function. Write a Python function `def extract_cer( model, **kwargs, )` to solve the following problem:
Compute Character Error Rate (CER) between the predicted and the ground truth audio. content_gt: the ground truth content. audio_ref: path to the ground truth audio. audio_deg: path to the predicted audio. mode: "gt_content" computes the CER between the predicted content obtained from the whisper model and the ground truth content. both content_gt and audio_deg are needed. "gt_audio" computes the CER between the extracted ground truth and predicted contents obtained from the whisper model. both audio_ref and audio_deg are needed.
Here is the function:
def extract_cer(
    model,
    **kwargs,
):
    """Compute Character Error Rate (CER) between predicted and ground-truth speech.

    model: a loaded whisper model used for transcription.
    kwargs["kwargs"] must provide:
        intelligibility_mode:
            "gt_content" — compare the transcription of audio_deg against the
                given ground-truth text (needs content_gt and audio_deg).
            "gt_audio"  — compare the transcriptions of audio_ref and
                audio_deg (needs audio_ref and audio_deg).
        language: "chinese" enables a Mandarin transcription prompt.
    Returns the CER as a plain Python float.

    Bug fix: in the non-Chinese "gt_audio" branch the original transcribed
    audio_deg twice, so the reference text was never taken from audio_ref
    and the CER was trivially 0.
    """
    kwargs = kwargs["kwargs"]
    mode = kwargs["intelligibility_mode"]
    language = kwargs["language"]

    cer = CharErrorRate()
    if torch.cuda.is_available():
        device = torch.device("cuda")
        cer = cer.to(device)

    def clean(text):
        # Strip spaces and punctuation so only spoken content is scored.
        for ch in (" ", ".", "'", "-", ",", "!"):
            text = text.replace(ch, "")
        return text.lower()

    # Get ground truth content
    if mode == "gt_content":
        content_gt = kwargs["content_gt"]
        audio_deg = kwargs["audio_deg"]

        if language == "chinese":
            prompt = "以下是普通话的句子"
            result_deg = model.transcribe(
                audio_deg, language="zh", verbose=True, initial_prompt=prompt
            )
        else:
            result_deg = model.transcribe(audio_deg, verbose=True)
    elif mode == "gt_audio":
        audio_ref = kwargs["audio_ref"]
        audio_deg = kwargs["audio_deg"]

        if language == "chinese":
            prompt = "以下是普通话的句子"
            result_ref = model.transcribe(
                audio_ref, language="zh", verbose=True, initial_prompt=prompt
            )
            result_deg = model.transcribe(
                audio_deg, language="zh", verbose=True, initial_prompt=prompt
            )
        else:
            # Fixed: transcribe the reference audio, not audio_deg again.
            result_ref = model.transcribe(audio_ref, verbose=True)
            result_deg = model.transcribe(audio_deg, verbose=True)

        content_gt = result_ref["text"]

    content_gt = clean(content_gt)

    # Get predicted content
    content_pred = clean(result_deg["text"])

    return cer(content_pred, content_gt).detach().cpu().numpy().tolist()
17,590 | import torch
import librosa
import numpy as np
from torchmetrics import ScaleInvariantSignalDistortionRatio
def extract_si_sdr(audio_ref, audio_deg, **kwargs):
    """Scale-Invariant Signal-to-Distortion Ratio between two audio files.

    audio_ref: path to the ground truth audio.
    audio_deg: path to the predicted audio.
    kwargs["kwargs"] must provide:
        fs: target sampling rate (None -> librosa's default).
        method: "cut" or "dtw" alignment, applied only when lengths differ.
    Returns the SI-SDR as a plain Python float.
    """
    opts = kwargs["kwargs"]
    fs = opts["fs"]
    method = opts["method"]

    metric = ScaleInvariantSignalDistortionRatio()

    # Load both waveforms, resampling when a target rate is given.
    if fs is not None:
        wav_ref, _ = librosa.load(audio_ref, sr=fs)
        wav_deg, _ = librosa.load(audio_deg, sr=fs)
    else:
        wav_ref, fs = librosa.load(audio_ref)
        wav_deg, fs = librosa.load(audio_deg)

    # Length alignment, only needed when the two clips differ.
    if len(wav_ref) != len(wav_deg):
        if method == "cut":
            n = min(len(wav_ref), len(wav_deg))
            wav_ref, wav_deg = wav_ref[:n], wav_deg[:n]
        elif method == "dtw":
            # Align sample-by-sample along the DTW warping path.
            _, wp = librosa.sequence.dtw(wav_ref, wav_deg, backtrack=True)
            wav_ref = np.array([wav_ref[i] for i, _ in wp])
            wav_deg = np.array([wav_deg[j] for _, j in wp])
            assert len(wav_ref) == len(wav_deg)

    ref_t = torch.from_numpy(wav_ref)
    deg_t = torch.from_numpy(wav_deg)
    if torch.cuda.is_available():
        device = torch.device("cuda")
        ref_t = ref_t.to(device)
        deg_t = deg_t.to(device)
        metric = metric.to(device)

    return metric(deg_t, ref_t).detach().cpu().numpy().tolist()
17,591 | from frechet_audio_distance import FrechetAudioDistance
The provided code snippet includes necessary dependencies for implementing the `extract_fad` function. Write a Python function `def extract_fad( audio_dir1, audio_dir2, **kwargs, )` to solve the following problem:
Extract Frechet Audio Distance for two given audio folders. audio_dir1: path to the ground truth audio folder. audio_dir2: path to the predicted audio folder. mode: "vggish", "pann", "clap" for different models.
Here is the function:
def extract_fad(
    audio_dir1,
    audio_dir2,
    **kwargs,
):
    """Frechet Audio Distance between two audio folders.

    audio_dir1: path to the ground truth audio folder.
    audio_dir2: path to the predicted audio folder.
    NOTE: the embedding backend is fixed to "vggish" here, even though the
    library also supports "pann" and "clap" modes.
    """
    scorer = FrechetAudioDistance(
        model_name="vggish",
        use_pca=False,
        use_activation=False,
        verbose=False,
    )
    return scorer.score(audio_dir1, audio_dir2)
17,592 | import librosa
import torch
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `extract_mstft` function. Write a Python function `def extract_mstft( audio_ref, audio_deg, **kwargs, )` to solve the following problem:
Compute Multi-Scale STFT Distance (mstft) between the predicted and the ground truth audio. audio_ref: path to the ground truth audio. audio_deg: path to the predicted audio. fs: sampling rate. med_freq: division frequency for mid frequency parts. high_freq: division frequency for high frequency parts. method: "dtw" will use dtw algorithm to align the length of the ground truth and predicted audio. "cut" will cut both audios into a same length according to the one with the shorter length.
Here is the function:
def extract_mstft(
    audio_ref,
    audio_deg,
    **kwargs,
):
    """Compute Multi-Scale STFT Distance (mstft) between the predicted and the ground truth audio.
    audio_ref: path to the ground truth audio.
    audio_deg: path to the predicted audio.
    fs: sampling rate.
    med_freq: division frequency for mid frequency parts.
    high_freq: division frequency for high frequency parts.
    method: "dtw" will use dtw algorithm to align the length of the ground truth and predicted audio.
        "cut" will cut both audios into a same length according to the one with the shorter length.

    Returns a single float: the sum of the averaged spectral-convergence and
    log-magnitude losses over three STFT resolutions. Both ``.tolist()`` calls
    below produce Python floats (0-dim tensors), so the final ``+`` is numeric
    addition, not list concatenation.
    """
    # Load hyperparameters
    kwargs = kwargs["kwargs"]
    fs = kwargs["fs"]
    method = kwargs["method"]

    # Load audio
    if fs != None:
        audio_ref, _ = librosa.load(audio_ref, sr=fs)
        audio_deg, _ = librosa.load(audio_deg, sr=fs)
    else:
        audio_ref, fs = librosa.load(audio_ref)
        audio_deg, fs = librosa.load(audio_deg)

    # Audio length alignment
    if len(audio_ref) != len(audio_deg):
        if method == "cut":
            length = min(len(audio_ref), len(audio_deg))
            audio_ref = audio_ref[:length]
            audio_deg = audio_deg[:length]
        elif method == "dtw":
            # Warp both signals along the DTW path so they share a time base.
            _, wp = librosa.sequence.dtw(audio_ref, audio_deg, backtrack=True)
            audio_ref_new = []
            audio_deg_new = []
            for i in range(wp.shape[0]):
                ref_index = wp[i][0]
                deg_index = wp[i][1]
                audio_ref_new.append(audio_ref[ref_index])
                audio_deg_new.append(audio_deg[deg_index])
            audio_ref = np.array(audio_ref_new)
            audio_deg = np.array(audio_deg_new)

    assert len(audio_ref) == len(audio_deg)

    # Define loss function
    l1Loss = torch.nn.L1Loss(reduction="mean")

    # Compute distance
    # Three STFT resolutions (n_fft / hop / window), as in multi-resolution
    # STFT losses used by neural vocoders.
    fft_sizes = [1024, 2048, 512]
    hop_sizes = [120, 240, 50]
    win_sizes = [600, 1200, 240]

    audio_ref = torch.from_numpy(audio_ref)
    audio_deg = torch.from_numpy(audio_deg)

    if torch.cuda.is_available():
        device = torch.device("cuda")
        audio_ref = audio_ref.to(device)
        audio_deg = audio_deg.to(device)

    mstft_sc = 0
    mstft_mag = 0

    for n_fft, hop_length, win_length in zip(fft_sizes, hop_sizes, win_sizes):
        # NOTE(review): positional args are (input, n_fft, hop_length,
        # win_length); no window tensor is passed, and return_complex=False is
        # deprecated in newer torch versions — confirm the targeted version.
        spec_ref = torch.stft(
            audio_ref, n_fft, hop_length, win_length, return_complex=False
        )
        spec_deg = torch.stft(
            audio_deg, n_fft, hop_length, win_length, return_complex=False
        )

        real_ref = spec_ref[..., 0]
        imag_ref = spec_ref[..., 1]
        real_deg = spec_deg[..., 0]
        imag_deg = spec_deg[..., 1]

        # Magnitudes, clamped away from zero so the log below stays finite.
        mag_ref = torch.sqrt(
            torch.clamp(real_ref**2 + imag_ref**2, min=1e-7)
        ).transpose(1, 0)
        mag_deg = torch.sqrt(
            torch.clamp(real_deg**2 + imag_deg**2, min=1e-7)
        ).transpose(1, 0)

        # Spectral-convergence loss and log-magnitude L1 loss.
        sc_loss = torch.norm(mag_ref - mag_deg, p="fro") / torch.norm(mag_ref, p="fro")
        mag_loss = l1Loss(torch.log(mag_ref), torch.log(mag_deg))

        mstft_sc += sc_loss
        mstft_mag += mag_loss

    # Normalize distances
    mstft_sc /= len(fft_sizes)
    mstft_mag /= len(fft_sizes)

    return (
        mstft_sc.detach().cpu().numpy().tolist()
        + mstft_mag.detach().cpu().numpy().tolist()
    ) | Compute Multi-Scale STFT Distance (mstft) between the predicted and the ground truth audio. audio_ref: path to the ground truth audio. audio_deg: path to the predicted audio. fs: sampling rate. med_freq: division frequency for mid frequency parts. high_freq: division frequency for high frequency parts. method: "dtw" will use dtw algorithm to align the length of the ground truth and predicted audio. "cut" will cut both audios into a same length according to the one with the shorter length.
17,593 | from pymcd.mcd import Calculate_MCD
The provided code snippet includes necessary dependencies for implementing the `extract_mcd` function. Write a Python function `def extract_mcd(audio_ref, audio_deg, **kwargs)` to solve the following problem:
Extract Mel-Cepstral Distance for a two given audio. Args: audio_ref: The given reference audio. It is an audio path. audio_deg: The given synthesized audio. It is an audio path.
Here is the function:
def extract_mcd(audio_ref, audio_deg, **kwargs):
"""Extract Mel-Cepstral Distance for a two given audio.
Args:
audio_ref: The given reference audio. It is an audio path.
audio_deg: The given synthesized audio. It is an audio path.
"""
# Load hyperparameters
kwargs = kwargs["kwargs"]
fs = kwargs["fs"]
mcd_toolbox = Calculate_MCD(MCD_mode="dtw_sl")
if fs != None:
mcd_toolbox.SAMPLING_RATE = fs
mcd_value = mcd_toolbox.calculate_mcd(audio_ref, audio_deg)
return mcd_value | Extract Mel-Cepstral Distance for a two given audio. Args: audio_ref: The given reference audio. It is an audio path. audio_deg: The given synthesized audio. It is an audio path. |
17,594 | import torch
import librosa
import numpy as np
from torchmetrics.audio.stoi import ShortTimeObjectiveIntelligibility
The provided code snippet includes necessary dependencies for implementing the `extract_stoi` function. Write a Python function `def extract_stoi(audio_ref, audio_deg, **kwargs)` to solve the following problem:
Compute Short-Time Objective Intelligibility between the predicted and the ground truth audio. audio_ref: path to the ground truth audio. audio_deg: path to the predicted audio. fs: sampling rate. method: "dtw" will use dtw algorithm to align the length of the ground truth and predicted audio. "cut" will cut both audios into a same length according to the one with the shorter length.
Here is the function:
def extract_stoi(audio_ref, audio_deg, **kwargs):
"""Compute Short-Time Objective Intelligibility between the predicted and the ground truth audio.
audio_ref: path to the ground truth audio.
audio_deg: path to the predicted audio.
fs: sampling rate.
method: "dtw" will use dtw algorithm to align the length of the ground truth and predicted audio.
"cut" will cut both audios into a same length according to the one with the shorter length.
"""
# Load hyperparameters
kwargs = kwargs["kwargs"]
fs = kwargs["fs"]
method = kwargs["method"]
# Load audio
if fs != None:
audio_ref, _ = librosa.load(audio_ref, sr=fs)
audio_deg, _ = librosa.load(audio_deg, sr=fs)
else:
audio_ref, fs = librosa.load(audio_ref)
audio_deg, fs = librosa.load(audio_deg)
# Initialize method
stoi = ShortTimeObjectiveIntelligibility(fs, extended=False)
# Audio length alignment
if len(audio_ref) != len(audio_deg):
if method == "cut":
length = min(len(audio_ref), len(audio_deg))
audio_ref = audio_ref[:length]
audio_deg = audio_deg[:length]
elif method == "dtw":
_, wp = librosa.sequence.dtw(audio_ref, audio_deg, backtrack=True)
audio_ref_new = []
audio_deg_new = []
for i in range(wp.shape[0]):
ref_index = wp[i][0]
deg_index = wp[i][1]
audio_ref_new.append(audio_ref[ref_index])
audio_deg_new.append(audio_deg[deg_index])
audio_ref = np.array(audio_ref_new)
audio_deg = np.array(audio_deg_new)
assert len(audio_ref) == len(audio_deg)
# Convert to tensor
audio_ref = torch.from_numpy(audio_ref)
audio_deg = torch.from_numpy(audio_deg)
if torch.cuda.is_available():
device = torch.device("cuda")
audio_ref = audio_ref.to(device)
audio_deg = audio_deg.to(device)
stoi = stoi.to(device)
return stoi(audio_deg, audio_ref).detach().cpu().numpy().tolist() | Compute Short-Time Objective Intelligibility between the predicted and the ground truth audio. audio_ref: path to the ground truth audio. audio_deg: path to the predicted audio. fs: sampling rate. method: "dtw" will use dtw algorithm to align the length of the ground truth and predicted audio. "cut" will cut both audios into a same length according to the one with the shorter length. |
17,595 | import librosa
import numpy as np
from pypesq import pesq
The provided code snippet includes necessary dependencies for implementing the `extract_pesq` function. Write a Python function `def extract_pesq(audio_ref, audio_deg, **kwargs)` to solve the following problem:
Extract PESQ for a two given audio. audio1: the given reference audio. It is a numpy array. audio2: the given synthesized audio. It is a numpy array. fs: sampling rate. method: "dtw" will use dtw algorithm to align the length of the ground truth and predicted audio. "cut" will cut both audios into a same length according to the one with the shorter length.
Here is the function:
def extract_pesq(audio_ref, audio_deg, **kwargs):
"""Extract PESQ for a two given audio.
audio1: the given reference audio. It is a numpy array.
audio2: the given synthesized audio. It is a numpy array.
fs: sampling rate.
method: "dtw" will use dtw algorithm to align the length of the ground truth and predicted audio.
"cut" will cut both audios into a same length according to the one with the shorter length.
"""
# Load hyperparameters
kwargs = kwargs["kwargs"]
fs = kwargs["fs"]
method = kwargs["method"]
# Load audio
if fs != None:
audio_ref, _ = librosa.load(audio_ref, sr=fs)
audio_deg, _ = librosa.load(audio_deg, sr=fs)
else:
audio_ref, fs = librosa.load(audio_ref)
audio_deg, fs = librosa.load(audio_deg)
# Resample
if fs != 16000:
audio_ref = librosa.resample(audio_ref, orig_sr=fs, target_sr=16000)
audio_deg = librosa.resample(audio_deg, orig_sr=fs, target_sr=16000)
fs = 16000
# Audio length alignment
if len(audio_ref) != len(audio_deg):
if method == "cut":
length = min(len(audio_ref), len(audio_deg))
audio_ref = audio_ref[:length]
audio_deg = audio_deg[:length]
elif method == "dtw":
_, wp = librosa.sequence.dtw(audio_ref, audio_deg, backtrack=True)
audio_ref_new = []
audio_deg_new = []
for i in range(wp.shape[0]):
ref_index = wp[i][0]
deg_index = wp[i][1]
audio_ref_new.append(audio_ref[ref_index])
audio_deg_new.append(audio_deg[deg_index])
audio_ref = np.array(audio_ref_new)
audio_deg = np.array(audio_deg_new)
assert len(audio_ref) == len(audio_deg)
# Compute pesq
score = pesq(audio_ref, audio_deg, fs)
return score | Extract PESQ for a two given audio. audio1: the given reference audio. It is a numpy array. audio2: the given synthesized audio. It is a numpy array. fs: sampling rate. method: "dtw" will use dtw algorithm to align the length of the ground truth and predicted audio. "cut" will cut both audios into a same length according to the one with the shorter length. |
17,596 | import torch
import librosa
import numpy as np
from torchmetrics import ScaleInvariantSignalNoiseRatio
def extract_si_snr(audio_ref, audio_deg, **kwargs):
    """Compute Scale-Invariant Signal-to-Noise Ratio (SI-SNR) between the
    predicted and the ground truth audio.

    audio_ref: path to the ground truth audio.
    audio_deg: path to the predicted audio.
    kwargs["kwargs"]["fs"]: target sampling rate (None keeps librosa's default).
    kwargs["kwargs"]["method"]: "cut" truncates both signals to the shorter
        length; "dtw" warps both along the DTW alignment path.
    """
    # Load hyperparameters
    kwargs = kwargs["kwargs"]
    fs = kwargs["fs"]
    method = kwargs["method"]

    si_snr = ScaleInvariantSignalNoiseRatio()

    # Load audio (optionally resampled to fs)
    if fs != None:
        audio_ref, _ = librosa.load(audio_ref, sr=fs)
        audio_deg, _ = librosa.load(audio_deg, sr=fs)
    else:
        audio_ref, fs = librosa.load(audio_ref)
        audio_deg, fs = librosa.load(audio_deg)

    # Audio length alignment
    if len(audio_ref) != len(audio_deg):
        if method == "cut":
            length = min(len(audio_ref), len(audio_deg))
            audio_ref = audio_ref[:length]
            audio_deg = audio_deg[:length]
        elif method == "dtw":
            # Warp both signals along the DTW path (repeats samples as needed).
            _, wp = librosa.sequence.dtw(audio_ref, audio_deg, backtrack=True)
            audio_ref_new = []
            audio_deg_new = []
            for i in range(wp.shape[0]):
                ref_index = wp[i][0]
                deg_index = wp[i][1]
                audio_ref_new.append(audio_ref[ref_index])
                audio_deg_new.append(audio_deg[deg_index])
            audio_ref = np.array(audio_ref_new)
            audio_deg = np.array(audio_deg_new)

    assert len(audio_ref) == len(audio_deg)

    # Score on GPU when available; the metric takes (preds, target).
    audio_ref = torch.from_numpy(audio_ref)
    audio_deg = torch.from_numpy(audio_deg)
    if torch.cuda.is_available():
        device = torch.device("cuda")
        audio_ref = audio_ref.to(device)
        audio_deg = audio_deg.to(device)
        si_snr = si_snr.to(device)

    return si_snr(audio_deg, audio_ref).detach().cpu().numpy().tolist() | null
17,597 | import numpy as np
import scipy.signal as sig
import copy
import librosa
def bandpower(ps, mode="time"):
    """Estimate band power, mirroring MATLAB's ``bandpower``.

    See https://de.mathworks.com/help/signal/ref/bandpower.html

    Args:
        ps: 1-D array; time-domain samples when ``mode == "time"``,
            or a power spectral density when ``mode == "psd"``.
        mode: "time" computes the mean squared amplitude; "psd" integrates
            (sums) the given density.

    Returns:
        The estimated band power as a float.

    Raises:
        ValueError: if ``mode`` is neither "time" nor "psd".
    """
    if mode == "time":
        # Mean power = squared L2 norm averaged over the signal length.
        return np.linalg.norm(ps) ** 2.0 / len(ps)
    if mode == "psd":
        return sum(ps)
    # Previously an unknown mode fell through and silently returned None;
    # fail loudly instead.
    raise ValueError(f"Unsupported bandpower mode: {mode!r}")
def getIndizesAroundPeak(arr, peakIndex, searchWidth=1000):
    """Collect the bin indices that form the lobe of a peak.

    Starting at ``peakIndex``, walk outwards in both directions while the
    values keep decreasing (or staying equal); stop at the first rise or at
    ``searchWidth`` steps. Returns the de-duplicated indices as an array.
    """
    collected = set()
    for step in (1, -1):
        prev = arr[peakIndex]
        for offset in range(searchWidth):
            idx = peakIndex + step * offset
            if idx < 0 or idx >= len(arr):
                break
            value = arr[idx]
            if value > prev:
                # Value rose again: the lobe ends here on this side.
                break
            collected.add(int(idx))
            prev = value
    return np.array(list(collected))
def getPeakInArea(psd, faxis, estimation, searchWidthHz=10):
    """
    returns bin and frequency of the maximum in an area
    """
    # Convert the +/- searchWidthHz window around the estimate into bin
    # indices (freqToBin is defined elsewhere in this module).
    binLow = freqToBin(faxis, estimation - searchWidthHz)
    binHi = freqToBin(faxis, estimation + searchWidthHz)
    # Strongest bin inside the (inclusive) window, re-offset to absolute bins.
    peakbin = binLow + np.argmax(psd[binLow : binHi + 1])
    return peakbin, faxis[peakbin]
def getHarmonics(fund, sr, nHarmonics=6, aliased=False):
    """Return the frequencies of the first ``nHarmonics`` harmonics of
    ``fund`` (the fundamental itself is excluded; multipliers start at 2).

    With ``aliased=False``, harmonics above Nyquist (sr/2) are dropped.
    With ``aliased=True``, they are folded back into [0, sr/2] instead
    (odd Nyquist zones mirror around sr/2).
    """
    multipliers = np.arange(2, nHarmonics + 2)
    freqs = fund * multipliers
    if aliased:
        # Fold frequencies across Nyquist: even zones wrap, odd zones mirror.
        zone = np.floor(freqs / (sr / 2))
        folded = np.mod(freqs, sr / 2)
        mirrored = zone % 2 == 1
        folded[mirrored] = (sr / 2) - folded[mirrored]
        return folded
    # Mark and remove harmonics beyond Nyquist.
    freqs[freqs > sr / 2] = -1
    return np.delete(freqs, freqs == -1)
The provided code snippet includes necessary dependencies for implementing the `extract_snr` function. Write a Python function `def extract_snr(audio, sr=None)` to solve the following problem:
Extract Signal-to-Noise Ratio for a given audio.
Here is the function:
def extract_snr(audio, sr=None):
    """Extract Signal-to-Noise Ratio for a given audio.

    Estimates the fundamental frequency and its harmonics from the
    periodogram, treats the remaining spectrum as noise, and returns a tuple
    ``(snr_db, noise_power_db)``.
    """
    if sr != None:
        audio, _ = librosa.load(audio, sr=sr)
    else:
        audio, sr = librosa.load(audio, sr=sr)

    faxis, ps = sig.periodogram(
        audio, fs=sr, window=("kaiser", 38)
    )  # get periodogram, parametrized like in matlab

    fundBin = np.argmax(
        ps
    )  # estimate fundamental at maximum amplitude, get the bin number
    fundIndizes = getIndizesAroundPeak(
        ps, fundBin
    )  # get bin numbers around fundamental peak
    fundFrequency = faxis[fundBin]  # frequency of fundamental

    nHarmonics = 18
    harmonicFs = getHarmonics(
        fundFrequency, sr, nHarmonics=nHarmonics, aliased=True
    )  # get harmonic frequencies

    harmonicBorders = np.zeros([2, nHarmonics], dtype=np.int16).T
    fullHarmonicBins = np.array([], dtype=np.int16)
    fullHarmonicBinList = []
    harmPeakFreqs = []
    harmPeaks = []
    for i, harmonic in enumerate(harmonicFs):
        # Search +/- 10% of the fundamental around each predicted harmonic.
        searcharea = 0.1 * fundFrequency
        estimation = harmonic

        binNum, freq = getPeakInArea(ps, faxis, estimation, searcharea)
        harmPeakFreqs.append(freq)
        harmPeaks.append(ps[binNum])
        # Collect the whole lobe of each harmonic peak.
        allBins = getIndizesAroundPeak(ps, binNum, searchWidth=1000)
        fullHarmonicBins = np.append(fullHarmonicBins, allBins)
        fullHarmonicBinList.append(allBins)
        harmonicBorders[i, :] = [allBins[0], allBins[-1]]

    fundIndizes.sort()
    # NOTE(review): the slice excludes fundIndizes[-1]; presumably intended to
    # be inclusive — confirm.
    pFund = bandpower(ps[fundIndizes[0] : fundIndizes[-1]])  # get power of fundamental

    # Zero out fundamental and harmonic bins, then backfill them with the
    # median of the remaining (noise) spectrum before integrating.
    noisePrepared = copy.copy(ps)
    noisePrepared[fundIndizes] = 0
    noisePrepared[fullHarmonicBins] = 0
    noiseMean = np.median(noisePrepared[noisePrepared != 0])
    noisePrepared[fundIndizes] = noiseMean
    noisePrepared[fullHarmonicBins] = noiseMean

    noisePower = bandpower(noisePrepared)
    r = 10 * np.log10(pFund / noisePower)

    return r, 10 * np.log10(noisePower) | Extract Signal-to-Noise Ratio for a given audio.
17,598 | import librosa
from scipy import signal
The provided code snippet includes necessary dependencies for implementing the `extract_ltas` function. Write a Python function `def extract_ltas(audio, fs=None, n_fft=1024, hop_length=256)` to solve the following problem:
Extract Long-Term Average Spectrum for a given audio.
Here is the function:
def extract_ltas(audio, fs=None, n_fft=1024, hop_length=256):
"""Extract Long-Term Average Spectrum for a given audio."""
if fs != None:
y, _ = librosa.load(audio, sr=fs)
else:
y, fs = librosa.load(audio)
frequency, density = signal.welch(
x=y, fs=fs, window="hann", nperseg=hop_length, nfft=n_fft
)
return frequency, density | Extract Long-Term Average Spectrum for a given audio. |
17,599 | import torch
import librosa
from utils.util import JsonHParams
from utils.f0 import get_f0_features_using_parselmouth, get_pitch_sub_median
from utils.mel import extract_mel_features
class JsonHParams:
    """Attribute-style wrapper around keyword arguments.

    Nested plain dicts are converted recursively, so ``cfg.a.b`` works for
    ``JsonHParams(a={"b": 1})``. Dict-like access (``cfg["a"]``, ``in``,
    ``len``, ``keys``/``items``/``values``) is supported as well.
    """

    def __init__(self, **kwargs):
        for key, value in kwargs.items():
            # Only plain dicts are wrapped (subclasses are kept as-is).
            if type(value) == dict:
                value = JsonHParams(**value)
            setattr(self, key, value)

    def keys(self):
        return self.__dict__.keys()

    def items(self):
        return self.__dict__.items()

    def values(self):
        return self.__dict__.values()

    def __len__(self):
        return len(self.__dict__)

    def __getitem__(self, key):
        return getattr(self, key)

    def __setitem__(self, key, value):
        return setattr(self, key, value)

    def __contains__(self, key):
        return key in self.__dict__

    def __repr__(self):
        return repr(self.__dict__)
def get_f0_features_using_parselmouth(audio, cfg, speed=1):
    """Using parselmouth to extract the f0 feature.
    Args:
        audio: 1-D waveform samples.
        cfg: config providing hop_size, sample_rate, f0_min, f0_max.
        speed(default=1): playback-speed factor applied to the hop size.
    Returns:
        f0: numpy array of shape (frame_len,); 0 for unvoiced frames.
    """
    # NOTE(review): an earlier docstring also promised pitch_coarse, but only
    # f0 is returned here.
    hop_size = int(np.round(cfg.hop_size * speed))

    # Calculate the time step for pitch extraction
    time_step = hop_size / cfg.sample_rate * 1000
    f0 = (
        parselmouth.Sound(audio, cfg.sample_rate)
        .to_pitch_ac(
            time_step=time_step / 1000,
            voicing_threshold=0.6,
            pitch_floor=cfg.f0_min,
            pitch_ceiling=cfg.f0_max,
        )
        .selected_array["frequency"]
    )
    return f0
def extract_mel_features(
    y,
    cfg,
    center=False,
):
    """Extract mel features

    Args:
        y (tensor): audio data in tensor, expected within [-1.0, 1.0]
        cfg (dict): configuration in cfg.preprocess (sample_rate, n_fft,
            n_mel, fmin, fmax, hop_size, win_size)
        center (bool, optional): In STFT, whether t-th frame is centered at
            time t*hop_length. Defaults to False.

    Returns:
        tensor: a tensor containing the mel feature calculated based on STFT result
    """
    # Warn (but do not fail) on out-of-range samples.
    if torch.min(y) < -1.0:
        print("min value is ", torch.min(y))
    if torch.max(y) > 1.0:
        print("max value is ", torch.max(y))

    global mel_basis, hann_window
    # Cache key combines fmax and device. BUGFIX: the previous check tested
    # the raw `cfg.fmax` value against these string keys, so it never matched
    # and the filterbank/window were rebuilt on every call.
    mel_key = str(cfg.fmax) + "_" + str(y.device)
    if mel_key not in mel_basis:
        mel = librosa_mel_fn(
            sr=cfg.sample_rate,
            n_fft=cfg.n_fft,
            n_mels=cfg.n_mel,
            fmin=cfg.fmin,
            fmax=cfg.fmax,
        )
        mel_basis[mel_key] = torch.from_numpy(mel).float().to(y.device)
        hann_window[str(y.device)] = torch.hann_window(cfg.win_size).to(y.device)

    # Reflect-pad so the frame count matches the hop-aligned expectation.
    y = torch.nn.functional.pad(
        y.unsqueeze(1),
        (int((cfg.n_fft - cfg.hop_size) / 2), int((cfg.n_fft - cfg.hop_size) / 2)),
        mode="reflect",
    )
    y = y.squeeze(1)

    # complex tensor as default, then use view_as_real for future pytorch compatibility
    spec = torch.stft(
        y,
        cfg.n_fft,
        hop_length=cfg.hop_size,
        win_length=cfg.win_size,
        window=hann_window[str(y.device)],
        center=center,
        pad_mode="reflect",
        normalized=False,
        onesided=True,
        return_complex=True,
    )
    spec = torch.view_as_real(spec)
    # Magnitude with a small epsilon for numerical stability.
    spec = torch.sqrt(spec.pow(2).sum(-1) + (1e-9))

    spec = torch.matmul(mel_basis[mel_key], spec)
    spec = spectral_normalize_torch(spec)

    return spec.squeeze(0)
The provided code snippet includes necessary dependencies for implementing the `extract_spr` function. Write a Python function `def extract_spr( audio, fs=None, hop_length=256, win_length=1024, n_fft=1024, n_mels=128, f0_min=37, f0_max=1000, pitch_bin=256, pitch_max=1100.0, pitch_min=50.0, )` to solve the following problem:
Compute Singing Power Ratio (SPR) from a given audio. audio: path to the audio. fs: sampling rate. hop_length: hop length. win_length: window length. n_mels: number of mel filters. f0_min: lower limit for f0. f0_max: upper limit for f0. pitch_bin: number of bins for f0 quantization. pitch_max: upper limit for f0 quantization. pitch_min: lower limit for f0 quantization.
Here is the function:
def extract_spr(
    audio,
    fs=None,
    hop_length=256,
    win_length=1024,
    n_fft=1024,
    n_mels=128,
    f0_min=37,
    f0_max=1000,
    pitch_bin=256,
    pitch_max=1100.0,
    pitch_min=50.0,
):
    """Compute Singing Power Ratio (SPR) from a given audio.
    audio: path to the audio.
    fs: sampling rate.
    hop_length: hop length.
    win_length: window length.
    n_mels: number of mel filters.
    f0_min: lower limit for f0.
    f0_max: upper limit for f0.
    pitch_bin: number of bins for f0 quantization.
    pitch_max: upper limit for f0 quantization.
    pitch_min: lower limit for f0 quantization.

    Returns the mean per-voiced-frame difference between the 0-2 kHz band
    peak and the 2-4 kHz band peak of the log-mel spectrogram, or False when
    no voiced frame is found.
    """
    # Load audio
    if fs != None:
        audio, _ = librosa.load(audio, sr=fs)
    else:
        audio, fs = librosa.load(audio)
    audio = torch.from_numpy(audio)

    # Initialize config
    cfg = JsonHParams()
    cfg.sample_rate = fs
    cfg.hop_size = hop_length
    cfg.win_size = win_length
    cfg.n_fft = n_fft
    cfg.n_mel = n_mels
    cfg.f0_min = f0_min
    cfg.f0_max = f0_max
    cfg.pitch_bin = pitch_bin
    cfg.pitch_max = pitch_max
    cfg.pitch_min = pitch_min

    # Extract mel spectrograms
    # Band 1: 2-4 kHz (the "singing formant" region).
    cfg.fmin = 2000
    cfg.fmax = 4000

    mel1 = extract_mel_features(
        y=audio.unsqueeze(0),
        cfg=cfg,
    ).squeeze(0)

    # Band 2: 0-2 kHz.
    cfg.fmin = 0
    cfg.fmax = 2000

    mel2 = extract_mel_features(
        y=audio.unsqueeze(0),
        cfg=cfg,
    ).squeeze(0)

    # Voicing decision comes from parselmouth f0 (f0 <= 1 means unvoiced).
    # NOTE(review): audio is a torch tensor here; confirm parselmouth.Sound
    # accepts it (it typically expects a numpy array).
    f0 = get_f0_features_using_parselmouth(
        audio,
        cfg,
    )

    # Mel length alignment
    length = min(len(f0), mel1.shape[-1])
    f0 = f0[:length]
    mel1 = mel1[:, :length]
    mel2 = mel2[:, :length]

    # Compute SPR
    res = []
    for i in range(mel1.shape[-1]):
        # Skip unvoiced frames.
        if f0[i] <= 1:
            continue

        chunk1 = mel1[:, i]
        chunk2 = mel2[:, i]
        max1 = max(chunk1.numpy())
        max2 = max(chunk2.numpy())

        # Difference of band peaks; values are log-mel, so this is a ratio
        # in log domain.
        tmp_res = max2 - max1
        res.append(tmp_res)

    if len(res) == 0:
        return False
    else:
        return sum(res) / len(res) | Compute Singing Power Ratio (SPR) from a given audio. audio: path to the audio. fs: sampling rate. hop_length: hop length. win_length: window length. n_mels: number of mel filters. f0_min: lower limit for f0. f0_max: upper limit for f0. pitch_bin: number of bins for f0 quantization. pitch_max: upper limit for f0 quantization. pitch_min: lower limit for f0 quantization.
17,600 | import os
from tqdm import tqdm
from text.g2p_module import G2PModule, LexiconModule
from text.symbol_table import SymbolTable
class SymbolTable(Generic[Symbol]):
"""SymbolTable that maps symbol IDs, found on the FSA arcs to
actual objects. These objects can be arbitrary Python objects
that can serve as keys in a dictionary (i.e. they need to be
hashable and immutable).
The SymbolTable can only be read to/written from disk if the
symbols are strings.
"""
_id2sym: Dict[int, Symbol] = field(default_factory=dict)
"""Map an integer to a symbol.
"""
_sym2id: Dict[Symbol, int] = field(default_factory=dict)
"""Map a symbol to an integer.
"""
_next_available_id: int = 1
"""A helper internal field that helps adding new symbols
to the table efficiently.
"""
eps: Symbol = "<eps>"
"""Null symbol, always mapped to index 0.
"""
def __post_init__(self):
assert all(self._sym2id[sym] == idx for idx, sym in self._id2sym.items())
assert all(self._id2sym[idx] == sym for sym, idx in self._sym2id.items())
assert 0 not in self._id2sym or self._id2sym[0] == self.eps
self._next_available_id = max(self._id2sym, default=0) + 1
self._id2sym.setdefault(0, self.eps)
self._sym2id.setdefault(self.eps, 0)
def from_str(s: str) -> "SymbolTable":
"""Build a symbol table from a string.
The string consists of lines. Every line has two fields separated
by space(s), tab(s) or both. The first field is the symbol and the
second the integer id of the symbol.
Args:
s:
The input string with the format described above.
Returns:
An instance of :class:`SymbolTable`.
"""
id2sym: Dict[int, str] = dict()
sym2id: Dict[str, int] = dict()
for line in s.split("\n"):
fields = line.split()
if len(fields) == 0:
continue # skip empty lines
assert (
len(fields) == 2
), f"Expect a line with 2 fields. Given: {len(fields)}"
sym, idx = fields[0], int(fields[1])
assert sym not in sym2id, f"Duplicated symbol {sym}"
assert idx not in id2sym, f"Duplicated id {idx}"
id2sym[idx] = sym
sym2id[sym] = idx
eps = id2sym.get(0, "<eps>")
return SymbolTable(_id2sym=id2sym, _sym2id=sym2id, eps=eps)
def from_file(filename: str) -> "SymbolTable":
"""Build a symbol table from file.
Every line in the symbol table file has two fields separated by
space(s), tab(s) or both. The following is an example file:
.. code-block::
<eps> 0
a 1
b 2
c 3
Args:
filename:
Name of the symbol table file. Its format is documented above.
Returns:
An instance of :class:`SymbolTable`.
"""
with open(filename, "r", encoding="utf-8") as f:
return SymbolTable.from_str(f.read().strip())
def to_str(self) -> str:
"""
Returns:
Return a string representation of this object. You can pass
it to the method ``from_str`` to recreate an identical object.
"""
s = ""
for idx, symbol in sorted(self._id2sym.items()):
s += f"{symbol} {idx}\n"
return s
def to_file(self, filename: str):
"""Serialize the SymbolTable to a file.
Every line in the symbol table file has two fields separated by
space(s), tab(s) or both. The following is an example file:
.. code-block::
<eps> 0
a 1
b 2
c 3
Args:
filename:
Name of the symbol table file. Its format is documented above.
"""
with open(filename, "w") as f:
for idx, symbol in sorted(self._id2sym.items()):
print(symbol, idx, file=f)
def add(self, symbol: Symbol, index: Optional[int] = None) -> int:
"""Add a new symbol to the SymbolTable.
Args:
symbol:
The symbol to be added.
index:
Optional int id to which the symbol should be assigned.
If it is not available, a ValueError will be raised.
Returns:
The int id to which the symbol has been assigned.
"""
# Already in the table? Return its ID.
if symbol in self._sym2id:
return self._sym2id[symbol]
# Specific ID not provided - use next available.
if index is None:
index = self._next_available_id
# Specific ID provided but not available.
if index in self._id2sym:
raise ValueError(
f"Cannot assign id '{index}' to '{symbol}' - "
f"already occupied by {self._id2sym[index]}"
)
self._sym2id[symbol] = index
self._id2sym[index] = symbol
# Update next available ID if needed
if self._next_available_id <= index:
self._next_available_id = index + 1
return index
def get(self, k: Union[int, Symbol]) -> Union[Symbol, int]:
"""Get a symbol for an id or get an id for a symbol
Args:
k:
If it is an id, it tries to find the symbol corresponding
to the id; if it is a symbol, it tries to find the id
corresponding to the symbol.
Returns:
An id or a symbol depending on the given `k`.
"""
if isinstance(k, int):
return self._id2sym[k]
else:
return self._sym2id[k]
def merge(self, other: "SymbolTable") -> "SymbolTable":
"""Create a union of two SymbolTables.
Raises an AssertionError if the same IDs are occupied by
different symbols.
Args:
other:
A symbol table to merge with ``self``.
Returns:
A new symbol table.
"""
self._check_compatible(other)
return SymbolTable(
_id2sym={**self._id2sym, **other._id2sym},
_sym2id={**self._sym2id, **other._sym2id},
eps=self.eps,
)
def _check_compatible(self, other: "SymbolTable") -> None:
# Epsilon compatibility
assert self.eps == other.eps, (
f"Mismatched epsilon symbol: " f"{self.eps} != {other.eps}"
)
# IDs compatibility
common_ids = set(self._id2sym).intersection(other._id2sym)
for idx in common_ids:
assert self[idx] == other[idx], (
f"ID conflict for id: {idx}, "
f'self[idx] = "{self[idx]}", '
f'other[idx] = "{other[idx]}"'
)
# Symbols compatibility
common_symbols = set(self._sym2id).intersection(other._sym2id)
for sym in common_symbols:
assert self[sym] == other[sym], (
f"ID conflict for id: {sym}, "
f'self[sym] = "{self[sym]}", '
f'other[sym] = "{other[sym]}"'
)
def __getitem__(self, item: Union[int, Symbol]) -> Union[Symbol, int]:
return self.get(item)
def __contains__(self, item: Union[int, Symbol]) -> bool:
if isinstance(item, int):
return item in self._id2sym
else:
return item in self._sym2id
def __len__(self) -> int:
return len(self._id2sym)
def __eq__(self, other: "SymbolTable") -> bool:
if len(self) != len(other):
return False
for s in self.symbols:
if self[s] != other[s]:
return False
return True
def ids(self) -> List[int]:
"""Returns a list of integer IDs corresponding to the symbols."""
ans = list(self._id2sym.keys())
ans.sort()
return ans
def symbols(self) -> List[Symbol]:
"""Returns a list of symbols (e.g., strings) corresponding to
the integer IDs.
"""
ans = list(self._sym2id.keys())
ans.sort()
return ans
def save_all_dataset_phone_symbols_to_table(self, cfg, dataset):
    """Merge every dataset's phone-symbol dictionary and write the unified
    symbol table back to each dataset's symbols file.

    Args:
        cfg: config object; uses cfg.preprocess.processed_dir and
            cfg.preprocess.symbols_dict.
        dataset: iterable of dataset names whose symbol files are merged.

    NOTE(review): `self` is unused; this reads like a module-level utility —
    confirm whether it really belongs on a class.
    """
    # phone symbols dict
    phone_symbols = set()
    for dataset_name in dataset:
        phone_symbols_file = os.path.join(
            cfg.preprocess.processed_dir, dataset_name, cfg.preprocess.symbols_dict
        )

        # load and merge saved phone symbols
        assert os.path.exists(phone_symbols_file)
        phone_symbol_dict_saved = SymbolTable.from_file(
            phone_symbols_file
        )._sym2id.keys()
        phone_symbols.update(set(phone_symbol_dict_saved))

    # save all phone symbols to each dataset
    # IDs are assigned in sorted order so every dataset gets an identical table.
    phone_symbol_dict = SymbolTable()
    for s in sorted(list(phone_symbols)):
        phone_symbol_dict.add(s)
    for dataset_name in dataset:
        phone_symbols_file = os.path.join(
            cfg.preprocess.processed_dir, dataset_name, cfg.preprocess.symbols_dict
        )
        phone_symbol_dict.to_file(phone_symbols_file) | null
17,601 | import os
import torch
import numpy as np
import json
from tqdm import tqdm
from sklearn.preprocessing import StandardScaler
from utils.io import save_feature, save_txt, save_torch_audio
from utils.util import has_existed
from utils.tokenizer import extract_encodec_token
from utils.stft import TacotronSTFT
from utils.dsp import compress, audio_to_label
from utils.data_utils import remove_outlier
from preprocessors.metadata import replace_augment_name
from scipy.interpolate import interp1d
from utils.mel import (
extract_mel_features,
extract_linear_features,
extract_mel_features_tts,
)
def extract_utt_acoustic_features_tts(dataset_output, cfg, utt):
    """Extract acoustic features from utterances (in single process)

    Args:
        dataset_output (str): directory to store acoustic features
        cfg (dict): dictionary that stores configurations
        utt (dict): utterance info including dataset, singer, uid:{singer}_{song}_{index},
            path to utternace, duration, utternace index
    """
    from utils import audio, f0, world, duration

    uid = utt["Uid"]
    wav_path = utt["Path"]
    # Prefer the dataset-local raw copy (wav first, then flac) when it exists.
    if os.path.exists(os.path.join(dataset_output, cfg.preprocess.raw_data)):
        wav_path = os.path.join(
            dataset_output, cfg.preprocess.raw_data, utt["Singer"], uid + ".wav"
        )
        if not os.path.exists(wav_path):
            wav_path = os.path.join(
                dataset_output, cfg.preprocess.raw_data, utt["Singer"], uid + ".flac"
            )

        assert os.path.exists(wav_path)

    with torch.no_grad():
        # Load audio data into tensor with sample rate of the config file
        wav_torch, _ = audio.load_audio_torch(wav_path, cfg.preprocess.sample_rate)
        wav = wav_torch.cpu().numpy()

        # extract features
        if cfg.preprocess.extract_duration:
            durations, phones, start, end = duration.get_duration(
                utt, wav, cfg.preprocess
            )
            save_feature(dataset_output, cfg.preprocess.duration_dir, uid, durations)
            save_txt(dataset_output, cfg.preprocess.lab_dir, uid, phones)
            # Trim the waveform to the aligned [start, end) span.
            wav = wav[start:end].astype(np.float32)
            wav_torch = torch.from_numpy(wav).to(wav_torch.device)

        if cfg.preprocess.extract_linear_spec:
            from utils.mel import extract_linear_features

            linear = extract_linear_features(wav_torch.unsqueeze(0), cfg.preprocess)
            save_feature(
                dataset_output, cfg.preprocess.linear_dir, uid, linear.cpu().numpy()
            )

        if cfg.preprocess.extract_mel:
            from utils.mel import extract_mel_features

            if cfg.preprocess.mel_extract_mode == "taco":
                _stft = TacotronSTFT(
                    sampling_rate=cfg.preprocess.sample_rate,
                    win_length=cfg.preprocess.win_size,
                    hop_length=cfg.preprocess.hop_size,
                    filter_length=cfg.preprocess.n_fft,
                    n_mel_channels=cfg.preprocess.n_mel,
                    mel_fmin=cfg.preprocess.fmin,
                    mel_fmax=cfg.preprocess.fmax,
                )
                mel = extract_mel_features_tts(
                    wav_torch.unsqueeze(0), cfg.preprocess, taco=True, _stft=_stft
                )
                # Keep only as many frames as the phone durations cover.
                if cfg.preprocess.extract_duration:
                    mel = mel[:, : sum(durations)]
            else:
                mel = extract_mel_features(wav_torch.unsqueeze(0), cfg.preprocess)
            save_feature(dataset_output, cfg.preprocess.mel_dir, uid, mel.cpu().numpy())

        if cfg.preprocess.extract_energy:
            if (
                cfg.preprocess.energy_extract_mode == "from_mel"
                and cfg.preprocess.extract_mel
            ):
                # Frame energy derived from the (log-)mel spectrogram.
                energy = (mel.exp() ** 2).sum(0).sqrt().cpu().numpy()
            elif cfg.preprocess.energy_extract_mode == "from_waveform":
                energy = audio.energy(wav, cfg.preprocess)
            elif cfg.preprocess.energy_extract_mode == "from_tacotron_stft":
                _stft = TacotronSTFT(
                    sampling_rate=cfg.preprocess.sample_rate,
                    win_length=cfg.preprocess.win_size,
                    hop_length=cfg.preprocess.hop_size,
                    filter_length=cfg.preprocess.n_fft,
                    n_mel_channels=cfg.preprocess.n_mel,
                    mel_fmin=cfg.preprocess.fmin,
                    mel_fmax=cfg.preprocess.fmax,
                )
                _, energy = audio.get_energy_from_tacotron(wav, _stft)
            else:
                assert cfg.preprocess.energy_extract_mode in [
                    "from_mel",
                    "from_waveform",
                    "from_tacotron_stft",
                ], f"{cfg.preprocess.energy_extract_mode} not in supported energy_extract_mode [from_mel, from_waveform, from_tacotron_stft]"
            if cfg.preprocess.extract_duration:
                energy = energy[: sum(durations)]
                # Phone-level energy averaged over each phone's frames.
                phone_energy = avg_phone_feature(energy, durations)
                save_feature(
                    dataset_output, cfg.preprocess.phone_energy_dir, uid, phone_energy
                )

            save_feature(dataset_output, cfg.preprocess.energy_dir, uid, energy)

        if cfg.preprocess.extract_pitch:
            pitch = f0.get_f0(wav, cfg.preprocess)
            if cfg.preprocess.extract_duration:
                pitch = pitch[: sum(durations)]
                # Phone-level pitch with interpolation across unvoiced frames.
                phone_pitch = avg_phone_feature(pitch, durations, interpolation=True)
                save_feature(
                    dataset_output, cfg.preprocess.phone_pitch_dir, uid, phone_pitch
                )
            save_feature(dataset_output, cfg.preprocess.pitch_dir, uid, pitch)

            if cfg.preprocess.extract_uv:
                assert isinstance(pitch, np.ndarray)
                # Voiced/unvoiced mask: frames whose f0 is nonzero are voiced.
                uv = pitch != 0
                save_feature(dataset_output, cfg.preprocess.uv_dir, uid, uv)

        if cfg.preprocess.extract_audio:
            save_torch_audio(
                dataset_output,
                cfg.preprocess.audio_dir,
                uid,
                wav_torch,
                cfg.preprocess.sample_rate,
            )

        if cfg.preprocess.extract_label:
            if cfg.preprocess.is_mu_law:
                # compress audio
                wav = compress(wav, cfg.preprocess.bits)

            label = audio_to_label(wav, cfg.preprocess.bits)
            save_feature(dataset_output, cfg.preprocess.label_dir, uid, label)

        if cfg.preprocess.extract_acoustic_token:
            if cfg.preprocess.acoustic_token_extractor == "Encodec":
                codes = extract_encodec_token(wav_path)
                save_feature(
                    dataset_output, cfg.preprocess.acoustic_token_dir, uid, codes
                )
def extract_utt_acoustic_features_svc(dataset_output, cfg, utt):
    # Task-specific alias for singing-voice-conversion (SVC) preprocessing:
    # delegates directly to the shared acoustic-feature extractor.
    __extract_utt_acoustic_features(dataset_output, cfg, utt)
def extract_utt_acoustic_features_tta(dataset_output, cfg, utt):
    # Task-specific alias for text-to-audio (TTA) preprocessing:
    # delegates directly to the shared acoustic-feature extractor.
    __extract_utt_acoustic_features(dataset_output, cfg, utt)
def extract_utt_acoustic_features_vocoder(dataset_output, cfg, utt):
    """Extract acoustic features from utterances for vocoder training (in single process)

    Args:
        dataset_output (str): directory to store acoustic features
        cfg (dict): dictionary that stores configurations
        utt (dict): utterance info including dataset, singer, uid:{singer}_{song}_{index},
            path to utterance, duration, utterance index
    """
    from utils import audio, f0

    uid = utt["Uid"]
    wav_path = utt["Path"]

    with torch.no_grad():
        # Load audio data into tensor with sample rate of the config file
        wav_torch, _ = audio.load_audio_torch(wav_path, cfg.preprocess.sample_rate)
        wav = wav_torch.cpu().numpy()

        # extract features
        if cfg.preprocess.extract_mel:
            from utils.mel import extract_mel_features

            mel = extract_mel_features(wav_torch.unsqueeze(0), cfg.preprocess)
            save_feature(dataset_output, cfg.preprocess.mel_dir, uid, mel.cpu().numpy())

        if cfg.preprocess.extract_energy:
            if (
                cfg.preprocess.energy_extract_mode == "from_mel"
                and cfg.preprocess.extract_mel
            ):
                # Frame energy as the L2 norm over mel channels (mel is log-scale,
                # hence the exp() back to linear amplitude first).
                energy = (mel.exp() ** 2).sum(0).sqrt().cpu().numpy()
            elif cfg.preprocess.energy_extract_mode == "from_waveform":
                energy = audio.energy(wav, cfg.preprocess)
            else:
                # Bug fix: the old assert let "from_mel" with extract_mel=False slip
                # through (the mode was in the asserted list) and the code then
                # crashed later on the unbound `energy`. Fail fast with an accurate
                # message instead; the old message also claimed "from_tacotron_stft"
                # support that this function never had.
                raise AssertionError(
                    f"Unsupported energy configuration: energy_extract_mode="
                    f"{cfg.preprocess.energy_extract_mode} "
                    f"(supported: 'from_mel' with extract_mel enabled, 'from_waveform')"
                )
            save_feature(dataset_output, cfg.preprocess.energy_dir, uid, energy)

        if cfg.preprocess.extract_pitch:
            pitch = f0.get_f0(wav, cfg.preprocess)
            save_feature(dataset_output, cfg.preprocess.pitch_dir, uid, pitch)

            if cfg.preprocess.extract_uv:
                # Unvoiced mask: frames whose F0 is exactly zero are unvoiced.
                assert isinstance(pitch, np.ndarray)
                uv = pitch != 0
                save_feature(dataset_output, cfg.preprocess.uv_dir, uid, uv)

        if cfg.preprocess.extract_amplitude_phase:
            from utils.mel import amplitude_phase_spectrum

            log_amplitude, phase, real, imaginary = amplitude_phase_spectrum(
                wav_torch.unsqueeze(0), cfg.preprocess
            )
            save_feature(
                dataset_output, cfg.preprocess.log_amplitude_dir, uid, log_amplitude
            )
            save_feature(dataset_output, cfg.preprocess.phase_dir, uid, phase)
            save_feature(dataset_output, cfg.preprocess.real_dir, uid, real)
            save_feature(dataset_output, cfg.preprocess.imaginary_dir, uid, imaginary)

        if cfg.preprocess.extract_audio:
            save_feature(dataset_output, cfg.preprocess.audio_dir, uid, wav)

        if cfg.preprocess.extract_label:
            if cfg.preprocess.is_mu_law:
                # compress audio with mu-law companding before quantizing to labels
                wav = compress(wav, cfg.preprocess.bits)
            label = audio_to_label(wav, cfg.preprocess.bits)
            save_feature(dataset_output, cfg.preprocess.label_dir, uid, label)
The provided code snippet includes necessary dependencies for implementing the `extract_utt_acoustic_features_parallel` function. Write a Python function `def extract_utt_acoustic_features_parallel(metadata, dataset_output, cfg, n_workers=1)` to solve the following problem:
Extract acoustic features from utterances using multiprocessing Args: metadata (dict): dictionary that stores data in train.json and test.json files dataset_output (str): directory to store acoustic features cfg (dict): dictionary that stores configurations n_workers (int, optional): num of processes to extract features in parallel. Defaults to 1. Returns: list: acoustic features
Here is the function:
def extract_utt_acoustic_features_parallel(metadata, dataset_output, cfg, n_workers=1):
    """Extract acoustic features for a list of utterances.

    Despite the name, the current implementation processes utterances
    sequentially; ``n_workers`` is kept for interface compatibility and is
    currently ignored.

    Args:
        metadata (dict): dictionary that stores data in train.json and test.json files
        dataset_output (str): directory to store acoustic features
        cfg (dict): dictionary that stores configurations
        n_workers (int, optional): num of processes to extract features in parallel.
            Defaults to 1. Currently unused.
    """
    for utt in tqdm(metadata):
        # The task types are mutually exclusive, so dispatch with elif.
        if cfg.task_type == "tts":
            extract_utt_acoustic_features_tts(dataset_output, cfg, utt)
        elif cfg.task_type == "svc":
            extract_utt_acoustic_features_svc(dataset_output, cfg, utt)
        elif cfg.task_type == "vocoder":
            extract_utt_acoustic_features_vocoder(dataset_output, cfg, utt)
        elif cfg.task_type == "tta":
            extract_utt_acoustic_features_tta(dataset_output, cfg, utt)
17,602 | import os
import torch
import numpy as np
import json
from tqdm import tqdm
from sklearn.preprocessing import StandardScaler
from utils.io import save_feature, save_txt, save_torch_audio
from utils.util import has_existed
from utils.tokenizer import extract_encodec_token
from utils.stft import TacotronSTFT
from utils.dsp import compress, audio_to_label
from utils.data_utils import remove_outlier
from preprocessors.metadata import replace_augment_name
from scipy.interpolate import interp1d
from utils.mel import (
extract_mel_features,
extract_linear_features,
extract_mel_features_tts,
)
def load_mel_extrema(cfg, dataset_name):
    """Load the per-channel mel min/max statistics computed during preprocessing.

    Returns:
        (mel_min, mel_max): two arrays of shape (n_mels,)
    """
    stats_dir = os.path.join(
        cfg.processed_dir, dataset_name, cfg.mel_min_max_stats_dir
    )
    mel_min = np.load(os.path.join(stats_dir, "mel_min.npy"))
    mel_max = np.load(os.path.join(stats_dir, "mel_max.npy"))
    return mel_min, mel_max
def normalize_mel_channel(mel, mel_min, mel_max):
    """Scale each mel channel into [-1, 1] using per-channel extrema.

    mel: (n_mels, T)
    mel_min, mel_max: (n_mels)
    """
    # Broadcast the (n_mels,) extrema against the (n_mels, T) spectrogram.
    lo = mel_min[..., None]
    hi = mel_max[..., None]
    # ZERO keeps the denominator strictly positive for constant channels.
    return (mel - lo) / (hi - lo + ZERO) * 2 - 1
The provided code snippet includes necessary dependencies for implementing the `cal_normalized_mel` function. Write a Python function `def cal_normalized_mel(mel, dataset_name, cfg)` to solve the following problem:
mel: (n_mels, T)
Here is the function:
def cal_normalized_mel(mel, dataset_name, cfg):
    """Normalize a mel spectrogram channel-wise into [-1, 1].

    mel: (n_mels, T)
    """
    # Per-channel extrema, each of shape (n_mels,), precomputed for the dataset.
    channel_min, channel_max = load_mel_extrema(cfg, dataset_name)
    return normalize_mel_channel(mel, channel_min, channel_max)
17,603 | import os
import torch
import numpy as np
import json
from tqdm import tqdm
from sklearn.preprocessing import StandardScaler
from utils.io import save_feature, save_txt, save_torch_audio
from utils.util import has_existed
from utils.tokenizer import extract_encodec_token
from utils.stft import TacotronSTFT
from utils.dsp import compress, audio_to_label
from utils.data_utils import remove_outlier
from preprocessors.metadata import replace_augment_name
from scipy.interpolate import interp1d
from utils.mel import (
extract_mel_features,
extract_linear_features,
extract_mel_features_tts,
)
def load_mel_extrema(cfg, dataset_name):
    """Read the per-channel mel min/max arrays saved during preprocessing."""
    data_dir = os.path.join(cfg.processed_dir, dataset_name, cfg.mel_min_max_stats_dir)
    return (
        np.load(os.path.join(data_dir, "mel_min.npy")),
        np.load(os.path.join(data_dir, "mel_max.npy")),
    )
def denormalize_mel_channel(mel, mel_min, mel_max):
    """Invert the per-channel [-1, 1] normalization back to the mel scale.

    mel: (n_mels, T); mel_min, mel_max: (n_mels)
    """
    lo = mel_min[..., None]
    hi = mel_max[..., None]
    # Map [-1, 1] -> [0, 1], then rescale by the (ZERO-padded) channel range.
    return (mel + 1) / 2 * (hi - lo + ZERO) + lo
The provided code snippet includes necessary dependencies for implementing the `denorm_for_pred_mels` function. Write a Python function `def denorm_for_pred_mels(cfg, dataset_name, split, pred)` to solve the following problem:
Args: pred: a list whose every element is (frame_len, n_mels) Return: similar like pred
Here is the function:
def denorm_for_pred_mels(cfg, dataset_name, split, pred):
    """De-normalize predicted mel spectrograms back to the original scale.

    Args:
        pred: a list whose every element is (frame_len, n_mels)
    Return:
        similar like pred
    """
    mel_min, mel_max = load_mel_extrema(cfg.preprocess, dataset_name)
    # Each prediction arrives as (T, n_mels); denormalize channel-wise on the
    # transposed view, then transpose back to the input layout.
    return [denormalize_mel_channel(m.T, mel_min, mel_max).T for m in pred]
17,604 | import os
import torch
import numpy as np
import json
from tqdm import tqdm
from sklearn.preprocessing import StandardScaler
from utils.io import save_feature, save_txt, save_torch_audio
from utils.util import has_existed
from utils.tokenizer import extract_encodec_token
from utils.stft import TacotronSTFT
from utils.dsp import compress, audio_to_label
from utils.data_utils import remove_outlier
from preprocessors.metadata import replace_augment_name
from scipy.interpolate import interp1d
from utils.mel import (
extract_mel_features,
extract_linear_features,
extract_mel_features_tts,
)
def load_normalized(feat_dir, dataset_name, cfg):
    """Load precomputed normalization statistics for a feature directory.

    Returns:
        (mean, std, min_value, max_value) read from "<feat_dir>_stat.npy".
    """
    stats_path = os.path.join(
        cfg.preprocess.processed_dir, dataset_name, f"{feat_dir}_stat.npy"
    )
    # The stat file stores the four scalars in (min, max, mean, std) order.
    min_value, max_value, mean, std = np.load(stats_path)
    return mean, std, min_value, max_value
17,605 | import torch
from torch.optim import Optimizer
from typing import List, Optional, Tuple, Union
def calc_lr(step, dim_embed, warmup_steps):
    """Noam learning-rate schedule: linear warmup, then inverse-sqrt decay,
    scaled by the inverse square root of the embedding dimension."""
    scale = dim_embed ** (-0.5)
    warmup_lr = step * warmup_steps ** (-1.5)
    decay_lr = step ** (-0.5)
    # During warmup the linear term is smaller; afterwards the decay term wins.
    return scale * min(decay_lr, warmup_lr)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.