id: int64 (0 to 190k)
prompt: string (lengths 21 to 13.4M)
docstring: string (lengths 1 to 12k)
181,949
import math import os import re import subprocess from contextlib import redirect_stdout from fairseq import options from fairseq_cli import eval_lm, preprocess def write_reprocessed( sources, hypos, targets, source_outfile, hypo_outfile, target_outfile, right_to_left=False, prefix_len=None, bpe_symbol=None, target_prefix_frac=None, source_prefix_frac=None, ): def eval_lm( models: List[fairseq.models.FairseqModel], source_dictionary: fairseq.data.Dictionary, batch_iterator: Iterable, post_process: Optional[str] = None, output_word_probs: bool = False, output_word_stats: bool = False, target_dictionary: Optional[fairseq.data.Dictionary] = None, softmax_batch: int = 0, remove_bos_token: bool = False, device: Optional[torch.device] = None, ): def lm_scoring( preprocess_directory, bpe_status, gen_output, pre_gen, cur_lm_dict, cur_lm_name, cur_language_model, cur_lm_bpe_code, batch_size, lm_score_file, target_lang, source_lang, prefix_len=None, ): if prefix_len is not None: assert ( bpe_status == "different" ), "bpe status must be different to use prefix len" if bpe_status == "no bpe": # run lm on output without bpe write_reprocessed( gen_output.no_bpe_source, gen_output.no_bpe_hypo, gen_output.no_bpe_target, pre_gen + "/rescore_data_no_bpe.de", pre_gen + "/rescore_data_no_bpe.en", pre_gen + "/reference_file_no_bpe", ) preprocess_lm_param = [ "--only-source", "--trainpref", pre_gen + "/rescore_data_no_bpe." + target_lang, "--srcdict", cur_lm_dict, "--destdir", preprocess_directory, ] preprocess_parser = options.get_preprocessing_parser() input_args = preprocess_parser.parse_args(preprocess_lm_param) preprocess.main(input_args) eval_lm_param = [ preprocess_directory, "--path", cur_language_model, "--output-word-probs", "--batch-size", str(batch_size), "--max-tokens", "1024", "--sample-break-mode", "eos", "--gen-subset", "train", ] eval_lm_parser = options.get_eval_lm_parser() input_args = options.parse_args_and_arch(eval_lm_parser, eval_lm_param) with open(lm_score_file, "w") as f: with redirect_stdout(f): eval_lm.main(input_args) elif bpe_status == "shared": preprocess_lm_param = [ "--only-source", "--trainpref", pre_gen + "/rescore_data." + target_lang, "--srcdict", cur_lm_dict, "--destdir", preprocess_directory, ] preprocess_parser = options.get_preprocessing_parser() input_args = preprocess_parser.parse_args(preprocess_lm_param) preprocess.main(input_args) eval_lm_param = [ preprocess_directory, "--path", cur_language_model, "--output-word-probs", "--batch-size", str(batch_size), "--sample-break-mode", "eos", "--gen-subset", "train", ] eval_lm_parser = options.get_eval_lm_parser() input_args = options.parse_args_and_arch(eval_lm_parser, eval_lm_param) with open(lm_score_file, "w") as f: with redirect_stdout(f): eval_lm.main(input_args) elif bpe_status == "different": rescore_file = pre_gen + "/rescore_data_no_bpe" rescore_bpe = pre_gen + "/rescore_data_new_bpe" rescore_file += "." rescore_bpe += "." 
write_reprocessed( gen_output.no_bpe_source, gen_output.no_bpe_hypo, gen_output.no_bpe_target, rescore_file + source_lang, rescore_file + target_lang, pre_gen + "/reference_file_no_bpe", bpe_symbol=None, ) # apply LM bpe to nbest list bpe_src_param = [ "-c", cur_lm_bpe_code, "--input", rescore_file + target_lang, "--output", rescore_bpe + target_lang, ] subprocess.call( [ "python", os.path.join( os.path.dirname(__file__), "subword-nmt/subword_nmt/apply_bpe.py" ), ] + bpe_src_param, shell=False, ) # uncomment to use fastbpe instead of subword-nmt bpe # bpe_src_param = [rescore_bpe+target_lang, rescore_file+target_lang, cur_lm_bpe_code] # subprocess.call(["/private/home/edunov/fastBPE/fast", "applybpe"] + bpe_src_param, shell=False) preprocess_dir = preprocess_directory preprocess_lm_param = [ "--only-source", "--trainpref", rescore_bpe + target_lang, "--srcdict", cur_lm_dict, "--destdir", preprocess_dir, ] preprocess_parser = options.get_preprocessing_parser() input_args = preprocess_parser.parse_args(preprocess_lm_param) preprocess.main(input_args) eval_lm_param = [ preprocess_dir, "--path", cur_language_model, "--output-word-probs", "--batch-size", str(batch_size), "--max-tokens", "1024", "--sample-break-mode", "eos", "--gen-subset", "train", ] eval_lm_parser = options.get_eval_lm_parser() input_args = options.parse_args_and_arch(eval_lm_parser, eval_lm_param) with open(lm_score_file, "w") as f: with redirect_stdout(f): eval_lm.main(input_args)
null
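The lm_scoring prompt above builds fairseq CLI argument lists in Python and captures the language model's word-level scores by redirecting stdout into a file. A minimal sketch of those two plumbing tricks, using a stand-in function instead of fairseq's eval_lm.main:

```python
import subprocess
from contextlib import redirect_stdout

# Stand-in for fairseq_cli.eval_lm.main, which prints per-word scores to stdout.
def fake_eval_lm_main():
    print("word scores would appear here")

# Capture everything the entry point prints into the score file.
with open("lm_score_file.txt", "w") as f:
    with redirect_stdout(f):
        fake_eval_lm_main()

# subprocess.call with shell=False takes the argv list directly, as the prompt
# does when invoking subword-nmt's apply_bpe.py on the hypothesis file.
subprocess.call(["python", "--version"], shell=False)
```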
181,961
from typing import Dict, List, NamedTuple, Optional import torch import torch.nn as nn from examples.simultaneous_translation.modules.monotonic_transformer_layer import ( TransformerMonotonicDecoderLayer, TransformerMonotonicEncoderLayer, ) from fairseq.models import ( register_model, register_model_architecture, ) from fairseq.models.transformer import ( TransformerModel, TransformerEncoder, TransformerDecoder, base_architecture, transformer_iwslt_de_en, transformer_vaswani_wmt_en_de_big, tiny_architecture ) from torch import Tensor def base_monotonic_architecture(args): base_architecture(args) args.encoder_unidirectional = getattr(args, "encoder_unidirectional", False) "transformer_monotonic", "transformer_monotonic_iwslt_de_en" def transformer_iwslt_de_en(args): args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 1024) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 4) args.encoder_layers = getattr(args, "encoder_layers", 6) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512) args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 1024) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 4) args.decoder_layers = getattr(args, "decoder_layers", 6) base_architecture(args) def transformer_monotonic_iwslt_de_en(args): transformer_iwslt_de_en(args) base_monotonic_architecture(args)
null
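The architecture functions in this row rely on the getattr(args, name, default) idiom: values the user already set on args win, and anything missing falls back to the registered default. A tiny illustration with hypothetical values:

```python
from argparse import Namespace

args = Namespace(encoder_embed_dim=256)  # user override from the command line

# Values already present on args are kept; missing ones receive the default.
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)            # stays 256
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 1024)   # set to 1024

print(args.encoder_embed_dim, args.encoder_ffn_embed_dim)  # 256 1024
```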
181,969
import torch def prob_check(tensor, eps=1e-10): assert not torch.isnan(tensor).any(), ( "Nan in a probability tensor." ) # Add the eps here to prevent errors introduced by precision assert tensor.le(1.0 + eps).all() and tensor.ge(0.0 - eps).all(), ( "Incorrect values in a probability tensor" ", 0.0 <= tensor <= 1.0" )
null
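A short usage sketch of prob_check as defined above: softmax outputs always lie in [0, 1], so the assertions pass silently.

```python
import torch

def prob_check(tensor, eps=1e-10):
    assert not torch.isnan(tensor).any(), "Nan in a probability tensor."
    # The eps tolerance absorbs small floating-point precision errors.
    assert tensor.le(1.0 + eps).all() and tensor.ge(0.0 - eps).all(), (
        "Incorrect values in a probability tensor, 0.0 <= tensor <= 1.0"
    )

probs = torch.softmax(torch.randn(4, 5), dim=-1)  # well-formed probabilities
prob_check(probs)                                 # passes without error
```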
181,971
import torch The provided code snippet includes necessary dependencies for implementing the `moving_sum` function. Write a Python function `def moving_sum(x, start_idx: int, end_idx: int)` to solve the following problem: From MONOTONIC CHUNKWISE ATTENTION https://arxiv.org/pdf/1712.05382.pdf Equation (18) x = [x_1, x_2, ..., x_N] MovingSum(x, start_idx, end_idx)_n = Sigma_{m=n−(start_idx−1)}^{n+end_idx-1} x_m for n in {1, 2, 3, ..., N} x : src_len, batch_size start_idx : start idx end_idx : end idx Example src_len = 5 batch_size = 3 x = [[ 0, 5, 10], [ 1, 6, 11], [ 2, 7, 12], [ 3, 8, 13], [ 4, 9, 14]] MovingSum(x, 3, 1) = [[ 0, 5, 10], [ 1, 11, 21], [ 3, 18, 33], [ 6, 21, 36], [ 9, 24, 39]] MovingSum(x, 1, 3) = [[ 3, 18, 33], [ 6, 21, 36], [ 9, 24, 39], [ 7, 17, 27], [ 4, 9, 14]] Here is the function: def moving_sum(x, start_idx: int, end_idx: int): """ From MONOTONIC CHUNKWISE ATTENTION https://arxiv.org/pdf/1712.05382.pdf Equation (18) x = [x_1, x_2, ..., x_N] MovingSum(x, start_idx, end_idx)_n = Sigma_{m=n−(start_idx−1)}^{n+end_idx-1} x_m for n in {1, 2, 3, ..., N} x : src_len, batch_size start_idx : start idx end_idx : end idx Example src_len = 5 batch_size = 3 x = [[ 0, 5, 10], [ 1, 6, 11], [ 2, 7, 12], [ 3, 8, 13], [ 4, 9, 14]] MovingSum(x, 3, 1) = [[ 0, 5, 10], [ 1, 11, 21], [ 3, 18, 33], [ 6, 21, 36], [ 9, 24, 39]] MovingSum(x, 1, 3) = [[ 3, 18, 33], [ 6, 21, 36], [ 9, 24, 39], [ 7, 17, 27], [ 4, 9, 14]] """ # TODO: Make dimension configurable assert start_idx > 0 and end_idx > 0 batch_size, tgt_len, src_len = x.size() x = x.view(-1, src_len).unsqueeze(1) # batch_size, 1, src_len moving_sum_weight = torch.ones([1, 1, end_idx + start_idx - 1]).type_as(x) moving_sum = torch.nn.functional.conv1d( x, moving_sum_weight, padding=start_idx + end_idx - 1 ).squeeze(1) moving_sum = moving_sum[:, end_idx:-start_idx] assert src_len == moving_sum.size(1) assert batch_size * tgt_len == moving_sum.size(0) moving_sum = moving_sum.view(batch_size, tgt_len, src_len) return moving_sum
From MONOTONIC CHUNKWISE ATTENTION https://arxiv.org/pdf/1712.05382.pdf Equation (18) x = [x_1, x_2, ..., x_N] MovingSum(x, start_idx, end_idx)_n = Sigma_{m=n−(start_idx−1)}^{n+end_idx-1} x_m for n in {1, 2, 3, ..., N} x : src_len, batch_size start_idx : start idx end_idx : end idx Example src_len = 5 batch_size = 3 x = [[ 0, 5, 10], [ 1, 6, 11], [ 2, 7, 12], [ 3, 8, 13], [ 4, 9, 14]] MovingSum(x, 3, 1) = [[ 0, 5, 10], [ 1, 11, 21], [ 3, 18, 33], [ 6, 21, 36], [ 9, 24, 39]] MovingSum(x, 1, 3) = [[ 3, 18, 33], [ 6, 21, 36], [ 9, 24, 39], [ 7, 17, 27], [ 4, 9, 14]]
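The docstring example above can be reproduced directly; the sketch below restates the conv1d-based implementation from the prompt and feeds it the docstring's values as a (batch_size=3, tgt_len=1, src_len=5) tensor.

```python
import torch

def moving_sum(x, start_idx: int, end_idx: int):
    # Windowed sum via a 1D convolution with an all-ones kernel, then cropping
    # the padding so the output keeps the source length (as in the prompt).
    assert start_idx > 0 and end_idx > 0
    batch_size, tgt_len, src_len = x.size()
    x = x.view(-1, src_len).unsqueeze(1)
    weight = torch.ones([1, 1, end_idx + start_idx - 1]).type_as(x)
    out = torch.nn.functional.conv1d(
        x, weight, padding=start_idx + end_idx - 1
    ).squeeze(1)
    out = out[:, end_idx:-start_idx]
    return out.view(batch_size, tgt_len, src_len)

# Each batch element holds one column of the docstring's example matrix.
x = torch.tensor([[[0., 1., 2., 3., 4.]],
                  [[5., 6., 7., 8., 9.]],
                  [[10., 11., 12., 13., 14.]]])
print(moving_sum(x, 3, 1))  # [[0, 1, 3, 6, 9]], [[5, 11, 18, 21, 24]], [[10, 21, 33, 36, 39]]
print(moving_sum(x, 1, 3))  # [[3, 6, 9, 7, 4]], [[18, 21, 24, 17, 9]], [[33, 36, 39, 27, 14]]
```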
181,977
import logging import os import sys import numpy as np from sklearn.cluster import MiniBatchKMeans import joblib logger = logging.getLogger("learn_kmeans") def get_km_model( n_clusters, init, max_iter, batch_size, tol, max_no_improvement, n_init, reassignment_ratio, ): def load_feature(feat_dir, split, nshard, seed, percent): def learn_kmeans( feat_dir, split, nshard, km_path, n_clusters, seed, percent, init, max_iter, batch_size, tol, n_init, reassignment_ratio, max_no_improvement, ): np.random.seed(seed) feat = load_feature(feat_dir, split, nshard, seed, percent) km_model = get_km_model( n_clusters, init, max_iter, batch_size, tol, max_no_improvement, n_init, reassignment_ratio, ) km_model.fit(feat) joblib.dump(km_model, km_path) inertia = -km_model.score(feat) / len(feat) logger.info("total intertia: %.5f", inertia) logger.info("finished successfully")
null
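A minimal sketch of the fit / score / dump cycle that learn_kmeans wraps, with made-up features and hyper-parameters in place of load_feature() and the script's arguments:

```python
import numpy as np
import joblib
from sklearn.cluster import MiniBatchKMeans

feat = np.random.rand(10_000, 39).astype(np.float32)  # stand-in for load_feature()

km_model = MiniBatchKMeans(
    n_clusters=100, init="k-means++", max_iter=100, batch_size=10_000,
    tol=0.0, max_no_improvement=100, n_init=20, reassignment_ratio=0.0,
)
km_model.fit(feat)
joblib.dump(km_model, "km_100.bin")  # hypothetical km_path

# score() returns the negative inertia, hence the sign flip in the prompt.
inertia = -km_model.score(feat) / len(feat)
print(f"average inertia: {inertia:.5f}")
```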
181,987
import numpy as np import torch from scipy.interpolate import interp1d import torchaudio from fairseq.tasks.text_to_speech import ( batch_compute_distortion, compute_rms_dist ) def compute_rms_dist(x1, x2): l2_dist = compute_l2_dist(x1, x2) return (l2_dist / x1.size(1)).pow(0.5) def batch_compute_distortion(y1, y2, sr, feat_fn, dist_fn, normalize_type): d, s, x1, x2 = [], [], [], [] for cur_y1, cur_y2 in zip(y1, y2): assert (cur_y1.ndim == 1 and cur_y2.ndim == 1) cur_x1 = feat_fn(cur_y1) cur_x2 = feat_fn(cur_y2) x1.append(cur_x1) x2.append(cur_x2) cur_d = dist_fn(cur_x1, cur_x2) d.append(cur_d) s.append(d[-1].size()) max_m = max(ss[0] for ss in s) max_n = max(ss[1] for ss in s) d = torch.stack( [F.pad(dd, (0, max_n - dd.size(1), 0, max_m - dd.size(0))) for dd in d] ) s = torch.LongTensor(s).to(d.device) cumdists, backptrs, pathmaps = batch_dynamic_time_warping(d, s) rets = [] itr = zip(s, x1, x2, d, cumdists, backptrs, pathmaps) for (m, n), cur_x1, cur_x2, dist, cumdist, backptr, pathmap in itr: cumdist = cumdist[:m, :n] backptr = backptr[:m, :n] pathmap = pathmap[:m, :n] divisor = get_divisor(pathmap, normalize_type) distortion = cumdist[-1, -1] / divisor ret = distortion, (cur_x1, cur_x2, dist, cumdist, backptr, pathmap) rets.append(ret) return rets The provided code snippet includes necessary dependencies for implementing the `batch_mel_spectral_distortion` function. Write a Python function `def batch_mel_spectral_distortion( y1, y2, sr, normalize_type="path", mel_fn=None )` to solve the following problem: https://arxiv.org/pdf/2011.03568.pdf Same as Mel Cepstral Distortion, but computed on log-mel spectrograms. Here is the function: def batch_mel_spectral_distortion( y1, y2, sr, normalize_type="path", mel_fn=None ): """ https://arxiv.org/pdf/2011.03568.pdf Same as Mel Cepstral Distortion, but computed on log-mel spectrograms. """ if mel_fn is None or mel_fn.sample_rate != sr: mel_fn = torchaudio.transforms.MelSpectrogram( sr, n_fft=int(0.05 * sr), win_length=int(0.05 * sr), hop_length=int(0.0125 * sr), f_min=20, n_mels=80, window_fn=torch.hann_window ).to(y1[0].device) offset = 1e-6 return batch_compute_distortion( y1, y2, sr, lambda y: torch.log(mel_fn(y) + offset).transpose(-1, -2), compute_rms_dist, normalize_type )
https://arxiv.org/pdf/2011.03568.pdf Same as Mel Cepstral Distortion, but computed on log-mel spectrograms.
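A hedged sketch of just the feature function this row passes to batch_compute_distortion: a log-mel spectrogram with the same window, hop, and mel settings. The DTW-based distortion itself is left to the fairseq helpers referenced above.

```python
import torch
import torchaudio

sr = 16000  # assumed sample rate, for illustration only
mel_fn = torchaudio.transforms.MelSpectrogram(
    sr, n_fft=int(0.05 * sr), win_length=int(0.05 * sr),
    hop_length=int(0.0125 * sr), f_min=20, n_mels=80,
    window_fn=torch.hann_window,
)
y = torch.randn(sr)  # one second of placeholder audio
offset = 1e-6        # avoids log(0) on silent frames
feat = torch.log(mel_fn(y) + offset).transpose(-1, -2)  # frames x 80
print(feat.shape)
```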
181,992
import os from pathlib import Path from typing import Optional, List, Dict import zipfile import tempfile from dataclasses import dataclass from itertools import groupby import torch import torch.nn.functional as F import numpy as np from tqdm import tqdm from examples.speech_to_text.data_utils import load_tsv_to_dicts from fairseq.data.audio.audio_utils import TTSSpectrogram, TTSMelScale def trim_or_pad_to_target_length( data_1d_or_2d: np.ndarray, target_length: int ) -> np.ndarray: assert len(data_1d_or_2d.shape) in {1, 2} delta = data_1d_or_2d.shape[0] - target_length if delta >= 0: # trim if being longer data_1d_or_2d = data_1d_or_2d[: target_length] else: # pad if being shorter if len(data_1d_or_2d.shape) == 1: data_1d_or_2d = np.concatenate( [data_1d_or_2d, np.zeros(-delta)], axis=0 ) else: data_1d_or_2d = np.concatenate( [data_1d_or_2d, np.zeros((-delta, data_1d_or_2d.shape[1]))], axis=0 ) return data_1d_or_2d class TTSSpectrogram(torch.nn.Module): def __init__( self, n_fft: int, win_length: int, hop_length: int, window_fn: callable = torch.hann_window, return_phase: bool = False ) -> None: super(TTSSpectrogram, self).__init__() self.n_fft = n_fft self.hop_length = hop_length self.return_phase = return_phase basis = get_fourier_basis(n_fft).unsqueeze(1) basis *= get_window(window_fn, n_fft, win_length) self.register_buffer('basis', basis) def forward( self, waveform: torch.Tensor ) -> Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]: padding = (self.n_fft // 2, self.n_fft // 2) x = F.pad(waveform.unsqueeze(1), padding, mode='reflect') x = F.conv1d(x, self.basis, stride=self.hop_length) real_part = x[:, :self.n_fft // 2 + 1, :] imag_part = x[:, self.n_fft // 2 + 1:, :] magnitude = torch.sqrt(real_part ** 2 + imag_part ** 2) if self.return_phase: phase = torch.atan2(imag_part, real_part) return magnitude, phase return magnitude class TTSMelScale(torch.nn.Module): def __init__( self, n_mels: int, sample_rate: int, f_min: float, f_max: float, n_stft: int ) -> None: super(TTSMelScale, self).__init__() basis = get_mel_filters(sample_rate, (n_stft - 1) * 2, n_mels, f_min, f_max) self.register_buffer('basis', basis) def forward(self, specgram: torch.Tensor) -> torch.Tensor: return torch.matmul(self.basis, specgram) def extract_logmel_spectrogram( waveform: torch.Tensor, sample_rate: int, output_path: Optional[Path] = None, win_length: int = 1024, hop_length: int = 256, n_fft: int = 1024, win_fn: callable = torch.hann_window, n_mels: int = 80, f_min: float = 0., f_max: float = 8000, eps: float = 1e-5, overwrite: bool = False, target_length: Optional[int] = None ): if output_path is not None and output_path.is_file() and not overwrite: return spectrogram_transform = TTSSpectrogram( n_fft=n_fft, win_length=win_length, hop_length=hop_length, window_fn=win_fn ) mel_scale_transform = TTSMelScale( n_mels=n_mels, sample_rate=sample_rate, f_min=f_min, f_max=f_max, n_stft=n_fft // 2 + 1 ) spectrogram = spectrogram_transform(waveform) mel_spec = mel_scale_transform(spectrogram) logmel_spec = torch.clamp(mel_spec, min=eps).log() assert len(logmel_spec.shape) == 3 and logmel_spec.shape[0] == 1 logmel_spec = logmel_spec.squeeze().t() # D x T -> T x D if target_length is not None: trim_or_pad_to_target_length(logmel_spec, target_length) if output_path is not None: np.save(output_path.as_posix(), logmel_spec) else: return logmel_spec
null
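A torchaudio-only sketch of the quantity extract_logmel_spectrogram produces; the prompt uses fairseq's TTSSpectrogram and TTSMelScale modules, but stock torchaudio transforms with the same (assumed) defaults yield an equivalent clamped log-mel matrix.

```python
import torch
import torchaudio

sample_rate, n_fft, win_length, hop_length = 22050, 1024, 1024, 256
n_mels, f_min, f_max, eps = 80, 0.0, 8000.0, 1e-5

waveform = torch.randn(1, sample_rate)  # one second of placeholder audio
magnitude = torchaudio.transforms.Spectrogram(
    n_fft=n_fft, win_length=win_length, hop_length=hop_length, power=1.0
)(waveform)
mel_spec = torchaudio.transforms.MelScale(
    n_mels=n_mels, sample_rate=sample_rate, f_min=f_min, f_max=f_max,
    n_stft=n_fft // 2 + 1
)(magnitude)
logmel_spec = torch.clamp(mel_spec, min=eps).log().squeeze(0).t()  # T x 80
print(logmel_spec.shape)
```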
181,993
import os from pathlib import Path from typing import Optional, List, Dict import zipfile import tempfile from dataclasses import dataclass from itertools import groupby import torch import torch.nn.functional as F import numpy as np from tqdm import tqdm from examples.speech_to_text.data_utils import load_tsv_to_dicts from fairseq.data.audio.audio_utils import TTSSpectrogram, TTSMelScale def trim_or_pad_to_target_length( data_1d_or_2d: np.ndarray, target_length: int ) -> np.ndarray: assert len(data_1d_or_2d.shape) in {1, 2} delta = data_1d_or_2d.shape[0] - target_length if delta >= 0: # trim if being longer data_1d_or_2d = data_1d_or_2d[: target_length] else: # pad if being shorter if len(data_1d_or_2d.shape) == 1: data_1d_or_2d = np.concatenate( [data_1d_or_2d, np.zeros(-delta)], axis=0 ) else: data_1d_or_2d = np.concatenate( [data_1d_or_2d, np.zeros((-delta, data_1d_or_2d.shape[1]))], axis=0 ) return data_1d_or_2d def extract_pitch( waveform: torch.Tensor, sample_rate: int, output_path: Optional[Path] = None, hop_length: int = 256, log_scale: bool = True, phoneme_durations: Optional[List[int]] = None ): if output_path is not None and output_path.is_file(): return try: import pyworld except ImportError: raise ImportError("Please install PyWORLD: pip install pyworld") _waveform = waveform.squeeze(0).double().numpy() pitch, t = pyworld.dio( _waveform, sample_rate, frame_period=hop_length / sample_rate * 1000 ) pitch = pyworld.stonemask(_waveform, pitch, t, sample_rate) if phoneme_durations is not None: pitch = trim_or_pad_to_target_length(pitch, sum(phoneme_durations)) try: from scipy.interpolate import interp1d except ImportError: raise ImportError("Please install SciPy: pip install scipy") nonzero_ids = np.where(pitch != 0)[0] interp_fn = interp1d( nonzero_ids, pitch[nonzero_ids], fill_value=(pitch[nonzero_ids[0]], pitch[nonzero_ids[-1]]), bounds_error=False, ) pitch = interp_fn(np.arange(0, len(pitch))) d_cumsum = np.cumsum(np.concatenate([np.array([0]), phoneme_durations])) pitch = np.array( [ np.mean(pitch[d_cumsum[i-1]: d_cumsum[i]]) for i in range(1, len(d_cumsum)) ] ) assert len(pitch) == len(phoneme_durations) if log_scale: pitch = np.log(pitch + 1) if output_path is not None: np.save(output_path.as_posix(), pitch) else: return pitch
null
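A hedged sketch of the F0 extraction at the core of extract_pitch: PyWORLD's DIO gives a coarse pitch track that StoneMask refines at the same time points, followed by the prompt's log scaling. Interpolation over unvoiced frames and phoneme-level averaging are omitted here.

```python
import numpy as np
import pyworld  # pip install pyworld

sample_rate, hop_length = 22050, 256
waveform = np.random.randn(sample_rate).astype(np.float64)  # placeholder audio

pitch, t = pyworld.dio(
    waveform, sample_rate, frame_period=hop_length / sample_rate * 1000
)
pitch = pyworld.stonemask(waveform, pitch, t, sample_rate)

log_pitch = np.log(pitch + 1)  # unvoiced frames (pitch == 0) map to 0
print(log_pitch.shape)
```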
181,994
import os from pathlib import Path from typing import Optional, List, Dict import zipfile import tempfile from dataclasses import dataclass from itertools import groupby import torch import torch.nn.functional as F import numpy as np from tqdm import tqdm from examples.speech_to_text.data_utils import load_tsv_to_dicts from fairseq.data.audio.audio_utils import TTSSpectrogram, TTSMelScale def trim_or_pad_to_target_length( data_1d_or_2d: np.ndarray, target_length: int ) -> np.ndarray: assert len(data_1d_or_2d.shape) in {1, 2} delta = data_1d_or_2d.shape[0] - target_length if delta >= 0: # trim if being longer data_1d_or_2d = data_1d_or_2d[: target_length] else: # pad if being shorter if len(data_1d_or_2d.shape) == 1: data_1d_or_2d = np.concatenate( [data_1d_or_2d, np.zeros(-delta)], axis=0 ) else: data_1d_or_2d = np.concatenate( [data_1d_or_2d, np.zeros((-delta, data_1d_or_2d.shape[1]))], axis=0 ) return data_1d_or_2d def extract_energy( waveform: torch.Tensor, output_path: Optional[Path] = None, hop_length: int = 256, n_fft: int = 1024, log_scale: bool = True, phoneme_durations: Optional[List[int]] = None ): if output_path is not None and output_path.is_file(): return assert len(waveform.shape) == 2 and waveform.shape[0] == 1 waveform = waveform.view(1, 1, waveform.shape[1]) waveform = F.pad( waveform.unsqueeze(1), [n_fft // 2, n_fft // 2, 0, 0], mode="reflect" ) waveform = waveform.squeeze(1) fourier_basis = np.fft.fft(np.eye(n_fft)) cutoff = int((n_fft / 2 + 1)) fourier_basis = np.vstack( [np.real(fourier_basis[:cutoff, :]), np.imag(fourier_basis[:cutoff, :])] ) forward_basis = torch.FloatTensor(fourier_basis[:, None, :]) forward_transform = F.conv1d( waveform, forward_basis, stride=hop_length, padding=0 ) real_part = forward_transform[:, :cutoff, :] imag_part = forward_transform[:, cutoff:, :] magnitude = torch.sqrt(real_part ** 2 + imag_part ** 2) energy = torch.norm(magnitude, dim=1).squeeze(0).numpy() if phoneme_durations is not None: energy = trim_or_pad_to_target_length(energy, sum(phoneme_durations)) d_cumsum = np.cumsum(np.concatenate([np.array([0]), phoneme_durations])) energy = np.array( [ np.mean(energy[d_cumsum[i - 1]: d_cumsum[i]]) for i in range(1, len(d_cumsum)) ] ) assert len(energy) == len(phoneme_durations) if log_scale: energy = np.log(energy + 1) if output_path is not None: np.save(output_path.as_posix(), energy) else: return energy
null
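The prompt computes energy as the per-frame L2 norm of an unwindowed STFT magnitude, building the Fourier basis by hand and applying it with conv1d. A rough torch.stft equivalent (rectangular window, assumed settings; frame edges may differ slightly) for intuition:

```python
import torch

n_fft, hop_length = 1024, 256
waveform = torch.randn(1, 22050)  # placeholder audio, shape (1, n_samples)

spec = torch.stft(
    waveform, n_fft=n_fft, hop_length=hop_length,
    window=torch.ones(n_fft),  # rectangular, mirroring the prompt's raw DFT basis
    return_complex=True,
)
magnitude = spec.abs()                             # (1, n_fft//2 + 1, T)
energy = torch.norm(magnitude, dim=1).squeeze(0)   # per-frame energy, length T
log_energy = torch.log(energy + 1)
print(log_energy.shape)
```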
181,995
import os from pathlib import Path from typing import Optional, List, Dict import zipfile import tempfile from dataclasses import dataclass from itertools import groupby import torch import torch.nn.functional as F import numpy as np from tqdm import tqdm from examples.speech_to_text.data_utils import load_tsv_to_dicts from fairseq.data.audio.audio_utils import TTSSpectrogram, TTSMelScale def get_global_cmvn(feature_root: Path, output_path: Optional[Path] = None): mean_x, mean_x2, n_frames = None, None, 0 feature_paths = feature_root.glob("*.npy") for p in tqdm(feature_paths): with open(p, 'rb') as f: frames = np.load(f).squeeze() n_frames += frames.shape[0] cur_mean_x = frames.sum(axis=0) if mean_x is None: mean_x = cur_mean_x else: mean_x += cur_mean_x cur_mean_x2 = (frames ** 2).sum(axis=0) if mean_x2 is None: mean_x2 = cur_mean_x2 else: mean_x2 += cur_mean_x2 mean_x /= n_frames mean_x2 /= n_frames var_x = mean_x2 - mean_x ** 2 std_x = np.sqrt(np.maximum(var_x, 1e-10)) if output_path is not None: with open(output_path, 'wb') as f: np.savez(f, mean=mean_x, std=std_x) else: return {"mean": mean_x, "std": std_x}
null
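The stats that get_global_cmvn saves are typically applied as global cepstral mean and variance normalization at feature-loading time. A small sketch with hypothetical file names:

```python
import numpy as np

stats = np.load("gcmvn_stats.npz")         # file written by get_global_cmvn
mean, std = stats["mean"], stats["std"]

features = np.load("some_utterance.npy")   # T x 80 log-mel features (hypothetical)
normalized = (features - mean) / std       # zero mean, unit variance per mel bin
```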
181,996
import os from pathlib import Path from typing import Optional, List, Dict import zipfile import tempfile from dataclasses import dataclass from itertools import groupby import torch import torch.nn.functional as F import numpy as np from tqdm import tqdm from examples.speech_to_text.data_utils import load_tsv_to_dicts from fairseq.data.audio.audio_utils import TTSSpectrogram, TTSMelScale def ipa_phonemize(text, lang="en-us", use_g2p=False): if use_g2p: assert lang == "en-us", "g2pE phonemizer only works for en-us" try: from g2p_en import G2p g2p = G2p() return " ".join("|" if p == " " else p for p in g2p(text)) except ImportError: raise ImportError( "Please install phonemizer: pip install g2p_en" ) else: try: from phonemizer import phonemize from phonemizer.separator import Separator return phonemize( text, backend='espeak', language=lang, separator=Separator(word="| ", phone=" ") ) except ImportError: raise ImportError( "Please install phonemizer: pip install phonemizer" )
null
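A hedged usage sketch of the g2p branch of ipa_phonemize; it needs `pip install g2p_en`, and the printed phones are illustrative ARPABET output rather than an exact transcript.

```python
from g2p_en import G2p

g2p = G2p()
# Word boundaries (spaces) are replaced by the "|" separator, as in the prompt.
phones = " ".join("|" if p == " " else p for p in g2p("hello world"))
print(phones)  # e.g. "HH AH0 L OW1 | W ER1 L D"
```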
181,997
import os from pathlib import Path from typing import Optional, List, Dict import zipfile import tempfile from dataclasses import dataclass from itertools import groupby import torch import torch.nn.functional as F import numpy as np from tqdm import tqdm from examples.speech_to_text.data_utils import load_tsv_to_dicts from fairseq.data.audio.audio_utils import TTSSpectrogram, TTSMelScale class ForceAlignmentInfo(object): tokens: List[str] frame_durations: List[int] start_sec: Optional[float] end_sec: Optional[float] def get_mfa_alignment_by_sample_id( textgrid_zip_path: str, sample_id: str, sample_rate: int, hop_length: int, silence_phones: List[str] = ("sil", "sp", "spn") ) -> ForceAlignmentInfo: try: import tgt except ImportError: raise ImportError("Please install TextGridTools: pip install tgt") filename = f"{sample_id}.TextGrid" out_root = Path(tempfile.gettempdir()) tgt_path = out_root / filename with zipfile.ZipFile(textgrid_zip_path) as f_zip: f_zip.extract(filename, path=out_root) textgrid = tgt.io.read_textgrid(tgt_path.as_posix()) os.remove(tgt_path) phones, frame_durations = [], [] start_sec, end_sec, end_idx = 0, 0, 0 for t in textgrid.get_tier_by_name("phones")._objects: s, e, p = t.start_time, t.end_time, t.text # Trim leading silences if len(phones) == 0: if p in silence_phones: continue else: start_sec = s phones.append(p) if p not in silence_phones: end_sec = e end_idx = len(phones) r = sample_rate / hop_length frame_durations.append(int(np.round(e * r) - np.round(s * r))) # Trim tailing silences phones = phones[:end_idx] frame_durations = frame_durations[:end_idx] return ForceAlignmentInfo( tokens=phones, frame_durations=frame_durations, start_sec=start_sec, end_sec=end_sec ) def get_mfa_alignment( textgrid_zip_path: str, sample_ids: List[str], sample_rate: int, hop_length: int ) -> Dict[str, ForceAlignmentInfo]: return { i: get_mfa_alignment_by_sample_id( textgrid_zip_path, i, sample_rate, hop_length ) for i in tqdm(sample_ids) }
null
181,998
import os from pathlib import Path from typing import Optional, List, Dict import zipfile import tempfile from dataclasses import dataclass from itertools import groupby import torch import torch.nn.functional as F import numpy as np from tqdm import tqdm from examples.speech_to_text.data_utils import load_tsv_to_dicts from fairseq.data.audio.audio_utils import TTSSpectrogram, TTSMelScale class ForceAlignmentInfo(object): tokens: List[str] frame_durations: List[int] start_sec: Optional[float] end_sec: Optional[float] def load_tsv_to_dicts(path: Union[str, Path]) -> List[dict]: with open(path, "r") as f: reader = csv.DictReader( f, delimiter="\t", quotechar=None, doublequote=False, lineterminator="\n", quoting=csv.QUOTE_NONE, ) rows = [dict(e) for e in reader] return rows def get_unit_alignment( id_to_unit_tsv_path: str, sample_ids: List[str] ) -> Dict[str, ForceAlignmentInfo]: id_to_units = { e["id"]: e["units"] for e in load_tsv_to_dicts(id_to_unit_tsv_path) } id_to_units = {i: id_to_units[i].split() for i in sample_ids} id_to_units_collapsed = { i: [uu for uu, _ in groupby(u)] for i, u in id_to_units.items() } id_to_durations = { i: [len(list(g)) for _, g in groupby(u)] for i, u in id_to_units.items() } return { i: ForceAlignmentInfo( tokens=id_to_units_collapsed[i], frame_durations=id_to_durations[i], start_sec=None, end_sec=None ) for i in sample_ids }
null
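The core of get_unit_alignment is a run-length collapse with itertools.groupby: repeated units are merged into tokens and the run lengths become frame durations. A small standalone illustration:

```python
from itertools import groupby

units = "12 12 12 7 7 44 44 44 44".split()             # a made-up unit sequence

tokens = [u for u, _ in groupby(units)]                # ['12', '7', '44']
durations = [len(list(g)) for _, g in groupby(units)]  # [3, 2, 4]

print(tokens, durations)
```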
181,999
import logging import matplotlib.pyplot as plt import numpy as np from pathlib import Path import soundfile as sf import sys import torch import torchaudio from fairseq import checkpoint_utils, options, tasks, utils from fairseq.logging import progress_bar from fairseq.tasks.text_to_speech import plot_tts_output from fairseq.data.audio.text_to_speech_dataset import TextToSpeechDataset class TextToSpeechDataset(SpeechToTextDataset): def __init__( self, split: str, is_train_split: bool, cfg: S2TDataConfig, audio_paths: List[str], n_frames: List[int], src_texts: Optional[List[str]] = None, tgt_texts: Optional[List[str]] = None, speakers: Optional[List[str]] = None, src_langs: Optional[List[str]] = None, tgt_langs: Optional[List[str]] = None, ids: Optional[List[str]] = None, tgt_dict: Optional[Dictionary] = None, pre_tokenizer=None, bpe_tokenizer=None, n_frames_per_step=1, speaker_to_id=None, durations: Optional[List[List[int]]] = None, pitches: Optional[List[str]] = None, energies: Optional[List[str]] = None ): super(TextToSpeechDataset, self).__init__( split, is_train_split, cfg, audio_paths, n_frames, src_texts=src_texts, tgt_texts=tgt_texts, speakers=speakers, src_langs=src_langs, tgt_langs=tgt_langs, ids=ids, tgt_dict=tgt_dict, pre_tokenizer=pre_tokenizer, bpe_tokenizer=bpe_tokenizer, n_frames_per_step=n_frames_per_step, speaker_to_id=speaker_to_id ) self.durations = durations self.pitches = pitches self.energies = energies def __getitem__(self, index: int) -> TextToSpeechDatasetItem: s2t_item = super().__getitem__(index) duration, pitch, energy = None, None, None if self.durations is not None: duration = torch.tensor( self.durations[index] + [0], dtype=torch.long # pad 0 for EOS ) if self.pitches is not None: pitch = get_features_or_waveform(self.pitches[index]) pitch = torch.from_numpy( np.concatenate((pitch, [0])) # pad 0 for EOS ).float() if self.energies is not None: energy = get_features_or_waveform(self.energies[index]) energy = torch.from_numpy( np.concatenate((energy, [0])) # pad 0 for EOS ).float() return TextToSpeechDatasetItem( index=index, source=s2t_item.source, target=s2t_item.target, speaker_id=s2t_item.speaker_id, duration=duration, pitch=pitch, energy=energy ) def collater(self, samples: List[TextToSpeechDatasetItem]) -> Dict[str, Any]: if len(samples) == 0: return {} src_lengths, order = torch.tensor( [s.target.shape[0] for s in samples], dtype=torch.long ).sort(descending=True) id_ = torch.tensor([s.index for s in samples], dtype=torch.long).index_select(0, order) feat = _collate_frames( [s.source for s in samples], self.cfg.use_audio_input ).index_select(0, order) target_lengths = torch.tensor( [s.source.shape[0] for s in samples], dtype=torch.long ).index_select(0, order) src_tokens = fairseq_data_utils.collate_tokens( [s.target for s in samples], self.tgt_dict.pad(), self.tgt_dict.eos(), left_pad=False, move_eos_to_beginning=False, ).index_select(0, order) speaker = None if self.speaker_to_id is not None: speaker = torch.tensor( [s.speaker_id for s in samples], dtype=torch.long ).index_select(0, order).view(-1, 1) bsz, _, d = feat.size() prev_output_tokens = torch.cat( (feat.new_zeros((bsz, 1, d)), feat[:, :-1, :]), dim=1 ) durations, pitches, energies = None, None, None if self.durations is not None: durations = fairseq_data_utils.collate_tokens( [s.duration for s in samples], 0 ).index_select(0, order) assert src_tokens.shape[1] == durations.shape[1] if self.pitches is not None: pitches = _collate_frames([s.pitch for s in samples], True) pitches = 
pitches.index_select(0, order) assert src_tokens.shape[1] == pitches.shape[1] if self.energies is not None: energies = _collate_frames([s.energy for s in samples], True) energies = energies.index_select(0, order) assert src_tokens.shape[1] == energies.shape[1] src_texts = [self.tgt_dict.string(samples[i].target) for i in order] return { "id": id_, "net_input": { "src_tokens": src_tokens, "src_lengths": src_lengths, "prev_output_tokens": prev_output_tokens, }, "speaker": speaker, "target": feat, "durations": durations, "pitches": pitches, "energies": energies, "target_lengths": target_lengths, "ntokens": sum(target_lengths).item(), "nsentences": len(samples), "src_texts": src_texts, } def postprocess_results( dataset: TextToSpeechDataset, sample, hypos, resample_fn, dump_target ): def to_np(x): return None if x is None else x.detach().cpu().numpy() sample_ids = [dataset.ids[i] for i in sample["id"].tolist()] texts = sample["src_texts"] attns = [to_np(hypo["attn"]) for hypo in hypos] eos_probs = [to_np(hypo.get("eos_prob", None)) for hypo in hypos] feat_preds = [to_np(hypo["feature"]) for hypo in hypos] wave_preds = [to_np(resample_fn(h["waveform"])) for h in hypos] if dump_target: feat_targs = [to_np(hypo["targ_feature"]) for hypo in hypos] wave_targs = [to_np(resample_fn(h["targ_waveform"])) for h in hypos] else: feat_targs = [None for _ in hypos] wave_targs = [None for _ in hypos] return zip(sample_ids, texts, attns, eos_probs, feat_preds, wave_preds, feat_targs, wave_targs)
null
182,000
import logging import matplotlib.pyplot as plt import numpy as np from pathlib import Path import soundfile as sf import sys import torch import torchaudio from fairseq import checkpoint_utils, options, tasks, utils from fairseq.logging import progress_bar from fairseq.tasks.text_to_speech import plot_tts_output from fairseq.data.audio.text_to_speech_dataset import TextToSpeechDataset def plot_tts_output( data_2d, title_2d, data_1d, title_1d, figsize=(24, 4), v_min=DEFAULT_V_MIN, v_max=3, ret_np=False, suptitle="" ): try: import matplotlib.pyplot as plt from mpl_toolkits.axes_grid1 import make_axes_locatable except ImportError: raise ImportError("Please install Matplotlib: pip install matplotlib") data_2d = [ x.detach().cpu().float().numpy() if isinstance(x, torch.Tensor) else x for x in data_2d ] fig, axes = plt.subplots(1, len(data_2d) + 1, figsize=figsize) if suptitle: fig.suptitle(suptitle[:400]) # capped at 400 chars axes = [axes] if len(data_2d) == 0 else axes for ax, x, name in zip(axes, data_2d, title_2d): ax.set_title(name) divider = make_axes_locatable(ax) cax = divider.append_axes('right', size='5%', pad=0.05) im = ax.imshow( x, origin="lower", aspect="auto", vmin=max(x.min(), v_min), vmax=min(x.max(), v_max) ) fig.colorbar(im, cax=cax, orientation='vertical') if isinstance(data_1d, torch.Tensor): data_1d = data_1d.detach().cpu().numpy() axes[-1].plot(data_1d) axes[-1].set_title(title_1d) plt.tight_layout() if ret_np: fig.canvas.draw() data = save_figure_to_numpy(fig) plt.close(fig) return data def dump_result( is_na_model, args, vocoder, sample_id, text, attn, eos_prob, feat_pred, wave_pred, feat_targ, wave_targ, ): sample_rate = args.output_sample_rate out_root = Path(args.results_path) if args.dump_features: feat_dir = out_root / "feat" feat_dir.mkdir(exist_ok=True, parents=True) np.save(feat_dir / f"{sample_id}.npy", feat_pred) if args.dump_target: feat_tgt_dir = out_root / "feat_tgt" feat_tgt_dir.mkdir(exist_ok=True, parents=True) np.save(feat_tgt_dir / f"{sample_id}.npy", feat_targ) if args.dump_attentions: attn_dir = out_root / "attn" attn_dir.mkdir(exist_ok=True, parents=True) np.save(attn_dir / f"{sample_id}.npy", attn.numpy()) if args.dump_eos_probs and not is_na_model: eos_dir = out_root / "eos" eos_dir.mkdir(exist_ok=True, parents=True) np.save(eos_dir / f"{sample_id}.npy", eos_prob) if args.dump_plots: images = [feat_pred.T] if is_na_model else [feat_pred.T, attn] names = ["output"] if is_na_model else ["output", "alignment"] if feat_targ is not None: images = [feat_targ.T] + images names = [f"target (idx={sample_id})"] + names if is_na_model: plot_tts_output(images, names, attn, "alignment", suptitle=text) else: plot_tts_output(images, names, eos_prob, "eos prob", suptitle=text) plot_dir = out_root / "plot" plot_dir.mkdir(exist_ok=True, parents=True) plt.savefig(plot_dir / f"{sample_id}.png") plt.close() if args.dump_waveforms: ext = args.audio_format if wave_pred is not None: wav_dir = out_root / f"{ext}_{sample_rate}hz_{vocoder}" wav_dir.mkdir(exist_ok=True, parents=True) sf.write(wav_dir / f"{sample_id}.{ext}", wave_pred, sample_rate) if args.dump_target and wave_targ is not None: wav_tgt_dir = out_root / f"{ext}_{sample_rate}hz_{vocoder}_tgt" wav_tgt_dir.mkdir(exist_ok=True, parents=True) sf.write(wav_tgt_dir / f"{sample_id}.{ext}", wave_targ, sample_rate)
null
182,001
import logging import matplotlib.pyplot as plt import numpy as np from pathlib import Path import soundfile as sf import sys import torch import torchaudio from fairseq import checkpoint_utils, options, tasks, utils from fairseq.logging import progress_bar from fairseq.tasks.text_to_speech import plot_tts_output from fairseq.data.audio.text_to_speech_dataset import TextToSpeechDataset def make_parser(): parser = options.get_speech_generation_parser() parser.add_argument("--dump-features", action="store_true") parser.add_argument("--dump-waveforms", action="store_true") parser.add_argument("--dump-attentions", action="store_true") parser.add_argument("--dump-eos-probs", action="store_true") parser.add_argument("--dump-plots", action="store_true") parser.add_argument("--dump-target", action="store_true") parser.add_argument("--output-sample-rate", default=22050, type=int) parser.add_argument("--teacher-forcing", action="store_true") parser.add_argument( "--audio-format", type=str, default="wav", choices=["wav", "flac"] ) return parser def main(args): assert(args.dump_features or args.dump_waveforms or args.dump_attentions or args.dump_eos_probs or args.dump_plots) if args.max_tokens is None and args.batch_size is None: args.max_tokens = 8000 logger.info(args) use_cuda = torch.cuda.is_available() and not args.cpu task = tasks.setup_task(args) models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task( [args.path], task=task, ) model = models[0].cuda() if use_cuda else models[0] # use the original n_frames_per_step task.args.n_frames_per_step = saved_cfg.task.n_frames_per_step task.load_dataset(args.gen_subset, task_cfg=saved_cfg.task) data_cfg = task.data_cfg sample_rate = data_cfg.config.get("features", {}).get("sample_rate", 22050) resample_fn = { False: lambda x: x, True: lambda x: torchaudio.sox_effects.apply_effects_tensor( x.detach().cpu().unsqueeze(0), sample_rate, [['rate', str(args.output_sample_rate)]] )[0].squeeze(0) }.get(args.output_sample_rate != sample_rate) if args.output_sample_rate != sample_rate: logger.info(f"resampling to {args.output_sample_rate}Hz") generator = task.build_generator([model], args) itr = task.get_batch_iterator( dataset=task.dataset(args.gen_subset), max_tokens=args.max_tokens, max_sentences=args.batch_size, max_positions=(sys.maxsize, sys.maxsize), ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test, required_batch_size_multiple=args.required_batch_size_multiple, num_shards=args.num_shards, shard_id=args.shard_id, num_workers=args.num_workers, data_buffer_size=args.data_buffer_size, ).next_epoch_itr(shuffle=False) Path(args.results_path).mkdir(exist_ok=True, parents=True) is_na_model = getattr(model, "NON_AUTOREGRESSIVE", False) dataset = task.dataset(args.gen_subset) vocoder = task.args.vocoder with progress_bar.build_progress_bar(args, itr) as t: for sample in t: sample = utils.move_to_cuda(sample) if use_cuda else sample hypos = generator.generate(model, sample, has_targ=args.dump_target) for result in postprocess_results( dataset, sample, hypos, resample_fn, args.dump_target ): dump_result(is_na_model, args, vocoder, *result) def cli_main(): parser = make_parser() args = options.parse_args_and_arch(parser) main(args)
null
182,003
import numpy as np import os.path as op import torchaudio import tqdm from tabulate import tabulate from examples.speech_synthesis.utils import ( gross_pitch_error, voicing_decision_error, f0_frame_error ) from examples.speech_synthesis.evaluation.eval_sp import load_eval_spec def eval_f0_error(samples, distortion_fn): def gross_pitch_error(true_t, true_f, est_t, est_f): def eval_gross_pitch_error(samples): return eval_f0_error(samples, gross_pitch_error)
null
182,011
import csv import numpy as np import os.path as op import torch import tqdm from tabulate import tabulate import torchaudio from examples.speech_synthesis.utils import batch_mel_spectral_distortion from fairseq.tasks.text_to_speech import batch_mel_cepstral_distortion def eval_distortion(samples, distortion_fn, device="cuda"): def batch_mel_cepstral_distortion( y1, y2, sr, normalize_type="path", mfcc_fn=None ): def eval_mel_cepstral_distortion(samples, device="cuda"): return eval_distortion(samples, batch_mel_cepstral_distortion, device)
null
182,015
import argparse import logging from pathlib import Path import shutil from tempfile import NamedTemporaryFile from collections import Counter, defaultdict import pandas as pd import torchaudio from tqdm import tqdm from fairseq.data.audio.audio_utils import convert_waveform from examples.speech_to_text.data_utils import ( create_zip, gen_config_yaml, gen_vocab, get_zip_manifest, load_tsv_to_dicts, save_df_to_tsv ) from examples.speech_synthesis.data_utils import ( extract_logmel_spectrogram, extract_pitch, extract_energy, get_global_cmvn, ipa_phonemize, get_mfa_alignment, get_unit_alignment ) def convert_waveform( waveform: Union[np.ndarray, torch.Tensor], sample_rate: int, normalize_volume: bool = False, to_mono: bool = False, to_sample_rate: Optional[int] = None ) -> Tuple[Union[np.ndarray, torch.Tensor], int]: """convert a waveform: - to a target sample rate - from multi-channel to mono channel - volume normalization Args: waveform (numpy.ndarray or torch.Tensor): 2D original waveform (channels x length) sample_rate (int): original sample rate normalize_volume (bool): perform volume normalization to_mono (bool): convert to mono channel if having multiple channels to_sample_rate (Optional[int]): target sample rate Returns: waveform (numpy.ndarray): converted 2D waveform (channels x length) sample_rate (float): target sample rate """ try: import torchaudio.sox_effects as ta_sox except ImportError: raise ImportError("Please install torchaudio: pip install torchaudio") effects = [] if normalize_volume: effects.append(["gain", "-n"]) if to_sample_rate is not None and to_sample_rate != sample_rate: effects.append(["rate", f"{to_sample_rate}"]) if to_mono and waveform.shape[0] > 1: effects.append(["channels", "1"]) if len(effects) > 0: is_np_input = isinstance(waveform, np.ndarray) _waveform = torch.from_numpy(waveform) if is_np_input else waveform converted, converted_sample_rate = ta_sox.apply_effects_tensor( _waveform, sample_rate, effects ) if is_np_input: converted = converted.numpy() return converted, converted_sample_rate return waveform, sample_rate def gen_vocab( input_path: Path, output_path_prefix: Path, model_type="bpe", vocab_size=1000, special_symbols: Optional[List[str]] = None ): # Train SentencePiece Model arguments = [ f"--input={input_path.as_posix()}", f"--model_prefix={output_path_prefix.as_posix()}", f"--model_type={model_type}", f"--vocab_size={vocab_size}", "--character_coverage=1.0", f"--num_threads={cpu_count()}", f"--unk_id={UNK_TOKEN_ID}", f"--bos_id={BOS_TOKEN_ID}", f"--eos_id={EOS_TOKEN_ID}", f"--pad_id={PAD_TOKEN_ID}", ] if special_symbols is not None: _special_symbols = ",".join(special_symbols) arguments.append(f"--user_defined_symbols={_special_symbols}") sp.SentencePieceTrainer.Train(" ".join(arguments)) # Export fairseq dictionary spm = sp.SentencePieceProcessor() spm.Load(output_path_prefix.as_posix() + ".model") vocab = {i: spm.IdToPiece(i) for i in range(spm.GetPieceSize())} assert ( vocab.get(UNK_TOKEN_ID) == UNK_TOKEN and vocab.get(PAD_TOKEN_ID) == PAD_TOKEN and vocab.get(BOS_TOKEN_ID) == BOS_TOKEN and vocab.get(EOS_TOKEN_ID) == EOS_TOKEN ) vocab = { i: s for i, s in vocab.items() if s not in {UNK_TOKEN, BOS_TOKEN, EOS_TOKEN, PAD_TOKEN} } with open(output_path_prefix.as_posix() + ".txt", "w") as f_out: for _, s in sorted(vocab.items(), key=lambda x: x[0]): f_out.write(f"{s} 1\n") def create_zip(data_root: Path, zip_path: Path): paths = list(data_root.glob("*.npy")) paths.extend(data_root.glob("*.flac")) with zipfile.ZipFile(zip_path, "w", 
zipfile.ZIP_STORED) as f: for path in tqdm(paths): f.write(path, arcname=path.name) def get_zip_manifest( zip_path: Path, zip_root: Optional[Path] = None, is_audio=False ): _zip_path = Path.joinpath(zip_root or Path(""), zip_path) with zipfile.ZipFile(_zip_path, mode="r") as f: info = f.infolist() paths, lengths = {}, {} for i in tqdm(info): utt_id = Path(i.filename).stem offset, file_size = i.header_offset + 30 + len(i.filename), i.file_size paths[utt_id] = f"{zip_path.as_posix()}:{offset}:{file_size}" with open(_zip_path, "rb") as f: f.seek(offset) byte_data = f.read(file_size) assert len(byte_data) > 1 if is_audio: assert is_sf_audio_data(byte_data), i else: assert is_npy_data(byte_data), i byte_data_fp = io.BytesIO(byte_data) if is_audio: lengths[utt_id] = sf.info(byte_data_fp).frames else: lengths[utt_id] = np.load(byte_data_fp).shape[0] return paths, lengths def gen_config_yaml( manifest_root: Path, spm_filename: Optional[str] = None, vocab_name: Optional[str] = None, yaml_filename: str = "config.yaml", specaugment_policy: Optional[str] = "lb", prepend_tgt_lang_tag: bool = False, sampling_alpha: Optional[float] = None, input_channels: Optional[int] = 1, input_feat_per_channel: Optional[int] = 80, audio_root: str = "", cmvn_type: str = "utterance", gcmvn_path: Optional[Path] = None, extra=None ): manifest_root = manifest_root.absolute() writer = S2TDataConfigWriter(manifest_root / yaml_filename) assert spm_filename is not None or vocab_name is not None vocab_name = spm_filename.replace(".model", ".txt") if vocab_name is None \ else vocab_name writer.set_vocab_filename(vocab_name) if input_channels is not None: writer.set_input_channels(input_channels) if input_feat_per_channel is not None: writer.set_input_feat_per_channel(input_feat_per_channel) specaugment_setters = { "lb": writer.set_specaugment_lb_policy, "ld": writer.set_specaugment_ld_policy, "sm": writer.set_specaugment_sm_policy, "ss": writer.set_specaugment_ss_policy, } specaugment_setter = specaugment_setters.get(specaugment_policy, None) if specaugment_setter is not None: specaugment_setter() if spm_filename is not None: writer.set_bpe_tokenizer( { "bpe": "sentencepiece", "sentencepiece_model": (manifest_root / spm_filename).as_posix(), } ) if prepend_tgt_lang_tag: writer.set_prepend_tgt_lang_tag(True) if sampling_alpha is not None: writer.set_sampling_alpha(sampling_alpha) if cmvn_type not in ["global", "utterance"]: raise NotImplementedError if specaugment_policy is not None: writer.set_feature_transforms( "_train", [f"{cmvn_type}_cmvn", "specaugment"] ) writer.set_feature_transforms("*", [f"{cmvn_type}_cmvn"]) if cmvn_type == "global": if gcmvn_path is None: raise ValueError("Please provide path of global cmvn file.") else: writer.set_global_cmvn(gcmvn_path.as_posix()) if len(audio_root) > 0: writer.set_audio_root(audio_root) if extra is not None: writer.set_extra(extra) writer.flush() def save_df_to_tsv(dataframe, path: Union[str, Path]): _path = path if isinstance(path, str) else path.as_posix() dataframe.to_csv( _path, sep="\t", header=True, index=False, encoding="utf-8", escapechar="\\", quoting=csv.QUOTE_NONE, ) def load_tsv_to_dicts(path: Union[str, Path]) -> List[dict]: with open(path, "r") as f: reader = csv.DictReader( f, delimiter="\t", quotechar=None, doublequote=False, lineterminator="\n", quoting=csv.QUOTE_NONE, ) rows = [dict(e) for e in reader] return rows def extract_logmel_spectrogram( waveform: torch.Tensor, sample_rate: int, output_path: Optional[Path] = None, win_length: int = 1024, hop_length: int = 
256, n_fft: int = 1024, win_fn: callable = torch.hann_window, n_mels: int = 80, f_min: float = 0., f_max: float = 8000, eps: float = 1e-5, overwrite: bool = False, target_length: Optional[int] = None ): if output_path is not None and output_path.is_file() and not overwrite: return spectrogram_transform = TTSSpectrogram( n_fft=n_fft, win_length=win_length, hop_length=hop_length, window_fn=win_fn ) mel_scale_transform = TTSMelScale( n_mels=n_mels, sample_rate=sample_rate, f_min=f_min, f_max=f_max, n_stft=n_fft // 2 + 1 ) spectrogram = spectrogram_transform(waveform) mel_spec = mel_scale_transform(spectrogram) logmel_spec = torch.clamp(mel_spec, min=eps).log() assert len(logmel_spec.shape) == 3 and logmel_spec.shape[0] == 1 logmel_spec = logmel_spec.squeeze().t() # D x T -> T x D if target_length is not None: logmel_spec = trim_or_pad_to_target_length(logmel_spec, target_length) if output_path is not None: np.save(output_path.as_posix(), logmel_spec) else: return logmel_spec def extract_pitch( waveform: torch.Tensor, sample_rate: int, output_path: Optional[Path] = None, hop_length: int = 256, log_scale: bool = True, phoneme_durations: Optional[List[int]] = None ): if output_path is not None and output_path.is_file(): return try: import pyworld except ImportError: raise ImportError("Please install PyWORLD: pip install pyworld") _waveform = waveform.squeeze(0).double().numpy() pitch, t = pyworld.dio( _waveform, sample_rate, frame_period=hop_length / sample_rate * 1000 ) pitch = pyworld.stonemask(_waveform, pitch, t, sample_rate) if phoneme_durations is not None: pitch = trim_or_pad_to_target_length(pitch, sum(phoneme_durations)) try: from scipy.interpolate import interp1d except ImportError: raise ImportError("Please install SciPy: pip install scipy") nonzero_ids = np.where(pitch != 0)[0] if len(nonzero_ids) == 0: print((f"{output_path} has all empty values in the pitch contour")) return elif len(nonzero_ids) == 1: print((f"{output_path} has only one non-zero values in the pitch contour")) return else: interp_fn = interp1d( nonzero_ids, pitch[nonzero_ids], fill_value=(pitch[nonzero_ids[0]], pitch[nonzero_ids[-1]]), bounds_error=False, ) pitch = interp_fn(np.arange(0, len(pitch))) d_cumsum = np.cumsum(np.concatenate([np.array([0]), phoneme_durations])) pitch = np.array( [ np.mean(pitch[d_cumsum[i-1]: d_cumsum[i]]) for i in range(1, len(d_cumsum)) ] ) assert len(pitch) == len(phoneme_durations) if log_scale: pitch = np.log(pitch + 1) if output_path is not None: np.save(output_path.as_posix(), pitch) else: return pitch def extract_energy( waveform: torch.Tensor, output_path: Optional[Path] = None, hop_length: int = 256, n_fft: int = 1024, log_scale: bool = True, phoneme_durations: Optional[List[int]] = None ): if output_path is not None and output_path.is_file(): return assert len(waveform.shape) == 2 and waveform.shape[0] == 1 waveform = waveform.view(1, 1, waveform.shape[1]) waveform = F.pad( waveform.unsqueeze(1), [n_fft // 2, n_fft // 2, 0, 0], mode="reflect" ) waveform = waveform.squeeze(1) fourier_basis = np.fft.fft(np.eye(n_fft)) cutoff = int((n_fft / 2 + 1)) fourier_basis = np.vstack( [np.real(fourier_basis[:cutoff, :]), np.imag(fourier_basis[:cutoff, :])] ) forward_basis = torch.FloatTensor(fourier_basis[:, None, :]) forward_transform = F.conv1d( waveform, forward_basis, stride=hop_length, padding=0 ) real_part = forward_transform[:, :cutoff, :] imag_part = forward_transform[:, cutoff:, :] magnitude = torch.sqrt(real_part ** 2 + imag_part ** 2) energy = torch.norm(magnitude, 
dim=1).squeeze(0).numpy() if phoneme_durations is not None: energy = trim_or_pad_to_target_length(energy, sum(phoneme_durations)) d_cumsum = np.cumsum(np.concatenate([np.array([0]), phoneme_durations])) energy = np.array( [ np.mean(energy[d_cumsum[i - 1]: d_cumsum[i]]) for i in range(1, len(d_cumsum)) ] ) assert len(energy) == len(phoneme_durations) if log_scale: energy = np.log(energy + 1) if output_path is not None: np.save(output_path.as_posix(), energy) else: return energy def get_global_cmvn(feature_root: Path, output_path: Optional[Path] = None): mean_x, mean_x2, n_frames = None, None, 0 feature_paths = feature_root.glob("*.npy") for p in tqdm(feature_paths): with open(p, 'rb') as f: frames = np.load(f).squeeze() n_frames += frames.shape[0] cur_mean_x = frames.sum(axis=0) if mean_x is None: mean_x = cur_mean_x else: mean_x += cur_mean_x cur_mean_x2 = (frames ** 2).sum(axis=0) if mean_x2 is None: mean_x2 = cur_mean_x2 else: mean_x2 += cur_mean_x2 mean_x /= n_frames mean_x2 /= n_frames var_x = mean_x2 - mean_x ** 2 std_x = np.sqrt(np.maximum(var_x, 1e-10)) if output_path is not None: with open(output_path, 'wb') as f: np.savez(f, mean=mean_x, std=std_x) else: return {"mean": mean_x, "std": std_x} def ipa_phonemize(text, lang="en-us", use_g2p=False): if use_g2p: assert lang == "en-us", "g2pE phonemizer only works for en-us" try: from g2p_en import G2p g2p = G2p() return " ".join("|" if p == " " else p for p in g2p(text)) except ImportError: raise ImportError( "Please install phonemizer: pip install g2p_en" ) else: try: from phonemizer import phonemize from phonemizer.separator import Separator return phonemize( text, backend='espeak', language=lang, separator=Separator(word="| ", phone=" ") ) except ImportError: raise ImportError( "Please install phonemizer: pip install phonemizer" ) def get_mfa_alignment( textgrid_zip_path: str, sample_ids: List[str], sample_rate: int, hop_length: int ) -> Dict[str, ForceAlignmentInfo]: return { i: get_mfa_alignment_by_sample_id( textgrid_zip_path, i, sample_rate, hop_length ) for i in tqdm(sample_ids) } def get_unit_alignment( id_to_unit_tsv_path: str, sample_ids: List[str] ) -> Dict[str, ForceAlignmentInfo]: id_to_units = { e["id"]: e["units"] for e in load_tsv_to_dicts(id_to_unit_tsv_path) } id_to_units = {i: id_to_units[i].split() for i in sample_ids} id_to_units_collapsed = { i: [uu for uu, _ in groupby(u)] for i, u in id_to_units.items() } id_to_durations = { i: [len(list(g)) for _, g in groupby(u)] for i, u in id_to_units.items() } return { i: ForceAlignmentInfo( tokens=id_to_units_collapsed[i], frame_durations=id_to_durations[i], start_sec=None, end_sec=None ) for i in sample_ids } def process(args): assert "train" in args.splits out_root = Path(args.output_root).absolute() out_root.mkdir(exist_ok=True) print("Fetching data...") audio_manifest_root = Path(args.audio_manifest_root).absolute() samples = [] for s in args.splits: for e in load_tsv_to_dicts(audio_manifest_root / f"{s}.audio.tsv"): e["split"] = s samples.append(e) sample_ids = [s["id"] for s in samples] # Get alignment info id_to_alignment = None if args.textgrid_zip is not None: assert args.id_to_units_tsv is None id_to_alignment = get_mfa_alignment( args.textgrid_zip, sample_ids, args.sample_rate, args.hop_length ) elif args.id_to_units_tsv is not None: # assume identical hop length on the unit sequence id_to_alignment = get_unit_alignment(args.id_to_units_tsv, sample_ids) # Extract features and pack features into ZIP feature_name = "logmelspec80" zip_path = out_root / 
f"{feature_name}.zip" pitch_zip_path = out_root / "pitch.zip" energy_zip_path = out_root / "energy.zip" gcmvn_npz_path = out_root / "gcmvn_stats.npz" if zip_path.exists() and gcmvn_npz_path.exists(): print(f"{zip_path} and {gcmvn_npz_path} exist.") else: feature_root = out_root / feature_name feature_root.mkdir(exist_ok=True) pitch_root = out_root / "pitch" energy_root = out_root / "energy" if args.add_fastspeech_targets: pitch_root.mkdir(exist_ok=True) energy_root.mkdir(exist_ok=True) print("Extracting Mel spectrogram features...") for sample in tqdm(samples): waveform, sample_rate = torchaudio.load(sample["audio"]) waveform, sample_rate = convert_waveform( waveform, sample_rate, normalize_volume=args.normalize_volume, to_sample_rate=args.sample_rate ) sample_id = sample["id"] target_length = None if id_to_alignment is not None: a = id_to_alignment[sample_id] target_length = sum(a.frame_durations) if a.start_sec is not None and a.end_sec is not None: start_frame = int(a.start_sec * sample_rate) end_frame = int(a.end_sec * sample_rate) waveform = waveform[:, start_frame: end_frame] extract_logmel_spectrogram( waveform, sample_rate, feature_root / f"{sample_id}.npy", win_length=args.win_length, hop_length=args.hop_length, n_fft=args.n_fft, n_mels=args.n_mels, f_min=args.f_min, f_max=args.f_max, target_length=target_length ) if args.add_fastspeech_targets: assert id_to_alignment is not None extract_pitch( waveform, sample_rate, pitch_root / f"{sample_id}.npy", hop_length=args.hop_length, log_scale=True, phoneme_durations=id_to_alignment[sample_id].frame_durations ) extract_energy( waveform, energy_root / f"{sample_id}.npy", hop_length=args.hop_length, n_fft=args.n_fft, log_scale=True, phoneme_durations=id_to_alignment[sample_id].frame_durations ) print("ZIPing features...") create_zip(feature_root, zip_path) get_global_cmvn(feature_root, gcmvn_npz_path) shutil.rmtree(feature_root) if args.add_fastspeech_targets: create_zip(pitch_root, pitch_zip_path) shutil.rmtree(pitch_root) create_zip(energy_root, energy_zip_path) shutil.rmtree(energy_root) print("Fetching ZIP manifest...") audio_paths, audio_lengths = get_zip_manifest(zip_path) pitch_paths, pitch_lengths, energy_paths, energy_lengths = [None] * 4 if args.add_fastspeech_targets: pitch_paths, pitch_lengths = get_zip_manifest(pitch_zip_path) energy_paths, energy_lengths = get_zip_manifest(energy_zip_path) # Generate TSV manifest print("Generating manifest...") manifest_by_split = {split: defaultdict(list) for split in args.splits} for sample in tqdm(samples): sample_id, split = sample["id"], sample["split"] normalized_utt = sample["tgt_text"] if id_to_alignment is not None: normalized_utt = " ".join(id_to_alignment[sample_id].tokens) elif args.ipa_vocab: normalized_utt = ipa_phonemize( normalized_utt, lang=args.lang, use_g2p=args.use_g2p ) manifest_by_split[split]["id"].append(sample_id) manifest_by_split[split]["audio"].append(audio_paths[sample_id]) manifest_by_split[split]["n_frames"].append(audio_lengths[sample_id]) manifest_by_split[split]["tgt_text"].append(normalized_utt) manifest_by_split[split]["speaker"].append(sample["speaker"]) manifest_by_split[split]["src_text"].append(sample["src_text"]) if args.add_fastspeech_targets: assert id_to_alignment is not None duration = " ".join( str(d) for d in id_to_alignment[sample_id].frame_durations ) manifest_by_split[split]["duration"].append(duration) manifest_by_split[split]["pitch"].append(pitch_paths[sample_id]) manifest_by_split[split]["energy"].append(energy_paths[sample_id]) for split 
in args.splits: save_df_to_tsv( pd.DataFrame.from_dict(manifest_by_split[split]), out_root / f"{split}.tsv" ) # Generate vocab vocab_name, spm_filename = None, None if id_to_alignment is not None or args.ipa_vocab: vocab = Counter() for t in manifest_by_split["train"]["tgt_text"]: vocab.update(t.split(" ")) vocab_name = "vocab.txt" with open(out_root / vocab_name, "w") as f: for s, c in vocab.most_common(): f.write(f"{s} {c}\n") else: spm_filename_prefix = "spm_char" spm_filename = f"{spm_filename_prefix}.model" with NamedTemporaryFile(mode="w") as f: for t in manifest_by_split["train"]["tgt_text"]: f.write(t + "\n") f.flush() # needed to ensure gen_vocab sees dumped text gen_vocab(Path(f.name), out_root / spm_filename_prefix, "char") # Generate speaker list speakers = sorted({sample["speaker"] for sample in samples}) speakers_path = out_root / "speakers.txt" with open(speakers_path, "w") as f: for speaker in speakers: f.write(f"{speaker}\n") # Generate config YAML win_len_t = args.win_length / args.sample_rate hop_len_t = args.hop_length / args.sample_rate extra = { "sample_rate": args.sample_rate, "features": { "type": "spectrogram+melscale+log", "eps": 1e-2, "n_mels": args.n_mels, "n_fft": args.n_fft, "window_fn": "hann", "win_length": args.win_length, "hop_length": args.hop_length, "sample_rate": args.sample_rate, "win_len_t": win_len_t, "hop_len_t": hop_len_t, "f_min": args.f_min, "f_max": args.f_max, "n_stft": args.n_fft // 2 + 1 } } if len(speakers) > 1: extra["speaker_set_filename"] = "speakers.txt" gen_config_yaml( out_root, spm_filename=spm_filename, vocab_name=vocab_name, audio_root=out_root.as_posix(), input_channels=None, input_feat_per_channel=None, specaugment_policy=None, cmvn_type="global", gcmvn_path=gcmvn_npz_path, extra=extra )
null
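A minimal, self-contained sketch of the symbol-count vocabulary step in the record above (the branch taken when an alignment or IPA vocabulary is used): count whitespace-separated tokens over the training targets and write one "<symbol> <count>" line per entry, most frequent first. The utterances below are made-up placeholders.

from collections import Counter

# Made-up phoneme-like training targets standing in for manifest_by_split["train"]["tgt_text"]
train_tgt_text = ["HH AH L OW", "W ER L D", "HH AH L OW W ER L D"]

vocab = Counter()
for utt in train_tgt_text:
    vocab.update(utt.split(" "))

# One "<symbol> <count>" line per entry, most frequent first, as in the record
with open("vocab.txt", "w") as f:
    for symbol, count in vocab.most_common():
        f.write(f"{symbol} {count}\n")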
182,037
import logging from collections import namedtuple import torch import torch.nn as nn from fairseq import checkpoint_utils from fairseq import utils from fairseq.models import ( FairseqEncoder, FairseqDecoder, FairseqEncoderDecoderModel, register_model, register_model_architecture, ) from fairseq.models.fairseq_encoder import EncoderOut from fairseq.models.speech_to_text import ( TransformerDecoder, S2TTransformerEncoder, ) from fairseq.models.transformer import TransformerEncoder from fairseq.modules import ( TransformerEncoderLayer, GradMultiply, LayerNorm, ) def dualinputs2ttransformer_base(args): def dualinputs2ttransformer_l(args): args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 1024 * 4) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16) args.dropout = getattr(args, "dropout", 0.2) args.speech_encoder_layers = getattr(args, "speech_encoder_layers", 12) args.text_encoder_layers = getattr(args, "text_encoder_layers", 6) args.decoder_layers = getattr(args, "decoder_layers", 6) dualinputs2ttransformer_base(args)
null
182,038
import copy import torch.nn as nn from fairseq import checkpoint_utils from fairseq import utils from fairseq.data.data_utils import lengths_to_padding_mask from fairseq.models import ( register_model, register_model_architecture, FairseqEncoder, ) from fairseq.models.speech_to_text import XMTransformerModel, Wav2VecEncoderWithAdaptor from fairseq.models.speech_to_text.xm_transformer import ( set_default_adaptor_args, set_default_w2v_encoder_args, ) from fairseq.models.transformer import TransformerEncoder, TransformerDecoder from fairseq.models.wav2vec import TransformerSentenceEncoderLayer from fairseq.utils import safe_hasattr from .s2t_dualinputtransformer import ( DualInputS2TTransformerModel, TransformerMultiInputDecoder, DualInputEncoder, ) def set_default_w2v_encoder_args(args): args.no_pretrained_weights = getattr(args, "no_pretrained_weights", False) args.dropout_input = getattr(args, "dropout_input", 0) args.final_dropout = getattr(args, "final_dropout", 0) args.apply_mask = getattr(args, "apply_mask", False) args.dropout = getattr(args, "dropout", 0) args.attention_dropout = getattr(args, "attention_dropout", 0) args.activation_dropout = getattr(args, "activation_dropout", 0) args.mask_length = getattr(args, "mask_length", 10) args.mask_prob = getattr(args, "mask_prob", 0.5) args.mask_selection = getattr(args, "mask_selection", "static") args.mask_other = getattr(args, "mask_other", 0) args.no_mask_overlap = getattr(args, "no_mask_overlap", False) args.mask_channel_length = getattr(args, "mask_channel_length", 10) args.mask_channel_prob = getattr(args, "mask_channel_prob", 0.5) args.mask_channel_before = getattr(args, "mask_channel_before", False) args.mask_channel_selection = getattr(args, "mask_channel_selection", "static") args.mask_channel_other = getattr(args, "mask_channel_other", 0) args.no_mask_channel_overlap = getattr(args, "no_mask_channel_overlap", False) args.freeze_finetune_updates = getattr(args, "freeze_finetune_updates", 0) args.feature_grad_mult = 0.1 args.layerdrop = getattr(args, "layerdrop", 0.0) args.normalize = getattr(args, "normalize", False) def set_default_adaptor_args(args): args.adaptor_n_layers = getattr(args, "adaptor_n_layers", 3) args.adaptor_kernel_size = getattr(args, "adaptor_kernel_size", 3) args.adaptor_stride = getattr(args, "adaptor_stride", 2) args.adaptor_layernorm = getattr(args, "adaptor_layernorm", False) def dualinputxmtransformer_base(args): # wav2vec encoder set_default_w2v_encoder_args(args) set_default_adaptor_args(args) # mbart model args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024) args.encoder_ffn_embed_dim = getattr( args, "encoder_ffn_embed_dim", 4 * args.encoder_embed_dim ) args.encoder_layers = getattr(args, "encoder_layers", 12) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16) args.encoder_normalize_before = getattr(args, "encoder_normalize_before", True) args.encoder_layerdrop = getattr(args, "encoder_layerdrop", 0) args.encoder_learned_pos = getattr(args, "encoder_learned_pos", True) args.decoder_embed_path = getattr(args, "decoder_embed_path", None) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 1024) args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 4 * 1024) args.decoder_layers = getattr(args, "decoder_layers", 12) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16) args.decoder_normalize_before = getattr(args, "decoder_normalize_before", True) args.decoder_learned_pos = getattr(args, "decoder_learned_pos", True) 
args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0.0) args.adaptive_input = getattr(args, "adaptive_input", False) args.mbart_attention_dropout = getattr(args, "mbart_attention_dropout", 0.0) args.mbart_activation_dropout = getattr(args, "mbart_activation_dropout", 0.0) args.mbart_dropout = getattr(args, "mbart_dropout", 0.1) args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None) args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0) args.share_decoder_input_output_embed = getattr( args, "share_decoder_input_output_embed", True ) args.no_token_positional_embeddings = getattr( args, "no_token_positional_embeddings", False ) args.decoder_output_dim = getattr( args, "decoder_output_dim", args.decoder_embed_dim ) args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim) args.no_scale_embedding = getattr(args, "no_scale_embedding", False) args.quant_noise_pq = getattr(args, "quant_noise_pq", 0) args.layernorm_embedding = getattr(args, "layernorm_embedding", True) args.activation_fn = getattr(args, "activation_fn", "gelu") args.pooler_activation_fn = getattr(args, "pooler_activation_fn", "tanh") args.pooler_dropout = getattr(args, "pooler_dropout", 0.0)
null
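A small sketch of the getattr-default pattern used by the architecture setters above: each field is filled in only when the caller has not already set it, so user overrides survive. set_default_adaptor_args is copied from the record for illustration; the Namespace values are made up.

from argparse import Namespace

def set_default_adaptor_args(args):
    args.adaptor_n_layers = getattr(args, "adaptor_n_layers", 3)
    args.adaptor_kernel_size = getattr(args, "adaptor_kernel_size", 3)
    args.adaptor_stride = getattr(args, "adaptor_stride", 2)
    args.adaptor_layernorm = getattr(args, "adaptor_layernorm", False)

args = Namespace(adaptor_stride=4)   # user override
set_default_adaptor_args(args)
print(args.adaptor_stride)           # 4 (override kept)
print(args.adaptor_n_layers)         # 3 (filled from the default)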
182,084
from collections import namedtuple
import os
import ast

import numpy as np

from fairseq import checkpoint_utils, options, tasks, utils

import tqdm


def main(args):


def cli_main():
    parser = options.get_interactive_generation_parser()
    parser.add_argument('--prompts', type=str, default=None, required=True)
    parser.add_argument('--output', type=str, default=None, required=True)
    parser.add_argument('--debug', action='store_true')
    parser.add_argument('--samples-per-prompt', type=int, default=1)
    args = options.parse_args_and_arch(parser)

    np.random.seed(args.seed)
    utils.set_torch_seed(args.seed)

    main(args)
null
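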
182,121
import argparse
import logging
import os

import soundfile as sf

from examples.textless_nlp.gslm.unit2speech.tts_data import (
    TacotronInputDataset,
)
from examples.textless_nlp.gslm.unit2speech.utils import (
    load_quantized_audio_from_file,
    load_tacotron,
    load_waveglow,
    synthesize_audio,
)


def get_parser():
    parser = argparse.ArgumentParser(
        description="Wav2Vec 2.0 speech generator."
    )
    parser.add_argument(
        "--quantized_unit_path",
        type=str,
        help="K-means model file path to use for inference",
    )
    parser.add_argument(
        "--tts_model_path",
        type=str,
        help="TTS model file path to use for inference",
    )
    parser.add_argument(
        "--waveglow_path",
        type=str,
        help="Path to the waveglow checkpoint (vocoder).",
    )
    parser.add_argument("--max_decoder_steps", type=int, default=2000)
    parser.add_argument("--denoiser_strength", type=float, default=0.1)
    parser.add_argument(
        "--out_audio_dir",
        type=str,
        help="Output directory to dump audio files",
    )
    return parser
null
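A hedged usage sketch for the parser in the record above, assuming the snippet's module (and its fairseq example dependencies) is importable. The file paths are placeholders, not real checkpoints.

# Build the CLI parser defined above and parse placeholder arguments
parser = get_parser()
args = parser.parse_args([
    "--quantized_unit_path", "units.txt",
    "--tts_model_path", "tacotron2.pt",
    "--waveglow_path", "waveglow.pt",
    "--out_audio_dir", "generated_audio",
    "--max_decoder_steps", "1000",
])
print(args.denoiser_strength)  # 0.1, the parser default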
182,159
import csv from pathlib import Path import zipfile from functools import reduce from multiprocessing import cpu_count from typing import Any, Dict, List, Optional, Union import io import numpy as np import pandas as pd import sentencepiece as sp from fairseq.data.audio.audio_utils import ( convert_waveform, _get_kaldi_fbank, _get_torchaudio_fbank, is_npy_data, is_sf_audio_data ) import torch import soundfile as sf from tqdm import tqdm def convert_waveform( waveform: Union[np.ndarray, torch.Tensor], sample_rate: int, normalize_volume: bool = False, to_mono: bool = False, to_sample_rate: Optional[int] = None ) -> Tuple[Union[np.ndarray, torch.Tensor], int]: """convert a waveform: - to a target sample rate - from multi-channel to mono channel - volume normalization Args: waveform (numpy.ndarray or torch.Tensor): 2D original waveform (channels x length) sample_rate (int): original sample rate normalize_volume (bool): perform volume normalization to_mono (bool): convert to mono channel if having multiple channels to_sample_rate (Optional[int]): target sample rate Returns: waveform (numpy.ndarray): converted 2D waveform (channels x length) sample_rate (float): target sample rate """ try: import torchaudio.sox_effects as ta_sox except ImportError: raise ImportError("Please install torchaudio: pip install torchaudio") effects = [] if normalize_volume: effects.append(["gain", "-n"]) if to_sample_rate is not None and to_sample_rate != sample_rate: effects.append(["rate", f"{to_sample_rate}"]) if to_mono and waveform.shape[0] > 1: effects.append(["channels", "1"]) if len(effects) > 0: is_np_input = isinstance(waveform, np.ndarray) _waveform = torch.from_numpy(waveform) if is_np_input else waveform converted, converted_sample_rate = ta_sox.apply_effects_tensor( _waveform, sample_rate, effects ) if is_np_input: converted = converted.numpy() return converted, converted_sample_rate return waveform, sample_rate def _get_kaldi_fbank( waveform: np.ndarray, sample_rate: int, n_bins=80 ) -> Optional[np.ndarray]: """Get mel-filter bank features via PyKaldi.""" try: from kaldi.feat.fbank import FbankOptions, Fbank from kaldi.feat.mel import MelBanksOptions from kaldi.feat.window import FrameExtractionOptions from kaldi.matrix import Vector mel_opts = MelBanksOptions() mel_opts.num_bins = n_bins frame_opts = FrameExtractionOptions() frame_opts.samp_freq = sample_rate opts = FbankOptions() opts.mel_opts = mel_opts opts.frame_opts = frame_opts fbank = Fbank(opts=opts) features = fbank.compute(Vector(waveform.squeeze()), 1.0).numpy() return features except ImportError: return None def _get_torchaudio_fbank( waveform: np.ndarray, sample_rate, n_bins=80 ) -> Optional[np.ndarray]: """Get mel-filter bank features via TorchAudio.""" try: import torchaudio.compliance.kaldi as ta_kaldi waveform = torch.from_numpy(waveform) features = ta_kaldi.fbank( waveform, num_mel_bins=n_bins, sample_frequency=sample_rate ) return features.numpy() except ImportError: return None def extract_fbank_features( waveform: torch.FloatTensor, sample_rate: int, output_path: Optional[Path] = None, n_mel_bins: int = 80, overwrite: bool = False, ): if output_path is not None and output_path.is_file() and not overwrite: return _waveform = convert_waveform(waveform, sample_rate, to_mono=True) # Kaldi compliance: 16-bit signed integers _waveform = _waveform * (2 ** 15) _waveform = _waveform[0].numpy() features = _get_kaldi_fbank(_waveform, sample_rate, n_mel_bins) if features is None: features = _get_torchaudio_fbank(_waveform, sample_rate, 
n_mel_bins) if features is None: raise ImportError( "Please install pyKaldi or torchaudio to enable fbank feature extraction" ) if output_path is not None: np.save(output_path.as_posix(), features) return features
null
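A hedged usage sketch for the helpers above (convert_waveform and extract_fbank_features), assuming they are importable from the snippet's module and that "utt1.wav" is a placeholder audio file; torchaudio is used only to load the waveform as a (channels, length) float tensor.

import torch
import torchaudio

waveform, sample_rate = torchaudio.load("utt1.wav")   # (channels, length) float32 tensor

# Resample to 16 kHz mono with volume normalization
waveform, sample_rate = convert_waveform(
    waveform, sample_rate,
    normalize_volume=True, to_mono=True, to_sample_rate=16_000,
)

# 80-dim log-mel filter bank features; pass output_path to cache them as .npy
features = extract_fbank_features(
    waveform, sample_rate, output_path=None, n_mel_bins=80
)
print(features.shape)  # (num_frames, 80)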
182,160
import csv
from pathlib import Path
import zipfile
from functools import reduce
from multiprocessing import cpu_count
from typing import Any, Dict, List, Optional, Union
import io

import numpy as np
import pandas as pd
import sentencepiece as sp
from fairseq.data.audio.audio_utils import (
    convert_waveform, _get_kaldi_fbank, _get_torchaudio_fbank,
    is_npy_data, is_sf_audio_data
)
import torch
import soundfile as sf
from tqdm import tqdm


def create_zip(data_root: Path, zip_path: Path):
    paths = list(data_root.glob("*.npy"))
    with zipfile.ZipFile(zip_path, "w", zipfile.ZIP_STORED) as f:
        for path in tqdm(paths):
            f.write(path, arcname=path.name)
null
182,161
import csv
from pathlib import Path
import zipfile
from functools import reduce
from multiprocessing import cpu_count
from typing import Any, Dict, List, Optional, Union
import io

import numpy as np
import pandas as pd
import sentencepiece as sp
from fairseq.data.audio.audio_utils import (
    convert_waveform, _get_kaldi_fbank, _get_torchaudio_fbank,
    is_npy_data, is_sf_audio_data
)
import torch
import soundfile as sf
from tqdm import tqdm


def is_npy_data(data: bytes) -> bool:


def is_sf_audio_data(data: bytes) -> bool:


def get_zip_manifest(
    zip_path: Path, zip_root: Optional[Path] = None, is_audio=False
):
    _zip_path = Path.joinpath(zip_root or Path(""), zip_path)
    with zipfile.ZipFile(_zip_path, mode="r") as f:
        info = f.infolist()
    paths, lengths = {}, {}
    for i in tqdm(info):
        utt_id = Path(i.filename).stem
        offset, file_size = i.header_offset + 30 + len(i.filename), i.file_size
        paths[utt_id] = f"{zip_path.as_posix()}:{offset}:{file_size}"
        with open(_zip_path, "rb") as f:
            f.seek(offset)
            byte_data = f.read(file_size)
            assert len(byte_data) > 1
            if is_audio:
                assert is_sf_audio_data(byte_data), i
            else:
                assert is_npy_data(byte_data), i
            byte_data_fp = io.BytesIO(byte_data)
            if is_audio:
                lengths[utt_id] = sf.info(byte_data_fp).frames
            else:
                lengths[utt_id] = np.load(byte_data_fp).shape[0]
    return paths, lengths
null
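A hedged round-trip sketch combining create_zip (previous record) with get_zip_manifest (above): pack per-utterance .npy features into an uncompressed zip, then recover byte offsets and frame counts. The directory name and utterance ids are made up.

from pathlib import Path
import numpy as np

feature_root = Path("fbank80")
feature_root.mkdir(exist_ok=True)
for utt_id in ("utt1", "utt2"):
    # Fake 100-frame, 80-dim feature matrices standing in for real log-mel features
    np.save(feature_root / f"{utt_id}.npy", np.random.rand(100, 80).astype("float32"))

zip_path = Path("fbank80.zip")
create_zip(feature_root, zip_path)

paths, lengths = get_zip_manifest(zip_path)
print(paths["utt1"])    # e.g. "fbank80.zip:<byte offset>:<num bytes>"
print(lengths["utt1"])  # 100 (frames along the first axis)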
182,168
import argparse import logging import os from pathlib import Path import shutil from itertools import groupby from tempfile import NamedTemporaryFile from typing import Tuple import numpy as np import pandas as pd import soundfile as sf from examples.speech_to_text.data_utils import ( create_zip, extract_fbank_features, filter_manifest_df, gen_config_yaml, gen_vocab, get_zip_manifest, load_df_from_tsv, save_df_to_tsv, cal_gcmvn_stats, ) import torch from torch.utils.data import Dataset from tqdm import tqdm from fairseq.data.audio.audio_utils import get_waveform, convert_waveform MANIFEST_COLUMNS = ["id", "audio", "n_frames", "tgt_text", "speaker"] class MUSTC(Dataset): """ Create a Dataset for MuST-C. Each item is a tuple of the form: waveform, sample_rate, source utterance, target utterance, speaker_id, utterance_id """ SPLITS = ["train", "dev", "tst-COMMON", "tst-HE"] LANGUAGES = ["de", "es", "fr", "it", "nl", "pt", "ro", "ru"] def __init__(self, root: str, lang: str, split: str) -> None: assert split in self.SPLITS and lang in self.LANGUAGES _root = Path(root) / f"en-{lang}" / "data" / split wav_root, txt_root = _root / "wav", _root / "txt" assert _root.is_dir() and wav_root.is_dir() and txt_root.is_dir() # Load audio segments try: import yaml except ImportError: print("Please install PyYAML to load the MuST-C YAML files") with open(txt_root / f"{split}.yaml") as f: segments = yaml.load(f, Loader=yaml.BaseLoader) # Load source and target utterances for _lang in ["en", lang]: with open(txt_root / f"{split}.{_lang}") as f: utterances = [r.strip() for r in f] assert len(segments) == len(utterances) for i, u in enumerate(utterances): segments[i][_lang] = u # Gather info self.data = [] for wav_filename, _seg_group in groupby(segments, lambda x: x["wav"]): wav_path = wav_root / wav_filename sample_rate = sf.info(wav_path.as_posix()).samplerate seg_group = sorted(_seg_group, key=lambda x: x["offset"]) for i, segment in enumerate(seg_group): offset = int(float(segment["offset"]) * sample_rate) n_frames = int(float(segment["duration"]) * sample_rate) _id = f"{wav_path.stem}_{i}" self.data.append( ( wav_path.as_posix(), offset, n_frames, sample_rate, segment["en"], segment[lang], segment["speaker_id"], _id, ) ) def __getitem__( self, n: int ) -> Tuple[torch.Tensor, int, str, str, str, str]: wav_path, offset, n_frames, sr, src_utt, tgt_utt, spk_id, \ utt_id = self.data[n] waveform, _ = get_waveform(wav_path, frames=n_frames, start=offset) waveform = torch.from_numpy(waveform) return waveform, sr, src_utt, tgt_utt, spk_id, utt_id def __len__(self) -> int: return len(self.data) def gen_vocab( input_path: Path, output_path_prefix: Path, model_type="bpe", vocab_size=1000, special_symbols: Optional[List[str]] = None ): # Train SentencePiece Model arguments = [ f"--input={input_path.as_posix()}", f"--model_prefix={output_path_prefix.as_posix()}", f"--model_type={model_type}", f"--vocab_size={vocab_size}", "--character_coverage=1.0", f"--num_threads={cpu_count()}", f"--unk_id={UNK_TOKEN_ID}", f"--bos_id={BOS_TOKEN_ID}", f"--eos_id={EOS_TOKEN_ID}", f"--pad_id={PAD_TOKEN_ID}", ] if special_symbols is not None: _special_symbols = ",".join(special_symbols) arguments.append(f"--user_defined_symbols={_special_symbols}") sp.SentencePieceTrainer.Train(" ".join(arguments)) # Export fairseq dictionary spm = sp.SentencePieceProcessor() spm.Load(output_path_prefix.as_posix() + ".model") vocab = {i: spm.IdToPiece(i) for i in range(spm.GetPieceSize())} assert ( vocab.get(UNK_TOKEN_ID) == UNK_TOKEN and 
vocab.get(PAD_TOKEN_ID) == PAD_TOKEN and vocab.get(BOS_TOKEN_ID) == BOS_TOKEN and vocab.get(EOS_TOKEN_ID) == EOS_TOKEN ) vocab = { i: s for i, s in vocab.items() if s not in {UNK_TOKEN, BOS_TOKEN, EOS_TOKEN, PAD_TOKEN} } with open(output_path_prefix.as_posix() + ".txt", "w") as f_out: for _, s in sorted(vocab.items(), key=lambda x: x[0]): f_out.write(f"{s} 1\n") def extract_fbank_features( waveform: torch.FloatTensor, sample_rate: int, output_path: Optional[Path] = None, n_mel_bins: int = 80, overwrite: bool = False, ): if output_path is not None and output_path.is_file() and not overwrite: return _waveform, _ = convert_waveform(waveform, sample_rate, to_mono=True) # Kaldi compliance: 16-bit signed integers _waveform = _waveform * (2 ** 15) _waveform = _waveform[0].numpy() features = _get_kaldi_fbank(_waveform, sample_rate, n_mel_bins) if features is None: features = _get_torchaudio_fbank(_waveform, sample_rate, n_mel_bins) if features is None: raise ImportError( "Please install pyKaldi or torchaudio to enable fbank feature extraction" ) if output_path is not None: np.save(output_path.as_posix(), features) return features def create_zip(data_root: Path, zip_path: Path): paths = list(data_root.glob("*.npy")) paths.extend(data_root.glob("*.flac")) with zipfile.ZipFile(zip_path, "w", zipfile.ZIP_STORED) as f: for path in tqdm(paths): f.write(path, arcname=path.name) def get_zip_manifest( zip_path: Path, zip_root: Optional[Path] = None, is_audio=False ): _zip_path = Path.joinpath(zip_root or Path(""), zip_path) with zipfile.ZipFile(_zip_path, mode="r") as f: info = f.infolist() paths, lengths = {}, {} for i in tqdm(info): utt_id = Path(i.filename).stem offset, file_size = i.header_offset + 30 + len(i.filename), i.file_size paths[utt_id] = f"{zip_path.as_posix()}:{offset}:{file_size}" with open(_zip_path, "rb") as f: f.seek(offset) byte_data = f.read(file_size) assert len(byte_data) > 1 if is_audio: assert is_sf_audio_data(byte_data), i else: assert is_npy_data(byte_data), i byte_data_fp = io.BytesIO(byte_data) if is_audio: lengths[utt_id] = sf.info(byte_data_fp).frames else: lengths[utt_id] = np.load(byte_data_fp).shape[0] return paths, lengths def gen_config_yaml( manifest_root: Path, spm_filename: Optional[str] = None, vocab_name: Optional[str] = None, yaml_filename: str = "config.yaml", specaugment_policy: Optional[str] = "lb", prepend_tgt_lang_tag: bool = False, sampling_alpha: Optional[float] = None, input_channels: Optional[int] = 1, input_feat_per_channel: Optional[int] = 80, audio_root: str = "", cmvn_type: str = "utterance", gcmvn_path: Optional[Path] = None, extra=None ): manifest_root = manifest_root.absolute() writer = S2TDataConfigWriter(manifest_root / yaml_filename) assert spm_filename is not None or vocab_name is not None vocab_name = spm_filename.replace(".model", ".txt") if vocab_name is None \ else vocab_name writer.set_vocab_filename(vocab_name) if input_channels is not None: writer.set_input_channels(input_channels) if input_feat_per_channel is not None: writer.set_input_feat_per_channel(input_feat_per_channel) specaugment_setters = { "lb": writer.set_specaugment_lb_policy, "ld": writer.set_specaugment_ld_policy, "sm": writer.set_specaugment_sm_policy, "ss": writer.set_specaugment_ss_policy, } specaugment_setter = specaugment_setters.get(specaugment_policy, None) if specaugment_setter is not None: specaugment_setter() if spm_filename is not None: writer.set_bpe_tokenizer( { "bpe": "sentencepiece", "sentencepiece_model": (manifest_root / spm_filename).as_posix(), } ) if 
prepend_tgt_lang_tag: writer.set_prepend_tgt_lang_tag(True) if sampling_alpha is not None: writer.set_sampling_alpha(sampling_alpha) if cmvn_type not in ["global", "utterance"]: raise NotImplementedError if specaugment_policy is not None: writer.set_feature_transforms( "_train", [f"{cmvn_type}_cmvn", "specaugment"] ) writer.set_feature_transforms("*", [f"{cmvn_type}_cmvn"]) if cmvn_type == "global": if gcmvn_path is None: raise ValueError("Please provide path of global cmvn file.") else: writer.set_global_cmvn(gcmvn_path.as_posix()) if len(audio_root) > 0: writer.set_audio_root(audio_root) if extra is not None: writer.set_extra(extra) writer.flush() def save_df_to_tsv(dataframe, path: Union[str, Path]): _path = path if isinstance(path, str) else path.as_posix() dataframe.to_csv( _path, sep="\t", header=True, index=False, encoding="utf-8", escapechar="\\", quoting=csv.QUOTE_NONE, ) def filter_manifest_df( df, is_train_split=False, extra_filters=None, min_n_frames=5, max_n_frames=3000 ): filters = { "no speech": df["audio"] == "", f"short speech (<{min_n_frames} frames)": df["n_frames"] < min_n_frames, "empty sentence": df["tgt_text"] == "", } if is_train_split: filters[f"long speech (>{max_n_frames} frames)"] = df["n_frames"] > max_n_frames if extra_filters is not None: filters.update(extra_filters) invalid = reduce(lambda x, y: x | y, filters.values()) valid = ~invalid print( "| " + ", ".join(f"{n}: {f.sum()}" for n, f in filters.items()) + f", total {invalid.sum()} filtered, {valid.sum()} remained." ) return df[valid] def cal_gcmvn_stats(features_list): features = np.concatenate(features_list) square_sums = (features ** 2).sum(axis=0) mean = features.mean(axis=0) features = np.subtract(features, mean) var = square_sums / features.shape[0] - mean ** 2 std = np.sqrt(np.maximum(var, 1e-8)) return {"mean": mean.astype("float32"), "std": std.astype("float32")} def convert_waveform( waveform: Union[np.ndarray, torch.Tensor], sample_rate: int, normalize_volume: bool = False, to_mono: bool = False, to_sample_rate: Optional[int] = None ) -> Tuple[Union[np.ndarray, torch.Tensor], int]: """convert a waveform: - to a target sample rate - from multi-channel to mono channel - volume normalization Args: waveform (numpy.ndarray or torch.Tensor): 2D original waveform (channels x length) sample_rate (int): original sample rate normalize_volume (bool): perform volume normalization to_mono (bool): convert to mono channel if having multiple channels to_sample_rate (Optional[int]): target sample rate Returns: waveform (numpy.ndarray): converted 2D waveform (channels x length) sample_rate (float): target sample rate """ try: import torchaudio.sox_effects as ta_sox except ImportError: raise ImportError("Please install torchaudio: pip install torchaudio") effects = [] if normalize_volume: effects.append(["gain", "-n"]) if to_sample_rate is not None and to_sample_rate != sample_rate: effects.append(["rate", f"{to_sample_rate}"]) if to_mono and waveform.shape[0] > 1: effects.append(["channels", "1"]) if len(effects) > 0: is_np_input = isinstance(waveform, np.ndarray) _waveform = torch.from_numpy(waveform) if is_np_input else waveform converted, converted_sample_rate = ta_sox.apply_effects_tensor( _waveform, sample_rate, effects ) if is_np_input: converted = converted.numpy() return converted, converted_sample_rate return waveform, sample_rate def process(args): root = Path(args.data_root).absolute() for lang in MUSTC.LANGUAGES: cur_root = root / f"en-{lang}" if not cur_root.is_dir(): print(f"{cur_root.as_posix()} 
does not exist. Skipped.") continue # Extract features audio_root = cur_root / ("flac" if args.use_audio_input else "fbank80") audio_root.mkdir(exist_ok=True) for split in MUSTC.SPLITS: print(f"Fetching split {split}...") dataset = MUSTC(root.as_posix(), lang, split) if args.use_audio_input: print("Converting audios...") for waveform, sample_rate, _, _, _, utt_id in tqdm(dataset): tgt_sample_rate = 16_000 _wavform, _ = convert_waveform( waveform, sample_rate, to_mono=True, to_sample_rate=tgt_sample_rate ) sf.write( (audio_root / f"{utt_id}.flac").as_posix(), _wavform.numpy(), tgt_sample_rate ) else: print("Extracting log mel filter bank features...") gcmvn_feature_list = [] if split == 'train' and args.cmvn_type == "global": print("And estimating cepstral mean and variance stats...") for waveform, sample_rate, _, _, _, utt_id in tqdm(dataset): features = extract_fbank_features( waveform, sample_rate, audio_root / f"{utt_id}.npy" ) if split == 'train' and args.cmvn_type == "global": if len(gcmvn_feature_list) < args.gcmvn_max_num: gcmvn_feature_list.append(features) if split == 'train' and args.cmvn_type == "global": # Estimate and save cmv stats = cal_gcmvn_stats(gcmvn_feature_list) with open(cur_root / "gcmvn.npz", "wb") as f: np.savez(f, mean=stats["mean"], std=stats["std"]) # Pack features into ZIP zip_path = cur_root / f"{audio_root.name}.zip" print("ZIPing audios/features...") create_zip(audio_root, zip_path) print("Fetching ZIP manifest...") audio_paths, audio_lengths = get_zip_manifest(zip_path) # Generate TSV manifest print("Generating manifest...") train_text = [] for split in MUSTC.SPLITS: is_train_split = split.startswith("train") manifest = {c: [] for c in MANIFEST_COLUMNS} dataset = MUSTC(args.data_root, lang, split) for _, _, src_utt, tgt_utt, speaker_id, utt_id in tqdm(dataset): manifest["id"].append(utt_id) manifest["audio"].append(audio_paths[utt_id]) manifest["n_frames"].append(audio_lengths[utt_id]) manifest["tgt_text"].append( src_utt if args.task == "asr" else tgt_utt ) manifest["speaker"].append(speaker_id) if is_train_split: train_text.extend(manifest["tgt_text"]) df = pd.DataFrame.from_dict(manifest) df = filter_manifest_df(df, is_train_split=is_train_split) save_df_to_tsv(df, cur_root / f"{split}_{args.task}.tsv") # Generate vocab v_size_str = "" if args.vocab_type == "char" else str(args.vocab_size) spm_filename_prefix = f"spm_{args.vocab_type}{v_size_str}_{args.task}" with NamedTemporaryFile(mode="w") as f: for t in train_text: f.write(t + "\n") gen_vocab( Path(f.name), cur_root / spm_filename_prefix, args.vocab_type, args.vocab_size, ) # Generate config YAML if args.use_audio_input: gen_config_yaml( cur_root, spm_filename=spm_filename_prefix + ".model", yaml_filename=f"config_{args.task}.yaml", specaugment_policy=None, extra={"use_audio_input": True} ) else: gen_config_yaml( cur_root, spm_filename=spm_filename_prefix + ".model", yaml_filename=f"config_{args.task}.yaml", specaugment_policy="lb", cmvn_type=args.cmvn_type, gcmvn_path=( cur_root / "gcmvn.npz" if args.cmvn_type == "global" else None ), ) # Clean up shutil.rmtree(audio_root)
null
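A hedged sketch of the global CMVN step in the record above: accumulate per-utterance feature matrices from the training split, estimate mean/std once with cal_gcmvn_stats, and save them. The random features stand in for real log-mel frames, and the final normalization call is an assumption about how the stats are consumed downstream.

import numpy as np

feature_list = [np.random.rand(200, 80).astype("float32") for _ in range(10)]

stats = cal_gcmvn_stats(feature_list)   # {"mean": (80,), "std": (80,)}
np.savez("gcmvn.npz", mean=stats["mean"], std=stats["std"])

# Apply the stats to one utterance (assumed usage, not shown in the record)
features = feature_list[0]
normalized = (features - stats["mean"]) / stats["std"]
print(normalized.mean(axis=0)[:3])      # roughly zero per dimension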
182,183
import argparse
import glob
from subprocess import check_call

import numpy as np


def score(sim, fwd_mean, bwd_mean, margin):


def score_candidates(
    sim_mat, candidate_inds, fwd_mean, bwd_mean, margin, verbose=False
):
    print(" - scoring {:d} candidates".format(sim_mat.shape[0]))
    scores = np.zeros(candidate_inds.shape)
    for i in range(scores.shape[0]):
        for j in range(scores.shape[1]):
            k = int(candidate_inds[i, j])
            scores[i, j] = score(sim_mat[i, j], fwd_mean[i], bwd_mean[k], margin)
    return scores
null
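A self-contained sketch of margin-based candidate scoring in the spirit of the record above. The concrete score function here (a ratio margin against the average of the forward/backward neighborhood means) is an assumption for illustration, since its body is elided in the snippet; the nested loop mirrors score_candidates, and all arrays are random placeholders.

import numpy as np

def score(sim, fwd_mean, bwd_mean, margin):
    # Assumed implementation: margin of the raw similarity against the
    # average of the forward and backward kNN means
    return margin(sim, (fwd_mean + bwd_mean) / 2)

rng = np.random.default_rng(0)
sim_mat = rng.random((4, 3))              # similarity of 4 sources to their 3 candidates each
candidate_inds = rng.integers(0, 10, size=(4, 3))
fwd_mean = rng.random(4)                  # mean kNN similarity per source
bwd_mean = rng.random(10)                 # mean kNN similarity per target

ratio_margin = lambda a, b: a / b
scores = np.zeros(candidate_inds.shape)
for i in range(scores.shape[0]):
    for j in range(scores.shape[1]):
        k = int(candidate_inds[i, j])
        scores[i, j] = score(sim_mat[i, j], fwd_mean[i], bwd_mean[k], ratio_margin)
print(scores.shape)                       # (4, 3)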
182,187
import logging
from typing import Any, Dict, Optional, List, Tuple

import torch
import torch.nn as nn
from fairseq import utils
from fairseq.models import register_model, register_model_architecture
from fairseq.models.transformer import (
    DEFAULT_MAX_SOURCE_POSITIONS,
    DEFAULT_MAX_TARGET_POSITIONS,
    TransformerDecoder,
    TransformerEncoder,
    TransformerModel,
    base_architecture,
)
from torch import Tensor


def transformer_pointer_generator(args):


def transformer_pointer_generator_wmt_en_de(args):
    transformer_pointer_generator(args)
null
182,197
import ast from collections import namedtuple from dataclasses import dataclass, field from enum import Enum, auto import hydra from hydra.core.config_store import ConfigStore import logging import math import os from omegaconf import OmegaConf from typing import Optional import sys import editdistance import torch from hydra.core.hydra_config import HydraConfig from fairseq import checkpoint_utils, progress_bar, tasks, utils from fairseq.data.data_utils import post_process from fairseq.dataclass.configs import FairseqDataclass, FairseqConfig from fairseq.logging.meters import StopwatchMeter from omegaconf import open_dict from examples.speech_recognition.kaldi.kaldi_decoder import KaldiDecoderConfig logger = logging.getLogger(__name__) class DecoderType(Enum): VITERBI = auto() KENLM = auto() FAIRSEQ = auto() KALDI = auto() class UnsupGenerateConfig(FairseqDataclass): fairseq: FairseqConfig = FairseqConfig() lm_weight: float = field( default=2.0, metadata={"help": "language model weight"}, ) w2l_decoder: DecoderType = field( default=DecoderType.VITERBI, metadata={"help": "type of decoder to use"}, ) kaldi_decoder_config: Optional[KaldiDecoderConfig] = None lexicon: Optional[str] = field( default=None, metadata={ "help": "path to lexicon. This is also used to 'phonemize' for unsupvised param tuning" }, ) lm_model: Optional[str] = field( default=None, metadata={"help": "path to language model (kenlm or fairseq)"}, ) unit_lm: bool = field( default=False, metadata={"help": "whether to use unit lm"}, ) beam_threshold: float = field( default=50.0, metadata={"help": "beam score threshold"}, ) beam_size_token: float = field( default=100.0, metadata={"help": "max tokens per beam"}, ) beam: int = field( default=5, metadata={"help": "decoder beam size"}, ) nbest: int = field( default=1, metadata={"help": "number of results to return"}, ) word_score: float = field( default=1.0, metadata={"help": "word score to add at end of word"}, ) unk_weight: float = field( default=-math.inf, metadata={"help": "unknown token weight"}, ) sil_weight: float = field( default=0.0, metadata={"help": "silence token weight"}, ) targets: Optional[str] = field( default=None, metadata={"help": "extension of ground truth labels to compute UER"}, ) results_path: Optional[str] = field( default=None, metadata={"help": "where to store results"}, ) post_process: Optional[str] = field( default=None, metadata={"help": "how to post process results"}, ) vocab_usage_power: float = field( default=2, metadata={"help": "for unsupervised param tuning"}, ) viterbi_transcript: Optional[str] = field( default=None, metadata={"help": "for unsupervised param tuning"}, ) min_lm_ppl: float = field( default=0, metadata={"help": "for unsupervised param tuning"}, ) min_vt_uer: float = field( default=0, metadata={"help": "for unsupervised param tuning"}, ) blank_weight: float = field( default=0, metadata={"help": "value to add or set for blank emission"}, ) blank_mode: str = field( default="set", metadata={ "help": "can be add or set, how to modify blank emission with blank weight" }, ) sil_is_blank: bool = field( default=False, metadata={"help": "if true, <SIL> token is same as blank token"}, ) unsupervised_tuning: bool = field( default=False, metadata={ "help": "if true, returns a score based on unsupervised param selection metric instead of UER" }, ) is_ax: bool = field( default=False, metadata={ "help": "if true, assumes we are using ax for tuning and returns a tuple for ax to consume" }, ) def get_dataset_itr(cfg, task): return 
task.get_batch_iterator( dataset=task.dataset(cfg.fairseq.dataset.gen_subset), max_tokens=cfg.fairseq.dataset.max_tokens, max_sentences=cfg.fairseq.dataset.batch_size, max_positions=(sys.maxsize, sys.maxsize), ignore_invalid_inputs=cfg.fairseq.dataset.skip_invalid_size_inputs_valid_test, required_batch_size_multiple=cfg.fairseq.dataset.required_batch_size_multiple, num_shards=cfg.fairseq.dataset.num_shards, shard_id=cfg.fairseq.dataset.shard_id, num_workers=cfg.fairseq.dataset.num_workers, data_buffer_size=cfg.fairseq.dataset.data_buffer_size, ).next_epoch_itr(shuffle=False) def process_predictions( cfg: UnsupGenerateConfig, hypos, tgt_dict, target_tokens, res_files, ): retval = [] word_preds = [] transcriptions = [] dec_scores = [] for i, hypo in enumerate(hypos[: min(len(hypos), cfg.nbest)]): if torch.is_tensor(hypo["tokens"]): tokens = hypo["tokens"].int().cpu() tokens = tokens[tokens >= tgt_dict.nspecial] hyp_pieces = tgt_dict.string(tokens) else: hyp_pieces = " ".join(hypo["tokens"]) if "words" in hypo and len(hypo["words"]) > 0: hyp_words = " ".join(hypo["words"]) else: hyp_words = post_process(hyp_pieces, cfg.post_process) to_write = {} if res_files is not None: to_write[res_files["hypo.units"]] = hyp_pieces to_write[res_files["hypo.words"]] = hyp_words tgt_words = "" if target_tokens is not None: if isinstance(target_tokens, str): tgt_pieces = tgt_words = target_tokens else: tgt_pieces = tgt_dict.string(target_tokens) tgt_words = post_process(tgt_pieces, cfg.post_process) if res_files is not None: to_write[res_files["ref.units"]] = tgt_pieces to_write[res_files["ref.words"]] = tgt_words if not cfg.fairseq.common_eval.quiet: logger.info(f"HYPO {i}:" + hyp_words) if tgt_words: logger.info("TARGET:" + tgt_words) if "am_score" in hypo and "lm_score" in hypo: logger.info( f"DECODER AM SCORE: {hypo['am_score']}, DECODER LM SCORE: {hypo['lm_score']}, DECODER SCORE: {hypo['score']}" ) elif "score" in hypo: logger.info(f"DECODER SCORE: {hypo['score']}") logger.info("___________________") hyp_words_arr = hyp_words.split() tgt_words_arr = tgt_words.split() retval.append( ( editdistance.eval(hyp_words_arr, tgt_words_arr), len(hyp_words_arr), len(tgt_words_arr), hyp_pieces, hyp_words, ) ) word_preds.append(hyp_words_arr) transcriptions.append(to_write) dec_scores.append(-hypo.get("score", 0)) # negate cuz kaldi returns NLL if len(retval) > 1: best = None for r, t in zip(retval, transcriptions): if best is None or r[0] < best[0][0]: best = r, t for dest, tran in best[1].items(): print(tran, file=dest) dest.flush() return best[0] assert len(transcriptions) == 1 for dest, tran in transcriptions[0].items(): print(tran, file=dest) return retval[0] def prepare_result_files(cfg: UnsupGenerateConfig): def get_res_file(file_prefix): if cfg.fairseq.dataset.num_shards > 1: file_prefix = f"{cfg.fairseq.dataset.shard_id}_{file_prefix}" path = os.path.join( cfg.results_path, "{}{}.txt".format( cfg.fairseq.dataset.gen_subset, file_prefix, ), ) return open(path, "w", buffering=1) if not cfg.results_path: return None return { "hypo.words": get_res_file(""), "hypo.units": get_res_file("_units"), "ref.words": get_res_file("_ref"), "ref.units": get_res_file("_ref_units"), "hypo.nbest.words": get_res_file("_nbest_words"), } GenResult = namedtuple( "GenResult", [ "count", "errs_t", "gen_timer", "lengths_hyp_unit_t", "lengths_hyp_t", "lengths_t", "lm_score_t", "num_feats", "num_sentences", "num_symbols", "vt_err_t", "vt_length_t", ], ) def gen_hypos(generator, models, num_feats, sample, task, use_cuda): sample = 
utils.move_to_cuda(sample) if use_cuda else sample if "features" in sample["net_input"]: sample["net_input"]["dense_x_only"] = True num_feats += ( sample["net_input"]["features"].shape[0] * sample["net_input"]["features"].shape[1] ) hypos = task.inference_step(generator, models, sample, None) return hypos, num_feats class StopwatchMeter(Meter): """Computes the sum/avg duration of some event in seconds""" def __init__(self, round: Optional[int] = None): self.round = round self.sum = 0 self.n = 0 self.start_time = None def start(self): self.start_time = time.perf_counter() def stop(self, n=1, prehook=None): if self.start_time is not None: if prehook is not None: prehook() delta = time.perf_counter() - self.start_time self.sum = self.sum + delta self.n = type_as(self.n, n) + n def reset(self): self.sum = 0 # cumulative time during which stopwatch was active self.n = 0 # total n across all start/stop self.start() def state_dict(self): return { "sum": self.sum, "n": self.n, "round": self.round, } def load_state_dict(self, state_dict): self.sum = state_dict["sum"] self.n = state_dict["n"] self.start_time = None self.round = state_dict.get("round", None) def avg(self): return self.sum / self.n if self.n > 0 else self.sum def elapsed_time(self): if self.start_time is None: return 0.0 return time.perf_counter() - self.start_time def smoothed_value(self) -> float: val = self.avg if self.sum > 0 else self.elapsed_time if self.round is not None and val is not None: val = safe_round(val, self.round) return val class W2lViterbiDecoder(W2lDecoder): def __init__(self, args, tgt_dict): super().__init__(args, tgt_dict) def decode(self, emissions): B, T, N = emissions.size() hypos = [] if self.asg_transitions is None: transitions = torch.FloatTensor(N, N).zero_() else: transitions = torch.FloatTensor(self.asg_transitions).view(N, N) viterbi_path = torch.IntTensor(B, T) workspace = torch.ByteTensor(CpuViterbiPath.get_workspace_size(B, T, N)) CpuViterbiPath.compute( B, T, N, get_data_ptr_as_bytes(emissions), get_data_ptr_as_bytes(transitions), get_data_ptr_as_bytes(viterbi_path), get_data_ptr_as_bytes(workspace), ) return [ [{"tokens": self.get_tokens(viterbi_path[b].tolist()), "score": 0}] for b in range(B) ] class W2lKenLMDecoder(W2lDecoder): def __init__(self, args, tgt_dict): super().__init__(args, tgt_dict) self.unit_lm = getattr(args, "unit_lm", False) if args.lexicon: self.lexicon = load_words(args.lexicon) self.word_dict = create_word_dict(self.lexicon) self.unk_word = self.word_dict.get_index("<unk>") self.lm = KenLM(args.kenlm_model, self.word_dict) self.trie = Trie(self.vocab_size, self.silence) start_state = self.lm.start(False) for i, (word, spellings) in enumerate(self.lexicon.items()): word_idx = self.word_dict.get_index(word) _, score = self.lm.score(start_state, word_idx) for spelling in spellings: spelling_idxs = [tgt_dict.index(token) for token in spelling] assert ( tgt_dict.unk() not in spelling_idxs ), f"{spelling} {spelling_idxs}" self.trie.insert(spelling_idxs, word_idx, score) self.trie.smear(SmearingMode.MAX) self.decoder_opts = LexiconDecoderOptions( beam_size=args.beam, beam_size_token=int(getattr(args, "beam_size_token", len(tgt_dict))), beam_threshold=args.beam_threshold, lm_weight=args.lm_weight, word_score=args.word_score, unk_score=args.unk_weight, sil_score=args.sil_weight, log_add=False, criterion_type=self.criterion_type, ) if self.asg_transitions is None: N = 768 # self.asg_transitions = torch.FloatTensor(N, N).zero_() self.asg_transitions = [] self.decoder = LexiconDecoder( 
self.decoder_opts, self.trie, self.lm, self.silence, self.blank, self.unk_word, self.asg_transitions, self.unit_lm, ) else: assert args.unit_lm, "lexicon free decoding can only be done with a unit language model" from flashlight.lib.text.decoder import LexiconFreeDecoder, LexiconFreeDecoderOptions d = {w: [[w]] for w in tgt_dict.symbols} self.word_dict = create_word_dict(d) self.lm = KenLM(args.kenlm_model, self.word_dict) self.decoder_opts = LexiconFreeDecoderOptions( beam_size=args.beam, beam_size_token=int(getattr(args, "beam_size_token", len(tgt_dict))), beam_threshold=args.beam_threshold, lm_weight=args.lm_weight, sil_score=args.sil_weight, log_add=False, criterion_type=self.criterion_type, ) self.decoder = LexiconFreeDecoder( self.decoder_opts, self.lm, self.silence, self.blank, [] ) def get_timesteps(self, token_idxs: List[int]) -> List[int]: """Returns frame numbers corresponding to every non-blank token. Parameters ---------- token_idxs : List[int] IDs of decoded tokens. Returns ------- List[int] Frame numbers corresponding to every non-blank token. """ timesteps = [] for i, token_idx in enumerate(token_idxs): if token_idx == self.blank: continue if i == 0 or token_idx != token_idxs[i-1]: timesteps.append(i) return timesteps def decode(self, emissions): B, T, N = emissions.size() hypos = [] for b in range(B): emissions_ptr = emissions.data_ptr() + 4 * b * emissions.stride(0) results = self.decoder.decode(emissions_ptr, T, N) nbest_results = results[: self.nbest] hypos.append( [ { "tokens": self.get_tokens(result.tokens), "score": result.score, "timesteps": self.get_timesteps(result.tokens), "words": [ self.word_dict.get_entry(x) for x in result.words if x >= 0 ], } for result in nbest_results ] ) return hypos class W2lFairseqLMDecoder(W2lDecoder): def __init__(self, args, tgt_dict): super().__init__(args, tgt_dict) self.unit_lm = getattr(args, "unit_lm", False) self.lexicon = load_words(args.lexicon) if args.lexicon else None self.idx_to_wrd = {} checkpoint = torch.load(args.kenlm_model, map_location="cpu") if "cfg" in checkpoint and checkpoint["cfg"] is not None: lm_args = checkpoint["cfg"] else: lm_args = convert_namespace_to_omegaconf(checkpoint["args"]) with open_dict(lm_args.task): lm_args.task.data = osp.dirname(args.kenlm_model) task = tasks.setup_task(lm_args.task) model = task.build_model(lm_args.model) model.load_state_dict(checkpoint["model"], strict=False) self.trie = Trie(self.vocab_size, self.silence) self.word_dict = task.dictionary self.unk_word = self.word_dict.unk() self.lm = FairseqLM(self.word_dict, model) if self.lexicon: start_state = self.lm.start(False) for i, (word, spellings) in enumerate(self.lexicon.items()): if self.unit_lm: word_idx = i self.idx_to_wrd[i] = word score = 0 else: word_idx = self.word_dict.index(word) _, score = self.lm.score(start_state, word_idx, no_cache=True) for spelling in spellings: spelling_idxs = [tgt_dict.index(token) for token in spelling] assert ( tgt_dict.unk() not in spelling_idxs ), f"{spelling} {spelling_idxs}" self.trie.insert(spelling_idxs, word_idx, score) self.trie.smear(SmearingMode.MAX) self.decoder_opts = LexiconDecoderOptions( beam_size=args.beam, beam_size_token=int(getattr(args, "beam_size_token", len(tgt_dict))), beam_threshold=args.beam_threshold, lm_weight=args.lm_weight, word_score=args.word_score, unk_score=args.unk_weight, sil_score=args.sil_weight, log_add=False, criterion_type=self.criterion_type, ) self.decoder = LexiconDecoder( self.decoder_opts, self.trie, self.lm, self.silence, self.blank, 
self.unk_word, [], self.unit_lm, ) else: assert args.unit_lm, "lexicon free decoding can only be done with a unit language model" from flashlight.lib.text.decoder import LexiconFreeDecoder, LexiconFreeDecoderOptions d = {w: [[w]] for w in tgt_dict.symbols} self.word_dict = create_word_dict(d) self.lm = KenLM(args.kenlm_model, self.word_dict) self.decoder_opts = LexiconFreeDecoderOptions( beam_size=args.beam, beam_size_token=int(getattr(args, "beam_size_token", len(tgt_dict))), beam_threshold=args.beam_threshold, lm_weight=args.lm_weight, sil_score=args.sil_weight, log_add=False, criterion_type=self.criterion_type, ) self.decoder = LexiconFreeDecoder( self.decoder_opts, self.lm, self.silence, self.blank, [] ) def decode(self, emissions): B, T, N = emissions.size() hypos = [] def idx_to_word(idx): if self.unit_lm: return self.idx_to_wrd[idx] else: return self.word_dict[idx] def make_hypo(result): hypo = {"tokens": self.get_tokens(result.tokens), "score": result.score} if self.lexicon: hypo["words"] = [idx_to_word(x) for x in result.words if x >= 0] return hypo for b in range(B): emissions_ptr = emissions.data_ptr() + 4 * b * emissions.stride(0) results = self.decoder.decode(emissions_ptr, T, N) nbest_results = results[: self.nbest] hypos.append([make_hypo(result) for result in nbest_results]) self.lm.empty_cache() return hypos class KaldiDecoder(object): def __init__( self, cfg: KaldiDecoderConfig, beam: int, nbest: int = 1, ): try: from kaldi.asr import FasterRecognizer, LatticeFasterRecognizer from kaldi.base import set_verbose_level from kaldi.decoder import ( FasterDecoder, FasterDecoderOptions, LatticeFasterDecoder, LatticeFasterDecoderOptions, ) from kaldi.lat.functions import DeterminizeLatticePhonePrunedOptions from kaldi.fstext import read_fst_kaldi, SymbolTable except: warnings.warn( "pykaldi is required for this functionality. 
Please install from https://github.com/pykaldi/pykaldi" ) # set_verbose_level(2) self.acoustic_scale = cfg.acoustic_scale self.nbest = nbest if cfg.hlg_graph_path is None: assert ( cfg.kaldi_initializer_config is not None ), "Must provide hlg graph path or kaldi initializer config" cfg.hlg_graph_path = initalize_kaldi(cfg.kaldi_initializer_config) assert os.path.exists(cfg.hlg_graph_path), cfg.hlg_graph_path if cfg.is_lattice: self.dec_cls = LatticeFasterDecoder opt_cls = LatticeFasterDecoderOptions self.rec_cls = LatticeFasterRecognizer else: assert self.nbest == 1, "nbest > 1 requires lattice decoder" self.dec_cls = FasterDecoder opt_cls = FasterDecoderOptions self.rec_cls = FasterRecognizer self.decoder_options = opt_cls() self.decoder_options.beam = beam self.decoder_options.max_active = cfg.max_active self.decoder_options.beam_delta = cfg.beam_delta self.decoder_options.hash_ratio = cfg.hash_ratio if cfg.is_lattice: self.decoder_options.lattice_beam = cfg.lattice_beam self.decoder_options.prune_interval = cfg.prune_interval self.decoder_options.determinize_lattice = cfg.determinize_lattice self.decoder_options.prune_scale = cfg.prune_scale det_opts = DeterminizeLatticePhonePrunedOptions() det_opts.max_mem = cfg.max_mem det_opts.phone_determinize = cfg.phone_determinize det_opts.word_determinize = cfg.word_determinize det_opts.minimize = cfg.minimize self.decoder_options.det_opts = det_opts self.output_symbols = {} with open(cfg.output_dict, "r") as f: for line in f: items = line.rstrip().split() assert len(items) == 2 self.output_symbols[int(items[1])] = items[0] logger.info(f"Loading FST from {cfg.hlg_graph_path}") self.fst = read_fst_kaldi(cfg.hlg_graph_path) self.symbol_table = SymbolTable.read_text(cfg.output_dict) self.executor = ThreadPoolExecutor(max_workers=cfg.num_threads) def generate(self, models, sample, **unused): """Generate a batch of inferences.""" # model.forward normally channels prev_output_tokens into the decoder # separately, but SequenceGenerator directly calls model.encoder encoder_input = { k: v for k, v in sample["net_input"].items() if k != "prev_output_tokens" } emissions, padding = self.get_emissions(models, encoder_input) return self.decode(emissions, padding) def get_emissions(self, models, encoder_input): """Run encoder and normalize emissions""" model = models[0] all_encoder_out = [m(**encoder_input) for m in models] if len(all_encoder_out) > 1: if "encoder_out" in all_encoder_out[0]: encoder_out = { "encoder_out": sum(e["encoder_out"] for e in all_encoder_out) / len(all_encoder_out), "encoder_padding_mask": all_encoder_out[0]["encoder_padding_mask"], } padding = encoder_out["encoder_padding_mask"] else: encoder_out = { "logits": sum(e["logits"] for e in all_encoder_out) / len(all_encoder_out), "padding_mask": all_encoder_out[0]["padding_mask"], } padding = encoder_out["padding_mask"] else: encoder_out = all_encoder_out[0] padding = ( encoder_out["padding_mask"] if "padding_mask" in encoder_out else encoder_out["encoder_padding_mask"] ) if hasattr(model, "get_logits"): emissions = model.get_logits(encoder_out, normalize=True) else: emissions = model.get_normalized_probs(encoder_out, log_probs=True) return ( emissions.cpu().float().transpose(0, 1), padding.cpu() if padding is not None and padding.any() else None, ) def decode_one(self, logits, padding): from kaldi.matrix import Matrix decoder = self.dec_cls(self.fst, self.decoder_options) asr = self.rec_cls( decoder, self.symbol_table, acoustic_scale=self.acoustic_scale ) if padding is not None: logits = 
logits[~padding] mat = Matrix(logits.numpy()) out = asr.decode(mat) if self.nbest > 1: from kaldi.fstext import shortestpath from kaldi.fstext.utils import ( convert_compact_lattice_to_lattice, convert_lattice_to_std, convert_nbest_to_list, get_linear_symbol_sequence, ) lat = out["lattice"] sp = shortestpath(lat, nshortest=self.nbest) sp = convert_compact_lattice_to_lattice(sp) sp = convert_lattice_to_std(sp) seq = convert_nbest_to_list(sp) results = [] for s in seq: _, o, w = get_linear_symbol_sequence(s) words = list(self.output_symbols[z] for z in o) results.append( { "tokens": words, "words": words, "score": w.value, "emissions": logits, } ) return results else: words = out["text"].split() return [ { "tokens": words, "words": words, "score": out["likelihood"], "emissions": logits, } ] def decode(self, emissions, padding): if padding is None: padding = [None] * len(emissions) ret = list( map( lambda e, p: self.executor.submit(self.decode_one, e, p), emissions, padding, ) ) return ret def generate(cfg: UnsupGenerateConfig, models, saved_cfg, use_cuda): task = tasks.setup_task(cfg.fairseq.task) saved_cfg.task.labels = cfg.fairseq.task.labels task.load_dataset(cfg.fairseq.dataset.gen_subset, task_cfg=saved_cfg.task) # Set dictionary tgt_dict = task.target_dictionary logger.info( "| {} {} {} examples".format( cfg.fairseq.task.data, cfg.fairseq.dataset.gen_subset, len(task.dataset(cfg.fairseq.dataset.gen_subset)), ) ) # Load dataset (possibly sharded) itr = get_dataset_itr(cfg, task) # Initialize generator gen_timer = StopwatchMeter() def build_generator(cfg: UnsupGenerateConfig): w2l_decoder = cfg.w2l_decoder if w2l_decoder == DecoderType.VITERBI: from examples.speech_recognition.w2l_decoder import W2lViterbiDecoder return W2lViterbiDecoder(cfg, task.target_dictionary) elif w2l_decoder == DecoderType.KENLM: from examples.speech_recognition.w2l_decoder import W2lKenLMDecoder return W2lKenLMDecoder(cfg, task.target_dictionary) elif w2l_decoder == DecoderType.FAIRSEQ: from examples.speech_recognition.w2l_decoder import W2lFairseqLMDecoder return W2lFairseqLMDecoder(cfg, task.target_dictionary) elif w2l_decoder == DecoderType.KALDI: from examples.speech_recognition.kaldi.kaldi_decoder import KaldiDecoder assert cfg.kaldi_decoder_config is not None return KaldiDecoder( cfg.kaldi_decoder_config, cfg.beam, ) else: raise NotImplementedError( "only wav2letter decoders with (viterbi, kenlm, fairseqlm) options are supported at the moment but found " + str(w2l_decoder) ) generator = build_generator(cfg) kenlm = None fairseq_lm = None if cfg.lm_model is not None: import kenlm kenlm = kenlm.Model(cfg.lm_model) num_sentences = 0 if cfg.results_path is not None and not os.path.exists(cfg.results_path): os.makedirs(cfg.results_path) res_files = prepare_result_files(cfg) errs_t = 0 lengths_hyp_t = 0 lengths_hyp_unit_t = 0 lengths_t = 0 count = 0 num_feats = 0 all_hyp_pieces = [] all_hyp_words = [] num_symbols = ( len([s for s in tgt_dict.symbols if not s.startswith("madeup")]) - tgt_dict.nspecial ) targets = None if cfg.targets is not None: tgt_path = os.path.join( cfg.fairseq.task.data, cfg.fairseq.dataset.gen_subset + "." 
+ cfg.targets ) if os.path.exists(tgt_path): with open(tgt_path, "r") as f: targets = f.read().splitlines() viterbi_transcript = None if cfg.viterbi_transcript is not None and len(cfg.viterbi_transcript) > 0: logger.info(f"loading viterbi transcript from {cfg.viterbi_transcript}") with open(cfg.viterbi_transcript, "r") as vf: viterbi_transcript = vf.readlines() viterbi_transcript = [v.rstrip().split() for v in viterbi_transcript] gen_timer.start() start = 0 end = len(itr) hypo_futures = None if cfg.w2l_decoder == DecoderType.KALDI: logger.info("Extracting features") hypo_futures = [] samples = [] with progress_bar.build_progress_bar(cfg.fairseq.common, itr) as t: for i, sample in enumerate(t): if "net_input" not in sample or i < start or i >= end: continue if "padding_mask" not in sample["net_input"]: sample["net_input"]["padding_mask"] = None hypos, num_feats = gen_hypos( generator, models, num_feats, sample, task, use_cuda ) hypo_futures.append(hypos) samples.append(sample) itr = list(zip(hypo_futures, samples)) start = 0 end = len(itr) logger.info("Finished extracting features") with progress_bar.build_progress_bar(cfg.fairseq.common, itr) as t: for i, sample in enumerate(t): if i < start or i >= end: continue if hypo_futures is not None: hypos, sample = sample hypos = [h.result() for h in hypos] else: if "net_input" not in sample: continue hypos, num_feats = gen_hypos( generator, models, num_feats, sample, task, use_cuda ) for i, sample_id in enumerate(sample["id"].tolist()): if targets is not None: target_tokens = targets[sample_id] elif "target" in sample or "target_label" in sample: toks = ( sample["target"][i, :] if "target_label" not in sample else sample["target_label"][i, :] ) target_tokens = utils.strip_pad(toks, tgt_dict.pad()).int().cpu() else: target_tokens = None # Process top predictions ( errs, length_hyp, length, hyp_pieces, hyp_words, ) = process_predictions( cfg, hypos[i], tgt_dict, target_tokens, res_files, ) errs_t += errs lengths_hyp_t += length_hyp lengths_hyp_unit_t += ( len(hyp_pieces) if len(hyp_pieces) > 0 else len(hyp_words) ) lengths_t += length count += 1 all_hyp_pieces.append(hyp_pieces) all_hyp_words.append(hyp_words) num_sentences += ( sample["nsentences"] if "nsentences" in sample else sample["id"].numel() ) lm_score_sum = 0 if kenlm is not None: if cfg.unit_lm: lm_score_sum = sum(kenlm.score(w) for w in all_hyp_pieces) else: lm_score_sum = sum(kenlm.score(w) for w in all_hyp_words) elif fairseq_lm is not None: lm_score_sum = sum(fairseq_lm.score([h.split() for h in all_hyp_words])[0]) vt_err_t = 0 vt_length_t = 0 if viterbi_transcript is not None: unit_hyps = [] if cfg.targets is not None and cfg.lexicon is not None: lex = {} with open(cfg.lexicon, "r") as lf: for line in lf: items = line.rstrip().split() lex[items[0]] = items[1:] for h in all_hyp_pieces: hyp_ws = [] for w in h.split(): assert w in lex, w hyp_ws.extend(lex[w]) unit_hyps.append(hyp_ws) else: unit_hyps.extend([h.split() for h in all_hyp_words]) vt_err_t = sum( editdistance.eval(vt, h) for vt, h in zip(viterbi_transcript, unit_hyps) ) vt_length_t = sum(len(h) for h in viterbi_transcript) if res_files is not None: for r in res_files.values(): r.close() gen_timer.stop(lengths_hyp_t) return GenResult( count, errs_t, gen_timer, lengths_hyp_unit_t, lengths_hyp_t, lengths_t, lm_score_sum, num_feats, num_sentences, num_symbols, vt_err_t, vt_length_t, )
null
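A minimal sketch of the WER bookkeeping used by generate() and process_predictions() above: word-level edit distance summed over utterances, divided by the total reference length. The hypothesis/reference pairs are made up; only the editdistance package is required.

import editdistance

pairs = [
    ("a cat sat on the mat", "the cat sat on the mat"),
    ("hello word", "hello world"),
]

errs_t, lengths_t = 0, 0
for hyp, ref in pairs:
    hyp_words, ref_words = hyp.split(), ref.split()
    errs_t += editdistance.eval(hyp_words, ref_words)   # substitutions + insertions + deletions
    lengths_t += len(ref_words)

print(f"WER: {100.0 * errs_t / lengths_t:.2f}%")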
182,225
import torch


class ScalarBias(torch.autograd.Function):
    def forward(ctx, input, dim, bias_init):

    def backward(ctx, grad):


def scalar_bias(input, dim, bias_init=0):
    return ScalarBias.apply(input, dim, bias_init)
null
182,237
import torch
import torch.nn as nn
import torch.nn.functional as F

from fairseq import utils
from fairseq.incremental_decoding_utils import with_incremental_state
from fairseq.modules.fairseq_dropout import FairseqDropout
from fairseq.modules.unfold import unfold1d


class LightweightConv1dTBC(nn.Module):
    def __init__(
        self,
        input_size,
        kernel_size=1,
        padding_l=None,
        num_heads=1,
        weight_dropout=0.0,
        weight_softmax=False,
        bias=False,
    ):

    def reset_parameters(self):

    def forward(self, x, incremental_state=None, unfold=False):

    def prepare_for_onnx_export_(self):

    def _forward_unfolded(self, x, incremental_state):

    def _forward_expanded(self, x, incremental_state):

    def reorder_incremental_state(self, incremental_state, new_order):

    def _get_input_buffer(self, incremental_state):

    def _set_input_buffer(self, incremental_state, new_buffer):

    def extra_repr(self):


def LightweightConv(
    input_size,
    kernel_size=1,
    padding_l=None,
    num_heads=1,
    weight_dropout=0.0,
    weight_softmax=False,
    bias=False,
):
    if torch.cuda.is_available():
        try:
            from fairseq.modules.lightconv_layer import LightconvLayer

            return LightconvLayer(
                input_size,
                kernel_size=kernel_size,
                padding_l=padding_l,
                num_heads=num_heads,
                weight_dropout=weight_dropout,
                weight_softmax=weight_softmax,
                bias=bias,
            )
        except ImportError as e:
            print(e)
    return LightweightConv1dTBC(
        input_size,
        kernel_size=kernel_size,
        padding_l=padding_l,
        num_heads=num_heads,
        weight_dropout=weight_dropout,
        weight_softmax=weight_softmax,
        bias=bias,
    )
null
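A hedged usage sketch for the record above, assuming fairseq is installed so that LightweightConv1dTBC is importable. The "TBC" in the class name indicates time-first input of shape (T, B, C); padding_l = kernel_size - 1 is the usual causal padding, and all sizes below are made up.

import torch

conv = LightweightConv1dTBC(
    input_size=64, kernel_size=3, padding_l=2,
    num_heads=4, weight_dropout=0.1, weight_softmax=True, bias=True,
)
x = torch.randn(10, 2, 64)   # (time, batch, channels)
y = conv(x)
print(y.shape)               # expected torch.Size([10, 2, 64])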
182,242
from typing import Optional, Tuple

import torch
import torch.nn as nn

from fairseq.modules import (
    FairseqDropout,
    LayerDropModuleList,
    LayerNorm,
    MultiheadAttention,
    PositionalEmbedding,
    TransformerSentenceEncoderLayer,
)
from fairseq.modules.quant_noise import quant_noise as apply_quant_noise_

The provided code snippet includes necessary dependencies for implementing the `init_bert_params` function. Write a Python function `def init_bert_params(module)` to solve the following problem:

Initialize the weights specific to the BERT Model. This overrides the default initializations depending on the specified arguments. 1. If normal_init_linear_weights is set then the weights of the linear layer will be initialized using the normal distribution and the bias will be set to the specified value. 2. If normal_init_embed_weights is set then the weights of the embedding layer will be initialized using the normal distribution. 3. If normal_init_proj_weights is set then the weights of in_project_weight for MultiHeadAttention will be initialized using the normal distribution (to be validated).

Here is the function:

def init_bert_params(module):
    """
    Initialize the weights specific to the BERT Model.
    This overrides the default initializations depending on
    the specified arguments.
        1. If normal_init_linear_weights is set then weights of linear
           layer will be initialized using the normal distribution and
           bias will be set to the specified value.
        2. If normal_init_embed_weights is set then weights of embedding
           layer will be initialized using the normal distribution.
        3. If normal_init_proj_weights is set then weights of
           in_project_weight for MultiHeadAttention initialized using
           the normal distribution (to be validated).
    """

    def normal_(data):
        # with FSDP, module params will be on CUDA, so we cast them back to CPU
        # so that the RNG is consistent with and without FSDP
        data.copy_(
            data.cpu().normal_(mean=0.0, std=0.02).to(data.device)
        )

    if isinstance(module, nn.Linear):
        normal_(module.weight.data)
        if module.bias is not None:
            module.bias.data.zero_()
    if isinstance(module, nn.Embedding):
        normal_(module.weight.data)
        if module.padding_idx is not None:
            module.weight.data[module.padding_idx].zero_()
    if isinstance(module, MultiheadAttention):
        normal_(module.q_proj.weight.data)
        normal_(module.k_proj.weight.data)
        normal_(module.v_proj.weight.data)
Initialize the weights specific to the BERT Model. This overrides the default initializations depending on the specified arguments.
1. If normal_init_linear_weights is set then the weights of the linear layer will be initialized using the normal distribution and the bias will be set to the specified value.
2. If normal_init_embed_weights is set then the weights of the embedding layer will be initialized using the normal distribution.
3. If normal_init_proj_weights is set then the weights of in_project_weight for MultiHeadAttention will be initialized using the normal distribution (to be validated).
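A short sketch of how `init_bert_params` is typically applied, assuming an ordinary `torch.nn.Module` tree; `nn.Module.apply` visits every submodule, so the Linear / Embedding / MultiheadAttention branches above fire wherever those layer types occur. The toy model is hypothetical.

import torch.nn as nn

# Hypothetical toy encoder, used only to illustrate the call pattern.
model = nn.Sequential(
    nn.Embedding(1000, 64, padding_idx=0),
    nn.Linear(64, 64),
)

# Recursively re-initializes Linear and Embedding weights with N(0, 0.02),
# zeroes biases and the padding embedding, as defined in init_bert_params.
model.apply(init_bert_params)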
182,246
import torch import torch.nn as nn import torch.nn.functional as F import math from inspect import isfunction from operator import mul from functools import reduce, wraps from aml.multimodal_video.utils.einops.lib import rearrange, repeat from aml.multimodal_video.utils.einops.lib.layers.torch import Rearrange from fairseq.modules.local_attention import LocalAttention def exists(val): def cache_fn(f): cache = None @wraps(f) def cached_fn(*args, **kwargs): nonlocal cache if exists(cache): return cache cache = f(*args, **kwargs) return cache return cached_fn
null
182,247
import torch import torch.nn as nn import torch.nn.functional as F import math from inspect import isfunction from operator import mul from functools import reduce, wraps from aml.multimodal_video.utils.einops.lib import rearrange, repeat from aml.multimodal_video.utils.einops.lib.layers.torch import Rearrange from fairseq.modules.local_attention import LocalAttention def to(t): return {'device': t.device, 'dtype': t.dtype}
null
182,249
import torch import torch.nn as nn import torch.nn.functional as F import math from inspect import isfunction from operator import mul from functools import reduce, wraps from aml.multimodal_video.utils.einops.lib import rearrange, repeat from aml.multimodal_video.utils.einops.lib.layers.torch import Rearrange from fairseq.modules.local_attention import LocalAttention def expand_dim(t, dim, k): t = t.unsqueeze(dim) expand_shape = [-1] * len(t.shape) expand_shape[dim] = k return t.expand(*expand_shape) def batched_index_select(values, indices): last_dim = values.shape[-1] return values.gather(2, expand_dim(indices, -1, last_dim))
null
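A small shape example for `expand_dim` and `batched_index_select`, using plain tensors to make the gather semantics concrete (the sizes are illustrative).

import torch

b, h, n, d, k = 2, 4, 8, 16, 3
values = torch.randn(b, h, n, d)           # e.g. per-head key vectors
indices = torch.randint(0, n, (b, h, k))   # k positions selected per head

out = batched_index_select(values, indices)
print(out.shape)  # torch.Size([2, 4, 3, 16]); out[i, j, m] == values[i, j, indices[i, j, m]]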
182,253
import torch import torch.nn as nn import torch.nn.functional as F import math from inspect import isfunction from operator import mul from functools import reduce, wraps from aml.multimodal_video.utils.einops.lib import rearrange, repeat from aml.multimodal_video.utils.einops.lib.layers.torch import Rearrange from fairseq.modules.local_attention import LocalAttention def reshape_dim(t, dim, split_dims): shape = list(t.shape) num_dims = len(shape) dim = (dim + num_dims) % num_dims shape[dim:dim+1] = split_dims return t.reshape(shape)
null
182,257
import torch import torch.nn as nn import torch.nn.functional as F import math from inspect import isfunction from operator import mul from functools import reduce, wraps from aml.multimodal_video.utils.einops.lib import rearrange, repeat from aml.multimodal_video.utils.einops.lib.layers.torch import Rearrange from fairseq.modules.local_attention import LocalAttention def rotate_every_two(x): x = rearrange(x, '... (d j) -> ... d j', j=2) x1, x2 = x.unbind(dim=-1) x = torch.stack((-x2, x1), dim=-1) return rearrange(x, '... d j -> ... (d j)') def apply_rotary_pos_emb(q, k, sinu_pos): sinu_pos = rearrange(sinu_pos, '() n (j d) -> n j d', j=2) sin, cos = sinu_pos.unbind(dim=-2) sin, cos = map(lambda t: repeat(t, 'b n -> b (n j)', j=2), (sin, cos)) q, k = map(lambda t: (t * cos) + (rotate_every_two(t) * sin), (q, k)) return q, k
null
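A tiny numeric check of `rotate_every_two`, which pairs up adjacent channels (x1, x2) and maps them to (-x2, x1); this is the rotation that `apply_rotary_pos_emb` mixes with the sin/cos tables. The toy tensor below is only for illustration, and the function itself relies on the einops `rearrange` import shown above.

import torch

x = torch.tensor([[1., 2., 3., 4.]])   # one position, feature dim 4
print(rotate_every_two(x))             # tensor([[-2., 1., -4., 3.]])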
182,258
import torch import torch.nn as nn import torch.nn.functional as F import math from inspect import isfunction from operator import mul from functools import reduce, wraps from aml.multimodal_video.utils.einops.lib import rearrange, repeat from aml.multimodal_video.utils.einops.lib.layers.torch import Rearrange from fairseq.modules.local_attention import LocalAttention def find_modules(nn_module, type): return [module for module in nn_module.modules() if isinstance(module, type)] class Kmeans(nn.Module): def __init__(self, num_heads, head_dim, num_clusters, ema_decay=0.999, commitment=1e-4): super().__init__() self.commitment = commitment self.ema_decay = ema_decay self.register_buffer('means', torch.randn(num_heads, num_clusters, head_dim)) self.register_buffer('initted', torch.tensor(False)) self.num_new_means = 0 self.new_means = None def init(self, x): if self.initted: return _, h, _, d, device, _ = *x.shape, x.device, x.dtype num_clusters = self.means.shape[1] means = x.transpose(0, 1).contiguous().view(h, -1, d) num_samples = means.shape[1] if num_samples >= num_clusters: indices = torch.randperm(num_samples, device=device)[:num_clusters] else: indices = torch.randint(0, num_samples, (num_clusters,), device=device) means = means[:, indices] for _ in range(KMEAN_INIT_ITERS): means = kmeans_iter(x, means) self.num_new_means = 0 self.means.data.copy_(means) self.initted.data.copy_(torch.tensor(True)) def update(self, new_means=None): new_means = default(new_means, self.new_means) assert exists(new_means), 'new kmeans has not been supplied' ema_inplace(self.means, new_means, self.ema_decay) del self.new_means self.new_means = None self.num_new_means = 0 def forward(self, x, update_means=False): self.init(x) b, dtype = x.shape[0], x.dtype means = self.means.type(dtype) x = F.normalize(x, 2, dim=-1).type(dtype) with torch.no_grad(): dists, buckets = dists_and_buckets(x, means) routed_means = batched_index_select(expand_dim(means, 0, b), buckets) loss = F.mse_loss(x, routed_means) * self.commitment if update_means: with torch.no_grad(): means = kmeans_iter(x, means, buckets) self.new_means = ema(self.new_means, means, self.num_new_means / (self.num_new_means + 1)) self.num_new_means += 1 return dists, loss def update_kmeans_on_backwards(module): module.kmean_modules = find_modules(module, Kmeans) def hook(_, grad_in, grad_out): for m in module.kmean_modules: m.update() return module.register_backward_hook(hook)
null
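A minimal sketch of how `update_kmeans_on_backwards` is meant to be wired up, assuming a module that contains `Kmeans` submodules; the surrounding attention model is hypothetical. The registered backward hook calls `update()` on every `Kmeans` instance after each backward pass, so the EMA centroids track the routed queries/keys.

import torch.nn as nn

class RoutedAttention(nn.Module):
    # Hypothetical container: only the k-means bookkeeping is shown here.
    def __init__(self):
        super().__init__()
        self.kmeans = Kmeans(num_heads=8, head_dim=64, num_clusters=16)

model = RoutedAttention()

# Registers a backward hook that calls .update() on every Kmeans submodule
# found by find_modules(), refreshing the centroids once per backward pass.
update_kmeans_on_backwards(model)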
182,265
import logging import re from operator import attrgetter, itemgetter import torch import numpy as np import torch.distributed as dist import torch.nn as nn from .modules import PQConv2d, PQEmbedding, PQLinear from .pq import PQ def get_layers(model, filter_regexp, remove_weights=False): """ Filters out the layers according to a regexp. Note that we omit biases. Args: - model: a nn.Module - filter_regexp: a regexp to filter the layers to keep according to their name in model.named_parameters(). For instance, the regexp: down_layers\\.[123456]\\.(conv[12]|identity\\.conv)) is keeping blocks down_layers from 1 to 6, and inside each block is keeping conv1, conv2 and identity.conv. Remarks: - We add (module\\.)? at the beginning of the regexp to account for the possible use of nn.parallel.DataParallel """ # get all parameter names all_layers = map(itemgetter(0), model.named_parameters()) # remove biases all_layers = filter(lambda x: "bias" not in x, all_layers) # remove .weight in all other names (or .weight_orig is spectral norm) all_layers = map(lambda x: x.replace(".weight_orig", ""), all_layers) # remove weights indicates whether the weights extension should be removed, in addition to # weight_orig and weight extension on names if remove_weights: all_layers = map(lambda x: x.replace(".weights", ""), all_layers) all_layers = map(lambda x: x.replace(".weight", ""), all_layers) # return filtered layers filter_regexp = "(module\\.)?" + "(" + filter_regexp + ")" r = re.compile(filter_regexp) return list(filter(r.match, all_layers)) def get_param(module, layer_name, param_config): """ Given a quantization configuration, get the right parameter for the module to be quantized. Args: - module: a nn.Module - layer_name: the name of the layer - param_config: a dict like { 'Conv2d': ('kernel_size', {'(3, 3)': 9, '(1, 1)': 4}), 'Linear': ('in_features', {'*': 8}) } For instance, all conv2d layers with kernel size 3x3 have a block size of 9 and all Linear layers are quantized with a block size of 8, irrespective of their size. Remarks: - if 'fuzzy_name' is passed as a parameter, layers whose layer_name include 'fuzzy_name' will be assigned the given parameter. In the following example, conv.expand layers will have a block size of 9 while conv.reduce will have a block size of 4 and all other layers will have a block size of 2. 
{ 'Conv2d': ('fuzzy_name', {'expand': 9, 'reduce': 4, '*': 2}), 'Linear': ('fuzzy_name', {'classifier': 8, 'projection': 4}) } """ layer_type = module.__class__.__name__ if layer_type not in param_config: raise KeyError(f"Layer type {layer_type} not in config for layer {module}") feature, params = param_config[module.__class__.__name__] if feature != "fuzzy_name": feature_value = str(getattr(module, feature)) if feature_value not in params: if "*" in params: feature_value = "*" else: raise KeyError( f"{feature}={feature_value} not in config for layer {module}" ) else: feature_values = [name for name in params if name in layer_name] if len(feature_values) == 0: if "*" in params: feature_value = "*" else: raise KeyError(f"name={layer_name} not in config for {module}") else: feature_value = feature_values[0] return params[feature_value] def attrsetter(*items): def resolve_attr(obj, attr): attrs = attr.split(".") head = attrs[:-1] tail = attrs[-1] for name in head: obj = getattr(obj, name) return obj, tail def g(obj, val): for attr in items: resolved_obj, resolved_attr = resolve_attr(obj, attr) setattr(resolved_obj, resolved_attr, val) return g class PQ(EM): """ Quantizes the layer weights W with the standard Product Quantization technique. This learns a codebook of codewords or centroids of size block_size from W. For further reference on using PQ to quantize neural networks, see "And the Bit Goes Down: Revisiting the Quantization of Neural Networks", Stock et al., ICLR 2020. PQ is performed in two steps: (1) The matrix W (weights or fully-connected or convolutional layer) is reshaped to (block_size, -1). - If W is fully-connected (2D), its columns are split into blocks of size block_size. - If W is convolutional (4D), its filters are split along the spatial dimension. (2) We apply the standard EM/k-means algorithm to the resulting reshaped matrix. Args: - W: weight matrix to quantize of size (in_features x out_features) - block_size: size of the blocks (subvectors) - n_centroids: number of centroids - n_iter: number of k-means iterations - eps: for cluster reassignment when an empty cluster is found - max_tentatives for cluster reassignment when an empty cluster is found - verbose: print information after each iteration Remarks: - block_size be compatible with the shape of W """ def __init__( self, W, block_size, n_centroids=256, n_iter=20, eps=1e-6, max_tentatives=30, verbose=True, ): self.block_size = block_size W_reshaped = self._reshape(W) super(PQ, self).__init__( W_reshaped, n_centroids=n_centroids, n_iter=n_iter, eps=eps, max_tentatives=max_tentatives, verbose=verbose, ) def _reshape(self, W): """ Reshapes the matrix W as expained in step (1). """ # fully connected: by convention the weight has size out_features x in_features if len(W.size()) == 2: self.out_features, self.in_features = W.size() assert ( self.in_features % self.block_size == 0 ), "Linear: n_blocks must be a multiple of in_features" return ( W.reshape(self.out_features, -1, self.block_size) .permute(2, 1, 0) .flatten(1, 2) ) # convolutional: we reshape along the spatial dimension elif len(W.size()) == 4: self.out_channels, self.in_channels, self.k_h, self.k_w = W.size() assert ( self.in_channels * self.k_h * self.k_w ) % self.block_size == 0, ( "Conv2d: n_blocks must be a multiple of in_channels * k_h * k_w" ) return ( W.reshape(self.out_channels, -1, self.block_size) .permute(2, 1, 0) .flatten(1, 2) ) # not implemented else: raise NotImplementedError(W.size()) def encode(self): """ Performs self.n_iter EM steps. 
""" self.initialize_centroids() for i in range(self.n_iter): try: self.step(i) except EmptyClusterResolveError: break def decode(self): """ Returns the encoded full weight matrix. Must be called after the encode function. """ # fully connected case if "k_h" not in self.__dict__: return ( self.centroids[self.assignments] .reshape(-1, self.out_features, self.block_size) .permute(1, 0, 2) .flatten(1, 2) ) # convolutional case else: return ( self.centroids[self.assignments] .reshape(-1, self.out_channels, self.block_size) .permute(1, 0, 2) .reshape(self.out_channels, self.in_channels, self.k_h, self.k_w) ) The provided code snippet includes necessary dependencies for implementing the `quantize_model_` function. Write a Python function `def quantize_model_( model, size_tracker, layers_to_quantize, block_sizes_config, n_centroids_config, step=0, n_iter=15, eps=1e-6, max_tentatives=100, remove_weights=False, verbose=True, state_dict=None, )` to solve the following problem: Quantize a model in-place by stages. All the targeted layers are replaced by their quantized counterpart, and the model is ready for the finetuning of the centroids in a standard training loop (no modifications required). Note that we do not quantize biases. Args: - model: a nn.Module - size_tracker: useful for tracking quatization statistics - layers_to_quantize: a list containing regexps for filtering the layers to quantize at each stage according to their name (as in model.named_parameters()) - block_sizes_config: dict like { 'Conv2d': ('kernel_size', {'(3, 3)': 9, '(1, 1)': 4}), 'Linear': ('in_features', {'*': 8}) } For instance, all conv2d layers with kernel size 3x3 have a block size of 9 and all Linear layers are quantized with a block size of 8, irrespective of their size. - n_centroids_config: dict like { 'Conv2d': ('kernel_size', {'*': 256}), 'Linear': ('in_features', {'*': 256}) } For instance, all conv2d layers are quantized with 256 centroids - step: the layers to quantize inplace corresponding to layers_to_quantize[step] Here is the function: def quantize_model_( model, size_tracker, layers_to_quantize, block_sizes_config, n_centroids_config, step=0, n_iter=15, eps=1e-6, max_tentatives=100, remove_weights=False, verbose=True, state_dict=None, ): """ Quantize a model in-place by stages. All the targeted layers are replaced by their quantized counterpart, and the model is ready for the finetuning of the centroids in a standard training loop (no modifications required). Note that we do not quantize biases. Args: - model: a nn.Module - size_tracker: useful for tracking quatization statistics - layers_to_quantize: a list containing regexps for filtering the layers to quantize at each stage according to their name (as in model.named_parameters()) - block_sizes_config: dict like { 'Conv2d': ('kernel_size', {'(3, 3)': 9, '(1, 1)': 4}), 'Linear': ('in_features', {'*': 8}) } For instance, all conv2d layers with kernel size 3x3 have a block size of 9 and all Linear layers are quantized with a block size of 8, irrespective of their size. 
- n_centroids_config: dict like { 'Conv2d': ('kernel_size', {'*': 256}), 'Linear': ('in_features', {'*': 256}) } For instance, all conv2d layers are quantized with 256 centroids - step: the layers to quantize inplace corresponding to layers_to_quantize[step] """ quantized_layers = get_layers(model, layers_to_quantize[step], remove_weights=remove_weights) for layer in quantized_layers: # book-keeping is_master_process = (not dist.is_initialized()) or ( dist.is_initialized() and dist.get_rank() == 0 ) verbose = verbose and is_master_process # get block size and centroids module = attrgetter(layer)(model) block_size = get_param(module, layer, block_sizes_config) n_centroids = get_param(module, layer, n_centroids_config) if verbose: logging.info( f"Quantizing layer {layer} with block size {block_size} and {n_centroids} centroids" ) # quantize layer weight = module.weight.data.clone() is_bias = "bias" in [x[0] for x in module.named_parameters()] bias = module.bias.data.clone() if is_bias else None quantizer = PQ( weight, block_size, n_centroids=n_centroids, n_iter=n_iter, eps=eps, max_tentatives=max_tentatives, verbose=verbose, ) # quantization performed on all GPUs with same seed quantizer.encode() centroids = quantizer.centroids.contiguous() assignments = quantizer.assignments.contiguous() # If n_iter = 0 and state_dict is provided, then # we initialize random assignments and centroids to # random values of the appropriate dimensions # because the quantized model parameters will # overwritten by the state_dict later on. if n_iter == 0 and state_dict: # Initialize random centroids of the correct size centroids = torch.rand(centroids.size()) centroids.cuda() # Get counts and assignment keys from layer in loaded checkpoint. counts_key = layer+"."+"counts" assignment_key = layer+"."+"assignments" # Get number of different bins to include. counts = list(state_dict[counts_key].shape)[0] print(layer) print(state_dict[counts_key]) print(counts) # Initialize random assignments of the correct size # with an appropriate number of bins. 
num_assignments = list(state_dict[assignment_key].shape)[0] num_extra = num_assignments - counts print(num_assignments) print(num_extra) assignments_bins = torch.arange(counts) assignments_rand = torch.randint(0, counts-1, (num_extra, )) assignments = torch.cat((assignments_bins, assignments_rand), 0) # assignments = assignments.type(torch.IntTensor) assignments.cuda() print("assignments") print(assignments) # broadcast results to make sure weights are up-to-date if dist.is_initialized(): dist.broadcast(centroids, 0) dist.broadcast(assignments, 0) # instantiate the quantized counterpart if isinstance(module, nn.Linear): out_features, in_features = map( lambda k: module.__dict__[k], ["out_features", "in_features"] ) quantized_module = PQLinear( centroids, assignments, bias, in_features, out_features ) elif isinstance(module, nn.Embedding): num_embeddings, embedding_dim = map( lambda k: module.__dict__[k], ["num_embeddings", "embedding_dim"] ) quantized_module = PQEmbedding( centroids, assignments, num_embeddings, embedding_dim ) elif isinstance(module, nn.Conv2d): out_channels, in_channels, kernel_size = map( lambda k: module.__dict__[k], ["out_channels", "in_channels", "kernel_size"], ) stride, padding, dilation, groups, padding_mode = map( lambda k: module.__dict__[k], ["stride", "padding", "dilation", "groups", "padding_mode"], ) quantized_module = PQConv2d( centroids, assignments, bias, in_channels, out_channels, kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, padding_mode=padding_mode, ) else: raise ValueError(f"Module {module} not yet supported for quantization") # replace layer by its quantized counterpart attrsetter(layer)(model, quantized_module) # update statistics size_tracker.update(weight, block_size, n_centroids) # return name of quantized layers return quantized_layers
Quantize a model in-place by stages. All the targeted layers are replaced by their quantized counterpart, and the model is ready for the finetuning of the centroids in a standard training loop (no modifications required). Note that we do not quantize biases.
Args:
- model: a nn.Module
- size_tracker: useful for tracking quantization statistics
- layers_to_quantize: a list containing regexps for filtering the layers to quantize at each stage according to their name (as in model.named_parameters())
- block_sizes_config: dict like { 'Conv2d': ('kernel_size', {'(3, 3)': 9, '(1, 1)': 4}), 'Linear': ('in_features', {'*': 8}) } For instance, all conv2d layers with kernel size 3x3 have a block size of 9 and all Linear layers are quantized with a block size of 8, irrespective of their size.
- n_centroids_config: dict like { 'Conv2d': ('kernel_size', {'*': 256}), 'Linear': ('in_features', {'*': 256}) } For instance, all conv2d layers are quantized with 256 centroids
- step: the layers to quantize in place, corresponding to layers_to_quantize[step]
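A usage sketch of the staged PQ quantization above. The config dicts mirror the shapes described in the docstring, while the toy model, the layer regexps, and the no-op size tracker are assumptions for illustration (fairseq provides a real size tracker with the same `.update(weight, block_size, n_centroids)` interface).

import torch.nn as nn

# Hypothetical two-stage setup: quantize one Linear layer per stage.
model = nn.Sequential(nn.Linear(256, 256), nn.Linear(256, 16))

layers_to_quantize = ["0", "1"]          # one regexp per quantization stage
block_sizes_config = {"Linear": ("in_features", {"*": 8})}
n_centroids_config = {"Linear": ("in_features", {"*": 256})}

class _NoopTracker:
    # Stand-in for fairseq's size tracker; only .update() is required here.
    def update(self, *args):
        pass

for step in range(len(layers_to_quantize)):
    quantized = quantize_model_(
        model,
        _NoopTracker(),
        layers_to_quantize,
        block_sizes_config,
        n_centroids_config,
        step=step,
    )
    print(f"step {step}: quantized {quantized}")
    # ...finetune the centroids for a few epochs before moving to the next stage...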
182,267
import logging from operator import attrgetter import torch.distributed as dist import torch.nn as nn from ..pq.utils import attrsetter, get_layers from .modules import ActivationQuantizer, IntConv2d, IntEmbedding, IntLinear MAPPING = {nn.Linear: IntLinear, nn.Embedding: IntEmbedding, nn.Conv2d: IntConv2d} def get_layers(model, filter_regexp, remove_weights=False): """ Filters out the layers according to a regexp. Note that we omit biases. Args: - model: a nn.Module - filter_regexp: a regexp to filter the layers to keep according to their name in model.named_parameters(). For instance, the regexp: down_layers\\.[123456]\\.(conv[12]|identity\\.conv)) is keeping blocks down_layers from 1 to 6, and inside each block is keeping conv1, conv2 and identity.conv. Remarks: - We add (module\\.)? at the beginning of the regexp to account for the possible use of nn.parallel.DataParallel """ # get all parameter names all_layers = map(itemgetter(0), model.named_parameters()) # remove biases all_layers = filter(lambda x: "bias" not in x, all_layers) # remove .weight in all other names (or .weight_orig is spectral norm) all_layers = map(lambda x: x.replace(".weight_orig", ""), all_layers) # remove weights indicates whether the weights extension should be removed, in addition to # weight_orig and weight extension on names if remove_weights: all_layers = map(lambda x: x.replace(".weights", ""), all_layers) all_layers = map(lambda x: x.replace(".weight", ""), all_layers) # return filtered layers filter_regexp = "(module\\.)?" + "(" + filter_regexp + ")" r = re.compile(filter_regexp) return list(filter(r.match, all_layers)) def attrsetter(*items): def resolve_attr(obj, attr): attrs = attr.split(".") head = attrs[:-1] tail = attrs[-1] for name in head: obj = getattr(obj, name) return obj, tail def g(obj, val): for attr in items: resolved_obj, resolved_attr = resolve_attr(obj, attr) setattr(resolved_obj, resolved_attr, val) return g The provided code snippet includes necessary dependencies for implementing the `quantize_model_` function. Write a Python function `def quantize_model_(model, p=0.2, bits=8, update_step=3000, method="histogram", remove_weights=False)` to solve the following problem: Replaces all modules with their scalar quantized counterpart and registers hooks to quantize the post-ativations of those modules. Args: - model: a nn.Module - p: amount of noise (0 for no noise, 1 to quantize all the weights/activations) - bits: number of bits - update_step: update quantization parameters every update_step steps Here is the function: def quantize_model_(model, p=0.2, bits=8, update_step=3000, method="histogram", remove_weights=False): """ Replaces all modules with their scalar quantized counterpart and registers hooks to quantize the post-ativations of those modules. 
Args: - model: a nn.Module - p: amount of noise (0 for no noise, 1 to quantize all the weights/activations) - bits: number of bits - update_step: update quantization parameters every update_step steps """ # quantize all layers # remove weights indicates whether the weights extension should be removed, in addition to # weight_orig and weight extension on names quantized_layers = get_layers(model, "(.*?)", remove_weights=remove_weights) for layer in quantized_layers: # book-keeping is_master_process = (not dist.is_initialized()) or ( dist.is_initialized() and dist.get_rank() == 0 ) # recover module module = attrgetter(layer)(model) if is_master_process: logging.info( f"Quantizing layer {layer} with bits={bits} and QuantNoise={p}" ) # quantization params q_params = { "p": p, "update_step": update_step, "bits": bits, "method": method, "counter": 0, } # instantiate the quantized counterpart if isinstance(module, tuple(MAPPING.keys())): QuantizedModule = MAPPING[module.__class__] quantized_module = QuantizedModule.__new__(QuantizedModule) params = module.__dict__ params.update(q_params) quantized_module.__dict__.update(params) else: if is_master_process: logging.info(f"Module {module} not yet supported for quantization") continue # activation quantization a_q = ActivationQuantizer(quantized_module, p=0, bits=bits, method=method) # replace layer by its quantized counterpart attrsetter(layer)(model, quantized_module) # return name of quantized layers return quantized_layers
Replaces all modules with their scalar quantized counterpart and registers hooks to quantize the post-activations of those modules.
Args:
- model: a nn.Module
- p: amount of noise (0 for no noise, 1 to quantize all the weights/activations)
- bits: number of bits
- update_step: update quantization parameters every update_step steps
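The scalar (Quant-Noise) variant is simpler to invoke because it walks every supported layer type on its own. A hedged sketch of the call pattern, with a toy model standing in for a real network:

import torch.nn as nn

model = nn.Sequential(nn.Embedding(100, 32), nn.Linear(32, 32))

# Replace Linear/Embedding/Conv2d layers with their int8 counterparts and
# register activation-quantization hooks; p=0.1 quantizes 10% of the weights
# during training (quant-noise), bits=8 sets the fake-quantization precision.
layers = quantize_model_(model, p=0.1, bits=8, update_step=1000, method="histogram")
print(layers)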
182,269
import torch def quantize(w, scale, zero_point, bits=8): # In the default behavior, max_val = 255. max_val = 2 ** bits - 1 return ( torch.clamp(torch.round(w / scale + zero_point), 0, max_val) - zero_point ) * scale def emulate_int8_histogram(w, scale=None, zero_point=None, bits=8): if scale is None: obs = torch.ao.quantization.observer.HistogramObserver() obs.to(device=w.device) _ = obs(w.float()) scale, zero_point = obs.calculate_qparams() scale = scale.cuda().type_as(w) zero_point = zero_point.cuda().type_as(w) return quantize(w, scale, zero_point, bits=bits), scale, zero_point
null
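A small worked example of the fake-quantization round trip implemented by `quantize` above; the tensor and qparams are illustrative. Note that `emulate_int8_histogram` moves the computed scale/zero-point to CUDA, so the histogram path as written assumes a GPU, whereas calling `quantize` directly with explicit qparams runs anywhere.

import torch

w = torch.tensor([-1.0, -0.25, 0.0, 0.5, 1.0])

# Illustrative qparams covering roughly [-1, 1] with 8 bits:
# scale = (max - min) / (2**bits - 1), zero_point maps 0.0 onto an integer bin.
scale, zero_point = 2.0 / 255, 128.0

w_q = quantize(w, scale, zero_point, bits=8)
print(w_q)                      # values snapped to the nearest representable level
print((w - w_q).abs().max())    # quantization error, roughly bounded by scale / 2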
182,270
import torch def quantize(w, scale, zero_point, bits=8): # In the default behavior, max_val = 255. max_val = 2 ** bits - 1 return ( torch.clamp(torch.round(w / scale + zero_point), 0, max_val) - zero_point ) * scale def emulate_int8_channel(w, scale=None, zero_point=None, bits=8): if scale is None: obs = torch.ao.quantization.observer.PerChannelMinMaxObserver( ch_axis=-1, qscheme=torch.per_channel_symmetric ) obs.to(device=w.device) _ = obs(w) scale, zero_point, ch_axis = obs.get_qparams() scale = scale.cuda().type_as(w) zero_point = zero_point.cuda().type_as(w) return quantize(w, scale, zero_point, bits=bits), scale, zero_point
null
182,271
import torch def quantize(w, scale, zero_point, bits=8): # In the default behavior, max_val = 255. max_val = 2 ** bits - 1 return ( torch.clamp(torch.round(w / scale + zero_point), 0, max_val) - zero_point ) * scale def emulate_int8_tensor(w, scale=None, zero_point=None, bits=8): if scale is None: obs = torch.ao.quantization.observer.MinMaxObserver() obs.to(device=w.device) _ = obs(w) scale, zero_point = obs.calculate_qparams() scale = scale.cuda().type_as(w) zero_point = zero_point.cuda().type_as(w) return quantize(w, scale, zero_point, bits=bits), scale, zero_point
null
182,273
import ast import collections import contextlib import logging import numpy as np import os import re import time import traceback from collections import OrderedDict from typing import Any, Dict, Optional, Union import torch from fairseq.data import data_utils from fairseq.dataclass.configs import CheckpointConfig from fairseq.dataclass.utils import ( convert_namespace_to_omegaconf, overwrite_args_by_name, ) from fairseq.distributed.fully_sharded_data_parallel import FSDP, has_FSDP from fairseq.distributed import utils as distributed_utils from fairseq.file_io import PathManager from fairseq.models import FairseqDecoder, FairseqEncoder from omegaconf import DictConfig, open_dict, OmegaConf logger = logging.getLogger(__name__) def save_checkpoint(cfg: CheckpointConfig, trainer, epoch_itr, val_loss): from fairseq import meters # only one worker should attempt to create the required dir if trainer.data_parallel_rank == 0: os.makedirs(cfg.save_dir, exist_ok=True) prev_best = getattr(save_checkpoint, "best", val_loss) if val_loss is not None: best_function = max if cfg.maximize_best_checkpoint_metric else min save_checkpoint.best = best_function(val_loss, prev_best) if cfg.no_save: return trainer.consolidate_optimizer() # TODO(SS): do we need this if no_save_optimizer_state extra_state = {"train_iterator": epoch_itr.state_dict(), "val_loss": val_loss} if hasattr(save_checkpoint, "best"): extra_state.update({"best": save_checkpoint.best}) if getattr(epoch_itr, "sharded_checkpoint", False): local_state_dict = extra_state["train_iterator"] all_state_dicts = distributed_utils.all_gather_list( local_state_dict, max_size=getattr(trainer.cfg.common, "all_gather_list_size", 16384), group=trainer.data_parallel_process_group, ) extra_state["train_iterator"] = all_state_dicts if not trainer.should_save_checkpoint_on_current_rank: if trainer.always_call_state_dict_during_save_checkpoint: trainer.state_dict() return write_timer = meters.StopwatchMeter() write_timer.start() epoch = epoch_itr.epoch end_of_epoch = epoch_itr.end_of_epoch() updates = trainer.get_num_updates() logger.info(f"Preparing to save checkpoint for epoch {epoch} @ {updates} updates") def is_better(a, b): return a >= b if cfg.maximize_best_checkpoint_metric else a <= b suffix = trainer.checkpoint_suffix checkpoint_conds = collections.OrderedDict() checkpoint_conds["checkpoint{}{}.pt".format(epoch, suffix)] = ( end_of_epoch and not cfg.no_epoch_checkpoints and epoch % cfg.save_interval == 0 ) checkpoint_conds["checkpoint_{}_{}{}.pt".format(epoch, updates, suffix)] = ( not end_of_epoch and cfg.save_interval_updates > 0 and updates % cfg.save_interval_updates == 0 ) checkpoint_conds["checkpoint_best{}.pt".format(suffix)] = val_loss is not None and ( not hasattr(save_checkpoint, "best") or is_better(val_loss, save_checkpoint.best) ) if val_loss is not None and cfg.keep_best_checkpoints > 0: worst_best = getattr(save_checkpoint, "best", None) chkpts = checkpoint_paths( cfg.save_dir, pattern=r"checkpoint\.best_{}_(\d+\.?\d*){}\.pt".format( cfg.best_checkpoint_metric, suffix ), ) if len(chkpts) > 0: p = chkpts[-1] if cfg.maximize_best_checkpoint_metric else chkpts[0] worst_best = float(p.rsplit("_")[-1].replace("{}.pt".format(suffix), "")) # add random digits to resolve ties with data_utils.numpy_seed(epoch, updates, val_loss): rand_sfx = np.random.randint(0, cfg.keep_best_checkpoints) checkpoint_conds[ "checkpoint.best_{}_{:.3f}{}{}.pt".format( cfg.best_checkpoint_metric, val_loss, rand_sfx, suffix ) ] = worst_best is None or 
is_better(val_loss, worst_best) checkpoint_conds[ "checkpoint_last{}.pt".format(suffix) ] = not cfg.no_last_checkpoints checkpoints = [ os.path.join(cfg.save_dir, fn) for fn, cond in checkpoint_conds.items() if cond ] if len(checkpoints) > 0: trainer.save_checkpoint(checkpoints[0], extra_state) for cp in checkpoints[1:]: if cfg.write_checkpoints_asynchronously: # TODO[ioPath]: Need to implement a delayed asynchronous # file copying/moving feature. logger.warning( f"ioPath is not copying {checkpoints[0]} to {cp} " "since async write mode is on." ) else: assert PathManager.copy( checkpoints[0], cp, overwrite=True ), f"Failed to copy {checkpoints[0]} to {cp}" write_timer.stop() logger.info( "Saved checkpoint {} (epoch {} @ {} updates, score {}) (writing took {} seconds)".format( checkpoints[0], epoch, updates, val_loss, write_timer.sum ) ) if not end_of_epoch and cfg.keep_interval_updates > 0: # remove old checkpoints; checkpoints are sorted in descending order if cfg.keep_interval_updates_pattern == -1: checkpoints = checkpoint_paths( cfg.save_dir, pattern=r"checkpoint_\d+_(\d+){}\.pt".format(suffix) ) else: checkpoints = checkpoint_paths( cfg.save_dir, pattern=r"checkpoint_\d+_(\d+){}\.pt".format(suffix), keep_match=True, ) checkpoints = [ x[0] for x in checkpoints if x[1] % cfg.keep_interval_updates_pattern != 0 ] for old_chk in checkpoints[cfg.keep_interval_updates :]: if os.path.lexists(old_chk): os.remove(old_chk) elif PathManager.exists(old_chk): PathManager.rm(old_chk) if cfg.keep_last_epochs > 0: # remove old epoch checkpoints; checkpoints are sorted in descending order checkpoints = checkpoint_paths( cfg.save_dir, pattern=r"checkpoint(\d+){}\.pt".format(suffix) ) for old_chk in checkpoints[cfg.keep_last_epochs :]: if os.path.lexists(old_chk): os.remove(old_chk) elif PathManager.exists(old_chk): PathManager.rm(old_chk) if cfg.keep_best_checkpoints > 0: # only keep the best N checkpoints according to validation metric checkpoints = checkpoint_paths( cfg.save_dir, pattern=r"checkpoint\.best_{}_(\d+\.?\d*){}\.pt".format( cfg.best_checkpoint_metric, suffix ), ) if not cfg.maximize_best_checkpoint_metric: checkpoints = checkpoints[::-1] for old_chk in checkpoints[cfg.keep_best_checkpoints :]: if os.path.lexists(old_chk): os.remove(old_chk) elif PathManager.exists(old_chk): PathManager.rm(old_chk) class CheckpointConfig(FairseqDataclass): save_dir: str = field( default="checkpoints", metadata={"help": "path to save checkpoints"} ) restore_file: str = field( default="checkpoint_last.pt", metadata={ "help": "filename from which to load checkpoint " "(default: <save-dir>/checkpoint_last.pt" }, ) finetune_from_model: Optional[str] = field( default=None, metadata={ "help": "finetune from a pretrained model; note that meters and lr scheduler will be reset" }, ) reset_dataloader: bool = field( default=False, metadata={ "help": "if set, does not reload dataloader state from the checkpoint" }, ) reset_lr_scheduler: bool = field( default=False, metadata={ "help": "if set, does not load lr scheduler state from the checkpoint" }, ) reset_meters: bool = field( default=False, metadata={"help": "if set, does not load meters from the checkpoint"}, ) reset_optimizer: bool = field( default=False, metadata={"help": "if set, does not load optimizer state from the checkpoint"}, ) optimizer_overrides: str = field( default="{}", metadata={ "help": "a dictionary used to override optimizer args when loading a checkpoint" }, ) save_interval: int = field( default=1, metadata={"help": "save a checkpoint every N 
epochs"} ) save_interval_updates: int = field( default=0, metadata={"help": "save a checkpoint (and validate) every N updates"} ) keep_interval_updates: int = field( default=-1, metadata={ "help": "keep the last N checkpoints saved with --save-interval-updates" }, ) keep_interval_updates_pattern: int = field( default=-1, metadata={ "help": "when used with --keep-interval-updates, skips deleting " "any checkpoints with update X where " "X %% keep_interval_updates_pattern == 0" }, ) keep_last_epochs: int = field( default=-1, metadata={"help": "keep last N epoch checkpoints"} ) keep_best_checkpoints: int = field( default=-1, metadata={"help": "keep best N checkpoints based on scores"} ) no_save: bool = field( default=False, metadata={"help": "don't save models or checkpoints"} ) no_epoch_checkpoints: bool = field( default=False, metadata={"help": "only store last and best checkpoints"} ) no_last_checkpoints: bool = field( default=False, metadata={"help": "don't store last checkpoints"} ) no_save_optimizer_state: bool = field( default=False, metadata={"help": "don't save optimizer-state as part of checkpoint"}, ) best_checkpoint_metric: str = field( default="loss", metadata={"help": 'metric to use for saving "best" checkpoints'} ) maximize_best_checkpoint_metric: bool = field( default=False, metadata={ "help": 'select the largest metric value for saving "best" checkpoints' }, ) patience: int = field( default=-1, metadata={ "help": ( "early stop training if valid performance doesn't " "improve for N consecutive validation runs; note " "that this is influenced by --validate-interval" ) }, ) checkpoint_suffix: str = field( default="", metadata={"help": "suffix to add to the checkpoint file name"} ) checkpoint_shard_count: int = field( default=1, metadata={ "help": "Number of shards containing the checkpoint - " "if the checkpoint is over 300GB, it is preferable " "to split it into shards to prevent OOM on CPU while loading " "the checkpoint" }, ) load_checkpoint_on_all_dp_ranks: bool = field( default=False, metadata={ "help": "load checkpoints on all data parallel devices " "(default: only load on rank 0 and broadcast to other devices)" }, ) write_checkpoints_asynchronously: bool = field( default=False, metadata={ "help": ( "Write checkpoints asynchronously in a separate " "thread. NOTE: This feature is currently being tested." ), "argparse_alias": "--save-async", }, ) model_parallel_size: int = II("common.model_parallel_size") class PathManager: """ Wrapper for insulating OSS I/O (using Python builtin operations) from iopath's PathManager abstraction (for transparently handling various internal backends). 
""" def open( path: str, mode: str = "r", buffering: int = -1, encoding: Optional[str] = None, errors: Optional[str] = None, newline: Optional[str] = None, ): if IOPathManager: return IOPathManager.open( path=path, mode=mode, buffering=buffering, encoding=encoding, errors=errors, newline=newline, ) return open( path, mode=mode, buffering=buffering, encoding=encoding, errors=errors, newline=newline, ) def copy(src_path: str, dst_path: str, overwrite: bool = False) -> bool: if IOPathManager: return IOPathManager.copy( src_path=src_path, dst_path=dst_path, overwrite=overwrite ) return shutil.copyfile(src_path, dst_path) def get_local_path(path: str, **kwargs) -> str: if IOPathManager: return IOPathManager.get_local_path(path, **kwargs) return path def exists(path: str) -> bool: if IOPathManager: return IOPathManager.exists(path) return os.path.exists(path) def isfile(path: str) -> bool: if IOPathManager: return IOPathManager.isfile(path) return os.path.isfile(path) def ls(path: str) -> List[str]: if IOPathManager: return IOPathManager.ls(path) return os.listdir(path) def mkdirs(path: str) -> None: if IOPathManager: return IOPathManager.mkdirs(path) os.makedirs(path, exist_ok=True) def rm(path: str) -> None: if IOPathManager: return IOPathManager.rm(path) os.remove(path) def chmod(path: str, mode: int) -> None: if not PathManager.path_requires_pathmanager(path): os.chmod(path, mode) def register_handler(handler) -> None: if IOPathManager: return IOPathManager.register_handler(handler=handler) def copy_from_local( local_path: str, dst_path: str, overwrite: bool = False, **kwargs ) -> None: if IOPathManager: return IOPathManager.copy_from_local( local_path=local_path, dst_path=dst_path, overwrite=overwrite, **kwargs ) return shutil.copyfile(local_path, dst_path) def path_requires_pathmanager(path: str) -> bool: """Do we require PathManager to access given path?""" if IOPathManager: for p in IOPathManager._path_handlers.keys(): if path.startswith(p): return True return False def supports_rename(path: str) -> bool: # PathManager doesn't yet support renames return not PathManager.path_requires_pathmanager(path) def rename(src: str, dst: str): os.rename(src, dst) """ ioPath async PathManager methods: """ def opena( path: str, mode: str = "r", buffering: int = -1, encoding: Optional[str] = None, errors: Optional[str] = None, newline: Optional[str] = None, ): """ Return file descriptor with asynchronous write operations. """ global IOPathManager if not IOPathManager: logging.info("ioPath is initializing PathManager.") try: from iopath.common.file_io import PathManager IOPathManager = PathManager() except Exception: logging.exception("Failed to initialize ioPath PathManager object.") return IOPathManager.opena( path=path, mode=mode, buffering=buffering, encoding=encoding, errors=errors, newline=newline, ) def async_close() -> bool: """ Wait for files to be written and clean up asynchronous PathManager. NOTE: `PathManager.async_close()` must be called at the end of any script that uses `PathManager.opena(...)`. """ global IOPathManager if IOPathManager: return IOPathManager.async_close() return False The provided code snippet includes necessary dependencies for implementing the `load_checkpoint` function. Write a Python function `def load_checkpoint(cfg: CheckpointConfig, trainer, **passthrough_args)` to solve the following problem: Load a checkpoint and restore the training iterator. *passthrough_args* will be passed through to ``trainer.get_train_iterator``. 
Here is the function: def load_checkpoint(cfg: CheckpointConfig, trainer, **passthrough_args): """ Load a checkpoint and restore the training iterator. *passthrough_args* will be passed through to ``trainer.get_train_iterator``. """ reset_optimizer = cfg.reset_optimizer reset_lr_scheduler = cfg.reset_lr_scheduler optimizer_overrides = ast.literal_eval(cfg.optimizer_overrides) reset_meters = cfg.reset_meters reset_dataloader = cfg.reset_dataloader if cfg.finetune_from_model is not None and ( reset_optimizer or reset_lr_scheduler or reset_meters or reset_dataloader ): raise ValueError( "--finetune-from-model can not be set together with either --reset-optimizer" " or reset_lr_scheduler or reset_meters or reset_dataloader" ) suffix = trainer.checkpoint_suffix if ( cfg.restore_file == "checkpoint_last.pt" ): # default value of restore_file is 'checkpoint_last.pt' checkpoint_path = os.path.join( cfg.save_dir, "checkpoint_last{}.pt".format(suffix) ) first_launch = not PathManager.exists(checkpoint_path) if cfg.finetune_from_model is not None and first_launch: # if there is no last checkpoint to restore, start the finetune from pretrained model # else just use usual logic to load checkpoint, e.g. restart from last checkpoint and etc. if PathManager.exists(cfg.finetune_from_model): checkpoint_path = cfg.finetune_from_model reset_optimizer = True reset_lr_scheduler = True reset_meters = True reset_dataloader = True logger.info( f"loading pretrained model from {checkpoint_path}: " "optimizer, lr scheduler, meters, dataloader will be reset" ) else: raise ValueError( f"--funetune-from-model {cfg.finetune_from_model} does not exist" ) elif suffix is not None: checkpoint_path = cfg.restore_file.replace(".pt", suffix + ".pt") else: checkpoint_path = cfg.restore_file if cfg.restore_file != "checkpoint_last.pt" and cfg.finetune_from_model: raise ValueError( "--finetune-from-model and --restore-file (non-default value) " "can not be specified together: " + str(cfg) ) extra_state = trainer.load_checkpoint( checkpoint_path, reset_optimizer, reset_lr_scheduler, optimizer_overrides, reset_meters=reset_meters, ) if ( extra_state is not None and "best" in extra_state and not reset_optimizer and not reset_meters ): save_checkpoint.best = extra_state["best"] if extra_state is not None and not reset_dataloader: # restore iterator from checkpoint itr_state = extra_state["train_iterator"] epoch_itr = trainer.get_train_iterator( epoch=itr_state.get("epoch", 1), load_dataset=True, **passthrough_args ) epoch_itr.load_state_dict(itr_state) else: epoch_itr = trainer.get_train_iterator( epoch=1, load_dataset=True, **passthrough_args ) trainer.lr_step(epoch_itr.epoch) return extra_state, epoch_itr
Load a checkpoint and restore the training iterator. *passthrough_args* will be passed through to ``trainer.get_train_iterator``.
182,275
import ast import collections import contextlib import logging import numpy as np import os import re import time import traceback from collections import OrderedDict from typing import Any, Dict, Optional, Union import torch from fairseq.data import data_utils from fairseq.dataclass.configs import CheckpointConfig from fairseq.dataclass.utils import ( convert_namespace_to_omegaconf, overwrite_args_by_name, ) from fairseq.distributed.fully_sharded_data_parallel import FSDP, has_FSDP from fairseq.distributed import utils as distributed_utils from fairseq.file_io import PathManager from fairseq.models import FairseqDecoder, FairseqEncoder from omegaconf import DictConfig, open_dict, OmegaConf def load_checkpoint_to_cpu(path, arg_overrides=None, load_on_all_ranks=False): """Loads a checkpoint to CPU (with upgrading for backward compatibility). If doing single-GPU training or if the checkpoint is only being loaded by at most one process on each node (current default behavior is for only rank 0 to read the checkpoint from disk), load_on_all_ranks should be False to avoid errors from torch.distributed not having been initialized or torch.distributed.barrier() hanging. If all processes on each node may be loading the checkpoint simultaneously, load_on_all_ranks should be set to True to avoid I/O conflicts. There's currently no support for > 1 but < all processes loading the checkpoint on each node. """ local_path = PathManager.get_local_path(path) # The locally cached file returned by get_local_path() may be stale for # remote files that are periodically updated/overwritten (ex: # checkpoint_last.pt) - so we remove the local copy, sync across processes # (if needed), and then download a fresh copy. if local_path != path and PathManager.path_requires_pathmanager(path): try: os.remove(local_path) except FileNotFoundError: # With potentially multiple processes removing the same file, the # file being missing is benign (missing_ok isn't available until # Python 3.8). pass if load_on_all_ranks: torch.distributed.barrier() local_path = PathManager.get_local_path(path) with open(local_path, "rb") as f: state = torch.load(f, map_location=torch.device("cpu")) if "args" in state and state["args"] is not None and arg_overrides is not None: args = state["args"] for arg_name, arg_val in arg_overrides.items(): setattr(args, arg_name, arg_val) if "cfg" in state and state["cfg"] is not None: # hack to be able to set Namespace in dict config. this should be removed when we update to newer # omegaconf version that supports object flags, or when we migrate all existing models from omegaconf import _utils old_primitive = _utils.is_primitive_type _utils.is_primitive_type = lambda _: True state["cfg"] = OmegaConf.create(state["cfg"]) _utils.is_primitive_type = old_primitive OmegaConf.set_struct(state["cfg"], True) if arg_overrides is not None: overwrite_args_by_name(state["cfg"], arg_overrides) state = _upgrade_state_dict(state) return state class PathManager: """ Wrapper for insulating OSS I/O (using Python builtin operations) from iopath's PathManager abstraction (for transparently handling various internal backends). 
""" def open( path: str, mode: str = "r", buffering: int = -1, encoding: Optional[str] = None, errors: Optional[str] = None, newline: Optional[str] = None, ): if IOPathManager: return IOPathManager.open( path=path, mode=mode, buffering=buffering, encoding=encoding, errors=errors, newline=newline, ) return open( path, mode=mode, buffering=buffering, encoding=encoding, errors=errors, newline=newline, ) def copy(src_path: str, dst_path: str, overwrite: bool = False) -> bool: if IOPathManager: return IOPathManager.copy( src_path=src_path, dst_path=dst_path, overwrite=overwrite ) return shutil.copyfile(src_path, dst_path) def get_local_path(path: str, **kwargs) -> str: if IOPathManager: return IOPathManager.get_local_path(path, **kwargs) return path def exists(path: str) -> bool: if IOPathManager: return IOPathManager.exists(path) return os.path.exists(path) def isfile(path: str) -> bool: if IOPathManager: return IOPathManager.isfile(path) return os.path.isfile(path) def ls(path: str) -> List[str]: if IOPathManager: return IOPathManager.ls(path) return os.listdir(path) def mkdirs(path: str) -> None: if IOPathManager: return IOPathManager.mkdirs(path) os.makedirs(path, exist_ok=True) def rm(path: str) -> None: if IOPathManager: return IOPathManager.rm(path) os.remove(path) def chmod(path: str, mode: int) -> None: if not PathManager.path_requires_pathmanager(path): os.chmod(path, mode) def register_handler(handler) -> None: if IOPathManager: return IOPathManager.register_handler(handler=handler) def copy_from_local( local_path: str, dst_path: str, overwrite: bool = False, **kwargs ) -> None: if IOPathManager: return IOPathManager.copy_from_local( local_path=local_path, dst_path=dst_path, overwrite=overwrite, **kwargs ) return shutil.copyfile(local_path, dst_path) def path_requires_pathmanager(path: str) -> bool: """Do we require PathManager to access given path?""" if IOPathManager: for p in IOPathManager._path_handlers.keys(): if path.startswith(p): return True return False def supports_rename(path: str) -> bool: # PathManager doesn't yet support renames return not PathManager.path_requires_pathmanager(path) def rename(src: str, dst: str): os.rename(src, dst) """ ioPath async PathManager methods: """ def opena( path: str, mode: str = "r", buffering: int = -1, encoding: Optional[str] = None, errors: Optional[str] = None, newline: Optional[str] = None, ): """ Return file descriptor with asynchronous write operations. """ global IOPathManager if not IOPathManager: logging.info("ioPath is initializing PathManager.") try: from iopath.common.file_io import PathManager IOPathManager = PathManager() except Exception: logging.exception("Failed to initialize ioPath PathManager object.") return IOPathManager.opena( path=path, mode=mode, buffering=buffering, encoding=encoding, errors=errors, newline=newline, ) def async_close() -> bool: """ Wait for files to be written and clean up asynchronous PathManager. NOTE: `PathManager.async_close()` must be called at the end of any script that uses `PathManager.opena(...)`. """ global IOPathManager if IOPathManager: return IOPathManager.async_close() return False The provided code snippet includes necessary dependencies for implementing the `load_pretrained_component_from_model` function. Write a Python function `def load_pretrained_component_from_model( component: Union[FairseqEncoder, FairseqDecoder], checkpoint: str )` to solve the following problem: Load a pretrained FairseqEncoder or FairseqDecoder from checkpoint into the provided `component` object. 
If state_dict fails to load, there may be a mismatch in the architecture of the corresponding `component` found in the `checkpoint` file. Here is the function: def load_pretrained_component_from_model( component: Union[FairseqEncoder, FairseqDecoder], checkpoint: str ): """ Load a pretrained FairseqEncoder or FairseqDecoder from checkpoint into the provided `component` object. If state_dict fails to load, there may be a mismatch in the architecture of the corresponding `component` found in the `checkpoint` file. """ if not PathManager.exists(checkpoint): raise IOError("Model file not found: {}".format(checkpoint)) state = load_checkpoint_to_cpu(checkpoint) if isinstance(component, FairseqEncoder): component_type = "encoder" elif isinstance(component, FairseqDecoder): component_type = "decoder" else: raise ValueError( "component to load must be either a FairseqEncoder or " "FairseqDecoder. Loading other component types are not supported." ) component_state_dict = OrderedDict() for key in state["model"].keys(): if key.startswith(component_type): # encoder.input_layers.0.0.weight --> input_layers.0.0.weight component_subkey = key[len(component_type) + 1 :] component_state_dict[component_subkey] = state["model"][key] component.load_state_dict(component_state_dict, strict=True) return component
Load a pretrained FairseqEncoder or FairseqDecoder from checkpoint into the provided `component` object. If state_dict fails to load, there may be a mismatch in the architecture of the corresponding `component` found in the `checkpoint` file.
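A usage sketch of `load_pretrained_component_from_model`. It assumes a fairseq encoder-decoder model instance named `model` and an existing checkpoint path; both are placeholders. The function strips the `encoder.` / `decoder.` prefix from the checkpoint keys and loads the state dict in place.

# `model` is assumed to be a FairseqEncoderDecoderModel; the path is hypothetical.
load_pretrained_component_from_model(model.encoder, "checkpoints/pretrained.pt")
load_pretrained_component_from_model(model.decoder, "checkpoints/pretrained.pt")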
182,277
import ast import collections import contextlib import logging import numpy as np import os import re import time import traceback from collections import OrderedDict from typing import Any, Dict, Optional, Union import torch from fairseq.data import data_utils from fairseq.dataclass.configs import CheckpointConfig from fairseq.dataclass.utils import ( convert_namespace_to_omegaconf, overwrite_args_by_name, ) from fairseq.distributed.fully_sharded_data_parallel import FSDP, has_FSDP from fairseq.distributed import utils as distributed_utils from fairseq.file_io import PathManager from fairseq.models import FairseqDecoder, FairseqEncoder from omegaconf import DictConfig, open_dict, OmegaConf class PathManager: """ Wrapper for insulating OSS I/O (using Python builtin operations) from iopath's PathManager abstraction (for transparently handling various internal backends). """ def open( path: str, mode: str = "r", buffering: int = -1, encoding: Optional[str] = None, errors: Optional[str] = None, newline: Optional[str] = None, ): if IOPathManager: return IOPathManager.open( path=path, mode=mode, buffering=buffering, encoding=encoding, errors=errors, newline=newline, ) return open( path, mode=mode, buffering=buffering, encoding=encoding, errors=errors, newline=newline, ) def copy(src_path: str, dst_path: str, overwrite: bool = False) -> bool: if IOPathManager: return IOPathManager.copy( src_path=src_path, dst_path=dst_path, overwrite=overwrite ) return shutil.copyfile(src_path, dst_path) def get_local_path(path: str, **kwargs) -> str: if IOPathManager: return IOPathManager.get_local_path(path, **kwargs) return path def exists(path: str) -> bool: if IOPathManager: return IOPathManager.exists(path) return os.path.exists(path) def isfile(path: str) -> bool: if IOPathManager: return IOPathManager.isfile(path) return os.path.isfile(path) def ls(path: str) -> List[str]: if IOPathManager: return IOPathManager.ls(path) return os.listdir(path) def mkdirs(path: str) -> None: if IOPathManager: return IOPathManager.mkdirs(path) os.makedirs(path, exist_ok=True) def rm(path: str) -> None: if IOPathManager: return IOPathManager.rm(path) os.remove(path) def chmod(path: str, mode: int) -> None: if not PathManager.path_requires_pathmanager(path): os.chmod(path, mode) def register_handler(handler) -> None: if IOPathManager: return IOPathManager.register_handler(handler=handler) def copy_from_local( local_path: str, dst_path: str, overwrite: bool = False, **kwargs ) -> None: if IOPathManager: return IOPathManager.copy_from_local( local_path=local_path, dst_path=dst_path, overwrite=overwrite, **kwargs ) return shutil.copyfile(local_path, dst_path) def path_requires_pathmanager(path: str) -> bool: """Do we require PathManager to access given path?""" if IOPathManager: for p in IOPathManager._path_handlers.keys(): if path.startswith(p): return True return False def supports_rename(path: str) -> bool: # PathManager doesn't yet support renames return not PathManager.path_requires_pathmanager(path) def rename(src: str, dst: str): os.rename(src, dst) """ ioPath async PathManager methods: """ def opena( path: str, mode: str = "r", buffering: int = -1, encoding: Optional[str] = None, errors: Optional[str] = None, newline: Optional[str] = None, ): """ Return file descriptor with asynchronous write operations. 
""" global IOPathManager if not IOPathManager: logging.info("ioPath is initializing PathManager.") try: from iopath.common.file_io import PathManager IOPathManager = PathManager() except Exception: logging.exception("Failed to initialize ioPath PathManager object.") return IOPathManager.opena( path=path, mode=mode, buffering=buffering, encoding=encoding, errors=errors, newline=newline, ) def async_close() -> bool: """ Wait for files to be written and clean up asynchronous PathManager. NOTE: `PathManager.async_close()` must be called at the end of any script that uses `PathManager.opena(...)`. """ global IOPathManager if IOPathManager: return IOPathManager.async_close() return False The provided code snippet includes necessary dependencies for implementing the `load_ema_from_checkpoint` function. Write a Python function `def load_ema_from_checkpoint(fpath)` to solve the following problem: Loads exponential moving averaged (EMA) checkpoint from input and returns a model with ema weights. Args: fpath: A string path of checkpoint to load from. Returns: A dict of string keys mapping to various values. The 'model' key from the returned dict should correspond to an OrderedDict mapping string parameter names to torch Tensors. Here is the function: def load_ema_from_checkpoint(fpath): """Loads exponential moving averaged (EMA) checkpoint from input and returns a model with ema weights. Args: fpath: A string path of checkpoint to load from. Returns: A dict of string keys mapping to various values. The 'model' key from the returned dict should correspond to an OrderedDict mapping string parameter names to torch Tensors. """ params_dict = collections.OrderedDict() new_state = None with PathManager.open(fpath, 'rb') as f: new_state = torch.load( f, map_location=( lambda s, _: torch.serialization.default_restore_location(s, 'cpu') ), ) # EMA model is stored in a separate "extra state" model_params = new_state['extra_state']['ema'] for key in list(model_params.keys()): p = model_params[key] if isinstance(p, torch.HalfTensor): p = p.float() if key not in params_dict: params_dict[key] = p.clone() # NOTE: clone() is needed in case of p is a shared parameter else: raise ValueError("Key {} is repeated in EMA model params.".format(key)) if len(params_dict) == 0: raise ValueError( f"Input checkpoint path '{fpath}' does not contain " "ema model weights, is this model trained with EMA?" ) new_state['model'] = params_dict return new_state
Loads exponential moving averaged (EMA) checkpoint from input and returns a model with ema weights. Args: fpath: A string path of checkpoint to load from. Returns: A dict of string keys mapping to various values. The 'model' key from the returned dict should correspond to an OrderedDict mapping string parameter names to torch Tensors.
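A short sketch of the intended use: load the EMA weights from a checkpoint produced with EMA enabled, then push them into a compatible model. The path and the `model` variable are placeholders.

# Hypothetical checkpoint path; the file must contain extra_state['ema'].
state = load_ema_from_checkpoint("checkpoints/checkpoint_best.pt")

# state['model'] is an OrderedDict of fp32 parameter tensors keyed by name,
# so it can be fed straight into load_state_dict of a matching model.
model.load_state_dict(state["model"], strict=True)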
182,280
import argparse import contextlib import copy import importlib import logging import os import sys import warnings from itertools import accumulate from typing import Callable, Dict, List, Optional, TYPE_CHECKING import torch import torch.nn.functional as F from torch import Tensor import collections def apply_to_sample(f, sample): if hasattr(sample, "__len__") and len(sample) == 0: return {} def _apply(x): if torch.is_tensor(x): return f(x) elif isinstance(x, collections.OrderedDict): # OrderedDict has attributes that needs to be preserved od = collections.OrderedDict((key, _apply(value)) for key, value in x.items()) od.__dict__ = x.__dict__ return od elif isinstance(x, dict): return {key: _apply(value) for key, value in x.items()} elif isinstance(x, list): return [_apply(x) for x in x] elif isinstance(x, tuple): return tuple(_apply(x) for x in x) elif isinstance(x, set): return {_apply(x) for x in x} else: return x return _apply(sample) def move_to_cuda(sample, device=None): device = device or torch.cuda.current_device() def _move_to_cuda(tensor): # non_blocking is ignored if tensor is not pinned, so we can always set # to True (see github.com/PyTorchLightning/pytorch-lightning/issues/620) return tensor.to(device=device, non_blocking=True) return apply_to_sample(_move_to_cuda, sample)
null
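A small sketch of how apply_to_sample/move_to_cuda above behave on the nested batch dictionaries fairseq uses: tensors are moved recursively, everything else passes through unchanged. The sample contents are toy values.

import torch

sample = {
    "id": torch.tensor([0, 1]),
    "net_input": {
        "src_tokens": torch.tensor([[4, 5, 6], [7, 8, 2]]),
        "src_lengths": torch.tensor([3, 3]),
    },
    "nsentences": 2,  # plain int: returned as-is by apply_to_sample
}

if torch.cuda.is_available():
    cuda_sample = move_to_cuda(sample)
    assert cuda_sample["net_input"]["src_tokens"].is_cuda
    assert cuda_sample["nsentences"] == 2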
182,281
import argparse import contextlib import copy import importlib import logging import os import sys import warnings from itertools import accumulate from typing import Callable, Dict, List, Optional, TYPE_CHECKING import torch import torch.nn.functional as F from torch import Tensor import collections def apply_to_sample(f, sample): if hasattr(sample, "__len__") and len(sample) == 0: return {} def _apply(x): if torch.is_tensor(x): return f(x) elif isinstance(x, collections.OrderedDict): # OrderedDict has attributes that needs to be preserved od = collections.OrderedDict((key, _apply(value)) for key, value in x.items()) od.__dict__ = x.__dict__ return od elif isinstance(x, dict): return {key: _apply(value) for key, value in x.items()} elif isinstance(x, list): return [_apply(x) for x in x] elif isinstance(x, tuple): return tuple(_apply(x) for x in x) elif isinstance(x, set): return {_apply(x) for x in x} else: return x return _apply(sample) def move_to_cpu(sample): def _move_to_cpu(tensor): # PyTorch has poor support for half tensors (float16) on CPU. # Move any such tensors to float32. if tensor.dtype in {torch.bfloat16, torch.float16}: tensor = tensor.to(dtype=torch.float32) return tensor.cpu() return apply_to_sample(_move_to_cpu, sample)
null
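The CPU variant also upcasts half-precision tensors, as the comment above notes; a one-line check with a toy tensor makes that visible.

import torch

half_sample = {"logits": torch.randn(2, 3).half()}
cpu_sample = move_to_cpu(half_sample)
print(cpu_sample["logits"].dtype)   # torch.float32, since fp16 ops are poorly supported on CPU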
182,282
import argparse import contextlib import copy import importlib import logging import os import sys import warnings from itertools import accumulate from typing import Callable, Dict, List, Optional, TYPE_CHECKING import torch import torch.nn.functional as F from torch import Tensor import collections try: import torch_xla.core.xla_model as xm except ImportError: xm = None def apply_to_sample(f, sample): def move_to_tpu(sample): import torch_xla.core.xla_model as xm device = xm.xla_device() def _move_to_tpu(tensor): return tensor.to(device) return apply_to_sample(_move_to_tpu, sample)
null
182,300
import argparse import contextlib import copy import importlib import logging import os import sys import warnings from itertools import accumulate from typing import Callable, Dict, List, Optional, TYPE_CHECKING import torch import torch.nn.functional as F from torch import Tensor import collections def deprecation_warning(message, stacklevel=3): # don't use DeprecationWarning, since it's ignored by default warnings.warn(message, stacklevel=stacklevel) def relu_squared(x: torch.Tensor): return F.relu(x).pow(2) def gelu(x: torch.Tensor) -> torch.Tensor: return torch.nn.functional.gelu(x.float()).type_as(x) The provided code snippet includes necessary dependencies for implementing the `get_activation_fn` function. Write a Python function `def get_activation_fn(activation: str) -> Callable` to solve the following problem: Returns the activation function corresponding to `activation` Here is the function: def get_activation_fn(activation: str) -> Callable: """Returns the activation function corresponding to `activation`""" from fairseq.modules import gelu, gelu_accurate if activation == "relu": return F.relu elif activation == "relu_squared": return relu_squared elif activation == "gelu": return gelu elif activation == "gelu_fast": deprecation_warning( "--activation-fn=gelu_fast has been renamed to gelu_accurate" ) return gelu_accurate elif activation == "gelu_accurate": return gelu_accurate elif activation == "tanh": return torch.tanh elif activation == "linear": return lambda x: x else: raise RuntimeError("--activation-fn {} not supported".format(activation))
Returns the activation function corresponding to `activation`
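Usage sketch for get_activation_fn. It assumes fairseq is installed, because the function imports the gelu variants internally even when they are not selected.

import torch
import torch.nn.functional as F

x = torch.randn(2, 3)
act = get_activation_fn("relu_squared")
assert torch.allclose(act(x), F.relu(x).pow(2))

# Unsupported names fail fast, which lets callers validate configs up front.
try:
    get_activation_fn("swish")
except RuntimeError as err:
    print(err)   # --activation-fn swish not supported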
182,332
import contextlib import logging import sys import time from argparse import Namespace from itertools import chain from typing import Any, Dict, List import torch from fairseq import checkpoint_utils, models, optim, utils from fairseq.dataclass.configs import FairseqConfig from fairseq.dataclass.utils import convert_namespace_to_omegaconf from fairseq.distributed import utils as distributed_utils from fairseq.file_io import PathManager from fairseq.logging import meters, metrics from fairseq.models.ema import build_ema from fairseq.nan_detector import NanDetector from fairseq.optim import lr_scheduler from omegaconf import OmegaConf def _catalog_shared_params(module, memo=None, prefix=""): if memo is None: first_call = True memo = {} else: first_call = False for name, param in module._parameters.items(): param_prefix = prefix + ("." if prefix else "") + name if param not in memo: memo[param] = [] memo[param].append(param_prefix) for name, m in module._modules.items(): if m is None: continue submodule_prefix = prefix + ("." if prefix else "") + name _catalog_shared_params(m, memo, submodule_prefix) if first_call: return [x for x in memo.values() if len(x) > 1]
null
182,333
import contextlib import logging import sys import time from argparse import Namespace from itertools import chain from typing import Any, Dict, List import torch from fairseq import checkpoint_utils, models, optim, utils from fairseq.dataclass.configs import FairseqConfig from fairseq.dataclass.utils import convert_namespace_to_omegaconf from fairseq.distributed import utils as distributed_utils from fairseq.file_io import PathManager from fairseq.logging import meters, metrics from fairseq.models.ema import build_ema from fairseq.nan_detector import NanDetector from fairseq.optim import lr_scheduler from omegaconf import OmegaConf def _get_module_by_path(module, path): path = path.split(".") for name in path: module = getattr(module, name) return module
null
182,334
import contextlib import logging import sys import time from argparse import Namespace from itertools import chain from typing import Any, Dict, List import torch from fairseq import checkpoint_utils, models, optim, utils from fairseq.dataclass.configs import FairseqConfig from fairseq.dataclass.utils import convert_namespace_to_omegaconf from fairseq.distributed import utils as distributed_utils from fairseq.file_io import PathManager from fairseq.logging import meters, metrics from fairseq.models.ema import build_ema from fairseq.nan_detector import NanDetector from fairseq.optim import lr_scheduler from omegaconf import OmegaConf def _set_module_by_path(module, path, value): path = path.split(".") for name in path[:-1]: module = getattr(module, name) setattr(module, path[-1], value)
null
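The three helpers above (_catalog_shared_params, _get_module_by_path, _set_module_by_path) are typically used together to detect tied parameters and re-establish the tie later. A minimal sketch with a toy weight-tied module; the model is purely illustrative.

import torch.nn as nn

class TiedModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.embed = nn.Embedding(10, 4)
        self.out = nn.Linear(4, 10, bias=False)
        self.out.weight = self.embed.weight   # classic weight tying

m = TiedModel()
shared = _catalog_shared_params(m)
print(shared)   # [['embed.weight', 'out.weight']]

# Re-tie after the parameters may have been copied (e.g. when reloading a state dict):
for paths in shared:
    ref = _get_module_by_path(m, paths[0])
    for path in paths[1:]:
        _set_module_by_path(m, path, ref)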
182,352
from pathlib import Path from typing import BinaryIO, Optional, Tuple, Union, List import numpy as np import torch import torch.nn.functional as F def get_window( window_fn: callable, n_fft: int, win_length: int ) -> torch.Tensor: padding = n_fft - win_length assert padding >= 0 return F.pad(window_fn(win_length), (padding // 2, padding - padding // 2))
null
182,353
from pathlib import Path from typing import BinaryIO, Optional, Tuple, Union, List import numpy as np import torch import torch.nn.functional as F def get_fourier_basis(n_fft: int) -> torch.Tensor: basis = np.fft.fft(np.eye(n_fft)) basis = np.vstack( [np.real(basis[:n_fft // 2 + 1, :]), np.imag(basis[:n_fft // 2 + 1, :])] ) return torch.from_numpy(basis).float()
null
182,354
from pathlib import Path from typing import BinaryIO, Optional, Tuple, Union, List import numpy as np import torch import torch.nn.functional as F def get_mel_filters( sample_rate: int, n_fft: int, n_mels: int, f_min: float, f_max: float ) -> torch.Tensor: try: import librosa except ImportError: raise ImportError("Please install librosa: pip install librosa") basis = librosa.filters.mel(sample_rate, n_fft, n_mels, f_min, f_max) return torch.from_numpy(basis).float()
null
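The three audio helpers above can be composed into a rough log-mel front end. This is a hedged sketch, not fairseq's actual feature pipeline: the signal is framed directly by n_fft, the sizes are illustrative, and get_mel_filters requires librosa.

import torch

sample_rate, n_fft, win_length, hop_length, n_mels = 16000, 512, 400, 160, 80

window = get_window(torch.hann_window, n_fft, win_length)                      # (512,)
fourier_basis = get_fourier_basis(n_fft)                                       # (514, 512): real/imag rows stacked
mel_basis = get_mel_filters(sample_rate, n_fft, n_mels, 0.0, sample_rate / 2)  # (80, 257)

waveform = torch.randn(sample_rate)                     # one second of fake audio
frames = waveform.unfold(0, n_fft, hop_length) * window
spec = frames @ fourier_basis.t()                       # real and imaginary parts per frame
real, imag = spec.chunk(2, dim=-1)
magnitudes = (real ** 2 + imag ** 2).sqrt()
log_mel = torch.log(magnitudes @ mel_basis.t() + 1e-6)  # (n_frames, n_mels)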
182,359
import csv import io import logging import re from collections import defaultdict from pathlib import Path from typing import Dict, List, Optional from dataclasses import dataclass import numpy as np import torch from fairseq.data import ( ConcatDataset, Dictionary, FairseqDataset, ResamplingDataset, data_utils as fairseq_data_utils, ) from fairseq.data.audio.audio_utils import ( get_fbank, get_waveform, read_from_stored_zip, is_npy_data, is_sf_audio_data, parse_path, FEATURE_OR_SF_AUDIO_FILE_EXTENSIONS, ) from fairseq.data.audio.feature_transforms import CompositeAudioFeatureTransform from fairseq.data.audio.data_cfg import S2TDataConfig def get_features_from_npy_or_audio(path): ext = Path(path).suffix if ext not in FEATURE_OR_SF_AUDIO_FILE_EXTENSIONS: raise ValueError(f'Unsupported file format for "{path}"') return np.load(path) if ext == ".npy" else get_fbank(path) def get_features_or_waveform_from_stored_zip( path, byte_offset, byte_size, need_waveform=False, use_sample_rate=None, ): assert path.endswith(".zip") data = read_from_stored_zip(path, byte_offset, byte_size) f = io.BytesIO(data) if is_npy_data(data): features_or_waveform = np.load(f) elif is_sf_audio_data(data): features_or_waveform = \ get_waveform( f, always_2d=False, output_sample_rate=use_sample_rate )[0] if need_waveform else get_fbank(f) else: raise ValueError(f'Unknown file format for "{path}"') return features_or_waveform def get_waveform( path_or_fp: Union[str, BinaryIO], normalization: bool = True, mono: bool = True, frames: int = -1, start: int = 0, always_2d: bool = True, output_sample_rate: Optional[int] = None, normalize_volume: bool = False ) -> Tuple[np.ndarray, int]: """Get the waveform and sample rate of a 16-bit WAV/FLAC/OGG Vorbis audio. Args: path_or_fp (str or BinaryIO): the path or file-like object normalization (bool): normalize values to [-1, 1] (Default: True) mono (bool): convert multi-channel audio to mono-channel one frames (int): the number of frames to read. (-1 for reading all) start (int): Where to start reading. A negative value counts from the end. always_2d (bool): always return 2D array even for mono-channel audios output_sample_rate (Optional[int]): output sample rate normalize_volume (bool): normalize volume Returns: waveform (numpy.ndarray): 1D or 2D waveform (channels x length) sample_rate (float): sample rate """ if isinstance(path_or_fp, str): ext = Path(path_or_fp).suffix if ext not in SF_AUDIO_FILE_EXTENSIONS: raise ValueError(f"Unsupported audio format: {ext}") try: import soundfile as sf except ImportError: raise ImportError("Please install soundfile: pip install soundfile") waveform, sample_rate = sf.read( path_or_fp, dtype="float32", always_2d=True, frames=frames, start=start ) waveform = waveform.T # T x C -> C x T waveform, sample_rate = convert_waveform( waveform, sample_rate, normalize_volume=normalize_volume, to_mono=mono, to_sample_rate=output_sample_rate ) if not normalization: waveform *= 2 ** 15 # denormalized to 16-bit signed integers if not always_2d: waveform = waveform.squeeze(axis=0) return waveform, sample_rate def parse_path(path: str) -> Tuple[str, List[int]]: """Parse data path which is either a path to 1. a .npy/.wav/.flac/.ogg file 2. 
a stored ZIP file with slicing info: "[zip_path]:[offset]:[length]" Args: path (str): the data path to parse Returns: file_path (str): the file path slice_ptr (list of int): empty in case 1; byte offset and length for the slice in case 2 """ if Path(path).suffix in FEATURE_OR_SF_AUDIO_FILE_EXTENSIONS: _path, slice_ptr = path, [] else: _path, *slice_ptr = path.split(":") if not Path(_path).is_file(): raise FileNotFoundError(f"File not found: {_path}") assert len(slice_ptr) in {0, 2}, f"Invalid path: {path}" slice_ptr = [int(i) for i in slice_ptr] return _path, slice_ptr The provided code snippet includes necessary dependencies for implementing the `get_features_or_waveform` function. Write a Python function `def get_features_or_waveform( path: str, need_waveform=False, use_sample_rate=None )` to solve the following problem: Get speech features from .npy file or waveform from .wav/.flac file. The file may be inside an uncompressed ZIP file and is accessed via byte offset and length. Args: path (str): File path in the format of "<.npy/.wav/.flac path>" or "<zip path>:<byte offset>:<byte length>". need_waveform (bool): return waveform instead of features. use_sample_rate (int): change sample rate for the input wave file Returns: features_or_waveform (numpy.ndarray): speech features or waveform. Here is the function: def get_features_or_waveform( path: str, need_waveform=False, use_sample_rate=None ): """Get speech features from .npy file or waveform from .wav/.flac file. The file may be inside an uncompressed ZIP file and is accessed via byte offset and length. Args: path (str): File path in the format of "<.npy/.wav/.flac path>" or "<zip path>:<byte offset>:<byte length>". need_waveform (bool): return waveform instead of features. use_sample_rate (int): change sample rate for the input wave file Returns: features_or_waveform (numpy.ndarray): speech features or waveform. """ _path, slice_ptr = parse_path(path) if len(slice_ptr) == 0: if need_waveform: return get_waveform( _path, always_2d=False, output_sample_rate=use_sample_rate )[0] return get_features_from_npy_or_audio(_path) elif len(slice_ptr) == 2: features_or_waveform = get_features_or_waveform_from_stored_zip( _path, slice_ptr[0], slice_ptr[1], need_waveform=need_waveform, use_sample_rate=use_sample_rate ) else: raise ValueError(f"Invalid path: {path}") return features_or_waveform
Get speech features from .npy file or waveform from .wav/.flac file. The file may be inside an uncompressed ZIP file and is accessed via byte offset and length. Args: path (str): File path in the format of "<.npy/.wav/.flac path>" or "<zip path>:<byte offset>:<byte length>". need_waveform (bool): return waveform instead of features. use_sample_rate (int): change sample rate for the input wave file Returns: features_or_waveform (numpy.ndarray): speech features or waveform.
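Three call patterns matching the path formats the docstring describes, with placeholder file names (the files are not part of this snippet):

feats = get_features_or_waveform("data/utt1.npy")                    # pre-computed features
wave = get_features_or_waveform("data/utt1.wav", need_waveform=True,
                                use_sample_rate=16000)               # resampled raw audio
zipped = get_features_or_waveform("data/features.zip:1024:20480")    # byte slice inside a stored ZIP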
182,371
import itertools import logging import math import operator import os import queue import time from threading import Thread import numpy as np import torch from fairseq.data import data_utils def _chunk_iterator(itr, chunk_size): chunk = [] for x in itr: chunk.append(x) if len(chunk) == chunk_size: yield chunk chunk = [] if len(chunk) > 0: yield chunk
null
182,372
import logging import numpy as np import torch from fairseq.data import FairseqDataset, data_utils logger = logging.getLogger(__name__) def collate( samples, pad_idx, eos_idx, left_pad_source=True, left_pad_target=False, input_feeding=True, pad_to_length=None, pad_to_multiple=1, ): if len(samples) == 0: return {} def merge(key, left_pad, move_eos_to_beginning=False, pad_to_length=None): return data_utils.collate_tokens( [s[key] for s in samples], pad_idx, eos_idx, left_pad, move_eos_to_beginning, pad_to_length=pad_to_length, pad_to_multiple=pad_to_multiple, ) def check_alignment(alignment, src_len, tgt_len): if alignment is None or len(alignment) == 0: return False if ( alignment[:, 0].max().item() >= src_len - 1 or alignment[:, 1].max().item() >= tgt_len - 1 ): logger.warning("alignment size mismatch found, skipping alignment!") return False return True def compute_alignment_weights(alignments): """ Given a tensor of shape [:, 2] containing the source-target indices corresponding to the alignments, a weight vector containing the inverse frequency of each target index is computed. For e.g. if alignments = [[5, 7], [2, 3], [1, 3], [4, 2]], then a tensor containing [1., 0.5, 0.5, 1] should be returned (since target index 3 is repeated twice) """ align_tgt = alignments[:, 1] _, align_tgt_i, align_tgt_c = torch.unique( align_tgt, return_inverse=True, return_counts=True ) align_weights = align_tgt_c[align_tgt_i[np.arange(len(align_tgt))]] return 1.0 / align_weights.float() id = torch.LongTensor([s["id"] for s in samples]) src_tokens = merge( "source", left_pad=left_pad_source, pad_to_length=pad_to_length["source"] if pad_to_length is not None else None, ) # sort by descending source length src_lengths = torch.LongTensor( [s["source"].ne(pad_idx).long().sum() for s in samples] ) src_lengths, sort_order = src_lengths.sort(descending=True) id = id.index_select(0, sort_order) src_tokens = src_tokens.index_select(0, sort_order) prev_output_tokens = None target = None if samples[0].get("target", None) is not None: target = merge( "target", left_pad=left_pad_target, pad_to_length=pad_to_length["target"] if pad_to_length is not None else None, ) target = target.index_select(0, sort_order) tgt_lengths = torch.LongTensor( [s["target"].ne(pad_idx).long().sum() for s in samples] ).index_select(0, sort_order) ntokens = tgt_lengths.sum().item() if samples[0].get("prev_output_tokens", None) is not None: prev_output_tokens = merge("prev_output_tokens", left_pad=left_pad_target) elif input_feeding: # we create a shifted version of targets for feeding the # previous output token(s) into the next decoder step prev_output_tokens = merge( "target", left_pad=left_pad_target, move_eos_to_beginning=True, pad_to_length=pad_to_length["target"] if pad_to_length is not None else None, ) else: ntokens = src_lengths.sum().item() batch = { "id": id, "nsentences": len(samples), "ntokens": ntokens, "net_input": {"src_tokens": src_tokens, "src_lengths": src_lengths,}, "target": target, } if prev_output_tokens is not None: batch["net_input"]["prev_output_tokens"] = prev_output_tokens.index_select( 0, sort_order ) if samples[0].get("alignment", None) is not None: bsz, tgt_sz = batch["target"].shape src_sz = batch["net_input"]["src_tokens"].shape[1] offsets = torch.zeros((len(sort_order), 2), dtype=torch.long) offsets[:, 1] += torch.arange(len(sort_order), dtype=torch.long) * tgt_sz if left_pad_source: offsets[:, 0] += src_sz - src_lengths if left_pad_target: offsets[:, 1] += tgt_sz - tgt_lengths alignments = [ alignment + offset 
for align_idx, offset, src_len, tgt_len in zip( sort_order, offsets, src_lengths, tgt_lengths ) for alignment in [samples[align_idx]["alignment"].view(-1, 2)] if check_alignment(alignment, src_len, tgt_len) ] if len(alignments) > 0: alignments = torch.cat(alignments, dim=0) align_weights = compute_alignment_weights(alignments) batch["alignments"] = alignments batch["align_weights"] = align_weights if samples[0].get("constraints", None) is not None: # Collate the packed constraints across the samples, padding to # the length of the longest sample. lens = [sample.get("constraints").size(0) for sample in samples] max_len = max(lens) constraints = torch.zeros((len(samples), max(lens))).long() for i, sample in enumerate(samples): constraints[i, 0 : lens[i]] = samples[i].get("constraints") batch["constraints"] = constraints.index_select(0, sort_order) return batch
null
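A toy call to the collate() above, assuming fairseq's data_utils.collate_tokens is importable. Pad index 1 and eos index 2 follow fairseq's default dictionary layout; the token ids are arbitrary.

import torch

samples = [
    {"id": 0, "source": torch.tensor([4, 5, 6, 2]), "target": torch.tensor([7, 8, 2])},
    {"id": 1, "source": torch.tensor([9, 2]), "target": torch.tensor([5, 6, 7, 2])},
]
batch = collate(samples, pad_idx=1, eos_idx=2)

print(batch["net_input"]["src_tokens"])            # left-padded sources, sorted by descending length
print(batch["net_input"]["prev_output_tokens"])    # targets shifted right (eos moved to the front)
print(batch["ntokens"])                            # 7 target tokens in total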
182,386
import json import os, collections import pickle import logging import numpy as np import six def whitespace_tokenize(text): """Runs basic whitespace cleaning and splitting on a piece of text.""" text = text.strip() if not text: return [] tokens = text.split() return tokens class SquadExample(object): """A single training/test example for simple sequence classification. For examples without an answer, the start and end position are -1. """ def __init__(self, qas_id, question_text, doc_tokens, orig_answer_text=None, start_position=None, end_position=None, is_impossible=False, answers=[]): self.qas_id = qas_id self.question_text = question_text self.doc_tokens = doc_tokens self.orig_answer_text = orig_answer_text self.start_position = start_position self.end_position = end_position self.is_impossible = is_impossible self.answers = answers def __str__(self): return self.__repr__() def __repr__(self): s = "" s += "qas_id: %s" % (str(self.qas_id)) s += ", question_text: %s" % ( str(self.question_text)) s += ", doc_tokens: [%s]" % (" ".join(self.doc_tokens)) if self.start_position: s += ", start_position: %d" % (self.start_position) if self.start_position: s += ", end_position: %d" % (self.end_position) if self.start_position: s += ", is_impossible: %r" % (self.is_impossible) if self.orig_answer_text: s += ", ori_answer_text: %s" % (self.orig_answer_text) s += ", answer_text: %s" % (' '.join(self.doc_tokens[self.start_position: self.end_position + 1])) return s The provided code snippet includes necessary dependencies for implementing the `read_squad_examples` function. Write a Python function `def read_squad_examples(input_file, is_training, version_2_with_negative)` to solve the following problem: Read a SQuAD json file into a list of SquadExample. Here is the function: def read_squad_examples(input_file, is_training, version_2_with_negative): """Read a SQuAD json file into a list of SquadExample.""" with open(input_file, "r") as reader: input_data = json.load(reader)["data"] def is_whitespace(c): if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F: return True return False examples = [] for entry in input_data: for paragraph in entry["paragraphs"]: paragraph_text = paragraph["context"] doc_tokens = [] char_to_word_offset = [] prev_is_whitespace = True for c in paragraph_text: if is_whitespace(c): prev_is_whitespace = True else: if prev_is_whitespace: doc_tokens.append(c) else: doc_tokens[-1] += c prev_is_whitespace = False char_to_word_offset.append(len(doc_tokens) - 1) for qa in paragraph["qas"]: qas_id = qa["id"] question_text = qa["question"] start_position = None end_position = None orig_answer_text = None is_impossible = qa.get("is_impossible", False) answers = [] if is_training: if (len(qa["answers"]) != 1) and (not is_impossible): raise ValueError( "For training, each question should have exactly 1 answer.") if not is_impossible: answer = qa["answers"][0] orig_answer_text = answer["text"] answer_offset = answer["answer_start"] answer_length = len(orig_answer_text) start_position = char_to_word_offset[answer_offset] end_position = char_to_word_offset[answer_offset + answer_length - 1] # Only add answers where the text can be exactly recovered from the # document. If this CAN'T happen it's likely due to weird Unicode # stuff so we will just skip the example. # # Note that this means for training mode, every example is NOT # guaranteed to be preserved. 
actual_text = " ".join( doc_tokens[start_position:(end_position + 1)]) cleaned_answer_text = " ".join( whitespace_tokenize(orig_answer_text)) if actual_text.find(cleaned_answer_text) == -1: print("Could not find answer: '%s' vs. '%s'", actual_text, cleaned_answer_text) continue else: start_position = -1 end_position = -1 orig_answer_text = "" elif not is_impossible: answers = qa["answers"] example = SquadExample( qas_id=qas_id, question_text=question_text, doc_tokens=doc_tokens, orig_answer_text=orig_answer_text, start_position=start_position, end_position=end_position, is_impossible=is_impossible, answers=answers) examples.append(example) return examples
Read a SQuAD json file into a list of SquadExample.
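Usage sketch; the file name is a placeholder for any SQuAD-formatted JSON file.

examples = read_squad_examples("dev-v2.0.json", is_training=False,
                               version_2_with_negative=True)
print(len(examples))
print(examples[0].question_text, examples[0].doc_tokens[:10])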
182,387
import json import os, collections import pickle import logging import numpy as np import six class SquadFeature(object): """A single set of features of data.""" def __init__(self, unique_id, example_index, doc_span_index, tokens, token_to_orig_map, token_is_max_context, input_ids, p_mask, doc_offset, start_position=None, end_position=None, is_impossible=None): self.unique_id = unique_id self.example_index = example_index self.doc_span_index = doc_span_index self.tokens = tokens self.p_mask = p_mask self.doc_offset = doc_offset self.token_to_orig_map = token_to_orig_map self.token_is_max_context = token_is_max_context self.input_ids = input_ids self.start_position = start_position self.end_position = end_position self.is_impossible = is_impossible def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer, orig_answer_text): """Returns tokenized answer spans that better match the annotated answer.""" # The SQuAD annotations are character based. We first project them to # whitespace-tokenized words. But then after WordPiece tokenization, we can # often find a "better match". For example: # # Question: What year was John Smith born? # Context: The leader was John Smith (1895-1943). # Answer: 1895 # # The original whitespace-tokenized answer will be "(1895-1943).". However # after tokenization, our tokens will be "( 1895 - 1943 ) .". So we can match # the exact answer, 1895. # # However, this is not always possible. Consider the following: # # Question: What country is the top exporter of electornics? # Context: The Japanese electronics industry is the lagest in the world. # Answer: Japan # # In this case, the annotator chose "Japan" as a character sub-span of # the word "Japanese". Since our WordPiece tokenizer does not split # "Japanese", we just use "Japanese" as the annotation. This is fairly rare # in SQuAD, but does happen. tok_answer_text = " ".join(tokenizer.tokenize(orig_answer_text)) for new_start in range(input_start, input_end + 1): for new_end in range(input_end, new_start - 1, -1): text_span = " ".join(doc_tokens[new_start:(new_end + 1)]) if text_span == tok_answer_text: return (new_start, new_end) return (input_start, input_end) def _check_is_max_context(doc_spans, cur_span_index, position): """Check if this is the 'max context' doc span for the token.""" # Because of the sliding window approach taken to scoring documents, a single # token can appear in multiple documents. E.g. # Doc: the man went to the store and bought a gallon of milk # Span A: the man went to the # Span B: to the store and bought # Span C: and bought a gallon of # ... # # Now the word 'bought' will have two scores from spans B and C. We only # want to consider the score with "maximum context", which we define as # the *minimum* of its left and right context (the *sum* of left and # right context will always be the same, of course). # # In the example the maximum context for 'bought' would be span C since # it has 1 left context and 3 right context, while span B has 4 left context # and 0 right context. 
best_score = None best_span_index = None for (span_index, doc_span) in enumerate(doc_spans): end = doc_span.start + doc_span.length - 1 if position < doc_span.start: continue if position > end: continue num_left_context = position - doc_span.start num_right_context = end - position score = min(num_left_context, num_right_context) + 0.01 * doc_span.length if best_score is None or score > best_score: best_score = score best_span_index = span_index return cur_span_index == best_span_index def squad_convert_examples_to_features(examples, tokenizer, max_seq_length, doc_stride, max_query_length, is_training, cls_token='[CLS]', sep_token='[SEP]', additional_seq=True): features = [] unique_id = 1000000000 for (example_index, example) in enumerate(examples): query_tokens = tokenizer.tokenize(example.question_text) if len(query_tokens) > max_query_length: query_tokens = query_tokens[0:max_query_length] tok_to_orig_index = [] orig_to_tok_index = [] all_doc_tokens = [] for (i, token) in enumerate(example.doc_tokens): orig_to_tok_index.append(len(all_doc_tokens)) sub_tokens = tokenizer.tokenize(token) for sub_token in sub_tokens: tok_to_orig_index.append(i) all_doc_tokens.append(sub_token) tok_start_position = None tok_end_position = None if is_training and example.is_impossible: tok_start_position = -1 tok_end_position = -1 if is_training and not example.is_impossible: tok_start_position = orig_to_tok_index[example.start_position] if example.end_position < len(example.doc_tokens) - 1: tok_end_position = orig_to_tok_index[example.end_position + 1] - 1 else: tok_end_position = len(all_doc_tokens) - 1 (tok_start_position, tok_end_position) = _improve_answer_span( all_doc_tokens, tok_start_position, tok_end_position, tokenizer, example.orig_answer_text) max_tokens_for_doc = max_seq_length - len(query_tokens) - 3 # We can have documents that are longer than the maximum sequence length. # To deal with this we do a sliding window approach, where we take chunks # of the up to our max length with a stride of `doc_stride`. _DocSpan = collections.namedtuple( # pylint: disable=invalid-name "DocSpan", ["start", "length"]) doc_spans = [] start_offset = 0 while start_offset < len(all_doc_tokens): length = len(all_doc_tokens) - start_offset if length > max_tokens_for_doc: length = max_tokens_for_doc doc_spans.append(_DocSpan(start=start_offset, length=length)) if start_offset + length == len(all_doc_tokens): break start_offset += min(length, doc_stride) for (doc_span_index, doc_span) in enumerate(doc_spans): tokens = [] p_mask = [] token_to_orig_map = {} token_is_max_context = {} tokens.append(cls_token) p_mask.append(0) for token in query_tokens: tokens.append(token) p_mask.append(1) tokens.append(sep_token) p_mask.append(1) if additional_seq: tokens.append(sep_token) p_mask.append(1) doc_offset = len(tokens) for i in range(doc_span.length): split_token_index = doc_span.start + i token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index] is_max_context = _check_is_max_context(doc_spans, doc_span_index, split_token_index) token_is_max_context[len(tokens)] = is_max_context tokens.append(all_doc_tokens[split_token_index]) p_mask.append(0) tokens.append(sep_token) p_mask.append(1) input_ids = tokenizer.convert_tokens_to_ids(tokens) assert len(p_mask) == len(input_ids) start_position = None end_position = None if is_training and not example.is_impossible: # For training, if our document chunk does not contain an annotation # we throw it out, since there is nothing to predict. 
doc_start = doc_span.start doc_end = doc_span.start + doc_span.length - 1 out_of_span = False if not (tok_start_position >= doc_start and tok_end_position <= doc_end): out_of_span = True if out_of_span: start_position = 0 end_position = 0 else: start_position = tok_start_position - doc_start + doc_offset end_position = tok_end_position - doc_start + doc_offset if is_training and example.is_impossible: start_position = 0 end_position = 0 if example_index < 10: print("*** Example ***") print("unique_id: %s" % (unique_id)) print("example_index: %s" % (example_index)) print("doc_span_index: %s" % (doc_span_index)) print("tokens: %s" % " ".join( [x for x in tokens])) print("token_to_orig_map: %s" % " ".join( ["%d:%d" % (x, y) for (x, y) in six.iteritems(token_to_orig_map)])) print("token_is_max_context: %s" % " ".join([ "%d:%s" % (x, y) for (x, y) in six.iteritems(token_is_max_context) ])) print("input_ids: %s" % " ".join([str(x) for x in input_ids])) if is_training and example.is_impossible: print("impossible example") if is_training and not example.is_impossible: answer_text = " ".join(tokens[start_position:(end_position + 1)]) print("start_position: %d" % (start_position)) print("end_position: %d" % (end_position)) print("answer: %s" % (answer_text)) feature = SquadFeature( unique_id=unique_id, example_index=example_index, doc_span_index=doc_span_index, tokens=tokens, token_to_orig_map=token_to_orig_map, token_is_max_context=token_is_max_context, input_ids=input_ids, p_mask=p_mask, doc_offset=doc_offset, start_position=start_position, end_position=end_position, is_impossible=example.is_impossible) features.append(feature) unique_id += 1 return features
null
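The "max context" rule explained in the comment above can be checked directly with the doc spans from that example ("the man went to the store and bought a gallon of milk", 12 tokens). Token 7 ("bought") appears in spans B and C, and only span C counts as its max-context span.

import collections

_DocSpan = collections.namedtuple("DocSpan", ["start", "length"])
doc_spans = [
    _DocSpan(start=0, length=5),   # Span A: the man went to the
    _DocSpan(start=3, length=5),   # Span B: to the store and bought
    _DocSpan(start=6, length=5),   # Span C: and bought a gallon of
]

print(_check_is_max_context(doc_spans, 1, 7))   # False: span B gives 'bought' 4 left / 0 right context
print(_check_is_max_context(doc_spans, 2, 7))   # True:  span C gives it 1 left / 3 right context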
182,390
import collections import json import math import re import string import logging logger = logging.getLogger(__name__) from . import BasicTokenizer def get_final_text(pred_text, orig_text, do_lower_case, verbose_logging=False): """Project the tokenized prediction back to the original text.""" # When we created the data, we kept track of the alignment between original # (whitespace tokenized) tokens and our WordPiece tokenized tokens. So # now `orig_text` contains the span of our original text corresponding to the # span that we predicted. # # However, `orig_text` may contain extra characters that we don't want in # our prediction. # # For example, let's say: # pred_text = steve smith # orig_text = Steve Smith's # # We don't want to return `orig_text` because it contains the extra "'s". # # We don't want to return `pred_text` because it's already been normalized # (the SQuAD eval script also does punctuation stripping/lower casing but # our tokenizer does additional normalization like stripping accent # characters). # # What we really want to return is "Steve Smith". # # Therefore, we have to apply a semi-complicated alignment heuristic between # `pred_text` and `orig_text` to get a character-to-character alignment. This # can fail in certain cases in which case we just return `orig_text`. def _strip_spaces(text): ns_chars = [] ns_to_s_map = collections.OrderedDict() for (i, c) in enumerate(text): if c == " ": continue ns_to_s_map[len(ns_chars)] = i ns_chars.append(c) ns_text = "".join(ns_chars) return (ns_text, ns_to_s_map) # We first tokenize `orig_text`, strip whitespace from the result # and `pred_text`, and check if they are the same length. If they are # NOT the same length, the heuristic has failed. If they are the same # length, we assume the characters are one-to-one aligned. tokenizer = BasicTokenizer(do_lower_case=do_lower_case) tok_text = " ".join(tokenizer.tokenize(orig_text)) start_position = tok_text.find(pred_text) if start_position == -1: if verbose_logging: logger.info("Unable to find text: '%s' in '%s'" % (pred_text, orig_text)) return orig_text end_position = start_position + len(pred_text) - 1 (orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text) (tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text) if len(orig_ns_text) != len(tok_ns_text): if verbose_logging: logger.info("Length not equal after stripping spaces: '%s' vs '%s'", orig_ns_text, tok_ns_text) return orig_text # We then project the characters in `pred_text` back to `orig_text` using # the character-to-character alignment. 
tok_s_to_ns_map = {} for (i, tok_index) in tok_ns_to_s_map.items(): tok_s_to_ns_map[tok_index] = i orig_start_position = None if start_position in tok_s_to_ns_map: ns_start_position = tok_s_to_ns_map[start_position] if ns_start_position in orig_ns_to_s_map: orig_start_position = orig_ns_to_s_map[ns_start_position] if orig_start_position is None: if verbose_logging: logger.info("Couldn't map start position") return orig_text orig_end_position = None if end_position in tok_s_to_ns_map: ns_end_position = tok_s_to_ns_map[end_position] if ns_end_position in orig_ns_to_s_map: orig_end_position = orig_ns_to_s_map[ns_end_position] if orig_end_position is None: if verbose_logging: logger.info("Couldn't map end position") return orig_text output_text = orig_text[orig_start_position : (orig_end_position + 1)] return output_text def _get_best_indexes(logits, n_best_size): """Get the n-best logits from a list.""" index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True) best_indexes = [] for i in range(len(index_and_score)): if i >= n_best_size: break best_indexes.append(index_and_score[i][0]) return best_indexes def _compute_softmax(scores): """Compute softmax probability over raw logits.""" if not scores: return [] max_score = None for score in scores: if max_score is None or score > max_score: max_score = score exp_scores = [] total_sum = 0.0 for score in scores: x = math.exp(score - max_score) exp_scores.append(x) total_sum += x probs = [] for score in exp_scores: probs.append(score / total_sum) return probs The provided code snippet includes necessary dependencies for implementing the `compute_predictions_logits` function. Write a Python function `def compute_predictions_logits( all_examples, all_features, all_results, n_best_size, max_answer_length, do_lower_case, output_prediction_file, output_nbest_file, output_null_log_odds_file, verbose_logging, version_2_with_negative, null_score_diff_threshold, tokenizer, )` to solve the following problem: Write final predictions to the json file and log-odds of null if needed. 
Here is the function: def compute_predictions_logits( all_examples, all_features, all_results, n_best_size, max_answer_length, do_lower_case, output_prediction_file, output_nbest_file, output_null_log_odds_file, verbose_logging, version_2_with_negative, null_score_diff_threshold, tokenizer, ): """Write final predictions to the json file and log-odds of null if needed.""" if output_prediction_file: logger.info(f"Writing predictions to: {output_prediction_file}") if output_nbest_file: logger.info(f"Writing nbest to: {output_nbest_file}") if output_null_log_odds_file and version_2_with_negative: logger.info(f"Writing null_log_odds to: {output_null_log_odds_file}") example_index_to_features = collections.defaultdict(list) for feature in all_features: example_index_to_features[feature.example_index].append(feature) unique_id_to_result = {} for result in all_results: unique_id_to_result[result.unique_id] = result _PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name "PrelimPrediction", ["feature_index", "start_index", "end_index", "start_logit", "end_logit"] ) all_predictions = collections.OrderedDict() all_nbest_json = collections.OrderedDict() scores_diff_json = collections.OrderedDict() all_null_scores = collections.OrderedDict() for (example_index, example) in enumerate(all_examples): features = example_index_to_features[example_index] prelim_predictions = [] # keep track of the minimum score of null start+end of position 0 score_null = 1000000 # large and positive min_null_feature_index = 0 # the paragraph slice with min null score null_start_logit = 0 # the start logit at the slice with min null score null_end_logit = 0 # the end logit at the slice with min null score for (feature_index, feature) in enumerate(features): result = unique_id_to_result[feature.unique_id] start_indexes = _get_best_indexes(result.start_logits, n_best_size) end_indexes = _get_best_indexes(result.end_logits, n_best_size) # if we could have irrelevant answers, get the min score of irrelevant if version_2_with_negative: feature_null_score = result.start_logits[0] + result.end_logits[0] if feature_null_score < score_null: score_null = feature_null_score min_null_feature_index = feature_index null_start_logit = result.start_logits[0] null_end_logit = result.end_logits[0] doc_offset = feature.doc_offset for start_index in start_indexes: for end_index in end_indexes: # We could hypothetically create invalid predictions, e.g., predict # that the start of the span is in the question. We throw out all # invalid predictions. 
if start_index >= len(feature.tokens) or start_index < doc_offset: continue if end_index >= len(feature.tokens) or end_index < doc_offset: continue if start_index not in feature.token_to_orig_map: continue if end_index not in feature.token_to_orig_map: continue if not feature.token_is_max_context.get(start_index, False): continue if end_index < start_index: continue length = end_index - start_index + 1 if length > max_answer_length: continue prelim_predictions.append( _PrelimPrediction( feature_index=feature_index, start_index=start_index, end_index=end_index, start_logit=result.start_logits[start_index], end_logit=result.end_logits[end_index], ) ) if version_2_with_negative: prelim_predictions.append( _PrelimPrediction( feature_index=min_null_feature_index, start_index=0, end_index=0, start_logit=null_start_logit, end_logit=null_end_logit, ) ) prelim_predictions = sorted(prelim_predictions, key=lambda x: (x.start_logit + x.end_logit), reverse=True) _NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name "NbestPrediction", ["text", "start_logit", "end_logit"] ) seen_predictions = {} nbest = [] for pred in prelim_predictions: if len(nbest) >= n_best_size: break feature = features[pred.feature_index] if pred.start_index > 0: # this is a non-null prediction tok_tokens = feature.tokens[pred.start_index : (pred.end_index + 1)] orig_doc_start = feature.token_to_orig_map[pred.start_index] orig_doc_end = feature.token_to_orig_map[pred.end_index] orig_tokens = example.doc_tokens[orig_doc_start : (orig_doc_end + 1)] tok_text = tokenizer.convert_tokens_to_string(tok_tokens) # tok_text = " ".join(tok_tokens) # # # De-tokenize WordPieces that have been split off. # tok_text = tok_text.replace(" ##", "") # tok_text = tok_text.replace("##", "") # Clean whitespace tok_text = tok_text.strip() tok_text = " ".join(tok_text.split()) orig_text = " ".join(orig_tokens) final_text = get_final_text(tok_text, orig_text, do_lower_case, verbose_logging) if final_text in seen_predictions: continue seen_predictions[final_text] = True else: final_text = "" seen_predictions[final_text] = True nbest.append(_NbestPrediction(text=final_text, start_logit=pred.start_logit, end_logit=pred.end_logit)) # if we didn't include the empty option in the n-best, include it if version_2_with_negative: if "" not in seen_predictions: nbest.append(_NbestPrediction(text="", start_logit=null_start_logit, end_logit=null_end_logit)) # In very rare edge cases we could only have single null prediction. # So we just create a nonce prediction in this case to avoid failure. if len(nbest) == 1: nbest.insert(0, _NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0)) # In very rare edge cases we could have no valid predictions. So we # just create a nonce prediction in this case to avoid failure. 
if not nbest: nbest.append(_NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0)) assert len(nbest) >= 1, "No valid predictions" total_scores = [] best_non_null_entry = None for entry in nbest: total_scores.append(entry.start_logit + entry.end_logit) if not best_non_null_entry: if entry.text: best_non_null_entry = entry probs = _compute_softmax(total_scores) nbest_json = [] for (i, entry) in enumerate(nbest): output = collections.OrderedDict() output["text"] = entry.text output["probability"] = probs[i] output["start_logit"] = entry.start_logit output["end_logit"] = entry.end_logit nbest_json.append(output) assert len(nbest_json) >= 1, "No valid predictions" if not version_2_with_negative: all_predictions[example.qas_id] = nbest_json[0]["text"] else: # predict "" iff the null score - the score of best non-null > threshold score_diff = score_null - best_non_null_entry.start_logit - (best_non_null_entry.end_logit) scores_diff_json[example.qas_id] = score_diff all_predictions[example.qas_id] = best_non_null_entry.text all_null_scores[example.qas_id] = score_diff all_nbest_json[example.qas_id] = nbest_json if output_prediction_file: with open(output_prediction_file, "w") as writer: writer.write(json.dumps(all_predictions, indent=4) + "\n") if output_nbest_file: with open(output_nbest_file, "w") as writer: writer.write(json.dumps(all_nbest_json, indent=4) + "\n") if output_null_log_odds_file and version_2_with_negative: with open(output_null_log_odds_file, "w") as writer: writer.write(json.dumps(scores_diff_json, indent=4) + "\n") return all_predictions, all_null_scores
Write final predictions to the json file and log-odds of null if needed.
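The n-best selection and normalization above reduce to _get_best_indexes and _compute_softmax; a tiny numeric check makes their behaviour concrete.

logits = [2.0, 0.5, 3.0, -1.0]

best = _get_best_indexes(logits, n_best_size=2)
print(best)                                        # [2, 0]: indexes of the two largest logits

probs = _compute_softmax([logits[i] for i in best])
print(probs)                                       # approx. [0.731, 0.269]
print(sum(probs))                                  # 1.0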
182,398
import argparse import copy import logging import os from typing import Any, Dict, Iterator, List import torch from fairseq import utils from fairseq.data import encoders from omegaconf import open_dict from torch import nn def from_pretrained( model_name_or_path, checkpoint_file="model.pt", data_name_or_path=".", archive_map=None, **kwargs ): from fairseq import checkpoint_utils, file_utils if archive_map is not None: if model_name_or_path in archive_map: model_name_or_path = archive_map[model_name_or_path] if data_name_or_path is not None and data_name_or_path in archive_map: data_name_or_path = archive_map[data_name_or_path] # allow archive_map to set default arg_overrides (e.g., tokenizer, bpe) # for each model if isinstance(model_name_or_path, dict): for k, v in model_name_or_path.items(): if k == "checkpoint_file": checkpoint_file = v elif ( k != "path" # only set kwargs that don't already have overrides and k not in kwargs ): kwargs[k] = v model_name_or_path = model_name_or_path["path"] model_path = file_utils.load_archive_file(model_name_or_path) # convenience hack for loading data and BPE codes from model archive if data_name_or_path.startswith("."): kwargs["data"] = os.path.abspath(os.path.join(model_path, data_name_or_path)) else: kwargs["data"] = file_utils.load_archive_file(data_name_or_path) for file, arg in { "code": "bpe_codes", "bpecodes": "bpe_codes", "sentencepiece.bpe.model": "sentencepiece_model", "merges.txt": "bpe_merges", "vocab.json": "bpe_vocab", }.items(): path = os.path.join(model_path, file) if os.path.exists(path): kwargs[arg] = path if "user_dir" in kwargs: utils.import_user_module(argparse.Namespace(user_dir=kwargs["user_dir"])) models, args, task = checkpoint_utils.load_model_ensemble_and_task( [os.path.join(model_path, cpt) for cpt in checkpoint_file.split(os.pathsep)], arg_overrides=kwargs, ) return { "args": args, "task": task, "models": models, }
null
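Hedged usage sketch for from_pretrained: the directory path, checkpoint file name, and tokenizer/bpe overrides below are placeholders standing in for a locally downloaded fairseq model archive.

loaded = from_pretrained(
    "/path/to/wmt19.en-de.joined-dict.ensemble",   # placeholder local directory
    checkpoint_file="model1.pt",
    data_name_or_path=".",
    tokenizer="moses",
    bpe="fastbpe",
)
models, args, task = loaded["models"], loaded["args"], loaded["task"]
print(len(models), task)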
182,399
import torch from torch import nn import math from typing import Dict, List, Optional import warnings The provided code snippet includes necessary dependencies for implementing the `is_cuda_extension_usable` function. Write a Python function `def is_cuda_extension_usable() -> bool` to solve the following problem: Check whether ngram_repeat_block_cuda is built properly Here is the function: def is_cuda_extension_usable() -> bool: """Check whether ngram_repeat_block_cuda is built properly""" if not EXTENSION_BUILT or not torch.cuda.is_available(): return False bsz = 2 tokens = torch.tensor([[4, 4, 3, 2], [1, 2, 3, 4]], dtype=torch.long, device="cuda") lprobs = torch.rand((8, 12), device="cuda") try: outputs = ngram_repeat_block_cuda.forward(tokens, lprobs, bsz, 3, 4, 3) outputs = outputs + 4 # This line breaks if the extension is built incorrectly. return True except RuntimeError: warnings.warn( "NGramRepeatBlock extension must be rebuilt." 'Run TORCH_CUDA_ARCH_LIST="6.0;6.1;7.0" python setup.py build_ext --inplace' ) return False
Check whether ngram_repeat_block_cuda is built properly
182,406
import logging from fairseq.tasks import register_task from fairseq.tasks.speech_to_text import SpeechToTextTask from fairseq.tasks.translation import ( TranslationTask, TranslationConfig ) def check_import(flag): if not flag: raise ImportError( "'examples.simultaneous_translation' is not correctly imported. " "Please consider `pip install -e $FAIRSEQ_DIR`." )
null
182,411
import argparse from pathlib import Path from typing import Callable, List, Optional, Union import torch from fairseq import utils from fairseq.data.indexed_dataset import get_available_dataset_impl from fairseq.dataclass.configs import ( CheckpointConfig, CommonConfig, CommonEvalConfig, DatasetConfig, DistributedTrainingConfig, EvalLMConfig, GenerationConfig, InteractiveConfig, OptimizationConfig, EMAConfig, ) from fairseq.dataclass.utils import gen_parser_from_dataclass from fairseq.utils import csv_str_list, eval_bool, eval_str_dict, eval_str_list def get_training_parser(default_task="translation"): parser = get_parser("Trainer", default_task) add_dataset_args(parser, train=True) add_distributed_training_args(parser) add_model_args(parser) add_optimization_args(parser) add_checkpoint_args(parser) add_ema_args(parser) return parser def parse_args_and_arch( parser: argparse.ArgumentParser, input_args: List[str] = None, parse_known: bool = False, suppress_defaults: bool = False, modify_parser: Optional[Callable[[argparse.ArgumentParser], None]] = None, ): """ Args: parser (ArgumentParser): the parser input_args (List[str]): strings to parse, defaults to sys.argv parse_known (bool): only parse known arguments, similar to `ArgumentParser.parse_known_args` suppress_defaults (bool): parse while ignoring all default values modify_parser (Optional[Callable[[ArgumentParser], None]]): function to modify the parser, e.g., to set default values """ if suppress_defaults: # Parse args without any default values. This requires us to parse # twice, once to identify all the necessary task/model args, and a second # time with all defaults set to None. args = parse_args_and_arch( parser, input_args=input_args, parse_known=parse_known, suppress_defaults=False, ) suppressed_parser = argparse.ArgumentParser(add_help=False, parents=[parser]) suppressed_parser.set_defaults(**{k: None for k, v in vars(args).items()}) args = suppressed_parser.parse_args(input_args) return argparse.Namespace( **{k: v for k, v in vars(args).items() if v is not None} ) from fairseq.models import ARCH_MODEL_REGISTRY, ARCH_CONFIG_REGISTRY, MODEL_REGISTRY # Before creating the true parser, we need to import optional user module # in order to eagerly import custom tasks, optimizers, architectures, etc. usr_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False) usr_parser.add_argument("--user-dir", default=None) usr_args, _ = usr_parser.parse_known_args(input_args) utils.import_user_module(usr_args) if modify_parser is not None: modify_parser(parser) # The parser doesn't know about model/criterion/optimizer-specific args, so # we parse twice. First we parse the model/criterion/optimizer, then we # parse a second time after adding the *-specific arguments. # If input_args is given, we will parse those args instead of sys.argv. args, _ = parser.parse_known_args(input_args) # Add model-specific args to parser. if hasattr(args, "arch"): model_specific_group = parser.add_argument_group( "Model-specific configuration", # Only include attributes which are explicitly given as command-line # arguments or which have default values. 
argument_default=argparse.SUPPRESS, ) if args.arch in ARCH_MODEL_REGISTRY: ARCH_MODEL_REGISTRY[args.arch].add_args(model_specific_group) elif args.arch in MODEL_REGISTRY: MODEL_REGISTRY[args.arch].add_args(model_specific_group) else: raise RuntimeError() if hasattr(args, "task"): from fairseq.tasks import TASK_REGISTRY TASK_REGISTRY[args.task].add_args(parser) if getattr(args, "use_bmuf", False): # hack to support extra args for block distributed data parallelism from fairseq.optim.bmuf import FairseqBMUF FairseqBMUF.add_args(parser) # Add *-specific args to parser. from fairseq.registry import REGISTRIES for registry_name, REGISTRY in REGISTRIES.items(): choice = getattr(args, registry_name, None) if choice is not None: cls = REGISTRY["registry"][choice] if hasattr(cls, "add_args"): cls.add_args(parser) elif hasattr(cls, "__dataclass"): gen_parser_from_dataclass(parser, cls.__dataclass()) # Modify the parser a second time, since defaults may have been reset if modify_parser is not None: modify_parser(parser) # Parse a second time. if parse_known: args, extra = parser.parse_known_args(input_args) else: args = parser.parse_args(input_args) extra = None # Post-process args. if ( hasattr(args, "batch_size_valid") and args.batch_size_valid is None ) or not hasattr(args, "batch_size_valid"): args.batch_size_valid = args.batch_size if hasattr(args, "max_tokens_valid") and args.max_tokens_valid is None: args.max_tokens_valid = args.max_tokens if getattr(args, "memory_efficient_fp16", False): args.fp16 = True if getattr(args, "memory_efficient_bf16", False): args.bf16 = True args.tpu = getattr(args, "tpu", False) args.bf16 = getattr(args, "bf16", False) if args.bf16: args.tpu = True if args.tpu and args.fp16: raise ValueError("Cannot combine --fp16 and --tpu, use --bf16 on TPUs") if getattr(args, "seed", None) is None: args.seed = 1 # default seed for training args.no_seed_provided = True else: args.no_seed_provided = False # Apply architecture configuration. if hasattr(args, "arch") and args.arch in ARCH_CONFIG_REGISTRY: ARCH_CONFIG_REGISTRY[args.arch](args) if parse_known: return args, extra else: return args def get_args( data: Union[str, Path], task: str = "translation", arch: str = "transformer", **overrides ): parser = get_training_parser(task) args = parse_args_and_arch(parser, [str(data), "--task", task, "--arch", arch]) for k, v in overrides.items(): setattr(args, k, v) return args
null
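A minimal sketch of get_args: build a full training Namespace for a hypothetical binarized data directory, then override a couple of hyper-parameters. It assumes fairseq is importable, since the parser pulls in the registered tasks and architectures.

args = get_args(
    "data-bin/iwslt14.tokenized.de-en",   # placeholder path; not read at parse time
    task="translation",
    arch="transformer_iwslt_de_en",
    max_tokens=4096,
    lr=[5e-4],
)
print(args.arch, args.max_tokens, args.lr)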
182,420
import logging import torch import torch.nn as nn import torch.nn.functional as F from fairseq import utils from fairseq.model_parallel.models.pipeline_parallel_transformer.layers import ( Embedding, TransformerDecoderEmbedding, TransformerDecoderLayer, TransformerDecoderOutputLayer, TransformerEncoderEmbedding, TransformerEncoderLayer, TransformerEncoderLayerNorm, ) from fairseq.models import ( BaseFairseqModel, FairseqDecoder, FairseqEncoder, register_model, register_model_architecture, ) from fairseq.models.fairseq_encoder import EncoderOut from fairseq.models.transformer import ( base_architecture, transformer_iwslt_de_en, transformer_wmt_en_de_big, ) from fairseq.modules import SinusoidalPositionalEmbedding logger = logging.getLogger(__name__) TORCH_PIPE = False RPC_INIT = False def import_pipe(): global TORCH_PIPE global RPC_INIT try: from torch.distributed.pipeline.sync import Pipe # noqa global Pipe from torch.distributed.pipeline.sync.utils import partition_model global partition_model from torch.distributed import rpc import tempfile TORCH_PIPE = True # Initialize single process RPC agent since TORCH_PIPE requires # RRef. RRef depends on RPC being initialized and as a result we initialize # RPC with a single node. tmpfile = tempfile.NamedTemporaryFile() if not RPC_INIT: rpc.init_rpc( name="worker", rank=0, world_size=1, rpc_backend_options=rpc.TensorPipeRpcBackendOptions( init_method="file://{}".format(tmpfile.name), ) ) RPC_INIT = True logger.info('Using torch pipe') except ImportError: try: from fairscale.nn import Pipe # noqa logger.info('Using fairscale pipe') except ImportError: raise ImportError("Please install fairscale with: pip install fairscale")
null
182,421
import logging import torch import torch.nn as nn import torch.nn.functional as F from fairseq import utils from fairseq.model_parallel.models.pipeline_parallel_transformer.layers import ( Embedding, TransformerDecoderEmbedding, TransformerDecoderLayer, TransformerDecoderOutputLayer, TransformerEncoderEmbedding, TransformerEncoderLayer, TransformerEncoderLayerNorm, ) from fairseq.models import ( BaseFairseqModel, FairseqDecoder, FairseqEncoder, register_model, register_model_architecture, ) from fairseq.models.fairseq_encoder import EncoderOut from fairseq.models.transformer import ( base_architecture, transformer_iwslt_de_en, transformer_wmt_en_de_big, ) from fairseq.modules import SinusoidalPositionalEmbedding def transformer_iwslt_de_en(args): args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 1024) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 4) args.encoder_layers = getattr(args, "encoder_layers", 6) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512) args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 1024) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 4) args.decoder_layers = getattr(args, "decoder_layers", 6) base_architecture(args) def transformer_iwslt_de_en_dist(args): transformer_iwslt_de_en(args)
null
182,422
import logging import torch import torch.nn as nn import torch.nn.functional as F from fairseq import utils from fairseq.model_parallel.models.pipeline_parallel_transformer.layers import ( Embedding, TransformerDecoderEmbedding, TransformerDecoderLayer, TransformerDecoderOutputLayer, TransformerEncoderEmbedding, TransformerEncoderLayer, TransformerEncoderLayerNorm, ) from fairseq.models import ( BaseFairseqModel, FairseqDecoder, FairseqEncoder, register_model, register_model_architecture, ) from fairseq.models.fairseq_encoder import EncoderOut from fairseq.models.transformer import ( base_architecture, transformer_iwslt_de_en, transformer_wmt_en_de_big, ) from fairseq.modules import SinusoidalPositionalEmbedding def transformer_wmt_en_de_big(args): args.attention_dropout = getattr(args, "attention_dropout", 0.1) transformer_vaswani_wmt_en_de_big(args) def transformer_wmt_en_de_big_dist(args): transformer_wmt_en_de_big(args)
null
182,435
import io import logging import os import pickle import random import socket import struct import subprocess import warnings from argparse import Namespace from collections import OrderedDict from dataclasses import dataclass from typing import Any, Dict, List, Mapping, Optional import torch import torch.distributed as dist from fairseq.dataclass.configs import DistributedTrainingConfig, FairseqConfig from omegaconf import open_dict def all_reduce(tensor, group, op="sum"): if use_xla(): assert isinstance(group, tuple) and group[0] == "tpu" tensor = [tensor] # wrap in a list to make xm.all_reduce in-place return xm.all_reduce(op, tensor, groups=group[1])[0] else: if op == "sum": op = dist.ReduceOp.SUM elif op == "max": op = dist.ReduceOp.MAX else: raise NotImplementedError dist.all_reduce(tensor, op=op, group=group) return tensor The provided code snippet includes necessary dependencies for implementing the `all_reduce_dict` function. Write a Python function `def all_reduce_dict(data: Mapping[str, Any], device, group) -> Dict[str, Any]` to solve the following problem: AllReduce a dictionary of values across workers. We separately reduce items that are already on the device and items on CPU for better performance. Args: data (Mapping[str, Any]): dictionary of data to all-reduce, but cannot be a nested dictionary device (torch.device): device for the reduction group: group of the collective Here is the function: def all_reduce_dict(data: Mapping[str, Any], device, group) -> Dict[str, Any]: """ AllReduce a dictionary of values across workers. We separately reduce items that are already on the device and items on CPU for better performance. Args: data (Mapping[str, Any]): dictionary of data to all-reduce, but cannot be a nested dictionary device (torch.device): device for the reduction group: group of the collective """ data_keys = list(data.keys()) # We want to separately reduce items that are already on the # device and items on CPU for performance reasons. cpu_data = OrderedDict() device_data = OrderedDict() for k in data_keys: t = data[k] if not torch.is_tensor(t): cpu_data[k] = torch.tensor(t, dtype=torch.double) elif t.device.type != device.type: cpu_data[k] = t.to(dtype=torch.double) else: device_data[k] = t.to(dtype=torch.double) def _all_reduce_dict(data: OrderedDict): if len(data) == 0: return data buf = torch.cat([t.view(-1) for t in data.values()]).to(device=device) all_reduce(buf, group=group) split_buf = torch.split(buf, [t.numel() for t in data.values()]) reduced_data = [t.view_as(orig) for t, orig in zip(split_buf, data.values())] return OrderedDict(zip(data.keys(), reduced_data)) cpu_data = _all_reduce_dict(cpu_data) device_data = _all_reduce_dict(device_data) def get_from_stack(key): if key in cpu_data: return cpu_data[key] elif key in device_data: return device_data[key] raise KeyError return OrderedDict([(key, get_from_stack(key)) for key in data_keys])
AllReduce a dictionary of values across workers. We separately reduce items
that are already on the device and items on CPU for better performance.

Args:
    data (Mapping[str, Any]): dictionary of data to all-reduce, but cannot
        be a nested dictionary
    device (torch.device): device for the reduction
    group: group of the collective
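A minimal usage sketch for all_reduce_dict, assuming torch.distributed is already initialized and that get_global_group() from fairseq's distributed utils returns the default process group; the metric names are illustrative.

import torch
from fairseq.distributed import utils as dist_utils

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
group = dist_utils.get_global_group()  # assumption: reduce over the default global group

local_stats = {
    "loss": torch.tensor(2.5, device=device),  # already on `device`: reduced in the device bucket
    "nsentences": 64,                          # plain number: packed into the CPU double buffer
}
reduced = dist_utils.all_reduce_dict(local_stats, device=device, group=group)
# reduced["loss"] and reduced["nsentences"] now hold sums across all workers, as double tensors.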
182,436
import contextlib from typing import Optional import torch from fairseq.dataclass.configs import DistributedTrainingConfig from fairseq.distributed import utils as dist_utils try: from fairscale.nn.data_parallel import FullyShardedDataParallel as FSDP has_FSDP = True except ImportError: FSDP = torch.nn.Module has_FSDP = False class FullyShardedDataParallel(FSDP): def __init__(self, *args, use_sharded_state: bool = False, **kwargs): def unwrapped_module(self) -> torch.nn.Module: def state_dict(self, destination=None, prefix="", keep_vars=False): def load_state_dict(self, state_dict, strict=True, model_cfg=None): class DistributedTrainingConfig(FairseqDataclass): def fsdp_enable_wrap(cfg: DistributedTrainingConfig): try: from fairscale.nn import enable_wrap except ImportError: raise ImportError( "Cannot find FullyShardedDataParallel. " "Please install fairscale with: pip install fairscale" ) if cfg.memory_efficient_fp16: assert cfg.fp16 # memory_efficient_fp16 should imply fp16 group = dist_utils.get_data_parallel_group() if group is None and cfg.distributed_world_size == 1: from fairscale.utils.testing import DummyProcessGroup group = DummyProcessGroup(rank=0, size=1) fsdp_config = { "process_group": group, "reshard_after_forward": not cfg.no_reshard_after_forward, "mixed_precision": cfg.fp16 and not cfg.memory_efficient_fp16, "fp32_reduce_scatter": cfg.fp32_reduce_scatter, "flatten_parameters": True, "cpu_offload": cfg.cpu_offload, "compute_dtype": torch.float16 if cfg.fp16 else torch.float32, "bucket_cap_mb": cfg.bucket_cap_mb, "state_dict_device": torch.device("cpu"), # reduce GPU mem usage } with enable_wrap( wrapper_cls=FullyShardedDataParallel, use_sharded_state=cfg.use_sharded_state, **fsdp_config, ): yield
null
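A hedged sketch of how fsdp_enable_wrap is typically consumed, mirroring fairseq's fully_sharded ddp-backend path: upstream the function is decorated with @contextlib.contextmanager (omitted in the snippet above), and fsdp_wrap from the same module wraps the root module while the context is active. The shard_model helper and its arguments are illustrative.

import torch.nn as nn

from fairseq.dataclass.configs import DistributedTrainingConfig
from fairseq.distributed.fully_sharded_data_parallel import fsdp_enable_wrap, fsdp_wrap


def shard_model(cfg: DistributedTrainingConfig, model: nn.Module) -> nn.Module:
    # Inside the context, fairscale treats FullyShardedDataParallel as the default
    # wrapper class, so wrapping the root module shards its parameters.
    with fsdp_enable_wrap(cfg):
        return fsdp_wrap(model)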
182,441
import types import torch class FusedAdamV1(torch.optim.Optimizer): """ Implements Adam algorithm. Currently GPU-only. Requires Apex to be installed via ``python setup.py install --cuda_ext --cpp_ext``. It has been proposed in `Adam: A Method for Stochastic Optimization`_. Compared to the original version in Apex, the fairseq version casts grads and params to FP32 internally to support ``--memory-efficient-fp16``. Args: params (iterable): iterable of parameters to optimize or dicts defining parameter groups. lr (float, optional): learning rate. (default: 1e-3) betas (Tuple[float, float], optional): coefficients used for computing running averages of gradient and its square. (default: (0.9, 0.999)) eps (float, optional): term added to the denominator to improve numerical stability. (default: 1e-8) weight_decay (float, optional): weight decay (L2 penalty) (default: 0) amsgrad (boolean, optional): whether to use the AMSGrad variant of this algorithm from the paper `On the Convergence of Adam and Beyond`_ (default: False) NOT SUPPORTED in FusedAdam! eps_inside_sqrt (boolean, optional): in the 'update parameters' step, adds eps to the bias-corrected second moment estimate before evaluating square root instead of adding it to the square root of second moment estimate as in the original paper. (default: False) .. _Adam: A Method for Stochastic Optimization: https://arxiv.org/abs/1412.6980 .. _On the Convergence of Adam and Beyond: https://openreview.net/forum?id=ryQu7f-RZ """ def __init__( self, params, lr=1e-3, bias_correction=True, betas=(0.9, 0.999), eps=1e-8, eps_inside_sqrt=False, weight_decay=0.0, max_grad_norm=0.0, amsgrad=False, use_fp16_stats=False, ): global fused_adam_cuda import importlib fused_adam_cuda = importlib.import_module("fused_adam_cuda") if amsgrad: raise RuntimeError("FusedAdam does not support the AMSGrad variant.") defaults = { "lr": lr, "bias_correction": bias_correction, "betas": betas, "eps": eps, "weight_decay": weight_decay, "max_grad_norm": max_grad_norm, } super().__init__(params, defaults) self.eps_mode = 0 if eps_inside_sqrt else 1 self.use_fp16_stats = use_fp16_stats self.FLOAT16_MAX = 65504.0 def supports_memory_efficient_fp16(self): return True def supports_flat_params(self): return True def supports_step_with_scale(self): return True def step(self, closure=None, grads=None, scale=1.0, grad_norms=None): """Performs a single optimization step. Args: closure (callable, optional): A closure that reevaluates the model and returns the loss. grads (list of tensors, optional): weight gradient to use for the optimizer update. If gradients have type torch.half, parameters are expected to be in type torch.float. (default: None) output params (list of tensors, optional): A reduced precision copy of the updated weights written out in addition to the regular updated weights. Have to be of same type as gradients. (default: None) scale (float, optional): factor to divide gradient tensor values by before applying to weights. 
(default: 1) """ loss = None if closure is not None: loss = closure() if grads is None: grads_group = [None] * len(self.param_groups) # backward compatibility # assuming a list/generator of parameter means single group elif isinstance(grads, types.GeneratorType): grads_group = [grads] elif type(grads[0]) != list: grads_group = [grads] else: grads_group = grads if grad_norms is None: grad_norms = [None] * len(self.param_groups) for group, grads_this_group, grad_norm in zip( self.param_groups, grads_group, grad_norms ): if grads_this_group is None: grads_this_group = [None] * len(group["params"]) # compute combined scale factor for this group combined_scale = scale if group.get("max_grad_norm", 0) > 0: # norm is in fact norm*scale clip = ((grad_norm / scale) + 1e-6) / group["max_grad_norm"] if clip > 1: combined_scale = clip * scale bias_correction = 1 if group.get("bias_correction", 1) else 0 for p, grad in zip(group["params"], grads_this_group): # note: p.grad should not ever be set for correct # operation of mixed precision optimizer that sometimes # sends None gradients if p.grad is None and grad is None: continue if grad is None: grad = p.grad.data if grad.is_sparse: raise RuntimeError( "FusedAdam does not support sparse gradients, " "please consider SparseAdam instead" ) if p.device.type == "cpu": p_data_fp32 = p.data.cuda(non_blocking=True).float() out_p = torch.tensor([], dtype = torch.float) else: p_data_fp32 = p.data.float() out_p = p.data state = self.state[p] # State initialization dtype = torch.float16 if self.use_fp16_stats else p_data_fp32.dtype if len(state) == 0: state["step"] = 0 # Exponential moving average of gradient values state["exp_avg"] = torch.zeros_like(p_data_fp32, dtype=dtype) # Exponential moving average of squared gradient values state["exp_avg_sq"] = torch.zeros_like(p_data_fp32, dtype=dtype) if self.use_fp16_stats: state["exp_avg_scale"] = 1.0 state["exp_avg_sq_scale"] = 1.0 else: device = p_data_fp32.device state["exp_avg"] = state["exp_avg"].to(device, dtype) state["exp_avg_sq"] = state["exp_avg_sq"].to(device, dtype) exp_avg = state["exp_avg"] exp_avg_sq = state["exp_avg_sq"] if self.use_fp16_stats: assert exp_avg.dtype == torch.float16 exp_avg = exp_avg.float() * state["exp_avg_scale"] exp_avg_sq = exp_avg_sq.float() * state["exp_avg_sq_scale"] beta1, beta2 = group["betas"] state["step"] += 1 with torch.cuda.device(p_data_fp32.device): fused_adam_cuda.adam( p_data_fp32, out_p, exp_avg, exp_avg_sq, grad, group["lr"], beta1, beta2, group["eps"], combined_scale, state["step"], self.eps_mode, bias_correction, group["weight_decay"], ) if p.device.type == "cpu": p.data.copy_(p_data_fp32, non_blocking=True) if self.use_fp16_stats: def inf_norm(t): return torch.norm(t, float("inf")) # from github.com/openai/jukebox/blob/master/jukebox/utils/fp16.py state["exp_avg_scale"], state["exp_avg_sq_scale"] = ( 1e-8 + inf_norm(exp_avg) / self.FLOAT16_MAX, 1e-8 + inf_norm(exp_avg_sq) / self.FLOAT16_MAX, ) state["exp_avg"], state["exp_avg_sq"] = ( (exp_avg / state["exp_avg_scale"]).half(), (exp_avg_sq / state["exp_avg_sq_scale"]).half(), ) return loss try: from apex.optimizers import FusedAdam from apex.multi_tensor_apply import multi_tensor_applier class FusedAdamV2(FusedAdam): """ Compared to the original version in Apex, the fairseq version casts grads and params to FP32 internally to support ``--memory-efficient-fp16``. 
""" def __init__(self, *args, use_fp16_stats=False, **kwargs): if use_fp16_stats: raise NotImplementedError("--fp16-adam-stats is only supported with FusedAdamV1") super().__init__(*args, **kwargs) if not hasattr(self, "multi_tensor_adam"): raise Exception( "Apex installation is outdated. Please install an updated version of apex." ) def supports_memory_efficient_fp16(self): return True def supports_flat_params(self): return True def step( self, closure=None, grads=None, output_params=None, scale=None, grad_norms=None, ): """Performs a single optimization step.""" loss = None if closure is not None: loss = closure() for group in self.param_groups: bias_correction = 1 if group["bias_correction"] else 0 beta1, beta2 = group["betas"] # assume same step across group now to simplify things # per parameter step can be easily support by making it tensor, or pass list into kernel if "step" in group: group["step"] += 1 else: group["step"] = 1 # create lists for multi-tensor apply g_16, p_16, orig_p_16, m_16, v_16 = [], [], [], [], [] g_32, p_32, m_32, v_32 = [], [], [], [] for p in group["params"]: if p.grad is None: continue if p.grad.data.is_sparse: raise RuntimeError( "FusedAdam does not support sparse gradients, " "please consider SparseAdam instead" ) state = self.state[p] # State initialization if len(state) == 0: # Exponential moving average of gradient values state["exp_avg"] = torch.zeros_like(p.data, dtype=torch.float) # Exponential moving average of squared gradient values state["exp_avg_sq"] = torch.zeros_like( p.data, dtype=torch.float ) else: state["exp_avg"] = state["exp_avg"].to( device=p.data.device, dtype=torch.float ) state["exp_avg_sq"] = state["exp_avg_sq"].to( device=p.data.device, dtype=torch.float ) if p.dtype == torch.float16: g_16.append(p.grad.data.float()) p_16.append(p.data.float()) orig_p_16.append(p.data) m_16.append(state["exp_avg"]) v_16.append(state["exp_avg_sq"]) elif p.dtype == torch.float32: g_32.append(p.grad.data) p_32.append(p.data) m_32.append(state["exp_avg"]) v_32.append(state["exp_avg_sq"]) else: raise RuntimeError("FusedAdam only support fp16 and fp32.") with torch.cuda.device(p.device): if len(g_16) > 0: multi_tensor_applier( self.multi_tensor_adam, self._dummy_overflow_buf, [g_16, p_16, m_16, v_16], group["lr"], beta1, beta2, group["eps"], group["step"], self.adam_w_mode, bias_correction, group["weight_decay"], ) for orig_p, p in zip(orig_p_16, p_16): orig_p.copy_(p.data) if len(g_32) > 0: multi_tensor_applier( self.multi_tensor_adam, self._dummy_overflow_buf, [g_32, p_32, m_32, v_32], group["lr"], beta1, beta2, group["eps"], group["step"], self.adam_w_mode, bias_correction, group["weight_decay"], ) return loss except ImportError: pass The provided code snippet includes necessary dependencies for implementing the `get_fused_adam_class` function. Write a Python function `def get_fused_adam_class()` to solve the following problem: Look for the FusedAdam optimizer from apex. We first try to load the "contrib" interface, which is a bit faster than the main interface, but is technically deprecated. Here is the function: def get_fused_adam_class(): """ Look for the FusedAdam optimizer from apex. We first try to load the "contrib" interface, which is a bit faster than the main interface, but is technically deprecated. """ try: # The "deprecated" interface in recent versions of apex is a bit # faster than the main interface, since we don't use the apex # optimizer. This can be installed by passing the # `--deprecated_fused_adam` option when building apex. 
global fused_adam_cuda import importlib fused_adam_cuda = importlib.import_module("fused_adam_cuda") return FusedAdamV1 except ImportError: try: # fallback to the newer interface from apex.optimizers import FusedAdam as _FusedAdam # noqa from apex.multi_tensor_apply import multi_tensor_applier if multi_tensor_applier.available: return FusedAdamV2 except ImportError: pass return None
Look for the FusedAdam optimizer from apex. We first try to load the "contrib" interface, which is a bit faster than the main interface, but is technically deprecated.
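A short usage sketch for get_fused_adam_class, assuming the snippet lives at fairseq.optim.fused_adam as in upstream fairseq; the tiny model and hyper-parameters are placeholders, and the code falls back to vanilla Adam when apex (or a GPU) is unavailable.

import torch

from fairseq.optim.fused_adam import get_fused_adam_class

model = torch.nn.Linear(16, 16)
fused_adam_cls = get_fused_adam_class()

if fused_adam_cls is not None and torch.cuda.is_available():
    # apex is installed: use the fused CUDA kernel (FusedAdamV1 or FusedAdamV2)
    optimizer = fused_adam_cls(model.parameters(), lr=1e-3, betas=(0.9, 0.999), eps=1e-8)
else:
    # no apex / no GPU: fall back to the stock PyTorch optimizer
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)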
182,451
import logging import torch import torch.nn as nn import torch.nn.functional as F from fairseq import utils from fairseq.models import ( FairseqEncoder, FairseqEncoderModel, register_model, register_model_architecture, ) from fairseq.models.transformer import DEFAULT_MIN_PARAMS_TO_WRAP, TransformerEncoder from fairseq.modules import LayerNorm from fairseq.modules.quant_noise import quant_noise as apply_quant_noise_ from fairseq.modules.transformer_sentence_encoder import init_bert_params from fairseq.utils import safe_getattr, safe_hasattr from .hub_interface import RobertaHubInterface def base_architecture(args): args.encoder_layers = safe_getattr(args, "encoder_layers", 12) args.encoder_embed_dim = safe_getattr(args, "encoder_embed_dim", 768) args.encoder_ffn_embed_dim = safe_getattr(args, "encoder_ffn_embed_dim", 3072) args.encoder_attention_heads = safe_getattr(args, "encoder_attention_heads", 12) args.dropout = safe_getattr(args, "dropout", 0.1) args.attention_dropout = safe_getattr(args, "attention_dropout", 0.1) args.activation_dropout = safe_getattr(args, "activation_dropout", 0.0) args.pooler_dropout = safe_getattr(args, "pooler_dropout", 0.0) args.max_source_positions = safe_getattr(args, "max_positions", 512) args.no_token_positional_embeddings = safe_getattr( args, "no_token_positional_embeddings", False ) # BERT has a few structural differences compared to the original Transformer args.encoder_learned_pos = safe_getattr(args, "encoder_learned_pos", True) args.layernorm_embedding = safe_getattr(args, "layernorm_embedding", True) args.no_scale_embedding = safe_getattr(args, "no_scale_embedding", True) args.activation_fn = safe_getattr(args, "activation_fn", "gelu") args.encoder_normalize_before = safe_getattr(args, "encoder_normalize_before", False) args.pooler_activation_fn = safe_getattr(args, "pooler_activation_fn", "tanh") args.untie_weights_roberta = safe_getattr(args, "untie_weights_roberta", False) # Adaptive input config args.adaptive_input = safe_getattr(args, "adaptive_input", False) # LayerDrop config args.encoder_layerdrop = safe_getattr(args, "encoder_layerdrop", 0.0) args.encoder_layers_to_keep = safe_getattr(args, "encoder_layers_to_keep", None) # Quantization noise config args.quant_noise_pq = safe_getattr(args, "quant_noise_pq", 0) args.quant_noise_pq_block_size = safe_getattr(args, "quant_noise_pq_block_size", 8) args.quant_noise_scalar = safe_getattr(args, "quant_noise_scalar", 0) # R4F config args.spectral_norm_classification_head = safe_getattr( args, "spectral_norm_classification_head", False ) def safe_getattr(obj, k, default=None): """Returns obj[k] if it exists and is not None, otherwise returns default.""" from omegaconf import OmegaConf if OmegaConf.is_config(obj): return obj[k] if k in obj and obj[k] is not None else default return getattr(obj, k, default) def roberta_prenorm_architecture(args): args.layernorm_embedding = safe_getattr(args, "layernorm_embedding", False) args.encoder_normalize_before = safe_getattr(args, "encoder_normalize_before", True) base_architecture(args)
null
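A quick illustration of the safe_getattr contract described above: plain objects keep ordinary getattr semantics, while OmegaConf configs also fall back to the default when the stored value is None. The attribute names are arbitrary.

from argparse import Namespace

from omegaconf import OmegaConf

from fairseq.utils import safe_getattr

args = Namespace(dropout=None)
print(safe_getattr(args, "dropout", 0.1))         # None  (attribute exists, plain getattr path)
print(safe_getattr(args, "encoder_layers", 12))   # 12    (missing attribute -> default)

cfg = OmegaConf.create({"dropout": None})
print(safe_getattr(cfg, "dropout", 0.1))          # 0.1   (None inside a config -> default)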
182,453
import logging import torch import torch.nn as nn import torch.nn.functional as F from fairseq import utils from fairseq.models import ( FairseqEncoder, FairseqEncoderModel, register_model, register_model_architecture, ) from fairseq.models.transformer import DEFAULT_MIN_PARAMS_TO_WRAP, TransformerEncoder from fairseq.modules import LayerNorm from fairseq.modules.quant_noise import quant_noise as apply_quant_noise_ from fairseq.modules.transformer_sentence_encoder import init_bert_params from fairseq.utils import safe_getattr, safe_hasattr from .hub_interface import RobertaHubInterface def base_architecture(args): args.encoder_layers = safe_getattr(args, "encoder_layers", 12) args.encoder_embed_dim = safe_getattr(args, "encoder_embed_dim", 768) args.encoder_ffn_embed_dim = safe_getattr(args, "encoder_ffn_embed_dim", 3072) args.encoder_attention_heads = safe_getattr(args, "encoder_attention_heads", 12) args.dropout = safe_getattr(args, "dropout", 0.1) args.attention_dropout = safe_getattr(args, "attention_dropout", 0.1) args.activation_dropout = safe_getattr(args, "activation_dropout", 0.0) args.pooler_dropout = safe_getattr(args, "pooler_dropout", 0.0) args.max_source_positions = safe_getattr(args, "max_positions", 512) args.no_token_positional_embeddings = safe_getattr( args, "no_token_positional_embeddings", False ) # BERT has a few structural differences compared to the original Transformer args.encoder_learned_pos = safe_getattr(args, "encoder_learned_pos", True) args.layernorm_embedding = safe_getattr(args, "layernorm_embedding", True) args.no_scale_embedding = safe_getattr(args, "no_scale_embedding", True) args.activation_fn = safe_getattr(args, "activation_fn", "gelu") args.encoder_normalize_before = safe_getattr(args, "encoder_normalize_before", False) args.pooler_activation_fn = safe_getattr(args, "pooler_activation_fn", "tanh") args.untie_weights_roberta = safe_getattr(args, "untie_weights_roberta", False) # Adaptive input config args.adaptive_input = safe_getattr(args, "adaptive_input", False) # LayerDrop config args.encoder_layerdrop = safe_getattr(args, "encoder_layerdrop", 0.0) args.encoder_layers_to_keep = safe_getattr(args, "encoder_layers_to_keep", None) # Quantization noise config args.quant_noise_pq = safe_getattr(args, "quant_noise_pq", 0) args.quant_noise_pq_block_size = safe_getattr(args, "quant_noise_pq_block_size", 8) args.quant_noise_scalar = safe_getattr(args, "quant_noise_scalar", 0) # R4F config args.spectral_norm_classification_head = safe_getattr( args, "spectral_norm_classification_head", False ) def safe_getattr(obj, k, default=None): """Returns obj[k] if it exists and is not None, otherwise returns default.""" from omegaconf import OmegaConf if OmegaConf.is_config(obj): return obj[k] if k in obj and obj[k] is not None else default return getattr(obj, k, default) def roberta_large_architecture(args): args.encoder_layers = safe_getattr(args, "encoder_layers", 24) args.encoder_embed_dim = safe_getattr(args, "encoder_embed_dim", 1024) args.encoder_ffn_embed_dim = safe_getattr(args, "encoder_ffn_embed_dim", 4096) args.encoder_attention_heads = safe_getattr(args, "encoder_attention_heads", 16) base_architecture(args)
null
182,454
import logging

import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import utils
from fairseq.models import (
    FairseqEncoder,
    FairseqEncoderModel,
    register_model,
    register_model_architecture,
)
from fairseq.models.transformer import DEFAULT_MIN_PARAMS_TO_WRAP, TransformerEncoder
from fairseq.modules import LayerNorm
from fairseq.modules.quant_noise import quant_noise as apply_quant_noise_
from fairseq.modules.transformer_sentence_encoder import init_bert_params
from fairseq.utils import safe_getattr, safe_hasattr

from .hub_interface import RobertaHubInterface


def base_architecture(args):


def safe_getattr(obj, k, default=None):


def xlm_architecture(args):
    args.encoder_layers = safe_getattr(args, "encoder_layers", 16)
    args.encoder_embed_dim = safe_getattr(args, "encoder_embed_dim", 1280)
    args.encoder_ffn_embed_dim = safe_getattr(args, "encoder_ffn_embed_dim", 1280 * 4)
    args.encoder_attention_heads = safe_getattr(args, "encoder_attention_heads", 16)
    base_architecture(args)
null
182,459
from typing import Optional

import logging

import torch
import torch.nn as nn
from fairseq import utils
from fairseq.models import register_model, register_model_architecture
from fairseq.models.transformer import TransformerModel
from fairseq.modules.transformer_sentence_encoder import init_bert_params

from .hub_interface import BARTHubInterface


def bart_large_architecture(args):


def mbart_large_architecture(args):
    args.no_scale_embedding = getattr(args, "no_scale_embedding", False)
    bart_large_architecture(args)
null
182,461
from dataclasses import dataclass, field from typing import Optional from fairseq import options, utils from fairseq.dataclass import ChoiceEnum, FairseqDataclass from fairseq.models import ( FairseqLanguageModel, register_model, register_model_architecture, ) from fairseq.models.transformer import ( DEFAULT_MIN_PARAMS_TO_WRAP, Embedding, TransformerDecoder ) from fairseq.modules import AdaptiveInput, CharacterTokenEmbedder from fairseq.utils import safe_getattr, safe_hasattr from omegaconf import II def transformer_lm_big(args): args.decoder_layers = safe_getattr(args, "decoder_layers", 12) args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 1024) args.decoder_ffn_embed_dim = safe_getattr(args, "decoder_ffn_embed_dim", 4096) args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 16) base_lm_architecture(args) def safe_getattr(obj, k, default=None): """Returns obj[k] if it exists and is not None, otherwise returns default.""" from omegaconf import OmegaConf if OmegaConf.is_config(obj): return obj[k] if k in obj and obj[k] is not None else default return getattr(obj, k, default) def transformer_lm_baevski_wiki103(args): args.decoder_layers = safe_getattr(args, "decoder_layers", 16) args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 8) args.dropout = safe_getattr(args, "dropout", 0.3) args.adaptive_input = safe_getattr(args, "adaptive_input", True) args.tie_adaptive_weights = safe_getattr(args, "tie_adaptive_weights", True) args.adaptive_input_cutoff = safe_getattr(args, "adaptive_input_cutoff", "20000,60000") args.adaptive_softmax_cutoff = safe_getattr( args, "adaptive_softmax_cutoff", "20000,60000" ) args.adaptive_softmax_dropout = safe_getattr(args, "adaptive_softmax_dropout", 0.2) args.attention_dropout = safe_getattr(args, "attention_dropout", 0.1) args.activation_dropout = safe_getattr(args, "activation_dropout", 0.1) args.no_decoder_final_norm = safe_getattr(args, "no_decoder_final_norm", True) args.tie_adaptive_proj = safe_getattr(args, "tie_adaptive_proj", True) transformer_lm_big(args)
null
182,462
from dataclasses import dataclass, field from typing import Optional from fairseq import options, utils from fairseq.dataclass import ChoiceEnum, FairseqDataclass from fairseq.models import ( FairseqLanguageModel, register_model, register_model_architecture, ) from fairseq.models.transformer import ( DEFAULT_MIN_PARAMS_TO_WRAP, Embedding, TransformerDecoder ) from fairseq.modules import AdaptiveInput, CharacterTokenEmbedder from fairseq.utils import safe_getattr, safe_hasattr from omegaconf import II def transformer_lm_big(args): args.decoder_layers = safe_getattr(args, "decoder_layers", 12) args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 1024) args.decoder_ffn_embed_dim = safe_getattr(args, "decoder_ffn_embed_dim", 4096) args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 16) base_lm_architecture(args) def safe_getattr(obj, k, default=None): """Returns obj[k] if it exists and is not None, otherwise returns default.""" from omegaconf import OmegaConf if OmegaConf.is_config(obj): return obj[k] if k in obj and obj[k] is not None else default return getattr(obj, k, default) def transformer_lm_baevski_gbw(args): args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 512) args.dropout = safe_getattr(args, "dropout", 0.1) args.attention_dropout = safe_getattr(args, "attention_dropout", 0.1) args.no_decoder_final_norm = safe_getattr(args, "no_decoder_final_norm", True) transformer_lm_big(args)
null
182,463
from dataclasses import dataclass, field from typing import Optional from fairseq import options, utils from fairseq.dataclass import ChoiceEnum, FairseqDataclass from fairseq.models import ( FairseqLanguageModel, register_model, register_model_architecture, ) from fairseq.models.transformer import ( DEFAULT_MIN_PARAMS_TO_WRAP, Embedding, TransformerDecoder ) from fairseq.modules import AdaptiveInput, CharacterTokenEmbedder from fairseq.utils import safe_getattr, safe_hasattr from omegaconf import II def base_lm_architecture(args): # backward compatibility for older model checkpoints if safe_hasattr(args, "no_tie_adaptive_proj"): # previous models defined --no-tie-adaptive-proj, so use the existence of # that option to determine if this is an "old" model checkpoint args.no_decoder_final_norm = True # old models always set this to True if args.no_tie_adaptive_proj is False: args.tie_adaptive_proj = True if safe_hasattr(args, "decoder_final_norm"): args.no_decoder_final_norm = not args.decoder_final_norm args.dropout = safe_getattr(args, "dropout", 0.1) args.attention_dropout = safe_getattr(args, "attention_dropout", 0.0) args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 512) args.decoder_ffn_embed_dim = safe_getattr(args, "decoder_ffn_embed_dim", 2048) args.decoder_layers = safe_getattr(args, "decoder_layers", 6) args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 8) args.adaptive_softmax_cutoff = safe_getattr(args, "adaptive_softmax_cutoff", None) args.adaptive_softmax_dropout = safe_getattr(args, "adaptive_softmax_dropout", 0) args.adaptive_softmax_factor = safe_getattr(args, "adaptive_softmax_factor", 4) args.decoder_learned_pos = safe_getattr(args, "decoder_learned_pos", False) args.activation_fn = safe_getattr(args, "activation_fn", "relu") args.decoder_layerdrop = safe_getattr(args, "decoder_layerdrop", 0) args.decoder_layers_to_keep = safe_getattr(args, "decoder_layers_to_keep", None) args.quant_noise_pq = safe_getattr(args, "quant_noise_pq", 0) args.quant_noise_pq_block_size = safe_getattr(args, "quant_noise_pq_block_size", 8) args.quant_noise_scalar = safe_getattr(args, "quant_noise_scalar", 0) args.base_layers = safe_getattr(args, "base_layers", 0) args.base_sublayers = safe_getattr(args, "base_sublayers", 1) args.base_shuffle = safe_getattr(args, "base_shuffle", False) args.add_bos_token = safe_getattr(args, "add_bos_token", False) args.no_token_positional_embeddings = safe_getattr( args, "no_token_positional_embeddings", False ) args.share_decoder_input_output_embed = safe_getattr( args, "share_decoder_input_output_embed", False ) args.character_embeddings = safe_getattr(args, "character_embeddings", False) args.decoder_output_dim = safe_getattr( args, "decoder_output_dim", args.decoder_embed_dim ) args.decoder_input_dim = safe_getattr(args, "decoder_input_dim", args.decoder_embed_dim) # Model training is not stable without this args.decoder_normalize_before = True args.no_decoder_final_norm = safe_getattr(args, "no_decoder_final_norm", False) args.adaptive_input = safe_getattr(args, "adaptive_input", False) args.adaptive_input_factor = safe_getattr(args, "adaptive_input_factor", 4) args.adaptive_input_cutoff = safe_getattr(args, "adaptive_input_cutoff", None) args.tie_adaptive_weights = safe_getattr(args, "tie_adaptive_weights", False) args.tie_adaptive_proj = safe_getattr(args, "tie_adaptive_proj", False) args.no_scale_embedding = safe_getattr(args, "no_scale_embedding", False) args.layernorm_embedding = safe_getattr(args, 
"layernorm_embedding", False) args.checkpoint_activations = safe_getattr(args, "checkpoint_activations", False) args.offload_activations = safe_getattr(args, "offload_activations", False) args.scale_fc = safe_getattr(args, 'scale_fc', False) args.scale_attn = safe_getattr(args, 'scale_attn', False) args.scale_heads = safe_getattr(args, 'scale_heads', False) args.scale_resids = safe_getattr(args, 'scale_resids', False) if args.offload_activations: args.checkpoint_activations = True def safe_getattr(obj, k, default=None): """Returns obj[k] if it exists and is not None, otherwise returns default.""" from omegaconf import OmegaConf if OmegaConf.is_config(obj): return obj[k] if k in obj and obj[k] is not None else default return getattr(obj, k, default) def transformer_lm_gpt(args): args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 768) args.decoder_ffn_embed_dim = safe_getattr(args, "decoder_ffn_embed_dim", 3072) args.decoder_layers = safe_getattr(args, "decoder_layers", 12) args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 12) args.dropout = safe_getattr(args, "dropout", 0.1) args.attention_dropout = safe_getattr(args, "attention_dropout", 0.1) args.activation_fn = safe_getattr(args, "activation_fn", "gelu") base_lm_architecture(args)
null
182,464
from dataclasses import dataclass, field from typing import Optional from fairseq import options, utils from fairseq.dataclass import ChoiceEnum, FairseqDataclass from fairseq.models import ( FairseqLanguageModel, register_model, register_model_architecture, ) from fairseq.models.transformer import ( DEFAULT_MIN_PARAMS_TO_WRAP, Embedding, TransformerDecoder ) from fairseq.modules import AdaptiveInput, CharacterTokenEmbedder from fairseq.utils import safe_getattr, safe_hasattr from omegaconf import II def base_lm_architecture(args): # backward compatibility for older model checkpoints if safe_hasattr(args, "no_tie_adaptive_proj"): # previous models defined --no-tie-adaptive-proj, so use the existence of # that option to determine if this is an "old" model checkpoint args.no_decoder_final_norm = True # old models always set this to True if args.no_tie_adaptive_proj is False: args.tie_adaptive_proj = True if safe_hasattr(args, "decoder_final_norm"): args.no_decoder_final_norm = not args.decoder_final_norm args.dropout = safe_getattr(args, "dropout", 0.1) args.attention_dropout = safe_getattr(args, "attention_dropout", 0.0) args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 512) args.decoder_ffn_embed_dim = safe_getattr(args, "decoder_ffn_embed_dim", 2048) args.decoder_layers = safe_getattr(args, "decoder_layers", 6) args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 8) args.adaptive_softmax_cutoff = safe_getattr(args, "adaptive_softmax_cutoff", None) args.adaptive_softmax_dropout = safe_getattr(args, "adaptive_softmax_dropout", 0) args.adaptive_softmax_factor = safe_getattr(args, "adaptive_softmax_factor", 4) args.decoder_learned_pos = safe_getattr(args, "decoder_learned_pos", False) args.activation_fn = safe_getattr(args, "activation_fn", "relu") args.decoder_layerdrop = safe_getattr(args, "decoder_layerdrop", 0) args.decoder_layers_to_keep = safe_getattr(args, "decoder_layers_to_keep", None) args.quant_noise_pq = safe_getattr(args, "quant_noise_pq", 0) args.quant_noise_pq_block_size = safe_getattr(args, "quant_noise_pq_block_size", 8) args.quant_noise_scalar = safe_getattr(args, "quant_noise_scalar", 0) args.base_layers = safe_getattr(args, "base_layers", 0) args.base_sublayers = safe_getattr(args, "base_sublayers", 1) args.base_shuffle = safe_getattr(args, "base_shuffle", False) args.add_bos_token = safe_getattr(args, "add_bos_token", False) args.no_token_positional_embeddings = safe_getattr( args, "no_token_positional_embeddings", False ) args.share_decoder_input_output_embed = safe_getattr( args, "share_decoder_input_output_embed", False ) args.character_embeddings = safe_getattr(args, "character_embeddings", False) args.decoder_output_dim = safe_getattr( args, "decoder_output_dim", args.decoder_embed_dim ) args.decoder_input_dim = safe_getattr(args, "decoder_input_dim", args.decoder_embed_dim) # Model training is not stable without this args.decoder_normalize_before = True args.no_decoder_final_norm = safe_getattr(args, "no_decoder_final_norm", False) args.adaptive_input = safe_getattr(args, "adaptive_input", False) args.adaptive_input_factor = safe_getattr(args, "adaptive_input_factor", 4) args.adaptive_input_cutoff = safe_getattr(args, "adaptive_input_cutoff", None) args.tie_adaptive_weights = safe_getattr(args, "tie_adaptive_weights", False) args.tie_adaptive_proj = safe_getattr(args, "tie_adaptive_proj", False) args.no_scale_embedding = safe_getattr(args, "no_scale_embedding", False) args.layernorm_embedding = safe_getattr(args, 
"layernorm_embedding", False) args.checkpoint_activations = safe_getattr(args, "checkpoint_activations", False) args.offload_activations = safe_getattr(args, "offload_activations", False) args.scale_fc = safe_getattr(args, 'scale_fc', False) args.scale_attn = safe_getattr(args, 'scale_attn', False) args.scale_heads = safe_getattr(args, 'scale_heads', False) args.scale_resids = safe_getattr(args, 'scale_resids', False) if args.offload_activations: args.checkpoint_activations = True def safe_getattr(obj, k, default=None): """Returns obj[k] if it exists and is not None, otherwise returns default.""" from omegaconf import OmegaConf if OmegaConf.is_config(obj): return obj[k] if k in obj and obj[k] is not None else default return getattr(obj, k, default) def transformer_lm_gpt2_small(args): args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 1024) args.decoder_ffn_embed_dim = safe_getattr(args, "decoder_ffn_embed_dim", 4096) args.decoder_layers = safe_getattr(args, "decoder_layers", 24) args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 16) args.dropout = safe_getattr(args, "dropout", 0.1) args.attention_dropout = safe_getattr(args, "attention_dropout", 0.1) args.activation_fn = safe_getattr(args, "activation_fn", "gelu") base_lm_architecture(args)
null
182,465
from dataclasses import dataclass, field from typing import Optional from fairseq import options, utils from fairseq.dataclass import ChoiceEnum, FairseqDataclass from fairseq.models import ( FairseqLanguageModel, register_model, register_model_architecture, ) from fairseq.models.transformer import ( DEFAULT_MIN_PARAMS_TO_WRAP, Embedding, TransformerDecoder ) from fairseq.modules import AdaptiveInput, CharacterTokenEmbedder from fairseq.utils import safe_getattr, safe_hasattr from omegaconf import II def base_lm_architecture(args): # backward compatibility for older model checkpoints if safe_hasattr(args, "no_tie_adaptive_proj"): # previous models defined --no-tie-adaptive-proj, so use the existence of # that option to determine if this is an "old" model checkpoint args.no_decoder_final_norm = True # old models always set this to True if args.no_tie_adaptive_proj is False: args.tie_adaptive_proj = True if safe_hasattr(args, "decoder_final_norm"): args.no_decoder_final_norm = not args.decoder_final_norm args.dropout = safe_getattr(args, "dropout", 0.1) args.attention_dropout = safe_getattr(args, "attention_dropout", 0.0) args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 512) args.decoder_ffn_embed_dim = safe_getattr(args, "decoder_ffn_embed_dim", 2048) args.decoder_layers = safe_getattr(args, "decoder_layers", 6) args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 8) args.adaptive_softmax_cutoff = safe_getattr(args, "adaptive_softmax_cutoff", None) args.adaptive_softmax_dropout = safe_getattr(args, "adaptive_softmax_dropout", 0) args.adaptive_softmax_factor = safe_getattr(args, "adaptive_softmax_factor", 4) args.decoder_learned_pos = safe_getattr(args, "decoder_learned_pos", False) args.activation_fn = safe_getattr(args, "activation_fn", "relu") args.decoder_layerdrop = safe_getattr(args, "decoder_layerdrop", 0) args.decoder_layers_to_keep = safe_getattr(args, "decoder_layers_to_keep", None) args.quant_noise_pq = safe_getattr(args, "quant_noise_pq", 0) args.quant_noise_pq_block_size = safe_getattr(args, "quant_noise_pq_block_size", 8) args.quant_noise_scalar = safe_getattr(args, "quant_noise_scalar", 0) args.base_layers = safe_getattr(args, "base_layers", 0) args.base_sublayers = safe_getattr(args, "base_sublayers", 1) args.base_shuffle = safe_getattr(args, "base_shuffle", False) args.add_bos_token = safe_getattr(args, "add_bos_token", False) args.no_token_positional_embeddings = safe_getattr( args, "no_token_positional_embeddings", False ) args.share_decoder_input_output_embed = safe_getattr( args, "share_decoder_input_output_embed", False ) args.character_embeddings = safe_getattr(args, "character_embeddings", False) args.decoder_output_dim = safe_getattr( args, "decoder_output_dim", args.decoder_embed_dim ) args.decoder_input_dim = safe_getattr(args, "decoder_input_dim", args.decoder_embed_dim) # Model training is not stable without this args.decoder_normalize_before = True args.no_decoder_final_norm = safe_getattr(args, "no_decoder_final_norm", False) args.adaptive_input = safe_getattr(args, "adaptive_input", False) args.adaptive_input_factor = safe_getattr(args, "adaptive_input_factor", 4) args.adaptive_input_cutoff = safe_getattr(args, "adaptive_input_cutoff", None) args.tie_adaptive_weights = safe_getattr(args, "tie_adaptive_weights", False) args.tie_adaptive_proj = safe_getattr(args, "tie_adaptive_proj", False) args.no_scale_embedding = safe_getattr(args, "no_scale_embedding", False) args.layernorm_embedding = safe_getattr(args, 
"layernorm_embedding", False) args.checkpoint_activations = safe_getattr(args, "checkpoint_activations", False) args.offload_activations = safe_getattr(args, "offload_activations", False) args.scale_fc = safe_getattr(args, 'scale_fc', False) args.scale_attn = safe_getattr(args, 'scale_attn', False) args.scale_heads = safe_getattr(args, 'scale_heads', False) args.scale_resids = safe_getattr(args, 'scale_resids', False) if args.offload_activations: args.checkpoint_activations = True def safe_getattr(obj, k, default=None): """Returns obj[k] if it exists and is not None, otherwise returns default.""" from omegaconf import OmegaConf if OmegaConf.is_config(obj): return obj[k] if k in obj and obj[k] is not None else default return getattr(obj, k, default) def transformer_lm_gpt2_tiny(args): args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 64) args.decoder_ffn_embed_dim = safe_getattr(args, "decoder_ffn_embed_dim", 64) args.decoder_layers = safe_getattr(args, "decoder_layers", 2) args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 1) args.dropout = safe_getattr(args, "dropout", 0.1) args.attention_dropout = safe_getattr(args, "attention_dropout", 0.1) args.activation_fn = safe_getattr(args, "activation_fn", "gelu") base_lm_architecture(args)
null
182,466
from dataclasses import dataclass, field from typing import Optional from fairseq import options, utils from fairseq.dataclass import ChoiceEnum, FairseqDataclass from fairseq.models import ( FairseqLanguageModel, register_model, register_model_architecture, ) from fairseq.models.transformer import ( DEFAULT_MIN_PARAMS_TO_WRAP, Embedding, TransformerDecoder ) from fairseq.modules import AdaptiveInput, CharacterTokenEmbedder from fairseq.utils import safe_getattr, safe_hasattr from omegaconf import II def base_lm_architecture(args): # backward compatibility for older model checkpoints if safe_hasattr(args, "no_tie_adaptive_proj"): # previous models defined --no-tie-adaptive-proj, so use the existence of # that option to determine if this is an "old" model checkpoint args.no_decoder_final_norm = True # old models always set this to True if args.no_tie_adaptive_proj is False: args.tie_adaptive_proj = True if safe_hasattr(args, "decoder_final_norm"): args.no_decoder_final_norm = not args.decoder_final_norm args.dropout = safe_getattr(args, "dropout", 0.1) args.attention_dropout = safe_getattr(args, "attention_dropout", 0.0) args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 512) args.decoder_ffn_embed_dim = safe_getattr(args, "decoder_ffn_embed_dim", 2048) args.decoder_layers = safe_getattr(args, "decoder_layers", 6) args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 8) args.adaptive_softmax_cutoff = safe_getattr(args, "adaptive_softmax_cutoff", None) args.adaptive_softmax_dropout = safe_getattr(args, "adaptive_softmax_dropout", 0) args.adaptive_softmax_factor = safe_getattr(args, "adaptive_softmax_factor", 4) args.decoder_learned_pos = safe_getattr(args, "decoder_learned_pos", False) args.activation_fn = safe_getattr(args, "activation_fn", "relu") args.decoder_layerdrop = safe_getattr(args, "decoder_layerdrop", 0) args.decoder_layers_to_keep = safe_getattr(args, "decoder_layers_to_keep", None) args.quant_noise_pq = safe_getattr(args, "quant_noise_pq", 0) args.quant_noise_pq_block_size = safe_getattr(args, "quant_noise_pq_block_size", 8) args.quant_noise_scalar = safe_getattr(args, "quant_noise_scalar", 0) args.base_layers = safe_getattr(args, "base_layers", 0) args.base_sublayers = safe_getattr(args, "base_sublayers", 1) args.base_shuffle = safe_getattr(args, "base_shuffle", False) args.add_bos_token = safe_getattr(args, "add_bos_token", False) args.no_token_positional_embeddings = safe_getattr( args, "no_token_positional_embeddings", False ) args.share_decoder_input_output_embed = safe_getattr( args, "share_decoder_input_output_embed", False ) args.character_embeddings = safe_getattr(args, "character_embeddings", False) args.decoder_output_dim = safe_getattr( args, "decoder_output_dim", args.decoder_embed_dim ) args.decoder_input_dim = safe_getattr(args, "decoder_input_dim", args.decoder_embed_dim) # Model training is not stable without this args.decoder_normalize_before = True args.no_decoder_final_norm = safe_getattr(args, "no_decoder_final_norm", False) args.adaptive_input = safe_getattr(args, "adaptive_input", False) args.adaptive_input_factor = safe_getattr(args, "adaptive_input_factor", 4) args.adaptive_input_cutoff = safe_getattr(args, "adaptive_input_cutoff", None) args.tie_adaptive_weights = safe_getattr(args, "tie_adaptive_weights", False) args.tie_adaptive_proj = safe_getattr(args, "tie_adaptive_proj", False) args.no_scale_embedding = safe_getattr(args, "no_scale_embedding", False) args.layernorm_embedding = safe_getattr(args, 
"layernorm_embedding", False) args.checkpoint_activations = safe_getattr(args, "checkpoint_activations", False) args.offload_activations = safe_getattr(args, "offload_activations", False) args.scale_fc = safe_getattr(args, 'scale_fc', False) args.scale_attn = safe_getattr(args, 'scale_attn', False) args.scale_heads = safe_getattr(args, 'scale_heads', False) args.scale_resids = safe_getattr(args, 'scale_resids', False) if args.offload_activations: args.checkpoint_activations = True def safe_getattr(obj, k, default=None): """Returns obj[k] if it exists and is not None, otherwise returns default.""" from omegaconf import OmegaConf if OmegaConf.is_config(obj): return obj[k] if k in obj and obj[k] is not None else default return getattr(obj, k, default) def transformer_lm_gpt2_medium(args): args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 1280) args.decoder_ffn_embed_dim = safe_getattr(args, "decoder_ffn_embed_dim", 5120) args.decoder_layers = safe_getattr(args, "decoder_layers", 36) args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 20) args.dropout = safe_getattr(args, "dropout", 0.1) args.attention_dropout = safe_getattr(args, "attention_dropout", 0.1) args.activation_fn = safe_getattr(args, "activation_fn", "gelu") base_lm_architecture(args)
null
182,467
from dataclasses import dataclass, field from typing import Optional from fairseq import options, utils from fairseq.dataclass import ChoiceEnum, FairseqDataclass from fairseq.models import ( FairseqLanguageModel, register_model, register_model_architecture, ) from fairseq.models.transformer import ( DEFAULT_MIN_PARAMS_TO_WRAP, Embedding, TransformerDecoder ) from fairseq.modules import AdaptiveInput, CharacterTokenEmbedder from fairseq.utils import safe_getattr, safe_hasattr from omegaconf import II def base_lm_architecture(args): def safe_getattr(obj, k, default=None): def transformer_lm_gpt2_big(args): args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 1600) args.decoder_ffn_embed_dim = safe_getattr(args, "decoder_ffn_embed_dim", 6400) args.decoder_layers = safe_getattr(args, "decoder_layers", 48) args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 25) args.dropout = safe_getattr(args, "dropout", 0.1) args.attention_dropout = safe_getattr(args, "attention_dropout", 0.1) args.activation_fn = safe_getattr(args, "activation_fn", "gelu") base_lm_architecture(args)
null
182,468
from dataclasses import dataclass, field from typing import Optional from fairseq import options, utils from fairseq.dataclass import ChoiceEnum, FairseqDataclass from fairseq.models import ( FairseqLanguageModel, register_model, register_model_architecture, ) from fairseq.models.transformer import ( DEFAULT_MIN_PARAMS_TO_WRAP, Embedding, TransformerDecoder ) from fairseq.modules import AdaptiveInput, CharacterTokenEmbedder from fairseq.utils import safe_getattr, safe_hasattr from omegaconf import II def base_gpt3_architecture(args): args.decoder_input_dim = args.decoder_embed_dim args.decoder_output_dim = args.decoder_embed_dim args.decoder_ffn_embed_dim = safe_getattr(args, "decoder_ffn_embed_dim", args.decoder_embed_dim * 4) # GPT-3 used learned positional embeddings, rather than sinusoidal args.decoder_learned_pos = safe_getattr(args, "decoder_learned_pos", True) args.dropout = safe_getattr(args, "dropout", 0.0) args.attention_dropout = safe_getattr(args, "attention_dropout", 0.0) args.activation_fn = safe_getattr(args, "activation_fn", "gelu") args.share_decoder_input_output_embed = True base_lm_architecture(args) def safe_getattr(obj, k, default=None): """Returns obj[k] if it exists and is not None, otherwise returns default.""" from omegaconf import OmegaConf if OmegaConf.is_config(obj): return obj[k] if k in obj and obj[k] is not None else default return getattr(obj, k, default) def transformer_lm_gpt3_small(args): # 125M params args.decoder_layers = safe_getattr(args, "decoder_layers", 12) args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 768) args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 12) base_gpt3_architecture(args)
null
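Hedged sketch: in fairseq these presets are exposed through @register_model_architecture, but at bottom they only fill in defaults on an argparse Namespace, so the GPT-3 "small" (~125M) shape from the snippet above can be inspected directly (this assumes the snippet's functions are in scope).

from argparse import Namespace

args = Namespace()
transformer_lm_gpt3_small(args)  # fills in 12 layers, 768-dim embeddings, 12 heads, GELU, etc.
print(args.decoder_layers, args.decoder_embed_dim, args.decoder_attention_heads)
# -> 12 768 12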
182,469
from dataclasses import dataclass, field from typing import Optional from fairseq import options, utils from fairseq.dataclass import ChoiceEnum, FairseqDataclass from fairseq.models import ( FairseqLanguageModel, register_model, register_model_architecture, ) from fairseq.models.transformer import ( DEFAULT_MIN_PARAMS_TO_WRAP, Embedding, TransformerDecoder ) from fairseq.modules import AdaptiveInput, CharacterTokenEmbedder from fairseq.utils import safe_getattr, safe_hasattr from omegaconf import II def base_gpt3_architecture(args): args.decoder_input_dim = args.decoder_embed_dim args.decoder_output_dim = args.decoder_embed_dim args.decoder_ffn_embed_dim = safe_getattr(args, "decoder_ffn_embed_dim", args.decoder_embed_dim * 4) # GPT-3 used learned positional embeddings, rather than sinusoidal args.decoder_learned_pos = safe_getattr(args, "decoder_learned_pos", True) args.dropout = safe_getattr(args, "dropout", 0.0) args.attention_dropout = safe_getattr(args, "attention_dropout", 0.0) args.activation_fn = safe_getattr(args, "activation_fn", "gelu") args.share_decoder_input_output_embed = True base_lm_architecture(args) def safe_getattr(obj, k, default=None): """Returns obj[k] if it exists and is not None, otherwise returns default.""" from omegaconf import OmegaConf if OmegaConf.is_config(obj): return obj[k] if k in obj and obj[k] is not None else default return getattr(obj, k, default) def transformer_lm_gpt3_medium(args): # 350M params args.decoder_layers = safe_getattr(args, "decoder_layers", 24) args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 1024) args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 16) base_gpt3_architecture(args)
null
182,470
from dataclasses import dataclass, field from typing import Optional from fairseq import options, utils from fairseq.dataclass import ChoiceEnum, FairseqDataclass from fairseq.models import ( FairseqLanguageModel, register_model, register_model_architecture, ) from fairseq.models.transformer import ( DEFAULT_MIN_PARAMS_TO_WRAP, Embedding, TransformerDecoder ) from fairseq.modules import AdaptiveInput, CharacterTokenEmbedder from fairseq.utils import safe_getattr, safe_hasattr from omegaconf import II def base_gpt3_architecture(args): args.decoder_input_dim = args.decoder_embed_dim args.decoder_output_dim = args.decoder_embed_dim args.decoder_ffn_embed_dim = safe_getattr(args, "decoder_ffn_embed_dim", args.decoder_embed_dim * 4) # GPT-3 used learned positional embeddings, rather than sinusoidal args.decoder_learned_pos = safe_getattr(args, "decoder_learned_pos", True) args.dropout = safe_getattr(args, "dropout", 0.0) args.attention_dropout = safe_getattr(args, "attention_dropout", 0.0) args.activation_fn = safe_getattr(args, "activation_fn", "gelu") args.share_decoder_input_output_embed = True base_lm_architecture(args) def safe_getattr(obj, k, default=None): """Returns obj[k] if it exists and is not None, otherwise returns default.""" from omegaconf import OmegaConf if OmegaConf.is_config(obj): return obj[k] if k in obj and obj[k] is not None else default return getattr(obj, k, default) def transformer_lm_gpt3_large(args): # 760M params args.decoder_layers = safe_getattr(args, "decoder_layers", 24) args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 1536) args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 16) base_gpt3_architecture(args)
null
182,471
from dataclasses import dataclass, field from typing import Optional from fairseq import options, utils from fairseq.dataclass import ChoiceEnum, FairseqDataclass from fairseq.models import ( FairseqLanguageModel, register_model, register_model_architecture, ) from fairseq.models.transformer import ( DEFAULT_MIN_PARAMS_TO_WRAP, Embedding, TransformerDecoder ) from fairseq.modules import AdaptiveInput, CharacterTokenEmbedder from fairseq.utils import safe_getattr, safe_hasattr from omegaconf import II def base_gpt3_architecture(args): args.decoder_input_dim = args.decoder_embed_dim args.decoder_output_dim = args.decoder_embed_dim args.decoder_ffn_embed_dim = safe_getattr(args, "decoder_ffn_embed_dim", args.decoder_embed_dim * 4) # GPT-3 used learned positional embeddings, rather than sinusoidal args.decoder_learned_pos = safe_getattr(args, "decoder_learned_pos", True) args.dropout = safe_getattr(args, "dropout", 0.0) args.attention_dropout = safe_getattr(args, "attention_dropout", 0.0) args.activation_fn = safe_getattr(args, "activation_fn", "gelu") args.share_decoder_input_output_embed = True base_lm_architecture(args) def safe_getattr(obj, k, default=None): """Returns obj[k] if it exists and is not None, otherwise returns default.""" from omegaconf import OmegaConf if OmegaConf.is_config(obj): return obj[k] if k in obj and obj[k] is not None else default return getattr(obj, k, default) def transformer_lm_gpt3_xl(args): # 1.3B params args.decoder_layers = safe_getattr(args, "decoder_layers", 24) args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 2048) args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 32) base_gpt3_architecture(args)
null
182,472
from dataclasses import dataclass, field from typing import Optional from fairseq import options, utils from fairseq.dataclass import ChoiceEnum, FairseqDataclass from fairseq.models import ( FairseqLanguageModel, register_model, register_model_architecture, ) from fairseq.models.transformer import ( DEFAULT_MIN_PARAMS_TO_WRAP, Embedding, TransformerDecoder ) from fairseq.modules import AdaptiveInput, CharacterTokenEmbedder from fairseq.utils import safe_getattr, safe_hasattr from omegaconf import II def base_gpt3_architecture(args): args.decoder_input_dim = args.decoder_embed_dim args.decoder_output_dim = args.decoder_embed_dim args.decoder_ffn_embed_dim = safe_getattr(args, "decoder_ffn_embed_dim", args.decoder_embed_dim * 4) # GPT-3 used learned positional embeddings, rather than sinusoidal args.decoder_learned_pos = safe_getattr(args, "decoder_learned_pos", True) args.dropout = safe_getattr(args, "dropout", 0.0) args.attention_dropout = safe_getattr(args, "attention_dropout", 0.0) args.activation_fn = safe_getattr(args, "activation_fn", "gelu") args.share_decoder_input_output_embed = True base_lm_architecture(args) def safe_getattr(obj, k, default=None): """Returns obj[k] if it exists and is not None, otherwise returns default.""" from omegaconf import OmegaConf if OmegaConf.is_config(obj): return obj[k] if k in obj and obj[k] is not None else default return getattr(obj, k, default) def transformer_lm_gpt3_2_7(args): # 2.7B params args.decoder_layers = safe_getattr(args, "decoder_layers", 32) args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 2560) args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 32) base_gpt3_architecture(args)
null
182,473
from dataclasses import dataclass, field from typing import Optional from fairseq import options, utils from fairseq.dataclass import ChoiceEnum, FairseqDataclass from fairseq.models import ( FairseqLanguageModel, register_model, register_model_architecture, ) from fairseq.models.transformer import ( DEFAULT_MIN_PARAMS_TO_WRAP, Embedding, TransformerDecoder ) from fairseq.modules import AdaptiveInput, CharacterTokenEmbedder from fairseq.utils import safe_getattr, safe_hasattr from omegaconf import II def base_gpt3_architecture(args): args.decoder_input_dim = args.decoder_embed_dim args.decoder_output_dim = args.decoder_embed_dim args.decoder_ffn_embed_dim = safe_getattr(args, "decoder_ffn_embed_dim", args.decoder_embed_dim * 4) # GPT-3 used learned positional embeddings, rather than sinusoidal args.decoder_learned_pos = safe_getattr(args, "decoder_learned_pos", True) args.dropout = safe_getattr(args, "dropout", 0.0) args.attention_dropout = safe_getattr(args, "attention_dropout", 0.0) args.activation_fn = safe_getattr(args, "activation_fn", "gelu") args.share_decoder_input_output_embed = True base_lm_architecture(args) def safe_getattr(obj, k, default=None): """Returns obj[k] if it exists and is not None, otherwise returns default.""" from omegaconf import OmegaConf if OmegaConf.is_config(obj): return obj[k] if k in obj and obj[k] is not None else default return getattr(obj, k, default) def transformer_lm_gpt3_6_7(args): # 6.7B params args.decoder_layers = safe_getattr(args, "decoder_layers", 32) args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 4096) args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 32) base_gpt3_architecture(args)
null
182,474
from dataclasses import dataclass, field
from typing import Optional

from fairseq import options, utils
from fairseq.dataclass import ChoiceEnum, FairseqDataclass
from fairseq.models import (
    FairseqLanguageModel,
    register_model,
    register_model_architecture,
)
from fairseq.models.transformer import (
    DEFAULT_MIN_PARAMS_TO_WRAP, Embedding, TransformerDecoder
)
from fairseq.modules import AdaptiveInput, CharacterTokenEmbedder
from fairseq.utils import safe_getattr, safe_hasattr
from omegaconf import II


def base_gpt3_architecture(args):
    args.decoder_input_dim = args.decoder_embed_dim
    args.decoder_output_dim = args.decoder_embed_dim
    args.decoder_ffn_embed_dim = safe_getattr(args, "decoder_ffn_embed_dim", args.decoder_embed_dim * 4)
    # GPT-3 used learned positional embeddings, rather than sinusoidal
    args.decoder_learned_pos = safe_getattr(args, "decoder_learned_pos", True)
    args.dropout = safe_getattr(args, "dropout", 0.0)
    args.attention_dropout = safe_getattr(args, "attention_dropout", 0.0)
    args.activation_fn = safe_getattr(args, "activation_fn", "gelu")
    args.share_decoder_input_output_embed = True
    base_lm_architecture(args)


def safe_getattr(obj, k, default=None):
    """Returns obj[k] if it exists and is not None, otherwise returns default."""
    from omegaconf import OmegaConf

    if OmegaConf.is_config(obj):
        return obj[k] if k in obj and obj[k] is not None else default
    return getattr(obj, k, default)


def transformer_lm_gpt3_13(args):
    # 13B params
    args.decoder_layers = safe_getattr(args, "decoder_layers", 40)
    args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 5120)
    args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 40)
    base_gpt3_architecture(args)
null
182,475
from dataclasses import dataclass, field
from typing import Optional

from fairseq import options, utils
from fairseq.dataclass import ChoiceEnum, FairseqDataclass
from fairseq.models import (
    FairseqLanguageModel,
    register_model,
    register_model_architecture,
)
from fairseq.models.transformer import (
    DEFAULT_MIN_PARAMS_TO_WRAP, Embedding, TransformerDecoder
)
from fairseq.modules import AdaptiveInput, CharacterTokenEmbedder
from fairseq.utils import safe_getattr, safe_hasattr
from omegaconf import II


def base_gpt3_architecture(args):
    args.decoder_input_dim = args.decoder_embed_dim
    args.decoder_output_dim = args.decoder_embed_dim
    args.decoder_ffn_embed_dim = safe_getattr(args, "decoder_ffn_embed_dim", args.decoder_embed_dim * 4)
    # GPT-3 used learned positional embeddings, rather than sinusoidal
    args.decoder_learned_pos = safe_getattr(args, "decoder_learned_pos", True)
    args.dropout = safe_getattr(args, "dropout", 0.0)
    args.attention_dropout = safe_getattr(args, "attention_dropout", 0.0)
    args.activation_fn = safe_getattr(args, "activation_fn", "gelu")
    args.share_decoder_input_output_embed = True
    base_lm_architecture(args)


def safe_getattr(obj, k, default=None):
    """Returns obj[k] if it exists and is not None, otherwise returns default."""
    from omegaconf import OmegaConf

    if OmegaConf.is_config(obj):
        return obj[k] if k in obj and obj[k] is not None else default
    return getattr(obj, k, default)


def transformer_lm_gpt3_175(args):
    # 175B params
    args.decoder_layers = safe_getattr(args, "decoder_layers", 96)
    args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 12288)
    args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 96)
    base_gpt3_architecture(args)
null
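The "# 1.3B params" style comments in the five GPT-3 rows above can be sanity-checked with the usual ~12 · layers · d_model² transformer-block approximation (attention plus 4x FFN, ignoring embeddings and biases). A self-contained check using only the layer counts and widths listed in those rows:

presets = {
    "transformer_lm_gpt3_xl   (1.3B)": (24, 2048),
    "transformer_lm_gpt3_2_7  (2.7B)": (32, 2560),
    "transformer_lm_gpt3_6_7  (6.7B)": (32, 4096),
    "transformer_lm_gpt3_13   (13B)":  (40, 5120),
    "transformer_lm_gpt3_175  (175B)": (96, 12288),
}
for name, (layers, d_model) in presets.items():
    approx = 12 * layers * d_model ** 2  # rough block-parameter count
    print(f"{name}: ~{approx / 1e9:.1f}B")

This prints roughly 1.2B, 2.5B, 6.4B, 12.6B and 173.9B; once vocabulary embeddings and biases are added back, the totals land close to the figures quoted in the comments.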
182,481
import logging
import os
import sys
from typing import Dict, List, Optional

import torch
from fairseq.models import (
    FairseqIncrementalDecoder,
    FairseqLanguageModel,
    register_model,
    register_model_architecture,
)


def default_architecture(args):


def hf_gpt2_large(args):
    args.embed_dim = getattr(args, "embed_dim", 1280)
    args.num_attention_heads = getattr(args, "num_attention_heads", 20)
    args.num_layers = getattr(args, "num_layers", 36)
    default_architecture(args)
null
182,486
import math

import torch
from fairseq.models.transformer import (
    TransformerDecoder,
    TransformerEncoder,
    TransformerModel,
)
from fairseq.modules.transformer_sentence_encoder import init_bert_params


def ensemble_encoder(func):
    def wrapper(self, *args, **kwargs):
        if self.ensemble_models is None or len(self.ensemble_models) == 1:
            return func(self, *args, **kwargs)
        encoder_outs = [
            func(model, *args, **kwargs, return_all_hiddens=True)
            for model in self.ensemble_models
        ]
        _encoder_out = encoder_outs[0].copy()

        def stack(key):
            outs = [e[key][0] for e in encoder_outs]
            return [torch.stack(outs, -1) if outs[0] is not None else None]

        _encoder_out["encoder_out"] = stack("encoder_out")
        _encoder_out["encoder_embedding"] = stack("encoder_embedding")

        num_layers = len(_encoder_out["encoder_states"])
        if num_layers > 0:
            _encoder_out["encoder_states"] = [
                torch.stack([e["encoder_states"][i] for e in encoder_outs], -1)
                for i in range(num_layers)
            ]
        return _encoder_out

    return wrapper
null
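To see what the decorator actually does with an ensemble, here is a hedged, toy-sized demonstration; ToyEncoder is a made-up stand-in that only mimics the dict-of-lists output format of fairseq's TransformerEncoder, and it assumes torch and the ensemble_encoder definition above are in scope:

import torch


class ToyEncoder:
    def __init__(self, fill, ensemble_models=None):
        self.fill = fill
        self.ensemble_models = ensemble_models

    @ensemble_encoder
    def forward(self, src_tokens, return_all_hiddens=False):
        out = torch.full((4, 2, 8), float(self.fill))  # T x B x C
        return {
            "encoder_out": [out],
            "encoder_embedding": [out.transpose(0, 1)],  # B x T x C
            "encoder_states": [out.clone(), out.clone()] if return_all_hiddens else [],
        }


members = [ToyEncoder(0.0), ToyEncoder(1.0)]
fused = ToyEncoder(0.0, ensemble_models=members).forward(None)
print(fused["encoder_out"][0].shape)  # torch.Size([4, 2, 8, 2]) -- ensemble members stacked on a new last dim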
182,509
import logging
import os
import signal
import threading

import torch
import torch.nn as nn
from torch.nn.parallel import DistributedDataParallel

from fairseq.distributed import (
    DistributedTimeoutWrapper,
    LegacyDistributedDataParallel,
    ModuleProxyWrapper,
    TPUDistributedDataParallel,
)

logger = logging.getLogger(__name__)

_GOSSIP_DISABLED = False
try:
    import gossip
except ImportError:
    _GOSSIP_DISABLED = True

The provided code snippet includes necessary dependencies for implementing the `DistributedFairseqModel` function. Write a Python function `def DistributedFairseqModel(args, model, process_group, device)` to solve the following problem:
Wrap a *model* to support distributed data parallel training. This is similar to the built-in DistributedDataParallel, but allows additional configuration of the DistributedDataParallel class to use, and also provides easier access to the wrapped model by forwarding requests for missing attributes to the wrapped model. Args: args (argparse.Namespace): fairseq args model (BaseFairseqModel): model to wrap process_group: the c10d process group to be used for distributed data parallel all-reduction. device: device to move model to
Here is the function:
def DistributedFairseqModel(args, model, process_group, device):
    """
    Wrap a *model* to support distributed data parallel training.

    This is similar to the built-in DistributedDataParallel, but allows
    additional configuration of the DistributedDataParallel class to use,
    and also provides easier access to the wrapped model by forwarding
    requests for missing attributes to the wrapped model.

    Args:
        args (argparse.Namespace): fairseq args
        model (BaseFairseqModel): model to wrap
        process_group: the c10d process group to be used for distributed data
            parallel all-reduction.
        device: device to move model to
    """
    assert isinstance(model, nn.Module)
    if args.tpu:
        wrapped_model = TPUDistributedDataParallel(
            module=model.to(device),
            process_group=process_group,
        )
        # forward missing getattr and state_dict/load_state_dict to orig model
        wrapped_model = ModuleProxyWrapper(wrapped_model)
    elif args.ddp_backend in {"c10d", "pytorch_ddp"}:
        wrapped_model = DistributedDataParallel(
            module=model.to(device),
            device_ids=[args.device_id],
            output_device=args.device_id,
            broadcast_buffers=args.broadcast_buffers,
            bucket_cap_mb=args.bucket_cap_mb,
            process_group=process_group,
            find_unused_parameters=args.find_unused_parameters,
            gradient_as_bucket_view=args.gradient_as_bucket_view,
        )
        if args.ddp_comm_hook == "fp16":
            logger.info("enable fp16 communication hook in DDP")
            try:
                from torch.distributed.algorithms.ddp_comm_hooks import (
                    register_ddp_comm_hook,
                    DDPCommHookType,
                )
            except:
                logger.error(
                    "Could not import from torch.distributed.algorithms.ddp_comm_hooks; you may need to update your pytorch version"
                )
                raise
            register_ddp_comm_hook(DDPCommHookType.FP16_COMPRESS, wrapped_model)
        # forward missing getattr and state_dict/load_state_dict to orig model
        wrapped_model = ModuleProxyWrapper(wrapped_model)
    elif args.ddp_backend in {"no_c10d", "legacy_ddp"}:
        wrapped_model = LegacyDistributedDataParallel(
            module=model.to(device),
            buffer_size=2 ** 28,
            process_group=process_group,
        )
        # forward missing getattr and state_dict/load_state_dict to orig model
        wrapped_model = ModuleProxyWrapper(wrapped_model)
    elif args.ddp_backend == "slow_mo":
        if _GOSSIP_DISABLED:
            raise ImportError(
                "Cannot find gossip library. Please install from: "
                "github.com/facebookresearch/stochastic_gradient_push"
            )

        # The values of slowmo_momentum below were obtained by tuning on the
        # En-De 16 dataset by training the transformer_wmt_en_de_large model
        if args.slowmo_momentum is None:
            if args.distributed_world_size <= 16:
                args.slowmo_momentum = 0.0
            elif args.distributed_world_size <= 32:
                args.slowmo_momentum = 0.2
            elif args.distributed_world_size <= 64:
                args.slowmo_momentum = 0.5
            else:
                args.slowmo_momentum = 0.6

        wrapped_model = gossip.GossipDataParallel(
            module=model.to(device),
            device_ids=[args.device_id],
            output_device=args.device_id,
            broadcast_buffers=args.broadcast_buffers,
            nprocs_per_node=args.nprocs_per_node,
            slowmo_momentum=args.slowmo_momentum,
            localsgd=(args.slowmo_algorithm == "LocalSGD"),
            localsgd_frequency=args.localsgd_frequency,
        )
        # forward missing getattr and state_dict/load_state_dict to orig model
        wrapped_model = ModuleProxyWrapper(wrapped_model)
    elif args.ddp_backend == "fully_sharded":
        try:
            from fairscale.nn.data_parallel import FullyShardedDataParallel as FSDP
        except ImportError:
            raise ImportError(
                "Cannot find FullyShardedDataParallel. "
                "Please install fairscale with: pip install fairscale"
            )
        assert isinstance(model, FSDP), "expected model to already be wrapped in FSDP"
        wrapped_model = model
        if args.memory_efficient_fp16:
            wrapped_model = wrapped_model.half()
        if not args.cpu_offload:
            wrapped_model = wrapped_model.to(device=device)
    else:
        raise ValueError("Unknown --ddp-backend: " + args.ddp_backend)

    # kill hung distributed jobs after a timeout
    if getattr(args, "heartbeat_timeout", -1) > 0:
        wrapped_model = DistributedTimeoutWrapper(
            wrapped_model, timeout=getattr(args, "heartbeat_timeout", -1)
        )

    return wrapped_model
Wrap a *model* to support distributed data parallel training.

This is similar to the built-in DistributedDataParallel, but allows additional configuration of the DistributedDataParallel class to use, and also provides easier access to the wrapped model by forwarding requests for missing attributes to the wrapped model.

Args:
    args (argparse.Namespace): fairseq args
    model (BaseFairseqModel): model to wrap
    process_group: the c10d process group to be used for distributed data parallel all-reduction.
    device: device to move model to
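For orientation, a usage sketch of how a trainer would call this wrapper. Every lowercase placeholder below (cfg_args, model, process_group, sample) is hypothetical and would normally come from fairseq's trainer and config system, so this is an illustration of the call shape rather than runnable code:

import torch

# illustrative only -- placeholders, not a working distributed setup
wrapped = DistributedFairseqModel(
    args=cfg_args,                 # namespace carrying ddp_backend, device_id, broadcast_buffers, ...
    model=model,                   # the already-built BaseFairseqModel
    process_group=process_group,   # e.g. the default c10d group from torch.distributed
    device=torch.device("cuda", cfg_args.device_id),
)
output = wrapped(**sample["net_input"])  # missing attributes still proxy through to `model`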
182,516
import math
from typing import Any, Dict, List, Optional

import torch
import torch.nn as nn
from fairseq import utils
from fairseq.distributed import fsdp_wrap
from fairseq.models import FairseqIncrementalDecoder
from fairseq.models.transformer import TransformerConfig
from fairseq.modules import (
    AdaptiveSoftmax,
    BaseLayer,
    FairseqDropout,
    LayerDropModuleList,
    LayerNorm,
    PositionalEmbedding,
    SinusoidalPositionalEmbedding,
)
from fairseq.modules import transformer_layer
from fairseq.modules.checkpoint_activations import checkpoint_wrapper
from fairseq.modules.quant_noise import quant_noise as apply_quant_noise_
from torch import Tensor


def module_name_fordropout(module_name: str) -> str:
    if module_name == 'TransformerDecoderBase':
        return 'TransformerDecoder'
    else:
        return module_name
null
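A quick check of the mapping (assuming the helper above is in scope); the renaming appears to exist so dropout modules keep reporting the historical TransformerDecoder name rather than the newer TransformerDecoderBase class name:

print(module_name_fordropout("TransformerDecoderBase"))  # TransformerDecoder
print(module_name_fordropout("TransformerEncoderBase"))  # TransformerEncoderBase (unchanged)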
182,517
import math
from typing import Any, Dict, List, Optional

import torch
import torch.nn as nn
from fairseq import utils
from fairseq.distributed import fsdp_wrap
from fairseq.models import FairseqIncrementalDecoder
from fairseq.models.transformer import TransformerConfig
from fairseq.modules import (
    AdaptiveSoftmax,
    BaseLayer,
    FairseqDropout,
    LayerDropModuleList,
    LayerNorm,
    PositionalEmbedding,
    SinusoidalPositionalEmbedding,
)
from fairseq.modules import transformer_layer
from fairseq.modules.checkpoint_activations import checkpoint_wrapper
from fairseq.modules.quant_noise import quant_noise as apply_quant_noise_
from torch import Tensor


def Linear(in_features, out_features, bias=True):
    m = nn.Linear(in_features, out_features, bias)
    nn.init.xavier_uniform_(m.weight)
    if bias:
        nn.init.constant_(m.bias, 0.0)
    return m
null
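A small, hedged check of what this helper changes relative to nn.Linear's default initialisation (assuming the Linear helper above is in scope): weights are Xavier-uniform and the bias starts at exactly zero.

layer = Linear(512, 2048)
print(layer.weight.abs().max().item())  # <= sqrt(6 / (512 + 2048)) ≈ 0.048
print(layer.bias.unique())              # tensor([0.])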
182,518
from typing import Dict, List, Optional, Tuple

import torch
import torch.nn as nn
from fairseq import utils, options
from fairseq.dataclass.utils import gen_parser_from_dataclass
from fairseq.distributed import fsdp_wrap
from fairseq.models import FairseqEncoderDecoderModel
from fairseq.models.transformer import (
    TransformerEncoderBase,
    TransformerDecoderBase,
    TransformerConfig,
)
from torch import Tensor
from fairseq.modules import AdaptiveInput


def Embedding(num_embeddings, embedding_dim, padding_idx):
    m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
    nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5)
    nn.init.constant_(m.weight[padding_idx], 0)
    return m
null
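And the analogous check for the Embedding helper (assuming it is in scope): the padding row is zeroed explicitly and the remaining weights are drawn with std = embedding_dim ** -0.5.

emb = Embedding(num_embeddings=100, embedding_dim=64, padding_idx=1)
print(emb.weight[1].abs().sum().item())  # 0.0 -- the padding vector starts as all zeros
print(emb.weight.std().item())           # roughly 64 ** -0.5 = 0.125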
182,519
from fairseq.dataclass.utils import gen_parser_from_dataclass
from fairseq.models import (
    register_model,
    register_model_architecture,
)
from fairseq.models.transformer.transformer_config import (
    TransformerConfig,
    DEFAULT_MAX_SOURCE_POSITIONS,
    DEFAULT_MAX_TARGET_POSITIONS,
    DEFAULT_MIN_PARAMS_TO_WRAP,
)
from fairseq.models.transformer.transformer_base import (
    TransformerModelBase,
)


def base_architecture(args):


def tiny_architecture(args):
    args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 64)
    args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 64)
    args.encoder_layers = getattr(args, "encoder_layers", 2)
    args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 2)
    args.decoder_layers = getattr(args, "decoder_layers", 2)
    args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 2)
    return base_architecture(args)
null
182,520
from fairseq.dataclass.utils import gen_parser_from_dataclass
from fairseq.models import (
    register_model,
    register_model_architecture,
)
from fairseq.models.transformer.transformer_config import (
    TransformerConfig,
    DEFAULT_MAX_SOURCE_POSITIONS,
    DEFAULT_MAX_TARGET_POSITIONS,
    DEFAULT_MIN_PARAMS_TO_WRAP,
)
from fairseq.models.transformer.transformer_base import (
    TransformerModelBase,
)


def edge_architecture(args):
    args.encoder_embed_path = getattr(args, "encoder_embed_path", None)
    args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
    args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048)
    args.encoder_layers = getattr(args, "encoder_layers", 12)
    args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8)
    args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
    args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False)
    args.decoder_embed_path = getattr(args, "decoder_embed_path", None)
    args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim)
    args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 128)
    args.decoder_layers = getattr(args, "decoder_layers", 2)
    args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
    args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False)
    args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
    args.attention_dropout = getattr(args, "attention_dropout", 0.0)
    args.activation_dropout = getattr(args, "activation_dropout", 0.0)
    args.activation_fn = getattr(args, "activation_fn", "relu")
    args.dropout = getattr(args, "dropout", 0.1)
    args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
    args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
    args.share_decoder_input_output_embed = getattr(
        args, "share_decoder_input_output_embed", False
    )
    args.share_all_embeddings = getattr(args, "share_all_embeddings", False)
    args.no_token_positional_embeddings = getattr(
        args, "no_token_positional_embeddings", False
    )
    args.adaptive_input = getattr(args, "adaptive_input", False)
    args.no_cross_attention = getattr(args, "no_cross_attention", False)
    args.cross_self_attention = getattr(args, "cross_self_attention", False)
    args.decoder_output_dim = getattr(
        args, "decoder_output_dim", args.decoder_embed_dim
    )
    args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
    args.model_param_type = getattr(args, "model_param_type", "edgeformer")
    args.no_scale_embedding = getattr(args, "no_scale_embedding", False)
    args.layernorm_embedding = getattr(args, "layernorm_embedding", False)
    args.tie_adaptive_weights = getattr(args, "tie_adaptive_weights", False)
    args.checkpoint_activations = getattr(args, "checkpoint_activations", False)
    args.offload_activations = getattr(args, "offload_activations", False)
    if args.offload_activations:
        args.checkpoint_activations = True
    args.encoder_layers_to_keep = getattr(args, "encoder_layers_to_keep", None)
    args.decoder_layers_to_keep = getattr(args, "decoder_layers_to_keep", None)
    args.encoder_layerdrop = getattr(args, "encoder_layerdrop", 0)
    args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0)
    args.quant_noise_pq = getattr(args, "quant_noise_pq", 0)
    args.quant_noise_pq_block_size = getattr(args, "quant_noise_pq_block_size", 8)
    args.quant_noise_scalar = getattr(args, "quant_noise_scalar", 0)
null
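These architecture functions mutate an argparse-style namespace in place, keeping any value the user already set and filling everything else with the defaults listed above. A minimal, self-contained illustration (assuming edge_architecture from the row above is defined in scope):

from argparse import Namespace

args = Namespace(decoder_layers=4)  # user override survives
edge_architecture(args)
print(args.decoder_layers)          # 4   (kept)
print(args.decoder_ffn_embed_dim)   # 128 (default from the function)
print(args.model_param_type)        # 'edgeformer'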