Dataset columns:
repo: string (length 1 to 99)
file: string (length 13 to 215)
code: string (length 12 to 59.2M)
file_length: int64 (12 to 59.2M)
avg_line_length: float64 (3.82 to 1.48M)
max_line_length: int64 (12 to 2.51M)
extension_type: string (1 distinct value)
sign-topic
sign-topic-main/examples/speech_recognition/new/decoders/viterbi_decoder.py
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch from typing import List, Dict from .base_decoder import BaseDecoder class ViterbiDecoder(BaseDecoder): def decode( self, emissions: torch.FloatTensor, ) -> List[List[Dict[str, torch.LongTensor]]]: def get_pred(e): toks = e.argmax(dim=-1).unique_consecutive() return toks[toks != self.blank] return [[{"tokens": get_pred(x), "score": 0}] for x in emissions]
641
24.68
73
py
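The decode above is greedy CTC: per-frame argmax, collapse of repeated tokens, then blank removal. A minimal standalone sketch of that collapse, assuming blank index 0 and random toy emissions rather than the fairseq BaseDecoder setup:

import torch

def greedy_ctc(emissions: torch.Tensor, blank: int = 0) -> torch.Tensor:
    # emissions: (T, V) log-probabilities for a single utterance
    toks = emissions.argmax(dim=-1).unique_consecutive()  # collapse repeated frames
    return toks[toks != blank]                            # drop CTC blanks

emissions = torch.randn(50, 32).log_softmax(-1)  # T=50 frames, V=32 symbols
print(greedy_ctc(emissions))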
sign-topic
sign-topic-main/examples/speech_recognition/new/decoders/flashlight_decoder.py
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import gc import os.path as osp import warnings from collections import deque, namedtuple from typing import Any, Dict, Tuple import numpy as np import torch from fairseq import tasks from fairseq.data.dictionary import Dictionary from fairseq.dataclass.utils import convert_namespace_to_omegaconf from fairseq.models.fairseq_model import FairseqModel from fairseq.utils import apply_to_sample from omegaconf import open_dict, OmegaConf from typing import List from .decoder_config import FlashlightDecoderConfig from .base_decoder import BaseDecoder try: from flashlight.lib.text.decoder import ( LM, CriterionType, DecodeResult, KenLM, LexiconDecoder, LexiconDecoderOptions, LexiconFreeDecoder, LexiconFreeDecoderOptions, LMState, SmearingMode, Trie, ) from flashlight.lib.text.dictionary import create_word_dict, load_words except ImportError: warnings.warn( "flashlight python bindings are required to use this functionality. " "Please install from " "https://github.com/facebookresearch/flashlight/tree/master/bindings/python" ) LM = object LMState = object class KenLMDecoder(BaseDecoder): def __init__(self, cfg: FlashlightDecoderConfig, tgt_dict: Dictionary) -> None: super().__init__(tgt_dict) self.nbest = cfg.nbest self.unitlm = cfg.unitlm if cfg.lexicon: self.lexicon = load_words(cfg.lexicon) self.word_dict = create_word_dict(self.lexicon) self.unk_word = self.word_dict.get_index("<unk>") self.lm = KenLM(cfg.lmpath, self.word_dict) self.trie = Trie(self.vocab_size, self.silence) start_state = self.lm.start(False) for word, spellings in self.lexicon.items(): word_idx = self.word_dict.get_index(word) _, score = self.lm.score(start_state, word_idx) for spelling in spellings: spelling_idxs = [tgt_dict.index(token) for token in spelling] assert ( tgt_dict.unk() not in spelling_idxs ), f"{word} {spelling} {spelling_idxs}" self.trie.insert(spelling_idxs, word_idx, score) self.trie.smear(SmearingMode.MAX) self.decoder_opts = LexiconDecoderOptions( beam_size=cfg.beam, beam_size_token=cfg.beamsizetoken or len(tgt_dict), beam_threshold=cfg.beamthreshold, lm_weight=cfg.lmweight, word_score=cfg.wordscore, unk_score=cfg.unkweight, sil_score=cfg.silweight, log_add=False, criterion_type=CriterionType.CTC, ) self.decoder = LexiconDecoder( self.decoder_opts, self.trie, self.lm, self.silence, self.blank, self.unk_word, [], self.unitlm, ) else: assert self.unitlm, "Lexicon-free decoding requires unit LM" d = {w: [[w]] for w in tgt_dict.symbols} self.word_dict = create_word_dict(d) self.lm = KenLM(cfg.lmpath, self.word_dict) self.decoder_opts = LexiconFreeDecoderOptions( beam_size=cfg.beam, beam_size_token=cfg.beamsizetoken or len(tgt_dict), beam_threshold=cfg.beamthreshold, lm_weight=cfg.lmweight, sil_score=cfg.silweight, log_add=False, criterion_type=CriterionType.CTC, ) self.decoder = LexiconFreeDecoder( self.decoder_opts, self.lm, self.silence, self.blank, [] ) def get_timesteps(self, token_idxs: List[int]) -> List[int]: """Returns frame numbers corresponding to every non-blank token. Parameters ---------- token_idxs : List[int] IDs of decoded tokens. Returns ------- List[int] Frame numbers corresponding to every non-blank token. 
""" timesteps = [] for i, token_idx in enumerate(token_idxs): if token_idx == self.blank: continue if i == 0 or token_idx != token_idxs[i-1]: timesteps.append(i) return timesteps def decode( self, emissions: torch.FloatTensor, ) -> List[List[Dict[str, torch.LongTensor]]]: B, T, N = emissions.size() hypos = [] for b in range(B): emissions_ptr = emissions.data_ptr() + 4 * b * emissions.stride(0) results = self.decoder.decode(emissions_ptr, T, N) nbest_results = results[: self.nbest] hypos.append( [ { "tokens": self.get_tokens(result.tokens), "score": result.score, "timesteps": self.get_timesteps(result.tokens), "words": [ self.word_dict.get_entry(x) for x in result.words if x >= 0 ], } for result in nbest_results ] ) return hypos FairseqLMState = namedtuple( "FairseqLMState", [ "prefix", "incremental_state", "probs", ], ) class FairseqLM(LM): def __init__(self, dictionary: Dictionary, model: FairseqModel) -> None: super().__init__() self.dictionary = dictionary self.model = model self.unk = self.dictionary.unk() self.save_incremental = False # this currently does not work properly self.max_cache = 20_000 if torch.cuda.is_available(): model.cuda() model.eval() model.make_generation_fast_() self.states = {} self.stateq = deque() def start(self, start_with_nothing: bool) -> LMState: state = LMState() prefix = torch.LongTensor([[self.dictionary.eos()]]) incremental_state = {} if self.save_incremental else None with torch.no_grad(): res = self.model(prefix.cuda(), incremental_state=incremental_state) probs = self.model.get_normalized_probs(res, log_probs=True, sample=None) if incremental_state is not None: incremental_state = apply_to_sample(lambda x: x.cpu(), incremental_state) self.states[state] = FairseqLMState( prefix.numpy(), incremental_state, probs[0, -1].cpu().numpy() ) self.stateq.append(state) return state def score( self, state: LMState, token_index: int, no_cache: bool = False, ) -> Tuple[LMState, int]: """ Evaluate language model based on the current lm state and new word Parameters: ----------- state: current lm state token_index: index of the word (can be lexicon index then you should store inside LM the mapping between indices of lexicon and lm, or lm index of a word) Returns: -------- (LMState, float): pair of (new state, score for the current word) """ curr_state = self.states[state] def trim_cache(targ_size: int) -> None: while len(self.stateq) > targ_size: rem_k = self.stateq.popleft() rem_st = self.states[rem_k] rem_st = FairseqLMState(rem_st.prefix, None, None) self.states[rem_k] = rem_st if curr_state.probs is None: new_incremental_state = ( curr_state.incremental_state.copy() if curr_state.incremental_state is not None else None ) with torch.no_grad(): if new_incremental_state is not None: new_incremental_state = apply_to_sample( lambda x: x.cuda(), new_incremental_state ) elif self.save_incremental: new_incremental_state = {} res = self.model( torch.from_numpy(curr_state.prefix).cuda(), incremental_state=new_incremental_state, ) probs = self.model.get_normalized_probs( res, log_probs=True, sample=None ) if new_incremental_state is not None: new_incremental_state = apply_to_sample( lambda x: x.cpu(), new_incremental_state ) curr_state = FairseqLMState( curr_state.prefix, new_incremental_state, probs[0, -1].cpu().numpy() ) if not no_cache: self.states[state] = curr_state self.stateq.append(state) score = curr_state.probs[token_index].item() trim_cache(self.max_cache) outstate = state.child(token_index) if outstate not in self.states and not no_cache: prefix = 
np.concatenate( [curr_state.prefix, torch.LongTensor([[token_index]])], -1 ) incr_state = curr_state.incremental_state self.states[outstate] = FairseqLMState(prefix, incr_state, None) if token_index == self.unk: score = float("-inf") return outstate, score def finish(self, state: LMState) -> Tuple[LMState, int]: """ Evaluate eos for language model based on the current lm state Returns: -------- (LMState, float): pair of (new state, score for the current word) """ return self.score(state, self.dictionary.eos()) def empty_cache(self) -> None: self.states = {} self.stateq = deque() gc.collect() class FairseqLMDecoder(BaseDecoder): def __init__(self, cfg: FlashlightDecoderConfig, tgt_dict: Dictionary) -> None: super().__init__(tgt_dict) self.nbest = cfg.nbest self.unitlm = cfg.unitlm self.lexicon = load_words(cfg.lexicon) if cfg.lexicon else None self.idx_to_wrd = {} checkpoint = torch.load(cfg.lmpath, map_location="cpu") if "cfg" in checkpoint and checkpoint["cfg"] is not None: lm_args = checkpoint["cfg"] else: lm_args = convert_namespace_to_omegaconf(checkpoint["args"]) if not OmegaConf.is_dict(lm_args): lm_args = OmegaConf.create(lm_args) with open_dict(lm_args.task): lm_args.task.data = osp.dirname(cfg.lmpath) task = tasks.setup_task(lm_args.task) model = task.build_model(lm_args.model) model.load_state_dict(checkpoint["model"], strict=False) self.trie = Trie(self.vocab_size, self.silence) self.word_dict = task.dictionary self.unk_word = self.word_dict.unk() self.lm = FairseqLM(self.word_dict, model) if self.lexicon: start_state = self.lm.start(False) for i, (word, spellings) in enumerate(self.lexicon.items()): if self.unitlm: word_idx = i self.idx_to_wrd[i] = word score = 0 else: word_idx = self.word_dict.index(word) _, score = self.lm.score(start_state, word_idx, no_cache=True) for spelling in spellings: spelling_idxs = [tgt_dict.index(token) for token in spelling] assert ( tgt_dict.unk() not in spelling_idxs ), f"{spelling} {spelling_idxs}" self.trie.insert(spelling_idxs, word_idx, score) self.trie.smear(SmearingMode.MAX) self.decoder_opts = LexiconDecoderOptions( beam_size=cfg.beam, beam_size_token=cfg.beamsizetoken or len(tgt_dict), beam_threshold=cfg.beamthreshold, lm_weight=cfg.lmweight, word_score=cfg.wordscore, unk_score=cfg.unkweight, sil_score=cfg.silweight, log_add=False, criterion_type=CriterionType.CTC, ) self.decoder = LexiconDecoder( self.decoder_opts, self.trie, self.lm, self.silence, self.blank, self.unk_word, [], self.unitlm, ) else: assert self.unitlm, "Lexicon-free decoding requires unit LM" d = {w: [[w]] for w in tgt_dict.symbols} self.word_dict = create_word_dict(d) self.lm = KenLM(cfg.lmpath, self.word_dict) self.decoder_opts = LexiconFreeDecoderOptions( beam_size=cfg.beam, beam_size_token=cfg.beamsizetoken or len(tgt_dict), beam_threshold=cfg.beamthreshold, lm_weight=cfg.lmweight, sil_score=cfg.silweight, log_add=False, criterion_type=CriterionType.CTC, ) self.decoder = LexiconFreeDecoder( self.decoder_opts, self.lm, self.silence, self.blank, [] ) def decode( self, emissions: torch.FloatTensor, ) -> List[List[Dict[str, torch.LongTensor]]]: B, T, N = emissions.size() hypos = [] def make_hypo(result: DecodeResult) -> Dict[str, Any]: hypo = { "tokens": self.get_tokens(result.tokens), "score": result.score, } if self.lexicon: hypo["words"] = [ self.idx_to_wrd[x] if self.unitlm else self.word_dict[x] for x in result.words if x >= 0 ] return hypo for b in range(B): emissions_ptr = emissions.data_ptr() + 4 * b * emissions.stride(0) results = 
self.decoder.decode(emissions_ptr, T, N) nbest_results = results[: self.nbest] hypos.append([make_hypo(result) for result in nbest_results]) self.lm.empty_cache() return hypos
14,746
33.136574
88
py
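KenLMDecoder.get_timesteps maps decoded token IDs back to frame indices: a frame is recorded for every token that is not a blank and differs from the token on the previous frame. A standalone sketch with a toy sequence and blank id 0, not real flashlight output:

from typing import List

def get_timesteps(token_idxs: List[int], blank: int = 0) -> List[int]:
    timesteps = []
    for i, tok in enumerate(token_idxs):
        if tok == blank:
            continue  # blanks carry no emission time
        if i == 0 or tok != token_idxs[i - 1]:
            timesteps.append(i)  # a new (non-repeated) token starts here
    return timesteps

print(get_timesteps([0, 5, 5, 0, 7, 0, 7]))  # [1, 4, 6]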
sign-topic
sign-topic-main/examples/speech_recognition/kaldi/kaldi_decoder.py
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from concurrent.futures import ThreadPoolExecutor import logging from omegaconf import MISSING import os import torch from typing import Optional import warnings from dataclasses import dataclass from fairseq.dataclass import FairseqDataclass from .kaldi_initializer import KaldiInitializerConfig, initalize_kaldi logger = logging.getLogger(__name__) @dataclass class KaldiDecoderConfig(FairseqDataclass): hlg_graph_path: Optional[str] = None output_dict: str = MISSING kaldi_initializer_config: Optional[KaldiInitializerConfig] = None acoustic_scale: float = 0.5 max_active: int = 10000 beam_delta: float = 0.5 hash_ratio: float = 2.0 is_lattice: bool = False lattice_beam: float = 10.0 prune_interval: int = 25 determinize_lattice: bool = True prune_scale: float = 0.1 max_mem: int = 0 phone_determinize: bool = True word_determinize: bool = True minimize: bool = True num_threads: int = 1 class KaldiDecoder(object): def __init__( self, cfg: KaldiDecoderConfig, beam: int, nbest: int = 1, ): try: from kaldi.asr import FasterRecognizer, LatticeFasterRecognizer from kaldi.base import set_verbose_level from kaldi.decoder import ( FasterDecoder, FasterDecoderOptions, LatticeFasterDecoder, LatticeFasterDecoderOptions, ) from kaldi.lat.functions import DeterminizeLatticePhonePrunedOptions from kaldi.fstext import read_fst_kaldi, SymbolTable except: warnings.warn( "pykaldi is required for this functionality. Please install from https://github.com/pykaldi/pykaldi" ) # set_verbose_level(2) self.acoustic_scale = cfg.acoustic_scale self.nbest = nbest if cfg.hlg_graph_path is None: assert ( cfg.kaldi_initializer_config is not None ), "Must provide hlg graph path or kaldi initializer config" cfg.hlg_graph_path = initalize_kaldi(cfg.kaldi_initializer_config) assert os.path.exists(cfg.hlg_graph_path), cfg.hlg_graph_path if cfg.is_lattice: self.dec_cls = LatticeFasterDecoder opt_cls = LatticeFasterDecoderOptions self.rec_cls = LatticeFasterRecognizer else: assert self.nbest == 1, "nbest > 1 requires lattice decoder" self.dec_cls = FasterDecoder opt_cls = FasterDecoderOptions self.rec_cls = FasterRecognizer self.decoder_options = opt_cls() self.decoder_options.beam = beam self.decoder_options.max_active = cfg.max_active self.decoder_options.beam_delta = cfg.beam_delta self.decoder_options.hash_ratio = cfg.hash_ratio if cfg.is_lattice: self.decoder_options.lattice_beam = cfg.lattice_beam self.decoder_options.prune_interval = cfg.prune_interval self.decoder_options.determinize_lattice = cfg.determinize_lattice self.decoder_options.prune_scale = cfg.prune_scale det_opts = DeterminizeLatticePhonePrunedOptions() det_opts.max_mem = cfg.max_mem det_opts.phone_determinize = cfg.phone_determinize det_opts.word_determinize = cfg.word_determinize det_opts.minimize = cfg.minimize self.decoder_options.det_opts = det_opts self.output_symbols = {} with open(cfg.output_dict, "r") as f: for line in f: items = line.rstrip().split() assert len(items) == 2 self.output_symbols[int(items[1])] = items[0] logger.info(f"Loading FST from {cfg.hlg_graph_path}") self.fst = read_fst_kaldi(cfg.hlg_graph_path) self.symbol_table = SymbolTable.read_text(cfg.output_dict) self.executor = ThreadPoolExecutor(max_workers=cfg.num_threads) def generate(self, models, sample, **unused): """Generate a batch of inferences.""" # model.forward normally channels 
prev_output_tokens into the decoder # separately, but SequenceGenerator directly calls model.encoder encoder_input = { k: v for k, v in sample["net_input"].items() if k != "prev_output_tokens" } emissions, padding = self.get_emissions(models, encoder_input) return self.decode(emissions, padding) def get_emissions(self, models, encoder_input): """Run encoder and normalize emissions""" model = models[0] all_encoder_out = [m(**encoder_input) for m in models] if len(all_encoder_out) > 1: if "encoder_out" in all_encoder_out[0]: encoder_out = { "encoder_out": sum(e["encoder_out"] for e in all_encoder_out) / len(all_encoder_out), "encoder_padding_mask": all_encoder_out[0]["encoder_padding_mask"], } padding = encoder_out["encoder_padding_mask"] else: encoder_out = { "logits": sum(e["logits"] for e in all_encoder_out) / len(all_encoder_out), "padding_mask": all_encoder_out[0]["padding_mask"], } padding = encoder_out["padding_mask"] else: encoder_out = all_encoder_out[0] padding = ( encoder_out["padding_mask"] if "padding_mask" in encoder_out else encoder_out["encoder_padding_mask"] ) if hasattr(model, "get_logits"): emissions = model.get_logits(encoder_out, normalize=True) else: emissions = model.get_normalized_probs(encoder_out, log_probs=True) return ( emissions.cpu().float().transpose(0, 1), padding.cpu() if padding is not None and padding.any() else None, ) def decode_one(self, logits, padding): from kaldi.matrix import Matrix decoder = self.dec_cls(self.fst, self.decoder_options) asr = self.rec_cls( decoder, self.symbol_table, acoustic_scale=self.acoustic_scale ) if padding is not None: logits = logits[~padding] mat = Matrix(logits.numpy()) out = asr.decode(mat) if self.nbest > 1: from kaldi.fstext import shortestpath from kaldi.fstext.utils import ( convert_compact_lattice_to_lattice, convert_lattice_to_std, convert_nbest_to_list, get_linear_symbol_sequence, ) lat = out["lattice"] sp = shortestpath(lat, nshortest=self.nbest) sp = convert_compact_lattice_to_lattice(sp) sp = convert_lattice_to_std(sp) seq = convert_nbest_to_list(sp) results = [] for s in seq: _, o, w = get_linear_symbol_sequence(s) words = list(self.output_symbols[z] for z in o) results.append( { "tokens": words, "words": words, "score": w.value, "emissions": logits, } ) return results else: words = out["text"].split() return [ { "tokens": words, "words": words, "score": out["likelihood"], "emissions": logits, } ] def decode(self, emissions, padding): if padding is None: padding = [None] * len(emissions) ret = list( map( lambda e, p: self.executor.submit(self.decode_one, e, p), emissions, padding, ) ) return ret
8,265
32.738776
116
py
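KaldiDecoder.decode submits one decoding job per utterance to a ThreadPoolExecutor and returns the futures. A minimal sketch of that fan-out pattern; decode_one here is a toy stand-in, not the pykaldi-backed method above:

from concurrent.futures import ThreadPoolExecutor

def decode_one(logits, padding):
    return {"tokens": ["dummy"], "score": float(sum(logits))}

executor = ThreadPoolExecutor(max_workers=2)
emissions = [[0.1, 0.9], [0.4, 0.6]]          # toy per-utterance scores
paddings = [None] * len(emissions)
futures = [executor.submit(decode_one, e, p) for e, p in zip(emissions, paddings)]
print([f.result() for f in futures])          # resolve the futures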
sign-topic
sign-topic-main/examples/speech_recognition/data/collaters.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ This module contains collection of classes which implement collate functionalities for various tasks. Collaters should know what data to expect for each sample and they should pack / collate them into batches """ from __future__ import absolute_import, division, print_function, unicode_literals import numpy as np import torch from fairseq.data import data_utils as fairseq_data_utils class Seq2SeqCollater(object): """ Implements collate function mainly for seq2seq tasks This expects each sample to contain feature (src_tokens) and targets. This collator is also used for aligned training task. """ def __init__( self, feature_index=0, label_index=1, pad_index=1, eos_index=2, move_eos_to_beginning=True, ): self.feature_index = feature_index self.label_index = label_index self.pad_index = pad_index self.eos_index = eos_index self.move_eos_to_beginning = move_eos_to_beginning def _collate_frames(self, frames): """Convert a list of 2d frames into a padded 3d tensor Args: frames (list): list of 2d frames of size L[i]*f_dim. Where L[i] is length of i-th frame and f_dim is static dimension of features Returns: 3d tensor of size len(frames)*len_max*f_dim where len_max is max of L[i] """ len_max = max(frame.size(0) for frame in frames) f_dim = frames[0].size(1) res = frames[0].new(len(frames), len_max, f_dim).fill_(0.0) for i, v in enumerate(frames): res[i, : v.size(0)] = v return res def collate(self, samples): """ utility function to collate samples into batch for speech recognition. """ if len(samples) == 0: return {} # parse samples into torch tensors parsed_samples = [] for s in samples: # skip invalid samples if s["data"][self.feature_index] is None: continue source = s["data"][self.feature_index] if isinstance(source, (np.ndarray, np.generic)): source = torch.from_numpy(source) target = s["data"][self.label_index] if isinstance(target, (np.ndarray, np.generic)): target = torch.from_numpy(target).long() elif isinstance(target, list): target = torch.LongTensor(target) parsed_sample = {"id": s["id"], "source": source, "target": target} parsed_samples.append(parsed_sample) samples = parsed_samples id = torch.LongTensor([s["id"] for s in samples]) frames = self._collate_frames([s["source"] for s in samples]) # sort samples by descending number of frames frames_lengths = torch.LongTensor([s["source"].size(0) for s in samples]) frames_lengths, sort_order = frames_lengths.sort(descending=True) id = id.index_select(0, sort_order) frames = frames.index_select(0, sort_order) target = None target_lengths = None prev_output_tokens = None if samples[0].get("target", None) is not None: ntokens = sum(len(s["target"]) for s in samples) target = fairseq_data_utils.collate_tokens( [s["target"] for s in samples], self.pad_index, self.eos_index, left_pad=False, move_eos_to_beginning=False, ) target = target.index_select(0, sort_order) target_lengths = torch.LongTensor( [s["target"].size(0) for s in samples] ).index_select(0, sort_order) prev_output_tokens = fairseq_data_utils.collate_tokens( [s["target"] for s in samples], self.pad_index, self.eos_index, left_pad=False, move_eos_to_beginning=self.move_eos_to_beginning, ) prev_output_tokens = prev_output_tokens.index_select(0, sort_order) else: ntokens = sum(len(s["source"]) for s in samples) batch = { "id": id, "ntokens": ntokens, "net_input": {"src_tokens": frames, "src_lengths": 
frames_lengths}, "target": target, "target_lengths": target_lengths, "nsentences": len(samples), } if prev_output_tokens is not None: batch["net_input"]["prev_output_tokens"] = prev_output_tokens return batch
4,796
35.340909
84
py
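Seq2SeqCollater._collate_frames pads variable-length (L_i, f_dim) feature tensors into one zero-filled (B, L_max, f_dim) batch. A self-contained sketch of that padding step, using toy tensors rather than real fbank features:

import torch

def collate_frames(frames):
    len_max = max(f.size(0) for f in frames)
    f_dim = frames[0].size(1)
    out = frames[0].new_zeros(len(frames), len_max, f_dim)  # zero padding
    for i, f in enumerate(frames):
        out[i, : f.size(0)] = f                             # copy each utterance in
    return out

batch = collate_frames([torch.ones(3, 4), torch.ones(5, 4)])
print(batch.shape)  # torch.Size([2, 5, 4])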
sign-topic
sign-topic-main/examples/speech_recognition/data/data_utils.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch def calc_mean_invstddev(feature): if len(feature.size()) != 2: raise ValueError("We expect the input feature to be 2-D tensor") mean = feature.mean(0) var = feature.var(0) # avoid division by ~zero eps = 1e-8 if (var < eps).any(): return mean, 1.0 / (torch.sqrt(var) + eps) return mean, 1.0 / torch.sqrt(var) def apply_mv_norm(features): # If there is less than 2 spectrograms, the variance cannot be computed (is NaN) # and normalization is not possible, so return the item as it is if features.size(0) < 2: return features mean, invstddev = calc_mean_invstddev(features) res = (features - mean) * invstddev return res def lengths_to_encoder_padding_mask(lengths, batch_first=False): """ convert lengths (a 1-D Long/Int tensor) to 2-D binary tensor Args: lengths: a (B, )-shaped tensor Return: max_length: maximum length of B sequences encoder_padding_mask: a (max_length, B) binary mask, where [t, b] = 0 for t < lengths[b] and 1 otherwise TODO: kernelize this function if benchmarking shows this function is slow """ max_lengths = torch.max(lengths).item() bsz = lengths.size(0) encoder_padding_mask = torch.arange( max_lengths ).to( # a (T, ) tensor with [0, ..., T-1] lengths.device ).view( # move to the right device 1, max_lengths ).expand( # reshape to (1, T)-shaped tensor bsz, -1 ) >= lengths.view( # expand to (B, T)-shaped tensor bsz, 1 ).expand( -1, max_lengths ) if not batch_first: return encoder_padding_mask.t(), max_lengths else: return encoder_padding_mask, max_lengths def encoder_padding_mask_to_lengths( encoder_padding_mask, max_lengths, batch_size, device ): """ convert encoder_padding_mask (2-D binary tensor) to a 1-D tensor Conventionally, encoder output contains a encoder_padding_mask, which is a 2-D mask in a shape (T, B), whose (t, b) element indicate whether encoder_out[t, b] is a valid output (=0) or not (=1). Occasionally, we need to convert this mask tensor to a 1-D tensor in shape (B, ), where [b] denotes the valid length of b-th sequence Args: encoder_padding_mask: a (T, B)-shaped binary tensor or None; if None, indicating all are valid Return: seq_lengths: a (B,)-shaped tensor, where its (b, )-th element is the number of valid elements of b-th sequence max_lengths: maximum length of all sequence, if encoder_padding_mask is not None, max_lengths must equal to encoder_padding_mask.size(0) batch_size: batch size; if encoder_padding_mask is not None, max_lengths must equal to encoder_padding_mask.size(1) device: which device to put the result on """ if encoder_padding_mask is None: return torch.Tensor([max_lengths] * batch_size).to(torch.int32).to(device) assert encoder_padding_mask.size(0) == max_lengths, "max_lengths does not match" assert encoder_padding_mask.size(1) == batch_size, "batch_size does not match" return max_lengths - torch.sum(encoder_padding_mask, dim=0)
3,429
32.960396
84
py
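lengths_to_encoder_padding_mask builds a binary mask in which padded positions are 1. A compact sketch of the same broadcast comparison, batch-first only and with a hypothetical helper name:

import torch

def lengths_to_padding_mask(lengths: torch.Tensor) -> torch.Tensor:
    max_len = int(lengths.max())
    positions = torch.arange(max_len, device=lengths.device).unsqueeze(0)  # (1, T)
    return positions >= lengths.unsqueeze(1)                               # (B, T), True = padded

print(lengths_to_padding_mask(torch.tensor([3, 1])))
# tensor([[False, False, False],
#         [False,  True,  True]])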
sign-topic
sign-topic-main/examples/speech_recognition/data/asr_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import os import numpy as np from fairseq.data import FairseqDataset from . import data_utils from .collaters import Seq2SeqCollater class AsrDataset(FairseqDataset): """ A dataset representing speech and corresponding transcription. Args: aud_paths: (List[str]): A list of str with paths to audio files. aud_durations_ms (List[int]): A list of int containing the durations of audio files. tgt (List[torch.LongTensor]): A list of LongTensors containing the indices of target transcriptions. tgt_dict (~fairseq.data.Dictionary): target vocabulary. ids (List[str]): A list of utterance IDs. speakers (List[str]): A list of speakers corresponding to utterances. num_mel_bins (int): Number of triangular mel-frequency bins (default: 80) frame_length (float): Frame length in milliseconds (default: 25.0) frame_shift (float): Frame shift in milliseconds (default: 10.0) """ def __init__( self, aud_paths, aud_durations_ms, tgt, tgt_dict, ids, speakers, num_mel_bins=80, frame_length=25.0, frame_shift=10.0, ): assert frame_length > 0 assert frame_shift > 0 assert all(x > frame_length for x in aud_durations_ms) self.frame_sizes = [ int(1 + (d - frame_length) / frame_shift) for d in aud_durations_ms ] assert len(aud_paths) > 0 assert len(aud_paths) == len(aud_durations_ms) assert len(aud_paths) == len(tgt) assert len(aud_paths) == len(ids) assert len(aud_paths) == len(speakers) self.aud_paths = aud_paths self.tgt_dict = tgt_dict self.tgt = tgt self.ids = ids self.speakers = speakers self.num_mel_bins = num_mel_bins self.frame_length = frame_length self.frame_shift = frame_shift self.s2s_collater = Seq2SeqCollater( 0, 1, pad_index=self.tgt_dict.pad(), eos_index=self.tgt_dict.eos(), move_eos_to_beginning=True, ) def __getitem__(self, index): import torchaudio import torchaudio.compliance.kaldi as kaldi tgt_item = self.tgt[index] if self.tgt is not None else None path = self.aud_paths[index] if not os.path.exists(path): raise FileNotFoundError("Audio file not found: {}".format(path)) sound, sample_rate = torchaudio.load_wav(path) output = kaldi.fbank( sound, num_mel_bins=self.num_mel_bins, frame_length=self.frame_length, frame_shift=self.frame_shift, ) output_cmvn = data_utils.apply_mv_norm(output) return {"id": index, "data": [output_cmvn.detach(), tgt_item]} def __len__(self): return len(self.aud_paths) def collater(self, samples): """Merge a list of samples to form a mini-batch. Args: samples (List[int]): sample indices to collate Returns: dict: a mini-batch suitable for forwarding with a Model """ return self.s2s_collater.collate(samples) def num_tokens(self, index): return self.frame_sizes[index] def size(self, index): """Return an example's size as a float or tuple. This value is used when filtering a dataset with ``--max-positions``.""" return ( self.frame_sizes[index], len(self.tgt[index]) if self.tgt is not None else 0, ) def ordered_indices(self): """Return an ordered list of indices. Batches will be constructed based on this order.""" return np.arange(len(self))
3,955
31.162602
82
py
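AsrDataset precomputes frame counts from utterance durations with int(1 + (d - frame_length) / frame_shift). A trivial check of that arithmetic with the default 25 ms window and 10 ms shift:

def num_frames(duration_ms: float, frame_length: float = 25.0, frame_shift: float = 10.0) -> int:
    return int(1 + (duration_ms - frame_length) / frame_shift)

print(num_frames(1000))  # 98 frames for a one-second utterance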
sign-topic
sign-topic-main/examples/speech_recognition/tasks/speech_recognition.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import json import os import re import sys import torch from examples.speech_recognition.data import AsrDataset from examples.speech_recognition.data.replabels import replabel_symbol from fairseq.data import Dictionary from fairseq.tasks import LegacyFairseqTask, register_task def get_asr_dataset_from_json(data_json_path, tgt_dict): """ Parse data json and create dataset. See scripts/asr_prep_json.py which pack json from raw files Json example: { "utts": { "4771-29403-0025": { "input": { "length_ms": 170, "path": "/tmp/file1.flac" }, "output": { "text": "HELLO \n", "token": "HE LLO", "tokenid": "4815, 861" } }, "1564-142299-0096": { ... } } """ if not os.path.isfile(data_json_path): raise FileNotFoundError("Dataset not found: {}".format(data_json_path)) with open(data_json_path, "rb") as f: data_samples = json.load(f)["utts"] assert len(data_samples) != 0 sorted_samples = sorted( data_samples.items(), key=lambda sample: int(sample[1]["input"]["length_ms"]), reverse=True, ) aud_paths = [s[1]["input"]["path"] for s in sorted_samples] ids = [s[0] for s in sorted_samples] speakers = [] for s in sorted_samples: m = re.search("(.+?)-(.+?)-(.+?)", s[0]) speakers.append(m.group(1) + "_" + m.group(2)) frame_sizes = [s[1]["input"]["length_ms"] for s in sorted_samples] tgt = [ [int(i) for i in s[1]["output"]["tokenid"].split(", ")] for s in sorted_samples ] # append eos tgt = [[*t, tgt_dict.eos()] for t in tgt] return AsrDataset(aud_paths, frame_sizes, tgt, tgt_dict, ids, speakers) @register_task("speech_recognition") class SpeechRecognitionTask(LegacyFairseqTask): """ Task for training speech recognition model. """ @staticmethod def add_args(parser): """Add task-specific arguments to the parser.""" parser.add_argument("data", help="path to data directory") parser.add_argument( "--silence-token", default="\u2581", help="token for silence (used by w2l)" ) parser.add_argument( "--max-source-positions", default=sys.maxsize, type=int, metavar="N", help="max number of frames in the source sequence", ) parser.add_argument( "--max-target-positions", default=1024, type=int, metavar="N", help="max number of tokens in the target sequence", ) def __init__(self, args, tgt_dict): super().__init__(args) self.tgt_dict = tgt_dict @classmethod def setup_task(cls, args, **kwargs): """Setup the task (e.g., load dictionaries).""" dict_path = os.path.join(args.data, "dict.txt") if not os.path.isfile(dict_path): raise FileNotFoundError("Dict not found: {}".format(dict_path)) tgt_dict = Dictionary.load(dict_path) if args.criterion == "ctc_loss": tgt_dict.add_symbol("<ctc_blank>") elif args.criterion == "asg_loss": for i in range(1, args.max_replabel + 1): tgt_dict.add_symbol(replabel_symbol(i)) print("| dictionary: {} types".format(len(tgt_dict))) return cls(args, tgt_dict) def load_dataset(self, split, combine=False, **kwargs): """Load a given dataset split. 
Args: split (str): name of the split (e.g., train, valid, test) """ data_json_path = os.path.join(self.args.data, "{}.json".format(split)) self.datasets[split] = get_asr_dataset_from_json(data_json_path, self.tgt_dict) def build_generator(self, models, args, **unused): w2l_decoder = getattr(args, "w2l_decoder", None) if w2l_decoder == "viterbi": from examples.speech_recognition.w2l_decoder import W2lViterbiDecoder return W2lViterbiDecoder(args, self.target_dictionary) elif w2l_decoder == "kenlm": from examples.speech_recognition.w2l_decoder import W2lKenLMDecoder return W2lKenLMDecoder(args, self.target_dictionary) elif w2l_decoder == "fairseqlm": from examples.speech_recognition.w2l_decoder import W2lFairseqLMDecoder return W2lFairseqLMDecoder(args, self.target_dictionary) else: return super().build_generator(models, args) @property def target_dictionary(self): """Return the :class:`~fairseq.data.Dictionary` for the language model.""" return self.tgt_dict @property def source_dictionary(self): """Return the source :class:`~fairseq.data.Dictionary` (if applicable for this task).""" return None def max_positions(self): """Return the max speech and sentence length allowed by the task.""" return (self.args.max_source_positions, self.args.max_target_positions)
5,397
33.164557
87
py
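get_asr_dataset_from_json sorts utterances by length_ms (longest first) and splits the comma-separated tokenid field into integer targets. A sketch of that parsing on an in-memory dict instead of a JSON file:

data_samples = {
    "utt1": {"input": {"length_ms": 170, "path": "/tmp/file1.flac"},
             "output": {"tokenid": "4815, 861"}},
    "utt2": {"input": {"length_ms": 520, "path": "/tmp/file2.flac"},
             "output": {"tokenid": "12, 7, 99"}},
}
sorted_samples = sorted(
    data_samples.items(),
    key=lambda sample: int(sample[1]["input"]["length_ms"]),
    reverse=True,
)
tgt = [[int(i) for i in s[1]["output"]["tokenid"].split(", ")] for s in sorted_samples]
print([s[0] for s in sorted_samples], tgt)  # ['utt2', 'utt1'] [[12, 7, 99], [4815, 861]]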
sign-topic
sign-topic-main/examples/speech_synthesis/generate_waveform.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import ast import logging import matplotlib.pyplot as plt import numpy as np from pathlib import Path import soundfile as sf import sys import torch import torchaudio from fairseq import checkpoint_utils, options, tasks, utils from fairseq.logging import progress_bar from fairseq.tasks.text_to_speech import plot_tts_output from fairseq.data.audio.text_to_speech_dataset import TextToSpeechDataset logging.basicConfig() logging.root.setLevel(logging.INFO) logging.basicConfig(level=logging.INFO) logger = logging.getLogger(__name__) def make_parser(): parser = options.get_speech_generation_parser() parser.add_argument("--dump-features", action="store_true") parser.add_argument("--dump-waveforms", action="store_true") parser.add_argument("--dump-attentions", action="store_true") parser.add_argument("--dump-eos-probs", action="store_true") parser.add_argument("--dump-plots", action="store_true") parser.add_argument("--dump-target", action="store_true") parser.add_argument("--output-sample-rate", default=22050, type=int) parser.add_argument("--teacher-forcing", action="store_true") parser.add_argument( "--audio-format", type=str, default="wav", choices=["wav", "flac"] ) return parser def postprocess_results( dataset: TextToSpeechDataset, sample, hypos, resample_fn, dump_target ): def to_np(x): return None if x is None else x.detach().cpu().numpy() sample_ids = [dataset.ids[i] for i in sample["id"].tolist()] texts = sample["src_texts"] if "src_texts" in sample else [""] * len(hypos) attns = [to_np(hypo["attn"]) for hypo in hypos] eos_probs = [to_np(hypo.get("eos_prob", None)) for hypo in hypos] feat_preds = [to_np(hypo["feature"]) for hypo in hypos] wave_preds = [to_np(resample_fn(h["waveform"])) for h in hypos] if dump_target: feat_targs = [to_np(hypo["targ_feature"]) for hypo in hypos] wave_targs = [to_np(resample_fn(h["targ_waveform"])) for h in hypos] else: feat_targs = [None for _ in hypos] wave_targs = [None for _ in hypos] return zip(sample_ids, texts, attns, eos_probs, feat_preds, wave_preds, feat_targs, wave_targs) def dump_result( is_na_model, args, vocoder, sample_id, text, attn, eos_prob, feat_pred, wave_pred, feat_targ, wave_targ, ): sample_rate = args.output_sample_rate out_root = Path(args.results_path) if args.dump_features: feat_dir = out_root / "feat" feat_dir.mkdir(exist_ok=True, parents=True) np.save(feat_dir / f"{sample_id}.npy", feat_pred) if args.dump_target: feat_tgt_dir = out_root / "feat_tgt" feat_tgt_dir.mkdir(exist_ok=True, parents=True) np.save(feat_tgt_dir / f"{sample_id}.npy", feat_targ) if args.dump_attentions: attn_dir = out_root / "attn" attn_dir.mkdir(exist_ok=True, parents=True) np.save(attn_dir / f"{sample_id}.npy", attn.numpy()) if args.dump_eos_probs and not is_na_model: eos_dir = out_root / "eos" eos_dir.mkdir(exist_ok=True, parents=True) np.save(eos_dir / f"{sample_id}.npy", eos_prob) if args.dump_plots: images = [feat_pred.T] if is_na_model else [feat_pred.T, attn] names = ["output"] if is_na_model else ["output", "alignment"] if feat_targ is not None: images = [feat_targ.T] + images names = [f"target (idx={sample_id})"] + names if is_na_model: plot_tts_output(images, names, attn, "alignment", suptitle=text) else: plot_tts_output(images, names, eos_prob, "eos prob", suptitle=text) plot_dir = out_root / "plot" plot_dir.mkdir(exist_ok=True, parents=True) plt.savefig(plot_dir / 
f"{sample_id}.png") plt.close() if args.dump_waveforms: ext = args.audio_format if wave_pred is not None: wav_dir = out_root / f"{ext}_{sample_rate}hz_{vocoder}" wav_dir.mkdir(exist_ok=True, parents=True) sf.write(wav_dir / f"{sample_id}.{ext}", wave_pred, sample_rate) if args.dump_target and wave_targ is not None: wav_tgt_dir = out_root / f"{ext}_{sample_rate}hz_{vocoder}_tgt" wav_tgt_dir.mkdir(exist_ok=True, parents=True) sf.write(wav_tgt_dir / f"{sample_id}.{ext}", wave_targ, sample_rate) def main(args): assert(args.dump_features or args.dump_waveforms or args.dump_attentions or args.dump_eos_probs or args.dump_plots) if args.max_tokens is None and args.batch_size is None: args.max_tokens = 8000 logger.info(args) use_cuda = torch.cuda.is_available() and not args.cpu task = tasks.setup_task(args) models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task( [args.path], task=task, arg_overrides=ast.literal_eval(args.model_overrides), ) model = models[0].cuda() if use_cuda else models[0] # use the original n_frames_per_step task.args.n_frames_per_step = saved_cfg.task.n_frames_per_step task.load_dataset(args.gen_subset, task_cfg=saved_cfg.task) data_cfg = task.data_cfg sample_rate = data_cfg.config.get("features", {}).get("sample_rate", 22050) resample_fn = { False: lambda x: x, True: lambda x: torchaudio.sox_effects.apply_effects_tensor( x.detach().cpu().unsqueeze(0), sample_rate, [['rate', str(args.output_sample_rate)]] )[0].squeeze(0) }.get(args.output_sample_rate != sample_rate) if args.output_sample_rate != sample_rate: logger.info(f"resampling to {args.output_sample_rate}Hz") generator = task.build_generator([model], args) itr = task.get_batch_iterator( dataset=task.dataset(args.gen_subset), max_tokens=args.max_tokens, max_sentences=args.batch_size, max_positions=(sys.maxsize, sys.maxsize), ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test, required_batch_size_multiple=args.required_batch_size_multiple, num_shards=args.num_shards, shard_id=args.shard_id, num_workers=args.num_workers, data_buffer_size=args.data_buffer_size, ).next_epoch_itr(shuffle=False) Path(args.results_path).mkdir(exist_ok=True, parents=True) is_na_model = getattr(model, "NON_AUTOREGRESSIVE", False) dataset = task.dataset(args.gen_subset) vocoder = task.args.vocoder with progress_bar.build_progress_bar(args, itr) as t: for sample in t: sample = utils.move_to_cuda(sample) if use_cuda else sample hypos = generator.generate(model, sample, has_targ=args.dump_target) for result in postprocess_results( dataset, sample, hypos, resample_fn, args.dump_target ): dump_result(is_na_model, args, vocoder, *result) def cli_main(): parser = make_parser() args = options.parse_args_and_arch(parser) main(args) if __name__ == "__main__": cli_main()
7,339
37.031088
80
py
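main() picks resample_fn via a dict keyed on whether the output rate differs from the dataset rate: identity if they match, a resampler otherwise. A sketch of that dispatch with torchaudio.functional.resample standing in for the sox_effects call used above (treat that swap as an assumption):

import torch
import torchaudio

sample_rate, output_sample_rate = 22050, 16000
resample_fn = {
    False: lambda x: x,  # rates already match
    True: lambda x: torchaudio.functional.resample(x, sample_rate, output_sample_rate),
}.get(output_sample_rate != sample_rate)

wave = torch.randn(22050)        # one second of toy audio
print(resample_fn(wave).shape)   # torch.Size([16000])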
sign-topic
sign-topic-main/examples/speech_synthesis/utils.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import numpy as np import torch from scipy.interpolate import interp1d import torchaudio from fairseq.tasks.text_to_speech import ( batch_compute_distortion, compute_rms_dist ) def batch_mel_spectral_distortion( y1, y2, sr, normalize_type="path", mel_fn=None ): """ https://arxiv.org/pdf/2011.03568.pdf Same as Mel Cepstral Distortion, but computed on log-mel spectrograms. """ if mel_fn is None or mel_fn.sample_rate != sr: mel_fn = torchaudio.transforms.MelSpectrogram( sr, n_fft=int(0.05 * sr), win_length=int(0.05 * sr), hop_length=int(0.0125 * sr), f_min=20, n_mels=80, window_fn=torch.hann_window ).to(y1[0].device) offset = 1e-6 return batch_compute_distortion( y1, y2, sr, lambda y: torch.log(mel_fn(y) + offset).transpose(-1, -2), compute_rms_dist, normalize_type ) # This code is based on # "https://github.com/bastibe/MAPS-Scripts/blob/master/helper.py" def _same_t_in_true_and_est(func): def new_func(true_t, true_f, est_t, est_f): assert type(true_t) is np.ndarray assert type(true_f) is np.ndarray assert type(est_t) is np.ndarray assert type(est_f) is np.ndarray interpolated_f = interp1d( est_t, est_f, bounds_error=False, kind='nearest', fill_value=0 )(true_t) return func(true_t, true_f, true_t, interpolated_f) return new_func @_same_t_in_true_and_est def gross_pitch_error(true_t, true_f, est_t, est_f): """The relative frequency in percent of pitch estimates that are outside a threshold around the true pitch. Only frames that are considered pitched by both the ground truth and the estimator (if applicable) are considered. """ correct_frames = _true_voiced_frames(true_t, true_f, est_t, est_f) gross_pitch_error_frames = _gross_pitch_error_frames( true_t, true_f, est_t, est_f ) return np.sum(gross_pitch_error_frames) / np.sum(correct_frames) def _gross_pitch_error_frames(true_t, true_f, est_t, est_f, eps=1e-8): voiced_frames = _true_voiced_frames(true_t, true_f, est_t, est_f) true_f_p_eps = [x + eps for x in true_f] pitch_error_frames = np.abs(est_f / true_f_p_eps - 1) > 0.2 return voiced_frames & pitch_error_frames def _true_voiced_frames(true_t, true_f, est_t, est_f): return (est_f != 0) & (true_f != 0) def _voicing_decision_error_frames(true_t, true_f, est_t, est_f): return (est_f != 0) != (true_f != 0) @_same_t_in_true_and_est def f0_frame_error(true_t, true_f, est_t, est_f): gross_pitch_error_frames = _gross_pitch_error_frames( true_t, true_f, est_t, est_f ) voicing_decision_error_frames = _voicing_decision_error_frames( true_t, true_f, est_t, est_f ) return (np.sum(gross_pitch_error_frames) + np.sum(voicing_decision_error_frames)) / (len(true_t)) @_same_t_in_true_and_est def voicing_decision_error(true_t, true_f, est_t, est_f): voicing_decision_error_frames = _voicing_decision_error_frames( true_t, true_f, est_t, est_f ) return np.sum(voicing_decision_error_frames) / (len(true_t))
3,357
31.921569
78
py
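gross_pitch_error counts, among frames voiced in both the reference and the estimate, those whose estimated F0 deviates from the reference by more than 20 percent. A standalone numpy sketch on toy contours, mirroring the masking above:

import numpy as np

true_f = np.array([0., 100., 200., 150., 0.])
est_f = np.array([0., 130., 205., 0., 90.])

voiced = (true_f != 0) & (est_f != 0)                              # voiced in both
gpe_frames = voiced & (np.abs(est_f / (true_f + 1e-8) - 1) > 0.2)  # >20% deviation
print(gpe_frames.sum() / voiced.sum())                             # 0.5: one bad frame of two voiced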
sign-topic
sign-topic-main/examples/speech_synthesis/data_utils.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import io import os from pathlib import Path from typing import Optional, List, Dict import zipfile import tempfile from dataclasses import dataclass from itertools import groupby import torch import torch.nn.functional as F import numpy as np from tqdm import tqdm from examples.speech_to_text.data_utils import load_tsv_to_dicts from fairseq.data.audio.audio_utils import ( TTSSpectrogram, TTSMelScale, parse_path, read_from_stored_zip, is_npy_data ) def trim_or_pad_to_target_length( data_1d_or_2d: np.ndarray, target_length: int ) -> np.ndarray: assert len(data_1d_or_2d.shape) in {1, 2} delta = data_1d_or_2d.shape[0] - target_length if delta >= 0: # trim if being longer data_1d_or_2d = data_1d_or_2d[: target_length] else: # pad if being shorter if len(data_1d_or_2d.shape) == 1: data_1d_or_2d = np.concatenate( [data_1d_or_2d, np.zeros(-delta)], axis=0 ) else: data_1d_or_2d = np.concatenate( [data_1d_or_2d, np.zeros((-delta, data_1d_or_2d.shape[1]))], axis=0 ) return data_1d_or_2d def extract_logmel_spectrogram( waveform: torch.Tensor, sample_rate: int, output_path: Optional[Path] = None, win_length: int = 1024, hop_length: int = 256, n_fft: int = 1024, win_fn: callable = torch.hann_window, n_mels: int = 80, f_min: float = 0., f_max: float = 8000, eps: float = 1e-5, overwrite: bool = False, target_length: Optional[int] = None ): if output_path is not None and output_path.is_file() and not overwrite: return spectrogram_transform = TTSSpectrogram( n_fft=n_fft, win_length=win_length, hop_length=hop_length, window_fn=win_fn ) mel_scale_transform = TTSMelScale( n_mels=n_mels, sample_rate=sample_rate, f_min=f_min, f_max=f_max, n_stft=n_fft // 2 + 1 ) spectrogram = spectrogram_transform(waveform) mel_spec = mel_scale_transform(spectrogram) logmel_spec = torch.clamp(mel_spec, min=eps).log() assert len(logmel_spec.shape) == 3 and logmel_spec.shape[0] == 1 logmel_spec = logmel_spec.squeeze().t() # D x T -> T x D if target_length is not None: logmel_spec = trim_or_pad_to_target_length(logmel_spec, target_length) if output_path is not None: np.save(output_path.as_posix(), logmel_spec) else: return logmel_spec def extract_pitch( waveform: torch.Tensor, sample_rate: int, output_path: Optional[Path] = None, hop_length: int = 256, log_scale: bool = True, phoneme_durations: Optional[List[int]] = None ): if output_path is not None and output_path.is_file(): return try: import pyworld except ImportError: raise ImportError("Please install PyWORLD: pip install pyworld") _waveform = waveform.squeeze(0).double().numpy() pitch, t = pyworld.dio( _waveform, sample_rate, frame_period=hop_length / sample_rate * 1000 ) pitch = pyworld.stonemask(_waveform, pitch, t, sample_rate) if phoneme_durations is not None: pitch = trim_or_pad_to_target_length(pitch, sum(phoneme_durations)) try: from scipy.interpolate import interp1d except ImportError: raise ImportError("Please install SciPy: pip install scipy") nonzero_ids = np.where(pitch != 0)[0] if len(nonzero_ids) == 0: print((f"{output_path} has all empty values in the pitch contour")) return elif len(nonzero_ids) == 1: print((f"{output_path} has only one non-zero values in the pitch contour")) return else: interp_fn = interp1d( nonzero_ids, pitch[nonzero_ids], fill_value=(pitch[nonzero_ids[0]], pitch[nonzero_ids[-1]]), bounds_error=False, ) pitch = interp_fn(np.arange(0, len(pitch))) d_cumsum = 
np.cumsum(np.concatenate([np.array([0]), phoneme_durations])) pitch = np.array( [ np.mean(pitch[d_cumsum[i-1]: d_cumsum[i]]) for i in range(1, len(d_cumsum)) ] ) assert len(pitch) == len(phoneme_durations) if log_scale: pitch = np.log(pitch + 1) if output_path is not None: np.save(output_path.as_posix(), pitch) else: return pitch def extract_energy( waveform: torch.Tensor, output_path: Optional[Path] = None, hop_length: int = 256, n_fft: int = 1024, log_scale: bool = True, phoneme_durations: Optional[List[int]] = None ): if output_path is not None and output_path.is_file(): return assert len(waveform.shape) == 2 and waveform.shape[0] == 1 waveform = waveform.view(1, 1, waveform.shape[1]) waveform = F.pad( waveform.unsqueeze(1), [n_fft // 2, n_fft // 2, 0, 0], mode="reflect" ) waveform = waveform.squeeze(1) fourier_basis = np.fft.fft(np.eye(n_fft)) cutoff = int((n_fft / 2 + 1)) fourier_basis = np.vstack( [np.real(fourier_basis[:cutoff, :]), np.imag(fourier_basis[:cutoff, :])] ) forward_basis = torch.FloatTensor(fourier_basis[:, None, :]) forward_transform = F.conv1d( waveform, forward_basis, stride=hop_length, padding=0 ) real_part = forward_transform[:, :cutoff, :] imag_part = forward_transform[:, cutoff:, :] magnitude = torch.sqrt(real_part ** 2 + imag_part ** 2) energy = torch.norm(magnitude, dim=1).squeeze(0).numpy() if phoneme_durations is not None: energy = trim_or_pad_to_target_length(energy, sum(phoneme_durations)) d_cumsum = np.cumsum(np.concatenate([np.array([0]), phoneme_durations])) energy = np.array( [ np.mean(energy[d_cumsum[i - 1]: d_cumsum[i]]) for i in range(1, len(d_cumsum)) ] ) assert len(energy) == len(phoneme_durations) if log_scale: energy = np.log(energy + 1) if output_path is not None: np.save(output_path.as_posix(), energy) else: return energy def get_global_cmvn(feature_root: Path, output_path: Optional[Path] = None): mean_x, mean_x2, n_frames = None, None, 0 feature_paths = feature_root.glob("*.npy") for p in tqdm(feature_paths): with open(p, 'rb') as f: frames = np.load(f).squeeze() n_frames += frames.shape[0] cur_mean_x = frames.sum(axis=0) if mean_x is None: mean_x = cur_mean_x else: mean_x += cur_mean_x cur_mean_x2 = (frames ** 2).sum(axis=0) if mean_x2 is None: mean_x2 = cur_mean_x2 else: mean_x2 += cur_mean_x2 mean_x /= n_frames mean_x2 /= n_frames var_x = mean_x2 - mean_x ** 2 std_x = np.sqrt(np.maximum(var_x, 1e-10)) if output_path is not None: with open(output_path, 'wb') as f: np.savez(f, mean=mean_x, std=std_x) else: return {"mean": mean_x, "std": std_x} def ipa_phonemize(text, lang="en-us", use_g2p=False): if use_g2p: assert lang == "en-us", "g2pE phonemizer only works for en-us" try: from g2p_en import G2p g2p = G2p() return " ".join("|" if p == " " else p for p in g2p(text)) except ImportError: raise ImportError( "Please install phonemizer: pip install g2p_en" ) else: try: from phonemizer import phonemize from phonemizer.separator import Separator return phonemize( text, backend='espeak', language=lang, separator=Separator(word="| ", phone=" ") ) except ImportError: raise ImportError( "Please install phonemizer: pip install phonemizer" ) @dataclass class ForceAlignmentInfo(object): tokens: List[str] frame_durations: List[int] start_sec: Optional[float] end_sec: Optional[float] def get_mfa_alignment_by_sample_id( textgrid_zip_path: str, sample_id: str, sample_rate: int, hop_length: int, silence_phones: List[str] = ("sil", "sp", "spn") ) -> ForceAlignmentInfo: try: import tgt except ImportError: raise ImportError("Please install TextGridTools: pip 
install tgt") filename = f"{sample_id}.TextGrid" out_root = Path(tempfile.gettempdir()) tgt_path = out_root / filename with zipfile.ZipFile(textgrid_zip_path) as f_zip: f_zip.extract(filename, path=out_root) textgrid = tgt.io.read_textgrid(tgt_path.as_posix()) os.remove(tgt_path) phones, frame_durations = [], [] start_sec, end_sec, end_idx = 0, 0, 0 for t in textgrid.get_tier_by_name("phones")._objects: s, e, p = t.start_time, t.end_time, t.text # Trim leading silences if len(phones) == 0: if p in silence_phones: continue else: start_sec = s phones.append(p) if p not in silence_phones: end_sec = e end_idx = len(phones) r = sample_rate / hop_length frame_durations.append(int(np.round(e * r) - np.round(s * r))) # Trim tailing silences phones = phones[:end_idx] frame_durations = frame_durations[:end_idx] return ForceAlignmentInfo( tokens=phones, frame_durations=frame_durations, start_sec=start_sec, end_sec=end_sec ) def get_mfa_alignment( textgrid_zip_path: str, sample_ids: List[str], sample_rate: int, hop_length: int ) -> Dict[str, ForceAlignmentInfo]: return { i: get_mfa_alignment_by_sample_id( textgrid_zip_path, i, sample_rate, hop_length ) for i in tqdm(sample_ids) } def get_unit_alignment( id_to_unit_tsv_path: str, sample_ids: List[str] ) -> Dict[str, ForceAlignmentInfo]: id_to_units = { e["id"]: e["units"] for e in load_tsv_to_dicts(id_to_unit_tsv_path) } id_to_units = {i: id_to_units[i].split() for i in sample_ids} id_to_units_collapsed = { i: [uu for uu, _ in groupby(u)] for i, u in id_to_units.items() } id_to_durations = { i: [len(list(g)) for _, g in groupby(u)] for i, u in id_to_units.items() } return { i: ForceAlignmentInfo( tokens=id_to_units_collapsed[i], frame_durations=id_to_durations[i], start_sec=None, end_sec=None ) for i in sample_ids } def get_feature_value_min_max(feature_paths: List[str]): v_min, v_max = 1e-8, -1e-8 for p in tqdm(feature_paths): _path, slice_ptr = parse_path(p) assert len(slice_ptr) == 2 byte_data = read_from_stored_zip(_path, slice_ptr[0], slice_ptr[1]) assert is_npy_data(byte_data) path_or_fp = io.BytesIO(byte_data) features = np.load(path_or_fp).squeeze() v_min = min(v_min, features.min().item()) v_max = max(v_max, features.max().item()) return v_min, v_max
11,364
31.942029
87
py
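trim_or_pad_to_target_length forces a feature array to an exact length: slice when longer, zero-pad when shorter. A minimal numpy sketch of the 1-D branch:

import numpy as np

def trim_or_pad(x: np.ndarray, target_length: int) -> np.ndarray:
    delta = x.shape[0] - target_length
    if delta >= 0:
        return x[:target_length]                           # trim if longer
    return np.concatenate([x, np.zeros(-delta)], axis=0)   # zero-pad if shorter

print(trim_or_pad(np.arange(5.), 3))  # [0. 1. 2.]
print(trim_or_pad(np.arange(2.), 4))  # [0. 1. 0. 0.]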
sign-topic
sign-topic-main/examples/speech_synthesis/evaluation/eval_sp.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ Signal processing-based evaluation using waveforms """ import csv import numpy as np import os.path as op import torch import tqdm from tabulate import tabulate import torchaudio from examples.speech_synthesis.utils import batch_mel_spectral_distortion from fairseq.tasks.text_to_speech import batch_mel_cepstral_distortion def load_eval_spec(path): with open(path) as f: reader = csv.DictReader(f, delimiter='\t') samples = list(reader) return samples def eval_distortion(samples, distortion_fn, device="cuda"): nmiss = 0 results = [] for sample in tqdm.tqdm(samples): if not op.isfile(sample["ref"]) or not op.isfile(sample["syn"]): nmiss += 1 results.append(None) continue # assume single channel yref, sr = torchaudio.load(sample["ref"]) ysyn, _sr = torchaudio.load(sample["syn"]) yref, ysyn = yref[0].to(device), ysyn[0].to(device) assert sr == _sr, f"{sr} != {_sr}" distortion, extra = distortion_fn([yref], [ysyn], sr, None)[0] _, _, _, _, _, pathmap = extra nins = torch.sum(pathmap.sum(dim=1) - 1) # extra frames in syn ndel = torch.sum(pathmap.sum(dim=0) - 1) # missing frames from syn results.append( (distortion.item(), # path distortion pathmap.size(0), # yref num frames pathmap.size(1), # ysyn num frames pathmap.sum().item(), # path length nins.item(), # insertion ndel.item(), # deletion ) ) return results def eval_mel_cepstral_distortion(samples, device="cuda"): return eval_distortion(samples, batch_mel_cepstral_distortion, device) def eval_mel_spectral_distortion(samples, device="cuda"): return eval_distortion(samples, batch_mel_spectral_distortion, device) def print_results(results, show_bin): results = np.array(list(filter(lambda x: x is not None, results))) np.set_printoptions(precision=3) def _print_result(results): dist, dur_ref, dur_syn, dur_ali, nins, ndel = results.sum(axis=0) res = { "nutt": len(results), "dist": dist, "dur_ref": int(dur_ref), "dur_syn": int(dur_syn), "dur_ali": int(dur_ali), "dist_per_ref_frm": dist/dur_ref, "dist_per_syn_frm": dist/dur_syn, "dist_per_ali_frm": dist/dur_ali, "ins": nins/dur_ref, "del": ndel/dur_ref, } print(tabulate( [res.values()], res.keys(), floatfmt=".4f" )) print(">>>> ALL") _print_result(results) if show_bin: edges = [0, 200, 400, 600, 800, 1000, 2000, 4000] for i in range(1, len(edges)): mask = np.logical_and(results[:, 1] >= edges[i-1], results[:, 1] < edges[i]) if not mask.any(): continue bin_results = results[mask] print(f">>>> ({edges[i-1]}, {edges[i]})") _print_result(bin_results) def main(eval_spec, mcd, msd, show_bin): samples = load_eval_spec(eval_spec) device = "cpu" if mcd: print("===== Evaluate Mean Cepstral Distortion =====") results = eval_mel_cepstral_distortion(samples, device) print_results(results, show_bin) if msd: print("===== Evaluate Mean Spectral Distortion =====") results = eval_mel_spectral_distortion(samples, device) print_results(results, show_bin) if __name__ == "__main__": import argparse parser = argparse.ArgumentParser() parser.add_argument("eval_spec") parser.add_argument("--mcd", action="store_true") parser.add_argument("--msd", action="store_true") parser.add_argument("--show-bin", action="store_true") args = parser.parse_args() main(args.eval_spec, args.mcd, args.msd, args.show_bin)
4,149
30.439394
75
py
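print_results optionally buckets per-utterance results by reference duration (column 1) into fixed edges and summarizes each bin. A standalone sketch of that bucketing on a toy 3-column results array (distortion, ref frames, syn frames), not the full 6-column layout above:

import numpy as np

results = np.array([[1.2, 150, 160], [3.4, 700, 690], [2.2, 820, 800]])
edges = [0, 200, 400, 600, 800, 1000]
for i in range(1, len(edges)):
    mask = np.logical_and(results[:, 1] >= edges[i - 1], results[:, 1] < edges[i])
    if not mask.any():
        continue  # skip empty buckets
    print(f"({edges[i-1]}, {edges[i]}): n={int(mask.sum())}, mean dist={results[mask, 0].mean():.2f}")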
sign-topic
sign-topic-main/examples/speech_synthesis/evaluation/eval_f0.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ Signal processing-based evaluation using waveforms """ import numpy as np import os.path as op import torchaudio import tqdm from tabulate import tabulate from examples.speech_synthesis.utils import ( gross_pitch_error, voicing_decision_error, f0_frame_error ) from examples.speech_synthesis.evaluation.eval_sp import load_eval_spec def difference_function(x, n, tau_max): """ Compute difference function of data x. This solution is implemented directly with Numpy fft. :param x: audio data :param n: length of data :param tau_max: integration window size :return: difference function :rtype: list """ x = np.array(x, np.float64) w = x.size tau_max = min(tau_max, w) x_cumsum = np.concatenate((np.array([0.]), (x * x).cumsum())) size = w + tau_max p2 = (size // 32).bit_length() nice_numbers = (16, 18, 20, 24, 25, 27, 30, 32) size_pad = min(x * 2 ** p2 for x in nice_numbers if x * 2 ** p2 >= size) fc = np.fft.rfft(x, size_pad) conv = np.fft.irfft(fc * fc.conjugate())[:tau_max] return x_cumsum[w:w - tau_max:-1] + x_cumsum[w] - x_cumsum[:tau_max] - \ 2 * conv def cumulative_mean_normalized_difference_function(df, n): """ Compute cumulative mean normalized difference function (CMND). :param df: Difference function :param n: length of data :return: cumulative mean normalized difference function :rtype: list """ # scipy method cmn_df = df[1:] * range(1, n) / np.cumsum(df[1:]).astype(float) return np.insert(cmn_df, 0, 1) def get_pitch(cmdf, tau_min, tau_max, harmo_th=0.1): """ Return fundamental period of a frame based on CMND function. :param cmdf: Cumulative Mean Normalized Difference function :param tau_min: minimum period for speech :param tau_max: maximum period for speech :param harmo_th: harmonicity threshold to determine if it is necessary to compute pitch frequency :return: fundamental period if there is values under threshold, 0 otherwise :rtype: float """ tau = tau_min while tau < tau_max: if cmdf[tau] < harmo_th: while tau + 1 < tau_max and cmdf[tau + 1] < cmdf[tau]: tau += 1 return tau tau += 1 return 0 # if unvoiced def compute_yin(sig, sr, w_len=512, w_step=256, f0_min=100, f0_max=500, harmo_thresh=0.1): """ Compute the Yin Algorithm. Return fundamental frequency and harmonic rate. https://github.com/NVIDIA/mellotron adaption of https://github.com/patriceguyot/Yin :param sig: Audio signal (list of float) :param sr: sampling rate (int) :param w_len: size of the analysis window (samples) :param w_step: size of the lag between two consecutives windows (samples) :param f0_min: Minimum fundamental frequency that can be detected (hertz) :param f0_max: Maximum fundamental frequency that can be detected (hertz) :param harmo_thresh: Threshold of detection. The yalgorithmù return the first minimum of the CMND function below this threshold. 
:returns: * pitches: list of fundamental frequencies, * harmonic_rates: list of harmonic rate values for each fundamental frequency value (= confidence value) * argmins: minimums of the Cumulative Mean Normalized DifferenceFunction * times: list of time of each estimation :rtype: tuple """ tau_min = int(sr / f0_max) tau_max = int(sr / f0_min) # time values for each analysis window time_scale = range(0, len(sig) - w_len, w_step) times = [t/float(sr) for t in time_scale] frames = [sig[t:t + w_len] for t in time_scale] pitches = [0.0] * len(time_scale) harmonic_rates = [0.0] * len(time_scale) argmins = [0.0] * len(time_scale) for i, frame in enumerate(frames): # Compute YIN df = difference_function(frame, w_len, tau_max) cm_df = cumulative_mean_normalized_difference_function(df, tau_max) p = get_pitch(cm_df, tau_min, tau_max, harmo_thresh) # Get results if np.argmin(cm_df) > tau_min: argmins[i] = float(sr / np.argmin(cm_df)) if p != 0: # A pitch was found pitches[i] = float(sr / p) harmonic_rates[i] = cm_df[p] else: # No pitch, but we compute a value of the harmonic rate harmonic_rates[i] = min(cm_df) return pitches, harmonic_rates, argmins, times def extract_f0(samples): f0_samples = [] for sample in tqdm.tqdm(samples): if not op.isfile(sample["ref"]) or not op.isfile(sample["syn"]): f0_samples.append(None) continue # assume single channel yref, sr = torchaudio.load(sample["ref"]) ysyn, _sr = torchaudio.load(sample["syn"]) yref, ysyn = yref[0], ysyn[0] assert sr == _sr, f"{sr} != {_sr}" yref_f0 = compute_yin(yref, sr) ysyn_f0 = compute_yin(ysyn, sr) f0_samples += [ { "ref": yref_f0, "syn": ysyn_f0 } ] return f0_samples def eval_f0_error(samples, distortion_fn): results = [] for sample in tqdm.tqdm(samples): if sample is None: results.append(None) continue # assume single channel yref_f, _, _, yref_t = sample["ref"] ysyn_f, _, _, ysyn_t = sample["syn"] yref_f = np.array(yref_f) yref_t = np.array(yref_t) ysyn_f = np.array(ysyn_f) ysyn_t = np.array(ysyn_t) distortion = distortion_fn(yref_t, yref_f, ysyn_t, ysyn_f) results.append((distortion.item(), len(yref_f), len(ysyn_f) )) return results def eval_gross_pitch_error(samples): return eval_f0_error(samples, gross_pitch_error) def eval_voicing_decision_error(samples): return eval_f0_error(samples, voicing_decision_error) def eval_f0_frame_error(samples): return eval_f0_error(samples, f0_frame_error) def print_results(results, show_bin): results = np.array(list(filter(lambda x: x is not None, results))) np.set_printoptions(precision=3) def _print_result(results): res = { "nutt": len(results), "error": results[:, 0].mean(), "std": results[:, 0].std(), "dur_ref": int(results[:, 1].sum()), "dur_syn": int(results[:, 2].sum()), } print(tabulate([res.values()], res.keys(), floatfmt=".4f")) print(">>>> ALL") _print_result(results) if show_bin: edges = [0, 200, 400, 600, 800, 1000, 2000, 4000] for i in range(1, len(edges)): mask = np.logical_and(results[:, 1] >= edges[i-1], results[:, 1] < edges[i]) if not mask.any(): continue bin_results = results[mask] print(f">>>> ({edges[i-1]}, {edges[i]})") _print_result(bin_results) def main(eval_f0, gpe, vde, ffe, show_bin): samples = load_eval_spec(eval_f0) if gpe or vde or ffe: f0_samples = extract_f0(samples) if gpe: print("===== Evaluate Gross Pitch Error =====") results = eval_gross_pitch_error(f0_samples) print_results(results, show_bin) if vde: print("===== Evaluate Voicing Decision Error =====") results = eval_voicing_decision_error(f0_samples) print_results(results, show_bin) if ffe: print("===== Evaluate 
F0 Frame Error =====") results = eval_f0_frame_error(f0_samples) print_results(results, show_bin) if __name__ == "__main__": import argparse parser = argparse.ArgumentParser() parser.add_argument("eval_f0") parser.add_argument("--gpe", action="store_true") parser.add_argument("--vde", action="store_true") parser.add_argument("--ffe", action="store_true") parser.add_argument("--show-bin", action="store_true") args = parser.parse_args() main(args.eval_f0, args.gpe, args.vde, args.ffe, args.show_bin)
8,333
30.213483
80
py
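A quick sanity check of the compute_yin routine defined in the record above, run on a synthetic tone. This is only a sketch: it assumes the repository root is on PYTHONPATH so the module imports, along with its dependencies (numpy, torch, torchaudio, tabulate); the 220 Hz test frequency is an arbitrary value inside the default [f0_min, f0_max] range.

import numpy as np

from examples.speech_synthesis.evaluation.eval_f0 import compute_yin

sr = 16000
f0 = 220.0                      # arbitrary test pitch within [f0_min, f0_max]
t = np.arange(sr) / sr          # one second of audio
sig = 0.5 * np.sin(2 * np.pi * f0 * t)

pitches, harmonic_rates, argmins, times = compute_yin(sig, sr)
voiced = [p for p in pitches if p > 0]
print(f"median estimated F0: {np.median(voiced):.1f} Hz (expected ~{f0:.0f} Hz)")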
sign-topic
sign-topic-main/examples/speech_synthesis/preprocessing/get_feature_manifest.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import logging from pathlib import Path import shutil from tempfile import NamedTemporaryFile from collections import Counter, defaultdict import pandas as pd import torchaudio from tqdm import tqdm from fairseq.data.audio.audio_utils import convert_waveform from examples.speech_to_text.data_utils import ( create_zip, gen_config_yaml, gen_vocab, get_zip_manifest, load_tsv_to_dicts, save_df_to_tsv ) from examples.speech_synthesis.data_utils import ( extract_logmel_spectrogram, extract_pitch, extract_energy, get_global_cmvn, ipa_phonemize, get_mfa_alignment, get_unit_alignment, get_feature_value_min_max ) log = logging.getLogger(__name__) def process(args): assert "train" in args.splits out_root = Path(args.output_root).absolute() out_root.mkdir(exist_ok=True) print("Fetching data...") audio_manifest_root = Path(args.audio_manifest_root).absolute() samples = [] for s in args.splits: for e in load_tsv_to_dicts(audio_manifest_root / f"{s}.audio.tsv"): e["split"] = s samples.append(e) sample_ids = [s["id"] for s in samples] # Get alignment info id_to_alignment = None if args.textgrid_zip is not None: assert args.id_to_units_tsv is None id_to_alignment = get_mfa_alignment( args.textgrid_zip, sample_ids, args.sample_rate, args.hop_length ) elif args.id_to_units_tsv is not None: # assume identical hop length on the unit sequence id_to_alignment = get_unit_alignment(args.id_to_units_tsv, sample_ids) # Extract features and pack features into ZIP feature_name = "logmelspec80" zip_path = out_root / f"{feature_name}.zip" pitch_zip_path = out_root / "pitch.zip" energy_zip_path = out_root / "energy.zip" gcmvn_npz_path = out_root / "gcmvn_stats.npz" if zip_path.exists() and gcmvn_npz_path.exists(): print(f"{zip_path} and {gcmvn_npz_path} exist.") else: feature_root = out_root / feature_name feature_root.mkdir(exist_ok=True) pitch_root = out_root / "pitch" energy_root = out_root / "energy" if args.add_fastspeech_targets: pitch_root.mkdir(exist_ok=True) energy_root.mkdir(exist_ok=True) print("Extracting Mel spectrogram features...") for sample in tqdm(samples): waveform, sample_rate = torchaudio.load(sample["audio"]) waveform, sample_rate = convert_waveform( waveform, sample_rate, normalize_volume=args.normalize_volume, to_sample_rate=args.sample_rate ) sample_id = sample["id"] target_length = None if id_to_alignment is not None: a = id_to_alignment[sample_id] target_length = sum(a.frame_durations) if a.start_sec is not None and a.end_sec is not None: start_frame = int(a.start_sec * sample_rate) end_frame = int(a.end_sec * sample_rate) waveform = waveform[:, start_frame: end_frame] extract_logmel_spectrogram( waveform, sample_rate, feature_root / f"{sample_id}.npy", win_length=args.win_length, hop_length=args.hop_length, n_fft=args.n_fft, n_mels=args.n_mels, f_min=args.f_min, f_max=args.f_max, target_length=target_length ) if args.add_fastspeech_targets: assert id_to_alignment is not None extract_pitch( waveform, sample_rate, pitch_root / f"{sample_id}.npy", hop_length=args.hop_length, log_scale=True, phoneme_durations=id_to_alignment[sample_id].frame_durations ) extract_energy( waveform, energy_root / f"{sample_id}.npy", hop_length=args.hop_length, n_fft=args.n_fft, log_scale=True, phoneme_durations=id_to_alignment[sample_id].frame_durations ) print("ZIPing features...") create_zip(feature_root, zip_path) 
get_global_cmvn(feature_root, gcmvn_npz_path) shutil.rmtree(feature_root) if args.add_fastspeech_targets: create_zip(pitch_root, pitch_zip_path) shutil.rmtree(pitch_root) create_zip(energy_root, energy_zip_path) shutil.rmtree(energy_root) print("Fetching ZIP manifest...") audio_paths, audio_lengths = get_zip_manifest(zip_path) pitch_paths, pitch_lengths, energy_paths, energy_lengths = [None] * 4 if args.add_fastspeech_targets: pitch_paths, pitch_lengths = get_zip_manifest(pitch_zip_path) energy_paths, energy_lengths = get_zip_manifest(energy_zip_path) # Generate TSV manifest print("Generating manifest...") id_to_cer = None if args.cer_threshold is not None: assert Path(args.cer_tsv_path).is_file() id_to_cer = { x["id"]: x["uer"] for x in load_tsv_to_dicts(args.cer_tsv_path) } manifest_by_split = {split: defaultdict(list) for split in args.splits} for sample in tqdm(samples): sample_id, split = sample["id"], sample["split"] if args.snr_threshold is not None and "snr" in sample \ and sample["snr"] < args.snr_threshold: continue if args.cer_threshold is not None \ and id_to_cer[sample_id] > args.cer_threshold: continue normalized_utt = sample["tgt_text"] if id_to_alignment is not None: normalized_utt = " ".join(id_to_alignment[sample_id].tokens) elif args.ipa_vocab: normalized_utt = ipa_phonemize( normalized_utt, lang=args.lang, use_g2p=args.use_g2p ) manifest_by_split[split]["id"].append(sample_id) manifest_by_split[split]["audio"].append(audio_paths[sample_id]) manifest_by_split[split]["n_frames"].append(audio_lengths[sample_id]) manifest_by_split[split]["tgt_text"].append(normalized_utt) manifest_by_split[split]["speaker"].append(sample["speaker"]) manifest_by_split[split]["src_text"].append(sample["src_text"]) if args.add_fastspeech_targets: assert id_to_alignment is not None duration = " ".join( str(d) for d in id_to_alignment[sample_id].frame_durations ) manifest_by_split[split]["duration"].append(duration) manifest_by_split[split]["pitch"].append(pitch_paths[sample_id]) manifest_by_split[split]["energy"].append(energy_paths[sample_id]) for split in args.splits: save_df_to_tsv( pd.DataFrame.from_dict(manifest_by_split[split]), out_root / f"{split}.tsv" ) # Generate vocab vocab_name, spm_filename = None, None if id_to_alignment is not None or args.ipa_vocab: vocab = Counter() for t in manifest_by_split["train"]["tgt_text"]: vocab.update(t.split(" ")) vocab_name = "vocab.txt" with open(out_root / vocab_name, "w") as f: for s, c in vocab.most_common(): f.write(f"{s} {c}\n") else: spm_filename_prefix = "spm_char" spm_filename = f"{spm_filename_prefix}.model" with NamedTemporaryFile(mode="w") as f: for t in manifest_by_split["train"]["tgt_text"]: f.write(t + "\n") f.flush() # needed to ensure gen_vocab sees dumped text gen_vocab(Path(f.name), out_root / spm_filename_prefix, "char") # Generate speaker list speakers = sorted({sample["speaker"] for sample in samples}) speakers_path = out_root / "speakers.txt" with open(speakers_path, "w") as f: for speaker in speakers: f.write(f"{speaker}\n") # Generate config YAML win_len_t = args.win_length / args.sample_rate hop_len_t = args.hop_length / args.sample_rate extra = { "sample_rate": args.sample_rate, "features": { "type": "spectrogram+melscale+log", "eps": 1e-5, "n_mels": args.n_mels, "n_fft": args.n_fft, "window_fn": "hann", "win_length": args.win_length, "hop_length": args.hop_length, "sample_rate": args.sample_rate, "win_len_t": win_len_t, "hop_len_t": hop_len_t, "f_min": args.f_min, "f_max": args.f_max, "n_stft": args.n_fft // 2 + 1 } } if
len(speakers) > 1: extra["speaker_set_filename"] = "speakers.txt" if args.add_fastspeech_targets: pitch_min, pitch_max = get_feature_value_min_max( [(out_root / n).as_posix() for n in pitch_paths.values()] ) energy_min, energy_max = get_feature_value_min_max( [(out_root / n).as_posix() for n in energy_paths.values()] ) extra["features"]["pitch_min"] = pitch_min extra["features"]["pitch_max"] = pitch_max extra["features"]["energy_min"] = energy_min extra["features"]["energy_max"] = energy_max gen_config_yaml( out_root, spm_filename=spm_filename, vocab_name=vocab_name, audio_root=out_root.as_posix(), input_channels=None, input_feat_per_channel=None, specaugment_policy=None, cmvn_type="global", gcmvn_path=gcmvn_npz_path, extra=extra ) def main(): parser = argparse.ArgumentParser() parser.add_argument("--audio-manifest-root", "-m", required=True, type=str) parser.add_argument("--output-root", "-o", required=True, type=str) parser.add_argument("--splits", "-s", type=str, nargs="+", default=["train", "dev", "test"]) parser.add_argument("--ipa-vocab", action="store_true") parser.add_argument("--use-g2p", action="store_true") parser.add_argument("--lang", type=str, default="en-us") parser.add_argument("--win-length", type=int, default=1024) parser.add_argument("--hop-length", type=int, default=256) parser.add_argument("--n-fft", type=int, default=1024) parser.add_argument("--n-mels", type=int, default=80) parser.add_argument("--f-min", type=int, default=20) parser.add_argument("--f-max", type=int, default=8000) parser.add_argument("--sample-rate", type=int, default=22050) parser.add_argument("--normalize-volume", "-n", action="store_true") parser.add_argument("--textgrid-zip", type=str, default=None) parser.add_argument("--id-to-units-tsv", type=str, default=None) parser.add_argument("--add-fastspeech-targets", action="store_true") parser.add_argument("--snr-threshold", type=float, default=None) parser.add_argument("--cer-threshold", type=float, default=None) parser.add_argument("--cer-tsv-path", type=str, default="") args = parser.parse_args() process(args) if __name__ == "__main__": main()
11,171
41.479087
80
py
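process() in the record above expects one {split}.audio.tsv per split under --audio-manifest-root. A minimal sketch of such a manifest with the columns the script actually reads ("snr" is only needed when --snr-threshold is used); the id, path, and text values are placeholders.

import csv

fields = ["id", "audio", "n_frames", "tgt_text", "speaker", "src_text"]
rows = [{
    "id": "utt_0001",
    "audio": "/data/wav/utt_0001.wav",
    "n_frames": "123456",
    "tgt_text": "hello world",
    "speaker": "spk_01",
    "src_text": "Hello world.",
}]
with open("train.audio.tsv", "w", newline="") as f:
    writer = csv.DictWriter(f, fieldnames=fields, delimiter="\t")
    writer.writeheader()
    writer.writerows(rows)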
sign-topic
sign-topic-main/examples/speech_synthesis/preprocessing/get_common_voice_audio_manifest.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import logging from pathlib import Path from collections import defaultdict from typing import List, Dict, Tuple import pandas as pd import numpy as np import torchaudio from tqdm import tqdm from examples.speech_to_text.data_utils import load_df_from_tsv, save_df_to_tsv log = logging.getLogger(__name__) SPLITS = ["train", "dev", "test"] def get_top_n( root: Path, n_speakers: int = 10, min_n_tokens: int = 5 ) -> pd.DataFrame: df = load_df_from_tsv(root / "validated.tsv") df["n_tokens"] = [len(s.split()) for s in df["sentence"]] df = df[df["n_tokens"] >= min_n_tokens] df["n_frames"] = [ torchaudio.info((root / "clips" / p).as_posix()).num_frames for p in tqdm(df["path"]) ] df["id"] = [Path(p).stem for p in df["path"]] total_duration_ms = df.groupby("client_id")["n_frames"].agg(["sum"]) total_duration_ms = total_duration_ms.sort_values("sum", ascending=False) top_n_total_duration_ms = total_duration_ms.head(n_speakers) top_n_client_ids = set(top_n_total_duration_ms.index.tolist()) df_top_n = df[df["client_id"].isin(top_n_client_ids)] return df_top_n def get_splits( df, train_split_ratio=0.99, speaker_in_all_splits=False, rand_seed=0 ) -> Tuple[Dict[str, str], List[str]]: np.random.seed(rand_seed) dev_split_ratio = (1. - train_split_ratio) / 3 grouped = list(df.groupby("client_id")) id_to_split = {} for _, cur_df in tqdm(grouped): cur_n_examples = len(cur_df) if speaker_in_all_splits and cur_n_examples < 3: continue cur_n_train = int(cur_n_examples * train_split_ratio) cur_n_dev = int(cur_n_examples * dev_split_ratio) cur_n_test = cur_n_examples - cur_n_dev - cur_n_train if speaker_in_all_splits and cur_n_dev * cur_n_test == 0: cur_n_dev, cur_n_test = 1, 1 cur_n_train = cur_n_examples - cur_n_dev - cur_n_test cur_indices = cur_df.index.tolist() cur_shuffled_indices = np.random.permutation(cur_n_examples) cur_shuffled_indices = [cur_indices[i] for i in cur_shuffled_indices] cur_indices_by_split = { "train": cur_shuffled_indices[:cur_n_train], "dev": cur_shuffled_indices[cur_n_train: cur_n_train + cur_n_dev], "test": cur_shuffled_indices[cur_n_train + cur_n_dev:] } for split in SPLITS: for i in cur_indices_by_split[split]: id_ = df["id"].loc[i] id_to_split[id_] = split return id_to_split, sorted(df["client_id"].unique()) def convert_to_wav(root: Path, filenames: List[str], target_sr=16_000): out_root = root / "wav" out_root.mkdir(exist_ok=True, parents=True) print("Converting to WAV...") for n in tqdm(filenames): in_path = (root / "clips" / n).as_posix() waveform, sr = torchaudio.load(in_path) converted, converted_sr = torchaudio.sox_effects.apply_effects_tensor( waveform, sr, [["rate", str(target_sr)], ["channels", "1"]] ) out_path = (out_root / Path(n).with_suffix(".wav").name).as_posix() torchaudio.save(out_path, converted, converted_sr, encoding="PCM_S", bits_per_sample=16) def process(args): data_root = Path(args.data_root).absolute() / args.lang # Generate TSV manifest print("Generating manifest...") df_top_n = get_top_n(data_root) id_to_split, speakers = get_splits(df_top_n) if args.convert_to_wav: convert_to_wav(data_root, df_top_n["path"].tolist()) manifest_by_split = {split: defaultdict(list) for split in SPLITS} for sample in tqdm(df_top_n.to_dict(orient="index").values()): sample_id = sample["id"] split = id_to_split[sample_id] manifest_by_split[split]["id"].append(sample_id) if 
args.convert_to_wav: audio_path = data_root / "wav" / f"{sample_id}.wav" else: audio_path = data_root / "clips" / f"{sample_id}.mp3" manifest_by_split[split]["audio"].append(audio_path.as_posix()) manifest_by_split[split]["n_frames"].append(sample["n_frames"]) manifest_by_split[split]["tgt_text"].append(sample["sentence"]) manifest_by_split[split]["speaker"].append(sample["client_id"]) manifest_by_split[split]["src_text"].append(sample["sentence"]) output_root = Path(args.output_manifest_root).absolute() output_root.mkdir(parents=True, exist_ok=True) for split in SPLITS: save_df_to_tsv( pd.DataFrame.from_dict(manifest_by_split[split]), output_root / f"{split}.audio.tsv" ) def main(): parser = argparse.ArgumentParser() parser.add_argument("--data-root", "-d", required=True, type=str) parser.add_argument("--output-manifest-root", "-m", required=True, type=str) parser.add_argument("--lang", "-l", required=True, type=str) parser.add_argument("--convert-to-wav", action="store_true") args = parser.parse_args() process(args) if __name__ == "__main__": main()
5,277
36.432624
80
py
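The per-speaker split sizes produced by get_splits() above follow a simple ratio rule (train_split_ratio=0.99, with the remainder split between dev and test). A tiny sketch of that arithmetic with a made-up utterance count.

n_examples = 1000                               # made-up utterance count for one speaker
train_split_ratio = 0.99
dev_split_ratio = (1.0 - train_split_ratio) / 3

n_train = int(n_examples * train_split_ratio)   # 990
n_dev = int(n_examples * dev_split_ratio)       # 3
n_test = n_examples - n_dev - n_train           # 7
print(n_train, n_dev, n_test)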
sign-topic
sign-topic-main/examples/speech_synthesis/preprocessing/denoise_and_vad_audio.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import logging import os import csv import tempfile from collections import defaultdict from pathlib import Path import torchaudio try: import webrtcvad except ImportError: raise ImportError("Please install py-webrtcvad: pip install webrtcvad") import pandas as pd from tqdm import tqdm from examples.speech_synthesis.preprocessing.denoiser.pretrained import master64 import examples.speech_synthesis.preprocessing.denoiser.utils as utils from examples.speech_synthesis.preprocessing.vad import ( frame_generator, vad_collector, read_wave, write_wave, FS_MS, THRESHOLD, SCALE ) from examples.speech_to_text.data_utils import save_df_to_tsv log = logging.getLogger(__name__) PATHS = ["after_denoise", "after_vad"] MIN_T = 0.05 def generate_tmp_filename(extension="txt"): return tempfile._get_default_tempdir() + "/" + \ next(tempfile._get_candidate_names()) + "." + extension def convert_sr(inpath, sr, output_path=None): if not output_path: output_path = generate_tmp_filename("wav") cmd = f"sox {inpath} -r {sr} {output_path}" os.system(cmd) return output_path def apply_vad(vad, inpath): audio, sample_rate = read_wave(inpath) frames = frame_generator(FS_MS, audio, sample_rate) frames = list(frames) segments = vad_collector(sample_rate, FS_MS, 300, vad, frames) merge_segments = list() timestamp_start = 0.0 timestamp_end = 0.0 # removing start, end, and long sequences of sils for i, segment in enumerate(segments): merge_segments.append(segment[0]) if i and timestamp_start: sil_duration = segment[1] - timestamp_end if sil_duration > THRESHOLD: merge_segments.append(int(THRESHOLD / SCALE) * (b'\x00')) else: merge_segments.append(int((sil_duration / SCALE)) * (b'\x00')) timestamp_start = segment[1] timestamp_end = segment[2] segment = b''.join(merge_segments) return segment, sample_rate def write(wav, filename, sr=16_000): # Normalize audio if it prevents clipping wav = wav / max(wav.abs().max().item(), 1) torchaudio.save(filename, wav.cpu(), sr, encoding="PCM_S", bits_per_sample=16) def process(args): # making sure we are requested either denoise or vad if not args.denoise and not args.vad: log.error("No denoise or vad is requested.") return log.info("Creating out directories...") if args.denoise: out_denoise = Path(args.output_dir).absolute().joinpath(PATHS[0]) out_denoise.mkdir(parents=True, exist_ok=True) if args.vad: out_vad = Path(args.output_dir).absolute().joinpath(PATHS[1]) out_vad.mkdir(parents=True, exist_ok=True) log.info("Loading pre-trained speech enhancement model...") model = master64().to(args.device) log.info("Building the VAD model...") vad = webrtcvad.Vad(int(args.vad_agg_level)) # preparing the output dict output_dict = defaultdict(list) log.info(f"Parsing input manifest: {args.audio_manifest}") with open(args.audio_manifest, "r") as f: manifest_dict = csv.DictReader(f, delimiter="\t") for row in tqdm(manifest_dict): filename = str(row["audio"]) final_output = filename keep_sample = True n_frames = row["n_frames"] snr = -1 if args.denoise: output_path_denoise = out_denoise.joinpath(Path(filename).name) # convert to 16khz in case we use a differet sr tmp_path = convert_sr(final_output, 16000) # loading audio file and generating the enhanced version out, sr = torchaudio.load(tmp_path) out = out.to(args.device) estimate = model(out) estimate = (1 - args.dry_wet) * estimate + args.dry_wet * out 
write(estimate[0], str(output_path_denoise), sr) snr = utils.cal_snr(out, estimate) snr = snr.cpu().detach().numpy()[0][0] final_output = str(output_path_denoise) if args.vad: output_path_vad = out_vad.joinpath(Path(filename).name) sr = torchaudio.info(final_output).sample_rate if sr in [16000, 32000, 48000]: tmp_path = final_output elif sr < 16000: tmp_path = convert_sr(final_output, 16000) elif sr < 32000: tmp_path = convert_sr(final_output, 32000) else: tmp_path = convert_sr(final_output, 48000) # apply VAD segment, sample_rate = apply_vad(vad, tmp_path) if len(segment) < sample_rate * MIN_T: keep_sample = False print(( f"WARNING: skip {filename} because it is too short " f"after VAD ({len(segment) / sample_rate} < {MIN_T})" )) else: if sample_rate != sr: tmp_path = generate_tmp_filename("wav") write_wave(tmp_path, segment, sample_rate) convert_sr(tmp_path, sr, output_path=str(output_path_vad)) else: write_wave(str(output_path_vad), segment, sample_rate) final_output = str(output_path_vad) segment, _ = torchaudio.load(final_output) n_frames = segment.size(1) if keep_sample: output_dict["id"].append(row["id"]) output_dict["audio"].append(final_output) output_dict["n_frames"].append(n_frames) output_dict["tgt_text"].append(row["tgt_text"]) output_dict["speaker"].append(row["speaker"]) output_dict["src_text"].append(row["src_text"]) output_dict["snr"].append(snr) out_tsv_path = Path(args.output_dir) / Path(args.audio_manifest).name log.info(f"Saving manifest to {out_tsv_path.as_posix()}") save_df_to_tsv(pd.DataFrame.from_dict(output_dict), out_tsv_path) def main(): parser = argparse.ArgumentParser() parser.add_argument("--audio-manifest", "-i", required=True, type=str, help="path to the input manifest.") parser.add_argument( "--output-dir", "-o", required=True, type=str, help="path to the output dir. it will contain files after denoising and" " vad" ) parser.add_argument("--vad-agg-level", "-a", type=int, default=2, help="the aggresive level of the vad [0-3].") parser.add_argument( "--dry-wet", "-dw", type=float, default=0.01, help="the level of linear interpolation between noisy and enhanced " "files." ) parser.add_argument( "--device", "-d", type=str, default="cpu", help="the device to be used for the speech enhancement model: " "cpu | cuda." ) parser.add_argument("--denoise", action="store_true", help="apply a denoising") parser.add_argument("--vad", action="store_true", help="apply a VAD") args = parser.parse_args() process(args) if __name__ == "__main__": main()
7,655
36.346341
80
py
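A toy illustration of the dry/wet interpolation applied to the enhanced signal in the record above (estimate = (1 - dry_wet) * enhanced + dry_wet * noisy); the random tensor stands in for the loaded waveform and the zero tensor for the denoiser output.

import torch

dry_wet = 0.01                          # same default as --dry-wet above
noisy = torch.randn(1, 16000)           # stand-in for the loaded waveform
enhanced = torch.zeros_like(noisy)      # stand-in for the denoiser output
mixed = (1 - dry_wet) * enhanced + dry_wet * noisy
print(mixed.shape, mixed.abs().max().item())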
sign-topic
sign-topic-main/examples/speech_synthesis/preprocessing/get_ljspeech_audio_manifest.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import logging from pathlib import Path from collections import defaultdict import pandas as pd from torchaudio.datasets import LJSPEECH from tqdm import tqdm from examples.speech_to_text.data_utils import save_df_to_tsv log = logging.getLogger(__name__) SPLITS = ["train", "dev", "test"] def process(args): out_root = Path(args.output_data_root).absolute() out_root.mkdir(parents=True, exist_ok=True) # Generate TSV manifest print("Generating manifest...") # following FastSpeech's splits dataset = LJSPEECH(out_root.as_posix(), download=True) id_to_split = {} for x in dataset._flist: id_ = x[0] speaker = id_.split("-")[0] id_to_split[id_] = { "LJ001": "test", "LJ002": "test", "LJ003": "dev" }.get(speaker, "train") manifest_by_split = {split: defaultdict(list) for split in SPLITS} progress = tqdm(enumerate(dataset), total=len(dataset)) for i, (waveform, _, utt, normalized_utt) in progress: sample_id = dataset._flist[i][0] split = id_to_split[sample_id] manifest_by_split[split]["id"].append(sample_id) audio_path = f"{dataset._path}/{sample_id}.wav" manifest_by_split[split]["audio"].append(audio_path) manifest_by_split[split]["n_frames"].append(len(waveform[0])) manifest_by_split[split]["tgt_text"].append(normalized_utt) manifest_by_split[split]["speaker"].append("ljspeech") manifest_by_split[split]["src_text"].append(utt) manifest_root = Path(args.output_manifest_root).absolute() manifest_root.mkdir(parents=True, exist_ok=True) for split in SPLITS: save_df_to_tsv( pd.DataFrame.from_dict(manifest_by_split[split]), manifest_root / f"{split}.audio.tsv" ) def main(): parser = argparse.ArgumentParser() parser.add_argument("--output-data-root", "-d", required=True, type=str) parser.add_argument("--output-manifest-root", "-m", required=True, type=str) args = parser.parse_args() process(args) if __name__ == "__main__": main()
2,288
31.239437
80
py
sign-topic
sign-topic-main/examples/speech_synthesis/preprocessing/get_vctk_audio_manifest.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import logging import numpy as np import re from pathlib import Path from collections import defaultdict import pandas as pd from torchaudio.datasets import VCTK from tqdm import tqdm from examples.speech_to_text.data_utils import save_df_to_tsv log = logging.getLogger(__name__) SPLITS = ["train", "dev", "test"] def normalize_text(text): return re.sub(r"[^a-zA-Z.?!,'\- ]", '', text) def process(args): out_root = Path(args.output_data_root).absolute() out_root.mkdir(parents=True, exist_ok=True) # Generate TSV manifest print("Generating manifest...") dataset = VCTK(out_root.as_posix(), download=False) ids = list(dataset._walker) np.random.seed(args.seed) np.random.shuffle(ids) n_train = len(ids) - args.n_dev - args.n_test _split = ["train"] * n_train + ["dev"] * args.n_dev + ["test"] * args.n_test id_to_split = dict(zip(ids, _split)) manifest_by_split = {split: defaultdict(list) for split in SPLITS} progress = tqdm(enumerate(dataset), total=len(dataset)) for i, (waveform, _, text, speaker_id, _) in progress: sample_id = dataset._walker[i] _split = id_to_split[sample_id] audio_dir = Path(dataset._path) / dataset._folder_audio / speaker_id audio_path = audio_dir / f"{sample_id}.wav" text = normalize_text(text) manifest_by_split[_split]["id"].append(sample_id) manifest_by_split[_split]["audio"].append(audio_path.as_posix()) manifest_by_split[_split]["n_frames"].append(len(waveform[0])) manifest_by_split[_split]["tgt_text"].append(text) manifest_by_split[_split]["speaker"].append(speaker_id) manifest_by_split[_split]["src_text"].append(text) manifest_root = Path(args.output_manifest_root).absolute() manifest_root.mkdir(parents=True, exist_ok=True) for _split in SPLITS: save_df_to_tsv( pd.DataFrame.from_dict(manifest_by_split[_split]), manifest_root / f"{_split}.audio.tsv" ) def main(): parser = argparse.ArgumentParser() parser.add_argument("--output-data-root", "-d", required=True, type=str) parser.add_argument("--output-manifest-root", "-m", required=True, type=str) parser.add_argument("--n-dev", default=50, type=int) parser.add_argument("--n-test", default=100, type=int) parser.add_argument("--seed", "-s", default=1234, type=int) args = parser.parse_args() process(args) if __name__ == "__main__": main()
2,685
32.575
80
py
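normalize_text() in the record above only strips characters outside a small whitelist. A quick sketch of its effect; the sample sentence is made up.

import re

def normalize_text(text):
    # Same regex as above: keep letters, ".?!,'-", and spaces.
    return re.sub(r"[^a-zA-Z.?!,'\- ]", '', text)

print(normalize_text("Please call Stella (at 555-0100)!"))
# -> "Please call Stella at -!"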
sign-topic
sign-topic-main/examples/speech_synthesis/preprocessing/get_speaker_embedding.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse from collections import defaultdict from itertools import chain from pathlib import Path import numpy as np import torchaudio import torchaudio.sox_effects as ta_sox import yaml from tqdm import tqdm from examples.speech_to_text.data_utils import load_tsv_to_dicts from examples.speech_synthesis.preprocessing.speaker_embedder import SpkrEmbedder def extract_embedding(audio_path, embedder): wav, sr = torchaudio.load(audio_path) # 2D if sr != embedder.RATE: wav, sr = ta_sox.apply_effects_tensor( wav, sr, [["rate", str(embedder.RATE)]] ) try: emb = embedder([wav[0].cuda().float()]).cpu().numpy() except RuntimeError: emb = None return emb def process(args): print("Fetching data...") raw_manifest_root = Path(args.raw_manifest_root).absolute() samples = [load_tsv_to_dicts(raw_manifest_root / (s + ".tsv")) for s in args.splits] samples = list(chain(*samples)) with open(args.config, "r") as f: config = yaml.load(f, Loader=yaml.FullLoader) with open(f"{config['audio_root']}/{config['speaker_set_filename']}") as f: speaker_to_id = {r.strip(): i for i, r in enumerate(f)} embedder = SpkrEmbedder(args.ckpt).cuda() speaker_to_cnt = defaultdict(float) speaker_to_emb = defaultdict(float) for sample in tqdm(samples, desc="extract emb"): emb = extract_embedding(sample["audio"], embedder) if emb is not None: speaker_to_cnt[sample["speaker"]] += 1 speaker_to_emb[sample["speaker"]] += emb if len(speaker_to_emb) != len(speaker_to_id): missed = set(speaker_to_id) - set(speaker_to_emb.keys()) print( f"WARNING: missing embeddings for {len(missed)} speaker:\n{missed}" ) speaker_emb_mat = np.zeros((len(speaker_to_id), len(emb)), float) for speaker in speaker_to_emb: idx = speaker_to_id[speaker] emb = speaker_to_emb[speaker] cnt = speaker_to_cnt[speaker] speaker_emb_mat[idx, :] = emb / cnt speaker_emb_name = "speaker_emb.npy" speaker_emb_path = f"{config['audio_root']}/{speaker_emb_name}" np.save(speaker_emb_path, speaker_emb_mat) config["speaker_emb_filename"] = speaker_emb_name with open(args.new_config, "w") as f: yaml.dump(config, f) def main(): parser = argparse.ArgumentParser() parser.add_argument("--raw-manifest-root", "-m", required=True, type=str) parser.add_argument("--splits", "-s", type=str, nargs="+", default=["train"]) parser.add_argument("--config", "-c", required=True, type=str) parser.add_argument("--new-config", "-n", required=True, type=str) parser.add_argument("--ckpt", required=True, type=str, help="speaker embedder checkpoint") args = parser.parse_args() process(args) if __name__ == "__main__": main()
3,116
33.633333
81
py
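A toy version of the per-speaker running-sum averaging used above to build speaker_emb_mat; the speaker names, embedding dimension, and fake embeddings are made up for illustration.

import numpy as np
from collections import defaultdict

speaker_to_id = {"spk_a": 0, "spk_b": 1}
emb_dim = 4
speaker_to_cnt = defaultdict(float)
speaker_to_emb = defaultdict(float)
fake_embeddings = [
    ("spk_a", np.ones(emb_dim)),
    ("spk_a", 3 * np.ones(emb_dim)),
    ("spk_b", np.full(emb_dim, 2.0)),
]
for speaker, emb in fake_embeddings:
    speaker_to_cnt[speaker] += 1
    speaker_to_emb[speaker] += emb

speaker_emb_mat = np.zeros((len(speaker_to_id), emb_dim), float)
for speaker, emb in speaker_to_emb.items():
    speaker_emb_mat[speaker_to_id[speaker]] = emb / speaker_to_cnt[speaker]
print(speaker_emb_mat)   # both rows average to 2.0 in this toy case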
sign-topic
sign-topic-main/examples/speech_synthesis/preprocessing/denoiser/resample.py
# Copyright (c) Facebook, Inc. and its affiliates. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. # author: adefossez import math import torch as th from torch.nn import functional as F def sinc(t): """sinc. :param t: the input tensor """ return th.where(t == 0, th.tensor(1., device=t.device, dtype=t.dtype), th.sin(t) / t) def kernel_upsample2(zeros=56): """kernel_upsample2. """ win = th.hann_window(4 * zeros + 1, periodic=False) winodd = win[1::2] t = th.linspace(-zeros + 0.5, zeros - 0.5, 2 * zeros) t *= math.pi kernel = (sinc(t) * winodd).view(1, 1, -1) return kernel def upsample2(x, zeros=56): """ Upsampling the input by 2 using sinc interpolation. Smith, Julius, and Phil Gossett. "A flexible sampling-rate conversion method." ICASSP'84. IEEE International Conference on Acoustics, Speech, and Signal Processing. Vol. 9. IEEE, 1984. """ *other, time = x.shape kernel = kernel_upsample2(zeros).to(x) out = F.conv1d(x.view(-1, 1, time), kernel, padding=zeros)[..., 1:].view( *other, time ) y = th.stack([x, out], dim=-1) return y.view(*other, -1) def kernel_downsample2(zeros=56): """kernel_downsample2. """ win = th.hann_window(4 * zeros + 1, periodic=False) winodd = win[1::2] t = th.linspace(-zeros + 0.5, zeros - 0.5, 2 * zeros) t.mul_(math.pi) kernel = (sinc(t) * winodd).view(1, 1, -1) return kernel def downsample2(x, zeros=56): """ Downsampling the input by 2 using sinc interpolation. Smith, Julius, and Phil Gossett. "A flexible sampling-rate conversion method." ICASSP'84. IEEE International Conference on Acoustics, Speech, and Signal Processing. Vol. 9. IEEE, 1984. """ if x.shape[-1] % 2 != 0: x = F.pad(x, (0, 1)) xeven = x[..., ::2] xodd = x[..., 1::2] *other, time = xodd.shape kernel = kernel_downsample2(zeros).to(x) out = xeven + F.conv1d( xodd.view(-1, 1, time), kernel, padding=zeros )[..., :-1].view(*other, time) return out.view(*other, -1).mul(0.5)
2,226
26.8375
89
py
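A shape-level sketch of the sinc resampling pair above: upsample2 doubles the time dimension and downsample2 halves it again. It assumes the module is importable from the repository root (the examples tree on PYTHONPATH) and that torch is installed.

import torch as th

from examples.speech_synthesis.preprocessing.denoiser.resample import (
    downsample2,
    upsample2,
)

x = th.randn(1, 1, 1024)               # (batch, channels, time)
up = upsample2(x)                      # sinc-interpolated 2x upsampling
down = downsample2(up)                 # back to the original length
print(x.shape, up.shape, down.shape)   # time goes 1024 -> 2048 -> 1024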
sign-topic
sign-topic-main/examples/speech_synthesis/preprocessing/denoiser/demucs.py
# Copyright (c) Facebook, Inc. and its affiliates. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. # author: adefossez import math import time import torch as th from torch import nn from torch.nn import functional as F from .resample import downsample2, upsample2 from .utils import capture_init class BLSTM(nn.Module): def __init__(self, dim, layers=2, bi=True): super().__init__() klass = nn.LSTM self.lstm = klass( bidirectional=bi, num_layers=layers, hidden_size=dim, input_size=dim ) self.linear = None if bi: self.linear = nn.Linear(2 * dim, dim) def forward(self, x, hidden=None): x, hidden = self.lstm(x, hidden) if self.linear: x = self.linear(x) return x, hidden def rescale_conv(conv, reference): std = conv.weight.std().detach() scale = (std / reference)**0.5 conv.weight.data /= scale if conv.bias is not None: conv.bias.data /= scale def rescale_module(module, reference): for sub in module.modules(): if isinstance(sub, (nn.Conv1d, nn.ConvTranspose1d)): rescale_conv(sub, reference) class Demucs(nn.Module): """ Demucs speech enhancement model. Args: - chin (int): number of input channels. - chout (int): number of output channels. - hidden (int): number of initial hidden channels. - depth (int): number of layers. - kernel_size (int): kernel size for each layer. - stride (int): stride for each layer. - causal (bool): if false, uses BiLSTM instead of LSTM. - resample (int): amount of resampling to apply to the input/output. Can be one of 1, 2 or 4. - growth (float): number of channels is multiplied by this for every layer. - max_hidden (int): maximum number of channels. Can be useful to control the size/speed of the model. - normalize (bool): if true, normalize the input. - glu (bool): if true uses GLU instead of ReLU in 1x1 convolutions. - rescale (float): controls custom weight initialization. See https://arxiv.org/abs/1911.13254. - floor (float): stability flooring when normalizing. """ @capture_init def __init__(self, chin=1, chout=1, hidden=48, depth=5, kernel_size=8, stride=4, causal=True, resample=4, growth=2, max_hidden=10_000, normalize=True, glu=True, rescale=0.1, floor=1e-3): super().__init__() if resample not in [1, 2, 4]: raise ValueError("Resample should be 1, 2 or 4.") self.chin = chin self.chout = chout self.hidden = hidden self.depth = depth self.kernel_size = kernel_size self.stride = stride self.causal = causal self.floor = floor self.resample = resample self.normalize = normalize self.encoder = nn.ModuleList() self.decoder = nn.ModuleList() activation = nn.GLU(1) if glu else nn.ReLU() ch_scale = 2 if glu else 1 for index in range(depth): encode = [] encode += [ nn.Conv1d(chin, hidden, kernel_size, stride), nn.ReLU(), nn.Conv1d(hidden, hidden * ch_scale, 1), activation, ] self.encoder.append(nn.Sequential(*encode)) decode = [] decode += [ nn.Conv1d(hidden, ch_scale * hidden, 1), activation, nn.ConvTranspose1d(hidden, chout, kernel_size, stride), ] if index > 0: decode.append(nn.ReLU()) self.decoder.insert(0, nn.Sequential(*decode)) chout = hidden chin = hidden hidden = min(int(growth * hidden), max_hidden) self.lstm = BLSTM(chin, bi=not causal) if rescale: rescale_module(self, reference=rescale) def valid_length(self, length): """ Return the nearest valid length to use with the model so that there is no time steps left over in a convolutions, e.g. for all layers, size of the input - kernel_size % stride = 0. 
If the mixture has a valid length, the estimated sources will have exactly the same length. """ length = math.ceil(length * self.resample) for _ in range(self.depth): length = math.ceil((length - self.kernel_size) / self.stride) + 1 length = max(length, 1) for _ in range(self.depth): length = (length - 1) * self.stride + self.kernel_size length = int(math.ceil(length / self.resample)) return int(length) @property def total_stride(self): return self.stride ** self.depth // self.resample def forward(self, mix): if mix.dim() == 2: mix = mix.unsqueeze(1) if self.normalize: mono = mix.mean(dim=1, keepdim=True) std = mono.std(dim=-1, keepdim=True) mix = mix / (self.floor + std) else: std = 1 length = mix.shape[-1] x = mix x = F.pad(x, (0, self.valid_length(length) - length)) if self.resample == 2: x = upsample2(x) elif self.resample == 4: x = upsample2(x) x = upsample2(x) skips = [] for encode in self.encoder: x = encode(x) skips.append(x) x = x.permute(2, 0, 1) x, _ = self.lstm(x) x = x.permute(1, 2, 0) for decode in self.decoder: skip = skips.pop(-1) x = x + skip[..., :x.shape[-1]] x = decode(x) if self.resample == 2: x = downsample2(x) elif self.resample == 4: x = downsample2(x) x = downsample2(x) x = x[..., :length] return std * x def fast_conv(conv, x): """ Faster convolution evaluation if either kernel size is 1 or length of sequence is 1. """ batch, chin, length = x.shape chout, chin, kernel = conv.weight.shape assert batch == 1 if kernel == 1: x = x.view(chin, length) out = th.addmm(conv.bias.view(-1, 1), conv.weight.view(chout, chin), x) elif length == kernel: x = x.view(chin * kernel, 1) out = th.addmm(conv.bias.view(-1, 1), conv.weight.view(chout, chin * kernel), x) else: out = conv(x) return out.view(batch, chout, -1) class DemucsStreamer: """ Streaming implementation for Demucs. It supports being fed with any amount of audio at a time. You will get back as much audio as possible at that point. Args: - demucs (Demucs): Demucs model. - dry (float): amount of dry (e.g. input) signal to keep. 0 is maximum noise removal, 1 just returns the input signal. Small values > 0 allows to limit distortions. - num_frames (int): number of frames to process at once. Higher values will increase overall latency but improve the real time factor. - resample_lookahead (int): extra lookahead used for the resampling. - resample_buffer (int): size of the buffer of previous inputs/outputs kept for resampling. 
""" def __init__(self, demucs, dry=0, num_frames=1, resample_lookahead=64, resample_buffer=256): device = next(iter(demucs.parameters())).device self.demucs = demucs self.lstm_state = None self.conv_state = None self.dry = dry self.resample_lookahead = resample_lookahead resample_buffer = min(demucs.total_stride, resample_buffer) self.resample_buffer = resample_buffer self.frame_length = demucs.valid_length(1) + \ demucs.total_stride * (num_frames - 1) self.total_length = self.frame_length + self.resample_lookahead self.stride = demucs.total_stride * num_frames self.resample_in = th.zeros(demucs.chin, resample_buffer, device=device) self.resample_out = th.zeros( demucs.chin, resample_buffer, device=device ) self.frames = 0 self.total_time = 0 self.variance = 0 self.pending = th.zeros(demucs.chin, 0, device=device) bias = demucs.decoder[0][2].bias weight = demucs.decoder[0][2].weight chin, chout, kernel = weight.shape self._bias = bias.view(-1, 1).repeat(1, kernel).view(-1, 1) self._weight = weight.permute(1, 2, 0).contiguous() def reset_time_per_frame(self): self.total_time = 0 self.frames = 0 @property def time_per_frame(self): return self.total_time / self.frames def flush(self): """ Flush remaining audio by padding it with zero. Call this when you have no more input and want to get back the last chunk of audio. """ pending_length = self.pending.shape[1] padding = th.zeros( self.demucs.chin, self.total_length, device=self.pending.device ) out = self.feed(padding) return out[:, :pending_length] def feed(self, wav): """ Apply the model to mix using true real time evaluation. Normalization is done online as is the resampling. """ begin = time.time() demucs = self.demucs resample_buffer = self.resample_buffer stride = self.stride resample = demucs.resample if wav.dim() != 2: raise ValueError("input wav should be two dimensional.") chin, _ = wav.shape if chin != demucs.chin: raise ValueError(f"Expected {demucs.chin} channels, got {chin}") self.pending = th.cat([self.pending, wav], dim=1) outs = [] while self.pending.shape[1] >= self.total_length: self.frames += 1 frame = self.pending[:, :self.total_length] dry_signal = frame[:, :stride] if demucs.normalize: mono = frame.mean(0) variance = (mono**2).mean() self.variance = variance / self.frames + \ (1 - 1 / self.frames) * self.variance frame = frame / (demucs.floor + math.sqrt(self.variance)) frame = th.cat([self.resample_in, frame], dim=-1) self.resample_in[:] = frame[:, stride - resample_buffer:stride] if resample == 4: frame = upsample2(upsample2(frame)) elif resample == 2: frame = upsample2(frame) # remove pre sampling buffer frame = frame[:, resample * resample_buffer:] # remove extra samples after window frame = frame[:, :resample * self.frame_length] out, extra = self._separate_frame(frame) padded_out = th.cat([self.resample_out, out, extra], 1) self.resample_out[:] = out[:, -resample_buffer:] if resample == 4: out = downsample2(downsample2(padded_out)) elif resample == 2: out = downsample2(padded_out) else: out = padded_out out = out[:, resample_buffer // resample:] out = out[:, :stride] if demucs.normalize: out *= math.sqrt(self.variance) out = self.dry * dry_signal + (1 - self.dry) * out outs.append(out) self.pending = self.pending[:, stride:] self.total_time += time.time() - begin if outs: out = th.cat(outs, 1) else: out = th.zeros(chin, 0, device=wav.device) return out def _separate_frame(self, frame): demucs = self.demucs skips = [] next_state = [] first = self.conv_state is None stride = self.stride * demucs.resample x = 
frame[None] for idx, encode in enumerate(demucs.encoder): stride //= demucs.stride length = x.shape[2] if idx == demucs.depth - 1: # This is sligthly faster for the last conv x = fast_conv(encode[0], x) x = encode[1](x) x = fast_conv(encode[2], x) x = encode[3](x) else: if not first: prev = self.conv_state.pop(0) prev = prev[..., stride:] tgt = (length - demucs.kernel_size) // demucs.stride + 1 missing = tgt - prev.shape[-1] offset = length - demucs.kernel_size - \ demucs.stride * (missing - 1) x = x[..., offset:] x = encode[1](encode[0](x)) x = fast_conv(encode[2], x) x = encode[3](x) if not first: x = th.cat([prev, x], -1) next_state.append(x) skips.append(x) x = x.permute(2, 0, 1) x, self.lstm_state = demucs.lstm(x, self.lstm_state) x = x.permute(1, 2, 0) # In the following, x contains only correct samples, i.e. the one # for which each time position is covered by two window of the upper # layer. extra contains extra samples to the right, and is used only as # a better padding for the online resampling. extra = None for idx, decode in enumerate(demucs.decoder): skip = skips.pop(-1) x += skip[..., :x.shape[-1]] x = fast_conv(decode[0], x) x = decode[1](x) if extra is not None: skip = skip[..., x.shape[-1]:] extra += skip[..., :extra.shape[-1]] extra = decode[2](decode[1](decode[0](extra))) x = decode[2](x) next_state.append( x[..., -demucs.stride:] - decode[2].bias.view(-1, 1) ) if extra is None: extra = x[..., -demucs.stride:] else: extra[..., :demucs.stride] += next_state[-1] x = x[..., :-demucs.stride] if not first: prev = self.conv_state.pop(0) x[..., :demucs.stride] += prev if idx != demucs.depth - 1: x = decode[3](x) extra = decode[3](extra) self.conv_state = next_state return x[0], extra[0] def test(): import argparse parser = argparse.ArgumentParser( "denoiser.demucs", description="Benchmark the streaming Demucs implementation, as well as " "checking the delta with the offline implementation.") parser.add_argument("--depth", default=5, type=int) parser.add_argument("--resample", default=4, type=int) parser.add_argument("--hidden", default=48, type=int) parser.add_argument("--sample_rate", default=16000, type=float) parser.add_argument("--device", default="cpu") parser.add_argument("-t", "--num_threads", type=int) parser.add_argument("-f", "--num_frames", type=int, default=1) args = parser.parse_args() if args.num_threads: th.set_num_threads(args.num_threads) sr = args.sample_rate sr_ms = sr / 1000 demucs = Demucs( depth=args.depth, hidden=args.hidden, resample=args.resample ).to(args.device) x = th.randn(1, int(sr * 4)).to(args.device) out = demucs(x[None])[0] streamer = DemucsStreamer(demucs, num_frames=args.num_frames) out_rt = [] frame_size = streamer.total_length with th.no_grad(): while x.shape[1] > 0: out_rt.append(streamer.feed(x[:, :frame_size])) x = x[:, frame_size:] frame_size = streamer.demucs.total_stride out_rt.append(streamer.flush()) out_rt = th.cat(out_rt, 1) model_size = sum(p.numel() for p in demucs.parameters()) * 4 / 2**20 initial_lag = streamer.total_length / sr_ms tpf = 1000 * streamer.time_per_frame print(f"model size: {model_size:.1f}MB, ", end='') print(f"delta batch/streaming: {th.norm(out - out_rt) / th.norm(out):.2%}") print(f"initial lag: {initial_lag:.1f}ms, ", end='') print(f"stride: {streamer.stride * args.num_frames / sr_ms:.1f}ms") print(f"time per frame: {tpf:.1f}ms, ", end='') rtf = (1000 * streamer.time_per_frame) / (streamer.stride / sr_ms) print(f"RTF: {rtf:.2f}") print(f"Total lag with computation: {initial_lag + tpf:.1f}ms") if __name__ == 
"__main__": test()
16,989
34.843882
83
py
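A pure-Python rendering of the valid_length() arithmetic from the Demucs record above, using its default hyper-parameters; handy for seeing how much padding forward() will add without instantiating the model. This mirrors the method's formula and is not an independent implementation.

import math

def valid_length(length, depth=5, kernel_size=8, stride=4, resample=4):
    # Same arithmetic as Demucs.valid_length() above.
    length = math.ceil(length * resample)
    for _ in range(depth):
        length = math.ceil((length - kernel_size) / stride) + 1
        length = max(length, 1)
    for _ in range(depth):
        length = (length - 1) * stride + kernel_size
    return int(math.ceil(length / resample))

for n in (16000, 32000):
    print(n, "->", valid_length(n))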
sign-topic
sign-topic-main/examples/speech_synthesis/preprocessing/denoiser/utils.py
# Copyright (c) Facebook, Inc. and its affiliates. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. # author: adefossez import functools import logging from contextlib import contextmanager import inspect import time logger = logging.getLogger(__name__) EPS = 1e-8 def capture_init(init): """capture_init. Decorate `__init__` with this, and you can then recover the *args and **kwargs passed to it in `self._init_args_kwargs` """ @functools.wraps(init) def __init__(self, *args, **kwargs): self._init_args_kwargs = (args, kwargs) init(self, *args, **kwargs) return __init__ def deserialize_model(package, strict=False): """deserialize_model. """ klass = package['class'] if strict: model = klass(*package['args'], **package['kwargs']) else: sig = inspect.signature(klass) kw = package['kwargs'] for key in list(kw): if key not in sig.parameters: logger.warning("Dropping inexistant parameter %s", key) del kw[key] model = klass(*package['args'], **kw) model.load_state_dict(package['state']) return model def copy_state(state): return {k: v.cpu().clone() for k, v in state.items()} def serialize_model(model): args, kwargs = model._init_args_kwargs state = copy_state(model.state_dict()) return {"class": model.__class__, "args": args, "kwargs": kwargs, "state": state} @contextmanager def swap_state(model, state): """ Context manager that swaps the state of a model, e.g: # model is in old state with swap_state(model, new_state): # model in new state # model back to old state """ old_state = copy_state(model.state_dict()) model.load_state_dict(state) try: yield finally: model.load_state_dict(old_state) def pull_metric(history, name): out = [] for metrics in history: if name in metrics: out.append(metrics[name]) return out class LogProgress: """ Sort of like tqdm but using log lines and not as real time. Args: - logger: logger obtained from `logging.getLogger`, - iterable: iterable object to wrap - updates (int): number of lines that will be printed, e.g. if `updates=5`, log every 1/5th of the total length. - total (int): length of the iterable, in case it does not support `len`. - name (str): prefix to use in the log. - level: logging level (like `logging.INFO`). """ def __init__(self, logger, iterable, updates=5, total=None, name="LogProgress", level=logging.INFO): self.iterable = iterable self.total = total or len(iterable) self.updates = updates self.name = name self.logger = logger self.level = level def update(self, **infos): self._infos = infos def __iter__(self): self._iterator = iter(self.iterable) self._index = -1 self._infos = {} self._begin = time.time() return self def __next__(self): self._index += 1 try: value = next(self._iterator) except StopIteration: raise else: return value finally: log_every = max(1, self.total // self.updates) # logging is delayed by 1 it, in order to have the metrics from update if self._index >= 1 and self._index % log_every == 0: self._log() def _log(self): self._speed = (1 + self._index) / (time.time() - self._begin) infos = " | ".join(f"{k.capitalize()} {v}" for k, v in self._infos.items()) if self._speed < 1e-4: speed = "oo sec/it" elif self._speed < 0.1: speed = f"{1/self._speed:.1f} sec/it" else: speed = f"{self._speed:.1f} it/sec" out = f"{self.name} | {self._index}/{self.total} | {speed}" if infos: out += " | " + infos self.logger.log(self.level, out) def colorize(text, color): """ Display text with some ANSI color in the terminal. 
""" code = f"\033[{color}m" restore = "\033[0m" return "".join([code, text, restore]) def bold(text): """ Display text in bold in the terminal. """ return colorize(text, "1") def cal_snr(lbl, est): import torch y = 10.0 * torch.log10( torch.sum(lbl**2, dim=-1) / (torch.sum((est-lbl)**2, dim=-1) + EPS) + EPS ) return y
4,770
25.954802
85
py
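A minimal illustration of capture_init from the record above: the decorated __init__ records its arguments in _init_args_kwargs so serialize_model/deserialize_model can rebuild the object later. The Toy class is made up, and the import assumes the examples tree is on PYTHONPATH.

from examples.speech_synthesis.preprocessing.denoiser.utils import capture_init

class Toy:
    @capture_init
    def __init__(self, hidden=48, depth=5):
        self.hidden = hidden
        self.depth = depth

t = Toy(hidden=64)
print(t._init_args_kwargs)   # ((), {'hidden': 64})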
sign-topic
sign-topic-main/examples/speech_synthesis/preprocessing/denoiser/pretrained.py
# Copyright (c) Facebook, Inc. and its affiliates. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. # author: adefossez import logging import torch.hub from .demucs import Demucs from .utils import deserialize_model logger = logging.getLogger(__name__) ROOT = "https://dl.fbaipublicfiles.com/adiyoss/denoiser/" DNS_48_URL = ROOT + "dns48-11decc9d8e3f0998.th" DNS_64_URL = ROOT + "dns64-a7761ff99a7d5bb6.th" MASTER_64_URL = ROOT + "master64-8a5dfb4bb92753dd.th" def _demucs(pretrained, url, **kwargs): model = Demucs(**kwargs) if pretrained: state_dict = torch.hub.load_state_dict_from_url(url, map_location='cpu') model.load_state_dict(state_dict) return model def dns48(pretrained=True): return _demucs(pretrained, DNS_48_URL, hidden=48) def dns64(pretrained=True): return _demucs(pretrained, DNS_64_URL, hidden=64) def master64(pretrained=True): return _demucs(pretrained, MASTER_64_URL, hidden=64) def add_model_flags(parser): group = parser.add_mutually_exclusive_group(required=False) group.add_argument( "-m", "--model_path", help="Path to local trained model." ) group.add_argument( "--dns48", action="store_true", help="Use pre-trained real time H=48 model trained on DNS." ) group.add_argument( "--dns64", action="store_true", help="Use pre-trained real time H=64 model trained on DNS." ) group.add_argument( "--master64", action="store_true", help="Use pre-trained real time H=64 model trained on DNS and Valentini." ) def get_model(args): """ Load local model package or torchhub pre-trained model. """ if args.model_path: logger.info("Loading model from %s", args.model_path) pkg = torch.load(args.model_path) model = deserialize_model(pkg) elif args.dns64: logger.info("Loading pre-trained real time H=64 model trained on DNS.") model = dns64() elif args.master64: logger.info( "Loading pre-trained real time H=64 model trained on DNS and Valentini." ) model = master64() else: logger.info("Loading pre-trained real time H=48 model trained on DNS.") model = dns48() logger.debug(model) return model
2,384
28.085366
84
py
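A minimal usage sketch of the pre-trained loader above; constructing master64() downloads the checkpoint from MASTER_64_URL via torch.hub, so network access is needed on first use. The random tensor stands in for (channels, time) audio at 16 kHz, mirroring how the denoising script calls the model.

import torch

from examples.speech_synthesis.preprocessing.denoiser.pretrained import master64

model = master64().eval()                # downloads the checkpoint on construction
noisy = torch.randn(1, 16000)            # stand-in for (channels, time) audio
with torch.no_grad():
    enhanced = model(noisy)              # same call pattern as the VAD/denoise script
print(enhanced.shape)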
sign-topic
sign-topic-main/examples/speech_synthesis/preprocessing/speaker_embedder/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import librosa import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.data import torchaudio EMBEDDER_PARAMS = { 'num_mels': 40, 'n_fft': 512, 'emb_dim': 256, 'lstm_hidden': 768, 'lstm_layers': 3, 'window': 80, 'stride': 40, } def set_requires_grad(nets, requires_grad=False): """Set requies_grad=Fasle for all the networks to avoid unnecessary computations Parameters: nets (network list) -- a list of networks requires_grad (bool) -- whether the networks require gradients or not """ if not isinstance(nets, list): nets = [nets] for net in nets: if net is not None: for param in net.parameters(): param.requires_grad = requires_grad class LinearNorm(nn.Module): def __init__(self, hp): super(LinearNorm, self).__init__() self.linear_layer = nn.Linear(hp["lstm_hidden"], hp["emb_dim"]) def forward(self, x): return self.linear_layer(x) class SpeechEmbedder(nn.Module): def __init__(self, hp): super(SpeechEmbedder, self).__init__() self.lstm = nn.LSTM(hp["num_mels"], hp["lstm_hidden"], num_layers=hp["lstm_layers"], batch_first=True) self.proj = LinearNorm(hp) self.hp = hp def forward(self, mel): # (num_mels, T) -> (num_mels, T', window) mels = mel.unfold(1, self.hp["window"], self.hp["stride"]) mels = mels.permute(1, 2, 0) # (T', window, num_mels) x, _ = self.lstm(mels) # (T', window, lstm_hidden) x = x[:, -1, :] # (T', lstm_hidden), use last frame only x = self.proj(x) # (T', emb_dim) x = x / torch.norm(x, p=2, dim=1, keepdim=True) # (T', emb_dim) x = x.mean(dim=0) if x.norm(p=2) != 0: x = x / x.norm(p=2) return x class SpkrEmbedder(nn.Module): RATE = 16000 def __init__( self, embedder_path, embedder_params=EMBEDDER_PARAMS, rate=16000, hop_length=160, win_length=400, pad=False, ): super(SpkrEmbedder, self).__init__() embedder_pt = torch.load(embedder_path, map_location="cpu") self.embedder = SpeechEmbedder(embedder_params) self.embedder.load_state_dict(embedder_pt) self.embedder.eval() set_requires_grad(self.embedder, requires_grad=False) self.embedder_params = embedder_params self.register_buffer('mel_basis', torch.from_numpy( librosa.filters.mel( sr=self.RATE, n_fft=self.embedder_params["n_fft"], n_mels=self.embedder_params["num_mels"]) ) ) self.resample = None if rate != self.RATE: self.resample = torchaudio.transforms.Resample(rate, self.RATE) self.hop_length = hop_length self.win_length = win_length self.pad = pad def get_mel(self, y): if self.pad and y.shape[-1] < 14000: y = F.pad(y, (0, 14000 - y.shape[-1])) window = torch.hann_window(self.win_length).to(y) y = torch.stft(y, n_fft=self.embedder_params["n_fft"], hop_length=self.hop_length, win_length=self.win_length, window=window) magnitudes = torch.norm(y, dim=-1, p=2) ** 2 mel = torch.log10(self.mel_basis @ magnitudes + 1e-6) return mel def forward(self, inputs): dvecs = [] for wav in inputs: mel = self.get_mel(wav) if mel.dim() == 3: mel = mel.squeeze(0) dvecs += [self.embedder(mel)] dvecs = torch.stack(dvecs) dvec = torch.mean(dvecs, dim=0) dvec = dvec / torch.norm(dvec) return dvec
4,103
29.176471
78
py
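A small sketch of the windowing used in SpeechEmbedder.forward() above: a (num_mels, T) mel spectrogram is cut into overlapping windows with tensor.unfold before the LSTM. The toy sizes below reuse the defaults from EMBEDDER_PARAMS; the time length is made up.

import torch

num_mels, T = 40, 200                    # num_mels as in EMBEDDER_PARAMS; T is made up
window, stride = 80, 40                  # same as EMBEDDER_PARAMS above
mel = torch.randn(num_mels, T)
mels = mel.unfold(1, window, stride)     # (num_mels, T', window)
mels = mels.permute(1, 2, 0)             # (T', window, num_mels) for the LSTM
print(mels.shape)                        # T' = (200 - 80) // 40 + 1 = 4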
sign-topic
sign-topic-main/examples/byte_level_bpe/gru_transformer.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import torch.nn as nn
import torch.nn.functional as F
from fairseq.models import register_model, register_model_architecture
from fairseq.models.transformer import TransformerEncoder, TransformerModel


@register_model("gru_transformer")
class GRUTransformerModel(TransformerModel):
    @classmethod
    def build_encoder(cls, args, src_dict, embed_tokens):
        return GRUTransformerEncoder(args, src_dict, embed_tokens)


class GRUTransformerEncoder(TransformerEncoder):
    def __init__(self, args, dictionary, embed_tokens):
        super().__init__(args, dictionary, embed_tokens)
        self.emb_ctx = nn.GRU(
            input_size=embed_tokens.embedding_dim,
            hidden_size=embed_tokens.embedding_dim // 2,
            num_layers=1,
            bidirectional=True,
        )

    def forward_embedding(self, src_tokens):
        # embed tokens and positions
        x = embed = self.embed_scale * self.embed_tokens(src_tokens)
        if self.embed_positions is not None:
            x = embed + self.embed_positions(src_tokens)

        # contextualize embeddings
        x = x.transpose(0, 1)
        x = self.dropout_module(x)
        x, _ = self.emb_ctx.forward(x)
        x = x.transpose(0, 1)

        if self.layernorm_embedding is not None:
            x = self.layernorm_embedding(x)
        x = self.dropout_module(x)
        return x, embed


@register_model_architecture("gru_transformer", "gru_transformer")
def gru_transformer_base_architecture(args):
    args.encoder_embed_path = getattr(args, "encoder_embed_path", None)
    args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
    args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048)
    args.encoder_layers = getattr(args, "encoder_layers", 6)
    args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8)
    args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
    args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False)
    args.decoder_embed_path = getattr(args, "decoder_embed_path", None)
    args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim)
    args.decoder_ffn_embed_dim = getattr(
        args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim
    )
    args.decoder_layers = getattr(args, "decoder_layers", 6)
    args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
    args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False)
    args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
    args.attention_dropout = getattr(args, "attention_dropout", 0.0)
    args.activation_dropout = getattr(args, "activation_dropout", 0.0)
    args.activation_fn = getattr(args, "activation_fn", "relu")
    args.dropout = getattr(args, "dropout", 0.1)
    args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
    args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
    args.share_decoder_input_output_embed = getattr(
        args, "share_decoder_input_output_embed", False
    )
    args.share_all_embeddings = getattr(args, "share_all_embeddings", False)
    args.no_token_positional_embeddings = getattr(
        args, "no_token_positional_embeddings", False
    )
    args.adaptive_input = getattr(args, "adaptive_input", False)
    args.no_cross_attention = getattr(args, "no_cross_attention", False)
    args.cross_self_attention = getattr(args, "cross_self_attention", False)
    args.layer_wise_attention = getattr(args, "layer_wise_attention", False)
    args.decoder_output_dim = getattr(
        args, "decoder_output_dim", args.decoder_embed_dim
    )
    args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
    args.no_scale_embedding = getattr(args, "no_scale_embedding", False)
    args.layernorm_embedding = getattr(args, "layernorm_embedding", False)


@register_model_architecture("gru_transformer", "gru_transformer_big")
def gru_transformer_big(args):
    args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024)
    args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4096)
    args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16)
    args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
    args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 1024)
    args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 4096)
    args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16)
    args.dropout = getattr(args, "dropout", 0.3)
    gru_transformer_base_architecture(args)
5,027
45.555556
87
py
sign-topic
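A minimal, self-contained sketch (not the fairseq module above) of the embedding contextualization used by GRUTransformerEncoder: a single-layer bidirectional GRU whose per-direction hidden size is half the embedding dimension, so the concatenated forward/backward outputs keep the embedding width. Toy sizes and variable names are assumptions.

import torch
import torch.nn as nn

embed_dim, vocab_size, bsz, seq_len = 8, 100, 2, 5  # toy sizes (assumptions)
embed = nn.Embedding(vocab_size, embed_dim)
emb_ctx = nn.GRU(
    input_size=embed_dim,
    hidden_size=embed_dim // 2,  # half per direction
    num_layers=1,
    bidirectional=True,
)

tokens = torch.randint(0, vocab_size, (bsz, seq_len))
x = embed(tokens)           # B x T x C
x = x.transpose(0, 1)       # T x B x C (nn.GRU default layout)
x, _ = emb_ctx(x)           # still T x B x C because 2 * (C // 2) == C
x = x.transpose(0, 1)
print(x.shape)              # torch.Size([2, 5, 8])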
sign-topic-main/examples/simultaneous_translation/modules/monotonic_multihead_attention.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import math

import torch
from torch import Tensor
import torch.nn as nn

from examples.simultaneous_translation.utils.p_choose_strategy import (
    learnable_p_choose,
    waitk_p_choose
)

from examples.simultaneous_translation.utils.monotonic_attention import (
    expected_alignment_from_p_choose,
    expected_soft_attention,
    mass_preservation,
)
from fairseq.modules import MultiheadAttention

from . import register_monotonic_attention
from typing import Dict, Optional


@register_monotonic_attention("hard_aligned")
class MonotonicAttention(MultiheadAttention):
    """
    Abstract class of monotonic attentions
    """
    k_in_proj: Dict[str, nn.Linear]
    q_in_proj: Dict[str, nn.Linear]

    def __init__(self, args):
        super().__init__(
            embed_dim=args.decoder_embed_dim,
            num_heads=args.decoder_attention_heads,
            kdim=getattr(args, "encoder_embed_dim", None),
            vdim=getattr(args, "encoder_embed_dim", None),
            dropout=args.attention_dropout,
            encoder_decoder_attention=True,
        )

        self.soft_attention = False

        self.eps = getattr(args, "attention_eps", 1e-6)
        self.mass_preservation = getattr(args, "mass_preservation", True)

        self.noise_type = args.noise_type
        self.noise_mean = args.noise_mean
        self.noise_var = args.noise_var

        self.energy_bias_init = args.energy_bias_init
        self.energy_bias = (
            nn.Parameter(self.energy_bias_init * torch.ones([1]))
            if args.energy_bias is True
            else 0
        )

        self.k_in_proj = {"monotonic": self.k_proj}
        self.q_in_proj = {"monotonic": self.q_proj}
        self.chunk_size = None

    @staticmethod
    def add_args(parser):
        # fmt: off
        parser.add_argument('--no-mass-preservation', action="store_false",
                            dest="mass_preservation",
                            help='Do not stay on the last token when decoding')
        parser.add_argument('--mass-preservation', action="store_true",
                            dest="mass_preservation",
                            help='Stay on the last token when decoding')
        parser.set_defaults(mass_preservation=True)

        parser.add_argument('--noise-var', type=float, default=1.0,
                            help='Variance of discreteness noise')
        parser.add_argument('--noise-mean', type=float, default=0.0,
                            help='Mean of discreteness noise')
        parser.add_argument('--noise-type', type=str, default="flat",
                            help='Type of discreteness noise')

        parser.add_argument('--energy-bias', action="store_true",
                            default=False,
                            help='Bias for energy')
        parser.add_argument('--energy-bias-init', type=float, default=-2.0,
                            help='Initial value of the bias for energy')
        parser.add_argument('--attention-eps', type=float, default=1e-6,
                            help='Epsilon when calculating expected attention')

    def energy_from_qk(
        self,
        query: Tensor,
        key: Tensor,
        energy_type: str,
        key_padding_mask: Optional[Tensor] = None,
        bias: int = 0
    ):
        """
        Compute energy from query and key
        q_func_value is a tuple that looks like (q_proj_func, q_tensor)
        q_tensor size: bsz, tgt_len, emb_dim
        k_tensor size: bsz, src_len, emb_dim
        key_padding_mask size: bsz, src_len
        attn_mask: bsz, src_len
        """

        length, bsz, _ = query.size()
        q = self.q_in_proj[energy_type].forward(query)
        q = (
            q.contiguous()
            .view(length, bsz * self.num_heads, self.head_dim)
            .transpose(0, 1)
        )
        q = q * self.scaling
        length, bsz, _ = key.size()
        k = self.k_in_proj[energy_type].forward(key)
        k = (
            k.contiguous()
            .view(length, bsz * self.num_heads, self.head_dim)
            .transpose(0, 1)
        )

        energy = torch.bmm(q, k.transpose(1, 2)) + bias

        if key_padding_mask is not None:
            energy = energy.masked_fill(
                key_padding_mask.unsqueeze(1).to(torch.bool),
                - float("inf")
            )

        return energy

    def p_choose_from_qk(self, query, key, key_padding_mask, incremental_states=None):
        monotonic_energy = self.energy_from_qk(
            query,
            key,
            "monotonic",
            key_padding_mask=key_padding_mask,
            bias=self.energy_bias,
        )

        p_choose = learnable_p_choose(
            monotonic_energy,
            self.noise_mean,
            self.noise_var,
            self.training
        )
        return p_choose

    def p_choose(self, query, key, key_padding_mask, incremental_states=None):
        return self.p_choose_from_qk(query, key, key_padding_mask, incremental_states)

    def monotonic_attention_process_infer(
        self,
        query: Optional[Tensor],
        key: Optional[Tensor],
        incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]],
    ):
        """
        Monotonic attention at inference time
        Notice that this function is designed for simuleval not sequence_generator
        """
        assert query is not None
        assert key is not None

        if query.size(1) != 1:
            raise RuntimeError(
                "Simultaneous translation models don't support batch decoding."
            )
        # 1. compute stepwise probability
        p_choose = self.p_choose(
            query, key, None, incremental_state
        ).squeeze(1)

        # 2. Compute the alpha
        src_len = key.size(0)
        # Maximum steps allowed in this iteration
        max_steps = src_len - 1 if self.mass_preservation else src_len
        monotonic_cache = self._get_monotonic_buffer(incremental_state)
        # Step for each head
        monotonic_step = monotonic_cache.get(
            'head_step',
            p_choose.new_zeros(1, self.num_heads).long()
        )
        assert monotonic_step is not None
        finish_read = monotonic_step.eq(max_steps)
        p_choose_i = torch.tensor(1)

        while finish_read.sum().item() < self.num_heads:
            # p_choose: self.num_heads, src_len
            # only choose the p at monotonic steps
            # p_choose_i: 1, self.num_heads
            p_choose_i = (
                p_choose.gather(
                    1,
                    monotonic_step
                    .clamp(0, src_len - 1),
                )
            )

            read_one_step = (
                (p_choose_i < 0.5)
                .type_as(monotonic_step)
                .masked_fill(finish_read, 0)
            )
            # 1 x bsz
            # sample actions on unfinished seq
            # 0 means stay, finish reading
            # 1 means leave, continue reading

            monotonic_step += read_one_step

            finish_read = monotonic_step.eq(max_steps) | (read_one_step == 0)

        # p_choose at last steps
        p_choose_i = (
            p_choose.gather(
                1,
                monotonic_step
                .clamp(0, src_len - 1),
            )
        )

        monotonic_cache["head_step"] = monotonic_step
        # Whether a head is looking for new input
        monotonic_cache["head_read"] = (
            monotonic_step.eq(max_steps) & (p_choose_i < 0.5)
        )
        self._set_monotonic_buffer(incremental_state, monotonic_cache)

        # 3. Update alpha
        alpha = (
            p_choose
            .new_zeros([self.num_heads, src_len])
            .scatter(
                1,
                (monotonic_step)
                .view(self.num_heads, 1).clamp(0, src_len - 1),
                1
            )
        )

        if not self.mass_preservation:
            alpha = alpha.masked_fill(
                (monotonic_step == max_steps)
                .view(self.num_heads, 1),
                0
            )

        # 4. Compute Beta
        if self.soft_attention:
            monotonic_step = monotonic_step.t()
            beta_mask = torch.arange(src_len).expand_as(alpha).gt(monotonic_step).unsqueeze(1)
            # If it's soft attention just do softmax on current context
            soft_energy = self.energy_from_qk(
                query,
                key,
                "soft"
            )
            beta = torch.nn.functional.softmax(
                soft_energy.masked_fill(beta_mask, -float("inf")), dim=-1
            )
            # It could happen that a head doesn't move at all
            beta = beta.masked_fill(monotonic_step.eq(0).unsqueeze(1), 0)
        else:
            # If it's hard attention just select the last state
            beta = alpha

        return p_choose, alpha, beta

    def monotonic_attention_process_train(
        self,
        query: Optional[Tensor],
        key: Optional[Tensor],
        key_padding_mask: Optional[Tensor] = None,
    ):
        """
        Calculating monotonic attention process for training
        Including:
            stepwise probability: p_choose
            expected hard alignment: alpha
            expected soft attention: beta
        """
        assert query is not None
        assert key is not None

        # 1.
compute stepwise probability p_choose = self.p_choose_from_qk(query, key, key_padding_mask) # 2. compute expected_alignment alpha = expected_alignment_from_p_choose( p_choose, key_padding_mask, eps=self.eps, ) if self.mass_preservation: alpha = mass_preservation( alpha, key_padding_mask ) # 3. compute expected soft attention (soft aligned model only) if self.soft_attention: soft_energy = self.energy_from_qk( query, key, "soft", key_padding_mask=None, ) beta = expected_soft_attention( alpha, soft_energy, padding_mask=key_padding_mask, chunk_size=self.chunk_size, eps=self.eps, ) else: beta = alpha soft_energy = alpha return p_choose, alpha, beta, soft_energy def forward( self, query: Optional[Tensor], key: Optional[Tensor], value: Optional[Tensor], key_padding_mask: Optional[Tensor] = None, attn_mask: Optional[Tensor] = None, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, need_weights: bool = True, static_kv: bool = False, need_head_weights: bool = False, ): """ query: tgt_len, bsz, embed_dim key: src_len, bsz, embed_dim value: src_len, bsz, embed_dim """ assert attn_mask is None assert query is not None assert key is not None assert value is not None tgt_len, bsz, embed_dim = query.size() src_len = value.size(0) if key_padding_mask is not None: assert not key_padding_mask[:, 0].any(), ( "Only right padding is supported." ) key_padding_mask = ( key_padding_mask .unsqueeze(1) .expand([bsz, self.num_heads, src_len]) .contiguous() .view(-1, src_len) ) if incremental_state is not None: # Inference ( p_choose, alpha, beta ) = self.monotonic_attention_process_infer( query, key, incremental_state ) soft_energy = beta else: # Train ( p_choose, alpha, beta, soft_energy ) = self.monotonic_attention_process_train( query, key, key_padding_mask ) v = self.v_proj(value) length, bsz, _ = v.size() v = ( v.contiguous() .view(length, bsz * self.num_heads, self.head_dim) .transpose(0, 1) ) attn = torch.bmm(beta.type_as(v), v) attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim) attn = self.out_proj(attn) p_choose = p_choose.view(bsz, self.num_heads, tgt_len, src_len) alpha = alpha.view(bsz, self.num_heads, tgt_len, src_len) beta = beta.view(bsz, self.num_heads, tgt_len, src_len) return attn, { "p_choose": p_choose, "alpha": alpha, "beta": beta, } def _get_monotonic_buffer(self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]]): maybe_incremental_state = self.get_incremental_state( incremental_state, 'monotonic', ) if maybe_incremental_state is None: typed_empty_dict: Dict[str, Optional[Tensor]] = {} return typed_empty_dict else: return maybe_incremental_state def _set_monotonic_buffer(self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]], buffer: Dict[str, Optional[Tensor]]): self.set_incremental_state( incremental_state, 'monotonic', buffer, ) @register_monotonic_attention("infinite_lookback") class MonotonicInfiniteLookbackAttention( MonotonicAttention ): def __init__(self, args): super().__init__(args) self.soft_attention = True self.init_soft_attention() def init_soft_attention(self): self.k_proj_soft = nn.Linear(self.kdim, self.embed_dim, bias=True) self.q_proj_soft = nn.Linear(self.embed_dim, self.embed_dim, bias=True) self.k_in_proj["soft"] = self.k_proj_soft self.q_in_proj["soft"] = self.q_proj_soft if self.qkv_same_dim: # Empirically observed the convergence to be much better with # the scaled initialization nn.init.xavier_uniform_( self.k_in_proj["soft"].weight, gain=1 / math.sqrt(2) ) nn.init.xavier_uniform_( 
self.q_in_proj["soft"].weight, gain=1 / math.sqrt(2) ) else: nn.init.xavier_uniform_(self.k_in_proj["soft"].weight) nn.init.xavier_uniform_(self.q_in_proj["soft"].weight) @register_monotonic_attention("waitk") class WaitKAttention( MonotonicInfiniteLookbackAttention ): """ STACL: Simultaneous Translation with Implicit Anticipation and Controllable Latency using Prefix-to-Prefix Framework https://www.aclweb.org/anthology/P19-1289/ """ def __init__(self, args): super().__init__(args) self.q_in_proj["soft"] = self.q_in_proj["monotonic"] self.k_in_proj["soft"] = self.k_in_proj["monotonic"] self.waitk_lagging = args.waitk_lagging assert self.waitk_lagging > 0, ( f"Lagging has to been larger than 0, get {self.waitk_lagging}." ) @staticmethod def add_args(parser): super( MonotonicInfiniteLookbackAttention, MonotonicInfiniteLookbackAttention ).add_args(parser) parser.add_argument( "--waitk-lagging", type=int, required=True, help="Wait K lagging" ) def p_choose_from_qk( self, query: Optional[Tensor], key: Optional[Tensor], key_padding_mask: Optional[Tensor] = None, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, ): assert query is not None assert key is not None p_choose = waitk_p_choose( tgt_len=query.size(0), src_len=key.size(0), bsz=query.size(1) * self.num_heads, waitk_lagging=self.waitk_lagging, key_padding_mask=key_padding_mask, incremental_state=incremental_state, ) return p_choose.to(query) @register_monotonic_attention("chunkwise") class ChunkwiseAttention( MonotonicInfiniteLookbackAttention ): def __init__(self, args): super().__init__(args) self.chunk_size = args.mocha_chunk_size assert self.chunk_size > 1 @staticmethod def add_args(parser): super( MonotonicInfiniteLookbackAttention ).add_args(parser) parser.add_argument( "--mocha-chunk-size", type=int, required=True, help="Mocha chunk size" )
16,858
31.421154
142
py
sign-topic
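A small sketch, with toy tensors rather than the class above, of how the stepwise probabilities are produced from monotonic energies via learnable_p_choose: during training, Gaussian noise is added before the sigmoid to push the probabilities toward 0/1. Shapes and the noise settings are assumptions.

import torch

bsz_heads, tgt_len, src_len = 4, 3, 5        # toy sizes (assumptions)
energy = torch.randn(bsz_heads, tgt_len, src_len)

noise_mean, noise_var, training = 0.0, 1.0, True
noise = torch.normal(noise_mean, noise_var, energy.size()) if training else 0.0
p_choose = torch.sigmoid(energy + noise)     # stepwise probabilities in [0, 1]
print(p_choose.shape, float(p_choose.min()) >= 0.0, float(p_choose.max()) <= 1.0)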
sign-topic-main/examples/simultaneous_translation/modules/monotonic_transformer_layer.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from fairseq.modules import TransformerDecoderLayer, TransformerEncoderLayer from . import build_monotonic_attention from typing import Dict, Optional, List from torch import Tensor import torch class TransformerMonotonicEncoderLayer(TransformerEncoderLayer): def forward(self, x, encoder_padding_mask): seq_len, _, _ = x.size() attn_mask = x.new_ones([seq_len, seq_len]).triu(1) attn_mask = attn_mask.masked_fill(attn_mask.bool(), float("-inf")) return super().forward(x, encoder_padding_mask, attn_mask) class TransformerMonotonicDecoderLayer(TransformerDecoderLayer): def __init__(self, args): super().__init__(args) assert args.simul_type is not None, "A --simul-type is needed." self.encoder_attn = build_monotonic_attention(args) def prune_incremental_state( self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] ): input_buffer = self.self_attn._get_input_buffer(incremental_state) for key in ["prev_key", "prev_value"]: input_buffer_key = input_buffer[key] assert input_buffer_key is not None if input_buffer_key.size(2) > 1: input_buffer[key] = input_buffer_key[:, :, :-1, :] else: typed_empty_dict: Dict[str, Optional[Tensor]] = {} input_buffer = typed_empty_dict break assert incremental_state is not None self.self_attn._set_input_buffer(incremental_state, input_buffer) def forward( self, x, encoder_out: Optional[Tensor] = None, encoder_padding_mask: Optional[Tensor] = None, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, prev_self_attn_state: Optional[List[Tensor]] = None, prev_attn_state: Optional[List[Tensor]] = None, self_attn_mask: Optional[Tensor] = None, self_attn_padding_mask: Optional[Tensor] = None, need_attn: bool = False, need_head_weights: bool = False, ): """ Args: x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)` encoder_padding_mask (ByteTensor, optional): binary ByteTensor of shape `(batch, src_len)` where padding elements are indicated by ``1``. need_attn (bool, optional): return attention weights need_head_weights (bool, optional): return attention weights for each head (default: return average over heads). 
Returns: encoded output of shape `(seq_len, batch, embed_dim)` """ if need_head_weights: need_attn = True residual = x if self.normalize_before: x = self.self_attn_layer_norm(x) if prev_self_attn_state is not None: prev_key, prev_value = prev_self_attn_state[:2] saved_state: Dict[str, Optional[Tensor]] = { "prev_key": prev_key, "prev_value": prev_value, } if len(prev_self_attn_state) >= 3: saved_state["prev_key_padding_mask"] = prev_self_attn_state[2] assert incremental_state is not None self.self_attn._set_input_buffer(incremental_state, saved_state) _self_attn_input_buffer = self.self_attn._get_input_buffer(incremental_state) if self.cross_self_attention and not ( incremental_state is not None and _self_attn_input_buffer is not None and "prev_key" in _self_attn_input_buffer ): if self_attn_mask is not None: assert encoder_out is not None self_attn_mask = torch.cat( (x.new_zeros(x.size(0), encoder_out.size(0)), self_attn_mask), dim=1 ) if self_attn_padding_mask is not None: if encoder_padding_mask is None: assert encoder_out is not None encoder_padding_mask = self_attn_padding_mask.new_zeros( encoder_out.size(1), encoder_out.size(0) ) self_attn_padding_mask = torch.cat( (encoder_padding_mask, self_attn_padding_mask), dim=1 ) assert encoder_out is not None y = torch.cat((encoder_out, x), dim=0) else: y = x x, attn = self.self_attn( query=x, key=y, value=y, key_padding_mask=self_attn_padding_mask, incremental_state=incremental_state, need_weights=False, attn_mask=self_attn_mask, ) x = self.dropout_module(x) x = self.residual_connection(x, residual) if not self.normalize_before: x = self.self_attn_layer_norm(x) assert self.encoder_attn is not None residual = x if self.normalize_before: x = self.encoder_attn_layer_norm(x) if prev_attn_state is not None: prev_key, prev_value = prev_attn_state[:2] saved_state: Dict[str, Optional[Tensor]] = { "prev_key": prev_key, "prev_value": prev_value, } if len(prev_attn_state) >= 3: saved_state["prev_key_padding_mask"] = prev_attn_state[2] assert incremental_state is not None self.encoder_attn._set_input_buffer(incremental_state, saved_state) x, attn = self.encoder_attn( query=x, key=encoder_out, value=encoder_out, key_padding_mask=encoder_padding_mask, incremental_state=incremental_state, static_kv=True, need_weights=need_attn or (not self.training and self.need_attn), need_head_weights=need_head_weights, ) x = self.dropout_module(x) x = self.residual_connection(x, residual) if not self.normalize_before: x = self.encoder_attn_layer_norm(x) residual = x if self.normalize_before: x = self.final_layer_norm(x) x = self.activation_fn(self.fc1(x)) x = self.activation_dropout_module(x) x = self.fc2(x) x = self.dropout_module(x) x = self.residual_connection(x, residual) if not self.normalize_before: x = self.final_layer_norm(x) if self.onnx_trace and incremental_state is not None: saved_state = self.self_attn._get_input_buffer(incremental_state) assert saved_state is not None if self_attn_padding_mask is not None: self_attn_state = [ saved_state["prev_key"], saved_state["prev_value"], saved_state["prev_key_padding_mask"], ] else: self_attn_state = [saved_state["prev_key"], saved_state["prev_value"]] return x, attn, self_attn_state return x, attn, None
7,265
38.704918
88
py
sign-topic
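A toy illustration of the unidirectional encoder mask built in TransformerMonotonicEncoderLayer.forward above: entries above the diagonal are set to -inf so each source state attends only to itself and earlier states. The sequence length is an assumption.

import torch

seq_len = 4
attn_mask = torch.ones(seq_len, seq_len).triu(1)
attn_mask = attn_mask.masked_fill(attn_mask.bool(), float("-inf"))
print(attn_mask)   # 0. on and below the diagonal, -inf strictly above it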
sign-topic-main/examples/simultaneous_translation/modules/fixed_pre_decision.py
from functools import partial import torch from torch import Tensor import math import torch.nn.functional as F from . import register_monotonic_attention from .monotonic_multihead_attention import ( MonotonicAttention, MonotonicInfiniteLookbackAttention, WaitKAttention ) from typing import Dict, Optional def fixed_pooling_monotonic_attention(monotonic_attention): def create_model(monotonic_attention, klass): class FixedStrideMonotonicAttention(monotonic_attention): def __init__(self, args): self.waitk_lagging = 0 self.num_heads = 0 self.noise_mean = 0.0 self.noise_var = 0.0 super().__init__(args) self.pre_decision_type = args.fixed_pre_decision_type self.pre_decision_ratio = args.fixed_pre_decision_ratio self.pre_decision_pad_threshold = args.fixed_pre_decision_pad_threshold assert self.pre_decision_ratio > 1 if args.fixed_pre_decision_type == "average": self.pooling_layer = torch.nn.AvgPool1d( kernel_size=self.pre_decision_ratio, stride=self.pre_decision_ratio, ceil_mode=True, ) elif args.fixed_pre_decision_type == "last": def last(key): if key.size(2) < self.pre_decision_ratio: return key else: k = key[ :, :, self.pre_decision_ratio - 1:: self.pre_decision_ratio, ].contiguous() if key.size(-1) % self.pre_decision_ratio != 0: k = torch.cat([k, key[:, :, -1:]], dim=-1).contiguous() return k self.pooling_layer = last else: raise NotImplementedError @staticmethod def add_args(parser): super( FixedStrideMonotonicAttention, FixedStrideMonotonicAttention ).add_args(parser) parser.add_argument( "--fixed-pre-decision-ratio", type=int, required=True, help=( "Ratio for the fixed pre-decision," "indicating how many encoder steps will start" "simultaneous decision making process." ), ) parser.add_argument( "--fixed-pre-decision-type", default="average", choices=["average", "last"], help="Pooling type", ) parser.add_argument( "--fixed-pre-decision-pad-threshold", type=float, default=0.3, help="If a part of the sequence has pad" ",the threshold the pooled part is a pad.", ) def insert_zeros(self, x): bsz_num_heads, tgt_len, src_len = x.size() stride = self.pre_decision_ratio weight = F.pad(torch.ones(1, 1, 1).to(x), (stride - 1, 0)) x_upsample = F.conv_transpose1d( x.view(-1, src_len).unsqueeze(1), weight, stride=stride, padding=0, ) return x_upsample.squeeze(1).view(bsz_num_heads, tgt_len, -1) def p_choose( self, query: Optional[Tensor], key: Optional[Tensor], key_padding_mask: Optional[Tensor] = None, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, ): assert key is not None assert query is not None src_len = key.size(0) tgt_len = query.size(0) batch_size = query.size(1) key_pool = self.pooling_layer(key.transpose(0, 2)).transpose(0, 2) if key_padding_mask is not None: key_padding_mask_pool = ( self.pooling_layer(key_padding_mask.unsqueeze(0).float()) .squeeze(0) .gt(self.pre_decision_pad_threshold) ) # Make sure at least one element is not pad key_padding_mask_pool[:, 0] = 0 else: key_padding_mask_pool = None if incremental_state is not None: # The floor instead of ceil is used for inference # But make sure the length key_pool at least 1 if ( max(1, math.floor(key.size(0) / self.pre_decision_ratio)) ) < key_pool.size(0): key_pool = key_pool[:-1] if key_padding_mask_pool is not None: key_padding_mask_pool = key_padding_mask_pool[:-1] p_choose_pooled = self.p_choose_from_qk( query, key_pool, key_padding_mask_pool, incremental_state=incremental_state, ) # Upsample, interpolate zeros p_choose = self.insert_zeros(p_choose_pooled) if p_choose.size(-1) < src_len: # Append zeros 
if the upsampled p_choose is shorter than src_len p_choose = torch.cat( [ p_choose, torch.zeros( p_choose.size(0), tgt_len, src_len - p_choose.size(-1) ).to(p_choose) ], dim=2 ) else: # can be larger than src_len because we used ceil before p_choose = p_choose[:, :, :src_len] p_choose[:, :, -1] = p_choose_pooled[:, :, -1] assert list(p_choose.size()) == [ batch_size * self.num_heads, tgt_len, src_len, ] return p_choose FixedStrideMonotonicAttention.__name__ = klass.__name__ return FixedStrideMonotonicAttention return partial(create_model, monotonic_attention) @register_monotonic_attention("waitk_fixed_pre_decision") @fixed_pooling_monotonic_attention(WaitKAttention) class WaitKAttentionFixedStride: pass @register_monotonic_attention("hard_aligned_fixed_pre_decision") @fixed_pooling_monotonic_attention(MonotonicAttention) class MonotonicAttentionFixedStride: pass @register_monotonic_attention("infinite_lookback_fixed_pre_decision") @fixed_pooling_monotonic_attention(MonotonicInfiniteLookbackAttention) class MonotonicInfiniteLookbackAttentionFixedStride: pass
7,370
37.591623
91
py
sign-topic
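A toy version of the upsampling performed by insert_zeros above: probabilities computed on the pooled key sequence (one decision per pre_decision_ratio encoder states) are spread back to the original resolution by a transposed convolution whose kernel is all zeros except a trailing one, placing each pooled value at the end of its block. The ratio and values are made up.

import torch
import torch.nn.functional as F

ratio = 3
p_pooled = torch.tensor([[[0.2, 0.9]]])              # (bsz*heads, tgt_len, pooled_src_len)
bsz_heads, tgt_len, pooled_len = p_pooled.size()

weight = F.pad(torch.ones(1, 1, 1), (ratio - 1, 0))  # kernel [0, 0, 1]
upsampled = F.conv_transpose1d(
    p_pooled.view(-1, pooled_len).unsqueeze(1), weight, stride=ratio, padding=0
)
print(upsampled.squeeze(1).view(bsz_heads, tgt_len, -1))
# [[[0.0, 0.0, 0.2, 0.0, 0.0, 0.9]]]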
sign-topic-main/examples/simultaneous_translation/eval/agents/simul_t2t_enja.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import os from fairseq import checkpoint_utils, tasks import sentencepiece as spm import torch try: from simuleval import READ_ACTION, WRITE_ACTION, DEFAULT_EOS from simuleval.agents import TextAgent except ImportError: print("Please install simuleval 'pip install simuleval'") BOS_PREFIX = "\u2581" class SimulTransTextAgentJA(TextAgent): """ Simultaneous Translation Text agent for Japanese """ def __init__(self, args): # Whether use gpu self.gpu = getattr(args, "gpu", False) # Max len self.max_len = args.max_len # Load Model self.load_model_vocab(args) # build word splitter self.build_word_splitter(args) self.eos = DEFAULT_EOS def initialize_states(self, states): states.incremental_states = dict() states.incremental_states["online"] = dict() def to_device(self, tensor): if self.gpu: return tensor.cuda() else: return tensor.cpu() def load_model_vocab(self, args): filename = args.model_path if not os.path.exists(filename): raise IOError("Model file not found: {}".format(filename)) state = checkpoint_utils.load_checkpoint_to_cpu(filename) task_args = state["cfg"]["task"] task_args.data = args.data_bin task = tasks.setup_task(task_args) # build model for ensemble state["cfg"]["model"].load_pretrained_encoder_from = None state["cfg"]["model"].load_pretrained_decoder_from = None self.model = task.build_model(state["cfg"]["model"]) self.model.load_state_dict(state["model"], strict=True) self.model.eval() self.model.share_memory() if self.gpu: self.model.cuda() # Set dictionary self.dict = {} self.dict["tgt"] = task.target_dictionary self.dict["src"] = task.source_dictionary @staticmethod def add_args(parser): # fmt: off parser.add_argument('--model-path', type=str, required=True, help='path to your pretrained model.') parser.add_argument("--data-bin", type=str, required=True, help="Path of data binary") parser.add_argument("--max-len", type=int, default=100, help="Max length of translation") parser.add_argument("--tgt-splitter-type", type=str, default="SentencePiece", help="Subword splitter type for target text.") parser.add_argument("--tgt-splitter-path", type=str, default=None, help="Subword splitter model path for target text.") parser.add_argument("--src-splitter-type", type=str, default="SentencePiece", help="Subword splitter type for source text.") parser.add_argument("--src-splitter-path", type=str, default=None, help="Subword splitter model path for source text.") # fmt: on return parser def build_word_splitter(self, args): self.spm = {} for lang in ['src', 'tgt']: if getattr(args, f'{lang}_splitter_type', None): path = getattr(args, f'{lang}_splitter_path', None) if path: self.spm[lang] = spm.SentencePieceProcessor() self.spm[lang].Load(path) def segment_to_units(self, segment, states): # Split a full word (segment) into subwords (units) return self.spm['src'].EncodeAsPieces(segment) def update_model_encoder(self, states): if len(states.units.source) == 0: return src_indices = [ self.dict['src'].index(x) for x in states.units.source.value ] if states.finish_read(): # Append the eos index when the prediction is over src_indices += [self.dict["tgt"].eos_index] src_indices = self.to_device( torch.LongTensor(src_indices).unsqueeze(0) ) src_lengths = self.to_device( torch.LongTensor([src_indices.size(1)]) ) states.encoder_states = self.model.encoder(src_indices, src_lengths) torch.cuda.empty_cache() def 
update_states_read(self, states): # Happens after a read action. self.update_model_encoder(states) def units_to_segment(self, units, states): # Merge sub words (units) to full word (segment). # For Japanese, we can directly send # the untokenized token to server except the BOS token # with following option # --sacrebleu-tokenizer MeCab # --eval-latency-unit char # --no-space token = units.value.pop() if ( token == self.dict["tgt"].eos_word or len(states.segments.target) > self.max_len ): return DEFAULT_EOS if BOS_PREFIX == token: return None if token[0] == BOS_PREFIX: return token[1:] else: return token def policy(self, states): if not getattr(states, "encoder_states", None): # No encoder states, read a token first return READ_ACTION # encode previous predicted target tokens tgt_indices = self.to_device( torch.LongTensor( [self.model.decoder.dictionary.eos()] + [ self.dict['tgt'].index(x) for x in states.units.target.value if x is not None ] ).unsqueeze(0) ) # Current steps states.incremental_states["steps"] = { "src": states.encoder_states["encoder_out"][0].size(0), "tgt": 1 + len(states.units.target), } # Online only means the reading is not finished states.incremental_states["online"]["only"] = ( torch.BoolTensor([not states.finish_read()]) ) x, outputs = self.model.decoder.forward( prev_output_tokens=tgt_indices, encoder_out=states.encoder_states, incremental_state=states.incremental_states, ) states.decoder_out = x torch.cuda.empty_cache() if outputs.action == 0: return READ_ACTION else: return WRITE_ACTION def predict(self, states): # Predict target token from decoder states decoder_states = states.decoder_out lprobs = self.model.get_normalized_probs( [decoder_states[:, -1:]], log_probs=True ) index = lprobs.argmax(dim=-1)[0, 0].item() if index != self.dict['tgt'].eos_index: token = self.dict['tgt'].string([index]) else: token = self.dict['tgt'].eos_word return token
7,099
30.277533
85
py
sign-topic
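A plain-string sketch of the prefix handling in units_to_segment above: SentencePiece pieces carry the "\u2581" word-boundary marker, which the agent strips (or suppresses entirely when the piece is only the marker) before sending a token to the SimulEval server. The sample pieces are made up, and the eos/max-length checks of the real method are omitted.

BOS_PREFIX = "\u2581"

def piece_to_output(token: str):
    if token == BOS_PREFIX:
        return None                 # bare boundary marker: nothing to emit yet
    if token.startswith(BOS_PREFIX):
        return token[1:]            # strip the word-boundary prefix
    return token

pieces = [BOS_PREFIX + "これ", "は", BOS_PREFIX]   # made-up SentencePiece output
print([piece_to_output(p) for p in pieces])        # ['これ', 'は', None]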
sign-topic-main/examples/simultaneous_translation/models/transformer_monotonic_attention.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import Dict, List, NamedTuple, Optional import torch import torch.nn as nn from examples.simultaneous_translation.modules.monotonic_transformer_layer import ( TransformerMonotonicDecoderLayer, TransformerMonotonicEncoderLayer, ) from fairseq.models import ( register_model, register_model_architecture, ) from fairseq.models.transformer import ( TransformerModel, TransformerEncoder, TransformerDecoder, base_architecture, transformer_iwslt_de_en, transformer_vaswani_wmt_en_de_big, tiny_architecture ) from torch import Tensor DEFAULT_MAX_SOURCE_POSITIONS = 1024 DEFAULT_MAX_TARGET_POSITIONS = 1024 READ_ACTION = 0 WRITE_ACTION = 1 TransformerMonotonicDecoderOut = NamedTuple( "TransformerMonotonicDecoderOut", [ ("action", int), ("p_choose", Optional[Tensor]), ("attn_list", Optional[List[Optional[Dict[str, Tensor]]]]), ("encoder_out", Optional[Dict[str, List[Tensor]]]), ("encoder_padding_mask", Optional[Tensor]), ], ) @register_model("transformer_unidirectional") class TransformerUnidirectionalModel(TransformerModel): @classmethod def build_encoder(cls, args, src_dict, embed_tokens): return TransformerMonotonicEncoder(args, src_dict, embed_tokens) @register_model("transformer_monotonic") class TransformerModelSimulTrans(TransformerModel): @classmethod def build_encoder(cls, args, src_dict, embed_tokens): return TransformerMonotonicEncoder(args, src_dict, embed_tokens) @classmethod def build_decoder(cls, args, tgt_dict, embed_tokens): return TransformerMonotonicDecoder(args, tgt_dict, embed_tokens) class TransformerMonotonicEncoder(TransformerEncoder): def __init__(self, args, dictionary, embed_tokens): super().__init__(args, dictionary, embed_tokens) self.dictionary = dictionary self.layers = nn.ModuleList([]) self.layers.extend( [ TransformerMonotonicEncoderLayer(args) for i in range(args.encoder_layers) ] ) class TransformerMonotonicDecoder(TransformerDecoder): """ Transformer decoder consisting of *args.decoder_layers* layers. Each layer is a :class:`TransformerDecoderLayer`. Args: args (argparse.Namespace): parsed command-line arguments dictionary (~fairseq.data.Dictionary): decoding dictionary embed_tokens (torch.nn.Embedding): output embedding no_encoder_attn (bool, optional): whether to attend to encoder outputs (default: False). 
""" def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False): super().__init__(args, dictionary, embed_tokens, no_encoder_attn=False) self.dictionary = dictionary self.layers = nn.ModuleList([]) self.layers.extend( [ TransformerMonotonicDecoderLayer(args) for _ in range(args.decoder_layers) ] ) self.policy_criterion = getattr(args, "policy_criterion", "any") self.num_updates = None def set_num_updates(self, num_updates): self.num_updates = num_updates def pre_attention( self, prev_output_tokens, encoder_out_dict: Dict[str, List[Tensor]], incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, ): positions = ( self.embed_positions( prev_output_tokens, incremental_state=incremental_state, ) if self.embed_positions is not None else None ) if incremental_state is not None: prev_output_tokens = prev_output_tokens[:, -1:] if positions is not None: positions = positions[:, -1:] # embed tokens and positions x = self.embed_scale * self.embed_tokens(prev_output_tokens) if self.project_in_dim is not None: x = self.project_in_dim(x) if positions is not None: x += positions x = self.dropout_module(x) # B x T x C -> T x B x C x = x.transpose(0, 1) encoder_out = encoder_out_dict["encoder_out"][0] if "encoder_padding_mask" in encoder_out_dict: encoder_padding_mask = ( encoder_out_dict["encoder_padding_mask"][0] if encoder_out_dict["encoder_padding_mask"] and len(encoder_out_dict["encoder_padding_mask"]) > 0 else None ) else: encoder_padding_mask = None return x, encoder_out, encoder_padding_mask def post_attention(self, x): if self.layer_norm is not None: x = self.layer_norm(x) # T x B x C -> B x T x C x = x.transpose(0, 1) if self.project_out_dim is not None: x = self.project_out_dim(x) return x def clean_cache( self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]], end_id: Optional[int] = None, ): """ Clean cache in the monotonic layers. The cache is generated because of a forward pass of decoder has run but no prediction, so that the self attention key value in decoder is written in the incremental state. end_id is the last idx of the layers """ if end_id is None: end_id = len(self.layers) for index, layer in enumerate(self.layers): if index < end_id: layer.prune_incremental_state(incremental_state) def extract_features( self, prev_output_tokens, encoder_out: Optional[Dict[str, List[Tensor]]], incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, full_context_alignment: bool = False, # unused alignment_layer: Optional[int] = None, # unused alignment_heads: Optional[int] = None, # unsed ): """ Similar to *forward* but only return features. 
        Returns:
            tuple:
                - the decoder's features of shape `(batch, tgt_len, embed_dim)`
                - a dictionary with any model-specific outputs
        """
        # incremental_state = None
        assert encoder_out is not None
        (x, encoder_outs, encoder_padding_mask) = self.pre_attention(
            prev_output_tokens, encoder_out, incremental_state
        )
        attn = None
        inner_states = [x]
        attn_list: List[Optional[Dict[str, Tensor]]] = []

        p_choose = torch.tensor([1.0])

        for i, layer in enumerate(self.layers):

            x, attn, _ = layer(
                x=x,
                encoder_out=encoder_outs,
                encoder_padding_mask=encoder_padding_mask,
                incremental_state=incremental_state,
                self_attn_mask=self.buffered_future_mask(x)
                if incremental_state is None
                else None,
            )

            inner_states.append(x)
            attn_list.append(attn)

            if incremental_state is not None:
                if_online = incremental_state["online"]["only"]
                assert if_online is not None
                if if_online.to(torch.bool):
                    # Online indicates that the encoder states are still changing
                    assert attn is not None
                    if self.policy_criterion == "any":
                        # If any head decides to read, then read
                        head_read = layer.encoder_attn._get_monotonic_buffer(incremental_state)["head_read"]
                        assert head_read is not None
                        if head_read.any():
                            # We need to prune the last self_attn saved_state
                            # if the model decides not to read
                            # otherwise there will be duplicated saved_state
                            self.clean_cache(incremental_state, i + 1)

                            return x, TransformerMonotonicDecoderOut(
                                action=0,
                                p_choose=p_choose,
                                attn_list=None,
                                encoder_out=None,
                                encoder_padding_mask=None,
                            )

        x = self.post_attention(x)

        return x, TransformerMonotonicDecoderOut(
            action=1,
            p_choose=p_choose,
            attn_list=attn_list,
            encoder_out=encoder_out,
            encoder_padding_mask=encoder_padding_mask,
        )


@register_model_architecture("transformer_monotonic", "transformer_monotonic")
def base_monotonic_architecture(args):
    base_architecture(args)
    args.encoder_unidirectional = getattr(args, "encoder_unidirectional", False)


@register_model_architecture(
    "transformer_monotonic", "transformer_monotonic_iwslt_de_en"
)
def transformer_monotonic_iwslt_de_en(args):
    transformer_iwslt_de_en(args)
    base_monotonic_architecture(args)


# parameters used in the "Attention Is All You Need" paper (Vaswani et al., 2017)
@register_model_architecture(
    "transformer_monotonic", "transformer_monotonic_vaswani_wmt_en_de_big"
)
def transformer_monotonic_vaswani_wmt_en_de_big(args):
    transformer_vaswani_wmt_en_de_big(args)


@register_model_architecture(
    "transformer_monotonic", "transformer_monotonic_vaswani_wmt_en_fr_big"
)
def transformer_monotonic_vaswani_wmt_en_fr_big(args):
    # apply the standard big en-fr transformer architecture
    from fairseq.models.transformer import transformer_vaswani_wmt_en_fr_big

    transformer_vaswani_wmt_en_fr_big(args)


@register_model_architecture(
    "transformer_unidirectional", "transformer_unidirectional_iwslt_de_en"
)
def transformer_unidirectional_iwslt_de_en(args):
    transformer_iwslt_de_en(args)


@register_model_architecture("transformer_monotonic", "transformer_monotonic_tiny")
def monotonic_tiny_architecture(args):
    tiny_architecture(args)
    base_monotonic_architecture(args)
10,185
32.617162
108
py
sign-topic
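A toy sketch of the "any" policy used in extract_features above during incremental decoding: if any monotonic head still wants more source input (its head_read flag is set), the decoder returns READ_ACTION; otherwise it writes a token. The tensor values are made up.

import torch

READ_ACTION, WRITE_ACTION = 0, 1

def decide(head_read: torch.Tensor) -> int:
    # head_read: (1, num_heads) flags taken from the monotonic buffer
    return READ_ACTION if bool(head_read.any()) else WRITE_ACTION

print(decide(torch.tensor([[False, True, False, False]])))   # 0 -> read more source
print(decide(torch.tensor([[False, False, False, False]])))  # 1 -> write a target token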
sign-topic-main/examples/simultaneous_translation/models/convtransformer_simul_trans.py
# Copyright (c) 2017-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the license found in the LICENSE file in # the root directory of this source tree. An additional grant of patent rights # can be found in the PATENTS file in the same directory. from fairseq import checkpoint_utils from fairseq.models import ( register_model, register_model_architecture, ) from fairseq.models.speech_to_text import ( ConvTransformerModel, convtransformer_espnet, ConvTransformerEncoder, ) from fairseq.models.speech_to_text.modules.augmented_memory_attention import ( augmented_memory, SequenceEncoder, AugmentedMemoryConvTransformerEncoder, ) from torch import nn, Tensor from typing import Dict, List from fairseq.models.speech_to_text.modules.emformer import NoSegAugmentedMemoryTransformerEncoderLayer @register_model("convtransformer_simul_trans") class SimulConvTransformerModel(ConvTransformerModel): """ Implementation of the paper: SimulMT to SimulST: Adapting Simultaneous Text Translation to End-to-End Simultaneous Speech Translation https://www.aclweb.org/anthology/2020.aacl-main.58.pdf """ @staticmethod def add_args(parser): super(SimulConvTransformerModel, SimulConvTransformerModel).add_args(parser) parser.add_argument( "--train-monotonic-only", action="store_true", default=False, help="Only train monotonic attention", ) @classmethod def build_decoder(cls, args, task, embed_tokens): tgt_dict = task.tgt_dict from examples.simultaneous_translation.models.transformer_monotonic_attention import ( TransformerMonotonicDecoder, ) decoder = TransformerMonotonicDecoder(args, tgt_dict, embed_tokens) if getattr(args, "load_pretrained_decoder_from", None): decoder = checkpoint_utils.load_pretrained_component_from_model( component=decoder, checkpoint=args.load_pretrained_decoder_from ) return decoder @register_model_architecture( "convtransformer_simul_trans", "convtransformer_simul_trans_espnet" ) def convtransformer_simul_trans_espnet(args): convtransformer_espnet(args) @register_model("convtransformer_augmented_memory") @augmented_memory class AugmentedMemoryConvTransformerModel(SimulConvTransformerModel): @classmethod def build_encoder(cls, args): encoder = SequenceEncoder(args, AugmentedMemoryConvTransformerEncoder(args)) if getattr(args, "load_pretrained_encoder_from", None) is not None: encoder = checkpoint_utils.load_pretrained_component_from_model( component=encoder, checkpoint=args.load_pretrained_encoder_from ) return encoder @register_model_architecture( "convtransformer_augmented_memory", "convtransformer_augmented_memory" ) def augmented_memory_convtransformer_espnet(args): convtransformer_espnet(args) # ============================================================================ # # Convtransformer # with monotonic attention decoder # with emformer encoder # ============================================================================ # class ConvTransformerEmformerEncoder(ConvTransformerEncoder): def __init__(self, args): super().__init__(args) stride = self.conv_layer_stride(args) trf_left_context = args.segment_left_context // stride trf_right_context = args.segment_right_context // stride context_config = [trf_left_context, trf_right_context] self.transformer_layers = nn.ModuleList( [ NoSegAugmentedMemoryTransformerEncoderLayer( input_dim=args.encoder_embed_dim, num_heads=args.encoder_attention_heads, ffn_dim=args.encoder_ffn_embed_dim, num_layers=args.encoder_layers, dropout_in_attn=args.dropout, dropout_on_attn=args.dropout, dropout_on_fc1=args.dropout, 
dropout_on_fc2=args.dropout, activation_fn=args.activation_fn, context_config=context_config, segment_size=args.segment_length, max_memory_size=args.max_memory_size, scaled_init=True, # TODO: use constant for now. tanh_on_mem=args.amtrf_tanh_on_mem, ) ] ) self.conv_transformer_encoder = ConvTransformerEncoder(args) def forward(self, src_tokens, src_lengths): encoder_out: Dict[str, List[Tensor]] = self.conv_transformer_encoder(src_tokens, src_lengths.to(src_tokens.device)) output = encoder_out["encoder_out"][0] encoder_padding_masks = encoder_out["encoder_padding_mask"] return { "encoder_out": [output], # This is because that in the original implementation # the output didn't consider the last segment as right context. "encoder_padding_mask": [encoder_padding_masks[0][:, : output.size(0)]] if len(encoder_padding_masks) > 0 else [], "encoder_embedding": [], "encoder_states": [], "src_tokens": [], "src_lengths": [], } @staticmethod def conv_layer_stride(args): # TODO: make it configurable from the args return 4 @register_model("convtransformer_emformer") class ConvtransformerEmformer(SimulConvTransformerModel): @staticmethod def add_args(parser): super(ConvtransformerEmformer, ConvtransformerEmformer).add_args(parser) parser.add_argument( "--segment-length", type=int, metavar="N", help="length of each segment (not including left context / right context)", ) parser.add_argument( "--segment-left-context", type=int, help="length of left context in a segment", ) parser.add_argument( "--segment-right-context", type=int, help="length of right context in a segment", ) parser.add_argument( "--max-memory-size", type=int, default=-1, help="Right context for the segment.", ) parser.add_argument( "--amtrf-tanh-on-mem", default=False, action="store_true", help="whether to use tanh on memory vector", ) @classmethod def build_encoder(cls, args): encoder = ConvTransformerEmformerEncoder(args) if getattr(args, "load_pretrained_encoder_from", None): encoder = checkpoint_utils.load_pretrained_component_from_model( component=encoder, checkpoint=args.load_pretrained_encoder_from ) return encoder @register_model_architecture( "convtransformer_emformer", "convtransformer_emformer", ) def convtransformer_emformer_base(args): convtransformer_espnet(args)
7,162
33.941463
123
py
sign-topic
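A small arithmetic sketch of the context conversion done in ConvTransformerEmformerEncoder.__init__ above: the left/right segment contexts are given in input frames and divided by the convolutional subsampling stride (fixed to 4 by conv_layer_stride) to obtain transformer-layer steps. The example frame counts are assumptions.

stride = 4                       # conv_layer_stride() above returns 4
segment_left_context = 32        # example values in frames (assumptions)
segment_right_context = 16

context_config = [segment_left_context // stride, segment_right_context // stride]
print(context_config)            # [8, 4] transformer-layer steps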
sign-topic-main/examples/simultaneous_translation/tests/test_text_models.py
import argparse import unittest from typing import Any, Dict import torch from examples.simultaneous_translation.models import ( transformer_monotonic_attention ) from tests.test_roberta import FakeTask DEFAULT_CONFIG = { "attention_eps": 1e-6, "mass_preservation": True, "noise_type": "flat", "noise_mean": 0.0, "noise_var": 1.0, "energy_bias_init": -2, "energy_bias": True } PAD_INDEX = 1 def generate_config(overrides_kv): new_dict = {key: value for key, value in DEFAULT_CONFIG.items()} for key, value in overrides_kv.items(): new_dict[key] = value return new_dict def make_sample_with_padding(longer_src=False) -> Dict[str, Any]: tokens_1 = torch.LongTensor( [ [2, 10, 11, 12, 13, 14, 15, 10, 11, 12, 13, 14, 15, 2], [ 2, 11, 12, 14, 15, 10, 11, 12, 13, 14, 15, 2, PAD_INDEX, PAD_INDEX ], ] ) tokens_2 = torch.LongTensor( [ [2, 11, 12, 13, 14, 2, PAD_INDEX, PAD_INDEX], [2, 11, 22, 33, 2, PAD_INDEX, PAD_INDEX, PAD_INDEX] ] ) if longer_src: src_tokens = tokens_1[:, 1:] prev_output_tokens = tokens_2 else: src_tokens = tokens_2[:, 1:8] prev_output_tokens = tokens_1 src_lengths = src_tokens.ne(PAD_INDEX).sum(dim=1).long() sample = { "net_input": { "src_tokens": src_tokens, "prev_output_tokens": prev_output_tokens, "src_lengths": src_lengths, }, "target": prev_output_tokens[:, 1:], } return sample def build_transformer_monotonic_attention(**extra_args: Any): overrides = { # Use characteristics dimensions "encoder_embed_dim": 12, "encoder_ffn_embed_dim": 14, "decoder_embed_dim": 12, "decoder_ffn_embed_dim": 14, # Disable dropout so we have comparable tests. "dropout": 0, "attention_dropout": 0, "activation_dropout": 0, "encoder_layerdrop": 0, } overrides.update(extra_args) # Overrides the defaults from the parser args = argparse.Namespace(**overrides) transformer_monotonic_attention.monotonic_tiny_architecture(args) torch.manual_seed(0) task = FakeTask(args) return ( transformer_monotonic_attention .TransformerModelSimulTrans .build_model(args, task) ) def expected_alignment_formula( p_choose, mass_perservation=True, padding_mask=None ): # Online and Linear-Time Attention by Enforcing Monotonic Alignments # https://arxiv.org/pdf/1704.00784.pdf # Eq 18, 19 bsz, tgt_len, src_len = p_choose.size() alpha = torch.zeros_like(p_choose) if padding_mask is not None: bsz_pad = padding_mask.size(0) num_heads = int(bsz / bsz_pad) padding_mask = ( padding_mask .unsqueeze(1) .expand([bsz_pad, num_heads, src_len]) .contiguous() .view(-1, src_len) ) p_choose = p_choose.masked_fill(padding_mask.unsqueeze(1), 0) for bsz_i in range(bsz): for i in range(tgt_len): for j in range(src_len): if i == 0: if j == 0: # First source token alpha[bsz_i, i, j] = p_choose[bsz_i, i, j] else: # First target token alpha[bsz_i, i, j] = ( p_choose[bsz_i, i, j] * torch.prod( 1 - p_choose[bsz_i, i, :j] ) ) else: alpha[bsz_i, i, j] = alpha[bsz_i, i - 1, j] for k in range(j): alpha[bsz_i, i, j] += ( alpha[bsz_i, i - 1, k] * torch.prod( 1 - p_choose[bsz_i, i, k:j] ) ) alpha[bsz_i, i, j] *= p_choose[bsz_i, i, j] alpha = alpha.masked_fill(padding_mask.unsqueeze(1), 0) if mass_perservation: alpha = mass_perservation_formula(alpha, False, padding_mask) return alpha def mass_perservation_formula(alpha, left_padding=False, padding_mask=None): if padding_mask is None or alpha.size(-1) == 1: if alpha.size(-1) > 1: alpha[:, :, -1] = 1 - alpha[:, :, :-1].sum(dim=-1) return alpha src_lens = (padding_mask.logical_not()).sum(dim=1).long() bsz, tgt_len, src_len = alpha.size() assert ( not left_padding or (left_padding and (not padding_mask[:, 0].any())) ) 
alpha = alpha.masked_fill(padding_mask.unsqueeze(1), 0) for bsz_i in range(bsz): if left_padding: alpha[bsz_i, :, -1] = ( 1 - alpha[bsz_i, :, :-1].sum(dim=-1) ) else: alpha[bsz_i, :, src_lens[bsz_i] - 1] = ( 1 - alpha[bsz_i, :, :src_lens[bsz_i] - 1].sum(dim=-1) ) return alpha def expected_soft_attention_formula( alpha, soft_energy, padding_mask=None, chunksize=1e10, ): # Monotonic Infinite Lookback Attention for Simultaneous Machine Translation # https://arxiv.org/pdf/1906.05218.pdf # Eq 14 # Monotonic Chunkwise Attention # https://arxiv.org/abs/1712.05382 # Eq 17 bsz, tgt_len, src_len = alpha.size() beta = torch.zeros_like(alpha) if padding_mask is not None: bsz_pad = padding_mask.size(0) num_heads = int(bsz / bsz_pad) # Expanding for potential head dimension padding_mask = ( padding_mask .unsqueeze(1) .expand([bsz_pad, num_heads, src_len]) .contiguous() .view(-1, src_len) ) soft_energy = soft_energy.masked_fill(padding_mask.unsqueeze(1), float('-inf')) for bsz_i in range(bsz): for i in range(tgt_len): for j in range(src_len): for k in range(j, min([src_len, j + chunksize])): if not padding_mask[bsz_i, j]: beta[bsz_i, i, j] += ( alpha[bsz_i, i, k] * torch.exp(soft_energy[bsz_i, i, j]) / torch.sum(torch.exp(soft_energy[bsz_i, i, max([0, k - chunksize + 1]):k + 1])) ) return beta class MonotonicAttentionTestAbstractClass(object): def test_forward(self): sample = make_sample_with_padding() out, _ = self.model.forward(**sample["net_input"]) loss = out.sum() loss.backward() def test_p_choose(self): sample = make_sample_with_padding() _, extra_out = self.model.forward(**sample["net_input"]) for item in extra_out.attn_list: p_choose = item["p_choose"] self.assertTrue(p_choose.le(1.0).all()) self.assertTrue(p_choose.ge(0.0).all()) def test_expected_alignment(self): for longer_src in [True, False]: sample = make_sample_with_padding(longer_src) _, extra_out = self.model.forward(**sample["net_input"]) for item in extra_out.attn_list: p_choose = item["p_choose"] alpha_system = item["alpha"] self.assertTrue(p_choose.size() == alpha_system.size()) bsz, num_head, tgt_len, src_len = alpha_system.size() alpha_system = alpha_system.view(-1, tgt_len, src_len) p_choose = p_choose.view(-1, tgt_len, src_len) alpha_real = expected_alignment_formula( p_choose, self.model.decoder.layers[0].encoder_attn.mass_preservation, sample["net_input"]["src_tokens"].eq(PAD_INDEX) ) self.assertTrue( torch.abs(alpha_system - alpha_real).le(5e-5).all(), ) class HardMonotonicAttentionTestCase( unittest.TestCase, MonotonicAttentionTestAbstractClass ): def setUp(self): self.model = build_transformer_monotonic_attention( **generate_config({"simul_type": "hard_aligned"}) ) class InfiniteLookbackTestCase( unittest.TestCase, MonotonicAttentionTestAbstractClass ): def setUp(self): self.model = build_transformer_monotonic_attention( **generate_config( { "simul_type": "infinite_lookback" } ) ) self.model.train() def test_fp16_for_long_input(self): sample = { "net_input": { "src_tokens": torch.LongTensor([7] * 1000 + [2]).cuda().unsqueeze(0), "prev_output_tokens": torch.LongTensor([7] * 1000 + [2]).cuda().unsqueeze(0), "src_lengths": torch.LongTensor([1000]).cuda(), }, "target": torch.LongTensor([2] + [7] * 1000).unsqueeze(0).cuda() } self.model.cuda().half() _, extra_out = self.model.forward(**sample["net_input"]) for item in extra_out.attn_list: for key in ["p_choose", "alpha", "beta", "soft_energy"]: self.assertFalse(torch.isnan(item[key]).any()) def test_expected_attention(self): for longer_src in [True, False]: sample = 
make_sample_with_padding(longer_src) _, extra_out = self.model.forward(**sample["net_input"]) for item in extra_out.attn_list: p_choose = item["p_choose"] alpha_system = item["alpha"] beta_system = item["beta"] soft_energy_system = item["soft_energy"] self.assertTrue(beta_system.size() == alpha_system.size()) self.assertTrue(p_choose.size() == alpha_system.size()) bsz, num_head, tgt_len, src_len = alpha_system.size() alpha_system = alpha_system.view(-1, tgt_len, src_len) beta_system = beta_system.view(-1, tgt_len, src_len) p_choose = p_choose.view(-1, tgt_len, src_len) soft_energy_system = soft_energy_system.view(-1, tgt_len, src_len) alpha_real = expected_alignment_formula( p_choose, self.model.decoder.layers[0].encoder_attn.mass_preservation, sample["net_input"]["src_tokens"].eq(PAD_INDEX) ) beta_real = expected_soft_attention_formula( alpha_real, soft_energy_system, sample["net_input"]["src_tokens"].eq(PAD_INDEX), chunksize=getattr( self.model.decoder.layers[0].encoder_attn, "chunk_size", int(1e10) ) ) self.assertTrue( torch.abs(beta_system - beta_real).le(1e-5).all(), ) class ChunkwiswTestCase( InfiniteLookbackTestCase ): def setUp(self): self.model = build_transformer_monotonic_attention( **generate_config( { "simul_type": "chunkwise", "mocha_chunk_size": 3 } ) ) class WaitkTestCase(InfiniteLookbackTestCase): def setUp(self): self.model = build_transformer_monotonic_attention( **generate_config( { "simul_type": "waitk", "waitk_lagging": 3, } ) ) def check_waitk(self, p_choose, lagging, padding_mask): bsz, tgt_len, src_len = p_choose.size() for bsz_i in range(bsz): for i in range(tgt_len): for j in range(src_len): if not padding_mask[bsz_i, j]: if j - i == lagging - 1: self.assertTrue(p_choose[bsz_i, i, j] == 1) else: self.assertTrue(p_choose[bsz_i, i, j] == 0) def test_waitk_p_choose(self): for longer_src in [True, False]: for k in [1, 3, 10, 20, 100]: sample = make_sample_with_padding(longer_src) model = build_transformer_monotonic_attention( **generate_config( { "simul_type": "waitk", "waitk_lagging": k, } ) ) model.train() _, extra_out = model.forward(**sample["net_input"]) for item in extra_out.attn_list: p_choose = item["p_choose"] bsz, num_heads, tgt_len, src_len = p_choose.size() padding_mask = sample["net_input"]["src_tokens"].eq(PAD_INDEX) padding_mask = ( padding_mask .unsqueeze(1) .expand([bsz, num_heads, src_len]) .contiguous() .view(-1, src_len) ) p_choose = p_choose.view(bsz * num_heads, tgt_len, src_len) self.check_waitk(p_choose, k, padding_mask)
13,524
32.14951
108
py
sign-topic
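A tiny numeric instance of the recursion checked by expected_alignment_formula above (Raffel et al., 2017, Eqs. 18-19), for a single 2x2 p_choose with no padding; the values are made up so the result can be verified by hand.

import torch

def expected_alignment(p):               # p: tgt_len x src_len, single sequence
    tgt_len, src_len = p.size()
    alpha = torch.zeros_like(p)
    for i in range(tgt_len):
        for j in range(src_len):
            if i == 0:
                prev = 1.0 if j == 0 else float(torch.prod(1 - p[0, :j]))
            else:
                prev = float(alpha[i - 1, j])
                for k in range(j):
                    prev += float(alpha[i - 1, k] * torch.prod(1 - p[i, k:j]))
            alpha[i, j] = p[i, j] * prev
    return alpha

p = torch.tensor([[0.5, 1.0], [0.0, 1.0]])
print(expected_alignment(p))             # [[0.5, 0.5], [0.0, 1.0]]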
sign-topic-main/examples/simultaneous_translation/tests/test_alignment_train.py
import unittest

import numpy as np
import torch
import hypothesis.strategies as st

from hypothesis import assume, given, settings
from torch.testing._internal.common_utils import TestCase

from examples.simultaneous_translation.utils.functions import exclusive_cumprod


TEST_CUDA = torch.cuda.is_available()


class AlignmentTrainTest(TestCase):
    def _test_custom_alignment_train_ref(self, p_choose, eps):
        cumprod_1mp = exclusive_cumprod(1 - p_choose, dim=2, eps=eps)
        cumprod_1mp_clamp = torch.clamp(cumprod_1mp, eps, 1.0)

        bsz = p_choose.size(0)
        tgt_len = p_choose.size(1)
        src_len = p_choose.size(2)

        alpha_0 = p_choose.new_zeros([bsz, 1, src_len])
        alpha_0[:, :, 0] = 1.0

        previous_alpha = [alpha_0]

        for i in range(tgt_len):
            # p_choose: bsz, tgt_len, src_len
            # cumprod_1mp_clamp : bsz, tgt_len, src_len
            # previous_alpha[i]: bsz, 1, src_len
            # alpha_i: bsz, src_len
            alpha_i = (
                p_choose[:, i]
                * cumprod_1mp[:, i]
                * torch.cumsum(
                    previous_alpha[i][:, 0] / cumprod_1mp_clamp[:, i], dim=1
                )
            ).clamp(0, 1.0)

            previous_alpha.append(alpha_i.unsqueeze(1))

        # alpha: bsz * num_heads, tgt_len, src_len
        alpha = torch.cat(previous_alpha[1:], dim=1)
        return alpha

    def _test_custom_alignment_train_impl(self, p_choose, alpha, eps):
        if p_choose.is_cuda:
            from alignment_train_cuda_binding import alignment_train_cuda  # @manual=//deeplearning/projects/fairseq-py:alignment_train_cuda_binding

            alignment_train_cuda(p_choose, alpha, eps)
        else:
            from alignment_train_cpu_binding import alignment_train_cpu  # @manual=//deeplearning/projects/fairseq-py:alignment_train_cpu_binding

            alignment_train_cpu(p_choose, alpha, eps)

    @settings(deadline=None)
    @given(
        bsz=st.integers(1, 100),
        tgt_len=st.integers(1, 100),
        src_len=st.integers(1, 550),
        device=st.sampled_from(["cpu", "cuda"]),
    )
    def test_alignment_train(self, bsz, tgt_len, src_len, device):
        eps = 1e-6

        assume(device == "cpu" or TEST_CUDA)

        p_choose = torch.rand(bsz, tgt_len, src_len, device=device)

        # run the alignment with the custom operator
        alpha_act = p_choose.new_zeros([bsz, tgt_len, src_len])
        self._test_custom_alignment_train_impl(p_choose, alpha_act, eps)

        # run the alignment with the ref implementation
        alpha_ref = self._test_custom_alignment_train_ref(p_choose, eps)

        # verify the results
        alpha_act = alpha_act.cpu().detach().numpy()
        alpha_ref = alpha_ref.cpu().detach().numpy()
        np.testing.assert_allclose(
            alpha_act,
            alpha_ref,
            atol=1e-3,
            rtol=1e-3,
        )


if __name__ == "__main__":
    unittest.main()
2,989
32.595506
148
py
sign-topic
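A toy illustration of the exclusive cumulative product that the reference implementation above relies on: position j holds the product of (1 - p) over all positions before j, i.e. the cumulative product shifted right with a leading 1. The helper itself lives in examples.simultaneous_translation.utils.functions; this only demonstrates its assumed semantics on a concrete vector.

import torch

p = torch.tensor([[0.2, 0.5, 0.9]])
one_minus = 1.0 - p
exclusive = torch.cat(
    [torch.ones_like(one_minus[:, :1]), one_minus[:, :-1]], dim=1
).cumprod(dim=1)
print(exclusive)   # tensor([[1.0000, 0.8000, 0.4000]])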
sign-topic-main/examples/simultaneous_translation/utils/monotonic_attention.py
from typing import Optional
import torch
from torch import Tensor
from examples.simultaneous_translation.utils.functions import (
    exclusive_cumprod,
    prob_check,
    moving_sum,
)


def expected_alignment_from_p_choose(
    p_choose: Tensor,
    padding_mask: Optional[Tensor] = None,
    eps: float = 1e-6
):
    """
    Calculating expected alignment from stepwise probability

    Reference:
    Online and Linear-Time Attention by Enforcing Monotonic Alignments
    https://arxiv.org/pdf/1704.00784.pdf

    q_{i,j} = (1 - p_{i,j-1}) * q_{i,j-1} + alpha_{i-1,j}
    alpha_{i,j} = p_{i,j} * q_{i,j}

    Parallel solution:
    alpha_i = p_i * cumprod(1 - p_i) * cumsum(alpha_{i-1} / cumprod(1 - p_i))

    ============================================================
    Expected input size
    p_choose: bsz, tgt_len, src_len
    """
    prob_check(p_choose)

    # p_choose: bsz, tgt_len, src_len
    bsz, tgt_len, src_len = p_choose.size()
    dtype = p_choose.dtype

    p_choose = p_choose.float()

    if padding_mask is not None:
        p_choose = p_choose.masked_fill(padding_mask.unsqueeze(1), 0.0)

    if p_choose.is_cuda:
        p_choose = p_choose.contiguous()
        from alignment_train_cuda_binding import alignment_train_cuda as alignment_train
    else:
        from alignment_train_cpu_binding import alignment_train_cpu as alignment_train

    alpha = p_choose.new_zeros([bsz, tgt_len, src_len])
    alignment_train(p_choose, alpha, eps)

    # Mix precision to prevent overflow for fp16
    alpha = alpha.type(dtype)

    prob_check(alpha)

    return alpha


def expected_soft_attention(
    alpha: Tensor,
    soft_energy: Tensor,
    padding_mask: Optional[Tensor] = None,
    chunk_size: Optional[int] = None,
    eps: float = 1e-10
):
    """
    Function to compute expected soft attention for
    monotonic infinite lookback attention from
    expected alignment and soft energy.

    Reference:
    Monotonic Chunkwise Attention
    https://arxiv.org/abs/1712.05382

    Monotonic Infinite Lookback Attention for Simultaneous Machine Translation
    https://arxiv.org/abs/1906.05218

    alpha: bsz, tgt_len, src_len
    soft_energy: bsz, tgt_len, src_len
    padding_mask: bsz, src_len
    """
    if padding_mask is not None:
        alpha = alpha.masked_fill(padding_mask.unsqueeze(1), 0.0)
        soft_energy = soft_energy.masked_fill(
            padding_mask.unsqueeze(1), -float("inf")
        )

    prob_check(alpha)

    dtype = alpha.dtype

    alpha = alpha.float()
    soft_energy = soft_energy.float()

    soft_energy = soft_energy - soft_energy.max(dim=2, keepdim=True)[0]
    exp_soft_energy = torch.exp(soft_energy) + eps

    if chunk_size is not None:
        # Chunkwise
        beta = (
            exp_soft_energy
            * moving_sum(
                alpha / (eps + moving_sum(exp_soft_energy, chunk_size, 1)),
                1, chunk_size
            )
        )
    else:
        # Infinite lookback
        # Notice that infinite lookback is a special case of chunkwise
        # where chunksize = inf
        inner_items = alpha / (eps + torch.cumsum(exp_soft_energy, dim=2))

        beta = (
            exp_soft_energy
            * torch.cumsum(inner_items.flip(dims=[2]), dim=2)
            .flip(dims=[2])
        )

    if padding_mask is not None:
        beta = beta.masked_fill(
            padding_mask.unsqueeze(1).to(torch.bool), 0.0)

    # Mix precision to prevent overflow for fp16
    beta = beta.type(dtype)

    beta = beta.clamp(0, 1)

    prob_check(beta)

    return beta


def mass_preservation(
    alpha: Tensor,
    padding_mask: Optional[Tensor] = None,
    left_padding: bool = False
):
    """
    Function to compute the mass preservation for alpha.
    This means that the residual weights of alpha will be assigned
    to the last token.

    Reference:
    Monotonic Infinite Lookback Attention for Simultaneous Machine Translation
    https://arxiv.org/abs/1906.05218

    alpha: bsz, tgt_len, src_len
    padding_mask: bsz, src_len
    left_padding: bool
    """
    prob_check(alpha)

    if padding_mask is not None:
        if not left_padding:
            assert not padding_mask[:, 0].any(), (
                "Found padding at the beginning of the sequence."
            )

        alpha = alpha.masked_fill(padding_mask.unsqueeze(1), 0.0)

    if left_padding or padding_mask is None:
        residuals = 1 - alpha[:, :, :-1].sum(dim=-1).clamp(0, 1)
        alpha[:, :, -1] = residuals
    else:
        # right padding
        _, tgt_len, src_len = alpha.size()
        residuals = 1 - alpha.sum(dim=-1, keepdim=True).clamp(0, 1)
        src_lens = src_len - padding_mask.sum(dim=1, keepdim=True)
        src_lens = src_lens.expand(-1, tgt_len).contiguous()

        # add back the last value
        residuals += alpha.gather(2, src_lens.unsqueeze(2) - 1)
        alpha = alpha.scatter(2, src_lens.unsqueeze(2) - 1, residuals)

    prob_check(alpha)

    return alpha
4,975
26.491713
88
py
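expected_alignment_from_p_choose above requires the compiled alignment_train bindings, but the soft-attention step can be reproduced in plain PyTorch. A rough sketch of the infinite-lookback case, assuming alpha is already a valid distribution over source positions per target step (the function name and toy sizes are invented):

import torch

def infinite_lookback_beta(alpha, soft_energy, eps=1e-10):
    # alpha, soft_energy: (bsz, tgt_len, src_len)
    soft_energy = soft_energy - soft_energy.max(dim=2, keepdim=True)[0]
    exp_e = torch.exp(soft_energy) + eps
    inner = alpha / (eps + torch.cumsum(exp_e, dim=2))
    # reverse cumulative sum over source positions, as in the code above
    beta = exp_e * torch.cumsum(inner.flip(dims=[2]), dim=2).flip(dims=[2])
    return beta.clamp(0, 1)

bsz, tgt_len, src_len = 2, 3, 4
alpha = torch.softmax(torch.randn(bsz, tgt_len, src_len), dim=2)
energy = torch.randn(bsz, tgt_len, src_len)
beta = infinite_lookback_beta(alpha, energy)
print(beta.shape)       # torch.Size([2, 3, 4])
print(beta.sum(dim=2))  # each target step sums to ~1 when alpha does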
sign-topic
sign-topic-main/examples/simultaneous_translation/utils/p_choose_strategy.py
from typing import Optional, Dict from torch import Tensor import torch def waitk_p_choose( tgt_len: int, src_len: int, bsz: int, waitk_lagging: int, key_padding_mask: Optional[Tensor] = None, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None ): max_src_len = src_len if incremental_state is not None: # Retrieve target length from incremental states # For inference the length of query is always 1 max_tgt_len = incremental_state["steps"]["tgt"] assert max_tgt_len is not None max_tgt_len = int(max_tgt_len) else: max_tgt_len = tgt_len if max_src_len < waitk_lagging: if incremental_state is not None: max_tgt_len = 1 return torch.zeros( bsz, max_tgt_len, max_src_len ) # Assuming the p_choose looks like this for wait k=3 # src_len = 6, max_tgt_len = 5 # [0, 0, 1, 0, 0, 0, 0] # [0, 0, 0, 1, 0, 0, 0] # [0, 0, 0, 0, 1, 0, 0] # [0, 0, 0, 0, 0, 1, 0] # [0, 0, 0, 0, 0, 0, 1] # linearize the p_choose matrix: # [0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0...] # The indices of linearized matrix that equals 1 is # 2 + 6 * 0 # 3 + 6 * 1 # ... # n + src_len * n + k - 1 = n * (src_len + 1) + k - 1 # n from 0 to max_tgt_len - 1 # # First, generate the indices (activate_indices_offset: bsz, max_tgt_len) # Second, scatter a zeros tensor (bsz, max_tgt_len * src_len) # with activate_indices_offset # Third, resize the tensor to (bsz, max_tgt_len, src_len) activate_indices_offset = ( ( torch.arange(max_tgt_len) * (max_src_len + 1) + waitk_lagging - 1 ) .unsqueeze(0) .expand(bsz, max_tgt_len) .long() ) if key_padding_mask is not None: if key_padding_mask[:, 0].any(): # Left padding activate_indices_offset += ( key_padding_mask.sum(dim=1, keepdim=True) ) # Need to clamp the indices that are too large activate_indices_offset = ( activate_indices_offset .clamp( 0, min( [ max_tgt_len, max_src_len - waitk_lagging + 1 ] ) * max_src_len - 1 ) ) p_choose = torch.zeros(bsz, max_tgt_len * max_src_len) p_choose = p_choose.scatter( 1, activate_indices_offset, 1.0 ).view(bsz, max_tgt_len, max_src_len) if key_padding_mask is not None: p_choose = p_choose.to(key_padding_mask) p_choose = p_choose.masked_fill(key_padding_mask.unsqueeze(1), 0) if incremental_state is not None: p_choose = p_choose[:, -1:] return p_choose.float() def learnable_p_choose( energy, noise_mean: float = 0.0, noise_var: float = 0.0, training: bool = True ): """ Calculating step wise prob for reading and writing 1 to read, 0 to write energy: bsz, tgt_len, src_len """ noise = 0 if training: # add noise here to encourage discretness noise = ( torch.normal(noise_mean, noise_var, energy.size()) .type_as(energy) .to(energy.device) ) p_choose = torch.sigmoid(energy + noise) # p_choose: bsz * self.num_heads, tgt_len, src_len return p_choose
3,445
26.133858
78
py
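The comment block inside waitk_p_choose describes the hard read/write pattern before it is linearised and scattered. A self-contained sketch of that pattern (padding, incremental decoding and the source-exhaustion corner case handled by the full implementation are ignored here):

import torch

def waitk_pattern(tgt_len, src_len, k):
    p = torch.zeros(tgt_len, src_len)
    for i in range(tgt_len):
        # target step i writes after reading k - 1 + i source tokens
        p[i, k - 1 + i] = 1.0
    return p

print(waitk_pattern(tgt_len=4, src_len=6, k=3))
# tensor([[0., 0., 1., 0., 0., 0.],
#         [0., 0., 0., 1., 0., 0.],
#         [0., 0., 0., 0., 1., 0.],
#         [0., 0., 0., 0., 0., 1.]])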
sign-topic
sign-topic-main/examples/simultaneous_translation/utils/functions.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch def prob_check(tensor, eps=1e-10): assert not torch.isnan(tensor).any(), ( "Nan in a probability tensor." ) # Add the eps here to prevent errors introduced by precision assert tensor.le(1.0 + eps).all() and tensor.ge(0.0 - eps).all(), ( "Incorrect values in a probability tensor" ", 0.0 <= tensor <= 1.0" ) def exclusive_cumprod(tensor, dim: int, eps: float = 1e-10): """ Implementing exclusive cumprod. There is cumprod in pytorch, however there is no exclusive mode. cumprod(x) = [x1, x1x2, x2x3x4, ..., prod_{i=1}^n x_i] exclusive means cumprod(x) = [1, x1, x1x2, x1x2x3, ..., prod_{i=1}^{n-1} x_i] """ tensor_size = list(tensor.size()) tensor_size[dim] = 1 return_tensor = safe_cumprod( torch.cat([torch.ones(tensor_size).type_as(tensor), tensor], dim=dim), dim=dim, eps=eps, ) if dim == 0: return return_tensor[:-1] elif dim == 1: return return_tensor[:, :-1] elif dim == 2: return return_tensor[:, :, :-1] else: raise RuntimeError( "Cumprod on dimension 3 and more is not implemented" ) def safe_cumprod(tensor, dim: int, eps: float = 1e-10): """ An implementation of cumprod to prevent precision issue. cumprod(x) = [x1, x1x2, x1x2x3, ....] = [exp(log(x1)), exp(log(x1) + log(x2)), exp(log(x1) + log(x2) + log(x3)), ...] = exp(cumsum(log(x))) """ if (tensor + eps < 0).any().item(): raise RuntimeError( "Safe cumprod can only take non-negative tensors as input." "Consider use torch.cumprod if you want to calculate negative values." ) log_tensor = torch.log(tensor + eps) cumsum_log_tensor = torch.cumsum(log_tensor, dim) exp_cumsum_log_tensor = torch.exp(cumsum_log_tensor) return exp_cumsum_log_tensor def moving_sum(x, start_idx: int, end_idx: int): """ From MONOTONIC CHUNKWISE ATTENTION https://arxiv.org/pdf/1712.05382.pdf Equation (18) x = [x_1, x_2, ..., x_N] MovingSum(x, start_idx, end_idx)_n = Sigma_{m=n−(start_idx−1)}^{n+end_idx-1} x_m for n in {1, 2, 3, ..., N} x : src_len, batch_size start_idx : start idx end_idx : end idx Example src_len = 5 batch_size = 3 x = [[ 0, 5, 10], [ 1, 6, 11], [ 2, 7, 12], [ 3, 8, 13], [ 4, 9, 14]] MovingSum(x, 3, 1) = [[ 0, 5, 10], [ 1, 11, 21], [ 3, 18, 33], [ 6, 21, 36], [ 9, 24, 39]] MovingSum(x, 1, 3) = [[ 3, 18, 33], [ 6, 21, 36], [ 9, 24, 39], [ 7, 17, 27], [ 4, 9, 14]] """ # TODO: Make dimension configurable assert start_idx > 0 and end_idx > 0 batch_size, tgt_len, src_len = x.size() x = x.view(-1, src_len).unsqueeze(1) # batch_size, 1, src_len moving_sum_weight = torch.ones([1, 1, end_idx + start_idx - 1]).type_as(x) moving_sum = torch.nn.functional.conv1d( x, moving_sum_weight, padding=start_idx + end_idx - 1 ).squeeze(1) moving_sum = moving_sum[:, end_idx:-start_idx] assert src_len == moving_sum.size(1) assert batch_size * tgt_len == moving_sum.size(0) moving_sum = moving_sum.view(batch_size, tgt_len, src_len) return moving_sum
3,535
27.063492
84
py
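exclusive_cumprod and safe_cumprod above differ only in where the running product starts and in how numerical precision is protected. A quick standalone illustration of both ideas on a toy tensor:

import torch

x = torch.tensor([[0.5, 0.5, 0.5, 0.5]])
inclusive = torch.cumprod(x, dim=1)  # [0.5, 0.25, 0.125, 0.0625]
# exclusive form shifts right and starts at 1: [1, 0.5, 0.25, 0.125]
exclusive = torch.cat([torch.ones(1, 1), inclusive[:, :-1]], dim=1)
# safe form: same product computed as exp(cumsum(log(x + eps)))
eps = 1e-10
safe = torch.exp(torch.cumsum(torch.log(x + eps), dim=1))
print(inclusive)
print(exclusive)
print(torch.allclose(safe, inclusive, atol=1e-6))  # True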
sign-topic
sign-topic-main/examples/SL_topic_detection/analysis_outputs.py
# Script for analizing outputs produced by the models. import math from typing import Tuple from typing import Union from typing import List from copy import deepcopy import argparse import ast import pandas as pd from sympy import ShapeError import numpy as np from sklearn import manifold from sklearn.metrics import precision_recall_curve from sklearn.metrics import average_precision_score from sklearn.metrics import PrecisionRecallDisplay from sklearn.metrics import ConfusionMatrixDisplay from sklearn.metrics import classification_report from sklearn.calibration import calibration_curve, CalibrationDisplay from matplotlib.gridspec import GridSpec import matplotlib.pyplot as plt from matplotlib.lines import Line2D from matplotlib import cm import torch import torchvision import torchvision.transforms.functional as F from PIL import ImageFont, ImageDraw, ImageOps # from transformer_contributions_nmt.wrappers.transformer_wrapper import FairseqTransformerHub tab10 = [ "#1f77b4", "#ff7f0e", "#2ca02c", "#d62728", "#9467bd", "#8c564b", "#e377c2", "#7f7f7f", "#bcbd22", "#17becf" ] def load_data_dict(file_path: str) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: data = torch.load(file_path, map_location=torch.device('cpu')) if type(data) == type(dict()): return data # ['embeddings'], data['targets'], data['preds'], data['att_time'], ['att_inputs'] raise TypeError(f'Expected data container to be of type `{type(dict)}` but got `{type(data)}` instead.') def plot_precision_recall( targets_binary: List[int], preds_binary: List[int], model: str, data_type: str, split: str, average: str, ) -> None: precision = dict() recall = dict() average_precision = dict() precision[average], recall[average], _ = precision_recall_curve( targets_binary.ravel(), preds_binary.ravel() ) average_precision[average] = average_precision_score(targets_binary, preds_binary, average=average) display = PrecisionRecallDisplay( recall=recall[average], precision=precision[average], average_precision=average_precision[average], ) display.plot() _ = display.ax_.set_title(f"{average}-average; {model} - {data_type} - {split}") plt.savefig(f'./outputs/{average}-average_precision_recall_{model}_{data_type}_{split}.png') plt.close() for i in range(10): precision[i], recall[i], _ = precision_recall_curve(targets_binary[:, i], preds_binary[:, i]) average_precision[i] = average_precision_score(targets_binary[:, i], preds_binary[:, i]) _, ax = plt.subplots(figsize=(8, 8)) for i, color in zip(range(10), tab10): display = PrecisionRecallDisplay( recall=recall[i], precision=precision[i], average_precision=average_precision[i], ) display.plot(ax=ax, name=f"class {i}", color=color) display = PrecisionRecallDisplay( recall=recall[average], precision=precision[average], average_precision=average_precision[average], ) display.plot(ax=ax, name=f"{average}-average precision-recall", color="gold") handles, labels = display.ax_.get_legend_handles_labels() # set the legend and the axes ax.set_xlim([0.0, 1.0]) ax.set_ylim([0.0, 1.05]) ax.legend(handles=handles, labels=labels, loc="best") ax.set_title(f'{model} - {data_type} - {split}') plt.savefig(f'./outputs/{average}-average_precision_recall_multiclass_{model}_{data_type}_{split}.png') plt.close() def calibration_plots( targets: Union[List[int], torch.Tensor], logits: torch.Tensor, model: str, data_type: str, split: str, ) -> None: print(targets) print(logits) logits = logits.squeeze() # fig = plt.figure(figsize=(12, 19)) # # gs = GridSpec(10, 2) # # ax_calibration_curve = fig.add_subplot(gs[:2, 
:2]) # # calibration_displays = {} # for categ in range(10): # targets_binary = [1 if categ == int(tgt) else 0 for tgt in targets] # logits_categ = logits[:, categ].squeeze().tolist() # prob_true, prob_pred = calibration_curve(targets_binary, logits_categ, n_bins=10, normalize=True,) # display = CalibrationDisplay( # prob_true, # prob_pred, # logits_categ, # ) # display.plot(ax=ax_calibration_curve, name=f"class {categ}", color=tab10[categ],) # calibration_displays[categ] = display gs = GridSpec(2, 1) fig = plt.figure(figsize=(8, 8)) ax_calibration_curve = fig.add_subplot(gs[0, 0]) logits_list = logits.tolist() targets_list = [] for tgt in targets.tolist(): targets_list.append([1 if categ == int(tgt) else 0 for categ in range(1, 11)]) targets_list = targets.tolist() # logits_list = logits.tolist() print(targets_list) print(logits_list) prob_true, prob_pred = calibration_curve(targets_list, logits_list, n_bins=10, normalize=True,) display = CalibrationDisplay( prob_true, prob_pred, logits_list, ) display.plot( ax=ax_calibration_curve, name=f"{model}", color=tab10[categ], ) plt.grid() plt.title(f'{model} - {data_type} - {split}') ax = fig.add_subplot(gs[1, 0]) ax.hist( display.y_prob, range=(0, 1), bins=10, label=categ+1, color=tab10[categ], ) ax.set(xlabel="Mean predicted probability", ylabel="Count") # # Add histogram # grid_positions = [(i, j) for i in range(2, 10) for j in range(0, 2)] # for categ in range(10): # row, col = grid_positions[categ] # ax = fig.add_subplot(gs[row, col]) # ax.hist( # calibration_displays[categ].y_prob, # range=(0, 1), # bins=10, # label=categ+1, # color=tab10[categ], # ) # ax.set(title=categ, xlabel="Mean predicted probability", ylabel="Count") plt.tight_layout() plt.savefig(f'./outputs/calibration_multiclass_{model}_{data_type}_{split}.png') plt.close() def plot_confusion_matrix( targets: Union[List[int], torch.Tensor], preds: Union[List[int], torch.Tensor], model: str, data_type: str, split: str, ) -> None: disp = ConfusionMatrixDisplay.from_predictions(targets, preds, cmap=plt.cm.Blues, colorbar=False) disp.figure_.suptitle(f'{model} - {data_type} - {split}') plt.savefig(f'./outputs/confusion_matrix_{model}_{data_type}_{split}.png') plt.close() def metrics_to_csv( targets: Union[List[int], torch.Tensor], preds: Union[List[int], torch.Tensor], model: str, data_type: str, split: str, ) -> None: report = classification_report( targets, preds, # labels=[i for i in range(1, 11)], # target_names=[i for i in range(1, 11)], digits=4, output_dict=True, zero_division='warn', ) report = pd.DataFrame.from_dict(report, orient='columns').transpose() report.to_csv(f'./outputs/metrics_report_{model}_{data_type}_{split}.csv') support = report.pop('support') report, weighted_avg = report.drop(report.tail(1).index),report.tail(1) report, macro_avg = report.drop(report.tail(1).index),report.tail(1) report, accuracy = report.drop(report.tail(1).index),report.tail(1) report = report.append(weighted_avg) report = report.append(macro_avg) accuracy = accuracy.iloc[0,0] ax = report.plot.bar( rot=0, width=0.7, edgecolor='white', linewidth=1.5, color=["#ff7f0e", "#bcbd22", "#8c564b"], figsize=(11, 5), ) ax.axes.set_xlim(-0.5,11.5) leg1 = ax.legend( loc='upper center', bbox_to_anchor=(0.5, 1.08), ncol=3, fancybox=True, shadow=True ) leg2 = ax.legend( [f"accuracy = " + "{:.2f}".format(accuracy*100)], handles=[ Line2D( [0], [0], marker='o', color='w', label=f"accuracy = " + "{:.2f} %".format(accuracy*100), markerfacecolor='g', markersize=0) ], loc='upper center', bbox_to_anchor=(0.85, 
1.065), borderaxespad=0, fontsize='x-large', frameon=False, ) ax.add_artist(leg1) plt.xticks([i for i in range(12)], [i for i in range(1, 11)] + ['w_avg', 'macro_avg']) plt.savefig(f'./outputs/metrics_barchart_{model}_{data_type}_{split}.png') plt.close() ax = support.iloc[0:10].plot.bar(rot=0, width=0.7, edgecolor='white', linewidth=1, color=tab10) ax.set_title(f"Samples per class in {split} set") plt.xticks([i for i in range(10)], [i for i in range(1, 11)]) plt.savefig(f'./outputs/metrics_support_{model}_{data_type}_{split}.png') plt.close() def analysis_of_errors( targets: Union[List[int], torch.Tensor], preds: Union[List[int], torch.Tensor], logits: Union[List[float], torch.Tensor], labels: List[str], model: str, data_type: str, split: str, ) -> None: from sklearn.metrics import precision_recall_fscore_support as score # targets = [1,2,3,4,5,6,7,8,9,0,9,8,7,6,5,1,2,1,1,4,1] # preds = [1,2,3,4,5,6,7,8,9,0,9,8,7,6,5,1,2,1,1,4,5] # logits = [] # labels = [f'class_{i}' for i in range(10)] # for i in range(21): # array = np.random.normal(0.5, 10, 10) # array /= np.sum(array) # logits.append(array.tolist()) # logits = torch.tensor(logits) from sklearn.preprocessing import label_binarize # Use label_binarize to fit into a multilabel setting targets_binary = label_binarize(targets, classes=[i for i in range(10)]) preds_binary = label_binarize(preds, classes=[i for i in range(10)]) # TODO: make sure that targets and preds are binarized the same way for average in ['micro', 'macro']: plot_precision_recall( targets_binary=targets_binary, preds_binary=preds_binary, model=model, data_type=data_type, split=split, average=average, ) plot_confusion_matrix( targets=targets, preds=preds, model=model, data_type=data_type, split=split, ) metrics_to_csv( targets=targets, preds=preds, model=model, data_type=data_type, split=split, ) def plot_att_time( att_time: torch.Tensor, video_id: str, model: str, data_type: str, ) -> None: att_time = att_time[0] # TODO: remove this print(att_time) if att_time.shape[0] == 1 and len(att_time.shape) == 2: att_time -= att_time.min(1, keepdim=True)[0] att_time /= att_time.max(1, keepdim=True)[0] elif len(att_time.shape) == 1: att_time -= att_time.min(0, keepdim=True)[0] att_time /= att_time.max(0, keepdim=True)[0] else: raise ShapeError('Please pass in a tensor corresponding to just one sample') if model == 'transformer': raise TypeError('Temporal attention visualization is not implemented for Transformer, only for PerceiverIO and LSTM.') att_time = att_time.tolist() print(f'len(att_time) = {len(att_time)}') text_timestamps = pd.read_csv( ( '/home/alvaro/Documents/ML_and_DL/How2Sign/TFG/Sign-Language-Topic-Detection/data/How2Sign/' 'train_csv_frames_redone_2.csv' ), sep = '\t' ) text_timestamps = text_timestamps[text_timestamps['VIDEO_ID'] == video_id] text_timestamps = text_timestamps.sort_values(by=['START_FRAME']) word_timestamp_list = [] if data_type == 'text': # TODO: retrieve tokenized text to visualize it pass else: for index, row in text_timestamps.iterrows(): print(f'index = {index}') text = row['SENTENCE'] words = text.split(' ') l = len(words) start_frame = row['START_FRAME'] - 8 if data_type == 'i3d' else row['START_FRAME'] end_frame = row['END_FRAME'] - 8 if data_type == 'i3d' else row['END_FRAME'] delta_t = end_frame - start_frame delta_w = delta_t / l word_frame = [(math.floor(i * delta_w), word) for i, word in enumerate(words)] expand = max(1, math.floor((delta_t / 100) ** 1.2)) print(f'expand = {expand}') x = [i for i in range(0, delta_t, 1)] y = [1 for _ 
in range(0, delta_t, 1)] print(f'(start_frame, end_frame) = {(start_frame, end_frame)}') c = att_time[start_frame:end_frame] # viz. weights dpi = 120 fig, ax = plt.subplots(figsize=(1000 / dpi * expand, 1000 / dpi * expand), dpi=dpi) ax.axis('off') ax.scatter(x, y, c=c, alpha=0.8, marker='s', s=1) for i, word in word_frame: print(f'i = {i}') ax.annotate(word, (x[i], y[i])) plt.set_cmap('YlOrBr') plt.savefig(f'./outputs/attention_{model}_{data_type}_{start_frame}_{end_frame}.png') plt.close() pass def viz_att( att_time: torch.Tensor, video_path: str, csv_path:str, model: str, data_type: str, ) -> None: """ To visualize attentions on video: * LSTM, PerceiverIO, Transformer: 1. do inference on the video's data with models trained on Calcula 2. load those att_time weights and pass them in to `viz_att` function * TransformerCLS: 1. do inference on the video's data with transformerCLS model trained on Calcula. It will be necessary to hand-code the call to get the ALTI contribution weights 2. load those ALTI weights and pass them in to `viz_att` function """ print(f'att_time.shape = {att_time.shape}') if model == 'transformerCLS': att_time_path = ( f'/home/alvaro/Documents/ML_and_DL/How2Sign/TFG/Sign-Language-Topic-Detection/fairseq-internal/' f'examples/SL_topic_detection/outputs/att_time_ALTI_transformerCLS_{data_type}_train.pkl' ) att_time = torch.load(att_time_path) else: att_time = att_time[0] att_time = att_time.squeeze() print(f'att_time.shape = {att_time.shape}') if att_time.shape[0] == 1 and len(att_time.shape) == 2: att_time -= att_time.min(1, keepdim=True)[0] att_time /= att_time.max(1, keepdim=True)[0] att_time /= att_time.mean(1, keepdim=True)[0] elif len(att_time.shape) == 1: att_time -= att_time.min() att_time /= att_time.max() att_time /= att_time.mean() else: raise ShapeError('Please pass in a tensor corresponding to just one sample') # gloss_timestamp = pd.read_csv(csv_path, sep=';') text_font = ImageFont.truetype('/home/alvaro/Documents/ML_and_DL/How2Sign/TFG/Sign-Language-Topic-Detection/data/How2Sign/att_viz/train/Verdana.ttf', 33) if data_type == 'spot_align': csv_path = '/home/alvaro/Documents/ML_and_DL/How2Sign/TFG/Sign-Language-Topic-Detection/data/How2Sign/spot_align/train.csv' mouthings = pd.read_csv(csv_path) mouthings = mouthings[mouthings['VIDEO_ID'] == '-EsVrbRTMU4'] idx = [5, 9, 19, 28, 35, 45, 63, 75, 80, 86, 101, 105, 107] words = list(mouthings['TEXT'])[0] w_frames = list(mouthings['MOUTHING_FRAME'])[0] words = ast.literal_eval(words) w_frames = ast.literal_eval(w_frames) words = [words[i] for i in idx] w_frames = [w_frames[i] for i in idx] for i, w, w_f in zip(idx, words, w_frames): init_frame, end_frame = int(w_f - 10), int(w_f + 10) print(f'init_frame, end_frame = {init_frame, end_frame}') init_time, end_time = init_frame / 25, end_frame / 25 video = torchvision.io.read_video( filename=video_path, start_pts=init_time, end_pts=end_time, pts_unit='sec' ) frame_rate = video[2]['video_fps'] video = video[0] print(f'video.shape = {video.shape}') print('%'*25) out = [] # define color mapping for the att weights N = 1000 cmap = cm.get_cmap('plasma', N) # for each frame in the video range... 
for t in range(min(end_frame - init_frame, video.shape[0])): frame = video[t].permute(2, 0, 1) frame = F.to_pil_image(frame) image_editable = ImageDraw.Draw(frame) image_editable.text( (15,15), # (0, 0): upper left w, (0, 0, 0), # RGB font=text_font, ) # add sourrounding frame with weight's coloring to each of the frames in video color_att = int(N * att_time[i]) colors = tuple(int(255 * c) for c in cmap(color_att)[0:3]) frame = ImageOps.expand(frame, border=66, fill=colors) # (0,0,0)) frame = F.pil_to_tensor(frame).permute(1, 2, 0) out.append(frame) # store to .mp4 out = torch.stack(out) out_path = '/'.join(video_path.split('/')[:-1]) + '/' + f'{data_type}_{model}_viz_att_{init_frame}' + video_path.split('/')[-1] print(out_path) torchvision.io.write_video( filename=out_path, video_array=out, fps=frame_rate, video_codec='libx264', ) else: text_timestamps = pd.read_csv( ( '/home/alvaro/Documents/ML_and_DL/How2Sign/TFG/Sign-Language-Topic-Detection/data/How2Sign/' 'train_csv_frames_redone_2.csv' ), sep = '\t' ) text_timestamps = text_timestamps[text_timestamps['VIDEO_ID'] == '-EsVrbRTMU4'] text_timestamps = text_timestamps.sort_values(by=['START_FRAME']) if ( data_type in ['keypoints', 'mediapipe_keypoints', 'mediapipe_rotational'] and # TODO: all non-textual feats should have been downsampled! model == 'transformer' ): print(f'1 att_time.shape = {att_time.shape}') att_time = att_time.repeat_interleave(9) print(f'2 att_time.shape = {att_time.shape}') elif ( data_type in ['keypoints', 'mediapipe_keypoints', 'rotational', 'mediapipe_rotational', 'i3d',] and model == 'transformerCLS' ): print(f'1 att_time.shape = {att_time.shape}') att_time = torch.from_numpy(att_time).repeat_interleave(9) print(f'2 att_time.shape = {att_time.shape}') for i, row in text_timestamps.iterrows(): init_time, end_time = int(row['START_REALIGNED']), int(row['END_REALIGNED']) init_frame, end_frame = int(row['START_FRAME']), int(row['END_FRAME']) if init_frame > 1200: break if data_type == 'i3d': init_frame, end_frame = max(0, init_frame - 8), max(0, end_frame - 8) if end_frame == 0: end_frame += 8 print(f'init_frame, end_frame = {init_frame, end_frame}') ### add subtitle to time range (init_time, end_time) ### video = torchvision.io.read_video( filename=video_path, start_pts=init_time, end_pts=end_time, pts_unit='sec' ) frame_rate = video[2]['video_fps'] video = video[0] print(f'video.shape = {video.shape}') print('%'*25) out = [] # define color mapping for the att weights N = 1000 cmap = cm.get_cmap('plasma', N) # add subtitles... text = row['SENTENCE'] print(f'len(text) = {len(text)}') ## words = text.split() new_text = "" word_count = 0 for word in words: new_text += word + " " word_count += 1 if word_count == 11 or "." in word: new_text += "\n" word_count = 0 ## text = new_text # for each frame in the video range... 
for t in range(min(end_frame - init_frame, video.shape[0])): # print(f't = {t}') frame = video[t].permute(2, 0, 1) # print(frame.shape) frame = F.to_pil_image(frame) image_editable = ImageDraw.Draw(frame) image_editable.text( (15,15), # (0, 0): upper left text, (0, 0, 0), # RGB font=text_font, ) # add sourrounding frame with att weight's coloring to each of the frames in video color_att = int(N * att_time[init_frame + t]) colors = tuple(int(255 * c) for c in cmap(color_att)[0:3]) frame = ImageOps.expand(frame, border=66, fill=colors) frame = F.pil_to_tensor(frame).permute(1, 2, 0) out.append(frame) # store to .mp4 out = torch.stack(out) out_path = '/'.join(video_path.split('/')[:-1]) + '/' + f'{data_type}_{model}_viz_att_{init_frame}' + video_path.split('/')[-1] print(out_path) torchvision.io.write_video( filename=out_path, video_array=out, fps=frame_rate, video_codec='libx264', ) def obtain_tSNE_projection( embeddings: Union[torch.Tensor, np.array], ) -> np.array: # TODO: set a grid for each of the models (3 in total), # with 4 x 3 = 12 subplots each (4 data types, 3 dataset splits) if type(embeddings) == torch.Tensor: embeddings = embeddings.numpy() if len(embeddings.shape) != 2: raise RuntimeError( (f'Expected input embeddings to be two-dimensional tensor' f' but got a `{len(embeddings.shape)}-dimensional tensor instead.`') ) tsne = manifold.TSNE( n_components=2, perplexity=30, early_exaggeration=12, learning_rate="auto", n_iter=1000, random_state=41, n_jobs=-1, ) Y = tsne.fit_transform(embeddings) return Y def plot_projection( Y: np.array, class_labels: Union[List[int], List[str], torch.Tensor], labels: List[str], model: str, data_type: str, split: str, ) -> None: if type(class_labels) == torch.Tensor: class_labels = deepcopy(class_labels).tolist() # toy example # class_labels = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 2, 4] # x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12] # y = [0, 1, 2, 2, 2, 2, 3, 3, 4, 4, 5, 5] dpi = 100 fig, ax = plt.subplots(figsize=(850/dpi,850/dpi), dpi=dpi) ax.axis('off') scatter = plt.scatter(Y[:, 0], Y[:, 1], c=class_labels, cmap='tab10') # scatter = plt.scatter(x, y, c=class_labels, cmap='tab10') # lgd = ax.legend(handles=scatter.legend_elements()[0], labels=labels, ncol=1, bbox_to_anchor=(1.04,1)) ax.set_title(f'{model} - {data_type} - {split}') # plt.savefig(f'./outputs/tsne_{model}_{data_type}_{split}.png', bbox_extra_artists=(lgd,), bbox_inches='tight') plt.savefig(f'./outputs/tsne_{model}_{data_type}_{split}.png', bbox_inches='tight') plt.close() # maps the label's number to its corresponding string e.g. 
0 -> "beauty" def label_num_to_str( class_labels: Union[List[int], List[str], torch.Tensor], mapping_file: str = '../../../data/How2Sign/categoryName_categoryID.csv', ) -> List[str]: if type(class_labels) == torch.Tensor: class_labels = deepcopy(class_labels).tolist() mapping = pd.read_csv(mapping_file, index_col=0, squeeze=True, sep=',').to_dict() assert len(mapping.keys()) == 10 return mapping def obtain_labels(targets): mapping = label_num_to_str(targets) labels = [k for k, v in sorted(mapping.items(), key=lambda item: item[1])] return labels def main(args): MODELS = ['perceiverIO', 'lstm', 'transformer', 'transformerCLS'] DATA_TYPE = ['keypoints', 'mediapipe_keypoints', 'rotational', 'mediapipe_rotational', 'text', 'i3d', 'spot_align'] SPLIT = ['test', 'val', 'train'] for model in MODELS: for data_type in DATA_TYPE: for split in SPLIT: print(f'Analyzing outputs from model = {model}, data type = {data_type}, split = {split}...', flush=True) print(flush=True) data = load_data_dict(f'./outputs/inference_{model}_{data_type}_{split}.pt') # if model == 'transformerCLS': # data = {'att_time': np.array([[-99999, -99], [-99999, -99]])} # else: # data = load_data_dict(f'./outputs/inference_{model}_{data_type}_{split}.pt') targets = data['targets'] + 1 preds = data['preds'] + 1 logits = data['logits'] labels = obtain_labels(data['targets']) Y = obtain_tSNE_projection(data['embeddings']) print(f'Plotting projections; model = {model}, data type = {data_type}, split = {split}...', flush=True) plot_projection(Y, targets, obtain_labels(data['targets']), model, data_type, split) print(f'analysis_of_errors; model = {model}, data type = {data_type}, split = {split}...', flush=True) analysis_of_errors( targets=targets, preds=preds, logits=logits, labels=labels, model=model, data_type=data_type, split=split, ) # data_dir = f'../../../../../../data/How2Sign/{data_type}' # plot_att_time( # att_time=data['att_time'][0], # video_id='eSzXQQUgH1A', # model=model, # data_type=data_type, # ) # viz_att( # att_time=data['att_time'], # TODO: select the data corresponding to the -EsVrbRTMU4 video # video_path='/home/alvaro/Documents/ML_and_DL/How2Sign/TFG/Sign-Language-Topic-Detection/data/How2Sign/att_viz/train/-EsVrbRTMU4-8-rgb_front.mp4', # # csv_path='/home/alvaro/Documents/ML_and_DL/How2Sign/TFG/video/train/-EsVrbRTMU4-8-rgb_front_gloss_timesteps.csv', # csv_path='/home/alvaro/Documents/ML_and_DL/How2Sign/TFG/Sign-Language-Topic-Detection/data/How2Sign/train_csv_frames_redone_2.csv', # model=model, # 'lstm', # data_type=data_type, # ) print(flush=True) print(f'Analyzed outputs for model = {model}, data type = {data_type}, split = {split}', flush=True) print(flush=True) if __name__ == '__main__': parser = argparse.ArgumentParser() args = parser.parse_args() main(args)
27,538
35.235526
167
py
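The error-analysis pipeline above binarises labels before computing per-class precision-recall and writes a classification report. A minimal sketch of that core with toy labels (no model outputs, plots or files involved):

import numpy as np
from sklearn.preprocessing import label_binarize
from sklearn.metrics import classification_report, average_precision_score

targets = np.array([0, 1, 2, 2, 1, 0, 2, 1])
preds = np.array([0, 1, 2, 1, 1, 0, 2, 2])

print(classification_report(targets, preds, digits=3))

# one-hot targets/predictions for micro-averaged precision-recall,
# using hard predictions as scores, as the script above does
t_bin = label_binarize(targets, classes=[0, 1, 2])
p_bin = label_binarize(preds, classes=[0, 1, 2])
print(average_precision_score(t_bin, p_bin, average="micro"))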
sign-topic
sign-topic-main/examples/SL_topic_detection/infer.py
import ast import os import sys from dataclasses import dataclass, field, is_dataclass import logging from pathlib import Path from typing import Any, Dict, List, Optional, Tuple, Union, Callable from operator import attrgetter import torch import torch.distributed as dist import hydra from hydra.core.config_store import ConfigStore from fairseq import checkpoint_utils, distributed_utils, progress_bar, tasks, utils from fairseq.dataclass.configs import ( CheckpointConfig, CommonConfig, CommonEvalConfig, DatasetConfig, DistributedTrainingConfig, FairseqDataclass, ) from fairseq.logging.meters import StopwatchMeter, TimeMeter from fairseq.logging.progress_bar import BaseProgressBar from fairseq.models.fairseq_model import FairseqModel from omegaconf import OmegaConf logging.root.setLevel(logging.INFO) logging.basicConfig(level=logging.INFO) logger = logging.getLogger(__name__) config_path = Path(__file__).resolve().parent / "config" @dataclass class InferConfig(FairseqDataclass): task: Any = None common: CommonConfig = CommonConfig() common_eval: CommonEvalConfig = CommonEvalConfig() checkpoint: CheckpointConfig = CheckpointConfig() distributed_training: DistributedTrainingConfig = DistributedTrainingConfig() dataset: DatasetConfig = DatasetConfig() is_ax: bool = field( default=False, metadata={ "help": "if true, assumes we are using ax for tuning and returns a tuple for ax to consume" }, ) def reset_logging(): root = logging.getLogger() for handler in root.handlers: root.removeHandler(handler) root.setLevel(os.environ.get("LOGLEVEL", "INFO").upper()) handler = logging.StreamHandler(sys.stdout) handler.setFormatter( logging.Formatter( fmt="%(asctime)s | %(levelname)s | %(name)s | %(message)s", datefmt="%Y-%m-%d %H:%M:%S", ) ) root.addHandler(handler) class InferenceProcessor: cfg: InferConfig def __init__(self, cfg: InferConfig) -> None: self.cfg = cfg self.cfg.task.data = os.environ.get('DATA', cfg.task.data) self.cfg.task.dict_path = os.environ.get('DICT_PATH', cfg.task.dict_path) self.cfg.task.feats_type = os.environ.get('FEATS_TYPE', cfg.task.feats_type) self.cfg.common_eval.path = os.environ.get('MODEL_PATH', cfg.common_eval.path) self.cfg.bpe.sentencepiece_model = os.environ.get('SP_MODEL', cfg.bpe.sentencepiece_model) self.cfg.hooks.out_file = os.environ.get('OUTPUTS_FILE', self.cfg.hooks.out_file) self.task = tasks.setup_task(cfg.task) self.att_time = [] self.embeddings = [] self.logits = [] model, saved_cfg = self.load_model() self.targets = [] self.preds = [] self.model = model if torch.cuda.is_available(): model.cuda() self.saved_cfg = saved_cfg self.src_dict = self.task.source_dictionary # this is None except when cfg.feats_type == 'text' self.tgt_dict = self.task.target_dictionary # this is always None self.cfg.dataset.dataset_split = os.environ.get('DATASET_SPLIT', self.cfg.dataset.dataset_split) self.task.load_dataset( self.cfg.dataset.dataset_split, task_cfg=saved_cfg.task, ) self.inference_timer = StopwatchMeter() self.wps_meter = TimeMeter() self.frames = 0 self.total_counts = 0 self.total_totals = 0 self.progress_bar = self.build_progress_bar() def __enter__(self) -> "InferenceProcessor": logger.info( "num. shared model params: {:,} (num. trained: {:,})".format( sum( p.numel() for p in self.model.parameters() if not getattr(p, "expert", False) ), sum( p.numel() for p in self.model.parameters() if not getattr(p, "expert", False) and p.requires_grad ), ) ) logger.info( "num. expert model params: {} (num. 
trained: {})".format( sum(p.numel() for p in self.model.parameters() if getattr(p, "expert", False)), sum( p.numel() for p in self.model.parameters() if getattr(p, "expert", False) and p.requires_grad ), ) ) return self def __exit__(self, *exc) -> bool: # TODO: right before exiting, the attention maps and embeddings in self.att_time and self.embeddings should be stored in disk # store targets, embeddings and attention weights for later usage (e.g. visualization) outputs = {} self.targets = torch.cat(self.targets, 0).squeeze() outputs['targets'] = self.targets self.preds = torch.cat(self.preds, 0).squeeze() outputs['preds'] = self.preds if self.cfg.hooks.embedding: self.embeddings = torch.cat(self.embeddings, 0).squeeze() outputs['embeddings'] = self.embeddings if self.cfg.hooks.attention: outputs['att_time'] = self.att_time if self.cfg.hooks.logits: self.logits = torch.cat(self.logits, 0).squeeze() outputs['logits'] = self.logits torch.save(outputs, self.cfg.hooks.out_file) return False def __iter__(self) -> Any: for sample in self.progress_bar: if not self.cfg.common.cpu or torch.cuda.is_available(): sample = utils.move_to_cuda(sample) # Happens on the last batch. if "net_input" not in sample: continue yield sample def log(self, *args, **kwargs): self.progress_bar.log(*args, **kwargs) def print(self, *args, **kwargs): self.progress_bar.print(*args, **kwargs) def register_hooks(self, model: FairseqModel): def get_input(container: List) -> Callable: def hook(module, input): container.append(input[0].detach()) return hook def get_logits(container: List) -> Callable: softm = torch.nn.Softmax(dim=1) def hook(module, input, output): logits = output.squeeze() if len(output.shape) > 2 else output logits = logits.unsqueeze(0) if len(logits.shape) == 1 else logits logits = softm(logits) container.append(logits.detach()) return hook # used for retrieving att over final embeddings in LSTM and transformer def get_output_att1(container: List) -> Callable: def hook(module, input, output): container.append(output[1].detach()) return hook # used for retrieving encoder cross-attention in PerceiverIO def get_output_crossatt_PerceiverIO(container: List) -> Callable: def hook(module, input, output): att = output[3][0].detach() if len(att.shape) != 4: raise RuntimeError( (f'Expected input embeddings to be four-dimensional tensor' f' but got a `{len(att.shape)}-dimensional tensor instead.`') ) att = torch.mean(att, (1, 2)) container.append(att) return hook if self.cfg.hooks.attention: # register a hook to retrieve attention maps over the input sequence # TODO: set these layers layers_attention_time = { 'PerceiverModel': 'encoder', 'Sign2TextTransformerModel': 'att_encoder', 'SLTopicDetectionLSTMModel': 'att_encoder', } hooks_time = { 'PerceiverModel': get_output_crossatt_PerceiverIO, 'Sign2TextTransformerModel': get_output_att1, 'SLTopicDetectionLSTMModel': get_output_att1, } # TODO: decide how to store these results. Should they be stored in a variable and then to disk after passing through the whole dataset? # or should they rather be stored one-by-one/batch-by-batch? 
model_class = model.__class__.__name__ if model_class == 'Sign2TextTransformerModel_CLS': raise AttributeError('Cannot visualize attention with this script for model class `Sign2TextTransformerModel_CLS`.') retriever = attrgetter(layers_attention_time[model_class]) retriever(model).register_forward_hook(hooks_time[model_class](self.att_time)) if self.cfg.hooks.embedding: # register hook to retrieve embeddings produced at the last layer before the classification head layers_embedding = { 'PerceiverModel': 'decoder.decoder.decoding_cross_attention.attention.output', 'Sign2TextTransformerModel': 'classif_head', 'Sign2TextTransformerModel_CLS': 'classif_head', # TODO: hook for retrieving CLS token 'SLTopicDetectionLSTMModel': 'classif_head', } # for name, layer in model.named_modules(): # print(name, layer) # print(f'model.__class__.__name__ {model.__class__.__name__}') retriever = attrgetter(layers_embedding[model.__class__.__name__]) retriever(model).register_forward_pre_hook(get_input(self.embeddings)) if self.cfg.hooks.logits: layers_logits = { 'PerceiverModel': 'decoder.decoder.final_layer', 'Sign2TextTransformerModel': 'classif_head', 'Sign2TextTransformerModel_CLS': 'classif_head', 'SLTopicDetectionLSTMModel': 'classif_head', } retriever = attrgetter(layers_logits[model.__class__.__name__]) retriever(model).register_forward_hook(get_logits(self.logits)) def load_model(self) -> Tuple[FairseqModel, FairseqDataclass]: arg_overrides = ast.literal_eval(self.cfg.common_eval.model_overrides) models, saved_cfg = checkpoint_utils.load_model_ensemble( utils.split_paths(self.cfg.common_eval.path, separator="\\"), arg_overrides=arg_overrides, task=self.task, suffix=self.cfg.checkpoint.checkpoint_suffix, strict=(self.cfg.checkpoint.checkpoint_shard_count == 1), num_shards=self.cfg.checkpoint.checkpoint_shard_count, ) logger.info(models[0]) self.register_hooks(models[0]) return models[0], saved_cfg def get_dataset_itr(self, disable_iterator_cache: bool = False) -> None: return self.task.get_batch_iterator( dataset=self.task.dataset(self.cfg.dataset.dataset_split), max_tokens=self.cfg.dataset.max_tokens, ignore_invalid_inputs=self.cfg.dataset.skip_invalid_size_inputs_valid_test, required_batch_size_multiple=self.cfg.dataset.required_batch_size_multiple, seed=self.cfg.common.seed, num_shards=self.data_parallel_world_size, shard_id=self.data_parallel_rank, num_workers=self.cfg.dataset.num_workers, data_buffer_size=self.cfg.dataset.data_buffer_size, disable_iterator_cache=disable_iterator_cache, ).next_epoch_itr(shuffle=False) def build_progress_bar( self, epoch: Optional[int] = None, prefix: Optional[str] = None, default_log_format: str = "tqdm", ) -> BaseProgressBar: return progress_bar.progress_bar( iterator=self.get_dataset_itr(), log_format=self.cfg.common.log_format, log_interval=self.cfg.common.log_interval, epoch=epoch, prefix=prefix, tensorboard_logdir=self.cfg.common.tensorboard_logdir, default_log_format=default_log_format, ) @property def data_parallel_world_size(self): if self.cfg.distributed_training.distributed_world_size == 1: return 1 return distributed_utils.get_data_parallel_world_size() @property def data_parallel_rank(self): if self.cfg.distributed_training.distributed_world_size == 1: return 0 return distributed_utils.get_data_parallel_rank() def process_sample(self, sample: Dict[str, Any]) -> None: self.inference_timer.start() counts, total = self.task.inference_step( sample, self.model, output_attentions=(self.model.__class__.__name__=='PerceiverModel' and 
self.cfg.hooks.attention), targets_container=self.targets, preds_container=self.preds, ) self.inference_timer.stop(total) self.total_counts += counts self.total_totals += total self.frames = max(sample['net_input']['src_tokens'].shape[1], self.frames) def log_generation_time(self) -> None: logger.info( "Processed %d samples (the longest one having %d frames) in %.1fs %.2f," " frames per second, %.2f samples per second)", self.inference_timer.n, self.frames, self.inference_timer.sum, self.frames * self.inference_timer.n / self.inference_timer.sum, 1.0 / self.inference_timer.avg * self.frames, ) def parse_acc(wer_file: Path) -> float: with open(wer_file, "r") as f: return float(f.readline().strip().split(" ")[1]) def get_acc_file(cfg: InferConfig) -> Path: """Hashes the decoding parameters to a unique file ID.""" base_path = "acc" return Path(base_path) def main(cfg: InferConfig) -> float: """Entry point for main processing logic. Args: cfg: The inference configuration to use. acc: Optional shared memory pointer for returning the accuracy. If not None, the final accuracy value will be written here instead of being returned. Returns: The final accuracy if `acc` is None, otherwise None. """ acc_file = get_acc_file(cfg) # Validates the provided configuration. if cfg.dataset.max_tokens is None and cfg.dataset.batch_size is None: cfg.dataset.max_tokens = 20000 if not cfg.common.cpu and not torch.cuda.is_available(): raise ValueError("CUDA not found; set `cpu=True` to run without CUDA") with InferenceProcessor(cfg) as processor: i = 1 for sample in processor: processor.process_sample(sample) i += 1 processor.log_generation_time() counts_t, totals_t = processor.total_counts, processor.total_totals if cfg.common.cpu: logger.warning("Merging Accuracy requires CUDA.") if type(counts_t) != int or type(counts_t) != int: raise RuntimeError( (f'counts of samples of type `{type(counts_t), type(counts_t)}`' ' are not of type `int`') ) elif processor.data_parallel_world_size > 1: stats = torch.LongTensor([counts_t, totals_t]).cuda() dist.all_reduce(stats, op=dist.ReduceOp.SUM) counts_t, totals_t = stats[0].item(), stats[1].item() acc = counts_t * 100.0 / totals_t if distributed_utils.is_master(cfg.distributed_training): with open(acc_file, "w") as f: f.write( ( f"Accuracy: {acc}\n" f"counts / total = {counts_t} / {totals_t}\n\n" ) ) return acc @hydra.main(config_path=config_path, config_name=os.environ['CONFIG_NAME']) # TODO: set this systematically def hydra_main(cfg: InferConfig) -> Union[float, Tuple[float, Optional[float]]]: container = OmegaConf.to_container(cfg, resolve=True, enum_to_str=True) cfg = OmegaConf.create(container) OmegaConf.set_struct(cfg, True) if cfg.common.reset_logging: reset_logging() logger.info("Config:\n%s", OmegaConf.to_yaml(cfg)) acc = float("inf") try: if cfg.common.profile: with torch.cuda.profiler.profile(): with torch.autograd.profiler.emit_nvtx(): distributed_utils.call_main(cfg, main) else: distributed_utils.call_main(cfg, main) acc = parse_acc(get_acc_file(cfg)) except BaseException as e: # pylint: disable=broad-except if not cfg.common.suppress_crashes: raise else: logger.error("Crashed! 
%s", str(e)) logger.info("Accuracy: %.4f", acc) if cfg.is_ax: return acc, None return acc def cli_main() -> None: try: from hydra._internal.utils import ( get_args, ) # pylint: disable=import-outside-toplevel cfg_name = get_args().config_name or "infer" print(f'cfg_name = {cfg_name}') except ImportError: logger.warning("Failed to get config name from hydra args") cfg_name = "infer" cs = ConfigStore.instance() cs.store(name=cfg_name, node=InferConfig) for k in InferConfig.__dataclass_fields__: if is_dataclass(InferConfig.__dataclass_fields__[k].type): v = InferConfig.__dataclass_fields__[k].default cs.store(name=k, node=v) hydra_main() # pylint: disable=no-value-for-parameter if __name__ == "__main__": cli_main()
17,569
37.030303
148
py
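register_hooks in infer.py attaches forward (pre-)hooks to named submodules via attrgetter to capture embeddings, attention and logits during inference. A stripped-down sketch of the same mechanism on a throwaway model (the layer and variable names here are invented, not the repo's):

import torch
import torch.nn as nn

captured = []

def capture_input(module, inputs):
    # forward *pre* hook: receives the positional inputs of the module
    captured.append(inputs[0].detach())

model = nn.Sequential(nn.Linear(8, 16), nn.ReLU(), nn.Linear(16, 4))
# attach to the final layer, analogous to attrgetter('classif_head')(model)
handle = model[2].register_forward_pre_hook(capture_input)

with torch.no_grad():
    model(torch.randn(3, 8))

print(captured[0].shape)  # torch.Size([3, 16]) -- the pre-classifier embedding
handle.remove()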
sign-topic
sign-topic-main/examples/SL_topic_detection/prep_how2sign.py
#!/usr/bin/env python3 import errno import os import json import h5py import argparse import logging import pandas as pd from typing import Tuple from typing import List from pathlib import Path from tempfile import NamedTemporaryFile import cv2 import torchvision import torch from torch.utils.data import Dataset from utils import ( gen_vocab, save_df_to_tsv, load_text, ) log = logging.getLogger(__name__) class How2Sign(Dataset): ''' Create a Dataset for How2Sign. ''' LANGUAGES = ['en'] # TODO: add 'pt' SPLITS = ['val', 'test', 'train'] def __init__( self, root: str, lang: str, split: str, featsType: str, ) -> None: self.root = Path(root) self.featsType = featsType assert split in self.SPLITS and lang in self.LANGUAGES assert self.root.is_dir() try: self.h5_sign = h5py.File(self.root / f'{split}.h5', 'r') except: raise FileNotFoundError( errno.ENOENT, os.strerror(errno.ENOENT), self.root / f'{split}.h5' ) with h5py.File(self.root / f'{split}_filt.h5', 'w') as f: for key in self.h5_sign.keys(): try: f[key[:11]] = self.h5_sign[key][()] except: pass self.h5_sign.close() self.h5_sign = h5py.File(self.root / f'{split}_filt.h5', 'r') if featsType == 'text': self.text = load_text(self.root / f'{split}.txt', list(self.h5_sign.keys())) elif featsType == 'spot_align': self.categs = pd.read_csv(self.root / f'{split}_categs.csv') self.data = pd.read_csv(self.root / f'{split}.csv') if featsType == 'text': self.data['TEXT'] = pd.NaT elif featsType == 'spot_align': self.data['CATEGORY_ID'] = pd.NaT self.data['START_FRAME'] = pd.NaT self.data['END_FRAME'] = pd.NaT for i, row in self.data.iterrows(): if row['VIDEO_ID'] not in list(self.h5_sign.keys()): print(f'Error with keypoint {row["VIDEO_ID"]}, not found inside h5_sign', flush=True) self.data.drop(i, inplace=True) else: self.data.loc[i, 'START_FRAME'] = 0 self.data.loc[i, 'END_FRAME'] = torch.Tensor(self.h5_sign[row['VIDEO_ID']]).shape[0] if featsType == 'text': self.data.loc[i, 'TEXT'] = self.text[row['VIDEO_ID']] elif featsType == 'spot_align': self.data.loc[i, 'CATEGORY_ID'] = self.categs[self.categs['VIDEO_ID'] == row['VIDEO_ID']]['CATEGORY_ID'].tolist()[0] self.data.reset_index(drop=True, inplace=True) def __getitem__(self, n: int) -> Tuple[torch.Tensor, str, str]: sent_id = self.data.loc[n, 'VIDEO_ID'] src_signs = torch.Tensor(self.h5_sign[sent_id]) categ = self.data.loc[n, 'CATEGORY_ID'] if self.featsType in ['text', 'spot_align']: text = self.data.loc[n, 'TEXT'] return sent_id, src_signs, text, categ return sent_id, src_signs, categ def __len__(self) -> int: return len(self.data) def filter_by_length(self, min_n_frames: int, max_n_frames: int) -> None: lengths = self.data['END_FRAME'] - self.data['START_FRAME'] + 1 self.data = self.data[lengths.between(min_n_frames, max_n_frames)] self.data.reset_index(drop=True, inplace=True) class How2Sign_video(Dataset): ''' Create a Dataset for How2Sign for video data. 
''' LANGUAGES = ['en'] # TODO: add 'pt' SPLITS = ['train', 'val', 'test'] DATA_PATH = { 'train': '/mnt/gpid08/datasets/How2Sign/How2Sign/video_level/train/rgb_front/raw_videos', 'val': '/mnt/gpid08/datasets/How2Sign/How2Sign/video_level/val/rgb_front/raw_videos', 'test': '/mnt/gpid08/datasets/How2Sign/How2Sign/video_level/test/rgb_front/raw_videos', } # DATA_PATH = { # 'train': './', # 'val': './', # 'test': './', # } def __init__( self, root: str, lang: str, split: str, featsType: str, ) -> None: self.root = Path(root) self.featsType = featsType assert split in self.SPLITS and lang in self.LANGUAGES assert self.root.is_dir() self.split = split self.data = [] self.videonames = self.load_video_names(os.path.join(self.root, 'subset2episode.json'))[split] # self.videonames = ['cKmtmtqeUkI-5-rgb_front', 'g1uA0f9I0Sg-5-rgb_front'] self.categories = pd.read_csv(self.root / f'{split}.csv') self.categories = self.categories.set_index('VIDEO_ID').to_dict()['CATEGORY_ID'] # self.categories = {'cKmtmtqeUkI': 4, 'g1uA0f9I0Sg': 2} for i, video_name in enumerate(self.videonames): cap = cv2.VideoCapture(os.path.join(self.DATA_PATH[split], video_name + '.mp4')) # cap = cv2.VideoCapture('/home/alvaro/Documents/ML and DL/How2Sign/TFG/Sign-Language-Topic-Detection/data/How2Sign/video/cKmtmtqeUkI-5-rgb_front.mp4') totalframecount = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) print("The total number of frames in this video is ", totalframecount, flush=True) self.data.append({ 'VIDEO_ID': video_name[:11], 'VIDEO_NAME': video_name, 'CATEGORY_ID': self.categories[video_name[:11]], 'START_FRAME': 0, 'END_FRAME': totalframecount - 1 }) self.data = pd.DataFrame(self.data) self.data = self.data.drop_duplicates(subset=['VIDEO_ID'], keep='first') self.data = self.data.drop_duplicates(subset=['VIDEO_NAME'], keep='first') self.data.reset_index(drop=True, inplace=True) def load_video_names(self, path: str) -> List[str]: with open(path, 'r') as f: data_features = json.load(f) return data_features def __getitem__(self, n: int) -> Tuple[torch.Tensor, str, str]: sent_id = self.data.loc[n, 'VIDEO_ID'] src_signs = torchvision.io.read_video(filename=os.path.join(self.DATA_PATH[self.split], self.data.loc[n, 'VIDEO_NAME'] + '.mp4')) categ = self.data.loc[n, 'CATEGORY_ID'] return sent_id, src_signs, categ def __len__(self) -> int: return len(self.data) def filter_by_length(self, min_n_frames: int, max_n_frames: int) -> None: lengths = self.data['END_FRAME'] - self.data['START_FRAME'] + 1 self.data = self.data[lengths.between(min_n_frames, max_n_frames)] def process(args): root = Path(args.data_root).absolute() for split in How2Sign.SPLITS: print(f'Processing "{split}" split', flush=True) filt_csv = root / f'{split}_filt.csv' for lang in How2Sign.LANGUAGES: if args.featsType == 'video': dataset = How2Sign_video(root, lang, split, args.featsType) else: dataset = How2Sign(root, lang, split, args.featsType) print('Filtering samples by length...', flush=True) dataset.filter_by_length(args.min_n_frames, args.max_n_frames) print(f'{len(dataset)} samples remaining after filtering', flush=True) if split == 'train' and args.featsType in ['text', 'spot_align']: print(f"Generating vocab for '{lang}' language") v_size_str = "" if args.vocab_type == "char" else str(args.vocab_size) spm_filename_prefix = f"spm_{args.vocab_type}{v_size_str}_{lang}" with NamedTemporaryFile(mode="w") as f: for i in range(len(dataset)): f.write(dataset[i][2] + "\n") f.seek(0) gen_vocab( Path(f.name), root / spm_filename_prefix, args.vocab_type, args.vocab_size, 
special_symbols=['_', '-'] ) print('Saving dataframe...', flush=True) save_df_to_tsv(dataset.data, filt_csv) def main(): parser = argparse.ArgumentParser() parser.add_argument('--data-root', '-d', required=True, type=str) parser.add_argument('--min-n-frames', default=150, type=int) parser.add_argument('--max-n-frames', default=5500, type=int) parser.add_argument('--featsType', default='keypoints', type=str) parser.add_argument( "--vocab-type", default="unigram", type=str, choices=["bpe", "unigram", "char"], ) parser.add_argument("--vocab-size", default=8000, type=int) args = parser.parse_args() process(args) if __name__ == '__main__': main()
8,850
35.57438
163
py
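The How2Sign loader above copies each feature matrix under a key truncated to the 11-character VIDEO_ID before building the manifest. A toy round trip of that h5 handling with synthetic features (paths, shapes and the sample key reuse the conventions above but are otherwise made up):

import os
import tempfile

import h5py
import numpy as np

src_path = os.path.join(tempfile.mkdtemp(), "train.h5")
with h5py.File(src_path, "w") as f:
    f["-EsVrbRTMU4-5-rgb_front"] = np.random.rand(120, 75).astype(np.float32)

filt_path = src_path.replace(".h5", "_filt.h5")
with h5py.File(src_path, "r") as src, h5py.File(filt_path, "w") as dst:
    for key in src.keys():
        dst[key[:11]] = src[key][()]  # keep only the 11-char VIDEO_ID

with h5py.File(filt_path, "r") as f:
    feats = np.array(f["-EsVrbRTMU4"])
print(feats.shape)  # (120, 75)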
sign-topic
sign-topic-main/examples/sign2vec/prep_how2sign.py
#!/usr/bin/env python3 # This code is based on the speech_to_text implementation (commit: d974c70) # # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import h5py import argparse import logging import pandas as pd from typing import Tuple from pathlib import Path from tempfile import NamedTemporaryFile import torch from torch.utils.data import Dataset import pdb from examples.speech_to_text.data_utils import ( gen_vocab, save_df_to_tsv, ) from utils import h5_video2sentence log = logging.getLogger(__name__) MANIFEST_COLUMNS = ["id", "signs", "n_frames", "tgt_text"] class How2Sign(Dataset): """ Create a Dataset for How2Sign. Each item is a tuple of the form: signs, target sentence """ LANGUAGES = ["en"] # TODO: add "pt" SPLITS = ["train", "val", "test"] def __init__(self, root: str, lang: str, split: str) -> None: self.root = Path(root) assert split in self.SPLITS and lang in self.LANGUAGES assert self.root.is_dir() self.h5_sign = h5py.File(self.root / f"{split}_sent.h5", "r") self.data = pd.read_csv(self.root / f"{split}.tsv", sep="\t") for i, row in self.data.iterrows(): #not finding anything in here.... why?? #pdb.set_trace() if row['SENTENCE_NAME'] not in list(self.h5_sign.keys()): print(f"Error with keypoint {row['SENTENCE_NAME']}, not found inside h5_sign") self.data.drop(i, inplace=True) self.data.reset_index(drop=True, inplace=True) def __getitem__(self, n: int) -> Tuple[torch.Tensor, str, str]: sent_id = self.data.loc[n, 'SENTENCE_NAME'] src_signs = torch.Tensor(self.h5_sign[sent_id]) tgt_sent = self.data.loc[n, 'SENTENCE'] return sent_id, src_signs, tgt_sent def __len__(self) -> int: return len(self.data) def filter_by_length(self, min_n_frames: int, max_n_frames: int) -> None: lengths = self.data['END_FRAME'] - self.data['START_FRAME'] + 1 self.data = self.data[lengths.between(min_n_frames, max_n_frames)] def process(args): root = Path(args.data_root).absolute() for split in How2Sign.SPLITS: print(f"Processing '{split}' split") input_tsv = root / f"{split}.tsv" filt_tsv = root / f"{split}_filt.tsv" if args.data_type == 'skeletons': signs_video = root / f"{split}.h5" signs_sentence = root / f"{split}_sent.h5" elif args.data_type == 'i3d': signs_video = root / f"{split}_i3d.h5" signs_sentence = root / f"{split}_i3d_sent.h5" else: print('Error with data_type, not i3d or skeletons') try: h5_video2sentence(input_tsv, signs_video, signs_sentence, overwrite=args.overwrite) except FileNotFoundError: print(f"Skipping '{split}' split") continue except FileExistsError: print(f"Reusing sentence-level h5 for '{split}' split. 
Set --overwrite to overwrite it.") print(f'signs_video: {signs_video}, signs_sentence: {signs_sentence}') for lang in How2Sign.LANGUAGES: dataset = How2Sign(root, lang, split) if split == 'train': ''' print(f"Generating vocab for '{lang}' language") v_size_str = "" if args.vocab_type == "char" else str(args.vocab_size) spm_filename_prefix = f"spm_{args.vocab_type}{v_size_str}_{lang}" with NamedTemporaryFile(mode="w") as f: for i in range(len(dataset)): f.write(dataset[i][2] + "\n") gen_vocab( Path(f.name), root / spm_filename_prefix, args.vocab_type, args.vocab_size, ) ''' print("Filtering samples by length...") dataset.filter_by_length(args.min_n_frames, args.max_n_frames) print(f"{len(dataset)} samples after filtering") print("Saving dataframe...") save_df_to_tsv(dataset.data, filt_tsv) def main(): parser = argparse.ArgumentParser() parser.add_argument("--data-root", "-d", required=True, type=str) parser.add_argument("--min-n-frames", default=5, type=int) parser.add_argument("--max-n-frames", default=1000, type=int) parser.add_argument( "--vocab-type", default="unigram", type=str, choices=["bpe", "unigram", "char"], ) parser.add_argument("--data-type", default='skeletons', type=str) parser.add_argument("--vocab-size", default=8000, type=int) parser.add_argument("--overwrite", action="store_true") args = parser.parse_args() process(args) if __name__ == "__main__": main()
5,013
34.062937
101
py
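Both prep scripts share the same length-based filtering over START_FRAME/END_FRAME. A tiny sketch with toy frame counts, using the sentence-level column names and the default --min-n-frames/--max-n-frames bounds from this script:

import pandas as pd

df = pd.DataFrame({
    "SENTENCE_NAME": ["a", "b", "c"],
    "START_FRAME": [0, 0, 0],
    "END_FRAME": [3, 400, 2000],
})
lengths = df["END_FRAME"] - df["START_FRAME"] + 1
df = df[lengths.between(5, 1000)]  # inclusive bounds, as in filter_by_length
print(df["SENTENCE_NAME"].tolist())  # ['b']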
sign-topic
sign-topic-main/examples/fast_noisy_channel/noisy_channel_translation.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from fairseq.tasks.translation import TranslationTask from fairseq.tasks.language_modeling import LanguageModelingTask from fairseq import checkpoint_utils import argparse from fairseq.tasks import register_task import torch @register_task("noisy_channel_translation") class NoisyChannelTranslation(TranslationTask): """ Rescore the top k candidates from each beam using noisy channel modeling """ @staticmethod def add_args(parser): """Add task-specific arguments to the parser.""" TranslationTask.add_args(parser) # fmt: off parser.add_argument('--channel-model', metavar='FILE', help='path to P(S|T) model. P(S|T) and P(T|S) must share source and target dictionaries.') parser.add_argument('--combine-method', default='lm_only', choices=['lm_only', 'noisy_channel'], help="""method for combining direct and channel model scores. lm_only: decode with P(T|S)P(T) noisy_channel: decode with 1/t P(T|S) + 1/s(P(S|T)P(T))""") parser.add_argument('--normalize-lm-scores-by-tgt-len', action='store_true', default=False, help='normalize lm score by target length instead of source length') parser.add_argument('--channel-scoring-type', default='log_norm', choices=['unnormalized', 'log_norm', 'k2_separate', 'src_vocab', 'src_vocab_batched'], help="Normalize bw scores with log softmax or return bw scores without log softmax") parser.add_argument('--top-k-vocab', default=0, type=int, help='top k vocab IDs to use with `src_vocab` in channel model scoring') parser.add_argument('--k2', default=50, type=int, help='the top k2 candidates to rescore with the noisy channel model for each beam') parser.add_argument('--ch-wt', default=1, type=float, help='weight for the channel model') parser.add_argument('--lm-model', metavar='FILE', help='path to lm model file, to model P(T). P(T) must share the same vocab as the direct model on the target side') parser.add_argument('--lm-data', metavar='FILE', help='path to lm model training data for target language, used to properly load LM with correct dictionary') parser.add_argument('--lm-wt', default=1, type=float, help='the weight of the lm in joint decoding') # fmt: on def build_generator( self, models, args, seq_gen_cls=None, extra_gen_cls_kwargs=None ): if getattr(args, "score_reference", False): raise NotImplementedError() else: from .noisy_channel_sequence_generator import NoisyChannelSequenceGenerator use_cuda = torch.cuda.is_available() and not self.args.cpu assert self.args.lm_model is not None, '--lm-model required for noisy channel generation!' 
assert self.args.lm_data is not None, '--lm-data required for noisy channel generation to map between LM and bitext vocabs' if self.args.channel_model is not None: import copy ch_args_task = copy.deepcopy(self.args) tmp = ch_args_task.source_lang ch_args_task.source_lang = ch_args_task.target_lang ch_args_task.target_lang = tmp ch_args_task._name = 'translation' channel_task = TranslationTask.setup_task(ch_args_task) arg_dict = {} arg_dict['task'] = 'language_modeling' arg_dict['sample_break_mode'] = 'eos' arg_dict['data'] = self.args.lm_data arg_dict['output_dictionary_size'] = -1 lm_args = argparse.Namespace(**arg_dict) lm_task = LanguageModelingTask.setup_task(lm_args) lm_dict = lm_task.output_dictionary if self.args.channel_model is not None: channel_models, _ = checkpoint_utils.load_model_ensemble(self.args.channel_model.split(':'), task=channel_task) for model in channel_models: model.make_generation_fast_( beamable_mm_beam_size=None if args.no_beamable_mm else args.beam, need_attn=args.print_alignment, ) if self.args.fp16: model.half() if use_cuda: model.cuda() else: channel_models = None lm_models, _ = checkpoint_utils.load_model_ensemble(self.args.lm_model.split(':'), task=lm_task) for model in lm_models: model.make_generation_fast_( beamable_mm_beam_size=None if args.no_beamable_mm else args.beam, need_attn=args.print_alignment, ) if self.args.fp16: model.half() if use_cuda: model.cuda() return NoisyChannelSequenceGenerator( combine_method=self.args.combine_method, tgt_dict=self.target_dictionary, src_dict=self.source_dictionary, beam_size=getattr(args, 'beam', 5), max_len_a=getattr(args, 'max_len_a', 0), max_len_b=getattr(args, 'max_len_b', 200), min_len=getattr(args, 'min_len', 1), len_penalty=getattr(args, 'lenpen', 1), unk_penalty=getattr(args, 'unkpen', 0), temperature=getattr(args, 'temperature', 1.), match_source_len=getattr(args, 'match_source_len', False), no_repeat_ngram_size=getattr(args, 'no_repeat_ngram_size', 0), normalize_scores=(not getattr(args, 'unnormalized', False)), channel_models=channel_models, k2=getattr(self.args, 'k2', 50), ch_weight=getattr(self.args, 'ch_wt', 1), channel_scoring_type=self.args.channel_scoring_type, top_k_vocab=self.args.top_k_vocab, lm_models=lm_models, lm_dict=lm_dict, lm_weight=getattr(self.args, 'lm_wt', 1), normalize_lm_scores_by_tgt_len=getattr(self.args, 'normalize_lm_scores_by_tgt_len', False), )
6,709
51.421875
160
py
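The task above combines a direct model P(T|S), a channel model P(S|T) and a language model P(T), as described in the --combine-method help text. A toy scalar sketch of the two rules, assuming the default normalization (channel and LM scores by source length, direct score by the number of generated target tokens); the real implementation operates on batched tensors inside the sequence generator further down.

import math

def combine_scores(combine_method, log_p_t_given_s, log_p_s_given_t, log_p_t,
                   src_len, tgt_len, ch_wt=1.0, lm_wt=1.0):
    # Toy scalar version of the two --combine-method rules; not the project's code.
    if combine_method == "lm_only":
        # decode with P(T|S) P(T)
        return log_p_t_given_s + lm_wt * log_p_t
    if combine_method == "noisy_channel":
        # decode with 1/t P(T|S) + 1/s (P(S|T) P(T)), all in log space
        return (log_p_t_given_s / tgt_len
                + (ch_wt * log_p_s_given_t + lm_wt * log_p_t) / src_len)
    raise ValueError(combine_method)

print(combine_scores("noisy_channel", math.log(0.4), math.log(0.3),
                     math.log(0.2), src_len=6, tgt_len=5))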
sign-topic
sign-topic-main/examples/fast_noisy_channel/noisy_channel_beam_search.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import torch
from fairseq.search import Search


class NoisyChannelBeamSearch(Search):

    def __init__(self, tgt_dict):
        super().__init__(tgt_dict)
        self.fw_scores_buf = None
        self.lm_scores_buf = None

    def _init_buffers(self, t):
        # super()._init_buffers(t)
        if self.fw_scores_buf is None:
            self.scores_buf = t.new()
            self.indices_buf = torch.LongTensor().to(device=t.device)
            self.beams_buf = torch.LongTensor().to(device=t.device)
            self.fw_scores_buf = t.new()
            self.lm_scores_buf = t.new()

    def combine_fw_bw(self, combine_method, fw_cum, bw, step):
        if combine_method == "noisy_channel":
            fw_norm = fw_cum.div(step + 1)
            lprobs = bw + fw_norm
        elif combine_method == "lm_only":
            lprobs = bw + fw_cum

        return lprobs

    def step(self, step, fw_lprobs, scores, bw_lprobs, lm_lprobs, combine_method):
        self._init_buffers(fw_lprobs)
        bsz, beam_size, vocab_size = fw_lprobs.size()

        if step == 0:
            # at the first step all hypotheses are equally likely, so use
            # only the first beam
            fw_lprobs = fw_lprobs[:, ::beam_size, :].contiguous()
            bw_lprobs = bw_lprobs[:, ::beam_size, :].contiguous()
            # nothing to add since we are at the first step
            fw_lprobs_cum = fw_lprobs

        else:
            # make probs contain cumulative scores for each hypothesis
            raw_scores = (scores[:, :, step - 1].unsqueeze(-1))
            fw_lprobs_cum = (fw_lprobs.add(raw_scores))

        combined_lprobs = self.combine_fw_bw(combine_method, fw_lprobs_cum, bw_lprobs, step)

        # choose the top k according to the combined noisy channel model score
        torch.topk(
            combined_lprobs.view(bsz, -1),
            k=min(
                # Take the best 2 x beam_size predictions. We'll choose the first
                # beam_size of these which don't predict eos to continue with.
                beam_size * 2,
                combined_lprobs.view(bsz, -1).size(1) - 1,  # -1 so we never select pad
            ),
            out=(self.scores_buf, self.indices_buf),
        )

        # save corresponding fw and lm scores
        self.fw_scores_buf = torch.gather(fw_lprobs_cum.view(bsz, -1), 1, self.indices_buf)
        self.lm_scores_buf = torch.gather(lm_lprobs.view(bsz, -1), 1, self.indices_buf)

        # Project back into relative indices and beams
        self.beams_buf = self.indices_buf // vocab_size
        self.indices_buf.fmod_(vocab_size)

        return self.scores_buf, self.fw_scores_buf, self.lm_scores_buf, self.indices_buf, self.beams_buf
2,895
39.222222
104
py
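A small numerical check of the combine_fw_bw semantics in noisy_channel_beam_search.py above: under "noisy_channel" the cumulative forward score is averaged over the step+1 tokens generated so far before the (already length-normalized) channel+LM score is added, while "lm_only" keeps the raw cumulative score. The numbers below are made up.

import torch

fw_cum = torch.tensor([[-4.0, -6.0]])   # cumulative log P(T|S) per candidate
bw = torch.tensor([[-1.0, -0.5]])       # combined channel + LM score per candidate
step = 3                                 # i.e. 4 target tokens generated so far

# "noisy_channel": average the forward score over the tokens generated so far
noisy_channel = bw + fw_cum.div(step + 1)   # -> [[-2.0, -2.0]]
# "lm_only": keep the raw cumulative forward score
lm_only = bw + fw_cum                        # -> [[-5.0, -6.5]]
print(noisy_channel, lm_only)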
sign-topic
sign-topic-main/examples/fast_noisy_channel/noisy_channel_sequence_generator.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import Dict, List, Optional import math import numpy as np import torch import torch.nn.functional as F from torch import Tensor from .noisy_channel_beam_search import NoisyChannelBeamSearch from fairseq.sequence_generator import EnsembleModel class NoisyChannelSequenceGenerator(object): def __init__( self, combine_method, tgt_dict, src_dict=None, beam_size=1, max_len_a=0, max_len_b=200, min_len=1, len_penalty=1.0, unk_penalty=0.0, retain_dropout=False, temperature=1.0, match_source_len=False, no_repeat_ngram_size=0, normalize_scores=True, channel_models=None, k2=10, ch_weight=1.0, channel_scoring_type='log_norm', top_k_vocab=0, lm_models=None, lm_dict=None, lm_weight=1.0, normalize_lm_scores_by_tgt_len=False, ): """Generates translations of a given source sentence, using beam search with noisy channel decoding. Args: combine_method (string, optional): Method to combine direct, LM and channel model scores (default: None) tgt_dict (~fairseq.data.Dictionary): target dictionary src_dict (~fairseq.data.Dictionary): source dictionary beam_size (int, optional): beam width (default: 1) max_len_a/b (int, optional): generate sequences of maximum length ax + b, where x is the source length min_len (int, optional): the minimum length of the generated output (not including end-of-sentence) len_penalty (float, optional): length penalty, where <1.0 favors shorter, >1.0 favors longer sentences (default: 1.0) unk_penalty (float, optional): unknown word penalty, where <0 produces more unks, >0 produces fewer (default: 0.0) retain_dropout (bool, optional): use dropout when generating (default: False) temperature (float, optional): temperature, where values >1.0 produce more uniform samples and values <1.0 produce sharper samples (default: 1.0) match_source_len (bool, optional): outputs should match the source length (default: False) no_repeat_ngram_size (int, optional): Size of n-grams that we avoid repeating in the generation (default: 0) normalize_scores (bool, optional): normalize scores by the length of the output (default: True) channel_models (List[~fairseq.models.FairseqModel]): ensemble of models translating from the target to the source k2 (int, optional): Top K2 candidates to score per beam at each step (default:10) ch_weight (int, optional): Weight associated with the channel model score assuming that the direct model score has weight 1.0 (default: 1.0) channel_scoring_type (str, optional): String specifying how to score the channel model (default: 'log_norm') top_k_vocab (int, optional): If `channel_scoring_type` is `'src_vocab'` or `'src_vocab_batched'`, then this parameter specifies the number of most frequent tokens to include in the channel model output vocabulary, in addition to the source tokens in the input batch (default: 0) lm_models (List[~fairseq.models.FairseqModel]): ensemble of models generating text in the target language lm_dict (~fairseq.data.Dictionary): LM Model dictionary lm_weight (int, optional): Weight associated with the LM model score assuming that the direct model score has weight 1.0 (default: 1.0) normalize_lm_scores_by_tgt_len (bool, optional): Should we normalize LM scores by the target length? 
By default, we normalize the combination of LM and channel model scores by the source length """ self.pad = tgt_dict.pad() self.unk = tgt_dict.unk() self.eos = tgt_dict.eos() self.vocab_size = len(tgt_dict) self.beam_size = beam_size # the max beam size is the dictionary size - 1, since we never select pad self.beam_size = min(beam_size, self.vocab_size - 1) self.max_len_a = max_len_a self.max_len_b = max_len_b self.min_len = min_len self.normalize_scores = normalize_scores self.len_penalty = len_penalty self.unk_penalty = unk_penalty self.retain_dropout = retain_dropout self.temperature = temperature self.match_source_len = match_source_len self.no_repeat_ngram_size = no_repeat_ngram_size self.channel_models = channel_models self.src_dict = src_dict self.tgt_dict = tgt_dict self.combine_method = combine_method self.k2 = k2 self.ch_weight = ch_weight self.channel_scoring_type = channel_scoring_type self.top_k_vocab = top_k_vocab self.lm_models = lm_models self.lm_dict = lm_dict self.lm_weight = lm_weight self.log_softmax_fn = torch.nn.LogSoftmax(dim=1) self.normalize_lm_scores_by_tgt_len = normalize_lm_scores_by_tgt_len self.share_tgt_dict = (self.lm_dict == self.tgt_dict) self.tgt_to_lm = make_dict2dict(tgt_dict, lm_dict) self.ch_scoring_bsz = 3072 assert temperature > 0, '--temperature must be greater than 0' self.search = NoisyChannelBeamSearch(tgt_dict) @torch.no_grad() def generate( self, models, sample, prefix_tokens=None, bos_token=None, **kwargs ): """Generate a batch of translations. Args: models (List[~fairseq.models.FairseqModel]): ensemble of models sample (dict): batch prefix_tokens (torch.LongTensor, optional): force decoder to begin with these tokens """ model = EnsembleModel(models) incremental_states = torch.jit.annotate( List[Dict[str, Dict[str, Optional[Tensor]]]], [ torch.jit.annotate(Dict[str, Dict[str, Optional[Tensor]]], {}) for i in range(model.models_size) ], ) if not self.retain_dropout: model.eval() # model.forward normally channels prev_output_tokens into the decoder # separately, but SequenceGenerator directly calls model.encoder encoder_input = { k: v for k, v in sample['net_input'].items() if k != 'prev_output_tokens' } src_tokens = encoder_input['src_tokens'] src_lengths_no_eos = (src_tokens.ne(self.eos) & src_tokens.ne(self.pad)).long().sum(dim=1) input_size = src_tokens.size() # batch dimension goes first followed by source lengths bsz = input_size[0] src_len = input_size[1] beam_size = self.beam_size if self.match_source_len: max_len = src_lengths_no_eos.max().item() else: max_len = min( int(self.max_len_a * src_len + self.max_len_b), # exclude the EOS marker model.max_decoder_positions() - 1, ) # compute the encoder output for each beam encoder_outs = model.forward_encoder(encoder_input) new_order = torch.arange(bsz).view(-1, 1).repeat(1, beam_size).view(-1) new_order = new_order.to(src_tokens.device).long() encoder_outs = model.reorder_encoder_out(encoder_outs, new_order) src_lengths = encoder_input['src_lengths'] # initialize buffers scores = src_tokens.new(bsz * beam_size, max_len + 1).float().fill_(0) lm_prefix_scores = src_tokens.new(bsz * beam_size).float().fill_(0) scores_buf = scores.clone() tokens = src_tokens.new(bsz * beam_size, max_len + 2).long().fill_(self.pad) tokens_buf = tokens.clone() tokens[:, 0] = self.eos if bos_token is None else bos_token # reorder source tokens so they may be used as a reference in generating P(S|T) src_tokens = reorder_all_tokens(src_tokens, src_lengths, self.src_dict.eos_index) src_tokens = src_tokens.repeat(1, 
beam_size).view(-1, src_len) src_lengths = src_lengths.view(bsz, -1).repeat(1, beam_size).view(bsz*beam_size, -1) attn, attn_buf = None, None nonpad_idxs = None # The cands_to_ignore indicates candidates that should be ignored. # For example, suppose we're sampling and have already finalized 2/5 # samples. Then the cands_to_ignore would mark 2 positions as being ignored, # so that we only finalize the remaining 3 samples. cands_to_ignore = src_tokens.new_zeros(bsz, beam_size).eq(-1) # forward and backward-compatible False mask # list of completed sentences finalized = [[] for i in range(bsz)] finished = [False for i in range(bsz)] num_remaining_sent = bsz # number of candidate hypos per step cand_size = 2 * beam_size # 2 x beam size in case half are EOS # offset arrays for converting between different indexing schemes bbsz_offsets = (torch.arange(0, bsz) * beam_size).unsqueeze(1).type_as(tokens) cand_offsets = torch.arange(0, cand_size).type_as(tokens) # helper function for allocating buffers on the fly buffers = {} def buffer(name, type_of=tokens): # noqa if name not in buffers: buffers[name] = type_of.new() return buffers[name] def is_finished(sent, step, unfin_idx): """ Check whether we've finished generation for a given sentence, by comparing the worst score among finalized hypotheses to the best possible score among unfinalized hypotheses. """ assert len(finalized[sent]) <= beam_size if len(finalized[sent]) == beam_size: return True return False def finalize_hypos(step, bbsz_idx, eos_scores, combined_noisy_channel_eos_scores): """ Finalize the given hypotheses at this step, while keeping the total number of finalized hypotheses per sentence <= beam_size. Note: the input must be in the desired finalization order, so that hypotheses that appear earlier in the input are preferred to those that appear later. 
Args: step: current time step bbsz_idx: A vector of indices in the range [0, bsz*beam_size), indicating which hypotheses to finalize eos_scores: A vector of the same size as bbsz_idx containing fw scores for each hypothesis combined_noisy_channel_eos_scores: A vector of the same size as bbsz_idx containing combined noisy channel scores for each hypothesis """ assert bbsz_idx.numel() == eos_scores.numel() # clone relevant token and attention tensors tokens_clone = tokens.index_select(0, bbsz_idx) tokens_clone = tokens_clone[:, 1:step + 2] # skip the first index, which is EOS assert not tokens_clone.eq(self.eos).any() tokens_clone[:, step] = self.eos attn_clone = attn.index_select(0, bbsz_idx)[:, :, 1:step+2] if attn is not None else None # compute scores per token position pos_scores = scores.index_select(0, bbsz_idx)[:, :step+1] pos_scores[:, step] = eos_scores # convert from cumulative to per-position scores pos_scores[:, 1:] = pos_scores[:, 1:] - pos_scores[:, :-1] # normalize sentence-level scores if self.normalize_scores: combined_noisy_channel_eos_scores /= (step + 1) ** self.len_penalty cum_unfin = [] prev = 0 for f in finished: if f: prev += 1 else: cum_unfin.append(prev) sents_seen = set() for i, (idx, score) in enumerate(zip(bbsz_idx.tolist(), combined_noisy_channel_eos_scores.tolist())): unfin_idx = idx // beam_size sent = unfin_idx + cum_unfin[unfin_idx] sents_seen.add((sent, unfin_idx)) if self.match_source_len and step > src_lengths_no_eos[unfin_idx]: score = -math.inf def get_hypo(): if attn_clone is not None: # remove padding tokens from attn scores hypo_attn = attn_clone[i][nonpad_idxs[sent]] _, alignment = hypo_attn.max(dim=0) else: hypo_attn = None alignment = None return { 'tokens': tokens_clone[i], 'score': score, 'attention': hypo_attn, # src_len x tgt_len 'alignment': alignment, 'positional_scores': pos_scores[i], } if len(finalized[sent]) < beam_size: finalized[sent].append(get_hypo()) newly_finished = [] for sent, unfin_idx in sents_seen: # check termination conditions for this sentence if not finished[sent] and is_finished(sent, step, unfin_idx): finished[sent] = True newly_finished.append(unfin_idx) return newly_finished def noisy_channel_rescoring(lprobs, beam_size, bsz, src_tokens, tokens, k): """Rescore the top k hypothesis from each beam using noisy channel modeling Returns: new_fw_lprobs: the direct model probabilities after pruning the top k new_ch_lm_lprobs: the combined channel and language model probabilities new_lm_lprobs: the language model probabilities after pruning the top k """ with torch.no_grad(): lprobs_size = lprobs.size() if prefix_tokens is not None and step < prefix_tokens.size(1): probs_slice = lprobs.view(bsz, -1, lprobs.size(-1))[:, 0, :] cand_scores = torch.gather( probs_slice, dim=1, index=prefix_tokens[:, step].view(-1, 1).data ).expand(-1, beam_size).contiguous().view(bsz*beam_size, 1) cand_indices = prefix_tokens[:, step].view(-1, 1).expand(bsz, beam_size).data.contiguous().view(bsz*beam_size, 1) # need to calculate and save fw and lm probs for prefix tokens fw_top_k = cand_scores fw_top_k_idx = cand_indices k = 1 else: # take the top k best words for every sentence in batch*beam fw_top_k, fw_top_k_idx = torch.topk(lprobs.view(beam_size*bsz, -1), k=k) eos_idx = torch.nonzero(fw_top_k_idx.view(bsz*beam_size*k, -1) == self.eos)[:, 0] ch_scores = fw_top_k.new_full((beam_size*bsz*k, ), 0) src_size = torch.sum(src_tokens[:, :] != self.src_dict.pad_index, dim=1, keepdim=True, dtype=fw_top_k.dtype) if self.combine_method != "lm_only": 
temp_src_tokens_full = src_tokens[:, :].repeat(1, k).view(bsz*beam_size*k, -1) not_padding = temp_src_tokens_full[:, 1:] != self.src_dict.pad_index cur_tgt_size = step+2 # add eos to all candidate sentences except those that already end in eos eos_tokens = tokens[:, 0].repeat(1, k).view(-1, 1) eos_tokens[eos_idx] = self.tgt_dict.pad_index if step == 0: channel_input = torch.cat((fw_top_k_idx.view(-1, 1), eos_tokens), 1) else: # move eos from beginning to end of target sentence channel_input = torch.cat((tokens[:, 1:step + 1].repeat(1, k).view(-1, step), fw_top_k_idx.view(-1, 1), eos_tokens), 1) ch_input_lengths = torch.tensor(np.full(channel_input.size(0), cur_tgt_size)) ch_input_lengths[eos_idx] = cur_tgt_size-1 if self.channel_scoring_type == "unnormalized": ch_encoder_output = channel_model.encoder(channel_input, src_lengths=ch_input_lengths) ch_decoder_output, _ = channel_model.decoder(temp_src_tokens_full, encoder_out=ch_encoder_output, features_only=True) del ch_encoder_output ch_intermed_scores = channel_model.decoder.unnormalized_scores_given_target(ch_decoder_output, target_ids=temp_src_tokens_full[:, 1:]) ch_intermed_scores = ch_intermed_scores.float() ch_intermed_scores *= not_padding.float() ch_scores = torch.sum(ch_intermed_scores, dim=1) elif self.channel_scoring_type == "k2_separate": for k_idx in range(k): k_eos_tokens = eos_tokens[k_idx::k, :] if step == 0: k_ch_input = torch.cat((fw_top_k_idx[:, k_idx:k_idx+1], k_eos_tokens), 1) else: # move eos from beginning to end of target sentence k_ch_input = torch.cat((tokens[:, 1:step + 1], fw_top_k_idx[:, k_idx:k_idx+1], k_eos_tokens), 1) k_ch_input_lengths = ch_input_lengths[k_idx::k] k_ch_output = channel_model(k_ch_input, k_ch_input_lengths, src_tokens) k_ch_lprobs = channel_model.get_normalized_probs(k_ch_output, log_probs=True) k_ch_intermed_scores = torch.gather(k_ch_lprobs[:, :-1, :], 2, src_tokens[:, 1:].unsqueeze(2)).squeeze(2) k_ch_intermed_scores *= not_padding.float() ch_scores[k_idx::k] = torch.sum(k_ch_intermed_scores, dim=1) elif self.channel_scoring_type == "src_vocab": ch_encoder_output = channel_model.encoder(channel_input, src_lengths=ch_input_lengths) ch_decoder_output, _ = channel_model.decoder(temp_src_tokens_full, encoder_out=ch_encoder_output, features_only=True) del ch_encoder_output ch_lprobs = normalized_scores_with_batch_vocab( channel_model.decoder, ch_decoder_output, src_tokens, k, bsz, beam_size, self.src_dict.pad_index, top_k=self.top_k_vocab) ch_scores = torch.sum(ch_lprobs, dim=1) elif self.channel_scoring_type == "src_vocab_batched": ch_bsz_size = temp_src_tokens_full.shape[0] ch_lprobs_list = [None] * len(range(0, ch_bsz_size, self.ch_scoring_bsz)) for i, start_idx in enumerate(range(0, ch_bsz_size, self.ch_scoring_bsz)): end_idx = min(start_idx + self.ch_scoring_bsz, ch_bsz_size) temp_src_tokens_full_batch = temp_src_tokens_full[start_idx:end_idx, :] channel_input_batch = channel_input[start_idx:end_idx, :] ch_input_lengths_batch = ch_input_lengths[start_idx:end_idx] ch_encoder_output_batch = channel_model.encoder(channel_input_batch, src_lengths=ch_input_lengths_batch) ch_decoder_output_batch, _ = channel_model.decoder(temp_src_tokens_full_batch, encoder_out=ch_encoder_output_batch, features_only=True) ch_lprobs_list[i] = normalized_scores_with_batch_vocab( channel_model.decoder, ch_decoder_output_batch, src_tokens, k, bsz, beam_size, self.src_dict.pad_index, top_k=self.top_k_vocab, start_idx=start_idx, end_idx=end_idx) ch_lprobs = torch.cat(ch_lprobs_list, dim=0) ch_scores = 
torch.sum(ch_lprobs, dim=1) else: ch_output = channel_model(channel_input, ch_input_lengths, temp_src_tokens_full) ch_lprobs = channel_model.get_normalized_probs(ch_output, log_probs=True) ch_intermed_scores = torch.gather(ch_lprobs[:, :-1, :], 2, temp_src_tokens_full[:, 1:].unsqueeze(2)).squeeze().view(bsz*beam_size*k, -1) ch_intermed_scores *= not_padding.float() ch_scores = torch.sum(ch_intermed_scores, dim=1) else: cur_tgt_size = 0 ch_scores = ch_scores.view(bsz*beam_size, k) expanded_lm_prefix_scores = lm_prefix_scores.unsqueeze(1).expand(-1, k).flatten() if self.share_tgt_dict: lm_scores = get_lm_scores(lm, tokens[:, :step + 1].view(-1, step+1), lm_incremental_states, fw_top_k_idx.view(-1, 1), torch.tensor(np.full(tokens.size(0), step+1)), k) else: new_lm_input = dict2dict(tokens[:, :step + 1].view(-1, step+1), self.tgt_to_lm) new_cands = dict2dict(fw_top_k_idx.view(-1, 1), self.tgt_to_lm) lm_scores = get_lm_scores(lm, new_lm_input, lm_incremental_states, new_cands, torch.tensor(np.full(tokens.size(0), step+1)), k) lm_scores.add_(expanded_lm_prefix_scores) ch_lm_scores = combine_ch_lm(self.combine_method, ch_scores, lm_scores, src_size, cur_tgt_size) # initialize all as min value new_fw_lprobs = ch_scores.new(lprobs_size).fill_(-1e17).view(bsz*beam_size, -1) new_ch_lm_lprobs = ch_scores.new(lprobs_size).fill_(-1e17).view(bsz*beam_size, -1) new_lm_lprobs = ch_scores.new(lprobs_size).fill_(-1e17).view(bsz*beam_size, -1) new_fw_lprobs[:, self.pad] = -math.inf new_ch_lm_lprobs[:, self.pad] = -math.inf new_lm_lprobs[:, self.pad] = -math.inf new_fw_lprobs.scatter_(1, fw_top_k_idx, fw_top_k) new_ch_lm_lprobs.scatter_(1, fw_top_k_idx, ch_lm_scores) new_lm_lprobs.scatter_(1, fw_top_k_idx, lm_scores.view(-1, k)) return new_fw_lprobs, new_ch_lm_lprobs, new_lm_lprobs def combine_ch_lm(combine_type, ch_scores, lm_scores1, src_size, tgt_size): if self.channel_scoring_type == "unnormalized": ch_scores = self.log_softmax_fn( ch_scores.view(-1, self.beam_size * self.k2) ).view(ch_scores.shape) ch_scores = ch_scores * self.ch_weight lm_scores1 = lm_scores1 * self.lm_weight if combine_type == "lm_only": # log P(T|S) + log P(T) ch_scores = lm_scores1.view(ch_scores.size()) elif combine_type == "noisy_channel": # 1/t log P(T|S) + 1/s log P(S|T) + 1/t log P(T) if self.normalize_lm_scores_by_tgt_len: ch_scores.div_(src_size) lm_scores_norm = lm_scores1.view(ch_scores.size()).div(tgt_size) ch_scores.add_(lm_scores_norm) # 1/t log P(T|S) + 1/s log P(S|T) + 1/s log P(T) else: ch_scores.add_(lm_scores1.view(ch_scores.size())) ch_scores.div_(src_size) return ch_scores if self.channel_models is not None: channel_model = self.channel_models[0] # assume only one channel_model model else: channel_model = None lm = EnsembleModel(self.lm_models) lm_incremental_states = torch.jit.annotate( List[Dict[str, Dict[str, Optional[Tensor]]]], [ torch.jit.annotate(Dict[str, Dict[str, Optional[Tensor]]], {}) for i in range(lm.models_size) ], ) reorder_state = None batch_idxs = None for step in range(max_len + 1): # one extra step for EOS marker # reorder decoder internal states based on the prev choice of beams if reorder_state is not None: if batch_idxs is not None: # update beam indices to take into account removed sentences corr = batch_idxs - torch.arange(batch_idxs.numel()).type_as(batch_idxs) reorder_state.view(-1, beam_size).add_(corr.unsqueeze(-1) * beam_size) model.reorder_incremental_state(incremental_states, reorder_state) encoder_outs = model.reorder_encoder_out(encoder_outs, reorder_state) 
lm.reorder_incremental_state(lm_incremental_states, reorder_state) fw_lprobs, avg_attn_scores = model.forward_decoder( tokens[:, :step + 1], encoder_outs, incremental_states, temperature=self.temperature, ) fw_lprobs[:, self.pad] = -math.inf # never select pad fw_lprobs[:, self.unk] -= self.unk_penalty # apply unk penalty fw_lprobs, ch_lm_lprobs, lm_lprobs = noisy_channel_rescoring(fw_lprobs, beam_size, bsz, src_tokens, tokens, self.k2) # handle min and max length constraints if step >= max_len: fw_lprobs[:, :self.eos] = -math.inf fw_lprobs[:, self.eos + 1:] = -math.inf elif step < self.min_len: fw_lprobs[:, self.eos] = -math.inf # handle prefix tokens (possibly with different lengths) if prefix_tokens is not None and step < prefix_tokens.size(1): prefix_toks = prefix_tokens[:, step].unsqueeze(-1).repeat(1, beam_size).view(-1) prefix_mask = prefix_toks.ne(self.pad) prefix_fw_lprobs = fw_lprobs.gather(-1, prefix_toks.unsqueeze(-1)) fw_lprobs[prefix_mask] = -math.inf fw_lprobs[prefix_mask] = fw_lprobs[prefix_mask].scatter_( -1, prefix_toks[prefix_mask].unsqueeze(-1), prefix_fw_lprobs ) prefix_ch_lm_lprobs = ch_lm_lprobs.gather(-1, prefix_toks.unsqueeze(-1)) ch_lm_lprobs[prefix_mask] = -math.inf ch_lm_lprobs[prefix_mask] = ch_lm_lprobs[prefix_mask].scatter_( -1, prefix_toks[prefix_mask].unsqueeze(-1), prefix_ch_lm_lprobs ) prefix_lm_lprobs = lm_lprobs.gather(-1, prefix_toks.unsqueeze(-1)) lm_lprobs[prefix_mask] = -math.inf lm_lprobs[prefix_mask] = lm_lprobs[prefix_mask].scatter_( -1, prefix_toks[prefix_mask].unsqueeze(-1), prefix_lm_lprobs ) # if prefix includes eos, then we should make sure tokens and # scores are the same across all beams eos_mask = prefix_toks.eq(self.eos) if eos_mask.any(): # validate that the first beam matches the prefix first_beam = tokens[eos_mask].view(-1, beam_size, tokens.size(-1))[:, 0, 1:step + 1] eos_mask_batch_dim = eos_mask.view(-1, beam_size)[:, 0] target_prefix = prefix_tokens[eos_mask_batch_dim][:, :step] assert (first_beam == target_prefix).all() def replicate_first_beam(tensor, mask): tensor = tensor.view(-1, beam_size, tensor.size(-1)) tensor[mask] = tensor[mask][:, :1, :] return tensor.view(-1, tensor.size(-1)) # copy tokens, scores and lprobs from the first beam to all beams tokens = replicate_first_beam(tokens, eos_mask_batch_dim) scores = replicate_first_beam(scores, eos_mask_batch_dim) fw_lprobs = replicate_first_beam(fw_lprobs, eos_mask_batch_dim) ch_lm_lprobs = replicate_first_beam(ch_lm_lprobs, eos_mask_batch_dim) lm_lprobs = replicate_first_beam(lm_lprobs, eos_mask_batch_dim) if self.no_repeat_ngram_size > 0: # for each beam and batch sentence, generate a list of previous ngrams gen_ngrams = [{} for bbsz_idx in range(bsz * beam_size)] for bbsz_idx in range(bsz * beam_size): gen_tokens = tokens[bbsz_idx].tolist() for ngram in zip(*[gen_tokens[i:] for i in range(self.no_repeat_ngram_size)]): gen_ngrams[bbsz_idx][tuple(ngram[:-1])] = \ gen_ngrams[bbsz_idx].get(tuple(ngram[:-1]), []) + [ngram[-1]] # Record attention scores if avg_attn_scores is not None: if attn is None: attn = scores.new(bsz * beam_size, src_tokens.size(1), max_len + 2) attn_buf = attn.clone() nonpad_idxs = src_tokens.ne(self.pad) attn[:, :, step + 1].copy_(avg_attn_scores) scores = scores.type_as(fw_lprobs) scores_buf = scores_buf.type_as(fw_lprobs) self.search.set_src_lengths(src_lengths_no_eos) if self.no_repeat_ngram_size > 0: def calculate_banned_tokens(bbsz_idx): # before decoding the next token, prevent decoding of ngrams that have already appeared ngram_index = 
tuple(tokens[bbsz_idx, step + 2 - self.no_repeat_ngram_size:step + 1].tolist()) return gen_ngrams[bbsz_idx].get(ngram_index, []) if step + 2 - self.no_repeat_ngram_size >= 0: # no banned tokens if we haven't generated no_repeat_ngram_size tokens yet banned_tokens = [calculate_banned_tokens(bbsz_idx) for bbsz_idx in range(bsz * beam_size)] else: banned_tokens = [[] for bbsz_idx in range(bsz * beam_size)] for bbsz_idx in range(bsz * beam_size): fw_lprobs[bbsz_idx, banned_tokens[bbsz_idx]] = -math.inf combined_noisy_channel_scores, fw_lprobs_top_k, lm_lprobs_top_k, cand_indices, cand_beams = self.search.step( step, fw_lprobs.view(bsz, -1, self.vocab_size), scores.view(bsz, beam_size, -1)[:, :, :step], ch_lm_lprobs.view(bsz, -1, self.vocab_size), lm_lprobs.view(bsz, -1, self.vocab_size), self.combine_method ) # cand_bbsz_idx contains beam indices for the top candidate # hypotheses, with a range of values: [0, bsz*beam_size), # and dimensions: [bsz, cand_size] cand_bbsz_idx = cand_beams.add(bbsz_offsets) # finalize hypotheses that end in eos (except for candidates to be ignored) eos_mask = cand_indices.eq(self.eos) eos_mask[:, :beam_size] &= ~cands_to_ignore # only consider eos when it's among the top beam_size indices eos_bbsz_idx = torch.masked_select( cand_bbsz_idx[:, :beam_size], mask=eos_mask[:, :beam_size] ) finalized_sents = set() if eos_bbsz_idx.numel() > 0: eos_scores = torch.masked_select( fw_lprobs_top_k[:, :beam_size], mask=eos_mask[:, :beam_size] ) combined_noisy_channel_eos_scores = torch.masked_select( combined_noisy_channel_scores[:, :beam_size], mask=eos_mask[:, :beam_size], ) # finalize hypo using channel model score finalized_sents = finalize_hypos( step, eos_bbsz_idx, eos_scores, combined_noisy_channel_eos_scores) num_remaining_sent -= len(finalized_sents) assert num_remaining_sent >= 0 if num_remaining_sent == 0: break if len(finalized_sents) > 0: new_bsz = bsz - len(finalized_sents) # construct batch_idxs which holds indices of batches to keep for the next pass batch_mask = cand_indices.new_ones(bsz) batch_mask[cand_indices.new(finalized_sents)] = 0 batch_idxs = torch.nonzero(batch_mask).squeeze(-1) eos_mask = eos_mask[batch_idxs] cand_beams = cand_beams[batch_idxs] bbsz_offsets.resize_(new_bsz, 1) cand_bbsz_idx = cand_beams.add(bbsz_offsets) lm_lprobs_top_k = lm_lprobs_top_k[batch_idxs] fw_lprobs_top_k = fw_lprobs_top_k[batch_idxs] cand_indices = cand_indices[batch_idxs] if prefix_tokens is not None: prefix_tokens = prefix_tokens[batch_idxs] src_lengths_no_eos = src_lengths_no_eos[batch_idxs] cands_to_ignore = cands_to_ignore[batch_idxs] scores = scores.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1) scores_buf.resize_as_(scores) tokens = tokens.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1) tokens_buf.resize_as_(tokens) src_tokens = src_tokens.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1) src_lengths = src_lengths.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1) lm_prefix_scores = lm_prefix_scores.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1).squeeze() if attn is not None: attn = attn.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, attn.size(1), -1) attn_buf.resize_as_(attn) bsz = new_bsz else: batch_idxs = None # Set active_mask so that values > cand_size indicate eos or # ignored hypos and values < cand_size indicate candidate # active hypos. After this, the min values per row are the top # candidate active hypos. 
eos_mask[:, :beam_size] |= cands_to_ignore active_mask = torch.add( eos_mask.type_as(cand_offsets) * cand_size, cand_offsets[: eos_mask.size(1)], ) # get the top beam_size active hypotheses, which are just the hypos # with the smallest values in active_mask active_hypos, new_cands_to_ignore = buffer('active_hypos'), buffer('new_cands_to_ignore') torch.topk( active_mask, k=beam_size, dim=1, largest=False, out=(new_cands_to_ignore, active_hypos) ) # update cands_to_ignore to ignore any finalized hypos cands_to_ignore = new_cands_to_ignore.ge(cand_size)[:, :beam_size] assert (~cands_to_ignore).any(dim=1).all() active_bbsz_idx = buffer('active_bbsz_idx') torch.gather( cand_bbsz_idx, dim=1, index=active_hypos, out=active_bbsz_idx, ) active_scores = torch.gather( fw_lprobs_top_k, dim=1, index=active_hypos, out=scores[:, step].view(bsz, beam_size), ) active_bbsz_idx = active_bbsz_idx.view(-1) active_scores = active_scores.view(-1) # copy tokens and scores for active hypotheses torch.index_select( tokens[:, :step + 1], dim=0, index=active_bbsz_idx, out=tokens_buf[:, :step + 1], ) torch.gather( cand_indices, dim=1, index=active_hypos, out=tokens_buf.view(bsz, beam_size, -1)[:, :, step + 1], ) if step > 0: torch.index_select( scores[:, :step], dim=0, index=active_bbsz_idx, out=scores_buf[:, :step], ) torch.gather( fw_lprobs_top_k, dim=1, index=active_hypos, out=scores_buf.view(bsz, beam_size, -1)[:, :, step], ) torch.gather( lm_lprobs_top_k, dim=1, index=active_hypos, out=lm_prefix_scores.view(bsz, beam_size) ) # copy attention for active hypotheses if attn is not None: torch.index_select( attn[:, :, :step + 2], dim=0, index=active_bbsz_idx, out=attn_buf[:, :, :step + 2], ) # swap buffers tokens, tokens_buf = tokens_buf, tokens scores, scores_buf = scores_buf, scores if attn is not None: attn, attn_buf = attn_buf, attn # reorder incremental state in decoder reorder_state = active_bbsz_idx # sort by score descending for sent in range(len(finalized)): finalized[sent] = sorted(finalized[sent], key=lambda r: r['score'], reverse=True) return finalized def get_lm_scores(model, input_tokens, incremental_states, cand_tokens, input_len, k): with torch.no_grad(): lm_lprobs, avg_attn_scores = model.forward_decoder( input_tokens, encoder_outs=None, incremental_states=incremental_states, ) lm_lprobs_size = lm_lprobs.size(0) probs_next_wrd = torch.gather(lm_lprobs.repeat(1, k).view(lm_lprobs_size*k, -1), 1, cand_tokens).squeeze().view(-1) return probs_next_wrd def make_dict2dict(old_dict, new_dict): dict2dict_map = {} for sym in old_dict.symbols: dict2dict_map[old_dict.index(sym)] = new_dict.index(sym) return dict2dict_map def dict2dict(tokens, dict2dict_map): if tokens.device == torch.device('cpu'): tokens_tmp = tokens else: tokens_tmp = tokens.cpu() return tokens_tmp.map_( tokens_tmp, lambda _, val, dict2dict_map=dict2dict_map : dict2dict_map[float(val)] ).to(tokens.device) def reorder_tokens(tokens, lengths, eos): # reorder source tokens so they may be used as reference for P(S|T) return torch.cat((tokens.new([eos]), tokens[-lengths:-1], tokens[:-lengths]), 0) def reorder_all_tokens(tokens, lengths, eos): # used to reorder src tokens from [<pad> <w1> <w2> .. 
<eos>] to [<eos> <w1> <w2>...<pad>] # so source tokens can be used to predict P(S|T) return torch.stack([reorder_tokens(token, length, eos) for token, length in zip(tokens, lengths)]) def normalized_scores_with_batch_vocab( model_decoder, features, target_ids, k, bsz, beam_size, pad_idx, top_k=0, vocab_size_meter=None, start_idx=None, end_idx=None, **kwargs): """ Get normalized probabilities (or log probs) from a net's output w.r.t. vocab consisting of target IDs in the batch """ if model_decoder.adaptive_softmax is None: weight = model_decoder.output_projection.weight vocab_ids = torch.unique( torch.cat( (torch.unique(target_ids), torch.arange(top_k, device=target_ids.device)) ) ) id_map = dict(zip(vocab_ids.tolist(), range(len(vocab_ids)))) mapped_target_ids = target_ids.cpu().apply_( lambda x, id_map=id_map: id_map[x] ).to(target_ids.device) expanded_target_ids = mapped_target_ids[:, :].repeat(1, k).view(bsz*beam_size*k, -1) if start_idx is not None and end_idx is not None: expanded_target_ids = expanded_target_ids[start_idx:end_idx, :] logits = F.linear(features, weight[vocab_ids, :]) log_softmax = F.log_softmax(logits, dim=-1, dtype=torch.float32) intermed_scores = torch.gather( log_softmax[:, :-1, :], 2, expanded_target_ids[:, 1:].unsqueeze(2), ).squeeze() not_padding = expanded_target_ids[:, 1:] != pad_idx intermed_scores *= not_padding.float() return intermed_scores else: raise ValueError("adaptive softmax doesn't work with " + "`normalized_scores_with_batch_vocab()`")
41,200
47.874259
187
py
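One small piece of the generator above that is easy to check in isolation is reorder_tokens, which moves <eos> to the front and padding to the back so the source sentence can be scored as a target under P(S|T). A minimal sketch with made-up token ids:

import torch

def reorder_tokens(tokens, lengths, eos):
    # move <eos> to the front and padding to the back, as done in the generator above
    return torch.cat((tokens.new([eos]), tokens[-lengths:-1], tokens[:-lengths]), 0)

pad, eos, w1, w2 = 1, 2, 10, 11
src = torch.tensor([pad, pad, w1, w2, eos])      # left-padded, eos-terminated source
print(reorder_tokens(src, lengths=3, eos=eos))   # tensor([ 2, 10, 11,  1,  1])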
sign-topic
sign-topic-main/examples/textless_nlp/gslm/ulm/sample.py
#!/usr/bin/env python3 -u # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ Sample from a trained LM; hacked fairseq-interactive """ from collections import namedtuple import os import ast import numpy as np from fairseq import checkpoint_utils, options, tasks, utils import tqdm Batch = namedtuple('Batch', 'ids src_tokens src_lengths') Translation = namedtuple('Translation', 'src_str hypos pos_scores alignments') def make_batches(lines, args, task, max_positions): tokens = [ task.source_dictionary.encode_line( src_str, add_if_not_exist=False ).long() for src_str in lines ] lengths = [t.numel() for t in tokens] itr = task.get_batch_iterator( dataset=task.build_dataset_for_inference(tokens, lengths), max_tokens=args.dataset.max_tokens, max_sentences=args.dataset.batch_size, max_positions=max_positions, ignore_invalid_inputs=args.dataset.skip_invalid_size_inputs_valid_test ).next_epoch_itr(shuffle=False) for batch in itr: yield Batch( ids=batch['id'], src_tokens=batch['net_input']['src_tokens'], src_lengths=batch['net_input']['src_lengths'], ) def main(args): arg_prompts = args.prompts arg_output = args.output arg_debug = args.debug arg_sample_size = args.samples_per_prompt try: from fairseq.dataclass.utils import convert_namespace_to_omegaconf args = convert_namespace_to_omegaconf(args) except: pass # if args.max_tokens is None and args.max_sentences is None: if args.common.seed is not None: np.random.seed(args.common.seed) utils.set_torch_seed(args.common.seed) if args.generation.sampling: args.generation.nbest = args.generation.beam = arg_sample_size task = tasks.setup_task(args.task) overrides = ast.literal_eval(args.common_eval.model_overrides) models, _model_args = checkpoint_utils.load_model_ensemble( args.common_eval.path.split(os.pathsep), arg_overrides=overrides, task=task, suffix=getattr(args, "checkpoint_suffix", ""), ) # Set dictionaries src_dict = task.source_dictionary tgt_dict = task.target_dictionary # Optimize ensemble for generation for model in models: model.prepare_for_inference_(args) model.cuda() # Load alignment dictionary for unknown word replacement # (None if no unknown word replacement, empty if no path to align dictionary) align_dict = utils.load_align_dict(args.generation.replace_unk) max_positions = utils.resolve_max_positions( task.max_positions(), *[model.max_positions() for model in models] ) output_file = open(arg_output, 'w') with open(arg_prompts, 'r') as fin: lines = fin.readlines() split = [x.split('|', 1) for x in lines] seq_id = [x[0] for x in split] prompts = [x[1] for x in split] if args.generation.prefix_size >= 0: prompts = [' '.join(l.split()[:args.generation.prefix_size]) for l in prompts] if arg_debug: prompts = prompts[:10] generator = task.build_generator(models, args.generation) start_id = 0 pbar = tqdm.tqdm(total=len(prompts)) for batch in make_batches(prompts, args, task, max_positions): src_tokens = batch.src_tokens src_lengths = batch.src_lengths src_tokens = src_tokens.cuda() src_lengths = src_lengths.cuda() sample = { 'net_input': { 'src_tokens': src_tokens, 'src_lengths': src_lengths, }, } results = [] translations = task.inference_step(generator, models, sample) for i, (id, hypos) in enumerate(zip(batch.ids.tolist(), translations)): src_tokens_i = utils.strip_pad(src_tokens[i], tgt_dict.pad()) results.append((i + start_id, src_tokens_i, hypos)) # sort output to match input order for id, src_tokens, hypos 
in sorted(results, key=lambda x: x[0]): if src_dict is not None: src_str = src_dict.string( src_tokens, args.common_eval.post_process) # Process top predictions for hypo_id, hypo in enumerate(hypos): _hypo_tokens, hypo_str, _alignment = utils.post_process_prediction( hypo_tokens=hypo['tokens'].int().cpu(), src_str=src_str, alignment=hypo['alignment'], align_dict=align_dict, tgt_dict=tgt_dict, remove_bpe=args.common_eval.post_process, ) detok_hypo_str = hypo_str utterance = detok_hypo_str print(f'{seq_id[id]}__{hypo_id}|{utterance}', file=output_file) pbar.update(1) start_id += len(results) # output_file.close() def cli_main(): parser = options.get_interactive_generation_parser() parser.add_argument('--prompts', type=str, default=None, required=True) parser.add_argument('--output', type=str, default=None, required=True) parser.add_argument('--debug', action='store_true') parser.add_argument('--samples-per-prompt', type=int, default=1) args = options.parse_args_and_arch(parser) np.random.seed(args.seed) utils.set_torch_seed(args.seed) main(args) if __name__ == '__main__': cli_main()
5,623
31.137143
103
py
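The sampling script above expects a prompt file with one "<sequence_id>|<prompt>" line per example (it splits on the first '|') and writes "<sequence_id>__<hypo_id>|<utterance>" lines to the output file. A small sketch of producing and parsing that format; the ids and unit values below are made up.

# Unit ids and sequence ids are illustrative only.
prompts = [("seq0", "71 12 57 33 5"), ("seq1", "9 9 41 18")]

with open("prompts.txt", "w") as f:
    for seq_id, prompt in prompts:
        f.write(f"{seq_id}|{prompt}\n")

# Parsing mirrors the script: split each line on the first '|' only.
with open("prompts.txt") as f:
    split = [line.rstrip("\n").split("|", 1) for line in f]
seq_ids = [x[0] for x in split]
texts = [x[1] for x in split]
print(seq_ids, texts)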
sign-topic
sign-topic-main/examples/textless_nlp/gslm/tools/resynthesize_speech.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import gc import logging import os import joblib import soundfile as sf import torch from examples.textless_nlp.gslm.speech2unit.pretrained.utils import get_feature_reader from examples.textless_nlp.gslm.unit2speech.tts_data import TacotronInputDataset from examples.textless_nlp.gslm.unit2speech.utils import ( load_tacotron, load_waveglow, synthesize_audio, ) def get_logger(): log_format = "[%(asctime)s] [%(levelname)s]: %(message)s" logging.basicConfig(format=log_format, level=logging.INFO) logger = logging.getLogger(__name__) return logger def get_parser(): parser = argparse.ArgumentParser(description="GSLM U2S tool") parser.add_argument( "--feature_type", type=str, choices=["logmel", "hubert", "w2v2", "cpc"], default=None, required=True, help="Acoustic feature type", ) parser.add_argument( "--acoustic_model_path", type=str, help="Pretrained acoustic model checkpoint", ) parser.add_argument("--layer", type=int, help="Layer of acoustic model") parser.add_argument( "--kmeans_model_path", type=str, required=True, help="K-means model file path to use for inference", ) parser.add_argument( "--tts_model_path", type=str, help="TTS model file path to use for inference", ) parser.add_argument( "--code_dict_path", type=str, help="Code dict file path to use for inference", ) parser.add_argument( "--waveglow_path", type=str, help="Waveglow (vocoder) model file path to use for inference", ) parser.add_argument("--max_decoder_steps", type=int, default=2000) parser.add_argument("--denoiser_strength", type=float, default=0.1) return parser ################################################ def main(args, logger): # Acoustic Model logger.info(f"Loading acoustic model from {args.tts_model_path}...") feature_reader_cls = get_feature_reader(args.feature_type) reader = feature_reader_cls( checkpoint_path=args.acoustic_model_path, layer=args.layer ) # K-means Model logger.info(f"Loading K-means model from {args.kmeans_model_path} ...") kmeans_model = joblib.load(open(args.kmeans_model_path, "rb")) kmeans_model.verbose = False # TTS Model logger.info(f"Loading TTS model from {args.tts_model_path}...") tacotron_model, sample_rate, hparams = load_tacotron( tacotron_model_path=args.tts_model_path, max_decoder_steps=args.max_decoder_steps, ) # Waveglow Model logger.info(f"Loading Waveglow model from {args.waveglow_path}...") waveglow, denoiser = load_waveglow(waveglow_path=args.waveglow_path) # Dataset if not os.path.exists(hparams.code_dict): hparams.code_dict = args.code_dict_path tts_dataset = TacotronInputDataset(hparams) iters = 0 while True: in_file_path = input("Input: Enter the full file path of audio file...\n") out_file_path = input("Output: Enter the full file path of audio file...\n") feats = reader.get_feats(in_file_path).cpu().numpy() iters += 1 if iters == 1000: gc.collect() torch.cuda.empty_cache() quantized_units = kmeans_model.predict(feats) quantized_units_str = " ".join(map(str, quantized_units)) tts_input = tts_dataset.get_tensor(quantized_units_str) mel, aud, aud_dn, has_eos = synthesize_audio( tacotron_model, waveglow, denoiser, tts_input.unsqueeze(0), strength=args.denoiser_strength, ) sf.write(f"{out_file_path}", aud_dn[0].cpu().float().numpy(), sample_rate) logger.info("Resynthesis done!\n") if __name__ == "__main__": parser = get_parser() args = parser.parse_args() logger = get_logger() 
logger.info(args) main(args, logger)
4,183
30.458647
86
py
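The core speech-to-unit step in the tool above is: extract features with the acoustic model reader, assign each frame to a k-means cluster, and join the cluster ids into a string for the TTS dataset. A minimal sketch of the quantization half, using a freshly fitted scikit-learn KMeans as a stand-in for the pretrained model loaded via joblib; the feature dimensions and data are made up.

import numpy as np
from sklearn.cluster import KMeans

# Stand-in for the pretrained k-means checkpoint; in the real pipeline `feats`
# comes from the HuBERT / CPC / w2v2 / logmel feature reader.
kmeans_model = KMeans(n_clusters=8, n_init=10).fit(np.random.rand(200, 16))

feats = np.random.rand(50, 16)                      # (frames, feature_dim)
quantized_units = kmeans_model.predict(feats)       # one cluster id per frame
quantized_units_str = " ".join(map(str, quantized_units))
print(quantized_units_str[:40], "...")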
sign-topic
sign-topic-main/examples/textless_nlp/gslm/unit2speech/tts_data.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import torch
import numpy as np

from examples.textless_nlp.gslm.unit2speech.tacotron2.text import (
    EOS_TOK,
    SOS_TOK,
    code_to_sequence,
    text_to_sequence,
)
from examples.textless_nlp.gslm.unit2speech.tacotron2.utils import (
    load_code_dict,
)


class TacotronInputDataset:
    def __init__(self, hparams, append_str=""):
        self.is_text = getattr(hparams, "text_or_code", "text") == "text"
        if not self.is_text:
            self.code_dict = load_code_dict(
                hparams.code_dict, hparams.add_sos, hparams.add_eos
            )
            self.code_key = hparams.code_key
        self.add_sos = hparams.add_sos
        self.add_eos = hparams.add_eos
        self.collapse_code = hparams.collapse_code
        self.append_str = append_str

    def process_code(self, inp_str):
        inp_toks = inp_str.split()
        if self.add_sos:
            inp_toks = [SOS_TOK] + inp_toks
        if self.add_eos:
            inp_toks = inp_toks + [EOS_TOK]
        return code_to_sequence(inp_toks, self.code_dict, self.collapse_code)

    def process_text(self, inp_str):
        return text_to_sequence(inp_str, ["english_cleaners"])

    def get_tensor(self, inp_str):
        # uid, txt, inp_str = self._get_data(idx)
        inp_str = inp_str + self.append_str
        if self.is_text:
            inp_toks = self.process_text(inp_str)
        else:
            inp_toks = self.process_code(inp_str)
        return torch.from_numpy(np.array(inp_toks)).long()

    def __len__(self):
        return len(self.data)
1,733
30.527273
77
py
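A minimal sketch of what TacotronInputDataset.process_code above produces, using a made-up code dictionary in place of the one loaded from hparams.code_dict and a plain dict lookup in place of code_to_sequence (the optional collapse_code step, which merges repeated units, is omitted):

# The dictionary contents and unit strings below are illustrative only.
SOS_TOK, EOS_TOK = "<sos>", "<eos>"
code_dict = {"<sos>": 0, "<eos>": 1, "71": 2, "12": 3, "57": 4}

def process_code(inp_str, add_sos=True, add_eos=True):
    inp_toks = inp_str.split()
    if add_sos:
        inp_toks = [SOS_TOK] + inp_toks
    if add_eos:
        inp_toks = inp_toks + [EOS_TOK]
    return [code_dict[tok] for tok in inp_toks]

print(process_code("71 12 57"))  # -> [0, 2, 3, 4, 1]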
sign-topic
sign-topic-main/examples/textless_nlp/gslm/unit2speech/multiproc.py
import os
import time
import torch
import sys
import subprocess

argslist = list(sys.argv)[1:]
log_dir = argslist[-1]
num_gpus = torch.cuda.device_count()
argslist.append('--n_gpus={}'.format(num_gpus))
workers = []
job_id = time.strftime("%Y_%m_%d-%H%M%S")
argslist.append("--group_name=group_{}".format(job_id))

print("GPU log directory is {}".format(log_dir))
os.makedirs(log_dir, exist_ok=True)
for i in range(num_gpus):
    argslist.append('--rank={}'.format(i))
    stdout = None if i == 0 else open("{}/{}_GPU_{}.log".format(log_dir, job_id, i), "w")
    print(argslist)
    p = subprocess.Popen([str(sys.executable)] + argslist, stdout=stdout)
    workers.append(p)
    argslist = argslist[:-1]

for p in workers:
    p.wait()
772
26.607143
84
py
sign-topic
sign-topic-main/examples/textless_nlp/gslm/unit2speech/utils.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import torch

from examples.textless_nlp.gslm.unit2speech.tacotron2.model import Tacotron2
from examples.textless_nlp.gslm.unit2speech.tacotron2.waveglow_denoiser import (
    Denoiser,
)


def load_quantized_audio_from_file(file_path):
    base_fname_batch, quantized_units_batch = [], []
    with open(file_path) as f:
        for line in f:
            base_fname, quantized_units_str = line.rstrip().split("|")
            quantized_units = [int(q) for q in quantized_units_str.split(" ")]
            base_fname_batch.append(base_fname)
            quantized_units_batch.append(quantized_units)
    return base_fname_batch, quantized_units_batch


def synthesize_audio(model, waveglow, denoiser, inp, lab=None, strength=0.0):
    assert inp.size(0) == 1
    inp = inp.cuda()
    if lab is not None:
        lab = torch.LongTensor(1).cuda().fill_(lab)
    with torch.no_grad():
        _, mel, _, ali, has_eos = model.inference(inp, lab, ret_has_eos=True)
        aud = waveglow.infer(mel, sigma=0.666)
        aud_dn = denoiser(aud, strength=strength).squeeze(1)
    return mel, aud, aud_dn, has_eos


def load_tacotron(tacotron_model_path, max_decoder_steps):
    ckpt_dict = torch.load(tacotron_model_path)
    hparams = ckpt_dict["hparams"]
    hparams.max_decoder_steps = max_decoder_steps
    sr = hparams.sampling_rate
    model = Tacotron2(hparams)
    model.load_state_dict(ckpt_dict["model_dict"])
    model = model.cuda().eval().half()
    return model, sr, hparams


def load_waveglow(waveglow_path):
    waveglow = torch.load(waveglow_path)["model"]
    waveglow = waveglow.cuda().eval().half()
    for k in waveglow.convinv:
        k.float()
    denoiser = Denoiser(waveglow)
    return waveglow, denoiser
1,904
33.017857
80
py
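The quantized-audio file read by load_quantized_audio_from_file above is plain text with one "<utterance_name>|<space-separated unit ids>" line per clip. A small round-trip sketch; the utterance names and unit ids are made up.

lines = [
    "utt_0001|71 12 12 57 33",
    "utt_0002|5 5 9 41",
]
with open("quantized_units.txt", "w") as f:
    f.write("\n".join(lines) + "\n")

# Parsing mirrors the loader above: split on '|', then parse space-separated ints.
base_fname_batch, quantized_units_batch = [], []
with open("quantized_units.txt") as f:
    for line in f:
        base_fname, quantized_units_str = line.rstrip().split("|")
        base_fname_batch.append(base_fname)
        quantized_units_batch.append([int(q) for q in quantized_units_str.split(" ")])
print(base_fname_batch, quantized_units_batch)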
sign-topic
sign-topic-main/examples/textless_nlp/gslm/unit2speech/glow.py
# ***************************************************************************** # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of the NVIDIA CORPORATION nor the # names of its contributors may be used to endorse or promote products # derived from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # ***************************************************************************** import copy import torch from torch.autograd import Variable import torch.nn.functional as F @torch.jit.script def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels): n_channels_int = n_channels[0] in_act = input_a+input_b t_act = torch.tanh(in_act[:, :n_channels_int, :]) s_act = torch.sigmoid(in_act[:, n_channels_int:, :]) acts = t_act * s_act return acts class WaveGlowLoss(torch.nn.Module): def __init__(self, sigma=1.0): super(WaveGlowLoss, self).__init__() self.sigma = sigma def forward(self, model_output): z, log_s_list, log_det_W_list = model_output for i, log_s in enumerate(log_s_list): if i == 0: log_s_total = torch.sum(log_s) log_det_W_total = log_det_W_list[i] else: log_s_total = log_s_total + torch.sum(log_s) log_det_W_total += log_det_W_list[i] loss = torch.sum(z*z)/(2*self.sigma*self.sigma) - log_s_total - log_det_W_total return loss/(z.size(0)*z.size(1)*z.size(2)) class Invertible1x1Conv(torch.nn.Module): """ The layer outputs both the convolution, and the log determinant of its weight matrix. 
If reverse=True it does convolution with inverse """ def __init__(self, c): super(Invertible1x1Conv, self).__init__() self.conv = torch.nn.Conv1d(c, c, kernel_size=1, stride=1, padding=0, bias=False) # Sample a random orthonormal matrix to initialize weights W = torch.qr(torch.FloatTensor(c, c).normal_())[0] # Ensure determinant is 1.0 not -1.0 if torch.det(W) < 0: W[:,0] = -1*W[:,0] W = W.view(c, c, 1) self.conv.weight.data = W def forward(self, z, reverse=False): # shape batch_size, group_size, n_of_groups = z.size() W = self.conv.weight.squeeze() if reverse: if not hasattr(self, 'W_inverse'): # Reverse computation W_inverse = W.float().inverse() W_inverse = Variable(W_inverse[..., None]) if z.type() == 'torch.cuda.HalfTensor': W_inverse = W_inverse.half() self.W_inverse = W_inverse z = F.conv1d(z, self.W_inverse, bias=None, stride=1, padding=0) return z else: # Forward computation log_det_W = batch_size * n_of_groups * torch.logdet(W) z = self.conv(z) return z, log_det_W class WN(torch.nn.Module): """ This is the WaveNet like layer for the affine coupling. The primary difference from WaveNet is the convolutions need not be causal. There is also no dilation size reset. The dilation only doubles on each layer """ def __init__(self, n_in_channels, n_mel_channels, n_layers, n_channels, kernel_size): super(WN, self).__init__() assert(kernel_size % 2 == 1) assert(n_channels % 2 == 0) self.n_layers = n_layers self.n_channels = n_channels self.in_layers = torch.nn.ModuleList() self.res_skip_layers = torch.nn.ModuleList() start = torch.nn.Conv1d(n_in_channels, n_channels, 1) start = torch.nn.utils.weight_norm(start, name='weight') self.start = start # Initializing last layer to 0 makes the affine coupling layers # do nothing at first. This helps with training stability end = torch.nn.Conv1d(n_channels, 2*n_in_channels, 1) end.weight.data.zero_() end.bias.data.zero_() self.end = end cond_layer = torch.nn.Conv1d(n_mel_channels, 2*n_channels*n_layers, 1) self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight') for i in range(n_layers): dilation = 2 ** i padding = int((kernel_size*dilation - dilation)/2) in_layer = torch.nn.Conv1d(n_channels, 2*n_channels, kernel_size, dilation=dilation, padding=padding) in_layer = torch.nn.utils.weight_norm(in_layer, name='weight') self.in_layers.append(in_layer) # last one is not necessary if i < n_layers - 1: res_skip_channels = 2*n_channels else: res_skip_channels = n_channels res_skip_layer = torch.nn.Conv1d(n_channels, res_skip_channels, 1) res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight') self.res_skip_layers.append(res_skip_layer) def forward(self, forward_input): audio, spect = forward_input audio = self.start(audio) output = torch.zeros_like(audio) n_channels_tensor = torch.IntTensor([self.n_channels]) spect = self.cond_layer(spect) for i in range(self.n_layers): spect_offset = i*2*self.n_channels acts = fused_add_tanh_sigmoid_multiply( self.in_layers[i](audio), spect[:,spect_offset:spect_offset+2*self.n_channels,:], n_channels_tensor) res_skip_acts = self.res_skip_layers[i](acts) if i < self.n_layers - 1: audio = audio + res_skip_acts[:,:self.n_channels,:] output = output + res_skip_acts[:,self.n_channels:,:] else: output = output + res_skip_acts return self.end(output) class WaveGlow(torch.nn.Module): def __init__(self, n_mel_channels, n_flows, n_group, n_early_every, n_early_size, WN_config): super(WaveGlow, self).__init__() self.upsample = torch.nn.ConvTranspose1d(n_mel_channels, n_mel_channels, 1024, stride=256) 
assert(n_group % 2 == 0) self.n_flows = n_flows self.n_group = n_group self.n_early_every = n_early_every self.n_early_size = n_early_size self.WN = torch.nn.ModuleList() self.convinv = torch.nn.ModuleList() n_half = int(n_group/2) # Set up layers with the right sizes based on how many dimensions # have been output already n_remaining_channels = n_group for k in range(n_flows): if k % self.n_early_every == 0 and k > 0: n_half = n_half - int(self.n_early_size/2) n_remaining_channels = n_remaining_channels - self.n_early_size self.convinv.append(Invertible1x1Conv(n_remaining_channels)) self.WN.append(WN(n_half, n_mel_channels*n_group, **WN_config)) self.n_remaining_channels = n_remaining_channels # Useful during inference def forward(self, forward_input): """ forward_input[0] = mel_spectrogram: batch x n_mel_channels x frames forward_input[1] = audio: batch x time """ spect, audio = forward_input # Upsample spectrogram to size of audio spect = self.upsample(spect) assert(spect.size(2) >= audio.size(1)) if spect.size(2) > audio.size(1): spect = spect[:, :, :audio.size(1)] spect = spect.unfold(2, self.n_group, self.n_group).permute(0, 2, 1, 3) spect = spect.contiguous().view(spect.size(0), spect.size(1), -1).permute(0, 2, 1) audio = audio.unfold(1, self.n_group, self.n_group).permute(0, 2, 1) output_audio = [] log_s_list = [] log_det_W_list = [] for k in range(self.n_flows): if k % self.n_early_every == 0 and k > 0: output_audio.append(audio[:,:self.n_early_size,:]) audio = audio[:,self.n_early_size:,:] audio, log_det_W = self.convinv[k](audio) log_det_W_list.append(log_det_W) n_half = int(audio.size(1)/2) audio_0 = audio[:,:n_half,:] audio_1 = audio[:,n_half:,:] output = self.WN[k]((audio_0, spect)) log_s = output[:, n_half:, :] b = output[:, :n_half, :] audio_1 = torch.exp(log_s)*audio_1 + b log_s_list.append(log_s) audio = torch.cat([audio_0, audio_1],1) output_audio.append(audio) return torch.cat(output_audio,1), log_s_list, log_det_W_list def infer(self, spect, sigma=1.0): spect = self.upsample(spect) # trim conv artifacts. 
maybe pad spec to kernel multiple time_cutoff = self.upsample.kernel_size[0] - self.upsample.stride[0] spect = spect[:, :, :-time_cutoff] spect = spect.unfold(2, self.n_group, self.n_group).permute(0, 2, 1, 3) spect = spect.contiguous().view(spect.size(0), spect.size(1), -1).permute(0, 2, 1) if spect.type() == 'torch.cuda.HalfTensor': audio = torch.cuda.HalfTensor(spect.size(0), self.n_remaining_channels, spect.size(2)).normal_() else: audio = torch.cuda.FloatTensor(spect.size(0), self.n_remaining_channels, spect.size(2)).normal_() audio = torch.autograd.Variable(sigma*audio) for k in reversed(range(self.n_flows)): n_half = int(audio.size(1)/2) audio_0 = audio[:,:n_half,:] audio_1 = audio[:,n_half:,:] output = self.WN[k]((audio_0, spect)) s = output[:, n_half:, :] b = output[:, :n_half, :] audio_1 = (audio_1 - b)/torch.exp(s) audio = torch.cat([audio_0, audio_1],1) audio = self.convinv[k](audio, reverse=True) if k % self.n_early_every == 0 and k > 0: if spect.type() == 'torch.cuda.HalfTensor': z = torch.cuda.HalfTensor(spect.size(0), self.n_early_size, spect.size(2)).normal_() else: z = torch.cuda.FloatTensor(spect.size(0), self.n_early_size, spect.size(2)).normal_() audio = torch.cat((sigma*z, audio),1) audio = audio.permute(0,2,1).contiguous().view(audio.size(0), -1).data return audio @staticmethod def remove_weightnorm(model): waveglow = model for WN in waveglow.WN: WN.start = torch.nn.utils.remove_weight_norm(WN.start) WN.in_layers = remove(WN.in_layers) WN.cond_layer = torch.nn.utils.remove_weight_norm(WN.cond_layer) WN.res_skip_layers = remove(WN.res_skip_layers) return waveglow def remove(conv_list): new_conv_list = torch.nn.ModuleList() for old_conv in conv_list: old_conv = torch.nn.utils.remove_weight_norm(old_conv) new_conv_list.append(old_conv) return new_conv_list
12,653
39.557692
105
py
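A minimal usage sketch for the WaveGlow model defined in glow.py above (not part of the repository). The hyperparameters, the sigma value, and the CUDA requirement are assumptions made for the sketch; with randomly initialised weights the output is only a shape and wiring check, not audio.

import torch

# Illustrative sketch: exercise WaveGlow.infer on a random mel-spectrogram.
# All hyperparameters below are assumptions, not values read from this repo's
# training config; infer() allocates torch.cuda.FloatTensor noise internally,
# so a CUDA device is required.
wn_config = {"n_layers": 8, "n_channels": 256, "kernel_size": 3}
model = WaveGlow(n_mel_channels=80, n_flows=12, n_group=8,
                 n_early_every=4, n_early_size=2, WN_config=wn_config).cuda().eval()

mel = torch.randn(1, 80, 63, device="cuda")   # (batch, n_mel_channels, frames)
with torch.no_grad():
    audio = model.infer(mel, sigma=0.6)       # (batch, samples)
print(audio.shape)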
sign-topic
sign-topic-main/examples/textless_nlp/gslm/unit2speech/tacotron2/stft.py
""" BSD 3-Clause License Copyright (c) 2017, Prem Seetharaman All rights reserved. * Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """ import torch import numpy as np import torch.nn.functional as F from torch.autograd import Variable from scipy.signal import get_window from librosa.util import pad_center, tiny from .audio_processing import window_sumsquare class STFT(torch.nn.Module): """adapted from Prem Seetharaman's https://github.com/pseeth/pytorch-stft""" def __init__(self, filter_length=800, hop_length=200, win_length=800, window='hann'): super(STFT, self).__init__() self.filter_length = filter_length self.hop_length = hop_length self.win_length = win_length self.window = window self.forward_transform = None scale = self.filter_length / self.hop_length fourier_basis = np.fft.fft(np.eye(self.filter_length)) cutoff = int((self.filter_length / 2 + 1)) fourier_basis = np.vstack([np.real(fourier_basis[:cutoff, :]), np.imag(fourier_basis[:cutoff, :])]) forward_basis = torch.FloatTensor(fourier_basis[:, None, :]) inverse_basis = torch.FloatTensor( np.linalg.pinv(scale * fourier_basis).T[:, None, :]) if window is not None: assert(filter_length >= win_length) # get window and zero center pad it to filter_length fft_window = get_window(window, win_length, fftbins=True) fft_window = pad_center(fft_window, filter_length) fft_window = torch.from_numpy(fft_window).float() # window the bases forward_basis *= fft_window inverse_basis *= fft_window self.register_buffer('forward_basis', forward_basis.float()) self.register_buffer('inverse_basis', inverse_basis.float()) def transform(self, input_data): num_batches = input_data.size(0) num_samples = input_data.size(1) self.num_samples = num_samples # similar to librosa, reflect-pad the input input_data = input_data.view(num_batches, 1, num_samples) input_data = F.pad( input_data.unsqueeze(1), (int(self.filter_length / 2), int(self.filter_length / 2), 0, 0), mode='reflect') input_data = input_data.squeeze(1) forward_transform = F.conv1d( input_data, Variable(self.forward_basis, requires_grad=False), stride=self.hop_length, padding=0) cutoff = int((self.filter_length / 2) + 1) real_part = 
forward_transform[:, :cutoff, :] imag_part = forward_transform[:, cutoff:, :] magnitude = torch.sqrt(real_part**2 + imag_part**2) phase = torch.autograd.Variable( torch.atan2(imag_part.data, real_part.data)) return magnitude, phase def inverse(self, magnitude, phase): recombine_magnitude_phase = torch.cat( [magnitude*torch.cos(phase), magnitude*torch.sin(phase)], dim=1) inverse_transform = F.conv_transpose1d( recombine_magnitude_phase, Variable(self.inverse_basis, requires_grad=False), stride=self.hop_length, padding=0) if self.window is not None: window_sum = window_sumsquare( self.window, magnitude.size(-1), hop_length=self.hop_length, win_length=self.win_length, n_fft=self.filter_length, dtype=np.float32) # remove modulation effects approx_nonzero_indices = torch.from_numpy( np.where(window_sum > tiny(window_sum))[0]) window_sum = torch.autograd.Variable( torch.from_numpy(window_sum), requires_grad=False) window_sum = window_sum.cuda() if magnitude.is_cuda else window_sum inverse_transform[:, :, approx_nonzero_indices] /= window_sum[approx_nonzero_indices] # scale by hop ratio inverse_transform *= float(self.filter_length) / self.hop_length inverse_transform = inverse_transform[:, :, int(self.filter_length/2):] inverse_transform = inverse_transform[:, :, :-int(self.filter_length/2):] return inverse_transform def forward(self, input_data): self.magnitude, self.phase = self.transform(input_data) reconstruction = self.inverse(self.magnitude, self.phase) return reconstruction
5,893
40.507042
97
py
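A short round-trip sketch for the STFT module above (not from the repository): analyse a random batch of signals and re-synthesise it from magnitude and phase. The sizes are arbitrary, and it assumes the librosa/scipy versions this code was written against.

import torch

# Round-trip sketch: transform() then inverse() should approximately reproduce
# the input away from the padded edges. magnitude and phase each have shape
# (batch, filter_length // 2 + 1, frames).
stft = STFT(filter_length=800, hop_length=200, win_length=800, window='hann')
signal = torch.randn(2, 16000)                 # (batch, samples)
magnitude, phase = stft.transform(signal)
reconstruction = stft.inverse(magnitude, phase)
print(magnitude.shape, reconstruction.shape)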
sign-topic
sign-topic-main/examples/textless_nlp/gslm/unit2speech/tacotron2/utils.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import collections import io import json import librosa import numpy as np import soundfile as sf import time import torch from scipy.io.wavfile import read from .text import SOS_TOK, EOS_TOK def get_mask_from_lengths(lengths): max_len = torch.max(lengths).item() ids = torch.arange(0, max_len, out=torch.cuda.LongTensor(max_len)) mask = (ids < lengths.unsqueeze(1)) return mask def load_wav_to_torch(full_path, sr=None): data, sr = librosa.load(full_path, sr=sr) data = np.clip(data, -1, 1) # potentially out of [-1, 1] due to resampling data = data * 32768.0 # match values loaded by scipy return torch.FloatTensor(data.astype(np.float32)), sr def read_binary_audio(bin_data, tar_sr=None): """ read binary audio (`bytes` or `uint8` `numpy.ndarray`) to `float32` `numpy.ndarray` RETURNS: data (np.ndarray) : audio of shape (n,) or (2, n) tar_sr (int) : sample rate """ data, ori_sr = sf.read(io.BytesIO(bin_data), dtype='float32') data = data.T if (tar_sr is not None) and (ori_sr != tar_sr): data = librosa.resample(data, ori_sr, tar_sr) else: tar_sr = ori_sr data = np.clip(data, -1, 1) data = data * 32768.0 return torch.FloatTensor(data.astype(np.float32)), tar_sr def load_filepaths_and_text(filename): with open(filename, encoding='utf-8') as f: data = [json.loads(line.rstrip()) for line in f] return data def to_gpu(x): x = x.contiguous() if torch.cuda.is_available(): x = x.cuda(non_blocking=True) return torch.autograd.Variable(x) def load_code_dict(path, add_sos=False, add_eos=False): if not path: return {} with open(path, 'r') as f: codes = ['_'] + [line.rstrip() for line in f] # '_' for pad code_dict = {c: i for i, c in enumerate(codes)} if add_sos: code_dict[SOS_TOK] = len(code_dict) if add_eos: code_dict[EOS_TOK] = len(code_dict) assert(set(code_dict.values()) == set(range(len(code_dict)))) return code_dict def load_obs_label_dict(path): if not path: return {} with open(path, 'r') as f: obs_labels = [line.rstrip() for line in f] return {c: i for i, c in enumerate(obs_labels)} # A simple timer class inspired from `tnt.TimeMeter` class CudaTimer: def __init__(self, keys): self.keys = keys self.reset() def start(self, key): s = torch.cuda.Event(enable_timing=True) s.record() self.start_events[key].append(s) return self def stop(self, key): e = torch.cuda.Event(enable_timing=True) e.record() self.end_events[key].append(e) return self def reset(self): self.start_events = collections.defaultdict(list) self.end_events = collections.defaultdict(list) self.running_times = collections.defaultdict(float) self.n = collections.defaultdict(int) return self def value(self): self._synchronize() return {k: self.running_times[k] / self.n[k] for k in self.keys} def _synchronize(self): torch.cuda.synchronize() for k in self.keys: starts = self.start_events[k] ends = self.end_events[k] if len(starts) == 0: raise ValueError("Trying to divide by zero in TimeMeter") if len(ends) != len(starts): raise ValueError("Call stop before checking value!") time = 0 for start, end in zip(starts, ends): time += start.elapsed_time(end) self.running_times[k] += time * 1e-3 self.n[k] += len(starts) self.start_events = collections.defaultdict(list) self.end_events = collections.defaultdict(list) # Used to measure the time taken for multiple events class Timer: def __init__(self, keys): self.keys = keys self.n = {} self.running_time = {} self.total_time = {} 
self.reset() def start(self, key): self.running_time[key] = time.time() return self def stop(self, key): self.total_time[key] = time.time() - self.running_time[key] self.n[key] += 1 self.running_time[key] = None return self def reset(self): for k in self.keys: self.total_time[k] = 0 self.running_time[k] = None self.n[k] = 0 return self def value(self): vals = {} for k in self.keys: if self.n[k] == 0: raise ValueError("Trying to divide by zero in TimeMeter") else: vals[k] = self.total_time[k] / self.n[k] return vals
4,918
27.598837
79
py
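A small usage sketch for the Timer helper above (not from the repository); the stage names and sleeps are placeholders for real work.

import time

# Time two named stages and report the average seconds per call for each key.
timer = Timer(["encode", "decode"])
timer.start("encode")
time.sleep(0.01)        # stand-in for real work
timer.stop("encode")
timer.start("decode")
time.sleep(0.02)
timer.stop("decode")
print(timer.value())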
sign-topic
sign-topic-main/examples/textless_nlp/gslm/unit2speech/tacotron2/model.py
from math import sqrt import torch import torch.distributions as distr from torch.autograd import Variable from torch import nn from torch.nn import functional as F from .layers import ConvNorm, LinearNorm, GlobalAvgPool from .utils import to_gpu, get_mask_from_lengths class LocationLayer(nn.Module): def __init__(self, attention_n_filters, attention_kernel_size, attention_dim): super(LocationLayer, self).__init__() padding = int((attention_kernel_size - 1) / 2) self.location_conv = ConvNorm(2, attention_n_filters, kernel_size=attention_kernel_size, padding=padding, bias=False, stride=1, dilation=1) self.location_dense = LinearNorm(attention_n_filters, attention_dim, bias=False, w_init_gain='tanh') def forward(self, attention_weights_cat): processed_attention = self.location_conv(attention_weights_cat) processed_attention = processed_attention.transpose(1, 2) processed_attention = self.location_dense(processed_attention) return processed_attention class Attention(nn.Module): def __init__(self, attention_rnn_dim, embedding_dim, attention_dim, attention_location_n_filters, attention_location_kernel_size): super(Attention, self).__init__() self.query_layer = LinearNorm(attention_rnn_dim, attention_dim, bias=False, w_init_gain='tanh') self.memory_layer = LinearNorm(embedding_dim, attention_dim, bias=False, w_init_gain='tanh') self.v = LinearNorm(attention_dim, 1, bias=False) self.location_layer = LocationLayer(attention_location_n_filters, attention_location_kernel_size, attention_dim) self.score_mask_value = -float("inf") def get_alignment_energies(self, query, processed_memory, attention_weights_cat): """ PARAMS ------ query: decoder output (batch, n_mel_channels * n_frames_per_step) processed_memory: processed encoder outputs (B, T_in, attention_dim) attention_weights_cat: cumulative and prev. 
att weights (B, 2, max_time) RETURNS ------- alignment (batch, max_time) """ processed_query = self.query_layer(query.unsqueeze(1)) processed_attention_weights = self.location_layer(attention_weights_cat) energies = self.v(torch.tanh( processed_query + processed_attention_weights + processed_memory)) energies = energies.squeeze(-1) return energies def forward(self, attention_hidden_state, memory, processed_memory, attention_weights_cat, mask): """ PARAMS ------ attention_hidden_state: attention rnn last output memory: encoder outputs processed_memory: processed encoder outputs attention_weights_cat: previous and cummulative attention weights mask: binary mask for padded data """ alignment = self.get_alignment_energies( attention_hidden_state, processed_memory, attention_weights_cat) if mask is not None: alignment.data.masked_fill_(mask, self.score_mask_value) attention_weights = F.softmax(alignment, dim=1) attention_context = torch.bmm(attention_weights.unsqueeze(1), memory) attention_context = attention_context.squeeze(1) return attention_context, attention_weights class Prenet(nn.Module): def __init__(self, in_dim, sizes): super(Prenet, self).__init__() in_sizes = [in_dim] + sizes[:-1] self.layers = nn.ModuleList( [LinearNorm(in_size, out_size, bias=False) for (in_size, out_size) in zip(in_sizes, sizes)]) def forward(self, x): for linear in self.layers: x = F.dropout(F.relu(linear(x)), p=0.5, training=True) return x class Postnet(nn.Module): """Postnet - Five 1-d convolution with 512 channels and kernel size 5 """ def __init__(self, hparams): super(Postnet, self).__init__() self.convolutions = nn.ModuleList() self.convolutions.append( nn.Sequential( ConvNorm(hparams.n_mel_channels, hparams.postnet_embedding_dim, kernel_size=hparams.postnet_kernel_size, stride=1, padding=int((hparams.postnet_kernel_size - 1) / 2), dilation=1, w_init_gain='tanh'), nn.BatchNorm1d(hparams.postnet_embedding_dim)) ) for i in range(1, hparams.postnet_n_convolutions - 1): self.convolutions.append( nn.Sequential( ConvNorm(hparams.postnet_embedding_dim, hparams.postnet_embedding_dim, kernel_size=hparams.postnet_kernel_size, stride=1, padding=int((hparams.postnet_kernel_size - 1) / 2), dilation=1, w_init_gain='tanh'), nn.BatchNorm1d(hparams.postnet_embedding_dim)) ) self.convolutions.append( nn.Sequential( ConvNorm(hparams.postnet_embedding_dim, hparams.n_mel_channels, kernel_size=hparams.postnet_kernel_size, stride=1, padding=int((hparams.postnet_kernel_size - 1) / 2), dilation=1, w_init_gain='linear'), nn.BatchNorm1d(hparams.n_mel_channels)) ) def forward(self, x): for i in range(len(self.convolutions) - 1): x = F.dropout(torch.tanh(self.convolutions[i](x)), 0.5, self.training) x = F.dropout(self.convolutions[-1](x), 0.5, self.training) return x class Encoder(nn.Module): """Encoder module: - Three 1-d convolution banks - Bidirectional LSTM """ def __init__(self, hparams): super(Encoder, self).__init__() convolutions = [] for _ in range(hparams.encoder_n_convolutions): conv_layer = nn.Sequential( ConvNorm(hparams.encoder_embedding_dim, hparams.encoder_embedding_dim, kernel_size=hparams.encoder_kernel_size, stride=1, padding=int((hparams.encoder_kernel_size - 1) / 2), dilation=1, w_init_gain='relu'), nn.BatchNorm1d(hparams.encoder_embedding_dim)) convolutions.append(conv_layer) self.convolutions = nn.ModuleList(convolutions) self.lstm = nn.LSTM(hparams.encoder_embedding_dim, int(hparams.encoder_embedding_dim / 2), 1, batch_first=True, bidirectional=True) def forward(self, x, input_lengths): for conv in 
self.convolutions: x = F.dropout(F.relu(conv(x)), 0.5, self.training) x = x.transpose(1, 2) # pytorch tensor are not reversible, hence the conversion input_lengths = input_lengths.cpu().numpy() x = nn.utils.rnn.pack_padded_sequence( x, input_lengths, batch_first=True) self.lstm.flatten_parameters() outputs, _ = self.lstm(x) outputs, _ = nn.utils.rnn.pad_packed_sequence( outputs, batch_first=True) return outputs def inference(self, x): for conv in self.convolutions: x = F.dropout(F.relu(conv(x)), 0.5, self.training) x = x.transpose(1, 2) self.lstm.flatten_parameters() outputs, _ = self.lstm(x) return outputs class AudioEncoder(nn.Module): def __init__(self, hparams): super(AudioEncoder, self).__init__() assert hparams.lat_dim > 0 convolutions = [] inp_dim = hparams.n_mel_channels for _ in range(hparams.lat_n_convolutions): conv_layer = nn.Sequential( ConvNorm(inp_dim, hparams.lat_n_filters, kernel_size=hparams.lat_kernel_size, stride=1, padding=int((hparams.lat_kernel_size - 1) / 2), dilation=1, w_init_gain='tanh'), nn.BatchNorm1d(hparams.lat_n_filters)) inp_dim = hparams.lat_n_filters convolutions.append(conv_layer) self.convolutions = nn.ModuleList(convolutions) self.lstm = nn.LSTM(hparams.lat_n_filters, int(hparams.lat_n_filters / 2), hparams.lat_n_blstms, batch_first=True, bidirectional=True) self.pool = GlobalAvgPool() self.mu_proj = LinearNorm(hparams.lat_n_filters, hparams.lat_dim) self.logvar_proj = LinearNorm(hparams.lat_n_filters, hparams.lat_dim) self.lat_dim = hparams.lat_dim def forward(self, x, lengths): """ Args: x (torch.Tensor): (B, F, T) """ for conv in self.convolutions: x = F.dropout(F.tanh(conv(x)), 0.5, self.training) x = x.transpose(1, 2) # (B, T, D) # x may not be sorted by length. Sort->process->unsort max_len = x.size(1) assert max_len == torch.max(lengths).item() lengths, perm_idx = lengths.sort(0, descending=True) x = x[perm_idx] x = nn.utils.rnn.pack_padded_sequence(x, lengths, batch_first=True) self.lstm.flatten_parameters() outputs, _ = self.lstm(x) outputs, _ = nn.utils.rnn.pad_packed_sequence(outputs, batch_first=True) _, unperm_idx = perm_idx.sort(0) outputs = outputs[unperm_idx] # (B, T, D) lengths = lengths[unperm_idx] # (B, T, D) outputs = self.pool(outputs, lengths) # (B, D) mu = self.mu_proj(outputs) logvar = self.logvar_proj(outputs) z = distr.Normal(mu, logvar).rsample() return z, mu, logvar class Decoder(nn.Module): def __init__(self, hparams): super(Decoder, self).__init__() self.n_mel_channels = hparams.n_mel_channels self.n_frames_per_step = hparams.n_frames_per_step self.encoder_embedding_dim = hparams.encoder_embedding_dim self.obs_dim = hparams.obs_dim self.lat_dim = hparams.lat_dim self.attention_rnn_dim = hparams.attention_rnn_dim self.decoder_rnn_dim = hparams.decoder_rnn_dim self.prenet_dim = hparams.prenet_dim self.max_decoder_steps = hparams.max_decoder_steps self.gate_threshold = hparams.gate_threshold self.p_attention_dropout = hparams.p_attention_dropout self.p_decoder_dropout = hparams.p_decoder_dropout self.prenet = Prenet( hparams.n_mel_channels * hparams.n_frames_per_step, [hparams.prenet_dim, hparams.prenet_dim]) self.attention_rnn = nn.LSTMCell( hparams.prenet_dim + hparams.encoder_embedding_dim, hparams.attention_rnn_dim) self.attention_layer = Attention( hparams.attention_rnn_dim, hparams.encoder_embedding_dim, hparams.attention_dim, hparams.attention_location_n_filters, hparams.attention_location_kernel_size) encoder_tot_dim = (hparams.encoder_embedding_dim + \ hparams.lat_dim + hparams.obs_dim) self.decoder_rnn = 
nn.LSTMCell( hparams.attention_rnn_dim + encoder_tot_dim, hparams.decoder_rnn_dim, 1) self.linear_projection = LinearNorm( hparams.decoder_rnn_dim + encoder_tot_dim, hparams.n_mel_channels * hparams.n_frames_per_step) self.gate_layer = LinearNorm( hparams.decoder_rnn_dim + encoder_tot_dim, 1, bias=True, w_init_gain='sigmoid') def get_go_frame(self, memory): """ Gets all zeros frames to use as first decoder input PARAMS ------ memory: decoder outputs RETURNS ------- decoder_input: all zeros frames """ B = memory.size(0) decoder_input = Variable(memory.data.new( B, self.n_mel_channels * self.n_frames_per_step).zero_()) return decoder_input def initialize_decoder_states(self, memory, obs_and_lat, mask): """ Initializes attention rnn states, decoder rnn states, attention weights, attention cumulative weights, attention context, stores memory and stores processed memory PARAMS ------ memory: Encoder outputs obs_and_lat: Observed and latent attribute embeddings mask: Mask for padded data if training, expects None for inference """ B = memory.size(0) MAX_TIME = memory.size(1) self.attention_hidden = Variable(memory.data.new( B, self.attention_rnn_dim).zero_()) self.attention_cell = Variable(memory.data.new( B, self.attention_rnn_dim).zero_()) self.decoder_hidden = Variable(memory.data.new( B, self.decoder_rnn_dim).zero_()) self.decoder_cell = Variable(memory.data.new( B, self.decoder_rnn_dim).zero_()) self.attention_weights = Variable(memory.data.new( B, MAX_TIME).zero_()) self.attention_weights_cum = Variable(memory.data.new( B, MAX_TIME).zero_()) self.attention_context = Variable(memory.data.new( B, self.encoder_embedding_dim).zero_()) self.memory = memory self.processed_memory = self.attention_layer.memory_layer(memory) self.obs_and_lat = obs_and_lat self.mask = mask def parse_decoder_inputs(self, decoder_inputs): """ Prepares decoder inputs, i.e. mel outputs PARAMS ------ decoder_inputs: inputs used for teacher-forced training, i.e. 
mel-specs RETURNS ------- inputs: processed decoder inputs """ # (B, n_mel_channels, T_out) -> (B, T_out, n_mel_channels) decoder_inputs = decoder_inputs.transpose(1, 2) decoder_inputs = decoder_inputs.view( decoder_inputs.size(0), int(decoder_inputs.size(1)/self.n_frames_per_step), -1) # (B, T_out, n_mel_channels) -> (T_out, B, n_mel_channels) decoder_inputs = decoder_inputs.transpose(0, 1) return decoder_inputs def parse_decoder_outputs(self, mel_outputs, gate_outputs, alignments): """ Prepares decoder outputs for output PARAMS ------ mel_outputs: gate_outputs: gate output energies alignments: RETURNS ------- mel_outputs: gate_outpust: gate output energies alignments: """ # (T_out, B) -> (B, T_out) alignments = torch.stack(alignments).transpose(0, 1) # (T_out, B) -> (B, T_out) gate_outputs = torch.stack(gate_outputs).transpose(0, 1) gate_outputs = gate_outputs.contiguous() # (T_out, B, n_mel_channels) -> (B, T_out, n_mel_channels) mel_outputs = torch.stack(mel_outputs).transpose(0, 1).contiguous() # decouple frames per step mel_outputs = mel_outputs.view( mel_outputs.size(0), -1, self.n_mel_channels) # (B, T_out, n_mel_channels) -> (B, n_mel_channels, T_out) mel_outputs = mel_outputs.transpose(1, 2) return mel_outputs, gate_outputs, alignments def decode(self, decoder_input): """ Decoder step using stored states, attention and memory PARAMS ------ decoder_input: previous mel output RETURNS ------- mel_output: gate_output: gate output energies attention_weights: """ cell_input = torch.cat((decoder_input, self.attention_context), -1) self.attention_hidden, self.attention_cell = self.attention_rnn( cell_input, (self.attention_hidden, self.attention_cell)) self.attention_hidden = F.dropout( self.attention_hidden, self.p_attention_dropout, self.training) attention_weights_cat = torch.cat( (self.attention_weights.unsqueeze(1), self.attention_weights_cum.unsqueeze(1)), dim=1) self.attention_context, self.attention_weights = self.attention_layer( self.attention_hidden, self.memory, self.processed_memory, attention_weights_cat, self.mask) self.attention_weights_cum += self.attention_weights decoder_input = torch.cat( (self.attention_hidden, self.attention_context), -1) if self.obs_and_lat is not None: decoder_input = torch.cat((decoder_input, self.obs_and_lat), -1) self.decoder_hidden, self.decoder_cell = self.decoder_rnn( decoder_input, (self.decoder_hidden, self.decoder_cell)) self.decoder_hidden = F.dropout( self.decoder_hidden, self.p_decoder_dropout, self.training) decoder_hidden_attention_context = torch.cat( (self.decoder_hidden, self.attention_context), dim=1) if self.obs_and_lat is not None: decoder_hidden_attention_context = torch.cat( (decoder_hidden_attention_context, self.obs_and_lat), dim=1) decoder_output = self.linear_projection( decoder_hidden_attention_context) gate_prediction = self.gate_layer(decoder_hidden_attention_context) return decoder_output, gate_prediction, self.attention_weights def forward(self, memory, obs_and_lat, decoder_inputs, memory_lengths): """ Decoder forward pass for training PARAMS ------ memory: Encoder outputs obs_and_lat: Observed and latent attribute embeddings decoder_inputs: Decoder inputs for teacher forcing. i.e. mel-specs memory_lengths: Encoder output lengths for attention masking. 
RETURNS ------- mel_outputs: mel outputs from the decoder gate_outputs: gate outputs from the decoder alignments: sequence of attention weights from the decoder """ decoder_input = self.get_go_frame(memory).unsqueeze(0) decoder_inputs = self.parse_decoder_inputs(decoder_inputs) decoder_inputs = torch.cat((decoder_input, decoder_inputs), dim=0) decoder_inputs = self.prenet(decoder_inputs) self.initialize_decoder_states( memory, obs_and_lat, mask=~get_mask_from_lengths(memory_lengths)) mel_outputs, gate_outputs, alignments = [], [], [] while len(mel_outputs) < decoder_inputs.size(0) - 1: decoder_input = decoder_inputs[len(mel_outputs)] mel_output, gate_output, attention_weights = self.decode( decoder_input) mel_outputs += [mel_output.squeeze(1)] gate_outputs += [gate_output.squeeze()] alignments += [attention_weights] mel_outputs, gate_outputs, alignments = self.parse_decoder_outputs( mel_outputs, gate_outputs, alignments) return mel_outputs, gate_outputs, alignments def inference(self, memory, obs_and_lat, ret_has_eos=False): """ Decoder inference PARAMS ------ memory: Encoder outputs obs_and_lat: Observed and latent attribute embeddings RETURNS ------- mel_outputs: mel outputs from the decoder gate_outputs: gate outputs from the decoder alignments: sequence of attention weights from the decoder """ decoder_input = self.get_go_frame(memory) self.initialize_decoder_states(memory, obs_and_lat, mask=None) mel_outputs, gate_outputs, alignments = [], [], [] has_eos = False while True: decoder_input = self.prenet(decoder_input) mel_output, gate_output, alignment = self.decode(decoder_input) mel_outputs += [mel_output.squeeze(1)] gate_outputs += [gate_output] alignments += [alignment] if torch.sigmoid(gate_output.data) > self.gate_threshold: has_eos = True break elif len(mel_outputs) == self.max_decoder_steps: # print("Warning! 
Reached max decoder steps") break decoder_input = mel_output mel_outputs, gate_outputs, alignments = self.parse_decoder_outputs( mel_outputs, gate_outputs, alignments) if ret_has_eos: return mel_outputs, gate_outputs, alignments, has_eos else: return mel_outputs, gate_outputs, alignments class Tacotron2(nn.Module): def __init__(self, hparams): super(Tacotron2, self).__init__() self.mask_padding = hparams.mask_padding self.fp16_run = hparams.fp16_run self.n_mel_channels = hparams.n_mel_channels self.n_frames_per_step = hparams.n_frames_per_step # initialize text encoder embedding self.embedding = nn.Embedding( hparams.n_symbols, hparams.symbols_embedding_dim) std = sqrt(2.0 / (hparams.n_symbols + hparams.symbols_embedding_dim)) val = sqrt(3.0) * std # uniform bounds for std self.embedding.weight.data.uniform_(-val, val) # initialize observed attribute embedding self.obs_embedding = None if hparams.obs_dim > 0: self.obs_embedding = nn.Embedding( hparams.obs_n_class, hparams.obs_dim) std = sqrt(2.0 / (hparams.obs_n_class + hparams.obs_dim)) val = sqrt(3.0) * std # uniform bounds for std self.obs_embedding.weight.data.uniform_(-val, val) self.encoder = Encoder(hparams) self.decoder = Decoder(hparams) self.postnet = Postnet(hparams) self.lat_encoder = None if hparams.lat_dim > 0: self.lat_encoder = AudioEncoder(hparams) def parse_batch(self, batch): (text_padded, input_lengths, obs_labels, mel_padded, gate_padded, output_lengths) = batch text_padded = to_gpu(text_padded).long() input_lengths = to_gpu(input_lengths).long() obs_labels = to_gpu(obs_labels).long() max_len = torch.max(input_lengths.data).item() mel_padded = to_gpu(mel_padded).float() gate_padded = to_gpu(gate_padded).float() output_lengths = to_gpu(output_lengths).long() return ( (text_padded, input_lengths, obs_labels, mel_padded, max_len, output_lengths), (mel_padded, gate_padded)) def parse_output(self, outputs, output_lengths=None): if self.mask_padding and output_lengths is not None: mask = ~get_mask_from_lengths(output_lengths) mask = mask.expand(self.n_mel_channels, mask.size(0), mask.size(1)) mask = mask.permute(1, 0, 2) outputs[0].data.masked_fill_(mask, 0.0) outputs[1].data.masked_fill_(mask, 0.0) outputs[2].data.masked_fill_(mask[:, 0, :], 1e3) # gate energies return outputs def forward(self, inputs): (text_inputs, text_lengths, obs_labels, mels, max_len, output_lengths) = inputs text_lengths, output_lengths = text_lengths.data, output_lengths.data embedded_inputs = self.embedding(text_inputs).transpose(1, 2) encoder_outputs = self.encoder(embedded_inputs, text_lengths) obs = None if self.obs_embedding is not None: obs = self.obs_embedding(obs_labels) lat, lat_mu, lat_logvar = None, None, None if self.lat_encoder is not None: (lat, lat_mu, lat_logvar) = self.lat_encoder(mels, output_lengths) obs_and_lat = [x for x in [obs, lat] if x is not None] if bool(obs_and_lat): obs_and_lat = torch.cat(obs_and_lat, dim=-1) else: obs_and_lat = None mel_outputs, gate_outputs, alignments = self.decoder( encoder_outputs, obs_and_lat, mels, memory_lengths=text_lengths) mel_outputs_postnet = self.postnet(mel_outputs) mel_outputs_postnet = mel_outputs + mel_outputs_postnet return self.parse_output( [mel_outputs, mel_outputs_postnet, gate_outputs, alignments, lat_mu, lat_logvar], output_lengths) def inference(self, inputs, obs_labels=None, lat=None, ret_has_eos=False): embedded_inputs = self.embedding(inputs).transpose(1, 2) encoder_outputs = self.encoder.inference(embedded_inputs) if obs_labels is None: obs_labels = 
torch.LongTensor(len(inputs)) obs_labels = obs_labels.to(inputs.device).zero_() obs = None if self.obs_embedding is not None: obs = self.obs_embedding(obs_labels) if self.lat_encoder is not None: if lat is None: lat = torch.FloatTensor(len(inputs), self.lat_encoder.lat_dim) lat = lat.to(inputs.device).zero_().type(encoder_outputs.type()) obs_and_lat = [x for x in [obs, lat] if x is not None] if bool(obs_and_lat): obs_and_lat = torch.cat(obs_and_lat, dim=-1) else: obs_and_lat = None mel_outputs, gate_outputs, alignments, has_eos = self.decoder.inference( encoder_outputs, obs_and_lat, ret_has_eos=True) mel_outputs_postnet = self.postnet(mel_outputs) mel_outputs_postnet = mel_outputs + mel_outputs_postnet outputs = self.parse_output( [mel_outputs, mel_outputs_postnet, gate_outputs, alignments]) if ret_has_eos: return outputs + [has_eos] else: return outputs
25,989
37.791045
82
py
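A shape sketch for the location-sensitive Attention module above (not from the repository). All dimensions are illustrative assumptions, roughly in line with common Tacotron 2 sizes.

import torch

# Exercise Attention with dummy tensors; every dimension below is an assumption.
attn = Attention(attention_rnn_dim=1024, embedding_dim=512, attention_dim=128,
                 attention_location_n_filters=32,
                 attention_location_kernel_size=31)
B, T = 4, 37
query = torch.randn(B, 1024)                    # attention RNN state
memory = torch.randn(B, T, 512)                 # encoder outputs
processed_memory = attn.memory_layer(memory)    # pre-projected encoder outputs
attention_weights_cat = torch.zeros(B, 2, T)    # previous + cumulative weights
context, weights = attn(query, memory, processed_memory,
                        attention_weights_cat, mask=None)
print(context.shape, weights.shape)             # (B, 512), (B, T)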
sign-topic
sign-topic-main/examples/textless_nlp/gslm/unit2speech/tacotron2/layers.py
import torch from librosa.filters import mel as librosa_mel_fn from .audio_processing import dynamic_range_compression from .audio_processing import dynamic_range_decompression from .stft import STFT from .utils import get_mask_from_lengths class LinearNorm(torch.nn.Module): def __init__(self, in_dim, out_dim, bias=True, w_init_gain='linear'): super(LinearNorm, self).__init__() self.linear_layer = torch.nn.Linear(in_dim, out_dim, bias=bias) torch.nn.init.xavier_uniform_( self.linear_layer.weight, gain=torch.nn.init.calculate_gain(w_init_gain)) def forward(self, x): return self.linear_layer(x) class ConvNorm(torch.nn.Module): def __init__(self, in_channels, out_channels, kernel_size=1, stride=1, padding=None, dilation=1, bias=True, w_init_gain='linear'): super(ConvNorm, self).__init__() if padding is None: assert(kernel_size % 2 == 1) padding = int(dilation * (kernel_size - 1) / 2) self.conv = torch.nn.Conv1d(in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, bias=bias) torch.nn.init.xavier_uniform_( self.conv.weight, gain=torch.nn.init.calculate_gain(w_init_gain)) def forward(self, signal): conv_signal = self.conv(signal) return conv_signal class GlobalAvgPool(torch.nn.Module): def __init__(self): super(GlobalAvgPool, self).__init__() def forward(self, x, lengths=None): """Average pooling across time steps (dim=1) with optionally lengths. Args: x: torch.Tensor of shape (N, T, ...) lengths: None or torch.Tensor of shape (N,) dim: dimension to pool """ if lengths is None: return x.mean(dim=1, keepdim=False) else: mask = get_mask_from_lengths(lengths).type(x.type()).to(x.device) mask_shape = list(mask.size()) + [1 for _ in range(x.ndimension()-2)] mask = mask.reshape(*mask_shape) numer = (x * mask).sum(dim=1, keepdim=False) denom = mask.sum(dim=1, keepdim=False) return numer / denom class TacotronSTFT(torch.nn.Module): def __init__(self, filter_length=1024, hop_length=256, win_length=1024, n_mel_channels=80, sampling_rate=22050, mel_fmin=0.0, mel_fmax=8000.0): super(TacotronSTFT, self).__init__() self.n_mel_channels = n_mel_channels self.sampling_rate = sampling_rate self.stft_fn = STFT(filter_length, hop_length, win_length) mel_basis = librosa_mel_fn( sampling_rate, filter_length, n_mel_channels, mel_fmin, mel_fmax) mel_basis = torch.from_numpy(mel_basis).float() self.register_buffer('mel_basis', mel_basis) def spectral_normalize(self, magnitudes): output = dynamic_range_compression(magnitudes) return output def spectral_de_normalize(self, magnitudes): output = dynamic_range_decompression(magnitudes) return output def mel_spectrogram(self, y): """Computes mel-spectrograms from a batch of waves PARAMS ------ y: Variable(torch.FloatTensor) with shape (B, T) in range [-1, 1] RETURNS ------- mel_output: torch.FloatTensor of shape (B, n_mel_channels, T) """ assert(torch.min(y.data) >= -1) assert(torch.max(y.data) <= 1) magnitudes, phases = self.stft_fn.transform(y) magnitudes = magnitudes.data mel_output = torch.matmul(self.mel_basis, magnitudes) mel_output = self.spectral_normalize(mel_output) return mel_output
3,859
36.115385
81
py
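A usage sketch for TacotronSTFT above (not from the repository). The filter, hop, and mel settings mirror common Tacotron 2 values and are assumptions here, as is the librosa version accepted by the positional librosa_mel_fn call in the constructor.

import torch

# Compute a log-compressed mel-spectrogram from one second of random audio.
taco_stft = TacotronSTFT(filter_length=1024, hop_length=256, win_length=1024,
                         n_mel_channels=80, sampling_rate=22050,
                         mel_fmin=0.0, mel_fmax=8000.0)
wav = torch.rand(1, 22050) * 2.0 - 1.0   # kept inside [-1, 1] as mel_spectrogram asserts
mel = taco_stft.mel_spectrogram(wav)     # (1, 80, frames)
print(mel.shape)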
sign-topic
sign-topic-main/examples/textless_nlp/gslm/unit2speech/tacotron2/waveglow_denoiser.py
# import sys
# sys.path.append('tacotron2')
import torch
from .layers import STFT


class Denoiser(torch.nn.Module):
    """ Removes model bias from audio produced with waveglow """

    def __init__(self, waveglow, filter_length=1024, n_overlap=4,
                 win_length=1024, mode='zeros'):
        super(Denoiser, self).__init__()
        self.stft = STFT(filter_length=filter_length,
                         hop_length=int(filter_length/n_overlap),
                         win_length=win_length).cuda()
        if mode == 'zeros':
            mel_input = torch.zeros(
                (1, 80, 88),
                dtype=waveglow.upsample.weight.dtype,
                device=waveglow.upsample.weight.device)
        elif mode == 'normal':
            mel_input = torch.randn(
                (1, 80, 88),
                dtype=waveglow.upsample.weight.dtype,
                device=waveglow.upsample.weight.device)
        else:
            raise Exception("Mode {} is not supported".format(mode))

        # Estimate the vocoder's bias spectrum by running it on a silent input.
        with torch.no_grad():
            bias_audio = waveglow.infer(mel_input, sigma=0.0).float()
            bias_spec, _ = self.stft.transform(bias_audio)

        self.register_buffer('bias_spec', bias_spec[:, :, 0][:, :, None])

    def forward(self, audio, strength=0.1):
        audio_spec, audio_angles = self.stft.transform(audio.cuda().float())
        audio_spec_denoised = audio_spec - self.bias_spec * strength
        audio_spec_denoised = torch.clamp(audio_spec_denoised, 0.0)
        audio_denoised = self.stft.inverse(audio_spec_denoised, audio_angles)
        return audio_denoised
1,610
38.292683
77
py
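A wiring sketch for the Denoiser above (not from the repository). It assumes the WaveGlow class from glow.py earlier in this dump is in scope, uses randomly initialised weights purely to show the calling convention, and needs a CUDA device; in practice the model would come from a trained checkpoint.

import torch

# Calling convention only: with random weights the "denoised" audio is meaningless.
wn_config = {"n_layers": 8, "n_channels": 256, "kernel_size": 3}
waveglow = WaveGlow(n_mel_channels=80, n_flows=12, n_group=8,
                    n_early_every=4, n_early_size=2, WN_config=wn_config).cuda().eval()
denoiser = Denoiser(waveglow, filter_length=1024, n_overlap=4,
                    win_length=1024, mode="zeros")
mel = torch.randn(1, 80, 63, device="cuda")
with torch.no_grad():
    audio = waveglow.infer(mel, sigma=0.6)
    audio_denoised = denoiser(audio, strength=0.01)
print(audio_denoised.shape)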
sign-topic
sign-topic-main/examples/textless_nlp/gslm/unit2speech/tacotron2/audio_processing.py
import torch import numpy as np from scipy.signal import get_window import librosa.util as librosa_util def window_sumsquare(window, n_frames, hop_length=200, win_length=800, n_fft=800, dtype=np.float32, norm=None): """ # from librosa 0.6 Compute the sum-square envelope of a window function at a given hop length. This is used to estimate modulation effects induced by windowing observations in short-time fourier transforms. Parameters ---------- window : string, tuple, number, callable, or list-like Window specification, as in `get_window` n_frames : int > 0 The number of analysis frames hop_length : int > 0 The number of samples to advance between frames win_length : [optional] The length of the window function. By default, this matches `n_fft`. n_fft : int > 0 The length of each analysis frame. dtype : np.dtype The data type of the output Returns ------- wss : np.ndarray, shape=`(n_fft + hop_length * (n_frames - 1))` The sum-squared envelope of the window function """ if win_length is None: win_length = n_fft n = n_fft + hop_length * (n_frames - 1) x = np.zeros(n, dtype=dtype) # Compute the squared window at the desired length win_sq = get_window(window, win_length, fftbins=True) win_sq = librosa_util.normalize(win_sq, norm=norm)**2 win_sq = librosa_util.pad_center(win_sq, n_fft) # Fill the envelope for i in range(n_frames): sample = i * hop_length x[sample:min(n, sample + n_fft)] += win_sq[:max(0, min(n_fft, n - sample))] return x def griffin_lim(magnitudes, stft_fn, n_iters=30): """ PARAMS ------ magnitudes: spectrogram magnitudes stft_fn: STFT class with transform (STFT) and inverse (ISTFT) methods """ angles = np.angle(np.exp(2j * np.pi * np.random.rand(*magnitudes.size()))) angles = angles.astype(np.float32) angles = torch.autograd.Variable(torch.from_numpy(angles)) signal = stft_fn.inverse(magnitudes, angles).squeeze(1) for i in range(n_iters): _, angles = stft_fn.transform(signal) signal = stft_fn.inverse(magnitudes, angles).squeeze(1) return signal def dynamic_range_compression(x, C=1, clip_val=1e-5): """ PARAMS ------ C: compression factor """ return torch.log(torch.clamp(x, min=clip_val) * C) def dynamic_range_decompression(x, C=1): """ PARAMS ------ C: compression factor used to compress """ return torch.exp(x) / C
2,610
26.776596
83
py
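A sketch of Griffin-Lim phase reconstruction using griffin_lim above together with the STFT class from stft.py earlier in this dump (not from the repository; the sizes are arbitrary).

import torch

# Reconstruct a waveform from magnitudes alone by iterating transform/inverse.
stft_fn = STFT(filter_length=800, hop_length=200, win_length=800, window='hann')
magnitudes, _ = stft_fn.transform(torch.randn(1, 16000))
signal = griffin_lim(magnitudes, stft_fn, n_iters=30)
print(signal.shape)   # (1, samples)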
sign-topic
sign-topic-main/examples/textless_nlp/gslm/metrics/asr_metrics/ppx.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch import numpy as np import warnings def get_target_sequences(manifest, ground_truth, to_take=1000): import json import pathlib with open(ground_truth, 'r') as fin: original_continuations = json.loads(fin.read()) sequence2length = [(k, v[0]) for k, v in original_continuations.items()] assert all(float(v) >= 6.0 for (_, v) in sequence2length) # 6 seconds sequence2length.sort(key=lambda x: x[1]) to_take_sequences = set(v[0] for v in sequence2length[:to_take]) to_take_ids = [] with open(manifest, 'r') as f: f.readline() for i, line in enumerate(f.readlines()): seq_id = line.split()[0] seq_id = pathlib.Path(seq_id).name.split('__')[0] if seq_id in to_take_sequences: to_take_ids.append(i) print(f'Took {len(to_take_ids)} ids') return set(to_take_ids) def get_args(): import argparse parser = argparse.ArgumentParser("Evaluate PPX metric of a transcript.") parser.add_argument('--asr-transcript', type=str, help='Path to the transcript file.') parser.add_argument('--cut-id', action='store_true', help='Whether cut the first token (typically a seq id)') parser.add_argument('--cut-tail', action='store_true', help='Whether cut the last token (typically a speaker id)') parser.add_argument('--manifest', type=str, default=None) parser.add_argument('--prompts-description', type=str, default=None) args = parser.parse_args() return args def main(): args = get_args() lm = torch.hub.load( 'pytorch/fairseq', 'transformer_lm.wmt19.en', tokenizer='moses', bpe='fastbpe') lm.eval().cuda() # disable dropout if args.manifest is None and args.prompts_description is None: target_ids = None else: target_ids = get_target_sequences( args.manifest, args.prompts_description) with open(args.asr_transcript, 'r') as fin: lines = fin.readlines() if target_ids is not None: filtered = [] for line in lines: line_id = line.split()[-1] line_id = int(line_id.split('-')[1][:-1]) if line_id in target_ids: filtered.append(line) lines = filtered else: pass if args.cut_id: lines = [' '.join(x.split()[1:]) for x in lines] if args.cut_tail: lines = [' '.join(x.split()[:-1]) for x in lines] lines = [x.strip().lower() for x in lines] def get_logprob(sent): return \ lm.score(sent)['positional_scores'].mean().neg().item() logprobs = [get_logprob(l) for l in lines] filtered = [x for x in logprobs if not np.isnan(x)] if len(filtered) != len(logprobs): warnings.warn("NaNs detected!") logprobs = filtered perplexities = [np.exp(l) for l in logprobs] for name, stats in [('logprob', logprobs), ('perplexity', perplexities)]: mean = np.mean(stats) sem = np.std(stats) / np.sqrt(len(stats)) median = np.median(stats) interval = list(np.percentile(stats, [10, 90])) mean, sem, median, percentile10, percentile90 = [ round(x, 2) for x in [mean, sem, median] + interval] print(name) print(f"\tMean {mean} +- {sem}") print( f"\tMedian {median}, 90% confidence interval {percentile10}...{percentile90}") if __name__ == '__main__': main()
3,692
29.02439
90
py
sign-topic
sign-topic-main/examples/textless_nlp/gslm/metrics/asr_metrics/misc/cut_as.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torchaudio import argparse import json import pathlib def get_args(): parser = argparse.ArgumentParser( "Assuring generated audio have the same length as ground-truth audio") parser.add_argument('--samples_dir', required=True, type=str) parser.add_argument('--out_dir', required=True, type=str) parser.add_argument('--prompts_description', required=True, type=str) return parser.parse_args() def cut(src, tgt, l): x, sr = torchaudio.load(str(src)) assert sr == 16_000 x = x.squeeze() target_frames = int(l * sr) flag = 0 if target_frames <= x.size(0): x = x[:target_frames] flag = 1 else: flag = 0 torchaudio.save(str(tgt), x.unsqueeze(0), sr) return flag def main(): args = get_args() tgt_dir = pathlib.Path(args.out_dir) tgt_dir.mkdir(exist_ok=True, parents=True) total_files, sufficiently_long = 0, 0 with open(args.prompts_description, 'r') as f: description = json.loads(f.read()) for src_f in pathlib.Path(args.samples_dir).glob('*.wav'): name_prompt = src_f.with_suffix('').name.split('__')[0] assert name_prompt in description, f'Cannot find {name_prompt}!' target_length = description[name_prompt][0] tgt_f = tgt_dir / (src_f.name) is_long_enough = cut(src_f, tgt_f, target_length) sufficiently_long += is_long_enough if not is_long_enough: print(f'{src_f} is not long enough') total_files += 1 print( f'Total files: {total_files}; sufficiently long: {sufficiently_long}') if __name__ == '__main__': main()
1,832
25.185714
78
py
sign-topic
sign-topic-main/examples/textless_nlp/gslm/speech2unit/pretrained/hubert_feature_reader.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch import fairseq import soundfile as sf import torch.nn.functional as F class HubertFeatureReader: """ Wrapper class to run inference on HuBERT model. Helps extract features for a given audio file. """ def __init__(self, checkpoint_path, layer, max_chunk=1600000): ( model, cfg, task, ) = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] ) self.model = model[0].eval().cuda() self.task = task self.layer = layer self.max_chunk = max_chunk def read_audio(self, path, ref_len=None): wav, sr = sf.read(path) if wav.ndim == 2: wav = wav.mean(-1) assert wav.ndim == 1, wav.ndim assert sr == self.task.cfg.sample_rate, sr if ref_len is not None and abs(ref_len - len(wav)) > 160: print(f"ref {ref_len} != read {len(wav)} ({path})") return wav def get_feats(self, file_path, ref_len=None): x = self.read_audio(file_path, ref_len) with torch.no_grad(): x = torch.from_numpy(x).float().cuda() if self.task.cfg.normalize: x = F.layer_norm(x, x.shape) x = x.view(1, -1) feat = [] for start in range(0, x.size(1), self.max_chunk): x_chunk = x[:, start: start + self.max_chunk] feat_chunk, _ = self.model.extract_features( source=x_chunk, padding_mask=None, mask=False, output_layer=self.layer, ) feat.append(feat_chunk) return torch.cat(feat, 1).squeeze(0)
1,912
30.883333
66
py
sign-topic
sign-topic-main/examples/textless_nlp/gslm/speech2unit/pretrained/w2v2_feature_reader.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch import fairseq import soundfile as sf class Wav2VecFeatureReader: """ Wrapper class to run inference on Wav2Vec 2.0 model. Helps extract features for a given audio file. """ def __init__(self, checkpoint_path, layer): state = fairseq.checkpoint_utils.load_checkpoint_to_cpu( checkpoint_path ) w2v_args = state["args"] self.task = fairseq.tasks.setup_task(w2v_args) model = self.task.build_model(w2v_args) model.load_state_dict(state["model"], strict=True) model.eval() model.cuda() self.model = model self.layer = layer def read_audio(self, fname): wav, sr = sf.read(fname) if wav.ndim == 2: wav = wav.mean(-1) assert wav.ndim == 1, wav.ndim assert sr == self.task.cfg.sample_rate, sr return wav def get_feats(self, file_path): x = self.read_audio(file_path) with torch.no_grad(): source = torch.from_numpy(x).view(1, -1).float().cuda() res = self.model( source=source, mask=False, features_only=True, layer=self.layer ) return res["layer_results"][self.layer][0].squeeze(1)
1,424
29.319149
79
py
sign-topic
sign-topic-main/examples/textless_nlp/gslm/speech2unit/pretrained/utils.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import gc import os import random import shutil import numpy as np import torch import tqdm from examples.textless_nlp.gslm.speech2unit.pretrained.cpc_feature_reader import ( CpcFeatureReader, ) from examples.textless_nlp.gslm.speech2unit.pretrained.hubert_feature_reader import ( HubertFeatureReader, ) from examples.textless_nlp.gslm.speech2unit.pretrained.logmel_feature_reader import ( LogMelFeatureReader, ) from examples.textless_nlp.gslm.speech2unit.pretrained.w2v2_feature_reader import ( Wav2VecFeatureReader, ) def get_feature_reader(feature_type): if feature_type == "logmel": return LogMelFeatureReader elif feature_type == "hubert": return HubertFeatureReader elif feature_type == "w2v2": return Wav2VecFeatureReader elif feature_type == "cpc": return CpcFeatureReader else: raise NotImplementedError(f"{feature_type} is not supported.") def get_feature_iterator( feature_type, checkpoint_path, layer, manifest_path, sample_pct ): feature_reader_cls = get_feature_reader(feature_type) with open(manifest_path, "r") as fp: lines = fp.read().split("\n") root = lines.pop(0).strip() file_path_list = [ os.path.join(root, line.split("\t")[0]) for line in lines if len(line) > 0 ] if sample_pct < 1.0: file_path_list = random.sample( file_path_list, int(sample_pct * len(file_path_list)) ) num_files = len(file_path_list) reader = feature_reader_cls( checkpoint_path=checkpoint_path, layer=layer ) def iterate(): for file_path in file_path_list: feats = reader.get_feats(file_path) yield feats.cpu().numpy() return iterate, num_files def get_features( feature_type, checkpoint_path, layer, manifest_path, sample_pct, flatten ): generator, num_files = get_feature_iterator( feature_type=feature_type, checkpoint_path=checkpoint_path, layer=layer, manifest_path=manifest_path, sample_pct=sample_pct, ) iterator = generator() features_list = [] for features in tqdm.tqdm(iterator, total=num_files): features_list.append(features) # Explicit clean up del iterator del generator gc.collect() torch.cuda.empty_cache() if flatten: return np.concatenate(features_list) return features_list def get_and_dump_features( feature_type, checkpoint_path, layer, manifest_path, sample_pct, flatten, out_features_path, ): # Feature extraction features_batch = get_features( feature_type=feature_type, checkpoint_path=checkpoint_path, layer=layer, manifest_path=manifest_path, sample_pct=sample_pct, flatten=flatten, ) # Save features out_dir_path = os.path.dirname(out_features_path) os.makedirs(out_dir_path, exist_ok=True) shutil.copyfile( manifest_path, os.path.join(out_dir_path, os.path.basename(manifest_path)), ) np.save(out_features_path, features_batch) return features_batch
3,407
25.834646
85
py
sign-topic
sign-topic-main/examples/textless_nlp/gslm/speech2unit/pretrained/logmel_feature_reader.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import soundfile as sf import torch import torchaudio.compliance.kaldi as kaldi class LogMelFeatureReader: """ Wrapper class to run inference on HuBERT model. Helps extract features for a given audio file. """ def __init__(self, *args, **kwargs): self.num_mel_bins = kwargs.get("num_mel_bins", 80) self.frame_length = kwargs.get("frame_length", 25.0) def get_feats(self, file_path): wav, sr = sf.read(file_path) feats = torch.from_numpy(wav).float() feats = kaldi.fbank( feats.unsqueeze(0), num_mel_bins=self.num_mel_bins, frame_length=self.frame_length, sample_frequency=sr, ) return feats
905
28.225806
65
py
sign-topic
sign-topic-main/examples/textless_nlp/gslm/speech2unit/pretrained/cpc_feature_reader.py
import soundfile as sf import torch import torch.nn as nn import torch.nn.functional as F class CpcFeatureReader: """ Wrapper class to run inference on CPC model. Helps extract features for a given audio file. """ def __init__( self, checkpoint_path, layer, use_encoder_layer=False, norm_features=False, sample_rate=16000, max_chunk=64000, ): self.model = load_cpc_model(checkpoint_path, layer).eval().cuda() self.sample_rate = sample_rate self.max_chunk = max_chunk self.norm_features = norm_features self.use_encoder_layer = use_encoder_layer def read_audio(self, path, ref_len=None): wav, sr = sf.read(path) if wav.ndim == 2: wav = wav.mean(-1) assert wav.ndim == 1, wav.ndim assert sr == self.sample_rate, sr if ref_len is not None and abs(ref_len - len(wav)) > 160: print(f"ref {ref_len} != read {len(wav)} ({path})") return wav def get_feats(self, file_path, ref_len=None): x = self.read_audio(file_path, ref_len) # Inspired from CPC_audio feature_loader.py with torch.no_grad(): x = torch.from_numpy(x).float().cuda() x = x.view(1, 1, -1) size = x.size(2) feat = [] start = 0 while start < size: if start + self.max_chunk > size: break x_chunk = x[..., start : start + self.max_chunk] feat_chunk = self.model.extract_features( source=x_chunk, get_encoded=self.use_encoder_layer, norm_output=self.norm_features, ) feat.append(feat_chunk) start += self.max_chunk if start < size: x_chunk = x[:, -self.max_chunk :] feat_chunk = self.model.extract_features( source=x_chunk, get_encoded=self.use_encoder_layer, norm_output=self.norm_features, ) df = x_chunk.size(2) // feat_chunk.size(1) delta = (size - start) // df feat.append(feat_chunk[:, -delta:]) return torch.cat(feat, 1).squeeze(0) def load_cpc_model(checkpoint_path, layer=None): state_dict = torch.load(checkpoint_path) weights = state_dict["weights"] config = state_dict["config"] if layer is not None: config["nLevelsGRU"] = layer encoder = CPCEncoder(config["hiddenEncoder"]) ar_net = CPCAR( config["hiddenEncoder"], config["hiddenGar"], False, config["nLevelsGRU"] ) model = CPCModel(encoder, ar_net) model.load_state_dict(weights, strict=False) model.config = config return model class ChannelNorm(nn.Module): def __init__(self, num_features, epsilon=1e-05, affine=True): super(ChannelNorm, self).__init__() if affine: self.weight = nn.parameter.Parameter(torch.Tensor(1, num_features, 1)) self.bias = nn.parameter.Parameter(torch.Tensor(1, num_features, 1)) else: self.weight = None self.bias = None self.epsilon = epsilon self.p = 0 self.affine = affine self.reset_parameters() def reset_parameters(self): if self.affine: torch.nn.init.ones_(self.weight) torch.nn.init.zeros_(self.bias) def forward(self, x): cum_mean = x.mean(dim=1, keepdim=True) cum_var = x.var(dim=1, keepdim=True) x = (x - cum_mean) * torch.rsqrt(cum_var + self.epsilon) if self.weight is not None: x = x * self.weight + self.bias return x class CPCEncoder(nn.Module): def __init__(self, hidden_dim=512): super(CPCEncoder, self).__init__() self.conv0 = nn.Conv1d(1, hidden_dim, 10, stride=5, padding=3) self.batchNorm0 = ChannelNorm(hidden_dim) self.conv1 = nn.Conv1d(hidden_dim, hidden_dim, 8, stride=4, padding=2) self.batchNorm1 = ChannelNorm(hidden_dim) self.conv2 = nn.Conv1d(hidden_dim, hidden_dim, 4, stride=2, padding=1) self.batchNorm2 = ChannelNorm(hidden_dim) self.conv3 = nn.Conv1d(hidden_dim, hidden_dim, 4, stride=2, padding=1) self.batchNorm3 = ChannelNorm(hidden_dim) self.conv4 = nn.Conv1d(hidden_dim, hidden_dim, 4, stride=2, padding=1) self.batchNorm4 = ChannelNorm(hidden_dim) self.DOWNSAMPLING 
= 160 def get_output_dim(self): return self.conv4.out_channels def forward(self, x): x = F.relu(self.batchNorm0(self.conv0(x))) x = F.relu(self.batchNorm1(self.conv1(x))) x = F.relu(self.batchNorm2(self.conv2(x))) x = F.relu(self.batchNorm3(self.conv3(x))) x = F.relu(self.batchNorm4(self.conv4(x))) return x class CPCAR(nn.Module): def __init__(self, dim_encoded, dim_output, keep_hidden, num_layers): super(CPCAR, self).__init__() self.baseNet = nn.LSTM( dim_encoded, dim_output, num_layers=num_layers, batch_first=True ) self.hidden = None self.keep_hidden = keep_hidden def get_output_dim(self): return self.baseNet.hidden_size def forward(self, x): try: self.baseNet.flatten_parameters() except RuntimeError: pass x, h = self.baseNet(x, self.hidden) if self.keep_hidden: if isinstance(h, tuple): self.hidden = tuple(x.detach() for x in h) else: self.hidden = h.detach() return x class CPCModel(nn.Module): def __init__(self, encoder, ar_net): super(CPCModel, self).__init__() self.gEncoder = encoder self.gAR = ar_net self.config = None def forward(self, x, label): encoded = self.gEncoder(x).permute(0, 2, 1) cpc_feature = self.gAR(encoded) return cpc_feature, encoded, label def extract_features(self, source, get_encoded=False, norm_output=False): cpc_feature, encoded, _ = self.forward(source, None) if get_encoded: cpc_feature = encoded if norm_output: mean = cpc_feature.mean(dim=1, keepdim=True) var = cpc_feature.var(dim=1, keepdim=True) cpc_feature = (cpc_feature - mean) / torch.sqrt(var + 1e-08) return cpc_feature
6,525
32.813472
82
py
sign-topic
sign-topic-main/examples/discriminative_reranking_nmt/drnmt_rerank.py
#!/usr/bin/env python3 -u # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ Score raw text with a trained model. """ from collections import namedtuple import logging from multiprocessing import Pool import sys import os import random import numpy as np import sacrebleu import torch from fairseq import checkpoint_utils, options, utils logger = logging.getLogger("fairseq_cli.drnmt_rerank") logger.setLevel(logging.INFO) Batch = namedtuple("Batch", "ids src_tokens src_lengths") pool_init_variables = {} def init_loaded_scores(mt_scores, model_scores, hyp, ref): global pool_init_variables pool_init_variables["mt_scores"] = mt_scores pool_init_variables["model_scores"] = model_scores pool_init_variables["hyp"] = hyp pool_init_variables["ref"] = ref def parse_fairseq_gen(filename, task): source = {} hypos = {} scores = {} with open(filename, "r", encoding="utf-8") as f: for line in f: line = line.strip() if line.startswith("S-"): # source uid, text = line.split("\t", 1) uid = int(uid[2:]) source[uid] = text elif line.startswith("D-"): # hypo uid, score, text = line.split("\t", 2) uid = int(uid[2:]) if uid not in hypos: hypos[uid] = [] scores[uid] = [] hypos[uid].append(text) scores[uid].append(float(score)) else: continue source_out = [source[i] for i in range(len(hypos))] hypos_out = [h for i in range(len(hypos)) for h in hypos[i]] scores_out = [s for i in range(len(scores)) for s in scores[i]] return source_out, hypos_out, scores_out def read_target(filename): with open(filename, "r", encoding="utf-8") as f: output = [line.strip() for line in f] return output def make_batches(args, src, hyp, task, max_positions, encode_fn): assert len(src) * args.beam == len( hyp ), f"Expect {len(src) * args.beam} hypotheses for {len(src)} source sentences with beam size {args.beam}. Got {len(hyp)} hypotheses intead." 
hyp_encode = [ task.source_dictionary.encode_line(encode_fn(h), add_if_not_exist=False).long() for h in hyp ] if task.cfg.include_src: src_encode = [ task.source_dictionary.encode_line( encode_fn(s), add_if_not_exist=False ).long() for s in src ] tokens = [(src_encode[i // args.beam], h) for i, h in enumerate(hyp_encode)] lengths = [(t1.numel(), t2.numel()) for t1, t2 in tokens] else: tokens = [(h,) for h in hyp_encode] lengths = [(h.numel(),) for h in hyp_encode] itr = task.get_batch_iterator( dataset=task.build_dataset_for_inference(tokens, lengths), max_tokens=args.max_tokens, max_sentences=args.batch_size, max_positions=max_positions, ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test, ).next_epoch_itr(shuffle=False) for batch in itr: yield Batch( ids=batch["id"], src_tokens=batch["net_input"]["src_tokens"], src_lengths=batch["net_input"]["src_lengths"], ) def decode_rerank_scores(args): if args.max_tokens is None and args.batch_size is None: args.batch_size = 1 logger.info(args) use_cuda = torch.cuda.is_available() and not args.cpu # Load ensemble logger.info("loading model(s) from {}".format(args.path)) models, _model_args, task = checkpoint_utils.load_model_ensemble_and_task( [args.path], arg_overrides=eval(args.model_overrides), ) for model in models: if args.fp16: model.half() if use_cuda: model.cuda() # Initialize generator generator = task.build_generator(args) # Handle tokenization and BPE tokenizer = task.build_tokenizer(args) bpe = task.build_bpe(args) def encode_fn(x): if tokenizer is not None: x = tokenizer.encode(x) if bpe is not None: x = bpe.encode(x) return x max_positions = utils.resolve_max_positions( task.max_positions(), *[model.max_positions() for model in models] ) src, hyp, mt_scores = parse_fairseq_gen(args.in_text, task) model_scores = {} logger.info("decode reranker score") for batch in make_batches(args, src, hyp, task, max_positions, encode_fn): src_tokens = batch.src_tokens src_lengths = batch.src_lengths if use_cuda: src_tokens = src_tokens.cuda() src_lengths = src_lengths.cuda() sample = { "net_input": {"src_tokens": src_tokens, "src_lengths": src_lengths}, } scores = task.inference_step(generator, models, sample) for id, sc in zip(batch.ids.tolist(), scores.tolist()): model_scores[id] = sc[0] model_scores = [model_scores[i] for i in range(len(model_scores))] return src, hyp, mt_scores, model_scores def get_score(mt_s, md_s, w1, lp, tgt_len): return mt_s / (tgt_len ** lp) * w1 + md_s def get_best_hyps(mt_scores, md_scores, hypos, fw_weight, lenpen, beam): assert len(mt_scores) == len(md_scores) and len(mt_scores) == len(hypos) hypo_scores = [] best_hypos = [] best_scores = [] offset = 0 for i in range(len(hypos)): tgt_len = len(hypos[i].split()) hypo_scores.append( get_score(mt_scores[i], md_scores[i], fw_weight, lenpen, tgt_len) ) if (i + 1) % beam == 0: max_i = np.argmax(hypo_scores) best_hypos.append(hypos[offset + max_i]) best_scores.append(hypo_scores[max_i]) hypo_scores = [] offset += beam return best_hypos, best_scores def eval_metric(args, hypos, ref): if args.metric == "bleu": score = sacrebleu.corpus_bleu(hypos, [ref]).score else: score = sacrebleu.corpus_ter(hypos, [ref]).score return score def score_target_hypo(args, fw_weight, lp): mt_scores = pool_init_variables["mt_scores"] model_scores = pool_init_variables["model_scores"] hyp = pool_init_variables["hyp"] ref = pool_init_variables["ref"] best_hypos, _ = get_best_hyps( mt_scores, model_scores, hyp, fw_weight, lp, args.beam ) rerank_eval = None if ref: rerank_eval = 
eval_metric(args, best_hypos, ref) print(f"fw_weight {fw_weight}, lenpen {lp}, eval {rerank_eval}") return rerank_eval def print_result(best_scores, best_hypos, output_file): for i, (s, h) in enumerate(zip(best_scores, best_hypos)): print(f"{i}\t{s}\t{h}", file=output_file) def main(args): utils.import_user_module(args) src, hyp, mt_scores, model_scores = decode_rerank_scores(args) assert ( not args.tune or args.target_text is not None ), "--target-text has to be set when tuning weights" if args.target_text: ref = read_target(args.target_text) assert len(src) == len( ref ), f"different numbers of source and target sentences ({len(src)} vs. {len(ref)})" orig_best_hypos = [hyp[i] for i in range(0, len(hyp), args.beam)] orig_eval = eval_metric(args, orig_best_hypos, ref) if args.tune: logger.info("tune weights for reranking") random_params = np.array( [ [ random.uniform( args.lower_bound_fw_weight, args.upper_bound_fw_weight ), random.uniform(args.lower_bound_lenpen, args.upper_bound_lenpen), ] for k in range(args.num_trials) ] ) logger.info("launching pool") with Pool( 32, initializer=init_loaded_scores, initargs=(mt_scores, model_scores, hyp, ref), ) as p: rerank_scores = p.starmap( score_target_hypo, [ (args, random_params[i][0], random_params[i][1],) for i in range(args.num_trials) ], ) if args.metric == "bleu": best_index = np.argmax(rerank_scores) else: best_index = np.argmin(rerank_scores) best_fw_weight = random_params[best_index][0] best_lenpen = random_params[best_index][1] else: assert ( args.lenpen is not None and args.fw_weight is not None ), "--lenpen and --fw-weight should be set" best_fw_weight, best_lenpen = args.fw_weight, args.lenpen best_hypos, best_scores = get_best_hyps( mt_scores, model_scores, hyp, best_fw_weight, best_lenpen, args.beam ) if args.results_path is not None: os.makedirs(args.results_path, exist_ok=True) output_path = os.path.join( args.results_path, "generate-{}.txt".format(args.gen_subset), ) with open(output_path, "w", buffering=1, encoding="utf-8") as o: print_result(best_scores, best_hypos, o) else: print_result(best_scores, best_hypos, sys.stdout) if args.target_text: rerank_eval = eval_metric(args, best_hypos, ref) print(f"before reranking, {args.metric.upper()}:", orig_eval) print( f"after reranking with fw_weight={best_fw_weight}, lenpen={best_lenpen}, {args.metric.upper()}:", rerank_eval, ) def cli_main(): parser = options.get_generation_parser(interactive=True) parser.add_argument( "--in-text", default=None, required=True, help="text from fairseq-interactive output, containing source sentences and hypotheses", ) parser.add_argument("--target-text", default=None, help="reference text") parser.add_argument("--metric", type=str, choices=["bleu", "ter"], default="bleu") parser.add_argument( "--tune", action="store_true", help="if set, tune weights on fw scores and lenpen instead of applying fixed weights for reranking", ) parser.add_argument( "--lower-bound-fw-weight", default=0.0, type=float, help="lower bound of search space", ) parser.add_argument( "--upper-bound-fw-weight", default=3, type=float, help="upper bound of search space", ) parser.add_argument( "--lower-bound-lenpen", default=0.0, type=float, help="lower bound of search space", ) parser.add_argument( "--upper-bound-lenpen", default=3, type=float, help="upper bound of search space", ) parser.add_argument( "--fw-weight", type=float, default=None, help="weight on the fw model score" ) parser.add_argument( "--num-trials", default=1000, type=int, help="number of trials to do for random 
search", ) args = options.parse_args_and_arch(parser) main(args) if __name__ == "__main__": cli_main()
11,312
29.994521
144
py
sign-topic
sign-topic-main/examples/discriminative_reranking_nmt/criterions/discriminative_reranking_criterion.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math from dataclasses import dataclass, field import torch import torch.nn.functional as F from fairseq import metrics, utils from fairseq.criterions import FairseqCriterion, register_criterion from fairseq.dataclass import ChoiceEnum, FairseqDataclass _EPSILON = torch.finfo(torch.float32).eps TARGET_DIST_NORM_CHOICES = ChoiceEnum(["none", "minmax"]) @dataclass class KLDivergenceRerankingCriterionConfig(FairseqDataclass): target_dist_norm: TARGET_DIST_NORM_CHOICES = field( default="none", metadata={"help": "method to normalize the range of target scores"}, ) temperature: float = field( default=1.0, metadata={"help": "temperature in softmax for target distributions"}, ) forward_batch_size: int = field( default=32, metadata={ "help": "number of hypotheses per batch for model forward (set a value smaller than --mt-beam to avoid OOM when training with a large beam size)" }, ) @register_criterion( "kl_divergence_rereanking", dataclass=KLDivergenceRerankingCriterionConfig ) class KLDivergenceRerankingCriterion(FairseqCriterion): def __init__( self, task, target_dist_norm, temperature, forward_batch_size, ): super().__init__(task) self.target_dist_norm = target_dist_norm self.temperature = temperature self.forward_batch_size = forward_batch_size def forward(self, model, sample, reduce=True): """Compute the loss for the given sample. Returns a tuple with three elements: 1) the loss 2) the sample size, which is used as the denominator for the gradient 3) logging outputs to display while training """ sample_size = sample["id"].numel() assert sample_size % self.task.cfg.mt_beam == 0, ( f"sample_size ({sample_size}) cannot be divided by beam size ({self.task.cfg.mt_beam})." f"Please set --required-batch-size-multiple={self.task.cfg.mt_beam}." 
) # split into smaller batches for model forward batch_out = [] for i in range(0, sample_size, self.forward_batch_size): j = min(i + self.forward_batch_size, sample_size) out = model( src_tokens=sample["net_input"]["src_tokens"][i:j, :], src_lengths=sample["net_input"]["src_lengths"][i:j], ) batch_out.append( model.sentence_forward(out, sample["net_input"]["src_tokens"][i:j, :]) ) batch_out = torch.cat(batch_out, dim=0).view( self.task.cfg.mt_beam, sample_size // self.task.cfg.mt_beam, -1 ) # T x B x C if model.joint_classification == "sent": batch_out = model.joint_forward(batch_out) scores = model.classification_forward(batch_out.view(sample_size, 1, -1)).view( -1, self.task.cfg.mt_beam ) # input: B x T x C loss = self.compute_kl_loss( scores, sample["target"][:, 0].view(-1, self.task.cfg.mt_beam) ) sample_size = sample_size // self.task.cfg.mt_beam logging_output = { "loss": loss.detach(), "ntokens": sample["ntokens"], "nsentences": sample_size * self.task.cfg.mt_beam, "sample_size": sample_size, "scores": scores.detach(), } return loss, sample_size, logging_output def compute_kl_loss(self, logits, target): norm_target = target if self.target_dist_norm == "minmax": min_v = torch.min(target, 1, keepdim=True).values max_v = torch.max(target, 1, keepdim=True).values norm_target = (target - min_v) / (max_v - min_v + _EPSILON) target_dist = F.softmax( norm_target / self.temperature, dim=-1, dtype=torch.float32 ) model_dist = F.log_softmax(logits, dim=-1, dtype=torch.float32) loss = -(target_dist * model_dist - target_dist * target_dist.log()).sum() return loss @staticmethod def reduce_metrics(logging_outputs) -> None: """Aggregate logging outputs from data parallel training.""" loss_sum = utils.item(sum(log.get("loss", 0) for log in logging_outputs)) sample_size = utils.item( sum(log.get("sample_size", 0) for log in logging_outputs) ) loss = loss_sum / sample_size / math.log(2) metrics.log_scalar("loss", loss, sample_size, round=3) @staticmethod def logging_outputs_can_be_summed() -> bool: """ Whether the logging outputs returned by `forward` can be summed across workers prior to calling `reduce_metrics`. Setting this to True will improves distributed training speed. """ return True
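# A minimal standalone sketch (not part of the criterion above) of the same
# KL-style objective on toy tensors: reranker scores for each beam are matched
# against a softened distribution over metric-derived target scores, using the
# "minmax" normalization branch of compute_kl_loss().
import torch
import torch.nn.functional as F


def toy_kl_rerank_loss(scores, target, temperature=1.0, eps=torch.finfo(torch.float32).eps):
    # scores, target: (num_sentences, beam_size)
    min_v = torch.min(target, 1, keepdim=True).values
    max_v = torch.max(target, 1, keepdim=True).values
    norm_target = (target - min_v) / (max_v - min_v + eps)
    target_dist = F.softmax(norm_target / temperature, dim=-1)
    model_dist = F.log_softmax(scores, dim=-1)
    # KL(target || model), summed over the batch, as in compute_kl_loss()
    return -(target_dist * model_dist - target_dist * target_dist.log()).sum()


# e.g. toy_kl_rerank_loss(torch.randn(2, 4), torch.rand(2, 4))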
4,997
34.956835
157
py
sign-topic
sign-topic-main/examples/discriminative_reranking_nmt/models/discriminative_reranking_model.py
from dataclasses import dataclass, field import os import torch import torch.nn as nn from fairseq import utils from fairseq.dataclass import ChoiceEnum, FairseqDataclass from fairseq.models import ( BaseFairseqModel, register_model, ) from fairseq.models.roberta.model import RobertaClassificationHead from fairseq.modules import ( LayerNorm, TransformerSentenceEncoder, TransformerSentenceEncoderLayer, ) ACTIVATION_FN_CHOICES = ChoiceEnum(utils.get_available_activation_fns()) JOINT_CLASSIFICATION_CHOICES = ChoiceEnum(["none", "sent"]) SENTENCE_REP_CHOICES = ChoiceEnum(["head", "meanpool", "maxpool"]) def update_init_roberta_model_state(state): """ update the state_dict of a Roberta model for initializing weights of the BertRanker """ for k in list(state.keys()): if ".lm_head." in k or "version" in k: del state[k] continue # remove 'encoder/decoder.sentence_encoder.' from the key assert k.startswith("encoder.sentence_encoder.") or k.startswith( "decoder.sentence_encoder." ), f"Cannot recognize parameter name {k}" if "layernorm_embedding" in k: new_k = k.replace(".layernorm_embedding.", ".emb_layer_norm.") state[new_k[25:]] = state[k] else: state[k[25:]] = state[k] del state[k] class BaseRanker(nn.Module): def __init__(self, args, task): super().__init__() self.separator_token = task.dictionary.eos() self.padding_idx = task.dictionary.pad() def forward(self, src_tokens): raise NotImplementedError def get_segment_labels(self, src_tokens): segment_boundary = (src_tokens == self.separator_token).long() segment_labels = ( segment_boundary.cumsum(dim=1) - segment_boundary - (src_tokens == self.padding_idx).long() ) return segment_labels def get_positions(self, src_tokens, segment_labels): segment_positions = ( torch.arange(src_tokens.shape[1]) .to(src_tokens.device) .repeat(src_tokens.shape[0], 1) ) segment_boundary = (src_tokens == self.separator_token).long() _, col_idx = (segment_positions * segment_boundary).nonzero(as_tuple=True) col_idx = torch.cat([torch.zeros(1).type_as(col_idx), col_idx]) offset = torch.cat( [ torch.zeros(1).type_as(segment_boundary), segment_boundary.sum(dim=1).cumsum(dim=0)[:-1], ] ) segment_positions -= col_idx[segment_labels + offset.unsqueeze(1)] * ( segment_labels != 0 ) padding_mask = src_tokens.ne(self.padding_idx) segment_positions = (segment_positions + 1) * padding_mask.type_as( segment_positions ) + self.padding_idx return segment_positions class BertRanker(BaseRanker): def __init__(self, args, task): super(BertRanker, self).__init__(args, task) init_model = getattr(args, "pretrained_model", "") self.joint_layers = nn.ModuleList() if os.path.isfile(init_model): print(f"initialize weight from {init_model}") from fairseq import hub_utils x = hub_utils.from_pretrained( os.path.dirname(init_model), checkpoint_file=os.path.basename(init_model), ) in_state_dict = x["models"][0].state_dict() init_args = x["args"].model num_positional_emb = init_args.max_positions + task.dictionary.pad() + 1 # follow the setup in roberta self.model = TransformerSentenceEncoder( padding_idx=task.dictionary.pad(), vocab_size=len(task.dictionary), num_encoder_layers=getattr( args, "encoder_layers", init_args.encoder_layers ), embedding_dim=init_args.encoder_embed_dim, ffn_embedding_dim=init_args.encoder_ffn_embed_dim, num_attention_heads=init_args.encoder_attention_heads, dropout=init_args.dropout, attention_dropout=init_args.attention_dropout, activation_dropout=init_args.activation_dropout, num_segments=2, # add language embeddings max_seq_len=num_positional_emb, 
offset_positions_by_padding=False, encoder_normalize_before=True, apply_bert_init=True, activation_fn=init_args.activation_fn, freeze_embeddings=args.freeze_embeddings, n_trans_layers_to_freeze=args.n_trans_layers_to_freeze, ) # still need to learn segment embeddings as we added a second language embedding if args.freeze_embeddings: for p in self.model.segment_embeddings.parameters(): p.requires_grad = False update_init_roberta_model_state(in_state_dict) print("loading weights from the pretrained model") self.model.load_state_dict( in_state_dict, strict=False ) # ignore mismatch in language embeddings ffn_embedding_dim = init_args.encoder_ffn_embed_dim num_attention_heads = init_args.encoder_attention_heads dropout = init_args.dropout attention_dropout = init_args.attention_dropout activation_dropout = init_args.activation_dropout activation_fn = init_args.activation_fn classifier_embed_dim = getattr( args, "embed_dim", init_args.encoder_embed_dim ) if classifier_embed_dim != init_args.encoder_embed_dim: self.transform_layer = nn.Linear( init_args.encoder_embed_dim, classifier_embed_dim ) else: self.model = TransformerSentenceEncoder( padding_idx=task.dictionary.pad(), vocab_size=len(task.dictionary), num_encoder_layers=args.encoder_layers, embedding_dim=args.embed_dim, ffn_embedding_dim=args.ffn_embed_dim, num_attention_heads=args.attention_heads, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, max_seq_len=task.max_positions() if task.max_positions() else args.tokens_per_sample, num_segments=2, offset_positions_by_padding=False, encoder_normalize_before=args.encoder_normalize_before, apply_bert_init=args.apply_bert_init, activation_fn=args.activation_fn, ) classifier_embed_dim = args.embed_dim ffn_embedding_dim = args.ffn_embed_dim num_attention_heads = args.attention_heads dropout = args.dropout attention_dropout = args.attention_dropout activation_dropout = args.activation_dropout activation_fn = args.activation_fn self.joint_classification = args.joint_classification if args.joint_classification == "sent": if args.joint_normalize_before: self.joint_layer_norm = LayerNorm(classifier_embed_dim) else: self.joint_layer_norm = None self.joint_layers = nn.ModuleList( [ TransformerSentenceEncoderLayer( embedding_dim=classifier_embed_dim, ffn_embedding_dim=ffn_embedding_dim, num_attention_heads=num_attention_heads, dropout=dropout, attention_dropout=attention_dropout, activation_dropout=activation_dropout, activation_fn=activation_fn, ) for _ in range(args.num_joint_layers) ] ) self.classifier = RobertaClassificationHead( classifier_embed_dim, classifier_embed_dim, 1, # num_classes "tanh", args.classifier_dropout, ) def forward(self, src_tokens, src_lengths): segment_labels = self.get_segment_labels(src_tokens) positions = self.get_positions(src_tokens, segment_labels) inner_states, _ = self.model( tokens=src_tokens, segment_labels=segment_labels, last_state_only=True, positions=positions, ) return inner_states[-1].transpose(0, 1) # T x B x C -> B x T x C def sentence_forward(self, encoder_out, src_tokens=None, sentence_rep="head"): # encoder_out: B x T x C if sentence_rep == "head": x = encoder_out[:, :1, :] else: # 'meanpool', 'maxpool' assert src_tokens is not None, "meanpool requires src_tokens input" segment_labels = self.get_segment_labels(src_tokens) padding_mask = src_tokens.ne(self.padding_idx) encoder_mask = segment_labels * padding_mask.type_as(segment_labels) if sentence_rep == "meanpool": ntokens = torch.sum(encoder_mask, 
dim=1, keepdim=True) x = torch.sum( encoder_out * encoder_mask.unsqueeze(2), dim=1, keepdim=True ) / ntokens.unsqueeze(2).type_as(encoder_out) else: # 'maxpool' encoder_out[ (encoder_mask == 0).unsqueeze(2).repeat(1, 1, encoder_out.shape[-1]) ] = -float("inf") x, _ = torch.max(encoder_out, dim=1, keepdim=True) if hasattr(self, "transform_layer"): x = self.transform_layer(x) return x # B x 1 x C def joint_forward(self, x): # x: T x B x C if self.joint_layer_norm: x = self.joint_layer_norm(x.transpose(0, 1)) x = x.transpose(0, 1) for layer in self.joint_layers: x, _ = layer(x, self_attn_padding_mask=None) return x def classification_forward(self, x): # x: B x T x C return self.classifier(x) @dataclass class DiscriminativeNMTRerankerConfig(FairseqDataclass): pretrained_model: str = field( default="", metadata={"help": "pretrained model to load"} ) sentence_rep: SENTENCE_REP_CHOICES = field( default="head", metadata={ "help": "method to transform the output of the transformer stack to a sentence-level representation" }, ) dropout: float = field(default=0.1, metadata={"help": "dropout probability"}) attention_dropout: float = field( default=0.0, metadata={"help": "dropout probability for attention weights"} ) activation_dropout: float = field( default=0.0, metadata={"help": "dropout probability after activation in FFN"} ) classifier_dropout: float = field( default=0.0, metadata={"help": "classifier dropout probability"} ) embed_dim: int = field(default=768, metadata={"help": "embedding dimension"}) ffn_embed_dim: int = field( default=2048, metadata={"help": "embedding dimension for FFN"} ) encoder_layers: int = field(default=12, metadata={"help": "num encoder layers"}) attention_heads: int = field(default=8, metadata={"help": "num attention heads"}) encoder_normalize_before: bool = field( default=False, metadata={"help": "apply layernorm before each encoder block"} ) apply_bert_init: bool = field( default=False, metadata={"help": "use custom param initialization for BERT"} ) activation_fn: ACTIVATION_FN_CHOICES = field( default="relu", metadata={"help": "activation function to use"} ) freeze_embeddings: bool = field( default=False, metadata={"help": "freeze embeddings in the pretrained model"} ) n_trans_layers_to_freeze: int = field( default=0, metadata={ "help": "number of layers to freeze in the pretrained transformer model" }, ) # joint classfication joint_classification: JOINT_CLASSIFICATION_CHOICES = field( default="none", metadata={"help": "method to compute joint features for classification"}, ) num_joint_layers: int = field( default=1, metadata={"help": "number of joint layers"} ) joint_normalize_before: bool = field( default=False, metadata={"help": "apply layer norm on the input to the joint layer"}, ) @register_model( "discriminative_nmt_reranker", dataclass=DiscriminativeNMTRerankerConfig ) class DiscriminativeNMTReranker(BaseFairseqModel): @classmethod def build_model(cls, args, task): model = BertRanker(args, task) return DiscriminativeNMTReranker(args, model) def __init__(self, args, model): super().__init__() self.model = model self.sentence_rep = args.sentence_rep self.joint_classification = args.joint_classification def forward(self, src_tokens, src_lengths, **kwargs): return self.model(src_tokens, src_lengths) def sentence_forward(self, encoder_out, src_tokens): return self.model.sentence_forward(encoder_out, src_tokens, self.sentence_rep) def joint_forward(self, x): return self.model.joint_forward(x) def classification_forward(self, x): return 
self.model.classification_forward(x)
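# Typical scoring flow for this model (mirrors RerankerScorer in the companion
# discriminative_reranking_task.py; B = batch size, beam = mt_beam):
#   out    = model(src_tokens, src_lengths)               # B x T x C token states
#   sent   = model.sentence_forward(out, src_tokens)      # B x 1 x C sentence rep
#   sent   = model.joint_forward(sent.view(beam, B // beam, -1))  # only if joint_classification == "sent"
#   scores = model.classification_forward(sent.view(B, 1, -1))    # one score per hypothesis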
13,714
36.472678
112
py
sign-topic
sign-topic-main/examples/discriminative_reranking_nmt/tasks/discriminative_reranking_task.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from dataclasses import dataclass, field import itertools import logging import os import numpy as np import torch from fairseq import metrics from fairseq.data import ( ConcatDataset, ConcatSentencesDataset, data_utils, Dictionary, IdDataset, indexed_dataset, NestedDictionaryDataset, NumSamplesDataset, NumelDataset, PrependTokenDataset, RawLabelDataset, RightPadDataset, SortDataset, TruncateDataset, TokenBlockDataset, ) from fairseq.dataclass import ChoiceEnum, FairseqDataclass from fairseq.tasks import FairseqTask, register_task from omegaconf import II, MISSING EVAL_BLEU_ORDER = 4 TARGET_METRIC_CHOICES = ChoiceEnum(["bleu", "ter"]) logger = logging.getLogger(__name__) @dataclass class DiscriminativeRerankingNMTConfig(FairseqDataclass): data: str = field(default=MISSING, metadata={"help": "path to data directory"}) num_data_splits: int = field( default=1, metadata={"help": "total number of data splits"} ) no_shuffle: bool = field( default=False, metadata={"help": "do not shuffle training data"} ) max_positions: int = field( default=512, metadata={"help": "number of positional embeddings to learn"} ) include_src: bool = field( default=False, metadata={"help": "include source sentence"} ) mt_beam: int = field(default=50, metadata={"help": "beam size of input hypotheses"}) eval_target_metric: bool = field( default=False, metadata={"help": "evaluation with the target metric during validation"}, ) target_metric: TARGET_METRIC_CHOICES = field( default="bleu", metadata={"help": "name of the target metric to optimize for"} ) train_subset: str = field( default=II("dataset.train_subset"), metadata={"help": "data subset to use for training (e.g. train, valid, test)"}, ) seed: int = field( default=II("common.seed"), metadata={"help": "pseudo random number generator seed"}, ) class RerankerScorer(object): """Scores the target for a given (source (optional), target) input.""" def __init__(self, args, mt_beam): self.mt_beam = mt_beam @torch.no_grad() def generate(self, models, sample, **kwargs): """Score a batch of translations.""" net_input = sample["net_input"] assert len(models) == 1, "does not support model ensemble" model = models[0] bs = net_input["src_tokens"].shape[0] assert ( model.joint_classification == "none" or bs % self.mt_beam == 0 ), f"invalid batch size ({bs}) for joint classification with beam size ({self.mt_beam})" model.eval() logits = model(**net_input) batch_out = model.sentence_forward(logits, net_input["src_tokens"]) if model.joint_classification == "sent": batch_out = model.joint_forward( batch_out.view(self.mt_beam, bs // self.mt_beam, -1) ) scores = model.classification_forward( batch_out.view(bs, 1, -1) ) # input: B x T x C return scores @register_task( "discriminative_reranking_nmt", dataclass=DiscriminativeRerankingNMTConfig ) class DiscriminativeRerankingNMTTask(FairseqTask): """ Translation rerank task. The input can be either (src, tgt) sentence pairs or tgt sentence only. 
""" cfg: DiscriminativeRerankingNMTConfig def __init__(self, cfg: DiscriminativeRerankingNMTConfig, data_dictionary=None): super().__init__(cfg) self.dictionary = data_dictionary self._max_positions = cfg.max_positions # args.tokens_per_sample = self._max_positions # self.num_classes = 1 # for model @classmethod def load_dictionary(cls, cfg, filename): """Load the dictionary from the filename""" dictionary = Dictionary.load(filename) dictionary.add_symbol("<mask>") # for loading pretrained XLMR model return dictionary @classmethod def setup_task(cls, cfg: DiscriminativeRerankingNMTConfig, **kwargs): # load data dictionary (assume joint dictionary) data_path = cfg.data data_dict = cls.load_dictionary( cfg, os.path.join(data_path, "input_src/dict.txt") ) logger.info("[input] src dictionary: {} types".format(len(data_dict))) return DiscriminativeRerankingNMTTask(cfg, data_dict) def load_dataset(self, split, epoch=0, combine=False, **kwargs): """Load a given dataset split (e.g., train, valid, test).""" if self.cfg.data.endswith("1"): data_shard = (epoch - 1) % self.cfg.num_data_splits + 1 data_path = self.cfg.data[:-1] + str(data_shard) else: data_path = self.cfg.data def get_path(type, data_split): return os.path.join(data_path, str(type), data_split) def make_dataset(type, dictionary, data_split, combine): split_path = get_path(type, data_split) dataset = data_utils.load_indexed_dataset( split_path, dictionary, combine=combine, ) return dataset def load_split(data_split, metric): input_src = None if self.cfg.include_src: input_src = make_dataset( "input_src", self.dictionary, data_split, combine=False ) assert input_src is not None, "could not find dataset: {}".format( get_path("input_src", data_split) ) input_tgt = make_dataset( "input_tgt", self.dictionary, data_split, combine=False ) assert input_tgt is not None, "could not find dataset: {}".format( get_path("input_tgt", data_split) ) label_path = f"{get_path(metric, data_split)}.{metric}" assert os.path.exists(label_path), f"could not find dataset: {label_path}" np_labels = np.loadtxt(label_path) if self.cfg.target_metric == "ter": np_labels = -np_labels label = RawLabelDataset(np_labels) return input_src, input_tgt, label src_datasets = [] tgt_datasets = [] label_datasets = [] if split == self.cfg.train_subset: for k in itertools.count(): split_k = "train" + (str(k) if k > 0 else "") prefix = os.path.join(data_path, "input_tgt", split_k) if not indexed_dataset.dataset_exists(prefix, impl=None): if k > 0: break else: raise FileNotFoundError(f"Dataset not found: {prefix}") input_src, input_tgt, label = load_split( split_k, self.cfg.target_metric ) src_datasets.append(input_src) tgt_datasets.append(input_tgt) label_datasets.append(label) else: input_src, input_tgt, label = load_split(split, self.cfg.target_metric) src_datasets.append(input_src) tgt_datasets.append(input_tgt) label_datasets.append(label) if len(tgt_datasets) == 1: input_tgt, label = tgt_datasets[0], label_datasets[0] if self.cfg.include_src: input_src = src_datasets[0] else: input_tgt = ConcatDataset(tgt_datasets) label = ConcatDataset(label_datasets) if self.cfg.include_src: input_src = ConcatDataset(src_datasets) input_tgt = TruncateDataset(input_tgt, self.cfg.max_positions) if self.cfg.include_src: input_src = PrependTokenDataset(input_src, self.dictionary.bos()) input_src = TruncateDataset(input_src, self.cfg.max_positions) src_lengths = NumelDataset(input_src, reduce=False) src_tokens = ConcatSentencesDataset(input_src, input_tgt) else: src_tokens = 
PrependTokenDataset(input_tgt, self.dictionary.bos()) src_lengths = NumelDataset(src_tokens, reduce=False) dataset = { "id": IdDataset(), "net_input": { "src_tokens": RightPadDataset( src_tokens, pad_idx=self.source_dictionary.pad(), ), "src_lengths": src_lengths, }, "nsentences": NumSamplesDataset(), "ntokens": NumelDataset(src_tokens, reduce=True), "target": label, } dataset = NestedDictionaryDataset(dataset, sizes=[src_tokens.sizes],) assert len(dataset) % self.cfg.mt_beam == 0, ( "dataset size (%d) is not a multiple of beam size (%d)" % (len(dataset), self.cfg.mt_beam) ) # no need to shuffle valid/test sets if not self.cfg.no_shuffle and split == self.cfg.train_subset: # need to keep all hypothese together start_idx = np.arange(0, len(dataset), self.cfg.mt_beam) with data_utils.numpy_seed(self.cfg.seed + epoch): np.random.shuffle(start_idx) idx = np.arange(0, self.cfg.mt_beam) shuffle = np.tile(idx, (len(start_idx), 1)).reshape(-1) + np.tile( start_idx, (self.cfg.mt_beam, 1) ).transpose().reshape(-1) dataset = SortDataset(dataset, sort_order=[shuffle],) logger.info(f"Loaded {split} with #samples: {len(dataset)}") self.datasets[split] = dataset return self.datasets[split] def build_dataset_for_inference(self, src_tokens, src_lengths, **kwargs): assert not self.cfg.include_src or len(src_tokens[0]) == 2 input_src = None if self.cfg.include_src: input_src = TokenBlockDataset( [t[0] for t in src_tokens], [l[0] for l in src_lengths], block_size=None, # ignored for "eos" break mode pad=self.source_dictionary.pad(), eos=self.source_dictionary.eos(), break_mode="eos", ) input_src = PrependTokenDataset(input_src, self.dictionary.bos()) input_src = TruncateDataset(input_src, self.cfg.max_positions) input_tgt = TokenBlockDataset( [t[-1] for t in src_tokens], [l[-1] for l in src_lengths], block_size=None, # ignored for "eos" break mode pad=self.source_dictionary.pad(), eos=self.source_dictionary.eos(), break_mode="eos", ) input_tgt = TruncateDataset(input_tgt, self.cfg.max_positions) if self.cfg.include_src: src_tokens = ConcatSentencesDataset(input_src, input_tgt) src_lengths = NumelDataset(input_src, reduce=False) else: input_tgt = PrependTokenDataset(input_tgt, self.dictionary.bos()) src_tokens = input_tgt src_lengths = NumelDataset(src_tokens, reduce=False) dataset = { "id": IdDataset(), "net_input": { "src_tokens": RightPadDataset( src_tokens, pad_idx=self.source_dictionary.pad(), ), "src_lengths": src_lengths, }, "nsentences": NumSamplesDataset(), "ntokens": NumelDataset(src_tokens, reduce=True), } return NestedDictionaryDataset(dataset, sizes=[src_tokens.sizes],) def build_model(self, cfg: FairseqDataclass): return super().build_model(cfg) def build_generator(self, args): return RerankerScorer(args, mt_beam=self.cfg.mt_beam) def max_positions(self): return self._max_positions @property def source_dictionary(self): return self.dictionary @property def target_dictionary(self): return self.dictionary def create_dummy_batch(self, device): dummy_target = ( torch.zeros(self.cfg.mt_beam, EVAL_BLEU_ORDER * 2 + 3).long().to(device) if not self.cfg.eval_ter else torch.zeros(self.cfg.mt_beam, 3).long().to(device) ) return { "id": torch.zeros(self.cfg.mt_beam, 1).long().to(device), "net_input": { "src_tokens": torch.zeros(self.cfg.mt_beam, 4).long().to(device), "src_lengths": torch.ones(self.cfg.mt_beam, 1).long().to(device), }, "nsentences": 0, "ntokens": 0, "target": dummy_target, } def train_step( self, sample, model, criterion, optimizer, update_num, ignore_grad=False ): if ignore_grad and sample 
is None: sample = self.create_dummy_batch(model.device) return super().train_step( sample, model, criterion, optimizer, update_num, ignore_grad ) def valid_step(self, sample, model, criterion): if sample is None: sample = self.create_dummy_batch(model.device) loss, sample_size, logging_output = super().valid_step(sample, model, criterion) if not self.cfg.eval_target_metric: return loss, sample_size, logging_output scores = logging_output["scores"] if self.cfg.target_metric == "bleu": assert sample["target"].shape[1] == EVAL_BLEU_ORDER * 2 + 3, ( "target does not contain enough information (" + str(sample["target"].shape[1]) + "for evaluating BLEU" ) max_id = torch.argmax(scores, dim=1) select_id = max_id + torch.arange( 0, sample_size * self.cfg.mt_beam, self.cfg.mt_beam ).to(max_id.device) bleu_data = sample["target"][select_id, 1:].sum(0).data logging_output["_bleu_sys_len"] = bleu_data[0] logging_output["_bleu_ref_len"] = bleu_data[1] for i in range(EVAL_BLEU_ORDER): logging_output["_bleu_counts_" + str(i)] = bleu_data[2 + i] logging_output["_bleu_totals_" + str(i)] = bleu_data[ 2 + EVAL_BLEU_ORDER + i ] elif self.cfg.target_metric == "ter": assert sample["target"].shape[1] == 3, ( "target does not contain enough information (" + str(sample["target"].shape[1]) + "for evaluating TER" ) max_id = torch.argmax(scores, dim=1) select_id = max_id + torch.arange( 0, sample_size * self.cfg.mt_beam, self.cfg.mt_beam ).to(max_id.device) ter_data = sample["target"][select_id, 1:].sum(0).data logging_output["_ter_num_edits"] = -ter_data[0] logging_output["_ter_ref_len"] = -ter_data[1] return loss, sample_size, logging_output def reduce_metrics(self, logging_outputs, criterion): super().reduce_metrics(logging_outputs, criterion) if not self.cfg.eval_target_metric: return def sum_logs(key): return sum(log.get(key, 0) for log in logging_outputs) if self.cfg.target_metric == "bleu": counts, totals = [], [] for i in range(EVAL_BLEU_ORDER): counts.append(sum_logs("_bleu_counts_" + str(i))) totals.append(sum_logs("_bleu_totals_" + str(i))) if max(totals) > 0: # log counts as numpy arrays -- log_scalar will sum them correctly metrics.log_scalar("_bleu_counts", np.array(counts)) metrics.log_scalar("_bleu_totals", np.array(totals)) metrics.log_scalar("_bleu_sys_len", sum_logs("_bleu_sys_len")) metrics.log_scalar("_bleu_ref_len", sum_logs("_bleu_ref_len")) def compute_bleu(meters): import inspect import sacrebleu fn_sig = inspect.getfullargspec(sacrebleu.compute_bleu)[0] if "smooth_method" in fn_sig: smooth = {"smooth_method": "exp"} else: smooth = {"smooth": "exp"} bleu = sacrebleu.compute_bleu( correct=meters["_bleu_counts"].sum, total=meters["_bleu_totals"].sum, sys_len=meters["_bleu_sys_len"].sum, ref_len=meters["_bleu_ref_len"].sum, **smooth, ) return round(bleu.score, 2) metrics.log_derived("bleu", compute_bleu) elif self.cfg.target_metric == "ter": num_edits = sum_logs("_ter_num_edits") ref_len = sum_logs("_ter_ref_len") if ref_len > 0: metrics.log_scalar("_ter_num_edits", num_edits) metrics.log_scalar("_ter_ref_len", ref_len) def compute_ter(meters): score = meters["_ter_num_edits"].sum / meters["_ter_ref_len"].sum return round(score.item(), 2) metrics.log_derived("ter", compute_ter)
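# A minimal standalone sketch (mirroring load_dataset() above) of the
# beam-preserving shuffle: whole groups of mt_beam consecutive hypotheses are
# permuted while the order inside each group is kept intact.
import numpy as np


def beam_preserving_shuffle(num_examples, mt_beam, seed=0):
    assert num_examples % mt_beam == 0
    start_idx = np.arange(0, num_examples, mt_beam)
    np.random.RandomState(seed).shuffle(start_idx)
    idx = np.arange(0, mt_beam)
    return np.tile(idx, (len(start_idx), 1)).reshape(-1) + np.tile(
        start_idx, (mt_beam, 1)
    ).transpose().reshape(-1)


# e.g. beam_preserving_shuffle(8, 4) is a permutation of range(8) in which each
# block of 4 consecutive indices stays together.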
17,491
35.747899
96
py
sign-topic
sign-topic-main/examples/pointer_generator/pointer_generator_src/transformer_pg.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging from typing import Any, Dict, Optional, List, Tuple import torch import torch.nn as nn from fairseq import utils from fairseq.models import register_model, register_model_architecture from fairseq.models.transformer import ( DEFAULT_MAX_SOURCE_POSITIONS, DEFAULT_MAX_TARGET_POSITIONS, TransformerDecoder, TransformerEncoder, TransformerModel, base_architecture, ) from torch import Tensor logger = logging.getLogger(__name__) @register_model("transformer_pointer_generator") class TransformerPointerGeneratorModel(TransformerModel): """ Transformer model from `"Attention Is All You Need" (Vaswani et al, 2017) <https://arxiv.org/abs/1706.03762>`_, augmented with a pointer-generator network from `"Get To The Point: Summarization with Pointer-Generator Networks" (See et al, 2017) <https://arxiv.org/abs/1704.04368>`_. Args: encoder (TransformerPointerGeneratorEncoder): the encoder decoder (TransformerPointerGeneratorDecoder): the decoder The Transformer pointer-generator model provides the following named architectures and command-line arguments: .. argparse:: :ref: fairseq.models.transformer_pointer_generator_parser :prog: """ @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" # fmt: off TransformerModel.add_args(parser) parser.add_argument('--alignment-heads', type=int, metavar='N', help='number of attention heads to be used for ' 'pointing') parser.add_argument('--alignment-layer', type=int, metavar='I', help='layer number to be used for pointing (0 ' 'corresponding to the bottommost layer)') parser.add_argument('--source-position-markers', type=int, metavar='N', help='dictionary includes N additional items that ' 'represent an OOV token at a particular input ' 'position') parser.add_argument('--force-generation', type=float, metavar='P', default=None, help='set the vocabulary distribution weight to P, ' 'instead of predicting it from the input (1.0 ' 'corresponding to generation, 0.0 to pointing)') # fmt: on @classmethod def build_model(cls, args, task): """Build a new model instance.""" # make sure all arguments are present in older models base_architecture(args) if args.encoder_layers_to_keep: args.encoder_layers = len(args.encoder_layers_to_keep.split(",")) if args.decoder_layers_to_keep: args.decoder_layers = len(args.decoder_layers_to_keep.split(",")) if getattr(args, "max_source_positions", None) is None: args.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS if getattr(args, "max_target_positions", None) is None: args.max_target_positions = DEFAULT_MAX_TARGET_POSITIONS if getattr(args, "source_position_markers", None) is None: args.source_position_markers = args.max_source_positions src_dict, tgt_dict = task.source_dictionary, task.target_dictionary if src_dict != tgt_dict: raise ValueError("Pointer-generator requires a joined dictionary") def build_embedding(dictionary, embed_dim, path=None): # The dictionary may include additional items that can be used in # place of the normal OOV token and that all map to the same # embedding. Using a different token for each input position allows # one to restore the word identities from the original source text. 
num_embeddings = len(dictionary) - args.source_position_markers padding_idx = dictionary.pad() unk_idx = dictionary.unk() logger.info( "dictionary indices from {0} to {1} will be mapped to {2}".format( num_embeddings, len(dictionary) - 1, unk_idx ) ) emb = Embedding(num_embeddings, embed_dim, padding_idx, unk_idx) # if provided, load from preloaded dictionaries if path: embed_dict = utils.parse_embedding(path) utils.load_embedding(embed_dict, dictionary, emb) return emb if args.share_all_embeddings: if args.encoder_embed_dim != args.decoder_embed_dim: raise ValueError( "--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim" ) if args.decoder_embed_path and ( args.decoder_embed_path != args.encoder_embed_path ): raise ValueError( "--share-all-embeddings not compatible with --decoder-embed-path" ) encoder_embed_tokens = build_embedding( src_dict, args.encoder_embed_dim, args.encoder_embed_path ) decoder_embed_tokens = encoder_embed_tokens args.share_decoder_input_output_embed = True else: encoder_embed_tokens = build_embedding( src_dict, args.encoder_embed_dim, args.encoder_embed_path ) decoder_embed_tokens = build_embedding( tgt_dict, args.decoder_embed_dim, args.decoder_embed_path ) encoder = cls.build_encoder(args, src_dict, encoder_embed_tokens) decoder = cls.build_decoder(args, tgt_dict, decoder_embed_tokens) return cls(args, encoder, decoder) @classmethod def build_encoder(cls, args, src_dict, embed_tokens): return TransformerPointerGeneratorEncoder(args, src_dict, embed_tokens) @classmethod def build_decoder(cls, args, tgt_dict, embed_tokens): return TransformerPointerGeneratorDecoder(args, tgt_dict, embed_tokens) class TransformerPointerGeneratorEncoder(TransformerEncoder): """ Transformer encoder consisting of *args.encoder_layers* layers. Each layer is a :class:`TransformerEncoderLayer`. The pointer-generator variant adds the source tokens to the encoder output as these are otherwise not passed to the decoder. """ def forward( self, src_tokens, src_lengths: Optional[Tensor] = None, return_all_hiddens: bool = False, token_embeddings: Optional[Tensor] = None ): """ Runs the `forward()` method of the parent Transformer class. Then adds the source tokens into the encoder output tuple. While it might be more elegant that the model would pass the source tokens to the `forward()` method of the decoder too, this would require changes to `SequenceGenerator`. Args: src_tokens (torch.LongTensor): tokens in the source language of shape `(batch, src_len)` src_lengths (torch.LongTensor): lengths of each source sentence of shape `(batch)` return_all_hiddens (bool, optional): also return all of the intermediate hidden states (default: False). token_embeddings (torch.Tensor, optional): precomputed embeddings default `None` will recompute embeddings Returns: namedtuple: - **encoder_out** (Tensor): the last encoder layer's output of shape `(src_len, batch, embed_dim)` - **encoder_padding_mask** (ByteTensor): the positions of padding elements of shape `(batch, src_len)` - **encoder_embedding** (Tensor): the (scaled) embedding lookup of shape `(batch, src_len, embed_dim)` - **encoder_states** (List[Tensor]): all intermediate hidden states of shape `(src_len, batch, embed_dim)`. Only populated if *return_all_hiddens* is True. 
- **src_tokens** (Tensor): input token ids of shape `(batch, src_len)` """ encoder_out = self.forward_scriptable(src_tokens, src_lengths, return_all_hiddens, token_embeddings) # The Pytorch Mobile lite interpreter does not supports returning NamedTuple in # `forward` so we use a dictionary instead. # TorchScript does not support mixed values so the values are all lists. # The empty list is equivalent to None. return { "encoder_out": encoder_out["encoder_out"], # T x B x C "encoder_padding_mask": encoder_out["encoder_padding_mask"], # B x T "encoder_embedding": encoder_out["encoder_embedding"], # B x T x C "encoder_states": encoder_out["encoder_states"], # List[T x B x C] "src_tokens": [src_tokens], # B x T "src_lengths": [], } class TransformerPointerGeneratorDecoder(TransformerDecoder): """ Transformer decoder consisting of *args.decoder_layers* layers. Each layer is a :class:`TransformerDecoderLayer`. The pointer-generator variant mixes the output probabilities with an attention distribution in the output layer. Args: args (argparse.Namespace): parsed command-line arguments dictionary (~fairseq.data.Dictionary): decoding dictionary embed_tokens (torch.nn.Embedding): output embedding """ def __init__(self, args, dictionary, embed_tokens): super().__init__(args, dictionary, embed_tokens, no_encoder_attn=False) # In the pointer-generator model these arguments define the decoder # layer and the number of attention heads that will be averaged to # create the alignment for pointing. self.alignment_heads = args.alignment_heads self.alignment_layer = args.alignment_layer input_embed_dim = embed_tokens.embedding_dim # Generation probabilities / interpolation coefficients are predicted # from the current decoder input embedding and the decoder output, which # is the size of output_embed_dim. p_gen_input_size = input_embed_dim + self.output_embed_dim self.project_p_gens = nn.Linear(p_gen_input_size, 1) nn.init.zeros_(self.project_p_gens.bias) # The dictionary may include a separate entry for an OOV token in each # input position, so that their identity can be restored from the # original source text. self.num_types = len(dictionary) self.num_oov_types = args.source_position_markers self.num_embeddings = self.num_types - self.num_oov_types self.force_p_gen = args.force_generation def forward( self, prev_output_tokens, encoder_out: Optional[Dict[str, List[Tensor]]] = None, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, features_only: bool = False, alignment_layer: Optional[int] = 0, alignment_heads: Optional[int] = 1, src_lengths: Optional[Any] = None, return_all_hiddens: bool = False, ): """ Args: prev_output_tokens (LongTensor): previous decoder outputs of shape `(batch, tgt_len)`, for teacher forcing encoder_out (optional): output from the encoder, used for encoder-side attention incremental_state (dict, optional): dictionary used for storing state during :ref:`Incremental decoding` features_only (bool, optional): only return features without applying output layer (default: False) alignment_layer (int, optional): 0-based index of the layer to be used for pointing (default: 0) alignment_heads (int, optional): number of attention heads to be used for pointing (default: 1) Returns: tuple: - the decoder's output of shape `(batch, tgt_len, vocab)` - a dictionary with any model-specific outputs """ # The normal Transformer model doesn't pass the alignment_layer and # alignment_heads parameters correctly. We use our local variables. 
x, extra = self.extract_features( prev_output_tokens, encoder_out=encoder_out, incremental_state=incremental_state, alignment_layer=self.alignment_layer, alignment_heads=self.alignment_heads, ) if not features_only: # Embedding the tokens again for generation probability prediction, # so that we don't have to reimplement the whole extract_features() # method. if incremental_state is not None: prev_output_tokens = prev_output_tokens[:, -1:] prev_output_embed = self.embed_tokens(prev_output_tokens) prev_output_embed *= self.embed_scale predictors = torch.cat((prev_output_embed, x), 2) p_gens = self.project_p_gens(predictors) p_gens = torch.sigmoid(p_gens.float()) # Torchscript complains if encoder_out or attn are None because # `output_layer()` signature expects tensors instead attn: Optional[Tensor] = extra["attn"][0] assert encoder_out is not None assert attn is not None x = self.output_layer(x, attn, encoder_out["src_tokens"][0], p_gens) return x, extra def output_layer( self, features: Tensor, attn: Tensor, src_tokens: Tensor, p_gens: Tensor ) -> Tensor: """ Project features to the vocabulary size and mix with the attention distributions. """ if self.force_p_gen is not None: p_gens = self.force_p_gen # project back to size of vocabulary if self.adaptive_softmax is None: logits = self.output_projection(features) else: logits = features batch_size = logits.shape[0] output_length = logits.shape[1] assert logits.shape[2] == self.num_embeddings assert src_tokens.shape[0] == batch_size src_length = src_tokens.shape[1] # The final output distribution will be a mixture of the normal output # distribution (softmax of logits) and attention weights. gen_dists = self.get_normalized_probs_scriptable( (logits, None), log_probs=False, sample=None ) gen_dists = torch.mul(gen_dists, p_gens) padding_size = (batch_size, output_length, self.num_oov_types) padding = gen_dists.new_zeros(padding_size) gen_dists = torch.cat((gen_dists, padding), 2) assert gen_dists.shape[2] == self.num_types # Scatter attention distributions to distributions over the extended # vocabulary in a tensor of shape [batch_size, output_length, # vocab_size]. Each attention weight will be written into a location # that is for other dimensions the same as in the index tensor, but for # the third dimension it's the value of the index tensor (the token ID). attn = torch.mul(attn.float(), 1 - p_gens) index = src_tokens[:, None, :] index = index.expand(batch_size, output_length, src_length) attn_dists_size = (batch_size, output_length, self.num_types) attn_dists = attn.new_zeros(attn_dists_size) attn_dists.scatter_add_(2, index, attn.float()) # Final distributions, [batch_size, output_length, num_types]. return gen_dists + attn_dists def get_normalized_probs( self, net_output: Tuple[Tensor, Optional[Dict[str, List[Optional[Tensor]]]]], log_probs: bool, sample: Optional[Dict[str, Tensor]] = None, ): """ Get normalized probabilities (or log probs) from a net's output. Pointer-generator network output is already normalized. """ probs = net_output[0] # Make sure the probabilities are greater than zero when returning log # probabilities. return probs.clamp(1e-10, 1.0).log() if log_probs else probs class Embedding(nn.Embedding): r"""A simple lookup table that stores embeddings of a fixed dictionary and size. This module is often used to store word embeddings and retrieve them using indices. The input to the module is a list of indices, and the output is the corresponding word embeddings. 
This subclass differs from the standard PyTorch Embedding class by allowing additional vocabulary entries that will be mapped to the unknown token embedding. Args: num_embeddings (int): size of the dictionary of embeddings embedding_dim (int): the size of each embedding vector padding_idx (int): Pads the output with the embedding vector at :attr:`padding_idx` (initialized to zeros) whenever it encounters the index. unk_idx (int): Maps all token indices that are greater than or equal to num_embeddings to this index. Attributes: weight (Tensor): the learnable weights of the module of shape (num_embeddings, embedding_dim) initialized from :math:`\mathcal{N}(0, 1)` Shape: - Input: :math:`(*)`, LongTensor of arbitrary shape containing the indices to extract - Output: :math:`(*, H)`, where `*` is the input shape and :math:`H=\text{embedding\_dim}` .. note:: Keep in mind that only a limited number of optimizers support sparse gradients: currently it's :class:`optim.SGD` (`CUDA` and `CPU`), :class:`optim.SparseAdam` (`CUDA` and `CPU`) and :class:`optim.Adagrad` (`CPU`) .. note:: With :attr:`padding_idx` set, the embedding vector at :attr:`padding_idx` is initialized to all zeros. However, note that this vector can be modified afterwards, e.g., using a customized initialization method, and thus changing the vector used to pad the output. The gradient for this vector from :class:`~torch.nn.Embedding` is always zero. """ __constants__ = ["unk_idx"] # Torchscript: Inheriting from Embedding class produces an error when exporting to Torchscript # -> RuntimeError: Unable to cast Python instance to C++ type (compile in debug mode for details # It's happening because max_norm attribute from nn.Embedding is None by default and it cannot be # cast to a C++ type def __init__( self, num_embeddings: int, embedding_dim: int, padding_idx: Optional[int], unk_idx: int, max_norm: Optional[float] = float("inf"), ): super().__init__(num_embeddings, embedding_dim, padding_idx=padding_idx, max_norm=max_norm) self.unk_idx = unk_idx nn.init.normal_(self.weight, mean=0, std=embedding_dim ** -0.5) nn.init.constant_(self.weight[padding_idx], 0) def forward(self, input): input = torch.where( input >= self.num_embeddings, torch.ones_like(input) * self.unk_idx, input ) return nn.functional.embedding( input, self.weight, self.padding_idx, self.max_norm, self.norm_type, self.scale_grad_by_freq, self.sparse ) @register_model_architecture( "transformer_pointer_generator", "transformer_pointer_generator" ) def transformer_pointer_generator(args): args.alignment_heads = getattr(args, "alignment_heads", 1) args.alignment_layer = getattr(args, "alignment_layer", -1) base_architecture(args) if args.alignment_layer < 0: args.alignment_layer = args.decoder_layers + args.alignment_layer @register_model_architecture( "transformer_pointer_generator", "transformer_pointer_generator_iwslt_de_en" ) def transformer_pointer_generator_iwslt_de_en(args): args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 1024) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 4) args.encoder_layers = getattr(args, "encoder_layers", 6) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512) args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 1024) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 4) args.decoder_layers = getattr(args, "decoder_layers", 6) transformer_pointer_generator(args) 
@register_model_architecture( "transformer_pointer_generator", "transformer_pointer_generator_wmt_en_de" ) def transformer_pointer_generator_wmt_en_de(args): transformer_pointer_generator(args) # Transformer pointer-generator with the base Transformer parameters as used in # the "Attention Is All You Need" paper (Vaswani et al., 2017) @register_model_architecture( "transformer_pointer_generator", "transformer_pointer_generator_vaswani_wmt_en_de_big", ) def transformer_pointer_generator_vaswani_wmt_en_de_big(args): args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4096) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16) args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 1024) args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 4096) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16) args.dropout = getattr(args, "dropout", 0.3) transformer_pointer_generator(args) @register_model_architecture( "transformer_pointer_generator", "transformer_pointer_generator_vaswani_wmt_en_fr_big", ) def transformer_pointer_generator_vaswani_wmt_en_fr_big(args): args.dropout = getattr(args, "dropout", 0.1) transformer_pointer_generator_vaswani_wmt_en_de_big(args) @register_model_architecture( "transformer_pointer_generator", "transformer_pointer_generator_wmt_en_de_big" ) def transformer_pointer_generator_wmt_en_de_big(args): args.attention_dropout = getattr(args, "attention_dropout", 0.1) transformer_pointer_generator_vaswani_wmt_en_de_big(args) # default parameters used in tensor2tensor implementation @register_model_architecture( "transformer_pointer_generator", "transformer_pointer_generator_wmt_en_de_big_t2t" ) def transformer_pointer_generator_wmt_en_de_big_t2t(args): args.encoder_normalize_before = getattr(args, "encoder_normalize_before", True) args.decoder_normalize_before = getattr(args, "decoder_normalize_before", True) args.attention_dropout = getattr(args, "attention_dropout", 0.1) args.activation_dropout = getattr(args, "activation_dropout", 0.1) transformer_pointer_generator_vaswani_wmt_en_de_big(args)
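# A minimal standalone sketch (not part of the model above) of the mixture
# computed in TransformerPointerGeneratorDecoder.output_layer(): the generator
# distribution is scaled by p_gen and the attention weights, scaled by
# (1 - p_gen), are scattered onto the (extended-vocabulary) source token ids.
import torch


def toy_pointer_generator_mix(gen_dists, attn, src_tokens, p_gens, num_types):
    # gen_dists:  B x T_out x V_gen softmax over the normal output vocabulary
    # attn:       B x T_out x T_src attention weights used for pointing
    # src_tokens: B x T_src ids (may include per-position OOV markers >= V_gen)
    # p_gens:     B x T_out x 1 generation probabilities in [0, 1]
    bsz, out_len, v_gen = gen_dists.shape
    gen = gen_dists * p_gens
    gen = torch.cat((gen, gen.new_zeros(bsz, out_len, num_types - v_gen)), dim=2)
    pointed = attn * (1 - p_gens)
    index = src_tokens.unsqueeze(1).expand(bsz, out_len, src_tokens.size(1))
    attn_dists = gen.new_zeros(bsz, out_len, num_types)
    attn_dists.scatter_add_(2, index, pointed)
    return gen + attn_dists  # B x T_out x num_types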
23,439
44.163776
102
py
sign-topic
sign-topic-main/examples/speech_text_joint_to_text/criterions/text_guide_cross_entropy_acc.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math import torch import torch.nn.functional as F from fairseq.criterions import FairseqCriterion, register_criterion from fairseq.criterions.label_smoothed_cross_entropy import label_smoothed_nll_loss from fairseq import metrics, utils @register_criterion("guided_label_smoothed_cross_entropy_with_accuracy") class GuidedCrossEntAccCriterion(FairseqCriterion): def __init__( self, task, sentence_avg, guide_alpha, text_input_cost_ratio, label_smoothing, disable_text_guide_update_num=0, attentive_cost_regularization=0, ): """ guide_alpha: alpha to inteplate nll and kd loss text_input_cost_ratio: loss ratio for text only input data label_smoothing: label smoothing ratio disable_text_guide_update_num: only use nll loss for the first N updates attentive_cost_regularization: ratio fo attentive cost """ super().__init__(task) self.alpha = guide_alpha self.attn_beta = attentive_cost_regularization self.sentence_avg = sentence_avg self.eps = label_smoothing self.text_input_cost_ratio = text_input_cost_ratio self.disable_update_num = disable_text_guide_update_num assert self.alpha >= 0 and self.alpha <= 1.0 @staticmethod def add_args(parser): """Add criterion-specific arguments to the parser.""" # fmt: off parser.add_argument('--label-smoothing', default=0., type=float, metavar='D', help='epsilon for label smoothing, 0 means no label smoothing') # fmt: off parser.add_argument('--guide-alpha', default=0., type=float, metavar='D', help='alpha to merge kd cost from text to speech input with ce loss') # fmt: off parser.add_argument('--disable-text-guide-update-num', default=0, type=int, metavar='D', help='disable guided target from text for the first N updates.') parser.add_argument("--attentive-cost-regularization", default=0.0, type=float, metavar='D', help="use encoder attentive loss regularization with cost ratio D") parser.add_argument("--attentive-cost-without-normalize", action='store_true', help="Don't do normalization during attentive cost computation") def forward(self, model, sample, reduce=True): reduction = 'sum' if reduce else 'none' net_input = sample["net_input"] net_output = model(**net_input) attn_cost = None lprobs = model.get_normalized_probs(net_output, log_probs=True) is_dual_input = True if net_input['src_tokens'] is not None and net_input.get('src_txt_tokens') is not None else False target = model.get_targets(sample, net_output) src_token_num = 0 if is_dual_input: # lprobs_spch from speech encoder and lprobs_text from text encoder lprobs_spch, lprobs_text = torch.chunk(lprobs, 2) lprobs_spch.batch_first = lprobs.batch_first lprobs_text.batch_first = lprobs.batch_first speech_loss, speech_nll_loss, speech_correct, speech_total = \ self.guide_loss_and_acc(model, lprobs_spch, lprobs_text, target, reduce=(reduction == 'sum')) text_loss, text_nll_loss, text_correct, text_total = self.compute_loss_and_acc(model, lprobs_text, target, reduction=reduction) loss = (speech_loss + text_loss) nll_loss = (speech_nll_loss + text_nll_loss) correct = speech_correct + text_correct total = speech_total + text_total attn_cost = net_output[1].get('attn_cost') if attn_cost is not None: # attn_cost is batch_first and padding tokens have been masked already src_token_num = attn_cost.ne(0).sum() attn_cost = attn_cost.sum() loss = loss + attn_cost * self.attn_beta else: attn_cost = 0 else: loss, nll_loss, correct, total = 
self.compute_loss_and_acc(model, lprobs, target, reduction=reduction) if sample["net_input"]['src_tokens'] is None: # text input only loss = loss * self.text_input_cost_ratio speech_loss = None speech_nll_loss = None sample_size, logging_output = self.get_logging_output( sample, loss, nll_loss, correct, total, src_token_num, speech_loss, speech_nll_loss, attn_cost, is_dual_input ) return loss, sample_size, logging_output def compute_loss_and_acc(self, model, lprobs, target, reduction='sum'): if not lprobs.batch_first: lprobs = lprobs.transpose(0, 1) lprobs = lprobs.view(-1, lprobs.size(-1)) # -> (B x T) x C target = target.view(-1) loss, nll_loss = label_smoothed_nll_loss( lprobs, target, self.eps, ignore_index=self.padding_idx, reduce=(reduction == 'sum'), ) mask = target.ne(self.padding_idx) correct = torch.sum(lprobs.argmax(1).masked_select(mask).eq(target.masked_select(mask))) total = torch.sum(mask) return loss, nll_loss, correct, total def guide_loss_and_acc(self, model, lprobs, lprobs_teacher, target, reduce=True): """ lprobs_teacher is used as guide for lprobs """ if self.alpha == 0.0 or model.num_updates < self.disable_update_num: return self.compute_loss_and_acc(model, lprobs, target, reduction=('sum' if reduce else 'none')) if not lprobs.batch_first: lprobs = lprobs.transpose(0, 1) lprobs_teacher = lprobs_teacher.transpose(0, 1) lprobs = lprobs.view(-1, lprobs.size(-1)).float() # -> (B x T) x C lprobs_teacher = lprobs_teacher.view(-1, lprobs_teacher.size(-1)).float() # -> (B x T) x C target = target.view(-1) loss = F.nll_loss(lprobs, target, ignore_index=self.padding_idx, reduction='sum' if reduce else 'none') nll_loss = loss probs_teacher = lprobs_teacher.exp().masked_fill_(target.unsqueeze(-1).eq(self.padding_idx), 0) probs_teacher = probs_teacher.detach() guide_loss = -(probs_teacher*lprobs).sum() if reduce else -(probs_teacher*lprobs).sum(-1, keepdim=True) loss = self.alpha*guide_loss + (1.0 - self.alpha)*loss mask = target.ne(self.padding_idx) correct = torch.sum(lprobs.argmax(1).masked_select(mask).eq(target.masked_select(mask))) total = torch.sum(mask) return loss, nll_loss, correct, total def get_logging_output( self, sample, loss, nll_loss, correct, total, src_token_num=0, speech_loss=None, speech_nll_loss=None, attn_cost=None, is_dual_input=False, ): sample_size = ( sample["target"].size(0) if self.sentence_avg else sample["ntokens"] ) mul_size = 2 if is_dual_input else 1 logging_output = { "loss": utils.item(loss.data), # * sample['ntokens'], "nll_loss": utils.item(nll_loss.data), # * sample['ntokens'], "ntokens": sample["ntokens"]*mul_size, "nsentences": sample["target"].size(0)*mul_size, "sample_size": sample_size*mul_size, "correct": utils.item(correct.data), "total": utils.item(total.data), "src_token_num": utils.item(src_token_num.data) if src_token_num > 0 else 0, "nframes": torch.sum(sample["net_input"]["src_lengths"]).item(), } if speech_loss is not None: logging_output["speech_loss"] = utils.item(speech_loss.data) logging_output["speech_nll_loss"] = utils.item(speech_nll_loss.data) logging_output["sample_size_speech_cost"] = sample_size logging_output["speech_attn_loss"] = attn_cost return sample_size*mul_size, logging_output @staticmethod def aggregate_logging_outputs(logging_outputs): """Aggregate logging outputs from data parallel training.""" correct_sum = sum(log.get("correct", 0) for log in logging_outputs) total_sum = sum(log.get("total", 0) for log in logging_outputs) src_token_sum = sum(log.get("src_token_num", 0) for log in logging_outputs) loss_sum 
= sum(log.get("loss", 0) for log in logging_outputs) nll_loss_sum = sum(log.get("nll_loss", 0) for log in logging_outputs) ntokens = sum(log.get("ntokens", 0) for log in logging_outputs) nsentences = sum(log.get("nsentences", 0) for log in logging_outputs) sample_size = sum(log.get("sample_size", 0) for log in logging_outputs) nframes = sum(log.get("nframes", 0) for log in logging_outputs) speech_loss_sum = sum(log.get("speech_loss", 0) for log in logging_outputs) speech_nll_loss_sum = sum(log.get("speech_nll_loss", 0) for log in logging_outputs) speech_attn_loss_sum = sum(log.get("speech_attn_loss", 0) for log in logging_outputs) sample_size_speech = sum(log.get("sample_size_speech_cost", 0) for log in logging_outputs) agg_output = { "loss": loss_sum / sample_size / math.log(2) if sample_size > 0 else 0.0, "nll_loss": nll_loss_sum / sample_size / math.log(2) if sample_size > 0 else 0.0, # if args.sentence_avg, then sample_size is nsentences, and loss # is per-sentence loss; else sample_size is ntokens, and the loss # becomes per-output token loss "speech_loss": speech_loss_sum / sample_size_speech / math.log(2) if sample_size_speech > 0 else 0.0, "speech_nll_loss": speech_nll_loss_sum / sample_size_speech / math.log(2) if sample_size_speech > 0 else 0.0, "speech_attn_loss": speech_attn_loss_sum / src_token_sum / math.log(2) if src_token_sum > 0 else 0.0, "ntokens": ntokens, "nsentences": nsentences, "nframes": nframes, "sample_size": sample_size, "acc": correct_sum * 100.0 / total_sum if total_sum > 0 else 0.0, "correct": correct_sum, "total": total_sum, "src_token_num": src_token_sum, # total is the number of validate tokens } return agg_output @classmethod def reduce_metrics(cls, logging_outputs): """Aggregate logging outputs from data parallel training.""" agg_logging_outputs = cls.aggregate_logging_outputs(logging_outputs) for k, v in agg_logging_outputs.items(): if k in {'nsentences', 'ntokens', 'sample_size'}: continue metrics.log_scalar(k, v, round=3)
11004
48.129464
139
py
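For reference, the core of the guided criterion in text_guide_cross_entropy_acc.py above is an interpolation between the usual hard-target NLL and a soft knowledge-distillation term driven by the text-branch (teacher) distribution. Below is a minimal standalone sketch of that interpolation; the function name, tensor shapes, and pad index are illustrative assumptions, not the fairseq API.

# Minimal sketch of the guided (knowledge-distillation style) loss interpolation
# used by the criterion above. Shapes and names are illustrative assumptions.
import torch
import torch.nn.functional as F

def guided_loss(student_logits, teacher_logits, target, alpha=0.8, pad_idx=1):
    """Interpolate hard-target NLL with a soft KD term from the teacher.

    student_logits, teacher_logits: (N, vocab) unnormalized scores
    target: (N,) gold token ids; positions equal to pad_idx are ignored
    """
    lprobs = F.log_softmax(student_logits, dim=-1)
    nll = F.nll_loss(lprobs, target, ignore_index=pad_idx, reduction="sum")

    # The teacher distribution acts as a soft target; rows at padding
    # positions are zeroed so they contribute nothing to the KD term.
    probs_teacher = F.softmax(teacher_logits, dim=-1).detach()
    probs_teacher = probs_teacher.masked_fill(target.unsqueeze(-1).eq(pad_idx), 0)
    kd = -(probs_teacher * lprobs).sum()

    return alpha * kd + (1.0 - alpha) * nll

# Toy usage: 5 tokens, vocab of 10, the last position carries the pad index.
if __name__ == "__main__":
    torch.manual_seed(0)
    s = torch.randn(5, 10)
    t = torch.randn(5, 10)
    y = torch.tensor([3, 7, 2, 9, 1])
    print(guided_loss(s, t, y).item())

In the criterion itself the teacher distribution comes from the text-encoder branch of the same model and label smoothing is applied to the hard-target term; both are omitted here to keep the sketch short.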
sign-topic
sign-topic-main/examples/speech_text_joint_to_text/models/s2t_dualinputtransformer.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging from collections import namedtuple import torch import torch.nn as nn from fairseq import checkpoint_utils from fairseq import utils from fairseq.models import ( FairseqEncoder, FairseqDecoder, FairseqEncoderDecoderModel, register_model, register_model_architecture, ) from fairseq.models.fairseq_encoder import EncoderOut from fairseq.models.speech_to_text import ( TransformerDecoder, S2TTransformerEncoder, ) from fairseq.models.transformer import TransformerEncoder from fairseq.modules import ( TransformerEncoderLayer, GradMultiply, LayerNorm, ) logger = logging.getLogger(__name__) class SpeechEoSEncoder(FairseqEncoder): def __init__(self, encoder, eos_num, feat_dim, adapter_type="None", adapter_dim=0): super().__init__(None) self.encoder = encoder self.eos_num = eos_num # downsampling rate for speech input feature self.eos_emb = ( nn.Parameter(torch.zeros(1, feat_dim), requires_grad=True) if eos_num > 0 else None ) self.adapter = self.add_adapter(adapter_type, adapter_dim) def add_adapter(self, adapter_type, adapter_dim): def _make_identity(linear, eps=1e-5): assert isinstance(linear, nn.Linear) linear.weight.data.mul_(eps) linear.weight.data.fill_diagonal_(1.0) if linear.bias is not None: linear.bias.data.mul_(eps) adapter = None if adapter_type == "Linear": assert adapter_dim > 0 adapter = nn.Sequential( nn.Linear(adapter_dim, adapter_dim), LayerNorm(adapter_dim) ) # initialize the adapter as identity matrix first _make_identity(adapter[0]) elif adapter_type == "MLP": assert adapter_dim > 0 # assume the model is pre-norm model adapter = nn.Sequential( nn.Linear(adapter_dim, 2 * adapter_dim), nn.ReLU(), nn.Linear(2 * adapter_dim, adapter_dim), LayerNorm(adapter_dim), ) _make_identity(adapter[0]) _make_identity(adapter[2]) return adapter def add_eos(self, src_tokens, src_lengths): bsz, max_seq_len, fdim = src_tokens.size() if self.eos_num > 0: src_token_eos = torch.zeros( [bsz, max_seq_len + self.eos_num, fdim], dtype=src_tokens.dtype, device=src_tokens.device, ) src_token_eos[:, :max_seq_len] = src_tokens for bi in range(bsz): src_token_eos[bi][ src_lengths[bi] : src_lengths[bi] + self.eos_num ] = self.eos_emb.expand(self.eos_num, fdim) src_lengths = src_lengths + self.eos_num src_tokens = src_token_eos return src_tokens, src_lengths def apply_adapter(self, enc_out): if self.adapter is None: return enc_out rst = self.adapter(enc_out.encoder_out) if enc_out.encoder_padding_mask is not None: rst.masked_fill_( enc_out.encoder_padding_mask.transpose(0, 1).unsqueeze(-1), 0 ) return EncoderOut( encoder_out=rst, encoder_padding_mask=enc_out.encoder_padding_mask, encoder_embedding=enc_out.encoder_embedding, encoder_states=enc_out.encoder_states, src_tokens=enc_out.src_tokens, src_lengths=enc_out.src_lengths, ) def forward(self, src_tokens, src_lengths=None, return_all_hiddens=False, **kwargs): """ src_tokens: padded tensor (B, T, C * feat) src_lengths: tensor of original lengths of input utterances (B,) """ src_tokens, src_lengths = self.add_eos(src_tokens, src_lengths) enc_out = self.encoder(src_tokens, src_lengths, return_all_hiddens) enc_out = self.apply_adapter(enc_out) return enc_out def reorder_encoder_out(self, encoder_out, new_order): return self.encoder.reorder_encoder_out(encoder_out, new_order) class DualInputEncoder(FairseqEncoder): def __init__( self, args, spch_encoder, text_encoder, 
dictionary, cross_attentive_loss_before_last_layer=-1, ): super().__init__(dictionary) self.spch_encoder = spch_encoder self.text_encoder = text_encoder self.enc_grad_mult = args.enc_grad_mult self.cross_attentive_loss_before_last_layer = ( cross_attentive_loss_before_last_layer ) self.use_cross_attentive_loss = ( False if cross_attentive_loss_before_last_layer <= -1 else True ) self.enc2_along_grad_mult = args.enc2_along_grad_mult @classmethod def set_shared_layer(cls, share_level, src_layer, tgt_layer): """ share parameters from tgt_layer to src_layer share_level: 0: share everything 1: share everything but different model 2: share weight but not bias, layernorm """ if share_level == 0: return tgt_layer if isinstance(src_layer, nn.Linear): return tgt_layer if isinstance(src_layer, TransformerEncoderLayer): assert src_layer.embed_dim == tgt_layer.embed_dim assert src_layer.normalize_before == tgt_layer.normalize_before if share_level == 1: src_layer.fc1 = tgt_layer.fc1 src_layer.fc2 = tgt_layer.fc2 src_layer.self_attn = tgt_layer.self_attn src_layer.final_layer_norm = tgt_layer.final_layer_norm src_layer.self_attn_layer_norm = tgt_layer.self_attn_layer_norm src_layer.layernorm_embedding = tgt_layer.layernorm_embedding else: src_layer.fc1.weight = tgt_layer.fc1.weight src_layer.fc2.weight = tgt_layer.fc2.weight src_layer.self_attn.k_proj.weight = tgt_layer.self_attn.k_proj.weight src_layer.self_attn.v_proj.weight = tgt_layer.self_attn.v_proj.weight src_layer.self_attn.q_proj.weight = tgt_layer.self_attn.q_proj.weight src_layer.self_attn.out_proj.weight = ( tgt_layer.self_attn.out_proj.weight ) else: if share_level == 1: return tgt_layer return src_layer @classmethod def build_spch_encoder(cls, args): cfg = { "input_feat_per_channel": args.input_feat_per_channel, "input_channels": args.input_channels, "conv_kernel_sizes": args.conv_kernel_sizes, "conv_channels": args.conv_channels, "encoder_embed_dim": args.encoder_embed_dim, "encoder_ffn_embed_dim": args.encoder_ffn_embed_dim, "encoder_layers": args.speech_encoder_layers, "encoder_layerdrop": args.encoder_layerdrop, "encoder_attention_heads": args.encoder_attention_heads, "max_source_positions": args.max_source_positions, "dropout": args.dropout, "encoder_normalize_before": args.encoder_normalize_before, "activation_dropout": args.activation_dropout, "attention_dropout": args.attention_dropout, "activation_fn": args.activation_fn, "layernorm_embedding": args.layernorm_embedding, "no_token_positional_embeddings": args.no_token_positional_embeddings, "no_scale_embedding": args.no_scale_embedding, "quant_noise_pq": args.quant_noise_pq, "encoder_freezing_updates": 0, } model_args = namedtuple("args", cfg.keys())(*cfg.values()) spch_encoder = S2TTransformerEncoder(model_args) if args.add_speech_eos: spch_encoder = SpeechEoSEncoder( spch_encoder, 2 * len(args.conv_kernel_sizes.split(",")), args.input_feat_per_channel, adapter_type=getattr(args, "speech_encoder_adapter_type", "None"), adapter_dim=args.encoder_embed_dim, ) return spch_encoder @classmethod def build_text_encoder(cls, args, src_dictionary, spch_encoder): if args.encoder_shared_layers > 0: mx_shared_layers = ( args.speech_encoder_layers if args.speech_encoder_layers < args.text_encoder_layers else args.text_encoder_layers ) args.encoder_shared_layers = ( args.encoder_shared_layers if args.encoder_shared_layers <= mx_shared_layers else mx_shared_layers ) cfg = { "encoder_embed_dim": args.encoder_text_embed_dim, "encoder_ffn_embed_dim": args.encoder_ffn_embed_dim, "encoder_layers": 
args.text_encoder_layers, "encoder_layerdrop": args.encoder_layerdrop, "encoder_attention_heads": args.encoder_attention_heads, "encoder_learned_pos": args.encoder_learned_pos, "max_source_positions": args.max_source_positions, "dropout": args.dropout, "encoder_normalize_before": args.encoder_normalize_before, "activation_dropout": args.activation_dropout, "attention_dropout": args.attention_dropout, "activation_fn": args.activation_fn, "adaptive_input": args.adaptive_input, "no_token_positional_embeddings": args.no_token_positional_embeddings, "no_scale_embedding": args.no_scale_embedding, "quant_noise_pq": args.quant_noise_pq, } model_args = namedtuple("args", cfg.keys())(*cfg.values()) enc_emb = nn.Embedding( len(src_dictionary), model_args.encoder_embed_dim, src_dictionary.pad() ) text_encoder = TransformerEncoder(model_args, src_dictionary, enc_emb) if args.add_speech_eos: spch_encoder = spch_encoder.encoder if args.encoder_shared_layers > 0: text_encoder.layer_norm = cls.set_shared_layer( args.encoder_shared_layer_level, text_encoder.layer_norm, spch_encoder.layer_norm, ) for i, ly in enumerate( spch_encoder.transformer_layers[-args.encoder_shared_layers :] ): ly_id = i + args.text_encoder_layers - args.encoder_shared_layers if not isinstance(text_encoder.layers[ly_id], type(ly)): if text_encoder.layers[ly_id]._get_name() not in ('TransformerEncoderLayerBase', 'TransformerEncoderLayer'): raise ValueError("The shared layers are expected from the same class") text_encoder.layers[ly_id] = cls.set_shared_layer( args.encoder_shared_layer_level, text_encoder.layers[ly_id], ly, ) return text_encoder def mult_rst_grad(self, rst, ratio): assert isinstance(rst, dict) # instead of EncoderOut assert len(rst["encoder_out"]) == 1 rst["encoder_out"][0] = GradMultiply.apply(rst["encoder_out"][0], ratio) return rst def process_attentive_loss_states(self, rst, interstates): assert isinstance(rst, dict) # instead of EncoderOut rst["encoder_states"] = interstates return rst def forward( self, src_tokens, src_lengths=None, src_txt_tokens=None, src_txt_lengths=None, **kwargs ): """ Args: src_tokens: padded tensor (B, T, C * feat) src_lengths: tensor of original lengths of input utterances (speech) (B,) src_txt_tokens: padded tensor (B, T) src_txt_lengths: tensor of original lengths of input utterances (text) (B,) """ # src_tokens only: inference # src_tokens, src_lengths: speech only training # src_txt_tokens, src_txt_lengths: text only training # all valid: speech + text training if src_tokens is None and src_txt_tokens is None: raise ValueError( "src_tokens and src_txt_tokens cannot be None at the same time" ) ret1 = None ret2 = None return_all_hiddens = False if src_tokens is not None: if ( self.use_cross_attentive_loss and src_txt_tokens is not None ): # remove self.training so we can get attn score during validation step return_all_hiddens = True ret1 = self.spch_encoder( src_tokens, src_lengths, return_all_hiddens=return_all_hiddens ) if self.use_cross_attentive_loss and src_txt_tokens is not None: assert self.cross_attentive_loss_before_last_layer < len( ret1["encoder_states"] ) ret1 = self.process_attentive_loss_states( ret1, ret1["encoder_states"][ -self.cross_attentive_loss_before_last_layer - 1 ], ) if src_txt_tokens is not None: ret2 = self.text_encoder( src_txt_tokens, src_txt_lengths, return_all_hiddens=return_all_hiddens ) if return_all_hiddens: if self.cross_attentive_loss_before_last_layer == len( self.text_encoder.layers ): text_embedding, _ = self.text_encoder.forward_embedding( 
src_txt_tokens ) text_embedding = text_embedding.transpose(0, 1) ret2 = self.process_attentive_loss_states(ret2, text_embedding) else: assert self.cross_attentive_loss_before_last_layer < len( self.text_encoder.layers ) ret2 = self.process_attentive_loss_states( ret2, ret2["encoder_states"][ -self.cross_attentive_loss_before_last_layer - 1 ], ) def merge_output(rst1, rst2): if rst1 is None: if not (self.enc2_along_grad_mult == 1.0 or self.training): rst2 = self.mult_rst_grad(rst2, self.enc2_along_grad_mult) return rst2 if rst2 is None: return rst1 if self.enc_grad_mult != 1.0 and self.training: rst1 = self.mult_rst_grad(rst1, self.enc_grad_mult) rst2 = self.mult_rst_grad(rst2, self.enc_grad_mult) rst = (rst1, rst2) return rst return merge_output(ret1, ret2) def reorder_encoder_out(self, encoder_out, new_order): assert self.training is False # used for inference only return self.spch_encoder.reorder_encoder_out(encoder_out, new_order) # TransformerMultiInputDecoder: take one or two encoder inputs class TransformerMultiInputDecoder(FairseqDecoder): def __init__( self, dictionary, spch_decoder, text_decoder, compute_cross_attentive_loss=False, cross_attentive_loss_with_norm=True, cross_attentive_loss_reverse=False, ): super().__init__(dictionary) self.spch_decoder = spch_decoder self.text_decoder = text_decoder self.compute_cross_attentive_loss = compute_cross_attentive_loss self.cross_attentive_loss_with_norm = cross_attentive_loss_with_norm self.cross_attentive_loss_reverse = cross_attentive_loss_reverse @classmethod def share_spchdecoder(cls, task_args, text_decoder, spch_decoder): if task_args.decoder_shared_layer_level == 0: return text_decoder assert text_decoder.embed_tokens == spch_decoder.embed_tokens spch_decoder.project_in_dim = text_decoder.project_in_dim spch_decoder.embed_positions = text_decoder.embed_positions spch_decoder.layernorm_embedding = text_decoder.layernorm_embedding spch_decoder.project_out_dim = text_decoder.project_out_dim spch_decoder.adaptive_softmax = text_decoder.adaptive_softmax if task_args.decoder_shared_layer_level == 1: spch_decoder.output_projection = text_decoder.output_projection spch_decoder.layer_norm = text_decoder.layer_norm else: # 2 spch_decoder.output_projection.weight = ( text_decoder.output_projection.weight ) for i, ly in enumerate(text_decoder.layers): sly = spch_decoder.layers[i] sly.self_attn = ly.self_attn sly.self_attn_layer_norm = ly.self_attn_layer_norm # sly.encoder_attn = ly.encoder_attn if ( task_args.decoder_shared_layer_level == 1 ): # share everything, but under different models sly.encoder_attn = ly.encoder_attn sly.encoder_attn_layer_norm = ly.encoder_attn_layer_norm sly.fc1 = ly.fc1 sly.fc2 = ly.fc2 sly.final_layer_norm = ly.final_layer_norm else: # task_args.decoder_shared_layer_level == 2: #separated encoder_attn_layer_norm and bias sly.encoder_attn.k_proj.weight = ly.encoder_attn.k_proj.weight sly.encoder_attn.v_proj.weight = ly.encoder_attn.v_proj.weight sly.encoder_attn.q_proj.weight = ly.encoder_attn.q_proj.weight sly.encoder_attn.out_proj.weight = ly.encoder_attn.out_proj.weight sly.fc1.weight = ly.fc1.weight sly.fc2.weight = ly.fc2.weight return spch_decoder def cross_attentive_loss( self, teacher_states, student_states, teacher_masking, student_masking, eps=1e-6 ): x = teacher_states.transpose(0, 1) # from T X B X D to B X T X D y = student_states.transpose(0, 1) if self.cross_attentive_loss_with_norm: x = x / (x.norm(dim=2, keepdim=True) + eps) y = y / (y.norm(dim=2, keepdim=True) + eps) dim = x.size(-1) # lengths: 
batch X seqLen sim_scores_xy = torch.bmm(x, y.transpose(1, 2)) # batch X lenx X leny ] if y.dtype == torch.float16: sim_scores_xy = sim_scores_xy.float() y = y.float() x = x.float() if teacher_masking != []: assert len(teacher_masking) == 1 sim_scores_xy = sim_scores_xy.masked_fill( teacher_masking[0].unsqueeze(-1), float("-inf") ) if student_masking != []: sim_scores_xy = sim_scores_xy.masked_fill( student_masking[0].unsqueeze(1), float("-inf") ) # do masking y_weights = utils.softmax(sim_scores_xy, dim=-1) if teacher_masking != []: y_weights = y_weights.masked_fill(teacher_masking[0].unsqueeze(-1), 0) x_reconstruct_from_y = torch.bmm(y_weights, y) sim_scores_xx = torch.bmm(x, x.transpose(1, 2)) # batch X lenx X lenx ] x_weights = utils.softmax(sim_scores_xx, dim=-1) if teacher_masking != []: x_weights = x_weights.masked_fill(teacher_masking[0].unsqueeze(-1), 0) # no gradient for teacher state x_reconstruct_from_x = torch.bmm(x_weights, x).detach() cost = (x_reconstruct_from_x - x_reconstruct_from_y).norm(dim=2) if teacher_masking != []: cost = cost.masked_fill(teacher_masking[0], 0) if not self.cross_attentive_loss_with_norm: cost = cost / dim return cost def forward( self, prev_output_tokens, encoder_out, incremental_state=None, has_txt_input=False, **kwargs ): """ Args: prev_output_tokens (LongTensor): previous decoder outputs of shape `(batch, tgt_len)`, for input feeding/teacher forcing. If there are two or more input during training, they will share the same prev_output_tokens encoder_out (tuple[Tensor]): output from the encoder, used for encoder-side attention. It will be tuple if there are more inputs, but a tensor if only one input incremental_state ([dict]): dictionary used for storing state during :ref:`Incremental decoding`. It is only valid for inference, only from single input Returns: tuple: - the last decoder layer's output of shape `(batch, tgt_len, vocab)`. 
If there are N inputs, batch will be N bigger than a single input - the last decoder layer's attention weights of shape `(batch, tgt_len, src_len)` """ assert not isinstance(encoder_out, EncoderOut) if isinstance(encoder_out, tuple): # training with mulitple input rst = [] assert len(encoder_out) == 2 for i, eo in enumerate(encoder_out): assert incremental_state is None if i == 0: rst.append( self.spch_decoder(prev_output_tokens, eo, incremental_state) ) else: rst.append( self.text_decoder(prev_output_tokens, eo, incremental_state) ) dec_out = torch.cat([r[0] for r in rst], dim=0) attn_cost = None if self.compute_cross_attentive_loss: assert isinstance(encoder_out[0], dict) if self.cross_attentive_loss_reverse: attn_cost = self.cross_attentive_loss( teacher_states=encoder_out[1]["encoder_states"], # text_states student_states=encoder_out[0]["encoder_states"], # spch_states teacher_masking=encoder_out[1]["encoder_padding_mask"], student_masking=encoder_out[0]["encoder_padding_mask"], ) else: attn_cost = self.cross_attentive_loss( teacher_states=encoder_out[0]["encoder_states"], # spch_states student_states=encoder_out[1]["encoder_states"], # text_states teacher_masking=encoder_out[0]["encoder_padding_mask"], student_masking=encoder_out[1]["encoder_padding_mask"], ) return (dec_out, {"attn_cost": attn_cost}) else: # inference or training with one input if has_txt_input: return self.text_decoder( prev_output_tokens, encoder_out, incremental_state ) return self.spch_decoder(prev_output_tokens, encoder_out, incremental_state) # Note: # dual input transformer: # encoder: S2TTransformerEncoder for speech + TransformerEncoder for text # decoder: TransformerDecoder for text @register_model("dual_input_s2t_transformer") class DualInputS2TTransformerModel(FairseqEncoderDecoderModel): def __init__(self, encoder, decoder): super().__init__(encoder, decoder) self.num_updates = 0 def max_positions(self): return None # it is provided in task @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" # encoder 1: S2TTransformerEncoder for speech parser.add_argument( "--conv-kernel-sizes", type=str, metavar="N", help="kernel sizes of Conv1d subsampling layers", ) parser.add_argument( "--conv-channels", type=int, metavar="N", help="# of channels in Conv1d subsampling layers", ) parser.add_argument( "--enc-output-dim", type=int, metavar="N", help=""" encoder output dimension, can be None. 
If specified, projecting the transformer output to the specified dimension""", ) # standard Transformer parser.add_argument( "--activation-fn", type=str, default="relu", choices=utils.get_available_activation_fns(), help="activation function to use", ) parser.add_argument( "--dropout", type=float, metavar="D", help="dropout probability" ) parser.add_argument( "--attention-dropout", type=float, metavar="D", help="dropout probability for attention weights", ) parser.add_argument( "--activation-dropout", "--relu-dropout", type=float, metavar="D", help="dropout probability after activation in FFN.", ) parser.add_argument( "--encoder-embed-dim", type=int, metavar="N", help="encoder embedding dimension", ) parser.add_argument( "--encoder-text-embed-dim", type=int, metavar="N", help="encoder text embedding dimension", ) parser.add_argument( "--encoder-ffn-embed-dim", type=int, metavar="N", help="encoder embedding dimension for FFN", ) parser.add_argument( "--encoder-attention-heads", type=int, metavar="N", help="num encoder attention heads", ) parser.add_argument( "--decoder-embed-dim", type=int, metavar="N", help="decoder embedding dimension", ) parser.add_argument( "--decoder-ffn-embed-dim", type=int, metavar="N", help="decoder embedding dimension for FFN", ) parser.add_argument( "--decoder-layers", type=int, metavar="N", help="num decoder layers" ) parser.add_argument( "--decoder-attention-heads", type=int, metavar="N", help="num decoder attention heads", ) parser.add_argument( "--layernorm-embedding", action="store_true", help="add layernorm to embedding", ) parser.add_argument( "--no-scale-embedding", action="store_true", help="if True, dont scale embeddings", ) # non-standard transformer parameters parser.add_argument( "--speech-encoder-layers", type=int, metavar="N", help="num speech encoder layers", ) parser.add_argument( "--text-encoder-layers", type=int, metavar="N", help="num text encoder layers", ) parser.add_argument( "--encoder-shared-layers", type=int, metavar="N", help="num shared encoder layers", ) parser.add_argument( "--encoder-shared-layer-level", type=int, metavar="N", default=0, choices=[0, 1, 2], help="share layer level 0: all share 1: all share with separate model 2: share weight but not bias and layernorm", ) parser.add_argument( "--decoder-shared-layer-level", default=0, choices=[0, 1, 2], type=int, metavar="N", help="0: share everything; 1: share everything with different model 2: no share layer_norm and bias", ) ### parser.add_argument( "--text-input-cost-ratio", type=float, default=1.0, metavar="V", help="text input cost ratio relative to speech input cost", ) parser.add_argument( "--init-scale", type=float, default=1.0, metavar="V", help="scale the initial weight by given factor", ) parser.add_argument( "--enc-grad-mult", type=float, metavar="V", default=1.0, help="multiply enc1 and enc2 gradient by V", ) parser.add_argument( "--enc2-along-grad-mult", type=float, metavar="V", default=1.0, help="multiply enc2 gradient by V if only enc2 is used", ) parser.add_argument( "--load-pretrain-encoder", type=str, default="", metavar="EXPR", help=""" path to the pretrained encoder """, ) parser.add_argument( "--load-pretrain-speech-encoder", type=str, default="", metavar="EXPR", help=""" path to the pretrained speech encoder """, ) parser.add_argument( "--load-pretrain-text-encoder", type=str, default="", metavar="EXPR", help=""" path to the pretrained text encoder """, ) parser.add_argument( "--load-pretrain-text-encoder-last", type=str, default="", metavar="EXPR", help=""" 
path to the pretrained text encoder """, ) parser.add_argument( "--load-pretrain-decoder", type=str, metavar="EXPR", default="", help=""" path to the pretrained encoder """, ) parser.add_argument( "--add-speech-eos", action="store_true", help="add eos token at the end of input feature", ) parser.add_argument( "--speech-encoder-adapter-type", type=str, metavar="EXPR", default="None", choices=["None", "Linear", "MLP"], help="add speech encoder adapter", ) @classmethod def build_encoder(cls, args, task): spch_encoder = DualInputEncoder.build_spch_encoder(args) text_encoder = DualInputEncoder.build_text_encoder( args, task.src_dict, spch_encoder ) cross_attentive_loss_before_last_layer = ( 0 if getattr(args, "attentive_cost_regularization", 0.0) > 0.0 else -1 ) encoder = DualInputEncoder( args, spch_encoder, text_encoder, task.src_dict, cross_attentive_loss_before_last_layer, ) if args.init_scale != 1.0: with torch.no_grad(): for param in encoder.parameters(): param.data.mul_(args.init_scale) if args.load_pretrain_text_encoder != "": checkpoint_utils.load_pretrained_component_from_model( text_encoder, args.load_pretrain_text_encoder ) if args.load_pretrain_speech_encoder != "": if hasattr(spch_encoder, "encoder"): checkpoint_utils.load_pretrained_component_from_model( spch_encoder.encoder, args.load_pretrain_speech_encoder ) else: checkpoint_utils.load_pretrained_component_from_model( spch_encoder, args.load_pretrain_speech_encoder ) if ( args.load_pretrain_text_encoder_last != "" ): # if share encoder, speech encoder parameters will be used. # It provides a chance to use pre-trained mt encoder instead checkpoint_utils.load_pretrained_component_from_model( text_encoder, args.load_pretrain_text_encoder_last ) if args.load_pretrain_encoder != "": checkpoint_utils.load_pretrained_component_from_model( encoder, args.load_pretrain_encoder ) return encoder @classmethod def build_decoder(cls, args, task): dec_cfg = { "decoder_layerdrop": args.decoder_layerdrop, "share_decoder_input_output_embed": args.share_decoder_input_output_embed, "decoder_embed_dim": args.decoder_embed_dim, "max_target_positions": args.max_target_positions, "dropout": args.dropout, "encoder_learned_pos": args.encoder_learned_pos, "decoder_learned_pos": args.decoder_learned_pos, "layernorm_embedding": args.layernorm_embedding, "decoder_normalize_before": args.decoder_normalize_before, "activation_dropout": args.activation_dropout, "attention_dropout": args.attention_dropout, "decoder_ffn_embed_dim": args.decoder_ffn_embed_dim, "decoder_layers": args.decoder_layers, "decoder_attention_heads": args.decoder_attention_heads, "decoder_output_dim": args.decoder_embed_dim, "no_scale_embedding": args.no_scale_embedding, "adaptive_input": args.adaptive_input, "quant_noise_pq": args.quant_noise_pq, "adaptive_softmax_cutoff": args.adaptive_softmax_cutoff, "tie_adaptive_weights": args.tie_adaptive_weights, "no_token_positional_embeddings": args.no_token_positional_embeddings, "encoder": {"embed_dim":args.encoder_embed_dim} } dec_cfg = namedtuple("args", dec_cfg.keys())(*dec_cfg.values()) dec_emb = nn.Embedding( len(task.target_dictionary), args.decoder_embed_dim, task.target_dictionary.pad(), ) compute_cross_attentive_loss = ( True if getattr(args, "attentive_cost_regularization", 0.0) > 0.0 else False ) cross_attentive_loss_without_norm = getattr( args, "attentive_cost_without_normalize", False ) cross_attentive_loss_reverse = ( False # getattr(args, "attentive_cost_reverse", False) ) text_decoder = TransformerDecoder(dec_cfg, 
task.target_dictionary, dec_emb) spch_decoder = TransformerDecoder(dec_cfg, task.target_dictionary, dec_emb) spch_decoder = TransformerMultiInputDecoder.share_spchdecoder( args, text_decoder, spch_decoder ) decoder = TransformerMultiInputDecoder( dictionary=task.target_dictionary, spch_decoder=spch_decoder, text_decoder=text_decoder, compute_cross_attentive_loss=compute_cross_attentive_loss, cross_attentive_loss_with_norm=True if not cross_attentive_loss_without_norm else False, cross_attentive_loss_reverse=cross_attentive_loss_reverse, ) if args.init_scale != 1.0: with torch.no_grad(): for param in decoder.parameters(): param.data.mul_(args.init_scale) if args.load_pretrain_decoder != "": try: checkpoint_utils.load_pretrained_component_from_model( decoder, args.load_pretrain_decoder ) except RuntimeError: checkpoint_utils.load_pretrained_component_from_model( decoder.text_decoder, args.load_pretrain_decoder ) if args.decoder_shared_layer_level > 0: checkpoint_utils.load_pretrained_component_from_model( decoder.spch_decoder, args.load_pretrain_decoder ) return decoder @classmethod def build_model(cls, args, task): """Build a new model instance.""" # make sure that all args are properly defaulted # (in case there are any new ones) dualinputs2ttransformer_base(args) encoder = cls.build_encoder(args, task) decoder = cls.build_decoder(args, task) return cls(encoder, decoder) def get_normalized_probs(self, net_output, log_probs, sample=None): # net_output['encoder_out'] is a (B, T, D) tensor lprobs = super().get_normalized_probs(net_output, log_probs, sample) lprobs.batch_first = True return lprobs def set_num_updates(self, num_updates): """Set the number of parameters updates.""" super().set_num_updates(num_updates) self.num_updates = num_updates def forward( self, src_tokens, src_lengths, prev_output_tokens, use_encoder_outputs=False, src_txt_tokens=None, src_txt_lengths=None, mode="sup_speech", **kwargs ): """ Run the forward pass for an encoder-decoder model. First feed a batch of source tokens through the encoder. 
Then, feed the encoder output and previous decoder outputs (i.e., teacher forcing) to the decoder to produce the next outputs:: encoder_out = self.encoder(src_tokens, src_lengths) return self.decoder(prev_output_tokens, encoder_out) Args: src_tokens (LongTensor): tokens in the source language of shape `(batch, src_len)` src_lengths (LongTensor): source sentence lengths of shape `(batch)` prev_output_tokens (LongTensor): previous decoder outputs of shape `(batch, tgt_len)`, for teacher forcing mode = 'sup_speech' or 'text' Returns: tuple: - the decoder's output of shape `(batch, tgt_len, vocab)` - a dictionary with any model-specific outputs """ if mode == "text": assert src_txt_tokens is None src_txt_tokens = src_tokens src_txt_lengths = src_lengths src_tokens = None src_lengths = None encoder_out = self.encoder( src_tokens, src_lengths=src_lengths, src_txt_tokens=src_txt_tokens, src_txt_lengths=src_txt_lengths, **kwargs ) has_txt_input = True if src_txt_tokens is not None else False decoder_out = self.decoder( prev_output_tokens, encoder_out=encoder_out, has_txt_input=has_txt_input, **kwargs ) if use_encoder_outputs: return decoder_out, encoder_out return decoder_out @register_model_architecture( "dual_input_s2t_transformer", "dualinputs2ttransformer_base" ) def dualinputs2ttransformer_base(args): args.encoder_freezing_updates = getattr(args, "encoder_freezing_updates", 0) # Convolutional subsampler args.input_feat_per_channel = getattr(args, "input_feat_per_channel", 80) args.conv_kernel_sizes = getattr(args, "conv_kernel_sizes", "5,5") args.conv_channels = getattr(args, "conv_channels", 1024) # Transformer args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) args.encoder_text_embed_dim = getattr( args, "encoder_text_embed_dim", args.encoder_embed_dim ) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8) args.encoder_normalize_before = getattr(args, "encoder_normalize_before", True) args.encoder_layerdrop = getattr(args, "encoder_layerdrop", 0) args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim) args.decoder_ffn_embed_dim = getattr( args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim ) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8) args.decoder_normalize_before = getattr(args, "decoder_normalize_before", True) args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False) args.dropout = getattr(args, "dropout", 0.1) args.attention_dropout = getattr(args, "attention_dropout", args.dropout) args.activation_dropout = getattr(args, "activation_dropout", args.dropout) args.activation_fn = getattr(args, "activation_fn", "relu") args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None) args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0) args.tie_adaptive_weights = getattr(args, "tie_adaptive_weights", False) args.share_decoder_input_output_embed = getattr( args, "share_decoder_input_output_embed", False ) args.no_token_positional_embeddings = getattr( args, "no_token_positional_embeddings", False ) args.adaptive_input = getattr(args, "adaptive_input", False) args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0.0) args.decoder_output_dim = getattr( args, "decoder_output_dim", args.decoder_embed_dim ) args.layernorm_embedding = getattr(args, "layernorm_embedding", False) args.no_scale_embedding 
= getattr(args, "no_scale_embedding", False) args.quant_noise_pq = getattr(args, "quant_noise_pq", 0) args.speech_encoder_layers = getattr(args, "speech_encoder_layers", 10) args.text_encoder_layers = getattr(args, "text_encoder_layers", 6) args.encoder_shared_layers = getattr(args, "encoder_shared_layers", 0) args.decoder_layers = getattr(args, "decoder_layers", 6) args.add_speech_eos = getattr(args, "add_speech_eos", False) @register_model_architecture("dual_input_s2t_transformer", "dualinputs2ttransformer_s") def dualinputs2ttransformer_s(args): args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 256) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 256 * 4) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 4) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 4) args.dropout = getattr(args, "dropout", 0.1) args.speech_encoder_layers = getattr(args, "speech_encoder_layers", 7) args.text_encoder_layers = getattr(args, "text_encoder_layers", 7) args.decoder_layers = getattr(args, "decoder_layers", 7) dualinputs2ttransformer_base(args) @register_model_architecture("dual_input_s2t_transformer", "dualinputs2ttransformer_m") def dualinputs2ttransformer_m(args): args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 512 * 4) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8) args.dropout = getattr(args, "dropout", 0.15) args.speech_encoder_layers = getattr(args, "speech_encoder_layers", 10) args.text_encoder_layers = getattr(args, "text_encoder_layers", 6) args.decoder_layers = getattr(args, "decoder_layers", 6) dualinputs2ttransformer_base(args) @register_model_architecture("dual_input_s2t_transformer", "dualinputs2ttransformer_b") def dualinputs2ttransformer_b(args): args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 768) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 768 * 4) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 12) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 12) args.dropout = getattr(args, "dropout", 0.15) args.speech_encoder_layers = getattr(args, "speech_encoder_layers", 12) args.text_encoder_layers = getattr(args, "text_encoder_layers", 6) args.decoder_layers = getattr(args, "decoder_layers", 6) dualinputs2ttransformer_base(args) @register_model_architecture("dual_input_s2t_transformer", "dualinputs2ttransformer_l") def dualinputs2ttransformer_l(args): args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 1024 * 4) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16) args.dropout = getattr(args, "dropout", 0.2) args.speech_encoder_layers = getattr(args, "speech_encoder_layers", 12) args.text_encoder_layers = getattr(args, "text_encoder_layers", 6) args.decoder_layers = getattr(args, "decoder_layers", 6) dualinputs2ttransformer_base(args)
45047
40.177331
128
py
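For reference, the attentive regularizer in s2t_dualinputtransformer.py above asks the speech (student) encoder states to reconstruct the text (teacher) encoder states through attention at least as well as the teacher reconstructs itself. A minimal sketch of that computation, assuming batch-first tensors and no padding masks:

# Minimal sketch of the cross-attentive regularization computed by
# TransformerMultiInputDecoder.cross_attentive_loss above. Batch-first
# tensors without padding masks are a simplifying assumption.
import torch

def cross_attentive_cost(teacher, student, eps=1e-6):
    """teacher: (B, T_t, D), student: (B, T_s, D) -> per-position cost (B, T_t)."""
    x = teacher / (teacher.norm(dim=2, keepdim=True) + eps)
    y = student / (student.norm(dim=2, keepdim=True) + eps)

    # Reconstruct each teacher position from the student states via attention.
    attn_xy = torch.softmax(torch.bmm(x, y.transpose(1, 2)), dim=-1)   # (B, T_t, T_s)
    x_from_y = torch.bmm(attn_xy, y)

    # Reference reconstruction of the teacher from itself; no gradient
    # flows back through the reference.
    attn_xx = torch.softmax(torch.bmm(x, x.transpose(1, 2)), dim=-1)   # (B, T_t, T_t)
    x_from_x = torch.bmm(attn_xx, x).detach()

    return (x_from_x - x_from_y).norm(dim=2)

if __name__ == "__main__":
    torch.manual_seed(0)
    cost = cross_attentive_cost(torch.randn(2, 7, 16), torch.randn(2, 11, 16))
    print(cost.shape, cost.sum().item())

In the model the same computation additionally applies the encoder padding masks before the softmaxes and can skip the per-state normalization; those branches are left out of this sketch.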
sign-topic
sign-topic-main/examples/speech_text_joint_to_text/models/s2t_dualinputxmtransformer.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import copy import torch.nn as nn from fairseq import checkpoint_utils from fairseq import utils from fairseq.data.data_utils import lengths_to_padding_mask from fairseq.models import ( register_model, register_model_architecture, FairseqEncoder, ) from fairseq.models.speech_to_text import Wav2VecEncoderWithAdaptor from fairseq.models.speech_to_text.xm_transformer import ( set_default_adaptor_args, set_default_w2v_encoder_args, need_finetuning ) from fairseq.models.transformer import TransformerEncoder, TransformerDecoder from fairseq.models.wav2vec import TransformerSentenceEncoderLayer from fairseq.utils import safe_hasattr from .s2t_dualinputtransformer import ( DualInputS2TTransformerModel, TransformerMultiInputDecoder, DualInputEncoder, ) class TransformerSentenceEncoderLayerStd(TransformerSentenceEncoderLayer): def __init__(self, sent_enc_layer): super(TransformerSentenceEncoderLayer, self).__init__() self.embedding_dim = sent_enc_layer.embedding_dim self.dropout = sent_enc_layer.dropout self.activation_dropout = sent_enc_layer.activation_dropout # Initialize blocks self.activation_fn = sent_enc_layer.activation_fn self.self_attn = sent_enc_layer.self_attn self.dropout1 = sent_enc_layer.dropout1 self.dropout2 = sent_enc_layer.dropout2 self.dropout3 = sent_enc_layer.dropout3 self.layer_norm_first = sent_enc_layer.layer_norm_first # layer norm associated with the self attention layer self.self_attn_layer_norm = sent_enc_layer.self_attn_layer_norm self.fc1 = sent_enc_layer.fc1 self.fc2 = sent_enc_layer.fc2 # layer norm associated with the position wise feed-forward NN self.final_layer_norm = sent_enc_layer.final_layer_norm def forward( self, x, self_attn_mask=None, self_attn_padding_mask=None, need_weights=None, att_args=None, ): x, attn = super().forward( x, self_attn_mask, self_attn_padding_mask, need_weights, att_args ) return x # TODO retire SharedEncoder class SharedEncoder(FairseqEncoder): def __init__(self, wav2vec_enc, mbart_enc, adaptor, shared_layers): super().__init__(None) self.w2v_encoder = wav2vec_enc self.shared_layers = self.w2v_encoder.w2v_model.encoder.layers[-shared_layers:] self.w2v_encoder.w2v_model.encoder.layers = ( self.w2v_encoder.w2v_model.encoder.layers[:-shared_layers] ) self.adaptor = adaptor if self.shared_layers[-1].layer_norm_first: self.final_layer_norm = mbart_enc.layer_norm else: mbart_enc.layer_norm = None self.final_layer_norm = None shared_layer_from = len(mbart_enc.layers) - shared_layers if shared_layer_from < 0: shared_layer_from = 0 for layer_id, layer in enumerate(self.shared_layers): mbart_enc.layers[ shared_layer_from + layer_id ] = TransformerSentenceEncoderLayerStd(layer) def forward(self, src_tokens, src_lengths=None, **kwargs): padding_mask = lengths_to_padding_mask(src_lengths) if not padding_mask.any(): padding_mask = None out = self.w2v_encoder.forward(src_tokens, padding_mask, tbc=True) x = out["encoder_out"] enc_padding_mask = None if out["encoder_padding_mask"] is not None: enc_padding_mask = out["encoder_padding_mask"].transpose( 0, 1 ) # T X B --> B X T x, enc_padding_mask = self.adaptor(x, enc_padding_mask) for layer in self.shared_layers: x, _ = layer(x, enc_padding_mask) if self.final_layer_norm is not None: x = self.final_layer_norm(x) return { "encoder_out": [x], # T x B x C "encoder_padding_mask": [enc_padding_mask] if enc_padding_mask is not None 
else [], # B x T "encoder_embedding": [], # B x T x C "encoder_states": [], # List[T x B x C] "src_tokens": [], "src_lengths": [], } class StackedWav2VecEncoderWithAdaptor(FairseqEncoder): def __init__( self, wav2vec_enc, mbart_enc_layers, mbart_layer_norm, adaptor, drop_w2v_layers=0, ): super().__init__(None) self.w2v_encoder = wav2vec_enc self.adaptor = adaptor self.mbart_encoder_layers = mbart_enc_layers self.final_layer_norm = mbart_layer_norm if drop_w2v_layers > 0: self.w2v_encoder.w2v_model.encoder.layers = ( self.w2v_encoder.w2v_model.encoder.layers[:-drop_w2v_layers] ) def forward(self, src_tokens, src_lengths=None, return_all_hiddens=False, **kwargs): padding_mask = lengths_to_padding_mask(src_lengths) if not padding_mask.any(): padding_mask = None out = self.w2v_encoder.forward(src_tokens, padding_mask, tbc=True) x = out["encoder_out"] enc_padding_mask = None if out["padding_mask"] is not None: enc_padding_mask = out["padding_mask"] # B X T x, enc_padding_mask = self.adaptor(x, enc_padding_mask) encoder_states = [] for layer in self.mbart_encoder_layers: x = layer(x, enc_padding_mask) if return_all_hiddens: encoder_states.append(x) if self.final_layer_norm is not None: x = self.final_layer_norm(x) return { "encoder_out": [x], # T x B x C "encoder_padding_mask": [enc_padding_mask] if enc_padding_mask is not None else [], # B x T "encoder_embedding": [], # B x T x C "encoder_states": encoder_states, # List[T x B x C] "src_tokens": [], "src_lengths": [], } def reorder_encoder_out(self, encoder_out, new_order): new_encoder_out = ( [] if len(encoder_out["encoder_out"]) == 0 else [x.index_select(1, new_order) for x in encoder_out["encoder_out"]] ) new_encoder_padding_mask = ( [] if len(encoder_out["encoder_padding_mask"]) == 0 else [ x.index_select(0, new_order) for x in encoder_out["encoder_padding_mask"] ] ) new_encoder_embedding = ( [] if len(encoder_out["encoder_embedding"]) == 0 else [ x.index_select(0, new_order) for x in encoder_out["encoder_embedding"] ] ) encoder_states = encoder_out["encoder_states"] if len(encoder_states) > 0: for idx, state in enumerate(encoder_states): encoder_states[idx] = state.index_select(1, new_order) return { "encoder_out": new_encoder_out, # T x B x C "encoder_padding_mask": new_encoder_padding_mask, # B x T "encoder_embedding": new_encoder_embedding, # B x T x C "encoder_states": encoder_states, # List[T x B x C] "src_tokens": [], # B x T "src_lengths": [], # B x 1 } # Note: # dual input transformer: # encoder: wav2vec for speech + mbart encoder for text # decoder: mbart decoder for text @register_model("dual_input_xm_transformer") class DualInputXMTransformerModel(DualInputS2TTransformerModel): def __init__(self, encoder, decoder): super().__init__(encoder, decoder) @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" # wav2vec encoder Wav2VecEncoderWithAdaptor.add_args(parser) # add_decoder_args(parser) # mbart Transformer parser.add_argument( "--activation-fn", type=str, default="relu", choices=utils.get_available_activation_fns(), help="activation function to use", ) parser.add_argument( "--mbart-dropout", type=float, metavar="D", help="dropout probability" ) parser.add_argument( "--mbart-attention-dropout", type=float, metavar="D", help="dropout probability for attention weights", ) parser.add_argument( "--mbart-activation-dropout", type=float, metavar="D", help="dropout probability after activation in FFN.", ) parser.add_argument( "--encoder-embed-dim", type=int, metavar="N", help="encoder embedding 
dimension", ) parser.add_argument( "--encoder-ffn-embed-dim", type=int, metavar="N", help="encoder embedding dimension for FFN", ) parser.add_argument( "--encoder-layers", type=int, metavar="N", help="num encoder layers" ) parser.add_argument( "--encoder-attention-heads", type=int, metavar="N", help="num encoder attention heads", ) parser.add_argument( "--encoder-normalize-before", action="store_true", help="apply layernorm before each encoder block", ) parser.add_argument( "--decoder-embed-dim", type=int, metavar="N", help="decoder embedding dimension", ) parser.add_argument( "--decoder-ffn-embed-dim", type=int, metavar="N", help="decoder embedding dimension for FFN", ) parser.add_argument( "--decoder-layers", type=int, metavar="N", help="num decoder layers" ) parser.add_argument( "--decoder-attention-heads", type=int, metavar="N", help="num decoder attention heads", ) parser.add_argument( "--decoder-normalize-before", action="store_true", help="apply layernorm before each decoder block", ) parser.add_argument( "--layernorm-embedding", action="store_true", help="add layernorm to embedding", ) parser.add_argument( "--no-scale-embedding", action="store_true", help="if True, dont scale embeddings", ) parser.add_argument( "--load-pretrained-mbart-from", type=str, metavar="STR", help="model to take text encoder decoder weights from (for initialization)", ) # parser.add_argument("--finetune-w2v-params", type=str, metavar="STR", # help="comma-separated param strings to finetune.") parser.add_argument( "--finetune-mbart-decoder-params", type=str, metavar="STR", help="comma-separated param strings to finetune.", ) parser.add_argument( "--finetune-mbart-encoder-params", type=str, metavar="STR", help="comma-separated param strings to finetune.", ) parser.add_argument( "--skip-encoder-projection", action="store_true", help="skip the projection layer in encoder", ) parser.add_argument( "--enc-grad-mult", type=float, metavar="V", default=1.0, help="multiply enc1 and enc2 gradient by V", ) parser.add_argument( "--enc2-along-grad-mult", type=float, metavar="V", default=1.0, help="multiply enc2 gradient by V if only enc2 is used", ) parser.add_argument( "--text-input-cost-ratio", type=float, default=1.0, metavar="V", help="text input cost ratio relative to speech input cost", ) parser.add_argument( "--stack-w2v-mbart-encoder", action="store_true", help="stack w2v and mbart encoder", ) parser.add_argument( "--stack-w2v-mbart-nonorm-encoder", action="store_true", help="stack w2v and mbart encoder", ) parser.add_argument( "--no-final-norm-decoder", action="store_true", help="no layer norm" ) parser.add_argument( "--drop-w2v-layers", type=int, default=0, metavar="N", help="drop w2v encoder layers", ) parser.add_argument( "--share-w2v-text-encoder", action="store_true", help="share w2v encoder layers with text encoder", ) parser.add_argument( "--shared-w2v-layers", type=int, default=0, metavar="N", help="shared encoder layers from w2v encoder", ) @classmethod def build_encoder(cls, args, task): _args = copy.deepcopy(args) _args.dropout = args.mbart_dropout _args.attention_dropout = args.mbart_attention_dropout _args.activation_dropout = args.mbart_activation_dropout _args.max_source_positions = 1024 enc_emb = nn.Embedding( len(task.src_dict), _args.encoder_embed_dim, task.src_dict.pad() ) text_encoder = TransformerEncoder(_args, task.src_dict, enc_emb) spch_encoder = Wav2VecEncoderWithAdaptor(args) if getattr(args, "load_pretrained_mbart_from", None): text_encoder = 
checkpoint_utils.load_pretrained_component_from_model( component=text_encoder, checkpoint=args.load_pretrained_mbart_from ) if getattr(args, "stack_w2v_mbart_encoder", False): assert getattr(args, "share_w2v_text_encoder", False) is False spch_encoder = StackedWav2VecEncoderWithAdaptor( spch_encoder.w2v_encoder, text_encoder.layers, text_encoder.layer_norm, spch_encoder.adaptor, args.drop_w2v_layers, ) elif getattr(args, "stack_w2v_mbart_nonorm_encoder", False): text_encoder.layer_norm = None spch_encoder = StackedWav2VecEncoderWithAdaptor( spch_encoder.w2v_encoder, text_encoder.layers, text_encoder.layer_norm, spch_encoder.adaptor, args.drop_w2v_layers, ) elif getattr(args, "share_w2v_text_encoder", False): spch_encoder = SharedEncoder( spch_encoder.w2v_encoder, text_encoder, spch_encoder.adaptor, args.shared_w2v_layers, ) for k, p in spch_encoder.named_parameters(): # Freeze pretrained models by default if safe_hasattr( args, "finetune_w2v_params" ) and need_finetuning(args.finetune_w2v_params, k): p.requires_grad = True else: p.requires_grad = False for k, p in text_encoder.named_parameters(): # Freeze pretrained models by default if safe_hasattr( args, "finetune_mbart_encoder_params" ) and need_finetuning( args.finetune_mbart_encoder_params, k ): p.requires_grad = True else: p.requires_grad = False cross_attentive_loss_before_last_layer = ( 0 if getattr(args, "attentive_cost_regularization", 0.0) > 0.0 else -1 ) encoder = DualInputEncoder( args, spch_encoder, text_encoder, task.src_dict, cross_attentive_loss_before_last_layer, ) return encoder @classmethod def build_decoder(cls, args, task): _args = copy.deepcopy(args) _args.dropout = args.mbart_dropout _args.attention_dropout = args.mbart_attention_dropout _args.activation_dropout = args.mbart_activation_dropout _args.max_target_positions = 1024 dec_emb = nn.Embedding( len(task.tgt_dict), _args.encoder_embed_dim, task.tgt_dict.pad() ) decoder = TransformerDecoder(_args, task.tgt_dict, dec_emb) if getattr(args, "load_pretrained_mbart_from", None): decoder = checkpoint_utils.load_pretrained_component_from_model( component=decoder, checkpoint=args.load_pretrained_mbart_from ) if getattr(args, "no_final_norm_decoder", False): decoder.layer_norm = None for k, p in decoder.named_parameters(): # Freeze pretrained models by default if safe_hasattr( args, "finetune_mbart_decoder_params" ) and need_finetuning( args.finetune_mbart_decoder_params, k ): p.requires_grad = True else: p.requires_grad = False compute_cross_attentive_loss = ( True if getattr(args, "attentive_cost_regularization", 0.0) > 0.0 else False ) cross_attentive_loss_without_norm = getattr( args, "attentive_cost_without_normalize", False ) cross_attentive_loss_reverse = ( False # getattr(args, "attentive_cost_reverse", False) ) decoder = TransformerMultiInputDecoder( dictionary=task.target_dictionary, spch_decoder=decoder, text_decoder=decoder, compute_cross_attentive_loss=compute_cross_attentive_loss, cross_attentive_loss_with_norm=True if not cross_attentive_loss_without_norm else False, cross_attentive_loss_reverse=cross_attentive_loss_reverse, ) return decoder @classmethod def build_model(cls, args, task): """Build a new model instance.""" # make sure that all args are properly defaulted # (in case there are any new ones) dualinputxmtransformer_base(args) encoder = cls.build_encoder(args, task) decoder = cls.build_decoder(args, task) return cls(encoder, decoder) @register_model_architecture("dual_input_xm_transformer", "dualinputxmtransformer_base") def 
dualinputxmtransformer_base(args): # wav2vec encoder set_default_w2v_encoder_args(args) set_default_adaptor_args(args) # mbart model args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024) args.encoder_ffn_embed_dim = getattr( args, "encoder_ffn_embed_dim", 4 * args.encoder_embed_dim ) args.encoder_layers = getattr(args, "encoder_layers", 12) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16) args.encoder_normalize_before = getattr(args, "encoder_normalize_before", True) args.encoder_layerdrop = getattr(args, "encoder_layerdrop", 0) args.encoder_learned_pos = getattr(args, "encoder_learned_pos", True) args.decoder_embed_path = getattr(args, "decoder_embed_path", None) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 1024) args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 4 * 1024) args.decoder_layers = getattr(args, "decoder_layers", 12) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16) args.decoder_normalize_before = getattr(args, "decoder_normalize_before", True) args.decoder_learned_pos = getattr(args, "decoder_learned_pos", True) args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0.0) args.adaptive_input = getattr(args, "adaptive_input", False) args.mbart_attention_dropout = getattr(args, "mbart_attention_dropout", 0.0) args.mbart_activation_dropout = getattr(args, "mbart_activation_dropout", 0.0) args.mbart_dropout = getattr(args, "mbart_dropout", 0.1) args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None) args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0) args.share_decoder_input_output_embed = getattr( args, "share_decoder_input_output_embed", True ) args.no_token_positional_embeddings = getattr( args, "no_token_positional_embeddings", False ) args.decoder_output_dim = getattr( args, "decoder_output_dim", args.decoder_embed_dim ) args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim) args.no_scale_embedding = getattr(args, "no_scale_embedding", False) args.quant_noise_pq = getattr(args, "quant_noise_pq", 0) args.layernorm_embedding = getattr(args, "layernorm_embedding", True) args.activation_fn = getattr(args, "activation_fn", "gelu") args.pooler_activation_fn = getattr(args, "pooler_activation_fn", "tanh") args.pooler_dropout = getattr(args, "pooler_dropout", 0.0)
21461
35.687179
88
py
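For reference, s2t_dualinputxmtransformer.py above freezes the pretrained wav2vec and mBART parameters by default and re-enables gradients only for parameters whose names match a comma-separated finetune list. A minimal sketch of that pattern; the simple substring match used here is an assumption standing in for fairseq's need_finetuning helper.

# Minimal sketch of the selective-finetuning pattern used above: pretrained
# parameters are frozen by default, and only those whose name contains one of
# the comma-separated patterns remain trainable.
import torch.nn as nn

def freeze_except(module: nn.Module, finetune_params: str = ""):
    patterns = [p for p in finetune_params.split(",") if p]
    for name, param in module.named_parameters():
        param.requires_grad = any(p in name for p in patterns)

if __name__ == "__main__":
    model = nn.Sequential(nn.Linear(8, 8), nn.LayerNorm(8), nn.Linear(8, 4))
    # Keep the last linear weight and the LayerNorm trainable; freeze the rest.
    freeze_except(model, "2.weight,1")
    for n, p in model.named_parameters():
        print(n, p.requires_grad)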
sign-topic
sign-topic-main/examples/speech_text_joint_to_text/tasks/speech_text_joint.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import os from argparse import Namespace from pathlib import Path import torch from fairseq.data import ( encoders, Dictionary, ResamplingDataset, TransformEosLangPairDataset, ConcatDataset, ) from fairseq.data.iterators import GroupedEpochBatchIterator from fairseq.data.audio.multi_modality_dataset import ( MultiModalityDataset, LangPairMaskDataset, ModalityDatasetItem, ) from fairseq.data.audio.speech_to_text_dataset import ( SpeechToTextDataset, SpeechToTextDatasetCreator, ) from fairseq.data.audio.speech_to_text_joint_dataset import ( S2TJointDataConfig, SpeechToTextJointDatasetCreator, ) from fairseq.tasks import register_task from fairseq.tasks.speech_to_text import SpeechToTextTask from fairseq.tasks.translation import load_langpair_dataset logger = logging.getLogger(__name__) LANG_TAG_TEMPLATE = "<lang:{}>" @register_task("speech_text_joint_to_text") class SpeechTextJointToTextTask(SpeechToTextTask): """ Task for joint training speech and text to text. """ @classmethod def add_args(cls, parser): """Add task-specific arguments to the parser.""" super(SpeechTextJointToTextTask, cls).add_args(parser) ### parser.add_argument( "--parallel-text-data", default="", help="path to parallel text data directory", ) parser.add_argument( "--max-tokens-text", type=int, metavar="N", help="maximum tokens for encoder text input ", ) parser.add_argument( "--max-positions-text", type=int, metavar="N", default=400, help="maximum tokens for per encoder text input ", ) parser.add_argument( "--langpairs", default=None, metavar="S", help='language pairs for text training, separated with ","', ) parser.add_argument( "--speech-sample-ratio", default=1, type=float, metavar="N", help="Multiple Ratio for speech dataset with transcripts ", ) parser.add_argument( "--text-sample-ratio", default=1, type=float, metavar="N", help="Multiple Ratio for text set ", ) parser.add_argument( "--update-mix-data", action="store_true", help="use mixed data in one update when update-freq > 1", ) parser.add_argument( "--load-speech-only", action="store_true", help="load speech data only", ) parser.add_argument( "--mask-text-ratio", type=float, metavar="V", default=0.0, help="mask V source tokens for text only mode", ) parser.add_argument( "--mask-text-type", default="random", choices=["random", "tail"], help="mask text typed", ) parser.add_argument( "--noise-token", default="", help="noise token for masking src text tokens if mask-text-ratio > 0", ) parser.add_argument( "--infer-target-lang", default="", metavar="S", help="target language for inference", ) def __init__(self, args, src_dict, tgt_dict, infer_tgt_lang_id=None): super().__init__(args, tgt_dict) self.src_dict = src_dict self.data_cfg = S2TJointDataConfig(Path(args.data) / args.config_yaml) assert self.tgt_dict.pad() == self.src_dict.pad() assert self.tgt_dict.eos() == self.src_dict.eos() self.speech_only = args.load_speech_only self._infer_tgt_lang_id = infer_tgt_lang_id @classmethod def setup_task(cls, args, **kwargs): """Setup the task (e.g., load dictionaries).""" data_cfg = S2TJointDataConfig(Path(args.data) / args.config_yaml) tgt_dict_path = Path(args.data) / data_cfg.vocab_filename src_dict_path = Path(args.data) / data_cfg.src_vocab_filename if (not os.path.isfile(src_dict_path)) or (not os.path.isfile(tgt_dict_path)): raise FileNotFoundError("Dict not found: 
{}".format(args.data)) src_dict = Dictionary.load(src_dict_path.as_posix()) tgt_dict = Dictionary.load(tgt_dict_path.as_posix()) print("| src dictionary: {} types".format(len(src_dict))) print("| tgt dictionary: {} types".format(len(tgt_dict))) if args.parallel_text_data != "": if not os.path.isabs(args.parallel_text_data): args.parallel_text_data = os.path.join( args.data, args.parallel_text_data ) if args.langpairs is None: raise Exception( "Could not infer language pair, please provide it explicitly" ) infer_tgt_lang_id = None if args.infer_target_lang != "" and data_cfg.prepend_tgt_lang_tag_no_change: tgt_lang_tag = SpeechToTextDataset.LANG_TAG_TEMPLATE.format( args.infer_target_lang ) infer_tgt_lang_id = tgt_dict.index(tgt_lang_tag) assert infer_tgt_lang_id != tgt_dict.unk() return cls(args, src_dict, tgt_dict, infer_tgt_lang_id=infer_tgt_lang_id) def load_langpair_dataset( self, prepend_tgt_lang_tag=False, sampling_alpha=1.0, epoch=0 ): lang_pairs = [] text_dataset = None split = "train" for lp in self.args.langpairs.split(","): src, tgt = lp.split("-") text_dataset = load_langpair_dataset( self.args.parallel_text_data, split, src, self.src_dict, tgt, self.tgt_dict, combine=True, dataset_impl=None, upsample_primary=1, left_pad_source=False, left_pad_target=False, max_source_positions=self.args.max_positions_text, max_target_positions=self.args.max_target_positions, load_alignments=False, truncate_source=False, ) if prepend_tgt_lang_tag: # TODO text_dataset = TransformEosLangPairDataset( text_dataset, src_eos=self.src_dict.eos(), tgt_bos=self.tgt_dict.eos(), # 'prev_output_tokens' starts with eos new_tgt_bos=self.tgt_dict.index(LANG_TAG_TEMPLATE.format(tgt)), ) lang_pairs.append(text_dataset) if len(lang_pairs) > 1: if sampling_alpha != 1.0: size_ratios = SpeechToTextDatasetCreator.get_size_ratios( self.args.langpairs.split(","), [len(s) for s in lang_pairs], alpha=sampling_alpha, ) lang_pairs = [ ResamplingDataset(d, size_ratio=r, epoch=epoch, replace=(r >= 1.0)) for d, r in zip(lang_pairs, size_ratios) ] return ConcatDataset(lang_pairs) return text_dataset def inference_step( self, generator, models, sample, prefix_tokens=None, constraints=None ): with torch.no_grad(): return generator.generate( models, sample, prefix_tokens=prefix_tokens, constraints=constraints, bos_token=self._infer_tgt_lang_id, ) def build_src_tokenizer(self, args): logger.info(f"src-pre-tokenizer: {self.data_cfg.src_pre_tokenizer}") return encoders.build_tokenizer(Namespace(**self.data_cfg.src_pre_tokenizer)) def build_src_bpe(self, args): logger.info(f"tokenizer: {self.data_cfg.src_bpe_tokenizer}") return encoders.build_bpe(Namespace(**self.data_cfg.src_bpe_tokenizer)) def load_dataset(self, split, epoch=1, combine=False, **kwargs): """Load a given dataset split. 
Args: split (str): name of the split (e.g., train, valid, test) """ is_train_split = split.startswith("train") pre_tokenizer = self.build_tokenizer(self.args) bpe_tokenizer = self.build_bpe(self.args) src_pre_tokenizer = self.build_src_tokenizer(self.args) src_bpe_tokenizer = self.build_src_bpe(self.args) ast_dataset = SpeechToTextJointDatasetCreator.from_tsv( self.args.data, self.data_cfg, split, self.tgt_dict, src_dict=None if self.speech_only else self.src_dict, pre_tokenizer=pre_tokenizer, bpe_tokenizer=bpe_tokenizer, src_pre_tokenizer=src_pre_tokenizer, src_bpe_tokenizer=src_bpe_tokenizer, is_train_split=is_train_split, epoch=epoch, seed=self.args.seed, ) noise_token_id = -1 text_dataset = None if self.args.parallel_text_data != "" and is_train_split: text_dataset = self.load_langpair_dataset( self.data_cfg.prepend_tgt_lang_tag_no_change, 1.0, epoch=epoch, ) if self.args.mask_text_ratio > 0: # add mask noise_token_id = ( self.src_dict.unk() if self.args.noise_token == "" else self.src_dict.index(self.args.noise_token) ) text_dataset = LangPairMaskDataset( text_dataset, src_bos=self.src_dict.bos(), src_eos=self.src_dict.eos(), noise_id=noise_token_id, mask_ratio=self.args.mask_text_ratio, mask_type=self.args.mask_text_type, ) if text_dataset is not None: mdsets = [ ModalityDatasetItem( "sup_speech", ast_dataset, (self.args.max_source_positions, self.args.max_target_positions), self.args.max_tokens, self.args.batch_size, ), ModalityDatasetItem( "text", text_dataset, (self.args.max_positions_text, self.args.max_target_positions), self.args.max_tokens_text if self.args.max_tokens_text is not None else self.args.max_tokens, self.args.batch_size, ), ] ast_dataset = MultiModalityDataset(mdsets) self.datasets[split] = ast_dataset @property def target_dictionary(self): """Return the :class:`~fairseq.data.Dictionary` for the language model.""" return self.tgt_dict @property def source_dictionary(self): """Return the source :class:`~fairseq.data.Dictionary` (if applicable for this task).""" return None if self.speech_only else self.src_dict def get_batch_iterator( self, dataset, max_tokens=None, max_sentences=None, max_positions=None, ignore_invalid_inputs=False, required_batch_size_multiple=1, seed=1, num_shards=1, shard_id=0, num_workers=0, epoch=0, data_buffer_size=0, disable_iterator_cache=False, skip_remainder_batch=False, grouped_shuffling=False, update_epoch_batch_itr=False, ): if not isinstance(dataset, MultiModalityDataset): return super(SpeechTextJointToTextTask, self).get_batch_iterator( dataset, max_tokens, max_sentences, max_positions, ignore_invalid_inputs, required_batch_size_multiple, seed, num_shards, shard_id, num_workers, epoch, data_buffer_size, disable_iterator_cache, skip_remainder_batch=skip_remainder_batch, update_epoch_batch_itr=update_epoch_batch_itr, ) mult_ratio = [self.args.speech_sample_ratio, self.args.text_sample_ratio] assert len(dataset.datasets) == 2 # initialize the dataset with the correct starting epoch dataset.set_epoch(epoch) batch_samplers = dataset.get_batch_samplers( mult_ratio, required_batch_size_multiple, seed ) # return a reusable, sharded iterator epoch_iter = GroupedEpochBatchIterator( dataset=dataset, collate_fn=dataset.collater, batch_samplers=batch_samplers, seed=seed, num_shards=num_shards, shard_id=shard_id, num_workers=num_workers, epoch=epoch, mult_rate=1 if self.args.update_mix_data else max(self.args.update_freq), buffer_size=data_buffer_size, skip_remainder_batch=skip_remainder_batch, ) self.dataset_to_epoch_iter[dataset] = {} # refresh 
it every epoch
        return epoch_iter
13,654
35.124339
88
py
sign-topic
sign-topic-main/examples/rxf/rxf_src/label_smoothed_cross_entropy_r3f.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math import torch import torch.nn.functional as F from fairseq import metrics, utils from fairseq.criterions import FairseqCriterion, register_criterion from fairseq.criterions.label_smoothed_cross_entropy import label_smoothed_nll_loss @register_criterion("label_smoothed_cross_entropy_r3f") class LabelSmoothedCrossEntropyR3FCriterion(FairseqCriterion): def __init__( self, task, sentence_avg, label_smoothing, eps, r3f_lambda, noise_type ): super().__init__(task) self.sentence_avg = sentence_avg self.label_smoothing = label_smoothing self.eps = eps self.r3f_lambda = r3f_lambda self.noise_type = noise_type if self.noise_type in {"normal"}: self.noise_sampler = torch.distributions.normal.Normal( loc=0.0, scale=self.eps ) elif self.noise_type == "uniform": self.noise_sampler = torch.distributions.uniform.Uniform( low=-self.eps, high=self.eps ) else: raise Exception(f"unrecognized noise type {self.noise_type}") @staticmethod def add_args(parser): """Add criterion-specific arguments to the parser.""" # fmt: off parser.add_argument('--label-smoothing', default=0., type=float, metavar='D', help='epsilon for label smoothing, 0 means no label smoothing') parser.add_argument('--eps', type=float, default=1e-5, help='noise eps') parser.add_argument('--r3f-lambda', type=float, default=1.0, help='lambda for combining logistic loss and noisy KL loss') parser.add_argument('--noise-type', type=str, default='normal', choices=['normal', 'uniform'], help='type of noises') # fmt: on def _get_symm_kl(self, noised_logits, input_logits): return ( F.kl_div( F.log_softmax(noised_logits, dim=-1, dtype=torch.float32), F.softmax(input_logits, dim=-1, dtype=torch.float32), None, None, "sum", ) + F.kl_div( F.log_softmax(input_logits, dim=-1, dtype=torch.float32), F.softmax(noised_logits, dim=-1, dtype=torch.float32), None, None, "sum", ) ) / noised_logits.size(0) def forward(self, model, sample, reduce=True): """Compute the loss for the given sample. 
Returns a tuple with three elements: 1) the loss 2) the sample size, which is used as the denominator for the gradient 3) logging outputs to display while training """ token_embeddings = model.encoder.embed_tokens(sample["net_input"]["src_tokens"]) input_logits, extra = model(**sample["net_input"]) loss, nll_loss = self.compute_loss( model, (input_logits, extra), sample, reduce=reduce ) sample_size = ( sample["target"].size(0) if self.sentence_avg else sample["ntokens"] ) if model.training: noise = self.noise_sampler.sample(sample_shape=token_embeddings.shape).to( token_embeddings ) noised_embeddings = token_embeddings.clone() + noise noised_logits, _ = model( **sample["net_input"], token_embeddings=noised_embeddings ) symm_kl = self._get_symm_kl(noised_logits, input_logits) if model.training: symm_kl = symm_kl * sample_size loss = loss + self.r3f_lambda * symm_kl logging_output = { "loss": loss.data, "nll_loss": nll_loss.data, "ntokens": sample["ntokens"], "nsentences": sample["target"].size(0), "sample_size": sample_size, } if model.training: logging_output.update( symm_kl=utils.item(symm_kl.data) if reduce else symm_kl.data ) return loss, sample_size, logging_output def compute_loss(self, model, net_output, sample, reduce=True): lprobs = model.get_normalized_probs(net_output, log_probs=True) lprobs = lprobs.view(-1, lprobs.size(-1)) target = model.get_targets(sample, net_output).view(-1, 1) loss, nll_loss = label_smoothed_nll_loss( lprobs, target, self.label_smoothing, ignore_index=self.padding_idx, reduce=reduce, ) return loss, nll_loss @staticmethod def reduce_metrics(logging_outputs) -> None: """Aggregate logging outputs from data parallel training.""" loss_sum = sum(log.get("loss", 0) for log in logging_outputs) nll_loss_sum = sum(log.get("nll_loss", 0) for log in logging_outputs) ntokens = sum(log.get("ntokens", 0) for log in logging_outputs) sample_size = sum(log.get("sample_size", 0) for log in logging_outputs) symm_kl_sum = sum(log.get("symm_kl", 0) for log in logging_outputs) metrics.log_scalar("symm_kl", symm_kl_sum / sample_size, sample_size, round=3) metrics.log_scalar( "loss", loss_sum / sample_size / math.log(2), sample_size, round=3 ) metrics.log_scalar( "nll_loss", nll_loss_sum / ntokens / math.log(2), ntokens, round=3 ) metrics.log_derived( "ppl", lambda meters: utils.get_perplexity(meters["nll_loss"].avg) ) @staticmethod def logging_outputs_can_be_summed() -> bool: """ Whether the logging outputs returned by `forward` can be summed across workers prior to calling `reduce_metrics`. Setting this to True will improves distributed training speed. """ return True
6,109
37.670886
91
py
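For reference, the term computed by _get_symm_kl above can be written out as follows (a sketch using P_clean = softmax(input_logits) and P_noised = softmax(noised_logits); it restates what the code does rather than quoting the source). Because F.kl_div(log_q, p) computes KL(p || q), the criterion evaluates

    symm_kl = ( KL(P_clean || P_noised) + KL(P_noised || P_clean) ) / noised_logits.size(0)

and, during training, the total objective is the label-smoothed NLL plus r3f_lambda * sample_size * symm_kl.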
sign-topic
sign-topic-main/examples/rxf/rxf_src/sentence_prediction_r3f.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math import torch import torch.nn.functional as F from fairseq import utils from fairseq.criterions import FairseqCriterion, register_criterion @register_criterion("sentence_prediction_r3f") class SentencePredictionR3F(FairseqCriterion): def __init__( self, task, eps, r3f_lambda, noise_type, classification_head_name, regression_target, ): super().__init__(task) self.eps = eps self.r3f_lambda = r3f_lambda self.noise_type = noise_type self.classification_head_name = classification_head_name self.regression_target = regression_target if self.noise_type in {"normal"}: self.noise_sampler = torch.distributions.normal.Normal( loc=0.0, scale=self.eps ) elif self.noise_type == "uniform": self.noise_sampler = torch.distributions.uniform.Uniform( low=-self.eps, high=self.eps ) else: raise Exception(f"unrecognized noise type {self.noise_type}") @staticmethod def add_args(parser): # fmt: off parser.add_argument('--eps', type=float, default=1e-5, help='noise eps') parser.add_argument('--r3f-lambda', type=float, default=1.0, help='lambda for combining logistic loss and noisy KL loss') parser.add_argument('--noise-type', type=str, default='uniform', choices=['normal', 'uniform'], help='type of noises for RXF methods') parser.add_argument('--classification-head-name', default='sentence_classification_head', help='name of the classification head to use') parser.add_argument('--regression-target', action='store_true') # fmt: on def _get_symm_kl(self, noised_logits, input_logits): return ( F.kl_div( F.log_softmax(noised_logits, dim=-1, dtype=torch.float32), F.softmax(input_logits, dim=-1, dtype=torch.float32), None, None, "sum", ) + F.kl_div( F.log_softmax(input_logits, dim=-1, dtype=torch.float32), F.softmax(noised_logits, dim=-1, dtype=torch.float32), None, None, "sum", ) ) / noised_logits.size(0) def forward(self, model, sample, reduce=True): """Compute the loss for the given sample. 
Returns a tuple with three elements: 1) the loss 2) the sample size, which is used as the denominator for the gradient 3) logging outputs to display while training """ assert ( hasattr(model, "classification_heads") and self.classification_head_name in model.classification_heads ), "model must provide sentence classification head for --criterion=sentence_prediction" token_embeddings = model.encoder.sentence_encoder.embed_tokens( sample["net_input"]["src_tokens"] ) input_logits, _ = model( **sample["net_input"], features_only=True, classification_head_name=self.classification_head_name, token_embeddings=token_embeddings, ) if model.training and self.noise_sampler: noise = self.noise_sampler.sample(sample_shape=token_embeddings.shape).to( token_embeddings ) noised_embeddings = token_embeddings.detach().clone() + noise noised_logits, _ = model( **sample["net_input"], features_only=True, classification_head_name=self.classification_head_name, token_embeddings=noised_embeddings, ) symm_kl = self._get_symm_kl(noised_logits, input_logits) else: symm_kl = 0 targets = model.get_targets(sample, [input_logits]).view(-1) sample_size = targets.numel() if not self.regression_target: loss = F.nll_loss( F.log_softmax(input_logits, dim=-1, dtype=torch.float32), targets, reduction="sum", ) if model.training: symm_kl = symm_kl * sample_size loss = loss + self.r3f_lambda * symm_kl else: logits = input_logits.squeeze().float() targets = targets.float() loss = F.mse_loss(logits, targets, reduction="sum") logging_output = { "loss": utils.item(loss.data) if reduce else loss.data, "ntokens": sample["ntokens"], "nsentences": sample_size, "sample_size": sample_size, } if not self.regression_target: preds = input_logits.max(dim=1)[1] logging_output.update(ncorrect=(preds == targets).sum().item()) if model.training and self.noise_sampler: logging_output.update( symm_kl=utils.item(symm_kl.data) if reduce else symm_kl.data ) return loss, sample_size, logging_output @staticmethod def aggregate_logging_outputs(logging_outputs): """Aggregate logging outputs from data parallel training.""" loss_sum = sum(log.get("loss", 0) for log in logging_outputs) symm_kl_sum = sum(log.get("symm_kl", 0) for log in logging_outputs) ntokens = sum(log.get("ntokens", 0) for log in logging_outputs) nsentences = sum(log.get("nsentences", 0) for log in logging_outputs) sample_size = sum(log.get("sample_size", 0) for log in logging_outputs) agg_output = { "loss": loss_sum / sample_size / math.log(2), "symm_kl": symm_kl_sum / sample_size, "ntokens": ntokens, "nsentences": nsentences, "sample_size": sample_size, } if len(logging_outputs) > 0 and "ncorrect" in logging_outputs[0]: ncorrect = sum(log.get("ncorrect", 0) for log in logging_outputs) agg_output.update(accuracy=ncorrect / nsentences) if sample_size != ntokens: agg_output["nll_loss"] = loss_sum / ntokens / math.log(2) return agg_output
6,587
37.302326
96
py
sign-topic
sign-topic-main/scripts/average_checkpoints.py
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import collections import os import re import torch from fairseq.file_io import PathManager def average_checkpoints(inputs): """Loads checkpoints from inputs and returns a model with averaged weights. Args: inputs: An iterable of string paths of checkpoints to load from. Returns: A dict of string keys mapping to various values. The 'model' key from the returned dict should correspond to an OrderedDict mapping string parameter names to torch Tensors. """ params_dict = collections.OrderedDict() params_keys = None new_state = None num_models = len(inputs) for fpath in inputs: with PathManager.open(fpath, "rb") as f: state = torch.load( f, map_location=( lambda s, _: torch.serialization.default_restore_location(s, "cpu") ), ) # Copies over the settings from the first checkpoint if new_state is None: new_state = state model_params = state["model"] model_params_keys = list(model_params.keys()) if params_keys is None: params_keys = model_params_keys elif params_keys != model_params_keys: raise KeyError( "For checkpoint {}, expected list of params: {}, " "but found: {}".format(f, params_keys, model_params_keys) ) for k in params_keys: p = model_params[k] if isinstance(p, torch.HalfTensor): p = p.float() if k not in params_dict: params_dict[k] = p.clone() # NOTE: clone() is needed in case of p is a shared parameter else: params_dict[k] += p averaged_params = collections.OrderedDict() for k, v in params_dict.items(): averaged_params[k] = v if averaged_params[k].is_floating_point(): averaged_params[k].div_(num_models) else: averaged_params[k] //= num_models new_state["model"] = averaged_params return new_state def last_n_checkpoints(paths, n, update_based, upper_bound=None): assert len(paths) == 1 path = paths[0] if update_based: pt_regexp = re.compile(r"checkpoint_\d+_(\d+)\.pt") else: pt_regexp = re.compile(r"checkpoint(\d+)\.pt") files = PathManager.ls(path) entries = [] for f in files: m = pt_regexp.fullmatch(f) if m is not None: sort_key = int(m.group(1)) if upper_bound is None or sort_key <= upper_bound: entries.append((sort_key, m.group(0))) if len(entries) < n: raise Exception( "Found {} checkpoint files but need at least {}", len(entries), n ) return [os.path.join(path, x[1]) for x in sorted(entries, reverse=True)[:n]] def main(): parser = argparse.ArgumentParser( description="Tool to average the params of input checkpoints to " "produce a new checkpoint", ) # fmt: off parser.add_argument('--inputs', required=True, nargs='+', help='Input checkpoint file paths.') parser.add_argument('--output', required=True, metavar='FILE', help='Write the new checkpoint containing the averaged weights to this path.') num_group = parser.add_mutually_exclusive_group() num_group.add_argument('--num-epoch-checkpoints', type=int, help='if set, will try to find checkpoints with names checkpoint_xx.pt in the ' 'path specified by input, and average last this many of them.') num_group.add_argument('--num-update-checkpoints', type=int, help='if set, will try to find checkpoints with names checkpoint_ee_xx.pt in the path specified by' ' input, and average last this many of them.') parser.add_argument('--checkpoint-upper-bound', type=int, help='when using --num-epoch-checkpoints, this will set an upper bound on which epoch to use, ' 'when using --num-update-checkpoints, this will set an upper bound on which 
update to use' 'e.g., with --num-epoch-checkpoints=10 --checkpoint-upper-bound=50, checkpoints 41-50 would be' ' averaged.' 'e.g., with --num-update-checkpoints=10 --checkpoint-upper-bound=50000, checkpoints 40500-50000 would' ' be averaged assuming --save-interval-updates 500' ) # fmt: on args = parser.parse_args() print(args) num = None is_update_based = False if args.num_update_checkpoints is not None: num = args.num_update_checkpoints is_update_based = True elif args.num_epoch_checkpoints is not None: num = args.num_epoch_checkpoints assert args.checkpoint_upper_bound is None or ( args.num_epoch_checkpoints is not None or args.num_update_checkpoints is not None ), "--checkpoint-upper-bound requires --num-epoch-checkpoints or --num-update-checkpoints" assert ( args.num_epoch_checkpoints is None or args.num_update_checkpoints is None ), "Cannot combine --num-epoch-checkpoints and --num-update-checkpoints" if num is not None: args.inputs = last_n_checkpoints( args.inputs, num, is_update_based, upper_bound=args.checkpoint_upper_bound, ) print("averaging checkpoints: ", args.inputs) new_state = average_checkpoints(args.inputs) with PathManager.open(args.output, "wb") as f: torch.save(new_state, f) print("Finished writing averaged checkpoint to {}".format(args.output)) if __name__ == "__main__": main()
6,075
36.73913
126
py
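A minimal driver for the averaging utility above, shown as a sketch with placeholder checkpoint paths (it mirrors how tests/test_average_checkpoints.py calls average_checkpoints() directly; floating-point parameters are divided by the number of checkpoints, while integer tensors use truncating integer division):

import torch
from scripts.average_checkpoints import average_checkpoints

# hypothetical paths, for illustration only
paths = ["checkpoints/checkpoint1.pt", "checkpoints/checkpoint2.pt"]
# returns the first checkpoint's dict with its "model" entry replaced by the averaged parameters
new_state = average_checkpoints(paths)
torch.save(new_state, "checkpoints/averaged.pt")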
sign-topic
sign-topic-main/tests/test_rotary_positional_embedding.py
import torch import numpy as np import unittest from fairseq.modules.rotary_positional_embedding import apply_rotary_pos_emb from fairseq.modules import RotaryPositionalEmbedding class TestRotaryPositionalEmbedding(unittest.TestCase): def setUp(self) -> None: self.T = 3 self.B = 1 self.C = 2 torch.manual_seed(0) self.sample = torch.randn(self.T, self.B, self.C) # TBC self.rope_pos_emd = RotaryPositionalEmbedding(dim=self.C) def test_forward(self): expected_cos = torch.tensor( [[[[1.0000, 1.0000]]], [[[0.5403, 0.5403]]], [[[-0.4161, -0.4161]]]] ) expected_sin = torch.tensor( [[[[0.0000, 0.0000]]], [[[0.8415, 0.8415]]], [[[0.9093, 0.9093]]]] ) cos, sin = self.rope_pos_emd(self.sample, self.T) self.assertTrue( np.allclose( expected_cos.cpu().detach().numpy(), cos.cpu().detach().numpy(), atol=1e-4, ) ) self.assertTrue( np.allclose( expected_sin.cpu().detach().numpy(), sin.cpu().detach().numpy(), atol=1e-4, ) ) def test_apply_rotary_pos_emb(self): cos, sin = self.rope_pos_emd(self.sample, self.T) query = self.sample.view(self.T, self.B, 1, self.C) expected_query = torch.tensor( [[[[1.5410, -0.2934]]], [[[-1.6555, -1.5263]]], [[[1.7231, -0.4041]]]] ) new_query, new_key = apply_rotary_pos_emb(query, query, cos, sin) self.assertTrue( np.allclose( expected_query.cpu().detach().numpy(), new_query.cpu().detach().numpy(), atol=1e-4, ) ) self.assertTrue( np.allclose( expected_query.cpu().detach().numpy(), new_key.cpu().detach().numpy(), atol=1e-4, ) ) if __name__ == "__main__": unittest.main()
2,045
30.476923
82
py
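For orientation, the apply_rotary_pos_emb function exercised in the test above follows the usual rotary-embedding rotation; schematically (general formulation only, the exact tensor layout in the fairseq implementation may differ):

    q_rot = q * cos + rotate_half(q) * sin    (and likewise for the key)

where rotate_half splits the feature dimension into two halves and maps (x1, x2) to (-x2, x1).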
sign-topic
sign-topic-main/tests/test_train.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import contextlib import logging import unittest from io import StringIO from unittest.mock import MagicMock, patch import torch from fairseq import checkpoint_utils, data from omegaconf import OmegaConf def mock_trainer(epoch, num_updates, iterations_in_epoch): trainer = MagicMock() trainer.load_checkpoint.return_value = { "train_iterator": { "epoch": epoch, "iterations_in_epoch": iterations_in_epoch, "shuffle": False, }, } trainer.get_num_updates.return_value = num_updates return trainer def mock_dict(): d = MagicMock() d.pad.return_value = 1 d.eos.return_value = 2 d.unk.return_value = 3 return d def get_trainer_and_epoch_itr(epoch, epoch_size, num_updates, iterations_in_epoch): tokens = torch.LongTensor(list(range(epoch_size))).view(1, -1) tokens_ds = data.TokenBlockDataset( tokens, sizes=[tokens.size(-1)], block_size=1, pad=0, eos=1, include_targets=False, ) trainer = mock_trainer(epoch, num_updates, iterations_in_epoch) dataset = data.LanguagePairDataset( tokens_ds, tokens_ds.sizes, mock_dict(), shuffle=False ) epoch_itr = data.EpochBatchIterator( dataset=dataset, collate_fn=dataset.collater, batch_sampler=[[i] for i in range(epoch_size)], ) return trainer, epoch_itr def get_mock_cfg(finetune_from_model): cfg_mock = OmegaConf.create( { "checkpoint": { "save_dir": None, "optimizer_overrides": "{}", "reset_dataloader": False, "reset_meters": False, "reset_optimizer": False, "reset_lr_scheduler": False, "finetune_from_model": finetune_from_model, "model_parallel_size": 1, "restore_file": "checkpoint_last.pt", }, "common": { "model_parallel_size": 1, }, } ) return cfg_mock class TestLoadCheckpoint(unittest.TestCase): def setUp(self): self.cfg_mock = get_mock_cfg(None) self.patches = { "os.makedirs": MagicMock(), "os.path.join": MagicMock(), "os.path.isfile": MagicMock(return_value=True), "os.path.isabs": MagicMock(return_value=False), "fairseq.file_io.PathManager.exists": MagicMock(return_value=False), } self.applied_patches = [patch(p, d) for p, d in self.patches.items()] [p.start() for p in self.applied_patches] logging.disable(logging.CRITICAL) def tearDown(self): patch.stopall() logging.disable(logging.NOTSET) def test_load_partial_checkpoint(self): with contextlib.redirect_stdout(StringIO()): trainer, epoch_itr = get_trainer_and_epoch_itr(2, 150, 200, 50) trainer.get_train_iterator = MagicMock(return_value=epoch_itr) _, epoch_itr = checkpoint_utils.load_checkpoint( self.cfg_mock.checkpoint, trainer ) self.assertEqual(epoch_itr.epoch, 2) self.assertEqual(epoch_itr.iterations_in_epoch, 50) itr = epoch_itr.next_epoch_itr(shuffle=False) self.assertEqual(epoch_itr.epoch, 2) self.assertEqual(epoch_itr.iterations_in_epoch, 50) self.assertEqual(next(itr)["net_input"]["src_tokens"][0].item(), 50) self.assertEqual(epoch_itr.iterations_in_epoch, 51) for _ in range(150 - 52): next(itr) self.assertEqual(epoch_itr.iterations_in_epoch, 149) self.assertTrue(itr.has_next()) next(itr) self.assertFalse(itr.has_next()) itr = epoch_itr.next_epoch_itr(shuffle=False) self.assertTrue(itr.has_next()) self.assertEqual(epoch_itr.epoch, 3) self.assertEqual(epoch_itr.iterations_in_epoch, 0) def test_load_full_checkpoint(self): with contextlib.redirect_stdout(StringIO()): trainer, epoch_itr = get_trainer_and_epoch_itr(2, 150, 300, 150) trainer.get_train_iterator = MagicMock(return_value=epoch_itr) _, epoch_itr = 
checkpoint_utils.load_checkpoint( self.cfg_mock.checkpoint, trainer ) itr = epoch_itr.next_epoch_itr(shuffle=False) self.assertEqual(epoch_itr.epoch, 3) self.assertEqual(epoch_itr.iterations_in_epoch, 0) self.assertEqual(next(itr)["net_input"]["src_tokens"][0].item(), 0) def test_load_no_checkpoint(self): with contextlib.redirect_stdout(StringIO()): trainer, epoch_itr = get_trainer_and_epoch_itr(1, 150, 0, 0) trainer.get_train_iterator = MagicMock(return_value=epoch_itr) self.patches["os.path.isfile"].return_value = False _, epoch_itr = checkpoint_utils.load_checkpoint( self.cfg_mock.checkpoint, trainer ) itr = epoch_itr.next_epoch_itr(shuffle=False) self.assertEqual(epoch_itr.epoch, 1) self.assertEqual(epoch_itr.iterations_in_epoch, 0) self.assertEqual(next(itr)["net_input"]["src_tokens"][0].item(), 0) def test_finetune_from_model_args_conflict(self): with contextlib.redirect_stdout(StringIO()): trainer, epoch_itr = get_trainer_and_epoch_itr(1, 150, 0, 0) trainer.get_train_iterator = MagicMock(return_value=epoch_itr) for arg in [ "reset_optimizer", "reset_lr_scheduler", "reset_meters", "reset_dataloader", ]: with self.subTest(arg=arg): cfg_mock = get_mock_cfg("/temp/checkpoint_pretrained.pt") cfg_mock["checkpoint"][arg] = True with self.assertRaises(Exception) as context: _, _ = checkpoint_utils.load_checkpoint( cfg_mock.checkpoint, trainer ) self.assertTrue( "--finetune-from-model can not be set together with either --reset-optimizer" " or reset_lr_scheduler or reset_meters or reset_dataloader" in str(context.exception) ) def test_finetune_from_model(self): with contextlib.redirect_stdout(StringIO()): trainer, epoch_itr = get_trainer_and_epoch_itr(1, 150, 0, 0) trainer.get_train_iterator = MagicMock(return_value=epoch_itr) from_model_path = "/temp/checkpoint_pretrained.pt" def mock_finetune_exist(path): if path == from_model_path: return True else: return False self.patches[ "fairseq.file_io.PathManager.exists" ].side_effect = mock_finetune_exist cfg_mock = get_mock_cfg(from_model_path) cfg_mock.checkpoint.restore_file = "checkpoint_last.pt" _, _ = checkpoint_utils.load_checkpoint(cfg_mock.checkpoint, trainer) ( checkpoint_path, reset_optimizer, reset_lr_scheduler, optimizer_overrides, ) = trainer.load_checkpoint.call_args[0] reset_meters = trainer.load_checkpoint.call_args[1]["reset_meters"] self.assertTrue(reset_optimizer) self.assertTrue(reset_lr_scheduler) self.assertTrue(reset_meters) def test_finetune_from_model_resume(self): with contextlib.redirect_stdout(StringIO()): trainer, epoch_itr = get_trainer_and_epoch_itr(1, 150, 0, 0) trainer.get_train_iterator = MagicMock(return_value=epoch_itr) from_model_path = "/temp/checkpoint_pretrained.pt" # launch second time # both restore_file=checkpoint_last.pt and finetune_from_model are set def mock_finetune_exist(path): if path == from_model_path or path.endsWith("checkpoint_last.pt"): return True else: return False self.patches[ "fairseq.file_io.PathManager.exists" ].side_effect = mock_finetune_exist cfg_mock = get_mock_cfg(from_model_path) cfg_mock.checkpoint.restore_file = "checkpoint_last.pt" _, _ = checkpoint_utils.load_checkpoint(cfg_mock.checkpoint, trainer) ( checkpoint_path, reset_optimizer, reset_lr_scheduler, optimizer_overrides, ) = trainer.load_checkpoint.call_args[0] reset_meters = trainer.load_checkpoint.call_args[1]["reset_meters"] self.assertFalse(reset_optimizer) self.assertFalse(reset_lr_scheduler) self.assertFalse(reset_meters) if __name__ == "__main__": unittest.main()
9,292
36.471774
101
py
sign-topic
sign-topic-main/tests/test_positional_encoding.py
import unittest

import torch
from fairseq.modules import RelPositionalEncoding
import numpy as np


class TestRelPositionalEncoding(unittest.TestCase):
    def setUp(self) -> None:
        self.T = 3
        self.B = 1
        self.C = 2
        torch.manual_seed(0)
        self.sample = torch.randn(self.T, self.B, self.C)  # TBC
        self.rel_pos_enc = RelPositionalEncoding(max_len=4, d_model=self.C)

    def test_extend_pe(self):
        inp = self.sample.transpose(0, 1)
        self.rel_pos_enc.extend_pe(inp)
        expected_pe = torch.tensor(
            [
                [
                    [0.1411, -0.9900],
                    [0.9093, -0.4161],
                    [0.8415, 0.5403],
                    [0.0000, 1.0000],
                    [-0.8415, 0.5403],
                    [-0.9093, -0.4161],
                    [-0.1411, -0.9900],
                ]
            ]
        )
        self.assertTrue(
            np.allclose(
                expected_pe.cpu().detach().numpy(),
                self.rel_pos_enc.pe.cpu().detach().numpy(),
                atol=1e-4,
            )
        )

    def test_forward(self):
        pos_enc = self.rel_pos_enc(self.sample)
        expected_pos_enc = torch.tensor(
            [
                [[0.9093, -0.4161]],
                [[0.8415, 0.5403]],
                [[0.0000, 1.0000]],
                [[-0.8415, 0.5403]],
                [[-0.9093, -0.4161]],
            ]
        )
        self.assertTrue(
            np.allclose(
                pos_enc.cpu().detach().numpy(),
                expected_pos_enc.cpu().detach().numpy(),
                atol=1e-4,
            )
        )


if __name__ == "__main__":
    unittest.main()
1,714
25.796875
75
py
sign-topic
sign-topic-main/tests/test_checkpoint_utils.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import contextlib import logging import os import tempfile import unittest from io import StringIO from unittest.mock import patch from omegaconf import OmegaConf from fairseq import checkpoint_utils from tests.utils import ( create_dummy_data, preprocess_translation_data, train_translation_model, ) import torch class TestCheckpointUtils(unittest.TestCase): def setUp(self): logging.disable(logging.CRITICAL) def tearDown(self): logging.disable(logging.NOTSET) @contextlib.contextmanager def _train_transformer(self, seed, extra_args=None): if extra_args is None: extra_args = [] with tempfile.TemporaryDirectory(f"_train_transformer_seed{seed}") as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir) train_translation_model( data_dir, "transformer_iwslt_de_en", [ "--encoder-layers", "3", "--decoder-layers", "3", "--encoder-embed-dim", "8", "--decoder-embed-dim", "8", "--seed", str(seed), ] + extra_args, ) yield os.path.join(data_dir, "checkpoint_last.pt") def test_load_model_ensemble_and_task(self): # with contextlib.redirect_stdout(StringIO()): with self._train_transformer(seed=123) as model1: with self._train_transformer(seed=456) as model2: ensemble, cfg, task = checkpoint_utils.load_model_ensemble_and_task( filenames=[model1, model2] ) self.assertEqual(len(ensemble), 2) # after Transformer has been migrated to Hydra, this will probably # become cfg.common.seed self.assertEqual(ensemble[0].args.seed, 123) self.assertEqual(ensemble[1].args.seed, 456) # the task from the first model should be returned self.assertTrue("seed123" in task.cfg.data) # last cfg is saved self.assertEqual(cfg.common.seed, 456) def test_prune_state_dict(self): with contextlib.redirect_stdout(StringIO()): extra_args = ["--encoder-layerdrop", "0.01", "--decoder-layerdrop", "0.01"] with self._train_transformer(seed=1, extra_args=extra_args) as model: ensemble, cfg, task = checkpoint_utils.load_model_ensemble_and_task( filenames=[model], arg_overrides={ "encoder_layers_to_keep": "0,2", "decoder_layers_to_keep": "1", }, ) self.assertEqual(len(ensemble), 1) self.assertEqual(len(ensemble[0].encoder.layers), 2) self.assertEqual(len(ensemble[0].decoder.layers), 1) def test_torch_persistent_save_async(self): state_dict = {} filename = "async_checkpoint.pt" with patch(f"{checkpoint_utils.__name__}.PathManager.opena") as mock_opena: with patch( f"{checkpoint_utils.__name__}._torch_persistent_save" ) as mock_save: checkpoint_utils.torch_persistent_save( state_dict, filename, async_write=True ) mock_opena.assert_called_with(filename, "wb") mock_save.assert_called() def test_load_ema_from_checkpoint(self): dummy_state = {"a": torch.tensor([1]), "b": torch.tensor([0.1])} with patch(f"{checkpoint_utils.__name__}.PathManager.open") as mock_open, patch( f"{checkpoint_utils.__name__}.torch.load") as mock_load: mock_load.return_value = { "extra_state": { "ema": dummy_state } } filename = "ema_checkpoint.pt" state = checkpoint_utils.load_ema_from_checkpoint(filename) mock_open.assert_called_with(filename, "rb") mock_load.assert_called() self.assertIn("a", state["model"]) self.assertIn("b", state["model"]) self.assertTrue(torch.allclose(dummy_state["a"], state["model"]["a"])) self.assertTrue(torch.allclose(dummy_state["b"], state["model"]["b"])) if __name__ == "__main__": unittest.main()
4,744
35.221374
88
py
sign-topic
sign-topic-main/tests/test_average_checkpoints.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import collections import os import shutil import tempfile import unittest import numpy as np import torch from scripts.average_checkpoints import average_checkpoints from torch import nn class ModelWithSharedParameter(nn.Module): def __init__(self): super(ModelWithSharedParameter, self).__init__() self.embedding = nn.Embedding(1000, 200) self.FC1 = nn.Linear(200, 200) self.FC2 = nn.Linear(200, 200) # tie weight in FC2 to FC1 self.FC2.weight = nn.Parameter(self.FC1.weight) self.FC2.bias = nn.Parameter(self.FC1.bias) self.relu = nn.ReLU() def forward(self, input): return self.FC2(self.ReLU(self.FC1(input))) + self.FC1(input) class TestAverageCheckpoints(unittest.TestCase): def test_average_checkpoints(self): params_0 = collections.OrderedDict( [ ("a", torch.DoubleTensor([100.0])), ("b", torch.FloatTensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])), ("c", torch.IntTensor([7, 8, 9])), ] ) params_1 = collections.OrderedDict( [ ("a", torch.DoubleTensor([1.0])), ("b", torch.FloatTensor([[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]])), ("c", torch.IntTensor([2, 2, 2])), ] ) params_avg = collections.OrderedDict( [ ("a", torch.DoubleTensor([50.5])), ("b", torch.FloatTensor([[1.0, 1.5, 2.0], [2.5, 3.0, 3.5]])), # We expect truncation for integer division ("c", torch.IntTensor([4, 5, 5])), ] ) fd_0, path_0 = tempfile.mkstemp() fd_1, path_1 = tempfile.mkstemp() torch.save(collections.OrderedDict([("model", params_0)]), path_0) torch.save(collections.OrderedDict([("model", params_1)]), path_1) output = average_checkpoints([path_0, path_1])["model"] os.close(fd_0) os.remove(path_0) os.close(fd_1) os.remove(path_1) for (k_expected, v_expected), (k_out, v_out) in zip( params_avg.items(), output.items() ): self.assertEqual( k_expected, k_out, "Key mismatch - expected {} but found {}. " "(Expected list of keys: {} vs actual list of keys: {})".format( k_expected, k_out, params_avg.keys(), output.keys() ), ) np.testing.assert_allclose( v_expected.numpy(), v_out.numpy(), err_msg="Tensor value mismatch for key {}".format(k_expected), ) def test_average_checkpoints_with_shared_parameters(self): def _construct_model_with_shared_parameters(path, value): m = ModelWithSharedParameter() nn.init.constant_(m.FC1.weight, value) torch.save({"model": m.state_dict()}, path) return m tmpdir = tempfile.mkdtemp() paths = [] path = os.path.join(tmpdir, "m1.pt") m1 = _construct_model_with_shared_parameters(path, 1.0) paths.append(path) path = os.path.join(tmpdir, "m2.pt") m2 = _construct_model_with_shared_parameters(path, 2.0) paths.append(path) path = os.path.join(tmpdir, "m3.pt") m3 = _construct_model_with_shared_parameters(path, 3.0) paths.append(path) new_model = average_checkpoints(paths) self.assertTrue( torch.equal( new_model["model"]["embedding.weight"], (m1.embedding.weight + m2.embedding.weight + m3.embedding.weight) / 3.0, ) ) self.assertTrue( torch.equal( new_model["model"]["FC1.weight"], (m1.FC1.weight + m2.FC1.weight + m3.FC1.weight) / 3.0, ) ) self.assertTrue( torch.equal( new_model["model"]["FC2.weight"], (m1.FC2.weight + m2.FC2.weight + m3.FC2.weight) / 3.0, ) ) shutil.rmtree(tmpdir) if __name__ == "__main__": unittest.main()
4,385
31.488889
88
py
sign-topic
sign-topic-main/tests/test_reproducibility.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import json import os import tempfile import unittest import torch from . import test_binaries class TestReproducibility(unittest.TestCase): def _test_reproducibility( self, name, extra_flags=None, delta=0.0001, resume_checkpoint="checkpoint1.pt", max_epoch=3, ): def get_last_log_stats_containing_string(log_records, search_string): for log_record in logs.records[::-1]: if isinstance(log_record.msg, str) and search_string in log_record.msg: return json.loads(log_record.msg) if extra_flags is None: extra_flags = [] with tempfile.TemporaryDirectory(name) as data_dir: with self.assertLogs() as logs: test_binaries.create_dummy_data(data_dir) test_binaries.preprocess_translation_data(data_dir) # train epochs 1 and 2 together with self.assertLogs() as logs: test_binaries.train_translation_model( data_dir, "fconv_iwslt_de_en", [ "--dropout", "0.0", "--log-format", "json", "--log-interval", "1", "--max-epoch", str(max_epoch), ] + extra_flags, ) train_log = get_last_log_stats_containing_string(logs.records, "train_loss") valid_log = get_last_log_stats_containing_string(logs.records, "valid_loss") # train epoch 2, resuming from previous checkpoint 1 os.rename( os.path.join(data_dir, resume_checkpoint), os.path.join(data_dir, "checkpoint_last.pt"), ) with self.assertLogs() as logs: test_binaries.train_translation_model( data_dir, "fconv_iwslt_de_en", [ "--dropout", "0.0", "--log-format", "json", "--log-interval", "1", "--max-epoch", str(max_epoch), ] + extra_flags, ) train_res_log = get_last_log_stats_containing_string( logs.records, "train_loss" ) valid_res_log = get_last_log_stats_containing_string( logs.records, "valid_loss" ) for k in ["train_loss", "train_ppl", "train_num_updates", "train_gnorm"]: self.assertAlmostEqual( float(train_log[k]), float(train_res_log[k]), delta=delta ) for k in [ "valid_loss", "valid_ppl", "valid_num_updates", "valid_best_loss", ]: self.assertAlmostEqual( float(valid_log[k]), float(valid_res_log[k]), delta=delta ) def test_reproducibility(self): self._test_reproducibility("test_reproducibility") @unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU") def test_reproducibility_fp16(self): self._test_reproducibility( "test_reproducibility_fp16", [ "--fp16", "--fp16-init-scale", "4096", ], delta=0.011, ) @unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU") def test_reproducibility_memory_efficient_fp16(self): self._test_reproducibility( "test_reproducibility_memory_efficient_fp16", [ "--memory-efficient-fp16", "--fp16-init-scale", "4096", ], ) @unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU") def test_reproducibility_amp(self): self._test_reproducibility( "test_reproducibility_amp", [ "--amp", "--fp16-init-scale", "4096", ], delta=0.011, ) def test_mid_epoch_reproducibility(self): self._test_reproducibility( "test_mid_epoch_reproducibility", ["--save-interval-updates", "3"], resume_checkpoint="checkpoint_1_3.pt", max_epoch=1, ) if __name__ == "__main__": unittest.main()
4,864
31.651007
88
py
sign-topic
sign-topic-main/tests/test_sequence_scorer.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import unittest import tests.utils as test_utils import torch from fairseq.sequence_scorer import SequenceScorer class TestSequenceScorer(unittest.TestCase): def test_sequence_scorer(self): # construct dummy dictionary d = test_utils.dummy_dictionary(vocab_size=2) self.assertEqual(d.pad(), 1) self.assertEqual(d.eos(), 2) self.assertEqual(d.unk(), 3) eos = d.eos() w1 = 4 w2 = 5 # construct dataloader data = [ { "source": torch.LongTensor([w1, w2, eos]), "target": torch.LongTensor([w1, w2, w1, eos]), }, { "source": torch.LongTensor([w2, eos]), "target": torch.LongTensor([w2, w1, eos]), }, { "source": torch.LongTensor([w2, eos]), "target": torch.LongTensor([w2, eos]), }, ] data_itr = test_utils.dummy_dataloader(data) # specify expected output probabilities args = argparse.Namespace() unk = 0.0 args.beam_probs = [ # step 0: torch.FloatTensor( [ # eos w1 w2 [0.0, unk, 0.6, 0.4], # sentence 1 [0.0, unk, 0.4, 0.6], # sentence 2 [0.0, unk, 0.7, 0.3], # sentence 3 ] ), # step 1: torch.FloatTensor( [ # eos w1 w2 [0.0, unk, 0.2, 0.7], # sentence 1 [0.0, unk, 0.8, 0.2], # sentence 2 [0.7, unk, 0.1, 0.2], # sentence 3 ] ), # step 2: torch.FloatTensor( [ # eos w1 w2 [0.10, unk, 0.50, 0.4], # sentence 1 [0.15, unk, 0.15, 0.7], # sentence 2 [0.00, unk, 0.00, 0.0], # sentence 3 ] ), # step 3: torch.FloatTensor( [ # eos w1 w2 [0.9, unk, 0.05, 0.05], # sentence 1 [0.0, unk, 0.00, 0.0], # sentence 2 [0.0, unk, 0.00, 0.0], # sentence 3 ] ), ] expected_scores = [ [0.6, 0.7, 0.5, 0.9], # sentence 1 [0.6, 0.8, 0.15], # sentence 2 [0.3, 0.7], # sentence 3 ] task = test_utils.TestTranslationTask.setup_task(args, d, d) model = task.build_model(args) scorer = SequenceScorer(task.target_dictionary) for sample in data_itr: hypos = task.inference_step(scorer, [model], sample) for id, hypos_id in zip(sample["id"].tolist(), hypos): self.assertHypoTokens(hypos_id[0], data[id]["target"]) self.assertHypoScore(hypos_id[0], expected_scores[id]) def assertHypoTokens(self, hypo, tokens): self.assertTensorEqual(hypo["tokens"], torch.LongTensor(tokens)) def assertHypoScore(self, hypo, pos_probs, normalized=True, lenpen=1.0): pos_scores = torch.FloatTensor(pos_probs).log() self.assertAlmostEqual(hypo["positional_scores"], pos_scores) self.assertEqual(pos_scores.numel(), hypo["tokens"].numel()) score = pos_scores.sum() if normalized: score /= pos_scores.numel() ** lenpen self.assertLess(abs(score - hypo["score"]), 1e-6) def assertAlmostEqual(self, t1, t2): self.assertEqual(t1.size(), t2.size(), "size mismatch") self.assertLess((t1 - t2).abs().max(), 1e-4) def assertTensorEqual(self, t1, t2): self.assertEqual(t1.size(), t2.size(), "size mismatch") self.assertEqual(t1.ne(t2).long().sum(), 0) if __name__ == "__main__": unittest.main()
4,150
33.305785
76
py
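The quantity checked by assertHypoScore above is a length-normalised log-probability (a restatement of the test's arithmetic, not new behaviour):

    score = (sum over t of log p_t) / T ** lenpen

so with normalized=True and lenpen=1.0 it reduces to the mean per-token log-probability of the hypothesis.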
sign-topic
sign-topic-main/tests/test_multi_corpus_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest from collections import OrderedDict import torch from fairseq.data import LanguagePairDataset, TokenBlockDataset from fairseq.data.multi_corpus_dataset import MultiCorpusDataset from tests.test_train import mock_dict class TestMultiCorpusDataset(unittest.TestCase): def setUp(self): d = mock_dict() tokens_1 = torch.LongTensor([i for i in range(1, 5000, 2)]).view(1, -1) tokens_ds1 = TokenBlockDataset( tokens_1, sizes=[tokens_1.size(-1)], block_size=1, pad=0, eos=1, include_targets=False, ) self.dataset_1 = LanguagePairDataset( tokens_ds1, tokens_ds1.sizes, d, shuffle=False ) tokens_2 = torch.LongTensor([i for i in range(0, 5000, 2)]).view(1, -1) tokens_ds2 = TokenBlockDataset( tokens_2, sizes=[tokens_2.size(-1)], block_size=1, pad=0, eos=1, include_targets=False, ) self.dataset_2 = LanguagePairDataset( tokens_ds2, tokens_ds2.sizes, d, shuffle=False ) def _test_sample_helper( self, distribution, ): m = MultiCorpusDataset( OrderedDict({0: self.dataset_1, 1: self.dataset_2}), distribution=distribution, seed=0, sort_indices=True, ) m.set_epoch(1) indices = m.ordered_indices() count_sample_from_first_dataset = 0 items = set() for i in indices: item = m[i]["source"].item() if item % 2 == 1: count_sample_from_first_dataset += 1 items.add(item) sample_from_first_ds_percentage = ( 1.0 * count_sample_from_first_dataset / len(indices) ) self.assertLess( abs(sample_from_first_ds_percentage - distribution[0]), 0.01, ) self.assertEqual( len(items), int( min(len(self.dataset_1), len(indices) * distribution[0]) + min(len(self.dataset_1), len(indices) * distribution[1]) ), ) print(distribution) def test_multi_corpus_dataset(self): for distribution in [[0.5, 0.5], [0.1, 0.9], [0.9, 0.1], [0.0, 1.0]]: self._test_sample_helper(distribution=distribution)
2,587
30.180723
79
py
sign-topic
sign-topic-main/tests/test_espnet_multihead_attention.py
import torch import numpy as np import unittest from fairseq.modules import ( ESPNETMultiHeadedAttention, RelPositionMultiHeadedAttention, RotaryPositionMultiHeadedAttention, ) torch.use_deterministic_algorithms(True) class TestESPNETMultiHeadedAttention(unittest.TestCase): def setUp(self) -> None: self.T = 3 self.B = 1 self.C = 2 torch.manual_seed(0) self.sample = torch.randn(self.T, self.B, self.C) # TBC self.sample_scores = torch.randn(self.B, 1, self.T, self.T) self.MHA = ESPNETMultiHeadedAttention(self.C, 1, dropout=0) def test_forward(self): expected_scores = torch.tensor( [[[0.1713, -0.3776]], [[0.2263, -0.4486]], [[0.2243, -0.4538]]] ) scores, _ = self.MHA(self.sample, self.sample, self.sample) self.assertTrue( np.allclose( expected_scores.cpu().detach().numpy(), scores.cpu().detach().numpy(), atol=1e-4, ) ) def test_forward_qkv(self): expected_query = torch.tensor( [[[[-1.0235, 0.0409], [0.4008, 1.3077], [0.5396, 2.0698]]]] ) expected_key = torch.tensor( [[[[0.5053, -0.4965], [-0.3730, -0.9473], [-0.7019, -0.1935]]]] ) expected_val = torch.tensor( [[[[-0.9940, 0.5403], [0.5924, -0.7619], [0.7504, -1.0892]]]] ) sample_t = self.sample.transpose(0, 1) query, key, val = self.MHA.forward_qkv(sample_t, sample_t, sample_t) self.assertTrue( np.allclose( expected_query.cpu().detach().numpy(), query.cpu().detach().numpy(), atol=1e-4, ) ) self.assertTrue( np.allclose( expected_key.cpu().detach().numpy(), key.cpu().detach().numpy(), atol=1e-4, ) ) self.assertTrue( np.allclose( expected_val.cpu().detach().numpy(), val.cpu().detach().numpy(), atol=1e-4, ) ) def test_forward_attention(self): expected_scores = torch.tensor( [[[0.1627, -0.6249], [-0.2547, -0.6487], [-0.0711, -0.8545]]] ) scores = self.MHA.forward_attention( self.sample.transpose(0, 1).view(self.B, 1, self.T, self.C), self.sample_scores, mask=None, ) self.assertTrue( np.allclose( expected_scores.cpu().detach().numpy(), scores.cpu().detach().numpy(), atol=1e-4, ) ) class TestRelPositionMultiHeadedAttention(unittest.TestCase): def setUp(self) -> None: self.T = 3 self.B = 1 self.C = 2 torch.manual_seed(0) self.sample = torch.randn(self.T, self.B, self.C) # TBC self.sample_x = torch.randn(self.B, 1, self.T, self.T * 2 - 1) self.sample_pos = torch.randn(self.B, self.T * 2 - 1, self.C) self.MHA = RelPositionMultiHeadedAttention(self.C, 1, dropout=0) def test_rel_shift(self): expected_x = torch.tensor( [ [ [ [-0.7193, -0.4033, -0.5966], [-0.8567, 1.1006, -1.0712], [-0.5663, 0.3731, -0.8920], ] ] ] ) x = self.MHA.rel_shift(self.sample_x) self.assertTrue( np.allclose( expected_x.cpu().detach().numpy(), x.cpu().detach().numpy(), atol=1e-4, ) ) def test_forward(self): expected_scores = torch.tensor( [ [[-0.9609, -0.5020]], [[-0.9308, -0.4890]], [[-0.9473, -0.4948]], [[-0.9609, -0.5020]], [[-0.9308, -0.4890]], [[-0.9473, -0.4948]], [[-0.9609, -0.5020]], [[-0.9308, -0.4890]], [[-0.9473, -0.4948]], [[-0.9609, -0.5020]], [[-0.9308, -0.4890]], [[-0.9473, -0.4948]], [[-0.9609, -0.5020]], [[-0.9308, -0.4890]], [[-0.9473, -0.4948]], ] ) scores, _ = self.MHA(self.sample, self.sample, self.sample, self.sample_pos) self.assertTrue( np.allclose( expected_scores.cpu().detach().numpy(), scores.cpu().detach().numpy(), atol=1e-4, ) ) class TestRotaryPositionMultiHeadedAttention(unittest.TestCase): def setUp(self) -> None: self.T = 3 self.B = 1 self.C = 2 torch.manual_seed(0) self.sample = torch.randn(self.T, self.B, self.C) # TBC self.MHA = RotaryPositionMultiHeadedAttention( self.C, 1, dropout=0, precision=None ) def test_forward(self): expected_scores = 
torch.tensor( [[[-0.3220, -0.4726]], [[-1.2813, -0.0979]], [[-0.3138, -0.4758]]] ) scores, _ = self.MHA(self.sample, self.sample, self.sample) self.assertTrue( np.allclose( expected_scores.cpu().detach().numpy(), scores.cpu().detach().numpy(), atol=1e-4, ) ) if __name__ == "__main__": unittest.main()
5,548
30.350282
84
py
sign-topic
sign-topic-main/tests/test_memory_efficient_fp16.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import logging import unittest import torch from fairseq.optim.adam import FairseqAdam from fairseq.optim.fp16_optimizer import MemoryEfficientFP16Optimizer from omegaconf import OmegaConf @unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU") class TestMemoryEfficientFP16(unittest.TestCase): def setUp(self): logging.disable(logging.CRITICAL) def tearDown(self): logging.disable(logging.NOTSET) def test_load_state_dict(self): # define simple FP16 model model = torch.nn.Linear(5, 5).cuda().half() params = list(model.parameters()) # initialize memory efficient FP16 optimizer # with pseudo DictConfigs optimizer = FairseqAdam( cfg=OmegaConf.create( vars( argparse.Namespace( adam_betas="(0.9, 0.999)", adam_eps=1e-8, weight_decay=0.0, lr=[0.00001], ) ) ), params=params, ) me_optimizer = MemoryEfficientFP16Optimizer( cfg=OmegaConf.create( { "common": vars( argparse.Namespace( fp16_init_scale=1, fp16_scale_window=1, fp16_scale_tolerance=1, threshold_loss_scale=1, min_loss_scale=1e-4, ) ) } ), params=params, optimizer=optimizer, ) # optimizer state is created in the first step loss = model(torch.rand(5).cuda().half()).sum() me_optimizer.backward(loss) me_optimizer.step() # reload state state = me_optimizer.state_dict() me_optimizer.load_state_dict(state) for k, v in me_optimizer.optimizer.state.items(): self.assertTrue(k.dtype == torch.float16) for v_i in v.values(): if torch.is_tensor(v_i): self.assertTrue(v_i.dtype == torch.float32) if __name__ == "__main__": unittest.main()
2,452
30.050633
70
py
sign-topic
sign-topic-main/tests/test_hf_hub.py
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import unittest

import torch

try:
    import huggingface_hub
except ImportError:
    huggingface_hub = None

from fairseq.checkpoint_utils import load_model_ensemble_and_task_from_hf_hub


@unittest.skipIf(not huggingface_hub, "Requires huggingface_hub install")
class TestHuggingFaceHub(unittest.TestCase):
    @torch.no_grad()
    def test_hf_fastspeech2(self):
        hf_model_id = "facebook/fastspeech2-en-ljspeech"
        models, cfg, task = load_model_ensemble_and_task_from_hf_hub(hf_model_id)
        self.assertTrue(len(models) > 0)


if __name__ == "__main__":
    unittest.main()
796
25.566667
81
py
sign-topic
sign-topic-main/tests/test_ema.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest from copy import deepcopy from dataclasses import dataclass from typing import Optional import torch from fairseq.models.ema import EMA class DummyModule(torch.nn.Module): def __init__(self) -> None: """LightningModule for testing purposes Args: epoch_min_loss_override (int, optional): Pass in an epoch that will be set to the minimum validation loss for testing purposes (zero based). If None this is ignored. Defaults to None. """ super().__init__() self.layer = torch.nn.Linear(in_features=32, out_features=2) self.another_layer = torch.nn.Linear(in_features=2, out_features=2) def forward(self, x: torch.Tensor) -> torch.Tensor: x = self.layer(x) return self.another_layer(x) @dataclass class EMAConfig(object): ema_decay: float = 0.99 ema_start_update: int = 0 ema_fp32: bool = False ema_seed_model: Optional[str] = None class TestEMAGPU(unittest.TestCase): def assertTorchAllClose(self, x, y, atol=1e-8, rtol=1e-5, msg=None): diff = x.float() - y.float() diff_norm = torch.norm(diff) other_norm = torch.norm(y.float()) if msg is None: msg = "|input - other| > {} + {} * |other|".format(atol, rtol) self.assertLessEqual( diff_norm, atol + rtol * other_norm, msg=msg, ) def test_ema(self): model = DummyModule() optimizer = torch.optim.SGD(model.parameters(), lr=0.01) state = deepcopy(model.state_dict()) config = EMAConfig() ema = EMA(model, config) # set decay ema._set_decay(config.ema_decay) self.assertEqual(ema.get_decay(), config.ema_decay) # get model self.assertEqual(ema.get_model(), ema.model) # Since fp32 params is not used, it should be of size 0 self.assertEqual(len(ema.fp32_params), 0) # EMA step x = torch.randn(32) y = model(x) loss = y.sum() loss.backward() optimizer.step() ema.step(model) ema_state_dict = ema.get_model().state_dict() for key, param in model.state_dict().items(): prev_param = state[key] ema_param = ema_state_dict[key] if "version" in key: # Do not decay a model.version pytorch param continue self.assertTorchAllClose( ema_param, config.ema_decay * prev_param + (1 - config.ema_decay) * param, ) # Since fp32 params is not used, it should be of size 0 self.assertEqual(len(ema.fp32_params), 0) # Load EMA into model model2 = DummyModule() ema.reverse(model2) for key, param in model2.state_dict().items(): ema_param = ema_state_dict[key] self.assertTrue(torch.allclose(ema_param, param)) def test_ema_fp32(self): model = DummyModule().half() optimizer = torch.optim.SGD(model.parameters(), lr=0.01) state = deepcopy(model.state_dict()) config = EMAConfig(ema_fp32=True) ema = EMA(model, config) x = torch.randn(32) y = model(x.half()) loss = y.sum() loss.backward() optimizer.step() ema.step(model) for key, param in model.state_dict().items(): prev_param = state[key] ema_param = ema.get_model().state_dict()[key] if "version" in key: # Do not decay a model.version pytorch param continue self.assertIn(key, ema.fp32_params) # EMA update is done in fp32, and hence the EMA param must be # closer to the EMA update done in fp32 than in fp16. 
self.assertLessEqual( torch.norm( ema_param.float() - ( config.ema_decay * prev_param.float() + (1 - config.ema_decay) * param.float() ) .half() .float() ), torch.norm( ema_param.float() - ( config.ema_decay * prev_param + (1 - config.ema_decay) * param ).float() ), ) self.assertTorchAllClose( ema_param, ( config.ema_decay * prev_param.float() + (1 - config.ema_decay) * param.float() ).half(), ) def test_ema_fp16(self): model = DummyModule().half() optimizer = torch.optim.SGD(model.parameters(), lr=0.01) state = deepcopy(model.state_dict()) config = EMAConfig(ema_fp32=False) ema = EMA(model, config) # Since fp32 params is not used, it should be of size 0 self.assertEqual(len(ema.fp32_params), 0) x = torch.randn(32) y = model(x.half()) loss = y.sum() loss.backward() optimizer.step() ema.step(model) for key, param in model.state_dict().items(): prev_param = state[key] ema_param = ema.get_model().state_dict()[key] if "version" in key: # Do not decay a model.version pytorch param continue # EMA update is done in fp16, and hence the EMA param must be # closer to the EMA update done in fp16 than in fp32. self.assertLessEqual( torch.norm( ema_param.float() - ( config.ema_decay * prev_param + (1 - config.ema_decay) * param ).float() ), torch.norm( ema_param.float() - ( config.ema_decay * prev_param.float() + (1 - config.ema_decay) * param.float() ) .half() .float() ), ) self.assertTorchAllClose( ema_param, config.ema_decay * prev_param + (1 - config.ema_decay) * param, ) # Since fp32 params is not used, it should be of size 0 self.assertEqual(len(ema.fp32_params), 0) if __name__ == "__main__": unittest.main()
6,676
30.200935
109
py
sign-topic
sign-topic-main/tests/test_lstm_jitable.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import argparse
import tempfile
import unittest

import torch
from fairseq.data.dictionary import Dictionary
from fairseq.models.lstm import LSTMModel
from fairseq.tasks.fairseq_task import LegacyFairseqTask

DEFAULT_TEST_VOCAB_SIZE = 100


class DummyTask(LegacyFairseqTask):
    def __init__(self, args):
        super().__init__(args)
        self.dictionary = get_dummy_dictionary()
        if getattr(self.args, "ctc", False):
            self.dictionary.add_symbol("<ctc_blank>")
        self.src_dict = self.dictionary
        self.tgt_dict = self.dictionary

    @property
    def source_dictionary(self):
        return self.src_dict

    @property
    def target_dictionary(self):
        return self.dictionary


def get_dummy_dictionary(vocab_size=DEFAULT_TEST_VOCAB_SIZE):
    dummy_dict = Dictionary()
    # add dummy symbol to satisfy vocab size
    for id, _ in enumerate(range(vocab_size)):
        dummy_dict.add_symbol("{}".format(id), 1000)
    return dummy_dict


def get_dummy_task_and_parser():
    """
    To build a fairseq model, we need a dummy parser and task. This function
    is used to create a dummy task and parser to facilitate model/criterion
    tests.

    Note: we use FbSpeechRecognitionTask as the dummy task. You may want to
    use other tasks by providing another function.
    """
    parser = argparse.ArgumentParser(
        description="test_dummy_s2s_task", argument_default=argparse.SUPPRESS
    )
    DummyTask.add_args(parser)
    args = parser.parse_args([])
    task = DummyTask.setup_task(args)
    return task, parser


class TestJitLSTMModel(unittest.TestCase):
    def _test_save_and_load(self, scripted_module):
        with tempfile.NamedTemporaryFile() as f:
            scripted_module.save(f.name)
            torch.jit.load(f.name)

    def assertTensorEqual(self, t1, t2):
        t1 = t1[~torch.isnan(t1)]  # can cause size mismatch errors if there are NaNs
        t2 = t2[~torch.isnan(t2)]
        self.assertEqual(t1.size(), t2.size(), "size mismatch")
        self.assertEqual(t1.ne(t2).long().sum(), 0)

    def test_jit_and_export_lstm(self):
        task, parser = get_dummy_task_and_parser()
        LSTMModel.add_args(parser)
        args = parser.parse_args([])
        args.criterion = ""
        model = LSTMModel.build_model(args, task)
        scripted_model = torch.jit.script(model)
        self._test_save_and_load(scripted_model)

    def test_assert_jit_vs_nonjit_(self):
        task, parser = get_dummy_task_and_parser()
        LSTMModel.add_args(parser)
        args = parser.parse_args([])
        args.criterion = ""
        model = LSTMModel.build_model(args, task)
        model.eval()
        scripted_model = torch.jit.script(model)
        scripted_model.eval()
        idx = len(task.source_dictionary)
        iter = 100
        # Inject random input and check output
        seq_len_tensor = torch.randint(1, 10, (iter,))
        num_samples_tensor = torch.randint(1, 10, (iter,))
        for i in range(iter):
            seq_len = seq_len_tensor[i]
            num_samples = num_samples_tensor[i]
            src_token = (torch.randint(0, idx, (num_samples, seq_len)),)
            src_lengths = torch.randint(1, seq_len + 1, (num_samples,))
            src_lengths, _ = torch.sort(src_lengths, descending=True)
            # Force the first sample to have seq_len
            src_lengths[0] = seq_len
            prev_output_token = (torch.randint(0, idx, (num_samples, 1)),)
            result = model(src_token[0], src_lengths, prev_output_token[0], None)
            scripted_result = scripted_model(
                src_token[0], src_lengths, prev_output_token[0], None
            )
            self.assertTensorEqual(result[0], scripted_result[0])
            self.assertTensorEqual(result[1], scripted_result[1])


if __name__ == "__main__":
    unittest.main()
4,041
33.844828
85
py
sign-topic
sign-topic-main/tests/test_multihead_attention.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import unittest

import torch

from fairseq.modules.multihead_attention import MultiheadAttention


class TestMultiheadAttention(unittest.TestCase):
    def test_append_prev_key_padding_mask(self):
        bsz = 1
        src_len = 4

        cases = [
            # no padding mask
            (None, None, None),
            # current padding mask only
            (
                torch.tensor([[1]]).bool(),
                None,
                torch.tensor([[0, 0, 0, 1]]).bool(),
            ),
            # previous padding mask only
            (
                None,
                torch.tensor([[0, 1, 0]]).bool(),
                torch.tensor([[0, 1, 0, 0]]).bool(),
            ),
            # both padding masks
            (
                torch.tensor([[1]]).bool(),
                torch.tensor([[0, 1, 0]]).bool(),
                torch.tensor([[0, 1, 0, 1]]).bool(),
            ),
            # prev_key_padding_mask already full
            (
                torch.tensor([[0, 1, 0, 1]]).bool(),
                None,
                torch.tensor([[0, 1, 0, 1]]).bool(),
            ),
            # key_padding_mask already full
            (
                None,
                torch.tensor([[0, 1, 0, 1]]).bool(),
                torch.tensor([[0, 1, 0, 1]]).bool(),
            ),
        ]

        for c in cases:
            key_padding_mask = MultiheadAttention._append_prev_key_padding_mask(
                c[0],
                c[1],
                batch_size=bsz,
                src_len=src_len,
                static_kv=False,
            )

            if key_padding_mask is not None:
                self.assertTrue(
                    torch.all(torch.eq(key_padding_mask, c[2])),
                    f"Unexpected resultant key padding mask: {key_padding_mask}"
                    f" given current: {c[0]} and previous: {c[1]}",
                )
                self.assertEqual(key_padding_mask.size(0), bsz)
                self.assertEqual(key_padding_mask.size(1), src_len)
            else:
                self.assertIsNone(c[2])

    def test_pruning_heads(self):
        embed_dim = 768
        num_heads = 12
        num_heads_to_keep = 8
        dummy_input = torch.randn(32, 2, embed_dim)
        mha = MultiheadAttention(embed_dim=embed_dim, num_heads=num_heads)
        reserve_head_index = mha._get_reserve_head_index(num_heads_to_keep=num_heads_to_keep)
        mha._adaptive_prune_heads(reserve_head_index=reserve_head_index)
        mha._set_skip_embed_dim_check()
        mha(query=dummy_input, key=dummy_input, value=dummy_input)
        self.assertEqual(mha.head_dim, embed_dim / num_heads)
        self.assertEqual(mha.num_heads, num_heads_to_keep)


if __name__ == "__main__":
    unittest.main()
2,943
33.232558
93
py
sign-topic
sign-topic-main/tests/utils.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import json import os import random import shutil import string import sys import typing as tp from io import StringIO import torch import torch.nn.functional as F import fairseq.distributed.utils as distributed_utils from fairseq import options, utils from fairseq.data import Dictionary from fairseq.data.language_pair_dataset import collate from fairseq.dataclass.utils import convert_namespace_to_omegaconf from fairseq.models import ( FairseqEncoder, FairseqEncoderDecoderModel, FairseqIncrementalDecoder, ) from fairseq.models.fairseq_encoder import EncoderOut from fairseq.tasks import LegacyFairseqTask from fairseq_cli import generate, interactive, preprocess, train, validate def dummy_dictionary(vocab_size, prefix="token_"): d = Dictionary() for i in range(vocab_size): token = prefix + str(i) d.add_symbol(token) d.finalize(padding_factor=1) # don't add extra padding symbols return d def dummy_dataloader( samples, padding_idx=1, eos_idx=2, batch_size=None, ): if batch_size is None: batch_size = len(samples) # add any missing data to samples for i, sample in enumerate(samples): if "id" not in sample: sample["id"] = i # create dataloader dataset = TestDataset(samples) dataloader = torch.utils.data.DataLoader( dataset, batch_size=batch_size, collate_fn=(lambda samples: collate(samples, padding_idx, eos_idx)), ) return iter(dataloader) def sequence_generator_setup(): # construct dummy dictionary d = dummy_dictionary(vocab_size=2) eos = d.eos() w1 = 4 w2 = 5 # construct source data src_tokens = torch.LongTensor([[w1, w2, eos], [w1, w2, eos]]) src_lengths = torch.LongTensor([2, 2]) args = argparse.Namespace() unk = 0.0 args.beam_probs = [ # step 0: torch.FloatTensor( [ # eos w1 w2 # sentence 1: [0.0, unk, 0.9, 0.1], # beam 1 [0.0, unk, 0.9, 0.1], # beam 2 # sentence 2: [0.0, unk, 0.7, 0.3], [0.0, unk, 0.7, 0.3], ] ), # step 1: torch.FloatTensor( [ # eos w1 w2 prefix # sentence 1: [1.0, unk, 0.0, 0.0], # w1: 0.9 (emit: w1 <eos>: 0.9*1.0) [0.0, unk, 0.9, 0.1], # w2: 0.1 # sentence 2: [0.25, unk, 0.35, 0.4], # w1: 0.7 (don't emit: w1 <eos>: 0.7*0.25) [0.00, unk, 0.10, 0.9], # w2: 0.3 ] ), # step 2: torch.FloatTensor( [ # eos w1 w2 prefix # sentence 1: [0.0, unk, 0.1, 0.9], # w2 w1: 0.1*0.9 [ 0.6, unk, 0.2, 0.2, ], # w2 w2: 0.1*0.1 (emit: w2 w2 <eos>: 0.1*0.1*0.6) # sentence 2: [ 0.60, unk, 0.4, 0.00, ], # w1 w2: 0.7*0.4 (emit: w1 w2 <eos>: 0.7*0.4*0.6) [0.01, unk, 0.0, 0.99], # w2 w2: 0.3*0.9 ] ), # step 3: torch.FloatTensor( [ # eos w1 w2 prefix # sentence 1: [ 1.0, unk, 0.0, 0.0, ], # w2 w1 w2: 0.1*0.9*0.9 (emit: w2 w1 w2 <eos>: 0.1*0.9*0.9*1.0) [ 1.0, unk, 0.0, 0.0, ], # w2 w1 w1: 0.1*0.9*0.1 (emit: w2 w1 w1 <eos>: 0.1*0.9*0.1*1.0) # sentence 2: [ 0.1, unk, 0.5, 0.4, ], # w2 w2 w2: 0.3*0.9*0.99 (emit: w2 w2 w2 <eos>: 0.3*0.9*0.99*0.1) [ 1.0, unk, 0.0, 0.0, ], # w1 w2 w1: 0.7*0.4*0.4 (emit: w1 w2 w1 <eos>: 0.7*0.4*0.4*1.0) ] ), ] task = TestTranslationTask.setup_task(args, d, d) model = task.build_model(args) tgt_dict = task.target_dictionary return tgt_dict, w1, w2, src_tokens, src_lengths, model def create_dummy_data( data_dir, num_examples=100, maxlen=20, alignment=False, languages=None ): def _create_dummy_data(dir, filename): data = torch.rand(num_examples * maxlen) data = 97 + torch.floor(26 * data).int() with open(os.path.join(dir, filename), "w") as h: offset = 0 for _ in range(num_examples): 
ex_len = random.randint(1, maxlen) ex_str = " ".join(map(chr, data[offset : offset + ex_len])) print(ex_str, file=h) offset += ex_len def _create_dummy_alignment_data(filename_src, filename_tgt, filename): with open(os.path.join(data_dir, filename_src), "r") as src_f, open( os.path.join(data_dir, filename_tgt), "r" ) as tgt_f, open(os.path.join(data_dir, filename), "w") as h: for src, tgt in zip(src_f, tgt_f): src_len = len(src.split()) tgt_len = len(tgt.split()) avg_len = (src_len + tgt_len) // 2 num_alignments = random.randint(avg_len // 2, 2 * avg_len) src_indices = torch.floor(torch.rand(num_alignments) * src_len).int() tgt_indices = torch.floor(torch.rand(num_alignments) * tgt_len).int() ex_str = " ".join( [ "{}-{}".format(src, tgt) for src, tgt in zip(src_indices, tgt_indices) ] ) print(ex_str, file=h) files_to_write = [ "train.in", "train.out", "valid.in", "valid.out", "test.in", "test.out", ] if languages is None: # En only dummy dataset for f in files_to_write: _create_dummy_data(data_dir, f) else: for lang in languages: lang_dir = os.path.join(data_dir, lang) os.makedirs(lang_dir, exist_ok=True) for f in files_to_write: _create_dummy_data(lang_dir, f) if alignment: _create_dummy_alignment_data("train.in", "train.out", "train.align") _create_dummy_alignment_data("valid.in", "valid.out", "valid.align") _create_dummy_alignment_data("test.in", "test.out", "test.align") def preprocess_lm_data(data_dir, languages=None): preprocess_parser = options.get_preprocessing_parser() if languages is None: preprocess_args = preprocess_parser.parse_args( [ "--only-source", "--trainpref", os.path.join(data_dir, "train.out"), "--validpref", os.path.join(data_dir, "valid.out"), "--testpref", os.path.join(data_dir, "test.out"), "--destdir", data_dir, ] ) preprocess.main(preprocess_args) else: for lang in languages: lang_dir = os.path.join(data_dir, lang) assert os.path.exists(lang_dir) preprocess_args = preprocess_parser.parse_args( [ "--only-source", "--trainpref", os.path.join(lang_dir, "train.out"), "--validpref", os.path.join(lang_dir, "valid.out"), "--testpref", os.path.join(lang_dir, "test.out"), "--destdir", lang_dir, ] ) preprocess.main(preprocess_args) shutil.copyfile( os.path.join(data_dir, languages[0], "dict.txt"), os.path.join(data_dir, "dict.txt"), ) def preprocess_translation_data(data_dir, extra_flags=None): preprocess_parser = options.get_preprocessing_parser() preprocess_args = preprocess_parser.parse_args( [ "--source-lang", "in", "--target-lang", "out", "--trainpref", os.path.join(data_dir, "train"), "--validpref", os.path.join(data_dir, "valid"), "--testpref", os.path.join(data_dir, "test"), "--thresholdtgt", "0", "--thresholdsrc", "0", "--destdir", data_dir, ] + (extra_flags or []), ) preprocess.main(preprocess_args) def preprocess_summarization_data(data_dir, extra_flags=None): preprocess_parser = options.get_preprocessing_parser() preprocess_args = preprocess_parser.parse_args( [ "--source-lang", "in", "--target-lang", "out", "--trainpref", os.path.join(data_dir, "train"), "--validpref", os.path.join(data_dir, "valid"), "--testpref", os.path.join(data_dir, "test"), "--thresholdtgt", "0", "--thresholdsrc", "0", "--joined-dictionary", "--destdir", data_dir, ] + (extra_flags or []), ) preprocess.main(preprocess_args) def create_laser_data_and_config_json(data_dir): src_langs = ["de", "fr", "ru", "tr", "zh"] tgt_langs = ["en", "es"] config_json = {} config_train_json = [] src_vocab = None tgt_vocab = None for src_lang in src_langs: for tgt_lang in tgt_langs: langpair_folder = 
f"{src_lang}-{tgt_lang}" langpair_path = os.path.join(data_dir, langpair_folder) os.mkdir(langpair_path) create_dummy_data(langpair_path) preprocess_translation_data(langpair_path, ["--dataset-impl", "cached"]) src_vocab = os.path.join(langpair_path, "dict.in.txt") tgt_vocab = os.path.join(langpair_path, "dict.out.txt") config_train_json.append( { "id": 0 if tgt_lang == "en" else 1, "src": os.path.join(langpair_path, "train.in-out.in"), "tgt": os.path.join(langpair_path, "train.in-out.out"), } ) config_json["src_vocab"] = src_vocab config_json["tgt_vocab"] = tgt_vocab config_json["train"] = config_train_json with open(os.path.join(data_dir, "laserconfig.json"), "w") as config_file: json.dump(config_json, config_file) return config_file def train_translation_model( data_dir, arch, extra_flags=None, task="translation", run_validation=False, lang_flags=None, extra_valid_flags=None, world_size=1, ): if lang_flags is None: lang_flags = [ "--source-lang", "in", "--target-lang", "out", ] train_parser = options.get_training_parser() train_args = options.parse_args_and_arch( train_parser, [ "--task", task, data_dir, "--save-dir", data_dir, "--arch", arch, "--optimizer", "nag", "--lr", "0.05", "--max-tokens", "500", "--max-epoch", "1", "--no-progress-bar", "--distributed-world-size", str(world_size), "--num-workers", "0", ] + lang_flags + (extra_flags or []), ) cfg = convert_namespace_to_omegaconf(train_args) distributed_utils.call_main(cfg, train.main) if run_validation: # test validation validate_parser = options.get_validation_parser() validate_args = options.parse_args_and_arch( validate_parser, [ "--task", task, data_dir, "--path", os.path.join(data_dir, "checkpoint_last.pt"), "--valid-subset", "valid", "--max-tokens", "500", "--no-progress-bar", "--num-workers", "0", ] + lang_flags + (extra_valid_flags or []), ) validate.main(validate_args) def generate_main(data_dir, extra_flags=None, path=None): if extra_flags is None: extra_flags = [ "--print-alignment", ] if path is None: path = os.path.join(data_dir, "checkpoint_last.pt") generate_parser = options.get_generation_parser() generate_args = options.parse_args_and_arch( generate_parser, [ data_dir, "--path", path, "--beam", "3", "--batch-size", "64", "--max-len-b", "5", "--gen-subset", "valid", "--no-progress-bar", "--num-workers", "0", ] + (extra_flags or []), ) # evaluate model in batch mode generate.main(generate_args) # evaluate model interactively generate_args.buffer_size = 0 generate_args.input = "-" generate_args.batch_size = None orig_stdin = sys.stdin sys.stdin = StringIO("h e l l o\n") interactive.main(generate_args) sys.stdin = orig_stdin class TestDataset(torch.utils.data.Dataset): def __init__(self, data): super().__init__() self.data = data self.sizes = None def __getitem__(self, index): return self.data[index] def __len__(self): return len(self.data) class TestTranslationTask(LegacyFairseqTask): def __init__(self, args, src_dict, tgt_dict, model): super().__init__(args) self.src_dict = src_dict self.tgt_dict = tgt_dict self.model = model @classmethod def setup_task(cls, args, src_dict=None, tgt_dict=None, model=None): return cls(args, src_dict, tgt_dict, model) def build_model(self, args): return TestModel.build_model(args, self) @property def source_dictionary(self): return self.src_dict @property def target_dictionary(self): return self.tgt_dict class TestModel(FairseqEncoderDecoderModel): def __init__(self, encoder, decoder): super().__init__(encoder, decoder) @classmethod def build_model(cls, args, task): encoder = 
TestEncoder(args, task.source_dictionary) decoder = TestIncrementalDecoder(args, task.target_dictionary) return cls(encoder, decoder) class TestEncoder(FairseqEncoder): def __init__(self, args, dictionary): super().__init__(dictionary) self.args = args def forward(self, src_tokens, src_lengths=None, **kwargs): return EncoderOut( encoder_out=src_tokens, encoder_padding_mask=None, encoder_embedding=None, encoder_states=None, src_tokens=None, src_lengths=None, ) def reorder_encoder_out(self, encoder_out, new_order): return EncoderOut( encoder_out=encoder_out.encoder_out.index_select(0, new_order), encoder_padding_mask=None, encoder_embedding=None, encoder_states=None, src_tokens=None, src_lengths=None, ) class TestIncrementalDecoder(FairseqIncrementalDecoder): def __init__(self, args, dictionary): super().__init__(dictionary) assert hasattr(args, "beam_probs") or hasattr(args, "probs") args.max_decoder_positions = getattr(args, "max_decoder_positions", 100) self.args = args def forward(self, prev_output_tokens, encoder_out=None, incremental_state=None): if incremental_state is not None: prev_output_tokens = prev_output_tokens[:, -1:] bbsz = prev_output_tokens.size(0) vocab = len(self.dictionary) src_len = encoder_out.encoder_out.size(1) tgt_len = prev_output_tokens.size(1) # determine number of steps if incremental_state is not None: # cache step number step = utils.get_incremental_state(self, incremental_state, "step") if step is None: step = 0 utils.set_incremental_state(self, incremental_state, "step", step + 1) steps = [step] else: steps = list(range(tgt_len)) # define output in terms of raw probs if hasattr(self.args, "probs"): assert ( self.args.probs.dim() == 3 ), "expected probs to have size bsz*steps*vocab" probs = self.args.probs.index_select(1, torch.LongTensor(steps)) else: probs = torch.FloatTensor(bbsz, len(steps), vocab).zero_() for i, step in enumerate(steps): # args.beam_probs gives the probability for every vocab element, # starting with eos, then unknown, and then the rest of the vocab if step < len(self.args.beam_probs): probs[:, i, self.dictionary.eos() :] = self.args.beam_probs[step] else: probs[:, i, self.dictionary.eos()] = 1.0 # random attention attn = torch.rand(bbsz, tgt_len, src_len) dev = prev_output_tokens.device return probs.to(dev), {"attn": [attn.to(dev)]} def get_normalized_probs(self, net_output, log_probs, _): # the decoder returns probabilities directly probs = net_output[0] if log_probs: return probs.log() else: return probs def max_positions(self): return self.args.max_decoder_positions class TestReshapingEncoder(FairseqEncoder): def __init__(self, args, dictionary): super().__init__(dictionary) self.args = args def forward(self, src_tokens, src_lengths=None, **kwargs): b_sz, t_sz = src_tokens.shape padding_needed = t_sz % 2 x = src_tokens if padding_needed > 0: padding_needed = 2 - padding_needed x = F.pad(x, (0, padding_needed)) return EncoderOut( encoder_out=x.view(b_sz, -1, 2), encoder_padding_mask=None, encoder_embedding=None, encoder_states=None, src_tokens=None, src_lengths=None, ) def reorder_encoder_out(self, encoder_out, new_order): return EncoderOut( encoder_out=encoder_out.encoder_out.index_select(0, new_order), encoder_padding_mask=None, encoder_embedding=None, encoder_states=None, src_tokens=None, src_lengths=None, ) class TestReshapingModel(FairseqEncoderDecoderModel): def __init__(self, encoder, decoder): super().__init__(encoder, decoder) @classmethod def build_model(cls, args, task): encoder = TestReshapingEncoder(args, 
task.source_dictionary) decoder = TestIncrementalDecoder(args, task.target_dictionary) return cls(encoder, decoder) class TestAdditionalInputEncoder(FairseqEncoder): def __init__(self, args, dictionary): super().__init__(dictionary) self.args = args def forward(self, src_tokens, src_lengths=None, **kwargs): assert "fancy_other_input" in kwargs assert kwargs["fancy_other_input"] is not None return EncoderOut( encoder_out=src_tokens, encoder_padding_mask=None, encoder_embedding=None, encoder_states=None, src_tokens=None, src_lengths=None, ) def reorder_encoder_out(self, encoder_out, new_order): return EncoderOut( encoder_out=encoder_out.encoder_out.index_select(0, new_order), encoder_padding_mask=None, encoder_embedding=None, encoder_states=None, src_tokens=None, src_lengths=None, ) class TestAdditionalInputModel(FairseqEncoderDecoderModel): def __init__(self, encoder, decoder): super().__init__(encoder, decoder) @classmethod def build_model(cls, args, task): encoder = TestAdditionalInputEncoder(args, task.source_dictionary) decoder = TestIncrementalDecoder(args, task.target_dictionary) return cls(encoder, decoder) def forward(self, src_tokens, src_lengths, prev_output_tokens, **kwargs): encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs) decoder_out = self.decoder( prev_output_tokens, encoder_out=encoder_out, **kwargs ) return decoder_out def train_language_model( data_dir, arch, extra_flags=None, run_validation=False, extra_valid_flags=None, task="language_modeling", world_size=1, ): train_parser = options.get_training_parser() train_args = options.parse_args_and_arch( train_parser, [ "--task", task, data_dir, "--arch", arch, "--optimizer", "adam", "--lr", "0.0001", "--max-tokens", "500", "--tokens-per-sample", "500", "--save-dir", data_dir, "--max-epoch", "1", "--no-progress-bar", "--distributed-world-size", str(world_size), "--ddp-backend", "no_c10d", "--num-workers", "0", ] + (extra_flags or []), ) cfg = convert_namespace_to_omegaconf(train_args) distributed_utils.call_main(cfg, train.main) if run_validation: # test validation validate_parser = options.get_validation_parser() validate_args = options.parse_args_and_arch( validate_parser, [ "--task", task, data_dir, "--path", os.path.join(data_dir, "checkpoint_last.pt"), "--valid-subset", "valid", "--max-tokens", "500", "--no-progress-bar", "--num-workers", "0", ] + (extra_valid_flags or []), ) validate.main(validate_args) def sizes(data): return [len(sentence) for sentence in data] POPULATION = string.ascii_letters + string.digits def make_sentence() -> tp.List[str]: length = random.randint(10, 50) return random.choices( population=POPULATION, k=length, weights=range(1, len(POPULATION) + 1) ) def make_data(length=1000, out_file=None) -> tp.List[tp.List[str]]: data = ( [make_sentence() for _ in range(0, length)] # add all the symbols at least once + [list(string.ascii_letters), list(string.digits)] ) if out_file is not None: with open(out_file, "w", encoding="utf-8") as out: for s in data: print(" ".join(s), file=out) return data
23,931
29.332066
86
py
sign-topic
sign-topic-main/tests/test_binaries.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import contextlib import json import logging import os import random import sys import tempfile import unittest from io import StringIO from typing import Dict, List import torch from fairseq import options from fairseq_cli import eval_lm, train from tests.utils import ( create_dummy_data, create_laser_data_and_config_json, generate_main, preprocess_lm_data, preprocess_summarization_data, preprocess_translation_data, train_language_model, train_translation_model, ) try: import transformers # noqa has_hf_transformers = True except ImportError: has_hf_transformers = False class TestTranslation(unittest.TestCase): def setUp(self): logging.disable(logging.CRITICAL) def tearDown(self): logging.disable(logging.NOTSET) def test_fconv(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_fconv") as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir) train_translation_model(data_dir, "fconv_iwslt_de_en") generate_main(data_dir) def test_raw(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_fconv_raw") as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir, ["--dataset-impl", "raw"]) train_translation_model( data_dir, "fconv_iwslt_de_en", ["--dataset-impl", "raw"] ) generate_main(data_dir, ["--dataset-impl", "raw"]) def test_update_freq(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_update_freq") as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir) train_translation_model( data_dir, "fconv_iwslt_de_en", ["--update-freq", "3"] ) generate_main(data_dir) def test_max_positions(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_max_positions") as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir) with self.assertRaises(Exception) as context: train_translation_model( data_dir, "fconv_iwslt_de_en", ["--max-target-positions", "5"], ) self.assertTrue( "skip this example with --skip-invalid-size-inputs-valid-test" in str(context.exception) ) train_translation_model( data_dir, "fconv_iwslt_de_en", [ "--max-target-positions", "5", "--skip-invalid-size-inputs-valid-test", ], ) with self.assertRaises(Exception) as context: generate_main(data_dir) generate_main(data_dir, ["--skip-invalid-size-inputs-valid-test"]) def test_generation(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_sampling") as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir) train_translation_model(data_dir, "fconv_iwslt_de_en") generate_main( data_dir, [ "--sampling", "--temperature", "2", "--beam", "2", "--nbest", "2", ], ) generate_main( data_dir, [ "--sampling", "--sampling-topk", "3", "--beam", "2", "--nbest", "2", ], ) generate_main( data_dir, [ "--sampling", "--sampling-topp", "0.2", "--beam", "2", "--nbest", "2", ], ) generate_main( data_dir, [ "--diversity-rate", "0.5", "--beam", "6", ], ) with self.assertRaises(ValueError): generate_main( data_dir, [ "--diverse-beam-groups", "4", "--match-source-len", ], ) generate_main(data_dir, ["--prefix-size", "2"]) generate_main(data_dir, ["--retain-dropout"]) def test_eval_bleu(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_eval_bleu") as data_dir: 
create_dummy_data(data_dir) preprocess_translation_data(data_dir) train_translation_model( data_dir, "fconv_iwslt_de_en", [ "--eval-bleu", "--eval-bleu-print-samples", "--eval-bleu-remove-bpe", "--eval-bleu-detok", "space", "--eval-bleu-args", '{"beam": 4, "min_len": 10}', ], ) def test_lstm(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_lstm") as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir) train_translation_model( data_dir, "lstm_wiseman_iwslt_de_en", [ "--encoder-layers", "2", "--decoder-layers", "2", "--encoder-embed-dim", "8", "--decoder-embed-dim", "8", "--decoder-out-embed-dim", "8", ], ) generate_main(data_dir) def test_lstm_bidirectional(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_lstm_bidirectional") as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir) train_translation_model( data_dir, "lstm", [ "--encoder-layers", "2", "--encoder-bidirectional", "--encoder-hidden-size", "16", "--encoder-embed-dim", "8", "--decoder-embed-dim", "8", "--decoder-out-embed-dim", "8", "--decoder-layers", "2", ], ) generate_main(data_dir) def test_transformer(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_transformer") as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir) train_translation_model( data_dir, "transformer_iwslt_de_en", [ "--encoder-layers", "2", "--decoder-layers", "2", "--encoder-embed-dim", "8", "--decoder-embed-dim", "8", ], run_validation=True, ) generate_main(data_dir) def test_multilingual_transformer(self): # test with all combinations of encoder/decoder lang tokens encoder_langtok_flags = [ [], ["--encoder-langtok", "src"], ["--encoder-langtok", "tgt"], ] decoder_langtok_flags = [[], ["--decoder-langtok"]] with contextlib.redirect_stdout(StringIO()): for i in range(len(encoder_langtok_flags)): for j in range(len(decoder_langtok_flags)): enc_ltok_flag = encoder_langtok_flags[i] dec_ltok_flag = decoder_langtok_flags[j] with tempfile.TemporaryDirectory( f"test_multilingual_transformer_{i}_{j}" ) as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir) train_translation_model( data_dir, arch="multilingual_transformer", task="multilingual_translation", extra_flags=[ "--encoder-layers", "2", "--decoder-layers", "2", "--encoder-embed-dim", "8", "--decoder-embed-dim", "8", ] + enc_ltok_flag + dec_ltok_flag, lang_flags=["--lang-pairs", "in-out,out-in"], run_validation=True, extra_valid_flags=enc_ltok_flag + dec_ltok_flag, ) generate_main( data_dir, extra_flags=[ "--task", "multilingual_translation", "--lang-pairs", "in-out,out-in", "--source-lang", "in", "--target-lang", "out", ] + enc_ltok_flag + dec_ltok_flag, ) @unittest.skipIf( sys.platform.lower() == "darwin", "skip latent depth test on MacOS" ) def test_multilingual_translation_latent_depth(self): # test with latent depth in encoder, decoder, or both encoder_latent_layer = [[], ["--encoder-latent-layer"]] decoder_latent_layer = [[], ["--decoder-latent-layer"]] with contextlib.redirect_stdout(StringIO()): for i in range(len(encoder_latent_layer)): for j in range(len(decoder_latent_layer)): if i == 0 and j == 0: continue enc_ll_flag = encoder_latent_layer[i] dec_ll_flag = decoder_latent_layer[j] with tempfile.TemporaryDirectory( f"test_multilingual_translation_latent_depth_{i}_{j}" ) as data_dir: create_dummy_data(data_dir) preprocess_translation_data( data_dir, extra_flags=["--joined-dictionary"] ) 
train_translation_model( data_dir, arch="latent_multilingual_transformer", task="multilingual_translation_latent_depth", extra_flags=[ "--user-dir", "examples/latent_depth/latent_depth_src", "--encoder-layers", "2", "--decoder-layers", "2", "--encoder-embed-dim", "8", "--decoder-embed-dim", "8", "--share-encoders", "--share-decoders", "--sparsity-weight", "0.1", ] + enc_ll_flag + dec_ll_flag, lang_flags=["--lang-pairs", "in-out,out-in"], run_validation=True, extra_valid_flags=[ "--user-dir", "examples/latent_depth/latent_depth_src", ] + enc_ll_flag + dec_ll_flag, ) generate_main( data_dir, extra_flags=[ "--user-dir", "examples/latent_depth/latent_depth_src", "--task", "multilingual_translation_latent_depth", "--lang-pairs", "in-out,out-in", "--source-lang", "in", "--target-lang", "out", ] + enc_ll_flag + dec_ll_flag, ) def test_translation_multi_simple_epoch(self): # test with all combinations of encoder/decoder lang tokens encoder_langtok_flags = [ [], ["--encoder-langtok", "src"], ["--encoder-langtok", "tgt"], ] decoder_langtok_flags = [[], ["--decoder-langtok"]] with contextlib.redirect_stdout(StringIO()): for i in range(len(encoder_langtok_flags)): for j in range(len(decoder_langtok_flags)): enc_ltok_flag = encoder_langtok_flags[i] dec_ltok_flag = decoder_langtok_flags[j] with tempfile.TemporaryDirectory( f"test_translation_multi_simple_epoch_{i}_{j}" ) as data_dir: create_dummy_data(data_dir) preprocess_translation_data( data_dir, extra_flags=["--joined-dictionary"] ) train_translation_model( data_dir, arch="transformer", task="translation_multi_simple_epoch", extra_flags=[ "--encoder-layers", "2", "--decoder-layers", "2", "--encoder-embed-dim", "8", "--decoder-embed-dim", "8", "--sampling-method", "temperature", "--sampling-temperature", "1.5", "--virtual-epoch-size", "1000", ] + enc_ltok_flag + dec_ltok_flag, lang_flags=["--lang-pairs", "in-out,out-in"], run_validation=True, extra_valid_flags=enc_ltok_flag + dec_ltok_flag, ) generate_main( data_dir, extra_flags=[ "--task", "translation_multi_simple_epoch", "--lang-pairs", "in-out,out-in", "--source-lang", "in", "--target-lang", "out", ] + enc_ltok_flag + dec_ltok_flag, ) def test_translation_multi_simple_epoch_no_vepoch(self): # test with all combinations of encoder/decoder lang tokens with contextlib.redirect_stdout(StringIO()): enc_ltok_flag = ["--encoder-langtok", "src"] dec_ltok_flag = ["--decoder-langtok"] with tempfile.TemporaryDirectory( "test_translation_multi_simple_epoch_dict" ) as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir, extra_flags=[]) train_translation_model( data_dir, arch="transformer", task="translation_multi_simple_epoch", extra_flags=[ "--encoder-layers", "2", "--decoder-layers", "2", "--encoder-embed-dim", "8", "--decoder-embed-dim", "8", "--sampling-method", "temperature", "--sampling-temperature", "1.5", ] + enc_ltok_flag + dec_ltok_flag, lang_flags=["--lang-pairs", "in-out"], run_validation=True, extra_valid_flags=enc_ltok_flag + dec_ltok_flag, ) generate_main( data_dir, extra_flags=[ "--task", "translation_multi_simple_epoch", "--lang-pairs", "in-out", "--source-lang", "in", "--target-lang", "out", ] + enc_ltok_flag + dec_ltok_flag, ) def test_translation_multi_simple_epoch_dicts(self): # test with all combinations of encoder/decoder lang tokens with contextlib.redirect_stdout(StringIO()): enc_ltok_flag = ["--encoder-langtok", "src"] dec_ltok_flag = ["--decoder-langtok"] with tempfile.TemporaryDirectory( "test_translation_multi_simple_epoch_dict" ) as data_dir: 
create_dummy_data(data_dir) preprocess_translation_data(data_dir, extra_flags=[]) train_translation_model( data_dir, arch="transformer", task="translation_multi_simple_epoch", extra_flags=[ "--encoder-layers", "2", "--decoder-layers", "2", "--encoder-embed-dim", "8", "--decoder-embed-dim", "8", "--sampling-method", "temperature", "--sampling-temperature", "1.5", "--virtual-epoch-size", "1000", ] + enc_ltok_flag + dec_ltok_flag, lang_flags=["--lang-pairs", "in-out"], run_validation=True, extra_valid_flags=enc_ltok_flag + dec_ltok_flag, ) generate_main( data_dir, extra_flags=[ "--task", "translation_multi_simple_epoch", "--lang-pairs", "in-out", "--source-lang", "in", "--target-lang", "out", ] + enc_ltok_flag + dec_ltok_flag, ) def test_translation_multi_simple_epoch_src_tgt_dict_spec(self): # test the specification of explicit --src-dict and --tgt-dict with contextlib.redirect_stdout(StringIO()): enc_ltok_flag = ["--encoder-langtok", "src"] dec_ltok_flag = ["--decoder-langtok"] with tempfile.TemporaryDirectory( "test_translation_multi_simple_epoch_dict" ) as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir, extra_flags=[]) train_translation_model( data_dir, arch="transformer", task="translation_multi_simple_epoch", extra_flags=[ "--source-dict", f"{data_dir}/dict.in.txt", "--target-dict", f"{data_dir}/dict.out.txt", "--encoder-layers", "2", "--decoder-layers", "2", "--encoder-embed-dim", "8", "--decoder-embed-dim", "8", "--sampling-method", "temperature", "--sampling-temperature", "1.5", "--virtual-epoch-size", "1000", ] + enc_ltok_flag + dec_ltok_flag, lang_flags=["--lang-pairs", "in-out"], run_validation=True, extra_valid_flags=enc_ltok_flag + dec_ltok_flag, ) generate_main( data_dir, extra_flags=[ "--task", "translation_multi_simple_epoch", "--lang-pairs", "in-out", "--source-lang", "in", "--target-lang", "out", ] + enc_ltok_flag + dec_ltok_flag, ) def test_transformer_cross_self_attention(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory( "test_transformer_cross_self_attention" ) as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir) train_translation_model( data_dir, "transformer_iwslt_de_en", [ "--encoder-layers", "2", "--decoder-layers", "2", "--encoder-embed-dim", "8", "--decoder-embed-dim", "8", "--decoder-embed-dim", "8", "--no-cross-attention", "--cross-self-attention", ], run_validation=True, ) generate_main(data_dir, extra_flags=[]) def test_transformer_pointer_generator(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory( "test_transformer_pointer_generator" ) as data_dir: create_dummy_data(data_dir) preprocess_summarization_data(data_dir) train_translation_model( data_dir, "transformer_pointer_generator", extra_flags=[ "--user-dir", "examples/pointer_generator/pointer_generator_src", "--encoder-layers", "2", "--decoder-layers", "2", "--encoder-embed-dim", "8", "--decoder-embed-dim", "8", "--alignment-layer", "-1", "--alignment-heads", "1", "--source-position-markers", "0", ], run_validation=True, extra_valid_flags=[ "--user-dir", "examples/pointer_generator/pointer_generator_src", ], ) generate_main( data_dir, extra_flags=[ "--user-dir", "examples/pointer_generator/pointer_generator_src", ], ) def test_lightconv(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_lightconv") as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir) train_translation_model( data_dir, "lightconv_iwslt_de_en", [ 
"--encoder-conv-type", "lightweight", "--decoder-conv-type", "lightweight", "--encoder-embed-dim", "8", "--decoder-embed-dim", "8", ], ) generate_main(data_dir) def test_dynamicconv(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_dynamicconv") as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir) train_translation_model( data_dir, "lightconv_iwslt_de_en", [ "--encoder-conv-type", "dynamic", "--decoder-conv-type", "dynamic", "--encoder-embed-dim", "8", "--decoder-embed-dim", "8", ], ) generate_main(data_dir) def test_cmlm_transformer(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_cmlm_transformer") as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir, ["--joined-dictionary"]) train_translation_model( data_dir, "cmlm_transformer", [ "--apply-bert-init", "--criterion", "nat_loss", "--noise", "full_mask", "--pred-length-offset", "--length-loss-factor", "0.1", ], task="translation_lev", ) generate_main( data_dir, [ "--task", "translation_lev", "--iter-decode-max-iter", "9", "--iter-decode-eos-penalty", "0", "--print-step", ], ) def test_nonautoregressive_transformer(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory( "test_nonautoregressive_transformer" ) as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir, ["--joined-dictionary"]) train_translation_model( data_dir, "nonautoregressive_transformer", [ "--apply-bert-init", "--src-embedding-copy", "--criterion", "nat_loss", "--noise", "full_mask", "--pred-length-offset", "--length-loss-factor", "0.1", ], task="translation_lev", ) generate_main( data_dir, [ "--task", "translation_lev", "--iter-decode-max-iter", "0", "--iter-decode-eos-penalty", "0", "--print-step", ], ) # def test_nat_crf_transformer(self): # with contextlib.redirect_stdout(StringIO()): # with tempfile.TemporaryDirectory('test_nat_crf_transformer') as data_dir: # create_dummy_data(data_dir) # preprocess_translation_data(data_dir, ['--joined-dictionary']) # train_translation_model(data_dir, 'nacrf_transformer', [ # '--apply-bert-init', '--criterion', # 'nat_loss', '--noise', 'full_mask', '--pred-length-offset', # '--length-loss-factor', '0.1', # '--word-ins-loss-factor', '0.5', # '--crf-lowrank-approx', '1', # '--crf-beam-approx', '1' # ], task='translation_lev') # generate_main(data_dir, [ # '--task', 'translation_lev', # '--iter-decode-max-iter', '0', # '--iter-decode-eos-penalty', '0', # '--print-step', # ]) def test_iterative_nonautoregressive_transformer(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory( "test_iterative_nonautoregressive_transformer" ) as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir, ["--joined-dictionary"]) train_translation_model( data_dir, "iterative_nonautoregressive_transformer", [ "--apply-bert-init", "--src-embedding-copy", "--criterion", "nat_loss", "--noise", "full_mask", "--stochastic-approx", "--dae-ratio", "0.5", "--train-step", "3", ], task="translation_lev", ) generate_main( data_dir, [ "--task", "translation_lev", "--iter-decode-max-iter", "9", "--iter-decode-eos-penalty", "0", "--print-step", ], ) def test_insertion_transformer(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_insertion_transformer") as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir, ["--joined-dictionary"]) train_translation_model( data_dir, 
"insertion_transformer", [ "--apply-bert-init", "--criterion", "nat_loss", "--noise", "random_mask", ], task="translation_lev", ) generate_main( data_dir, [ "--task", "translation_lev", "--iter-decode-max-iter", "9", "--iter-decode-eos-penalty", "0", "--print-step", ], ) def test_mixture_of_experts(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_moe") as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir) train_translation_model( data_dir, "transformer_iwslt_de_en", [ "--task", "translation_moe", "--user-dir", "examples/translation_moe/translation_moe_src", "--method", "hMoElp", "--mean-pool-gating-network", "--num-experts", "3", "--encoder-layers", "2", "--decoder-layers", "2", "--encoder-embed-dim", "8", "--decoder-embed-dim", "8", ], ) generate_main( data_dir, [ "--task", "translation_moe", "--user-dir", "examples/translation_moe/translation_moe_src", "--method", "hMoElp", "--mean-pool-gating-network", "--num-experts", "3", "--gen-expert", "0", ], ) def test_alignment(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_alignment") as data_dir: create_dummy_data(data_dir, alignment=True) preprocess_translation_data(data_dir, ["--align-suffix", "align"]) train_translation_model( data_dir, "transformer_align", [ "--encoder-layers", "2", "--decoder-layers", "2", "--encoder-embed-dim", "8", "--decoder-embed-dim", "8", "--load-alignments", "--alignment-layer", "1", "--criterion", "label_smoothed_cross_entropy_with_alignment", ], run_validation=True, ) generate_main(data_dir) def test_laser_lstm(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_laser_lstm") as data_dir: laser_config_file = create_laser_data_and_config_json(data_dir) train_translation_model( laser_config_file.name, "laser_lstm", [ "--user-dir", "examples/laser/laser_src", "--weighting-alpha", "0.3", "--encoder-bidirectional", "--encoder-hidden-size", "512", "--encoder-layers", "5", "--decoder-layers", "1", "--encoder-embed-dim", "320", "--decoder-embed-dim", "320", "--decoder-lang-embed-dim", "32", "--save-dir", data_dir, "--disable-validation", ], task="laser", lang_flags=[], ) def test_laser_transformer(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_laser_transformer") as data_dir: laser_config_file = create_laser_data_and_config_json(data_dir) train_translation_model( laser_config_file.name, "laser_transformer", [ "--user-dir", "examples/laser/laser_src", "--weighting-alpha", "0.3", "--encoder-embed-dim", "320", "--decoder-embed-dim", "320", "--decoder-lang-embed-dim", "32", "--save-dir", data_dir, "--disable-validation", ], task="laser", lang_flags=[], ) def test_alignment_full_context(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_alignment") as data_dir: create_dummy_data(data_dir, alignment=True) preprocess_translation_data(data_dir, ["--align-suffix", "align"]) train_translation_model( data_dir, "transformer_align", [ "--encoder-layers", "2", "--decoder-layers", "2", "--encoder-embed-dim", "8", "--decoder-embed-dim", "8", "--load-alignments", "--alignment-layer", "1", "--criterion", "label_smoothed_cross_entropy_with_alignment", "--full-context-alignment", ], run_validation=True, ) generate_main(data_dir) def test_transformer_layerdrop(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_transformer_layerdrop") as data_dir: create_dummy_data(data_dir) 
preprocess_translation_data(data_dir) train_translation_model( data_dir, "transformer_iwslt_de_en", [ "--encoder-layers", "3", "--decoder-layers", "3", "--encoder-embed-dim", "8", "--decoder-embed-dim", "8", "--encoder-layerdrop", "0.01", "--decoder-layerdrop", "0.01", ], ) generate_main(data_dir) generate_main( data_dir, [ "--model-overrides", "{'encoder_layers_to_keep':'0,2','decoder_layers_to_keep':'1'}", ], ) class TestStories(unittest.TestCase): def setUp(self): logging.disable(logging.CRITICAL) def tearDown(self): logging.disable(logging.NOTSET) def test_fconv_self_att_wp(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_fconv_self_att_wp") as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir) config = [ "--encoder-layers", "[(128, 3)] * 2", "--decoder-layers", "[(128, 3)] * 2", "--decoder-attention", "True", "--encoder-attention", "False", "--gated-attention", "True", "--self-attention", "True", "--project-input", "True", "--encoder-embed-dim", "8", "--decoder-embed-dim", "8", "--decoder-out-embed-dim", "8", "--multihead-self-attention-nheads", "2", ] train_translation_model(data_dir, "fconv_self_att_wp", config) generate_main(data_dir) # fusion model os.rename( os.path.join(data_dir, "checkpoint_last.pt"), os.path.join(data_dir, "pretrained.pt"), ) config.extend( [ "--pretrained", "True", "--pretrained-checkpoint", os.path.join(data_dir, "pretrained.pt"), "--save-dir", os.path.join(data_dir, "fusion_model"), ] ) train_translation_model(data_dir, "fconv_self_att_wp", config) class TestLanguageModeling(unittest.TestCase): def setUp(self): logging.disable(logging.CRITICAL) def tearDown(self): logging.disable(logging.NOTSET) def test_fconv_lm(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_fconv_lm") as data_dir: create_dummy_data(data_dir) preprocess_lm_data(data_dir) train_language_model( data_dir, "fconv_lm", [ "--decoder-layers", "[(850, 3)] * 2 + [(1024,4)]", "--decoder-embed-dim", "280", "--optimizer", "nag", "--lr", "0.1", ], ) eval_lm_main(data_dir) generate_main( data_dir, [ "--task", "language_modeling", "--sample-break-mode", "eos", "--tokens-per-sample", "500", ], ) def test_transformer_lm(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_transformer_lm") as data_dir: create_dummy_data(data_dir) preprocess_lm_data(data_dir) train_language_model( data_dir, "transformer_lm", ["--add-bos-token", "--nval", "1"], run_validation=True, ) eval_lm_main(data_dir) eval_lm_main(data_dir, extra_flags=["--context-window", "25"]) generate_main( data_dir, [ "--task", "language_modeling", "--sample-break-mode", "eos", "--tokens-per-sample", "500", ], ) def test_normformer_lm(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_transformer_lm") as data_dir: create_dummy_data(data_dir) preprocess_lm_data(data_dir) train_language_model( data_dir, "transformer_lm", [ "--add-bos-token", "--nval", "1", "--scale-fc", "--scale-heads", "--scale-attn", "--scale-fc", ], run_validation=True, ) eval_lm_main(data_dir) eval_lm_main(data_dir, extra_flags=["--context-window", "25"]) generate_main( data_dir, [ "--task", "language_modeling", "--sample-break-mode", "eos", "--tokens-per-sample", "500", ], ) def test_transformer_lm_with_adaptive_softmax(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory( "test_transformer_lm_with_adaptive_softmax" ) as data_dir: create_dummy_data(data_dir) 
preprocess_lm_data(data_dir) train_language_model( data_dir, "transformer_lm", [ "--add-bos-token", "--criterion", "adaptive_loss", "--adaptive-softmax-cutoff", "5,10,15", ], run_validation=True, ) eval_lm_main(data_dir) generate_main( data_dir, [ "--task", "language_modeling", "--sample-break-mode", "eos", "--tokens-per-sample", "500", ], ) def test_lightconv_lm(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_lightconv_lm") as data_dir: create_dummy_data(data_dir) preprocess_lm_data(data_dir) train_language_model( data_dir, "lightconv_lm", ["--add-bos-token"], run_validation=True, ) eval_lm_main(data_dir) generate_main( data_dir, [ "--task", "language_modeling", "--sample-break-mode", "eos", "--tokens-per-sample", "500", ], ) def test_lstm_lm(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_lstm_lm") as data_dir: create_dummy_data(data_dir) preprocess_lm_data(data_dir) train_language_model( data_dir, "lstm_lm", ["--add-bos-token"], run_validation=True, ) eval_lm_main(data_dir) generate_main( data_dir, [ "--task", "language_modeling", "--sample-break-mode", "eos", "--tokens-per-sample", "500", ], ) def test_lstm_lm_residuals(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_lstm_lm_residuals") as data_dir: create_dummy_data(data_dir) preprocess_lm_data(data_dir) train_language_model( data_dir, "lstm_lm", ["--add-bos-token", "--residuals"], run_validation=True, ) eval_lm_main(data_dir) generate_main( data_dir, [ "--task", "language_modeling", "--sample-break-mode", "eos", "--tokens-per-sample", "500", ], ) @unittest.skipIf(not has_hf_transformers, "skip test if transformers is missing") def test_transformer_xl_bptt_lm(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_transformer_xl_bptt_lm") as data_dir: create_dummy_data(data_dir) preprocess_lm_data(data_dir) task_flags = [ "--user-dir", "examples/truncated_bptt", "--task", "truncated_bptt_lm", "--batch-size", "2", "--tokens-per-sample", "50", ] train_language_model( data_dir=data_dir, arch="transformer_xl", extra_flags=task_flags + [ "--n-layer", "2", ], task="truncated_bptt_lm", run_validation=True, extra_valid_flags=task_flags, ) eval_lm_main(data_dir, extra_flags=task_flags) # Train with activation offloading train_language_model( data_dir=data_dir, arch="transformer_xl", extra_flags=task_flags + [ "--n-layer", "2", "--offload-activations", ], task="truncated_bptt_lm", run_validation=True, extra_valid_flags=task_flags, ) class TestMaskedLanguageModel(unittest.TestCase): def setUp(self): logging.disable(logging.CRITICAL) def tearDown(self): logging.disable(logging.NOTSET) def test_legacy_masked_lm(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_legacy_mlm") as data_dir: create_dummy_data(data_dir) preprocess_lm_data(data_dir) train_legacy_masked_language_model(data_dir, "masked_lm") def test_roberta_masked_lm(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_roberta_mlm") as data_dir: create_dummy_data(data_dir) preprocess_lm_data(data_dir) train_masked_lm( data_dir, "roberta_base", extra_flags=["--encoder-layers", "2"] ) def test_roberta_sentence_prediction(self): num_classes = 3 with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_roberta_head") as data_dir: create_dummy_roberta_head_data(data_dir, num_classes=num_classes) 
preprocess_lm_data(os.path.join(data_dir, "input0")) preprocess_lm_data(os.path.join(data_dir, "label")) train_roberta_head(data_dir, "roberta_base", num_classes=num_classes) def test_roberta_regression_single(self): num_classes = 1 with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory( "test_roberta_regression_single" ) as data_dir: create_dummy_roberta_head_data( data_dir, num_classes=num_classes, regression=True ) preprocess_lm_data(os.path.join(data_dir, "input0")) train_roberta_head( data_dir, "roberta_base", num_classes=num_classes, extra_flags=["--regression-target"], ) def test_roberta_regression_multiple(self): num_classes = 3 with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory( "test_roberta_regression_multiple" ) as data_dir: create_dummy_roberta_head_data( data_dir, num_classes=num_classes, regression=True ) preprocess_lm_data(os.path.join(data_dir, "input0")) train_roberta_head( data_dir, "roberta_base", num_classes=num_classes, extra_flags=["--regression-target"], ) def test_linformer_roberta_masked_lm(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_linformer_roberta_mlm") as data_dir: create_dummy_data(data_dir) preprocess_lm_data(data_dir) train_masked_lm( data_dir, "linformer_roberta_base", extra_flags=[ "--user-dir", "examples/linformer/linformer_src", "--encoder-layers", "2", ], ) def test_linformer_roberta_sentence_prediction(self): num_classes = 3 with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_linformer_roberta_head") as data_dir: create_dummy_roberta_head_data(data_dir, num_classes=num_classes) preprocess_lm_data(os.path.join(data_dir, "input0")) preprocess_lm_data(os.path.join(data_dir, "label")) train_roberta_head( data_dir, "linformer_roberta_base", num_classes=num_classes, extra_flags=["--user-dir", "examples/linformer/linformer_src"], ) def test_linformer_roberta_regression_single(self): num_classes = 1 with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory( "test_linformer_roberta_regression_single" ) as data_dir: create_dummy_roberta_head_data( data_dir, num_classes=num_classes, regression=True ) preprocess_lm_data(os.path.join(data_dir, "input0")) train_roberta_head( data_dir, "linformer_roberta_base", num_classes=num_classes, extra_flags=[ "--regression-target", "--user-dir", "examples/linformer/linformer_src", ], ) def test_linformer_roberta_regression_multiple(self): num_classes = 3 with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory( "test_linformer_roberta_regression_multiple" ) as data_dir: create_dummy_roberta_head_data( data_dir, num_classes=num_classes, regression=True ) preprocess_lm_data(os.path.join(data_dir, "input0")) train_roberta_head( data_dir, "linformer_roberta_base", num_classes=num_classes, extra_flags=[ "--regression-target", "--user-dir", "examples/linformer/linformer_src", ], ) def _test_pretrained_masked_lm_for_translation(self, learned_pos_emb, encoder_only): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_mlm") as data_dir: create_dummy_data(data_dir) preprocess_lm_data(data_dir) train_legacy_masked_language_model( data_dir, arch="masked_lm", extra_args=("--encoder-learned-pos",) if learned_pos_emb else (), ) with tempfile.TemporaryDirectory( "test_mlm_translation" ) as translation_dir: create_dummy_data(translation_dir) preprocess_translation_data( translation_dir, extra_flags=["--joined-dictionary"] ) # Train transformer 
with data_dir/checkpoint_last.pt train_translation_model( translation_dir, arch="transformer_from_pretrained_xlm", extra_flags=[ "--decoder-layers", "1", "--decoder-embed-dim", "32", "--decoder-attention-heads", "1", "--decoder-ffn-embed-dim", "32", "--encoder-layers", "1", "--encoder-embed-dim", "32", "--encoder-attention-heads", "1", "--encoder-ffn-embed-dim", "32", "--pretrained-xlm-checkpoint", "{}/checkpoint_last.pt".format(data_dir), "--activation-fn", "gelu", "--max-source-positions", "500", "--max-target-positions", "500", ] + ( ["--encoder-learned-pos", "--decoder-learned-pos"] if learned_pos_emb else [] ) + (["--init-encoder-only"] if encoder_only else []), task="translation_from_pretrained_xlm", ) def test_pretrained_masked_lm_for_translation_learned_pos_emb(self): self._test_pretrained_masked_lm_for_translation(True, False) def test_pretrained_masked_lm_for_translation_sinusoidal_pos_emb(self): self._test_pretrained_masked_lm_for_translation(False, False) def test_pretrained_masked_lm_for_translation_encoder_only(self): self._test_pretrained_masked_lm_for_translation(True, True) def test_r4f_roberta(self): num_classes = 3 with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_r4f_roberta_head") as data_dir: create_dummy_roberta_head_data(data_dir, num_classes=num_classes) preprocess_lm_data(os.path.join(data_dir, "input0")) preprocess_lm_data(os.path.join(data_dir, "label")) train_roberta_head( data_dir, "roberta_base", num_classes=num_classes, extra_flags=[ "--user-dir", "examples/rxf/rxf_src", "--criterion", "sentence_prediction_r3f", "--spectral-norm-classification-head", ], ) def train_legacy_masked_language_model(data_dir, arch, extra_args=()): train_parser = options.get_training_parser() # TODO: langs should be in and out right? 
train_args = options.parse_args_and_arch( train_parser, [ "--task", "cross_lingual_lm", data_dir, "--arch", arch, # Optimizer args "--optimizer", "adam", "--lr-scheduler", "reduce_lr_on_plateau", "--lr-shrink", "0.5", "--lr", "0.0001", "--stop-min-lr", "1e-09", # dropout, attention args "--dropout", "0.1", "--attention-dropout", "0.1", # MLM args "--criterion", "legacy_masked_lm_loss", "--masked-lm-only", "--monolingual-langs", "in,out", "--num-segment", "5", # Transformer args: use a small transformer model for fast training "--encoder-layers", "1", "--encoder-embed-dim", "32", "--encoder-attention-heads", "1", "--encoder-ffn-embed-dim", "32", # Other training args "--max-tokens", "500", "--tokens-per-sample", "500", "--save-dir", data_dir, "--max-epoch", "1", "--no-progress-bar", "--distributed-world-size", "1", "--dataset-impl", "raw", "--num-workers", "0", ] + list(extra_args), ) train.main(train_args) class TestOptimizers(unittest.TestCase): def setUp(self): logging.disable(logging.CRITICAL) def tearDown(self): logging.disable(logging.NOTSET) def test_optimizers(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_optimizers") as data_dir: # Use just a bit of data and tiny model to keep this test runtime reasonable create_dummy_data(data_dir, num_examples=10, maxlen=5) preprocess_translation_data(data_dir) optimizers = ["adafactor", "adam", "nag", "adagrad", "sgd", "adadelta"] last_checkpoint = os.path.join(data_dir, "checkpoint_last.pt") for optimizer in optimizers: if os.path.exists(last_checkpoint): os.remove(last_checkpoint) train_translation_model( data_dir, "lstm", [ "--required-batch-size-multiple", "1", "--encoder-layers", "1", "--encoder-hidden-size", "32", "--decoder-layers", "1", "--optimizer", optimizer, ], ) generate_main(data_dir) def read_last_log_entry( logs: List[logging.LogRecord], logger_name: str ) -> Dict[str, float]: for x in reversed(logs): if x.name == logger_name: return json.loads(x.message) raise ValueError(f"No entries from {logger_name} found in captured logs") class TestActivationCheckpointing(unittest.TestCase): base_flags = [ "--encoder-layers", "2", "--decoder-layers", "2", "--encoder-embed-dim", "8", "--decoder-embed-dim", "8", "--restore-file", "x.pt", "--log-format", "json", "--log-interval", "1", "--max-update", "2", ] def _train(self, data_dir, extra_flags): with self.assertLogs() as logs: train_translation_model( data_dir, "transformer_iwslt_de_en", self.base_flags + extra_flags, run_validation=True, extra_valid_flags=["--log-format", "json"], ) return logs.records def test_activation_offloading_does_not_change_metrics(self): """Neither ----checkpoint-activations nor --offload-activations should change loss""" with tempfile.TemporaryDirectory("test_transformer_with_act_cpt") as data_dir: with self.assertLogs(): create_dummy_data(data_dir, num_examples=20) preprocess_translation_data(data_dir) offload_logs = self._train(data_dir, ["--offload-activations"]) baseline_logs = self._train(data_dir, []) assert len(baseline_logs) == len(offload_logs) baseline_valid_stats = read_last_log_entry(baseline_logs, "valid") offload_valid_stats = read_last_log_entry(offload_logs, "valid") baseline_train_stats = read_last_log_entry(baseline_logs, "train") offload_train_stats = read_last_log_entry(offload_logs, "train") assert ( baseline_train_stats["train_loss"] == offload_train_stats["train_loss"] ) assert ( baseline_valid_stats["valid_loss"] == offload_valid_stats["valid_loss"] ) def 
test_activation_checkpointing_does_not_change_metrics(self): """--checkpoint-activations should not change loss""" with tempfile.TemporaryDirectory("test_transformer_with_act_cpt") as data_dir: with self.assertLogs(): create_dummy_data(data_dir, num_examples=20) preprocess_translation_data(data_dir) ckpt_logs = self._train(data_dir, ["--checkpoint-activations"]) baseline_logs = self._train(data_dir, []) assert len(baseline_logs) == len(ckpt_logs) baseline_train_stats = read_last_log_entry(baseline_logs, "train") ckpt_train_stats = read_last_log_entry(ckpt_logs, "train") assert baseline_train_stats["train_loss"] == ckpt_train_stats["train_loss"] baseline_valid_stats = read_last_log_entry(baseline_logs, "valid") ckpt_valid_stats = read_last_log_entry(ckpt_logs, "valid") assert baseline_valid_stats["valid_loss"] == ckpt_valid_stats["valid_loss"] def create_dummy_roberta_head_data( data_dir, num_examples=100, maxlen=10, num_classes=2, regression=False ): input_dir = "input0" def _create_dummy_data(filename): random_data = torch.rand(num_examples * maxlen) input_data = 97 + torch.floor(26 * random_data).int() if regression: output_data = torch.rand((num_examples, num_classes)) else: output_data = 1 + torch.floor(num_classes * torch.rand(num_examples)).int() with open(os.path.join(data_dir, input_dir, filename + ".out"), "w") as f_in: label_filename = filename + ".label" if regression else filename + ".out" with open(os.path.join(data_dir, "label", label_filename), "w") as f_out: offset = 0 for i in range(num_examples): # write example input ex_len = random.randint(1, maxlen) ex_str = " ".join(map(chr, input_data[offset : offset + ex_len])) print(ex_str, file=f_in) # write example label if regression: class_str = " ".join(map(str, output_data[i].numpy())) print(class_str, file=f_out) else: class_str = "class{}".format(output_data[i]) print(class_str, file=f_out) offset += ex_len os.mkdir(os.path.join(data_dir, input_dir)) os.mkdir(os.path.join(data_dir, "label")) _create_dummy_data("train") _create_dummy_data("valid") _create_dummy_data("test") def train_masked_lm(data_dir, arch, extra_flags=None): train_parser = options.get_training_parser() train_args = options.parse_args_and_arch( train_parser, [ "--task", "masked_lm", data_dir, "--arch", arch, "--optimizer", "adam", "--lr", "0.0001", "--criterion", "masked_lm", "--batch-size", "500", "--save-dir", data_dir, "--max-epoch", "1", "--no-progress-bar", "--distributed-world-size", "1", "--ddp-backend", "no_c10d", "--num-workers", "0", ] + (extra_flags or []), ) train.main(train_args) def train_roberta_head(data_dir, arch, num_classes=2, extra_flags=None): train_parser = options.get_training_parser() train_args = options.parse_args_and_arch( train_parser, [ "--task", "sentence_prediction", data_dir, "--arch", arch, "--encoder-layers", "2", "--num-classes", str(num_classes), "--optimizer", "adam", "--lr", "0.0001", "--criterion", "sentence_prediction", "--max-tokens", "500", "--max-positions", "500", "--batch-size", "500", "--save-dir", data_dir, "--max-epoch", "1", "--no-progress-bar", "--distributed-world-size", "1", "--ddp-backend", "no_c10d", "--num-workers", "0", ] + (extra_flags or []), ) train.main(train_args) def eval_lm_main(data_dir, extra_flags=None): eval_lm_parser = options.get_eval_lm_parser() eval_lm_args = options.parse_args_and_arch( eval_lm_parser, [ data_dir, "--path", os.path.join(data_dir, "checkpoint_last.pt"), "--no-progress-bar", "--num-workers", "0", ] + (extra_flags or []), ) eval_lm.main(eval_lm_args) if __name__ == 
"__main__": unittest.main()
73,338
37.417496
93
py
sign-topic
sign-topic-main/tests/test_concat_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import unittest

import torch
from fairseq.data import LanguagePairDataset, TokenBlockDataset
from fairseq.data.concat_dataset import ConcatDataset
from tests.test_train import mock_dict


class TestConcatDataset(unittest.TestCase):
    def setUp(self):
        d = mock_dict()
        tokens_1 = torch.LongTensor([1]).view(1, -1)
        tokens_ds1 = TokenBlockDataset(
            tokens_1,
            sizes=[tokens_1.size(-1)],
            block_size=1,
            pad=0,
            eos=1,
            include_targets=False,
        )
        self.dataset_1 = LanguagePairDataset(
            tokens_ds1, tokens_ds1.sizes, d, shuffle=False
        )
        tokens_2 = torch.LongTensor([2]).view(1, -1)
        tokens_ds2 = TokenBlockDataset(
            tokens_2,
            sizes=[tokens_2.size(-1)],
            block_size=1,
            pad=0,
            eos=1,
            include_targets=False,
        )
        self.dataset_2 = LanguagePairDataset(
            tokens_ds2, tokens_ds2.sizes, d, shuffle=False
        )

    def test_concat_dataset_basics(self):
        d = ConcatDataset([self.dataset_1, self.dataset_2])
        assert len(d) == 2
        assert d[0]["source"][0] == 1
        assert d[1]["source"][0] == 2

        d = ConcatDataset([self.dataset_1, self.dataset_2], sample_ratios=[1, 2])
        assert len(d) == 3
        assert d[0]["source"][0] == 1
        assert d[1]["source"][0] == 2
        assert d[2]["source"][0] == 2

        d = ConcatDataset([self.dataset_1, self.dataset_2], sample_ratios=[2, 1])
        assert len(d) == 3
        assert d[0]["source"][0] == 1
        assert d[1]["source"][0] == 1
        assert d[2]["source"][0] == 2
1,866
30.644068
81
py
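The `sample_ratios` arithmetic exercised by `test_concat_dataset_basics` above can be spelled out in a few lines. The sketch below is illustrative code of the upsampling idea only, not fairseq's `ConcatDataset` implementation; the helper name `upsampled_index_map` is made up for this example. Each dataset is virtually repeated `ratio` times before concatenation, which is why two one-item datasets with ratios `[1, 2]` expose three items in the order the test asserts.

```python
# Illustrative sketch only -- not fairseq's ConcatDataset code.
def upsampled_index_map(dataset_lengths, sample_ratios):
    """Return (dataset_id, inner_index) pairs for the virtual concatenation."""
    mapping = []
    for ds_id, (length, ratio) in enumerate(zip(dataset_lengths, sample_ratios)):
        for _ in range(ratio):
            mapping.extend((ds_id, i) for i in range(length))
    return mapping


print(upsampled_index_map([1, 1], [1, 2]))  # [(0, 0), (1, 0), (1, 0)] -> len 3
print(upsampled_index_map([1, 1], [2, 1]))  # [(0, 0), (0, 0), (1, 0)] -> len 3
```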
sign-topic
sign-topic-main/tests/test_activation_checkpointing.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import unittest

import torch
import torch.nn as nn
from fairseq.modules.checkpoint_activations import checkpoint_wrapper
from torch.utils.checkpoint import checkpoint


class Model(nn.Module):
    def __init__(
        self, use_pytorch_checkpoint=False, use_fairseq_checkpoint=False, **kwargs
    ):
        super().__init__()
        torch.manual_seed(0)
        self.use_pytorch_checkpoint = use_pytorch_checkpoint
        self.ffn = nn.Sequential(
            nn.Linear(32, 128),
            # add a Dropout layer to test RNG save/restore
            nn.Dropout(p=0.5),
            nn.Linear(128, 32),
        )
        if use_fairseq_checkpoint:
            self.ffn = checkpoint_wrapper(self.ffn, **kwargs)
        self.out = nn.Linear(32, 1)

    def forward(self, x):
        if self.use_pytorch_checkpoint:
            x = checkpoint(self.ffn, x)
        else:
            x = self.ffn(x)
        return self.out(x)


class TestActivationCheckpointing(unittest.TestCase):
    def _test_checkpoint_wrapper(self, device, log_memory_usage=False):
        def get_loss_and_gnorm(model):
            torch.manual_seed(1)
            input = torch.rand(2, 16, 32).requires_grad_(True).to(device)
            model.zero_grad()
            loss = model(input).sum()
            loss.backward()
            gnorm = torch.norm(
                torch.stack([torch.norm(p.grad.detach()) for p in model.parameters()])
            )
            return {"loss": loss, "gnorm": gnorm}

        model = Model().to(device)
        no_cpt = get_loss_and_gnorm(model)

        model = Model(use_pytorch_checkpoint=True).to(device)
        pyt_cpt = get_loss_and_gnorm(model)
        torch.testing.assert_allclose(no_cpt["loss"], pyt_cpt["loss"])
        torch.testing.assert_allclose(no_cpt["gnorm"], pyt_cpt["gnorm"])

        model = Model(use_fairseq_checkpoint=True).to(device)
        fairseq_cpt = get_loss_and_gnorm(model)
        torch.testing.assert_allclose(no_cpt["loss"], fairseq_cpt["loss"])
        torch.testing.assert_allclose(no_cpt["gnorm"], fairseq_cpt["gnorm"])

        model = Model(use_fairseq_checkpoint=True, offload_to_cpu=True).to(device)
        fairseq_cpt_offload = get_loss_and_gnorm(model)
        torch.testing.assert_allclose(no_cpt["loss"], fairseq_cpt_offload["loss"])
        torch.testing.assert_allclose(no_cpt["gnorm"], fairseq_cpt_offload["gnorm"])

    def test_checkpoint_wrapper_cpu(self):
        self._test_checkpoint_wrapper(device=torch.device("cpu"))

    @unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU")
    def test_checkpoint_wrapper_cuda(self):
        self._test_checkpoint_wrapper(device=torch.device("cuda"))


if __name__ == "__main__":
    unittest.main()
2,904
35.3125
86
py
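As a usage note on the module exercised above: `checkpoint_wrapper` can also be applied layer by layer rather than to a single sub-block, and the same `offload_to_cpu` keyword that the test passes through `Model(**kwargs)` works there too. A minimal hedged sketch, not taken from the test itself:

```python
import torch.nn as nn
from fairseq.modules.checkpoint_activations import checkpoint_wrapper

# Hedged sketch: wrap each layer so its activations are recomputed during the
# backward pass; offload_to_cpu additionally parks the saved inputs on CPU
# between forward and backward.
stack = nn.Sequential(
    *[checkpoint_wrapper(nn.Linear(32, 32), offload_to_cpu=True) for _ in range(4)]
)
```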
sign-topic
sign-topic-main/tests/test_noising.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest from typing import Dict, List import torch import tests.utils as test_utils from fairseq import utils from fairseq.data import ( Dictionary, LanguagePairDataset, TransformEosDataset, data_utils, noising, ) class TestDataNoising(unittest.TestCase): def _get_test_data_with_bpe_cont_marker(self, append_eos=True): """ Args: append_eos: if True, each input sentence in the source tokens tensor will have an EOS appended to the end. Returns: vocabs: BPE vocab with continuation markers as suffixes to denote non-end of word tokens. This is the standard BPE format used in fairseq's preprocessing. x: input tensor containing numberized source tokens, with EOS at the end if append_eos is true src_lengths: and source lengths. """ vocab = Dictionary() vocab.add_symbol("he@@") vocab.add_symbol("llo") vocab.add_symbol("how") vocab.add_symbol("are") vocab.add_symbol("y@@") vocab.add_symbol("ou") vocab.add_symbol("n@@") vocab.add_symbol("ew") vocab.add_symbol("or@@") vocab.add_symbol("k") src_tokens = [ ["he@@", "llo", "n@@", "ew", "y@@", "or@@", "k"], ["how", "are", "y@@", "ou"], ] x, src_lengths = x, src_lengths = self._convert_src_tokens_to_tensor( vocab=vocab, src_tokens=src_tokens, append_eos=append_eos ) return vocab, x, src_lengths def _get_test_data_with_bpe_end_marker(self, append_eos=True): """ Args: append_eos: if True, each input sentence in the source tokens tensor will have an EOS appended to the end. Returns: vocabs: BPE vocab with end-of-word markers as suffixes to denote tokens at the end of a word. This is an alternative to fairseq's standard preprocessing framework and is not generally supported within fairseq. x: input tensor containing numberized source tokens, with EOS at the end if append_eos is true src_lengths: and source lengths. """ vocab = Dictionary() vocab.add_symbol("he") vocab.add_symbol("llo_EOW") vocab.add_symbol("how_EOW") vocab.add_symbol("are_EOW") vocab.add_symbol("y") vocab.add_symbol("ou_EOW") vocab.add_symbol("n") vocab.add_symbol("ew_EOW") vocab.add_symbol("or") vocab.add_symbol("k_EOW") src_tokens = [ ["he", "llo_EOW", "n", "ew_EOW", "y", "or", "k_EOW"], ["how_EOW", "are_EOW", "y", "ou_EOW"], ] x, src_lengths = x, src_lengths = self._convert_src_tokens_to_tensor( vocab=vocab, src_tokens=src_tokens, append_eos=append_eos ) return vocab, x, src_lengths def _get_test_data_with_word_vocab(self, append_eos=True): """ Args: append_eos: if True, each input sentence in the source tokens tensor will have an EOS appended to the end. Returns: vocabs: word vocab x: input tensor containing numberized source tokens, with EOS at the end if append_eos is true src_lengths: and source lengths. 
""" vocab = Dictionary() vocab.add_symbol("hello") vocab.add_symbol("how") vocab.add_symbol("are") vocab.add_symbol("you") vocab.add_symbol("new") vocab.add_symbol("york") src_tokens = [ ["hello", "new", "york", "you"], ["how", "are", "you", "new", "york"], ] x, src_lengths = self._convert_src_tokens_to_tensor( vocab=vocab, src_tokens=src_tokens, append_eos=append_eos ) return vocab, x, src_lengths def _convert_src_tokens_to_tensor( self, vocab: Dictionary, src_tokens: List[List[str]], append_eos: bool ): src_len = [len(x) for x in src_tokens] # If we have to append EOS, we include EOS in counting src length if append_eos: src_len = [length + 1 for length in src_len] x = torch.LongTensor(len(src_tokens), max(src_len)).fill_(vocab.pad()) for i in range(len(src_tokens)): for j in range(len(src_tokens[i])): x[i][j] = vocab.index(src_tokens[i][j]) if append_eos: x[i][j + 1] = vocab.eos() x = x.transpose(1, 0) return x, torch.LongTensor(src_len) def assert_eos_at_end(self, x, x_len, eos): """Asserts last token of every sentence in x is EOS""" for i in range(len(x_len)): self.assertEqual( x[x_len[i] - 1][i], eos, ( "Expected eos (token id {eos}) at the end of sentence {i} " "but got {other} instead" ).format(i=i, eos=eos, other=x[i][-1]), ) def assert_word_dropout_correct(self, x, x_noised, x_len, l_noised): # Expect only the first word (2 bpe tokens) of the first example # was dropped out self.assertEqual(x_len[0] - 2, l_noised[0]) for i in range(l_noised[0]): self.assertEqual(x_noised[i][0], x[i + 2][0]) def test_word_dropout_with_eos(self): vocab, x, x_len = self._get_test_data_with_bpe_cont_marker(append_eos=True) with data_utils.numpy_seed(1234): noising_gen = noising.WordDropout(vocab) x_noised, l_noised = noising_gen.noising(x, x_len, 0.2) self.assert_word_dropout_correct( x=x, x_noised=x_noised, x_len=x_len, l_noised=l_noised ) self.assert_eos_at_end(x=x_noised, x_len=l_noised, eos=vocab.eos()) def assert_word_blanking_correct(self, x, x_noised, x_len, l_noised, unk): # Expect only the first word (2 bpe tokens) of the first example # was blanked out self.assertEqual(x_len[0], l_noised[0]) for i in range(l_noised[0]): if i < 2: self.assertEqual(x_noised[i][0], unk) else: self.assertEqual(x_noised[i][0], x[i][0]) def test_word_blank_with_eos(self): vocab, x, x_len = self._get_test_data_with_bpe_cont_marker(append_eos=True) with data_utils.numpy_seed(1234): noising_gen = noising.WordDropout(vocab) x_noised, l_noised = noising_gen.noising(x, x_len, 0.2, vocab.unk()) self.assert_word_blanking_correct( x=x, x_noised=x_noised, x_len=x_len, l_noised=l_noised, unk=vocab.unk() ) self.assert_eos_at_end(x=x_noised, x_len=l_noised, eos=vocab.eos()) def generate_unchanged_shuffle_map(self, length): return {i: i for i in range(length)} def assert_word_shuffle_matches_expected( self, x, x_len, max_shuffle_distance: int, vocab: Dictionary, expected_shufle_maps: List[Dict[int, int]], expect_eos_at_end: bool, bpe_end_marker=None, ): """ This verifies that with a given x, x_len, max_shuffle_distance, and vocab, we get the expected shuffle result. Args: x: Tensor of shape (T x B) = (sequence_length, batch_size) x_len: Tensor of length B = batch_size max_shuffle_distance: arg to pass to noising expected_shuffle_maps: List[mapping] where mapping is a Dict[old_index, new_index], mapping x's elements from their old positions in x to their new positions in x. expect_eos_at_end: if True, check the output to make sure there is an EOS at the end. bpe_end_marker: str denoting the BPE end token. 
If this is not None, we set the BPE cont token to None in the noising classes. """ bpe_cont_marker = None if bpe_end_marker is None: bpe_cont_marker = "@@" with data_utils.numpy_seed(1234): word_shuffle = noising.WordShuffle( vocab, bpe_cont_marker=bpe_cont_marker, bpe_end_marker=bpe_end_marker ) x_noised, l_noised = word_shuffle.noising( x, x_len, max_shuffle_distance=max_shuffle_distance ) # For every example, we have a different expected shuffle map. We check # that each example is shuffled as expected according to each # corresponding shuffle map. for i in range(len(expected_shufle_maps)): shuffle_map = expected_shufle_maps[i] for k, v in shuffle_map.items(): self.assertEqual(x[k][i], x_noised[v][i]) # Shuffling should not affect the length of each example for pre_shuffle_length, post_shuffle_length in zip(x_len, l_noised): self.assertEqual(pre_shuffle_length, post_shuffle_length) if expect_eos_at_end: self.assert_eos_at_end(x=x_noised, x_len=l_noised, eos=vocab.eos()) def test_word_shuffle_with_eos(self): vocab, x, x_len = self._get_test_data_with_bpe_cont_marker(append_eos=True) # Assert word shuffle with max shuffle distance 0 causes input to be # unchanged self.assert_word_shuffle_matches_expected( x=x, x_len=x_len, max_shuffle_distance=0, vocab=vocab, expected_shufle_maps=[ self.generate_unchanged_shuffle_map(example_len) for example_len in x_len ], expect_eos_at_end=True, ) # Assert word shuffle with max shuffle distance 3 matches our expected # shuffle order self.assert_word_shuffle_matches_expected( x=x, x_len=x_len, vocab=vocab, max_shuffle_distance=3, expected_shufle_maps=[ self.generate_unchanged_shuffle_map(x_len[0]), {0: 0, 1: 3, 2: 1, 3: 2}, ], expect_eos_at_end=True, ) def test_word_shuffle_with_eos_nonbpe(self): """The purpose of this is to test shuffling logic with word vocabs""" vocab, x, x_len = self._get_test_data_with_word_vocab(append_eos=True) # Assert word shuffle with max shuffle distance 0 causes input to be # unchanged self.assert_word_shuffle_matches_expected( x=x, x_len=x_len, max_shuffle_distance=0, vocab=vocab, expected_shufle_maps=[ self.generate_unchanged_shuffle_map(example_len) for example_len in x_len ], expect_eos_at_end=True, ) # Assert word shuffle with max shuffle distance 3 matches our expected # shuffle order self.assert_word_shuffle_matches_expected( x=x, x_len=x_len, vocab=vocab, max_shuffle_distance=3, expected_shufle_maps=[ {0: 0, 1: 1, 2: 3, 3: 2}, {0: 0, 1: 2, 2: 1, 3: 3, 4: 4}, ], expect_eos_at_end=True, ) def test_word_shuffle_without_eos(self): """Same result as word shuffle with eos except no EOS at end""" vocab, x, x_len = self._get_test_data_with_bpe_cont_marker(append_eos=False) # Assert word shuffle with max shuffle distance 0 causes input to be # unchanged self.assert_word_shuffle_matches_expected( x=x, x_len=x_len, max_shuffle_distance=0, vocab=vocab, expected_shufle_maps=[ self.generate_unchanged_shuffle_map(example_len) for example_len in x_len ], expect_eos_at_end=False, ) # Assert word shuffle with max shuffle distance 3 matches our expected # shuffle order self.assert_word_shuffle_matches_expected( x=x, x_len=x_len, vocab=vocab, max_shuffle_distance=3, expected_shufle_maps=[ self.generate_unchanged_shuffle_map(x_len[0]), {0: 0, 1: 3, 2: 1, 3: 2}, ], expect_eos_at_end=False, ) def test_word_shuffle_without_eos_with_bpe_end_marker(self): """Same result as word shuffle without eos except using BPE end token""" vocab, x, x_len = self._get_test_data_with_bpe_end_marker(append_eos=False) # Assert word shuffle with max shuffle 
distance 0 causes input to be # unchanged self.assert_word_shuffle_matches_expected( x=x, x_len=x_len, max_shuffle_distance=0, vocab=vocab, expected_shufle_maps=[ self.generate_unchanged_shuffle_map(example_len) for example_len in x_len ], expect_eos_at_end=False, bpe_end_marker="_EOW", ) # Assert word shuffle with max shuffle distance 3 matches our expected # shuffle order self.assert_word_shuffle_matches_expected( x=x, x_len=x_len, vocab=vocab, max_shuffle_distance=3, expected_shufle_maps=[ self.generate_unchanged_shuffle_map(x_len[0]), {0: 0, 1: 3, 2: 1, 3: 2}, ], expect_eos_at_end=False, bpe_end_marker="_EOW", ) def assert_no_eos_at_end(self, x, x_len, eos): """Asserts that the last token of each sentence in x is not EOS""" for i in range(len(x_len)): self.assertNotEqual( x[x_len[i] - 1][i], eos, "Expected no eos (token id {eos}) at the end of sentence {i}.".format( eos=eos, i=i ), ) def test_word_dropout_without_eos(self): """Same result as word dropout with eos except no EOS at end""" vocab, x, x_len = self._get_test_data_with_bpe_cont_marker(append_eos=False) with data_utils.numpy_seed(1234): noising_gen = noising.WordDropout(vocab) x_noised, l_noised = noising_gen.noising(x, x_len, 0.2) self.assert_word_dropout_correct( x=x, x_noised=x_noised, x_len=x_len, l_noised=l_noised ) self.assert_no_eos_at_end(x=x_noised, x_len=l_noised, eos=vocab.eos()) def test_word_blank_without_eos(self): """Same result as word blank with eos except no EOS at end""" vocab, x, x_len = self._get_test_data_with_bpe_cont_marker(append_eos=False) with data_utils.numpy_seed(1234): noising_gen = noising.WordDropout(vocab) x_noised, l_noised = noising_gen.noising(x, x_len, 0.2, vocab.unk()) self.assert_word_blanking_correct( x=x, x_noised=x_noised, x_len=x_len, l_noised=l_noised, unk=vocab.unk() ) self.assert_no_eos_at_end(x=x_noised, x_len=l_noised, eos=vocab.eos()) def _get_noising_dataset_batch( self, src_tokens_no_pad, src_dict, append_eos_to_tgt=False, ): """ Constructs a NoisingDataset and the corresponding ``LanguagePairDataset(NoisingDataset(src), src)``. If *append_eos_to_tgt* is True, wrap the source dataset in :class:`TransformEosDataset` to append EOS to the clean source when using it as the target. 
""" src_dataset = test_utils.TestDataset(data=src_tokens_no_pad) noising_dataset = noising.NoisingDataset( src_dataset=src_dataset, src_dict=src_dict, seed=1234, max_word_shuffle_distance=3, word_dropout_prob=0.2, word_blanking_prob=0.2, noising_class=noising.UnsupervisedMTNoising, ) tgt = src_dataset language_pair_dataset = LanguagePairDataset( src=noising_dataset, tgt=tgt, src_sizes=None, src_dict=src_dict ) language_pair_dataset = TransformEosDataset( language_pair_dataset, src_dict.eos(), append_eos_to_tgt=append_eos_to_tgt, ) dataloader = torch.utils.data.DataLoader( dataset=language_pair_dataset, batch_size=2, collate_fn=language_pair_dataset.collater, ) denoising_batch_result = next(iter(dataloader)) return denoising_batch_result def test_noising_dataset_with_eos(self): src_dict, src_tokens, _ = self._get_test_data_with_bpe_cont_marker( append_eos=True ) # Format data for src_dataset src_tokens = torch.t(src_tokens) src_tokens_no_pad = [] for src_sentence in src_tokens: src_tokens_no_pad.append( utils.strip_pad(tensor=src_sentence, pad=src_dict.pad()) ) denoising_batch_result = self._get_noising_dataset_batch( src_tokens_no_pad=src_tokens_no_pad, src_dict=src_dict ) eos, pad = src_dict.eos(), src_dict.pad() # Generated noisy source as source expected_src = torch.LongTensor( [[4, 5, 10, 11, 8, 12, 13, eos], [pad, pad, pad, 6, 8, 9, 7, eos]] ) # Original clean source as target (right-padded) expected_tgt = torch.LongTensor( [[4, 5, 10, 11, 8, 12, 13, eos], [6, 7, 8, 9, eos, pad, pad, pad]] ) generated_src = denoising_batch_result["net_input"]["src_tokens"] tgt_tokens = denoising_batch_result["target"] self.assertTensorEqual(expected_src, generated_src) self.assertTensorEqual(expected_tgt, tgt_tokens) def test_noising_dataset_without_eos(self): """ Similar to test noising dataset with eos except that we have to set *append_eos_to_tgt* to ``True``. """ src_dict, src_tokens, _ = self._get_test_data_with_bpe_cont_marker( append_eos=False ) # Format data for src_dataset src_tokens = torch.t(src_tokens) src_tokens_no_pad = [] for src_sentence in src_tokens: src_tokens_no_pad.append( utils.strip_pad(tensor=src_sentence, pad=src_dict.pad()) ) denoising_batch_result = self._get_noising_dataset_batch( src_tokens_no_pad=src_tokens_no_pad, src_dict=src_dict, append_eos_to_tgt=True, ) eos, pad = src_dict.eos(), src_dict.pad() # Generated noisy source as source expected_src = torch.LongTensor( [[4, 5, 10, 11, 8, 12, 13], [pad, pad, pad, 6, 8, 9, 7]] ) # Original clean source as target (right-padded) expected_tgt = torch.LongTensor( [[4, 5, 10, 11, 8, 12, 13, eos], [6, 7, 8, 9, eos, pad, pad, pad]] ) generated_src = denoising_batch_result["net_input"]["src_tokens"] tgt_tokens = denoising_batch_result["target"] self.assertTensorEqual(expected_src, generated_src) self.assertTensorEqual(expected_tgt, tgt_tokens) def assertTensorEqual(self, t1, t2): self.assertEqual(t1.size(), t2.size(), "size mismatch") self.assertEqual(t1.ne(t2).long().sum(), 0) if __name__ == "__main__": unittest.main()
19,814
36.246241
87
py
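A note on the noising tests above: `WordDropout` and `WordShuffle` operate on whole words, reconstructed from BPE units via the `@@` continuation marker (or an explicit end-of-word marker such as `_EOW`). The sketch below is a stand-alone, illustrative re-implementation of that grouping for the test's own vocabulary; it is not the fairseq code, and `group_bpe_into_words` is a name invented for this example.

```python
# Illustrative sketch only: group BPE units into words the way the noising
# classes above do, treating tokens that end in "@@" as word-internal.
def group_bpe_into_words(tokens, cont_marker="@@"):
    words, current = [], []
    for tok in tokens:
        current.append(tok)
        if not tok.endswith(cont_marker):
            words.append(current)
            current = []
    if current:  # trailing continuation piece without an end-of-word token
        words.append(current)
    return words


assert group_bpe_into_words(["he@@", "llo", "n@@", "ew", "y@@", "or@@", "k"]) == [
    ["he@@", "llo"],
    ["n@@", "ew"],
    ["y@@", "or@@", "k"],
]
```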
sign-topic
sign-topic-main/tests/test_constraints.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest from typing import List import torch from fairseq.token_generation_constraints import ( ConstraintNode, OrderedConstraintState, UnorderedConstraintState, pack_constraints, ) def tensorize(constraints: List[List[int]]) -> torch.Tensor: return [torch.tensor(x) for x in constraints] class TestHelperRoutines(unittest.TestCase): def setUp(self): self.examples = [ ([[]], torch.tensor([[0]])), ([[], []], torch.tensor([[0], [0]])), ([[torch.tensor([1, 2])], []], torch.tensor([[1, 1, 2, 0], [0, 0, 0, 0]])), ( [ [ torch.tensor([3, 1, 2]), torch.tensor([3]), torch.tensor([4, 5, 6, 7]), ], [], [torch.tensor([1, 8, 9, 10, 1, 4, 11, 12])], ], torch.tensor( [ [3, 3, 1, 2, 0, 3, 0, 4, 5, 6, 7, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 8, 9, 10, 1, 4, 11, 12, 0, 0, 0], ] ), ), ] def test_packing(self): """Ensures the list of lists of tensors gets packed correctly.""" for batch_constraints, expected_tensor in self.examples: packed = pack_constraints(batch_constraints) assert torch.equal(packed, expected_tensor) class TestUnorderedConstraintState(unittest.TestCase): def setUp(self): # Tuples of (contraint set, expected printed graph, token counts per node) self.examples = [ ( tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]), "([None].False#6 ([1].True#4 ([2].False#1 [3].True#1) [3].True#1 [4].True#1) ([4].False#2 ([5].True#2 ([6].False#1 [7].True#1))))", # noqa {1: 4, 2: 1, 3: 2, 4: 3, 5: 2, 6: 1, 7: 1}, ), ([], "[None].False#0", {}), (tensorize([[0]]), "([None].False#1 [0].True#1)", {0: 1}), ( tensorize([[100000, 1, 2, 3, 4, 5]]), "([None].False#1 ([100000].False#1 ([1].False#1 ([2].False#1 ([3].False#1 ([4].False#1 [5].True#1))))))", {100000: 1, 1: 1, 2: 1, 3: 1, 4: 1, 5: 1}, ), ( tensorize([[1, 2], [1, 2]]), "([None].False#2 ([1].False#2 [2].True#2))", {1: 2, 2: 2}, ), ( tensorize([[1, 2], [3, 4]]), "([None].False#2 ([1].False#1 [2].True#1) ([3].False#1 [4].True#1))", {1: 1, 2: 1, 3: 1, 4: 1}, ), ] self.sequences = [ ( self.examples[0][0], [], {"bank": 0, "num_completed": 0, "finished": False, "is_root": True}, ), ( self.examples[0][0], [1, 2], {"bank": 2, "num_completed": 0, "finished": False, "is_root": False}, ), ( self.examples[0][0], [1, 2, 94], {"bank": 1, "num_completed": 1, "finished": False, "is_root": True}, ), ( self.examples[0][0], [1, 3, 999, 1, 4], {"bank": 4, "num_completed": 2, "finished": False, "is_root": False}, ), ( self.examples[0][0], [1, 3, 999, 1, 4, 999], {"bank": 4, "num_completed": 2, "finished": False, "is_root": True}, ), ( self.examples[0][0], [4, 5, 6, 8], {"bank": 2, "num_completed": 1, "finished": False, "is_root": True}, ), ( self.examples[0][0], # Tricky, because in last three, goes down [1->4] branch, could miss [1] and [4->5] # [[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]], [1, 2, 3, 1, 3, 1, 4, 4, 5, 6, 7, 1, 4, 5], {"bank": 14, "num_completed": 6, "finished": True, "is_root": False}, ), ( self.examples[0][0], [1, 2, 3, 999, 1, 3, 1, 4, 4, 5, 6, 7, 1, 4, 5, 117], {"bank": 14, "num_completed": 6, "finished": True, "is_root": True}, ), ( tensorize([[1], [2, 3]]), # Should not be able to get credit for entering 1 a second time [1, 1], {"bank": 1, "num_completed": 1, "finished": False, "is_root": True}, ), ( self.examples[4][0], [1, 2, 1, 2], {"bank": 4, "num_completed": 2, "finished": True, "is_root": False}, ), ( self.examples[4][0], [1, 2, 1, 2, 
1], {"bank": 4, "num_completed": 2, "finished": True, "is_root": True}, ), ( self.examples[5][0], [1, 2, 3, 4, 5], {"bank": 4, "num_completed": 2, "finished": True, "is_root": True}, ), ] def test_graphs(self): """ Test whether unordered graph systems are created correctly. """ for example in self.examples: constraints, expected, gold_counts = example c = ConstraintNode.create(constraints) assert ( ConstraintNode.print_graph(c) == expected ), f"got {ConstraintNode.print_graph(c)}, expected {expected}" assert ( c.token_counts() == gold_counts ), f"{c} got {c.token_counts()} wanted {gold_counts}" def test_next_tokens(self): """ Tests that the set of next tokens is correct. """ for example in self.examples: constraints, expected, gold_counts = example root = ConstraintNode.create(constraints) root_tokens = set(root.children.keys()) for sequence in constraints: state = UnorderedConstraintState(root) for token in sequence: all_tokens = root_tokens.union(state.node.children.keys()) assert ( all_tokens == state.next_tokens() ), f"ALL {all_tokens} NEXT {state.next_tokens()}" state = state.advance(token) def test_sequences(self): for constraints, tokens, expected in self.sequences: state = UnorderedConstraintState.create(pack_constraints([constraints])[0]) for token in tokens: state = state.advance(token) result = {} for attr in expected.keys(): result[attr] = getattr(state, attr) assert ( result == expected ), f"TEST({tokens}) GOT: {result} WANTED: {expected}" class TestOrderedConstraintState(unittest.TestCase): def setUp(self): self.sequences = [ ( tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]), [], {"bank": 0, "num_completed": 0, "finished": False, "is_root": True}, ), ( tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]), [1, 2], {"bank": 2, "num_completed": 0, "finished": False, "is_root": False}, ), ( tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]), [1, 2, 94], {"bank": 0, "num_completed": 0, "finished": False, "is_root": True}, ), ( tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]), [1, 3, 999, 1, 4], {"bank": 0, "num_completed": 0, "finished": False, "is_root": True}, ), ( tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]), [1, 2, 3, 999, 999], {"bank": 3, "num_completed": 1, "finished": False, "is_root": False}, ), ( tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]), [1, 2, 3, 77, 1, 3, 1], {"bank": 6, "num_completed": 2, "finished": False, "is_root": False}, ), ( tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]), [1, 2, 3, 1, 3, 1, 4, 4, 5, 6, 7, 1, 4, 5], {"bank": 14, "num_completed": 6, "finished": True, "is_root": False}, ), ( tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]), [1, 2, 999, 1, 2, 3, 999, 1, 3, 1, 4, 4, 5, 6, 7, 1, 4, 5, 117], {"bank": 14, "num_completed": 6, "finished": True, "is_root": False}, ), ( tensorize([[1], [2, 3]]), [1, 1], {"bank": 1, "num_completed": 1, "finished": False, "is_root": False}, ), ( tensorize([[1, 2], [1, 2]]), [1, 2, 1, 2], {"bank": 4, "num_completed": 2, "finished": True, "is_root": False}, ), ( tensorize([[1, 2], [1, 2]]), [1, 2, 1, 2, 1], {"bank": 4, "num_completed": 2, "finished": True, "is_root": False}, ), ( tensorize([[1, 2], [3, 4]]), [1, 2, 3, 4, 5], {"bank": 4, "num_completed": 2, "finished": True, "is_root": False}, ), ] def test_sequences(self): for i, (constraints, tokens, expected) in enumerate(self.sequences): state = OrderedConstraintState.create(pack_constraints([constraints])[0]) for token in tokens: 
state = state.advance(token) result = {} for attr in expected.keys(): result[attr] = getattr(state, attr) assert ( result == expected ), f"TEST({tokens}) GOT: {result} WANTED: {expected}" if __name__ == "__main__": unittest.main()
10,612
37.452899
155
py
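For readers of the constraint tests above, the packed layout that `pack_constraints` produces (and that the expected tensors in `TestHelperRoutines` encode) is: the first entry of each row is the number of constraints for that sentence, followed by each constraint's token IDs terminated by a 0, right-padded with zeros to the widest row in the batch. A small example with values derived from that same convention; the exact tensor shown in the comment is inferred from the test's examples rather than run output:

```python
import torch
from fairseq.token_generation_constraints import pack_constraints

# Two constraints for the first sentence, one for the second.
batch_constraints = [
    [torch.tensor([3, 1, 2]), torch.tensor([3])],
    [torch.tensor([4, 5])],
]
packed = pack_constraints(batch_constraints)
# Expected layout (per the convention checked in TestHelperRoutines):
# tensor([[2, 3, 1, 2, 0, 3, 0],
#         [1, 4, 5, 0, 0, 0, 0]])
print(packed)
```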
sign-topic
sign-topic-main/tests/test_sparse_multihead_attention.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest import torch from fairseq.modules.sparse_multihead_attention import SparseMultiheadAttention class TestSparseMultiheadAttention(unittest.TestCase): def test_sparse_multihead_attention(self): attn_weights = torch.randn(1, 8, 8) bidirectional_sparse_mask = torch.tensor( [ [0, 0, 0, 0, 0, float("-inf"), float("-inf"), 0], [0, 0, 0, 0, 0, float("-inf"), float("-inf"), 0], [0, 0, 0, 0, 0, float("-inf"), float("-inf"), 0], [0, 0, 0, 0, 0, float("-inf"), float("-inf"), 0], [float("-inf"), float("-inf"), float("-inf"), 0, 0, 0, 0, 0], [float("-inf"), float("-inf"), float("-inf"), 0, 0, 0, 0, 0], [float("-inf"), float("-inf"), float("-inf"), 0, 0, 0, 0, 0], [float("-inf"), float("-inf"), float("-inf"), 0, 0, 0, 0, 0], ] ) bidirectional_attention = SparseMultiheadAttention( 16, 1, stride=4, expressivity=1, is_bidirectional=True ) bidirectional_attention_sparse_mask = ( bidirectional_attention.buffered_sparse_mask(attn_weights, 8, 8) ) torch.all( torch.eq(bidirectional_attention_sparse_mask, bidirectional_sparse_mask) ) sparse_mask = torch.tensor( [ [ 0, float("-inf"), float("-inf"), float("-inf"), float("-inf"), float("-inf"), float("-inf"), float("-inf"), ], [ 0, 0, float("-inf"), float("-inf"), float("-inf"), float("-inf"), float("-inf"), float("-inf"), ], [ 0, 0, 0, float("-inf"), float("-inf"), float("-inf"), float("-inf"), float("-inf"), ], [ 0, 0, 0, 0, float("-inf"), float("-inf"), float("-inf"), float("-inf"), ], [0, 0, 0, 0, 0, float("-inf"), float("-inf"), float("-inf")], [ float("-inf"), float("-inf"), float("-inf"), 0, 0, 0, float("-inf"), float("-inf"), ], [ float("-inf"), float("-inf"), float("-inf"), 0, 0, 0, 0, float("-inf"), ], [float("-inf"), float("-inf"), float("-inf"), 0, 0, 0, 0, 0], ] ) attention = SparseMultiheadAttention( 16, 1, stride=4, expressivity=1, is_bidirectional=False ) attention_sparse_mask = attention.buffered_sparse_mask(attn_weights, 8, 8) torch.all(torch.eq(attention_sparse_mask, sparse_mask)) if __name__ == "__main__": unittest.main()
3,738
31.513043
84
py
sign-topic
sign-topic-main/tests/test_export.py
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import tempfile import unittest import torch from fairseq.data.dictionary import Dictionary from fairseq.models.transformer import TransformerModel from fairseq.modules import multihead_attention, sinusoidal_positional_embedding from fairseq.tasks.fairseq_task import LegacyFairseqTask DEFAULT_TEST_VOCAB_SIZE = 100 class DummyTask(LegacyFairseqTask): def __init__(self, args): super().__init__(args) self.dictionary = get_dummy_dictionary() if getattr(self.args, "ctc", False): self.dictionary.add_symbol("<ctc_blank>") self.src_dict = self.dictionary self.tgt_dict = self.dictionary @property def source_dictionary(self): return self.src_dict @property def target_dictionary(self): return self.dictionary def get_dummy_dictionary(vocab_size=DEFAULT_TEST_VOCAB_SIZE): dummy_dict = Dictionary() # add dummy symbol to satisfy vocab size for id, _ in enumerate(range(vocab_size)): dummy_dict.add_symbol("{}".format(id), 1000) return dummy_dict def get_dummy_task_and_parser(): """ Return a dummy task and argument parser, which can be used to create a model/criterion. """ parser = argparse.ArgumentParser( description="test_dummy_s2s_task", argument_default=argparse.SUPPRESS ) DummyTask.add_args(parser) args = parser.parse_args([]) task = DummyTask.setup_task(args) return task, parser def _test_save_and_load(scripted_module): with tempfile.NamedTemporaryFile() as f: scripted_module.save(f.name) torch.jit.load(f.name) class TestExportModels(unittest.TestCase): def test_export_multihead_attention(self): module = multihead_attention.MultiheadAttention(embed_dim=8, num_heads=2) scripted = torch.jit.script(module) _test_save_and_load(scripted) def test_incremental_state_multihead_attention(self): module1 = multihead_attention.MultiheadAttention(embed_dim=8, num_heads=2) module1 = torch.jit.script(module1) module2 = multihead_attention.MultiheadAttention(embed_dim=8, num_heads=2) module2 = torch.jit.script(module2) state = {} state = module1.set_incremental_state(state, "key", {"a": torch.tensor([1])}) state = module2.set_incremental_state(state, "key", {"a": torch.tensor([2])}) v1 = module1.get_incremental_state(state, "key")["a"] v2 = module2.get_incremental_state(state, "key")["a"] self.assertEqual(v1, 1) self.assertEqual(v2, 2) def test_positional_embedding(self): module = sinusoidal_positional_embedding.SinusoidalPositionalEmbedding( embedding_dim=8, padding_idx=1 ) scripted = torch.jit.script(module) _test_save_and_load(scripted) @unittest.skipIf( torch.__version__ < "1.6.0", "Targeting OSS scriptability for the 1.6 release" ) def test_export_transformer(self): task, parser = get_dummy_task_and_parser() TransformerModel.add_args(parser) args = parser.parse_args([]) model = TransformerModel.build_model(args, task) scripted = torch.jit.script(model) _test_save_and_load(scripted) @unittest.skipIf( torch.__version__ < "1.6.0", "Targeting OSS scriptability for the 1.6 release" ) def test_export_transformer_no_token_pos_emb(self): task, parser = get_dummy_task_and_parser() TransformerModel.add_args(parser) args = parser.parse_args([]) args.no_token_positional_embeddings = True model = TransformerModel.build_model(args, task) scripted = torch.jit.script(model) _test_save_and_load(scripted) if __name__ == "__main__": unittest.main()
4,002
32.082645
86
py
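Beyond the save/load round-trip checked above, a scripted `MultiheadAttention` should remain callable like the eager module. The following is a hedged sketch extrapolating from the test (the test only scripts and saves the module); shapes follow fairseq's time-first convention:

```python
import torch
from fairseq.modules import multihead_attention

mha = multihead_attention.MultiheadAttention(embed_dim=8, num_heads=2)
scripted = torch.jit.script(mha)

x = torch.randn(5, 2, 8)  # (time, batch, embed_dim)
attn_out, attn_weights = scripted(x, x, x)  # self-attention
print(attn_out.shape)  # expected: torch.Size([5, 2, 8])
```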
sign-topic
sign-topic-main/tests/test_roberta.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import functools import unittest from typing import Any, Dict, Sequence import fairseq import fairseq.options import fairseq.tasks import torch from tests.utils import dummy_dictionary VOCAB_SIZE = 100 @fairseq.tasks.register_task("fake_task") class FakeTask(fairseq.tasks.LegacyFairseqTask): def __init__(self, args): super().__init__(args) self.dictionary = dummy_dictionary(VOCAB_SIZE - 4) assert len(self.dictionary) == VOCAB_SIZE @property def source_dictionary(self): return self.dictionary @property def target_dictionary(self): return self.dictionary @functools.lru_cache() def get_toy_model( device: str, architecture: str = "roberta_enc_dec", **extra_args: Any, ): assert device in ("gpu", "cpu") kwargs = { "arch": architecture, # Use characteristics dimensions "encoder_layers": 3, "encoder_embed_dim": 12, "encoder_ffn_embed_dim": 14, "encoder_attention_heads": 4, "decoder_layers": 3, "decoder_embed_dim": 12, "decoder_ffn_embed_dim": 14, "decoder_attention_heads": 4, # Disable dropout so we have comparable tests. "dropout": 0, "attention_dropout": 0, "activation_dropout": 0, "encoder_layerdrop": 0, # required args "tokens_per_sample": 256, "data": "/tmp/test_roberta", } kwargs.update(extra_args) fake_task = FakeTask(kwargs) args = fairseq.options.get_args( task="online_backtranslation", mono_langs="en,ro", valid_lang_pairs="en-ro", **kwargs, ) torch.manual_seed(0) model = fake_task.build_model(args) if device == "gpu": model.cuda() return fake_task, model def mk_sample( lang: str, device: str, tok: Sequence[int] = None, batch_size: int = 2 ) -> Dict[str, Any]: assert device in ("gpu", "cpu") if not tok: if lang == "en": tok = [10, 11, 12, 13, 14, 15, 2] else: tok = [20, 21, 22, 23, 24, 25, 26, 27, 2] batch = torch.stack([torch.tensor(tok, dtype=torch.long)] * batch_size) if device == "gpu": batch = batch.cuda() sample = { "net_input": { "src_tokens": batch, "prev_output_tokens": batch, "src_lengths": torch.tensor( [len(tok)] * batch_size, dtype=torch.long, device=batch.device ), }, "target": batch[:, 1:], } return sample def cpu_gpu(fn): def helper(self): fn(self, "cpu") if torch.cuda.is_available(): fn(self, "gpu") return helper def architectures(fn): def helper(self): for arch in ["roberta_enc_dec", "transformer"]: fn(self, arch) return helper class RobertaTest(unittest.TestCase): def assertTensorEqual(self, t1, t2, delta: float = 1e-6): self.assertEqual(t1.size(), t2.size(), "size mismatch") if delta == 0.0: self.assertEqual(t1.ne(t2).long().sum(), 0) else: self.assertEqual(((t2 - t1).abs() > delta).long().sum(), 0) def assertSharing(self, model, link_groups: Sequence[Sequence[str]]): ids = {} for group in link_groups: group_ids = {name: id(params(model, name)) for name in group} shared_id = group_ids[group[0]] self.assertEqual(group_ids, {name: shared_id for name in group}) self.assertNotIn(shared_id, ids) ids[shared_id] = group def test_roberta_shared_params(self): _, roberta = get_toy_model("cpu", architecture="roberta") self.assertSharing( roberta, [ [ "encoder.sentence_encoder.embed_tokens.weight", "encoder.lm_head.weight", ] ], ) _, roberta = get_toy_model( "cpu", architecture="roberta", untie_weights_roberta=True ) self.assertSharing( roberta, [ ["encoder.sentence_encoder.embed_tokens.weight"], ["encoder.lm_head.weight"], ], ) def test_roberta_enc_dec_shared_params(self): # 3 distinct embeddings _, 
enc_dec = get_toy_model("cpu", architecture="roberta_enc_dec") self.assertSharing( enc_dec, [ ["encoder.embed_tokens.weight"], ["decoder.embed_tokens.weight"], ["decoder.output_projection.weight"], ], ) # 2 distinct embeddings, one for encoder, one for decoder _, enc_dec = get_toy_model( "cpu", architecture="roberta_enc_dec", share_decoder_input_output_embed=True ) self.assertSharing( enc_dec, [ ["encoder.embed_tokens.weight"], [ "decoder.embed_tokens.weight", "decoder.output_projection.weight", ], ], ) # shared embeddings _, enc_dec = get_toy_model( "cpu", architecture="roberta_enc_dec", share_all_embeddings=True ) self.assertSharing( enc_dec, [ [ "encoder.embed_tokens.weight", "decoder.embed_tokens.weight", "decoder.output_projection.weight", ] ], ) def test_roberta_max_positions_is_correctly_set(self): device = "cpu" task, model = get_toy_model(device) max_pos = model.max_decoder_positions() self.assertEqual(max_pos, 256) self.assertEqual(max_pos, model.decoder.max_positions()) self.assertEqual(max_pos, model.encoder.max_positions()) self.assertEqual(max_pos, model.encoder.embed_positions.max_positions) sentence = [31 for _ in range(max_pos)] sample = mk_sample("en", device, sentence, batch_size=1) self.assertEqual(list(sample["net_input"]["src_lengths"]), [max_pos]) self.assertEqual(len(sample["net_input"]["src_tokens"][0]), max_pos) x, _ = model.forward(**sample["net_input"]) self.assertEqual(x.shape, (1, max_pos, VOCAB_SIZE)) @cpu_gpu def test_roberta_forward_backward(self, device: str): _, model = get_toy_model(device) sample = mk_sample("en", device) en_tokens = sample["net_input"]["src_tokens"] (bs, l) = en_tokens.shape # Forward logits, _ = model(**sample["net_input"]) self.assertEqual(logits.shape, (bs, l, VOCAB_SIZE)) # Backward loss = logits.sum() loss.backward() @cpu_gpu def test_roberta_forward_backward_bs1(self, device: str): _, model = get_toy_model(device) sample = mk_sample("en", device, batch_size=1) o, _ = model.forward(**sample["net_input"]) loss = o.sum() sample2 = mk_sample("ro", device, batch_size=1) o, _ = model.forward(**sample2["net_input"]) loss += o.sum() loss.backward() @cpu_gpu def test_roberta_batching(self, device: str): """ Checks that the batch of size 2 give twice the same results than the batch of size 1. """ _, model = get_toy_model(device) sample = mk_sample("en", device, batch_size=1) slen = sample["net_input"]["src_lengths"][0] sample2 = mk_sample("en", device, batch_size=2) with torch.no_grad(): z = model.encoder.forward( sample["net_input"]["src_tokens"], sample["net_input"]["src_lengths"] ) z = z["encoder_out"][-1] logits, _ = model.forward(**sample["net_input"]) z2 = model.encoder.forward( sample2["net_input"]["src_tokens"], sample["net_input"]["src_lengths"] ) z2 = z2["encoder_out"][-1] logits2, _ = model.forward(**sample2["net_input"]) self.assertEqual(z.shape, (slen, 1, 12)) self.assertEqual(z2.shape, (slen, 2, 12)) self.assertTensorEqual(logits2[0], logits2[1]) self.assertTensorEqual(logits[0], logits2[0]) @cpu_gpu def test_roberta_incremental_decoder(self, device: str): """ Checks that incremental decoding yields the same result than non incremental one. 
""" task, model = get_toy_model(device) en_sample = mk_sample("en", device) en_tokens = en_sample["net_input"]["src_tokens"] ro_sample = mk_sample("ro", device) ro_tokens = ro_sample["net_input"]["src_tokens"] en_enc = model.encoder.forward( en_tokens, src_lengths=en_sample["net_input"]["src_lengths"] ) (bs, tgt_len) = ro_tokens.shape # Decode without incremental state ro_dec, _ = model.decoder.forward(ro_tokens, encoder_out=en_enc) self.assertEqual(ro_dec.shape, (bs, tgt_len, VOCAB_SIZE)) self.assertTensorEqual(ro_dec[0], ro_dec[1]) # Decode with incremental state inc_state = {} ro_dec_inc = [] for i in range(tgt_len): ro, _ = model.decoder.forward( ro_tokens[:, : i + 1], encoder_out=en_enc, incremental_state=inc_state ) self.assertEqual(ro.shape, (bs, 1, VOCAB_SIZE)) ro_dec_inc.append(ro) for i in range(tgt_len): # Intra-batch self.assertTensorEqual(ro_dec_inc[i][0], ro_dec_inc[i][1]) # Incremental vs non-incremental self.assertTensorEqual(ro_dec_inc[i][:, 0], ro_dec[:, i]) @cpu_gpu def test_regularize_for_adaprune_in_roberta(self, device: str): _, model = get_toy_model( device=device, architecture="roberta_base", mha_reg_scale_factor=0.000375, ffn_reg_scale_factor=0.000375, ) sample = mk_sample("en", device, batch_size=1) task_loss, _ = model.forward(**sample["net_input"]) head_loss = model._get_adaptive_head_loss() ffn_loss = model._get_adaptive_ffn_loss() loss = task_loss.sum() + head_loss + ffn_loss loss.backward() @cpu_gpu def test_ffn_prune_for_adaprune_in_roberta(self, device: str): _, model = get_toy_model( device=device, architecture="roberta_base", ) sample = mk_sample("en", device, batch_size=1) for layer in model.encoder.sentence_encoder.layers: fc1_original_size = layer.fc1.out_features remove_index = layer._get_fc_rank(remove_num=2) layer._prune_fc_layer(remove_index=remove_index) self.assertEqual(layer.fc1.out_features, fc1_original_size - 2) task_loss, _ = model.forward(**sample["net_input"]) def params(model, name): if "." not in name: return getattr(model, name) prefix, name = name.split(".", 1) return params(getattr(model, prefix), name)
11,291
31.730435
93
py
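A quick usage note for the helpers defined at the ends of the file above, meant to be run inside that test module: `params()` resolves dotted parameter paths on a module, which is what `assertSharing` compares by identity. For the default `roberta` architecture the token embedding and LM-head weights are tied, exactly as `test_roberta_shared_params` asserts; the snippet below is a hedged sketch of that check.

```python
# Hedged sketch: assumes get_toy_model() and params() from this test module
# are in scope (e.g. run from within tests/test_roberta.py).
_, model = get_toy_model("cpu", architecture="roberta")
emb = params(model, "encoder.sentence_encoder.embed_tokens.weight")
lm_head = params(model, "encoder.lm_head.weight")
assert emb is lm_head  # tied weights, per test_roberta_shared_params
```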
sign-topic
sign-topic-main/tests/test_online_backtranslation.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import tempfile import unittest from pathlib import Path from typing import Any, Dict, Sequence import fairseq.data.indexed_dataset as indexed_dataset import fairseq.options import fairseq.tasks.online_backtranslation as obt import torch from tests import utils def mk_sample(tokens: Sequence[int], batch_size: int = 2) -> Dict[str, Any]: batch = torch.stack([torch.tensor(tokens, dtype=torch.long)] * batch_size) sample = { "net_input": { "src_tokens": batch, "prev_output_tokens": batch, "src_lengths": torch.tensor([len(tokens)] * batch_size, dtype=torch.long), }, "target": batch[:, 1:], } return sample def mk_dataset(num_samples: int, max_len: int, output: Path): output.parent.mkdir(exist_ok=True) idx = indexed_dataset.IndexedDatasetBuilder(str(output)) data = torch.randint(5, 100, (num_samples, max_len)) lengths = torch.randint(3, max_len, (num_samples,)) for d, l in zip(data, lengths): d[0] = 0 idx.add_item(d[:l]) idx.finalize(output.with_suffix(".idx")) assert output.exists() assert output.with_suffix(".idx").exists() class OnlineBacktranslationTest(unittest.TestCase): tmp_dir = Path(tempfile.mkdtemp(suffix="OnlineBacktranslationTest")) @classmethod def obt_task( cls, languages: Sequence[str], data: Path = None, language_mapping: str = None ): dict_path = cls.tmp_dir / "dict.txt" if not dict_path.exists(): dictionary = utils.dummy_dictionary(100) dictionary.save(str(dict_path)) if data is not None: (data / "dict.txt").write_text(dict_path.read_text()) else: data = cls.tmp_dir assert len(languages) >= 2 kwargs = { "arch": "transformer", # --max-sentences=1 for better predictability of batches "max_sentences": 1, # Use characteristics dimensions "encoder_layers": 3, "encoder_embed_dim": 12, "encoder_ffn_embed_dim": 14, "encoder_attention_heads": 4, "decoder_layers": 3, "decoder_embed_dim": 12, "decoder_output_dim": 12, "decoder_ffn_embed_dim": 14, "decoder_attention_heads": 4, # Disable dropout so we have comparable tests. 
"dropout": 0, "attention_dropout": 0, "activation_dropout": 0, "encoder_layerdrop": 0, } args = fairseq.options.get_args( data, task="online_backtranslation", mono_langs=",".join(languages), valid_lang_pairs=f"{languages[0]}-{languages[1]}", tokens_per_sample=256, language_mapping=language_mapping, **kwargs, ) task = obt.OnlineBackTranslationTask.setup_task(args) # we need to build the model to have the correct dictionary model = task.build_model(task.args) return task, model def tmp_path(self, test_case: str) -> Path: return Path(tempfile.mkdtemp(test_case, dir=self.tmp_dir)) def test_lang_tokens(self): task, model = self.obt_task(["en", "ro", "zh"]) assert obt._lang_token("en") in task.dictionary assert obt._lang_token("ro") in task.dictionary assert obt._lang_token("zh") in task.dictionary en_bos = obt._lang_token_index(task.common_dict, "en") assert "en" == task.common_dict[en_bos].strip("_") zh_bos = obt._lang_token_index(task.common_dict, "zh") assert "zh" == task.common_dict[zh_bos].strip("_") zh_sample = mk_sample([zh_bos, 16, 14, 12, 10]) # we expect to receive the bos token for translation assert task.get_bos_token_from_sample(zh_sample) == en_bos def test_backtranslate_sample(self): task, model = self.obt_task(["en", "ro", "zh"]) en_bos = obt._lang_token_index(task.common_dict, "en") zh_bos = obt._lang_token_index(task.common_dict, "zh") sample = mk_sample([zh_bos, 16, 14, 12, 10]) task.backtranslate_sample(sample, "zh", "en") target_zh = list(sample["target"][0]) assert target_zh == [16, 14, 12, 10] # original zh sentence generated_en = sample["net_input"]["src_tokens"][0] assert generated_en[0] == en_bos def test_train_dataset(self): data = self.tmp_path("test_train_dataset") mk_dataset(20, 10, data / "en" / "train.bin") mk_dataset(10, 10, data / "zh" / "train.bin") task, model = self.obt_task(["en", "zh"], data) task.load_dataset("train") en_bos = obt._lang_token_index(task.common_dict, "en") zh_bos = obt._lang_token_index(task.common_dict, "zh") train = task.datasets["train"] train.ordered_indices() train.prefetch([0, 19]) sample_0 = train[0] sample_19 = train[19] self.assertEqual( set(sample_0.keys()), {"en-BT", "en-DENOISE", "zh-BT", "zh-DENOISE"} ) for sample in (sample_0, sample_19): self.assertEqual(sample["en-BT"]["source"][0], en_bos) # bt target isn't ready to look at. self.assertEqual(sample["en-DENOISE"]["source"][0], en_bos) # TODO What could we check on the target side ? for i in range(10): # Zh dataset is shorter, and is wrapped around En dataset. train.prefetch([i, i + 10]) self.assertEqual( list(train[i]["zh-DENOISE"]["source"]), list(train[i + 10]["zh-DENOISE"]["source"]), ) self.assertEqual(train[i]["zh-DENOISE"]["source"][0].item(), zh_bos) # Sorted by increasing len self.assertLess( len(sample_0["en-BT"]["source"]), len(sample_19["en-BT"]["source"]) ) def test_valid_dataset(self): data = self.tmp_path("test_valid_dataset") mk_dataset(10, 21, data / "valid.en-zh.en.bin") mk_dataset(10, 21, data / "valid.en-zh.zh.bin") task, model = self.obt_task(["en", "zh"], data) valid = task.load_dataset("valid") en_bos = obt._lang_token_index(task.common_dict, "en") assert valid is not None valid.prefetch(range(10)) sample_0 = valid[0] sample_9 = valid[9] self.assertEqual(sample_0["id"], 0) self.assertEqual(sample_9["id"], 9) self.assertEqual(sample_0["source"][0], en_bos) self.assertEqual(sample_9["source"][0], en_bos) # TODO: could we test the target side ? 
def assertFnMatch(self, fn, values): for x, y in values.items(): fn_x = fn(x) self.assertEqual(fn_x, y, f"Fn has wrong value: fn({x}) = {fn_x} != {y}") def test_piecewise_linear_fn(self): self.assertFnMatch( obt.PiecewiseLinearFn.from_string("1.0"), {0: 1, 100: 1, 500: 1, 1000: 1} ) self.assertFnMatch( obt.PiecewiseLinearFn.from_string("0:1,1000:0"), {0: 1, 500: 0.5, 1000: 0, 2000: 0}, ) self.assertFnMatch( obt.PiecewiseLinearFn.from_string("0:0,1000:1"), {0: 0, 500: 0.5, 1000: 1, 2000: 1}, ) self.assertFnMatch( obt.PiecewiseLinearFn.from_string("0:0,1000:1,2000:0"), {0: 0, 500: 0.5, 1000: 1, 1500: 0.5, 2000: 0, 3000: 0}, )
7,650
35.961353
86
py
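The `PiecewiseLinearFn.from_string` cases tested above describe schedules as comma-separated "step:value" breakpoints, with linear interpolation between breakpoints and clamping outside them. The stand-alone sketch below (not the fairseq implementation; the function name is invented for this example) reproduces the behaviour the assertions expect:

```python
def piecewise_linear(spec: str, step: int) -> float:
    """Evaluate a 'step:value,step:value' schedule at `step` (illustrative only)."""
    if ":" not in spec:  # a bare constant such as "1.0"
        return float(spec)
    pieces = sorted(
        (int(k), float(v)) for k, v in (p.split(":") for p in spec.split(","))
    )
    if step <= pieces[0][0]:
        return pieces[0][1]
    for (x0, y0), (x1, y1) in zip(pieces, pieces[1:]):
        if step <= x1:
            return y0 + (y1 - y0) * (step - x0) / (x1 - x0)
    return pieces[-1][1]


assert piecewise_linear("1.0", 500) == 1.0
assert piecewise_linear("0:1,1000:0", 500) == 0.5
assert piecewise_linear("0:0,1000:1,2000:0", 1500) == 0.5
```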
sign-topic
sign-topic-main/tests/test_backtranslation_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest import tests.utils as test_utils import torch from fairseq.data import ( BacktranslationDataset, LanguagePairDataset, TransformEosDataset, ) from fairseq.sequence_generator import SequenceGenerator class TestBacktranslationDataset(unittest.TestCase): def setUp(self): ( self.tgt_dict, self.w1, self.w2, self.src_tokens, self.src_lengths, self.model, ) = test_utils.sequence_generator_setup() dummy_src_samples = self.src_tokens self.tgt_dataset = test_utils.TestDataset(data=dummy_src_samples) self.cuda = torch.cuda.is_available() def _backtranslation_dataset_helper( self, remove_eos_from_input_src, remove_eos_from_output_src, ): tgt_dataset = LanguagePairDataset( src=self.tgt_dataset, src_sizes=self.tgt_dataset.sizes, src_dict=self.tgt_dict, tgt=None, tgt_sizes=None, tgt_dict=None, ) generator = SequenceGenerator( [self.model], tgt_dict=self.tgt_dict, max_len_a=0, max_len_b=200, beam_size=2, unk_penalty=0, ) backtranslation_dataset = BacktranslationDataset( tgt_dataset=TransformEosDataset( dataset=tgt_dataset, eos=self.tgt_dict.eos(), # remove eos from the input src remove_eos_from_src=remove_eos_from_input_src, ), src_dict=self.tgt_dict, backtranslation_fn=( lambda sample: generator.generate([self.model], sample) ), output_collater=TransformEosDataset( dataset=tgt_dataset, eos=self.tgt_dict.eos(), # if we remove eos from the input src, then we need to add it # back to the output tgt append_eos_to_tgt=remove_eos_from_input_src, remove_eos_from_src=remove_eos_from_output_src, ).collater, cuda=self.cuda, ) dataloader = torch.utils.data.DataLoader( backtranslation_dataset, batch_size=2, collate_fn=backtranslation_dataset.collater, ) backtranslation_batch_result = next(iter(dataloader)) eos, pad, w1, w2 = self.tgt_dict.eos(), self.tgt_dict.pad(), self.w1, self.w2 # Note that we sort by src_lengths and add left padding, so actually # ids will look like: [1, 0] expected_src = torch.LongTensor([[w1, w2, w1, eos], [pad, pad, w1, eos]]) if remove_eos_from_output_src: expected_src = expected_src[:, :-1] expected_tgt = torch.LongTensor([[w1, w2, eos], [w1, w2, eos]]) generated_src = backtranslation_batch_result["net_input"]["src_tokens"] tgt_tokens = backtranslation_batch_result["target"] self.assertTensorEqual(expected_src, generated_src) self.assertTensorEqual(expected_tgt, tgt_tokens) def test_backtranslation_dataset_no_eos_in_output_src(self): self._backtranslation_dataset_helper( remove_eos_from_input_src=False, remove_eos_from_output_src=True, ) def test_backtranslation_dataset_with_eos_in_output_src(self): self._backtranslation_dataset_helper( remove_eos_from_input_src=False, remove_eos_from_output_src=False, ) def test_backtranslation_dataset_no_eos_in_input_src(self): self._backtranslation_dataset_helper( remove_eos_from_input_src=True, remove_eos_from_output_src=False, ) def assertTensorEqual(self, t1, t2): self.assertEqual(t1.size(), t2.size(), "size mismatch") self.assertEqual(t1.ne(t2).long().sum(), 0) if __name__ == "__main__": unittest.main()
4,140
32.395161
85
py
sign-topic
sign-topic-main/tests/test_fp16_optimizer.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import copy
import logging
import unittest

import torch
from fairseq.optim.fp16_optimizer import FP16Optimizer, MemoryEfficientFP16Optimizer
from omegaconf import OmegaConf


@unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU")
class TestGradientScaling(unittest.TestCase):
    def setUp(self):
        self.x = torch.tensor([2.0]).cuda().half()
        weight = 3.0
        bias = 5.0
        self.error = 1.0
        self.target = torch.tensor([self.x * weight + bias + self.error]).cuda().half()
        self.loss_fn = torch.nn.L1Loss()

        self.model = torch.nn.Linear(1, 1)
        self.model.weight.data = torch.tensor([[weight]])
        self.model.bias.data = torch.tensor([bias])
        self.model.cuda().half()
        self.params = list(self.model.parameters())

        self.cfg_dls = OmegaConf.create(
            {
                "optimization": {
                    "lr": [0.1],
                },
                "optimizer": {
                    "_name": "adam",
                    "lr": [0.1],
                    "adam_betas": "(0.9, 0.999)",
                    "adam_eps": 1e-8,
                    "weight_decay": 0.0,
                },
                "common": {
                    "fp16_init_scale": 1,
                    "fp16_scale_window": 1,
                    "fp16_scale_tolerance": 1,
                    "threshold_loss_scale": 1,
                    "min_loss_scale": 1e-4,
                    "tpu": False,
                },
            }
        )
        logging.disable(logging.CRITICAL)

    def tearDown(self):
        logging.disable(logging.NOTSET)

    def run_iter(self, model, params, optimizer):
        optimizer.zero_grad()
        y = model(self.x)
        loss = self.loss_fn(y, self.target)
        optimizer.backward(loss)
        self.assertEqual(loss, torch.tensor(1.0, device="cuda:0", dtype=torch.float16))

        grad_norm = optimizer.clip_grad_norm(0)
        self.assertAlmostEqual(grad_norm.item(), 2.2361, 4)

        optimizer.step()
        self.assertEqual(
            model.weight,
            torch.tensor(
                [[3.0996]], device="cuda:0", dtype=torch.float16, requires_grad=True
            ),
        )
        self.assertEqual(
            model.bias,
            torch.tensor(
                [5.1016], device="cuda:0", dtype=torch.float16, requires_grad=True
            ),
        )
        self.assertEqual(optimizer.scaler.loss_scale, 2.0)

    def test_mixed_precision(self):
        model = copy.deepcopy(self.model)
        params = list(model.parameters())
        optimizer = FP16Optimizer.build_optimizer(self.cfg_dls, params)

        self.run_iter(model, params, optimizer)
        self.assertTrue(
            all(
                torch.all(
                    fp32_params.eq(
                        torch.tensor(
                            [3.1000, 5.1000], device="cuda:0", requires_grad=True
                        )
                    )
                )
                for fp32_params in optimizer.fp32_params.values()
            )
        )

    def test_memory_efficient(self):
        model = copy.deepcopy(self.model)
        params = list(model.parameters())
        optimizer = MemoryEfficientFP16Optimizer.build_optimizer(self.cfg_dls, params)

        self.run_iter(model, params, optimizer)


if __name__ == "__main__":
    unittest.main()
3,571
30.892857
87
py
sign-topic
sign-topic-main/tests/test_sequence_generator.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import argparse
import math
import tempfile
import unittest

import numpy as np
import torch

import tests.utils as test_utils
from fairseq import search
from fairseq.data.dictionary import Dictionary
from fairseq.models.transformer import TransformerModel
from fairseq.ngram_repeat_block import NGramRepeatBlock
from fairseq.sequence_generator import EnsembleModel, SequenceGenerator
from fairseq.tasks.fairseq_task import LegacyFairseqTask

DEFAULT_TEST_VOCAB_SIZE = 100


class DummyTask(LegacyFairseqTask):
    def __init__(self, args):
        super().__init__(args)
        self.dictionary = get_dummy_dictionary()
        if getattr(self.args, "ctc", False):
            self.dictionary.add_symbol("<ctc_blank>")
        self.src_dict = self.dictionary
        self.tgt_dict = self.dictionary

    @property
    def source_dictionary(self):
        return self.src_dict

    @property
    def target_dictionary(self):
        return self.dictionary


def get_dummy_dictionary(vocab_size=DEFAULT_TEST_VOCAB_SIZE):
    dummy_dict = Dictionary()
    # add dummy symbol to satisfy vocab size
    for id, _ in enumerate(range(vocab_size)):
        dummy_dict.add_symbol("{}".format(id), n=1000)
    return dummy_dict


def get_dummy_task_and_parser():
    """
    To build a fairseq model, we need a dummy parser and task. This function
    is used to create a dummy task and parser to facilitate model/criterion tests.

    Note: we use DummyTask as the dummy task. You may want to use another task
    by providing another function.
    """
    parser = argparse.ArgumentParser(
        description="test_dummy_s2s_task", argument_default=argparse.SUPPRESS
    )
    DummyTask.add_args(parser)
    args = parser.parse_args([])
    task = DummyTask.setup_task(args)
    return task, parser


class TestJitSequenceGeneratorBase(unittest.TestCase):
    def setUp(self):
        self.task, self.parser = get_dummy_task_and_parser()
        eos = self.task.tgt_dict.eos()
        src_tokens = torch.randint(3, 50, (2, 10)).long()
        src_tokens = torch.cat((src_tokens, torch.LongTensor([[eos], [eos]])), -1)
        src_lengths = torch.LongTensor([2, 10])
        self.sample = {
            "net_input": {"src_tokens": src_tokens, "src_lengths": src_lengths}
        }
        TransformerModel.add_args(self.parser)
        args = self.parser.parse_args([])
        args.encoder_layers = 2
        args.decoder_layers = 1
        self.transformer_model = TransformerModel.build_model(args, self.task)

    def assertOutputEqual(self, hypo, pos_probs):
        pos_scores = torch.FloatTensor(pos_probs).log()
        self.assertTensorSizeEqual(hypo["positional_scores"], pos_scores)
        self.assertTensorSizeEqual(pos_scores.numel(), hypo["tokens"].numel())

    def assertTensorSizeEqual(self, t1, t2):
        self.assertEqual(t1.size(), t2.size(), "size mismatch")

    def assertAlmostEqual(self, t1, t2):
        self.assertEqual(t1.size(), t2.size(), "size mismatch")
        self.assertLess((t1 - t2).abs().max(), 1e-4)

    def assertTensorEqual(self, t1, t2):
        self.assertEqual(t1.size(), t2.size(), "size mismatch")
        self.assertEqual(t1.ne(t2).long().sum(), 0)

    def assertHypoEqual(self, h1, h2):
        "Check two hypos are equal"
        self.assertTensorEqual(h1["tokens"], h2["tokens"])
        self.assertAlmostEqual(h1["positional_scores"], h2["positional_scores"])
        self.assertLess(abs(h1["score"] - h2["score"]), 1e-6)
        self.assertAlmostEqual(h1["attention"], h2["attention"])

    def _test_save_and_load(self, scripted_module):
        with tempfile.NamedTemporaryFile() as f:
            scripted_module.save(f.name)
            torch.jit.load(f.name)


JIT_MSG = "Targeting OSS scriptability for the 1.6 release"


@unittest.skipIf(torch.__version__ < "1.6.0", JIT_MSG)
class TestJitSequenceGenerator(TestJitSequenceGeneratorBase):
    def test_export_transformer(self):
        model = self.transformer_model
        torch.jit.script(model)

    def test_ensemble_sequence_generator(self):
        model = self.transformer_model
        generator = SequenceGenerator(
            [model],
            self.task.tgt_dict,
            beam_size=2,
            no_repeat_ngram_size=2,
            max_len_b=10,
        )
        scripted_model = torch.jit.script(generator)
        self._test_save_and_load(scripted_model)

    def test_export_ensemble_model(self):
        model = self.transformer_model
        ensemble_models = EnsembleModel([model])
        torch.jit.script(ensemble_models)


class TestExportSearch(unittest.TestCase):
    def setUp(self):
        task, _ = get_dummy_task_and_parser()
        self.tgt_dict = task.tgt_dict
        self.min_top1_prob = 0.4

    def test_export_diverse_bs(self):
        search_strategy = search.DiverseBeamSearch(
            self.tgt_dict, num_groups=2, diversity_strength=0.0
        )
        torch.jit.script(search_strategy)

    def test_export_sampling(self):
        low_sampling_topp = self.min_top1_prob / 2.0
        search_strategy = search.Sampling(
            self.tgt_dict, sampling_topp=low_sampling_topp
        )
        torch.jit.script(search_strategy)

    def test_export_diverse_siblings_search(self):
        search_strategy = search.DiverseSiblingsSearch(
            self.tgt_dict, diversity_rate=0.5
        )
        torch.jit.script(search_strategy)


class TestSequenceGeneratorBase(unittest.TestCase):
    def assertHypoTokens(self, hypo, tokens):
        self.assertTensorEqual(hypo["tokens"], torch.LongTensor(tokens))

    def assertHypoScore(self, hypo, pos_probs, normalized=True, lenpen=1.0):
        pos_scores = torch.FloatTensor(pos_probs).log()
        self.assertAlmostEqual(hypo["positional_scores"], pos_scores)
        self.assertEqual(pos_scores.numel(), hypo["tokens"].numel())
        score = pos_scores.sum()
        if normalized:
            score /= pos_scores.numel() ** lenpen
        self.assertLess(abs(score - hypo["score"]), 1e-6)

    def assertAlmostEqual(self, t1, t2):
        self.assertEqual(t1.size(), t2.size(), "size mismatch")
        self.assertLess((t1 - t2).abs().max(), 1e-4)

    def assertTensorEqual(self, t1, t2):
        self.assertEqual(t1.size(), t2.size(), "size mismatch")
        self.assertEqual(t1.ne(t2).long().sum(), 0)


class TestSequenceGenerator(TestSequenceGeneratorBase):
    def setUp(self):
        (
            self.tgt_dict,
            self.w1,
            self.w2,
            src_tokens,
            src_lengths,
            self.model,
        ) = test_utils.sequence_generator_setup()
        self.sample = {
            "net_input": {"src_tokens": src_tokens, "src_lengths": src_lengths}
        }

    def test_with_normalization(self):
        generator = SequenceGenerator([self.model], self.tgt_dict, beam_size=2)
        hypos = generator.forward(self.sample)
        eos, w1, w2 = self.tgt_dict.eos(), self.w1, self.w2
        # sentence 1, beam 1
        self.assertHypoTokens(hypos[0][0], [w1, eos])
        self.assertHypoScore(hypos[0][0], [0.9, 1.0])
        # sentence 1, beam 2
        self.assertHypoTokens(hypos[0][1], [w2, w1, w2, eos])
        self.assertHypoScore(hypos[0][1], [0.1, 0.9, 0.9, 1.0])
        # sentence 2, beam 1
        self.assertHypoTokens(hypos[1][0], [w1, w2, w1, eos])
        self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.4, 1.0])
        # sentence 2, beam 2
        self.assertHypoTokens(hypos[1][1], [w1, w2, eos])
        self.assertHypoScore(hypos[1][1], [0.7, 0.4, 0.6])

    def test_without_normalization(self):
        # Sentence 1: unchanged from the normalized case
        # Sentence 2: beams swap order
        generator = SequenceGenerator(
            [self.model], self.tgt_dict, beam_size=2, normalize_scores=False
        )
        hypos = generator.forward(self.sample)
        eos, w1, w2 = self.tgt_dict.eos(), self.w1, self.w2
        # sentence 1, beam 1
        self.assertHypoTokens(hypos[0][0], [w1, eos])
        self.assertHypoScore(hypos[0][0], [0.9, 1.0], normalized=False)
        # sentence 1, beam 2
        self.assertHypoTokens(hypos[0][1], [w2, w1, w2, eos])
        self.assertHypoScore(hypos[0][1], [0.1, 0.9, 0.9, 1.0], normalized=False)
        # sentence 2, beam 1
        self.assertHypoTokens(hypos[1][0], [w1, w2, eos])
        self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.6], normalized=False)
        # sentence 2, beam 2
        self.assertHypoTokens(hypos[1][1], [w1, w2, w1, eos])
        self.assertHypoScore(hypos[1][1], [0.7, 0.4, 0.4, 1.0], normalized=False)

    def test_with_lenpen_favoring_short_hypos(self):
        lenpen = 0.6
        generator = SequenceGenerator(
            [self.model], self.tgt_dict, beam_size=2, len_penalty=lenpen
        )
        hypos = generator.forward(self.sample)
        eos, w1, w2 = self.tgt_dict.eos(), self.w1, self.w2
        # sentence 1, beam 1
        self.assertHypoTokens(hypos[0][0], [w1, eos])
        self.assertHypoScore(hypos[0][0], [0.9, 1.0], lenpen=lenpen)
        # sentence 1, beam 2
        self.assertHypoTokens(hypos[0][1], [w2, w1, w2, eos])
        self.assertHypoScore(hypos[0][1], [0.1, 0.9, 0.9, 1.0], lenpen=lenpen)
        # sentence 2, beam 1
        self.assertHypoTokens(hypos[1][0], [w1, w2, eos])
        self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.6], lenpen=lenpen)
        # sentence 2, beam 2
        self.assertHypoTokens(hypos[1][1], [w1, w2, w1, eos])
        self.assertHypoScore(hypos[1][1], [0.7, 0.4, 0.4, 1.0], lenpen=lenpen)

    def test_with_lenpen_favoring_long_hypos(self):
        lenpen = 5.0
        generator = SequenceGenerator(
            [self.model], self.tgt_dict, beam_size=2, len_penalty=lenpen
        )
        hypos = generator.forward(self.sample)
        eos, w1, w2 = self.tgt_dict.eos(), self.w1, self.w2
        # sentence 1, beam 1
        self.assertHypoTokens(hypos[0][0], [w2, w1, w2, eos])
        self.assertHypoScore(hypos[0][0], [0.1, 0.9, 0.9, 1.0], lenpen=lenpen)
        # sentence 1, beam 2
        self.assertHypoTokens(hypos[0][1], [w1, eos])
        self.assertHypoScore(hypos[0][1], [0.9, 1.0], lenpen=lenpen)
        # sentence 2, beam 1
        self.assertHypoTokens(hypos[1][0], [w1, w2, w1, eos])
        self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.4, 1.0], lenpen=lenpen)
        # sentence 2, beam 2
        self.assertHypoTokens(hypos[1][1], [w1, w2, eos])
        self.assertHypoScore(hypos[1][1], [0.7, 0.4, 0.6], lenpen=lenpen)

    def test_maxlen(self):
        generator = SequenceGenerator(
            [self.model], self.tgt_dict, beam_size=2, max_len_b=2
        )
        hypos = generator.forward(self.sample)
        eos, w1, w2 = self.tgt_dict.eos(), self.w1, self.w2
        # sentence 1, beam 1
        self.assertHypoTokens(hypos[0][0], [w1, eos])
        self.assertHypoScore(hypos[0][0], [0.9, 1.0])
        # sentence 1, beam 2
        self.assertHypoTokens(hypos[0][1], [w2, w2, eos])
        self.assertHypoScore(hypos[0][1], [0.1, 0.1, 0.6])
        # sentence 2, beam 1
        self.assertHypoTokens(hypos[1][0], [w1, w2, eos])
        self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.6])
        # sentence 2, beam 2
        self.assertHypoTokens(hypos[1][1], [w2, w2, eos])
        self.assertHypoScore(hypos[1][1], [0.3, 0.9, 0.01])

    def test_encoder_with_different_output_len(self):
        args = self.model.encoder.args
        task = test_utils.TestTranslationTask.setup_task(
            args, self.tgt_dict, self.tgt_dict
        )
        reshaping_model = test_utils.TestReshapingModel.build_model(args, task)
        generator = SequenceGenerator(
            [reshaping_model], self.tgt_dict, beam_size=2, max_len_b=2
        )
        hypos = generator.forward(self.sample)
        for sent in [0, 1]:
            for beam in [0, 1]:
                assert hypos[sent][beam]["attention"] is not None

    def test_generation_with_additional_input(self):
        args = self.model.encoder.args
        task = test_utils.TestTranslationTask.setup_task(
            args, self.tgt_dict, self.tgt_dict
        )
        add_input_model = test_utils.TestAdditionalInputModel.build_model(args, task)
        generator = SequenceGenerator([add_input_model], self.tgt_dict, beam_size=2)
        sample = self.sample.copy()
        sample["net_input"]["fancy_other_input"] = sample["net_input"]["src_tokens"]
        hypos = generator.forward(self.sample)
        eos, w1 = self.tgt_dict.eos(), self.w1
        # sentence 1, beam 1
        self.assertHypoTokens(hypos[0][0], [w1, eos])
        self.assertHypoScore(hypos[0][0], [0.9, 1.0])


@unittest.skipUnless(torch.cuda.is_available(), "")
class TestRepeatNgramBlocking(TestSequenceGeneratorBase):
    @classmethod
    def setUpClass(cls):
        (
            cls.tgt_dict,
            cls.w1,
            cls.w2,
            src_tokens,
            src_lengths,
            cls.model,
        ) = test_utils.sequence_generator_setup()
        return cls

    def test_finds_repetitive_tokens(self):
        bsz, vocab_size, beam_size, step = 2, 4, 1, 3
        generated_tok = torch.tensor(
            [[2, 2, 2, 2], [3, 3, 3, 3]], dtype=torch.long, device="cuda"
        )
        lprobs = torch.zeros((beam_size * bsz, vocab_size), device="cuda")
        desired_result = lprobs.new_tensor(
            [[0.0, 0.0, -math.inf, 0.0], [0.0, 0.0, 0.0, -math.inf]]
        )

        cuda_ext_result, baseline_result = self._compare_cuda_ext_to_default_implem(
            bsz, beam_size, generated_tok, lprobs, step, 2
        )
        self.assertTensorEqual(cuda_ext_result, desired_result)
        self.assertTensorEqual(baseline_result, desired_result)

    @unittest.skipIf(torch.__version__ < "1.6.0", JIT_MSG)
    def test_jit_no_extension(self):
        bsz, vocab_size, beam_size, step = 2, 4, 1, 3
        generated_tok = torch.tensor(
            [[2, 2, 2, 2], [3, 3, 3, 3]], dtype=torch.long, device="cuda"
        )
        lprobs = torch.zeros((beam_size * bsz, vocab_size), device="cuda")
        blocker = NGramRepeatBlock(2, use_extension=False)
        base_result = blocker(generated_tok, lprobs.clone(), bsz, beam_size, step)
        scripted_blocker = torch.jit.script(blocker)
        jit_result = scripted_blocker(
            generated_tok, lprobs.clone(), bsz, beam_size, step
        )
        self.assertTensorEqual(base_result, jit_result)

    def test_ngram_blocking_same_as_default_implem(self):
        """Test that cuda extension returns same things as default impl in many settings."""
        vocab_size = 4
        step = 6
        for _ in range(2):
            block_param = np.random.choice([1, 2, 3, 4])
            batch_size = np.random.randint(1, 8)
            beam_size = np.random.choice([1, 2, 4, 8])
            lprobs = torch.zeros((beam_size * batch_size, vocab_size), device="cuda")
            generated_tok = torch.tensor(
                np.random.randint(
                    0, vocab_size, size=(batch_size * beam_size, step + 1)
                ),
                device="cuda",
                dtype=torch.long,
            )
            self._compare_cuda_ext_to_default_implem(
                batch_size,
                beam_size,
                generated_tok,
                lprobs,
                step,
                block_param,
            )

    def _compare_cuda_ext_to_default_implem(
        self, bsz, beam_size, generated_tok, lprobs, step, block_param
    ):
        """Assert that cuda extension and default implem return the same thing."""
        blocker = NGramRepeatBlock(block_param)
        assert blocker.use_extension, "Extension not compiled"
        cuda_ext_result = blocker(
            generated_tok,
            lprobs.clone(),
            bsz,
            beam_size,
            step,
        )
        blocker.use_extension = False
        baseline_result = blocker(
            generated_tok,
            lprobs.clone(),
            bsz,
            beam_size,
            step,
        )
        self.assertTensorEqual(cuda_ext_result, baseline_result)
        blocker.use_extension = True
        return cuda_ext_result, baseline_result


class TestDiverseBeamSearch(TestSequenceGeneratorBase):
    def setUp(self):
        # construct dummy dictionary
        d = test_utils.dummy_dictionary(vocab_size=2)
        self.assertEqual(d.pad(), 1)
        self.assertEqual(d.eos(), 2)
        self.assertEqual(d.unk(), 3)
        self.eos = d.eos()
        self.w1 = 4
        self.w2 = 5

        # construct source data
        self.src_tokens = torch.LongTensor(
            [
                [self.w1, self.w2, self.eos],
                [self.w1, self.w2, self.eos],
            ]
        )
        self.src_lengths = torch.LongTensor([2, 2])

        args = argparse.Namespace()
        unk = 0.0
        args.beam_probs = [
            # step 0:
            torch.FloatTensor(
                [
                    # eos      w1   w2
                    # sentence 1:
                    [0.0, unk, 0.9, 0.1],  # beam 1
                    [0.0, unk, 0.9, 0.1],  # beam 2
                    # sentence 2:
                    [0.0, unk, 0.7, 0.3],
                    [0.0, unk, 0.7, 0.3],
                ]
            ),
            # step 1:
            torch.FloatTensor(
                [
                    # eos      w1   w2
                    # sentence 1:
                    [0.0, unk, 0.6, 0.4],
                    [0.0, unk, 0.6, 0.4],
                    # sentence 2:
                    [0.25, unk, 0.35, 0.4],
                    [0.25, unk, 0.35, 0.4],
                ]
            ),
            # step 2:
            torch.FloatTensor(
                [
                    # eos      w1   w2
                    # sentence 1:
                    [1.0, unk, 0.0, 0.0],
                    [1.0, unk, 0.0, 0.0],
                    # sentence 2:
                    [0.9, unk, 0.1, 0.0],
                    [0.9, unk, 0.1, 0.0],
                ]
            ),
        ]

        task = test_utils.TestTranslationTask.setup_task(args, d, d)
        self.model = task.build_model(args)
        self.tgt_dict = task.target_dictionary

    def test_diverse_beam_search(self):
        search_strategy = search.DiverseBeamSearch(
            self.tgt_dict, num_groups=2, diversity_strength=0.0
        )
        generator = SequenceGenerator(
            [self.model],
            self.tgt_dict,
            beam_size=2,
            search_strategy=search_strategy,
        )
        sample = {
            "net_input": {
                "src_tokens": self.src_tokens,
                "src_lengths": self.src_lengths,
            }
        }
        hypos = generator.forward(sample)
        eos, w1, w2 = self.eos, self.w1, self.w2
        # sentence 1, beam 1
        self.assertHypoTokens(hypos[0][0], [w1, w1, eos])
        self.assertHypoScore(hypos[0][0], [0.9, 0.6, 1.0])
        # sentence 1, beam 2
        self.assertHypoTokens(hypos[0][1], [w1, w1, eos])
        self.assertHypoScore(hypos[0][1], [0.9, 0.6, 1.0])
        # sentence 2, beam 1
        self.assertHypoTokens(hypos[1][0], [w1, w2, eos])
        self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.9])
        # sentence 2, beam 2
        self.assertHypoTokens(hypos[1][1], [w1, w2, eos])
        self.assertHypoScore(hypos[1][1], [0.7, 0.4, 0.9])


class TestDiverseSiblingsSearch(TestDiverseBeamSearch):
    def assertHypoScore(
        self, hypo, pos_probs, sibling_rank, diversity_rate, normalized=True, lenpen=1.0
    ):
        pos_scores = torch.FloatTensor(pos_probs).log()
        pos_scores.sub_(torch.Tensor(sibling_rank) * diversity_rate)
        self.assertAlmostEqual(hypo["positional_scores"], pos_scores)
        self.assertEqual(pos_scores.numel(), hypo["tokens"].numel())
        score = pos_scores.sum()
        if normalized:
            score /= pos_scores.numel() ** lenpen
        self.assertLess(abs(score - hypo["score"]), 1e-6)

    def test_diverse_beam_search(self):
        search_strategy = search.DiverseSiblingsSearch(
            self.tgt_dict, diversity_rate=0.5
        )
        generator = SequenceGenerator(
            [self.model], self.tgt_dict, beam_size=2, search_strategy=search_strategy
        )
        sample = {
            "net_input": {
                "src_tokens": self.src_tokens,
                "src_lengths": self.src_lengths,
            }
        }
        hypos = generator.forward(sample)
        eos, w1, w2 = self.eos, self.w1, self.w2
        # sentence 1, beam 1
        self.assertHypoTokens(hypos[0][0], [w1, w1, eos])
        self.assertHypoScore(hypos[0][0], [0.9, 0.6, 1.0], [0, 1, 1], 0.5)
        # sentence 1, beam 2
        self.assertHypoTokens(hypos[0][1], [w1, w2, eos])
        self.assertHypoScore(hypos[0][1], [0.9, 0.4, 1.0], [0, 2, 1], 0.5)
        # sentence 2, beam 1
        self.assertHypoTokens(hypos[1][0], [w1, w2, eos])
        self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.9], [0, 1, 1], 0.5)
        # sentence 2, beam 2
        self.assertHypoTokens(hypos[1][1], [w1, w1, eos])
        self.assertHypoScore(hypos[1][1], [0.7, 0.35, 0.9], [0, 2, 1], 0.5)


class TestTopPSamplingSearch(TestSequenceGeneratorBase):
    def setUp(self):
        # construct dummy dictionary
        d = test_utils.dummy_dictionary(vocab_size=2)
        self.assertEqual(d.pad(), 1)
        self.assertEqual(d.eos(), 2)
        self.assertEqual(d.unk(), 3)
        self.eos = d.eos()
        self.w1 = 4
        self.w2 = 5

        # construct source data
        self.src_tokens = torch.LongTensor(
            [
                [self.w1, self.w2, self.eos],
                [self.w1, self.w2, self.eos],
            ]
        )
        self.src_lengths = torch.LongTensor([2, 2])

        args = argparse.Namespace()
        unk = 0.0
        # The minimal probability of top 2 tokens.
        self.min_top2_prob = 0.75
        # The minimal probability of the top 1 token.
        self.min_top1_prob = 0.4

        w1_prob = self.min_top1_prob
        w2_prob = self.min_top2_prob - self.min_top1_prob
        eos_prob = 1 - self.min_top2_prob

        args.beam_probs = [
            # step 0:
            torch.FloatTensor(
                [
                    # eos      w1   w2
                    [0.0, unk, 1.0, 0.0],
                    [0.0, unk, 1.0, 0.0],
                    [0.0, unk, 1.0, 0.0],
                    [0.0, unk, 1.0, 0.0],
                ]
            ),
            # step 1:
            torch.FloatTensor(
                [
                    # eos           w1       w2
                    [eos_prob, unk, w1_prob, w2_prob],
                    [eos_prob, unk, w1_prob, w2_prob],
                    [eos_prob, unk, w1_prob, w2_prob],
                    [eos_prob, unk, w1_prob, w2_prob],
                ]
            ),
            # step 2:
            torch.FloatTensor(
                [
                    # eos      w1   w2
                    [1.0, unk, 0.0, 0.0],
                    [1.0, unk, 0.0, 0.0],
                    [1.0, unk, 0.0, 0.0],
                    [1.0, unk, 0.0, 0.0],
                ]
            ),
        ]

        task = test_utils.TestTranslationTask.setup_task(args, d, d)
        self.model = task.build_model(args)
        self.tgt_dict = task.target_dictionary

    def test_topp_sampling_search_low_prob(self):
        # Given a prob low enough for top-P sampling, we expect only the top
        # 1 token to be sampled, which always results in the same output.
        low_sampling_topp = self.min_top1_prob / 2.0
        search_strategy = search.Sampling(
            self.tgt_dict, sampling_topp=low_sampling_topp
        )
        generator = SequenceGenerator(
            [self.model], self.tgt_dict, beam_size=2, search_strategy=search_strategy
        )
        sample = {
            "net_input": {
                "src_tokens": self.src_tokens,
                "src_lengths": self.src_lengths,
            }
        }
        hypos = generator.forward(sample)
        eos, w1 = self.eos, self.w1
        # sentence 1, beam 1
        self.assertHypoTokens(hypos[0][0], [w1, w1, eos])
        self.assertHypoScore(hypos[0][0], [1.0, 0.4, 1.0])
        # sentence 1, beam 2
        self.assertHypoTokens(hypos[0][1], [w1, w1, eos])
        self.assertHypoScore(hypos[0][1], [1.0, 0.4, 1.0])
        # sentence 2, beam 1
        self.assertHypoTokens(hypos[1][0], [w1, w1, eos])
        self.assertHypoScore(hypos[1][0], [1.0, 0.4, 1.0])
        # sentence 2, beam 2
        self.assertHypoTokens(hypos[1][1], [w1, w1, eos])
        self.assertHypoScore(hypos[1][1], [1.0, 0.4, 1.0])

    def test_topp_sampling_search_high_prob(self):
        # Given a prob high enough for top-P sampling, any of the top 2
        # tokens could be sampled. This can cause different outputs.
        high_sampling_topp = (self.min_top1_prob + self.min_top2_prob) / 2.0
        search_strategy = search.Sampling(
            self.tgt_dict, sampling_topp=high_sampling_topp
        )
        generator = SequenceGenerator(
            [self.model], self.tgt_dict, beam_size=2, search_strategy=search_strategy
        )
        sample = {
            "net_input": {
                "src_tokens": self.src_tokens,
                "src_lengths": self.src_lengths,
            }
        }
        hypos = generator.forward(sample)
        eos, w1, w2 = self.eos, self.w1, self.w2
        # sentence 1, beam 1
        self.assertTrue(
            self.hypoTokens(hypos[0][0], [w1, w1, eos])
            or self.hypoTokens(hypos[0][0], [w1, w2, eos])
        )
        self.assertTrue(
            self.hypoScore(hypos[0][0], [1.0, 0.4, 1.0])
            or self.hypoScore(hypos[0][0], [1.0, 0.35, 1.0])
        )
        # sentence 1, beam 2
        self.assertTrue(
            self.hypoTokens(hypos[0][1], [w1, w1, eos])
            or self.hypoTokens(hypos[0][1], [w1, w2, eos])
        )
        self.assertTrue(
            self.hypoScore(hypos[0][1], [1.0, 0.4, 1.0])
            or self.hypoScore(hypos[0][1], [1.0, 0.35, 1.0])
        )
        # sentence 2, beam 1
        self.assertTrue(
            self.hypoTokens(hypos[1][0], [w1, w1, eos])
            or self.hypoTokens(hypos[1][0], [w1, w2, eos])
        )
        self.assertTrue(
            self.hypoScore(hypos[1][0], [1.0, 0.4, 1.0])
            or self.hypoScore(hypos[1][0], [1.0, 0.35, 1.0])
        )
        # sentence 2, beam 2
        self.assertTrue(
            self.hypoTokens(hypos[1][1], [w1, w1, eos])
            or self.hypoTokens(hypos[1][1], [w1, w2, eos])
        )
        self.assertTrue(
            self.hypoScore(hypos[1][1], [1.0, 0.4, 1.0])
            or self.hypoScore(hypos[1][1], [1.0, 0.35, 1.0])
        )

    def hypoTokens(self, hypo, tokens):
        return self.tensorEqual(hypo["tokens"], torch.LongTensor(tokens))

    def hypoScore(self, hypo, pos_probs, normalized=True, lenpen=1.0):
        pos_scores = torch.FloatTensor(pos_probs).log()
        if not self.almostEqual(hypo["positional_scores"], pos_scores):
            return False
        if pos_scores.numel() != hypo["tokens"].numel():
            return False
        score = pos_scores.sum()
        if normalized:
            score /= pos_scores.numel() ** lenpen
        return abs(score - hypo["score"]) < 1e-6

    def almostEqual(self, t1, t2):
        return t1.size() == t2.size() and (t1 - t2).abs().max() < 1e-4

    def tensorEqual(self, t1, t2):
        return t1.size() == t2.size() and t1.ne(t2).long().sum() == 0


if __name__ == "__main__":
    unittest.main()
27,810
36.330201
92
py