Dataset columns: id (int64, range 0–190k); prompt (string, lengths 21–13.4M); docstring (string, lengths 1–12k)
183,734
import os
import subprocess
import sys

from setuptools import Extension, find_packages, setup

# NOTE: write_version_py, extra_compile_args, NumpyExtension, readme and
# dependency_links are defined elsewhere in the original setup.py; they are
# referenced here but not included in this snippet.
version = write_version_py()

extensions = [
    Extension(
        "fairseq.libbleu",
        sources=[
            "fairseq/clib/libbleu/libbleu.cpp",
            "fairseq/clib/libbleu/module.cpp",
        ],
        extra_compile_args=extra_compile_args,
    ),
    NumpyExtension(
        "fairseq.data.data_utils_fast",
        sources=["fairseq/data/data_utils_fast.pyx"],
        language="c++",
        extra_compile_args=extra_compile_args,
    ),
    NumpyExtension(
        "fairseq.data.token_block_utils_fast",
        sources=["fairseq/data/token_block_utils_fast.pyx"],
        language="c++",
        extra_compile_args=extra_compile_args,
    ),
]

cmdclass = {}
extra_packages = []


def do_setup(package_data):
    setup(
        name="fairseq",
        version=version,
        description="Facebook AI Research Sequence-to-Sequence Toolkit",
        url="https://github.com/pytorch/fairseq",
        classifiers=[
            "Intended Audience :: Science/Research",
            "License :: OSI Approved :: MIT License",
            "Programming Language :: Python :: 3.6",
            "Programming Language :: Python :: 3.7",
            "Programming Language :: Python :: 3.8",
            "Topic :: Scientific/Engineering :: Artificial Intelligence",
        ],
        long_description=readme,
        long_description_content_type="text/markdown",
        setup_requires=[
            "cython",
            'numpy<1.20.0; python_version<"3.7"',
            'numpy; python_version>="3.7"',
            "setuptools>=18.0",
        ],
        install_requires=[
            "cffi",
            "cython",
            'dataclasses; python_version<"3.7"',
            "hydra-core<1.1",
            "omegaconf<2.1",
            'numpy<1.20.0; python_version<"3.7"',
            'numpy; python_version>="3.7"',
            "regex",
            "sacrebleu>=1.4.12",
            "torch",
            "tqdm",
        ],
        dependency_links=dependency_links,
        packages=find_packages(
            exclude=[
                "examples",
                "examples.*",
                "scripts",
                "scripts.*",
                "tests",
                "tests.*",
            ]
        )
        + extra_packages,
        package_data=package_data,
        ext_modules=extensions,
        test_suite="tests",
        entry_points={
            "console_scripts": [
                "fairseq-eval-lm = fairseq_cli.eval_lm:cli_main",
                "fairseq-generate = fairseq_cli.generate:cli_main",
                "fairseq-hydra-train = fairseq_cli.hydra_train:cli_main",
                "fairseq-interactive = fairseq_cli.interactive:cli_main",
                "fairseq-preprocess = fairseq_cli.preprocess:cli_main",
                "fairseq-score = fairseq_cli.score:cli_main",
                "fairseq-train = fairseq_cli.train:cli_main",
                "fairseq-validate = fairseq_cli.validate:cli_main",
            ],
        },
        cmdclass=cmdclass,
        zip_safe=False,
    )
null
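The snippet above references `NumpyExtension` without defining it. In fairseq's full setup.py it is a small `Extension` subclass that defers the numpy include directory until build time, so `import numpy` is not required just to run setup. A sketch along those lines (treat the exact attribute names as an assumption):

class NumpyExtension(Extension):
    """Extension subclass that adds numpy's include dir lazily."""

    def __init__(self, *args, **kwargs):
        cython_include_dirs = kwargs.pop("cython_include_dirs", ".")
        super().__init__(*args, **kwargs)
        self.cython_include_dirs = cython_include_dirs

    @property
    def include_dirs(self):
        import numpy  # only imported once the extension is actually built

        return self._include_dirs + [numpy.get_include()]

    @include_dirs.setter
    def include_dirs(self, dirs):
        self._include_dirs = dirs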
183,735
import os
import subprocess
import sys

from setuptools import Extension, find_packages, setup

# NOTE: cmdclass and extra_packages are defined earlier in setup.py
# (see the previous row); they are referenced but not redefined here.
if "READTHEDOCS" in os.environ:
    # don't build extensions when generating docs
    extensions = []
    if "build_ext" in cmdclass:
        del cmdclass["build_ext"]

    # use CPU build of PyTorch
    dependency_links = [
        "https://download.pytorch.org/whl/cpu/torch-1.7.0%2Bcpu-cp36-cp36m-linux_x86_64.whl"
    ]
else:
    dependency_links = []

if os.path.exists(os.path.join("fairseq", "model_parallel", "megatron", "mpu")):
    extra_packages.append("fairseq.model_parallel.megatron.mpu")


def get_files(path, relative_to="fairseq"):
    all_files = []
    for root, _dirs, files in os.walk(path, followlinks=True):
        root = os.path.relpath(root, relative_to)
        for file in files:
            if file.endswith(".pyc"):
                continue
            all_files.append(os.path.join(root, file))
    return all_files
null
183,736
import logging
import os
import sys
import time
from typing import Dict, List, Optional

import torch
from torch import Tensor
from tqdm import tqdm

from fairseq import options, tasks, utils
from fairseq.checkpoint_utils import load_model_ensemble_and_task
from fairseq.dataclass.utils import convert_namespace_to_omegaconf


def write_result(results, output_file):
    with open(output_file, 'w') as f:
        for line in results:
            f.write(line + '\n')
null
183,737
import logging
import os
import sys
import time
from typing import Dict, List, Optional

import torch
from torch import Tensor
from tqdm import tqdm

from fairseq import options, tasks, utils
from fairseq.checkpoint_utils import load_model_ensemble_and_task
from fairseq.dataclass.utils import convert_namespace_to_omegaconf

logger = logging.getLogger("inference")


def fairseq_generate(data_lines, cfg, models, task, batch_size, device):
    # fairseq original decoding implementation
    src_dict = task.source_dictionary
    tgt_dict = task.target_dictionary
    generator = task.build_generator(models, cfg.generation)
    data_size = len(data_lines)
    all_results = []
    logger.info(f'Fairseq generate batch {batch_size}')
    start = time.perf_counter()
    for start_idx in tqdm(range(0, data_size, batch_size)):
        batch_lines = [line for line in data_lines[start_idx: min(start_idx + batch_size, data_size)]]
        batch_ids = [src_dict.encode_line(sentence, add_if_not_exist=False).long() for sentence in batch_lines]
        lengths = torch.LongTensor([t.numel() for t in batch_ids])
        batch_dataset = task.build_dataset_for_inference(batch_ids, lengths)
        batch = batch_dataset.collater(batch_dataset)
        batch = utils.apply_to_sample(lambda t: t.to(device), batch)
        translations = generator.generate(models, batch, prefix_tokens=None)
        results = []
        for id_, hypos in zip(batch["id"].tolist(), translations):
            results.append((id_, hypos))
        batched_hypos = [hypos for _, hypos in sorted(results, key=lambda x: x[0])]
        all_results.extend([tgt_dict.string(hypos[0]['tokens']) for hypos in batched_hypos])
    delta = time.perf_counter() - start
    remove_bpe_results = [line.replace('@@ ', '') for line in all_results]
    return remove_bpe_results, delta
null
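For context, a minimal sketch of how this helper would be driven. The checkpoint and data paths are illustrative, not from the source:

import torch
from fairseq.checkpoint_utils import load_model_ensemble_and_task

# hypothetical checkpoint/data paths, for illustration only
models, cfg, task = load_model_ensemble_and_task(
    ["checkpoint_best.pt"], arg_overrides={"data": "data-bin/wmt14_en_de"},
)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
models = [m.to(device).eval() for m in models]

with open("test.en.bpe") as f:
    data_lines = [line.strip() for line in f]

results, elapsed = fairseq_generate(data_lines, cfg, models, task, batch_size=32, device=device)
print(f"translated {len(results)} lines in {elapsed:.1f}s")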
183,738
import logging
import os
import sys
import time
from typing import Dict, List, Optional

import torch
from torch import Tensor
from tqdm import tqdm

from fairseq import options, tasks, utils
from fairseq.checkpoint_utils import load_model_ensemble_and_task
from fairseq.dataclass.utils import convert_namespace_to_omegaconf

logger = logging.getLogger("inference")


def baseline_forward_decoder(model,
                             input_tokens,
                             encoder_out: Dict[str, List[Tensor]],
                             incremental_state: Dict[str, Dict[str, Optional[Tensor]]],
                             parallel_forward_start_pos=None,
                             temperature: float = 1.0):
    decoder_out = model.decoder.forward(input_tokens,
                                        encoder_out=encoder_out,
                                        incremental_state=incremental_state,
                                        parallel_forward_start_pos=parallel_forward_start_pos)
    decoder_out_tuple = (decoder_out[0].div_(temperature), decoder_out[1])
    pred_tokens = torch.argmax(decoder_out_tuple[0], dim=-1).squeeze(0)
    return pred_tokens


def baseline_generate(data_lines, model, task, batch_size, device, max_len=200):
    # simplified AR greedy decoding
    src_dict = task.source_dictionary
    tgt_dict = task.target_dictionary
    data_size = len(data_lines)
    all_results = []
    start = time.perf_counter()
    logger.info(f'Baseline generate')
    for start_idx in tqdm(range(0, data_size, batch_size)):
        batch_size = min(data_size - start_idx, batch_size)
        batch_lines = [line for line in data_lines[start_idx: start_idx + batch_size]]
        batch_ids = [src_dict.encode_line(sentence, add_if_not_exist=False).long() for sentence in batch_lines]
        lengths = torch.LongTensor([t.numel() for t in batch_ids])
        batch_dataset = task.build_dataset_for_inference(batch_ids, lengths)
        batch_dataset.left_pad_source = False
        batch = batch_dataset.collater(batch_dataset)
        batch = utils.apply_to_sample(lambda t: t.to(device), batch)
        net_input = batch['net_input']
        encoder_out = model.encoder.forward(net_input['src_tokens'], net_input['src_lengths'])
        incremental_state = torch.jit.annotate(Dict[str, Dict[str, Optional[Tensor]]], {})
        batch_tokens = [[tgt_dict.eos()] for _ in range(batch_size)]
        finish_list = []
        for step in range(0, max_len):
            cur_input_tokens = torch.tensor(batch_tokens).to(device).long()
            pred_tokens = baseline_forward_decoder(model,
                                                   cur_input_tokens,
                                                   encoder_out,
                                                   incremental_state=incremental_state)
            for i, pred_tok in enumerate(pred_tokens):
                if len(batch_tokens[i]) == 1:
                    batch_tokens[i].append(pred_tok.item())
                else:
                    if batch_tokens[i][-1] != tgt_dict.eos():
                        batch_tokens[i].append(pred_tok.item())
                    else:
                        if i not in finish_list:
                            finish_list.append(i)
                        batch_tokens[i].append(tgt_dict.eos())
            if len(finish_list) == batch_size:
                break
        batch_tokens = [y for x, y in sorted(zip(batch['id'].cpu().tolist(), batch_tokens))]
        for tokens in batch_tokens:
            all_results.append(tgt_dict.string(tokens[1:]))
    remove_bpe_results = [line.replace('@@ ', '') for line in all_results]
    delta = time.perf_counter() - start
    return remove_bpe_results, delta
null
183,739
import logging
import os
import sys
import time
from typing import Dict, List, Optional

import torch
from torch import Tensor
from tqdm import tqdm

from fairseq import options, tasks, utils
from fairseq.checkpoint_utils import load_model_ensemble_and_task
from fairseq.dataclass.utils import convert_namespace_to_omegaconf

logger = logging.getLogger("inference")


def gad_forward(start_pos_list, block_size, batch_size, tgt_dict, prev_output_tokens,
                encoder_out, AR_encoder_out, model, AR_model, beta, tau, max_len=200):
    # NOTE: `device` and `forward_decoder` are module-level names in the
    # original script; they are referenced here but not defined in this snippet.
    pad_tokens = [[tgt_dict.pad()] * (max_len + block_size) for _ in range(batch_size)]
    for i in range(batch_size):
        pad_tokens[i][:len(prev_output_tokens[i])] = prev_output_tokens[i]
    output_tokens = torch.tensor(pad_tokens).to(device)
    output_tokens = output_tokens[:, : output_tokens.ne(tgt_dict.pad()).sum(1).max()]
    # draft: the NAT model fills in the current block in parallel
    _, tensor_tokens = model.decoder(
        normalize=False,
        prev_output_tokens=output_tokens,
        encoder_out=encoder_out,
    ).max(-1)
    _tokens = tensor_tokens.tolist()
    for i, start_pos in enumerate(start_pos_list):
        if start_pos_list[i] != -1:
            output_tokens[i, start_pos:start_pos + block_size] = tensor_tokens[i, start_pos:start_pos + block_size]
            prev_output_tokens[i][start_pos:start_pos + block_size] = _tokens[i][start_pos:start_pos + block_size]
    # verify: the AR model re-scores the drafted tokens
    append_eos = torch.tensor([[tgt_dict.eos()] for _ in range(batch_size)]).to(device)
    cur_span_input_tokens = torch.cat((append_eos, output_tokens), dim=-1)
    AR_verify_tokens = forward_decoder(AR_model, cur_span_input_tokens, AR_encoder_out, beta=beta, tau=tau)
    next_output_tokens = prev_output_tokens.copy()
    for i in range(batch_size):
        if start_pos_list[i] != -1:
            # keep drafted tokens up to the first position the verifier rejects
            bifurcation = block_size
            for j, (token, AR_verify_token) in enumerate(
                    zip(prev_output_tokens[i][start_pos_list[i]:], AR_verify_tokens[i][start_pos_list[i]:-1])):
                if token not in AR_verify_token:
                    bifurcation = j
                    break
            next_output_tokens[i] = prev_output_tokens[i][:start_pos_list[i] + bifurcation] + \
                                    [AR_verify_tokens[i][start_pos_list[i] + bifurcation][0]] + \
                                    [tgt_dict.unk()] * block_size
            find_eos = False
            for j, o in enumerate(next_output_tokens[i][start_pos_list[i]:start_pos_list[i] + bifurcation + 1]):
                if o == tgt_dict.eos() or start_pos_list[i] + j == max_len:
                    next_output_tokens[i] = next_output_tokens[i][:start_pos_list[i] + j]
                    start_pos_list[i] = -1
                    find_eos = True
                    break
            if not find_eos:
                start_pos_list[i] = start_pos_list[i] + bifurcation + 1
    return next_output_tokens, start_pos_list


def gad_generate(data_lines, model, AR_model, task, block_size, batch_size, device, beta=1, tau=0, max_len=200):
    # Generalized Aggressive Decoding
    src_dict = task.source_dictionary
    tgt_dict = task.target_dictionary
    data_size = len(data_lines)
    all_results = []
    logger.info(f'GAD generate')
    start = time.perf_counter()
    for start_idx in tqdm(range(0, data_size, batch_size)):
        batch_size = min(data_size - start_idx, batch_size)
        batch_lines = [line for line in data_lines[start_idx: start_idx + batch_size]]
        batch_ids = [src_dict.encode_line(sentence, add_if_not_exist=False).long() for sentence in batch_lines]
        lengths = torch.LongTensor([t.numel() for t in batch_ids])
        batch_dataset = task.build_dataset_for_inference(batch_ids, lengths)
        batch_dataset.left_pad_source = False
        batch = batch_dataset.collater(batch_dataset)
        batch = utils.apply_to_sample(lambda t: t.to(device), batch)
        net_input = batch['net_input']
        AR_encoder_out = AR_model.encoder.forward(net_input['src_tokens'], net_input['src_lengths'])
        encoder_out = model.encoder.forward(net_input['src_tokens'], net_input['src_lengths'])
        sentences = [[tgt_dict.eos()] for _ in range(batch_size)]
        prev_output_tokens = [[tgt_dict.unk()] * block_size for _ in range(batch_size)]
        start_pos_list = [0] * batch_size
        finish_list = []
        for step in range(0, max_len):
            prev_output_tokens, start_pos_list = gad_forward(start_pos_list, block_size, batch_size, tgt_dict,
                                                             prev_output_tokens, encoder_out, AR_encoder_out,
                                                             model, AR_model, beta, tau)
            for i, start_pos in enumerate(start_pos_list):
                if i not in finish_list:
                    if start_pos == -1:
                        finish_list.append(i)
                        sentences[i] = prev_output_tokens[i]
            if len(finish_list) == batch_size:
                break
        batch_sents = [y for x, y in sorted(zip(batch['id'].cpu().tolist(), sentences))]
        for s in batch_sents:
            all_results.append(tgt_dict.string(s))
    remove_bpe_results = [line.replace('@@ ', '') for line in all_results]
    delta = time.perf_counter() - start
    return remove_bpe_results, delta
null
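The heart of `gad_forward` is the bifurcation check: drafted tokens are kept only up to the first position where the draft falls outside what appears to be the AR verifier's top-beta candidate set. A self-contained toy version of that acceptance rule (names here are illustrative):

def accept_prefix(draft_tokens, verifier_topk):
    """Return the longest prefix of `draft_tokens` accepted by the verifier.

    draft_tokens: list of token ids proposed by the NAT drafter.
    verifier_topk: list of sets; verifier_topk[j] holds the AR model's
        top-beta candidates for position j.
    """
    bifurcation = len(draft_tokens)
    for j, token in enumerate(draft_tokens):
        if token not in verifier_topk[j]:
            bifurcation = j  # first disagreement: everything after is discarded
            break
    return draft_tokens[:bifurcation]


# all drafted tokens up to the first mismatch are accepted in one shot
assert accept_prefix([5, 9, 2, 7], [{5}, {9, 4}, {3}, {7}]) == [5, 9]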
183,740
import math
from math import log

import numpy as np
import torch
import torch.nn.functional as F
from torch import Tensor

from fairseq import metrics, utils
from fairseq.criterions import FairseqCriterion, register_criterion


def log_metric(key, logging_outputs):
    if len(logging_outputs) > 0 and key in logging_outputs[0]:
        metrics.log_scalar(
            key,
            utils.item(np.mean([log.get(key, 0) for log in logging_outputs])),
            priority=10,
            round=3,
        )
null
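A quick illustration of what this helper expects: `logging_outputs` is the usual fairseq list of per-batch dicts, and the scalar is only logged when the key is present in the first entry. A hypothetical call, as it might appear inside a criterion's `reduce_metrics` (the metric key is made up for the example):

# hypothetical logging outputs collected across batches/workers
logging_outputs = [
    {"loss": 2.3, "glat_accu": 0.61},
    {"loss": 2.1, "glat_accu": 0.64},
]

log_metric("glat_accu", logging_outputs)   # logs mean(0.61, 0.64) = 0.625
log_metric("missing_key", logging_outputs) # no-op: key absent from outputs[0]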
183,741
import logging
import random
from contextlib import contextmanager

import torch

from fairseq import utils
from fairseq.iterative_refinement_generator import DecoderOut
from fairseq.models import register_model, register_model_architecture
from fairseq.models.nat import FairseqNATModel
from fairseq.models.nat.nonautoregressive_transformer import NATransformerDecoder, NATransformerEncoder, NATransformerModel
from fairseq.modules.transformer_sentence_encoder import init_bert_params


@contextmanager  # required for the try/yield/finally pattern below; dropped in the extract
def torch_seed(seed):
    # save RNG state, seed deterministically, and restore the state on exit
    state = torch.random.get_rng_state()
    state_cuda = torch.cuda.random.get_rng_state()
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    try:
        yield
    finally:
        torch.random.set_rng_state(state)
        torch.cuda.random.set_rng_state(state_cuda)
null
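Usage is the standard context-manager pattern: stochastic ops inside the block are reproducible given the seed, and the global RNG state is untouched afterwards (note the implementation above touches CUDA RNG state, so it assumes a CUDA-enabled build):

import torch

with torch_seed(1234):
    a = torch.randn(3)  # deterministic given the seed
with torch_seed(1234):
    b = torch.randn(3)  # identical draw

assert torch.equal(a, b)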
183,742
import logging
import random
from contextlib import contextmanager

import torch

from fairseq import utils
from fairseq.iterative_refinement_generator import DecoderOut
from fairseq.models import register_model, register_model_architecture
from fairseq.models.nat import FairseqNATModel
from fairseq.models.nat.nonautoregressive_transformer import NATransformerDecoder, NATransformerEncoder, NATransformerModel
from fairseq.modules.transformer_sentence_encoder import init_bert_params


def base_architecture(args):
    args.encoder_embed_path = getattr(args, "encoder_embed_path", None)
    args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
    args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048)
    args.encoder_layers = getattr(args, "encoder_layers", 6)
    args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8)
    args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
    args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False)
    args.decoder_embed_path = getattr(args, "decoder_embed_path", None)
    args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim)
    args.decoder_ffn_embed_dim = getattr(
        args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim
    )
    args.decoder_layers = getattr(args, "decoder_layers", 6)
    args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
    args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False)
    args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
    args.attention_dropout = getattr(args, "attention_dropout", 0.0)
    args.activation_dropout = getattr(args, "activation_dropout", 0.0)
    args.activation_fn = getattr(args, "activation_fn", "relu")
    args.dropout = getattr(args, "dropout", 0.1)
    args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
    args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
    args.share_decoder_input_output_embed = getattr(
        args, "share_decoder_input_output_embed", False
    )
    args.share_all_embeddings = getattr(args, "share_all_embeddings", False)
    args.no_token_positional_embeddings = getattr(
        args, "no_token_positional_embeddings", False
    )
    args.adaptive_input = getattr(args, "adaptive_input", False)
    args.apply_bert_init = getattr(args, "apply_bert_init", False)
    args.decoder_output_dim = getattr(
        args, "decoder_output_dim", args.decoder_embed_dim
    )
    args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)

    # --- special arguments ---
    args.src_embedding_copy = getattr(args, "src_embedding_copy", False)


# the stray '"block", "block"' in the extracted text is the argument list of a
# decorator whose call was lost; reconstructed here as the registration it belongs to
@register_model_architecture("block", "block")
def block_architecture(args):
    args.encoder_layers = getattr(args, "encoder_layers", 6)
    args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
    args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", args.encoder_embed_dim * 4)
    args.encoder_attention_heads = getattr(args, "encoder_attention_heads", args.encoder_embed_dim // 64)
    args.decoder_layers = getattr(args, "decoder_layers", 6)
    args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512)
    args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", args.decoder_embed_dim * 4)
    args.decoder_attention_heads = getattr(args, "decoder_attention_heads", args.decoder_embed_dim // 64)
    base_architecture(args)
null
183,743
# The imports and the body of base_architecture in this row are a verbatim
# duplicate of the previous row (183,742) and are not repeated here. Only the
# row's target function differs:

# as above, '"block", "block"' is residue of a dropped @register_model_architecture
# decorator call; the reconstruction below follows the same pattern
@register_model_architecture("block", "block")
def base_architecture2(args):
    base_architecture(args)
null
183,754
import functools
from typing import Any, Dict, List, Tuple, Union

import torch
import torch.utils.checkpoint as checkpoint

from fairseq import utils


def _checkpointed_forward(original_forward, offload_to_cpu, *args, **kwargs):
    # Autograd Functions in PyTorch work best with positional args, since
    # the backward must return gradients (or None) for every input argument.
    # We can flatten keyword arguments to make this easier.
    # NOTE: pack_kwargs, CheckpointFunction and unpack_non_tensors are defined
    # elsewhere in fairseq's checkpoint_activations module; they are not part
    # of this snippet.
    kwarg_keys, flat_args = pack_kwargs(*args, **kwargs)
    parent_ctx_dict = {"offload": offload_to_cpu}
    output = CheckpointFunction.apply(
        original_forward, parent_ctx_dict, kwarg_keys, *flat_args
    )
    if isinstance(output, torch.Tensor):
        return output
    else:
        packed_non_tensor_outputs = parent_ctx_dict["packed_non_tensor_outputs"]
        if packed_non_tensor_outputs:
            output = unpack_non_tensors(output, packed_non_tensor_outputs)
        return output


The provided code snippet includes the necessary dependencies for implementing the `checkpoint_wrapper` function. Write a Python function `def checkpoint_wrapper(m, offload_to_cpu=False)` implementing the friendlier activation-checkpointing wrapper described in the docstring below.

Here is the function:

def checkpoint_wrapper(m, offload_to_cpu=False):
    """
    A friendlier wrapper for performing activation checkpointing.

    Compared to the PyTorch version, this version:
    - wraps an nn.Module, so that all subsequent calls will use checkpointing
    - handles keyword arguments in the forward
    - handles non-Tensor outputs from the forward

    Usage::

        checkpointed_module = checkpoint_wrapper(my_module, offload_to_cpu=True)
        a, b = checkpointed_module(x, y=3, z=torch.Tensor([1]))
    """
    m.forward = functools.partial(
        _checkpointed_forward,
        m.forward,  # original_forward
        offload_to_cpu,
    )
    return m
A friendlier wrapper for performing activation checkpointing. Compared to the PyTorch version, this version: - wraps an nn.Module, so that all subsequent calls will use checkpointing - handles keyword arguments in the forward - handles non-Tensor outputs from the forward Usage:: checkpointed_module = checkpoint_wrapper(my_module, offload_to_cpu=True) a, b = checkpointed_module(x, y=3, z=torch.Tensor([1]))
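A minimal end-to-end sketch of the trade-off this wrapper buys: activations inside the wrapped module are recomputed during backward instead of being stored, cutting memory at the cost of a second forward pass. The snippet above stubs out its helpers, so this assumes the complete fairseq module:

import torch
import torch.nn as nn

# a toy block; in practice this would be e.g. a TransformerEncoderLayer
block = nn.Sequential(nn.Linear(512, 2048), nn.ReLU(), nn.Linear(2048, 512))
block = checkpoint_wrapper(block, offload_to_cpu=False)

x = torch.randn(8, 512, requires_grad=True)
y = block(x)          # forward runs without storing intermediate activations
y.sum().backward()    # block's forward is recomputed here to get gradients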
183,760
import torch
import torch.nn as nn
import torch.nn.functional as F

from fairseq import utils
from fairseq.incremental_decoding_utils import with_incremental_state
from fairseq.modules.fairseq_dropout import FairseqDropout

from .unfold import unfold1d


class DynamicConv1dTBC(nn.Module):
    # method bodies are elided in this snippet; signatures are kept as stubs
    def __init__(
        self,
        input_size,
        kernel_size=1,
        padding_l=None,
        num_heads=1,
        weight_dropout=0.0,
        weight_softmax=False,
        renorm_padding=False,
        bias=False,
        conv_bias=False,
        query_size=None,
        in_proj=False,
    ): ...

    def in_proj(self): ...

    def reset_parameters(self): ...

    def forward(self, x, incremental_state=None, query=None, unfold=None): ...

    def _forward_unfolded(self, x, incremental_state, query): ...

    def _forward_expanded(self, x, incremental_stat, query): ...

    def reorder_incremental_state(self, incremental_state, new_order): ...

    def _get_input_buffer(self, incremental_state): ...

    def _set_input_buffer(self, incremental_state, new_buffer): ...

    def extra_repr(self): ...


def DynamicConv(
    input_size,
    kernel_size=1,
    padding_l=None,
    num_heads=1,
    weight_dropout=0.0,
    weight_softmax=False,
    renorm_padding=False,
    bias=False,
    conv_bias=False,
    query_size=None,
    in_proj=False,
):
    # prefer the fused CUDA kernel when available, falling back to the
    # pure-PyTorch TBC implementation otherwise
    if torch.cuda.is_available():
        try:
            from fairseq.modules.dynamicconv_layer import DynamicconvLayer

            return DynamicconvLayer(
                input_size,
                kernel_size=kernel_size,
                padding_l=padding_l,
                num_heads=num_heads,
                weight_dropout=weight_dropout,
                weight_softmax=weight_softmax,
                renorm_padding=renorm_padding,
                bias=bias,
                conv_bias=conv_bias,
                query_size=query_size,
            )
        except ImportError as e:
            print(e)
    return DynamicConv1dTBC(
        input_size,
        kernel_size=kernel_size,
        padding_l=padding_l,
        num_heads=num_heads,
        weight_dropout=weight_dropout,
        weight_softmax=weight_softmax,
        renorm_padding=renorm_padding,
        bias=bias,
        conv_bias=conv_bias,
        query_size=query_size,
    )
null
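The `TBC` suffix refers to the (time, batch, channel) tensor layout fairseq uses for these convolutions. A hedged usage sketch, assuming the full upstream implementation rather than the stubs above:

import torch

conv = DynamicConv(
    input_size=512,
    kernel_size=7,
    padding_l=6,       # left padding for causal (decoder-side) convolution
    num_heads=8,
    weight_softmax=True,
)

x = torch.randn(20, 4, 512)  # (time=20, batch=4, channels=512), i.e. TBC
y = conv(x)                  # same TBC shape out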
183,762
from typing import Optional, Tuple

import torch
import torch.nn as nn

from fairseq.modules import (
    FairseqDropout,
    LayerDropModuleList,
    LayerNorm,
    MultiheadAttention,
    PositionalEmbedding,
    TransformerSentenceEncoderLayer,
)
from fairseq.modules.quant_noise import quant_noise as apply_quant_noise_


The provided code snippet includes the necessary dependencies for implementing the `init_bert_params` function. Write a Python function `def init_bert_params(module)` that initializes the weights specific to the BERT model, overriding the default initializations as described in the docstring below.

Here is the function:

def init_bert_params(module):
    """
    Initialize the weights specific to the BERT Model.
    This overrides the default initializations depending on the specified arguments.
        1. If normal_init_linear_weights is set then weights of linear
           layer will be initialized using the normal distribution and
           bias will be set to the specified value.
        2. If normal_init_embed_weights is set then weights of embedding
           layer will be initialized using the normal distribution.
        3. If normal_init_proj_weights is set then weights of
           in_project_weight for MultiHeadAttention are initialized using
           the normal distribution (to be validated).
    """

    if isinstance(module, nn.Linear):
        module.weight.data.normal_(mean=0.0, std=0.02)
        if module.bias is not None:
            module.bias.data.zero_()
    if isinstance(module, nn.Embedding):
        module.weight.data.normal_(mean=0.0, std=0.02)
        if module.padding_idx is not None:
            module.weight.data[module.padding_idx].zero_()
    if isinstance(module, MultiheadAttention):
        module.q_proj.weight.data.normal_(mean=0.0, std=0.02)
        module.k_proj.weight.data.normal_(mean=0.0, std=0.02)
        module.v_proj.weight.data.normal_(mean=0.0, std=0.02)
Initialize the weights specific to the BERT Model. This overrides the default initializations depending on the specified arguments. 1. If normal_init_linear_weights is set then weights of linear layer will be initialized using the normal distribution and bias will be set to the specified value. 2. If normal_init_embed_weights is set then weights of embedding layer will be initialized using the normal distribution. 3. If normal_init_proj_weights is set then weights of in_project_weight for MultiHeadAttention are initialized using the normal distribution (to be validated).
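The function is written to be used with `nn.Module.apply`, which visits every submodule recursively; this is how fairseq applies it when BERT-style init is requested. A small sketch:

import torch.nn as nn

model = nn.Sequential(
    nn.Embedding(1000, 512, padding_idx=1),
    nn.Linear(512, 512),
)
model.apply(init_bert_params)  # re-initializes Linear/Embedding weights in place

# the padding row is zeroed, everything else ~ N(0, 0.02^2)
assert model[0].weight.data[1].abs().sum() == 0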
183,767
import logging import re from operator import attrgetter, itemgetter import numpy as np import torch.distributed as dist import torch.nn as nn from .modules import PQConv2d, PQEmbedding, PQLinear from .pq import PQ def get_layers(model, filter_regexp): """ Filters out the layers according to a regexp. Note that we omit biases. Args: - model: a nn.Module - filter_regexp: a regexp to filter the layers to keep according to their name in model.named_parameters(). For instance, the regexp: down_layers\\.[123456]\\.(conv[12]|identity\\.conv)) is keeping blocks down_layers from 1 to 6, and inside each block is keeping conv1, conv2 and identity.conv. Remarks: - We add (module\\.)? at the beginning of the regexp to account for the possible use of nn.parallel.DataParallel """ # get all parameter names all_layers = map(itemgetter(0), model.named_parameters()) # remove biases all_layers = filter(lambda x: "bias" not in x, all_layers) # remove .weight in all other names (or .weight_orig is spectral norm) all_layers = map(lambda x: x.replace(".weight_orig", ""), all_layers) all_layers = map(lambda x: x.replace(".weight", ""), all_layers) # return filtered layers filter_regexp = "(module\\.)?" + "(" + filter_regexp + ")" r = re.compile(filter_regexp) return list(filter(r.match, all_layers)) def get_param(module, layer_name, param_config): """ Given a quantization configuration, get the right parameter for the module to be quantized. Args: - module: a nn.Module - layer_name: the name of the layer - param_config: a dict like { 'Conv2d': ('kernel_size', {'(3, 3)': 9, '(1, 1)': 4}), 'Linear': ('in_features', {'*': 8}) } For instance, all conv2d layers with kernel size 3x3 have a block size of 9 and all Linear layers are quantized with a block size of 8, irrespective of their size. Remarks: - if 'fuzzy_name' is passed as a parameter, layers whose layer_name include 'fuzzy_name' will be assigned the given parameter. In the following example, conv.expand layers will have a block size of 9 while conv.reduce will have a block size of 4 and all other layers will have a block size of 2. { 'Conv2d': ('fuzzy_name', {'expand': 9, 'reduce': 4, '*': 2}), 'Linear': ('fuzzy_name', {'classifier': 8, 'projection': 4}) } """ layer_type = module.__class__.__name__ if layer_type not in param_config: raise KeyError(f"Layer type {layer_type} not in config for layer {module}") feature, params = param_config[module.__class__.__name__] if feature != "fuzzy_name": feature_value = str(getattr(module, feature)) if feature_value not in params: if "*" in params: feature_value = "*" else: raise KeyError( f"{feature}={feature_value} not in config for layer {module}" ) else: feature_values = [name for name in params if name in layer_name] if len(feature_values) == 0: if "*" in params: feature_value = "*" else: raise KeyError(f"name={layer_name} not in config for {module}") else: feature_value = feature_values[0] return params[feature_value] def attrsetter(*items): def resolve_attr(obj, attr): attrs = attr.split(".") head = attrs[:-1] tail = attrs[-1] for name in head: obj = getattr(obj, name) return obj, tail def g(obj, val): for attr in items: resolved_obj, resolved_attr = resolve_attr(obj, attr) setattr(resolved_obj, resolved_attr, val) return g class PQ(EM): """ Quantizes the layer weights W with the standard Product Quantization technique. This learns a codebook of codewords or centroids of size block_size from W. 
For further reference on using PQ to quantize neural networks, see "And the Bit Goes Down: Revisiting the Quantization of Neural Networks", Stock et al., ICLR 2020. PQ is performed in two steps: (1) The matrix W (weights or fully-connected or convolutional layer) is reshaped to (block_size, -1). - If W is fully-connected (2D), its columns are split into blocks of size block_size. - If W is convolutional (4D), its filters are split along the spatial dimension. (2) We apply the standard EM/k-means algorithm to the resulting reshaped matrix. Args: - W: weight matrix to quantize of size (in_features x out_features) - block_size: size of the blocks (subvectors) - n_centroids: number of centroids - n_iter: number of k-means iterations - eps: for cluster reassignment when an empty cluster is found - max_tentatives for cluster reassignment when an empty cluster is found - verbose: print information after each iteration Remarks: - block_size be compatible with the shape of W """ def __init__( self, W, block_size, n_centroids=256, n_iter=20, eps=1e-6, max_tentatives=30, verbose=True, ): self.block_size = block_size W_reshaped = self._reshape(W) super(PQ, self).__init__( W_reshaped, n_centroids=n_centroids, n_iter=n_iter, eps=eps, max_tentatives=max_tentatives, verbose=verbose, ) def _reshape(self, W): """ Reshapes the matrix W as expained in step (1). """ # fully connected: by convention the weight has size out_features x in_features if len(W.size()) == 2: self.out_features, self.in_features = W.size() assert ( self.in_features % self.block_size == 0 ), "Linear: n_blocks must be a multiple of in_features" return ( W.reshape(self.out_features, -1, self.block_size) .permute(2, 1, 0) .flatten(1, 2) ) # convolutional: we reshape along the spatial dimension elif len(W.size()) == 4: self.out_channels, self.in_channels, self.k_h, self.k_w = W.size() assert ( self.in_channels * self.k_h * self.k_w ) % self.block_size == 0, ( "Conv2d: n_blocks must be a multiple of in_channels * k_h * k_w" ) return ( W.reshape(self.out_channels, -1, self.block_size) .permute(2, 1, 0) .flatten(1, 2) ) # not implemented else: raise NotImplementedError(W.size()) def encode(self): """ Performs self.n_iter EM steps. """ self.initialize_centroids() for i in range(self.n_iter): try: self.step(i) except EmptyClusterResolveError: break def decode(self): """ Returns the encoded full weight matrix. Must be called after the encode function. """ # fully connected case if "k_h" not in self.__dict__: return ( self.centroids[self.assignments] .reshape(-1, self.out_features, self.block_size) .permute(1, 0, 2) .flatten(1, 2) ) # convolutional case else: return ( self.centroids[self.assignments] .reshape(-1, self.out_channels, self.block_size) .permute(1, 0, 2) .reshape(self.out_channels, self.in_channels, self.k_h, self.k_w) ) The provided code snippet includes necessary dependencies for implementing the `quantize_model_` function. Write a Python function `def quantize_model_( model, size_tracker, layers_to_quantize, block_sizes_config, n_centroids_config, step=0, n_iter=15, eps=1e-6, max_tentatives=100, verbose=True, )` to solve the following problem: Quantize a model in-place by stages. All the targeted layers are replaced by their quantized counterpart, and the model is ready for the finetuning of the centroids in a standard training loop (no modifications required). Note that we do not quantize biases. 
Args: - model: a nn.Module - size_tracker: useful for tracking quatization statistics - layers_to_quantize: a list containing regexps for filtering the layers to quantize at each stage according to their name (as in model.named_parameters()) - block_sizes_config: dict like { 'Conv2d': ('kernel_size', {'(3, 3)': 9, '(1, 1)': 4}), 'Linear': ('in_features', {'*': 8}) } For instance, all conv2d layers with kernel size 3x3 have a block size of 9 and all Linear layers are quantized with a block size of 8, irrespective of their size. - n_centroids_config: dict like { 'Conv2d': ('kernel_size', {'*': 256}), 'Linear': ('in_features', {'*': 256}) } For instance, all conv2d layers are quantized with 256 centroids - step: the layers to quantize inplace corresponding to layers_to_quantize[step] Here is the function: def quantize_model_( model, size_tracker, layers_to_quantize, block_sizes_config, n_centroids_config, step=0, n_iter=15, eps=1e-6, max_tentatives=100, verbose=True, ): """ Quantize a model in-place by stages. All the targeted layers are replaced by their quantized counterpart, and the model is ready for the finetuning of the centroids in a standard training loop (no modifications required). Note that we do not quantize biases. Args: - model: a nn.Module - size_tracker: useful for tracking quatization statistics - layers_to_quantize: a list containing regexps for filtering the layers to quantize at each stage according to their name (as in model.named_parameters()) - block_sizes_config: dict like { 'Conv2d': ('kernel_size', {'(3, 3)': 9, '(1, 1)': 4}), 'Linear': ('in_features', {'*': 8}) } For instance, all conv2d layers with kernel size 3x3 have a block size of 9 and all Linear layers are quantized with a block size of 8, irrespective of their size. - n_centroids_config: dict like { 'Conv2d': ('kernel_size', {'*': 256}), 'Linear': ('in_features', {'*': 256}) } For instance, all conv2d layers are quantized with 256 centroids - step: the layers to quantize inplace corresponding to layers_to_quantize[step] """ quantized_layers = get_layers(model, layers_to_quantize[step]) for layer in quantized_layers: # book-keeping is_master_process = (not dist.is_initialized()) or ( dist.is_initialized() and dist.get_rank() == 0 ) verbose = verbose and is_master_process # get block size and centroids module = attrgetter(layer)(model) block_size = get_param(module, layer, block_sizes_config) n_centroids = get_param(module, layer, n_centroids_config) if verbose: logging.info( f"Quantizing layer {layer} with block size {block_size} and {n_centroids} centroids" ) # quantize layer weight = module.weight.data.clone() is_bias = "bias" in [x[0] for x in module.named_parameters()] bias = module.bias.data.clone() if is_bias else None quantizer = PQ( weight, block_size, n_centroids=n_centroids, n_iter=n_iter, eps=eps, max_tentatives=max_tentatives, verbose=verbose, ) # quantization performed on all GPUs with same seed quantizer.encode() centroids = quantizer.centroids.contiguous() assignments = quantizer.assignments.contiguous() # broadcast results to make sure weights are up-to-date if dist.is_initialized(): dist.broadcast(centroids, 0) dist.broadcast(assignments, 0) # instantiate the quantized counterpart if isinstance(module, nn.Linear): out_features, in_features = map( lambda k: module.__dict__[k], ["out_features", "in_features"] ) quantized_module = PQLinear( centroids, assignments, bias, in_features, out_features ) elif isinstance(module, nn.Embedding): num_embeddings, embedding_dim = map( lambda k: 
module.__dict__[k], ["num_embeddings", "embedding_dim"] ) quantized_module = PQEmbedding( centroids, assignments, num_embeddings, embedding_dim ) elif isinstance(module, nn.Conv2d): out_channels, in_channels, kernel_size = map( lambda k: module.__dict__[k], ["out_channels", "in_channels", "kernel_size"], ) stride, padding, dilation, groups, padding_mode = map( lambda k: module.__dict__[k], ["stride", "padding", "dilation", "groups", "padding_mode"], ) quantized_module = PQConv2d( centroids, assignments, bias, in_channels, out_channels, kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, padding_mode=padding_mode, ) else: raise ValueError(f"Module {module} not yet supported for quantization") # replace layer by its quantized counterpart attrsetter(layer)(model, quantized_module) # update statistics size_tracker.update(weight, block_size, n_centroids) # return name of quantized layers return quantized_layers
Quantize a model in-place by stages. All the targeted layers are replaced by their quantized counterpart, and the model is ready for the finetuning of the centroids in a standard training loop (no modifications required). Note that we do not quantize biases. Args: - model: a nn.Module - size_tracker: useful for tracking quantization statistics - layers_to_quantize: a list containing regexps for filtering the layers to quantize at each stage according to their name (as in model.named_parameters()) - block_sizes_config: dict like { 'Conv2d': ('kernel_size', {'(3, 3)': 9, '(1, 1)': 4}), 'Linear': ('in_features', {'*': 8}) } For instance, all conv2d layers with kernel size 3x3 have a block size of 9 and all Linear layers are quantized with a block size of 8, irrespective of their size. - n_centroids_config: dict like { 'Conv2d': ('kernel_size', {'*': 256}), 'Linear': ('in_features', {'*': 256}) } For instance, all conv2d layers are quantized with 256 centroids - step: the layers to quantize in place corresponding to layers_to_quantize[step]
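A sketch of how this staged product-quantization would be driven, with illustrative configs patterned on the docstring above (the `SizeTracker` name and the layer regexps are assumptions):

import torch.nn as nn

model = nn.Sequential(nn.Linear(512, 512), nn.Linear(512, 256))

size_tracker = SizeTracker(model)   # tracker from the same quantization package
layers_to_quantize = ["0", "1"]     # one regexp per stage, matching named_parameters()

block_sizes_config = {"Linear": ("in_features", {"*": 8})}
n_centroids_config = {"Linear": ("in_features", {"*": 256})}

for step in range(len(layers_to_quantize)):
    quantized = quantize_model_(
        model, size_tracker, layers_to_quantize,
        block_sizes_config, n_centroids_config, step=step,
    )
    # ...finetune the centroids with the usual training loop between stages...
    print(f"step {step}: quantized {quantized}")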
183,769
import logging
import re  # needed by get_layers below; missing from the snippet's import list
from operator import attrgetter, itemgetter

import torch.distributed as dist
import torch.nn as nn

from ..pq.utils import attrsetter, get_layers
from .modules import ActivationQuantizer, IntConv2d, IntEmbedding, IntLinear

MAPPING = {nn.Linear: IntLinear, nn.Embedding: IntEmbedding, nn.Conv2d: IntConv2d}


def get_layers(model, filter_regexp):
    """
    Filters out the layers according to a regexp. Note that we omit biases.

    Args:
        - model: a nn.Module
        - filter_regexp: a regexp to filter the layers to keep according to
          their name in model.named_parameters(). For instance, the regexp:

             down_layers\\.[123456]\\.(conv[12]|identity\\.conv))

          is keeping blocks down_layers from 1 to 6, and inside each block is
          keeping conv1, conv2 and identity.conv.

    Remarks:
        - We add (module\\.)? at the beginning of the regexp to account for
          the possible use of nn.parallel.DataParallel
    """
    # get all parameter names
    all_layers = map(itemgetter(0), model.named_parameters())
    # remove biases
    all_layers = filter(lambda x: "bias" not in x, all_layers)
    # remove .weight in all other names (or .weight_orig if spectral norm)
    all_layers = map(lambda x: x.replace(".weight_orig", ""), all_layers)
    all_layers = map(lambda x: x.replace(".weight", ""), all_layers)
    # return filtered layers
    filter_regexp = "(module\\.)?" + "(" + filter_regexp + ")"
    r = re.compile(filter_regexp)
    return list(filter(r.match, all_layers))


def attrsetter(*items):
    def resolve_attr(obj, attr):
        attrs = attr.split(".")
        head = attrs[:-1]
        tail = attrs[-1]
        for name in head:
            obj = getattr(obj, name)
        return obj, tail

    def g(obj, val):
        for attr in items:
            resolved_obj, resolved_attr = resolve_attr(obj, attr)
            setattr(resolved_obj, resolved_attr, val)

    return g


The provided code snippet includes the necessary dependencies for implementing the `quantize_model_` function. Write a Python function `def quantize_model_(model, p=0.2, bits=8, update_step=3000)` that performs the scalar quantization described in the docstring below.

Here is the function:

def quantize_model_(model, p=0.2, bits=8, update_step=3000):
    """
    Replaces all modules with their scalar quantized counterpart and
    registers hooks to quantize the post-activations of those modules.

    Args:
        - model: a nn.Module
        - p: amount of noise (0 for no noise, 1 to quantize all the weights/activations)
        - bits: number of bits
        - update_step: update quantization parameters every update_step steps
    """
    # quantize all layers
    quantized_layers = get_layers(model, "(.*?)")

    for layer in quantized_layers:
        # book-keeping
        is_master_process = (not dist.is_initialized()) or (
            dist.is_initialized() and dist.get_rank() == 0
        )

        # recover module
        module = attrgetter(layer)(model)
        if is_master_process:
            logging.info(
                f"Quantizing layer {layer} with bits={bits} and QuantNoise={p}"
            )

        # quantization params
        q_params = {
            "p": p,
            "update_step": update_step,
            "bits": bits,
            "method": "histogram",
            "counter": 0,
        }

        # instantiate the quantized counterpart
        if isinstance(module, tuple(MAPPING.keys())):
            QuantizedModule = MAPPING[module.__class__]
            quantized_module = QuantizedModule.__new__(QuantizedModule)
            params = module.__dict__
            params.update(q_params)
            quantized_module.__dict__.update(params)
        else:
            if is_master_process:
                logging.info(f"Module {module} not yet supported for quantization")
            continue

        # activation quantization (hooks are registered on construction)
        a_q = ActivationQuantizer(quantized_module, p=0, bits=bits, method="histogram")

        # replace layer by its quantized counterpart
        attrsetter(layer)(model, quantized_module)

    # return name of quantized layers
    return quantized_layers
Replaces all modules with their scalar quantized counterpart and registers hooks to quantize the post-activations of those modules. Args: - model: a nn.Module - p: amount of noise (0 for no noise, 1 to quantize all the weights/activations) - bits: number of bits - update_step: update quantization parameters every update_step steps
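For contrast with the staged product-quantization path above, this scalar path is a single one-shot call; a hedged sketch:

import torch.nn as nn

model = nn.Sequential(nn.Linear(512, 512), nn.Conv2d(3, 16, 3))

# emulate int8 quantization noise on half the weights during training
quantized = quantize_model_(model, p=0.5, bits=8, update_step=3000)
print(quantized)  # names of the layers that were swapped in place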
183,771
import torch


def quantize(w, scale, zero_point):
    # body elided in this row; the sibling emulate_int8_tensor row below defines it as:
    #     return (torch.clamp(torch.round(w / scale + zero_point), 0, 255) - zero_point) * scale
    ...


def emulate_int8_histogram(w, scale=None, zero_point=None):
    if scale is None:
        obs = torch.quantization.observer.HistogramObserver()
        _ = obs(w.float())
        scale, zero_point = obs.calculate_qparams()
        scale = scale.cuda().type_as(w)
        zero_point = zero_point.cuda().type_as(w)
    return quantize(w, scale, zero_point), scale, zero_point
null
183,772
import torch


def quantize(w, scale, zero_point):
    # body elided in this row; the sibling emulate_int8_tensor row below defines it as:
    #     return (torch.clamp(torch.round(w / scale + zero_point), 0, 255) - zero_point) * scale
    ...


def emulate_int8_channel(w, scale=None, zero_point=None):
    if scale is None:
        obs = torch.quantization.observer.PerChannelMinMaxObserver(
            ch_axis=-1, qscheme=torch.per_channel_symmetric
        )
        _ = obs(w)
        scale, zero_point, ch_axis = obs.get_qparams()
        scale = scale.cuda().type_as(w)
        zero_point = zero_point.cuda().type_as(w)
    return quantize(w, scale, zero_point), scale, zero_point
null
183,773
import torch


def quantize(w, scale, zero_point):
    return (
        torch.clamp(torch.round(w / scale + zero_point), 0, 255) - zero_point
    ) * scale


def emulate_int8_tensor(w, scale=None, zero_point=None):
    if scale is None:
        obs = torch.quantization.observer.MinMaxObserver()
        _ = obs(w)
        scale, zero_point = obs.calculate_qparams()
        scale = scale.cuda().type_as(w)
        zero_point = zero_point.cuda().type_as(w)
    return quantize(w, scale, zero_point), scale, zero_point
null
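The `quantize` helper is a fake-quantization round trip: values are snapped to the uint8 grid and mapped straight back to float, so the tensor keeps its dtype but takes at most 256 distinct values per scale. A small worked example (CPU-only; passing scale/zero_point explicitly sidesteps the `.cuda()` calls above):

import torch

w = torch.tensor([0.012, 0.5, -0.003, 1.0])
scale, zero_point = torch.tensor(1 / 255), torch.tensor(0.0)

w_q = quantize(w, scale, zero_point)
print(w_q)  # each value snapped to the nearest multiple of 1/255 in [0, 1]
# e.g. 0.012 -> round(3.06)/255 = 3/255 ≈ 0.01176; -0.003 clamps to 0.0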
183,775
import ast import collections import contextlib import logging import os import re import traceback from collections import OrderedDict from typing import Any, Dict, Optional, Union import torch from fairseq.dataclass.configs import CheckpointConfig, FairseqConfig from fairseq.dataclass.utils import ( convert_namespace_to_omegaconf, overwrite_args_by_name, ) from fairseq.file_io import PathManager from fairseq.models import FairseqDecoder, FairseqEncoder from omegaconf import DictConfig, open_dict logger = logging.getLogger(__name__) def save_checkpoint(cfg: CheckpointConfig, trainer, epoch_itr, val_loss): from fairseq import meters # only one worker should attempt to create the required dir if cfg.distributed_rank == 0: os.makedirs(cfg.save_dir, exist_ok=True) prev_best = getattr(save_checkpoint, "best", val_loss) if val_loss is not None: best_function = max if cfg.maximize_best_checkpoint_metric else min save_checkpoint.best = best_function(val_loss, prev_best) if cfg.no_save: return trainer.consolidate_optimizer() if not trainer.is_data_parallel_master: return write_timer = meters.StopwatchMeter() write_timer.start() epoch = epoch_itr.epoch end_of_epoch = epoch_itr.end_of_epoch() updates = trainer.get_num_updates() logger.info(f"Preparing to save checkpoint for epoch {epoch} @ {updates} updates") def is_better(a, b): return a >= b if cfg.maximize_best_checkpoint_metric else a <= b suffix = cfg.checkpoint_suffix or "" checkpoint_conds = collections.OrderedDict() checkpoint_conds["checkpoint{}{}.pt".format(epoch, suffix)] = ( end_of_epoch and not cfg.no_epoch_checkpoints and epoch % cfg.save_interval == 0 ) checkpoint_conds["checkpoint_{}_{}{}.pt".format(epoch, updates, suffix)] = ( not end_of_epoch and cfg.save_interval_updates > 0 and updates % cfg.save_interval_updates == 0 ) checkpoint_conds["checkpoint_best{}.pt".format(suffix)] = val_loss is not None and ( not hasattr(save_checkpoint, "best") or is_better(val_loss, save_checkpoint.best) ) if val_loss is not None and cfg.keep_best_checkpoints > 0: checkpoint_conds[ "checkpoint.best_{}_{:.2f}.pt".format(cfg.best_checkpoint_metric, val_loss) ] = not hasattr(save_checkpoint, "best") or is_better( val_loss, save_checkpoint.best ) checkpoint_conds[ "checkpoint_last{}.pt".format(suffix) ] = not cfg.no_last_checkpoints extra_state = {"train_iterator": epoch_itr.state_dict(), "val_loss": val_loss} if hasattr(save_checkpoint, "best"): extra_state.update({"best": save_checkpoint.best}) checkpoints = [ os.path.join(cfg.save_dir, fn) for fn, cond in checkpoint_conds.items() if cond ] if len(checkpoints) > 0: trainer.save_checkpoint(checkpoints[0], extra_state) for cp in checkpoints[1:]: assert PathManager.copy( checkpoints[0], cp, overwrite=True ), f"Failed to copy {checkpoints[0]} to {cp}" write_timer.stop() logger.info( "Saved checkpoint {} (epoch {} @ {} updates, score {}) (writing took {} seconds)".format( checkpoints[0], epoch, updates, val_loss, write_timer.sum ) ) if not end_of_epoch and cfg.keep_interval_updates > 0: # remove old checkpoints; checkpoints are sorted in descending order checkpoints = checkpoint_paths( cfg.save_dir, pattern=r"checkpoint_\d+_(\d+)\.pt" ) for old_chk in checkpoints[cfg.keep_interval_updates :]: if os.path.lexists(old_chk): os.remove(old_chk) if cfg.keep_last_epochs > 0: # remove old epoch checkpoints; checkpoints are sorted in descending order checkpoints = checkpoint_paths(cfg.save_dir, pattern=r"checkpoint(\d+)\.pt") for old_chk in checkpoints[cfg.keep_last_epochs :]: if os.path.lexists(old_chk): 
os.remove(old_chk) if cfg.keep_best_checkpoints > 0: # only keep the best N checkpoints according to validation metric checkpoints = checkpoint_paths( cfg.save_dir, pattern=r"checkpoint\.best_{}_(\d+\.?\d*)\.pt".format( cfg.best_checkpoint_metric ), ) if not cfg.maximize_best_checkpoint_metric: checkpoints = checkpoints[::-1] for old_chk in checkpoints[cfg.keep_best_checkpoints :]: if os.path.lexists(old_chk): os.remove(old_chk) class CheckpointConfig(FairseqDataclass): save_dir: str = field( default="checkpoints", metadata={"help": "path to save checkpoints"} ) restore_file: str = field( default="checkpoint_last.pt", metadata={ "help": "filename from which to load checkpoint " "(default: <save-dir>/checkpoint_last.pt" }, ) finetune_from_model: Optional[str] = field( default=None, metadata={ "help": "finetune from a pretrained model; note that meters and lr scheduler will be reset" }, ) reset_dataloader: bool = field( default=False, metadata={ "help": "if set, does not reload dataloader state from the checkpoint" }, ) reset_lr_scheduler: bool = field( default=False, metadata={ "help": "if set, does not load lr scheduler state from the checkpoint" }, ) reset_meters: bool = field( default=False, metadata={"help": "if set, does not load meters from the checkpoint"}, ) reset_optimizer: bool = field( default=False, metadata={"help": "if set, does not load optimizer state from the checkpoint"}, ) optimizer_overrides: str = field( default="{}", metadata={ "help": "a dictionary used to override optimizer args when loading a checkpoint" }, ) save_interval: int = field( default=1, metadata={"help": "save a checkpoint every N epochs"} ) save_interval_updates: int = field( default=0, metadata={"help": "save a checkpoint (and validate) every N updates"} ) keep_interval_updates: int = field( default=-1, metadata={ "help": "keep the last N checkpoints saved with --save-interval-updates" }, ) keep_interval_updates_pattern: int = field( default=-1, metadata={ "help": "when used with --keep-interval-updates, skips deleting " "any checkpoints with update X where " "X %% keep_interval_updates_pattern == 0" }, ) keep_last_epochs: int = field( default=-1, metadata={"help": "keep last N epoch checkpoints"} ) keep_best_checkpoints: int = field( default=-1, metadata={"help": "keep best N checkpoints based on scores"} ) no_save: bool = field( default=False, metadata={"help": "don't save models or checkpoints"} ) no_epoch_checkpoints: bool = field( default=False, metadata={"help": "only store last and best checkpoints"} ) no_last_checkpoints: bool = field( default=False, metadata={"help": "don't store last checkpoints"} ) no_save_optimizer_state: bool = field( default=False, metadata={"help": "don't save optimizer-state as part of checkpoint"}, ) best_checkpoint_metric: str = field( default="loss", metadata={"help": 'metric to use for saving "best" checkpoints'} ) maximize_best_checkpoint_metric: bool = field( default=False, metadata={ "help": 'select the largest metric value for saving "best" checkpoints' }, ) patience: int = field( default=-1, metadata={ "help": ( "early stop training if valid performance doesn't " "improve for N consecutive validation runs; note " "that this is influenced by --validate-interval" ) }, ) checkpoint_suffix: str = field( default="", metadata={"help": "suffix to add to the checkpoint file name"} ) checkpoint_shard_count: int = field( default=1, metadata={ "help": "Number of shards containing the checkpoint - " "if the checkpoint is over 300GB, it is preferable " "to split it into 
shards to prevent OOM on CPU while loading " "the checkpoint" }, ) load_checkpoint_on_all_dp_ranks: bool = field( default=False, metadata={ "help": "load checkpoints on all data parallel devices " "(default: only load on rank 0 and broadcast to other devices)" }, ) write_checkpoints_asynchronously: bool = field( default=False, metadata={ "help": ( "Write checkpoints asynchronously in a separate " "thread. NOTE: This feature is currently being tested." ), "argparse_alias": "--save-async", }, ) model_parallel_size: int = II("common.model_parallel_size") class PathManager: """ Wrapper for insulating OSS I/O (using Python builtin operations) from iopath's PathManager abstraction (for transparently handling various internal backends). """ def open( path: str, mode: str = "r", buffering: int = -1, encoding: Optional[str] = None, errors: Optional[str] = None, newline: Optional[str] = None, ): if IOPathManager: return IOPathManager.open( path=path, mode=mode, buffering=buffering, encoding=encoding, errors=errors, newline=newline, ) return open( path, mode=mode, buffering=buffering, encoding=encoding, errors=errors, newline=newline, ) def copy(src_path: str, dst_path: str, overwrite: bool = False) -> bool: if IOPathManager: return IOPathManager.copy( src_path=src_path, dst_path=dst_path, overwrite=overwrite ) return shutil.copyfile(src_path, dst_path) def get_local_path(path: str, **kwargs) -> str: if IOPathManager: return IOPathManager.get_local_path(path, **kwargs) return path def exists(path: str) -> bool: if IOPathManager: return IOPathManager.exists(path) return os.path.exists(path) def isfile(path: str) -> bool: if IOPathManager: return IOPathManager.isfile(path) return os.path.isfile(path) def ls(path: str) -> List[str]: if IOPathManager: return IOPathManager.ls(path) return os.listdir(path) def mkdirs(path: str) -> None: if IOPathManager: return IOPathManager.mkdirs(path) os.makedirs(path, exist_ok=True) def rm(path: str) -> None: if IOPathManager: return IOPathManager.rm(path) os.remove(path) def chmod(path: str, mode: int) -> None: if not PathManager.path_requires_pathmanager(path): os.chmod(path, mode) def register_handler(handler) -> None: if IOPathManager: return IOPathManager.register_handler(handler=handler) def copy_from_local( local_path: str, dst_path: str, overwrite: bool = False, **kwargs ) -> None: if IOPathManager: return IOPathManager.copy_from_local( local_path=local_path, dst_path=dst_path, overwrite=overwrite, **kwargs ) return shutil.copyfile(local_path, dst_path) def path_requires_pathmanager(path: str) -> bool: """Do we require PathManager to access given path?""" if IOPathManager: for p in IOPathManager._path_handlers.keys(): if path.startswith(p): return True return False def supports_rename(path: str) -> bool: # PathManager doesn't yet support renames return not PathManager.path_requires_pathmanager(path) def rename(src: str, dst: str): os.rename(src, dst) """ ioPath async PathManager methods: """ def opena( path: str, mode: str = "r", buffering: int = -1, encoding: Optional[str] = None, errors: Optional[str] = None, newline: Optional[str] = None, ): """ Return file descriptor with asynchronous write operations. 
""" global IOPathManager if not IOPathManager: logging.info("ioPath is initializing PathManager.") try: from iopath.common.file_io import PathManager IOPathManager = PathManager() except Exception: logging.exception("Failed to initialize ioPath PathManager object.") return IOPathManager.opena( path=path, mode=mode, buffering=buffering, encoding=encoding, errors=errors, newline=newline, ) def async_close() -> bool: """ Wait for files to be written and clean up asynchronous PathManager. NOTE: `PathManager.async_close()` must be called at the end of any script that uses `PathManager.opena(...)`. """ global IOPathManager if IOPathManager: return IOPathManager.async_close() return False The provided code snippet includes necessary dependencies for implementing the `load_checkpoint` function. Write a Python function `def load_checkpoint(cfg: CheckpointConfig, trainer, **passthrough_args)` to solve the following problem: Load a checkpoint and restore the training iterator. *passthrough_args* will be passed through to ``trainer.get_train_iterator``. Here is the function: def load_checkpoint(cfg: CheckpointConfig, trainer, **passthrough_args): """ Load a checkpoint and restore the training iterator. *passthrough_args* will be passed through to ``trainer.get_train_iterator``. """ reset_optimizer = cfg.reset_optimizer reset_lr_scheduler = cfg.reset_lr_scheduler optimizer_overrides = ast.literal_eval(cfg.optimizer_overrides) reset_meters = cfg.reset_meters reset_dataloader = cfg.reset_dataloader if cfg.finetune_from_model is not None and ( reset_optimizer or reset_lr_scheduler or reset_meters or reset_dataloader ): raise ValueError( "--finetune-from-model can not be set together with either --reset-optimizer" " or reset_lr_scheduler or reset_meters or reset_dataloader" ) suffix = cfg.checkpoint_suffix if ( cfg.restore_file == "checkpoint_last.pt" ): # default value of restore_file is 'checkpoint_last.pt' checkpoint_path = os.path.join( cfg.save_dir, "checkpoint_last{}.pt".format(suffix) ) first_launch = not PathManager.exists(checkpoint_path) if cfg.finetune_from_model is not None and first_launch: # if there is no last checkpoint to restore, start the finetune from pretrained model # else just use usual logic to load checkpoint, e.g. restart from last checkpoint and etc. 
            if PathManager.exists(cfg.finetune_from_model):
                checkpoint_path = cfg.finetune_from_model
                reset_optimizer = True
                reset_lr_scheduler = True
                reset_meters = True
                reset_dataloader = True
                logger.info(
                    f"loading pretrained model from {checkpoint_path}: "
                    "optimizer, lr scheduler, meters, dataloader will be reset"
                )
            else:
                raise ValueError(
                    f"--finetune-from-model {cfg.finetune_from_model} does not exist"
                )
    elif cfg.model_parallel_size > 1:
        checkpoint_path = cfg.restore_file.replace(".pt", suffix + ".pt")
    else:
        checkpoint_path = cfg.restore_file

    if cfg.restore_file != "checkpoint_last.pt" and cfg.finetune_from_model:
        raise ValueError(
            "--finetune-from-model and --restore-file (non-default value) "
            "cannot be specified together: " + str(cfg)
        )

    extra_state = trainer.load_checkpoint(
        checkpoint_path,
        reset_optimizer,
        reset_lr_scheduler,
        optimizer_overrides,
        reset_meters=reset_meters,
    )

    if (
        extra_state is not None
        and "best" in extra_state
        and not reset_optimizer
        and not reset_meters
    ):
        save_checkpoint.best = extra_state["best"]

    if extra_state is not None and not reset_dataloader:
        # restore iterator from checkpoint
        itr_state = extra_state["train_iterator"]
        epoch_itr = trainer.get_train_iterator(
            epoch=itr_state["epoch"], load_dataset=True, **passthrough_args
        )
        epoch_itr.load_state_dict(itr_state)
    else:
        epoch_itr = trainer.get_train_iterator(
            epoch=1, load_dataset=True, **passthrough_args
        )

    trainer.lr_step(epoch_itr.epoch)

    return extra_state, epoch_itr
Load a checkpoint and restore the training iterator. *passthrough_args* will be passed through to ``trainer.get_train_iterator``.
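A minimal usage sketch for `load_checkpoint`: it assumes a fully built fairseq `Trainer` and a parsed top-level config from a typical training run; neither object is constructed here.

# Hypothetical resume-from-checkpoint flow (cfg and trainer come from the
# usual fairseq training setup and are not built in this sketch).
extra_state, epoch_itr = load_checkpoint(cfg.checkpoint, trainer)
if extra_state is not None:
    print(f"resumed training at epoch {epoch_itr.epoch}")
else:
    print("no checkpoint found; starting from scratch")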
183,776
import ast import collections import contextlib import logging import os import re import traceback from collections import OrderedDict from typing import Any, Dict, Optional, Union import torch from fairseq.dataclass.configs import CheckpointConfig, FairseqConfig from fairseq.dataclass.utils import ( convert_namespace_to_omegaconf, overwrite_args_by_name, ) from fairseq.file_io import PathManager from fairseq.models import FairseqDecoder, FairseqEncoder from omegaconf import DictConfig, open_dict def load_model_ensemble_and_task( filenames, arg_overrides: Optional[Dict[str, Any]] = None, task=None, strict=True, suffix="", num_shards=1, state=None, ): assert state is None or len(filenames) == 1 from fairseq import tasks assert not ( strict and num_shards > 1 ), "Cannot load state dict with strict=True and checkpoint shards > 1" ensemble = [] cfg = None for filename in filenames: orig_filename = filename assert num_shards > 0 for shard_idx in range(num_shards): if num_shards == 1: filename = filename.replace(".pt", suffix + ".pt") else: filename = orig_filename[:-3] + f"_part{shard_idx}.pt" if not PathManager.exists(filename): raise IOError("Model file not found: {}".format(filename)) if state is None: state = load_checkpoint_to_cpu(filename, arg_overrides) if "args" in state and state["args"] is not None: cfg = convert_namespace_to_omegaconf(state["args"]) elif "cfg" in state and state["cfg"] is not None: cfg = state["cfg"] else: raise RuntimeError( f"Neither args nor cfg exist in state keys = {state.keys()}" ) if task is None: task = tasks.setup_task(cfg.task) if "task_state" in state: task.load_state_dict(state["task_state"]) # build model for ensemble model = task.build_model(cfg.model) model.load_state_dict(state["model"], strict=strict, model_cfg=cfg.model) # reset state so it gets loaded for the next model in ensemble state = None ensemble.append(model) return ensemble, cfg, task The provided code snippet includes necessary dependencies for implementing the `load_model_ensemble` function. Write a Python function `def load_model_ensemble( filenames, arg_overrides: Optional[Dict[str, Any]] = None, task=None, strict=True, suffix="", num_shards=1, state=None, )` to solve the following problem: Loads an ensemble of models. Args: filenames (List[str]): checkpoint files to load arg_overrides (Dict[str,Any], optional): override model args that were used during model training task (fairseq.tasks.FairseqTask, optional): task to use for loading Here is the function: def load_model_ensemble( filenames, arg_overrides: Optional[Dict[str, Any]] = None, task=None, strict=True, suffix="", num_shards=1, state=None, ): """Loads an ensemble of models. Args: filenames (List[str]): checkpoint files to load arg_overrides (Dict[str,Any], optional): override model args that were used during model training task (fairseq.tasks.FairseqTask, optional): task to use for loading """ assert not ( strict and num_shards > 1 ), "Cannot load state dict with strict=True and checkpoint shards > 1" ensemble, args, _task = load_model_ensemble_and_task( filenames, arg_overrides, task, strict, suffix, num_shards, state, ) return ensemble, args
Loads an ensemble of models. Args: filenames (List[str]): checkpoint files to load arg_overrides (Dict[str,Any], optional): override model args that were used during model training task (fairseq.tasks.FairseqTask, optional): task to use for loading
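An illustrative sketch of calling `load_model_ensemble` for inference; the checkpoint paths and the `data` override below are placeholders.

from fairseq import checkpoint_utils

# Load two checkpoints as an ensemble, overriding the data path recorded in
# the checkpoints (all paths are placeholders).
models, cfg = checkpoint_utils.load_model_ensemble(
    ["checkpoints/model1.pt", "checkpoints/model2.pt"],
    arg_overrides={"data": "data-bin/wmt14_en_de"},
)
for model in models:
    model.eval()  # disable dropout for generation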
183,777
import ast import collections import contextlib import logging import os import re import traceback from collections import OrderedDict from typing import Any, Dict, Optional, Union import torch from fairseq.dataclass.configs import CheckpointConfig, FairseqConfig from fairseq.dataclass.utils import ( convert_namespace_to_omegaconf, overwrite_args_by_name, ) from fairseq.file_io import PathManager from fairseq.models import FairseqDecoder, FairseqEncoder from omegaconf import DictConfig, open_dict def torch_persistent_save(obj, f): class FairseqConfig(FairseqDataclass): class PathManager: def open( path: str, mode: str = "r", buffering: int = -1, encoding: Optional[str] = None, errors: Optional[str] = None, newline: Optional[str] = None, ): def copy(src_path: str, dst_path: str, overwrite: bool = False) -> bool: def get_local_path(path: str, **kwargs) -> str: def exists(path: str) -> bool: def isfile(path: str) -> bool: def ls(path: str) -> List[str]: def mkdirs(path: str) -> None: def rm(path: str) -> None: def chmod(path: str, mode: int) -> None: def register_handler(handler) -> None: def copy_from_local( local_path: str, dst_path: str, overwrite: bool = False, **kwargs ) -> None: def path_requires_pathmanager(path: str) -> bool: def supports_rename(path: str) -> bool: def rename(src: str, dst: str): def opena( path: str, mode: str = "r", buffering: int = -1, encoding: Optional[str] = None, errors: Optional[str] = None, newline: Optional[str] = None, ): def async_close() -> bool: def save_state( filename, cfg: FairseqConfig, model_state_dict, criterion, optimizer, lr_scheduler, num_updates, optim_history=None, extra_state=None, task=None, **kwargs, ): from fairseq import utils if optim_history is None: optim_history = [] if extra_state is None: extra_state = {} state_dict = { "cfg": cfg, "args": kwargs.get("args", None), "model": model_state_dict or {}, "optimizer_history": optim_history + [ { "criterion_name": criterion.__class__.__name__, "optimizer_name": optimizer.__class__.__name__, "lr_scheduler_state": lr_scheduler.state_dict(), "num_updates": num_updates, } ], "extra_state": extra_state, "task_state": task.state_dict() if task is not None else {} } if utils.has_parameters(criterion): state_dict["criterion"] = criterion.state_dict() if cfg is None: cfg = state_dict["args"] assert cfg is not None, "must provide cfg or args" if isinstance(cfg, DictConfig): no_save_optimizer_state = cfg.checkpoint.no_save_optimizer_state else: no_save_optimizer_state = cfg.no_save_optimizer_state if not no_save_optimizer_state: state_dict["last_optimizer_state"] = optimizer.state_dict() # keep everything on CPU state_dict = utils.move_to_cpu(state_dict) if PathManager.supports_rename(filename): # do atomic save with PathManager.open(filename + ".tmp", "wb") as f: torch_persistent_save(state_dict, f) PathManager.rename(filename + ".tmp", filename) else: # fallback to non-atomic save with PathManager.open(filename, "wb") as f: torch_persistent_save(state_dict, f)
null
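The atomic-save path in `save_state` above (write to `filename + ".tmp"`, then rename) is worth calling out; here is a minimal standalone sketch of the same pattern:

import os

import torch

def atomic_torch_save(obj, filename: str) -> None:
    # Write to a temporary file first so a crash mid-write never leaves a
    # truncated checkpoint at the final path.
    tmp = filename + ".tmp"
    with open(tmp, "wb") as f:
        torch.save(obj, f)
    # rename() is atomic on POSIX when src and dst are on the same filesystem.
    os.rename(tmp, filename)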
183,778
import ast import collections import contextlib import logging import os import re import traceback from collections import OrderedDict from typing import Any, Dict, Optional, Union import torch from fairseq.dataclass.configs import CheckpointConfig, FairseqConfig from fairseq.dataclass.utils import ( convert_namespace_to_omegaconf, overwrite_args_by_name, ) from fairseq.file_io import PathManager from fairseq.models import FairseqDecoder, FairseqEncoder from omegaconf import DictConfig, open_dict logger = logging.getLogger(__name__) The provided code snippet includes necessary dependencies for implementing the `prune_state_dict` function. Write a Python function `def prune_state_dict(state_dict, model_cfg: Optional[DictConfig])` to solve the following problem: Prune the given state_dict if desired for LayerDrop (https://arxiv.org/abs/1909.11556). Training with LayerDrop allows models to be robust to pruning at inference time. This function prunes state_dict to allow smaller models to be loaded from a larger model and re-maps the existing state_dict for this to occur. It's called by functions that load models from checkpoints and does not need to be called directly. Here is the function: def prune_state_dict(state_dict, model_cfg: Optional[DictConfig]): """Prune the given state_dict if desired for LayerDrop (https://arxiv.org/abs/1909.11556). Training with LayerDrop allows models to be robust to pruning at inference time. This function prunes state_dict to allow smaller models to be loaded from a larger model and re-maps the existing state_dict for this to occur. It's called by functions that load models from checkpoints and does not need to be called directly. """ arch = None if model_cfg is not None: arch = ( model_cfg._name if isinstance(model_cfg, DictConfig) else getattr(model_cfg, "arch", None) ) if not model_cfg or arch is None or arch == "ptt_transformer": # args should not be none, but don't crash if it is. return state_dict encoder_layers_to_keep = getattr(model_cfg, "encoder_layers_to_keep", None) decoder_layers_to_keep = getattr(model_cfg, "decoder_layers_to_keep", None) if not encoder_layers_to_keep and not decoder_layers_to_keep: return state_dict # apply pruning logger.info( "Pruning model to specified layer configuration - this works best if the model was trained with LayerDrop" ) def create_pruning_pass(layers_to_keep, layer_name): keep_layers = sorted( int(layer_string) for layer_string in layers_to_keep.split(",") ) mapping_dict = {} for i in range(len(keep_layers)): mapping_dict[str(keep_layers[i])] = str(i) regex = re.compile(r"^{layer}.*\.layers\.(\d+)".format(layer=layer_name)) return {"substitution_regex": regex, "mapping_dict": mapping_dict} pruning_passes = [] if encoder_layers_to_keep: pruning_passes.append(create_pruning_pass(encoder_layers_to_keep, "encoder")) if decoder_layers_to_keep: pruning_passes.append(create_pruning_pass(decoder_layers_to_keep, "decoder")) new_state_dict = {} for layer_name in state_dict.keys(): match = re.search(r"\.layers\.(\d+)\.", layer_name) # if layer has no number in it, it is a supporting layer, such as an # embedding if not match: new_state_dict[layer_name] = state_dict[layer_name] continue # otherwise, layer should be pruned. 
        original_layer_number = match.group(1)
        # figure out which mapping dict to replace from
        for pruning_pass in pruning_passes:
            if original_layer_number in pruning_pass["mapping_dict"] and pruning_pass[
                "substitution_regex"
            ].search(layer_name):
                new_layer_number = pruning_pass["mapping_dict"][original_layer_number]
                substitution_match = pruning_pass["substitution_regex"].search(
                    layer_name
                )
                new_state_key = (
                    layer_name[: substitution_match.start(1)]
                    + new_layer_number
                    + layer_name[substitution_match.end(1) :]
                )
                new_state_dict[new_state_key] = state_dict[layer_name]

    # Since layers are now pruned, *_layers_to_keep are no longer needed.
    # This is more of a "make it work" fix than a proper one.
    if isinstance(model_cfg, DictConfig):
        context = open_dict(model_cfg)
    else:
        context = contextlib.ExitStack()
    with context:
        if hasattr(model_cfg, "encoder_layers_to_keep"):
            model_cfg.encoder_layers_to_keep = None
        if hasattr(model_cfg, "decoder_layers_to_keep"):
            model_cfg.decoder_layers_to_keep = None

    return new_state_dict
Prune the given state_dict if desired for LayerDrop (https://arxiv.org/abs/1909.11556). Training with LayerDrop allows models to be robust to pruning at inference time. This function prunes state_dict to allow smaller models to be loaded from a larger model and re-maps the existing state_dict for this to occur. It's called by functions that load models from checkpoints and does not need to be called directly.
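An illustrative sketch of the remapping `prune_state_dict` performs; `ToyCfg` and the one-element tensors are made up for the example.

import torch

class ToyCfg:
    arch = "transformer"              # any arch other than "ptt_transformer"
    encoder_layers_to_keep = "0,2,4"  # keep 3 of 6 encoder layers
    decoder_layers_to_keep = None

state_dict = {f"encoder.layers.{i}.fc1.weight": torch.zeros(1) for i in range(6)}
pruned = prune_state_dict(state_dict, ToyCfg())
print(sorted(pruned.keys()))
# ['encoder.layers.0.fc1.weight',   (was layer 0)
#  'encoder.layers.1.fc1.weight',   (was layer 2)
#  'encoder.layers.2.fc1.weight']   (was layer 4)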
183,779
import ast import collections import contextlib import logging import os import re import traceback from collections import OrderedDict from typing import Any, Dict, Optional, Union import torch from fairseq.dataclass.configs import CheckpointConfig, FairseqConfig from fairseq.dataclass.utils import ( convert_namespace_to_omegaconf, overwrite_args_by_name, ) from fairseq.file_io import PathManager from fairseq.models import FairseqDecoder, FairseqEncoder from omegaconf import DictConfig, open_dict def load_checkpoint_to_cpu(path, arg_overrides=None, load_on_all_ranks=False): """Loads a checkpoint to CPU (with upgrading for backward compatibility). If doing single-GPU training or if the checkpoint is only being loaded by at most one process on each node (current default behavior is for only rank 0 to read the checkpoint from disk), load_on_all_ranks should be False to avoid errors from torch.distributed not having been initialized or torch.distributed.barrier() hanging. If all processes on each node may be loading the checkpoint simultaneously, load_on_all_ranks should be set to True to avoid I/O conflicts. There's currently no support for > 1 but < all processes loading the checkpoint on each node. """ local_path = PathManager.get_local_path(path) # The locally cached file returned by get_local_path() may be stale for # remote files that are periodically updated/overwritten (ex: # checkpoint_last.pt) - so we remove the local copy, sync across processes # (if needed), and then download a fresh copy. if local_path != path and PathManager.path_requires_pathmanager(path): try: os.remove(local_path) except FileNotFoundError: # With potentially multiple processes removing the same file, the # file being missing is benign (missing_ok isn't available until # Python 3.8). pass if load_on_all_ranks: torch.distributed.barrier() local_path = PathManager.get_local_path(path) with open(local_path, "rb") as f: state = torch.load(f, map_location=torch.device("cpu")) if "args" in state and state["args"] is not None and arg_overrides is not None: args = state["args"] for arg_name, arg_val in arg_overrides.items(): setattr(args, arg_name, arg_val) if "cfg" in state and state["cfg"] is not None and arg_overrides is not None: overwrite_args_by_name(state["cfg"], arg_overrides) state = _upgrade_state_dict(state) return state class PathManager: """ Wrapper for insulating OSS I/O (using Python builtin operations) from iopath's PathManager abstraction (for transparently handling various internal backends). 
""" def open( path: str, mode: str = "r", buffering: int = -1, encoding: Optional[str] = None, errors: Optional[str] = None, newline: Optional[str] = None, ): if IOPathManager: return IOPathManager.open( path=path, mode=mode, buffering=buffering, encoding=encoding, errors=errors, newline=newline, ) return open( path, mode=mode, buffering=buffering, encoding=encoding, errors=errors, newline=newline, ) def copy(src_path: str, dst_path: str, overwrite: bool = False) -> bool: if IOPathManager: return IOPathManager.copy( src_path=src_path, dst_path=dst_path, overwrite=overwrite ) return shutil.copyfile(src_path, dst_path) def get_local_path(path: str, **kwargs) -> str: if IOPathManager: return IOPathManager.get_local_path(path, **kwargs) return path def exists(path: str) -> bool: if IOPathManager: return IOPathManager.exists(path) return os.path.exists(path) def isfile(path: str) -> bool: if IOPathManager: return IOPathManager.isfile(path) return os.path.isfile(path) def ls(path: str) -> List[str]: if IOPathManager: return IOPathManager.ls(path) return os.listdir(path) def mkdirs(path: str) -> None: if IOPathManager: return IOPathManager.mkdirs(path) os.makedirs(path, exist_ok=True) def rm(path: str) -> None: if IOPathManager: return IOPathManager.rm(path) os.remove(path) def chmod(path: str, mode: int) -> None: if not PathManager.path_requires_pathmanager(path): os.chmod(path, mode) def register_handler(handler) -> None: if IOPathManager: return IOPathManager.register_handler(handler=handler) def copy_from_local( local_path: str, dst_path: str, overwrite: bool = False, **kwargs ) -> None: if IOPathManager: return IOPathManager.copy_from_local( local_path=local_path, dst_path=dst_path, overwrite=overwrite, **kwargs ) return shutil.copyfile(local_path, dst_path) def path_requires_pathmanager(path: str) -> bool: """Do we require PathManager to access given path?""" if IOPathManager: for p in IOPathManager._path_handlers.keys(): if path.startswith(p): return True return False def supports_rename(path: str) -> bool: # PathManager doesn't yet support renames return not PathManager.path_requires_pathmanager(path) def rename(src: str, dst: str): os.rename(src, dst) """ ioPath async PathManager methods: """ def opena( path: str, mode: str = "r", buffering: int = -1, encoding: Optional[str] = None, errors: Optional[str] = None, newline: Optional[str] = None, ): """ Return file descriptor with asynchronous write operations. """ global IOPathManager if not IOPathManager: logging.info("ioPath is initializing PathManager.") try: from iopath.common.file_io import PathManager IOPathManager = PathManager() except Exception: logging.exception("Failed to initialize ioPath PathManager object.") return IOPathManager.opena( path=path, mode=mode, buffering=buffering, encoding=encoding, errors=errors, newline=newline, ) def async_close() -> bool: """ Wait for files to be written and clean up asynchronous PathManager. NOTE: `PathManager.async_close()` must be called at the end of any script that uses `PathManager.opena(...)`. """ global IOPathManager if IOPathManager: return IOPathManager.async_close() return False The provided code snippet includes necessary dependencies for implementing the `load_pretrained_component_from_model` function. Write a Python function `def load_pretrained_component_from_model( component: Union[FairseqEncoder, FairseqDecoder], checkpoint: str )` to solve the following problem: Load a pretrained FairseqEncoder or FairseqDecoder from checkpoint into the provided `component` object. 
If state_dict fails to load, there may be a mismatch in the architecture of the corresponding `component` found in the `checkpoint` file. Here is the function: def load_pretrained_component_from_model(
    component: Union[FairseqEncoder, FairseqDecoder], checkpoint: str
):
    """
    Load a pretrained FairseqEncoder or FairseqDecoder from checkpoint into the
    provided `component` object. If state_dict fails to load, there may be a
    mismatch in the architecture of the corresponding `component` found in the
    `checkpoint` file.
    """
    if not PathManager.exists(checkpoint):
        raise IOError("Model file not found: {}".format(checkpoint))
    state = load_checkpoint_to_cpu(checkpoint)
    if isinstance(component, FairseqEncoder):
        component_type = "encoder"
    elif isinstance(component, FairseqDecoder):
        component_type = "decoder"
    else:
        raise ValueError(
            "component to load must be either a FairseqEncoder or "
            "FairseqDecoder. Loading other component types is not supported."
        )
    component_state_dict = OrderedDict()
    for key in state["model"].keys():
        if key.startswith(component_type):
            # encoder.input_layers.0.0.weight --> input_layers.0.0.weight
            component_subkey = key[len(component_type) + 1 :]
            component_state_dict[component_subkey] = state["model"][key]
    component.load_state_dict(component_state_dict, strict=True)
    return component
Load a pretrained FairseqEncoder or FairseqDecoder from checkpoint into the provided `component` object. If state_dict fails to load, there may be a mismatch in the architecture of the corresponding `component` found in the `checkpoint` file.
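A sketch of warm-starting one component: `model` is assumed to be a freshly built fairseq model (e.g. from `task.build_model`) and the checkpoint path is a placeholder.

# Initialize only the encoder from a pretrained checkpoint; decoder weights
# stay randomly initialized.
load_pretrained_component_from_model(model.encoder, "checkpoints/pretrained.pt")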
183,780
import ast import collections import contextlib import logging import os import re import traceback from collections import OrderedDict from typing import Any, Dict, Optional, Union import torch from fairseq.dataclass.configs import CheckpointConfig, FairseqConfig from fairseq.dataclass.utils import ( convert_namespace_to_omegaconf, overwrite_args_by_name, ) from fairseq.file_io import PathManager from fairseq.models import FairseqDecoder, FairseqEncoder from omegaconf import DictConfig, open_dict logger = logging.getLogger(__name__) def verify_checkpoint_directory(save_dir: str) -> None: if not os.path.exists(save_dir): os.makedirs(save_dir, exist_ok=True) temp_file_path = os.path.join(save_dir, "dummy") try: with open(temp_file_path, "w"): pass except OSError as e: logger.warning( "Unable to access checkpoint save directory: {}".format(save_dir) ) raise e else: os.remove(temp_file_path)
null
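Illustrative call with a placeholder path: running this once before training makes an unwritable save directory (e.g. a read-only mount) fail immediately rather than at the first checkpoint.

verify_checkpoint_directory("checkpoints/my_run")  # raises OSError if not writable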
183,782
import argparse import contextlib import copy import importlib import logging import os import sys import tempfile import warnings from itertools import accumulate from typing import Callable, Dict, List, Optional import torch import torch.nn.functional as F from fairseq.modules.multihead_attention import MultiheadAttention from torch import Tensor MANIFOLD_PATH_SEP = "|" def split_paths(paths: str) -> List[str]: return ( paths.split(os.pathsep) if "://" not in paths else paths.split(MANIFOLD_PATH_SEP) )
null
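The behavior of `split_paths` follows directly from the code above: on POSIX, `os.pathsep` is `':'`, which collides with the `'://'` in URL-style paths, so `'|'` (`MANIFOLD_PATH_SEP`) is used instead whenever a scheme is present.

print(split_paths("data-bin/part1:data-bin/part2"))
# ['data-bin/part1', 'data-bin/part2']   (POSIX, where os.pathsep == ':')
print(split_paths("manifold://bucket/a|manifold://bucket/b"))
# ['manifold://bucket/a', 'manifold://bucket/b']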
183,783
import argparse import contextlib import copy import importlib import logging import os import sys import tempfile import warnings from itertools import accumulate from typing import Callable, Dict, List, Optional import torch import torch.nn.functional as F from fairseq.modules.multihead_attention import MultiheadAttention from torch import Tensor def deprecation_warning(message, stacklevel=3): def load_ensemble_for_inference(filenames, task, model_arg_overrides=None): from fairseq import checkpoint_utils deprecation_warning( "utils.load_ensemble_for_inference is deprecated. " "Please use checkpoint_utils.load_model_ensemble instead." ) return checkpoint_utils.load_model_ensemble( filenames, arg_overrides=model_arg_overrides, task=task )
null
183,784
import argparse import contextlib import copy import importlib import logging import os import sys import tempfile import warnings from itertools import accumulate from typing import Callable, Dict, List, Optional import torch import torch.nn.functional as F from fairseq.modules.multihead_attention import MultiheadAttention from torch import Tensor def apply_to_sample(f, sample): if hasattr(sample, "__len__") and len(sample) == 0: return {} def _apply(x): if torch.is_tensor(x): return f(x) elif isinstance(x, dict): return {key: _apply(value) for key, value in x.items()} elif isinstance(x, list): return [_apply(x) for x in x] elif isinstance(x, tuple): return tuple(_apply(x) for x in x) elif isinstance(x, set): return {_apply(x) for x in x} else: return x return _apply(sample) def move_to_cuda(sample, device=None): device = device or torch.cuda.current_device() def _move_to_cuda(tensor): # non_blocking is ignored if tensor is not pinned, so we can always set # to True (see github.com/PyTorchLightning/pytorch-lightning/issues/620) return tensor.to(device=device, non_blocking=True) return apply_to_sample(_move_to_cuda, sample)
null
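A sketch showing why `apply_to_sample` is written recursively: a fairseq batch is a nested dict, and `move_to_cuda` relocates every tensor in it with a single call while leaving non-tensor values untouched. The batch contents below are placeholders.

import torch

batch = {
    "id": torch.arange(2),
    "nsentences": 2,  # non-tensor values pass through unchanged
    "net_input": {
        "src_tokens": torch.zeros(2, 5, dtype=torch.long),
        "src_lengths": torch.tensor([5, 5]),
    },
}
if torch.cuda.is_available():
    batch = move_to_cuda(batch)
    print(batch["net_input"]["src_tokens"].device)  # cuda:0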
183,785
import argparse import contextlib import copy import importlib import logging import os import sys import tempfile import warnings from itertools import accumulate from typing import Callable, Dict, List, Optional import torch import torch.nn.functional as F from fairseq.modules.multihead_attention import MultiheadAttention from torch import Tensor def apply_to_sample(f, sample): if hasattr(sample, "__len__") and len(sample) == 0: return {} def _apply(x): if torch.is_tensor(x): return f(x) elif isinstance(x, dict): return {key: _apply(value) for key, value in x.items()} elif isinstance(x, list): return [_apply(x) for x in x] elif isinstance(x, tuple): return tuple(_apply(x) for x in x) elif isinstance(x, set): return {_apply(x) for x in x} else: return x return _apply(sample) def move_to_cpu(sample): def _move_to_cpu(tensor): # PyTorch has poor support for half tensors (float16) on CPU. # Move any such tensors to float32. if tensor.dtype in {torch.bfloat16, torch.float16}: tensor = tensor.to(dtype=torch.float32) return tensor.cpu() return apply_to_sample(_move_to_cpu, sample)
null
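As the comment in the code notes, CPU kernels for half precision are limited, so `move_to_cpu` upcasts float16/bfloat16 tensors on the way out:

import torch

sample = {"logits": torch.zeros(4, dtype=torch.float16)}
print(move_to_cpu(sample)["logits"].dtype)  # torch.float32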
183,786
import argparse import contextlib import copy import importlib import logging import os import sys import tempfile import warnings from itertools import accumulate from typing import Callable, Dict, List, Optional import torch import torch.nn.functional as F from fairseq.modules.multihead_attention import MultiheadAttention from torch import Tensor class MultiheadAttention(nn.Module): """Multi-headed attention. See "Attention Is All You Need" for more details. """ def __init__( self, embed_dim, num_heads, kdim=None, vdim=None, dropout=0.0, bias=True, add_bias_kv=False, add_zero_attn=False, self_attention=False, encoder_decoder_attention=False, q_noise=0.0, qn_block_size=8, shared_attn=None ): super().__init__() self.embed_dim = embed_dim self.kdim = kdim if kdim is not None else embed_dim self.vdim = vdim if vdim is not None else embed_dim self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim self.num_heads = num_heads self.dropout_module = FairseqDropout( dropout, module_name=self.__class__.__name__ ) self.head_dim = embed_dim // num_heads assert ( self.head_dim * num_heads == self.embed_dim ), "embed_dim must be divisible by num_heads" self.scaling = self.head_dim ** -0.5 self.self_attention = self_attention self.encoder_decoder_attention = encoder_decoder_attention assert not self.self_attention or self.qkv_same_dim, ( "Self-attention requires query, key and " "value to be of the same size" ) self.k_proj = quant_noise( nn.Linear(self.kdim, embed_dim, bias=bias), q_noise, qn_block_size ) if shared_attn is None else shared_attn.k_proj self.v_proj = quant_noise( nn.Linear(self.vdim, embed_dim, bias=bias), q_noise, qn_block_size ) if shared_attn is None else shared_attn.v_proj self.q_proj = quant_noise( nn.Linear(embed_dim, embed_dim, bias=bias), q_noise, qn_block_size ) if shared_attn is None else shared_attn.q_proj self.out_proj = quant_noise( nn.Linear(embed_dim, embed_dim, bias=bias), q_noise, qn_block_size ) if shared_attn is None else shared_attn.out_proj if add_bias_kv: self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim)) self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim)) else: self.bias_k = self.bias_v = None self.add_zero_attn = add_zero_attn if shared_attn is None: self.reset_parameters() self.onnx_trace = False def prepare_for_onnx_export_(self): self.onnx_trace = True def reset_parameters(self): if self.qkv_same_dim: # Empirically observed the convergence to be much better with # the scaled initialization nn.init.xavier_uniform_(self.k_proj.weight, gain=1 / math.sqrt(2)) nn.init.xavier_uniform_(self.v_proj.weight, gain=1 / math.sqrt(2)) nn.init.xavier_uniform_(self.q_proj.weight, gain=1 / math.sqrt(2)) else: nn.init.xavier_uniform_(self.k_proj.weight) nn.init.xavier_uniform_(self.v_proj.weight) nn.init.xavier_uniform_(self.q_proj.weight) nn.init.xavier_uniform_(self.out_proj.weight) if self.out_proj.bias is not None: nn.init.constant_(self.out_proj.bias, 0.0) if self.bias_k is not None: nn.init.xavier_normal_(self.bias_k) if self.bias_v is not None: nn.init.xavier_normal_(self.bias_v) def forward( self, query, key: Optional[Tensor], value: Optional[Tensor], key_padding_mask: Optional[Tensor] = None, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, need_weights: bool = True, static_kv: bool = False, attn_mask: Optional[Tensor] = None, before_softmax: bool = False, need_head_weights: bool = False, q_lora=None, v_lora=None ) -> Tuple[Tensor, Optional[Tensor]]: """Input shape: Time x Batch x Channel Args: key_padding_mask (ByteTensor, 
optional): mask to exclude keys that are pads, of shape `(batch, src_len)`, where padding elements are indicated by 1s. need_weights (bool, optional): return the attention weights, averaged over heads (default: False). attn_mask (ByteTensor, optional): typically used to implement causal attention, where the mask prevents the attention from looking forward in time (default: None). before_softmax (bool, optional): return the raw attention weights and values before the attention softmax. need_head_weights (bool, optional): return the attention weights for each head. Implies *need_weights*. Default: return the average attention weights over all heads. """ if need_head_weights: need_weights = True is_tpu = query.device.type == "xla" tgt_len, bsz, embed_dim = query.size() src_len = tgt_len assert embed_dim == self.embed_dim, f"query dim {embed_dim} != {self.embed_dim}" assert list(query.size()) == [tgt_len, bsz, embed_dim] if key is not None: src_len, key_bsz, _ = key.size() if not torch.jit.is_scripting(): assert key_bsz == bsz assert value is not None assert src_len, bsz == value.shape[:2] if ( False and not self.onnx_trace and not is_tpu # don't use PyTorch version on TPUs and incremental_state is None and not static_kv # A workaround for quantization to work. Otherwise JIT compilation # treats bias in linear module as method. and not torch.jit.is_scripting() ): assert key is not None and value is not None return F.multi_head_attention_forward( query, key, value, self.embed_dim, self.num_heads, torch.empty([0]), torch.cat((self.q_proj.bias, self.k_proj.bias, self.v_proj.bias)), self.bias_k, self.bias_v, self.add_zero_attn, self.dropout_module.p, self.out_proj.weight, self.out_proj.bias, self.training or self.dropout_module.apply_during_inference, key_padding_mask, need_weights, attn_mask, use_separate_proj_weight=True, q_proj_weight=self.q_proj.weight, k_proj_weight=self.k_proj.weight, v_proj_weight=self.v_proj.weight, ) if incremental_state is not None: saved_state = self._get_input_buffer(incremental_state) if saved_state is not None and "prev_key" in saved_state: # previous time steps are cached - no need to recompute # key and value if they are static if static_kv: assert self.encoder_decoder_attention and not self.self_attention key = value = None else: saved_state = None if self.self_attention: q = self.q_proj(query) k = self.k_proj(query) v = self.v_proj(query) elif self.encoder_decoder_attention: # encoder-decoder attention q = self.q_proj(query) if key is None: assert value is None k = v = None else: k = self.k_proj(key) v = self.v_proj(key) else: assert key is not None and value is not None q = self.q_proj(query) k = self.k_proj(key) v = self.v_proj(value) if q_lora is not None: q += q_lora(query) if v_lora is not None: v += v_lora(value) if value is not None else v_lora(query) q *= self.scaling if self.bias_k is not None: assert self.bias_v is not None k = torch.cat([k, self.bias_k.repeat(1, bsz, 1)]) v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)]) if attn_mask is not None: attn_mask = torch.cat( [attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1 ) if key_padding_mask is not None: key_padding_mask = torch.cat( [ key_padding_mask, key_padding_mask.new_zeros(key_padding_mask.size(0), 1), ], dim=1, ) q = ( q.contiguous() .view(tgt_len, bsz * self.num_heads, self.head_dim) .transpose(0, 1) ) if k is not None: k = ( k.contiguous() .view(-1, bsz * self.num_heads, self.head_dim) .transpose(0, 1) ) if v is not None: v = ( v.contiguous() .view(-1, bsz * self.num_heads, 
self.head_dim) .transpose(0, 1) ) if saved_state is not None: # saved states are stored with shape (bsz, num_heads, seq_len, head_dim) if "prev_key" in saved_state: _prev_key = saved_state["prev_key"] assert _prev_key is not None prev_key = _prev_key.view(bsz * self.num_heads, -1, self.head_dim) if static_kv: k = prev_key else: assert k is not None k = torch.cat([prev_key, k], dim=1) src_len = k.size(1) if "prev_value" in saved_state: _prev_value = saved_state["prev_value"] assert _prev_value is not None prev_value = _prev_value.view(bsz * self.num_heads, -1, self.head_dim) if static_kv: v = prev_value else: assert v is not None v = torch.cat([prev_value, v], dim=1) prev_key_padding_mask: Optional[Tensor] = None if "prev_key_padding_mask" in saved_state: prev_key_padding_mask = saved_state["prev_key_padding_mask"] assert k is not None and v is not None key_padding_mask = MultiheadAttention._append_prev_key_padding_mask( key_padding_mask=key_padding_mask, prev_key_padding_mask=prev_key_padding_mask, batch_size=bsz, src_len=k.size(1), static_kv=static_kv, ) saved_state["prev_key"] = k.view(bsz, self.num_heads, -1, self.head_dim) saved_state["prev_value"] = v.view(bsz, self.num_heads, -1, self.head_dim) saved_state["prev_key_padding_mask"] = key_padding_mask # In this branch incremental_state is never None assert incremental_state is not None incremental_state = self._set_input_buffer(incremental_state, saved_state) assert k is not None assert k.size(1) == src_len # This is part of a workaround to get around fork/join parallelism # not supporting Optional types. if key_padding_mask is not None and key_padding_mask.dim() == 0: key_padding_mask = None if key_padding_mask is not None: assert key_padding_mask.size(0) == bsz assert key_padding_mask.size(1) == src_len if self.add_zero_attn: assert v is not None src_len += 1 k = torch.cat([k, k.new_zeros((k.size(0), 1) + k.size()[2:])], dim=1) v = torch.cat([v, v.new_zeros((v.size(0), 1) + v.size()[2:])], dim=1) if attn_mask is not None: attn_mask = torch.cat( [attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1 ) if key_padding_mask is not None: key_padding_mask = torch.cat( [ key_padding_mask, torch.zeros(key_padding_mask.size(0), 1).type_as( key_padding_mask ), ], dim=1, ) attn_weights = torch.bmm(q, k.transpose(1, 2)) attn_weights = self.apply_sparse_mask(attn_weights, tgt_len, src_len, bsz) assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len, src_len] if attn_mask is not None: if len(attn_mask.size()) < 3: attn_mask = attn_mask.unsqueeze(0) if self.onnx_trace: attn_mask = attn_mask.repeat(attn_weights.size(0), 1, 1) attn_weights += attn_mask if key_padding_mask is not None: # don't attend to padding symbols attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) if not is_tpu: attn_weights = attn_weights.masked_fill( key_padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool), float("-inf"), ) else: attn_weights = attn_weights.transpose(0, 2) attn_weights = attn_weights.masked_fill(key_padding_mask, float("-inf")) attn_weights = attn_weights.transpose(0, 2) attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) if before_softmax: return attn_weights, v attn_weights_float = utils.softmax( attn_weights, dim=-1, onnx_trace=self.onnx_trace ) attn_weights = attn_weights_float.type_as(attn_weights) attn_probs = self.dropout_module(attn_weights) assert v is not None attn = torch.bmm(attn_probs, v) assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.head_dim] if self.onnx_trace and 
attn.size(1) == 1: # when ONNX tracing a single decoder step (sequence length == 1) # the transpose is a no-op copy before view, thus unnecessary attn = attn.contiguous().view(tgt_len, bsz, embed_dim) else: attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim) attn = self.out_proj(attn) attn_weights: Optional[Tensor] = None if need_weights: attn_weights = attn_weights_float.view( bsz, self.num_heads, tgt_len, src_len ).transpose(1, 0) if not need_head_weights: # average attention weights over heads attn_weights = attn_weights.mean(dim=0) return attn, attn_weights def _append_prev_key_padding_mask( key_padding_mask: Optional[Tensor], prev_key_padding_mask: Optional[Tensor], batch_size: int, src_len: int, static_kv: bool, ) -> Optional[Tensor]: # saved key padding masks have shape (bsz, seq_len) if prev_key_padding_mask is not None and static_kv: new_key_padding_mask = prev_key_padding_mask elif prev_key_padding_mask is not None and key_padding_mask is not None: new_key_padding_mask = torch.cat( [prev_key_padding_mask.float(), key_padding_mask.float()], dim=1 ) # During incremental decoding, as the padding token enters and # leaves the frame, there will be a time when prev or current # is None elif prev_key_padding_mask is not None: if src_len > prev_key_padding_mask.size(1): filler = torch.zeros( (batch_size, src_len - prev_key_padding_mask.size(1)), device=prev_key_padding_mask.device, ) new_key_padding_mask = torch.cat( [prev_key_padding_mask.float(), filler.float()], dim=1 ) else: new_key_padding_mask = prev_key_padding_mask.float() elif key_padding_mask is not None: if src_len > key_padding_mask.size(1): filler = torch.zeros( (batch_size, src_len - key_padding_mask.size(1)), device=key_padding_mask.device, ) new_key_padding_mask = torch.cat( [filler.float(), key_padding_mask.float()], dim=1 ) else: new_key_padding_mask = key_padding_mask.float() else: new_key_padding_mask = prev_key_padding_mask return new_key_padding_mask def reorder_incremental_state( self, incremental_state: Dict[str, Dict[str, Optional[Tensor]]], new_order: Tensor, ): """Reorder buffered internal state (for incremental generation).""" input_buffer = self._get_input_buffer(incremental_state) if input_buffer is not None: for k in input_buffer.keys(): input_buffer_k = input_buffer[k] if input_buffer_k is not None: if self.encoder_decoder_attention and input_buffer_k.size( 0 ) == new_order.size(0): break input_buffer[k] = input_buffer_k.index_select(0, new_order) incremental_state = self._set_input_buffer(incremental_state, input_buffer) return incremental_state def _get_input_buffer( self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] ) -> Dict[str, Optional[Tensor]]: result = self.get_incremental_state(incremental_state, "attn_state") if result is not None: return result else: empty_result: Dict[str, Optional[Tensor]] = {} return empty_result def _set_input_buffer( self, incremental_state: Dict[str, Dict[str, Optional[Tensor]]], buffer: Dict[str, Optional[Tensor]], ): return self.set_incremental_state(incremental_state, "attn_state", buffer) def apply_sparse_mask(self, attn_weights, tgt_len: int, src_len: int, bsz: int): return attn_weights def upgrade_state_dict_named(self, state_dict, name): prefix = name + "." 
if name != "" else "" items_to_add = {} keys_to_remove = [] for k in state_dict.keys(): if k.endswith(prefix + "in_proj_weight"): # in_proj_weight used to be q + k + v with same dimensions dim = int(state_dict[k].shape[0] / 3) items_to_add[prefix + "q_proj.weight"] = state_dict[k][:dim] items_to_add[prefix + "k_proj.weight"] = state_dict[k][dim : 2 * dim] items_to_add[prefix + "v_proj.weight"] = state_dict[k][2 * dim :] keys_to_remove.append(k) k_bias = prefix + "in_proj_bias" if k_bias in state_dict.keys(): dim = int(state_dict[k].shape[0] / 3) items_to_add[prefix + "q_proj.bias"] = state_dict[k_bias][:dim] items_to_add[prefix + "k_proj.bias"] = state_dict[k_bias][ dim : 2 * dim ] items_to_add[prefix + "v_proj.bias"] = state_dict[k_bias][2 * dim :] keys_to_remove.append(prefix + "in_proj_bias") for k in keys_to_remove: del state_dict[k] for key, value in items_to_add.items(): state_dict[key] = value The provided code snippet includes necessary dependencies for implementing the `get_incremental_state` function. Write a Python function `def get_incremental_state( module: MultiheadAttention, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]], key: str, ) -> Optional[Dict[str, Optional[Tensor]]]` to solve the following problem: Helper for getting incremental state for an nn.Module. Here is the function: def get_incremental_state( module: MultiheadAttention, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]], key: str, ) -> Optional[Dict[str, Optional[Tensor]]]: """Helper for getting incremental state for an nn.Module.""" return module.get_incremental_state(incremental_state, key)
Helper for getting incremental state for an nn.Module.
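A minimal sketch of the incremental-decoding cache this helper reads. It assumes the usual fairseq wiring is in place (the `@with_incremental_state` mixin that gives `MultiheadAttention` its `get_incremental_state`/`set_incremental_state` accessors); the shapes are illustrative.

import torch

attn = MultiheadAttention(embed_dim=16, num_heads=4, self_attention=True)
incremental_state = {}
x = torch.randn(1, 2, 16)  # (tgt_len=1, bsz=2, embed_dim=16)
attn(x, x, x, incremental_state=incremental_state)  # first decoding step caches k/v

buf = get_incremental_state(attn, incremental_state, "attn_state")
print(buf["prev_key"].shape)  # torch.Size([2, 4, 1, 4]): (bsz, heads, seq, head_dim)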
183,787
import argparse import contextlib import copy import importlib import logging import os import sys import tempfile import warnings from itertools import accumulate from typing import Callable, Dict, List, Optional import torch import torch.nn.functional as F from fairseq.modules.multihead_attention import MultiheadAttention from torch import Tensor class MultiheadAttention(nn.Module): """Multi-headed attention. See "Attention Is All You Need" for more details. """ def __init__( self, embed_dim, num_heads, kdim=None, vdim=None, dropout=0.0, bias=True, add_bias_kv=False, add_zero_attn=False, self_attention=False, encoder_decoder_attention=False, q_noise=0.0, qn_block_size=8, shared_attn=None ): super().__init__() self.embed_dim = embed_dim self.kdim = kdim if kdim is not None else embed_dim self.vdim = vdim if vdim is not None else embed_dim self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim self.num_heads = num_heads self.dropout_module = FairseqDropout( dropout, module_name=self.__class__.__name__ ) self.head_dim = embed_dim // num_heads assert ( self.head_dim * num_heads == self.embed_dim ), "embed_dim must be divisible by num_heads" self.scaling = self.head_dim ** -0.5 self.self_attention = self_attention self.encoder_decoder_attention = encoder_decoder_attention assert not self.self_attention or self.qkv_same_dim, ( "Self-attention requires query, key and " "value to be of the same size" ) self.k_proj = quant_noise( nn.Linear(self.kdim, embed_dim, bias=bias), q_noise, qn_block_size ) if shared_attn is None else shared_attn.k_proj self.v_proj = quant_noise( nn.Linear(self.vdim, embed_dim, bias=bias), q_noise, qn_block_size ) if shared_attn is None else shared_attn.v_proj self.q_proj = quant_noise( nn.Linear(embed_dim, embed_dim, bias=bias), q_noise, qn_block_size ) if shared_attn is None else shared_attn.q_proj self.out_proj = quant_noise( nn.Linear(embed_dim, embed_dim, bias=bias), q_noise, qn_block_size ) if shared_attn is None else shared_attn.out_proj if add_bias_kv: self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim)) self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim)) else: self.bias_k = self.bias_v = None self.add_zero_attn = add_zero_attn if shared_attn is None: self.reset_parameters() self.onnx_trace = False def prepare_for_onnx_export_(self): self.onnx_trace = True def reset_parameters(self): if self.qkv_same_dim: # Empirically observed the convergence to be much better with # the scaled initialization nn.init.xavier_uniform_(self.k_proj.weight, gain=1 / math.sqrt(2)) nn.init.xavier_uniform_(self.v_proj.weight, gain=1 / math.sqrt(2)) nn.init.xavier_uniform_(self.q_proj.weight, gain=1 / math.sqrt(2)) else: nn.init.xavier_uniform_(self.k_proj.weight) nn.init.xavier_uniform_(self.v_proj.weight) nn.init.xavier_uniform_(self.q_proj.weight) nn.init.xavier_uniform_(self.out_proj.weight) if self.out_proj.bias is not None: nn.init.constant_(self.out_proj.bias, 0.0) if self.bias_k is not None: nn.init.xavier_normal_(self.bias_k) if self.bias_v is not None: nn.init.xavier_normal_(self.bias_v) def forward( self, query, key: Optional[Tensor], value: Optional[Tensor], key_padding_mask: Optional[Tensor] = None, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, need_weights: bool = True, static_kv: bool = False, attn_mask: Optional[Tensor] = None, before_softmax: bool = False, need_head_weights: bool = False, q_lora=None, v_lora=None ) -> Tuple[Tensor, Optional[Tensor]]: """Input shape: Time x Batch x Channel Args: key_padding_mask (ByteTensor, 
optional): mask to exclude keys that are pads, of shape `(batch, src_len)`, where padding elements are indicated by 1s. need_weights (bool, optional): return the attention weights, averaged over heads (default: False). attn_mask (ByteTensor, optional): typically used to implement causal attention, where the mask prevents the attention from looking forward in time (default: None). before_softmax (bool, optional): return the raw attention weights and values before the attention softmax. need_head_weights (bool, optional): return the attention weights for each head. Implies *need_weights*. Default: return the average attention weights over all heads. """ if need_head_weights: need_weights = True is_tpu = query.device.type == "xla" tgt_len, bsz, embed_dim = query.size() src_len = tgt_len assert embed_dim == self.embed_dim, f"query dim {embed_dim} != {self.embed_dim}" assert list(query.size()) == [tgt_len, bsz, embed_dim] if key is not None: src_len, key_bsz, _ = key.size() if not torch.jit.is_scripting(): assert key_bsz == bsz assert value is not None assert src_len, bsz == value.shape[:2] if ( False and not self.onnx_trace and not is_tpu # don't use PyTorch version on TPUs and incremental_state is None and not static_kv # A workaround for quantization to work. Otherwise JIT compilation # treats bias in linear module as method. and not torch.jit.is_scripting() ): assert key is not None and value is not None return F.multi_head_attention_forward( query, key, value, self.embed_dim, self.num_heads, torch.empty([0]), torch.cat((self.q_proj.bias, self.k_proj.bias, self.v_proj.bias)), self.bias_k, self.bias_v, self.add_zero_attn, self.dropout_module.p, self.out_proj.weight, self.out_proj.bias, self.training or self.dropout_module.apply_during_inference, key_padding_mask, need_weights, attn_mask, use_separate_proj_weight=True, q_proj_weight=self.q_proj.weight, k_proj_weight=self.k_proj.weight, v_proj_weight=self.v_proj.weight, ) if incremental_state is not None: saved_state = self._get_input_buffer(incremental_state) if saved_state is not None and "prev_key" in saved_state: # previous time steps are cached - no need to recompute # key and value if they are static if static_kv: assert self.encoder_decoder_attention and not self.self_attention key = value = None else: saved_state = None if self.self_attention: q = self.q_proj(query) k = self.k_proj(query) v = self.v_proj(query) elif self.encoder_decoder_attention: # encoder-decoder attention q = self.q_proj(query) if key is None: assert value is None k = v = None else: k = self.k_proj(key) v = self.v_proj(key) else: assert key is not None and value is not None q = self.q_proj(query) k = self.k_proj(key) v = self.v_proj(value) if q_lora is not None: q += q_lora(query) if v_lora is not None: v += v_lora(value) if value is not None else v_lora(query) q *= self.scaling if self.bias_k is not None: assert self.bias_v is not None k = torch.cat([k, self.bias_k.repeat(1, bsz, 1)]) v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)]) if attn_mask is not None: attn_mask = torch.cat( [attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1 ) if key_padding_mask is not None: key_padding_mask = torch.cat( [ key_padding_mask, key_padding_mask.new_zeros(key_padding_mask.size(0), 1), ], dim=1, ) q = ( q.contiguous() .view(tgt_len, bsz * self.num_heads, self.head_dim) .transpose(0, 1) ) if k is not None: k = ( k.contiguous() .view(-1, bsz * self.num_heads, self.head_dim) .transpose(0, 1) ) if v is not None: v = ( v.contiguous() .view(-1, bsz * self.num_heads, 
self.head_dim) .transpose(0, 1) ) if saved_state is not None: # saved states are stored with shape (bsz, num_heads, seq_len, head_dim) if "prev_key" in saved_state: _prev_key = saved_state["prev_key"] assert _prev_key is not None prev_key = _prev_key.view(bsz * self.num_heads, -1, self.head_dim) if static_kv: k = prev_key else: assert k is not None k = torch.cat([prev_key, k], dim=1) src_len = k.size(1) if "prev_value" in saved_state: _prev_value = saved_state["prev_value"] assert _prev_value is not None prev_value = _prev_value.view(bsz * self.num_heads, -1, self.head_dim) if static_kv: v = prev_value else: assert v is not None v = torch.cat([prev_value, v], dim=1) prev_key_padding_mask: Optional[Tensor] = None if "prev_key_padding_mask" in saved_state: prev_key_padding_mask = saved_state["prev_key_padding_mask"] assert k is not None and v is not None key_padding_mask = MultiheadAttention._append_prev_key_padding_mask( key_padding_mask=key_padding_mask, prev_key_padding_mask=prev_key_padding_mask, batch_size=bsz, src_len=k.size(1), static_kv=static_kv, ) saved_state["prev_key"] = k.view(bsz, self.num_heads, -1, self.head_dim) saved_state["prev_value"] = v.view(bsz, self.num_heads, -1, self.head_dim) saved_state["prev_key_padding_mask"] = key_padding_mask # In this branch incremental_state is never None assert incremental_state is not None incremental_state = self._set_input_buffer(incremental_state, saved_state) assert k is not None assert k.size(1) == src_len # This is part of a workaround to get around fork/join parallelism # not supporting Optional types. if key_padding_mask is not None and key_padding_mask.dim() == 0: key_padding_mask = None if key_padding_mask is not None: assert key_padding_mask.size(0) == bsz assert key_padding_mask.size(1) == src_len if self.add_zero_attn: assert v is not None src_len += 1 k = torch.cat([k, k.new_zeros((k.size(0), 1) + k.size()[2:])], dim=1) v = torch.cat([v, v.new_zeros((v.size(0), 1) + v.size()[2:])], dim=1) if attn_mask is not None: attn_mask = torch.cat( [attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1 ) if key_padding_mask is not None: key_padding_mask = torch.cat( [ key_padding_mask, torch.zeros(key_padding_mask.size(0), 1).type_as( key_padding_mask ), ], dim=1, ) attn_weights = torch.bmm(q, k.transpose(1, 2)) attn_weights = self.apply_sparse_mask(attn_weights, tgt_len, src_len, bsz) assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len, src_len] if attn_mask is not None: if len(attn_mask.size()) < 3: attn_mask = attn_mask.unsqueeze(0) if self.onnx_trace: attn_mask = attn_mask.repeat(attn_weights.size(0), 1, 1) attn_weights += attn_mask if key_padding_mask is not None: # don't attend to padding symbols attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) if not is_tpu: attn_weights = attn_weights.masked_fill( key_padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool), float("-inf"), ) else: attn_weights = attn_weights.transpose(0, 2) attn_weights = attn_weights.masked_fill(key_padding_mask, float("-inf")) attn_weights = attn_weights.transpose(0, 2) attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) if before_softmax: return attn_weights, v attn_weights_float = utils.softmax( attn_weights, dim=-1, onnx_trace=self.onnx_trace ) attn_weights = attn_weights_float.type_as(attn_weights) attn_probs = self.dropout_module(attn_weights) assert v is not None attn = torch.bmm(attn_probs, v) assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.head_dim] if self.onnx_trace and 
attn.size(1) == 1: # when ONNX tracing a single decoder step (sequence length == 1) # the transpose is a no-op copy before view, thus unnecessary attn = attn.contiguous().view(tgt_len, bsz, embed_dim) else: attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim) attn = self.out_proj(attn) attn_weights: Optional[Tensor] = None if need_weights: attn_weights = attn_weights_float.view( bsz, self.num_heads, tgt_len, src_len ).transpose(1, 0) if not need_head_weights: # average attention weights over heads attn_weights = attn_weights.mean(dim=0) return attn, attn_weights def _append_prev_key_padding_mask( key_padding_mask: Optional[Tensor], prev_key_padding_mask: Optional[Tensor], batch_size: int, src_len: int, static_kv: bool, ) -> Optional[Tensor]: # saved key padding masks have shape (bsz, seq_len) if prev_key_padding_mask is not None and static_kv: new_key_padding_mask = prev_key_padding_mask elif prev_key_padding_mask is not None and key_padding_mask is not None: new_key_padding_mask = torch.cat( [prev_key_padding_mask.float(), key_padding_mask.float()], dim=1 ) # During incremental decoding, as the padding token enters and # leaves the frame, there will be a time when prev or current # is None elif prev_key_padding_mask is not None: if src_len > prev_key_padding_mask.size(1): filler = torch.zeros( (batch_size, src_len - prev_key_padding_mask.size(1)), device=prev_key_padding_mask.device, ) new_key_padding_mask = torch.cat( [prev_key_padding_mask.float(), filler.float()], dim=1 ) else: new_key_padding_mask = prev_key_padding_mask.float() elif key_padding_mask is not None: if src_len > key_padding_mask.size(1): filler = torch.zeros( (batch_size, src_len - key_padding_mask.size(1)), device=key_padding_mask.device, ) new_key_padding_mask = torch.cat( [filler.float(), key_padding_mask.float()], dim=1 ) else: new_key_padding_mask = key_padding_mask.float() else: new_key_padding_mask = prev_key_padding_mask return new_key_padding_mask def reorder_incremental_state( self, incremental_state: Dict[str, Dict[str, Optional[Tensor]]], new_order: Tensor, ): """Reorder buffered internal state (for incremental generation).""" input_buffer = self._get_input_buffer(incremental_state) if input_buffer is not None: for k in input_buffer.keys(): input_buffer_k = input_buffer[k] if input_buffer_k is not None: if self.encoder_decoder_attention and input_buffer_k.size( 0 ) == new_order.size(0): break input_buffer[k] = input_buffer_k.index_select(0, new_order) incremental_state = self._set_input_buffer(incremental_state, input_buffer) return incremental_state def _get_input_buffer( self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] ) -> Dict[str, Optional[Tensor]]: result = self.get_incremental_state(incremental_state, "attn_state") if result is not None: return result else: empty_result: Dict[str, Optional[Tensor]] = {} return empty_result def _set_input_buffer( self, incremental_state: Dict[str, Dict[str, Optional[Tensor]]], buffer: Dict[str, Optional[Tensor]], ): return self.set_incremental_state(incremental_state, "attn_state", buffer) def apply_sparse_mask(self, attn_weights, tgt_len: int, src_len: int, bsz: int): return attn_weights def upgrade_state_dict_named(self, state_dict, name): prefix = name + "." 
if name != "" else "" items_to_add = {} keys_to_remove = [] for k in state_dict.keys(): if k.endswith(prefix + "in_proj_weight"): # in_proj_weight used to be q + k + v with same dimensions dim = int(state_dict[k].shape[0] / 3) items_to_add[prefix + "q_proj.weight"] = state_dict[k][:dim] items_to_add[prefix + "k_proj.weight"] = state_dict[k][dim : 2 * dim] items_to_add[prefix + "v_proj.weight"] = state_dict[k][2 * dim :] keys_to_remove.append(k) k_bias = prefix + "in_proj_bias" if k_bias in state_dict.keys(): dim = int(state_dict[k].shape[0] / 3) items_to_add[prefix + "q_proj.bias"] = state_dict[k_bias][:dim] items_to_add[prefix + "k_proj.bias"] = state_dict[k_bias][ dim : 2 * dim ] items_to_add[prefix + "v_proj.bias"] = state_dict[k_bias][2 * dim :] keys_to_remove.append(prefix + "in_proj_bias") for k in keys_to_remove: del state_dict[k] for key, value in items_to_add.items(): state_dict[key] = value The provided code snippet includes necessary dependencies for implementing the `set_incremental_state` function. Write a Python function `def set_incremental_state( module: MultiheadAttention, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]], key: str, value: Dict[str, Optional[Tensor]], ) -> Optional[Dict[str, Dict[str, Optional[Tensor]]]]` to solve the following problem: Helper for setting incremental state for an nn.Module. Here is the function: def set_incremental_state( module: MultiheadAttention, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]], key: str, value: Dict[str, Optional[Tensor]], ) -> Optional[Dict[str, Dict[str, Optional[Tensor]]]]: """Helper for setting incremental state for an nn.Module.""" if incremental_state is not None: result = module.set_incremental_state(incremental_state, key, value) if result is not None: incremental_state = result return incremental_state
Helper for setting incremental state for an nn.Module.
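The companion setter, sketched under the same assumptions as the `get_incremental_state` example above: the buffer is stored under a module-unique key, so two attention layers sharing one `incremental_state` dict never collide.

import torch

attn = MultiheadAttention(embed_dim=16, num_heads=4, self_attention=True)
state = {}
buffer = {"prev_key": torch.zeros(2, 4, 3, 4)}  # (bsz, heads, seq, head_dim)
state = set_incremental_state(attn, state, "attn_state", buffer)
print(list(state.keys()))  # a single module-unique key ending in '.attn_state'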
183,788
import argparse import contextlib import copy import importlib import logging import os import sys import tempfile import warnings from itertools import accumulate from typing import Callable, Dict, List, Optional import torch import torch.nn.functional as F from fairseq.modules.multihead_attention import MultiheadAttention from torch import Tensor def load_align_dict(replace_unk): if replace_unk is None: align_dict = None elif isinstance(replace_unk, str) and len(replace_unk) > 0: # Load alignment dictionary for unknown word replacement if it was passed as an argument. align_dict = {} with open(replace_unk, "r") as f: for line in f: cols = line.split() align_dict[cols[0]] = cols[1] else: # No alignment dictionary provided but we still want to perform unknown word replacement by copying the # original source word. align_dict = {} return align_dict
null
183,789
import argparse import contextlib import copy import importlib import logging import os import sys import tempfile import warnings from itertools import accumulate from typing import Callable, Dict, List, Optional import torch import torch.nn.functional as F from fairseq.modules.multihead_attention import MultiheadAttention from torch import Tensor logger = logging.getLogger(__name__) def print_embed_overlap(embed_dict, vocab_dict): embed_keys = set(embed_dict.keys()) vocab_keys = set(vocab_dict.symbols) overlap = len(embed_keys & vocab_keys) logger.info("found {}/{} types in embedding file".format(overlap, len(vocab_dict)))
null
183,790
import argparse import contextlib import copy import importlib import logging import os import sys import tempfile import warnings from itertools import accumulate from typing import Callable, Dict, List, Optional import torch import torch.nn.functional as F from fairseq.modules.multihead_attention import MultiheadAttention from torch import Tensor The provided code snippet includes necessary dependencies for implementing the `parse_embedding` function. Write a Python function `def parse_embedding(embed_path)` to solve the following problem: Parse embedding text file into a dictionary of word and embedding tensors. The first line can have vocabulary size and dimension. The following lines should contain word and embedding separated by spaces. Example: 2 5 the -0.0230 -0.0264 0.0287 0.0171 0.1403 at -0.0395 -0.1286 0.0275 0.0254 -0.0932 Here is the function: def parse_embedding(embed_path): """Parse embedding text file into a dictionary of word and embedding tensors. The first line can have vocabulary size and dimension. The following lines should contain word and embedding separated by spaces. Example: 2 5 the -0.0230 -0.0264 0.0287 0.0171 0.1403 at -0.0395 -0.1286 0.0275 0.0254 -0.0932 """ embed_dict = {} with open(embed_path) as f_embed: next(f_embed) # skip header for line in f_embed: pieces = line.rstrip().split(" ") embed_dict[pieces[0]] = torch.Tensor( [float(weight) for weight in pieces[1:]] ) return embed_dict
Parse embedding text file into a dictionary of word and embedding tensors. The first line can have vocabulary size and dimension. The following lines should contain word and embedding separated by spaces. Example: 2 5 the -0.0230 -0.0264 0.0287 0.0171 0.1403 at -0.0395 -0.1286 0.0275 0.0254 -0.0932
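An added usage sketch (not part of the original row): it writes the docstring's two-line example to a temporary file and parses it back, assuming parse_embedding is in scope.

import tempfile

import torch

content = "2 5\nthe -0.0230 -0.0264 0.0287 0.0171 0.1403\nat -0.0395 -0.1286 0.0275 0.0254 -0.0932\n"
with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as f:
    f.write(content)
    path = f.name

embed_dict = parse_embedding(path)  # the header line "2 5" is skipped
assert set(embed_dict) == {"the", "at"}
assert embed_dict["the"].shape == torch.Size([5])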
183,791
import argparse import contextlib import copy import importlib import logging import os import sys import tempfile import warnings from itertools import accumulate from typing import Callable, Dict, List, Optional import torch import torch.nn.functional as F from fairseq.modules.multihead_attention import MultiheadAttention from torch import Tensor def load_embedding(embed_dict, vocab, embedding): for idx in range(len(vocab)): token = vocab[idx] if token in embed_dict: embedding.weight.data[idx] = embed_dict[token] return embedding
null
183,792
import argparse import contextlib import copy import importlib import logging import os import sys import tempfile import warnings from itertools import accumulate from typing import Callable, Dict, List, Optional import torch import torch.nn.functional as F from fairseq.modules.multihead_attention import MultiheadAttention from torch import Tensor def replace_unk(hypo_str, src_str, alignment, align_dict, unk): from fairseq import tokenizer # Tokens are strings here hypo_tokens = tokenizer.tokenize_line(hypo_str) # TODO: Very rare cases where the replacement is '<eos>' should be handled gracefully src_tokens = tokenizer.tokenize_line(src_str) + ["<eos>"] for i, ht in enumerate(hypo_tokens): if ht == unk: src_token = src_tokens[alignment[i]] # Either take the corresponding value in the aligned dictionary or just copy the original value. hypo_tokens[i] = align_dict.get(src_token, src_token) return " ".join(hypo_tokens) def post_process_prediction( hypo_tokens, src_str, alignment, align_dict, tgt_dict, remove_bpe=None, extra_symbols_to_ignore=None, ): hypo_str = tgt_dict.string( hypo_tokens, remove_bpe, extra_symbols_to_ignore=extra_symbols_to_ignore ) if align_dict is not None: hypo_str = replace_unk( hypo_str, src_str, alignment, align_dict, tgt_dict.unk_string() ) if align_dict is not None or remove_bpe is not None: # Convert back to tokens for evaluating with unk replacement or without BPE # Note that the dictionary can be modified inside the method. hypo_tokens = tgt_dict.encode_line(hypo_str, add_if_not_exist=True) return hypo_tokens, hypo_str, alignment
null
183,793
import argparse import contextlib import copy import importlib import logging import os import sys import tempfile import warnings from itertools import accumulate from typing import Callable, Dict, List, Optional import torch import torch.nn.functional as F from fairseq.modules.multihead_attention import MultiheadAttention from torch import Tensor The provided code snippet includes necessary dependencies for implementing the `make_positions` function. Write a Python function `def make_positions(tensor, padding_idx: int, onnx_trace: bool = False)` to solve the following problem: Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols are ignored. Here is the function: def make_positions(tensor, padding_idx: int, onnx_trace: bool = False): """Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols are ignored. """ # The series of casts and type-conversions here are carefully # balanced to both work with ONNX export and XLA. In particular XLA # prefers ints, cumsum defaults to output longs, and ONNX doesn't know # how to handle the dtype kwarg in cumsum. mask = tensor.ne(padding_idx).int() return (torch.cumsum(mask, dim=1).type_as(mask) * mask).long() + padding_idx
Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols are ignored.
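A small worked example (added; assumes the function above is in scope): with padding_idx=1, pad slots keep the padding index and real tokens count up from padding_idx + 1.

import torch

tokens = torch.tensor([[5, 6, 7, 1], [1, 1, 8, 9]])  # 1 is the padding index
print(make_positions(tokens, padding_idx=1))
# tensor([[2, 3, 4, 1],
#         [1, 1, 2, 3]])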
183,794
import argparse import contextlib import copy import importlib import logging import os import sys import tempfile import warnings from itertools import accumulate from typing import Callable, Dict, List, Optional import torch import torch.nn.functional as F from fairseq.modules.multihead_attention import MultiheadAttention from torch import Tensor def strip_pad(tensor, pad): return tensor[tensor.ne(pad)]
null
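A one-line illustration (added, assuming strip_pad is in scope):

import torch

print(strip_pad(torch.tensor([4, 5, 1, 1]), pad=1))  # tensor([4, 5])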
183,795
import argparse import contextlib import copy import importlib import logging import os import sys import tempfile import warnings from itertools import accumulate from typing import Callable, Dict, List, Optional import torch import torch.nn.functional as F from fairseq.modules.multihead_attention import MultiheadAttention from torch import Tensor def buffered_arange(max): if not hasattr(buffered_arange, "buf"): buffered_arange.buf = torch.LongTensor() if max > buffered_arange.buf.numel(): buffered_arange.buf.resize_(max) torch.arange(max, out=buffered_arange.buf) return buffered_arange.buf[:max]
null
183,796
import argparse import contextlib import copy import importlib import logging import os import sys import tempfile import warnings from itertools import accumulate from typing import Callable, Dict, List, Optional import torch import torch.nn.functional as F from fairseq.modules.multihead_attention import MultiheadAttention from torch import Tensor def convert_padding_direction( src_tokens, padding_idx, right_to_left: bool = False, left_to_right: bool = False ): assert right_to_left ^ left_to_right pad_mask = src_tokens.eq(padding_idx) if not pad_mask.any(): # no padding, return early return src_tokens if left_to_right and not pad_mask[:, 0].any(): # already right padded return src_tokens if right_to_left and not pad_mask[:, -1].any(): # already left padded return src_tokens max_len = src_tokens.size(1) buffered = torch.empty(0).long() if max_len > 0: torch.arange(max_len, out=buffered) range = buffered.type_as(src_tokens).expand_as(src_tokens) num_pads = pad_mask.long().sum(dim=1, keepdim=True) if right_to_left: index = torch.remainder(range - num_pads, max_len) else: index = torch.remainder(range + num_pads, max_len) return src_tokens.gather(1, index)
null
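A worked example (added): moving a left-padded batch to right padding with padding_idx=1.

import torch

left_padded = torch.tensor([[1, 1, 7, 8], [1, 5, 6, 9]])
print(convert_padding_direction(left_padded, padding_idx=1, left_to_right=True))
# tensor([[7, 8, 1, 1],
#         [5, 6, 9, 1]])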
183,797
import argparse
import contextlib
import copy
import importlib
import logging
import os
import sys
import tempfile
import warnings
from itertools import accumulate
from typing import Callable, Dict, List, Optional

import torch
import torch.nn.functional as F
from fairseq.modules.multihead_attention import MultiheadAttention
from torch import Tensor

# guarded import of NVIDIA apex's fused kernel; clip_grad_norm_ checks this flag below
try:
    from amp_C import multi_tensor_l2norm

    multi_tensor_l2norm_available = True
except ImportError:
    multi_tensor_l2norm_available = False

def multi_tensor_total_norm(grads, chunk_size=2048 * 32) -> torch.Tensor:
    per_device_grads = {}
    norms = []
    for grad in grads:
        device = grad.device
        cur_device_grads = per_device_grads.get(device)
        if cur_device_grads is None:
            cur_device_grads = []
            per_device_grads[device] = cur_device_grads
        cur_device_grads.append(grad)
    for device in per_device_grads.keys():
        cur_device_grads = per_device_grads[device]
        if device.type == "cuda":
            # TODO(msb) return has_inf
            has_inf = torch.zeros((1, 1), dtype=torch.int, device=device)
            with torch.cuda.device(device):
                norm = multi_tensor_l2norm(
                    chunk_size, has_inf, [cur_device_grads], False
                )
            norms.append(norm[0].to(torch.cuda.current_device()))
        else:
            norms += [torch.norm(g, p=2, dtype=torch.float32) for g in cur_device_grads]
    total_norm = torch.norm(torch.stack(norms))
    return total_norm

def clip_grad_norm_(params, max_norm, aggregate_norm_fn=None) -> torch.Tensor:
    if isinstance(params, torch.Tensor):
        params = [params]
    params = list(params)
    grads = [p.grad.detach() for p in filter(lambda p: p.grad is not None, params)]
    if len(grads) == 0:
        if len(params) > 0:
            return params[0].new_tensor(0.0)
        else:
            return torch.tensor(0.0)
    if len(grads) == 1:
        total_norm = torch.norm(grads[0], p=2, dtype=torch.float32)
    else:
        if multi_tensor_l2norm_available:
            total_norm = multi_tensor_total_norm(grads)
        else:
            if torch.cuda.is_available():
                warnings.warn(
                    "amp_C fused kernels unavailable, disabling multi_tensor_l2norm; "
                    "you may get better performance by installing NVIDIA's apex library"
                )
                device = torch.cuda.current_device()
            elif grads[0].device.type == "xla":
                device = grads[0].device
            else:
                device = torch.device("cpu")
            total_norm = torch.norm(
                torch.stack(
                    [torch.norm(g, p=2, dtype=torch.float32).to(device) for g in grads]
                )
            )
    if aggregate_norm_fn is not None:
        total_norm = aggregate_norm_fn(total_norm)
    if max_norm > 0:
        max_norm = float(max_norm)
        clip_coef = (max_norm / (total_norm + 1e-6)).clamp_(max=1)
        for g in grads:
            g.mul_(clip_coef)
    return total_norm
null
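A minimal CPU sketch (added; the layer and max_norm value are invented): gradients of a small module are clipped in place and the pre-clipping L2 norm is returned.

import torch

layer = torch.nn.Linear(4, 2)
layer(torch.randn(3, 4)).sum().backward()
total_norm = clip_grad_norm_(layer.parameters(), max_norm=0.1)
print(float(total_norm))  # total gradient norm before clipping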
183,798
import argparse import contextlib import copy import importlib import logging import os import sys import tempfile import warnings from itertools import accumulate from typing import Callable, Dict, List, Optional import torch import torch.nn.functional as F from fairseq.modules.multihead_attention import MultiheadAttention from torch import Tensor The provided code snippet includes necessary dependencies for implementing the `fill_with_neg_inf` function. Write a Python function `def fill_with_neg_inf(t)` to solve the following problem: FP16-compatible function that fills a tensor with -inf. Here is the function: def fill_with_neg_inf(t): """FP16-compatible function that fills a tensor with -inf.""" return t.float().fill_(float("-inf")).type_as(t)
FP16-compatible function that fills a tensor with -inf.
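A short added example of the typical use, building an additive causal (future) mask that remains -inf after a cast to fp16:

import torch

sz = 4
future_mask = torch.triu(fill_with_neg_inf(torch.zeros(sz, sz)), diagonal=1)
print(future_mask)  # zeros on/below the diagonal, -inf strictly above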
183,799
import argparse import contextlib import copy import importlib import logging import os import sys import tempfile import warnings from itertools import accumulate from typing import Callable, Dict, List, Optional import torch import torch.nn.functional as F from fairseq.modules.multihead_attention import MultiheadAttention from torch import Tensor def item(tensor): if hasattr(tensor, "item"): return tensor.item() if hasattr(tensor, "__getitem__"): return tensor[0] return tensor def _match_types(arg1, arg2): """Convert the numerical argument to the same type as the other argument""" def upgrade(arg_number, arg_structure): if isinstance(arg_structure, tuple): return tuple([arg_number] * len(arg_structure)) elif isinstance(arg_structure, dict): arg = copy.deepcopy(arg_structure) for k in arg: arg[k] = upgrade(arg_number, arg_structure[k]) return arg else: return arg_number if isinstance(arg1, float) or isinstance(arg1, int): return upgrade(arg1, arg2), arg2 elif isinstance(arg2, float) or isinstance(arg2, int): return arg1, upgrade(arg2, arg1) return arg1, arg2 The provided code snippet includes necessary dependencies for implementing the `resolve_max_positions` function. Write a Python function `def resolve_max_positions(*args)` to solve the following problem: Resolve max position constraints from multiple sources. Here is the function: def resolve_max_positions(*args): """Resolve max position constraints from multiple sources.""" def map_value_update(d1, d2): updated_value = copy.deepcopy(d1) for key in d2: if key not in updated_value: updated_value[key] = d2[key] else: updated_value[key] = min(d1[key], d2[key]) return updated_value def nullsafe_min(l): minim = None for item in l: if minim is None: minim = item elif item is not None and item < minim: minim = item return minim max_positions = None for arg in args: if max_positions is None: max_positions = arg elif arg is not None: max_positions, arg = _match_types(max_positions, arg) if isinstance(arg, float) or isinstance(arg, int): max_positions = min(max_positions, arg) elif isinstance(arg, dict): max_positions = map_value_update(max_positions, arg) else: max_positions = tuple(map(nullsafe_min, zip(max_positions, arg))) return max_positions
Resolve max position constraints from multiple sources.
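Two tiny worked cases (added): scalar constraints resolve to their minimum, and tuple constraints resolve element-wise.

print(resolve_max_positions(1024, 512))                   # 512
print(resolve_max_positions((1024, 1024), (512, 2048)))   # (512, 1024)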
183,800
import argparse
import contextlib
import copy
import importlib
import logging
import os
import sys
import tempfile
import warnings
from itertools import accumulate
from typing import Callable, Dict, List, Optional

import torch
import torch.nn.functional as F
from fairseq.modules.multihead_attention import MultiheadAttention
from torch import Tensor

# backward-compatibility aliases for modules that moved within fairseq;
# the two imports below were missing from the snippet as dumped
from fairseq.distributed import utils as distributed_utils
from fairseq.logging import meters, metrics, progress_bar

sys.modules["fairseq.distributed_utils"] = distributed_utils
sys.modules["fairseq.meters"] = meters
sys.modules["fairseq.metrics"] = metrics
sys.modules["fairseq.progress_bar"] = progress_bar

def import_user_module(args):
    module_path = getattr(args, "user_dir", None)
    if module_path is not None:
        module_path = os.path.abspath(args.user_dir)
        if not os.path.exists(module_path) and not os.path.isfile(
            os.path.dirname(module_path)
        ):
            fairseq_rel_path = os.path.join(os.path.dirname(__file__), args.user_dir)
            if os.path.exists(fairseq_rel_path):
                module_path = fairseq_rel_path
            else:
                fairseq_rel_path = os.path.join(
                    os.path.dirname(__file__), "..", args.user_dir
                )
                if os.path.exists(fairseq_rel_path):
                    module_path = fairseq_rel_path
                else:
                    raise FileNotFoundError(module_path)

        # ensure that user modules are only imported once
        import_user_module.memo = getattr(import_user_module, "memo", set())
        if module_path not in import_user_module.memo:
            import_user_module.memo.add(module_path)

            module_parent, module_name = os.path.split(module_path)
            if module_name not in sys.modules:
                sys.path.insert(0, module_parent)
                importlib.import_module(module_name)
            else:
                raise ImportError(
                    "Failed to import --user-dir={} because the corresponding module name "
                    "({}) is not globally unique. Please rename the directory to "
                    "something unique and try again.".format(module_path, module_name)
                )
null
183,801
import argparse import contextlib import copy import importlib import logging import os import sys import tempfile import warnings from itertools import accumulate from typing import Callable, Dict, List, Optional import torch import torch.nn.functional as F from fairseq.modules.multihead_attention import MultiheadAttention from torch import Tensor def softmax(x, dim: int, onnx_trace: bool = False): if onnx_trace: return F.softmax(x.float(), dim=dim) else: return F.softmax(x, dim=dim, dtype=torch.float32)
null
183,802
import argparse import contextlib import copy import importlib import logging import os import sys import tempfile import warnings from itertools import accumulate from typing import Callable, Dict, List, Optional import torch import torch.nn.functional as F from fairseq.modules.multihead_attention import MultiheadAttention from torch import Tensor def log_softmax(x, dim: int, onnx_trace: bool = False): if onnx_trace: return F.log_softmax(x.float(), dim=dim) else: return F.log_softmax(x, dim=dim, dtype=torch.float32)
null
183,803
import argparse
import contextlib
import copy
import importlib
import logging
import os
import sys
import tempfile
import warnings
from itertools import accumulate
from typing import Callable, Dict, List, Optional

import torch
import torch.nn.functional as F
from fairseq.modules.multihead_attention import MultiheadAttention
from torch import Tensor

# numpy is optional here; safe_round falls back gracefully when it is absent
try:
    import numpy as np
except ImportError:
    np = None

def safe_round(number, ndigits):
    if hasattr(number, "__round__"):
        return round(number, ndigits)
    elif torch is not None and torch.is_tensor(number) and number.numel() == 1:
        return safe_round(number.item(), ndigits)
    elif np is not None and np.ndim(number) == 0 and hasattr(number, "item"):
        return safe_round(number.item(), ndigits)
    else:
        return number

def get_perplexity(loss, round=2, base=2):
    from fairseq.logging.meters import safe_round

    if loss is None:
        return 0.0
    try:
        return safe_round(base ** loss, round)
    except OverflowError:
        return float("inf")
null
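A quick numeric check (added; requires fairseq installed, since the helper imports safe_round from fairseq.logging.meters at call time):

print(get_perplexity(1.0))   # 2.0, since 2 ** 1.0 = 2
print(get_perplexity(None))  # 0.0
print(get_perplexity(1e9))   # inf -- the OverflowError is caught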
183,804
import argparse import contextlib import copy import importlib import logging import os import sys import tempfile import warnings from itertools import accumulate from typing import Callable, Dict, List, Optional import torch import torch.nn.functional as F from fairseq.modules.multihead_attention import MultiheadAttention from torch import Tensor def deprecation_warning(message, stacklevel=3): # don't use DeprecationWarning, since it's ignored by default warnings.warn(message, stacklevel=stacklevel) def gelu(x: torch.Tensor) -> torch.Tensor: return torch.nn.functional.gelu(x.float()).type_as(x) The provided code snippet includes necessary dependencies for implementing the `get_activation_fn` function. Write a Python function `def get_activation_fn(activation: str) -> Callable` to solve the following problem: Returns the activation function corresponding to `activation` Here is the function: def get_activation_fn(activation: str) -> Callable: """ Returns the activation function corresponding to `activation` """ from fairseq.modules import gelu, gelu_accurate if activation == "relu": return F.relu elif activation == "gelu": return gelu elif activation == "gelu_fast": deprecation_warning( "--activation-fn=gelu_fast has been renamed to gelu_accurate" ) return gelu_accurate elif activation == "gelu_accurate": return gelu_accurate elif activation == "tanh": return torch.tanh elif activation == "linear": return lambda x: x else: raise RuntimeError("--activation-fn {} not supported".format(activation))
Returns the activation function corresponding to `activation`
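A usage sketch (added; requires fairseq installed, because the lookup unconditionally imports the gelu variants from fairseq.modules):

import torch

act = get_activation_fn("relu")
print(act(torch.tensor([-1.0, 2.0])))  # tensor([0., 2.])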
183,805
import argparse import contextlib import copy import importlib import logging import os import sys import tempfile import warnings from itertools import accumulate from typing import Callable, Dict, List, Optional import torch import torch.nn.functional as F from fairseq.modules.multihead_attention import MultiheadAttention from torch import Tensor def get_available_activation_fns() -> List: return [ "relu", "gelu", "gelu_fast", # deprecated "gelu_accurate", "tanh", "linear", ]
null
183,806
import argparse
import contextlib
import copy
import importlib
import logging
import os
import sys
import tempfile
import warnings
from itertools import accumulate
from typing import Callable, Dict, List, Optional

import torch
import torch.nn.functional as F
from fairseq.modules.multihead_attention import MultiheadAttention
from torch import Tensor

@contextlib.contextmanager
def model_eval(model):
    # temporarily put the model in eval mode; restore its original mode on exit
    is_training = model.training
    model.eval()
    yield
    model.train(is_training)
null
183,807
import argparse import contextlib import copy import importlib import logging import os import sys import tempfile import warnings from itertools import accumulate from typing import Callable, Dict, List, Optional import torch import torch.nn.functional as F from fairseq.modules.multihead_attention import MultiheadAttention from torch import Tensor def has_parameters(module): try: next(module.parameters()) return True except StopIteration: return False
null
183,808
import argparse
import contextlib
import copy
import importlib
import logging
import os
import sys
import tempfile
import warnings
from itertools import accumulate
from typing import Callable, Dict, List, Optional

import torch
import torch.nn.functional as F
from fairseq.modules.multihead_attention import MultiheadAttention
from torch import Tensor

# optional XLA support; xm stays None when torch_xla is not installed
try:
    import torch_xla.core.xla_model as xm
except ImportError:
    xm = None

def get_rng_state():
    state = {"torch_rng_state": torch.get_rng_state()}
    if xm is not None:
        state["xla_rng_state"] = xm.get_rng_state()
    if torch.cuda.is_available():
        state["cuda_rng_state"] = torch.cuda.get_rng_state()
    return state
null
183,809
import argparse
import contextlib
import copy
import importlib
import logging
import os
import sys
import tempfile
import warnings
from itertools import accumulate
from typing import Callable, Dict, List, Optional

import torch
import torch.nn.functional as F
from fairseq.modules.multihead_attention import MultiheadAttention
from torch import Tensor

# optional XLA support; xm stays None when torch_xla is not installed
try:
    import torch_xla.core.xla_model as xm
except ImportError:
    xm = None

def set_rng_state(state):
    torch.set_rng_state(state["torch_rng_state"])
    if xm is not None:
        xm.set_rng_state(state["xla_rng_state"])
    if torch.cuda.is_available():
        torch.cuda.set_rng_state(state["cuda_rng_state"])
null
183,810
import argparse
import contextlib
import copy
import importlib
import logging
import os
import sys
import tempfile
import warnings
from itertools import accumulate
from typing import Callable, Dict, List, Optional

import torch
import torch.nn.functional as F
from fairseq.modules.multihead_attention import MultiheadAttention
from torch import Tensor

The provided code snippet includes necessary dependencies for implementing the `parse_alignment` function. Write a Python function `def parse_alignment(line)` to solve the following problem: Parses a single line from the alignment file. Args: line (str): String containing the alignment of the format: <src_idx_1>-<tgt_idx_1> <src_idx_2>-<tgt_idx_2> .. <src_idx_m>-<tgt_idx_m>. All indices are 0 indexed. Returns: torch.IntTensor: packed alignments of shape (2 * m). Here is the function:

def parse_alignment(line):
    """
    Parses a single line from the alignment file.

    Args:
        line (str): String containing the alignment of the format:
            <src_idx_1>-<tgt_idx_1> <src_idx_2>-<tgt_idx_2> ..
            <src_idx_m>-<tgt_idx_m>. All indices are 0 indexed.

    Returns:
        torch.IntTensor: packed alignments of shape (2 * m).
    """
    alignments = line.strip().split()
    parsed_alignment = torch.IntTensor(2 * len(alignments))
    for idx, alignment in enumerate(alignments):
        src_idx, tgt_idx = alignment.split("-")
        parsed_alignment[2 * idx] = int(src_idx)
        parsed_alignment[2 * idx + 1] = int(tgt_idx)
    return parsed_alignment
Parses a single line from the alignment file. Args: line (str): String containing the alignment of the format: <src_idx_1>-<tgt_idx_1> <src_idx_2>-<tgt_idx_2> .. <src_idx_m>-<tgt_idx_m>. All indices are 0 indexed. Returns: torch.IntTensor: packed alignments of shape (2 * m).
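A worked example (added): three source-target pairs flatten into one IntTensor of length 6.

print(parse_alignment("0-0 1-2 2-1"))
# tensor([0, 0, 1, 2, 2, 1], dtype=torch.int32)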
183,811
import argparse import contextlib import copy import importlib import logging import os import sys import tempfile import warnings from itertools import accumulate from typing import Callable, Dict, List, Optional import torch import torch.nn.functional as F from fairseq.modules.multihead_attention import MultiheadAttention from torch import Tensor def item(tensor): if hasattr(tensor, "item"): return tensor.item() if hasattr(tensor, "__getitem__"): return tensor[0] return tensor def get_token_to_word_mapping(tokens, exclude_list): n = len(tokens) word_start = [int(token not in exclude_list) for token in tokens] word_idx = list(accumulate(word_start)) token_to_word = {i: word_idx[i] for i in range(n)} return token_to_word def extract_hard_alignment(attn, src_sent, tgt_sent, pad, eos): tgt_valid = ( ((tgt_sent != pad) & (tgt_sent != eos)).nonzero(as_tuple=False).squeeze(dim=-1) ) src_invalid = ( ((src_sent == pad) | (src_sent == eos)).nonzero(as_tuple=False).squeeze(dim=-1) ) src_token_to_word = get_token_to_word_mapping(src_sent, [eos, pad]) tgt_token_to_word = get_token_to_word_mapping(tgt_sent, [eos, pad]) alignment = [] if len(tgt_valid) != 0 and len(src_invalid) < len(src_sent): attn_valid = attn[tgt_valid] attn_valid[:, src_invalid] = float("-inf") _, src_indices = attn_valid.max(dim=1) for tgt_idx, src_idx in zip(tgt_valid, src_indices): alignment.append( ( src_token_to_word[src_idx.item()] - 1, tgt_token_to_word[tgt_idx.item()] - 1, ) ) return alignment
null
183,812
import argparse import contextlib import copy import importlib import logging import os import sys import tempfile import warnings from itertools import accumulate from typing import Callable, Dict, List, Optional import torch import torch.nn.functional as F from fairseq.modules.multihead_attention import MultiheadAttention from torch import Tensor def extract_soft_alignment(attn, src_sent, tgt_sent, pad, eos): tgt_valid = ( ((tgt_sent != pad)).nonzero(as_tuple=False) ) src_valid = ( ((src_sent != pad)).nonzero(as_tuple=False).squeeze(dim=-1) ) alignment = [] if len(tgt_valid) != 0 and len(src_valid) != 0: attn_valid = attn[tgt_valid, src_valid] alignment = [ ["{:.6f}".format(p) for p in src_probs.tolist()] for src_probs in attn_valid ] return alignment
null
183,813
import argparse import contextlib import copy import importlib import logging import os import sys import tempfile import warnings from itertools import accumulate from typing import Callable, Dict, List, Optional import torch import torch.nn.functional as F from fairseq.modules.multihead_attention import MultiheadAttention from torch import Tensor The provided code snippet includes necessary dependencies for implementing the `new_arange` function. Write a Python function `def new_arange(x, *size)` to solve the following problem: Return a Tensor of `size` filled with a range function on the device of x. If size is empty, using the size of the variable x. Here is the function: def new_arange(x, *size): """ Return a Tensor of `size` filled with a range function on the device of x. If size is empty, using the size of the variable x. """ if len(size) == 0: size = x.size() return torch.arange(size[-1], device=x.device).expand(*size).contiguous()
Return a Tensor of `size` filled with a range function on the device of x. If size is empty, using the size of the variable x.
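Two added illustrations: with no size argument the range matches x's own shape; with an explicit size, the last dimension sets the range.

import torch

x = torch.zeros(2, 3)
print(new_arange(x))     # tensor([[0, 1, 2], [0, 1, 2]])
print(new_arange(x, 4))  # tensor([0, 1, 2, 3])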
183,814
import argparse
import contextlib
import copy
import importlib
import logging
import os
import sys
import tempfile
import warnings
from itertools import accumulate
from typing import Callable, Dict, List, Optional

import torch
import torch.nn.functional as F
from fairseq.modules.multihead_attention import MultiheadAttention
from torch import Tensor

def get_tpu_device():
    # torch_xla is an optional dependency; import lazily so CPU/GPU-only
    # installs do not fail at import time
    import torch_xla.core.xla_model as xm

    return xm.xla_device()
null
183,815
import argparse import contextlib import copy import importlib import logging import os import sys import tempfile import warnings from itertools import accumulate from typing import Callable, Dict, List, Optional import torch import torch.nn.functional as F from fairseq.modules.multihead_attention import MultiheadAttention from torch import Tensor try: import torch_xla.core.xla_model as xm except ImportError: xm = None def tpu_data_loader(itr): import torch_xla.core.xla_model as xm import torch_xla.distributed.parallel_loader as pl from fairseq.data import iterators xm.rendezvous("tpu_data_loader") # wait for all workers xm.mark_step() device = xm.xla_device() return iterators.CountingIterator( pl.ParallelLoader(itr, [device]).per_device_loader(device), start=getattr(itr, "n", 0), total=len(itr), )
null
183,816
import argparse import contextlib import copy import importlib import logging import os import sys import tempfile import warnings from itertools import accumulate from typing import Callable, Dict, List, Optional import torch import torch.nn.functional as F from fairseq.modules.multihead_attention import MultiheadAttention from torch import Tensor def csv_str_list(x): return x.split(",")
null
183,817
import argparse import contextlib import copy import importlib import logging import os import sys import tempfile import warnings from itertools import accumulate from typing import Callable, Dict, List, Optional import torch import torch.nn.functional as F from fairseq.modules.multihead_attention import MultiheadAttention from torch import Tensor def eval_str_list(x, type=float): if x is None: return None if isinstance(x, str): x = eval(x) try: return list(map(type, x)) except TypeError: return [type(x)]
null
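A few added examples (note the helper calls eval on string input, so it should only ever see trusted values):

print(eval_str_list("[0.1, 0.2]"))      # [0.1, 0.2]
print(eval_str_list("0.5"))             # [0.5] -- a scalar is wrapped in a list
print(eval_str_list([1, 2], type=int))  # [1, 2]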
183,818
import argparse import contextlib import copy import importlib import logging import os import sys import tempfile import warnings from itertools import accumulate from typing import Callable, Dict, List, Optional import torch import torch.nn.functional as F from fairseq.modules.multihead_attention import MultiheadAttention from torch import Tensor def eval_str_dict(x, type=dict): if x is None: return None if isinstance(x, str): x = eval(x) return x
null
183,819
import argparse import contextlib import copy import importlib import logging import os import sys import tempfile import warnings from itertools import accumulate from typing import Callable, Dict, List, Optional import torch import torch.nn.functional as F from fairseq.modules.multihead_attention import MultiheadAttention from torch import Tensor def eval_bool(x, default=False): if x is None: return default try: return bool(eval(x)) except TypeError: return default
null
183,820
import os from collections import Counter import torch from fairseq.file_io import PathManager from fairseq.tokenizer import tokenize_line from typing import List, Dict def safe_readline(f): pos = f.tell() while True: try: return f.readline() except UnicodeDecodeError: pos -= 1 f.seek(pos) # search where this character begins
null
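An added sketch of the intended scenario: fairseq seeks workers to raw byte offsets in a UTF-8 file, which can land inside a multi-byte character; safe_readline backs up until decoding succeeds. The demo file below is invented.

import tempfile

with tempfile.NamedTemporaryFile("w", encoding="utf-8", delete=False) as f:
    f.write("héllo world\n")
    path = f.name

with open(path, "r", encoding="utf-8") as f:
    f.seek(2)  # byte offset inside the two-byte 'é'
    print(safe_readline(f))  # steps back one byte, then reads 'éllo world'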
183,824
import atexit import json import logging import os import sys from collections import OrderedDict from contextlib import contextmanager from numbers import Number from typing import Optional import torch from .meters import AverageMeter, StopwatchMeter, TimeMeter def progress_bar( iterator, log_format: Optional[str] = None, log_interval: int = 100, epoch: Optional[int] = None, prefix: Optional[str] = None, tensorboard_logdir: Optional[str] = None, default_log_format: str = "tqdm", wandb_project: Optional[str] = None, wandb_run_name: Optional[str] = None, azureml_logging: Optional[bool] = False, ): if log_format is None: log_format = default_log_format if log_format == "tqdm" and not sys.stderr.isatty(): log_format = "simple" if log_format == "json": bar = JsonProgressBar(iterator, epoch, prefix, log_interval) elif log_format == "none": bar = NoopProgressBar(iterator, epoch, prefix) elif log_format == "simple": bar = SimpleProgressBar(iterator, epoch, prefix, log_interval) elif log_format == "tqdm": bar = TqdmProgressBar(iterator, epoch, prefix) else: raise ValueError("Unknown log format: {}".format(log_format)) if tensorboard_logdir: try: # [FB only] custom wrapper for TensorBoard import palaas # noqa from .fb_tbmf_wrapper import FbTbmfWrapper bar = FbTbmfWrapper(bar, log_interval) except ImportError: bar = TensorboardProgressBarWrapper(bar, tensorboard_logdir) if wandb_project: bar = WandBProgressBarWrapper(bar, wandb_project, run_name=wandb_run_name) if azureml_logging: bar = AzureMLProgressBarWrapper(bar) return bar The provided code snippet includes necessary dependencies for implementing the `build_progress_bar` function. Write a Python function `def build_progress_bar( args, iterator, epoch: Optional[int] = None, prefix: Optional[str] = None, default: str = "tqdm", no_progress_bar: str = "none", )` to solve the following problem: Legacy wrapper that takes an argparse.Namespace. Here is the function: def build_progress_bar( args, iterator, epoch: Optional[int] = None, prefix: Optional[str] = None, default: str = "tqdm", no_progress_bar: str = "none", ): """Legacy wrapper that takes an argparse.Namespace.""" if getattr(args, "no_progress_bar", False): default = no_progress_bar if getattr(args, "distributed_rank", 0) == 0: tensorboard_logdir = getattr(args, "tensorboard_logdir", None) else: tensorboard_logdir = None return progress_bar( iterator, log_format=args.log_format, log_interval=args.log_interval, epoch=epoch, prefix=prefix, tensorboard_logdir=tensorboard_logdir, default_log_format=default, )
Legacy wrapper that takes an argparse.Namespace.
183,828
import contextlib import time import uuid from collections import OrderedDict, defaultdict from typing import Callable, Dict, List, Optional from .meters import * _aggregators = OrderedDict() _active_aggregators = OrderedDict() _active_aggregators_cnt = defaultdict(lambda: 0) class MetersDict(OrderedDict): """A sorted dictionary of :class:`Meters`. Meters are sorted according to a priority that is given when the meter is first added to the dictionary. """ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.priorities = [] def __setitem__(self, key, value): assert key not in self, "MetersDict doesn't support reassignment" priority, value = value bisect.insort(self.priorities, (priority, len(self.priorities), key)) super().__setitem__(key, value) for _, _, key in self.priorities: # reorder dict to match priorities self.move_to_end(key) def add_meter(self, key, meter, priority): self.__setitem__(key, (priority, meter)) def state_dict(self): return [ (pri, key, self[key].__class__.__name__, self[key].state_dict()) for pri, _, key in self.priorities # can't serialize DerivedMeter instances if not isinstance(self[key], MetersDict._DerivedMeter) ] def load_state_dict(self, state_dict): self.clear() self.priorities.clear() for pri, key, meter_cls, meter_state in state_dict: meter = globals()[meter_cls]() meter.load_state_dict(meter_state) self.add_meter(key, meter, pri) def get_smoothed_value(self, key: str) -> float: """Get a single smoothed value.""" meter = self[key] if isinstance(meter, MetersDict._DerivedMeter): return meter.fn(self) else: return meter.smoothed_value def get_smoothed_values(self) -> Dict[str, float]: """Get all smoothed values.""" return OrderedDict( [ (key, self.get_smoothed_value(key)) for key in self.keys() if not key.startswith("_") ] ) def reset(self): """Reset Meter instances.""" for meter in self.values(): if isinstance(meter, MetersDict._DerivedMeter): continue meter.reset() class _DerivedMeter(Meter): """A Meter whose values are derived from other Meters.""" def __init__(self, fn): self.fn = fn def reset(self): pass The provided code snippet includes necessary dependencies for implementing the `aggregate` function. Write a Python function `def aggregate(name: Optional[str] = None, new_root: bool = False)` to solve the following problem: Context manager to aggregate metrics under a given name. Aggregations can be nested. If *new_root* is ``False``, then logged metrics will be recorded along the entire stack of nested aggregators, including a global "default" aggregator. If *new_root* is ``True``, then this aggregator will be the root of a new aggregation stack, thus bypassing any parent aggregators. Note that aggregation contexts are uniquely identified by their *name* (e.g., train, valid). Creating a context with an existing name will reuse the corresponding :class:`MetersDict` instance. If no name is given, then a temporary aggregator will be created. Usage:: with metrics.aggregate("train"): for step, batch in enumerate(epoch): with metrics.aggregate("train_inner") as agg: metrics.log_scalar("loss", get_loss(batch)) if step % log_interval == 0: print(agg.get_smoothed_value("loss")) agg.reset() print(metrics.get_smoothed_values("train")["loss"]) Args: name (str): name of the aggregation. Defaults to a random/temporary name if not given explicitly. new_root (bool): make this aggregation the root of a new aggregation stack. 
Here is the function: @contextlib.contextmanager
def aggregate(name: Optional[str] = None, new_root: bool = False):
    """Context manager to aggregate metrics under a given name.

    Aggregations can be nested. If *new_root* is ``False``, then logged
    metrics will be recorded along the entire stack of nested
    aggregators, including a global "default" aggregator. If *new_root*
    is ``True``, then this aggregator will be the root of a new
    aggregation stack, thus bypassing any parent aggregators.

    Note that aggregation contexts are uniquely identified by their
    *name* (e.g., train, valid). Creating a context with an existing
    name will reuse the corresponding :class:`MetersDict` instance.
    If no name is given, then a temporary aggregator will be created.

    Usage::

        with metrics.aggregate("train"):
            for step, batch in enumerate(epoch):
                with metrics.aggregate("train_inner") as agg:
                    metrics.log_scalar("loss", get_loss(batch))
                    if step % log_interval == 0:
                        print(agg.get_smoothed_value("loss"))
                        agg.reset()
        print(metrics.get_smoothed_values("train")["loss"])

    Args:
        name (str): name of the aggregation. Defaults to a
            random/temporary name if not given explicitly.
        new_root (bool): make this aggregation the root of a new
            aggregation stack.
    """
    if name is None:
        # generate a temporary name
        name = str(uuid.uuid4())
        assert name not in _aggregators
        agg = MetersDict()
    else:
        assert name != "default"
        agg = _aggregators.setdefault(name, MetersDict())

    if new_root:
        backup_aggregators = _active_aggregators.copy()
        _active_aggregators.clear()
        backup_aggregators_cnt = _active_aggregators_cnt.copy()
        _active_aggregators_cnt.clear()

    _active_aggregators[name] = agg
    _active_aggregators_cnt[name] += 1

    yield agg

    _active_aggregators_cnt[name] -= 1
    if _active_aggregators_cnt[name] == 0 and name in _active_aggregators:
        del _active_aggregators[name]

    if new_root:
        _active_aggregators.clear()
        _active_aggregators.update(backup_aggregators)
        _active_aggregators_cnt.clear()
        _active_aggregators_cnt.update(backup_aggregators_cnt)
Context manager to aggregate metrics under a given name. Aggregations can be nested. If *new_root* is ``False``, then logged metrics will be recorded along the entire stack of nested aggregators, including a global "default" aggregator. If *new_root* is ``True``, then this aggregator will be the root of a new aggregation stack, thus bypassing any parent aggregators. Note that aggregation contexts are uniquely identified by their *name* (e.g., train, valid). Creating a context with an existing name will reuse the corresponding :class:`MetersDict` instance. If no name is given, then a temporary aggregator will be created. Usage:: with metrics.aggregate("train"): for step, batch in enumerate(epoch): with metrics.aggregate("train_inner") as agg: metrics.log_scalar("loss", get_loss(batch)) if step % log_interval == 0: print(agg.get_smoothed_value("loss")) agg.reset() print(metrics.get_smoothed_values("train")["loss"]) Args: name (str): name of the aggregation. Defaults to a random/temporary name if not given explicitly. new_root (bool): make this aggregation the root of a new aggregation stack.
183,829
import contextlib import time import uuid from collections import OrderedDict, defaultdict from typing import Callable, Dict, List, Optional from .meters import * def get_active_aggregators() -> List[MetersDict]: return list(_active_aggregators.values()) class AverageMeter(Meter): """Computes and stores the average and current value""" def __init__(self, round: Optional[int] = None): self.round = round self.reset() def reset(self): self.val = None # most recent update self.sum = 0 # sum from all updates self.count = 0 # total n from all updates def update(self, val, n=1): if val is not None: self.val = val if n > 0: self.sum = type_as(self.sum, val) + (val * n) self.count = type_as(self.count, n) + n def state_dict(self): return { "val": self.val, "sum": self.sum, "count": self.count, "round": self.round, } def load_state_dict(self, state_dict): self.val = state_dict["val"] self.sum = state_dict["sum"] self.count = state_dict["count"] self.round = state_dict.get("round", None) def avg(self): return self.sum / self.count if self.count > 0 else self.val def smoothed_value(self) -> float: val = self.avg if self.round is not None and val is not None: val = safe_round(val, self.round) return val The provided code snippet includes necessary dependencies for implementing the `log_scalar` function. Write a Python function `def log_scalar( key: str, value: float, weight: float = 1, priority: int = 10, round: Optional[int] = None, )` to solve the following problem: Log a scalar value. Args: key (str): name of the field to log value (float): value to log weight (float): weight that this value contributes to the average. A weight of 0 will always log the latest value. priority (int): smaller values are logged earlier in the output round (Optional[int]): number of digits to round to when displaying Here is the function: def log_scalar( key: str, value: float, weight: float = 1, priority: int = 10, round: Optional[int] = None, ): """Log a scalar value. Args: key (str): name of the field to log value (float): value to log weight (float): weight that this value contributes to the average. A weight of 0 will always log the latest value. priority (int): smaller values are logged earlier in the output round (Optional[int]): number of digits to round to when displaying """ for agg in get_active_aggregators(): if key not in agg: agg.add_meter(key, AverageMeter(round=round), priority) agg[key].update(value, weight)
Log a scalar value. Args: key (str): name of the field to log value (float): value to log weight (float): weight that this value contributes to the average. A weight of 0 will always log the latest value. priority (int): smaller values are logged earlier in the output round (Optional[int]): number of digits to round to when displaying
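An added end-to-end sketch (requires fairseq installed; the values are invented) showing how weight shapes the running average:

from fairseq.logging import metrics

with metrics.aggregate("valid"):
    metrics.log_scalar("nll_loss", 2.0, weight=1)
    metrics.log_scalar("nll_loss", 4.0, weight=3)
print(metrics.get_smoothed_values("valid")["nll_loss"])  # 3.5, the weighted average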
183,830
import contextlib import time import uuid from collections import OrderedDict, defaultdict from typing import Callable, Dict, List, Optional from .meters import * def get_active_aggregators() -> List[MetersDict]: return list(_active_aggregators.values()) class MetersDict(OrderedDict): """A sorted dictionary of :class:`Meters`. Meters are sorted according to a priority that is given when the meter is first added to the dictionary. """ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.priorities = [] def __setitem__(self, key, value): assert key not in self, "MetersDict doesn't support reassignment" priority, value = value bisect.insort(self.priorities, (priority, len(self.priorities), key)) super().__setitem__(key, value) for _, _, key in self.priorities: # reorder dict to match priorities self.move_to_end(key) def add_meter(self, key, meter, priority): self.__setitem__(key, (priority, meter)) def state_dict(self): return [ (pri, key, self[key].__class__.__name__, self[key].state_dict()) for pri, _, key in self.priorities # can't serialize DerivedMeter instances if not isinstance(self[key], MetersDict._DerivedMeter) ] def load_state_dict(self, state_dict): self.clear() self.priorities.clear() for pri, key, meter_cls, meter_state in state_dict: meter = globals()[meter_cls]() meter.load_state_dict(meter_state) self.add_meter(key, meter, pri) def get_smoothed_value(self, key: str) -> float: """Get a single smoothed value.""" meter = self[key] if isinstance(meter, MetersDict._DerivedMeter): return meter.fn(self) else: return meter.smoothed_value def get_smoothed_values(self) -> Dict[str, float]: """Get all smoothed values.""" return OrderedDict( [ (key, self.get_smoothed_value(key)) for key in self.keys() if not key.startswith("_") ] ) def reset(self): """Reset Meter instances.""" for meter in self.values(): if isinstance(meter, MetersDict._DerivedMeter): continue meter.reset() class _DerivedMeter(Meter): """A Meter whose values are derived from other Meters.""" def __init__(self, fn): self.fn = fn def reset(self): pass The provided code snippet includes necessary dependencies for implementing the `log_derived` function. Write a Python function `def log_derived(key: str, fn: Callable[[MetersDict], float], priority: int = 20)` to solve the following problem: Log a scalar value derived from other meters. Args: key (str): name of the field to log fn (Callable[[MetersDict], float]): function that takes a single argument *meters* and returns the derived value priority (int): smaller values are logged earlier in the output Here is the function: def log_derived(key: str, fn: Callable[[MetersDict], float], priority: int = 20): """Log a scalar value derived from other meters. Args: key (str): name of the field to log fn (Callable[[MetersDict], float]): function that takes a single argument *meters* and returns the derived value priority (int): smaller values are logged earlier in the output """ for agg in get_active_aggregators(): if key not in agg: agg.add_meter(key, MetersDict._DerivedMeter(fn), priority)
Log a scalar value derived from other meters. Args: key (str): name of the field to log fn (Callable[[MetersDict], float]): function that takes a single argument *meters* and returns the derived value priority (int): smaller values are logged earlier in the output
183,831
import contextlib import time import uuid from collections import OrderedDict, defaultdict from typing import Callable, Dict, List, Optional from .meters import * def reset() -> None: """Reset all metrics aggregators.""" _aggregators.clear() _active_aggregators.clear() _active_aggregators_cnt.clear() # The "default" aggregator observes all logged values. _aggregators["default"] = MetersDict() _active_aggregators["default"] = _aggregators["default"] _active_aggregators_cnt["default"] = 1 reset() def get_active_aggregators() -> List[MetersDict]: return list(_active_aggregators.values()) class TimeMeter(Meter): """Computes the average occurrence of some event per second""" def __init__( self, init: int = 0, n: int = 0, round: Optional[int] = None, ): self.round = round self.reset(init, n) def reset(self, init=0, n=0): self.init = init self.start = time.perf_counter() self.n = n self.i = 0 def update(self, val=1): self.n = type_as(self.n, val) + val self.i += 1 def state_dict(self): return { "init": self.elapsed_time, "n": self.n, "round": self.round, } def load_state_dict(self, state_dict): if "start" in state_dict: # backwards compatibility for old state_dicts self.reset(init=state_dict["init"]) else: self.reset(init=state_dict["init"], n=state_dict["n"]) self.round = state_dict.get("round", None) def avg(self): return self.n / self.elapsed_time def elapsed_time(self): return self.init + (time.perf_counter() - self.start) def smoothed_value(self) -> float: val = self.avg if self.round is not None and val is not None: val = safe_round(val, self.round) return val The provided code snippet includes necessary dependencies for implementing the `log_speed` function. Write a Python function `def log_speed( key: str, value: float, priority: int = 30, round: Optional[int] = None, )` to solve the following problem: Log the rate of some quantity per second. Args: key (str): name of the field to log value (float): value to log priority (int): smaller values are logged earlier in the output round (Optional[int]): number of digits to round to when displaying Here is the function: def log_speed( key: str, value: float, priority: int = 30, round: Optional[int] = None, ): """Log the rate of some quantity per second. Args: key (str): name of the field to log value (float): value to log priority (int): smaller values are logged earlier in the output round (Optional[int]): number of digits to round to when displaying """ for agg in get_active_aggregators(): if key not in agg: agg.add_meter(key, TimeMeter(round=round), priority) agg[key].reset() # reset meter on the first call else: agg[key].update(value)
Log the rate of some quantity per second. Args: key (str): name of the field to log value (float): value to log priority (int): smaller values are logged earlier in the output round (Optional[int]): number of digits to round to when displaying
183,832
import contextlib import time import uuid from collections import OrderedDict, defaultdict from typing import Callable, Dict, List, Optional from .meters import * def get_active_aggregators() -> List[MetersDict]: return list(_active_aggregators.values()) class StopwatchMeter(Meter): """Computes the sum/avg duration of some event in seconds""" def __init__(self, round: Optional[int] = None): self.round = round self.sum = 0 self.n = 0 self.start_time = None def start(self): self.start_time = time.perf_counter() def stop(self, n=1, prehook=None): if self.start_time is not None: if prehook is not None: prehook() delta = time.perf_counter() - self.start_time self.sum = self.sum + delta self.n = type_as(self.n, n) + n def reset(self): self.sum = 0 # cumulative time during which stopwatch was active self.n = 0 # total n across all start/stop self.start() def state_dict(self): return { "sum": self.sum, "n": self.n, "round": self.round, } def load_state_dict(self, state_dict): self.sum = state_dict["sum"] self.n = state_dict["n"] self.start_time = None self.round = state_dict.get("round", None) def avg(self): return self.sum / self.n if self.n > 0 else self.sum def elapsed_time(self): if self.start_time is None: return 0.0 return time.perf_counter() - self.start_time def smoothed_value(self) -> float: val = self.avg if self.sum > 0 else self.elapsed_time if self.round is not None and val is not None: val = safe_round(val, self.round) return val The provided code snippet includes necessary dependencies for implementing the `log_start_time` function. Write a Python function `def log_start_time(key: str, priority: int = 40, round: Optional[int] = None)` to solve the following problem: Log the duration of some event in seconds. The duration will be computed once :func:`log_stop_time` is called. Args: key (str): name of the field to log priority (int): smaller values are logged earlier in the output round (Optional[int]): number of digits to round to when displaying Here is the function: def log_start_time(key: str, priority: int = 40, round: Optional[int] = None): """Log the duration of some event in seconds. The duration will be computed once :func:`log_stop_time` is called. Args: key (str): name of the field to log priority (int): smaller values are logged earlier in the output round (Optional[int]): number of digits to round to when displaying """ for agg in get_active_aggregators(): if key not in agg: agg.add_meter(key, StopwatchMeter(round=round), priority) agg[key].start()
Log the duration of some event in seconds. The duration will be computed once :func:`log_stop_time` is called. Args: key (str): name of the field to log priority (int): smaller values are logged earlier in the output round (Optional[int]): number of digits to round to when displaying
183,833
import contextlib import time import uuid from collections import OrderedDict, defaultdict from typing import Callable, Dict, List, Optional from .meters import * def get_active_aggregators() -> List[MetersDict]: return list(_active_aggregators.values()) The provided code snippet includes necessary dependencies for implementing the `log_stop_time` function. Write a Python function `def log_stop_time(key: str, weight: float = 0.0, prehook=None)` to solve the following problem: Log the duration of some event in seconds. The duration will be computed since :func:`log_start_time` was called. Set weight > 0 to report the average time instead of the sum. Args: key (str): name of the field to log weight (float): weight that this time contributes to the average prehook (function, no arguments): will be called before the timer is stopped. For example, use prehook=torch.cuda.synchronize to make sure all gpu operations are done before timer is stopped. Here is the function: def log_stop_time(key: str, weight: float = 0.0, prehook=None): """Log the duration of some event in seconds. The duration will be computed since :func:`log_start_time` was called. Set weight > 0 to report the average time instead of the sum. Args: key (str): name of the field to log weight (float): weight that this time contributes to the average prehook (function, no arguments): will be called before the timer is stopped. For example, use prehook=torch.cuda.synchronize to make sure all gpu operations are done before timer is stopped. """ for agg in get_active_aggregators(): if key in agg: agg[key].stop(weight, prehook)
Log the duration of some event in seconds. The duration will be computed since :func:`log_start_time` was called. Set weight > 0 to report the average time instead of the sum. Args: key (str): name of the field to log weight (float): weight that this time contributes to the average prehook (function, no arguments): will be called before the timer is stopped. For example, use prehook=torch.cuda.synchronize to make sure all gpu operations are done before timer is stopped.
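An added sketch pairing log_start_time with log_stop_time (requires fairseq installed; the sleep stands in for real work):

import time

from fairseq.logging import metrics

with metrics.aggregate("train") as agg:
    metrics.log_start_time("wall", round=2)
    time.sleep(0.1)
    metrics.log_stop_time("wall")
    print(agg.get_smoothed_value("wall"))  # roughly 0.1 seconds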
183,834
import contextlib import time import uuid from collections import OrderedDict, defaultdict from typing import Callable, Dict, List, Optional from .meters import * def get_active_aggregators() -> List[MetersDict]: return list(_active_aggregators.values()) class Meter(object): """Base class for Meters.""" def __init__(self): pass def state_dict(self): return {} def load_state_dict(self, state_dict): pass def reset(self): raise NotImplementedError def smoothed_value(self) -> float: """Smoothed value used for logging.""" raise NotImplementedError The provided code snippet includes necessary dependencies for implementing the `log_custom` function. Write a Python function `def log_custom( new_meter_fn: Callable[[], Meter], key: str, *args, priority: int = 50, **kwargs, )` to solve the following problem: Log using a custom Meter. Any extra *args* or *kwargs* will be passed through to the Meter's *update* method. Args: new_meter_fn (Callable[[], Meter]): function that returns a new Meter instance key (str): name of the field to log priority (int): smaller values are logged earlier in the output Here is the function: def log_custom( new_meter_fn: Callable[[], Meter], key: str, *args, priority: int = 50, **kwargs, ): """Log using a custom Meter. Any extra *args* or *kwargs* will be passed through to the Meter's *update* method. Args: new_meter_fn (Callable[[], Meter]): function that returns a new Meter instance key (str): name of the field to log priority (int): smaller values are logged earlier in the output """ for agg in get_active_aggregators(): if key not in agg: agg.add_meter(key, new_meter_fn(), priority) agg[key].update(*args, **kwargs)
Log using a custom Meter. Any extra *args* or *kwargs* will be passed through to the Meter's *update* method. Args: new_meter_fn (Callable[[], Meter]): function that returns a new Meter instance key (str): name of the field to log priority (int): smaller values are logged earlier in the output
183,835
import contextlib import time import uuid from collections import OrderedDict, defaultdict from typing import Callable, Dict, List, Optional from .meters import * def reset() -> None: """Reset all metrics aggregators.""" _aggregators.clear() _active_aggregators.clear() _active_aggregators_cnt.clear() # The "default" aggregator observes all logged values. _aggregators["default"] = MetersDict() _active_aggregators["default"] = _aggregators["default"] _active_aggregators_cnt["default"] = 1 reset() def get_meter(name: str, key: str) -> Meter: """Get a single Meter instance aggregated under *name* and *key*. Returns: Meter or None if no metrics have been logged under *name* and *key*. """ if name not in _aggregators: return None return _aggregators[name].get(key, None) The provided code snippet includes necessary dependencies for implementing the `reset_meter` function. Write a Python function `def reset_meter(name: str, key: str) -> None` to solve the following problem: Reset Meter instance aggregated under a given *name* and *key*. Here is the function: def reset_meter(name: str, key: str) -> None: """Reset Meter instance aggregated under a given *name* and *key*.""" meter = get_meter(name, key) if meter is not None: meter.reset()
Reset Meter instance aggregated under a given *name* and *key*.
183,836
import contextlib import time import uuid from collections import OrderedDict, defaultdict from typing import Callable, Dict, List, Optional from .meters import * def reset() -> None: """Reset all metrics aggregators.""" _aggregators.clear() _active_aggregators.clear() _active_aggregators_cnt.clear() # The "default" aggregator observes all logged values. _aggregators["default"] = MetersDict() _active_aggregators["default"] = _aggregators["default"] _active_aggregators_cnt["default"] = 1 reset() def get_meters(name: str) -> MetersDict: """Get Meter instances aggregated under a given *name*. Returns: MetersDict or None if no metrics have been logged under *name*. """ return _aggregators.get(name, None) The provided code snippet includes necessary dependencies for implementing the `reset_meters` function. Write a Python function `def reset_meters(name: str) -> None` to solve the following problem: Reset Meter instances aggregated under a given *name*. Here is the function: def reset_meters(name: str) -> None: """Reset Meter instances aggregated under a given *name*.""" meters = get_meters(name) if meters is not None: meters.reset()
Reset Meter instances aggregated under a given *name*.
183,837
import contextlib import time import uuid from collections import OrderedDict, defaultdict from typing import Callable, Dict, List, Optional from .meters import * _aggregators = OrderedDict() The provided code snippet includes necessary dependencies for implementing the `get_smoothed_value` function. Write a Python function `def get_smoothed_value(name: str, key: str) -> float` to solve the following problem: Get a single smoothed value. Raises: KeyError: if no metrics have been logged under *name* and *key*. Here is the function: def get_smoothed_value(name: str, key: str) -> float: """Get a single smoothed value. Raises: KeyError: if no metrics have been logged under *name* and *key*. """ return _aggregators[name].get_smoothed_value(key)
Get a single smoothed value. Raises: KeyError: if no metrics have been logged under *name* and *key*.
183,838
import contextlib import time import uuid from collections import OrderedDict, defaultdict from typing import Callable, Dict, List, Optional from .meters import * _aggregators = OrderedDict() The provided code snippet includes necessary dependencies for implementing the `get_smoothed_values` function. Write a Python function `def get_smoothed_values(name: str) -> Dict[str, float]` to solve the following problem: Get smoothed values aggregated under a given *name*. Raises: KeyError: if no metrics have been logged under *name*. Here is the function: def get_smoothed_values(name: str) -> Dict[str, float]: """Get smoothed values aggregated under a given *name*. Raises: KeyError: if no metrics have been logged under *name*. """ return _aggregators[name].get_smoothed_values()
Get smoothed values aggregated under a given *name*. Raises: KeyError: if no metrics have been logged under *name*.
183,839
import contextlib import time import uuid from collections import OrderedDict, defaultdict from typing import Callable, Dict, List, Optional from .meters import * _aggregators = OrderedDict() def state_dict(): return OrderedDict([(name, agg.state_dict()) for name, agg in _aggregators.items()])
null
183,840
import contextlib import time import uuid from collections import OrderedDict, defaultdict from typing import Callable, Dict, List, Optional from .meters import * _aggregators = OrderedDict() class MetersDict(OrderedDict): """A sorted dictionary of :class:`Meters`. Meters are sorted according to a priority that is given when the meter is first added to the dictionary. """ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.priorities = [] def __setitem__(self, key, value): assert key not in self, "MetersDict doesn't support reassignment" priority, value = value bisect.insort(self.priorities, (priority, len(self.priorities), key)) super().__setitem__(key, value) for _, _, key in self.priorities: # reorder dict to match priorities self.move_to_end(key) def add_meter(self, key, meter, priority): self.__setitem__(key, (priority, meter)) def state_dict(self): return [ (pri, key, self[key].__class__.__name__, self[key].state_dict()) for pri, _, key in self.priorities # can't serialize DerivedMeter instances if not isinstance(self[key], MetersDict._DerivedMeter) ] def load_state_dict(self, state_dict): self.clear() self.priorities.clear() for pri, key, meter_cls, meter_state in state_dict: meter = globals()[meter_cls]() meter.load_state_dict(meter_state) self.add_meter(key, meter, pri) def get_smoothed_value(self, key: str) -> float: """Get a single smoothed value.""" meter = self[key] if isinstance(meter, MetersDict._DerivedMeter): return meter.fn(self) else: return meter.smoothed_value def get_smoothed_values(self) -> Dict[str, float]: """Get all smoothed values.""" return OrderedDict( [ (key, self.get_smoothed_value(key)) for key in self.keys() if not key.startswith("_") ] ) def reset(self): """Reset Meter instances.""" for meter in self.values(): if isinstance(meter, MetersDict._DerivedMeter): continue meter.reset() class _DerivedMeter(Meter): """A Meter whose values are derived from other Meters.""" def __init__(self, fn): self.fn = fn def reset(self): pass def load_state_dict(state_dict): for name, agg_state in state_dict.items(): _aggregators[name] = MetersDict() _aggregators[name].load_state_dict(agg_state)
null
183,841
import contextlib import logging import sys import time from argparse import Namespace from itertools import chain from typing import Any, Dict, List import torch from fairseq import checkpoint_utils, models, optim, utils from fairseq.dataclass.configs import FairseqConfig from fairseq.dataclass.utils import convert_namespace_to_omegaconf from fairseq.distributed import utils as distributed_utils from fairseq.file_io import PathManager from fairseq.logging import meters, metrics from fairseq.nan_detector import NanDetector from fairseq.optim import lr_scheduler def _catalog_shared_params(module, memo=None, prefix=""): if memo is None: first_call = True memo = {} else: first_call = False for name, param in module._parameters.items(): param_prefix = prefix + ("." if prefix else "") + name if param not in memo: memo[param] = [] memo[param].append(param_prefix) for name, m in module._modules.items(): if m is None: continue submodule_prefix = prefix + ("." if prefix else "") + name _catalog_shared_params(m, memo, submodule_prefix) if first_call: return [x for x in memo.values() if len(x) > 1]
null
183,842
import contextlib import logging import sys import time from argparse import Namespace from itertools import chain from typing import Any, Dict, List import torch from fairseq import checkpoint_utils, models, optim, utils from fairseq.dataclass.configs import FairseqConfig from fairseq.dataclass.utils import convert_namespace_to_omegaconf from fairseq.distributed import utils as distributed_utils from fairseq.file_io import PathManager from fairseq.logging import meters, metrics from fairseq.nan_detector import NanDetector from fairseq.optim import lr_scheduler def _get_module_by_path(module, path): path = path.split(".") for name in path: module = getattr(module, name) return module
null
183,843
import contextlib import logging import sys import time from argparse import Namespace from itertools import chain from typing import Any, Dict, List import torch from fairseq import checkpoint_utils, models, optim, utils from fairseq.dataclass.configs import FairseqConfig from fairseq.dataclass.utils import convert_namespace_to_omegaconf from fairseq.distributed import utils as distributed_utils from fairseq.file_io import PathManager from fairseq.logging import meters, metrics from fairseq.nan_detector import NanDetector from fairseq.optim import lr_scheduler def _set_module_by_path(module, path, value): path = path.split(".") for name in path[:-1]: module = getattr(module, name) setattr(module, path[-1], value)
null
183,852
import contextlib import itertools import logging import os import warnings from typing import Optional, Tuple import numpy as np import torch from fairseq.file_io import PathManager class PathManager: """ Wrapper for insulating OSS I/O (using Python builtin operations) from iopath's PathManager abstraction (for transparently handling various internal backends). """ def open( path: str, mode: str = "r", buffering: int = -1, encoding: Optional[str] = None, errors: Optional[str] = None, newline: Optional[str] = None, ): if IOPathManager: return IOPathManager.open( path=path, mode=mode, buffering=buffering, encoding=encoding, errors=errors, newline=newline, ) return open( path, mode=mode, buffering=buffering, encoding=encoding, errors=errors, newline=newline, ) def copy(src_path: str, dst_path: str, overwrite: bool = False) -> bool: if IOPathManager: return IOPathManager.copy( src_path=src_path, dst_path=dst_path, overwrite=overwrite ) return shutil.copyfile(src_path, dst_path) def get_local_path(path: str, **kwargs) -> str: if IOPathManager: return IOPathManager.get_local_path(path, **kwargs) return path def exists(path: str) -> bool: if IOPathManager: return IOPathManager.exists(path) return os.path.exists(path) def isfile(path: str) -> bool: if IOPathManager: return IOPathManager.isfile(path) return os.path.isfile(path) def ls(path: str) -> List[str]: if IOPathManager: return IOPathManager.ls(path) return os.listdir(path) def mkdirs(path: str) -> None: if IOPathManager: return IOPathManager.mkdirs(path) os.makedirs(path, exist_ok=True) def rm(path: str) -> None: if IOPathManager: return IOPathManager.rm(path) os.remove(path) def chmod(path: str, mode: int) -> None: if not PathManager.path_requires_pathmanager(path): os.chmod(path, mode) def register_handler(handler) -> None: if IOPathManager: return IOPathManager.register_handler(handler=handler) def copy_from_local( local_path: str, dst_path: str, overwrite: bool = False, **kwargs ) -> None: if IOPathManager: return IOPathManager.copy_from_local( local_path=local_path, dst_path=dst_path, overwrite=overwrite, **kwargs ) return shutil.copyfile(local_path, dst_path) def path_requires_pathmanager(path: str) -> bool: """Do we require PathManager to access given path?""" if IOPathManager: for p in IOPathManager._path_handlers.keys(): if path.startswith(p): return True return False def supports_rename(path: str) -> bool: # PathManager doesn't yet support renames return not PathManager.path_requires_pathmanager(path) def rename(src: str, dst: str): os.rename(src, dst) """ ioPath async PathManager methods: """ def opena( path: str, mode: str = "r", buffering: int = -1, encoding: Optional[str] = None, errors: Optional[str] = None, newline: Optional[str] = None, ): """ Return file descriptor with asynchronous write operations. """ global IOPathManager if not IOPathManager: logging.info("ioPath is initializing PathManager.") try: from iopath.common.file_io import PathManager IOPathManager = PathManager() except Exception: logging.exception("Failed to initialize ioPath PathManager object.") return IOPathManager.opena( path=path, mode=mode, buffering=buffering, encoding=encoding, errors=errors, newline=newline, ) def async_close() -> bool: """ Wait for files to be written and clean up asynchronous PathManager. NOTE: `PathManager.async_close()` must be called at the end of any script that uses `PathManager.opena(...)`. 
""" global IOPathManager if IOPathManager: return IOPathManager.async_close() return False The provided code snippet includes necessary dependencies for implementing the `infer_language_pair` function. Write a Python function `def infer_language_pair(path)` to solve the following problem: Infer language pair from filename: <split>.<lang1>-<lang2>.(...).idx Here is the function: def infer_language_pair(path): """Infer language pair from filename: <split>.<lang1>-<lang2>.(...).idx""" src, dst = None, None for filename in PathManager.ls(path): parts = filename.split(".") if len(parts) >= 3 and len(parts[1].split("-")) == 2: return parts[1].split("-") return src, dst
Infer language pair from filename: <split>.<lang1>-<lang2>.(...).idx
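For intuition, a short sketch of the naming rule this function parses; the filename below is hypothetical.

# "<split>.<lang1>-<lang2>.(...).idx" -> (lang1, lang2)
filename = "train.de-en.de.idx"
parts = filename.split(".")                      # ['train', 'de-en', 'de', 'idx']
if len(parts) >= 3 and len(parts[1].split("-")) == 2:
    src, dst = parts[1].split("-")
print(src, dst)                                  # de en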
183,853
import contextlib import itertools import logging import os import warnings from typing import Optional, Tuple import numpy as np import torch from fairseq.file_io import PathManager logger = logging.getLogger(__name__) class ConcatDataset(FairseqDataset): def cumsum(sequence, sample_ratios): r, s = [], 0 for e, ratio in zip(sequence, sample_ratios): curr_len = int(ratio * len(e)) r.append(curr_len + s) s += curr_len return r def __init__(self, datasets, sample_ratios=1): super(ConcatDataset, self).__init__() assert len(datasets) > 0, "datasets should not be an empty iterable" self.datasets = list(datasets) if isinstance(sample_ratios, int): sample_ratios = [sample_ratios] * len(self.datasets) self.sample_ratios = sample_ratios self.cumulative_sizes = self.cumsum(self.datasets, sample_ratios) self.real_sizes = [len(d) for d in self.datasets] def __len__(self): return self.cumulative_sizes[-1] def __getitem__(self, idx): dataset_idx, sample_idx = self._get_dataset_and_sample_index(idx) return self.datasets[dataset_idx][sample_idx] def _get_dataset_and_sample_index(self, idx: int): dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx) if dataset_idx == 0: sample_idx = idx else: sample_idx = idx - self.cumulative_sizes[dataset_idx - 1] sample_idx = sample_idx % self.real_sizes[dataset_idx] return dataset_idx, sample_idx def collater(self, samples, **extra_args): # For now only supports datasets with same underlying collater implementations if hasattr(self.datasets[0], "collater"): return self.datasets[0].collater(samples, **extra_args) else: return default_collate(samples, **extra_args) def size(self, idx: int): """ Return an example's size as a float or tuple. """ dataset_idx, sample_idx = self._get_dataset_and_sample_index(idx) return self.datasets[dataset_idx].size(sample_idx) def num_tokens(self, index: int): return np.max(self.size(index)) def attr(self, attr: str, index: int): dataset_idx = bisect.bisect_right(self.cumulative_sizes, index) return getattr(self.datasets[dataset_idx], attr, None) def sizes(self): _dataset_sizes = [] for ds, sr in zip(self.datasets, self.sample_ratios): if isinstance(ds.sizes, np.ndarray): _dataset_sizes.append(np.tile(ds.sizes, sr)) else: # Only support underlying dataset with single size array. assert isinstance(ds.sizes, list) _dataset_sizes.append(np.tile(ds.sizes[0], sr)) return np.concatenate(_dataset_sizes) def supports_prefetch(self): return all(d.supports_prefetch for d in self.datasets) def ordered_indices(self): """ Returns indices sorted by length. So less padding is needed. 
""" if isinstance(self.sizes, np.ndarray) and len(self.sizes.shape) > 1: # special handling for concatenating lang_pair_datasets indices = np.arange(len(self)) sizes = self.sizes tgt_sizes = ( sizes[:, 1] if len(sizes.shape) > 0 and sizes.shape[1] > 1 else None ) src_sizes = ( sizes[:, 0] if len(sizes.shape) > 0 and sizes.shape[1] > 1 else sizes ) # sort by target length, then source length if tgt_sizes is not None: indices = indices[np.argsort(tgt_sizes[indices], kind="mergesort")] return indices[np.argsort(src_sizes[indices], kind="mergesort")] else: return np.argsort(self.sizes) def prefetch(self, indices): frm = 0 for to, ds in zip(self.cumulative_sizes, self.datasets): real_size = len(ds) if getattr(ds, "supports_prefetch", False): ds.prefetch([(i - frm) % real_size for i in indices if frm <= i < to]) frm = to def can_reuse_epoch_itr_across_epochs(self): return all(d.can_reuse_epoch_itr_across_epochs for d in self.datasets) def set_epoch(self, epoch): super().set_epoch(epoch) for ds in self.datasets: if hasattr(ds, "set_epoch"): ds.set_epoch(epoch) The provided code snippet includes necessary dependencies for implementing the `load_indexed_dataset` function. Write a Python function `def load_indexed_dataset( path, dictionary=None, dataset_impl=None, combine=False, default="cached" )` to solve the following problem: A helper function for loading indexed datasets. Args: path (str): path to indexed dataset (e.g., 'data-bin/train') dictionary (~fairseq.data.Dictionary): data dictionary dataset_impl (str, optional): which dataset implementation to use. If not provided, it will be inferred automatically. For legacy indexed data we use the 'cached' implementation by default. combine (bool, optional): automatically load and combine multiple datasets. For example, if *path* is 'data-bin/train', then we will combine 'data-bin/train', 'data-bin/train1', ... and return a single ConcatDataset instance. Here is the function: def load_indexed_dataset( path, dictionary=None, dataset_impl=None, combine=False, default="cached" ): """A helper function for loading indexed datasets. Args: path (str): path to indexed dataset (e.g., 'data-bin/train') dictionary (~fairseq.data.Dictionary): data dictionary dataset_impl (str, optional): which dataset implementation to use. If not provided, it will be inferred automatically. For legacy indexed data we use the 'cached' implementation by default. combine (bool, optional): automatically load and combine multiple datasets. For example, if *path* is 'data-bin/train', then we will combine 'data-bin/train', 'data-bin/train1', ... and return a single ConcatDataset instance. """ import fairseq.data.indexed_dataset as indexed_dataset from fairseq.data.concat_dataset import ConcatDataset datasets = [] for k in itertools.count(): path_k = path + (str(k) if k > 0 else "") path_k = indexed_dataset.get_indexed_dataset_to_local(path_k) dataset_impl_k = dataset_impl if dataset_impl_k is None: dataset_impl_k = indexed_dataset.infer_dataset_impl(path_k) dataset = indexed_dataset.make_dataset( path_k, impl=dataset_impl_k or default, fix_lua_indexing=True, dictionary=dictionary, ) if dataset is None: break logger.info("loaded {:,} examples from: {}".format(len(dataset), path_k)) datasets.append(dataset) if not combine: break if len(datasets) == 0: return None elif len(datasets) == 1: return datasets[0] else: return ConcatDataset(datasets)
A helper function for loading indexed datasets. Args: path (str): path to indexed dataset (e.g., 'data-bin/train') dictionary (~fairseq.data.Dictionary): data dictionary dataset_impl (str, optional): which dataset implementation to use. If not provided, it will be inferred automatically. For legacy indexed data we use the 'cached' implementation by default. combine (bool, optional): automatically load and combine multiple datasets. For example, if *path* is 'data-bin/train', then we will combine 'data-bin/train', 'data-bin/train1', ... and return a single ConcatDataset instance.
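A minimal usage sketch, assuming a binarized fairseq dataset on disk; the paths and dictionary file below are hypothetical.

from fairseq.data import Dictionary, data_utils

d = Dictionary.load("data-bin/dict.en.txt")          # hypothetical path
ds = data_utils.load_indexed_dataset(
    "data-bin/train.en-de.en",                       # with combine=True, 'train.en-de.en1', ... are merged
    dictionary=d,
    combine=True,
)
if ds is not None:                                   # None if no shard was found
    print(len(ds), ds[0])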
183,854
import contextlib import itertools import logging import os import warnings from typing import Optional, Tuple import numpy as np import torch from fairseq.file_io import PathManager The provided code snippet includes necessary dependencies for implementing the `numpy_seed` function. Write a Python function `def numpy_seed(seed, *addl_seeds)` to solve the following problem: Context manager which seeds the NumPy PRNG with the specified seed and restores the state afterward Here is the function: def numpy_seed(seed, *addl_seeds): """Context manager which seeds the NumPy PRNG with the specified seed and restores the state afterward""" if seed is None: yield return if len(addl_seeds) > 0: seed = int(hash((seed, *addl_seeds)) % 1e6) state = np.random.get_state() np.random.seed(seed) try: yield finally: np.random.set_state(state)
Context manager which seeds the NumPy PRNG with the specified seed and restores the state afterward
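A small self-contained sketch of the seeding contract (plain NumPy plus the helper above):

import numpy as np
from fairseq.data.data_utils import numpy_seed

with numpy_seed(1234):
    a = np.random.rand(3)     # deterministic under seed 1234
with numpy_seed(1234):
    b = np.random.rand(3)     # identical draws: same seed, fresh context
assert np.allclose(a, b)
# on exit the pre-existing global PRNG state is restored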
183,855
import contextlib import itertools import logging import os import warnings from typing import Optional, Tuple import numpy as np import torch from fairseq.file_io import PathManager logger = logging.getLogger(__name__) def _filter_by_size_dynamic(indices, size_fn, max_positions, raise_exception=False): def compare_leq(a, b): return a <= b if not isinstance(a, tuple) else max(a) <= b def check_size(idx): if isinstance(max_positions, float) or isinstance(max_positions, int): return size_fn(idx) <= max_positions elif isinstance(max_positions, dict): idx_size = size_fn(idx) assert isinstance(idx_size, dict) intersect_keys = set(max_positions.keys()) & set(idx_size.keys()) return all( all( a is None or b is None or a <= b for a, b in zip(idx_size[key], max_positions[key]) ) for key in intersect_keys ) else: # For MultiCorpusSampledDataset, will generalize it later if not isinstance(size_fn(idx), Iterable): return all(size_fn(idx) <= b for b in max_positions) return all( a is None or b is None or a <= b for a, b in zip(size_fn(idx), max_positions) ) ignored = [] itr = collect_filtered(check_size, indices, ignored) indices = np.fromiter(itr, dtype=np.int64, count=-1) return indices, ignored The provided code snippet includes necessary dependencies for implementing the `filter_by_size` function. Write a Python function `def filter_by_size(indices, dataset, max_positions, raise_exception=False)` to solve the following problem: [deprecated] Filter indices based on their size. Use `FairseqDataset::filter_indices_by_size` instead. Args: indices (List[int]): ordered list of dataset indices dataset (FairseqDataset): fairseq dataset instance max_positions (tuple): filter elements larger than this size. Comparisons are done component-wise. raise_exception (bool, optional): if ``True``, raise an exception if any elements are filtered (default: False). Here is the function: def filter_by_size(indices, dataset, max_positions, raise_exception=False): """ [deprecated] Filter indices based on their size. Use `FairseqDataset::filter_indices_by_size` instead. Args: indices (List[int]): ordered list of dataset indices dataset (FairseqDataset): fairseq dataset instance max_positions (tuple): filter elements larger than this size. Comparisons are done component-wise. raise_exception (bool, optional): if ``True``, raise an exception if any elements are filtered (default: False). """ warnings.warn( "data_utils.filter_by_size is deprecated. 
" "Use `FairseqDataset::filter_indices_by_size` instead.", stacklevel=2, ) if isinstance(max_positions, float) or isinstance(max_positions, int): if hasattr(dataset, "sizes") and isinstance(dataset.sizes, np.ndarray): ignored = indices[dataset.sizes[indices] > max_positions].tolist() indices = indices[dataset.sizes[indices] <= max_positions] elif ( hasattr(dataset, "sizes") and isinstance(dataset.sizes, list) and len(dataset.sizes) == 1 ): ignored = indices[dataset.sizes[0][indices] > max_positions].tolist() indices = indices[dataset.sizes[0][indices] <= max_positions] else: indices, ignored = _filter_by_size_dynamic( indices, dataset.size, max_positions ) else: indices, ignored = _filter_by_size_dynamic(indices, dataset.size, max_positions) if len(ignored) > 0 and raise_exception: raise Exception( ( "Size of sample #{} is invalid (={}) since max_positions={}, " "skip this example with --skip-invalid-size-inputs-valid-test" ).format(ignored[0], dataset.size(ignored[0]), max_positions) ) if len(ignored) > 0: logger.warning( ( "{} samples have invalid sizes and will be skipped, " "max_positions={}, first few sample ids={}" ).format(len(ignored), max_positions, ignored[:10]) ) return indices
[deprecated] Filter indices based on their size. Use `FairseqDataset::filter_indices_by_size` instead. Args: indices (List[int]): ordered list of dataset indices dataset (FairseqDataset): fairseq dataset instance max_positions (tuple): filter elements larger than this size. Comparisons are done component-wise. raise_exception (bool, optional): if ``True``, raise an exception if any elements are filtered (default: False).
183,856
import contextlib import itertools import logging import os import warnings from typing import Optional, Tuple import numpy as np import torch from fairseq.file_io import PathManager The provided code snippet includes necessary dependencies for implementing the `filter_paired_dataset_indices_by_size` function. Write a Python function `def filter_paired_dataset_indices_by_size(src_sizes, tgt_sizes, indices, max_sizes)` to solve the following problem: Filter a list of sample indices. Remove those that are longer than specified in max_sizes. Args: indices (np.array): original array of sample indices max_sizes (int or list[int] or tuple[int]): max sample size, can be defined separately for src and tgt (then list or tuple) Returns: np.array: filtered sample array list: list of removed indices Here is the function: def filter_paired_dataset_indices_by_size(src_sizes, tgt_sizes, indices, max_sizes): """Filter a list of sample indices. Remove those that are longer than specified in max_sizes. Args: indices (np.array): original array of sample indices max_sizes (int or list[int] or tuple[int]): max sample size, can be defined separately for src and tgt (then list or tuple) Returns: np.array: filtered sample array list: list of removed indices """ if max_sizes is None: return indices, [] if type(max_sizes) in (int, float): max_src_size, max_tgt_size = max_sizes, max_sizes else: max_src_size, max_tgt_size = max_sizes if tgt_sizes is None: ignored = indices[src_sizes[indices] > max_src_size] else: ignored = indices[ (src_sizes[indices] > max_src_size) | (tgt_sizes[indices] > max_tgt_size) ] if len(ignored) > 0: if tgt_sizes is None: indices = indices[src_sizes[indices] <= max_src_size] else: indices = indices[ (src_sizes[indices] <= max_src_size) & (tgt_sizes[indices] <= max_tgt_size) ] return indices, ignored.tolist()
Filter a list of sample indices. Remove those that are longer than specified in max_sizes. Args: indices (np.array): original array of sample indices max_sizes (int or list[int] or tuple[int]): max sample size, can be defined separately for src and tgt (then list or tuple) Returns: np.array: filtered sample array list: list of removed indices
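A tiny worked example with synthetic sizes (all values invented):

import numpy as np
from fairseq.data.data_utils import filter_paired_dataset_indices_by_size

src_sizes = np.array([5, 12, 7])
tgt_sizes = np.array([6, 4, 20])
indices = np.arange(3)
kept, dropped = filter_paired_dataset_indices_by_size(
    src_sizes, tgt_sizes, indices, max_sizes=(10, 10)
)
print(kept, dropped)   # [0] [1, 2]: index 1 fails the source cap, index 2 the target cap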
183,857
import contextlib import itertools import logging import os import warnings from typing import Optional, Tuple import numpy as np import torch from fairseq.file_io import PathManager The provided code snippet includes necessary dependencies for implementing the `batch_by_size` function. Write a Python function `def batch_by_size( indices, num_tokens_fn, num_tokens_vec=None, max_tokens=None, max_sentences=None, required_batch_size_multiple=1, fixed_shapes=None, )` to solve the following problem: Yield mini-batches of indices bucketed by size. Batches may contain sequences of different lengths. Args: indices (List[int]): ordered list of dataset indices num_tokens_fn (callable): function that returns the number of tokens at a given index num_tokens_vec (List[int], optional): precomputed vector of the number of tokens for each index in indices (to enable faster batch generation) max_tokens (int, optional): max number of tokens in each batch (default: None). max_sentences (int, optional): max number of sentences in each batch (default: None). required_batch_size_multiple (int, optional): require batch size to be less than N or a multiple of N (default: 1). fixed_shapes (List[Tuple[int, int]], optional): if given, batches will only be created with the given shapes. *max_sentences* and *required_batch_size_multiple* will be ignored (default: None). Here is the function: def batch_by_size( indices, num_tokens_fn, num_tokens_vec=None, max_tokens=None, max_sentences=None, required_batch_size_multiple=1, fixed_shapes=None, ): """ Yield mini-batches of indices bucketed by size. Batches may contain sequences of different lengths. Args: indices (List[int]): ordered list of dataset indices num_tokens_fn (callable): function that returns the number of tokens at a given index num_tokens_vec (List[int], optional): precomputed vector of the number of tokens for each index in indices (to enable faster batch generation) max_tokens (int, optional): max number of tokens in each batch (default: None). max_sentences (int, optional): max number of sentences in each batch (default: None). required_batch_size_multiple (int, optional): require batch size to be less than N or a multiple of N (default: 1). fixed_shapes (List[Tuple[int, int]], optional): if given, batches will only be created with the given shapes. *max_sentences* and *required_batch_size_multiple* will be ignored (default: None). """ try: from fairseq.data.data_utils_fast import ( batch_by_size_fn, batch_by_size_vec, batch_fixed_shapes_fast, ) except ImportError: raise ImportError( "Please build Cython components with: `pip install --editable .` " "or `python setup.py build_ext --inplace`" ) except ValueError: raise ValueError( "Please build (or rebuild) Cython components with: `pip install " " --editable .` or `python setup.py build_ext --inplace`." 
) # added int() to avoid TypeError: an integer is required max_tokens = ( int(max_tokens) if max_tokens is not None else -1 ) max_sentences = max_sentences if max_sentences is not None else -1 bsz_mult = required_batch_size_multiple if not isinstance(indices, np.ndarray): indices = np.fromiter(indices, dtype=np.int64, count=-1) if num_tokens_vec is not None and not isinstance(num_tokens_vec, np.ndarray): num_tokens_vec = np.fromiter(num_tokens_vec, dtype=np.int64, count=-1) if fixed_shapes is None: if num_tokens_vec is None: return batch_by_size_fn( indices, num_tokens_fn, max_tokens, max_sentences, bsz_mult, ) else: return batch_by_size_vec( indices, num_tokens_vec, max_tokens, max_sentences, bsz_mult, ) else: fixed_shapes = np.array(fixed_shapes, dtype=np.int64) sort_order = np.lexsort( [ fixed_shapes[:, 1].argsort(), # length fixed_shapes[:, 0].argsort(), # bsz ] ) fixed_shapes_sorted = fixed_shapes[sort_order] return batch_fixed_shapes_fast(indices, num_tokens_fn, fixed_shapes_sorted)
Yield mini-batches of indices bucketed by size. Batches may contain sequences of different lengths. Args: indices (List[int]): ordered list of dataset indices num_tokens_fn (callable): function that returns the number of tokens at a given index num_tokens_vec (List[int], optional): precomputed vector of the number of tokens for each index in indices (to enable faster batch generation) max_tokens (int, optional): max number of tokens in each batch (default: None). max_sentences (int, optional): max number of sentences in each batch (default: None). required_batch_size_multiple (int, optional): require batch size to be less than N or a multiple of N (default: 1). fixed_shapes (List[Tuple[int, int]], optional): if given, batches will only be created with the given shapes. *max_sentences* and *required_batch_size_multiple* will be ignored (default: None).
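A hedged usage sketch; it runs only once fairseq's Cython extensions are built, and the toy length table is invented:

import numpy as np
from fairseq.data.data_utils import batch_by_size

lengths = np.array([3, 7, 7, 9, 2, 5])
batches = batch_by_size(
    np.argsort(lengths),                      # indices ordered shortest first
    num_tokens_fn=lambda i: int(lengths[i]),
    max_tokens=16,
)
# list of index arrays; each batch, padded to its longest item, stays within max_tokens
print(batches)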
183,858
import contextlib import itertools import logging import os import warnings from typing import Optional, Tuple import numpy as np import torch from fairseq.file_io import PathManager def post_process(sentence: str, symbol: str): if symbol == "sentencepiece": sentence = sentence.replace(" ", "").replace("\u2581", " ").strip() elif symbol == "wordpiece": sentence = sentence.replace(" ", "").replace("_", " ").strip() elif symbol == "letter": sentence = sentence.replace(" ", "").replace("|", " ").strip() elif symbol == "_EOW": sentence = sentence.replace(" ", "").replace("_EOW", " ").strip() elif symbol == "bert": sentence = sentence.replace(" ##", "").rstrip() elif symbol in {"subword_nmt", "@@ ", "@@"}: if symbol == "subword_nmt": symbol = "@@ " sentence = (sentence + " ").replace(symbol, "").rstrip() elif symbol == "none": pass elif symbol is not None: raise NotImplementedError(f"Unknown post_process option: {symbol}") return sentence
null
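To make the symbol conventions handled by `post_process` concrete, a few invented inputs and their detokenized outputs:

from fairseq.data.data_utils import post_process

print(post_process("\u2581Hello \u2581world", "sentencepiece"))   # Hello world
print(post_process("Hel@@ lo world", "subword_nmt"))              # Hello world
print(post_process("H e l l o | w o r l d |", "letter"))          # Hello world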
183,859
import contextlib import itertools import logging import os import warnings from typing import Optional, Tuple import numpy as np import torch from fairseq.file_io import PathManager The provided code snippet includes necessary dependencies for implementing the `compute_mask_indices` function. Write a Python function `def compute_mask_indices( shape: Tuple[int, int], padding_mask: Optional[torch.Tensor], mask_prob: float, mask_length: int, mask_type: str = "static", mask_other: float = 0.0, min_masks: int = 0, no_overlap: bool = False, min_space: int = 0, ) -> np.ndarray` to solve the following problem: Computes random mask spans for a given shape Args: shape: the shape for which to compute masks. should be of size 2 where first element is batch size and 2nd is timesteps padding_mask: optional padding mask of the same size as shape, which will prevent masking padded elements mask_prob: probability for each token to be chosen as start of the span to be masked. this will be multiplied by number of timesteps divided by length of mask span to mask approximately this percentage of all elements. however due to overlaps, the actual number will be smaller (unless no_overlap is True) mask_type: how to compute mask lengths static = fixed size uniform = sample from uniform distribution [mask_other, mask_length*2] normal = sample from normal distribution with mean mask_length and stdev mask_other. mask is min 1 element poisson = sample from Poisson distribution with lambda = mask length min_masks: minimum number of masked spans no_overlap: if true, will switch to an alternative recursive algorithm that prevents spans from overlapping min_space: only used if no_overlap is True, this is how many elements to keep unmasked between spans Here is the function: def compute_mask_indices( shape: Tuple[int, int], padding_mask: Optional[torch.Tensor], mask_prob: float, mask_length: int, mask_type: str = "static", mask_other: float = 0.0, min_masks: int = 0, no_overlap: bool = False, min_space: int = 0, ) -> np.ndarray: """ Computes random mask spans for a given shape Args: shape: the shape for which to compute masks. should be of size 2 where first element is batch size and 2nd is timesteps padding_mask: optional padding mask of the same size as shape, which will prevent masking padded elements mask_prob: probability for each token to be chosen as start of the span to be masked. this will be multiplied by number of timesteps divided by length of mask span to mask approximately this percentage of all elements. however due to overlaps, the actual number will be smaller (unless no_overlap is True) mask_type: how to compute mask lengths static = fixed size uniform = sample from uniform distribution [mask_other, mask_length*2] normal = sample from normal distribution with mean mask_length and stdev mask_other. 
mask is min 1 element poisson = sample from Poisson distribution with lambda = mask length min_masks: minimum number of masked spans no_overlap: if true, will switch to an alternative recursive algorithm that prevents spans from overlapping min_space: only used if no_overlap is True, this is how many elements to keep unmasked between spans """ bsz, all_sz = shape mask = np.full((bsz, all_sz), False) all_num_mask = int( # add a random number for probabilistic rounding mask_prob * all_sz / float(mask_length) + np.random.rand() ) all_num_mask = max(min_masks, all_num_mask) mask_idcs = [] for i in range(bsz): if padding_mask is not None: sz = all_sz - padding_mask[i].long().sum().item() num_mask = int( # add a random number for probabilistic rounding mask_prob * sz / float(mask_length) + np.random.rand() ) num_mask = max(min_masks, num_mask) else: sz = all_sz num_mask = all_num_mask if mask_type == "static": lengths = np.full(num_mask, mask_length) elif mask_type == "uniform": lengths = np.random.randint(mask_other, mask_length * 2 + 1, size=num_mask) elif mask_type == "normal": lengths = np.random.normal(mask_length, mask_other, size=num_mask) lengths = [max(1, int(round(x))) for x in lengths] elif mask_type == "poisson": lengths = np.random.poisson(mask_length, size=num_mask) lengths = [int(round(x)) for x in lengths] else: raise Exception("unknown mask selection " + mask_type) if sum(lengths) == 0: lengths[0] = min(mask_length, sz - 1) if no_overlap: mask_idc = [] def arrange(s, e, length, keep_length): span_start = np.random.randint(s, e - length) mask_idc.extend(span_start + i for i in range(length)) new_parts = [] if span_start - s - min_space >= keep_length: new_parts.append((s, span_start - min_space + 1)) if e - span_start - keep_length - min_space > keep_length: new_parts.append((span_start + length + min_space, e)) return new_parts parts = [(0, sz)] min_length = min(lengths) for length in sorted(lengths, reverse=True): lens = np.fromiter( (e - s if e - s >= length + min_space else 0 for s, e in parts), np.int64, ) l_sum = np.sum(lens) if l_sum == 0: break probs = lens / np.sum(lens) c = np.random.choice(len(parts), p=probs) s, e = parts.pop(c) parts.extend(arrange(s, e, length, min_length)) mask_idc = np.asarray(mask_idc) else: min_len = min(lengths) if sz - min_len <= num_mask: min_len = sz - num_mask - 1 mask_idc = np.random.choice(sz - min_len, num_mask, replace=False) mask_idc = np.asarray( [ mask_idc[j] + offset for j in range(len(mask_idc)) for offset in range(lengths[j]) ] ) mask_idcs.append(np.unique(mask_idc[mask_idc < sz])) min_len = min([len(m) for m in mask_idcs]) for i, mask_idc in enumerate(mask_idcs): if len(mask_idc) > min_len: mask_idc = np.random.choice(mask_idc, min_len, replace=False) mask[i, mask_idc] = True return mask
Computes random mask spans for a given shape Args: shape: the shape for which to compute masks. should be of size 2 where first element is batch size and 2nd is timesteps padding_mask: optional padding mask of the same size as shape, which will prevent masking padded elements mask_prob: probability for each token to be chosen as start of the span to be masked. this will be multiplied by number of timesteps divided by length of mask span to mask approximately this percentage of all elements. however due to overlaps, the actual number will be smaller (unless no_overlap is True) mask_type: how to compute mask lengths static = fixed size uniform = sample from uniform distribution [mask_other, mask_length*2] normal = sample from normal distribution with mean mask_length and stdev mask_other. mask is min 1 element poisson = sample from Poisson distribution with lambda = mask length min_masks: minimum number of masked spans no_overlap: if true, will switch to an alternative recursive algorithm that prevents spans from overlapping min_space: only used if no_overlap is True, this is how many elements to keep unmasked between spans
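An illustrative call in the wav2vec 2.0 style; the shape and hyper-parameters are example values, not ones prescribed by the record:

import numpy as np
from fairseq.data.data_utils import compute_mask_indices

mask = compute_mask_indices(
    shape=(2, 50),            # batch of 2 sequences, 50 timesteps each
    padding_mask=None,
    mask_prob=0.65,
    mask_length=10,
    mask_type="static",
)
print(mask.shape, mask.sum(axis=1))   # (2, 50) plus the per-row masked counts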
183,860
import contextlib import itertools import logging import os import warnings from typing import Optional, Tuple import numpy as np import torch from fairseq.file_io import PathManager def get_mem_usage(): try: import psutil mb = 1024 * 1024 return f"used={psutil.virtual_memory().used / mb}Mb; avail={psutil.virtual_memory().available / mb}Mb" except ImportError: return "N/A"
null
183,861
import contextlib import itertools import logging import os import warnings from typing import Optional, Tuple import numpy as np import torch from fairseq.file_io import PathManager def lengths_to_padding_mask(lens: torch.LongTensor) -> torch.BoolTensor: bsz, max_lens = lens.size(0), torch.max(lens).item() mask = torch.arange(max_lens).to(lens.device).view(1, max_lens) mask = mask.expand(bsz, -1) >= lens.view(bsz, 1).expand(-1, max_lens) return mask def lengths_to_mask(lens: torch.LongTensor) -> torch.BoolTensor: return ~lengths_to_padding_mask(lens)
null
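A quick check of the two mask helpers above:

import torch
from fairseq.data.data_utils import lengths_to_padding_mask, lengths_to_mask

lens = torch.tensor([3, 1, 2])
print(lengths_to_padding_mask(lens))
# tensor([[False, False, False],
#         [False,  True,  True],
#         [False, False,  True]])
print(lengths_to_mask(lens))   # the element-wise inverse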
183,862
import os.path as op from typing import BinaryIO, Optional, Tuple, Union import numpy as np def get_waveform( path_or_fp: Union[str, BinaryIO], normalization=True ) -> Tuple[np.ndarray, int]: """Get the waveform and sample rate of a 16-bit mono-channel WAV or FLAC. Args: path_or_fp (str or BinaryIO): the path or file-like object normalization (bool): Normalize values to [-1, 1] (Default: True) """ if isinstance(path_or_fp, str): ext = op.splitext(op.basename(path_or_fp))[1] if ext not in {".flac", ".wav"}: raise ValueError(f"Unsupported audio format: {ext}") try: import soundfile as sf except ImportError: raise ImportError("Please install soundfile to load WAV/FLAC file") waveform, sample_rate = sf.read(path_or_fp, dtype="float32") if not normalization: waveform *= 2 ** 15 # denormalized to 16-bit signed integers return waveform, sample_rate def _get_kaldi_fbank(waveform, sample_rate, n_bins=80) -> Optional[np.ndarray]: """Get mel-filter bank features via PyKaldi.""" try: from kaldi.feat.mel import MelBanksOptions from kaldi.feat.fbank import FbankOptions, Fbank from kaldi.feat.window import FrameExtractionOptions from kaldi.matrix import Vector mel_opts = MelBanksOptions() mel_opts.num_bins = n_bins frame_opts = FrameExtractionOptions() frame_opts.samp_freq = sample_rate opts = FbankOptions() opts.mel_opts = mel_opts opts.frame_opts = frame_opts fbank = Fbank(opts=opts) features = fbank.compute(Vector(waveform), 1.0).numpy() return features except ImportError: return None def _get_torchaudio_fbank(waveform, sample_rate, n_bins=80) -> Optional[np.ndarray]: """Get mel-filter bank features via TorchAudio.""" try: import torch import torchaudio.compliance.kaldi as ta_kaldi import torchaudio.sox_effects as ta_sox waveform = torch.from_numpy(waveform) if len(waveform.shape) == 1: # Mono channel: D -> 1 x D waveform = waveform.unsqueeze(0) else: # Merge multiple channels to one: C x D -> 1 x D # (apply_effects_tensor expects a list of [effect, arg] lists) waveform, _ = ta_sox.apply_effects_tensor(waveform, sample_rate, [['channels', '1']]) features = ta_kaldi.fbank( waveform, num_mel_bins=n_bins, sample_frequency=sample_rate ) return features.numpy() except ImportError: return None The provided code snippet includes necessary dependencies for implementing the `get_fbank` function. Write a Python function `def get_fbank(path_or_fp: Union[str, BinaryIO], n_bins=80) -> np.ndarray` to solve the following problem: Get mel-filter bank features via PyKaldi or TorchAudio. Prefer PyKaldi (faster CPP implementation) to TorchAudio (Python implementation). Note that Kaldi/TorchAudio requires 16-bit signed integers as inputs and hence the waveform should not be normalized. Here is the function: def get_fbank(path_or_fp: Union[str, BinaryIO], n_bins=80) -> np.ndarray: """Get mel-filter bank features via PyKaldi or TorchAudio. Prefer PyKaldi (faster CPP implementation) to TorchAudio (Python implementation). Note that Kaldi/TorchAudio requires 16-bit signed integers as inputs and hence the waveform should not be normalized.""" sound, sample_rate = get_waveform(path_or_fp, normalization=False) features = _get_kaldi_fbank(sound, sample_rate, n_bins) if features is None: features = _get_torchaudio_fbank(sound, sample_rate, n_bins) if features is None: raise ImportError( "Please install pyKaldi or torchaudio to enable " "online filterbank feature extraction" ) return features
Get mel-filter bank features via PyKaldi or TorchAudio. Prefer PyKaldi (faster CPP implementation) to TorchAudio (Python implementation). Note that Kaldi/TorchAudio requires 16-bit signed integers as inputs and hence the waveform should not be normalized.
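A hypothetical call; the audio path is invented, and either PyKaldi or torchaudio must be installed for the fallback chain to succeed:

from fairseq.data.audio.audio_utils import get_fbank

feats = get_fbank("sample.wav", n_bins=80)   # reads un-normalized 16-bit samples, per the note above
print(feats.shape)                           # (num_frames, 80)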
183,863
import csv import io import logging import os.path as op import re from typing import Dict, List, Optional, Tuple import numpy as np import torch from fairseq.data import ( ConcatDataset, Dictionary, FairseqDataset, ResamplingDataset, data_utils as fairseq_data_utils, ) from fairseq.data.audio.audio_utils import get_fbank, get_waveform from fairseq.data.audio.feature_transforms import CompositeAudioFeatureTransform def get_features_from_npy_or_audio(path): ext = op.splitext(op.basename(path))[1] if ext not in {".npy", ".flac", ".wav"}: raise ValueError(f'Unsupported file format for "{path}"') return np.load(path) if ext == ".npy" else get_fbank(path) def get_features_or_waveform_from_uncompressed_zip( path, byte_offset, byte_size, need_waveform=False ): assert path.endswith(".zip") data = read_from_uncompressed_zip(path, byte_offset, byte_size) f = io.BytesIO(data) if is_npy_data(data): features_or_waveform = np.load(f) elif is_flac_or_wav_data(data): features_or_waveform = get_waveform(f)[0] if need_waveform else get_fbank(f) else: raise ValueError(f'Unknown file format for "{path}"') return features_or_waveform def get_waveform( path_or_fp: Union[str, BinaryIO], normalization: bool = True, mono: bool = True, frames: int = -1, start: int = 0, always_2d: bool = True, output_sample_rate: Optional[int] = None, normalize_volume: bool = False ) -> Tuple[np.ndarray, int]: """Get the waveform and sample rate of a 16-bit WAV/FLAC/OGG Vorbis audio. Args: path_or_fp (str or BinaryIO): the path or file-like object normalization (bool): normalize values to [-1, 1] (Default: True) mono (bool): convert multi-channel audio to mono-channel one frames (int): the number of frames to read. (-1 for reading all) start (int): Where to start reading. A negative value counts from the end. always_2d (bool): always return 2D array even for mono-channel audios output_sample_rate (Optional[int]): output sample rate normalize_volume (bool): normalize volume Returns: waveform (numpy.ndarray): 1D or 2D waveform (channels x length) sample_rate (float): sample rate """ if isinstance(path_or_fp, str): ext = Path(path_or_fp).suffix if ext not in SF_AUDIO_FILE_EXTENSIONS: raise ValueError(f"Unsupported audio format: {ext}") try: import soundfile as sf except ImportError: raise ImportError("Please install soundfile: pip install soundfile") waveform, sample_rate = sf.read( path_or_fp, dtype="float32", always_2d=True, frames=frames, start=start ) waveform = waveform.T # T x C -> C x T waveform, sample_rate = convert_waveform( waveform, sample_rate, normalize_volume=normalize_volume, to_mono=mono, to_sample_rate=output_sample_rate ) if not normalization: waveform *= 2 ** 15 # denormalized to 16-bit signed integers if not always_2d: waveform = waveform.squeeze(axis=0) return waveform, sample_rate The provided code snippet includes necessary dependencies for implementing the `get_features_or_waveform` function. Write a Python function `def get_features_or_waveform(path: str, need_waveform=False)` to solve the following problem: Get speech features from .npy file or waveform from .wav/.flac file. The file may be inside an uncompressed ZIP file and is accessed via byte offset and length. Args: path (str): File path in the format of "<.npy/.wav/.flac path>" or "<zip path>:<byte offset>:<byte length>". need_waveform (bool): return waveform instead of features. Returns: features_or_waveform (numpy.ndarray): speech features or waveform. 
Here is the function: def get_features_or_waveform(path: str, need_waveform=False): """Get speech features from .npy file or waveform from .wav/.flac file. The file may be inside an uncompressed ZIP file and is accessed via byte offset and length. Args: path (str): File path in the format of "<.npy/.wav/.flac path>" or "<zip path>:<byte offset>:<byte length>". need_waveform (bool): return waveform instead of features. Returns: features_or_waveform (numpy.ndarray): speech features or waveform. """ _path, *extra = path.split(":") if not op.exists(_path): raise FileNotFoundError(f"File not found: {_path}") if len(extra) == 0: if need_waveform: return get_waveform(_path) return get_features_from_npy_or_audio(_path) elif len(extra) == 2: extra = [int(i) for i in extra] features_or_waveform = get_features_or_waveform_from_uncompressed_zip( _path, extra[0], extra[1], need_waveform=need_waveform ) else: raise ValueError(f"Invalid path: {path}") return features_or_waveform
Get speech features from .npy file or waveform from .wav/.flac file. The file may be inside an uncompressed ZIP file and is accessed via byte offset and length. Args: path (str): File path in the format of "<.npy/.wav/.flac path>" or "<zip path>:<byte offset>:<byte length>". need_waveform (bool): return waveform instead of features. Returns: features_or_waveform (numpy.ndarray): speech features or waveform.
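A sketch of the two accepted path formats, with invented file names; the module path follows this record's imports:

from fairseq.data.audio.speech_to_text_dataset import get_features_or_waveform

f1 = get_features_or_waveform("feats.npy")        # plain .npy/.wav/.flac path
f2 = get_features_or_waveform(
    "shard.zip:1024:4096",                        # zip path : byte offset : byte length
    need_waveform=True,
)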
183,864
import csv import io import logging import os.path as op import re from typing import Dict, List, Optional, Tuple import numpy as np import torch from fairseq.data import ( ConcatDataset, Dictionary, FairseqDataset, ResamplingDataset, data_utils as fairseq_data_utils, ) from fairseq.data.audio.audio_utils import get_fbank, get_waveform from fairseq.data.audio.feature_transforms import CompositeAudioFeatureTransform The provided code snippet includes necessary dependencies for implementing the `_collate_frames` function. Write a Python function `def _collate_frames( frames: List[torch.Tensor], is_audio_input: bool = False ) -> torch.Tensor` to solve the following problem: Convert a list of 2D frames into a padded 3D tensor Args: frames (list): list of 2D frames of size L[i]*f_dim, where L[i] is the length of the i-th frame and f_dim is the static feature dimension Returns: 3D tensor of size len(frames)*len_max*f_dim, where len_max is the max of L[i] Here is the function: def _collate_frames( frames: List[torch.Tensor], is_audio_input: bool = False ) -> torch.Tensor: """ Convert a list of 2D frames into a padded 3D tensor Args: frames (list): list of 2D frames of size L[i]*f_dim, where L[i] is the length of the i-th frame and f_dim is the static feature dimension Returns: 3D tensor of size len(frames)*len_max*f_dim, where len_max is the max of L[i] """ max_len = max(frame.size(0) for frame in frames) if is_audio_input: out = frames[0].new_zeros((len(frames), max_len)) else: out = frames[0].new_zeros((len(frames), max_len, frames[0].size(1))) for i, v in enumerate(frames): out[i, : v.size(0)] = v return out
Convert a list of 2D frames into a padded 3D tensor Args: frames (list): list of 2D frames of size L[i]*f_dim, where L[i] is the length of the i-th frame and f_dim is the static feature dimension Returns: 3D tensor of size len(frames)*len_max*f_dim, where len_max is the max of L[i]
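A toy padding check (tensor contents invented; module path per this record's imports):

import torch
from fairseq.data.audio.speech_to_text_dataset import _collate_frames

frames = [torch.ones(4, 3), torch.ones(2, 3)]
out = _collate_frames(frames)
print(out.shape)        # torch.Size([2, 4, 3]); out[1] is zero-padded past row index 1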
183,865
import itertools import json import logging import math import os from collections import OrderedDict, defaultdict from fairseq import utils from fairseq.data import ( AppendTokenDataset, ConcatDataset, Dictionary, LanguagePairDataset, PrependTokenDataset, SampledMultiDataset, SampledMultiEpochDataset, StripTokenDataset, TransformEosLangPairDataset, TruncateDataset, data_utils, indexed_dataset, ) from fairseq.data.multilingual.multilingual_utils import ( EncoderLangtok, LangTokSpec, LangTokStyle, augment_dictionary, get_lang_tok, ) from fairseq.data.multilingual.sampled_multi_dataset import CollateFormat from fairseq.file_io import PathManager from fairseq.utils import FileContentsAction, csv_str_list, eval_str_dict The provided code snippet includes necessary dependencies for implementing the `_lang_id` function. Write a Python function `def _lang_id(dic: Dictionary, lang: str)` to solve the following problem: Return language ID index. Here is the function: def _lang_id(dic: Dictionary, lang: str): """Return language ID index.""" idx = dic.index(lang) assert idx != dic.unk_index, "cannot find language ID for lang {}".format(lang) return idx
Return language ID index.
183,866
import itertools import json import logging import math import os from collections import OrderedDict, defaultdict from fairseq import utils from fairseq.data import ( AppendTokenDataset, ConcatDataset, Dictionary, LanguagePairDataset, PrependTokenDataset, SampledMultiDataset, SampledMultiEpochDataset, StripTokenDataset, TransformEosLangPairDataset, TruncateDataset, data_utils, indexed_dataset, ) from fairseq.data.multilingual.multilingual_utils import ( EncoderLangtok, LangTokSpec, LangTokStyle, augment_dictionary, get_lang_tok, ) from fairseq.data.multilingual.sampled_multi_dataset import CollateFormat from fairseq.file_io import PathManager from fairseq.utils import FileContentsAction, csv_str_list, eval_str_dict def load_sampling_weights(from_file): with open(from_file) as f: weights = json.load(f) return weights
null
183,875
import logging import numpy as np import torch from fairseq.data import FairseqDataset, data_utils logger = logging.getLogger(__name__) def collate( samples, pad_idx, eos_idx, left_pad_source=True, left_pad_target=False, input_feeding=True, pad_to_length=None, pad_to_multiple=1, ): if len(samples) == 0: return {} def merge(key, left_pad, move_eos_to_beginning=False, pad_to_length=None): return data_utils.collate_tokens( [s[key] for s in samples], pad_idx, eos_idx, left_pad, move_eos_to_beginning, pad_to_length=pad_to_length, pad_to_multiple=pad_to_multiple, ) def check_alignment(alignment, src_len, tgt_len): if alignment is None or len(alignment) == 0: return False if ( alignment[:, 0].max().item() >= src_len - 1 or alignment[:, 1].max().item() >= tgt_len - 1 ): logger.warning("alignment size mismatch found, skipping alignment!") return False return True def compute_alignment_weights(alignments): """ Given a tensor of shape [:, 2] containing the source-target indices corresponding to the alignments, a weight vector containing the inverse frequency of each target index is computed. For e.g. if alignments = [[5, 7], [2, 3], [1, 3], [4, 2]], then a tensor containing [1., 0.5, 0.5, 1] should be returned (since target index 3 is repeated twice) """ align_tgt = alignments[:, 1] _, align_tgt_i, align_tgt_c = torch.unique( align_tgt, return_inverse=True, return_counts=True ) align_weights = align_tgt_c[align_tgt_i[np.arange(len(align_tgt))]] return 1.0 / align_weights.float() id = torch.LongTensor([s["id"] for s in samples]) src_tokens = merge( "source", left_pad=left_pad_source, pad_to_length=pad_to_length["source"] if pad_to_length is not None else None, ) # sort by descending source length src_lengths = torch.LongTensor( [s["source"].ne(pad_idx).long().sum() for s in samples] ) src_lengths, sort_order = src_lengths.sort(descending=True) id = id.index_select(0, sort_order) src_tokens = src_tokens.index_select(0, sort_order) prev_output_tokens = None target = None if samples[0].get("target", None) is not None: target = merge( "target", left_pad=left_pad_target, pad_to_length=pad_to_length["target"] if pad_to_length is not None else None, ) target = target.index_select(0, sort_order) tgt_lengths = torch.LongTensor( [s["target"].ne(pad_idx).long().sum() for s in samples] ).index_select(0, sort_order) ntokens = tgt_lengths.sum().item() if samples[0].get("prev_output_tokens", None) is not None: prev_output_tokens = merge("prev_output_tokens", left_pad=left_pad_target) elif input_feeding: # we create a shifted version of targets for feeding the # previous output token(s) into the next decoder step prev_output_tokens = merge( "target", left_pad=left_pad_target, move_eos_to_beginning=True, pad_to_length=pad_to_length["target"] if pad_to_length is not None else None, ) else: ntokens = src_lengths.sum().item() batch = { "id": id, "nsentences": len(samples), "ntokens": ntokens, "net_input": {"src_tokens": src_tokens, "src_lengths": src_lengths,}, "target": target, } if prev_output_tokens is not None: batch["net_input"]["prev_output_tokens"] = prev_output_tokens.index_select( 0, sort_order ) if samples[0].get("alignment", None) is not None: bsz, tgt_sz = batch["target"].shape src_sz = batch["net_input"]["src_tokens"].shape[1] offsets = torch.zeros((len(sort_order), 2), dtype=torch.long) offsets[:, 1] += torch.arange(len(sort_order), dtype=torch.long) * tgt_sz if left_pad_source: offsets[:, 0] += src_sz - src_lengths if left_pad_target: offsets[:, 1] += tgt_sz - tgt_lengths alignments = [ alignment + offset 
for align_idx, offset, src_len, tgt_len in zip( sort_order, offsets, src_lengths, tgt_lengths ) for alignment in [samples[align_idx]["alignment"].view(-1, 2)] if check_alignment(alignment, src_len, tgt_len) ] if len(alignments) > 0: alignments = torch.cat(alignments, dim=0) align_weights = compute_alignment_weights(alignments) batch["alignments"] = alignments batch["align_weights"] = align_weights if samples[0].get("constraints", None) is not None: # Collate the packed constraints across the samples, padding to # the length of the longest sample. lens = [sample.get("constraints").size(0) for sample in samples] max_len = max(lens) constraints = torch.zeros((len(samples), max(lens))).long() for i, sample in enumerate(samples): constraints[i, 0 : lens[i]] = samples[i].get("constraints") batch["constraints"] = constraints return batch
null
183,878
import numpy as np import torch from . import FairseqDataset, data_utils def collate(samples, pad_idx, eos_idx): if len(samples) == 0: return {} def merge(key, is_list=False): if is_list: res = [] for i in range(len(samples[0][key])): res.append( data_utils.collate_tokens( [s[key][i] for s in samples], pad_idx, eos_idx, left_pad=False, ) ) return res else: return data_utils.collate_tokens( [s[key] for s in samples], pad_idx, eos_idx, left_pad=False, ) src_tokens = merge("source") if samples[0]["target"] is not None: is_target_list = isinstance(samples[0]["target"], list) target = merge("target", is_target_list) else: target = src_tokens return { "id": torch.LongTensor([s["id"] for s in samples]), "nsentences": len(samples), "ntokens": sum(len(s["source"]) for s in samples), "net_input": { "src_tokens": src_tokens, "src_lengths": torch.LongTensor([s["source"].numel() for s in samples]), }, "target": target, }
null
183,886
import shutil import struct from functools import lru_cache import numpy as np import torch from fairseq.dataclass.constants import DATASET_IMPL_CHOICES from fairseq.data.fasta_dataset import FastaDataset from fairseq.file_io import PathManager from . import FairseqDataset from typing import Union DATASET_IMPL_CHOICES = ChoiceEnum(["raw", "lazy", "cached", "mmap", "fasta", "huffman"]) def get_available_dataset_impl(): return list(map(str, DATASET_IMPL_CHOICES))
null
183,887
import os
import shutil
import struct
import subprocess
import threading
from functools import lru_cache
from pathlib import Path
from typing import Union

import numpy as np
import torch

from fairseq.file_io import PathManager

from . import FairseqDataset


def index_file_path(prefix_path):
    return prefix_path + ".idx"


def data_file_path(prefix_path):
    return prefix_path + ".bin"


def fasta_file_path(prefix_path):
    return prefix_path + ".fasta"


# dtype codes used in the binary index headers; referenced by the classes below
# (as in fairseq's indexed_dataset.py). Codes 6/7 historically held the
# np.float/np.double aliases that newer NumPy releases removed.
_code_to_dtype = {
    1: np.uint8,
    2: np.int8,
    3: np.int16,
    4: np.int32,
    5: np.int64,
    6: np.float64,
    7: np.double,
    8: np.uint16,
    9: np.uint32,
    10: np.uint64,
}


def _dtype_header_code(dtype) -> int:
    for k in _code_to_dtype.keys():
        if _code_to_dtype[k] == dtype:
            return k
    raise ValueError(dtype)


def read_longs(f, n):
    a = np.empty(n, dtype=np.int64)
    f.readinto(a)
    return a


def write_longs(f, a):
    f.write(np.array(a, dtype=np.int64))


def _warmup_mmap_file(path):
    # Touch every block once so the OS page cache is warm before random access.
    with open(path, "rb") as stream:
        while stream.read(100 * 1024 * 1024):
            pass


class IndexedDataset(FairseqDataset):
    """Loader for TorchNet IndexedDataset"""

    _HDR_MAGIC = b"TNTIDX\x00\x00"

    def __init__(self, path, fix_lua_indexing=False):
        super().__init__()
        self.path = path
        self.fix_lua_indexing = fix_lua_indexing
        self.data_file = None
        self.read_index(path)

    def read_index(self, path):
        with open(index_file_path(path), "rb") as f:
            magic = f.read(8)
            assert magic == self._HDR_MAGIC, (
                "Index file doesn't match expected format. "
                "Make sure that --dataset-impl is configured properly."
            )
            version = f.read(8)
            assert struct.unpack("<Q", version) == (1,)
            code, self.element_size = struct.unpack("<QQ", f.read(16))
            self.dtype = _code_to_dtype[code]
            self._len, self.s = struct.unpack("<QQ", f.read(16))
            self.dim_offsets = read_longs(f, self._len + 1)
            self.data_offsets = read_longs(f, self._len + 1)
            self.sizes = read_longs(f, self.s)

    def read_data(self, path):
        self.data_file = open(data_file_path(path), "rb", buffering=0)

    def check_index(self, i):
        if i < 0 or i >= self._len:
            raise IndexError("index out of range")

    def __del__(self):
        if self.data_file:
            self.data_file.close()

    @lru_cache(maxsize=8)
    def __getitem__(self, i) -> torch.Tensor:
        if not self.data_file:
            self.read_data(self.path)
        self.check_index(i)
        tensor_size = self.sizes[self.dim_offsets[i] : self.dim_offsets[i + 1]]
        a = np.empty(tensor_size, dtype=self.dtype)
        self.data_file.seek(self.data_offsets[i] * self.element_size)
        self.data_file.readinto(a)
        item = torch.from_numpy(a).long()
        if self.fix_lua_indexing:
            item -= 1  # subtract 1 for 0-based indexing
        return item

    def __len__(self):
        return self._len

    def num_tokens(self, index):
        return self.sizes[index]

    def size(self, index):
        return self.sizes[index]

    @staticmethod
    def exists(path):
        return PathManager.exists(index_file_path(path)) and PathManager.exists(
            data_file_path(path)
        )

    @property
    def supports_prefetch(self):
        return False  # avoid prefetching to save memory


class IndexedRawTextDataset(FairseqDataset):
    """Takes a text file as input and binarizes it in memory at instantiation.
    Original lines are also kept in memory"""

    def __init__(self, path, dictionary, append_eos=True, reverse_order=False):
        self.tokens_list = []
        self.lines = []
        self.sizes = []
        self.append_eos = append_eos
        self.reverse_order = reverse_order
        self.read_data(path, dictionary)
        self.size = len(self.tokens_list)

    def read_data(self, path, dictionary):
        with open(path, "r", encoding="utf-8") as f:
            for line in f:
                self.lines.append(line.strip("\n"))
                tokens = dictionary.encode_line(
                    line,
                    add_if_not_exist=False,
                    append_eos=self.append_eos,
                    reverse_order=self.reverse_order,
                ).long()
                self.tokens_list.append(tokens)
                self.sizes.append(len(tokens))
        self.sizes = np.array(self.sizes)

    def check_index(self, i):
        if i < 0 or i >= self.size:
            raise IndexError("index out of range")

    def __getitem__(self, i):
        self.check_index(i)
        return self.tokens_list[i]

    def get_original_text(self, i):
        self.check_index(i)
        return self.lines[i]

    def __del__(self):
        pass

    def __len__(self):
        return self.size

    def num_tokens(self, index):
        return self.sizes[index]

    def size(self, index):
        return self.sizes[index]

    @staticmethod
    def exists(path):
        return PathManager.exists(path)


class MMapIndexedDataset(torch.utils.data.Dataset):
    class Index:
        _HDR_MAGIC = b"MMIDIDX\x00\x00"

        @classmethod
        def writer(cls, path, dtype):
            class _Writer:
                def __enter__(self):
                    self._file = open(path, "wb")

                    self._file.write(cls._HDR_MAGIC)
                    self._file.write(struct.pack("<Q", 1))
                    self._file.write(struct.pack("<B", _dtype_header_code(dtype)))

                    return self

                @staticmethod
                def _get_pointers(sizes):
                    dtype_size = dtype().itemsize
                    address = 0
                    pointers = []

                    for size in sizes:
                        pointers.append(address)
                        address += size * dtype_size

                    return pointers

                def write(self, sizes):
                    pointers = self._get_pointers(sizes)

                    self._file.write(struct.pack("<Q", len(sizes)))

                    sizes = np.array(sizes, dtype=np.int32)
                    self._file.write(sizes.tobytes(order="C"))
                    del sizes

                    pointers = np.array(pointers, dtype=np.int64)
                    self._file.write(pointers.tobytes(order="C"))
                    del pointers

                def __exit__(self, exc_type, exc_val, exc_tb):
                    self._file.close()

            return _Writer()

        def __init__(self, path):
            with open(path, "rb") as stream:
                magic_test = stream.read(9)
                assert self._HDR_MAGIC == magic_test, (
                    "Index file doesn't match expected format. "
                    "Make sure that --dataset-impl is configured properly."
                )
                version = struct.unpack("<Q", stream.read(8))
                assert (1,) == version

                (dtype_code,) = struct.unpack("<B", stream.read(1))
                self._dtype = _code_to_dtype[dtype_code]
                self._dtype_size = self._dtype().itemsize

                self._len = struct.unpack("<Q", stream.read(8))[0]
                offset = stream.tell()

            _warmup_mmap_file(path)

            self._bin_buffer_mmap = np.memmap(path, mode="r", order="C")
            self._bin_buffer = memoryview(self._bin_buffer_mmap)
            self._sizes = np.frombuffer(
                self._bin_buffer, dtype=np.int32, count=self._len, offset=offset
            )
            self._pointers = np.frombuffer(
                self._bin_buffer,
                dtype=np.int64,
                count=self._len,
                offset=offset + self._sizes.nbytes,
            )

        def __del__(self):
            self._bin_buffer_mmap._mmap.close()
            del self._bin_buffer_mmap

        @property
        def dtype(self):
            return self._dtype

        @property
        def sizes(self):
            return self._sizes

        @lru_cache(maxsize=8)
        def __getitem__(self, i):
            return self._pointers[i], self._sizes[i]

        def __len__(self):
            return self._len

    def __init__(self, path):
        super().__init__()
        self._path = None
        self._index = None
        self._bin_buffer = None
        self._do_init(path)

    def __getstate__(self):
        return self._path

    def __setstate__(self, state):
        self._do_init(state)

    def _do_init(self, path):
        self._path = path
        self._index = self.Index(index_file_path(self._path))

        _warmup_mmap_file(data_file_path(self._path))
        self._bin_buffer_mmap = np.memmap(
            data_file_path(self._path), mode="r", order="C"
        )
        self._bin_buffer = memoryview(self._bin_buffer_mmap)

    def __del__(self):
        self._bin_buffer_mmap._mmap.close()
        del self._bin_buffer_mmap
        del self._index

    def __len__(self):
        return len(self._index)

    @lru_cache(maxsize=8)
    def __getitem__(self, i):
        ptr, size = self._index[i]
        np_array = np.frombuffer(
            self._bin_buffer, dtype=self._index.dtype, count=size, offset=ptr
        )
        if self._index.dtype != np.int64:
            np_array = np_array.astype(np.int64)
        return torch.from_numpy(np_array)

    @property
    def sizes(self):
        return self._index.sizes

    @property
    def supports_prefetch(self):
        return False

    @staticmethod
    def exists(path):
        return PathManager.exists(index_file_path(path)) and PathManager.exists(
            data_file_path(path)
        )


class FastaDataset(torch.utils.data.Dataset):
    """
    For loading protein sequence datasets in the common FASTA data format
    """

    def __init__(self, path: str, cache_indices=False):
        self.fn = fasta_file_path(path)
        self.threadlocal = threading.local()
        self.cache = Path(f"{path}.fasta.idx.npy")
        if cache_indices:
            if self.cache.exists():
                self.offsets, self.sizes = np.load(self.cache)
            else:
                self.offsets, self.sizes = self._build_index(path)
                np.save(self.cache, np.stack([self.offsets, self.sizes]))
        else:
            self.offsets, self.sizes = self._build_index(path)

    def _get_file(self):
        if not hasattr(self.threadlocal, "f"):
            self.threadlocal.f = open(self.fn, "r")
        return self.threadlocal.f

    def __getitem__(self, idx):
        f = self._get_file()
        f.seek(self.offsets[idx])
        desc = f.readline().strip()
        line = f.readline()
        seq = ""
        while line != "" and line[0] != ">":
            seq += line.strip()
            line = f.readline()
        return desc, seq

    def __len__(self):
        return self.offsets.size

    def _build_index(self, path: str):
        # Use grep and awk to get 100M/s on local SSD.
        # Should process your enormous 100G fasta in ~10 min single core...
        path = fasta_file_path(path)
        bytes_offsets = subprocess.check_output(
            f"cat {path} | tqdm --bytes --total $(wc -c < {path})"
            "| grep --byte-offset '^>' -o | cut -d: -f1",
            shell=True,
        )
        fasta_lengths = subprocess.check_output(
            f"cat {path} | tqdm --bytes --total $(wc -c < {path})"
            "| awk '/^>/ {print \"\";next;} { printf(\"%s\",$0);}' | tail -n+2 | awk '{print length($1)}'",
            shell=True,
        )
        bytes_np = np.fromstring(bytes_offsets, dtype=np.int64, sep=" ")
        sizes_np = np.fromstring(fasta_lengths, dtype=np.int64, sep=" ")
        return bytes_np, sizes_np

    def __setstate__(self, state):
        self.__dict__ = state
        self.threadlocal = threading.local()

    def __getstate__(self):
        d = {}
        for i, v in self.__dict__.items():
            if i != "threadlocal":
                d[i] = v
        return d

    def __del__(self):
        if hasattr(self.threadlocal, "f"):
            self.threadlocal.f.close()
            del self.threadlocal.f

    @staticmethod
    def exists(path):
        return os.path.exists(fasta_file_path(path))


def infer_dataset_impl(path):
    if IndexedRawTextDataset.exists(path):
        return "raw"
    elif IndexedDataset.exists(path):
        with open(index_file_path(path), "rb") as f:
            magic = f.read(8)
            if magic == IndexedDataset._HDR_MAGIC:
                return "cached"
            elif magic == MMapIndexedDataset.Index._HDR_MAGIC[:8]:
                return "mmap"
            else:
                return None
    elif FastaDataset.exists(path):
        return "fasta"
    else:
        return None
null
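A minimal read-side sketch tying these classes together. The data-bin prefix is hypothetical; real <prefix>.idx / <prefix>.bin pairs are produced by fairseq-preprocess, and the sketch assumes the snippet above is importable.

# Hypothetical prefix: fairseq-preprocess writes <prefix>.idx / <prefix>.bin pairs.
prefix = "data-bin/train.en-de.en"

impl = infer_dataset_impl(prefix)  # peeks at the index header magic bytes
if impl == "mmap":
    ds = MMapIndexedDataset(prefix)
    print(len(ds))     # number of sentences
    print(ds[0][:10])  # first ten token ids of sentence 0, as int64
elif impl == "cached":
    ds = IndexedDataset(prefix, fix_lua_indexing=True)
    print(ds[0])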
183,888
import shutil
import struct
from typing import Union

import numpy as np

# IndexedDataset, MMapIndexedDataset, index_file_path, data_file_path,
# write_longs and _dtype_header_code are defined in the preceding snippet
# (same module, fairseq/data/indexed_dataset.py).


def best_fitting_int_dtype(
    max_int_to_represent,
) -> Union[np.uint16, np.uint32, np.int64]:
    if max_int_to_represent is None:
        return np.uint32  # Safe guess
    elif max_int_to_represent < 65500:
        return np.uint16
    elif max_int_to_represent < 4294967295:
        return np.uint32
    else:
        return np.int64
        # we avoid np.uint64 because it doesn't save space and its type promotion behaves unexpectedly
        # https://github.com/numpy/numpy/issues/5745


class IndexedDatasetBuilder:
    # Bytes per element for each supported dtype. np.float32/np.float64 replace
    # the np.float/np.double aliases removed from recent NumPy releases.
    element_sizes = {
        np.uint8: 1,
        np.int8: 1,
        np.int16: 2,
        np.int32: 4,
        np.int64: 8,
        np.float32: 4,
        np.float64: 8,
    }

    def __init__(self, out_file, dtype=np.int32):
        self.out_file = open(out_file, "wb")
        self.dtype = dtype
        self.data_offsets = [0]
        self.dim_offsets = [0]
        self.sizes = []
        self.element_size = self.element_sizes[self.dtype]

    def add_item(self, tensor):
        # +1 for Lua compatibility
        bytes_written = self.out_file.write(
            np.array(tensor.numpy() + 1, dtype=self.dtype)
        )
        # offsets are counted in elements, so use integer division
        self.data_offsets.append(
            self.data_offsets[-1] + bytes_written // self.element_size
        )
        for s in tensor.size():
            self.sizes.append(s)
        self.dim_offsets.append(self.dim_offsets[-1] + len(tensor.size()))

    def merge_file_(self, another_file):
        index = IndexedDataset(another_file)
        assert index.dtype == self.dtype

        begin = self.data_offsets[-1]
        for offset in index.data_offsets[1:]:
            self.data_offsets.append(begin + offset)
        self.sizes.extend(index.sizes)
        begin = self.dim_offsets[-1]
        for dim_offset in index.dim_offsets[1:]:
            self.dim_offsets.append(begin + dim_offset)

        with open(data_file_path(another_file), "rb") as f:
            while True:
                data = f.read(1024)
                if data:
                    self.out_file.write(data)
                else:
                    break

    def finalize(self, index_file):
        self.out_file.close()
        index = open(index_file, "wb")
        index.write(b"TNTIDX\x00\x00")
        index.write(struct.pack("<Q", 1))
        index.write(
            struct.pack("<QQ", _dtype_header_code(self.dtype), self.element_size)
        )
        index.write(struct.pack("<QQ", len(self.data_offsets) - 1, len(self.sizes)))
        write_longs(index, self.dim_offsets)
        write_longs(index, self.data_offsets)
        write_longs(index, self.sizes)
        index.close()


class MMapIndexedDatasetBuilder:
    def __init__(self, out_file, dtype=np.int64):
        self._data_file = open(out_file, "wb")
        self._dtype = dtype
        self._sizes = []

    def add_item(self, tensor):
        np_array = np.array(tensor.numpy(), dtype=self._dtype)
        self._data_file.write(np_array.tobytes(order="C"))
        self._sizes.append(np_array.size)

    def merge_file_(self, another_file):
        # Concatenate index
        index = MMapIndexedDataset.Index(index_file_path(another_file))
        assert index.dtype == self._dtype

        for size in index.sizes:
            self._sizes.append(size)

        # Concatenate data
        with open(data_file_path(another_file), "rb") as f:
            shutil.copyfileobj(f, self._data_file)

    def finalize(self, index_file):
        self._data_file.close()

        with MMapIndexedDataset.Index.writer(index_file, self._dtype) as index:
            index.write(self._sizes)


def make_builder(out_file, impl, vocab_size=None):
    if impl == "mmap":
        return MMapIndexedDatasetBuilder(
            out_file, dtype=best_fitting_int_dtype(vocab_size)
        )
    elif impl == "fasta":
        raise NotImplementedError
    else:
        return IndexedDatasetBuilder(out_file)
null
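To close the loop, a hedged round-trip sketch using the mmap builder. The "toy" file names are illustrative assumptions; note the reader expects the <prefix>.bin / <prefix>.idx naming produced by data_file_path/index_file_path, and the sketch assumes both snippets above are importable.

import torch

# Write two "sentences" with a dtype sized for a 32k vocabulary (np.uint16 here),
# then memory-map them back. "toy" is a hypothetical prefix.
builder = make_builder("toy.bin", impl="mmap", vocab_size=32000)
builder.add_item(torch.LongTensor([4, 5, 6, 2]))
builder.add_item(torch.LongTensor([7, 8, 2]))
builder.finalize("toy.idx")

ds = MMapIndexedDataset("toy")
assert len(ds) == 2
assert ds[1].tolist() == [7, 8, 2]  # items come back as int64 tensors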