Dataset columns: id (int64, ranging 0 to 190k), prompt (string, 21 to 13.4M characters), docstring (string, 1 to 12k characters, nullable).
18,956
from collections import namedtuple import random import os import csv import torch import nltk from nltk import tokenize as nltk_tokenize import sentencepiece as spm from .wordpiece import BertTokenizer, PRETRAINED_VOCAB_ARCHIVE_MAP from .tokenization_gpt2 import GPT2Tokenizer import regex as re class TypeToken(object): def __init__(self, name, token, Id): self.name = name self.token = token self.Id = Id def __str__(self): return str(TYPE_TUPLE(self.name, self.token, self.Id)) def prep_type_tokens(tokenlist, token_format=token_format): return [TypeToken(tok[0], token_format.format(tok[0]), tok[1]) for tok in tokenlist]
null
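The snippet above references TYPE_TUPLE and a module-level token_format that the dataset entry does not include. A minimal self-contained reconstruction, with both missing names treated as assumptions (a namedtuple and a hypothetical "<{0}>" wrapper format), behaves like this:

from collections import namedtuple

# Hypothetical stand-ins for the definitions the entry elides.
TYPE_TUPLE = namedtuple('TypeToken', ('name', 'token', 'Id'))
token_format = "<{0}>"  # assumed wrapper: name 'str0' -> token '<str0>'

class TypeToken(object):
    def __init__(self, name, token, Id):
        self.name = name
        self.token = token
        self.Id = Id

    def __str__(self):
        return str(TYPE_TUPLE(self.name, self.token, self.Id))

def prep_type_tokens(tokenlist, fmt=token_format):
    # tokenlist holds (name, id) pairs
    return [TypeToken(tok[0], fmt.format(tok[0]), tok[1]) for tok in tokenlist]

print(prep_type_tokens([('str0', 0)])[0])
# TypeToken(name='str0', token='<str0>', Id=0)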
18,969
import torch from torch import nn from torch.autograd import Variable from torch.nn.parameter import Parameter FLOAT_TYPES = (torch.FloatTensor, torch.cuda.FloatTensor) def conversion_helper(val, conversion): """Apply conversion to val. Recursively apply conversion if `val` is a nested tuple/list structure.""" if not isinstance(val, (tuple, list)): return conversion(val) rtn = [conversion_helper(v, conversion) for v in val] if isinstance(val, tuple): rtn = tuple(rtn) return rtn The provided code snippet includes necessary dependencies for implementing the `fp32_to_fp16` function. Write a Python function `def fp32_to_fp16(val)` to solve the following problem: Convert fp32 `val` to fp16 Here is the function: def fp32_to_fp16(val): """Convert fp32 `val` to fp16""" def half_conversion(val): val_typecheck = val if isinstance(val_typecheck, (Parameter, Variable)): val_typecheck = val.data if isinstance(val_typecheck, FLOAT_TYPES): val = val.half() return val return conversion_helper(val, half_conversion)
Convert fp32 `val` to fp16
18,970
import torch from torch import nn from torch.autograd import Variable from torch.nn.parameter import Parameter HALF_TYPES = (torch.HalfTensor, torch.cuda.HalfTensor) def conversion_helper(val, conversion): """Apply conversion to val. Recursively apply conversion if `val` is a nested tuple/list structure.""" if not isinstance(val, (tuple, list)): return conversion(val) rtn = [conversion_helper(v, conversion) for v in val] if isinstance(val, tuple): rtn = tuple(rtn) return rtn The provided code snippet includes necessary dependencies for implementing the `fp16_to_fp32` function. Write a Python function `def fp16_to_fp32(val)` to solve the following problem: Convert fp16 `val` to fp32 Here is the function: def fp16_to_fp32(val): """Convert fp16 `val` to fp32""" def float_conversion(val): val_typecheck = val if isinstance(val_typecheck, (Parameter, Variable)): val_typecheck = val.data if isinstance(val_typecheck, HALF_TYPES): val = val.float() return val return conversion_helper(val, float_conversion)
Convert fp16 `val` to fp32
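The two converters above are inverses built on the same conversion_helper recursion. A dtype-based sketch (an assumption that sidesteps the legacy torch.FloatTensor/torch.HalfTensor type tuples, which newer PyTorch deprecates) shows the round trip over a nested structure:

import torch

def conversion_helper(val, conversion):
    # Recursively apply `conversion` across a nested tuple/list structure.
    if not isinstance(val, (tuple, list)):
        return conversion(val)
    rtn = [conversion_helper(v, conversion) for v in val]
    return tuple(rtn) if isinstance(val, tuple) else rtn

def fp32_to_fp16(val):
    return conversion_helper(
        val, lambda t: t.half() if torch.is_tensor(t) and t.dtype == torch.float32 else t)

def fp16_to_fp32(val):
    return conversion_helper(
        val, lambda t: t.float() if torch.is_tensor(t) and t.dtype == torch.float16 else t)

nested = (torch.randn(2), [torch.randn(3), torch.randn(4)])
halved = fp32_to_fp16(nested)
assert halved[0].dtype == torch.float16 and halved[1][0].dtype == torch.float16
restored = fp16_to_fp32(halved)
assert restored[1][1].dtype == torch.float32  # structure and precision restored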
18,971
from __future__ import absolute_import, division, print_function, unicode_literals import os import copy import json import math import logging import tarfile import tempfile import shutil import torch from torch import nn import torch.nn.functional as F from torch.nn import CrossEntropyLoss from data_utils.file_utils import cached_path import mpu import copy from deepspeed.utils.timer import SynchronizedWallClockTimer def normal_init_method(mean, std): def init_(tensor): return torch.nn.init.normal_(tensor, mean=mean, std=std) return init_
null
18,972
from __future__ import absolute_import, division, print_function, unicode_literals import os import copy import json import math import logging import tarfile import tempfile import shutil import torch from torch import nn import torch.nn.functional as F from torch.nn import CrossEntropyLoss from data_utils.file_utils import cached_path import mpu import copy from deepspeed.utils.timer import SynchronizedWallClockTimer The provided code snippet includes necessary dependencies for implementing the `scaled_init_method` function. Write a Python function `def scaled_init_method(mean, std, num_layers)` to solve the following problem: Init method based on N(0, sigma/sqrt(2*num_layers). Here is the function: def scaled_init_method(mean, std, num_layers): """Init method based on N(0, sigma/sqrt(2*num_layers).""" std = std / math.sqrt(2.0 * num_layers) def init_(tensor): return torch.nn.init.normal_(tensor, mean=mean, std=std) return init_
Init method based on N(0, sigma/sqrt(2*num_layers)).
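The effective standard deviation is easy to check numerically; for example, with the common std=0.02 and num_layers=24 it comes out to 0.02/sqrt(48) ≈ 0.00289. A quick verification, reusing the definition above:

import math
import torch

def scaled_init_method(mean, std, num_layers):
    """Init method based on N(0, sigma/sqrt(2*num_layers))."""
    std = std / math.sqrt(2.0 * num_layers)
    def init_(tensor):
        return torch.nn.init.normal_(tensor, mean=mean, std=std)
    return init_

init_ = scaled_init_method(0.0, 0.02, 24)
w = init_(torch.empty(1024, 1024))
print(round(w.std().item(), 5))  # ~0.00289, i.e. 0.02 / sqrt(2 * 24)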
18,973
from __future__ import absolute_import, division, print_function, unicode_literals import os import copy import json import math import logging import tarfile import tempfile import shutil import torch from torch import nn import torch.nn.functional as F from torch.nn import CrossEntropyLoss from data_utils.file_utils import cached_path import mpu import copy from deepspeed.utils.timer import SynchronizedWallClockTimer The provided code snippet includes necessary dependencies for implementing the `load_tf_weights_in_bert` function. Write a Python function `def load_tf_weights_in_bert(model, tf_checkpoint_path)` to solve the following problem: Load tf checkpoints in a pytorch model Here is the function: def load_tf_weights_in_bert(model, tf_checkpoint_path): """ Load tf checkpoints in a pytorch model """ try: import re import numpy as np import tensorflow as tf except ImportError: print("Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see " "https://www.tensorflow.org/install/ for installation instructions.") raise tf_path = os.path.abspath(tf_checkpoint_path) print("Converting TensorFlow checkpoint from {}".format(tf_path)) # Load weights from TF model init_vars = tf.train.list_variables(tf_path) names = [] arrays = [] for name, shape in init_vars: print("Loading TF weight {} with shape {}".format(name, shape)) array = tf.train.load_variable(tf_path, name) names.append(name) arrays.append(array) for name, array in zip(names, arrays): name = name.split('/') # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v # which are not required for using pretrained model if any(n in ["adam_v", "adam_m"] for n in name): print("Skipping {}".format("/".join(name))) continue pointer = model for m_name in name: if re.fullmatch(r'[A-Za-z]+_\d+', m_name): l = re.split(r'_(\d+)', m_name) else: l = [m_name] if l[0] == 'kernel' or l[0] == 'gamma': pointer = getattr(pointer, 'weight') elif l[0] == 'output_bias' or l[0] == 'beta': pointer = getattr(pointer, 'bias') elif l[0] == 'output_weights': pointer = getattr(pointer, 'weight') else: pointer = getattr(pointer, l[0]) if len(l) >= 2: num = int(l[1]) pointer = pointer[num] if m_name[-11:] == '_embeddings': pointer = getattr(pointer, 'weight') elif m_name == 'kernel': array = np.transpose(array) try: assert pointer.shape == array.shape except AssertionError as e: e.args += (pointer.shape, array.shape) raise print("Initialize PyTorch weight {}".format(name)) pointer.data = torch.from_numpy(array) return model
Load tf checkpoints in a pytorch model
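The trickiest step in the loader above is mapping TF variable names like "layer_11" onto a PyTorch module attribute plus an index. The regex split behaves as follows:

import re

m_name = "layer_11"
if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
    l = re.split(r"_(\d+)", m_name)
print(l)  # ['layer', '11', ''] -> getattr(pointer, 'layer'), then pointer[11]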
18,974
from __future__ import absolute_import, division, print_function, unicode_literals import os import copy import json import math import logging import tarfile import tempfile import shutil import torch from torch import nn import torch.nn.functional as F from torch.nn import CrossEntropyLoss from data_utils.file_utils import cached_path import mpu import copy from deepspeed.utils.timer import SynchronizedWallClockTimer The provided code snippet includes necessary dependencies for implementing the `gelu` function. Write a Python function `def gelu(x)` to solve the following problem: Implementation of the gelu activation function. For information: OpenAI GPT's gelu is slightly different (and gives slightly different results): 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) Here is the function: def gelu(x): """Implementation of the gelu activation function. For information: OpenAI GPT's gelu is slightly different (and gives slightly different results): 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) """ return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
Implementation of the gelu activation function. For information: OpenAI GPT's gelu is slightly different (and gives slightly different results): 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
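The numerical gap between the exact erf form and the GPT tanh approximation quoted in the docstring is small; a direct comparison:

import math
import torch

def gelu_erf(x):  # exact form used above
    return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))

def gelu_tanh(x):  # OpenAI GPT approximation from the docstring
    return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))

x = torch.linspace(-4, 4, steps=801)
print((gelu_erf(x) - gelu_tanh(x)).abs().max().item())  # on the order of 1e-3 or smaller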
18,975
from __future__ import absolute_import, division, print_function, unicode_literals import os import copy import json import math import logging import tarfile import tempfile import shutil import torch from torch import nn import torch.nn.functional as F from torch.nn import CrossEntropyLoss from data_utils.file_utils import cached_path import mpu import copy from deepspeed.utils.timer import SynchronizedWallClockTimer def swish(x): return x * torch.sigmoid(x)
null
18,976
import copy import torch import data_utils import random import mpu from data_utils.wordpiece import BertTokenizer from torch.utils.data import Subset def make_data_loader(dataset, batch_size, args): shuffle = args.shuffle if shuffle: #if not args.struct_bert_dataset and not args.palm_dataset: # sampler = data_utils.samplers.RandomSampler(dataset, replacement=True, num_samples=batch_size*args.train_iters) #else: if 1: sampler = data_utils.samplers.RandomSampler(dataset, replacement=False) else: sampler = torch.utils.data.SequentialSampler(dataset) world_size = torch.distributed.get_world_size( group=mpu.get_data_parallel_group()) rank = torch.distributed.get_rank(group=mpu.get_data_parallel_group()) distributed = world_size > 1 drop_last = distributed if not args.struct_bert_dataset and not args.palm_dataset and not args.image_dataset: if distributed: batch_sampler = data_utils.samplers.DistributedBatchSampler(sampler, batch_size, shuffle, #if not shuffle, than don't drop_last rank, world_size) else: batch_sampler = torch.utils.data.BatchSampler(sampler, batch_size, shuffle) #if not shuffle, than don't drop_last data_loader = torch.utils.data.DataLoader(dataset, batch_sampler=batch_sampler, num_workers=args.num_workers, pin_memory=True) elif args.image_dataset: _worker_init_fn = WorkerInitObj(args.seed + torch.distributed.get_rank()) batch_sampler = data_utils.samplers.DistributedBatchSampler(sampler, batch_size, shuffle, #if not shuffle, than don't drop_last rank, world_size) data_loader = torch.utils.data.DataLoader(dataset, batch_sampler=batch_sampler, num_workers=args.num_workers, pin_memory=True, collate_fn=ImageBatchify, worker_init_fn=_worker_init_fn) else: _worker_init_fn = WorkerInitObj(args.seed + torch.distributed.get_rank()) data_loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, sampler=sampler, num_workers=args.num_workers, pin_memory=True, collate_fn=batchify if args.struct_bert_dataset else PalmBatchify, worker_init_fn=_worker_init_fn) return data_loader class BertTokenizer(object): """Runs end-to-end tokenization: punctuation splitting + wordpiece""" def __init__(self, vocab_file, do_lower_case=True, max_len=None, do_basic_tokenize=True, never_split=("[UNK]", "[SEP]", "[PAD]", "[CLS]", "[MASK]")): """Constructs a BertTokenizer. Args: vocab_file: Path to a one-wordpiece-per-line vocabulary file do_lower_case: Whether to lower case the input Only has an effect when do_wordpiece_only=False do_basic_tokenize: Whether to do basic tokenization before wordpiece. max_len: An artificial maximum length to truncate tokenized sequences to; Effective maximum length is always the minimum of this value (if specified) and the underlying BERT model's sequence length. never_split: List of tokens which will never be split during tokenization. Only has an effect when do_wordpiece_only=False """ if not os.path.isfile(vocab_file): raise ValueError( "Can't find a vocabulary file at path '{}'. 
To load the vocabulary from a Google pretrained " "model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`".format(vocab_file)) self.vocab = load_vocab(vocab_file) self.ids_to_tokens = collections.OrderedDict( [(ids, tok) for tok, ids in self.vocab.items()]) self.do_basic_tokenize = do_basic_tokenize if do_basic_tokenize: self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case, never_split=never_split) self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab) self.max_len = max_len if max_len is not None else int(1e12) def tokenize(self, text): if self.do_basic_tokenize: split_tokens = [] for token in self.basic_tokenizer.tokenize(text): for sub_token in self.wordpiece_tokenizer.tokenize(token): split_tokens.append(sub_token) else: split_tokens = self.wordpiece_tokenizer.tokenize(text) return split_tokens def convert_tokens_to_ids(self, tokens): """Converts a sequence of tokens into ids using the vocab.""" ids = [] for token in tokens: ids.append(self.vocab[token]) if len(ids) > self.max_len: logger.warning( "Token indices sequence length is longer than the specified maximum " " sequence length for this BERT model ({} > {}). Running this" " sequence through BERT will result in indexing errors".format(len(ids), self.max_len) ) return ids def convert_ids_to_tokens(self, ids): """Converts a sequence of ids in wordpiece tokens using the vocab.""" tokens = [] for i in ids: tokens.append(self.ids_to_tokens[i]) return tokens def from_pretrained(cls, pretrained_model_name_or_path, cache_dir=None, *inputs, **kwargs): """ Instantiate a PreTrainedBertModel from a pre-trained model file. Download and cache the pre-trained model file if needed. """ if pretrained_model_name_or_path in PRETRAINED_VOCAB_ARCHIVE_MAP: vocab_file = PRETRAINED_VOCAB_ARCHIVE_MAP[pretrained_model_name_or_path] else: vocab_file = pretrained_model_name_or_path if os.path.isdir(vocab_file): vocab_file = os.path.join(vocab_file, VOCAB_NAME) # redirect to the cache, if necessary try: resolved_vocab_file = cached_path(vocab_file, cache_dir=cache_dir) except EnvironmentError: logger.error( "Model name '{}' was not found in model name list ({}). " "We assumed '{}' was a path or url but couldn't find any file " "associated to this path or url.".format( pretrained_model_name_or_path, ', '.join(PRETRAINED_VOCAB_ARCHIVE_MAP.keys()), vocab_file)) return None if resolved_vocab_file == vocab_file: logger.info("loading vocabulary file {}".format(vocab_file)) else: logger.info("loading vocabulary file {} from cache at {}".format( vocab_file, resolved_vocab_file)) if pretrained_model_name_or_path in PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP: # if we're using a pretrained model, ensure the tokenizer wont index sequences longer # than the number of positional embeddings max_len = PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP[pretrained_model_name_or_path] kwargs['max_len'] = min(kwargs.get('max_len', int(1e12)), max_len) # Instantiate tokenizer. 
tokenizer = cls(resolved_vocab_file, *inputs, **kwargs) return tokenizer def make_downstream_loaders(args, train, valid, test): world_size = torch.distributed.get_world_size( group=mpu.get_data_parallel_group()) batch_size = args.batch_size * world_size eval_batch_size = args.eval_batch_size * world_size tokenizer = BertTokenizer.from_pretrained(args.tokenizer_model_type) tokenizer.num_tokens = len(tokenizer.vocab) tokenizer.num_type_tokens = 3 args.do_train = True args.do_valid = True args.do_test = True train = make_data_loader(train, batch_size, args) valid = make_data_loader(valid, eval_batch_size, args) shuffle = args.shuffle args.shuffle = False test = make_data_loader(test, eval_batch_size, args) args.shuffle = shuffle return (train, valid, test), tokenizer
null
18,977
import copy import torch import data_utils import random import mpu from data_utils.wordpiece import BertTokenizer from torch.utils.data import Subset def make_data_loader(dataset, batch_size, args): shuffle = args.shuffle if shuffle: #if not args.struct_bert_dataset and not args.palm_dataset: # sampler = data_utils.samplers.RandomSampler(dataset, replacement=True, num_samples=batch_size*args.train_iters) #else: if 1: sampler = data_utils.samplers.RandomSampler(dataset, replacement=False) else: sampler = torch.utils.data.SequentialSampler(dataset) world_size = torch.distributed.get_world_size( group=mpu.get_data_parallel_group()) rank = torch.distributed.get_rank(group=mpu.get_data_parallel_group()) distributed = world_size > 1 drop_last = distributed if not args.struct_bert_dataset and not args.palm_dataset and not args.image_dataset: if distributed: batch_sampler = data_utils.samplers.DistributedBatchSampler(sampler, batch_size, shuffle, #if not shuffle, than don't drop_last rank, world_size) else: batch_sampler = torch.utils.data.BatchSampler(sampler, batch_size, shuffle) #if not shuffle, than don't drop_last data_loader = torch.utils.data.DataLoader(dataset, batch_sampler=batch_sampler, num_workers=args.num_workers, pin_memory=True) elif args.image_dataset: _worker_init_fn = WorkerInitObj(args.seed + torch.distributed.get_rank()) batch_sampler = data_utils.samplers.DistributedBatchSampler(sampler, batch_size, shuffle, #if not shuffle, than don't drop_last rank, world_size) data_loader = torch.utils.data.DataLoader(dataset, batch_sampler=batch_sampler, num_workers=args.num_workers, pin_memory=True, collate_fn=ImageBatchify, worker_init_fn=_worker_init_fn) else: _worker_init_fn = WorkerInitObj(args.seed + torch.distributed.get_rank()) data_loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, sampler=sampler, num_workers=args.num_workers, pin_memory=True, collate_fn=batchify if args.struct_bert_dataset else PalmBatchify, worker_init_fn=_worker_init_fn) return data_loader def get_split(args): """ Get dataset splits from comma separated string list """ splits = [] if args.split.find(',') != -1: splits = [float(s) for s in args.split.split(',')] elif args.split.find('/') != -1: splits = [float(s) for s in args.split.split('/')] else: splits = [float(args.split)] split_total = sum(splits) if split_total < 1.: splits.append(1-split_total) while len(splits) < 3: splits.append(0.) splits = splits[:3] if args.valid_data is not None: splits[1] = 0. if args.test_data is not None: splits[2] = 0. final_sum = sum(splits) return [s/final_sum for s in splits] class BertTokenizer(object): """Runs end-to-end tokenization: punctuation splitting + wordpiece""" def __init__(self, vocab_file, do_lower_case=True, max_len=None, do_basic_tokenize=True, never_split=("[UNK]", "[SEP]", "[PAD]", "[CLS]", "[MASK]")): """Constructs a BertTokenizer. Args: vocab_file: Path to a one-wordpiece-per-line vocabulary file do_lower_case: Whether to lower case the input Only has an effect when do_wordpiece_only=False do_basic_tokenize: Whether to do basic tokenization before wordpiece. max_len: An artificial maximum length to truncate tokenized sequences to; Effective maximum length is always the minimum of this value (if specified) and the underlying BERT model's sequence length. never_split: List of tokens which will never be split during tokenization. Only has an effect when do_wordpiece_only=False """ if not os.path.isfile(vocab_file): raise ValueError( "Can't find a vocabulary file at path '{}'. 
To load the vocabulary from a Google pretrained " "model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`".format(vocab_file)) self.vocab = load_vocab(vocab_file) self.ids_to_tokens = collections.OrderedDict( [(ids, tok) for tok, ids in self.vocab.items()]) self.do_basic_tokenize = do_basic_tokenize if do_basic_tokenize: self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case, never_split=never_split) self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab) self.max_len = max_len if max_len is not None else int(1e12) def tokenize(self, text): if self.do_basic_tokenize: split_tokens = [] for token in self.basic_tokenizer.tokenize(text): for sub_token in self.wordpiece_tokenizer.tokenize(token): split_tokens.append(sub_token) else: split_tokens = self.wordpiece_tokenizer.tokenize(text) return split_tokens def convert_tokens_to_ids(self, tokens): """Converts a sequence of tokens into ids using the vocab.""" ids = [] for token in tokens: ids.append(self.vocab[token]) if len(ids) > self.max_len: logger.warning( "Token indices sequence length is longer than the specified maximum " " sequence length for this BERT model ({} > {}). Running this" " sequence through BERT will result in indexing errors".format(len(ids), self.max_len) ) return ids def convert_ids_to_tokens(self, ids): """Converts a sequence of ids in wordpiece tokens using the vocab.""" tokens = [] for i in ids: tokens.append(self.ids_to_tokens[i]) return tokens def from_pretrained(cls, pretrained_model_name_or_path, cache_dir=None, *inputs, **kwargs): """ Instantiate a PreTrainedBertModel from a pre-trained model file. Download and cache the pre-trained model file if needed. """ if pretrained_model_name_or_path in PRETRAINED_VOCAB_ARCHIVE_MAP: vocab_file = PRETRAINED_VOCAB_ARCHIVE_MAP[pretrained_model_name_or_path] else: vocab_file = pretrained_model_name_or_path if os.path.isdir(vocab_file): vocab_file = os.path.join(vocab_file, VOCAB_NAME) # redirect to the cache, if necessary try: resolved_vocab_file = cached_path(vocab_file, cache_dir=cache_dir) except EnvironmentError: logger.error( "Model name '{}' was not found in model name list ({}). " "We assumed '{}' was a path or url but couldn't find any file " "associated to this path or url.".format( pretrained_model_name_or_path, ', '.join(PRETRAINED_VOCAB_ARCHIVE_MAP.keys()), vocab_file)) return None if resolved_vocab_file == vocab_file: logger.info("loading vocabulary file {}".format(vocab_file)) else: logger.info("loading vocabulary file {} from cache at {}".format( vocab_file, resolved_vocab_file)) if pretrained_model_name_or_path in PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP: # if we're using a pretrained model, ensure the tokenizer wont index sequences longer # than the number of positional embeddings max_len = PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP[pretrained_model_name_or_path] kwargs['max_len'] = min(kwargs.get('max_len', int(1e12)), max_len) # Instantiate tokenizer. 
tokenizer = cls(resolved_vocab_file, *inputs, **kwargs) return tokenizer def make_structbert_loaders(args): #world_size = torch.distributed.get_world_size( # group=mpu.get_data_parallel_group()) #batch_size = args.batch_size * world_size #we don't need multiple world_size because we don't use distributed batch sampler batch_size = args.batch_size eval_batch_size = batch_size if args.eval_batch_size is not None: eval_batch_size = args.eval_batch_size #* world_size seq_length = args.seq_length if seq_length < 0: seq_length = seq_length * world_size eval_seq_length = args.eval_seq_length if eval_seq_length is not None and eval_seq_length < 0: eval_seq_length = eval_seq_length * world_size split = get_split(args) tokenizer = BertTokenizer.from_pretrained(args.tokenizer_model_type) tokenizer.num_tokens = len(tokenizer.vocab) tokenizer.num_type_tokens = 3 args.tokenizer = tokenizer args.cls_token, args.sep_token, args.mask_token = '[CLS]', '[SEP]', '[MASK]' args.vocab_words = list(tokenizer.vocab) #add structbert args args.environ = 'local' args.dataset_has_lang_id = False args.one_sentence = False args.short_seq_prob = 0 args.ns_type = 3 args.jieba = False args.do_whole_word_mask = False args.masked_lm_prob = 0.15 args.do_mask_rate_range = False args.all_token_mlm = False args.predict_context_prob = 0 args.continue_mask_prob = 0 args.shuffle_order_prob = 0 args.tokenizer_type = 'bert' args.do_train = True args.do_valid = True args.do_test = False data_parallel_rank = torch.distributed.get_rank(group=mpu.get_data_parallel_group()) train = HDF5Dataset(args, args.sub_train_lst[data_parallel_rank], args.tokenizer, args.vocab_words, args.train_iters * args.gradient_accumulation_steps * args.batch_size // args.num_epochs, is_training=True) valid = Subset(train, list(range(args.eval_iters * eval_batch_size))) train = make_data_loader(train, batch_size, args) valid = make_data_loader(valid, eval_batch_size, args) return (train, valid, None), tokenizer
null
18,978
import copy import torch import data_utils import random import mpu from data_utils.wordpiece import BertTokenizer from torch.utils.data import Subset def make_data_loader(dataset, batch_size, args): shuffle = args.shuffle if shuffle: #if not args.struct_bert_dataset and not args.palm_dataset: # sampler = data_utils.samplers.RandomSampler(dataset, replacement=True, num_samples=batch_size*args.train_iters) #else: if 1: sampler = data_utils.samplers.RandomSampler(dataset, replacement=False) else: sampler = torch.utils.data.SequentialSampler(dataset) world_size = torch.distributed.get_world_size( group=mpu.get_data_parallel_group()) rank = torch.distributed.get_rank(group=mpu.get_data_parallel_group()) distributed = world_size > 1 drop_last = distributed if not args.struct_bert_dataset and not args.palm_dataset and not args.image_dataset: if distributed: batch_sampler = data_utils.samplers.DistributedBatchSampler(sampler, batch_size, shuffle, #if not shuffle, than don't drop_last rank, world_size) else: batch_sampler = torch.utils.data.BatchSampler(sampler, batch_size, shuffle) #if not shuffle, than don't drop_last data_loader = torch.utils.data.DataLoader(dataset, batch_sampler=batch_sampler, num_workers=args.num_workers, pin_memory=True) elif args.image_dataset: _worker_init_fn = WorkerInitObj(args.seed + torch.distributed.get_rank()) batch_sampler = data_utils.samplers.DistributedBatchSampler(sampler, batch_size, shuffle, #if not shuffle, than don't drop_last rank, world_size) data_loader = torch.utils.data.DataLoader(dataset, batch_sampler=batch_sampler, num_workers=args.num_workers, pin_memory=True, collate_fn=ImageBatchify, worker_init_fn=_worker_init_fn) else: _worker_init_fn = WorkerInitObj(args.seed + torch.distributed.get_rank()) data_loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, sampler=sampler, num_workers=args.num_workers, pin_memory=True, collate_fn=batchify if args.struct_bert_dataset else PalmBatchify, worker_init_fn=_worker_init_fn) return data_loader def get_split(args): """ Get dataset splits from comma separated string list """ splits = [] if args.split.find(',') != -1: splits = [float(s) for s in args.split.split(',')] elif args.split.find('/') != -1: splits = [float(s) for s in args.split.split('/')] else: splits = [float(args.split)] split_total = sum(splits) if split_total < 1.: splits.append(1-split_total) while len(splits) < 3: splits.append(0.) splits = splits[:3] if args.valid_data is not None: splits[1] = 0. if args.test_data is not None: splits[2] = 0. final_sum = sum(splits) return [s/final_sum for s in splits] class BertTokenizer(object): """Runs end-to-end tokenization: punctuation splitting + wordpiece""" def __init__(self, vocab_file, do_lower_case=True, max_len=None, do_basic_tokenize=True, never_split=("[UNK]", "[SEP]", "[PAD]", "[CLS]", "[MASK]")): """Constructs a BertTokenizer. Args: vocab_file: Path to a one-wordpiece-per-line vocabulary file do_lower_case: Whether to lower case the input Only has an effect when do_wordpiece_only=False do_basic_tokenize: Whether to do basic tokenization before wordpiece. max_len: An artificial maximum length to truncate tokenized sequences to; Effective maximum length is always the minimum of this value (if specified) and the underlying BERT model's sequence length. never_split: List of tokens which will never be split during tokenization. Only has an effect when do_wordpiece_only=False """ if not os.path.isfile(vocab_file): raise ValueError( "Can't find a vocabulary file at path '{}'. 
To load the vocabulary from a Google pretrained " "model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`".format(vocab_file)) self.vocab = load_vocab(vocab_file) self.ids_to_tokens = collections.OrderedDict( [(ids, tok) for tok, ids in self.vocab.items()]) self.do_basic_tokenize = do_basic_tokenize if do_basic_tokenize: self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case, never_split=never_split) self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab) self.max_len = max_len if max_len is not None else int(1e12) def tokenize(self, text): if self.do_basic_tokenize: split_tokens = [] for token in self.basic_tokenizer.tokenize(text): for sub_token in self.wordpiece_tokenizer.tokenize(token): split_tokens.append(sub_token) else: split_tokens = self.wordpiece_tokenizer.tokenize(text) return split_tokens def convert_tokens_to_ids(self, tokens): """Converts a sequence of tokens into ids using the vocab.""" ids = [] for token in tokens: ids.append(self.vocab[token]) if len(ids) > self.max_len: logger.warning( "Token indices sequence length is longer than the specified maximum " " sequence length for this BERT model ({} > {}). Running this" " sequence through BERT will result in indexing errors".format(len(ids), self.max_len) ) return ids def convert_ids_to_tokens(self, ids): """Converts a sequence of ids in wordpiece tokens using the vocab.""" tokens = [] for i in ids: tokens.append(self.ids_to_tokens[i]) return tokens def from_pretrained(cls, pretrained_model_name_or_path, cache_dir=None, *inputs, **kwargs): """ Instantiate a PreTrainedBertModel from a pre-trained model file. Download and cache the pre-trained model file if needed. """ if pretrained_model_name_or_path in PRETRAINED_VOCAB_ARCHIVE_MAP: vocab_file = PRETRAINED_VOCAB_ARCHIVE_MAP[pretrained_model_name_or_path] else: vocab_file = pretrained_model_name_or_path if os.path.isdir(vocab_file): vocab_file = os.path.join(vocab_file, VOCAB_NAME) # redirect to the cache, if necessary try: resolved_vocab_file = cached_path(vocab_file, cache_dir=cache_dir) except EnvironmentError: logger.error( "Model name '{}' was not found in model name list ({}). " "We assumed '{}' was a path or url but couldn't find any file " "associated to this path or url.".format( pretrained_model_name_or_path, ', '.join(PRETRAINED_VOCAB_ARCHIVE_MAP.keys()), vocab_file)) return None if resolved_vocab_file == vocab_file: logger.info("loading vocabulary file {}".format(vocab_file)) else: logger.info("loading vocabulary file {} from cache at {}".format( vocab_file, resolved_vocab_file)) if pretrained_model_name_or_path in PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP: # if we're using a pretrained model, ensure the tokenizer wont index sequences longer # than the number of positional embeddings max_len = PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP[pretrained_model_name_or_path] kwargs['max_len'] = min(kwargs.get('max_len', int(1e12)), max_len) # Instantiate tokenizer. 
tokenizer = cls(resolved_vocab_file, *inputs, **kwargs) return tokenizer def make_image_loaders(args): #world_size = torch.distributed.get_world_size( # group=mpu.get_data_parallel_group()) #batch_size = args.batch_size * world_size #we don't need multiple world_size because we don't use distributed batch sampler batch_size = args.batch_size eval_batch_size = batch_size if args.eval_batch_size is not None: eval_batch_size = args.eval_batch_size #* world_size seq_length = args.seq_length if seq_length < 0: seq_length = seq_length * world_size eval_seq_length = args.eval_seq_length if eval_seq_length is not None and eval_seq_length < 0: eval_seq_length = eval_seq_length * world_size split = get_split(args) tokenizer = BertTokenizer.from_pretrained(args.tokenizer_model_type) tokenizer.num_tokens = len(tokenizer.vocab) tokenizer.num_type_tokens = 3 args.tokenizer = tokenizer args.cls_token, args.sep_token, args.mask_token = '[CLS]', '[SEP]', '[MASK]' args.bos_token, args.eos_token = '[CLS]', '[SEP]' args.vocab_words = list(tokenizer.vocab) #add palm args args.start_length = 30 args.tgt_length = 1025 args.full_sent_prob = 0.3 #add structbert args args.environ = 'local' args.dataset_has_lang_id = False args.one_sentence = False args.short_seq_prob = 0 args.ns_type = 3 args.jieba = False args.do_whole_word_mask = False args.masked_lm_prob = 0.15 args.do_mask_rate_range = False args.all_token_mlm = False args.predict_context_prob = 0 args.continue_mask_prob = 0 args.shuffle_order_prob = 0 args.tokenizer_type = 'bert' args.do_train = True args.do_valid = True args.do_test = False data_parallel_rank = torch.distributed.get_rank(group=mpu.get_data_parallel_group()) train = ImageHDF5Dataset(args, args.sub_train_lst[data_parallel_rank], args.tokenizer, args.vocab_words, args.train_iters * args.gradient_accumulation_steps * args.batch_size // args.num_epochs, is_training=True) valid = Subset(train, list(range(args.eval_iters * eval_batch_size))) train = make_data_loader(train, batch_size, args) valid = make_data_loader(valid, eval_batch_size, args) return (train, valid, None), tokenizer
null
18,979
import copy import torch import data_utils import random import mpu from data_utils.wordpiece import BertTokenizer from torch.utils.data import Subset def make_data_loader(dataset, batch_size, args): shuffle = args.shuffle if shuffle: #if not args.struct_bert_dataset and not args.palm_dataset: # sampler = data_utils.samplers.RandomSampler(dataset, replacement=True, num_samples=batch_size*args.train_iters) #else: if 1: sampler = data_utils.samplers.RandomSampler(dataset, replacement=False) else: sampler = torch.utils.data.SequentialSampler(dataset) world_size = torch.distributed.get_world_size( group=mpu.get_data_parallel_group()) rank = torch.distributed.get_rank(group=mpu.get_data_parallel_group()) distributed = world_size > 1 drop_last = distributed if not args.struct_bert_dataset and not args.palm_dataset and not args.image_dataset: if distributed: batch_sampler = data_utils.samplers.DistributedBatchSampler(sampler, batch_size, shuffle, #if not shuffle, than don't drop_last rank, world_size) else: batch_sampler = torch.utils.data.BatchSampler(sampler, batch_size, shuffle) #if not shuffle, than don't drop_last data_loader = torch.utils.data.DataLoader(dataset, batch_sampler=batch_sampler, num_workers=args.num_workers, pin_memory=True) elif args.image_dataset: _worker_init_fn = WorkerInitObj(args.seed + torch.distributed.get_rank()) batch_sampler = data_utils.samplers.DistributedBatchSampler(sampler, batch_size, shuffle, #if not shuffle, than don't drop_last rank, world_size) data_loader = torch.utils.data.DataLoader(dataset, batch_sampler=batch_sampler, num_workers=args.num_workers, pin_memory=True, collate_fn=ImageBatchify, worker_init_fn=_worker_init_fn) else: _worker_init_fn = WorkerInitObj(args.seed + torch.distributed.get_rank()) data_loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, sampler=sampler, num_workers=args.num_workers, pin_memory=True, collate_fn=batchify if args.struct_bert_dataset else PalmBatchify, worker_init_fn=_worker_init_fn) return data_loader def make_tfrecord_loaders(args): """Load train/val/test dataset from shuffled TFRecords""" import data_utils.tf_dl data_set_args = {'batch_size': args.batch_size, 'max_seq_len': args.seq_length, 'max_preds_per_seq': args.max_preds_per_seq, 'train': True, 'num_workers': max(args.num_workers, 1), 'seed': args.seed + args.rank + 1, 'threaded_dl': args.num_workers > 0 } train = data_utils.tf_dl.TFRecordDataLoader(args.train_data, **data_set_args) data_set_args['train'] = False if args.eval_seq_length is not None: data_set_args['max_seq_len'] = args.eval_seq_length if args.eval_max_preds_per_seq is not None: data_set_args['max_preds_per_seq'] = args.eval_max_preds_per_seq valid = None if args.valid_data is not None: valid = data_utils.tf_dl.TFRecordDataLoader(args.valid_data, **data_set_args) test = None if args.test_data is not None: test = data_utils.tf_dl.TFRecordDataLoader(args.test_data, **data_set_args) tokenizer = data_utils.make_tokenizer(args.tokenizer_type, train, args.tokenizer_path, args.vocab_size, args.tokenizer_model_type, cache_dir=args.cache_dir) return (train, valid, test), tokenizer def get_split(args): """ Get dataset splits from comma separated string list """ splits = [] if args.split.find(',') != -1: splits = [float(s) for s in args.split.split(',')] elif args.split.find('/') != -1: splits = [float(s) for s in args.split.split('/')] else: splits = [float(args.split)] split_total = sum(splits) if split_total < 1.: splits.append(1-split_total) while len(splits) < 3: splits.append(0.) 
splits = splits[:3] if args.valid_data is not None: splits[1] = 0. if args.test_data is not None: splits[2] = 0. final_sum = sum(splits) return [s/final_sum for s in splits] The provided code snippet includes necessary dependencies for implementing the `make_loaders` function. Write a Python function `def make_loaders(args)` to solve the following problem: makes training/val/test Here is the function: def make_loaders(args): """makes training/val/test""" if args.use_tfrecords: return make_tfrecord_loaders(args) world_size = torch.distributed.get_world_size( group=mpu.get_data_parallel_group()) batch_size = args.batch_size * world_size eval_batch_size = batch_size if args.eval_batch_size is not None: eval_batch_size = args.eval_batch_size * world_size seq_length = args.seq_length if seq_length < 0: seq_length = seq_length * world_size eval_seq_length = args.eval_seq_length if eval_seq_length is not None and eval_seq_length < 0: eval_seq_length = eval_seq_length * world_size split = get_split(args) data_set_args = { 'path': args.train_data, 'seq_length': seq_length, 'lazy': args.lazy_loader, 'delim': args.delim, 'text_key': args.text_key, 'label_key': 'label', 'non_binary_cols': None, 'ds_type': args.data_set_type, 'split': split, 'loose': args.loose_json, 'tokenizer_type': args.tokenizer_type, 'tokenizer_model_path': args.tokenizer_path, 'vocab_size': args.vocab_size, 'model_type': args.tokenizer_model_type, 'cache_dir': args.cache_dir, 'max_preds_per_seq': args.max_preds_per_seq, 'presplit_sentences': args.presplit_sentences} eval_set_args = copy.copy(data_set_args) eval_set_args['split'] = [1.] # if optional eval args were set then replace their # equivalent values in the arg dict if eval_seq_length: eval_set_args['seq_length'] = eval_seq_length if args.eval_max_preds_per_seq: eval_set_args['max_preds_per_seq'] = args.eval_max_preds_per_seq if args.eval_text_key is not None: eval_set_args['text_key'] = args.eval_text_key # make datasets splits and tokenizer train = None valid = None test = None if args.train_data is not None: print(data_set_args) train, tokenizer = data_utils.make_dataset(**data_set_args) if data_utils.should_split(split): train, valid, test = train eval_set_args['tokenizer'] = tokenizer # make training and val dataset if necessary if valid is None and args.valid_data is not None: eval_set_args['path'] = args.valid_data valid, tokenizer = data_utils.make_dataset(**eval_set_args) eval_set_args['tokenizer'] = tokenizer if test is None and args.test_data is not None: eval_set_args['path'] = args.test_data test, tokenizer = data_utils.make_dataset(**eval_set_args) # wrap datasets with data loader if train is not None and args.batch_size > 0: train = make_data_loader(train, batch_size, args) args.do_train = True else: args.do_train = False eval_batch_size = eval_batch_size if eval_batch_size != 0 else batch_size if valid is not None: valid = make_data_loader(valid, eval_batch_size, args) args.do_valid = True else: args.do_valid = False if test is not None: test = make_data_loader(test, eval_batch_size, args) args.do_test = True else: args.do_test = False return (train, valid, test), tokenizer
Makes training/val/test data loaders.
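get_split, which the loaders above share, always normalizes to three train/valid/test fractions; a quick check with a stub args object (SimpleNamespace standing in for the parsed CLI args):

from types import SimpleNamespace

def get_split(args):
    # reproduced from the entry above
    if args.split.find(',') != -1:
        splits = [float(s) for s in args.split.split(',')]
    elif args.split.find('/') != -1:
        splits = [float(s) for s in args.split.split('/')]
    else:
        splits = [float(args.split)]
    split_total = sum(splits)
    if split_total < 1.:
        splits.append(1 - split_total)
    while len(splits) < 3:
        splits.append(0.)
    splits = splits[:3]
    if args.valid_data is not None:
        splits[1] = 0.
    if args.test_data is not None:
        splits[2] = 0.
    final_sum = sum(splits)
    return [s / final_sum for s in splits]

args = SimpleNamespace(split='90,5,5', valid_data=None, test_data=None)
print(get_split(args))  # [0.9, 0.05, 0.05] after normalization
args = SimpleNamespace(split='0.9', valid_data=None, test_data=None)
print(get_split(args))  # ~[0.9, 0.1, 0.0]: the remainder becomes the valid split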
18,980
import torch def calc_mean_invstddev(feature): def apply_mv_norm(features): # If there are fewer than 2 spectrograms, the variance cannot be computed (is NaN) # and normalization is not possible, so return the item as it is if features.size(0) < 2: return features mean, invstddev = calc_mean_invstddev(features) res = (features - mean) * invstddev return res
null
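The dataset entry above elides the body of calc_mean_invstddev. A plausible reconstruction (an assumption: per-frequency statistics over the time axis with an epsilon for stability) makes apply_mv_norm runnable:

import torch

def calc_mean_invstddev(feature):
    # hypothetical body: mean and inverse stddev over the time axis (dim 0)
    if feature.dim() != 2:
        raise ValueError("expected a 2-D (time x frequency) tensor")
    mean = feature.mean(0)
    var = feature.var(0)
    return mean, 1.0 / (var + 1.0e-8).sqrt()

def apply_mv_norm(features):
    if features.size(0) < 2:  # variance is undefined for a single frame
        return features
    mean, invstddev = calc_mean_invstddev(features)
    return (features - mean) * invstddev

x = torch.randn(100, 80)  # e.g. 100 frames of 80 mel bins
y = apply_mv_norm(x)
print(y.mean(0).abs().max().item() < 1e-5)  # per-bin mean ~ 0 after normalization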
18,981
import torch The provided code snippet includes necessary dependencies for implementing the `lengths_to_encoder_padding_mask` function. Write a Python function `def lengths_to_encoder_padding_mask(lengths, batch_first=False)` to solve the following problem: convert lengths (a 1-D Long/Int tensor) to 2-D binary tensor Args: lengths: a (B, )-shaped tensor Return: max_length: maximum length of B sequences encoder_padding_mask: a (max_length, B) binary mask, where [t, b] = 0 for t < lengths[b] and 1 otherwise TODO: kernelize this function if benchmarking shows this function is slow Here is the function: def lengths_to_encoder_padding_mask(lengths, batch_first=False): """ convert lengths (a 1-D Long/Int tensor) to 2-D binary tensor Args: lengths: a (B, )-shaped tensor Return: max_length: maximum length of B sequences encoder_padding_mask: a (max_length, B) binary mask, where [t, b] = 0 for t < lengths[b] and 1 otherwise TODO: kernelize this function if benchmarking shows this function is slow """ max_lengths = torch.max(lengths).item() bsz = lengths.size(0) encoder_padding_mask = torch.arange( max_lengths ).to( # a (T, ) tensor with [0, ..., T-1] lengths.device ).view( # move to the right device 1, max_lengths ).expand( # reshape to (1, T)-shaped tensor bsz, -1 ) >= lengths.view( # expand to (B, T)-shaped tensor bsz, 1 ).expand( -1, max_lengths ) if not batch_first: return encoder_padding_mask.t(), max_lengths else: return encoder_padding_mask, max_lengths
convert lengths (a 1-D Long/Int tensor) to 2-D binary tensor Args: lengths: a (B, )-shaped tensor Return: max_length: maximum length of B sequences encoder_padding_mask: a (max_length, B) binary mask, where [t, b] = 0 for t < lengths[b] and 1 otherwise TODO: kernelize this function if benchmarking shows this function is slow
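The function's core is a single broadcast comparison between an arange over time and the lengths. A two-line sketch reproduces the expected mask for lengths [2, 4, 3] (shown in the batch_first=True orientation; the default returns the transpose):

import torch

lengths = torch.tensor([2, 4, 3])  # B = 3 sequences
max_len = int(lengths.max())       # 4
# broadcast [0..T-1] against lengths; True (1) marks padding positions
mask = torch.arange(max_len).unsqueeze(0).expand(len(lengths), -1) >= lengths.unsqueeze(1)
print(mask.int())
# tensor([[0, 0, 1, 1],
#         [0, 0, 0, 0],
#         [0, 0, 0, 1]], dtype=torch.int32)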
18,982
import torch The provided code snippet includes necessary dependencies for implementing the `encoder_padding_mask_to_lengths` function. Write a Python function `def encoder_padding_mask_to_lengths( encoder_padding_mask, max_lengths, batch_size, device )` to solve the following problem: convert encoder_padding_mask (2-D binary tensor) to a 1-D tensor Conventionally, encoder output contains a encoder_padding_mask, which is a 2-D mask in a shape (T, B), whose (t, b) element indicate whether encoder_out[t, b] is a valid output (=0) or not (=1). Occasionally, we need to convert this mask tensor to a 1-D tensor in shape (B, ), where [b] denotes the valid length of b-th sequence Args: encoder_padding_mask: a (T, B)-shaped binary tensor or None; if None, indicating all are valid Return: seq_lengths: a (B,)-shaped tensor, where its (b, )-th element is the number of valid elements of b-th sequence max_lengths: maximum length of all sequence, if encoder_padding_mask is not None, max_lengths must equal to encoder_padding_mask.size(0) batch_size: batch size; if encoder_padding_mask is not None, max_lengths must equal to encoder_padding_mask.size(1) device: which device to put the result on Here is the function: def encoder_padding_mask_to_lengths( encoder_padding_mask, max_lengths, batch_size, device ): """ convert encoder_padding_mask (2-D binary tensor) to a 1-D tensor Conventionally, encoder output contains a encoder_padding_mask, which is a 2-D mask in a shape (T, B), whose (t, b) element indicate whether encoder_out[t, b] is a valid output (=0) or not (=1). Occasionally, we need to convert this mask tensor to a 1-D tensor in shape (B, ), where [b] denotes the valid length of b-th sequence Args: encoder_padding_mask: a (T, B)-shaped binary tensor or None; if None, indicating all are valid Return: seq_lengths: a (B,)-shaped tensor, where its (b, )-th element is the number of valid elements of b-th sequence max_lengths: maximum length of all sequence, if encoder_padding_mask is not None, max_lengths must equal to encoder_padding_mask.size(0) batch_size: batch size; if encoder_padding_mask is not None, max_lengths must equal to encoder_padding_mask.size(1) device: which device to put the result on """ if encoder_padding_mask is None: return torch.Tensor([max_lengths] * batch_size).to(torch.int32).to(device) assert encoder_padding_mask.size(0) == max_lengths, "max_lengths does not match" assert encoder_padding_mask.size(1) == batch_size, "batch_size does not match" return max_lengths - torch.sum(encoder_padding_mask, dim=0)
convert encoder_padding_mask (2-D binary tensor) to a 1-D tensor Conventionally, encoder output contains a encoder_padding_mask, which is a 2-D mask in a shape (T, B), whose (t, b) element indicate whether encoder_out[t, b] is a valid output (=0) or not (=1). Occasionally, we need to convert this mask tensor to a 1-D tensor in shape (B, ), where [b] denotes the valid length of b-th sequence Args: encoder_padding_mask: a (T, B)-shaped binary tensor or None; if None, indicating all are valid Return: seq_lengths: a (B,)-shaped tensor, where its (b, )-th element is the number of valid elements of b-th sequence max_lengths: maximum length of all sequence, if encoder_padding_mask is not None, max_lengths must equal to encoder_padding_mask.size(0) batch_size: batch size; if encoder_padding_mask is not None, max_lengths must equal to encoder_padding_mask.size(1) device: which device to put the result on
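Going back the other way is just column sums: valid length = T minus the number of padded positions. Continuing the lengths [2, 4, 3] example in (T, B) layout:

import torch

# (T, B) padding mask for sequences of length 2, 4, 3 (1 = padding)
mask = torch.tensor([[0, 0, 0],
                     [0, 0, 0],
                     [1, 0, 0],
                     [1, 0, 1]])
lengths = mask.size(0) - torch.sum(mask, dim=0)  # the function's core
print(lengths)  # tensor([2, 4, 3])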
18,983
def replabel_symbol(i): """ Replabel symbols used in wav2letter, currently just "1", "2", ... This prevents training with numeral tokens, so this might change in the future """ return str(i) The provided code snippet includes necessary dependencies for implementing the `pack_replabels` function. Write a Python function `def pack_replabels(tokens, dictionary, max_reps)` to solve the following problem: Pack a token sequence so that repeated symbols are replaced by replabels Here is the function: def pack_replabels(tokens, dictionary, max_reps): """ Pack a token sequence so that repeated symbols are replaced by replabels """ if len(tokens) == 0 or max_reps <= 0: return tokens replabel_value_to_idx = [0] * (max_reps + 1) for i in range(1, max_reps + 1): replabel_value_to_idx[i] = dictionary.index(replabel_symbol(i)) result = [] prev_token = -1 num_reps = 0 for token in tokens: if token == prev_token and num_reps < max_reps: num_reps += 1 else: if num_reps > 0: result.append(replabel_value_to_idx[num_reps]) num_reps = 0 result.append(token) prev_token = token if num_reps > 0: result.append(replabel_value_to_idx[num_reps]) return result
Pack a token sequence so that repeated symbols are replaced by replabels
18,984
def replabel_symbol(i): """ Replabel symbols used in wav2letter, currently just "1", "2", ... This prevents training with numeral tokens, so this might change in the future """ return str(i) The provided code snippet includes necessary dependencies for implementing the `unpack_replabels` function. Write a Python function `def unpack_replabels(tokens, dictionary, max_reps)` to solve the following problem: Unpack a token sequence so that replabels are replaced by repeated symbols Here is the function: def unpack_replabels(tokens, dictionary, max_reps): """ Unpack a token sequence so that replabels are replaced by repeated symbols """ if len(tokens) == 0 or max_reps <= 0: return tokens replabel_idx_to_value = {} for i in range(1, max_reps + 1): replabel_idx_to_value[dictionary.index(replabel_symbol(i))] = i result = [] prev_token = -1 for token in tokens: try: for _ in range(replabel_idx_to_value[token]): result.append(prev_token) prev_token = -1 except KeyError: result.append(token) prev_token = token return result
Unpack a token sequence so that replabels are replaced by repeated symbols
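Pack and unpack are exact inverses as long as no run exceeds max_reps. A round trip with a minimal stand-in for fairseq's Dictionary (the stub class below is an assumption for illustration), using pack_replabels, unpack_replabels, and replabel_symbol from the entries above:

class StubDictionary:
    # minimal stand-in for fairseq's Dictionary: symbol -> integer id
    def __init__(self, symbols):
        self.symbols = list(symbols)

    def index(self, sym):
        return self.symbols.index(sym)

# ids 0-2 are ordinary tokens; replabels "1" and "2" sit at indices 3 and 4
d = StubDictionary(["a", "b", "c", "1", "2"])
tokens = [0, 0, 0, 1, 2, 2]
packed = pack_replabels(tokens, d, max_reps=2)
print(packed)  # [0, 4, 1, 2, 3]: "a" + replabel-2, "b", "c" + replabel-1
assert unpack_replabels(packed, d, max_reps=2) == tokens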
18,985
import json import os import re import torch from fairseq.data import Dictionary from fairseq.tasks import FairseqTask, register_task from examples.speech_recognition.data import AsrDataset from examples.speech_recognition.data.replabels import replabel_symbol The provided code snippet includes necessary dependencies for implementing the `get_asr_dataset_from_json` function. Write a Python function `def get_asr_dataset_from_json(data_json_path, tgt_dict)` to solve the following problem: Parse data json and create dataset. See scripts/asr_prep_json.py which pack json from raw files Json example: { "utts": { "4771-29403-0025": { "input": { "length_ms": 170, "path": "/tmp/file1.flac" }, "output": { "text": "HELLO \n", "token": "HE LLO", "tokenid": "4815, 861" } }, "1564-142299-0096": { ... } } Here is the function: def get_asr_dataset_from_json(data_json_path, tgt_dict): """ Parse data json and create dataset. See scripts/asr_prep_json.py which pack json from raw files Json example: { "utts": { "4771-29403-0025": { "input": { "length_ms": 170, "path": "/tmp/file1.flac" }, "output": { "text": "HELLO \n", "token": "HE LLO", "tokenid": "4815, 861" } }, "1564-142299-0096": { ... } } """ if not os.path.isfile(data_json_path): raise FileNotFoundError("Dataset not found: {}".format(data_json_path)) with open(data_json_path, "rb") as f: data_samples = json.load(f)["utts"] assert len(data_samples) != 0 sorted_samples = sorted( data_samples.items(), key=lambda sample: int(sample[1]["input"]["length_ms"]), reverse=True, ) aud_paths = [s[1]["input"]["path"] for s in sorted_samples] ids = [s[0] for s in sorted_samples] speakers = [] for s in sorted_samples: m = re.search("(.+?)-(.+?)-(.+?)", s[0]) speakers.append(m.group(1) + "_" + m.group(2)) frame_sizes = [s[1]["input"]["length_ms"] for s in sorted_samples] tgt = [ [int(i) for i in s[1]["output"]["tokenid"].split(", ")] for s in sorted_samples ] # append eos tgt = [[*t, tgt_dict.eos()] for t in tgt] return AsrDataset(aud_paths, frame_sizes, tgt, tgt_dict, ids, speakers)
Parse data json and create dataset. See scripts/asr_prep_json.py which pack json from raw files Json example: { "utts": { "4771-29403-0025": { "input": { "length_ms": 170, "path": "/tmp/file1.flac" }, "output": { "text": "HELLO \n", "token": "HE LLO", "tokenid": "4815, 861" } }, "1564-142299-0096": { ... } }
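Two details of the parsing above are worth seeing in isolation: the non-greedy speaker regex and the tokenid split, both applied to the JSON example from the docstring:

import re

utt_id = "4771-29403-0025"
m = re.search("(.+?)-(.+?)-(.+?)", utt_id)
print(m.group(1) + "_" + m.group(2))  # "4771_29403"

tokenid = "4815, 861"
print([int(i) for i in tokenid.split(", ")])  # [4815, 861]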
18,986
import logging import math from itertools import groupby import torch import torch.nn.functional as F from fairseq import utils from fairseq.criterions import FairseqCriterion, register_criterion from examples.speech_recognition.data.data_utils import encoder_padding_mask_to_lengths from examples.speech_recognition.utils.wer_utils import Code, EditDistance, Token def arr_to_toks(arr): toks = [] for a in arr: toks.append(Token(str(a), 0.0, 0.0)) return toks class Code(Enum): match = 1 substitution = 2 insertion = 3 deletion = 4 class EditDistance(object): def __init__(self, time_mediated): self.time_mediated_ = time_mediated self.scores_ = np.nan # Eigen::Matrix<float, Eigen::Dynamic, Eigen::Dynamic> self.backtraces_ = ( np.nan ) # Eigen::Matrix<size_t, Eigen::Dynamic, Eigen::Dynamic> backtraces_; self.confusion_pairs_ = {} def cost(self, ref, hyp, code): if self.time_mediated_: if code == Code.match: return abs(ref.start - hyp.start) + abs(ref.end - hyp.end) elif code == Code.insertion: return hyp.end - hyp.start elif code == Code.deletion: return ref.end - ref.start else: # substitution return abs(ref.start - hyp.start) + abs(ref.end - hyp.end) + 0.1 else: if code == Code.match: return 0 elif code == Code.insertion or code == Code.deletion: return 3 else: # substitution return 4 def get_result(self, refs, hyps): res = AlignmentResult(refs=deque(), hyps=deque(), codes=deque(), score=np.nan) num_rows, num_cols = self.scores_.shape res.score = self.scores_[num_rows - 1, num_cols - 1] curr_offset = coordinate_to_offset(num_rows - 1, num_cols - 1, num_cols) while curr_offset != 0: curr_row = offset_to_row(curr_offset, num_cols) curr_col = offset_to_col(curr_offset, num_cols) prev_offset = self.backtraces_[curr_row, curr_col] prev_row = offset_to_row(prev_offset, num_cols) prev_col = offset_to_col(prev_offset, num_cols) res.refs.appendleft(curr_row - 1) # Note: this was .push_front() in C++ res.hyps.appendleft(curr_col - 1) if curr_row - 1 == prev_row and curr_col == prev_col: res.codes.appendleft(Code.deletion) elif curr_row == prev_row and curr_col - 1 == prev_col: res.codes.appendleft(Code.insertion) else: # assert(curr_row - 1 == prev_row and curr_col - 1 == prev_col) ref_str = refs[res.refs[0]].label hyp_str = hyps[res.hyps[0]].label if ref_str == hyp_str: res.codes.appendleft(Code.match) else: res.codes.appendleft(Code.substitution) confusion_pair = "%s -> %s" % (ref_str, hyp_str) if confusion_pair not in self.confusion_pairs_: self.confusion_pairs_[confusion_pair] = 1 else: self.confusion_pairs_[confusion_pair] += 1 curr_offset = prev_offset return res def align(self, refs, hyps): if len(refs) == 0 and len(hyps) == 0: return np.nan # NOTE: we're not resetting the values in these matrices because every value # will be overridden in the loop below. If this assumption doesn't hold, # be sure to set all entries in self.scores_ and self.backtraces_ to 0. 
self.scores_ = np.zeros((len(refs) + 1, len(hyps) + 1)) self.backtraces_ = np.zeros((len(refs) + 1, len(hyps) + 1)) num_rows, num_cols = self.scores_.shape for i in range(num_rows): for j in range(num_cols): if i == 0 and j == 0: self.scores_[i, j] = 0.0 self.backtraces_[i, j] = 0 continue if i == 0: self.scores_[i, j] = self.scores_[i, j - 1] + self.cost( None, hyps[j - 1], Code.insertion ) self.backtraces_[i, j] = coordinate_to_offset(i, j - 1, num_cols) continue if j == 0: self.scores_[i, j] = self.scores_[i - 1, j] + self.cost( refs[i - 1], None, Code.deletion ) self.backtraces_[i, j] = coordinate_to_offset(i - 1, j, num_cols) continue # Below here both i and j are greater than 0 ref = refs[i - 1] hyp = hyps[j - 1] best_score = self.scores_[i - 1, j - 1] + ( self.cost(ref, hyp, Code.match) if (ref.label == hyp.label) else self.cost(ref, hyp, Code.substitution) ) prev_row = i - 1 prev_col = j - 1 ins = self.scores_[i, j - 1] + self.cost(None, hyp, Code.insertion) if ins < best_score: best_score = ins prev_row = i prev_col = j - 1 delt = self.scores_[i - 1, j] + self.cost(ref, None, Code.deletion) if delt < best_score: best_score = delt prev_row = i - 1 prev_col = j self.scores_[i, j] = best_score self.backtraces_[i, j] = coordinate_to_offset( prev_row, prev_col, num_cols ) return self.get_result(refs, hyps) The provided code snippet includes necessary dependencies for implementing the `compute_ctc_uer` function. Write a Python function `def compute_ctc_uer(logprobs, targets, input_lengths, target_lengths, blank_idx)` to solve the following problem: Computes utterance error rate for CTC outputs Args: logprobs: (Torch.tensor) N, T1, D tensor of log probabilities out of the encoder targets: (Torch.tensor) N, T2 tensor of targets input_lengths: (Torch.tensor) lengths of inputs for each sample target_lengths: (Torch.tensor) lengths of targets for each sample blank_idx: (integer) id of blank symbol in target dictionary Returns: batch_errors: (float) errors in the batch batch_total: (float) total number of valid samples in batch Here is the function: def compute_ctc_uer(logprobs, targets, input_lengths, target_lengths, blank_idx): """ Computes utterance error rate for CTC outputs Args: logprobs: (Torch.tensor) N, T1, D tensor of log probabilities out of the encoder targets: (Torch.tensor) N, T2 tensor of targets input_lengths: (Torch.tensor) lengths of inputs for each sample target_lengths: (Torch.tensor) lengths of targets for each sample blank_idx: (integer) id of blank symbol in target dictionary Returns: batch_errors: (float) errors in the batch batch_total: (float) total number of valid samples in batch """ batch_errors = 0.0 batch_total = 0.0 for b in range(logprobs.shape[0]): predicted = logprobs[b][: input_lengths[b]].argmax(1).tolist() target = targets[b][: target_lengths[b]].tolist() # dedup predictions predicted = [p[0] for p in groupby(predicted)] # remove blanks nonblanks = [] for p in predicted: if p != blank_idx: nonblanks.append(p) predicted = nonblanks # compute the alignment based on EditDistance alignment = EditDistance(False).align( arr_to_toks(predicted), arr_to_toks(target) ) # compute the number of errors # note that alignment.codes can also be used for computing # deletion, insersion and substitution error breakdowns in future for a in alignment.codes: if a != Code.match: batch_errors += 1 batch_total += len(target) return batch_errors, batch_total
Computes utterance error rate for CTC outputs Args: logprobs: (Torch.tensor) N, T1, D tensor of log probabilities out of the encoder targets: (Torch.tensor) N, T2 tensor of targets input_lengths: (Torch.tensor) lengths of inputs for each sample target_lengths: (Torch.tensor) lengths of targets for each sample blank_idx: (integer) id of blank symbol in target dictionary Returns: batch_errors: (float) errors in the batch batch_total: (float) total number of valid samples in batch
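Before alignment, the predictions go through the standard CTC collapse: merge repeated frames, then drop blanks. In isolation:

from itertools import groupby

blank_idx = 0
predicted = [0, 7, 7, 0, 0, 5, 5, 5, 0, 7]
predicted = [p[0] for p in groupby(predicted)]        # merge runs -> [0, 7, 0, 5, 0, 7]
predicted = [p for p in predicted if p != blank_idx]  # drop blanks -> [7, 5, 7]
print(predicted)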
18,987
import logging import math import os import sentencepiece as spm import torch from fairseq import checkpoint_utils, options, utils, tasks from fairseq.logging import meters, progress_bar from fairseq.utils import import_user_module def check_args(args): assert args.path is not None, "--path required for generation!" assert args.results_path is not None, "--results_path required for generation!" assert ( not args.sampling or args.nbest == args.beam ), "--sampling requires --nbest to be equal to --beam" assert ( args.replace_unk is None or args.dataset_impl == "raw" ), "--replace-unk requires a raw text dataset (--dataset-impl=raw)"
null
18,988
import logging import math import os import sentencepiece as spm import torch from fairseq import checkpoint_utils, options, utils, tasks from fairseq.logging import meters, progress_bar from fairseq.utils import import_user_module def get_dataset_itr(args, task): return task.get_batch_iterator( dataset=task.dataset(args.gen_subset), max_tokens=args.max_tokens, max_sentences=args.max_sentences, max_positions=(1000000.0, 1000000.0), ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test, required_batch_size_multiple=args.required_batch_size_multiple, num_shards=args.num_shards, shard_id=args.shard_id, num_workers=args.num_workers, ).next_epoch_itr(shuffle=False)
null
18,989
import logging import math import os import sentencepiece as spm import torch from fairseq import checkpoint_utils, options, utils, tasks from fairseq.logging import meters, progress_bar from fairseq.utils import import_user_module logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) def process_predictions( args, hypos, sp, tgt_dict, target_tokens, res_files, speaker, id ): for hypo in hypos[: min(len(hypos), args.nbest)]: hyp_pieces = tgt_dict.string(hypo["tokens"].int().cpu()) hyp_words = sp.DecodePieces(hyp_pieces.split()) print( "{} ({}-{})".format(hyp_pieces, speaker, id), file=res_files["hypo.units"] ) print("{} ({}-{})".format(hyp_words, speaker, id), file=res_files["hypo.words"]) tgt_pieces = tgt_dict.string(target_tokens) tgt_words = sp.DecodePieces(tgt_pieces.split()) print("{} ({}-{})".format(tgt_pieces, speaker, id), file=res_files["ref.units"]) print("{} ({}-{})".format(tgt_words, speaker, id), file=res_files["ref.words"]) # only score top hypothesis if not args.quiet: logger.debug("HYPO:" + hyp_words) logger.debug("TARGET:" + tgt_words) logger.debug("___________________")
null
18,990
import logging import math import os import sentencepiece as spm import torch from fairseq import checkpoint_utils, options, utils, tasks from fairseq.logging import meters, progress_bar from fairseq.utils import import_user_module def prepare_result_files(args): def get_res_file(file_prefix): path = os.path.join( args.results_path, "{}-{}-{}.txt".format( file_prefix, os.path.basename(args.path), args.gen_subset ), ) return open(path, "w", buffering=1) return { "hypo.words": get_res_file("hypo.word"), "hypo.units": get_res_file("hypo.units"), "ref.words": get_res_file("ref.word"), "ref.units": get_res_file("ref.units"), }
null
18,991
import logging import math import os import sentencepiece as spm import torch from fairseq import checkpoint_utils, options, utils, tasks from fairseq.logging import meters, progress_bar from fairseq.utils import import_user_module def load_models_and_criterions(filenames, arg_overrides=None, task=None): models = [] criterions = [] for filename in filenames: if not os.path.exists(filename): raise IOError("Model file not found: {}".format(filename)) state = checkpoint_utils.load_checkpoint_to_cpu(filename, arg_overrides) args = state["args"] if task is None: task = tasks.setup_task(args) # build model for ensemble model = task.build_model(args) model.load_state_dict(state["model"], strict=True) models.append(model) criterion = task.build_criterion(args) if "criterion" in state: criterion.load_state_dict(state["criterion"], strict=True) criterions.append(criterion) return models, criterions, args
null
18,992
import logging import math import os import sentencepiece as spm import torch from fairseq import checkpoint_utils, options, utils, tasks from fairseq.logging import meters, progress_bar from fairseq.utils import import_user_module The provided code snippet includes necessary dependencies for implementing the `optimize_models` function. Write a Python function `def optimize_models(args, use_cuda, models)` to solve the following problem: Optimize ensemble for generation Here is the function: def optimize_models(args, use_cuda, models): """Optimize ensemble for generation """ for model in models: model.make_generation_fast_( beamable_mm_beam_size=None if args.no_beamable_mm else args.beam, need_attn=args.print_alignment, ) if args.fp16: model.half() if use_cuda: model.cuda()
Optimize ensemble for generation
18,993
import logging
import math
import os
import sentencepiece as spm
import torch
from fairseq import checkpoint_utils, options, utils, tasks
from fairseq.logging import meters, progress_bar
from fairseq.utils import import_user_module

def add_asr_eval_argument(parser):
    parser.add_argument("--kspmodel", default=None, help="sentence piece model")
    parser.add_argument(
        "--wfstlm", default=None, help="wfstlm on dictionary output units"
    )
    parser.add_argument(
        "--rnnt_decoding_type",
        default="greedy",
        help="decoding type for RNN-T (e.g. greedy)",
    )
    parser.add_argument(
        "--lm-weight",
        "--lm_weight",
        type=float,
        default=0.2,
        help="weight for lm while interpolating with neural score",
    )
    parser.add_argument(
        "--rnnt_len_penalty", type=float, default=-0.5,
        help="rnnt length penalty on word level"
    )
    parser.add_argument(
        "--w2l-decoder", choices=["viterbi", "kenlm"], help="use a w2l decoder"
    )
    parser.add_argument("--lexicon", help="lexicon for w2l decoder")
    parser.add_argument("--kenlm-model", help="kenlm model for w2l decoder")
    parser.add_argument("--beam-threshold", type=float, default=25.0)
    parser.add_argument("--word-score", type=float, default=1.0)
    parser.add_argument("--unk-weight", type=float, default=-math.inf)
    parser.add_argument("--sil-weight", type=float, default=0.0)
    return parser

def main(args):
    check_args(args)
    import_user_module(args)

    if args.max_tokens is None and args.max_sentences is None:
        args.max_tokens = 30000
    logger.info(args)

    use_cuda = torch.cuda.is_available() and not args.cpu

    # Load dataset splits
    task = tasks.setup_task(args)
    task.load_dataset(args.gen_subset)
    logger.info(
        "| {} {} {} examples".format(
            args.data, args.gen_subset, len(task.dataset(args.gen_subset))
        )
    )

    # Set dictionary
    tgt_dict = task.target_dictionary

    logger.info("| decoding with criterion {}".format(args.criterion))

    # Load ensemble
    logger.info("| loading model(s) from {}".format(args.path))
    models, criterions, _model_args = load_models_and_criterions(
        args.path.split(os.pathsep),
        arg_overrides=eval(args.model_overrides),  # noqa
        task=task,
    )
    optimize_models(args, use_cuda, models)

    # hack to pass transitions to W2lDecoder
    if args.criterion == "asg_loss":
        trans = criterions[0].asg.trans.data
        args.asg_transitions = torch.flatten(trans).tolist()

    # Load dataset (possibly sharded)
    itr = get_dataset_itr(args, task)

    progress = progress_bar.progress_bar(
        itr,
        log_format=args.log_format,
        log_interval=args.log_interval,
        default_log_format=('tqdm' if not args.no_progress_bar else 'none'),
    )

    # Initialize generator
    gen_timer = meters.StopwatchMeter()
    generator = task.build_generator(models, args)

    num_sentences = 0

    if not os.path.exists(args.results_path):
        os.makedirs(args.results_path)

    sp = spm.SentencePieceProcessor()
    sp.Load(os.path.join(args.data, "spm.model"))

    res_files = prepare_result_files(args)
    wps_meter = meters.TimeMeter()
    for sample in progress:
        sample = utils.move_to_cuda(sample) if use_cuda else sample
        if "net_input" not in sample:
            continue

        prefix_tokens = None
        if args.prefix_size > 0:
            prefix_tokens = sample["target"][:, : args.prefix_size]

        gen_timer.start()
        hypos = task.inference_step(generator, models, sample, prefix_tokens)
        num_generated_tokens = sum(len(h[0]["tokens"]) for h in hypos)
        gen_timer.stop(num_generated_tokens)

        for i, sample_id in enumerate(sample["id"].tolist()):
            speaker = task.dataset(args.gen_subset).speakers[int(sample_id)]
            id = task.dataset(args.gen_subset).ids[int(sample_id)]
            target_tokens = (
                utils.strip_pad(sample["target"][i, :], tgt_dict.pad()).int().cpu()
            )
            # Process top predictions
            process_predictions(
args, hypos[i], sp, tgt_dict, target_tokens, res_files, speaker, id ) wps_meter.update(num_generated_tokens) progress.log({"wps": round(wps_meter.avg)}) num_sentences += sample["nsentences"] logger.info( "| Processed {} sentences ({} tokens) in {:.1f}s ({:.2f}" "sentences/s, {:.2f} tokens/s)".format( num_sentences, gen_timer.n, gen_timer.sum, num_sentences / gen_timer.sum, 1.0 / gen_timer.avg, ) ) logger.info("| Generate {} with beam={}".format(args.gen_subset, args.beam)) def cli_main(): parser = options.get_generation_parser() parser = add_asr_eval_argument(parser) args = options.parse_args_and_arch(parser) main(args)
null
18,994
from __future__ import absolute_import, division, print_function, unicode_literals
from collections import namedtuple
import concurrent.futures
from itertools import chain
import argparse
import os
import json
import sentencepiece as spm
import multiprocessing
from fairseq.data import Dictionary

MILLISECONDS_TO_SECONDS = 0.001

def process_sample(aud_path, label, utt_id, sp, tgt_dict):
    import torchaudio
    input = {}
    output = {}
    si, ei = torchaudio.info(aud_path)
    input["length_ms"] = int(si.length / si.channels / si.rate / MILLISECONDS_TO_SECONDS)
    input["path"] = aud_path

    token = " ".join(sp.EncodeAsPieces(label))
    ids = tgt_dict.encode_line(token, append_eos=False)

    output["text"] = label
    output["token"] = token
    output["tokenid"] = ', '.join(map(str, [t.tolist() for t in ids]))
    return {utt_id: {"input": input, "output": output}}
null
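A hedged usage sketch for `process_sample`; it needs torchaudio at call time, and the audio path, SentencePiece model, and dictionary file below are hypothetical placeholders:

import sentencepiece as spm
from fairseq.data import Dictionary

sp = spm.SentencePieceProcessor()
sp.Load("spm.model")                     # hypothetical SentencePiece model
tgt_dict = Dictionary.load("dict.txt")   # hypothetical fairseq dictionary

entry = process_sample("utt1.flac", "hello world", "utt1", sp, tgt_dict)
# {'utt1': {'input': {'length_ms': ..., 'path': 'utt1.flac'}, 'output': {...}}}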
18,995
import argparse import math from collections.abc import Iterable import torch import torch.nn as nn from fairseq import utils from fairseq.models import ( FairseqEncoder, FairseqEncoderModel, FairseqIncrementalDecoder, FairseqEncoderDecoderModel, register_model, register_model_architecture, ) from fairseq.modules import LinearizedConvolution from examples.speech_recognition.data.data_utils import lengths_to_encoder_padding_mask from fairseq.modules import TransformerDecoderLayer, TransformerEncoderLayer, VGGBlock def prepare_transformer_encoder_params( input_dim, num_heads, ffn_dim, normalize_before, dropout, attention_dropout, relu_dropout, ): args = argparse.Namespace() args.encoder_embed_dim = input_dim args.encoder_attention_heads = num_heads args.attention_dropout = attention_dropout args.dropout = dropout args.activation_dropout = relu_dropout args.encoder_normalize_before = normalize_before args.encoder_ffn_embed_dim = ffn_dim return args
null
18,996
import argparse import math from collections.abc import Iterable import torch import torch.nn as nn from fairseq import utils from fairseq.models import ( FairseqEncoder, FairseqEncoderModel, FairseqIncrementalDecoder, FairseqEncoderDecoderModel, register_model, register_model_architecture, ) from fairseq.modules import LinearizedConvolution from examples.speech_recognition.data.data_utils import lengths_to_encoder_padding_mask from fairseq.modules import TransformerDecoderLayer, TransformerEncoderLayer, VGGBlock def prepare_transformer_decoder_params( input_dim, num_heads, ffn_dim, normalize_before, dropout, attention_dropout, relu_dropout, ): args = argparse.Namespace() args.decoder_embed_dim = input_dim args.decoder_attention_heads = num_heads args.attention_dropout = attention_dropout args.dropout = dropout args.activation_dropout = relu_dropout args.decoder_normalize_before = normalize_before args.decoder_ffn_embed_dim = ffn_dim return args
null
18,997
import argparse import math from collections.abc import Iterable import torch import torch.nn as nn from fairseq import utils from fairseq.models import ( FairseqEncoder, FairseqEncoderModel, FairseqIncrementalDecoder, FairseqEncoderDecoderModel, register_model, register_model_architecture, ) from fairseq.modules import LinearizedConvolution from examples.speech_recognition.data.data_utils import lengths_to_encoder_padding_mask from fairseq.modules import TransformerDecoderLayer, TransformerEncoderLayer, VGGBlock def Embedding(num_embeddings, embedding_dim, padding_idx): m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx) # nn.init.uniform_(m.weight, -0.1, 0.1) # nn.init.constant_(m.weight[padding_idx], 0) return m
null
18,998
import argparse import math from collections.abc import Iterable import torch import torch.nn as nn from fairseq import utils from fairseq.models import ( FairseqEncoder, FairseqEncoderModel, FairseqIncrementalDecoder, FairseqEncoderDecoderModel, register_model, register_model_architecture, ) from fairseq.modules import LinearizedConvolution from examples.speech_recognition.data.data_utils import lengths_to_encoder_padding_mask from fairseq.modules import TransformerDecoderLayer, TransformerEncoderLayer, VGGBlock The provided code snippet includes necessary dependencies for implementing the `Linear` function. Write a Python function `def Linear(in_features, out_features, bias=True, dropout=0)` to solve the following problem: Linear layer (input: N x T x C) Here is the function: def Linear(in_features, out_features, bias=True, dropout=0): """Linear layer (input: N x T x C)""" m = nn.Linear(in_features, out_features, bias=bias) # m.weight.data.uniform_(-0.1, 0.1) # if bias: # m.bias.data.uniform_(-0.1, 0.1) return m
Linear layer (input: N x T x C)
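`Linear` is a thin wrapper over `nn.Linear`, which already maps the last dimension, so it applies time-step-wise to N x T x C input; a quick sketch with illustrative shapes:

import torch

m = Linear(80, 256)
x = torch.randn(8, 100, 80)   # batch N=8, frames T=100, features C=80
y = m(x)                      # -> shape (8, 100, 256)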
18,999
import argparse import math from collections.abc import Iterable import torch import torch.nn as nn from fairseq import utils from fairseq.models import ( FairseqEncoder, FairseqEncoderModel, FairseqIncrementalDecoder, FairseqEncoderDecoderModel, register_model, register_model_architecture, ) from fairseq.modules import LinearizedConvolution from examples.speech_recognition.data.data_utils import lengths_to_encoder_padding_mask from fairseq.modules import TransformerDecoderLayer, TransformerEncoderLayer, VGGBlock The provided code snippet includes necessary dependencies for implementing the `LinearizedConv1d` function. Write a Python function `def LinearizedConv1d(in_channels, out_channels, kernel_size, dropout=0, **kwargs)` to solve the following problem: Weight-normalized Conv1d layer optimized for decoding Here is the function: def LinearizedConv1d(in_channels, out_channels, kernel_size, dropout=0, **kwargs): """Weight-normalized Conv1d layer optimized for decoding""" m = LinearizedConvolution(in_channels, out_channels, kernel_size, **kwargs) std = math.sqrt((4 * (1.0 - dropout)) / (m.kernel_size[0] * in_channels)) nn.init.normal_(m.weight, mean=0, std=std) nn.init.constant_(m.bias, 0) return nn.utils.weight_norm(m, dim=2)
Weight-normalized Conv1d layer optimized for decoding
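A short construction sketch; channel counts and kernel size are illustrative, and the comment spells out the initialization the function performs:

conv = LinearizedConv1d(256, 256, kernel_size=3, dropout=0.1)
# weights drawn from N(0, sqrt(4 * (1 - 0.1) / (3 * 256))), then weight-normalized over dim 2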
19,000
import argparse import math from collections.abc import Iterable import torch import torch.nn as nn from fairseq import utils from fairseq.models import ( FairseqEncoder, FairseqEncoderModel, FairseqIncrementalDecoder, FairseqEncoderDecoderModel, register_model, register_model_architecture, ) from fairseq.modules import LinearizedConvolution from examples.speech_recognition.data.data_utils import lengths_to_encoder_padding_mask from fairseq.modules import TransformerDecoderLayer, TransformerEncoderLayer, VGGBlock def LayerNorm(embedding_dim): m = nn.LayerNorm(embedding_dim) return m
null
19,001
import argparse import math from collections.abc import Iterable import torch import torch.nn as nn from fairseq import utils from fairseq.models import ( FairseqEncoder, FairseqEncoderModel, FairseqIncrementalDecoder, FairseqEncoderDecoderModel, register_model, register_model_architecture, ) from fairseq.modules import LinearizedConvolution from examples.speech_recognition.data.data_utils import lengths_to_encoder_padding_mask from fairseq.modules import TransformerDecoderLayer, TransformerEncoderLayer, VGGBlock DEFAULT_ENC_VGGBLOCK_CONFIG = ((32, 3, 2, 2, False),) * 2 DEFAULT_ENC_TRANSFORMER_CONFIG = ((256, 4, 1024, True, 0.2, 0.2, 0.2),) * 2 DEFAULT_DEC_CONV_CONFIG = ((256, 3, True),) * 2 def base_architecture(args): args.input_feat_per_channel = getattr(args, "input_feat_per_channel", 40) args.vggblock_enc_config = getattr( args, "vggblock_enc_config", DEFAULT_ENC_VGGBLOCK_CONFIG ) args.transformer_enc_config = getattr( args, "transformer_enc_config", DEFAULT_ENC_TRANSFORMER_CONFIG ) args.enc_output_dim = getattr(args, "enc_output_dim", 512) args.in_channels = getattr(args, "in_channels", 1) args.tgt_embed_dim = getattr(args, "tgt_embed_dim", 128) args.transformer_dec_config = getattr( args, "transformer_dec_config", DEFAULT_ENC_TRANSFORMER_CONFIG ) args.conv_dec_config = getattr(args, "conv_dec_config", DEFAULT_DEC_CONV_CONFIG) args.transformer_context = getattr(args, "transformer_context", "None")
null
19,002
import argparse import math from collections.abc import Iterable import torch import torch.nn as nn from fairseq import utils from fairseq.models import ( FairseqEncoder, FairseqEncoderModel, FairseqIncrementalDecoder, FairseqEncoderDecoderModel, register_model, register_model_architecture, ) from fairseq.modules import LinearizedConvolution from examples.speech_recognition.data.data_utils import lengths_to_encoder_padding_mask from fairseq.modules import TransformerDecoderLayer, TransformerEncoderLayer, VGGBlock def vggtransformer_1(args): args.input_feat_per_channel = getattr(args, "input_feat_per_channel", 80) args.vggblock_enc_config = getattr( args, "vggblock_enc_config", "[(64, 3, 2, 2, True), (128, 3, 2, 2, True)]" ) args.transformer_enc_config = getattr( args, "transformer_enc_config", "((1024, 16, 4096, True, 0.15, 0.15, 0.15),) * 14", ) args.enc_output_dim = getattr(args, "enc_output_dim", 1024) args.tgt_embed_dim = getattr(args, "tgt_embed_dim", 128) args.conv_dec_config = getattr(args, "conv_dec_config", "((256, 3, True),) * 4") args.transformer_dec_config = getattr( args, "transformer_dec_config", "((1024, 16, 4096, True, 0.15, 0.15, 0.15),) * 4", )
null
19,003
import argparse import math from collections.abc import Iterable import torch import torch.nn as nn from fairseq import utils from fairseq.models import ( FairseqEncoder, FairseqEncoderModel, FairseqIncrementalDecoder, FairseqEncoderDecoderModel, register_model, register_model_architecture, ) from fairseq.modules import LinearizedConvolution from examples.speech_recognition.data.data_utils import lengths_to_encoder_padding_mask from fairseq.modules import TransformerDecoderLayer, TransformerEncoderLayer, VGGBlock def vggtransformer_2(args): args.input_feat_per_channel = getattr(args, "input_feat_per_channel", 80) args.vggblock_enc_config = getattr( args, "vggblock_enc_config", "[(64, 3, 2, 2, True), (128, 3, 2, 2, True)]" ) args.transformer_enc_config = getattr( args, "transformer_enc_config", "((1024, 16, 4096, True, 0.15, 0.15, 0.15),) * 16", ) args.enc_output_dim = getattr(args, "enc_output_dim", 1024) args.tgt_embed_dim = getattr(args, "tgt_embed_dim", 512) args.conv_dec_config = getattr(args, "conv_dec_config", "((256, 3, True),) * 4") args.transformer_dec_config = getattr( args, "transformer_dec_config", "((1024, 16, 4096, True, 0.15, 0.15, 0.15),) * 6", )
null
19,004
import argparse
import math
from collections.abc import Iterable
import torch
import torch.nn as nn
from fairseq import utils
from fairseq.models import (
    FairseqEncoder,
    FairseqEncoderModel,
    FairseqIncrementalDecoder,
    FairseqEncoderDecoderModel,
    register_model,
    register_model_architecture,
)
from fairseq.modules import LinearizedConvolution
from examples.speech_recognition.data.data_utils import lengths_to_encoder_padding_mask
from fairseq.modules import TransformerDecoderLayer, TransformerEncoderLayer, VGGBlock

def vggtransformer_base(args):
    args.input_feat_per_channel = getattr(args, "input_feat_per_channel", 80)
    args.vggblock_enc_config = getattr(
        args, "vggblock_enc_config", "[(64, 3, 2, 2, True), (128, 3, 2, 2, True)]"
    )
    args.transformer_enc_config = getattr(
        args, "transformer_enc_config", "((512, 8, 2048, True, 0.15, 0.15, 0.15),) * 12"
    )

    args.enc_output_dim = getattr(args, "enc_output_dim", 512)
    args.tgt_embed_dim = getattr(args, "tgt_embed_dim", 512)
    args.conv_dec_config = getattr(args, "conv_dec_config", "((256, 3, True),) * 4")
    args.transformer_dec_config = getattr(
        args, "transformer_dec_config", "((512, 8, 2048, True, 0.15, 0.15, 0.15),) * 6"
    )
    # Size estimations:
    # Encoder:
    #   - vggblock param: 64*1*3*3 + 64*64*3*3 + 128*64*3*3 + 128*128*3*3 = 258K
    #   Transformer:
    #   - input dimension adapter: 2560 x 512 -> 1.31M
    #   - transformer_layers (x12) --> 37.74M
    #       * MultiheadAttention: 512*512*3 (in_proj) + 512*512 (out_proj) = 1.048M
    #       * FFN weight: 512*2048*2 = 2.097M
    #   - output dimension adapter: 512 x 512 -> 0.26 M
    # Decoder:
    #   - LinearizedConv1d: 512 * 256 * 3 + 256 * 256 * 3 * 3
    #   - transformer_layer: (x6) --> 25.16M
    #        * MultiheadAttention (self-attention): 512*512*3 + 512*512 = 1.048M
    #        * MultiheadAttention (encoder-attention): 512*512*3 + 512*512 = 1.048M
    #        * FFN: 512*2048*2 = 2.097M
    # Final FC:
    #   - FC: 512*5000 = 2.56M (assuming vocab size 5K)
    # In total:
    #       ~65 M
null
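Because these architecture presets only fill in attributes that are still missing (via `getattr`), they can be exercised on a bare namespace; a small sketch (fairseq itself applies them through `register_model_architecture`):

import argparse

args = argparse.Namespace()
vggtransformer_base(args)
print(args.enc_output_dim)           # 512
print(args.transformer_dec_config)   # "((512, 8, 2048, True, 0.15, 0.15, 0.15),) * 6"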
19,005
import argparse import math from collections.abc import Iterable import torch import torch.nn as nn from fairseq import utils from fairseq.models import ( FairseqEncoder, FairseqEncoderModel, FairseqIncrementalDecoder, FairseqEncoderDecoderModel, register_model, register_model_architecture, ) from fairseq.modules import LinearizedConvolution from examples.speech_recognition.data.data_utils import lengths_to_encoder_padding_mask from fairseq.modules import TransformerDecoderLayer, TransformerEncoderLayer, VGGBlock def base_architecture_enconly(args): args.input_feat_per_channel = getattr(args, "input_feat_per_channel", 40) args.vggblock_enc_config = getattr( args, "vggblock_enc_config", "[(32, 3, 2, 2, True)] * 2" ) args.transformer_enc_config = getattr( args, "transformer_enc_config", "((256, 4, 1024, True, 0.2, 0.2, 0.2),) * 2" ) args.enc_output_dim = getattr(args, "enc_output_dim", 512) args.in_channels = getattr(args, "in_channels", 1) args.transformer_context = getattr(args, "transformer_context", "None") args.transformer_sampling = getattr(args, "transformer_sampling", "None")
null
19,006
import argparse
import math
from collections.abc import Iterable
import torch
import torch.nn as nn
from fairseq import utils
from fairseq.models import (
    FairseqEncoder,
    FairseqEncoderModel,
    FairseqIncrementalDecoder,
    FairseqEncoderDecoderModel,
    register_model,
    register_model_architecture,
)
from fairseq.modules import LinearizedConvolution
from examples.speech_recognition.data.data_utils import lengths_to_encoder_padding_mask
from fairseq.modules import TransformerDecoderLayer, TransformerEncoderLayer, VGGBlock

def vggtransformer_enc_1(args):
    # vggtransformer_1 is the same as vggtransformer_enc_big, except the number
    # of layers is increased to 16
    # keep it here for backward compatibility purposes
    args.input_feat_per_channel = getattr(args, "input_feat_per_channel", 80)
    args.vggblock_enc_config = getattr(
        args, "vggblock_enc_config", "[(64, 3, 2, 2, True), (128, 3, 2, 2, True)]"
    )
    args.transformer_enc_config = getattr(
        args,
        "transformer_enc_config",
        "((1024, 16, 4096, True, 0.15, 0.15, 0.15),) * 16",
    )
    args.enc_output_dim = getattr(args, "enc_output_dim", 1024)
null
19,007
import math import torch import torch.nn as nn import torch.nn.functional as F from fairseq.models import ( FairseqEncoder, FairseqEncoderModel, register_model, register_model_architecture, ) default_conv_enc_config = """[ (400, 13, 170, 0.2), (440, 14, 0, 0.214), (484, 15, 0, 0.22898), (532, 16, 0, 0.2450086), (584, 17, 0, 0.262159202), (642, 18, 0, 0.28051034614), (706, 19, 0, 0.30014607037), (776, 20, 0, 0.321156295296), (852, 21, 0, 0.343637235966), (936, 22, 0, 0.367691842484), (1028, 23, 0, 0.393430271458), (1130, 24, 0, 0.42097039046), (1242, 25, 0, 0.450438317792), (1366, 26, 0, 0.481969000038), (1502, 27, 0, 0.51570683004), (1652, 28, 0, 0.551806308143), (1816, 29, 0, 0.590432749713), ]""" def w2l_conv_glu_enc(args): args.input_feat_per_channel = getattr(args, "input_feat_per_channel", 80) args.in_channels = getattr(args, "in_channels", 1) args.conv_enc_config = getattr(args, "conv_enc_config", default_conv_enc_config)
null
19,008
from __future__ import absolute_import, division, print_function, unicode_literals import re from collections import deque from enum import Enum import numpy as np def coordinate_to_offset(row, col, ncols): return int(row * ncols + col)
null
19,009
from __future__ import absolute_import, division, print_function, unicode_literals import re from collections import deque from enum import Enum import numpy as np def offset_to_row(offset, ncols): return int(offset / ncols)
null
19,010
from __future__ import absolute_import, division, print_function, unicode_literals import re from collections import deque from enum import Enum import numpy as np def offset_to_col(offset, ncols): return int(offset % ncols)
null
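Taken together, the three helpers round-trip between (row, col) coordinates and flat offsets; a quick self-check on an illustrative 5-column grid:

ncols = 5
off = coordinate_to_offset(2, 3, ncols)   # 2 * 5 + 3 == 13
assert offset_to_row(off, ncols) == 2
assert offset_to_col(off, ncols) == 3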
19,011
from __future__ import absolute_import, division, print_function, unicode_literals import re from collections import deque from enum import Enum import numpy as np class WERTransformer(object): def __init__(self, hyp_str, ref_str, verbose=True): self.ed_ = EditDistance(False) self.id2oracle_errs_ = {} self.utts_ = 0 self.words_ = 0 self.insertions_ = 0 self.deletions_ = 0 self.substitutions_ = 0 self.process(["dummy_str", hyp_str, ref_str]) if verbose: print("'%s' vs '%s'" % (hyp_str, ref_str)) self.report_result() def process(self, input): # std::vector<std::string>&& input if len(input) < 3: print( "Input must be of the form <id> ... <hypo> <ref> , got ", len(input), " inputs:", ) return None # Align # std::vector<Token> hyps; # std::vector<Token> refs; hyps = str2toks(input[-2]) refs = str2toks(input[-1]) alignment = self.ed_.align(refs, hyps) if alignment is None: print("Alignment is null") return np.nan # Tally errors ins = 0 dels = 0 subs = 0 for code in alignment.codes: if code == Code.substitution: subs += 1 elif code == Code.insertion: ins += 1 elif code == Code.deletion: dels += 1 # Output row = input row.append(str(len(refs))) row.append(str(ins)) row.append(str(dels)) row.append(str(subs)) # print(row) # Accumulate kIdIndex = 0 kNBestSep = "/" pieces = input[kIdIndex].split(kNBestSep) if len(pieces) == 0: print( "Error splitting ", input[kIdIndex], " on '", kNBestSep, "', got empty list", ) return np.nan id = pieces[0] if id not in self.id2oracle_errs_: self.utts_ += 1 self.words_ += len(refs) self.insertions_ += ins self.deletions_ += dels self.substitutions_ += subs self.id2oracle_errs_[id] = [ins, dels, subs] else: curr_err = ins + dels + subs prev_err = np.sum(self.id2oracle_errs_[id]) if curr_err < prev_err: self.id2oracle_errs_[id] = [ins, dels, subs] return 0 def report_result(self): # print("---------- Summary ---------------") if self.words_ == 0: print("No words counted") return # 1-best best_wer = ( 100.0 * (self.insertions_ + self.deletions_ + self.substitutions_) / self.words_ ) print( "\tWER = %0.2f%% (%i utts, %i words, %0.2f%% ins, " "%0.2f%% dels, %0.2f%% subs)" % ( best_wer, self.utts_, self.words_, 100.0 * self.insertions_ / self.words_, 100.0 * self.deletions_ / self.words_, 100.0 * self.substitutions_ / self.words_, ) ) def wer(self): if self.words_ == 0: wer = np.nan else: wer = ( 100.0 * (self.insertions_ + self.deletions_ + self.substitutions_) / self.words_ ) return wer def stats(self): if self.words_ == 0: stats = {} else: wer = ( 100.0 * (self.insertions_ + self.deletions_ + self.substitutions_) / self.words_ ) stats = dict( { "wer": wer, "utts": self.utts_, "numwords": self.words_, "ins": self.insertions_, "dels": self.deletions_, "subs": self.substitutions_, "confusion_pairs": self.ed_.confusion_pairs_, } ) return stats def calc_wer(hyp_str, ref_str): t = WERTransformer(hyp_str, ref_str, verbose=0) return t.wer()
null
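A worked example for `calc_wer`, assuming the module's tokenization helpers are in scope: one deletion against a four-word reference yields 25% WER.

print(calc_wer("the cat sat", "the cat sat down"))   # 25.0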
19,012
from __future__ import absolute_import, division, print_function, unicode_literals import re from collections import deque from enum import Enum import numpy as np class WERTransformer(object): def __init__(self, hyp_str, ref_str, verbose=True): self.ed_ = EditDistance(False) self.id2oracle_errs_ = {} self.utts_ = 0 self.words_ = 0 self.insertions_ = 0 self.deletions_ = 0 self.substitutions_ = 0 self.process(["dummy_str", hyp_str, ref_str]) if verbose: print("'%s' vs '%s'" % (hyp_str, ref_str)) self.report_result() def process(self, input): # std::vector<std::string>&& input if len(input) < 3: print( "Input must be of the form <id> ... <hypo> <ref> , got ", len(input), " inputs:", ) return None # Align # std::vector<Token> hyps; # std::vector<Token> refs; hyps = str2toks(input[-2]) refs = str2toks(input[-1]) alignment = self.ed_.align(refs, hyps) if alignment is None: print("Alignment is null") return np.nan # Tally errors ins = 0 dels = 0 subs = 0 for code in alignment.codes: if code == Code.substitution: subs += 1 elif code == Code.insertion: ins += 1 elif code == Code.deletion: dels += 1 # Output row = input row.append(str(len(refs))) row.append(str(ins)) row.append(str(dels)) row.append(str(subs)) # print(row) # Accumulate kIdIndex = 0 kNBestSep = "/" pieces = input[kIdIndex].split(kNBestSep) if len(pieces) == 0: print( "Error splitting ", input[kIdIndex], " on '", kNBestSep, "', got empty list", ) return np.nan id = pieces[0] if id not in self.id2oracle_errs_: self.utts_ += 1 self.words_ += len(refs) self.insertions_ += ins self.deletions_ += dels self.substitutions_ += subs self.id2oracle_errs_[id] = [ins, dels, subs] else: curr_err = ins + dels + subs prev_err = np.sum(self.id2oracle_errs_[id]) if curr_err < prev_err: self.id2oracle_errs_[id] = [ins, dels, subs] return 0 def report_result(self): # print("---------- Summary ---------------") if self.words_ == 0: print("No words counted") return # 1-best best_wer = ( 100.0 * (self.insertions_ + self.deletions_ + self.substitutions_) / self.words_ ) print( "\tWER = %0.2f%% (%i utts, %i words, %0.2f%% ins, " "%0.2f%% dels, %0.2f%% subs)" % ( best_wer, self.utts_, self.words_, 100.0 * self.insertions_ / self.words_, 100.0 * self.deletions_ / self.words_, 100.0 * self.substitutions_ / self.words_, ) ) def wer(self): if self.words_ == 0: wer = np.nan else: wer = ( 100.0 * (self.insertions_ + self.deletions_ + self.substitutions_) / self.words_ ) return wer def stats(self): if self.words_ == 0: stats = {} else: wer = ( 100.0 * (self.insertions_ + self.deletions_ + self.substitutions_) / self.words_ ) stats = dict( { "wer": wer, "utts": self.utts_, "numwords": self.words_, "ins": self.insertions_, "dels": self.deletions_, "subs": self.substitutions_, "confusion_pairs": self.ed_.confusion_pairs_, } ) return stats def calc_wer_stats(hyp_str, ref_str): t = WERTransformer(hyp_str, ref_str, verbose=0) return t.stats()
null
19,013
from __future__ import absolute_import, division, print_function, unicode_literals import re from collections import deque from enum import Enum import numpy as np def str2toks(str): pieces = trimWhitespace(str).split(" ") toks = [] for p in pieces: toks.append(Token(p, 0.0, 0.0)) return toks class WERTransformer(object): def __init__(self, hyp_str, ref_str, verbose=True): self.ed_ = EditDistance(False) self.id2oracle_errs_ = {} self.utts_ = 0 self.words_ = 0 self.insertions_ = 0 self.deletions_ = 0 self.substitutions_ = 0 self.process(["dummy_str", hyp_str, ref_str]) if verbose: print("'%s' vs '%s'" % (hyp_str, ref_str)) self.report_result() def process(self, input): # std::vector<std::string>&& input if len(input) < 3: print( "Input must be of the form <id> ... <hypo> <ref> , got ", len(input), " inputs:", ) return None # Align # std::vector<Token> hyps; # std::vector<Token> refs; hyps = str2toks(input[-2]) refs = str2toks(input[-1]) alignment = self.ed_.align(refs, hyps) if alignment is None: print("Alignment is null") return np.nan # Tally errors ins = 0 dels = 0 subs = 0 for code in alignment.codes: if code == Code.substitution: subs += 1 elif code == Code.insertion: ins += 1 elif code == Code.deletion: dels += 1 # Output row = input row.append(str(len(refs))) row.append(str(ins)) row.append(str(dels)) row.append(str(subs)) # print(row) # Accumulate kIdIndex = 0 kNBestSep = "/" pieces = input[kIdIndex].split(kNBestSep) if len(pieces) == 0: print( "Error splitting ", input[kIdIndex], " on '", kNBestSep, "', got empty list", ) return np.nan id = pieces[0] if id not in self.id2oracle_errs_: self.utts_ += 1 self.words_ += len(refs) self.insertions_ += ins self.deletions_ += dels self.substitutions_ += subs self.id2oracle_errs_[id] = [ins, dels, subs] else: curr_err = ins + dels + subs prev_err = np.sum(self.id2oracle_errs_[id]) if curr_err < prev_err: self.id2oracle_errs_[id] = [ins, dels, subs] return 0 def report_result(self): # print("---------- Summary ---------------") if self.words_ == 0: print("No words counted") return # 1-best best_wer = ( 100.0 * (self.insertions_ + self.deletions_ + self.substitutions_) / self.words_ ) print( "\tWER = %0.2f%% (%i utts, %i words, %0.2f%% ins, " "%0.2f%% dels, %0.2f%% subs)" % ( best_wer, self.utts_, self.words_, 100.0 * self.insertions_ / self.words_, 100.0 * self.deletions_ / self.words_, 100.0 * self.substitutions_ / self.words_, ) ) def wer(self): if self.words_ == 0: wer = np.nan else: wer = ( 100.0 * (self.insertions_ + self.deletions_ + self.substitutions_) / self.words_ ) return wer def stats(self): if self.words_ == 0: stats = {} else: wer = ( 100.0 * (self.insertions_ + self.deletions_ + self.substitutions_) / self.words_ ) stats = dict( { "wer": wer, "utts": self.utts_, "numwords": self.words_, "ins": self.insertions_, "dels": self.deletions_, "subs": self.substitutions_, "confusion_pairs": self.ed_.confusion_pairs_, } ) return stats The provided code snippet includes necessary dependencies for implementing the `get_wer_alignment_codes` function. 
Write a Python function `def get_wer_alignment_codes(hyp_str, ref_str)` to solve the following problem: INPUT: hypothesis string, reference string OUTPUT: List of alignment codes (intermediate results from WER computation) Here is the function: def get_wer_alignment_codes(hyp_str, ref_str): """ INPUT: hypothesis string, reference string OUTPUT: List of alignment codes (intermediate results from WER computation) """ t = WERTransformer(hyp_str, ref_str, verbose=0) return t.ed_.align(str2toks(ref_str), str2toks(hyp_str)).codes
INPUT: hypothesis string, reference string OUTPUT: List of alignment codes (intermediate results from WER computation)
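A hedged sketch of the returned codes for a one-substitution pair; `Code` is the enum defined alongside these helpers, and the list in the comment is the expected, not guaranteed, output:

codes = get_wer_alignment_codes("a b c", "a x c")
# expected: [Code.match, Code.substitution, Code.match]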
19,014
from __future__ import absolute_import, division, print_function, unicode_literals import re from collections import deque from enum import Enum import numpy as np def merge_counts(x, y): # Merge two hashes which have 'counts' as their values # This can be used for example to merge confusion pair counts # conf_pairs = merge_counts(conf_pairs, stats['confusion_pairs']) for k, v in y.items(): if k not in x: x[k] = 0 x[k] += v return x
null
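A small worked example; note that `merge_counts` mutates and returns its first argument:

x = {"a": 2, "b": 1}
y = {"b": 3, "c": 5}
print(merge_counts(x, y))   # {'a': 2, 'b': 4, 'c': 5}; x itself now holds the merged counts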
19,015
from functools import lru_cache import json def convert_sentence_to_json(sentence): if '_' in sentence: prefix, rest = sentence.split('_', 1) query, rest = rest.split('_', 1) query_index = len(prefix.rstrip().split(' ')) else: query, query_index = None, None prefix, rest = sentence.split('[', 1) pronoun, rest = rest.split(']', 1) pronoun_index = len(prefix.rstrip().split(' ')) sentence = sentence.replace('_', '').replace('[', '').replace(']', '') return { 'idx': 0, 'text': sentence, 'target': { 'span1_index': query_index, 'span1_text': query, 'span2_index': pronoun_index, 'span2_text': pronoun, }, }
null
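A sketch with a WSC-style annotated sentence (query between underscores, pronoun in brackets); the sentence itself is illustrative:

s = "The trophy doesn't fit into the _brown suitcase_ because [it] is too big."
d = convert_sentence_to_json(s)
print(d["target"]["span1_text"], d["target"]["span2_text"])     # brown suitcase it
print(d["target"]["span1_index"], d["target"]["span2_index"])   # 6 9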
19,016
from functools import lru_cache import json def extended_noun_chunks(sentence): noun_chunks = {(np.start, np.end) for np in sentence.noun_chunks} np_start, cur_np = 0, 'NONE' for i, token in enumerate(sentence): np_type = token.pos_ if token.pos_ in {'NOUN', 'PROPN'} else 'NONE' if np_type != cur_np: if cur_np != 'NONE': noun_chunks.add((np_start, i)) if np_type != 'NONE': np_start = i cur_np = np_type if cur_np != 'NONE': noun_chunks.add((np_start, len(sentence))) return [sentence[s:e] for (s, e) in sorted(noun_chunks)]
null
19,017
from functools import lru_cache import json def find_token(sentence, start_pos): def find_span(sentence, search_text, start=0): def get_detokenizer(): def get_spacy_nlp(): def jsonl_iterator(input_fname, positive_only=False, ngram_order=3, eval=False): detok = get_detokenizer() nlp = get_spacy_nlp() with open(input_fname) as fin: for line in fin: sample = json.loads(line.strip()) if positive_only and 'label' in sample and not sample['label']: # only consider examples where the query is correct continue target = sample['target'] # clean up the query query = target['span1_text'] if query is not None: if '\n' in query: continue if query.endswith('.') or query.endswith(','): query = query[:-1] # split tokens tokens = sample['text'].split(' ') def strip_pronoun(x): return x.rstrip('.,"') # find the pronoun pronoun_idx = target['span2_index'] pronoun = strip_pronoun(target['span2_text']) if strip_pronoun(tokens[pronoun_idx]) != pronoun: # hack: sometimes the index is misaligned if strip_pronoun(tokens[pronoun_idx + 1]) == pronoun: pronoun_idx += 1 else: raise Exception('Misaligned pronoun!') assert strip_pronoun(tokens[pronoun_idx]) == pronoun # split tokens before and after the pronoun before = tokens[:pronoun_idx] after = tokens[pronoun_idx + 1:] # the GPT BPE attaches leading spaces to tokens, so we keep track # of whether we need spaces before or after the pronoun leading_space = ' ' if pronoun_idx > 0 else '' trailing_space = ' ' if len(after) > 0 else '' # detokenize before = detok.detokenize(before, return_str=True) pronoun = detok.detokenize([pronoun], return_str=True) after = detok.detokenize(after, return_str=True) # hack: when the pronoun ends in a period (or comma), move the # punctuation to the "after" part if pronoun.endswith('.') or pronoun.endswith(','): after = pronoun[-1] + trailing_space + after pronoun = pronoun[:-1] # hack: when the "after" part begins with a comma or period, remove # the trailing space if after.startswith('.') or after.startswith(','): trailing_space = '' # parse sentence with spacy sentence = nlp(before + leading_space + pronoun + trailing_space + after) # find pronoun span start = len(before + leading_space) first_pronoun_tok = find_token(sentence, start_pos=start) pronoun_span = find_span(sentence, pronoun, start=first_pronoun_tok.i) assert pronoun_span.text == pronoun if eval: # convert to format where pronoun is surrounded by "[]" and # query is surrounded by "_" query_span = find_span(sentence, query) query_with_ws = '_{}_{}'.format( query_span.text, (' ' if query_span.text_with_ws.endswith(' ') else '') ) pronoun_with_ws = '[{}]{}'.format( pronoun_span.text, (' ' if pronoun_span.text_with_ws.endswith(' ') else '') ) if query_span.start < pronoun_span.start: first = (query_span, query_with_ws) second = (pronoun_span, pronoun_with_ws) else: first = (pronoun_span, pronoun_with_ws) second = (query_span, query_with_ws) sentence = ( sentence[:first[0].start].text_with_ws + first[1] + sentence[first[0].end:second[0].start].text_with_ws + second[1] + sentence[second[0].end:].text ) yield sentence, sample.get('label', None) else: yield sentence, pronoun_span, query, sample.get('label', None)
null
19,018
from functools import lru_cache import json def winogrande_jsonl_iterator(input_fname, eval=False): with open(input_fname) as fin: for line in fin: sample = json.loads(line.strip()) sentence, option1, option2 = sample['sentence'], sample['option1'],\ sample['option2'] pronoun_span = (sentence.index('_'), sentence.index('_') + 1) if eval: query, cand = option1, option2 else: query = option1 if sample['answer'] == '1' else option2 cand = option2 if sample['answer'] == '1' else option1 yield sentence, pronoun_span, query, cand
null
19,019
from functools import lru_cache import json def filter_noun_chunks(chunks, exclude_pronouns=False, exclude_query=None, exact_match=False): if exclude_pronouns: chunks = [ np for np in chunks if ( np.lemma_ != '-PRON-' and not all(tok.pos_ == 'PRON' for tok in np) ) ] if exclude_query is not None: excl_txt = [exclude_query.lower()] filtered_chunks = [] for chunk in chunks: lower_chunk = chunk.text.lower() found = False for excl in excl_txt: if ( (not exact_match and (lower_chunk in excl or excl in lower_chunk)) or lower_chunk == excl ): found = True break if not found: filtered_chunks.append(chunk) chunks = filtered_chunks return chunks
null
19,020
import argparse import json import os import re class InputExample: def __init__(self, paragraph, qa_list, label): self.paragraph = paragraph self.qa_list = qa_list self.label = label The provided code snippet includes necessary dependencies for implementing the `get_examples` function. Write a Python function `def get_examples(data_dir, set_type)` to solve the following problem: Extract paragraph and question-answer list from each json file Here is the function: def get_examples(data_dir, set_type): """ Extract paragraph and question-answer list from each json file """ examples = [] levels = ["middle", "high"] set_type_c = set_type.split('-') if len(set_type_c) == 2: levels = [set_type_c[1]] set_type = set_type_c[0] for level in levels: cur_dir = os.path.join(data_dir, set_type, level) for filename in os.listdir(cur_dir): cur_path = os.path.join(cur_dir, filename) with open(cur_path, 'r') as f: cur_data = json.load(f) answers = cur_data["answers"] options = cur_data["options"] questions = cur_data["questions"] context = cur_data["article"].replace("\n", " ") context = re.sub(r'\s+', ' ', context) for i in range(len(answers)): label = ord(answers[i]) - ord("A") qa_list = [] question = questions[i] for j in range(4): option = options[i][j] if "_" in question: qa_cat = question.replace("_", option) else: qa_cat = " ".join([question, option]) qa_cat = re.sub(r'\s+', ' ', qa_cat) qa_list.append(qa_cat) examples.append(InputExample(context, qa_list, label)) return examples
Extract paragraph and question-answer list from each json file
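A hedged usage sketch; "RACE" stands in for a local copy of the RACE dataset laid out as `<data_dir>/<set_type>/middle` and `<data_dir>/<set_type>/high`:

examples = get_examples("RACE", "train")      # both difficulty levels
dev_high = get_examples("RACE", "dev-high")   # restrict to the 'high' level
ex = examples[0]
print(ex.qa_list[ex.label])                   # question concatenated with the gold option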
19,021
import argparse from itertools import chain import sys import random import numpy as np from sacrebleu import compute_bleu, corpus_bleu as _corpus_bleu def dictolist(d): a = sorted(d.items(), key=lambda i: i[0]) return [i[1] for i in a] def load_sys(paths): src, tgt, hypos, log_probs = {}, {}, {}, {} for path in paths: with open(path) as f: for line in f: line = line.rstrip() if line.startswith(('S-', 'T-', 'H-')): i = int(line[line.find('-')+1:line.find('\t')]) if line.startswith('S-'): src[i] = line.split('\t')[1] if line.startswith('T-'): tgt[i] = line.split('\t')[1] if line.startswith('H-'): if i not in hypos: hypos[i] = [] log_probs[i] = [] hypos[i].append(line.split('\t')[2]) log_probs[i].append(float(line.split('\t')[1])) return dictolist(src), dictolist(tgt), dictolist(hypos), dictolist(log_probs)
null
19,022
import argparse from itertools import chain import sys import random import numpy as np from sacrebleu import compute_bleu, corpus_bleu as _corpus_bleu def load_ref(path): with open(path) as f: lines = f.readlines() src, tgt, refs = [], [], [] i = 0 while i < len(lines): if lines[i].startswith('S-'): src.append(lines[i].split('\t')[1].rstrip()) i += 1 elif lines[i].startswith('T-'): tgt.append(lines[i].split('\t')[1].rstrip()) i += 1 else: a = [] while i < len(lines) and lines[i].startswith('R'): a.append(lines[i].split('\t')[1].rstrip()) i += 1 refs.append(a) return src, tgt, refs
null
19,023
import argparse from itertools import chain import sys import random import numpy as np from sacrebleu import compute_bleu, corpus_bleu as _corpus_bleu def merge(src, tgt, hypos, log_probs, path): with open(path, 'w') as f: for s, t, hs, lps in zip(src, tgt, hypos, log_probs): f.write(s + '\n') f.write(t + '\n') f.write('\n') for h, lp in zip(hs, lps): f.write('\t%f\t%s\n' % (lp, h.strip())) f.write('------------------------------------------------------\n')
null
19,024
import argparse from itertools import chain import sys import random import numpy as np from sacrebleu import compute_bleu, corpus_bleu as _corpus_bleu def corpus_bleu(sys_stream, ref_streams): bleu = _corpus_bleu(sys_stream, ref_streams, tokenize='none') return bleu.score def sentence_bleu(hypothesis, reference): bleu = _corpus_bleu(hypothesis, reference) for i in range(1, 4): bleu.counts[i] += 1 bleu.totals[i] += 1 bleu = compute_bleu( bleu.counts, bleu.totals, bleu.sys_len, bleu.ref_len, smooth='exp', smooth_floor=0.0, ) return bleu.score def multi_ref(refs, hypos): _ref, _hypo = [], [] ref_cnt = 0 assert len(refs) == len(hypos) # count number of refs covered for rs, hs in zip(refs, hypos): a = set() for h in hs: s = [sentence_bleu(h, r) for r in rs] j = np.argmax(s) _ref.append(rs[j]) _hypo.append(h) best = [k for k in range(len(rs)) if s[k] == s[j]] a.add(random.choice(best)) ref_cnt += len(a) print('#refs covered: %.2f' % (ref_cnt / len(refs))) # transpose refs and hypos refs = list(zip(*refs)) hypos = list(zip(*hypos)) # compute multi-ref corpus BLEU (leave-one-out to be comparable to intra_ref) k = len(hypos) m = len(refs) flat_hypos = [hypos[j][i] for i in range(len(hypos[0])) for j in range(k)] duplicated_refs = [ [ref for ref in refs_i for _ in range(k)] for refs_i in refs ] loo_bleus = [] for held_out_ref in range(m): remaining_refs = duplicated_refs[:held_out_ref] + duplicated_refs[held_out_ref+1:] assert len(remaining_refs) == m - 1 loo_bleus.append(corpus_bleu(flat_hypos, remaining_refs)) print('average multi-reference BLEU (leave-one-out): %.2f' % np.mean(loo_bleus))
null
19,025
import argparse from itertools import chain import sys import random import numpy as np from sacrebleu import compute_bleu, corpus_bleu as _corpus_bleu def corpus_bleu(sys_stream, ref_streams): def pairwise(sents): def intra_ref(refs): print('ref pairwise BLEU: %.2f' % pairwise(refs)) refs = list(zip(*refs)) m = len(refs) concat_h = [] concat_rest = [[] for j in range(m - 1)] for i, h in enumerate(refs): rest = refs[:i] + refs[i+1:] concat_h.append(h) for j in range(m - 1): concat_rest[j].extend(rest[j]) concat_h = list(chain.from_iterable(concat_h)) bleu = corpus_bleu(concat_h, concat_rest) print('multi-reference BLEU (leave-one-out): %.2f' % bleu)
null
19,026
from contextlib import redirect_stdout
import math
import os
import re
import subprocess
from fairseq import options
from fairseq_cli import eval_lm, preprocess

The provided code snippet includes necessary dependencies for implementing the `reprocess` function. Write a Python function `def reprocess(fle)` to solve the following problem: reprocess generate.py output Here is the function: def reprocess(fle):
    """reprocess generate.py output"""
    # takes in a file of generate.py translation output
    # returns a source dict and hypothesis dict, where keys are the ID num (as an int)
    # and values are the corresponding source and translation. There may be several translations
    # per source, so the values for hypothesis_dict are lists.
    with open(fle, 'r') as f:
        txt = f.read()

    p = re.compile(r"[STHP][-]\d+\s*")
    hp = re.compile(r"(\s*[-]?\d+[.]?\d+\s*)|(\s*(-inf)\s*)")
    source_dict = {}
    hypothesis_dict = {}
    score_dict = {}
    target_dict = {}
    pos_score_dict = {}
    lines = txt.split("\n")

    for line in lines:
        line += "\n"
        prefix = re.search(p, line)
        if prefix is not None:
            assert len(prefix.group()) > 2, "prefix id not found"
            _, j = prefix.span()
            id_num = prefix.group()[2:]
            id_num = int(id_num)
            line_type = prefix.group()[0]
            if line_type == "H":
                h_txt = line[j:]
                hypo = re.search(hp, h_txt)
                assert hypo is not None, ("regular expression failed to find the hypothesis scoring")
                _, i = hypo.span()
                score = hypo.group()
                if id_num in hypothesis_dict:
                    hypothesis_dict[id_num].append(h_txt[i:])
                    score_dict[id_num].append(float(score))
                else:
                    hypothesis_dict[id_num] = [h_txt[i:]]
                    score_dict[id_num] = [float(score)]

            elif line_type == "S":
                source_dict[id_num] = (line[j:])
            elif line_type == "T":
                target_dict[id_num] = (line[j:])
            elif line_type == "P":
                pos_scores = (line[j:]).split()
                pos_scores = [float(x) for x in pos_scores]
                if id_num in pos_score_dict:
                    pos_score_dict[id_num].append(pos_scores)
                else:
                    pos_score_dict[id_num] = [pos_scores]

    return source_dict, hypothesis_dict, score_dict, target_dict, pos_score_dict
reprocess generate.py output
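A hedged usage sketch; "gen.out" is a hypothetical file holding the captured stdout of fairseq's generate.py:

src, hypos, scores, tgt, pos = reprocess("gen.out")
# pick the highest-scoring hypothesis for sentence id 0 (assuming it exists)
best = hypos[0][scores[0].index(max(scores[0]))]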
19,027
from contextlib import redirect_stdout import math import os import re import subprocess from fairseq import options from fairseq_cli import eval_lm, preprocess The provided code snippet includes necessary dependencies for implementing the `reprocess_nbest` function. Write a Python function `def reprocess_nbest(fle)` to solve the following problem: reprocess interactive.py output Here is the function: def reprocess_nbest(fle): """reprocess interactive.py output""" with open(fle, 'r') as f: txt = f.read() source_dict = {} hypothesis_dict = {} score_dict = {} target_dict = {} pos_score_dict = {} lines = txt.split("\n") hp = re.compile(r'[-]?\d+[.]?\d+') j = -1 for _i, line in enumerate(lines): line += "\n" line_type = line[0] if line_type == "H": hypo = re.search(hp, line) _, start_index = hypo.span() score = hypo.group() if j in score_dict: score_dict[j].append(float(score)) hypothesis_dict[j].append(line[start_index:].strip("\t")) else: score_dict[j] = [float(score)] hypothesis_dict[j] = [line[start_index:].strip("\t")] elif line_type == "O": j += 1 source_dict[j] = line[2:] # we don't have the targets for interactive.py target_dict[j] = "filler" elif line_type == "P": pos_scores = [float(pos_score) for pos_score in line.split()[1:]] if j in pos_score_dict: pos_score_dict[j].append(pos_scores) else: pos_score_dict[j] = [pos_scores] assert source_dict.keys() == hypothesis_dict.keys() assert source_dict.keys() == pos_score_dict.keys() assert source_dict.keys() == score_dict.keys() return source_dict, hypothesis_dict, score_dict, target_dict, pos_score_dict
reprocess interactive.py output
19,028
from contextlib import redirect_stdout import math import os import re import subprocess from fairseq import options from fairseq_cli import eval_lm, preprocess def remove_bpe(line, bpe_symbol): line = line.replace("\n", '') line = (line + ' ').replace(bpe_symbol, '').rstrip() return line+("\n") def remove_bpe_dict(pred_dict, bpe_symbol): new_dict = {} for i in pred_dict: if type(pred_dict[i]) == list: new_list = [remove_bpe(elem, bpe_symbol) for elem in pred_dict[i]] new_dict[i] = new_list else: new_dict[i] = remove_bpe(pred_dict[i], bpe_symbol) return new_dict
null
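A worked example with the conventional "@@ " BPE continuation marker:

line = "th@@ is is a te@@ st\n"
print(remove_bpe(line, "@@ "))   # -> "this is a test\n"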
19,029
from contextlib import redirect_stdout import math import os import re import subprocess from fairseq import options from fairseq_cli import eval_lm, preprocess def calc_length_from_frac(bpe_sentence, prefix_frac, bpe_symbol): # return number of words, (not bpe tokens) that we want no_bpe_sen = remove_bpe(bpe_sentence, bpe_symbol) len_sen = len(no_bpe_sen.split()) num_words = math.ceil(len_sen * prefix_frac) prefix = get_prefix_no_bpe(bpe_sentence, bpe_symbol, num_words) num_bpe_tokens = len(prefix.split()) return num_words, prefix, num_bpe_tokens def get_num_bpe_tokens_from_len(sentence, bpe_symbol, prefix_len): """given a prefix length in terms of words, return the number of bpe tokens""" prefix = get_prefix_no_bpe(sentence, bpe_symbol, prefix_len) assert len(remove_bpe(prefix, bpe_symbol).split()) <= prefix_len return len(prefix.split(" ")) def get_score_from_pos(pos_score_dict, prefix_len, hypo_dict, bpe_symbol, hypo_frac, backwards): score_dict = {} num_bpe_tokens_dict = {} assert prefix_len is None or hypo_frac is None for key in pos_score_dict: score_dict[key] = [] num_bpe_tokens_dict[key] = [] for i in range(len(pos_score_dict[key])): if prefix_len is not None and not backwards: num_bpe_tokens = get_num_bpe_tokens_from_len(hypo_dict[key][i], bpe_symbol, prefix_len) score_dict[key].append(sum(pos_score_dict[key][i][:num_bpe_tokens])) num_bpe_tokens_dict[key].append(num_bpe_tokens) elif hypo_frac is not None: num_words, shortened, hypo_prefix_len = calc_length_from_frac(hypo_dict[key][i], hypo_frac, bpe_symbol) score_dict[key].append(sum(pos_score_dict[key][i][:hypo_prefix_len])) num_bpe_tokens_dict[key].append(hypo_prefix_len) else: score_dict[key].append(sum(pos_score_dict[key][i])) num_bpe_tokens_dict[key].append(len(pos_score_dict[key][i])) return score_dict, num_bpe_tokens_dict
null
19,030
from contextlib import redirect_stdout import math import os import re import subprocess from fairseq import options from fairseq_cli import eval_lm, preprocess def calc_length_from_frac(bpe_sentence, prefix_frac, bpe_symbol): # return number of words, (not bpe tokens) that we want no_bpe_sen = remove_bpe(bpe_sentence, bpe_symbol) len_sen = len(no_bpe_sen.split()) num_words = math.ceil(len_sen * prefix_frac) prefix = get_prefix_no_bpe(bpe_sentence, bpe_symbol, num_words) num_bpe_tokens = len(prefix.split()) return num_words, prefix, num_bpe_tokens def get_num_bpe_tokens_from_len(sentence, bpe_symbol, prefix_len): """given a prefix length in terms of words, return the number of bpe tokens""" prefix = get_prefix_no_bpe(sentence, bpe_symbol, prefix_len) assert len(remove_bpe(prefix, bpe_symbol).split()) <= prefix_len return len(prefix.split(" ")) def remove_bpe(line, bpe_symbol): line = line.replace("\n", '') line = (line + ' ').replace(bpe_symbol, '').rstrip() return line+("\n") The provided code snippet includes necessary dependencies for implementing the `parse_lm` function. Write a Python function `def parse_lm(input_file, prefix_len=None, bpe_symbol=None, target_prefix_frac=None)` to solve the following problem: parse output of eval_lm Here is the function: def parse_lm(input_file, prefix_len=None, bpe_symbol=None, target_prefix_frac=None): """parse output of eval_lm""" with open(input_file, 'r') as f: text = f.readlines() text = text[7:] cleaned_text = text[:-2] sentences = {} sen_scores = {} sen_pos_scores = {} no_bpe_sentences = {} num_bpe_tokens_dict = {} for _i, line in enumerate(cleaned_text): tokens = line.split() if tokens[0].isdigit(): line_id = int(tokens[0]) scores = [float(x[1:-1]) for x in tokens[2::2]] sentences[line_id] = " ".join(tokens[1::2][:-1])+"\n" if bpe_symbol is not None: # exclude <eos> symbol to match output from generate.py bpe_sen = " ".join(tokens[1::2][:-1])+"\n" no_bpe_sen = remove_bpe(bpe_sen, bpe_symbol) no_bpe_sentences[line_id] = no_bpe_sen if prefix_len is not None: num_bpe_tokens = get_num_bpe_tokens_from_len(bpe_sen, bpe_symbol, prefix_len) sen_scores[line_id] = sum(scores[:num_bpe_tokens]) num_bpe_tokens_dict[line_id] = num_bpe_tokens elif target_prefix_frac is not None: num_words, shortened, target_prefix_len = calc_length_from_frac(bpe_sen, target_prefix_frac, bpe_symbol) sen_scores[line_id] = sum(scores[:target_prefix_len]) num_bpe_tokens_dict[line_id] = target_prefix_len else: sen_scores[line_id] = sum(scores) num_bpe_tokens_dict[line_id] = len(scores) sen_pos_scores[line_id] = scores return sentences, sen_scores, sen_pos_scores, no_bpe_sentences, num_bpe_tokens_dict
parse output of eval_lm
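A hedged usage sketch; "eval_lm.out" is a hypothetical capture of eval_lm's stdout, and "@@ " the BPE marker used at training time:

sentences, sen_scores, pos_scores, no_bpe, n_bpe_toks = parse_lm(
    "eval_lm.out", bpe_symbol="@@ ")
# sen_scores maps each line id to its summed token log-probabilities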
19,031
from contextlib import redirect_stdout import os from fairseq import options from fairseq_cli import generate from . import rerank_options, rerank_utils def score_bw(args): if args.backwards1: scorer1_src = args.target_lang scorer1_tgt = args.source_lang else: scorer1_src = args.source_lang scorer1_tgt = args.target_lang if args.score_model2 is not None: if args.backwards2: scorer2_src = args.target_lang scorer2_tgt = args.source_lang else: scorer2_src = args.source_lang scorer2_tgt = args.target_lang rerank1_is_gen = args.gen_model == args.score_model1 and args.source_prefix_frac is None rerank2_is_gen = args.gen_model == args.score_model2 and args.source_prefix_frac is None pre_gen, left_to_right_preprocessed_dir, right_to_left_preprocessed_dir, \ backwards_preprocessed_dir, lm_preprocessed_dir = \ rerank_utils.get_directories(args.data_dir_name, args.num_rescore, args.gen_subset, args.gen_model_name, args.shard_id, args.num_shards, args.sampling, args.prefix_len, args.target_prefix_frac, args.source_prefix_frac) score1_file = rerank_utils.rescore_file_name(pre_gen, args.prefix_len, args.model1_name, target_prefix_frac=args.target_prefix_frac, source_prefix_frac=args.source_prefix_frac, backwards=args.backwards1) if args.score_model2 is not None: score2_file = rerank_utils.rescore_file_name(pre_gen, args.prefix_len, args.model2_name, target_prefix_frac=args.target_prefix_frac, source_prefix_frac=args.source_prefix_frac, backwards=args.backwards2) if args.right_to_left1: rerank_data1 = right_to_left_preprocessed_dir elif args.backwards1: rerank_data1 = backwards_preprocessed_dir else: rerank_data1 = left_to_right_preprocessed_dir gen_param = ["--batch-size", str(128), "--score-reference", "--gen-subset", "train"] if not rerank1_is_gen and not os.path.isfile(score1_file): print("STEP 4: score the translations for model 1") model_param1 = ["--path", args.score_model1, "--source-lang", scorer1_src, "--target-lang", scorer1_tgt] gen_model1_param = [rerank_data1] + gen_param + model_param1 gen_parser = options.get_generation_parser() input_args = options.parse_args_and_arch(gen_parser, gen_model1_param) with open(score1_file, 'w') as f: with redirect_stdout(f): generate.main(input_args) if args.score_model2 is not None and not os.path.isfile(score2_file) and not rerank2_is_gen: print("STEP 4: score the translations for model 2") if args.right_to_left2: rerank_data2 = right_to_left_preprocessed_dir elif args.backwards2: rerank_data2 = backwards_preprocessed_dir else: rerank_data2 = left_to_right_preprocessed_dir model_param2 = ["--path", args.score_model2, "--source-lang", scorer2_src, "--target-lang", scorer2_tgt] gen_model2_param = [rerank_data2] + gen_param + model_param2 gen_parser = options.get_generation_parser() input_args = options.parse_args_and_arch(gen_parser, gen_model2_param) with open(score2_file, 'w') as f: with redirect_stdout(f): generate.main(input_args) def cli_main(): parser = rerank_options.get_reranking_parser() args = options.parse_args_and_arch(parser) score_bw(args)
null
19,032
import math from multiprocessing import Pool import numpy as np from fairseq import bleu, options from fairseq.data import dictionary from . import ( rerank_generate, rerank_score_bw, rerank_score_lm, rerank_options, rerank_utils, ) def rerank(args): if type(args.lenpen) is not list: args.lenpen = [args.lenpen] if type(args.weight1) is not list: args.weight1 = [args.weight1] if type(args.weight2) is not list: args.weight2 = [args.weight2] if type(args.weight3) is not list: args.weight3 = [args.weight3] if args.all_shards: shard_ids = list(range(args.num_shards)) else: shard_ids = [args.shard_id] for shard_id in shard_ids: pre_gen, left_to_right_preprocessed_dir, right_to_left_preprocessed_dir, \ backwards_preprocessed_dir, lm_preprocessed_dir = \ rerank_utils.get_directories(args.data_dir_name, args.num_rescore, args.gen_subset, args.gen_model_name, shard_id, args.num_shards, args.sampling, args.prefix_len, args.target_prefix_frac, args.source_prefix_frac) rerank_generate.gen_and_reprocess_nbest(args) rerank_score_bw.score_bw(args) rerank_score_lm.score_lm(args) if args.write_hypos is None: write_targets = pre_gen+"/matched_targets" write_hypos = pre_gen+"/matched_hypos" else: write_targets = args.write_hypos+"_targets" + args.gen_subset write_hypos = args.write_hypos+"_hypos" + args.gen_subset if args.all_shards: write_targets += "_all_shards" write_hypos += "_all_shards" best_lenpen, best_weight1, best_weight2, best_weight3, best_score = \ match_target_hypo(args, write_targets, write_hypos) return best_lenpen, best_weight1, best_weight2, best_weight3, best_score def cli_main(): parser = rerank_options.get_reranking_parser() args = options.parse_args_and_arch(parser) rerank(args)
null
19,033
import argparse import random import numpy as np from fairseq import options from . import rerank, rerank_options def random_search(args): param_values = [] tuneable_parameters = ['lenpen', 'weight1', 'weight2', 'weight3'] initial_params = [args.lenpen, args.weight1, args.weight2, args.weight3] for i, elem in enumerate(initial_params): if type(elem) is not list: initial_params[i] = [elem] else: initial_params[i] = elem tune_parameters = args.tune_param.copy() for i in range(len(args.tune_param)): assert args.upper_bound[i] >= args.lower_bound[i] index = tuneable_parameters.index(args.tune_param[i]) del tuneable_parameters[index] del initial_params[index] tune_parameters += tuneable_parameters param_values += initial_params random.seed(args.seed) random_params = np.array([ [random.uniform(args.lower_bound[i], args.upper_bound[i]) for i in range(len(args.tune_param))] for k in range(args.num_trials) ]) set_params = np.array([ [initial_params[i][0] for i in range(len(tuneable_parameters))] for k in range(args.num_trials) ]) random_params = np.concatenate((random_params, set_params), 1) rerank_args = vars(args).copy() if args.nbest_list: rerank_args['gen_subset'] = 'test' else: rerank_args['gen_subset'] = args.tune_subset for k in range(len(tune_parameters)): rerank_args[tune_parameters[k]] = list(random_params[:, k]) if args.share_weights: k = tune_parameters.index('weight2') rerank_args['weight3'] = list(random_params[:, k]) rerank_args = argparse.Namespace(**rerank_args) best_lenpen, best_weight1, best_weight2, best_weight3, best_score = rerank.rerank(rerank_args) rerank_args = vars(args).copy() rerank_args['lenpen'] = [best_lenpen] rerank_args['weight1'] = [best_weight1] rerank_args['weight2'] = [best_weight2] rerank_args['weight3'] = [best_weight3] # write the hypothesis from the valid set from the best trial if args.gen_subset != "valid": rerank_args['gen_subset'] = "valid" rerank_args = argparse.Namespace(**rerank_args) rerank.rerank(rerank_args) # test with the best hyperparameters on gen subset rerank_args = vars(args).copy() rerank_args['gen_subset'] = args.gen_subset rerank_args['lenpen'] = [best_lenpen] rerank_args['weight1'] = [best_weight1] rerank_args['weight2'] = [best_weight2] rerank_args['weight3'] = [best_weight3] rerank_args = argparse.Namespace(**rerank_args) rerank.rerank(rerank_args) def cli_main(): parser = rerank_options.get_tuning_parser() args = options.parse_args_and_arch(parser) random_search(args)
null
19,034
import os from fairseq import options from . import rerank_options, rerank_utils def score_lm(args): using_nbest = args.nbest_list is not None pre_gen, left_to_right_preprocessed_dir, right_to_left_preprocessed_dir, \ backwards_preprocessed_dir, lm_preprocessed_dir = \ rerank_utils.get_directories(args.data_dir_name, args.num_rescore, args.gen_subset, args.gen_model_name, args.shard_id, args.num_shards, args.sampling, args.prefix_len, args.target_prefix_frac, args.source_prefix_frac) predictions_bpe_file = pre_gen+"/generate_output_bpe.txt" if using_nbest: print("Using predefined n-best list from interactive.py") predictions_bpe_file = args.nbest_list gen_output = rerank_utils.BitextOutputFromGen(predictions_bpe_file, bpe_symbol=args.remove_bpe, nbest=using_nbest) if args.language_model is not None: lm_score_file = rerank_utils.rescore_file_name(pre_gen, args.prefix_len, args.lm_name, lm_file=True) if args.language_model is not None and not os.path.isfile(lm_score_file): print("STEP 4.5: language modeling for P(T)") if args.lm_bpe_code is None: bpe_status = "no bpe" elif args.lm_bpe_code == "shared": bpe_status = "shared" else: bpe_status = "different" rerank_utils.lm_scoring(lm_preprocessed_dir, bpe_status, gen_output, pre_gen, args.lm_dict, args.lm_name, args.language_model, args.lm_bpe_code, 128, lm_score_file, args.target_lang, args.source_lang, prefix_len=args.prefix_len) def cli_main(): parser = rerank_options.get_reranking_parser() args = options.parse_args_and_arch(parser) score_lm(args)
null
19,035
from contextlib import redirect_stdout import os import subprocess from fairseq import options from fairseq_cli import generate, preprocess from . import rerank_options, rerank_utils def gen_and_reprocess_nbest(args): if args.score_dict_dir is None: args.score_dict_dir = args.data if args.prefix_len is not None: assert args.right_to_left1 is False, "prefix length not compatible with right to left models" assert args.right_to_left2 is False, "prefix length not compatible with right to left models" if args.nbest_list is not None: assert args.score_model2 is None if args.backwards1: scorer1_src = args.target_lang scorer1_tgt = args.source_lang else: scorer1_src = args.source_lang scorer1_tgt = args.target_lang store_data = os.path.join(os.path.dirname(__file__))+"/rerank_data/"+args.data_dir_name if not os.path.exists(store_data): os.makedirs(store_data) pre_gen, left_to_right_preprocessed_dir, right_to_left_preprocessed_dir, \ backwards_preprocessed_dir, lm_preprocessed_dir = \ rerank_utils.get_directories(args.data_dir_name, args.num_rescore, args.gen_subset, args.gen_model_name, args.shard_id, args.num_shards, args.sampling, args.prefix_len, args.target_prefix_frac, args.source_prefix_frac) assert not (args.right_to_left1 and args.backwards1), "backwards right to left not supported" assert not (args.right_to_left2 and args.backwards2), "backwards right to left not supported" assert not (args.prefix_len is not None and args.target_prefix_frac is not None), \ "target prefix frac and target prefix len incompatible" # make directory to store generation results if not os.path.exists(pre_gen): os.makedirs(pre_gen) rerank1_is_gen = args.gen_model == args.score_model1 and args.source_prefix_frac is None rerank2_is_gen = args.gen_model == args.score_model2 and args.source_prefix_frac is None if args.nbest_list is not None: rerank2_is_gen = True # make directories to store preprossed nbest list for reranking if not os.path.exists(left_to_right_preprocessed_dir): os.makedirs(left_to_right_preprocessed_dir) if not os.path.exists(right_to_left_preprocessed_dir): os.makedirs(right_to_left_preprocessed_dir) if not os.path.exists(lm_preprocessed_dir): os.makedirs(lm_preprocessed_dir) if not os.path.exists(backwards_preprocessed_dir): os.makedirs(backwards_preprocessed_dir) score1_file = rerank_utils.rescore_file_name(pre_gen, args.prefix_len, args.model1_name, target_prefix_frac=args.target_prefix_frac, source_prefix_frac=args.source_prefix_frac, backwards=args.backwards1) if args.score_model2 is not None: score2_file = rerank_utils.rescore_file_name(pre_gen, args.prefix_len, args.model2_name, target_prefix_frac=args.target_prefix_frac, source_prefix_frac=args.source_prefix_frac, backwards=args.backwards2) predictions_bpe_file = pre_gen+"/generate_output_bpe.txt" using_nbest = args.nbest_list is not None if using_nbest: print("Using predefined n-best list from interactive.py") predictions_bpe_file = args.nbest_list else: if not os.path.isfile(predictions_bpe_file): print("STEP 1: generate predictions using the p(T|S) model with bpe") print(args.data) param1 = [args.data, "--path", args.gen_model, "--shard-id", str(args.shard_id), "--num-shards", str(args.num_shards), "--nbest", str(args.num_rescore), "--batch-size", str(args.batch_size), "--beam", str(args.num_rescore), "--max-sentences", str(args.num_rescore), "--gen-subset", args.gen_subset, "--source-lang", args.source_lang, "--target-lang", args.target_lang] if args.sampling: param1 += ["--sampling"] gen_parser = options.get_generation_parser() 
input_args = options.parse_args_and_arch(gen_parser, param1) print(input_args) with open(predictions_bpe_file, 'w') as f: with redirect_stdout(f): generate.main(input_args) gen_output = rerank_utils.BitextOutputFromGen(predictions_bpe_file, bpe_symbol=args.remove_bpe, nbest=using_nbest, prefix_len=args.prefix_len, target_prefix_frac=args.target_prefix_frac) if args.diff_bpe: rerank_utils.write_reprocessed(gen_output.no_bpe_source, gen_output.no_bpe_hypo, gen_output.no_bpe_target, pre_gen+"/source_gen_bpe."+args.source_lang, pre_gen+"/target_gen_bpe."+args.target_lang, pre_gen+"/reference_gen_bpe."+args.target_lang) bitext_bpe = args.rescore_bpe_code bpe_src_param = ["-c", bitext_bpe, "--input", pre_gen+"/source_gen_bpe."+args.source_lang, "--output", pre_gen+"/rescore_data."+args.source_lang] bpe_tgt_param = ["-c", bitext_bpe, "--input", pre_gen+"/target_gen_bpe."+args.target_lang, "--output", pre_gen+"/rescore_data."+args.target_lang] subprocess.call(["python", os.path.join(os.path.dirname(__file__), "subword-nmt/subword_nmt/apply_bpe.py")] + bpe_src_param, shell=False) subprocess.call(["python", os.path.join(os.path.dirname(__file__), "subword-nmt/subword_nmt/apply_bpe.py")] + bpe_tgt_param, shell=False) if (not os.path.isfile(score1_file) and not rerank1_is_gen) or \ (args.score_model2 is not None and not os.path.isfile(score2_file) and not rerank2_is_gen): print("STEP 2: process the output of generate.py so we have clean text files with the translations") rescore_file = "/rescore_data" if args.prefix_len is not None: prefix_len_rescore_file = rescore_file + "prefix"+str(args.prefix_len) if args.target_prefix_frac is not None: target_prefix_frac_rescore_file = rescore_file + "target_prefix_frac"+str(args.target_prefix_frac) if args.source_prefix_frac is not None: source_prefix_frac_rescore_file = rescore_file + "source_prefix_frac"+str(args.source_prefix_frac) if not args.right_to_left1 or not args.right_to_left2: if not args.diff_bpe: rerank_utils.write_reprocessed(gen_output.source, gen_output.hypo, gen_output.target, pre_gen+rescore_file+"."+args.source_lang, pre_gen+rescore_file+"."+args.target_lang, pre_gen+"/reference_file", bpe_symbol=args.remove_bpe) if args.prefix_len is not None: bw_rescore_file = prefix_len_rescore_file rerank_utils.write_reprocessed(gen_output.source, gen_output.hypo, gen_output.target, pre_gen+prefix_len_rescore_file+"."+args.source_lang, pre_gen+prefix_len_rescore_file+"."+args.target_lang, pre_gen+"/reference_file", prefix_len=args.prefix_len, bpe_symbol=args.remove_bpe) elif args.target_prefix_frac is not None: bw_rescore_file = target_prefix_frac_rescore_file rerank_utils.write_reprocessed(gen_output.source, gen_output.hypo, gen_output.target, pre_gen+target_prefix_frac_rescore_file+"."+args.source_lang, pre_gen+target_prefix_frac_rescore_file+"."+args.target_lang, pre_gen+"/reference_file", bpe_symbol=args.remove_bpe, target_prefix_frac=args.target_prefix_frac) else: bw_rescore_file = rescore_file if args.source_prefix_frac is not None: fw_rescore_file = source_prefix_frac_rescore_file rerank_utils.write_reprocessed(gen_output.source, gen_output.hypo, gen_output.target, pre_gen+source_prefix_frac_rescore_file+"."+args.source_lang, pre_gen+source_prefix_frac_rescore_file+"."+args.target_lang, pre_gen+"/reference_file", bpe_symbol=args.remove_bpe, source_prefix_frac=args.source_prefix_frac) else: fw_rescore_file = rescore_file if args.right_to_left1 or args.right_to_left2: rerank_utils.write_reprocessed(gen_output.source, gen_output.hypo, 
gen_output.target, pre_gen+"/right_to_left_rescore_data."+args.source_lang, pre_gen+"/right_to_left_rescore_data."+args.target_lang, pre_gen+"/right_to_left_reference_file", right_to_left=True, bpe_symbol=args.remove_bpe) print("STEP 3: binarize the translations") if not args.right_to_left1 or args.score_model2 is not None and not args.right_to_left2 or not rerank1_is_gen: if args.backwards1 or args.backwards2: if args.backwards_score_dict_dir is not None: bw_dict = args.backwards_score_dict_dir else: bw_dict = args.score_dict_dir bw_preprocess_param = ["--source-lang", scorer1_src, "--target-lang", scorer1_tgt, "--trainpref", pre_gen+bw_rescore_file, "--srcdict", bw_dict + "/dict." + scorer1_src + ".txt", "--tgtdict", bw_dict + "/dict." + scorer1_tgt + ".txt", "--destdir", backwards_preprocessed_dir] preprocess_parser = options.get_preprocessing_parser() input_args = preprocess_parser.parse_args(bw_preprocess_param) preprocess.main(input_args) preprocess_param = ["--source-lang", scorer1_src, "--target-lang", scorer1_tgt, "--trainpref", pre_gen+fw_rescore_file, "--srcdict", args.score_dict_dir+"/dict."+scorer1_src+".txt", "--tgtdict", args.score_dict_dir+"/dict."+scorer1_tgt+".txt", "--destdir", left_to_right_preprocessed_dir] preprocess_parser = options.get_preprocessing_parser() input_args = preprocess_parser.parse_args(preprocess_param) preprocess.main(input_args) if args.right_to_left1 or args.right_to_left2: preprocess_param = ["--source-lang", scorer1_src, "--target-lang", scorer1_tgt, "--trainpref", pre_gen+"/right_to_left_rescore_data", "--srcdict", args.score_dict_dir+"/dict."+scorer1_src+".txt", "--tgtdict", args.score_dict_dir+"/dict."+scorer1_tgt+".txt", "--destdir", right_to_left_preprocessed_dir] preprocess_parser = options.get_preprocessing_parser() input_args = preprocess_parser.parse_args(preprocess_param) preprocess.main(input_args) return gen_output def cli_main(): parser = rerank_options.get_reranking_parser() args = options.parse_args_and_arch(parser) gen_and_reprocess_nbest(args)
null
19,036
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq.models import (
    register_model,
    register_model_architecture,
)
from fairseq.models.transformer import (
    TransformerModel,
    TransformerEncoder,
    TransformerDecoder,
    base_architecture,
    transformer_iwslt_de_en,
    transformer_vaswani_wmt_en_de_big,
)
from examples.simultaneous_translation.modules.monotonic_transformer_layer import (
    TransformerMonotonicDecoderLayer,
    TransformerMonotonicEncoderLayer
)

def base_monotonic_architecture(args):
    base_architecture(args)
    args.encoder_unidirectional = getattr(args, 'encoder_unidirectional', False)

def transformer_iwslt_de_en(args):
    args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
    args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 1024)
    args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 4)
    args.encoder_layers = getattr(args, "encoder_layers", 6)
    args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512)
    args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 1024)
    args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 4)
    args.decoder_layers = getattr(args, "decoder_layers", 6)
    base_architecture(args)

def transformer_monotonic_iwslt_de_en(args):
    transformer_iwslt_de_en(args)
    base_monotonic_architecture(args)
null
19,037
import torch import torch.nn as nn import torch.nn.functional as F from fairseq.models import ( register_model, register_model_architecture, ) from fairseq.models.transformer import ( TransformerModel, TransformerEncoder, TransformerDecoder, base_architecture, transformer_iwslt_de_en, transformer_vaswani_wmt_en_de_big, ) from examples.simultaneous_translation.modules.monotonic_transformer_layer import ( TransformerMonotonicDecoderLayer, TransformerMonotonicEncoderLayer ) def transformer_vaswani_wmt_en_de_big(args): args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4096) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16) args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 1024) args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 4096) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16) args.dropout = getattr(args, "dropout", 0.3) base_architecture(args) def transformer_monotonic_vaswani_wmt_en_de_big(args): transformer_vaswani_wmt_en_de_big(args)
null
19,038
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq.models import (
    register_model,
    register_model_architecture,
)
from fairseq.models.transformer import (
    TransformerModel,
    TransformerEncoder,
    TransformerDecoder,
    base_architecture,
    transformer_iwslt_de_en,
    transformer_vaswani_wmt_en_de_big,
)
from examples.simultaneous_translation.modules.monotonic_transformer_layer import (
    TransformerMonotonicDecoderLayer,
    TransformerMonotonicEncoderLayer
)

def transformer_monotonic_vaswani_wmt_en_fr_big(args):
    transformer_vaswani_wmt_en_de_big(args)
null
19,039
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq.models import (
    register_model,
    register_model_architecture,
)
from fairseq.models.transformer import (
    TransformerModel,
    TransformerEncoder,
    TransformerDecoder,
    base_architecture,
    transformer_iwslt_de_en,
    transformer_vaswani_wmt_en_de_big,
)
from examples.simultaneous_translation.modules.monotonic_transformer_layer import (
    TransformerMonotonicDecoderLayer,
    TransformerMonotonicEncoderLayer
)

def transformer_iwslt_de_en(args):
    args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
    args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 1024)
    args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 4)
    args.encoder_layers = getattr(args, "encoder_layers", 6)
    args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512)
    args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 1024)
    args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 4)
    args.decoder_layers = getattr(args, "decoder_layers", 6)
    base_architecture(args)

def transformer_unidirectional_iwslt_de_en(args):
    transformer_iwslt_de_en(args)
null
19,040
import torch

def safe_cumprod(tensor, dim: int, eps: float = 1e-10):
    """
    An implementation of cumprod to prevent precision issue.
    cumprod(x)
    = [x1, x1x2, x1x2x3, ....]
    = [exp(log(x1)), exp(log(x1) + log(x2)), exp(log(x1) + log(x2) + log(x3)), ...]
    = exp(cumsum(log(x)))
    """
    if (tensor + eps < 0).any().item():
        raise RuntimeError(
            "Safe cumprod can only take non-negative tensors as input. "
            "Consider using torch.cumprod if you want to calculate negative values."
        )
    log_tensor = torch.log(tensor + eps)
    cumsum_log_tensor = torch.cumsum(log_tensor, dim)
    exp_cumsum_log_tensor = torch.exp(cumsum_log_tensor)
    return exp_cumsum_log_tensor

The provided code snippet includes necessary dependencies for implementing the `exclusive_cumprod` function. Write a Python function `def exclusive_cumprod(tensor, dim: int, eps: float = 1e-10)` to solve the following problem: Implementing exclusive cumprod. There is cumprod in pytorch, however there is no exclusive mode. cumprod(x) = [x1, x1x2, x1x2x3, ..., prod_{i=1}^n x_i] exclusive means cumprod(x) = [1, x1, x1x2, x1x2x3, ..., prod_{i=1}^{n-1} x_i] Here is the function: def exclusive_cumprod(tensor, dim: int, eps: float = 1e-10):
    """
    Implementing exclusive cumprod.
    There is cumprod in pytorch, however there is no exclusive mode.
    cumprod(x) = [x1, x1x2, x1x2x3, ..., prod_{i=1}^n x_i]
    exclusive means cumprod(x) = [1, x1, x1x2, x1x2x3, ..., prod_{i=1}^{n-1} x_i]
    """
    tensor_size = list(tensor.size())
    tensor_size[dim] = 1
    return_tensor = safe_cumprod(
        torch.cat([torch.ones(tensor_size).type_as(tensor), tensor], dim=dim),
        dim=dim,
        eps=eps
    )

    if dim == 0:
        return return_tensor[:-1]
    elif dim == 1:
        return return_tensor[:, :-1]
    elif dim == 2:
        return return_tensor[:, :, :-1]
    else:
        raise RuntimeError("Cumprod on dimension 3 and more is not implemented")
Implementing exclusive cumprod. There is cumprod in pytorch, however there is no exclusive mode. cumprod(x) = [x1, x1x2, x1x2x3, ..., prod_{i=1}^n x_i] exclusive means cumprod(x) = [1, x1, x1x2, x1x2x3, ..., prod_{i=1}^{n-1} x_i]
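A quick sanity check, not from the original source, assuming torch and the two functions above are in scope: the exclusive variant shifts the running product right by one position and starts at 1.

import torch

x = torch.tensor([[0.5, 0.5, 0.5]])
inclusive = torch.cumprod(x, dim=1)        # [[0.5000, 0.2500, 0.1250]]
exclusive = exclusive_cumprod(x, dim=1)    # approx [[1.0000, 0.5000, 0.2500]]
# the exclusive result is the inclusive one shifted right, up to the eps term
print(torch.allclose(exclusive[:, 1:], inclusive[:, :-1], atol=1e-4))  # True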
19,041
import torch

The provided code snippet includes necessary dependencies for implementing the `lengths_to_mask` function. Write a Python function `def lengths_to_mask(lengths, max_len: int, dim: int = 0, negative_mask: bool = False)` to solve the following problem: Convert a tensor of lengths to mask For example, lengths = [[2, 3, 4]], max_len = 5 mask = [[1, 1, 1], [1, 1, 1], [0, 1, 1], [0, 0, 1], [0, 0, 0]] Here is the function: def lengths_to_mask(lengths, max_len: int, dim: int = 0, negative_mask: bool = False):
    """
    Convert a tensor of lengths to mask
    For example, lengths = [[2, 3, 4]], max_len = 5
    mask =
       [[1, 1, 1],
        [1, 1, 1],
        [0, 1, 1],
        [0, 0, 1],
        [0, 0, 0]]
    """
    assert len(lengths.size()) <= 2
    if len(lengths.size()) == 2:
        if dim == 1:
            lengths = lengths.t()
    else:
        lengths = lengths.unsqueeze(1)

    # lengths : batch_size, 1
    lengths = lengths.view(-1, 1)

    batch_size = lengths.size(0)
    # batch_size, max_len
    mask = torch.arange(max_len).expand(batch_size, max_len).type_as(lengths) < lengths

    if negative_mask:
        mask = ~mask

    if dim == 0:
        # max_len, batch_size
        mask = mask.t()

    return mask
Convert a tensor of lengths to mask For example, lengths = [[2, 3, 4]], max_len = 5 mask = [[1, 1, 1], [1, 1, 1], [0, 1, 1], [0, 0, 1], [0, 0, 0]]
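A small illustration, assuming torch and lengths_to_mask above are in scope; it reproduces the example from the docstring (the output shape is max_len x batch for the default dim=0).

import torch

lengths = torch.tensor([2, 3, 4])
mask = lengths_to_mask(lengths, max_len=5)
print(mask.long())
# tensor([[1, 1, 1],
#         [1, 1, 1],
#         [0, 1, 1],
#         [0, 0, 1],
#         [0, 0, 0]])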
19,042
import torch The provided code snippet includes necessary dependencies for implementing the `moving_sum` function. Write a Python function `def moving_sum(x, start_idx: int, end_idx: int)` to solve the following problem: From MONOTONIC CHUNKWISE ATTENTION https://arxiv.org/pdf/1712.05382.pdf Equation (18) x = [x_1, x_2, ..., x_N] MovingSum(x, start_idx, end_idx)_n = Sigma_{m=n−(start_idx−1)}^{n+end_idx-1} x_m for n in {1, 2, 3, ..., N} x : src_len, batch_size start_idx : start idx end_idx : end idx Example src_len = 5 batch_size = 3 x = [[ 0, 5, 10], [ 1, 6, 11], [ 2, 7, 12], [ 3, 8, 13], [ 4, 9, 14]] MovingSum(x, 3, 1) = [[ 0, 5, 10], [ 1, 11, 21], [ 3, 18, 33], [ 6, 21, 36], [ 9, 24, 39]] MovingSum(x, 1, 3) = [[ 3, 18, 33], [ 6, 21, 36], [ 9, 24, 39], [ 7, 17, 27], [ 4, 9, 14]] Here is the function: def moving_sum(x, start_idx: int, end_idx: int): """ From MONOTONIC CHUNKWISE ATTENTION https://arxiv.org/pdf/1712.05382.pdf Equation (18) x = [x_1, x_2, ..., x_N] MovingSum(x, start_idx, end_idx)_n = Sigma_{m=n−(start_idx−1)}^{n+end_idx-1} x_m for n in {1, 2, 3, ..., N} x : src_len, batch_size start_idx : start idx end_idx : end idx Example src_len = 5 batch_size = 3 x = [[ 0, 5, 10], [ 1, 6, 11], [ 2, 7, 12], [ 3, 8, 13], [ 4, 9, 14]] MovingSum(x, 3, 1) = [[ 0, 5, 10], [ 1, 11, 21], [ 3, 18, 33], [ 6, 21, 36], [ 9, 24, 39]] MovingSum(x, 1, 3) = [[ 3, 18, 33], [ 6, 21, 36], [ 9, 24, 39], [ 7, 17, 27], [ 4, 9, 14]] """ assert start_idx > 0 and end_idx > 0 assert len(x.size()) == 2 src_len, batch_size = x.size() # batch_size, 1, src_len x = x.t().unsqueeze(1) # batch_size, 1, src_len moving_sum_weight = x.new_ones([1, 1, end_idx + start_idx - 1]) moving_sum = torch.nn.functional.conv1d( x, moving_sum_weight, padding=start_idx + end_idx - 1 ).squeeze(1).t() moving_sum = moving_sum[end_idx: -start_idx] assert src_len == moving_sum.size(0) assert batch_size == moving_sum.size(1) return moving_sum
From MONOTONIC CHUNKWISE ATTENTION https://arxiv.org/pdf/1712.05382.pdf Equation (18) x = [x_1, x_2, ..., x_N] MovingSum(x, start_idx, end_idx)_n = Sigma_{m=n−(start_idx−1)}^{n+end_idx-1} x_m for n in {1, 2, 3, ..., N} x : src_len, batch_size start_idx : start idx end_idx : end idx Example src_len = 5 batch_size = 3 x = [[ 0, 5, 10], [ 1, 6, 11], [ 2, 7, 12], [ 3, 8, 13], [ 4, 9, 14]] MovingSum(x, 3, 1) = [[ 0, 5, 10], [ 1, 11, 21], [ 3, 18, 33], [ 6, 21, 36], [ 9, 24, 39]] MovingSum(x, 1, 3) = [[ 3, 18, 33], [ 6, 21, 36], [ 9, 24, 39], [ 7, 17, 27], [ 4, 9, 14]]
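A worked check, assuming torch and moving_sum above are in scope; it rebuilds the 5 x 3 input from the docstring and reproduces MovingSum(x, 3, 1).

import torch

x = torch.arange(15, dtype=torch.float).view(3, 5).t()   # src_len=5, batch=3
print(moving_sum(x, start_idx=3, end_idx=1))
# tensor([[ 0.,  5., 10.],
#         [ 1., 11., 21.],
#         [ 3., 18., 33.],
#         [ 6., 21., 36.],
#         [ 9., 24., 39.]])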
19,043
import argparse import sys import json from tornado import web, ioloop from scorers import build_scorer DEFAULT_HOSTNAME = 'localhost' DEFAULT_PORT = 12321 def add_args(): parser = argparse.ArgumentParser() # fmt: off parser.add_argument('--hostname', type=str, default=DEFAULT_HOSTNAME, help='Server hostname') parser.add_argument('--port', type=int, default=DEFAULT_PORT, help='Server port number') args, _ = parser.parse_known_args() # fmt: on return args
null
19,044
import argparse
import sys
import json
from tornado import web, ioloop
from scorers import build_scorer

DEFAULT_HOSTNAME = 'localhost'
DEFAULT_PORT = 12321

class ScorerHandler(web.RequestHandler):
    # Shared base class for the handlers below; tornado passes the
    # dict(scorer=scorer) given in each route spec into initialize().
    def initialize(self, scorer):
        self.scorer = scorer

class EvalSessionHandler(ScorerHandler):
    def post(self):
        self.scorer.reset()

    def get(self):
        r = json.dumps(self.scorer.get_info())
        self.write(r)

class ResultHandler(ScorerHandler):
    def get(self):
        r = json.dumps(self.scorer.score())
        self.write(r)

class SourceHandler(ScorerHandler):
    def get(self):
        sent_id = int(self.get_argument('sent_id'))
        segment_size = None
        if "segment_size" in self.request.arguments:
            string = self.get_argument('segment_size')
            if len(string) > 0:
                segment_size = int(string)
        r = json.dumps(self.scorer.send_src(int(sent_id), segment_size))
        self.write(r)

class HypothesisHandler(ScorerHandler):
    def put(self):
        sent_id = int(self.get_argument('sent_id'))
        list_of_tokens = self.request.body.decode('utf-8').strip().split()
        self.scorer.recv_hyp(sent_id, list_of_tokens)

def start_server(scorer, hostname=DEFAULT_HOSTNAME, port=DEFAULT_PORT, debug=False):
    app = web.Application([
        (r'/result', ResultHandler, dict(scorer=scorer)),
        (r'/src', SourceHandler, dict(scorer=scorer)),
        (r'/hypo', HypothesisHandler, dict(scorer=scorer)),
        (r'/', EvalSessionHandler, dict(scorer=scorer)),
    ], debug=debug)
    app.listen(port, max_buffer_size=1024 ** 3)
    sys.stdout.write(f"Evaluation Server Started. Listening to port {port}\n")
    ioloop.IOLoop.current().start()
null
19,045
import argparse from client import SimulSTEvaluationService, SimulSTLocalEvaluationService from fairseq.registry import REGISTRIES from agents import build_agent DEFAULT_HOSTNAME = 'localhost' DEFAULT_PORT = 12321 REGISTRIES = {} def get_args(): parser = argparse.ArgumentParser() parser.add_argument('--hostname', type=str, default=DEFAULT_HOSTNAME, help='server hostname') parser.add_argument('--port', type=int, default=DEFAULT_PORT, help='server port number') parser.add_argument('--agent-type', default='simul_trans_text', help='Agent type') parser.add_argument('--scorer-type', default='text', help='Scorer type') parser.add_argument('--start-idx', type=int, default=0, help='Start index of the sentence to evaluate') parser.add_argument('--end-idx', type=int, default=float('inf'), help='End index of the sentence to evaluate') parser.add_argument('--scores', action="store_true", help='Request scores from server') parser.add_argument('--reset-server', action="store_true", help='Reset the server') parser.add_argument('--num-threads', type=int, default=10, help='Number of threads used by agent') parser.add_argument('--local', action="store_true", default=False, help='Local evaluation') args, _ = parser.parse_known_args() for registry_name, REGISTRY in REGISTRIES.items(): choice = getattr(args, registry_name, None) if choice is not None: cls = REGISTRY["registry"][choice] if hasattr(cls, "add_args"): cls.add_args(parser) args = parser.parse_args() return args
null
19,046
import argparse import fileinput import hashlib from multiprocessing import Pool import sys def get_hashes_and_lines(raw_line): hash = hashlib.md5(raw_line).hexdigest() return hash, raw_line
null
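A tiny illustration (not from the original script), assuming the helper above is in scope: it hashes a raw byte line so duplicates can be spotted across inputs.

line = b"the quick brown fox\n"
h, raw = get_hashes_and_lines(line)
print(len(h), h[:8])   # 32, first characters of the md5 hex digest
print(raw == line)     # True: the raw bytes are returned unchanged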
19,047
import os.path as op import argparse import os from multiprocessing import cpu_count from collections import namedtuple from typing import Optional, List import sentencepiece as sp from fairseq.data.encoders.moses_tokenizer import MosesTokenizer from fairseq.data.encoders.byte_utils import byte_encode from fairseq.data.encoders.sentencepiece_bpe import SentencepieceBPE from fairseq.data.encoders.characters import Characters from fairseq.data.encoders.byte_bpe import ByteBPE from fairseq.data.encoders.bytes import Bytes SPLITS = ['train', 'valid', 'test'] def _convert_xml(in_path: str, out_path: str): with open(in_path) as f, open(out_path, 'w') as f_o: for s in f: ss = s.strip() if not ss.startswith('<seg'): continue ss = ss.replace('</seg>', '').split('">') assert len(ss) == 2 f_o.write(ss[1].strip() + '\n') def _convert_train(in_path: str, out_path: str): with open(in_path) as f, open(out_path, 'w') as f_o: for s in f: ss = s.strip() if ss.startswith('<'): continue f_o.write(ss.strip() + '\n') def _get_bytes(in_path: str, out_path: str): with open(in_path) as f, open(out_path, 'w') as f_o: for s in f: f_o.write(Bytes.encode(s.strip()) + '\n') def _get_chars(in_path: str, out_path: str): with open(in_path) as f, open(out_path, 'w') as f_o: for s in f: f_o.write(Characters.encode(s.strip()) + '\n') def pretokenize(in_path: str, out_path: str, src: str, tgt: str): Args = namedtuple('Args', ['moses_source_lang', 'moses_target_lang', 'moses_no_dash_splits', 'moses_no_escape']) args = Args(moses_source_lang=src, moses_target_lang=tgt, moses_no_dash_splits=False, moses_no_escape=False) pretokenizer = MosesTokenizer(args) with open(in_path) as f, open(out_path, 'w') as f_o: for s in f: f_o.write(pretokenizer.encode(s.strip()) + '\n') def _convert_to_bchar(in_path_prefix: str, src: str, tgt: str, out_path: str): with open(out_path, 'w') as f_o: for lang in [src, tgt]: with open(f'{in_path_prefix}.{lang}') as f: for s in f: f_o.write(byte_encode(s.strip()) + '\n') def _get_bpe(in_path: str, model_prefix: str, vocab_size: int): arguments = [ f'--input={in_path}', f'--model_prefix={model_prefix}', f'--model_type=bpe', f'--vocab_size={vocab_size}', '--character_coverage=1.0', '--normalization_rule_name=identity', f'--num_threads={cpu_count()}' ] sp.SentencePieceTrainer.Train(' '.join(arguments)) def _apply_bbpe(model_path: str, in_path: str, out_path: str): Args = namedtuple('Args', ['sentencepiece_model_path']) args = Args(sentencepiece_model_path=model_path) tokenizer = ByteBPE(args) with open(in_path) as f, open(out_path, 'w') as f_o: for s in f: f_o.write(tokenizer.encode(s.strip()) + '\n') def _apply_bpe(model_path: str, in_path: str, out_path: str): Args = namedtuple('Args', ['sentencepiece_vocab']) args = Args(sentencepiece_vocab=model_path) tokenizer = SentencepieceBPE(args) with open(in_path) as f, open(out_path, 'w') as f_o: for s in f: f_o.write(tokenizer.encode(s.strip()) + '\n') def _concat_files(in_paths: List[str], out_path: str): with open(out_path, 'w') as f_o: for p in in_paths: with open(p) as f: for r in f: f_o.write(r) def preprocess_iwslt17(root: str, src: str, tgt: str, bpe_size: Optional[int], need_chars: bool, bbpe_size: Optional[int], need_bytes: bool): # extract bitext in_root = op.join(root, f'{src}-{tgt}') for lang in [src, tgt]: _convert_train( op.join(in_root, f'train.tags.{src}-{tgt}.{lang}'), op.join(root, f'train.{lang}') ) _convert_xml( op.join(in_root, f'IWSLT17.TED.dev2010.{src}-{tgt}.{lang}.xml'), op.join(root, f'valid.{lang}') ) _convert_xml( op.join(in_root, 
f'IWSLT17.TED.tst2015.{src}-{tgt}.{lang}.xml'), op.join(root, f'test.{lang}') ) # pre-tokenize for lang in [src, tgt]: for split in SPLITS: pretokenize(op.join(root, f'{split}.{lang}'), op.join(root, f'{split}.moses.{lang}'), src, tgt) # tokenize with BPE vocabulary if bpe_size is not None: # learn vocabulary concated_train_path = op.join(root, 'train.all') _concat_files( [op.join(root, 'train.moses.fr'), op.join(root, 'train.moses.en')], concated_train_path ) bpe_model_prefix = op.join(root, f'spm_bpe{bpe_size}') _get_bpe(concated_train_path, bpe_model_prefix, bpe_size) os.remove(concated_train_path) # apply for lang in [src, tgt]: for split in SPLITS: _apply_bpe( bpe_model_prefix + '.model', op.join(root, f'{split}.moses.{lang}'), op.join(root, f'{split}.moses.bpe{bpe_size}.{lang}') ) # tokenize with bytes vocabulary if need_bytes: for lang in [src, tgt]: for split in SPLITS: _get_bytes(op.join(root, f'{split}.moses.{lang}'), op.join(root, f'{split}.moses.bytes.{lang}')) # tokenize with characters vocabulary if need_chars: for lang in [src, tgt]: for split in SPLITS: _get_chars(op.join(root, f'{split}.moses.{lang}'), op.join(root, f'{split}.moses.chars.{lang}')) # tokenize with byte-level BPE vocabulary if bbpe_size is not None: # learn vocabulary bchar_path = op.join(root, 'train.bchar') _convert_to_bchar(op.join(root, 'train.moses'), src, tgt, bchar_path) bbpe_model_prefix = op.join(root, f'spm_bbpe{bbpe_size}') _get_bpe(bchar_path, bbpe_model_prefix, bbpe_size) os.remove(bchar_path) # apply for lang in [src, tgt]: for split in SPLITS: _apply_bbpe( bbpe_model_prefix + '.model', op.join(root, f'{split}.moses.{lang}'), op.join(root, f'{split}.moses.bbpe{bbpe_size}.{lang}') )
null
19,048
import torch.nn as nn import torch.nn.functional as F from fairseq.models import register_model, register_model_architecture from fairseq.models.transformer import TransformerModel, TransformerEncoder def gru_transformer_base_architecture(args): args.encoder_embed_path = getattr(args, "encoder_embed_path", None) args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048) args.encoder_layers = getattr(args, "encoder_layers", 6) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8) args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False) args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False) args.decoder_embed_path = getattr(args, "decoder_embed_path", None) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim) args.decoder_ffn_embed_dim = getattr( args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim ) args.decoder_layers = getattr(args, "decoder_layers", 6) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8) args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False) args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False) args.attention_dropout = getattr(args, "attention_dropout", 0.0) args.activation_dropout = getattr(args, "activation_dropout", 0.0) args.activation_fn = getattr(args, "activation_fn", "relu") args.dropout = getattr(args, "dropout", 0.1) args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None) args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0) args.share_decoder_input_output_embed = getattr( args, "share_decoder_input_output_embed", False ) args.share_all_embeddings = getattr(args, "share_all_embeddings", False) args.no_token_positional_embeddings = getattr( args, "no_token_positional_embeddings", False ) args.adaptive_input = getattr(args, "adaptive_input", False) args.no_cross_attention = getattr(args, "no_cross_attention", False) args.cross_self_attention = getattr(args, "cross_self_attention", False) args.layer_wise_attention = getattr(args, "layer_wise_attention", False) args.decoder_output_dim = getattr( args, "decoder_output_dim", args.decoder_embed_dim ) args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim) args.no_scale_embedding = getattr(args, "no_scale_embedding", False) args.layernorm_embedding = getattr(args, "layernorm_embedding", False) def gru_transformer_big(args): args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4096) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16) args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 1024) args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 4096) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16) args.dropout = getattr(args, "dropout", 0.3) gru_transformer_base_architecture(args)
null
19,049
import argparse import glob import os import soundfile import random def get_parser(): parser = argparse.ArgumentParser() parser.add_argument('root', metavar='DIR', help='root directory containing flac files to index') parser.add_argument('--valid-percent', default=0.01, type=float, metavar='D', help='percentage of data to use as validation set (between 0 and 1)') parser.add_argument('--dest', default='.', type=str, metavar='DIR', help='output directory') parser.add_argument('--ext', default='flac', type=str, metavar='EXT', help='extension to look for') parser.add_argument('--seed', default=42, type=int, metavar='N', help='random seed') parser.add_argument('--path-must-contain', default=None, type=str, metavar='FRAG', help='if set, path must contain this substring for a file to be included in the manifest') return parser
null
19,050
import argparse import glob import os from shutil import copy import h5py import soundfile as sf import numpy as np import torch from torch import nn import tqdm from fairseq.models.wav2vec import Wav2VecModel The provided code snippet includes necessary dependencies for implementing the `read_audio` function. Write a Python function `def read_audio(fname)` to solve the following problem: Load an audio file and return PCM along with the sample rate Here is the function: def read_audio(fname): """ Load an audio file and return PCM along with the sample rate """ wav, sr = sf.read(fname) assert sr == 16e3 return wav, 16e3
Load an audio file and return PCM along with the sample rate
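A hypothetical usage sketch (the file name and tone are invented): write a one-second 16 kHz signal with soundfile, then load it back through read_audio, which asserts the expected sample rate.

import numpy as np
import soundfile as sf

sr = 16000
tone = 0.1 * np.sin(2 * np.pi * 440 * np.arange(sr) / sr)
sf.write('example_16k.wav', tone, sr)     # hypothetical scratch file

wav, rate = read_audio('example_16k.wav')
print(wav.shape, rate)   # (16000,) 16000.0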
19,051
import torch

class ScalarBias(torch.autograd.Function):
    """
    Adds a vector of scalars, used in self-attention mechanism to allow
    the model to optionally attend to this vector instead of the past
    """

    @staticmethod
    def forward(ctx, input, dim, bias_init):
        size = list(input.size())
        size[dim] += 1
        output = input.new(*size).fill_(bias_init)
        output.narrow(dim, 1, size[dim] - 1).copy_(input)
        ctx.dim = dim
        return output

    @staticmethod
    def backward(ctx, grad):
        return grad.narrow(ctx.dim, 1, grad.size(ctx.dim) - 1), None, None

def scalar_bias(input, dim, bias_init=0):
    return ScalarBias.apply(input, dim, bias_init)
null
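A shape check, assuming torch and scalar_bias above are in scope: the bias column is prepended along the chosen dimension (here the key/time axis), and gradients flow back only to the original entries.

import torch

attn = torch.randn(2, 4, 4, requires_grad=True)   # (batch, tgt_len, src_len)
out = scalar_bias(attn, dim=2)                     # bias_init defaults to 0
print(out.shape)            # torch.Size([2, 4, 5])
print(out[:, :, 0].sum())   # tensor(0., ...): the prepended column is the bias
out.sum().backward()
print(attn.grad.shape)      # torch.Size([2, 4, 4])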
19,052
import torch.nn.functional as F The provided code snippet includes necessary dependencies for implementing the `unfold1d` function. Write a Python function `def unfold1d(x, kernel_size, padding_l, pad_value=0)` to solve the following problem: unfold T x B x C to T x B x C x K Here is the function: def unfold1d(x, kernel_size, padding_l, pad_value=0): '''unfold T x B x C to T x B x C x K''' if kernel_size > 1: T, B, C = x.size() x = F.pad(x, (0, 0, 0, 0, padding_l, kernel_size - 1 - padding_l), value=pad_value) x = x.as_strided((T, B, C, kernel_size), (B*C, C, 1, B*C)) else: x = x.unsqueeze(3) return x
unfold T x B x C to T x B x C x K
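A quick shape illustration, assuming torch and unfold1d above are in scope: each timestep is expanded into a kernel_size window, with left padding here so the window is causal.

import torch

x = torch.arange(1., 13.).view(4, 1, 3)            # T=4, B=1, C=3
windows = unfold1d(x, kernel_size=3, padding_l=2)
print(windows.shape)      # torch.Size([4, 1, 3, 3])
print(windows[0, 0, 0])   # tensor([0., 0., 1.]): two left pads, then x[0, 0, 0]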
19,053
from typing import Dict, List, Optional import torch import torch.nn as nn import torch.nn.functional as F from fairseq import utils from fairseq.modules import LayerNorm, MultiheadAttention from fairseq.modules.quant_noise import quant_noise from torch import Tensor def Linear(in_features, out_features, bias=True): m = nn.Linear(in_features, out_features, bias) nn.init.xavier_uniform_(m.weight) if bias: nn.init.constant_(m.bias, 0.0) return m
null
19,054
import numpy as np import torch import torch.nn as nn def logsumexp(x, dim=1): return torch.logsumexp(x.float(), dim=dim).type_as(x)
null
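A sanity check, assuming torch and logsumexp above are in scope: it should match the naive log-sum-exp along dim=1 while computing in float32 internally.

import torch

x = torch.randn(2, 5)
naive = torch.log(torch.exp(x).sum(dim=1))
print(torch.allclose(logsumexp(x), naive, atol=1e-6))   # True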
19,055
def gen_forward(): kernels = [3, 5, 7, 15, 31, 63, 127, 255] seqs = [32 * x for x in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]] head = """ /** * Copyright (c) Facebook, Inc. and its affiliates. * * This source code is licensed under the MIT license found in the * LICENSE file in the root directory of this source tree. */ #include "lightconv_cuda.cuh" std::vector<at::Tensor> lightconv_cuda_forward(at::Tensor input, at::Tensor filters, int padding_l) { at::DeviceGuard g(input.device()); const auto minibatch = input.size(0); const auto numFeatures = input.size(1); const auto sequenceLength = input.size(2); const auto numHeads = filters.size(0); const auto filterSize = filters.size(1); const auto numFiltersInBlock = numFeatures / numHeads; const dim3 blocks(minibatch, numFeatures); auto output = at::zeros_like(input); auto stream = at::cuda::getCurrentCUDAStream(); """ sequence_if = """ if (sequenceLength <= {seq}) {{ switch(filterSize) {{ """ case_k = """ case {k}: """ main_block = """ if (padding_l == {pad}) {{ AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "lightconv_forward", ([&] {{ lightconv_forward_kernel<{k}, {b_size}, {pad}, scalar_t> <<<blocks, {b_size}, 0, stream>>>( input.data<scalar_t>(), filters.data<scalar_t>(), minibatch, sequenceLength, numFeatures, numFiltersInBlock, output.data<scalar_t>()); }})); }} else """ bad_padding = """ { std::cout << "WARNING: Unsupported padding size - skipping forward pass" << std::endl; } break; """ bad_filter = """ default: std::cout << "WARNING: Unsupported filter length passed - skipping forward pass" << std::endl; } """ con_else = """ } else """ final_else = """ { switch(filterSize) { """ final_return = """ } return {output}; } """ with open("lightconv_cuda_forward.cu", 'w') as forward: forward.write(head) for seq in seqs: forward.write(sequence_if.format(seq=seq)) for k in kernels: forward.write(case_k.format(k=k)) for pad in [k // 2, k - 1]: forward.write(main_block.format(k=k, b_size=seq, pad=pad)) forward.write(bad_padding) forward.write(bad_filter) forward.write(con_else) forward.write(final_else) for k in kernels: forward.write(case_k.format(k=k)) for pad in [k // 2, k - 1]: forward.write(main_block.format(k=k, b_size=seq, pad=pad)) forward.write(bad_padding) forward.write(bad_filter) forward.write(final_return)
null
19,056
def gen_backward(): head = """ /** * Copyright (c) Facebook, Inc. and its affiliates. * * This source code is licensed under the MIT license found in the * LICENSE file in the root directory of this source tree. */ #include "lightconv_cuda.cuh" std::vector<at::Tensor> lightconv_cuda_backward( at::Tensor gradOutput, int padding_l, at::Tensor input, at::Tensor filters) { // gradWrtInput const int minibatch = input.size(0); const int numFeatures = input.size(1); const int sequenceLength = input.size(2); const int numHeads = filters.size(0); const int filterSize = filters.size(1); const dim3 gradBlocks(minibatch, numFeatures); const dim3 weightGradFirstpassShortBlocks(minibatch, numHeads); const dim3 weightGradSecondpassBlocks(numHeads, filterSize); const int numFiltersInBlock = numFeatures / numHeads; auto gradInput = at::zeros_like(input); auto gradFilters = at::zeros_like(filters); at::DeviceGuard g(input.device()); auto stream = at::cuda::getCurrentCUDAStream(); switch(filterSize) { """ sequence_if = """ if (sequenceLength <= {seq}) {{ """ case_k = """ case {k}: """ main_block = """ if (padding_l == {p}) {{ AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "lightconv_backward", ([&] {{ lightconv_grad_wrt_input_kernel<{k}, {b_size}, {p}, scalar_t> <<<gradBlocks, {b_size}, 0, stream>>>( gradOutput.data<scalar_t>(), filters.data<scalar_t>(), minibatch, sequenceLength, numFeatures, numFiltersInBlock, gradInput.data<scalar_t>()); """ weight_grad_short = """ at::Tensor tempSumGradFilters = at::zeros({{minibatch, numHeads, filterSize}}, input.options().dtype(at::kFloat)); lightconv_grad_wrt_weights_firstpass_short_kernel<{k}, {b_size}, {p}, scalar_t> <<<weightGradFirstpassShortBlocks, {b_size}, 0, stream>>>( input.data<scalar_t>(), gradOutput.data<scalar_t>(), minibatch, sequenceLength, numFeatures, numFiltersInBlock, numHeads, tempSumGradFilters.data<float>() ); lightconv_grad_wrt_weights_secondpass_short_kernel<{k}, {b_size}, scalar_t> <<<weightGradSecondpassBlocks, {b_size}, 0, stream>>>( tempSumGradFilters.data<float>(), minibatch, numFiltersInBlock, gradFilters.data<scalar_t>() ); }})); }} else """ weight_grad = """ at::Tensor tempSumGradFilters = at::zeros({{minibatch, numFeatures, filterSize}}, input.options().dtype(at::kFloat)); lightconv_grad_wrt_weights_firstpass_kernel<{k}, {b_size}, {p}, scalar_t> <<<gradBlocks, {b_size}, 0, stream>>>( input.data<scalar_t>(), gradOutput.data<scalar_t>(), minibatch, sequenceLength, numFeatures, numFiltersInBlock, tempSumGradFilters.data<float>() ); lightconv_grad_wrt_weights_secondpass_kernel<{k}, {b_size}, scalar_t> <<<weightGradSecondpassBlocks, {b_size}, 0, stream>>>( tempSumGradFilters.data<float>(), minibatch, numFiltersInBlock, gradFilters.data<scalar_t>() ); }})); }} else """ bad_padding = """ { std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl; } """ breakout = """ break; """ bad_filter = """ default: std::cout << "WARNING: Unsupported filter length passed - skipping backward pass" << std::endl; """ con_else = """ } else """ final_else = """ { switch(filterSize) { """ last_return = """ } return {gradInput, gradFilters}; } """ kernels = [3, 5, 7, 15, 31, 63, 127, 255] seqs = [32 * x for x in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]] thresh = [32, 32, 64, 128, 256, -1, -1, -1] max_mem = [-1, -1, -1, -1, -1, 192, 96, 64] with open("lightconv_cuda_backward.cu", 'w') as backward: backward.write(head) for (k, t, mem) in zip(kernels, thresh, max_mem): backward.write(case_k.format(k=k)) for seq 
in seqs: if (t == -1 or seq <= t) and (mem == -1 or seq < mem): backward.write(sequence_if.format(seq=seq)) for p in [k // 2, k - 1]: backward.write(main_block.format(k=k, b_size=seq, p=p)) backward.write(weight_grad_short.format(k=k, b_size=seq, p=p)) backward.write(bad_padding) else: for p in [k // 2, k - 1]: backward.write(main_block.format(k=k, b_size=32, p=p)) backward.write(weight_grad.format(k=k, b_size=32, p=p)) backward.write(bad_padding) backward.write(breakout) break backward.write(con_else) backward.write(bad_filter) backward.write(last_return)
null
19,057
def gen_forward(): kernels = [3, 5, 7, 15, 31, 63, 127, 255] blocks = [32, 64, 128, 256] head = """ /** * Copyright (c) Facebook, Inc. and its affiliates. * * This source code is licensed under the MIT license found in the * LICENSE file in the root directory of this source tree. */ #include "dynamicconv_cuda.cuh" std::vector<at::Tensor> dynamicconv_cuda_forward(at::Tensor input, at::Tensor weight, int padding_l) { at::DeviceGuard g(input.device()); const auto minibatch = input.size(0); const auto numFeatures = input.size(1); const auto sequenceLength = input.size(2); const auto numHeads = weight.size(1); const auto filterSize = weight.size(2); const auto numFiltersInBlock = numFeatures / numHeads; const dim3 blocks(minibatch, numFeatures); auto output = at::zeros_like(input); auto stream = at::cuda::getCurrentCUDAStream(); """ switch = """ switch(filterSize) { """ case_k = """ case {k}: """ main_block = """ if (padding_l == {pad}) {{ AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "dynamicconv_forward", ([&] {{ dynamicconv_forward_kernel<{k}, {b_size}, {pad}, scalar_t> <<<blocks, {b_size}, 0, stream>>>( input.data<scalar_t>(), weight.data<scalar_t>(), minibatch, sequenceLength, numFeatures, numFiltersInBlock, numHeads, output.data<scalar_t>()); }})); }} else """ bad_padding = """ { std::cout << "WARNING: Unsupported padding size - skipping forward pass" << std::endl; } break;\n """ end = """ default: std::cout << "WARNING: Unsupported filter length passed - skipping forward pass" << std::endl; } return {output}; } """ with open("dynamicconv_cuda_forward.cu", 'w') as forward: forward.write(head) forward.write(switch) for k in kernels: b_size = 32 for b in blocks: if b > k: b_size = b break forward.write(case_k.format(k=k)) for pad in [k // 2, k - 1]: forward.write(main_block.format(k=k, b_size=b_size, pad=pad)) forward.write(bad_padding) forward.write(end)
null
19,058
def gen_backward(): kernels = [3, 5, 7, 15, 31, 63, 127, 255] thresh = [512, 512, 512, 512, 512, 380, 256, 256] min_block = [64, 64, 64, 64, 64, 64, 128, 256] seqs = [32 * x for x in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]] head = """ /** * Copyright (c) Facebook, Inc. and its affiliates. * * This source code is licensed under the MIT license found in the * LICENSE file in the root directory of this source tree. */ #include "dynamicconv_cuda.cuh" std::vector<at::Tensor> dynamicconv_cuda_backward(at::Tensor gradOutput, int padding_l, at::Tensor input, at::Tensor weight) { at::DeviceGuard g(input.device()); const auto minibatch = input.size(0); const auto numFeatures = input.size(1); const auto sequenceLength = input.size(2); const auto numHeads = weight.size(1); const auto filterSize = weight.size(2); const auto numFiltersInBlock = numFeatures / numHeads; auto numChunks = 1; auto gradInput = at::zeros_like(input); auto gradWeight = at::zeros_like(weight); auto stream = at::cuda::getCurrentCUDAStream(); dim3 blocks(minibatch, numHeads, numChunks); """ sequence_if = """ if (sequenceLength < {seq}) {{ switch(filterSize) {{ """ case_k = """ case {k}: """ chunks_reset = """ numChunks = int(ceilf(sequenceLength/float({b_size}))); blocks = dim3(minibatch, numHeads, numChunks); """ main_block = """ if (padding_l == {p}) {{ AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {{ dynamicconv_backward_kernel<{k}, {b_size}, {p}, scalar_t> <<<blocks, {b_size}, 0, stream>>>( gradOutput.data<scalar_t>(), input.data<scalar_t>(), weight.data<scalar_t>(), minibatch, sequenceLength, numFeatures, numFiltersInBlock, numHeads, gradWeight.data<scalar_t>(), gradInput.data<scalar_t>()); }})); }} else """ bad_padding = """ { std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl; } break;\n """ bad_filter = """ default: std::cout << "WARNING: Unsupported filter length passed - skipping backward pass" << std::endl; } """ con_else = """ } else """ final_else = """ { switch(filterSize) { """ last_return = """ } return {gradInput, gradWeight}; } """ with open("dynamicconv_cuda_backward.cu", 'w') as backward: backward.write(head) for seq in seqs: backward.write(sequence_if.format(seq=seq)) for k, t, m in zip(kernels, thresh, min_block): backward.write(case_k.format(k=k)) if seq <= t: b_size = seq else: b_size = m backward.write(chunks_reset.format(b_size=b_size)) for p in [k // 2, k - 1]: backward.write(main_block.format(k=k, b_size=b_size, p=p)) backward.write(bad_padding) backward.write(bad_filter) backward.write(con_else) backward.write(final_else) for k, m in zip(kernels, min_block): backward.write(case_k.format(k=k)) backward.write(chunks_reset.format(b_size=m)) for p in [k // 2, k - 1]: backward.write(main_block.format(k=k, b_size=m, p=p)) backward.write(bad_padding) backward.write(bad_filter) backward.write(last_return)
null
19,059
import torch import torch.nn as nn import torch.nn.functional as F try: from apex.normalization import FusedLayerNorm as _FusedLayerNorm has_fused_layernorm = True class FusedLayerNorm(_FusedLayerNorm): def forward(self, x): if not x.is_cuda: return super().forward(x) else: with torch.cuda.device(x.device): return super().forward(x) except ImportError: has_fused_layernorm = False def LayerNorm(normalized_shape, eps=1e-5, elementwise_affine=True, export=False): if not export and torch.cuda.is_available() and has_fused_layernorm: return FusedLayerNorm(normalized_shape, eps, elementwise_affine) return torch.nn.LayerNorm(normalized_shape, eps, elementwise_affine)
null
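A usage sketch, assuming torch and the LayerNorm factory above are in scope: passing export=True (or running without apex/CUDA) falls back to the plain torch.nn.LayerNorm.

import torch

ln = LayerNorm(512, export=True)        # force the plain PyTorch module
x = torch.randn(10, 2, 512)
print(type(ln).__name__, ln(x).shape)   # LayerNorm torch.Size([10, 2, 512])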
19,060
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import utils
from fairseq.modules.unfold import unfold1d
from fairseq.incremental_decoding_utils import with_incremental_state


# The decorator below supplies get_incremental_state/set_incremental_state,
# which the utils.* incremental-state helpers used further down rely on.
@with_incremental_state
class LightweightConv1dTBC(nn.Module):
    '''Lightweight Convolution assuming the input is TxBxC
    Args:
        input_size: # of channels of the input
        kernel_size: size of the convolution kernel
        padding_l: padding to the left when using "same" padding
        num_heads: number of heads used. The weight is of shape (num_heads, 1, kernel_size)
        weight_dropout: the drop rate of the DropConnect to drop the weight
        weight_softmax: normalize the weight with softmax before the convolution
        bias: use bias
    Shape:
        Input: TxBxC, i.e. (timesteps, batch_size, input_size)
        Output: TxBxC, i.e. (timesteps, batch_size, input_size)
    Attributes:
        weight: the learnable weights of the module of shape
            `(num_heads, 1, kernel_size)`
        bias: the learnable bias of the module of shape `(input_size)`
    '''
    def __init__(self, input_size, kernel_size=1, padding_l=None, num_heads=1,
                 weight_dropout=0., weight_softmax=False, bias=False):
        super().__init__()
        self.input_size = input_size
        self.kernel_size = kernel_size
        self.padding_l = padding_l
        self.num_heads = num_heads
        self.weight_dropout = weight_dropout
        self.weight_softmax = weight_softmax
        self.weight = nn.Parameter(torch.Tensor(num_heads, 1, kernel_size))
        if bias:
            self.bias = nn.Parameter(torch.Tensor(input_size))
        else:
            self.bias = None
        self.reset_parameters()
        self.onnx_trace = False

    def reset_parameters(self):
        nn.init.xavier_uniform_(self.weight)
        if self.bias is not None:
            nn.init.constant_(self.bias, 0.)

    def forward(self, x, incremental_state=None, unfold=False):
        '''Assuming the input, x, of the shape T x B x C and producing an output in the shape T x B x C
        args:
            x: Input of shape T x B x C, i.e. (timesteps, batch_size, input_size)
            incremental_state: A dict to keep the state
            unfold: unfold the input or not. If not, we use the matrix trick instead
        '''
        unfold = unfold or (incremental_state is not None)

        if unfold:
            output = self._forward_unfolded(x, incremental_state)
        else:
            output = self._forward_expanded(x, incremental_state)

        if self.bias is not None:
            output = output + self.bias.view(1, 1, -1)
        return output

    def prepare_for_onnx_export_(self):
        self.onnx_trace = True

    def _forward_unfolded(self, x, incremental_state):
        '''The conventional implementation of convolutions.
        Unfolding the input by having a window shifting to the right.'''
        T, B, C = x.size()
        K, H = self.kernel_size, self.num_heads
        R = C // H
        assert R * H == C == self.input_size

        weight = self.weight.view(H, K)
        if incremental_state is not None:
            input_buffer = self._get_input_buffer(incremental_state)
            if input_buffer is None:
                input_buffer = x.new()
            x_unfold = torch.cat([input_buffer, x.unsqueeze(3)], dim=3)
            if self.kernel_size > 1:
                self._set_input_buffer(incremental_state, x_unfold[:, :, :, -self.kernel_size+1:])
            x_unfold = x_unfold.view(T*B*H, R, -1)
        else:
            # unfold the input: T x B x C --> T' x B x C x K
            x_unfold = unfold1d(x, self.kernel_size, self.padding_l, 0)
            x_unfold = x_unfold.view(T*B*H, R, K)

        if self.weight_softmax:
            weight = utils.softmax(weight, dim=1, onnx_trace=self.onnx_trace).type_as(weight)

        if incremental_state is not None:
            weight = weight[:, -x_unfold.size(2):]
            K = weight.size(1)

        weight = weight.view(1, H, K).expand(T*B, H, K).contiguous().view(T*B*H, K, 1)

        weight = F.dropout(weight, self.weight_dropout, training=self.training)
        output = torch.bmm(x_unfold, weight)  # T*B*H x R x 1
        output = output.view(T, B, C)
        return output

    def _forward_expanded(self, x, incremental_state):
        '''Turn the convolution filters into band matrices and do matrix multiplication.
        This is faster when the sequence is short, but less memory efficient.
        This is not used in the decoder during inference.
        '''
        T, B, C = x.size()
        K, H = self.kernel_size, self.num_heads
        R = C // H
        assert R * H == C == self.input_size

        weight = self.weight.view(H, K)
        if self.weight_softmax:
            weight = utils.softmax(weight, dim=1, onnx_trace=self.onnx_trace).type_as(weight)
        weight = weight.view(1, H, K).expand(T*B, H, K).contiguous()
        weight = weight.view(T, B*H, K).transpose(0, 1)

        x = x.view(T, B*H, R).transpose(0, 1)
        P = self.padding_l
        if K > T and P == K-1:
            weight = weight.narrow(2, K-T, T)
            K, P = T, T-1
        # turn the convolution filters into band matrices
        weight_expanded = weight.new_zeros(B*H, T, T+K-1, requires_grad=False)
        weight_expanded.as_strided((B*H, T, K), (T*(T+K-1), T+K, 1)).copy_(weight)
        weight_expanded = weight_expanded.narrow(2, P, T)
        weight_expanded = F.dropout(weight_expanded, self.weight_dropout, training=self.training)
        output = torch.bmm(weight_expanded, x)
        output = output.transpose(0, 1).contiguous().view(T, B, C)
        return output

    def reorder_incremental_state(self, incremental_state, new_order):
        input_buffer = self._get_input_buffer(incremental_state)
        if input_buffer is not None:
            input_buffer = input_buffer.index_select(1, new_order)
            self._set_input_buffer(incremental_state, input_buffer)

    def _get_input_buffer(self, incremental_state):
        return utils.get_incremental_state(self, incremental_state, 'input_buffer')

    def _set_input_buffer(self, incremental_state, new_buffer):
        return utils.set_incremental_state(self, incremental_state, 'input_buffer', new_buffer)

    def extra_repr(self):
        s = '{}, kernel_size={}, padding_l={}, num_heads={}, weight_softmax={}, bias={}'.format(
            self.input_size, self.kernel_size, self.padding_l, self.num_heads, self.weight_softmax,
            self.bias is not None
        )
        if self.weight_dropout > 0.:
            s += ', weight_dropout={}'.format(self.weight_dropout)
        return s


def LightweightConv(input_size, kernel_size=1, padding_l=None, num_heads=1,
                    weight_dropout=0., weight_softmax=False, bias=False):
    if torch.cuda.is_available():
        try:
            from fairseq.modules.lightconv_layer import LightconvLayer
            return LightconvLayer(input_size, kernel_size=kernel_size,
                                  padding_l=padding_l, num_heads=num_heads,
                                  weight_dropout=weight_dropout,
                                  weight_softmax=weight_softmax, bias=bias)
        except ImportError as e:
            print(e)
    return LightweightConv1dTBC(input_size, kernel_size=kernel_size,
                                padding_l=padding_l, num_heads=num_heads,
                                weight_dropout=weight_dropout,
                                weight_softmax=weight_softmax, bias=bias)
null
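A minimal usage sketch for the record above (an illustration, not part of the dataset): the module consumes time-major tensors, and the sizes below are arbitrary assumptions. Setting padding_l = kernel_size - 1 gives causal "same" padding, so the output keeps the input length.

import torch

# 20 timesteps, batch of 4, 64 channels split across 8 heads
conv = LightweightConv1dTBC(input_size=64, kernel_size=3, padding_l=2,
                            num_heads=8, weight_softmax=True, bias=True)
x = torch.randn(20, 4, 64)   # T x B x C
y = conv(x)
assert y.shape == x.shape    # output is also T x B x C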
19,061
import logging

import torch
import torch.nn.functional as F

# Fused CUDA kernel from NVIDIA apex; the GPU branch below depends on this
# import (requires apex built with the xentropy extension).
from apex.contrib import xentropy


def _cross_entropy_pytorch(logits, target, ignore_index=None, reduction='mean'):
    lprobs = F.log_softmax(logits, dim=-1, dtype=torch.float32)
    return F.nll_loss(
        lprobs, target, ignore_index=ignore_index, reduction=reduction,
    )


def cross_entropy(logits, target, ignore_index=-100, reduction='mean'):
    if logits.device == torch.device('cpu'):
        return _cross_entropy_pytorch(logits, target, ignore_index, reduction)
    else:
        half_to_float = (logits.dtype == torch.half)
        losses = xentropy.SoftmaxCrossEntropyLoss.apply(
            logits, target, 0.0, ignore_index, half_to_float,
        )
        if reduction == 'sum':
            return losses.sum()
        elif reduction == 'mean':
            if ignore_index >= 0:
                return losses.sum() / target.ne(ignore_index).sum()
            else:
                return losses.mean()
        elif reduction == 'none':
            return losses
        else:
            raise NotImplementedError
null
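A hedged usage sketch for the record above; on CPU it exercises the pure-PyTorch fallback, while the CUDA branch assumes apex's fused kernel is installed. The shapes and class count are illustrative.

import torch

logits = torch.randn(8, 1000)                  # batch of 8, 1000 classes
target = torch.randint(0, 1000, (8,))
target[0] = -100                               # position excluded from the loss
loss = cross_entropy(logits, target, ignore_index=-100, reduction='mean')
print(float(loss))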
19,062
import logging import torch import torch.nn.functional as F def _cross_entropy_pytorch(logits, target, ignore_index=None, reduction='mean'): lprobs = F.log_softmax(logits, dim=-1, dtype=torch.float32) return F.nll_loss( lprobs, target, ignore_index=ignore_index, reduction=reduction, ) def cross_entropy(logits, target, ignore_index=-100, reduction='mean'): return _cross_entropy_pytorch(logits, target, ignore_index, reduction)
null
19,063
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import utils
from .unfold import unfold1d
from fairseq.incremental_decoding_utils import with_incremental_state


# Xavier-initialized linear layer used below to predict the dynamic filters
# (same helper as in fairseq's dynamic_convolution module).
def Linear(in_features, out_features, bias=True):
    m = nn.Linear(in_features, out_features, bias)
    nn.init.xavier_uniform_(m.weight)
    if bias:
        nn.init.constant_(m.bias, 0.)
    return m


# The decorator supplies get_incremental_state/set_incremental_state,
# which the utils.* incremental-state helpers used further down rely on.
@with_incremental_state
class DynamicConv1dTBC(nn.Module):
    '''Dynamic lightweight convolution taking T x B x C inputs
    Args:
        input_size: # of channels of the input
        kernel_size: size of the convolution kernel
        padding_l: padding to the left when using "same" padding
        num_heads: number of heads used. The weight is of shape (num_heads, 1, kernel_size)
        weight_dropout: the drop rate of the DropConnect to drop the weight
        weight_softmax: normalize the weight with softmax before the convolution
        renorm_padding: re-normalize the filters to ignore the padded part (only the non-padding parts sum up to 1)
        bias: use bias
        conv_bias: bias of the convolution
        query_size: specified when feeding a different input as the query
        in_proj: project the input and generate the filter together
    Shape:
        Input: TxBxC, i.e. (timesteps, batch_size, input_size)
        Output: TxBxC, i.e. (timesteps, batch_size, input_size)
    Attributes:
        weight: the learnable weights of the module of shape
            `(num_heads, 1, kernel_size)`
        bias: the learnable bias of the module of shape `(input_size)`
    '''
    def __init__(self, input_size, kernel_size=1, padding_l=None, num_heads=1,
                 weight_dropout=0., weight_softmax=False,
                 renorm_padding=False, bias=False, conv_bias=False,
                 query_size=None, in_proj=False):
        super().__init__()
        self.input_size = input_size
        self.query_size = input_size if query_size is None else query_size
        self.kernel_size = kernel_size
        self.padding_l = padding_l
        self.num_heads = num_heads
        self.weight_dropout = weight_dropout
        self.weight_softmax = weight_softmax
        self.renorm_padding = renorm_padding

        if in_proj:
            self.weight_linear = Linear(self.input_size, self.input_size + num_heads * kernel_size * 1)
        else:
            self.weight_linear = Linear(self.query_size, num_heads * kernel_size * 1, bias=bias)
        if conv_bias:
            self.conv_bias = nn.Parameter(torch.Tensor(input_size))
        else:
            self.conv_bias = None
        self.reset_parameters()

    @property
    def in_proj(self):
        return self.weight_linear.out_features == self.input_size + self.num_heads * self.kernel_size

    def reset_parameters(self):
        self.weight_linear.reset_parameters()
        if self.conv_bias is not None:
            nn.init.constant_(self.conv_bias, 0.)

    def forward(self, x, incremental_state=None, query=None, unfold=None):
        '''Assuming the input, x, of the shape T x B x C and producing an output in the shape T x B x C
        args:
            x: Input of shape T x B x C, i.e. (timesteps, batch_size, input_size)
            incremental_state: A dict to keep the state
            unfold: unfold the input or not. If not, we use the matrix trick instead
            query: use the specified query to predict the conv filters
        '''
        unfold = x.size(0) > 512 if unfold is None else unfold  # use unfold mode as default for long sequence to save memory
        unfold = unfold or (incremental_state is not None)
        assert query is None or not self.in_proj

        if query is None:
            query = x
        if unfold:
            output = self._forward_unfolded(x, incremental_state, query)
        else:
            output = self._forward_expanded(x, incremental_state, query)

        if self.conv_bias is not None:
            output = output + self.conv_bias.view(1, 1, -1)
        return output

    def _forward_unfolded(self, x, incremental_state, query):
        '''The conventional implementation of convolutions.
        Unfolding the input by having a window shifting to the right.'''
        T, B, C = x.size()
        K, H = self.kernel_size, self.num_heads
        R = C // H
        assert R * H == C == self.input_size

        if self.in_proj:
            proj = self.weight_linear(x)
            x = proj.narrow(2, 0, self.input_size).contiguous()
            weight = proj.narrow(2, self.input_size, H*K).contiguous().view(T*B*H, -1)
        else:
            weight = self.weight_linear(query).view(T*B*H, -1)

        # renorm_padding is only implemented in _forward_expanded
        assert not self.renorm_padding or incremental_state is not None

        if incremental_state is not None:
            input_buffer = self._get_input_buffer(incremental_state)
            if input_buffer is None:
                input_buffer = x.new()
            x_unfold = torch.cat([input_buffer, x.unsqueeze(3)], dim=3)
            if self.kernel_size > 1:
                self._set_input_buffer(incremental_state, x_unfold[:, :, :, -self.kernel_size+1:])
            x_unfold = x_unfold.view(T*B*H, R, -1)
        else:
            padding_l = self.padding_l
            if K > T and padding_l == K-1:
                weight = weight.narrow(1, K-T, T)
                K, padding_l = T, T-1
            # unfold the input: T x B x C --> T' x B x C x K
            x_unfold = unfold1d(x, K, padding_l, 0)
            x_unfold = x_unfold.view(T*B*H, R, K)

        if self.weight_softmax and not self.renorm_padding:
            weight = F.softmax(weight, dim=1)
        weight = weight.narrow(1, 0, K)

        if incremental_state is not None:
            weight = weight[:, -x_unfold.size(2):]
            K = weight.size(1)

        if self.weight_softmax and self.renorm_padding:
            weight = F.softmax(weight, dim=1)

        weight = F.dropout(weight, self.weight_dropout, training=self.training, inplace=False)

        output = torch.bmm(x_unfold, weight.unsqueeze(2))  # T*B*H x R x 1
        output = output.view(T, B, C)
        return output

    def _forward_expanded(self, x, incremental_state, query):
        '''Turn the convolution filters into band matrices and do matrix multiplication.
        This is faster when the sequence is short, but less memory efficient.
        This is not used in the decoder during inference.
        '''
        T, B, C = x.size()
        K, H = self.kernel_size, self.num_heads
        R = C // H
        assert R * H == C == self.input_size
        if self.in_proj:
            proj = self.weight_linear(x)
            x = proj.narrow(2, 0, self.input_size).contiguous()
            weight = proj.narrow(2, self.input_size, H*K).contiguous().view(T*B*H, -1)
        else:
            weight = self.weight_linear(query).view(T*B*H, -1)

        if not self.renorm_padding:
            if self.weight_softmax:
                weight = F.softmax(weight, dim=1)
            weight = F.dropout(weight, self.weight_dropout, training=self.training, inplace=False)
        weight = weight.narrow(1, 0, K).contiguous()
        weight = weight.view(T, B*H, K).transpose(0, 1)

        x = x.view(T, B*H, R).transpose(0, 1)
        if self.weight_softmax and self.renorm_padding:
            # turn the convolution filters into band matrices
            weight_expanded = weight.new(B*H, T, T+K-1).fill_(float('-inf'))
            weight_expanded.as_strided((B*H, T, K), (T*(T+K-1), T+K, 1)).copy_(weight)
            weight_expanded = weight_expanded.narrow(2, self.padding_l, T)
            # normalize the weight over valid positions like self-attention
            weight_expanded = F.softmax(weight_expanded, dim=2)
            weight_expanded = F.dropout(weight_expanded, self.weight_dropout, training=self.training, inplace=False)
        else:
            P = self.padding_l
            # For efficiency, we cut the kernel size and reduce the padding when the kernel is larger than the length
            if K > T and P == K-1:
                weight = weight.narrow(2, K-T, T)
                K, P = T, T-1
            # turn the convolution filters into band matrices
            weight_expanded = weight.new_zeros(B*H, T, T+K-1, requires_grad=False)
            weight_expanded.as_strided((B*H, T, K), (T*(T+K-1), T+K, 1)).copy_(weight)
            weight_expanded = weight_expanded.narrow(2, P, T)  # B*H x T x T
        output = torch.bmm(weight_expanded, x)
        output = output.transpose(0, 1).contiguous().view(T, B, C)
        return output

    def reorder_incremental_state(self, incremental_state, new_order):
        input_buffer = self._get_input_buffer(incremental_state)
        if input_buffer is not None:
            input_buffer = input_buffer.index_select(1, new_order)
            self._set_input_buffer(incremental_state, input_buffer)

    def _get_input_buffer(self, incremental_state):
        return utils.get_incremental_state(self, incremental_state, 'input_buffer')

    def _set_input_buffer(self, incremental_state, new_buffer):
        return utils.set_incremental_state(self, incremental_state, 'input_buffer', new_buffer)

    def extra_repr(self):
        s = '{}, kernel_size={}, padding_l={}, num_heads={}, weight_softmax={}, conv_bias={}, renorm_padding={}, in_proj={}'.format(
            self.input_size, self.kernel_size, self.padding_l, self.num_heads, self.weight_softmax,
            self.conv_bias is not None, self.renorm_padding, self.in_proj,
        )
        if self.query_size != self.input_size:
            s += ', query_size={}'.format(self.query_size)
        if self.weight_dropout > 0.:
            s += ', weight_dropout={}'.format(self.weight_dropout)
        return s


def DynamicConv(input_size, kernel_size=1, padding_l=None, num_heads=1,
                weight_dropout=0., weight_softmax=False,
                renorm_padding=False, bias=False, conv_bias=False,
                query_size=None, in_proj=False):
    if torch.cuda.is_available():
        try:
            from fairseq.modules.dynamicconv_layer import DynamicconvLayer
            return DynamicconvLayer(input_size, kernel_size=kernel_size,
                                    padding_l=padding_l, num_heads=num_heads,
                                    weight_dropout=weight_dropout,
                                    weight_softmax=weight_softmax, bias=bias)
        except ImportError as e:
            print(e)
    return DynamicConv1dTBC(input_size, kernel_size=kernel_size,
                            padding_l=padding_l, num_heads=num_heads,
                            weight_dropout=weight_dropout,
                            weight_softmax=weight_softmax, bias=bias)
null
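For illustration only, a sketch of calling the dynamic variant above with assumed sizes; unlike the lightweight module, the filters here are predicted from the input (or an explicit query) at every timestep.

import torch

conv = DynamicConv1dTBC(input_size=64, kernel_size=3, padding_l=2,
                        num_heads=8, weight_softmax=True)
x = torch.randn(20, 4, 64)    # T x B x C
y = conv(x)                   # per-timestep filters come from weight_linear(x)
assert y.shape == x.shape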
19,064
import torch import torch.nn as nn import torch.nn.functional as F from fairseq import utils from .unfold import unfold1d from fairseq.incremental_decoding_utils import with_incremental_state def Linear(in_features, out_features, bias=True): m = nn.Linear(in_features, out_features, bias) nn.init.xavier_uniform_(m.weight) if bias: nn.init.constant_(m.bias, 0.) return m
null
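A quick sanity check of the helper above (illustrative sizes): the weight is Xavier-uniform initialized and the bias starts at zero.

import torch

proj = Linear(512, 2048)
x = torch.randn(10, 512)
print(proj(x).shape)                  # torch.Size([10, 2048])
print(float(proj.bias.abs().max()))   # 0.0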
19,065
from typing import Optional, Tuple

import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq.modules import (
    LayerDropModuleList,
    LayerNorm,
    MultiheadAttention,
    PositionalEmbedding,
    TransformerSentenceEncoderLayer,
)
from fairseq.modules.quant_noise import quant_noise as apply_quant_noise_
import random

The provided code snippet includes necessary dependencies for implementing the `init_bert_params` function. Write a Python function `def init_bert_params(module)` to solve the following problem:
Initialize the weights specific to the BERT Model. This overrides the default initializations depending on the specified arguments. 1. If normal_init_linear_weights is set then weights of linear layer will be initialized using the normal distribution and bias will be set to the specified value. 2. If normal_init_embed_weights is set then weights of embedding layer will be initialized using the normal distribution. 3. If normal_init_proj_weights is set then weights of in_project_weight for MultiHeadAttention will be initialized using the normal distribution (to be validated).
Here is the function:
def init_bert_params(module):
    """
    Initialize the weights specific to the BERT Model.
    This overrides the default initializations depending on the specified
    arguments.
        1. If normal_init_linear_weights is set then weights of linear
           layer will be initialized using the normal distribution and
           bias will be set to the specified value.
        2. If normal_init_embed_weights is set then weights of embedding
           layer will be initialized using the normal distribution.
        3. If normal_init_proj_weights is set then weights of
           in_project_weight for MultiHeadAttention will be initialized
           using the normal distribution (to be validated).
    """

    if isinstance(module, nn.Linear):
        module.weight.data.normal_(mean=0.0, std=0.02)
        if module.bias is not None:
            module.bias.data.zero_()
    if isinstance(module, nn.Embedding):
        module.weight.data.normal_(mean=0.0, std=0.02)
        if module.padding_idx is not None:
            module.weight.data[module.padding_idx].zero_()
    if isinstance(module, MultiheadAttention):
        module.q_proj.weight.data.normal_(mean=0.0, std=0.02)
        module.k_proj.weight.data.normal_(mean=0.0, std=0.02)
        module.v_proj.weight.data.normal_(mean=0.0, std=0.02)
Initialize the weights specific to the BERT Model. This overrides the default initializations depending on the specified arguments. 1. If normal_init_linear_weights is set then weights of linear layer will be initialized using the normal distribution and bias will be set to the specified value. 2. If normal_init_embed_weights is set then weights of embedding layer will be initialized using the normal distribution. 3. If normal_init_proj_weights is set then weights of in_project_weight for MultiHeadAttention will be initialized using the normal distribution (to be validated).
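A minimal sketch of how the initializer above is typically driven through Module.apply; the toy module and its layer names are assumptions for illustration.

import torch.nn as nn

class Toy(nn.Module):
    def __init__(self):
        super().__init__()
        self.embed = nn.Embedding(100, 32, padding_idx=0)
        self.proj = nn.Linear(32, 32)

model = Toy()
model.apply(init_bert_params)   # recursively visits every submodule
# the padding row of the embedding is zeroed by the initializer
assert float(model.embed.weight.data[0].abs().sum()) == 0.0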
19,066
from __future__ import absolute_import, division, print_function, unicode_literals from collections.abc import Iterable from itertools import repeat import torch import torch.nn as nn def _pair(v): if isinstance(v, Iterable): assert len(v) == 2, "len(v) != 2" return v return tuple(repeat(v, 2))
null
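The helper above just broadcasts a scalar to a 2-tuple and passes length-2 iterables through unchanged, e.g.:

assert _pair(3) == (3, 3)          # scalar broadcast to both dimensions
assert _pair((3, 5)) == (3, 5)     # an iterable of length 2 is returned as-is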
19,067
from __future__ import absolute_import, division, print_function, unicode_literals from collections.abc import Iterable from itertools import repeat import torch import torch.nn as nn def infer_conv_output_dim(conv_op, input_dim, sample_inchannel): sample_seq_len = 200 sample_bsz = 10 x = torch.randn(sample_bsz, sample_inchannel, sample_seq_len, input_dim) # N x C x H x W # N: sample_bsz, C: sample_inchannel, H: sample_seq_len, W: input_dim x = conv_op(x) # N x C x H x W x = x.transpose(1, 2) # N x H x C x W bsz, seq = x.size()[:2] per_channel_dim = x.size()[3] # bsz: N, seq: H, CxW the rest return x.contiguous().view(bsz, seq, -1).size(-1), per_channel_dim
null
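A hedged example of probing a convolution stack with the helper above; the layer sizes are arbitrary assumptions.

import torch.nn as nn

conv = nn.Sequential(
    nn.Conv2d(1, 32, kernel_size=3, stride=2, padding=1),
    nn.ReLU(),
)
# input_dim is the feature (width) axis; a dummy batch probes the shapes
out_dim, per_channel = infer_conv_output_dim(conv, input_dim=80, sample_inchannel=1)
print(out_dim, per_channel)   # 1280 40: flattened C*W, and W per channel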