| id (int64, 0–190k) | prompt (string, length 21–13.4M) | docstring (string, length 1–12k, nullable) |
|---|---|---|
185,374 | from collections import Counter
from itertools import zip_longest
from fairseq import options, tasks, utils
from fairseq.data import indexed_dataset
from fairseq.binarizer import Binarizer
from multiprocessing import Pool
import os
import shutil
def dataset_dest_file(args, output_prefix, lang, extension):
base = da... | null |
185,375 | from collections import Counter
from itertools import zip_longest
from fairseq import options, tasks, utils
from fairseq.data import indexed_dataset
from fairseq.binarizer import Binarizer
from multiprocessing import Pool
import os
import shutil
class Binarizer:
def binarize(
filename,
dic... | null |
185,376 | from collections import Counter
from itertools import zip_longest
from fairseq import options, tasks, utils
from fairseq.data import indexed_dataset
from fairseq.binarizer import Binarizer
from multiprocessing import Pool
import os
import shutil
def main(args):
utils.import_user_module(args)
print(args)
os.... | null |
185,379 | import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import utils
from fairseq.modules import LayerNorm, MultiheadAttention
def Linear(in_features, out_features, bias=True):
m = nn.Linear(in_features, out_features, bias)
nn.init.xavier_uniform_(m.weight)
if bias:
nn.init.... | null |
185,380 | import math
import torch
def gelu_accurate(x):
if not hasattr(gelu_accurate, "_a"):
gelu_accurate._a = math.sqrt(2 / math.pi)
return 0.5 * x * (1 + torch.tanh(gelu_accurate._a * (x + 0.044715 * torch.pow(x, 3)))) | null |
185,381 | import math
import torch
def gelu(x: torch.Tensor) -> torch.Tensor:
if hasattr(torch.nn.functional, 'gelu'):
return torch.nn.functional.gelu(x.float()).type_as(x)
else:
return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0))) | null |
185,386 | import torch
def LayerNorm(normalized_shape, eps=1e-5, elementwise_affine=True, export=False):
if not export and torch.cuda.is_available():
try:
from apex.normalization import FusedLayerNorm
return FusedLayerNorm(normalized_shape, eps, elementwise_affine)
except ImportError:... | null |
185,387 | import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import utils
from fairseq.modules.unfold import unfold1d
class LightweightConv1dTBC(nn.Module):
'''Lightweight Convolution assuming the input is TxBxC
Args:
input_size: # of channels of the input
kernel_size: convolu... | null |
185,388 | import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import utils
from .unfold import unfold1d
class DynamicConv1dTBC(nn.Module):
def __init__(self, input_size, kernel_size=1, padding_l=None, num_heads=1,
weight_dropout=0., weight_softmax=False,
... | null |
185,389 | import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import utils
from .unfold import unfold1d
def Linear(in_features, out_features, bias=True):
m = nn.Linear(in_features, out_features, bias)
nn.init.xavier_uniform_(m.weight)
if bias:
nn.init.constant_(m.bias, 0.)
ret... | null |
185,390 | from typing import Optional, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq.modules import (
LayerNorm,
MultiheadAttention,
PositionalEmbedding,
TransformerSentenceEncoderLayer,
)
import random
The provided code snippet includes necessary dependencies for implemen... | Initialize the weights specific to the BERT Model. This overrides the default initializations depending on the specified arguments. 1. If normal_init_linear_weights is set then the weights of the linear layer will be initialized using the normal distribution and the bias will be set to the specified value. 2. If normal_init_embed_... |
185,394 | import torch.nn as nn
from .learned_positional_embedding import LearnedPositionalEmbedding
from .sinusoidal_positional_embedding import SinusoidalPositionalEmbedding
class LearnedPositionalEmbedding(nn.Embedding):
def __init__(
self,
num_embeddings: int,
embedding_d... | null |
185,395 | import collections
import logging
import os
import re
import shutil
import traceback
from collections import OrderedDict
from typing import Union
import torch
from fairseq.models import FairseqDecoder, FairseqEncoder
from torch.serialization import default_restore_location
def save_checkpoint(args, trainer, epoch_itr, ... | Load a checkpoint and restore the training iterator. *passthrough_args* will be passed through to ``trainer.get_train_iterator``. |
185,396 | import collections
import logging
import os
import re
import shutil
import traceback
from collections import OrderedDict
from typing import Union
import torch
from fairseq.models import FairseqDecoder, FairseqEncoder
from torch.serialization import default_restore_location
def load_model_ensemble_and_task(filenames, ar... | Loads an ensemble of models. Args: filenames (List[str]): checkpoint files to load arg_overrides (Dict[str,Any], optional): override model args that were used during model training task (fairseq.tasks.FairseqTask, optional): task to use for loading |
185,397 | import collections
import logging
import os
import re
import shutil
import traceback
from collections import OrderedDict
from typing import Union
import torch
from fairseq.models import FairseqDecoder, FairseqEncoder
from torch.serialization import default_restore_location
def torch_persistent_save(*args, **kwargs):
... | null |
185,398 | import collections
import logging
import os
import re
import shutil
import traceback
from collections import OrderedDict
from typing import Union
import torch
from fairseq.models import FairseqDecoder, FairseqEncoder
from torch.serialization import default_restore_location
The provided code snippet includes necessary ... | Prune the given state_dict if desired for LayerDrop (https://arxiv.org/abs/1909.11556). Training with LayerDrop allows models to be robust to pruning at inference time. This function prunes state_dict to allow smaller models to be loaded from a larger model and re-maps the existing state_dict for this to occur. It's ca... |
185,399 | import collections
import logging
import os
import re
import shutil
import traceback
from collections import OrderedDict
from typing import Union
import torch
from fairseq.models import FairseqDecoder, FairseqEncoder
from torch.serialization import default_restore_location
def load_checkpoint_to_cpu(path, arg_overrides... | Load a pretrained FairseqEncoder or FairseqDecoder from checkpoint into the provided `component` object. If state_dict fails to load, there may be a mismatch in the architecture of the corresponding `component` found in the `checkpoint` file. |
185,400 | import collections
import logging
import os
import re
import shutil
import traceback
from collections import OrderedDict
from typing import Union
import torch
from fairseq.models import FairseqDecoder, FairseqEncoder
from torch.serialization import default_restore_location
def verify_checkpoint_directory(save_dir: str... | null |
185,402 | from collections import defaultdict
import contextlib
import copy
import importlib.util
import math
import os
import sys
from typing import Callable, List
import warnings
import torch
import torch.nn.functional as F
from itertools import accumulate
from fairseq.modules import gelu, gelu_accurate
def deprecation_warning... | null |
185,403 | from collections import defaultdict
import contextlib
import copy
import importlib.util
import math
import os
import sys
from typing import Callable, List
import warnings
import torch
import torch.nn.functional as F
from itertools import accumulate
from fairseq.modules import gelu, gelu_accurate
def apply_to_sample(f, ... | null |
185,404 | from collections import defaultdict
import contextlib
import copy
import importlib.util
import math
import os
import sys
from typing import Callable, List
import warnings
import torch
import torch.nn.functional as F
from itertools import accumulate
from fairseq.modules import gelu, gelu_accurate
def _get_full_increment... | Helper for getting incremental state for an nn.Module. |
185,405 | from collections import defaultdict
import contextlib
import copy
import importlib.util
import math
import os
import sys
from typing import Callable, List
import warnings
import torch
import torch.nn.functional as F
from itertools import accumulate
from fairseq.modules import gelu, gelu_accurate
def _get_full_increment... | Helper for setting incremental state for an nn.Module. |
185,406 | from collections import defaultdict
import contextlib
import copy
import importlib.util
import math
import os
import sys
from typing import Callable, List
import warnings
import torch
import torch.nn.functional as F
from itertools import accumulate
from fairseq.modules import gelu, gelu_accurate
def load_align_dict(re... | null |
185,407 | from collections import defaultdict
import contextlib
import copy
import importlib.util
import math
import os
import sys
from typing import Callable, List
import warnings
import torch
import torch.nn.functional as F
from itertools import accumulate
from fairseq.modules import gelu, gelu_accurate
def print_embed_overla... | null |
185,408 | from collections import defaultdict
import contextlib
import copy
import importlib.util
import math
import os
import sys
from typing import Callable, List
import warnings
import torch
import torch.nn.functional as F
from itertools import accumulate
from fairseq.modules import gelu, gelu_accurate
The provided code snip... | Parse embedding text file into a dictionary of word and embedding tensors. The first line can have vocabulary size and dimension. The following lines should contain word and embedding separated by spaces. Example: 2 5 the -0.0230 -0.0264 0.0287 0.0171 0.1403 at -0.0395 -0.1286 0.0275 0.0254 -0.0932 |
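The file layout described in this docstring (an optional header line with vocabulary size and dimension, then one word and its vector per line) can be illustrated with a minimal parser sketch. The function name `parse_embedding_file` and the use of plain `torch.tensor` values are assumptions for illustration, not the dataset's reference solution.

```python
import torch

def parse_embedding_file(path):
    """Minimal sketch: read 'word v1 v2 ... vn' lines into a dict of tensors."""
    embed_dict = {}
    with open(path) as f:
        first = f.readline().split()
        # An optional header line holds "<vocab_size> <dim>"; otherwise treat it as data.
        if len(first) != 2:
            word, values = first[0], first[1:]
            embed_dict[word] = torch.tensor([float(v) for v in values])
        for line in f:
            pieces = line.rstrip().split(" ")
            embed_dict[pieces[0]] = torch.tensor([float(v) for v in pieces[1:]])
    return embed_dict
```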
185,409 | from collections import defaultdict
import contextlib
import copy
import importlib.util
import math
import os
import sys
from typing import Callable, List
import warnings
import torch
import torch.nn.functional as F
from itertools import accumulate
from fairseq.modules import gelu, gelu_accurate
def load_embedding(emb... | null |
185,410 | from collections import defaultdict
import contextlib
import copy
import importlib.util
import math
import os
import sys
from typing import Callable, List
import warnings
import torch
import torch.nn.functional as F
from itertools import accumulate
from fairseq.modules import gelu, gelu_accurate
def replace_unk(hypo_st... | null |
185,411 | from collections import defaultdict
import contextlib
import copy
import importlib.util
import math
import os
import sys
from typing import Callable, List
import warnings
import torch
import torch.nn.functional as F
from itertools import accumulate
from fairseq.modules import gelu, gelu_accurate
The provided code snip... | Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols are ignored. |
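The behaviour described here (padding positions ignored, real tokens numbered consecutively starting at padding_idx + 1) can be sketched with a cumulative sum over the padding mask. This is an illustrative reimplementation, not necessarily the exact code the prompt expects.

```python
import torch

def make_positions_sketch(tokens: torch.Tensor, padding_idx: int) -> torch.Tensor:
    # Non-padding tokens get consecutive positions starting at padding_idx + 1;
    # padding tokens keep padding_idx.
    mask = tokens.ne(padding_idx).int()
    return (torch.cumsum(mask, dim=1) * mask).long() + padding_idx

# Example: with padding_idx=1, [[1, 5, 6], [7, 8, 9]] -> [[1, 2, 3], [2, 3, 4]]
```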
185,412 | from collections import defaultdict
import contextlib
import copy
import importlib.util
import math
import os
import sys
from typing import Callable, List
import warnings
import torch
import torch.nn.functional as F
from itertools import accumulate
from fairseq.modules import gelu, gelu_accurate
def strip_pad(tensor, ... | null |
185,413 | from collections import defaultdict
import contextlib
import copy
import importlib.util
import math
import os
import sys
from typing import Callable, List
import warnings
import torch
import torch.nn.functional as F
from itertools import accumulate
from fairseq.modules import gelu, gelu_accurate
def buffered_arange(max... | null |
185,414 | from collections import defaultdict
import contextlib
import copy
import importlib.util
import math
import os
import sys
from typing import Callable, List
import warnings
import torch
import torch.nn.functional as F
from itertools import accumulate
from fairseq.modules import gelu, gelu_accurate
def item(tensor):
def ... | null |
185,415 | from collections import defaultdict
import contextlib
import copy
import importlib.util
import math
import os
import sys
from typing import Callable, List
import warnings
import torch
import torch.nn.functional as F
from itertools import accumulate
from fairseq.modules import gelu, gelu_accurate
The provided code snip... | FP16-compatible function that fills a tensor with -inf. |
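The FP16-compatible "-inf" fill described in this docstring is commonly achieved by casting to float32 before filling and casting back afterwards; a one-line sketch with an assumed function name:

```python
import torch

def fill_with_neg_inf_sketch(t: torch.Tensor) -> torch.Tensor:
    # Fill via float32 to avoid issues with half-precision tensors, then cast back.
    return t.float().fill_(float("-inf")).type_as(t)
```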
185,416 | from collections import defaultdict
import contextlib
import copy
import importlib.util
import math
import os
import sys
from typing import Callable, List
import warnings
import torch
import torch.nn.functional as F
from itertools import accumulate
from fairseq.modules import gelu, gelu_accurate
def item(tensor):
i... | Resolve max position constraints from multiple sources. |
185,417 | from collections import defaultdict
import contextlib
import copy
import importlib.util
import math
import os
import sys
from typing import Callable, List
import warnings
import torch
import torch.nn.functional as F
from itertools import accumulate
from fairseq.modules import gelu, gelu_accurate
import sys
sys.mo... | null |
185,418 | from collections import defaultdict
import contextlib
import copy
import importlib.util
import math
import os
import sys
from typing import Callable, List
import warnings
import torch
import torch.nn.functional as F
from itertools import accumulate
from fairseq.modules import gelu, gelu_accurate
def softmax(x, dim, on... | null |
185,419 | from collections import defaultdict
import contextlib
import copy
import importlib.util
import math
import os
import sys
from typing import Callable, List
import warnings
import torch
import torch.nn.functional as F
from itertools import accumulate
from fairseq.modules import gelu, gelu_accurate
def log_softmax(x, dim... | null |
185,420 | from collections import defaultdict
import contextlib
import copy
import importlib.util
import math
import os
import sys
from typing import Callable, List
import warnings
import torch
import torch.nn.functional as F
from itertools import accumulate
from fairseq.modules import gelu, gelu_accurate
def get_perplexity(los... | null |
185,421 | from collections import defaultdict
import contextlib
import copy
import importlib.util
import math
import os
import sys
from typing import Callable, List
import warnings
import torch
import torch.nn.functional as F
from itertools import accumulate
from fairseq.modules import gelu, gelu_accurate
def deprecation_warning... | Returns the activation function corresponding to `activation` |
185,422 | from collections import defaultdict
import contextlib
import copy
import importlib.util
import math
import os
import sys
from typing import Callable, List
import warnings
import torch
import torch.nn.functional as F
from itertools import accumulate
from fairseq.modules import gelu, gelu_accurate
def get_available_acti... | null |
185,423 | from collections import defaultdict
import contextlib
import copy
import importlib.util
import math
import os
import sys
from typing import Callable, List
import warnings
import torch
import torch.nn.functional as F
from itertools import accumulate
from fairseq.modules import gelu, gelu_accurate
def eval(model):
i... | null |
185,424 | from collections import defaultdict
import contextlib
import copy
import importlib.util
import math
import os
import sys
from typing import Callable, List
import warnings
import torch
import torch.nn.functional as F
from itertools import accumulate
from fairseq.modules import gelu, gelu_accurate
def has_parameters(mod... | null |
185,425 | from collections import defaultdict
import contextlib
import copy
import importlib.util
import math
import os
import sys
from typing import Callable, List
import warnings
import torch
import torch.nn.functional as F
from itertools import accumulate
from fairseq.modules import gelu, gelu_accurate
def set_torch_seed(see... | null |
185,426 | from collections import defaultdict
import contextlib
import copy
import importlib.util
import math
import os
import sys
from typing import Callable, List
import warnings
import torch
import torch.nn.functional as F
from itertools import accumulate
from fairseq.modules import gelu, gelu_accurate
The provided code snip... | Parses a single line from the alignment file. Args: line (str): String containing the alignment of the format: <src_idx_1>-<tgt_idx_1> <src_idx_2>-<tgt_idx_2> .. <src_idx_m>-<tgt_idx_m>. All indices are 0 indexed. Returns: torch.IntTensor: packed alignments of shape (2 * m). |
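As an illustration of the alignment format this docstring describes ("<src>-<tgt>" pairs separated by spaces, packed into a flat IntTensor of length 2 * m), a minimal sketch; the function name is hypothetical.

```python
import torch

def parse_alignment_sketch(line: str) -> torch.IntTensor:
    # "0-0 1-2 2-1" -> tensor([0, 0, 1, 2, 2, 1], dtype=torch.int32)
    pairs = [pair.split("-") for pair in line.split()]
    flat = [int(idx) for pair in pairs for idx in pair]
    return torch.IntTensor(flat)
```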
185,427 | from collections import defaultdict
import contextlib
import copy
import importlib.util
import math
import os
import sys
from typing import Callable, List
import warnings
import torch
import torch.nn.functional as F
from itertools import accumulate
from fairseq.modules import gelu, gelu_accurate
def item(tensor):
i... | null |
185,428 | from collections import defaultdict
import contextlib
import copy
import importlib.util
import math
import os
import sys
from typing import Callable, List
import warnings
import torch
import torch.nn.functional as F
from itertools import accumulate
from fairseq.modules import gelu, gelu_accurate
The provided code snip... | Return a Tensor of `size` filled with a range function on the device of x. If size is empty, the size of the variable x is used. |
185,429 | from collections import Counter
import os
from fairseq.tokenizer import tokenize_line
def safe_readline(f):
pos = f.tell()
while True:
try:
return f.readline()
except UnicodeDecodeError:
pos -= 1
f.seek(pos) # search where this character begins | null |
185,430 | import fnmatch
from functools import wraps
from hashlib import sha256
from io import open
import json
import logging
import os
import shutil
import tarfile
import tempfile
def cached_path(url_or_filename, cache_dir=None):
"""
Given something that might be a URL (or might be a local path),
determine which. I... | null |
185,431 | import fnmatch
from functools import wraps
from hashlib import sha256
from io import open
import json
import logging
import os
import shutil
import tarfile
import tempfile
The provided code snippet includes necessary dependencies for implementing the `filename_to_url` function. Write a Python function `def filename_to... | Return the url and etag (which may be ``None``) stored for `filename`. Raise ``EnvironmentError`` if `filename` or its stored metadata do not exist. |
185,432 | import fnmatch
from functools import wraps
from hashlib import sha256
from io import open
import json
import logging
import os
import shutil
import tarfile
import tempfile
The provided code snippet includes necessary dependencies for implementing the `s3_request` function. Write a Python function `def s3_request(func)... | Wrapper function for s3 requests in order to create more helpful error messages. |
185,433 | import fnmatch
from functools import wraps
from hashlib import sha256
from io import open
import json
import logging
import os
import shutil
import tarfile
import tempfile
The provided code snippet includes necessary dependencies for implementing the `read_set_from_file` function. Write a Python function `def read_set... | Extract a de-duped collection (set) of text from a file. Expected file format is one item per line. |
185,434 | import fnmatch
from functools import wraps
from hashlib import sha256
from io import open
import json
import logging
import os
import shutil
import tarfile
import tempfile
def get_file_extension(path, dot=True, lower=True):
ext = os.path.splitext(path)[1]
ext = ext if dot else ext[1:]
return ext.lower() if... | null |
185,435 | import os
import pickle
import socket
import subprocess
import warnings
import torch
import torch.distributed as dist
from fairseq import utils
def infer_init_method(args):
if args.distributed_init_method is not None:
return
# support torch.distributed.launch
if all(key in os.environ for key in [
... | null |
185,436 | import os
import pickle
import socket
import subprocess
import warnings
import torch
import torch.distributed as dist
from fairseq import utils
def is_master(args):
def suppress_output(is_master):
def get_rank():
def all_reduce(tensor, group=None):
def distributed_init(args):
if args.distributed_world_size == 1:
... | null |
185,437 | import os
import pickle
import socket
import subprocess
import warnings
import torch
import torch.distributed as dist
from fairseq import utils
def get_rank():
return dist.get_rank()
def get_world_size():
return dist.get_world_size()
def all_reduce(tensor, group=None):
if group is None:
group = get_... | Gathers arbitrary data from all nodes into a list. Similar to :func:`~torch.distributed.all_gather` but for arbitrary Python data. Note that *data* must be picklable. Args: data (Any): data from the local worker to be gathered on other workers group (optional): group of the collective max_size (int, optional): maximum ... |
185,440 | import contextlib
import itertools
import os
import sys
import types
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `infer_language_pair` function. Write a Python function `def infer_language_pair(path)` to solve the following problem:
Infer language pair from filenam... | Infer language pair from filename: <split>.<lang1>-<lang2>.(...).idx |
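The filename convention in this docstring (`<split>.<lang1>-<lang2>.(...).idx`) can be parsed with a short directory scan; the name `infer_language_pair_sketch` and the exact matching rules are assumptions for illustration.

```python
import os

def infer_language_pair_sketch(path):
    """Look for files like 'train.de-en.idx' under `path` and return ('de', 'en')."""
    src, tgt = None, None
    for filename in os.listdir(path):
        parts = filename.split(".")
        if len(parts) >= 3 and filename.endswith(".idx") and "-" in parts[1]:
            src, tgt = parts[1].split("-", 1)
            break
    return src, tgt
```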
185,441 | import contextlib
import itertools
import os
import sys
import types
import numpy as np
class ConcatDataset(FairseqDataset):
def cumsum(sequence, sample_ratios):
r, s = [], 0
for e, ratio in zip(sequence, sample_ratios):
curr_len = int(ratio * len(e))
r.append(curr_len + s)
... | A helper function for loading indexed datasets. Args: path (str): path to indexed dataset (e.g., 'data-bin/train') dictionary (~fairseq.data.Dictionary): data dictionary dataset_impl (str, optional): which dataset implementation to use. If not provided, it will be inferred automatically. For legacy indexed data we use ... |
185,442 | import contextlib
import itertools
import os
import sys
import types
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `numpy_seed` function. Write a Python function `def numpy_seed(seed, *addl_seeds)` to solve the following problem:
Context manager which seeds the NumPy... | Context manager which seeds the NumPy PRNG with the specified seed and restores the state afterward |
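The context-manager behaviour described here (seed the NumPy PRNG for the block, restore the previous state on exit) can be sketched as follows; the way the additional seeds are combined is an illustrative choice, not the dataset's reference code.

```python
import contextlib
import numpy as np

@contextlib.contextmanager
def numpy_seed_sketch(seed, *addl_seeds):
    """Seed NumPy's PRNG inside the block and restore the old state afterwards."""
    if seed is None:
        yield
        return
    if addl_seeds:
        # Combine the extra seeds into one value (illustrative choice).
        seed = hash((seed,) + addl_seeds) % int(1e8)
    state = np.random.get_state()
    np.random.seed(seed)
    try:
        yield
    finally:
        np.random.set_state(state)
```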
185,443 | import contextlib
import itertools
import os
import sys
import types
import numpy as np
def _filter_by_size_dynamic(indices, size_fn, max_positions, raise_exception=False):
def check_size(idx):
if isinstance(max_positions, float) or isinstance(max_positions, int):
return size_fn(idx) <= max_posi... | Filter indices based on their size. Args: indices (List[int]): ordered list of dataset indices dataset (FairseqDataset): fairseq dataset instance max_positions (tuple): filter elements larger than this size. Comparisons are done component-wise. raise_exception (bool, optional): if ``True``, raise an exception if any el... |
185,444 | import contextlib
import itertools
import os
import sys
import types
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `batch_by_size_dep` function. Write a Python function `def batch_by_size_dep( indices, num_tokens_fn, max_tokens=None, max_sentences=None, requi... | Yield mini-batches of indices bucketed by size. Batches may contain sequences of different lengths. Args: indices (List[int]): ordered list of dataset indices num_tokens_fn (callable): function that returns the number of tokens at a given index max_tokens (int, optional): max number of tokens in each batch (default: No... |
185,445 | import contextlib
import itertools
import os
import sys
import types
import numpy as np
def process_bpe_symbol(sentence: str, bpe_symbol: str):
if bpe_symbol == 'sentencepiece':
sentence = sentence.replace(' ', '').replace('\u2581', ' ').strip()
elif bpe_symbol == '_EOW':
sentence = sentence.re... | null |
185,446 | import contextlib
import itertools
import os
import sys
import types
import numpy as np
def _is_batch_full(batch, num_tokens, max_tokens, max_sentences):
if len(batch) == 0:
return 0
if max_sentences > 0 and len(batch) == max_sentences:
return 1
if max_tokens > 0 and num_tokens > max_tokens:... | null |
185,447 | import numpy as np
import torch
from . import data_utils, FairseqDataset
def collate(
samples, pad_idx, eos_idx, left_pad_source=True, left_pad_target=False,
input_feeding=True,
):
if len(samples) == 0:
return {}
def merge(key, left_pad, move_eos_to_beginning=False):
return data_utils.... | null |
185,454 | from functools import lru_cache
import json
class Encoder:
def __init__(self, encoder, bpe_merges, errors='replace'):
self.encoder = encoder
self.decoder = {v:k for k,v in self.encoder.items()}
self.errors = errors # how to handle errors in decoding
self.byte_encoder = bytes_to_unico... | null |
185,456 | from functools import lru_cache
import os
import shutil
import struct
import numpy as np
import torch
from . import FairseqDataset
def get_available_dataset_impl():
return ['raw', 'lazy', 'cached', 'mmap'] | null |
185,457 | from functools import lru_cache
import os
import shutil
import struct
import numpy as np
import torch
from . import FairseqDataset
def index_file_path(prefix_path):
return prefix_path + '.idx'
class IndexedDataset(FairseqDataset):
"""Loader for TorchNet IndexedDataset"""
_HDR_MAGIC = b'TNTIDX\x00\x00'
d... | null |
185,458 | from functools import lru_cache
import os
import shutil
import struct
import numpy as np
import torch
from . import FairseqDataset
def __best_fitting_dtype(vocab_size=None):
if vocab_size is not None and vocab_size < 65500:
return np.uint16
else:
return np.int32
class IndexedDatasetBuilder(objec... | null |
185,459 | from functools import lru_cache
import os
import shutil
import struct
import numpy as np
import torch
from . import FairseqDataset
class IndexedDataset(FairseqDataset):
"""Loader for TorchNet IndexedDataset"""
_HDR_MAGIC = b'TNTIDX\x00\x00'
def __init__(self, path, fix_lua_indexing=False):
super()._... | null |
185,460 | from functools import lru_cache
import os
import shutil
import struct
import numpy as np
import torch
from . import FairseqDataset
class IndexedDataset(FairseqDataset):
"""Loader for TorchNet IndexedDataset"""
_HDR_MAGIC = b'TNTIDX\x00\x00'
def __init__(self, path, fix_lua_indexing=False):
super()._... | null |
185,466 | import argparse
REGISTRIES = {}
def set_defaults(args, cls):
def setup_registry(
registry_name: str,
base_class=None,
default=None,
):
assert registry_name.startswith('--')
registry_name = registry_name[2:].replace('-', '_')
REGISTRY = {}
REGISTRY_CLASS_NAMES = set()
# maintain a regi... | null |
185,467 | import argparse
import copy
import os
import torch
from torch import nn
from fairseq import utils
from fairseq.data import encoders
def from_pretrained(
model_name_or_path,
checkpoint_file='model.pt',
data_name_or_path='.',
archive_map=None,
**kwargs
):
from fairseq import checkpoint_utils, fil... | null |
185,468 | import itertools
import os
from fairseq import options, utils
from fairseq.data import (
AppendTokenDataset,
ConcatDataset,
data_utils,
indexed_dataset,
LanguagePairDataset,
PrependTokenDataset,
StripTokenDataset,
TruncateDataset,
)
from . import FairseqTask, register_task
def load_lang... | null |
185,469 | from collections import OrderedDict
import os
import torch
from fairseq import options, utils
from fairseq.data import (
Dictionary,
LanguagePairDataset,
RoundRobinZipDatasets,
TransformEosLangPairDataset,
)
from fairseq.models import FairseqMultiModel
from fairseq.tasks.translation import load_langpair... | Return language token index. |
185,470 | from collections import OrderedDict
import os
from fairseq.data import (
BacktranslationDataset,
IndexedCachedDataset,
IndexedDataset,
IndexedRawTextDataset,
LanguagePairDataset,
NoisingDataset,
RoundRobinZipDatasets,
)
from fairseq.models import FairseqMultiModel
from fairseq.sequence_gener... | null |
185,471 | from collections import OrderedDict
import os
from fairseq.data import (
BacktranslationDataset,
IndexedCachedDataset,
IndexedDataset,
IndexedRawTextDataset,
LanguagePairDataset,
NoisingDataset,
RoundRobinZipDatasets,
)
from fairseq.models import FairseqMultiModel
from fairseq.sequence_gener... | null |
185,472 | from collections import OrderedDict
import os
from fairseq.data import (
BacktranslationDataset,
IndexedCachedDataset,
IndexedDataset,
IndexedRawTextDataset,
LanguagePairDataset,
NoisingDataset,
RoundRobinZipDatasets,
)
from fairseq.models import FairseqMultiModel
from fairseq.sequence_gener... | Parse the configuration of lambda coefficient (for scheduling). x = "3" # lambda will be a constant equal to x x = "0:1,1000:0" # lambda will start from 1 and linearly decrease # to 0 during the first 1000 iterations x = "0:0,1000:0,2000:1" # lambda will be equal to 0 for the first 1000 # iterations, then will linearly... |
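The schedule strings in this docstring ("3", "0:1,1000:0", "0:0,1000:0,2000:1") map iteration milestones to lambda values; a minimal parser plus linear-interpolation sketch is shown below, with hypothetical names.

```python
def parse_lambda_config_sketch(x: str):
    """Return (constant, None) for "3", or (initial_value, [(step, value), ...]) for schedules."""
    parts = x.split(",")
    if len(parts) == 1 and ":" not in x:
        return float(x), None
    schedule = [(int(s.split(":")[0]), float(s.split(":")[1])) for s in parts]
    return schedule[0][1], schedule

def lambda_at_step_sketch(schedule, step):
    # Linearly interpolate between the surrounding (step, value) milestones.
    for (s0, v0), (s1, v1) in zip(schedule, schedule[1:]):
        if s0 <= step < s1:
            return v0 + (v1 - v0) * (step - s0) / (s1 - s0)
    return schedule[-1][1]

# Example: lambda_at_step_sketch([(0, 1.0), (1000, 0.0)], 500) -> 0.5
```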
185,473 | import argparse
import torch
import sys
from fairseq import utils
from fairseq.data.indexed_dataset import get_available_dataset_impl
def get_parser(desc, default_task='translation'):
# Before creating the true parser, we need to import optional user module
# in order to eagerly import custom tasks, optimizers,... | null |
185,474 | import argparse
import torch
import sys
from fairseq import utils
from fairseq.data.indexed_dataset import get_available_dataset_impl
def get_parser(desc, default_task='translation'):
# Before creating the true parser, we need to import optional user module
# in order to eagerly import custom tasks, optimizers,... | null |
185,475 | import argparse
import torch
import sys
from fairseq import utils
from fairseq.data.indexed_dataset import get_available_dataset_impl
def get_generation_parser(interactive=False, default_task='translation'):
parser = get_parser('Generation', default_task)
add_dataset_args(parser, gen=True)
add_generation_ar... | null |
185,476 | import argparse
import torch
import sys
from fairseq import utils
from fairseq.data.indexed_dataset import get_available_dataset_impl
def get_parser(desc, default_task='translation'):
# Before creating the true parser, we need to import optional user module
# in order to eagerly import custom tasks, optimizers,... | null |
185,477 | import argparse
import torch
import sys
from fairseq import utils
from fairseq.data.indexed_dataset import get_available_dataset_impl
def get_parser(desc, default_task='translation'):
# Before creating the true parser, we need to import optional user module
# in order to eagerly import custom tasks, optimizers,... | null |
185,478 | import argparse
import torch
import sys
from fairseq import utils
from fairseq.data.indexed_dataset import get_available_dataset_impl
def eval_bool(x, default=False):
if x is None:
return default
try:
return bool(eval(x))
except TypeError:
return default | null |
185,479 | import argparse
import torch
import sys
from fairseq import utils
from fairseq.data.indexed_dataset import get_available_dataset_impl
ARCH_MODEL_REGISTRY = {}
ARCH_CONFIG_REGISTRY = {}
REGISTRIES = {}
TASK_REGISTRY = {}
class FairseqBMUF(FairseqOptimizer):
"""
Implements incremental block distributed data p... | null |
185,480 | import math
from fairseq import utils
from . import FairseqCriterion, register_criterion
def label_smoothed_nll_loss(lprobs, target, epsilon, ignore_index=None, reduce=True):
if target.dim() == lprobs.dim() - 1:
target = target.unsqueeze(-1)
nll_loss = -lprobs.gather(dim=-1, index=target)
smooth_lo... | null |
185,481 | import math
import torch
import torch.nn.functional as F
from fairseq import utils
from . import FairseqCriterion, register_criterion
The provided code snippet includes necessary dependencies for implementing the `compute_cross_entropy_loss` function. Write a Python function `def compute_cross_entropy_loss(logits, tar... | Function to compute the cross entropy loss. The default value of ignore_index is the same as the default value for F.cross_entropy in pytorch. |
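A hedged sketch of the behaviour this docstring describes (cross-entropy with an ignore_index matching F.cross_entropy's default of -100); the function name and the summed reduction are assumptions, not the reference solution.

```python
import torch
import torch.nn.functional as F

def compute_cross_entropy_loss_sketch(logits, targets, ignore_index=-100):
    # logits: (batch, num_classes); targets: (batch,) with ignore_index marking padding.
    return F.cross_entropy(logits, targets, ignore_index=ignore_index, reduction="sum")

# Example: compute_cross_entropy_loss_sketch(torch.randn(4, 10), torch.tensor([1, 2, -100, 3]))
```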
185,483 | from collections import OrderedDict
import json
from numbers import Number
import os
import sys
from fairseq import distributed_utils
from fairseq.meters import AverageMeter, StopwatchMeter, TimeMeter
class json_progress_bar(progress_bar):
"""Log output in JSON format."""
def __init__(self, iterable, epoch=None... | null |
185,484 | from collections import OrderedDict
import json
from numbers import Number
import os
import sys
from fairseq import distributed_utils
from fairseq.meters import AverageMeter, StopwatchMeter, TimeMeter
def format_stat(stat):
if isinstance(stat, Number):
stat = '{:g}'.format(stat)
elif isinstance(stat, A... | null |
185,486 | from fairseq.models import register_model, register_model_architecture
from fairseq.models.nonautoregressive_transformer import NATransformerModel
from fairseq.utils import new_arange
def new_arange(x, *size):
"""
Return a Tensor of `size` filled with a range function on the device of x.
If size is empty, ... | null |
185,487 | from fairseq.models import register_model, register_model_architecture
from fairseq.models.nonautoregressive_transformer import NATransformerModel
from fairseq.utils import new_arange
def base_architecture(args):
args.encoder_embed_path = getattr(args, "encoder_embed_path", None)
args.encoder_embed_dim = getatt... | null |
185,496 | import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import utils
from fairseq.models import (
FairseqDecoder,
FairseqLanguageModel,
register_model,
register_model_architecture,
)
from fairseq.modules import (
LayerNorm,
TransformerSentenceEncoder,
)
from fairseq.modul... | null |
185,497 | import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import utils
from fairseq.models import (
FairseqDecoder,
FairseqLanguageModel,
register_model,
register_model_architecture,
)
from fairseq.modules import (
LayerNorm,
TransformerSentenceEncoder,
)
from fairseq.modul... | null |
185,498 | import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import utils
from fairseq.models import (
FairseqDecoder,
FairseqLanguageModel,
register_model,
register_model_architecture,
)
from fairseq.modules import (
LayerNorm,
TransformerSentenceEncoder,
)
from fairseq.modul... | null |
185,502 | from collections import namedtuple
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import options, utils
from fairseq.models import (
FairseqEncoder,
FairseqIncrementalDecoder,
FairseqEncoderDecoderModel,
register_model,
register_model_architecture,
)
from... | null |
185,503 | from collections import namedtuple
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import options, utils
from fairseq.models import (
FairseqEncoder,
FairseqIncrementalDecoder,
FairseqEncoderDecoderModel,
register_model,
register_model_architecture,
)
from... | null |
185,504 | from collections import namedtuple
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import options, utils
from fairseq.models import (
FairseqEncoder,
FairseqIncrementalDecoder,
FairseqEncoderDecoderModel,
register_model,
register_model_architecture,
)
from... | null |
185,505 | from collections import namedtuple
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import options, utils
from fairseq.models import (
FairseqEncoder,
FairseqIncrementalDecoder,
FairseqEncoderDecoderModel,
register_model,
register_model_architecture,
)
from... | null |
185,506 | from collections import namedtuple
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import options, utils
from fairseq.models import (
FairseqEncoder,
FairseqIncrementalDecoder,
FairseqEncoderDecoderModel,
register_model,
register_model_architecture,
)
from... | null |
185,507 | from collections import namedtuple
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import options, utils
from fairseq.models import (
FairseqEncoder,
FairseqIncrementalDecoder,
FairseqEncoderDecoderModel,
register_model,
register_model_architecture,
)
from... | null |
185,508 | from collections import namedtuple
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import options, utils
from fairseq.models import (
FairseqEncoder,
FairseqIncrementalDecoder,
FairseqEncoderDecoderModel,
register_model,
register_model_architecture,
)
from... | null |
185,509 | from collections import namedtuple
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import options, utils
from fairseq.models import (
FairseqEncoder,
FairseqIncrementalDecoder,
FairseqEncoderDecoderModel,
register_model,
register_model_architecture,
)
from... | null |
185,510 | import torch.nn as nn
from fairseq import utils
from fairseq.models import (
register_model,
register_model_architecture,
)
from fairseq.models.transformer import TransformerModel
from fairseq.modules.transformer_sentence_encoder import init_bert_params
from .hub_interface import BARTHubInterface
def bart_larg... | null |