Dataset schema:
id: int64 (values 0 to 190k)
prompt: string (lengths 21 to 13.4M)
docstring: string (lengths 1 to 12k)
Rows: id, prompt (truncated), docstring (truncated, or null).
184,826
import logging import math import torch from torch.optim import Optimizer from torch.optim.lr_scheduler import LambdaLR The provided code snippet includes necessary dependencies for implementing the `get_linear_schedule_with_warmup` function. Write a Python function `def get_linear_schedule_with_warmup(optimizer, num_...
Create a schedule with a learning rate that decreases linearly after linearly increasing during a warmup period.
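A minimal sketch consistent with this docstring, assuming the standard LambdaLR convention: the learning-rate multiplier rises from 0 to 1 over num_warmup_steps, then decays linearly to 0 at num_training_steps.

    from torch.optim.lr_scheduler import LambdaLR

    def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
        # Multiplier grows 0 -> 1 during warmup, then decays linearly 1 -> 0.
        def lr_lambda(current_step):
            if current_step < num_warmup_steps:
                return float(current_step) / float(max(1, num_warmup_steps))
            return max(0.0, float(num_training_steps - current_step)
                       / float(max(1, num_training_steps - num_warmup_steps)))
        return LambdaLR(optimizer, lr_lambda, last_epoch)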
184,827
import logging import math import torch from torch.optim import Optimizer from torch.optim.lr_scheduler import LambdaLR The provided code snippet includes necessary dependencies for implementing the `get_cosine_with_hard_restarts_schedule_with_warmup` function. Write a Python function `def get_cosine_with_hard_restart...
Create a schedule with a learning rate that decreases following the values of the cosine function with several hard restarts, after a warmup period during which it increases linearly between 0 and 1.
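A sketch under the same warmup convention; after warmup the multiplier follows a cosine curve that jumps back to 1 at the start of each of num_cycles cycles (the "hard restart"). The num_cycles default is an assumption.

    import math
    from torch.optim.lr_scheduler import LambdaLR

    def get_cosine_with_hard_restarts_schedule_with_warmup(
            optimizer, num_warmup_steps, num_training_steps, num_cycles=1.0, last_epoch=-1):
        def lr_lambda(current_step):
            if current_step < num_warmup_steps:
                return float(current_step) / float(max(1, num_warmup_steps))
            progress = (float(current_step - num_warmup_steps)
                        / float(max(1, num_training_steps - num_warmup_steps)))
            if progress >= 1.0:
                return 0.0
            # Cosine within each cycle; the modulo restarts it from 1.0.
            return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((num_cycles * progress) % 1.0))))
        return LambdaLR(optimizer, lr_lambda, last_epoch)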
184,828
import logging import numpy as np import tensorflow as tf from .configuration_xlnet import XLNetConfig from .file_utils import add_start_docstrings, add_start_docstrings_to_callable from .modeling_tf_utils import TFPreTrainedModel, TFSequenceSummary, TFSharedEmbeddings, get_initializer, shape_list The provided code sn...
Implementation of the gelu activation function. XLNet uses OpenAI GPT's gelu. Also see https://arxiv.org/abs/1606.08415
184,829
import logging import numpy as np import tensorflow as tf from .configuration_xlnet import XLNetConfig from .file_utils import add_start_docstrings, add_start_docstrings_to_callable from .modeling_tf_utils import TFPreTrainedModel, TFSequenceSummary, TFSharedEmbeddings, get_initializer, shape_list def swish(x): re...
null
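The prompt above is truncated at what appears to be the swish definition; a minimal TensorFlow sketch of that activation, assuming the usual x * sigmoid(x) form:

    import tensorflow as tf

    def swish(x):
        # Swish / SiLU: x scaled by its own sigmoid.
        return x * tf.sigmoid(x)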
184,830
import argparse import logging from pathlib import Path import fairseq import torch from packaging import version from transformers import BartConfig, BartForMaskedLM, BartForSequenceClassification, BartModel, BartTokenizer SAMPLE_TEXT = " Hello world! cécé herlolip" rename_keys = [ ("model.classification_heads.mnl...
Copy/paste/tweak model's weights to our BERT structure.
184,831
import logging import numpy as np import tensorflow as tf from .configuration_gpt2 import GPT2Config from .file_utils import add_start_docstrings, add_start_docstrings_to_callable from .modeling_tf_utils import ( TFConv1D, TFPreTrainedModel, TFSequenceSummary, TFSharedEmbeddings, get_initializer, ...
Gaussian Error Linear Unit. This is a smoother version of the ReLU. Original paper: https://arxiv.org/abs/1606.08415 Args: x: float Tensor to perform activation. Returns: `x` with the GELU activation applied.
184,832
from __future__ import print_function from collections import Counter import string import re import argparse import json import sys def evaluate(dataset, predictions): f1 = exact_match = total = 0 for article in dataset: for paragraph in article['paragraphs']: for qa in paragraph['qas']: ...
null
184,833
import collections import json import logging import math import re import string from transformers.tokenization_bert import BasicTokenizer def find_best_thresh_v2(preds, scores, na_probs, qid_to_has_ans): num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k]) cur_score = num_no_ans best_score...
null
184,834
import collections import json import logging import math import re import string from transformers.tokenization_bert import BasicTokenizer def get_raw_scores(examples, preds): """ Computes the exact and f1 scores from the examples and the model predictions """ exact_scores = {} f1_scores = {} f...
null
184,835
from __future__ import print_function from collections import Counter import string import re import argparse import json import sys import unicodedata def evaluate(dataset, predictions, lang): def evaluate_with_path(dataset_file, prediction_file, answer_language): with open(dataset_file) as dataset_file_reader: ...
null
184,836
from __future__ import print_function from collections import Counter import string import re import argparse import json import sys import unicodedata def evaluate(dataset, predictions, lang): f1 = exact_match = total = 0 for article in dataset: for paragraph in article['paragraphs']: for q...
null
184,837
import logging import os from ...file_utils import is_tf_available from .utils import DataProcessor, InputExample, InputFeatures if is_tf_available(): import tensorflow as tf logger = logging.getLogger(__name__) glue_processors = { "cola": ColaProcessor, "mnli": MnliProcessor, "mnli-mm": MnliMismatchedP...
Loads a data file into a list of ``InputFeatures`` Args: examples: List of ``InputExamples`` or ``tf.data.Dataset`` containing the examples. tokenizer: Instance of a tokenizer that will tokenize the examples max_length: Maximum example length task: GLUE task label_list: List of labels. Can be obtained from the processo...
184,838
import logging import os import random from ...file_utils import is_tf_available from .utils import DataProcessor, InputExample, InputFeatures if is_tf_available(): import tensorflow as tf logger = logging.getLogger(__name__) xglue_processors = { "xnli": XnliProcessor, "pawsx": PawsxProcessor, "qam": Qa...
Loads a data file into a list of ``InputFeatures`` Args: examples: List of ``InputExamples`` or ``tf.data.Dataset`` containing the examples. tokenizer: Instance of a tokenizer that will tokenize the examples max_length: Maximum example length task: GLUE task label_list: List of labels. Can be obtained from the processo...
184,839
import logging import os import random from ...file_utils import is_tf_available from .utils import DataProcessor, InputExample, InputFeatures if is_tf_available(): import tensorflow as tf logger = logging.getLogger(__name__) xglue_processors = { "xnli": XnliProcessor, "pawsx": PawsxProcessor, "qam": Qa...
Loads a data file into a list of ``InputFeatures`` Args: examples: List of ``InputExamples`` or ``tf.data.Dataset`` containing the examples. tokenizer: Instance of a tokenizer that will tokenize the examples max_length: Maximum example length task: GLUE task label_list: List of labels. Can be obtained from the processo...
184,840
import json import logging import os from functools import partial from multiprocessing import Pool, cpu_count import numpy as np from tqdm import tqdm from ...file_utils import is_tf_available, is_torch_available from ...tokenization_bert import whitespace_tokenize from .utils import DataProcessor from ..metrics.squad...
Check if this is the 'max context' doc span for the token.
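A sketch of the standard SQuAD-style "max context" heuristic behind this docstring: a token is assigned to the sliding-window span where it has the most context on both sides, with a small bonus for longer spans. It assumes doc_spans entries expose start and length, as in the BERT reference code.

    def _check_is_max_context(doc_spans, cur_span_index, position):
        best_score, best_span_index = None, None
        for span_index, doc_span in enumerate(doc_spans):
            end = doc_span.start + doc_span.length - 1
            if position < doc_span.start or position > end:
                continue
            num_left_context = position - doc_span.start
            num_right_context = end - position
            # Prefer the span where the token is most centered; break ties by length.
            score = min(num_left_context, num_right_context) + 0.01 * doc_span.length
            if best_score is None or score > best_score:
                best_score, best_span_index = score, span_index
        return cur_span_index == best_span_index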
184,841
import json import logging import os from functools import partial from multiprocessing import Pool, cpu_count import numpy as np from tqdm import tqdm from ...file_utils import is_tf_available, is_torch_available from ...tokenization_bert import whitespace_tokenize from .utils import DataProcessor from ..metrics.squad...
null
184,842
import logging import os import random from ...file_utils import is_tf_available from .utils import DataProcessor, InputExample, InputFeatures if is_tf_available(): import tensorflow as tf logger = logging.getLogger(__name__) xtreme_processors = { "xnli": XnliProcessor, "pawsx": PawsxProcessor, } xtreme_out...
Loads a data file into a list of ``InputFeatures`` Args: examples: List of ``InputExamples`` or ``tf.data.Dataset`` containing the examples. tokenizer: Instance of a tokenizer that will tokenize the examples max_length: Maximum example length task: GLUE task label_list: List of labels. Can be obtained from the processo...
184,843
import argparse import os import numpy as np import tensorflow as tf import torch from transformers import BertModel The provided code snippet includes necessary dependencies for implementing the `convert_pytorch_checkpoint_to_tf` function. Write a Python function `def convert_pytorch_checkpoint_to_tf(model: BertModel...
:param model: BertModel PyTorch model instance to be converted :param ckpt_dir: TensorFlow model directory :param model_name: model name :return: Currently supported HF models: Y BertModel N BertForMaskedLM N BertForPreTraining N BertForMultipleChoice N BertForNextSentencePrediction N BertForSequenceClassification N Ber...
184,844
import logging import torch from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from torch.nn import functional as F from .activations import gelu_new, swish from .configuration_xlnet import XLNetConfig from .file_utils import add_start_docstrings, add_start_docstrings_to_callable from .modeling_utils i...
Load tf checkpoints in a pytorch model
184,845
import itertools import logging import math import numpy as np import tensorflow as tf from .configuration_xlm import XLMConfig from .file_utils import add_start_docstrings, add_start_docstrings_to_callable from .modeling_tf_utils import TFPreTrainedModel, TFSequenceSummary, TFSharedEmbeddings, get_initializer, shape_l...
null
184,846
import itertools import logging import math import numpy as np import tensorflow as tf from .configuration_xlm import XLMConfig from .file_utils import add_start_docstrings, add_start_docstrings_to_callable from .modeling_tf_utils import TFPreTrainedModel, TFSequenceSummary, TFSharedEmbeddings, get_initializer, shape_l...
Gaussian Error Linear Unit. Original Implementation of the gelu activation function in Google Bert repo when initially created. For information: OpenAI GPT's gelu is slightly different (and gives slightly different results): 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) Also see ...
184,847
import itertools import logging import math import numpy as np import tensorflow as tf from .configuration_xlm import XLMConfig from .file_utils import add_start_docstrings, add_start_docstrings_to_callable from .modeling_tf_utils import TFPreTrainedModel, TFSequenceSummary, TFSharedEmbeddings, get_initializer, shape_l...
Generate hidden states mask, and optionally an attention mask.
184,848
import argparse import logging import os import torch from transformers import ( CONFIG_NAME, WEIGHTS_NAME, XLNetConfig, XLNetForQuestionAnswering, XLNetForSequenceClassification, XLNetLMHeadModel, load_tf_weights_in_xlnet, ) GLUE_TASKS_NUM_LABELS = { "cola": 2, "mnli": 3, "mrpc"...
null
184,849
import logging import math import os import torch import torch.nn as nn from torch.nn import CrossEntropyLoss from .activations import gelu_new from .configuration_gpt2 import GPT2Config from .file_utils import add_start_docstrings, add_start_docstrings_to_callable from .modeling_utils import Conv1D, PreTrainedModel, S...
Load tf checkpoints in a pytorch model
184,850
import itertools import logging import math import numpy as np import torch from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from torch.nn import functional as F from .activations import gelu from .configuration_xlm import XLMConfig from .file_utils import add_start_docstrings, add_start_docstrings_t...
null
184,851
import itertools import logging import math import numpy as np import torch from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from torch.nn import functional as F from .activations import gelu from .configuration_xlm import XLMConfig from .file_utils import add_start_docstrings, add_start_docstrings_t...
Generate hidden states mask, and optionally an attention mask.
184,852
import math import torch import torch.nn.functional as F if torch.__version__ < "1.4.0": gelu = _gelu_python else: gelu = F.gelu def swish(x): return x * torch.sigmoid(x)
null
184,853
import math import torch import torch.nn.functional as F if torch.__version__ < "1.4.0": gelu = _gelu_python else: gelu = F.gelu The provided code snippet includes necessary dependencies for implementing the `_gelu_python` function. Write a Python function `def _gelu_python(x)` to solve the following problem: ...
Original Implementation of the gelu activation function in Google Bert repo when initially created. For information: OpenAI GPT's gelu is slightly different (and gives slightly different results): 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) This is now written in C in torch.nn....
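A sketch of the exact (erf-based) form this docstring contrasts with OpenAI GPT's tanh approximation:

    import math
    import torch

    def _gelu_python(x):
        # Exact GELU via the Gaussian CDF: x * Phi(x).
        return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))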
184,854
import math import torch import torch.nn.functional as F if torch.__version__ < "1.4.0": gelu = _gelu_python else: gelu = F.gelu The provided code snippet includes necessary dependencies for implementing the `gelu_new` function. Write a Python function `def gelu_new(x)` to solve the following problem: Implemen...
Implementation of the gelu activation function currently in Google Bert repo (identical to OpenAI GPT). Also see https://arxiv.org/abs/1606.08415
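A sketch of the tanh-approximation variant, transcribing the formula quoted in the earlier rows:

    import math
    import torch

    def gelu_new(x):
        # Tanh approximation of GELU (OpenAI GPT / current Google BERT form).
        return 0.5 * x * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi)
                                           * (x + 0.044715 * torch.pow(x, 3.0))))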
184,855
import math import torch import torch.nn.functional as F ACT2FN = { "relu": F.relu, "swish": swish, "gelu": gelu, "tanh": F.tanh, "gelu_new": gelu_new, } def get_activation(activation_string): if activation_string in ACT2FN: return ACT2FN[activation_string] else: raise KeyEr...
null
184,856
import copy import logging import math import numpy as np import torch import torch.nn as nn from torch.nn import CrossEntropyLoss from .activations import gelu from .configuration_distilbert import DistilBertConfig from .file_utils import add_start_docstrings, add_start_docstrings_to_callable from .modeling_utils impo...
null
184,857
import logging import unicodedata import six from .tokenization_xlm import XLMTokenizer The provided code snippet includes necessary dependencies for implementing the `convert_to_unicode` function. Write a Python function `def convert_to_unicode(text)` to solve the following problem: Converts `text` to Unicode (if it'...
Converts `text` to Unicode (if it's not already), assuming UTF-8 input.
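A Python 3-only sketch of the conversion (the imports in the prompt suggest the original also handles Python 2 via six):

    def convert_to_unicode(text):
        # Accept str or UTF-8 encoded bytes; anything else is an error.
        if isinstance(text, str):
            return text
        if isinstance(text, bytes):
            return text.decode("utf-8", "ignore")
        raise ValueError("Unsupported string type: %s" % type(text))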
184,858
import logging import numpy as np import tensorflow as tf from .configuration_bert import BertConfig from .file_utils import MULTIPLE_CHOICE_DUMMY_INPUTS, add_start_docstrings, add_start_docstrings_to_callable from .modeling_tf_utils import TFPreTrainedModel, get_initializer, shape_list The provided code snippet inclu...
Gaussian Error Linear Unit. Original Implementation of the gelu activation function in Google Bert repo when initially created. For information: OpenAI GPT's gelu is slightly different (and gives slightly different results): 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) Also see ...
184,859
import logging import numpy as np import tensorflow as tf from .configuration_bert import BertConfig from .file_utils import MULTIPLE_CHOICE_DUMMY_INPUTS, add_start_docstrings, add_start_docstrings_to_callable from .modeling_tf_utils import TFPreTrainedModel, get_initializer, shape_list The provided code snippet inclu...
Gaussian Error Linear Unit. This is a smoother version of the ReLU. Original paper: https://arxiv.org/abs/1606.08415 Args: x: float Tensor to perform activation. Returns: `x` with the GELU activation applied.
184,860
import logging import numpy as np import tensorflow as tf from .configuration_bert import BertConfig from .file_utils import MULTIPLE_CHOICE_DUMMY_INPUTS, add_start_docstrings, add_start_docstrings_to_callable from .modeling_tf_utils import TFPreTrainedModel, get_initializer, shape_list def swish(x): return x * tf...
null
184,861
import logging import math import numpy as np import tensorflow as tf from .configuration_distilbert import DistilBertConfig from .file_utils import add_start_docstrings, add_start_docstrings_to_callable from .modeling_tf_utils import TFPreTrainedModel, TFSharedEmbeddings, get_initializer, shape_list The provided code...
Gaussian Error Linear Unit. Original Implementation of the gelu activation function in Google Bert repo when initially created. For information: OpenAI GPT's gelu is slightly different (and gives slightly different results): 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) Also see ...
184,862
import logging import math import numpy as np import tensorflow as tf from .configuration_distilbert import DistilBertConfig from .file_utils import add_start_docstrings, add_start_docstrings_to_callable from .modeling_tf_utils import TFPreTrainedModel, TFSharedEmbeddings, get_initializer, shape_list The provided code...
Gaussian Error Linear Unit. This is a smoother version of the ReLU. Original paper: https://arxiv.org/abs/1606.08415 Args: x: float Tensor to perform activation. Returns: `x` with the GELU activation applied.
184,863
import argparse import logging import os import pickle import sys import torch import transformers.tokenization_transfo_xl as data_utils from transformers import ( CONFIG_NAME, WEIGHTS_NAME, TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl, ) from transformers.tokenization_transf...
null
184,864
import argparse import logging import pathlib import fairseq import torch from fairseq.models.roberta import RobertaModel as FairseqRobertaModel from fairseq.modules import TransformerSentenceEncoderLayer from packaging import version from transformers.modeling_bert import BertIntermediate, BertLayer, BertOutput, BertS...
Copy/paste/tweak roberta's weights to our BERT structure.
184,865
import copy import itertools import logging import math import os import torch import torch.nn.functional as F from torch import nn from torch.nn import CrossEntropyLoss from .configuration_t5 import T5Config from .file_utils import DUMMY_INPUTS, DUMMY_MASK, add_start_docstrings from .modeling_utils import PreTrainedMo...
Load tf checkpoints in a pytorch model.
184,866
import json import logging import math import os import torch import torch.nn as nn from torch.nn import CrossEntropyLoss from .activations import gelu_new, swish from .configuration_openai import OpenAIGPTConfig from .file_utils import add_start_docstrings, add_start_docstrings_to_callable from .modeling_utils import ...
Load tf pre-trained weights in a pytorch model (from NumPy arrays here)
184,867
import argparse import json import logging import numpy import torch from transformers import CONFIG_NAME, WEIGHTS_NAME from transformers.tokenization_xlm import VOCAB_FILES_NAMES VOCAB_FILES_NAMES = { "vocab_file": "vocab.json", "merges_file": "merges.txt", } def convert_xlm_checkpoint_to_pytorch(xlm_checkpo...
null
184,868
import logging import os import re import numpy logger = logging.getLogger(__name__) def load_pytorch_weights_in_tf2_model(tf_model, pt_state_dict, tf_inputs=None, allow_missing_keys=False): """ Load pytorch state_dict in a TF 2.0 model. """ try: import torch # noqa: F401 import tensorflow ...
Load pytorch checkpoints in a TF 2.0 model
184,869
import logging import os import re import numpy def load_pytorch_weights_in_tf2_model(tf_model, pt_state_dict, tf_inputs=None, allow_missing_keys=False): """ Load pytorch state_dict in a TF 2.0 model. """ try: import torch # noqa: F401 import tensorflow as tf # noqa: F401 from tens...
Load pytorch checkpoints in a TF 2.0 model
184,870
import logging import os import re import numpy logger = logging.getLogger(__name__) def load_tf2_model_in_pytorch_model(pt_model, tf_model, allow_missing_keys=False): """ Load TF 2.0 model in a pytorch model """ weights = tf_model.weights return load_tf2_weights_in_pytorch_model(pt_model, weights, allo...
Load a TF 2.0 HDF5 checkpoint in a PyTorch model. We use HDF5 to easily do transfer learning (see https://github.com/tensorflow/tensorflow/blob/ee16fcac960ae660e0e4496658a366e2f745e1f0/tensorflow/python/keras/engine/network.py#L1352-L1357).
184,871
import argparse import logging import os from transformers import ( ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,...
null
184,872
import logging import torch.nn as nn from .configuration_xlm_roberta import XLMRobertaConfig from .file_utils import add_start_docstrings from .modeling_roberta import ( RobertaForMaskedLM, RobertaForMultipleChoice, RobertaForSequenceClassification, RobertaForMultiTaskSequenceClassification, Roberta...
null
184,873
import logging import torch.nn as nn from .configuration_xlm_roberta import XLMRobertaConfig from .file_utils import add_start_docstrings from .modeling_roberta import ( RobertaForMaskedLM, RobertaForMultipleChoice, RobertaForSequenceClassification, RobertaForMultiTaskSequenceClassification, Roberta...
null
184,874
import logging import torch.nn as nn from .configuration_xlm_roberta import XLMRobertaConfig from .file_utils import add_start_docstrings from .modeling_roberta import ( RobertaForMaskedLM, RobertaForMultipleChoice, RobertaForSequenceClassification, RobertaForMultiTaskSequenceClassification, Roberta...
null
184,875
import logging import torch.nn as nn from .configuration_xlm_roberta import XLMRobertaConfig from .file_utils import add_start_docstrings from .modeling_roberta import ( RobertaForMaskedLM, RobertaForMultipleChoice, RobertaForSequenceClassification, RobertaForMultiTaskSequenceClassification, Roberta...
null
184,876
import logging import torch.nn as nn from .configuration_xlm_roberta import XLMRobertaConfig from .file_utils import add_start_docstrings from .modeling_roberta import ( RobertaForMaskedLM, RobertaForMultipleChoice, RobertaForSequenceClassification, RobertaForMultiTaskSequenceClassification, Roberta...
null
184,877
import logging import torch.nn as nn from .configuration_xlm_roberta import XLMRobertaConfig from .file_utils import add_start_docstrings from .modeling_roberta import ( RobertaForMaskedLM, RobertaForMultipleChoice, RobertaForSequenceClassification, RobertaForMultiTaskSequenceClassification, Roberta...
null
184,878
import logging import torch.nn as nn from .configuration_xlm_roberta import XLMRobertaConfig from .file_utils import add_start_docstrings from .modeling_roberta import ( RobertaForMaskedLM, RobertaForMultipleChoice, RobertaForSequenceClassification, RobertaForMultiTaskSequenceClassification, Roberta...
null
184,879
import logging import torch.nn as nn from .configuration_xlm_roberta import XLMRobertaConfig from .file_utils import add_start_docstrings from .modeling_roberta import ( RobertaForMaskedLM, RobertaForMultipleChoice, RobertaForSequenceClassification, RobertaForMultiTaskSequenceClassification, Roberta...
null
184,880
import logging import math import os import torch import torch.nn as nn from torch.nn import CrossEntropyLoss, MSELoss from transformers.configuration_albert import AlbertConfig from transformers.modeling_bert import ACT2FN, BertEmbeddings, BertSelfAttention, prune_linear_layer from transformers.modeling_utils import P...
Load tf checkpoints in a pytorch model.
184,881
import logging import math import os import torch from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from .activations import gelu, gelu_new, swish from .configuration_bert import BertConfig from .file_utils import add_start_docstrings, add_start_docstrings_to_callable from .modeling_utils import PreTr...
Load tf checkpoints in a pytorch model.
184,882
import logging import math import os import torch from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from .activations import gelu, gelu_new, swish from .configuration_bert import BertConfig from .file_utils import add_start_docstrings, add_start_docstrings_to_callable from .modeling_utils import PreTr...
null
184,883
import logging import os import typing import torch from torch import nn from torch.nn import CrossEntropyLoss from torch.nn import functional as F from .activations import get_activation from .configuration_utils import PretrainedConfig from .file_utils import ( DUMMY_INPUTS, TF2_WEIGHTS_NAME, TF_WEIGHTS_N...
Filter a distribution of logits using top-k and/or nucleus (top-p) filtering. Args: logits: logits distribution of shape (batch size, vocabulary size). If top_k > 0: keep only the top k tokens with highest probability (top-k filtering). If top_p < 1.0: keep the top tokens with cumulative probability >= top_p (nucleus filtering)...
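A sketch of the filtering, assuming logits has shape (batch size, vocabulary size) and is modified in place:

    import torch
    import torch.nn.functional as F

    def top_k_top_p_filtering(logits, top_k=0, top_p=1.0, filter_value=-float("Inf")):
        # Top-k: mask everything below the k-th largest logit per row.
        if top_k > 0:
            top_k = min(top_k, logits.size(-1))
            kth_value = torch.topk(logits, top_k)[0][..., -1, None]
            logits[logits < kth_value] = filter_value
        # Nucleus: mask the tail whose cumulative probability exceeds top_p.
        if top_p < 1.0:
            sorted_logits, sorted_indices = torch.sort(logits, descending=True)
            cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
            sorted_indices_to_remove = cumulative_probs > top_p
            # Shift right so the first token crossing the threshold is kept.
            sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
            sorted_indices_to_remove[..., 0] = 0
            indices_to_remove = sorted_indices_to_remove.scatter(1, sorted_indices, sorted_indices_to_remove)
            logits[indices_to_remove] = filter_value
        return logits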
184,884
import logging import os import typing import torch from torch import nn from torch.nn import CrossEntropyLoss from torch.nn import functional as F from .activations import get_activation from .configuration_utils import PretrainedConfig from .file_utils import ( DUMMY_INPUTS, TF2_WEIGHTS_NAME, TF_WEIGHTS_N...
Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols are ignored. This is modified from fairseq's `utils.make_positions`. :param torch.Tensor x: :return torch.Tensor:
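A sketch of the padding-aware position numbering described above, assuming input_ids has shape (batch size, sequence length):

    import torch

    def create_position_ids_from_input_ids(input_ids, padding_idx):
        # Non-padding tokens get cumulative indices starting at padding_idx + 1;
        # padding tokens keep padding_idx itself.
        mask = input_ids.ne(padding_idx).int()
        incremental_indices = torch.cumsum(mask, dim=1) * mask
        return incremental_indices.long() + padding_idx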
184,885
import logging import os import typing import torch from torch import nn from torch.nn import CrossEntropyLoss from torch.nn import functional as F from .activations import get_activation from .configuration_utils import PretrainedConfig from .file_utils import ( DUMMY_INPUTS, TF2_WEIGHTS_NAME, TF_WEIGHTS_N...
Prune a Conv1D or nn.Linear layer (model parameters) to keep only the entries in index. Return the pruned layer as a new layer with requires_grad=True. Used to remove heads.
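A sketch of the pruning for the nn.Linear case (the Conv1D case is analogous with transposed dimensions):

    import torch
    from torch import nn

    def prune_linear_layer(layer, index, dim=0):
        # Keep only the output rows (dim=0) or input columns (dim=1) listed in `index`.
        index = index.to(layer.weight.device)
        W = layer.weight.index_select(dim, index).clone().detach()
        b = None
        if layer.bias is not None:
            b = layer.bias.clone().detach() if dim == 1 else layer.bias[index].clone().detach()
        new_size = list(layer.weight.size())
        new_size[dim] = len(index)
        new_layer = nn.Linear(new_size[1], new_size[0], bias=layer.bias is not None).to(layer.weight.device)
        with torch.no_grad():
            new_layer.weight.copy_(W.contiguous())
            if b is not None:
                new_layer.bias.copy_(b.contiguous())
        return new_layer  # fresh parameters are created with requires_grad=True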
184,886
import copy import itertools import json import logging import os import re import random from collections import defaultdict from contextlib import contextmanager from tokenizers.implementations import BaseTokenizer from .file_utils import cached_path, hf_bucket_url, is_remote_url, is_tf_available, is_torch_available ...
This contextmanager is in charge of defining the truncation and padding strategies and then restoring the tokenizer settings afterwards. This contextmanager assumes the provided tokenizer has no padding / truncation strategy before the managed section. If your tokenizer sets a padding / truncation strategy before, the...
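An illustrative sketch of such a context manager over a `tokenizers` fast tokenizer; the function name and argument set here are simplified assumptions, not the library's exact interface:

    from contextlib import contextmanager

    @contextmanager
    def truncate_and_pad(tokenizer, max_length):
        # Illustrative only: enable strategies on entry, clear them on exit,
        # assuming no strategy was set before the managed section.
        tokenizer.enable_truncation(max_length)
        tokenizer.enable_padding(length=max_length)
        try:
            yield
        finally:
            tokenizer.no_truncation()
            tokenizer.no_padding()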
184,887
import json import logging import os import re import sys import unicodedata from typing import List, Optional import sacremoses as sm from .tokenization_utils import PreTrainedTokenizer The provided code snippet includes necessary dependencies for implementing the `get_pairs` function. Write a Python function `def ge...
Return the set of symbol pairs in a word. A word is represented as a tuple of symbols (symbols being variable-length strings).
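A sketch matching this docstring; each pair is two adjacent symbols of the word tuple:

    def get_pairs(word):
        # `word` is a tuple of symbols; collect every adjacent (left, right) pair.
        pairs = set()
        prev_char = word[0]
        for char in word[1:]:
            pairs.add((prev_char, char))
            prev_char = char
        return pairs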
184,888
import json import logging import os import re import sys import unicodedata from typing import List, Optional import sacremoses as sm from .tokenization_utils import PreTrainedTokenizer The provided code snippet includes necessary dependencies for implementing the `lowercase_and_remove_accent` function. Write a Pytho...
Lowercases and strips accents from a piece of text, based on https://github.com/facebookresearch/XLM/blob/master/tools/lowercase_and_remove_accent.py
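A string-in, string-out sketch of the technique (NFD decomposition, then dropping combining marks); the original operates on token lists, so this is a simplification:

    import unicodedata

    def lowercase_and_remove_accent(text):
        # NFD splits accented characters into base char + combining mark (category Mn).
        text = unicodedata.normalize("NFD", text.lower())
        return "".join(ch for ch in text if unicodedata.category(ch) != "Mn")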
184,889
import json import logging import os import re import sys import unicodedata from typing import List, Optional import sacremoses as sm from .tokenization_utils import PreTrainedTokenizer The provided code snippet includes necessary dependencies for implementing the `replace_unicode_punct` function. Write a Python func...
Port of https://github.com/moses-smt/mosesdecoder/blob/master/scripts/tokenizer/replace-unicode-punctuation.perl
184,890
import json import logging import os import re import sys import unicodedata from typing import List, Optional import sacremoses as sm from .tokenization_utils import PreTrainedTokenizer The provided code snippet includes necessary dependencies for implementing the `remove_non_printing_char` function. Write a Python f...
Port of https://github.com/moses-smt/mosesdecoder/blob/master/scripts/tokenizer/remove-non-printing-char.perl
184,891
import json import logging import os import re import sys import unicodedata from typing import List, Optional import sacremoses as sm from .tokenization_utils import PreTrainedTokenizer The provided code snippet includes necessary dependencies for implementing the `romanian_preprocessing` function. Write a Python fun...
Sennrich's WMT16 scripts for Romanian preprocessing, used by model `xlm-mlm-enro-1024`
184,892
import json import logging import os from functools import lru_cache import regex as re from tokenizers import ByteLevelBPETokenizer from .tokenization_utils import PreTrainedTokenizer, PreTrainedTokenizerFast The provided code snippet includes necessary dependencies for implementing the `bytes_to_unicode` function. W...
Returns a list of utf-8 bytes and a mapping to unicode strings. We specifically avoid mapping to whitespace/control characters the bpe code barfs on. The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your vocab if you want to avoid UNKs. When you're at something like...
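A sketch of the byte-to-unicode table: printable, non-space bytes map to themselves, and every remaining byte is shifted to an unused code point above 255 so each byte gets a distinct, non-whitespace, non-control character:

    from functools import lru_cache

    @lru_cache()
    def bytes_to_unicode():
        # Bytes whose characters are already printable and non-space keep their code point.
        bs = (list(range(ord("!"), ord("~") + 1))
              + list(range(ord("\u00a1"), ord("\u00ac") + 1))
              + list(range(ord("\u00ae"), ord("\u00ff") + 1)))
        cs = bs[:]
        n = 0
        # Remaining bytes are remapped to unused code points starting at 256.
        for b in range(2 ** 8):
            if b not in bs:
                bs.append(b)
                cs.append(2 ** 8 + n)
                n += 1
        return dict(zip(bs, [chr(c) for c in cs]))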
184,893
import json import logging import os from functools import lru_cache import regex as re from tokenizers import ByteLevelBPETokenizer from .tokenization_utils import PreTrainedTokenizer, PreTrainedTokenizerFast The provided code snippet includes necessary dependencies for implementing the `get_pairs` function. Write a ...
Return the set of symbol pairs in a word. A word is represented as a tuple of symbols (symbols being variable-length strings).
184,894
The provided code snippet includes necessary dependencies for implementing the `prepare_encoder_decoder_model_kwargs` function. Write a Python function `def prepare_encoder_decoder_model_kwargs(**kwargs)` to solve the following problem: Prepare the encoder and decoder's keyword arguments. Keyword arguments come in 3 ...
Prepare the encoder and decoder's keyword arguments. Keyword arguments come in 3 flavors: - encoder-specific (prefixed by `encoder_`) - decoder-specific (prefixed by `decoder_`) - those that apply to the model as a whole. We let the specific kwargs override the common ones in case of conflict.
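A sketch of the prefix-based split described above:

    def prepare_encoder_decoder_model_kwargs(**kwargs):
        # Shared kwargs carry neither prefix; prefixed kwargs override them.
        common = {k: v for k, v in kwargs.items()
                  if not k.startswith("encoder_") and not k.startswith("decoder_")}
        encoder_kwargs, decoder_kwargs = common.copy(), common.copy()
        for key, value in kwargs.items():
            if key.startswith("encoder_"):
                encoder_kwargs[key[len("encoder_"):]] = value
            elif key.startswith("decoder_"):
                decoder_kwargs[key[len("decoder_"):]] = value
        return encoder_kwargs, decoder_kwargs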
184,895
import collections import logging import os import unicodedata from typing import List, Optional from tokenizers import BertWordPieceTokenizer from .tokenization_utils import PreTrainedTokenizer, PreTrainedTokenizerFast The provided code snippet includes necessary dependencies for implementing the `load_vocab` functio...
Loads a vocabulary file into a dictionary.
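A sketch assuming one token per line, indexed in file order:

    import collections

    def load_vocab(vocab_file):
        # Token ids are simply line numbers, preserved in insertion order.
        vocab = collections.OrderedDict()
        with open(vocab_file, "r", encoding="utf-8") as reader:
            for index, token in enumerate(reader):
                vocab[token.rstrip("\n")] = index
        return vocab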
184,896
import collections import logging import os import unicodedata from typing import List, Optional from tokenizers import BertWordPieceTokenizer from .tokenization_utils import PreTrainedTokenizer, PreTrainedTokenizerFast The provided code snippet includes necessary dependencies for implementing the `_is_whitespace` fun...
Checks whether `chars` is a whitespace character.
184,897
import collections import logging import os import unicodedata from typing import List, Optional from tokenizers import BertWordPieceTokenizer from .tokenization_utils import PreTrainedTokenizer, PreTrainedTokenizerFast The provided code snippet includes necessary dependencies for implementing the `_is_control` functi...
Checks whether `chars` is a control character.
184,898
import collections import logging import os import unicodedata from typing import List, Optional from tokenizers import BertWordPieceTokenizer from .tokenization_utils import PreTrainedTokenizer, PreTrainedTokenizerFast The provided code snippet includes necessary dependencies for implementing the `_is_punctuation` fu...
Checks whether `chars` is a punctuation character.
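Sketches of the three character-class helpers from the last rows, following the usual BERT tokenizer conventions (tab/newline/return count as whitespace rather than control; the ASCII symbol ranges count as punctuation even where Unicode disagrees):

    import unicodedata

    def _is_whitespace(char):
        # \t, \n, \r are treated as whitespace despite their control category.
        if char in (" ", "\t", "\n", "\r"):
            return True
        return unicodedata.category(char) == "Zs"

    def _is_control(char):
        if char in ("\t", "\n", "\r"):
            return False  # handled as whitespace above
        return unicodedata.category(char).startswith("C")

    def _is_punctuation(char):
        cp = ord(char)
        # ASCII ranges around letters/digits (e.g. "^", "$") count as punctuation.
        if (33 <= cp <= 47) or (58 <= cp <= 64) or (91 <= cp <= 96) or (123 <= cp <= 126):
            return True
        return unicodedata.category(char).startswith("P")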
184,900
from __future__ import absolute_import, division, print_function import argparse from transformers import BertTokenizer, XLMTokenizer, XLMRobertaTokenizer import os from collections import defaultdict import csv import random import os import shutil import json def panx_preprocess(args): def _process_one_file(infile...
null
184,902
from __future__ import absolute_import, division, print_function import argparse from transformers import BertTokenizer, XLMTokenizer, XLMRobertaTokenizer import os from collections import defaultdict import csv import random import os import shutil import json def udpos_preprocess(args): def _read_one_file(file): ...
null
184,903
from __future__ import absolute_import, division, print_function import argparse from transformers import BertTokenizer, XLMTokenizer, XLMRobertaTokenizer import os from collections import defaultdict import csv import random import os import shutil import json def pawsx_preprocess(args): def _preprocess_one_file(in...
null
184,904
from __future__ import absolute_import, division, print_function import argparse from transformers import BertTokenizer, XLMTokenizer, XLMRobertaTokenizer import os from collections import defaultdict import csv import random import os import shutil import json def xnli_preprocess(args): def _preprocess_file(infile,...
null
184,906
from __future__ import absolute_import, division, print_function import argparse from transformers import BertTokenizer, XLMTokenizer, XLMRobertaTokenizer import os from collections import defaultdict import csv import random import os import shutil import json def xquad_preprocess(args): # Remove the test annotatio...
null
184,907
from __future__ import absolute_import, division, print_function import argparse from transformers import BertTokenizer, XLMTokenizer, XLMRobertaTokenizer import os from collections import defaultdict import csv import random import os import shutil import json def mlqa_preprocess(args): # Remove the test annotation...
null
184,910
import argparse import datetime import numpy as np import time import torch import torch.backends.cudnn as cudnn import json import os from pathlib import Path from timm.models import create_model from optim_factory import create_optimizer from datasets import build_beit_pretraining_dataset from engine_for_pretraining ...
null
184,911
import argparse import datetime import numpy as np import time import torch import torch.backends.cudnn as cudnn import json import os from pathlib import Path from timm.models import create_model from optim_factory import create_optimizer from datasets import build_beit_pretraining_dataset from engine_for_pretraining ...
null
184,916
import io import os import math import time import json from collections import defaultdict, deque import datetime import numpy as np from timm.utils import get_state_dict from pathlib import Path import torch import torch.distributed as dist from torch._six import inf from modeling_discrete_vae import Dalle_VAE, Discr...
null
184,917
import io import os import math import time import json from collections import defaultdict, deque import datetime import numpy as np from timm.utils import get_state_dict from pathlib import Path import torch import torch.distributed as dist from torch._six import inf from modeling_discrete_vae import Dalle_VAE, Discr...
null
184,918
import io import os import math import time import json from collections import defaultdict, deque import datetime import numpy as np from timm.utils import get_state_dict from pathlib import Path import torch import torch.distributed as dist from torch._six import inf from modeling_discrete_vae import Dalle_VAE, Discr...
null
184,919
import io import os import math import time import json from collections import defaultdict, deque import datetime import numpy as np from timm.utils import get_state_dict from pathlib import Path import torch import torch.distributed as dist from torch._six import inf from modeling_discrete_vae import Dalle_VAE, Discr...
null
184,920
import io import os import math import time import json from collections import defaultdict, deque import datetime import numpy as np from timm.utils import get_state_dict from pathlib import Path import torch import torch.distributed as dist from torch._six import inf from modeling_discrete_vae import Dalle_VAE, Discr...
null
184,921
import io import os import math import time import json from collections import defaultdict, deque import datetime import numpy as np from timm.utils import get_state_dict from pathlib import Path import torch import torch.distributed as dist from torch._six import inf from modeling_discrete_vae import Dalle_VAE, Discr...
null
184,923
import torch from torch import optim as optim from timm.optim.adafactor import Adafactor from timm.optim.adahessian import Adahessian from timm.optim.adamp import AdamP from timm.optim.lookahead import Lookahead from timm.optim.nadam import Nadam from timm.optim.novograd import NovoGrad from timm.optim.nvnovograd impor...
null
184,924
import math from functools import partial import torch import torch.nn as nn import torch.nn.functional as F from timm.models.layers import drop_path, to_2tuple, trunc_normal_ from timm.models.registry import register_model def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input...
null
184,925
import math from functools import partial import torch import torch.nn as nn import torch.nn.functional as F from timm.models.layers import drop_path, to_2tuple, trunc_normal_ from timm.models.registry import register_model def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input...
null
184,926
import math from functools import partial import torch import torch.nn as nn import torch.nn.functional as F from timm.models.layers import drop_path, to_2tuple, trunc_normal_ from timm.models.registry import register_model def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input...
null
184,927
import math from functools import partial import torch import torch.nn as nn import torch.nn.functional as F from timm.models.layers import drop_path, to_2tuple, trunc_normal_ from timm.models.registry import register_model def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input...
null
184,928
import math from functools import partial import torch import torch.nn as nn import torch.nn.functional as F from timm.models.layers import drop_path, to_2tuple, trunc_normal_ from timm.models.registry import register_model def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input...
null
184,929
import argparse import datetime import numpy as np import time import torch import torch.backends.cudnn as cudnn import json import os from pathlib import Path from timm.data.mixup import Mixup from timm.models import create_model from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy from timm.utils ...
null
184,930
import attr import math import torch import torch.nn as nn import torch.nn.functional as F logit_laplace_eps: float = 0.1 def map_pixels(x: torch.Tensor) -> torch.Tensor: if x.dtype != torch.float: raise ValueError('expected input to have type float') return (1 - 2 * logit_laplace_eps) * x + logit_laplace_eps
null
184,938
import io import os import os.path as osp import pkgutil import time import warnings from collections import OrderedDict from importlib import import_module from tempfile import TemporaryDirectory import torch import torchvision from torch.optim import Optimizer from torch.utils import model_zoo from torch.nn import fu...
Save checkpoint to file. The checkpoint will have 3 fields: ``meta``, ``state_dict`` and ``optimizer``. By default ``meta`` will contain version and time info. Args: model (Module): Module whose params are to be saved. filename (str): Checkpoint filename. optimizer (:obj:`Optimizer`, optional): Optimizer to be saved. m...
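A minimal sketch of a checkpoint writer with the three fields named in the docstring; the CPU copy and the time stamp in meta are assumptions:

    import time
    import torch

    def save_checkpoint(model, filename, optimizer=None, meta=None):
        # Three fields: meta (version/time info), state_dict (moved to CPU), optimizer.
        meta = dict(meta or {}, time=time.asctime())
        state_dict = {k: v.cpu() for k, v in model.state_dict().items()}
        checkpoint = {"meta": meta, "state_dict": state_dict}
        if optimizer is not None:
            checkpoint["optimizer"] = optimizer.state_dict()
        torch.save(checkpoint, filename)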
184,940
import os import torch from torchvision import datasets, transforms from timm.data.constants import \ IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD from transforms import RandomResizedCropAndInterpolationWithTwoPic from timm.data import create_transform from dall_e.uti...
null
184,941
import os import torch from torchvision import datasets, transforms from timm.data.constants import \ IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD from transforms import RandomResizedCropAndInterpolationWithTwoPic from timm.data import create_transform from dall_e.uti...
null
184,945
import math import torch import torch.nn as nn from functools import partial from modeling_finetune import Block, _cfg, PatchEmbed, RelativePositionBias from timm.models.registry import register_model from timm.models.layers import trunc_normal_ as __call_trunc_normal_ class VisionTransformerForMaskedImageModeling(nn.M...
null
184,946
import math import torch import torch.nn as nn from functools import partial from modeling_finetune import Block, _cfg, PatchEmbed, RelativePositionBias from timm.models.registry import register_model from timm.models.layers import trunc_normal_ as __call_trunc_normal_ class VisionTransformerForMaskedImageModeling(nn.M...
null