Dataset columns:
- id (int64): 0 to 190k
- prompt (string): lengths 21 to 13.4M characters
- docstring (string): lengths 1 to 12k characters

Each record below lists its id, the prompt (truncated by the viewer), and the docstring (null when absent).
id 185,097
prompt: import itertools import logging import os import sys from typing import Any, List, Optional, Union import numpy as np import torch import torch.nn.functional as F import librosa from fairseq.data.audio.speech_to_text_dataset import get_features_or_waveform from fairseq.data import data_utils from fairseq.data.fairseq_d...
docstring: null

id 185,098
prompt: import itertools import logging import os import sys from typing import Any, List, Optional, Union import numpy as np import torch import torch.nn.functional as F import librosa from fairseq.data.audio.speech_to_text_dataset import get_features_or_waveform from fairseq.data import data_utils from fairseq.data.fairseq_d...
docstring: null

id 185,099
prompt: import itertools import logging import os import sys from typing import Any, List, Optional, Union import numpy as np import torch import torch.nn.functional as F import librosa from fairseq.data.audio.speech_to_text_dataset import get_features_or_waveform from fairseq.data import data_utils from fairseq.data.fairseq_d...
docstring: Compute log-Mel filterbank feature. (https://github.com/kan-bayashi/ParallelWaveGAN/blob/master/parallel_wavegan/bin/preprocess.py) Args: audio (ndarray): Audio signal (T,). sampling_rate (int): Sampling rate. fft_size (int): FFT size. hop_size (int): Hop size. win_length (int): Window length. If set to None, it will b...
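The docstring in record 185,099 pins down the whole computation and links the ParallelWaveGAN preprocessing script it comes from. A minimal sketch of such an extractor, assuming librosa's `stft` and `filters.mel` APIs; the defaults shown for `window`, `num_mels`, and `eps` are illustrative, not taken from the dataset:

```python
import numpy as np
import librosa

def logmelfilterbank(audio, sampling_rate, fft_size=1024, hop_size=256,
                     win_length=None, window="hann", num_mels=80,
                     fmin=None, fmax=None, eps=1e-10):
    """Compute a log-Mel filterbank feature matrix of shape (frames, num_mels)."""
    # Magnitude spectrogram: (frames, fft_size // 2 + 1) after transposing.
    x_stft = librosa.stft(audio, n_fft=fft_size, hop_length=hop_size,
                          win_length=win_length, window=window)
    spc = np.abs(x_stft).T
    # Mel filterbank matrix: (num_mels, fft_size // 2 + 1).
    fmin = 0 if fmin is None else fmin
    fmax = sampling_rate / 2 if fmax is None else fmax
    mel_basis = librosa.filters.mel(sr=sampling_rate, n_fft=fft_size,
                                    n_mels=num_mels, fmin=fmin, fmax=fmax)
    # Clip before the log so silent frames don't produce -inf.
    return np.log10(np.maximum(eps, np.dot(spc, mel_basis.T)))
```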
id 185,100
prompt: import math import numpy as np import torch from fairseq.data import FairseqDataset, data_utils def collate( samples, pad_idx, eos_idx, vocab, left_pad_source=False, left_pad_target=False, input_feeding=True, pad_to_length=None, ): assert input_feeding if len(samples) == 0: ...
docstring: null

id 185,101
prompt: import itertools import logging import os from typing import Any, List, Optional import numpy as np import torch import torch.nn.functional as F import librosa from fairseq.data.audio.speech_to_text_dataset import get_features_or_waveform from fairseq.data import data_utils, Dictionary from fairseq.data.fairseq_dataset...
docstring: Convert a list of 2D frames into a padded 3D tensor Args: frames (list): list of 2D frames of size L[i]*f_dim. Where L[i] is length of i-th frame and f_dim is static dimension of features Returns: 3D tensor of size len(frames)*len_max*f_dim where len_max is max of L[i]
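Record 185,101's docstring fully specifies the shape contract, so a sketch is easy. This zero-pads along the time axis, assuming all frames are torch tensors sharing the feature dimension f_dim; the name `pad_frames_to_3d` is illustrative:

```python
import torch

def pad_frames_to_3d(frames):
    """Stack variable-length 2D frames (L_i x f_dim) into a zero-padded
    3D tensor of shape (len(frames), max_i L_i, f_dim)."""
    len_max = max(f.size(0) for f in frames)
    f_dim = frames[0].size(1)
    out = frames[0].new_zeros(len(frames), len_max, f_dim)
    for i, f in enumerate(frames):
        out[i, : f.size(0)] = f  # leave the padded tail as zeros
    return out
```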
id 185,102
prompt: import itertools import logging import os from typing import Any, List, Optional import numpy as np import torch import torch.nn.functional as F import librosa from fairseq.data.audio.speech_to_text_dataset import get_features_or_waveform from fairseq.data import data_utils, Dictionary from fairseq.data.fairseq_dataset...
docstring: null

id 185,103
prompt: import itertools import logging import os from typing import Any, List, Optional import numpy as np import torch import torch.nn.functional as F import librosa from fairseq.data.audio.speech_to_text_dataset import get_features_or_waveform from fairseq.data import data_utils, Dictionary from fairseq.data.fairseq_dataset...
docstring: null

id 185,104
prompt: import itertools import logging import os from typing import Any, List, Optional import numpy as np import torch import torch.nn.functional as F import librosa from fairseq.data.audio.speech_to_text_dataset import get_features_or_waveform from fairseq.data import data_utils, Dictionary from fairseq.data.fairseq_dataset...
docstring: null

id 185,105
prompt: import itertools import logging import os from typing import Any, List, Optional import numpy as np import torch import torch.nn.functional as F import librosa from fairseq.data.audio.speech_to_text_dataset import get_features_or_waveform from fairseq.data import data_utils, Dictionary from fairseq.data.fairseq_dataset...
docstring: Compute log-Mel filterbank feature. (https://github.com/kan-bayashi/ParallelWaveGAN/blob/master/parallel_wavegan/bin/preprocess.py) Args: audio (ndarray): Audio signal (T,). sampling_rate (int): Sampling rate. fft_size (int): FFT size. hop_size (int): Hop size. win_length (int): Window length. If set to None, it will b...

id 185,106
prompt: import math from argparse import Namespace from dataclasses import dataclass, field from omegaconf import II from typing import Optional import torch import torch.nn.functional as F from fairseq import metrics, utils from fairseq.criterions import FairseqCriterion, register_criterion from fairseq.dataclass import Fairs...
docstring: null

id 185,107
prompt: from typing import Dict, List import numpy as np import torch import torch.nn as nn import contextlib from fairseq import utils from fairseq.models import ( FairseqEncoder, ) from fairseq.modules import ( FairseqDropout, LayerNorm, TransformerEncoderLayer, ) from torch import Tensor from .transformer_la...
docstring: null

id 185,108
prompt: from fairseq.models import ( register_model_architecture, ) from fairseq.models.transformer_lm import base_lm_architecture def base_lm_architecture(args): def transformer_lm_t5(args): args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 1280) args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_...
docstring: null

id 185,109
prompt: import logging from ast import literal_eval from typing import Dict, List, Optional, Tuple import torch import torch.nn.functional as F from fairseq import utils from fairseq.models import ( FairseqEncoderDecoderModel, FairseqIncrementalDecoder, register_model, register_model_architecture, ) from .modul...
docstring: null

id 185,110
prompt: import logging from ast import literal_eval from typing import Dict, List, Optional, Tuple import torch import torch.nn.functional as F from fairseq import utils from fairseq.models import ( FairseqEncoderDecoderModel, FairseqIncrementalDecoder, register_model, register_model_architecture, ) from .modul...
docstring: null

id 185,111
prompt: import logging from ast import literal_eval from typing import Dict, List, Optional, Tuple import torch import torch.nn.functional as F from fairseq import utils from fairseq.models import ( FairseqEncoderDecoderModel, FairseqIncrementalDecoder, register_model, register_model_architecture, ) from .modul...
docstring: null

id 185,112
prompt: import ast import logging import os import os.path as op import sys from argparse import Namespace import numpy as np import torch from fairseq import checkpoint_utils, options, tasks, utils from fairseq.dataclass.utils import convert_namespace_to_omegaconf from fairseq.logging import progress_bar from omegaconf import...
docstring: null

id 185,113
prompt: import ast import logging import os import os.path as op import sys from argparse import Namespace import numpy as np import torch from fairseq import checkpoint_utils, options, tasks, utils from fairseq.dataclass.utils import convert_namespace_to_omegaconf from fairseq.logging import progress_bar from omegaconf import...
docstring: null

id 185,137
prompt: import datetime import io import os import math import time import json import argparse import numpy as np from pathlib import Path from collections import defaultdict, deque from timm.utils import get_state_dict import torch import torch.distributed as dist import torch.nn as nn import torch.nn.functional as F from to...
docstring: Parse boolean arguments from the command line.
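"Parse boolean arguments from the command line" is the familiar argparse pitfall: `type=bool` treats every non-empty string as True. The usual fix is a converter like the sketch below; the helper name `str2bool` and the flag in the usage line are assumptions, not taken from the dataset:

```python
import argparse

def str2bool(v):
    """Accept the common textual spellings of booleans on the CLI."""
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    if v.lower() in ("no", "false", "f", "n", "0"):
        return False
    raise argparse.ArgumentTypeError("Boolean value expected.")

# Usage (illustrative flag name):
# parser.add_argument("--my_flag", type=str2bool, default=False)
```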
id 185,138
prompt: import datetime import io import os import math import time import json import argparse import numpy as np from pathlib import Path from collections import defaultdict, deque from timm.utils import get_state_dict import torch import torch.distributed as dist import torch.nn as nn import torch.nn.functional as F from to...
docstring: null

id 185,139
prompt: import datetime import io import os import math import time import json import argparse import numpy as np from pathlib import Path from collections import defaultdict, deque from timm.utils import get_state_dict import torch import torch.distributed as dist import torch.nn as nn import torch.nn.functional as F from to...
docstring: null

id 185,140
prompt: import datetime import io import os import math import time import json import argparse import numpy as np from pathlib import Path from collections import defaultdict, deque from timm.utils import get_state_dict import torch import torch.distributed as dist import torch.nn as nn import torch.nn.functional as F from to...
docstring: null

id 185,141
prompt: import datetime import io import os import math import time import json import argparse import numpy as np from pathlib import Path from collections import defaultdict, deque from timm.utils import get_state_dict import torch import torch.distributed as dist import torch.nn as nn import torch.nn.functional as F from to...
docstring: null

id 185,142
prompt: import datetime import io import os import math import time import json import argparse import numpy as np from pathlib import Path from collections import defaultdict, deque from timm.utils import get_state_dict import torch import torch.distributed as dist import torch.nn as nn import torch.nn.functional as F from to...
docstring: null

id 185,143
prompt: import datetime import io import os import math import time import json import argparse import numpy as np from pathlib import Path from collections import defaultdict, deque from timm.utils import get_state_dict import torch import torch.distributed as dist import torch.nn as nn import torch.nn.functional as F from to...
docstring: null

id 185,144
prompt: import datetime import io import os import math import time import json import argparse import numpy as np from pathlib import Path from collections import defaultdict, deque from timm.utils import get_state_dict import torch import torch.distributed as dist import torch.nn as nn import torch.nn.functional as F from to...
docstring: null

id 185,145
prompt: import datetime import io import os import math import time import json import argparse import numpy as np from pathlib import Path from collections import defaultdict, deque from timm.utils import get_state_dict import torch import torch.distributed as dist import torch.nn as nn import torch.nn.functional as F from to...
docstring: null

id 185,146
prompt: import datetime import io import os import math import time import json import argparse import numpy as np from pathlib import Path from collections import defaultdict, deque from timm.utils import get_state_dict import torch import torch.distributed as dist import torch.nn as nn import torch.nn.functional as F from to...
docstring: null

id 185,147
prompt: import datetime import io import os import math import time import json import argparse import numpy as np from pathlib import Path from collections import defaultdict, deque from timm.utils import get_state_dict import torch import torch.distributed as dist import torch.nn as nn import torch.nn.functional as F from to...
docstring: null

id 185,148
prompt: import datetime import io import os import math import time import json import argparse import numpy as np from pathlib import Path from collections import defaultdict, deque from timm.utils import get_state_dict import torch import torch.distributed as dist import torch.nn as nn import torch.nn.functional as F from to...
docstring: null

id 185,149
prompt: import datetime import io import os import math import time import json import argparse import numpy as np from pathlib import Path from collections import defaultdict, deque from timm.utils import get_state_dict import torch import torch.distributed as dist import torch.nn as nn import torch.nn.functional as F from to...
docstring: null
id 185,150
prompt: from torch import optim as optim from timm.optim.lookahead import Lookahead import json def get_num_layer_for_vit(var_name, num_max_layer): if "embed" in var_name: return 0 elif var_name in ( "cls_token", "mask_token", "pos_embed", "language_pos_embed", "word_embeddings.weight", "visio...
docstring: null

id 185,153
prompt: import torch import torch.nn as nn import torch.nn.functional as F from timm.models.registry import register_model import numpy as np import utils from modeling_utils import BEiT3Wrapper, _get_base_config, _get_large_config class BEiT3ForImageClassification(BEiT3Wrapper): def __init__( self, ...
docstring: null

id 185,154
prompt: import torch import torch.nn as nn import torch.nn.functional as F from timm.models.registry import register_model import numpy as np import utils from modeling_utils import BEiT3Wrapper, _get_base_config, _get_large_config class BEiT3ForImageClassification(BEiT3Wrapper): def __init__( self, ...
docstring: null

id 185,155
prompt: import torch import torch.nn as nn import torch.nn.functional as F from timm.models.registry import register_model import numpy as np import utils from modeling_utils import BEiT3Wrapper, _get_base_config, _get_large_config class BEiT3ForVisualReasoning(BEiT3Wrapper): def __init__( self, ...
docstring: null

id 185,156
prompt: import torch import torch.nn as nn import torch.nn.functional as F from timm.models.registry import register_model import numpy as np import utils from modeling_utils import BEiT3Wrapper, _get_base_config, _get_large_config class BEiT3ForVisualReasoning(BEiT3Wrapper): def __init__( self, ar...
docstring: null

id 185,157
prompt: import torch import torch.nn as nn import torch.nn.functional as F from timm.models.registry import register_model import numpy as np import utils from modeling_utils import BEiT3Wrapper, _get_base_config, _get_large_config class BEiT3ForVisualQuestionAnswering(BEiT3Wrapper): def __init__( self, ...
docstring: null

id 185,158
prompt: import torch import torch.nn as nn import torch.nn.functional as F from timm.models.registry import register_model import numpy as np import utils from modeling_utils import BEiT3Wrapper, _get_base_config, _get_large_config class BEiT3ForVisualQuestionAnswering(BEiT3Wrapper): def __init__( self, ...
docstring: null

id 185,159
prompt: import torch import torch.nn as nn import torch.nn.functional as F from timm.models.registry import register_model import numpy as np import utils from modeling_utils import BEiT3Wrapper, _get_base_config, _get_large_config class BEiT3ForVisualQuestionAnswering(BEiT3Wrapper): def __init__( self, ...
docstring: null

id 185,160
prompt: import torch import torch.nn as nn import torch.nn.functional as F from timm.models.registry import register_model import numpy as np import utils from modeling_utils import BEiT3Wrapper, _get_base_config, _get_large_config class BEiT3ForVisualQuestionAnswering(BEiT3Wrapper): def __init__( self, ...
docstring: null

id 185,161
prompt: import torch import torch.nn as nn import torch.nn.functional as F from timm.models.registry import register_model import numpy as np import utils from modeling_utils import BEiT3Wrapper, _get_base_config, _get_large_config class BEiT3ForVisualQuestionAnswering(BEiT3Wrapper): def __init__( self, ...
docstring: null

id 185,162
prompt: import torch import torch.nn as nn import torch.nn.functional as F from timm.models.registry import register_model import numpy as np import utils from modeling_utils import BEiT3Wrapper, _get_base_config, _get_large_config class BEiT3ForCaptioning(BEiT3Wrapper): def __init__( self, args, ...
docstring: null

id 185,163
prompt: import torch import torch.nn as nn import torch.nn.functional as F from timm.models.registry import register_model import numpy as np import utils from modeling_utils import BEiT3Wrapper, _get_base_config, _get_large_config class BEiT3ForCaptioning(BEiT3Wrapper): def __init__( self, args, ...
docstring: null

id 185,164
prompt: import torch import torch.nn as nn import torch.nn.functional as F from timm.models.registry import register_model import numpy as np import utils from modeling_utils import BEiT3Wrapper, _get_base_config, _get_large_config class BEiT3ForCaptioning(BEiT3Wrapper): def __init__( self, ...
docstring: null

id 185,165
prompt: import torch import torch.nn as nn import torch.nn.functional as F from timm.models.registry import register_model import numpy as np import utils from modeling_utils import BEiT3Wrapper, _get_base_config, _get_large_config class BEiT3ForRetrieval(BEiT3Wrapper): def __init__( self, args, ...
docstring: null

id 185,166
prompt: import torch import torch.nn as nn import torch.nn.functional as F from timm.models.registry import register_model import numpy as np import utils from modeling_utils import BEiT3Wrapper, _get_base_config, _get_large_config class BEiT3ForRetrieval(BEiT3Wrapper): def __init__( self, args, ...
docstring: null

id 185,167
prompt: import torch import torch.nn as nn import torch.nn.functional as F from timm.models.registry import register_model import numpy as np import utils from modeling_utils import BEiT3Wrapper, _get_base_config, _get_large_config class BEiT3ForRetrieval(BEiT3Wrapper): def __init__( self, args, ...
docstring: null
id 185,168
prompt: import re contractions = { "aint": "ain't", "arent": "aren't", "cant": "can't", "couldve": "could've", "couldnt": "couldn't", "couldn'tve": "couldn't've", "couldnt've": "couldn't've", "didnt": "didn't", "doesnt": "doesn't", "dont": "don't", "hadnt": "hadn't", "hadnt've": ...
docstring: null

id 185,169
prompt: import os import json import random import torch import glob from collections import defaultdict, Counter from torchvision import transforms from torchvision.datasets.folder import default_loader from timm.data.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD...
docstring: null

id 185,170
prompt: import os import json import random import torch import glob from collections import defaultdict, Counter from torchvision import transforms from torchvision.datasets.folder import default_loader from timm.data.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD...
docstring: null

id 185,171
prompt: import os import json import random import torch import glob from collections import defaultdict, Counter from torchvision import transforms from torchvision.datasets.folder import default_loader from timm.data.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD...
docstring: null

id 185,172
prompt: import os import json import random import torch import glob from collections import defaultdict, Counter from torchvision import transforms from torchvision.datasets.folder import default_loader from timm.data.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD...
docstring: null

id 185,173
prompt: import math import sys import json from typing import Iterable, Optional import torch import torch.nn as nn import torch.nn.functional as F from timm.utils import ModelEma from timm.utils import accuracy, ModelEma from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy from datasets import get_sentence...
docstring: null

id 185,174
prompt: import math import sys import json from typing import Iterable, Optional import torch import torch.nn as nn import torch.nn.functional as F from timm.utils import ModelEma from timm.utils import accuracy, ModelEma from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy from datasets import get_sentence...
docstring: null

id 185,175
prompt: import math import sys import json from typing import Iterable, Optional import torch import torch.nn as nn import torch.nn.functional as F from timm.utils import ModelEma from timm.utils import accuracy, ModelEma from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy from datasets import get_sentence...
docstring: null

id 185,176
prompt: import math import torch import torch.nn as nn from timm.models.layers import trunc_normal_ as __call_trunc_normal_ from torchscale.model.BEiT3 import BEiT3 from torchscale.architecture.config import EncoderConfig def trunc_normal_(tensor, mean=0., std=1.): __call_trunc_normal_(tensor, mean=mean, std=std, a=-std, ...
docstring: null

id 185,177
prompt: import argparse import datetime import numpy as np import time import torch import torch.backends.cudnn as cudnn import json import os from pathlib import Path from timm.data.mixup import Mixup from timm.models import create_model from timm.utils import ModelEma from optim_factory import create_optimizer, get_parameter...
docstring: null

id 185,178
prompt: from __future__ import absolute_import, division, print_function import argparse import glob import logging import os import random import json import numpy as np import torch from seqeval.metrics import f1_score, precision_score, recall_score from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler, ...
docstring: Train the model

id 185,179
prompt: from __future__ import absolute_import, division, print_function import argparse import glob import logging import os import random import json import time import numpy as np import torch from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler, TensorDataset) from torch....
docstring: Train the model

id 185,180
prompt: from __future__ import absolute_import, division, print_function import argparse import glob import logging import os import random import json import numpy as np import torch from sklearn.metrics import matthews_corrcoef, f1_score from sklearn.metrics import cohen_kappa_score, precision_score, recall_score, precision_...
docstring: Train the model

id 185,181
prompt: import logging import os from tqdm import * def get_labels(path): if path: with open(path, "r") as f: labels = f.read().splitlines() if "O" not in labels: labels = ["O"] + labels return labels else: return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-OR...
docstring: null

id 185,182
prompt: import logging import os import csv import sys import copy import json from scipy.stats import pearsonr, spearmanr from sklearn.metrics import matthews_corrcoef, f1_score from sklearn.preprocessing import MultiLabelBinarizer logger = logging.getLogger(__name__) class InputFeatures(object): """ A single set of f...
docstring: Loads a data file into a list of ``InputFeatures`` Args: examples: List of ``InputExamples`` or ``tf.data.Dataset`` containing the examples. tokenizer: Instance of a tokenizer that will tokenize the examples max_length: Maximum example length task: GLUE task label_list: List of labels. Can be obtained from the processo...

id 185,183
prompt: import logging import os import csv import sys import copy import json from scipy.stats import pearsonr, spearmanr from sklearn.metrics import matthews_corrcoef, f1_score from sklearn.preprocessing import MultiLabelBinarizer def simple_accuracy(preds, labels): return (preds == labels).mean() def acc_and_f1(preds, l...
docstring: null

id 185,184
prompt: from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import sys import unicodedata import six import logging from six.moves import range import time import glob _ALPHANUMERIC_CHAR_SET = set( six.unichr(i) for i in range(sys.maxunicode) ...
docstring: Decode a list of tokens to a unicode string. Args: tokens: a list of Unicode strings Returns: a unicode string
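The prompt's preamble builds `_ALPHANUMERIC_CHAR_SET`, which suggests the tensor2tensor-style inverse tokenizer: concatenate tokens and restore a single space only between two alphanumeric neighbours. A sketch under that assumption, with the character-set construction inlined so the snippet stands alone:

```python
import sys
import unicodedata

# All Unicode letters and digits (categories L* and N*).
_ALPHANUMERIC_CHAR_SET = set(
    chr(i) for i in range(sys.maxunicode)
    if unicodedata.category(chr(i))[0] in "LN")

def decode(tokens):
    """Join a token list back into a unicode string, inserting a space
    only between two adjacent alphanumeric tokens."""
    token_is_alnum = [t[0] in _ALPHANUMERIC_CHAR_SET for t in tokens]
    out = []
    for i, token in enumerate(tokens):
        if i > 0 and token_is_alnum[i - 1] and token_is_alnum[i]:
            out.append(" ")
        out.append(token)
    return "".join(out)
```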
id 185,185
prompt: from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import sys import unicodedata import six import logging from six.moves import range import time import glob _native_to_unicode = (lambda s: s.decode("utf-8")) if six.PY2 else (lambda s: s) lo...
docstring: Read a vocab file and return a dictionary of token counts. Reads a two-column CSV file of tokens and their frequency in a dataset. The tokens are presumed to be generated by encode() or the equivalent. Args: text_filepattern: A pattern matching one or more files. max_lines: An integer; maximum total lines to read. Retu...
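The docstring describes a two-column token/count file over a file pattern, matching tensor2tensor's `'token',count` vocabulary dumps. A hedged sketch assuming exactly that layout (token possibly wrapped in single quotes, count after the last comma):

```python
import glob

def vocab_token_counts(text_filepattern, max_lines=None):
    """Read token counts from every file matching the pattern, stopping
    after max_lines total lines when a limit is given."""
    counts = {}
    lines_read = 0
    for path in sorted(glob.glob(text_filepattern)):
        with open(path, encoding="utf-8") as f:
            for line in f:
                if max_lines is not None and lines_read >= max_lines:
                    return counts
                lines_read += 1
                if "," not in line:
                    continue  # skip malformed lines
                token, _, count = line.strip().rpartition(",")
                counts[token.strip("'")] = int(count)
    return counts
```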
id 185,186
prompt: from __future__ import absolute_import from __future__ import division from numpy.core.fromnumeric import argsort from text_encoder import SubwordTextEncoder import tokenizer import tempfile import argparse from transformers import BertTokenizer import random import math import numpy as np def merge_output_file_with_be...
docstring: @description : The function to get the incremental vocabulary for @param : @Returns :

id 185,187
prompt: from __future__ import absolute_import from __future__ import division from numpy.core.fromnumeric import argsort from text_encoder import SubwordTextEncoder import tokenizer import tempfile import argparse from transformers import BertTokenizer import random import math import numpy as np def get_args(): parser =...
docstring: null

id 185,188
prompt: from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections from itertools import chain import re import time import logging import six from six.moves import range logger = logging.getLogger(__name__) def is_unicode(s): return isinstance(s, six.text...
docstring: null

id 185,189
prompt: from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections from itertools import chain import re import time import logging import six from six.moves import range if six.PY2: RESERVED_TOKENS_BYTES = RESERVED_TOKENS else: RESERVED_TOKENS_BYTES = [...
docstring: null

id 185,190
prompt: from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections from itertools import chain import re import time import logging import six from six.moves import range if six.PY2: RESERVED_TOKENS_BYTES = RESERVED_TOKENS else: RESERVED_TOKENS_BYTES = [...
docstring: Escape away underscores and OOV characters and append '_'. This allows the token to be expressed as the concatenation of a list of subtokens from the vocabulary. The underscore acts as a sentinel which allows us to invertibly concatenate multiple such lists. Args: token: A unicode string to be escaped. alphabet: A set ...
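The escaping scheme described here matches the subword text encoder convention: escape backslashes and underscores first, replace out-of-alphabet characters (and newlines) with a `\<codepoint>;` sequence, then append the `_` sentinel. A sketch under that reading:

```python
def _escape_token(token, alphabet):
    """Make `token` invertibly concatenable: backslash -> double backslash,
    '_' -> '\\u', characters outside `alphabet` (or newlines) -> '\\<ord>;',
    plus a trailing '_' sentinel."""
    token = token.replace("\\", "\\\\").replace("_", "\\u")
    chars = [c if c in alphabet and c != "\n" else r"\%d;" % ord(c)
             for c in token]
    return "".join(chars) + "_"
```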
id 185,191
prompt: from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections from itertools import chain import re import time import logging import six from six.moves import range if six.PY2: RESERVED_TOKENS_BYTES = RESERVED_TOKENS else: RESERVED_TOKENS_BYTES = [...
docstring: null

id 185,192
prompt: from __future__ import absolute_import from __future__ import division from text_encoder import SubwordTextEncoder import tokenizer import os import tempfile import tensorflow as tf def merge_output_file_with_bert_vocab(output_filename, bert_vocab, temp_path): writer = open(output_filename, 'w', encoding='utf-8') ...
docstring: null

id 185,193
prompt: import logging import os import sys from dataclasses import dataclass, field from typing import Optional import numpy as np from datasets import ClassLabel, load_dataset, load_metric import layoutlmft.data.datasets.funsd import transformers from layoutlmft.data import DataCollatorForKeyValueExtraction from layoutlmft.d...
docstring: null

id 185,194
prompt: import logging import os import sys from dataclasses import dataclass, field from typing import Optional import numpy as np from datasets import ClassLabel, load_dataset, load_metric import layoutlmft.data.datasets.xfun import transformers from layoutlmft.data import DataCollatorForKeyValueExtraction from layoutlmft.da...
docstring: null

id 185,195
prompt: import logging import os import sys import numpy as np from datasets import ClassLabel, load_dataset import layoutlmft.data.datasets.xfun import transformers from layoutlmft import AutoModelForRelationExtraction from layoutlmft.data.data_args import XFUNDataTrainingArguments from layoutlmft.data.data_collator import Da...
docstring: null

id 185,205
prompt: from __future__ import print_function from bleu.bleu import Bleu from meteor.meteor import Meteor from rouge.rouge import Rouge from cider.cider import Cider from collections import defaultdict from argparse import ArgumentParser import string import sys _tok_dict = {"(": "-lrb-", ")": "-rrb-", "[": "-lsb-...
docstring: null

id 185,206
prompt: from __future__ import print_function from bleu.bleu import Bleu from meteor.meteor import Meteor from rouge.rouge import Rouge from cider.cider import Cider from collections import defaultdict from argparse import ArgumentParser import string import sys def detokenize(tk_list): r_list = [] for tk in tk_list: ...
docstring: Given a filename, calculate the metric scores for that prediction file isDin: boolean value to check whether input file is DirectIn.txt

id 185,207
prompt: from __future__ import print_function from bleu.bleu import Bleu from meteor.meteor import Meteor from rouge.rouge import Rouge from cider.cider import Cider from collections import defaultdict from argparse import ArgumentParser import string import sys def fix_tokenization(text): input_tokens = text.split() o...
docstring: Given a filename, calculate the metric scores for that prediction file isDin: boolean value to check whether input file is DirectIn.txt

id 185,208
prompt: import torch from torch.nn import DataParallel from torch.cuda._utils import _get_device_index from torch.nn.parallel._functions import Scatter from itertools import chain def scatter_imbalance(inputs, target_gpus, dim=0): r""" Slices tensors into approximately equal chunks and distributes them across given...
docstring: r"""Scatter with support for kwargs dictionary

id 185,209
prompt: import os import logging import shutil import tempfile import json from urllib.parse import urlparse from pathlib import Path from typing import Optional, Tuple, Union, IO, Callable, Set from hashlib import sha256 from functools import wraps from tqdm import tqdm import boto3 from botocore.exceptions import ClientError...
docstring: Return the url and etag (which may be ``None``) stored for `filename`. Raise ``FileNotFoundError`` if `filename` or its stored metadata do not exist.
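The imports in record 185,209 point at a pytorch_pretrained_bert-style cache, where each cached file has a `<filename>.json` sidecar recording its url and etag. A sketch of `filename_to_url` under that cache-layout assumption:

```python
import json
import os

def filename_to_url(filename, cache_dir):
    """Return (url, etag) recorded for a cached file; etag may be None."""
    cache_path = os.path.join(cache_dir, filename)
    if not os.path.exists(cache_path):
        raise FileNotFoundError("file {} not found".format(cache_path))
    meta_path = cache_path + ".json"
    if not os.path.exists(meta_path):
        raise FileNotFoundError("file {} not found".format(meta_path))
    with open(meta_path, encoding="utf-8") as meta_file:
        metadata = json.load(meta_file)
    return metadata["url"], metadata["etag"]
```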
id 185,210
prompt: import os import logging import shutil import tempfile import json from urllib.parse import urlparse from pathlib import Path from typing import Optional, Tuple, Union, IO, Callable, Set from hashlib import sha256 from functools import wraps from tqdm import tqdm import boto3 from botocore.exceptions import ClientError...
docstring: Given something that might be a URL (or might be a local path), determine which. If it's a URL, download the file and cache it, and return the path to the cached file. If it's already a local path, make sure the file exists and then return the path.
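The URL-or-path dispatch this docstring describes turns on the parsed scheme. A sketch; `get_from_cache` is a hypothetical stand-in for the download-and-cache helper the same module is assumed to define:

```python
import os
from urllib.parse import urlparse

def cached_path(url_or_filename, cache_dir=None):
    """Return a local path for a URL (downloading into the cache) or an
    existing local file; raise otherwise."""
    parsed = urlparse(str(url_or_filename))
    if parsed.scheme in ("http", "https", "s3"):
        # Remote resource: fetch and cache it (helper assumed to exist).
        return get_from_cache(url_or_filename, cache_dir)
    if os.path.exists(url_or_filename):
        return url_or_filename  # already a local file
    if parsed.scheme == "":
        raise FileNotFoundError("file {} not found".format(url_or_filename))
    raise ValueError(
        "unable to parse {} as a URL or local path".format(url_or_filename))
```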
id 185,214
prompt: import math import torch from torch.optim import Optimizer from torch.optim.optimizer import required from torch.nn.utils import clip_grad_norm_ from collections import defaultdict from torch._six import container_abcs from copy import deepcopy from itertools import chain def warmup_cosine(x, warmup=0.002): if x <...
docstring: null

id 185,215
prompt: import math import torch from torch.optim import Optimizer from torch.optim.optimizer import required from torch.nn.utils import clip_grad_norm_ from collections import defaultdict from torch._six import container_abcs from copy import deepcopy from itertools import chain def warmup_constant(x, warmup=0.002): if x...
docstring: null

id 185,216
prompt: import math import torch from torch.optim import Optimizer from torch.optim.optimizer import required from torch.nn.utils import clip_grad_norm_ from collections import defaultdict from torch._six import container_abcs from copy import deepcopy from itertools import chain def warmup_linear(x, warmup=0.002): if x <...
docstring: null

id 185,217
prompt: import math import torch from torch.optim import Optimizer from torch.optim.optimizer import required from torch.nn.utils import clip_grad_norm_ from collections import defaultdict from torch._six import container_abcs from copy import deepcopy from itertools import chain def find_state_dict_subset_finetune(org_state_...
docstring: null

id 185,218
prompt: from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import unicodedata import os import logging from .file_utils import cached_path The provided code snippet includes necessary dependencies for implementing the `load_vocab` function. Write a P...
docstring: Loads a vocabulary file into a dictionary.
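Record 185,218's prompt spells out its task in-line ("Write a Python function" for `load_vocab`), and the behaviour is the classic BERT tokenizer helper: one token per line, mapped to its line index. A minimal sketch:

```python
import collections

def load_vocab(vocab_file):
    """Map each token (one per line) to its line index, preserving order."""
    vocab = collections.OrderedDict()
    with open(vocab_file, encoding="utf-8") as reader:
        for index, line in enumerate(reader):
            vocab[line.rstrip("\n")] = index
    return vocab
```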
id 185,223
prompt: from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import copy import json import math import logging import tarfile import tempfile import shutil import numpy as np from scipy.stats import truncnorm import torch from torch import nn from torch.nn impo...
docstring: Implementation of the gelu activation function. For information: OpenAI GPT's gelu is slightly different (and gives slightly different results): 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
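This docstring gives both variants explicitly: the erf-based GELU and OpenAI GPT's tanh approximation, whose formula it quotes verbatim. Both fit in a few lines of PyTorch:

```python
import math
import torch

def gelu(x):
    # Exact GELU via the Gaussian error function.
    return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))

def gelu_openai(x):
    # OpenAI GPT's tanh approximation, as quoted in the docstring.
    return 0.5 * x * (1 + torch.tanh(
        math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
```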
id 185,224
prompt: from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import copy import json import math import logging import tarfile import tempfile import shutil import numpy as np from scipy.stats import truncnorm import torch from torch import nn from torch.nn impo...
docstring: null

id 185,225
prompt: from random import randint, shuffle, choice from random import random as rand import math import torch from biunilm.loader_utils import get_random_word, batch_list_to_batch_tensors, Pipeline def truncate_tokens_pair(tokens_a, tokens_b, max_len, max_len_a=0, max_len_b=0, trunc_seg=None, always_truncate_tail=False): ...
docstring: null

id 185,226
prompt: from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import logging import glob import argparse import math from tqdm import tqdm, trange import numpy as np import torch from torch.utils.data import DataLoader, RandomSampler from torch.utils.data.distrib...
docstring: null

id 185,227
prompt: from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import logging import glob import argparse import math from tqdm import tqdm, trange import numpy as np import torch from torch.utils.data import DataLoader, RandomSampler from torch.utils.data.distrib...
docstring: null

id 185,228
prompt: from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import logging import glob import math import json import argparse import random from pathlib import Path from tqdm import tqdm, trange import numpy as np import torch from torch.utils.data import Rand...
docstring: null

id 185,229
prompt: from random import randint, shuffle from random import random as rand import numpy as np import torch import torch.utils.data def get_random_word(vocab_words): i = randint(0, len(vocab_words)-1) return vocab_words[i]
docstring: null

id 185,230
prompt: from random import randint, shuffle from random import random as rand import numpy as np import torch import torch.utils.data def batch_list_to_batch_tensors(batch): batch_tensors = [] for x in zip(*batch): if x[0] is None: batch_tensors.append(None) elif isinstance(x[0], torch.Tens...
docstring: null

id 185,231
prompt: from random import randint, shuffle from random import random as rand import numpy as np import torch import torch.utils.data def _get_word_split_index(tokens, st, end): split_idx = [] i = st while i < end: if (not tokens[i].startswith('##')) or (i == st): split_idx.append(i) i ...
docstring: null

id 185,232
prompt: from random import randint, shuffle from random import random as rand import numpy as np import torch import torch.utils.data def _expand_whole_word(tokens, st, end): new_st, new_end = st, end while (new_st >= 0) and tokens[new_st].startswith('##'): new_st -= 1 while (new_end < len(tokens)) and tok...
docstring: null

id 185,233
prompt: import pickle import math import argparse import glob from pathlib import Path from tqdm import tqdm import unicodedata from pytorch_pretrained_bert.tokenization import BertTokenizer def read_traces_from_file(file_name): with open(file_name, "rb") as fin: meta = pickle.load(fin) num_samples = meta[...
docstring: null

id 185,234
prompt: import pickle import math import argparse import glob from pathlib import Path from tqdm import tqdm import unicodedata from pytorch_pretrained_bert.tokenization import BertTokenizer def get_best_sequence(sample, eos_id, pad_id, length_penalty=None, alpha=None, expect=None, min_len=None): # if not any((length_pena...
docstring: null

id 185,235
prompt: import pickle import math import argparse import glob from pathlib import Path from tqdm import tqdm import unicodedata from pytorch_pretrained_bert.tokenization import BertTokenizer def detokenize(tk_list): r_list = [] for tk in tk_list: if tk.startswith('##') and len(r_list) > 0: r_list[-...
docstring: null

id 185,236
prompt: import pickle import math import argparse import glob from pathlib import Path from tqdm import tqdm import unicodedata from pytorch_pretrained_bert.tokenization import BertTokenizer def simple_postprocess(tk_list): # truncate duplicate punctuations while tk_list and len(tk_list) > 4 and len(tk_list[-1]) == 1 ...
docstring: null

id 185,238
prompt: from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import logging import glob import json import argparse import math import string from multiprocessing import Pool, cpu_count from tqdm import tqdm, trange from pathlib import Path import numpy as np im...
docstring: null