code
stringlengths 66
870k
| docstring
stringlengths 19
26.7k
| func_name
stringlengths 1
138
| language
stringclasses 1
value | repo
stringlengths 7
68
| path
stringlengths 5
324
| url
stringlengths 46
389
| license
stringclasses 7
values |
|---|---|---|---|---|---|---|---|
def get_corpus_freq(dataset, filepath, filetype='tsv'):
    """
    Take corpus, split it into sentences, and extract word frequencies.
    Write frequencies to `filepath` as a tsv. Only write the first
    MAX_SENTENCEPIECE_SENTENCES most common words to the file.

    Args:
        dataset: iterable of documents; each entry is either a string or a
            dict carrying the document under the 'text' key.
        filepath: output path for the word/count table.
        filetype: 'tsv' writes tab-separated rows; any other value writes
            comma-separated rows.

    Returns:
        (total_sentence_count, maxlen): number of sentences seen across the
        corpus and the length of the longest input line that produced at
        least one sentence.
    """
    nltk.download('punkt', download_dir="./nltk")
    delimiter = '\t' if filetype == 'tsv' else ','
    print("compute corpus frequency\n", flush=True)
    total_sentence_count = 0
    maxlen = 0
    freqs = {}
    for entry in dataset:
        if isinstance(entry, dict):
            entry = entry['text']
        for line in entry.strip().split('\n'):
            sentences = nltk_tokenize.sent_tokenize(line)
            total_sentence_count += len(sentences)
            # The original recomputed max(len(line), maxlen) once per
            # sentence; updating once per line (when any sentence exists)
            # is equivalent and avoids the redundant work.
            if sentences:
                maxlen = max(len(line), maxlen)
            for sentence in sentences:
                for word in sentence.split():
                    freqs[word] = freqs.get(word, 0) + 1
    print("length of freqs before truncating " + str(len(freqs)), flush=True)
    print("file path for freq " + str(filepath), flush=True)
    # Keep only the MAX_SENTENCEPIECE_SENTENCES most frequent words, highest
    # count first; dict preserves insertion (i.e. descending-count) order.
    ranked = sorted(freqs.items(), key=lambda kv: kv[1], reverse=True)
    freqs_sorted = dict(ranked[:MAX_SENTENCEPIECE_SENTENCES])
    # bug fix: log message previously read "trancating"
    print("length of freqs after truncating " + str(len(freqs_sorted)), flush=True)
    with open(filepath, 'w') as f:
        writer = csv.writer(f, delimiter=delimiter)
        for word, count in freqs_sorted.items():
            writer.writerow([str(word), str(count)])
    return total_sentence_count, maxlen
|
Take corpus, split it into sentences, and extract word frequencies.
Write frequencies to `filepath` as a tsv. Only write the first
MAX_SENTENCEPIECE_SENTENCES most common words to the file.
|
get_corpus_freq
|
python
|
THUDM/GLM
|
data_utils/tokenization.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/tokenization.py
|
MIT
|
def load_spm_model(self):
    """Load the sentencepiece model from disk and rebuild the token/id vocab."""
    # Append the conventional extension when no file exists at the bare path.
    if not os.path.exists(self.spm_model) and not self.spm_model.endswith('.model'):
        self.spm_model = self.spm_model + '.model'
    self.sp = spm.SentencePieceProcessor()
    self.sp.Load(self.spm_model)
    self.num_text_tokens = len(self.sp)
    self.vocab_size = self.num_text_tokens
    # materialize id -> token, then invert it for token -> id lookups
    self._tokens = [self.IdToToken(idx) for idx in range(self.vocab_size)]
    self._vocab = {tok: idx for idx, tok in enumerate(self._tokens)}
|
load sentencepiece model and parse vocab
|
load_spm_model
|
python
|
THUDM/GLM
|
data_utils/tokenization.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/tokenization.py
|
MIT
|
def Train(self, corpus, num_text_tokens):
    """train sentencepiece model on corpus using word frequencies"""
    self.num_text_tokens = num_text_tokens
    model_prefix = self.spm_model
    suffix = str(random.randint(0, 2147483647))
    if model_prefix is None:
        # no model path configured: fall back to a random prefix
        model_prefix = suffix
    if model_prefix.endswith('.model'):
        model_prefix = model_prefix[:model_prefix.rfind('.model')]
    # write the word-frequency table that sentencepiece trains from
    freq_path = model_prefix + '.tsv.' + suffix
    sentence_count, _max_line_len = get_corpus_freq(corpus, freq_path)
    sentence_count = min(sentence_count, MAX_SENTENCEPIECE_SENTENCES)
    print('line count used as input_sentence_size ', sentence_count, flush=True)
    print('training sentencepiece model', flush=True)
    train_args = (
        f'--input={freq_path} --model_prefix={model_prefix}'
        f' --vocab_size={num_text_tokens}'
        f' --model_type={self.model_type}'
        f' --character_coverage={self.character_coverage}'
        f' --input_sentence_size={int(sentence_count)}'
        f' --input_format=tsv'
    )
    print("calling spm.SentencePieceTrainer.Train(%s)" % (train_args), flush=True)
    spm.SentencePieceTrainer.Train(train_args)
    # the frequency table is only needed during training
    os.remove(freq_path)
    self.spm_model = model_prefix + '.model'
    print('sentencepiece model written to ' + self.spm_model, flush=True)
|
train sentencepiece model on corpus using word frequencies
|
Train
|
python
|
THUDM/GLM
|
data_utils/tokenization.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/tokenization.py
|
MIT
|
def DecodeTokens(self, Tokens):
    """converts sentencepiece tokens to a text string"""
    # accept either a raw token sequence or a Tokenization wrapper
    toks = Tokens.tokenization if isinstance(Tokens, Tokenization) else Tokens
    return self.sp.DecodeTokens(toks)
|
converts sentencepiece tokens to a text string
|
DecodeTokens
|
python
|
THUDM/GLM
|
data_utils/tokenization.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/tokenization.py
|
MIT
|
def DecodeIds(self, Ids, type_token=False):
    """converts ids to wordpiece tokens and joins them as a text string"""
    if type_token:
        # type ids decode through the type map instead of the text vocab
        return ' '.join(
            i.token if isinstance(i, TypeToken) else self.type_id_map[i].token
            for i in Ids)
    if isinstance(Ids, Tokenization):
        Ids = Ids.tokenization
    pieces = []
    for i in Ids:
        if i in self.command_id_map:
            pieces.append(self.command_id_map[i].token)
        elif i in self.text_tokenizer.ids_to_tokens:
            pieces.append(self.text_tokenizer.ids_to_tokens[i])
        # ids in neither map are silently dropped (matches original behavior)
    merged = []
    for piece in pieces:
        # '##' marks a continuation wordpiece: glue it onto the previous token
        if piece.startswith('##') and merged:
            merged[-1] = merged[-1] + piece[2:]
        else:
            merged.append(piece)
    return ' '.join(merged)
|
converts ids to wordpiece tokens and joins them as a text string
|
DecodeIds
|
python
|
THUDM/GLM
|
data_utils/tokenization.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/tokenization.py
|
MIT
|
def DecodeTokens(self, Tokens, type_token=False):
    """converts wordpiece tokens to a text string"""
    if type_token:
        # TypeToken instances carry their text in .token; plain strings pass through
        return ' '.join(tok.token if isinstance(tok, TypeToken) else tok for tok in Tokens)
    seq = Tokens.tokenization if isinstance(Tokens, Tokenization) else Tokens
    return ' '.join(seq)
|
converts wordpiece tokens to a text string
|
DecodeTokens
|
python
|
THUDM/GLM
|
data_utils/tokenization.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/tokenization.py
|
MIT
|
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word.

    Word is represented as a tuple of symbols (symbols being
    variable-length strings).

    Args:
        word: sequence of symbols.

    Returns:
        Set of 2-tuples of consecutive symbols. Empty for words with fewer
        than two symbols (the original raised IndexError on an empty word).
    """
    # zip pairs each symbol with its successor and naturally yields nothing
    # for words shorter than two symbols.
    return set(zip(word, word[1:]))
|
Return set of symbol pairs in a word.
Word is represented as tuple of symbols (symbols being variable-length strings).
|
get_pairs
|
python
|
THUDM/GLM
|
data_utils/tokenization_gpt2.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/tokenization_gpt2.py
|
MIT
|
def from_pretrained(cls, pretrained_model_name_or_path, cache_dir=None, *inputs, **kwargs):
    """
    Instantiate a tokenizer from a pre-trained model file.

    `pretrained_model_name_or_path` is either a shortcut name listed in
    PRETRAINED_VOCAB_ARCHIVE_MAP or a local directory containing the vocab,
    merges, and (optional) special-tokens files.

    Args:
        pretrained_model_name_or_path: shortcut name or local directory.
        cache_dir: unused; retained for backward compatibility with the
            original download-and-cache code path, which is disabled here.
        *inputs, **kwargs: forwarded to the tokenizer constructor. `max_len`
            is clamped to the pretrained model's positional-embedding limit
            when a known shortcut name is used.

    Returns:
        The instantiated tokenizer.
    """
    if pretrained_model_name_or_path in PRETRAINED_VOCAB_ARCHIVE_MAP:
        vocab_file = PRETRAINED_VOCAB_ARCHIVE_MAP[pretrained_model_name_or_path]
        merges_file = PRETRAINED_MERGES_ARCHIVE_MAP[pretrained_model_name_or_path]
        special_tokens_file = None
    else:
        vocab_file = os.path.join(pretrained_model_name_or_path, VOCAB_NAME)
        merges_file = os.path.join(pretrained_model_name_or_path, MERGES_NAME)
        special_tokens_file = os.path.join(pretrained_model_name_or_path, SPECIAL_TOKENS_NAME)
        if not os.path.exists(special_tokens_file):
            special_tokens_file = None
        else:
            logger.info("loading special tokens file {}".format(special_tokens_file))
    # NOTE(review): remote download/caching was disabled upstream (the dead
    # cached_path code was removed); resolved paths are just the raw paths.
    resolved_vocab_file = vocab_file
    resolved_merges_file = merges_file
    logger.info("loading vocabulary file {}".format(vocab_file))
    logger.info("loading merges file {}".format(merges_file))
    if pretrained_model_name_or_path in PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP:
        # if we're using a pretrained model, ensure the tokenizer won't index
        # sequences longer than the number of positional embeddings
        max_len = PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP[pretrained_model_name_or_path]
        kwargs['max_len'] = min(kwargs.get('max_len', int(1e12)), max_len)
    # Instantiate tokenizer.
    if special_tokens_file and 'special_tokens' not in kwargs:
        # one token per line; [:-1] drops the empty string after the final
        # newline. `with` fixes the file-handle leak in the original.
        with open(special_tokens_file, encoding='utf-8') as f:
            special_tokens = f.read().split('\n')[:-1]
    else:
        special_tokens = kwargs.pop('special_tokens', [])
    tokenizer = cls(resolved_vocab_file, resolved_merges_file, special_tokens=special_tokens, *inputs, **kwargs)
    return tokenizer
|
Instantiate a PreTrainedBertModel from a pre-trained model file.
Download and cache the pre-trained model file if needed.
|
from_pretrained
|
python
|
THUDM/GLM
|
data_utils/tokenization_gpt2.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/tokenization_gpt2.py
|
MIT
|
def set_special_tokens(self, special_tokens):
    """ Add a list of additional tokens to the encoder.
    The additional tokens are indexed starting from the last index of the
    current vocabulary in the order of the `special_tokens` list.
    """
    if not special_tokens:
        # clearing: reset both maps and skip logging
        self.special_tokens = {}
        self.special_tokens_decoder = {}
        return
    base = len(self.encoder)
    self.special_tokens = {tok: base + offset for offset, tok in enumerate(special_tokens)}
    self.special_tokens_decoder = {idx: tok for tok, idx in self.special_tokens.items()}
    logger.info("Special tokens {}".format(self.special_tokens))
|
Add a list of additional tokens to the encoder.
The additional tokens are indexed starting from the last index of the
current vocabulary in the order of the `special_tokens` list.
|
set_special_tokens
|
python
|
THUDM/GLM
|
data_utils/tokenization_gpt2.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/tokenization_gpt2.py
|
MIT
|
def convert_tokens_to_ids(self, tokens):
    """ Converts a sequence of tokens into ids using the vocab. """
    # Python-2 `unicode` check is guarded by the version test, so the name
    # is never evaluated on Python 3.
    is_single = isinstance(tokens, str) or (sys.version_info[0] == 2 and isinstance(tokens, unicode))
    if is_single:
        # a lone token returns a bare id, not a list
        if tokens in self.special_tokens:
            return self.special_tokens[tokens]
        return self.encoder.get(tokens, 0)
    # unknown tokens fall back to id 0
    ids = [self.special_tokens[tok] if tok in self.special_tokens else self.encoder.get(tok, 0)
           for tok in tokens]
    if len(ids) > self.max_len:
        logger.warning(
            "Token indices sequence length is longer than the specified maximum "
            " sequence length for this OpenAI GPT model ({} > {}). Running this"
            " sequence through the model will result in indexing errors".format(len(ids), self.max_len)
        )
    return ids
|
Converts a sequence of tokens into ids using the vocab.
|
convert_tokens_to_ids
|
python
|
THUDM/GLM
|
data_utils/tokenization_gpt2.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/tokenization_gpt2.py
|
MIT
|
def convert_ids_to_tokens(self, ids, skip_special_tokens=False):
    """Converts a sequence of ids in BPE tokens using the vocab."""
    out = []
    for idx in ids:
        if idx in self.special_tokens_decoder:
            # special ids are either emitted or skipped, never sent to decoder
            if skip_special_tokens:
                continue
            out.append(self.special_tokens_decoder[idx])
        else:
            out.append(self.decoder[idx])
    return out
|
Converts a sequence of ids in BPE tokens using the vocab.
|
convert_ids_to_tokens
|
python
|
THUDM/GLM
|
data_utils/tokenization_gpt2.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/tokenization_gpt2.py
|
MIT
|
def save_vocabulary(self, vocab_path):
    """Save the tokenizer vocabulary and merge files to a directory."""
    if not os.path.isdir(vocab_path):
        logger.error("Vocabulary path ({}) should be a directory".format(vocab_path))
        return
    vocab_file = os.path.join(vocab_path, VOCAB_NAME)
    merge_file = os.path.join(vocab_path, MERGES_NAME)
    special_tokens_file = os.path.join(vocab_path, SPECIAL_TOKENS_NAME)
    with open(vocab_file, 'w', encoding='utf-8') as f:
        f.write(json.dumps(self.encoder, ensure_ascii=False))
    # merges are written in rank order; a gap in ranks signals corruption
    expected = 0
    with open(merge_file, "w", encoding="utf-8") as writer:
        writer.write(u'#version: 0.2\n')
        for bpe_tokens, rank in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
            if expected != rank:
                logger.warning("Saving vocabulary to {}: BPE merge indices are not consecutive."
                               " Please check that the tokenizer is not corrupted!".format(merge_file))
                expected = rank
            writer.write(' '.join(bpe_tokens) + u'\n')
            expected += 1
    # special tokens continue numbering where the main encoder left off
    expected = len(self.encoder)
    with open(special_tokens_file, 'w', encoding='utf-8') as writer:
        for token, token_index in sorted(self.special_tokens.items(), key=lambda kv: kv[1]):
            if expected != token_index:
                logger.warning("Saving special tokens vocabulary to {}: BPE indices are not consecutive."
                               " Please check that the tokenizer is not corrupted!".format(special_tokens_file))
                expected = token_index
            writer.write(token + u'\n')
            expected += 1
    return vocab_file, merge_file, special_tokens_file
|
Save the tokenizer vocabulary and merge files to a directory.
|
save_vocabulary
|
python
|
THUDM/GLM
|
data_utils/tokenization_gpt2.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/tokenization_gpt2.py
|
MIT
|
def whitespace_tokenize(text):
    """Runs basic whitespace cleaning and splitting on a piece of text."""
    # str.split() with no argument already trims leading/trailing whitespace,
    # collapses runs, and returns [] for blank input — no explicit strip needed.
    return text.split()
|
Runs basic whitespace cleaning and splitting on a piece of text.
|
whitespace_tokenize
|
python
|
THUDM/GLM
|
data_utils/wordpiece.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/wordpiece.py
|
MIT
|
def __init__(self, vocab_file, do_lower_case=True, max_len=None, do_basic_tokenize=True,
             never_split=("[UNK]", "[SEP]", "[PAD]", "[CLS]", "[MASK]")):
    """Constructs a BertTokenizer.

    Args:
        vocab_file: Path to a one-wordpiece-per-line vocabulary file.
        do_lower_case: Whether to lower case the input; only has an effect
            when basic tokenization is enabled.
        max_len: An artificial maximum length to truncate tokenized
            sequences to; effective maximum length is always the minimum of
            this value (if specified) and the underlying BERT model's
            sequence length.
        do_basic_tokenize: Whether to do basic tokenization before wordpiece.
        never_split: List of tokens which will never be split during
            tokenization; only has an effect when basic tokenization is
            enabled.
    """
    if not os.path.isfile(vocab_file):
        raise ValueError(
            "Can't find a vocabulary file at path '{}'. To load the vocabulary from a Google pretrained "
            "model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`".format(vocab_file))
    self.vocab = load_vocab(vocab_file)
    # inverse mapping (id -> token), kept in vocab insertion order
    self.ids_to_tokens = collections.OrderedDict(
        (idx, tok) for tok, idx in self.vocab.items())
    self.do_basic_tokenize = do_basic_tokenize
    # NOTE: basic_tokenizer is only created when requested, matching the
    # original attribute layout.
    if do_basic_tokenize:
        self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case,
                                              never_split=never_split)
    self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)
    self.max_len = int(1e12) if max_len is None else max_len
|
Constructs a BertTokenizer.
Args:
vocab_file: Path to a one-wordpiece-per-line vocabulary file
do_lower_case: Whether to lower case the input
Only has an effect when do_wordpiece_only=False
do_basic_tokenize: Whether to do basic tokenization before wordpiece.
max_len: An artificial maximum length to truncate tokenized sequences to;
Effective maximum length is always the minimum of this
value (if specified) and the underlying BERT model's
sequence length.
never_split: List of tokens which will never be split during tokenization.
Only has an effect when do_wordpiece_only=False
|
__init__
|
python
|
THUDM/GLM
|
data_utils/wordpiece.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/wordpiece.py
|
MIT
|
def convert_tokens_to_ids(self, tokens):
    """Converts a sequence of tokens into ids using the vocab."""
    # KeyError on out-of-vocabulary tokens, same as the explicit loop
    ids = [self.vocab[tok] for tok in tokens]
    if len(ids) > self.max_len:
        logger.warning(
            "Token indices sequence length is longer than the specified maximum "
            " sequence length for this BERT model ({} > {}). Running this"
            " sequence through BERT will result in indexing errors".format(len(ids), self.max_len)
        )
    return ids
|
Converts a sequence of tokens into ids using the vocab.
|
convert_tokens_to_ids
|
python
|
THUDM/GLM
|
data_utils/wordpiece.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/wordpiece.py
|
MIT
|
def convert_ids_to_tokens(self, ids):
    """Converts a sequence of ids in wordpiece tokens using the vocab."""
    # KeyError on unknown ids, same as the original explicit loop
    return [self.ids_to_tokens[idx] for idx in ids]
|
Converts a sequence of ids in wordpiece tokens using the vocab.
|
convert_ids_to_tokens
|
python
|
THUDM/GLM
|
data_utils/wordpiece.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/wordpiece.py
|
MIT
|
def __init__(self,
             do_lower_case=True,
             never_split=("[UNK]", "[SEP]", "[PAD]", "[CLS]", "[MASK]")):
    """Constructs a BasicTokenizer.

    Args:
        do_lower_case: Whether to lower case the input.
        never_split: tokens that must survive tokenization intact.
    """
    self.never_split = never_split
    self.do_lower_case = do_lower_case
|
Constructs a BasicTokenizer.
Args:
do_lower_case: Whether to lower case the input.
|
__init__
|
python
|
THUDM/GLM
|
data_utils/wordpiece.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/wordpiece.py
|
MIT
|
def _run_strip_accents(self, text):
    """Strips accents from a piece of text."""
    # NFD decomposition splits accented characters into base char plus
    # combining marks; dropping category 'Mn' (nonspacing marks) removes
    # the accents while keeping the base characters.
    decomposed = unicodedata.normalize("NFD", text)
    return "".join(ch for ch in decomposed if unicodedata.category(ch) != "Mn")
|
Strips accents from a piece of text.
|
_run_strip_accents
|
python
|
THUDM/GLM
|
data_utils/wordpiece.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/wordpiece.py
|
MIT
|
def _run_split_on_punc(self, text):
    """Splits punctuation on a piece of text."""
    # protected tokens pass through whole
    if text in self.never_split:
        return [text]
    pieces = []
    extending = False  # True while the last piece is a non-punct run we may extend
    for ch in text:
        if _is_punctuation(ch):
            # every punctuation character becomes its own piece
            pieces.append(ch)
            extending = False
        elif extending:
            pieces[-1] += ch
        else:
            pieces.append(ch)
            extending = True
    return pieces
|
Splits punctuation on a piece of text.
|
_run_split_on_punc
|
python
|
THUDM/GLM
|
data_utils/wordpiece.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/wordpiece.py
|
MIT
|
def _tokenize_chinese_chars(self, text):
    """Adds whitespace around any CJK character."""
    out = []
    for ch in text:
        # pad CJK ideographs with spaces so each becomes its own token
        if self._is_chinese_char(ord(ch)):
            out.extend((" ", ch, " "))
        else:
            out.append(ch)
    return "".join(out)
|
Adds whitespace around any CJK character.
|
_tokenize_chinese_chars
|
python
|
THUDM/GLM
|
data_utils/wordpiece.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/wordpiece.py
|
MIT
|
def _is_chinese_char(self, cp):
    """Checks whether CP is the codepoint of a CJK character."""
    # This defines a "chinese character" as anything in the CJK Unicode block:
    # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
    #
    # Note that the CJK Unicode block is NOT all Japanese and Korean
    # characters, despite its name. Modern Korean Hangul and Japanese
    # Hiragana/Katakana live in other blocks; they are used to write
    # space-separated words and so are handled like all other languages.
    cjk_ranges = (
        (0x4E00, 0x9FFF),
        (0x3400, 0x4DBF),
        (0x20000, 0x2A6DF),
        (0x2A700, 0x2B73F),
        (0x2B740, 0x2B81F),
        (0x2B820, 0x2CEAF),
        (0xF900, 0xFAFF),
        (0x2F800, 0x2FA1F),
    )
    return any(lo <= cp <= hi for lo, hi in cjk_ranges)
|
Checks whether CP is the codepoint of a CJK character.
|
_is_chinese_char
|
python
|
THUDM/GLM
|
data_utils/wordpiece.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/wordpiece.py
|
MIT
|
def _clean_text(self, text):
    """Performs invalid character removal and whitespace cleanup on text."""
    cleaned = []
    for ch in text:
        code = ord(ch)
        # drop NUL, the Unicode replacement character, and control characters
        if code == 0 or code == 0xfffd or _is_control(ch):
            continue
        # normalize every whitespace character to a plain space
        cleaned.append(" " if _is_whitespace(ch) else ch)
    return "".join(cleaned)
|
Performs invalid character removal and whitespace cleanup on text.
|
_clean_text
|
python
|
THUDM/GLM
|
data_utils/wordpiece.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/wordpiece.py
|
MIT
|
def tokenize(self, text):
    """Tokenizes a piece of text into its word pieces.

    This uses a greedy longest-match-first algorithm to perform tokenization
    using the given vocabulary.
    For example:
        input = "unaffable"
        output = ["un", "##aff", "##able"]

    Args:
        text: A single token or whitespace separated tokens. This should have
            already been passed through `BasicTokenizer`.
    Returns:
        A list of wordpiece tokens.
    """
    pieces = []
    for word in whitespace_tokenize(text):
        symbols = list(word)
        if len(symbols) > self.max_input_chars_per_word:
            # overly long words map straight to [UNK]
            pieces.append(self.unk_token)
            continue
        word_pieces = []
        begin = 0
        while begin < len(symbols):
            # greedily find the longest vocab entry starting at `begin`;
            # non-initial pieces carry the '##' continuation prefix
            finish = len(symbols)
            match = None
            while begin < finish:
                candidate = "".join(symbols[begin:finish])
                if begin > 0:
                    candidate = "##" + candidate
                if candidate in self.vocab:
                    match = candidate
                    break
                finish -= 1
            if match is None:
                # no prefix matched at this position: whole word becomes [UNK]
                word_pieces = None
                break
            word_pieces.append(match)
            begin = finish
        if word_pieces is None:
            pieces.append(self.unk_token)
        else:
            pieces.extend(word_pieces)
    return pieces
|
Tokenizes a piece of text into its word pieces.
This uses a greedy longest-match-first algorithm to perform tokenization
using the given vocabulary.
For example:
input = "unaffable"
output = ["un", "##aff", "##able"]
Args:
text: A single token or whitespace separated tokens. This should have
already been passed through `BasicTokenizer`.
Returns:
A list of wordpiece tokens.
|
tokenize
|
python
|
THUDM/GLM
|
data_utils/wordpiece.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/wordpiece.py
|
MIT
|
def _is_whitespace(char):
    """Checks whether `chars` is a whitespace character."""
    # \t, \n, and \r are technically control characters but are treated as
    # whitespace since they are generally considered as such.
    if char in (" ", "\t", "\n", "\r"):
        return True
    return unicodedata.category(char) == "Zs"
|
Checks whether `chars` is a whitespace character.
|
_is_whitespace
|
python
|
THUDM/GLM
|
data_utils/wordpiece.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/wordpiece.py
|
MIT
|
def _is_control(char):
    """Checks whether `chars` is a control character."""
    # tab/newline/carriage-return are counted as whitespace here, not control
    if char in ("\t", "\n", "\r"):
        return False
    return unicodedata.category(char).startswith("C")
|
Checks whether `chars` is a control character.
|
_is_control
|
python
|
THUDM/GLM
|
data_utils/wordpiece.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/wordpiece.py
|
MIT
|
def _is_punctuation(char):
    """Checks whether `chars` is a punctuation character."""
    cp = ord(char)
    # Treat all non-letter/number ASCII as punctuation: characters such as
    # "^", "$", and "`" are not in the Unicode Punctuation class but are
    # treated as punctuation anyway, for consistency.
    if 33 <= cp <= 47 or 58 <= cp <= 64 or 91 <= cp <= 96 or 123 <= cp <= 126:
        return True
    return unicodedata.category(char).startswith("P")
|
Checks whether `chars` is a punctuation character.
|
_is_punctuation
|
python
|
THUDM/GLM
|
data_utils/wordpiece.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/wordpiece.py
|
MIT
|
def get_dataset(name, tokenizer, pre_tokenize, data_parallel_rank, loader_scatter=None, no_lazy_loader=False,
                half_lazy_loader=False):
    """gets dataset object based on keyword args and file at `path`

    Looks up `name` in corpora.NAMED_CORPORA and returns a PromptDataset
    (PromptReader corpora) or KeyDataset (KeyReader corpora) backed by
    lazy-loaded on-disk caches. On first use, distributed rank 0 writes the
    cache (and optional scatter shards) while other ranks poll the
    filesystem until the cache appears.

    Args:
        name: corpus key; raises NotImplementedError if unsupported.
        tokenizer: tokenizer forwarded to readers and dataset wrappers.
        pre_tokenize: store token arrays in the cache instead of raw text.
        data_parallel_rank: selects which scatter shard this process reads.
        loader_scatter: number of shuffled shards to scatter the corpus
            into, or None to disable scattering.
        no_lazy_loader: passed to LazyLoader as `load_memory`.
        half_lazy_loader: passed to LazyLoader as `half_load`.
            NOTE(review): semantics defined in LazyLoader — confirm there.
    """
    global_rank = torch.distributed.get_rank()
    if not supported_corpus(name):
        raise NotImplementedError('dataset %s is not supported' % name)
    dataset = corpora.NAMED_CORPORA[name]
    path = dataset.PATH
    if issubclass(dataset, corpora.PromptReader):
        # Prompt-style corpora cache two parallel streams: 'prompt' and 'text'.
        # Skip cache creation if either the plain lazy cache or a complete
        # scatter cache already exists.
        if not (exists_lazy(path, data_type='prompt') and exists_lazy(path, data_type='text')) and not (
                loader_scatter is not None and exists_scatter(path, data_type='prompt',
                                                              scatter_num=loader_scatter) and exists_scatter(path,
                                                                                                             data_type='text',
                                                                                                             scatter_num=loader_scatter)):
            # create cached version of dataset for lazy loading if it doesn't exist
            if global_rank == 0:
                print(f"Creating lazy loader for dataset {name}")
                prompt_writer = LazyWriter(path, data_type='prompt', is_array=pre_tokenize)
                text_writer = LazyWriter(path, data_type='text', is_array=pre_tokenize)
                writers = {'prompt': prompt_writer, 'text': text_writer}
                reader = dataset(writers=writers, tokenizer=tokenizer, tokenize=pre_tokenize)
                reader.process()
                prompt_writer.close()
                text_writer.close()
            else:
                # non-zero ranks poll until rank 0 finishes writing the cache
                while not os.path.exists(LazyWriter.get_len_path(path, data_type='prompt')):
                    time.sleep(1)
        # pre-tokenized caches store arrays; convert them back to plain lists
        map_fn = (lambda x: x.tolist()) if pre_tokenize else None
        if loader_scatter is not None:
            # Scatter mode: split the corpus into `loader_scatter` shuffled
            # shards so each data-parallel group reads a disjoint slice.
            if not (exists_scatter(path, data_type='prompt', scatter_num=loader_scatter) and exists_scatter(path,
                                                                                                            data_type='text',
                                                                                                            scatter_num=loader_scatter)):
                if global_rank == 0:
                    print(f"Creating scatter loader for dataset {name}")
                    prompts = LazyLoader(path, data_type='prompt', map_fn=map_fn, mem_map=True,
                                         is_array=pre_tokenize)
                    texts = LazyLoader(path, data_type='text', map_fn=map_fn, mem_map=True,
                                       is_array=pre_tokenize)
                    indices = list(range(len(texts)))
                    random.shuffle(indices)
                    # ceiling division so every document lands in some shard
                    segment_length = (len(indices) - 1) // loader_scatter + 1
                    for i in range(loader_scatter):
                        scatter_path = get_scatter_path(path, scatter_rank=i)
                        prompt_writer = LazyWriter(scatter_path, data_type='prompt', is_array=pre_tokenize)
                        text_writer = LazyWriter(scatter_path, data_type='text', is_array=pre_tokenize)
                        for idx in indices[i * segment_length: (i + 1) * segment_length]:
                            prompt_writer.write(prompts[idx])
                            text_writer.write(texts[idx])
                        prompt_writer.close()
                        text_writer.close()
                else:
                    # wait until rank 0 has written every scatter shard
                    while not (
                            exists_scatter(path, data_type='prompt', scatter_num=loader_scatter) and exists_scatter(
                            path, data_type='text', scatter_num=loader_scatter)):
                        time.sleep(1)
            # each process reads the shard assigned by its data-parallel rank
            scatter_path = get_scatter_path(path, scatter_rank=data_parallel_rank % loader_scatter)
            print(f"Rank {global_rank} is using scatter from {scatter_path}")
            prompts = LazyLoader(scatter_path, data_type='prompt', map_fn=map_fn, mem_map=True,
                                 is_array=pre_tokenize, load_memory=no_lazy_loader, half_load=half_lazy_loader)
            texts = LazyLoader(scatter_path, data_type='text', map_fn=map_fn, mem_map=True,
                               is_array=pre_tokenize, load_memory=no_lazy_loader, half_load=half_lazy_loader)
        else:
            prompts = LazyLoader(path, data_type='prompt', map_fn=map_fn, mem_map=True,
                                 is_array=pre_tokenize, load_memory=no_lazy_loader, half_load=half_lazy_loader)
            texts = LazyLoader(path, data_type='text', map_fn=map_fn, mem_map=True,
                               is_array=pre_tokenize, load_memory=no_lazy_loader, half_load=half_lazy_loader)
        text = corpora.PromptDataset(prompt_loader=prompts, text_loader=texts, tokenizer=tokenizer,
                                     to_tokenize=not pre_tokenize)
        if loader_scatter is None:
            if global_rank == 0:
                # log a handful of sample documents for sanity checking
                print(f"Create dataset {name} with {len(text)} documents")
                for i in range(10):
                    rand_id = i if i < 5 else random.randrange(len(text))
                    sample_tokens = text[rand_id]['tokens'][:1024]
                    print(sample_tokens)
                    print(tokenizer.DecodeIds(sample_tokens).encode('utf-8'))
        else:
            # one process per shard prints its samples, serialized by barriers
            for scatter_id in range(loader_scatter):
                if data_parallel_rank % loader_scatter == scatter_id and data_parallel_rank // loader_scatter == 0:
                    print(f"Create dataset {name} at scatter {scatter_id} with {len(text)} documents")
                    for i in range(10):
                        sample_tokens = text[i]['tokens'][:1024]
                        print(sample_tokens)
                        print(tokenizer.DecodeIds(sample_tokens))
                torch.distributed.barrier()
        return text
    elif issubclass(dataset, corpora.KeyReader):
        # Key-style corpora cache 'text' and 'mask' streams instead.
        if not (exists_lazy(path, data_type='text') and exists_lazy(path, data_type='mask')):
            # create cached version of dataset for lazy loading if it doesn't exist
            if global_rank == 0:
                text_writer = LazyWriter(path, data_type='text', is_array=pre_tokenize)
                mask_writer = LazyWriter(path, data_type='mask', is_array=True)
                writers = {'mask': mask_writer, 'text': text_writer}
                dataset(writers=writers, tokenizer=tokenizer, tokenize=pre_tokenize)
                mask_writer.close()
                text_writer.close()
            else:
                # non-zero ranks poll until rank 0 finishes writing the cache
                while not os.path.exists(LazyWriter.get_len_path(path, data_type='mask')):
                    time.sleep(1)
        map_fn = (lambda x: x.tolist()) if pre_tokenize else None
        masks = LazyLoader(path, data_type='mask', map_fn=map_fn, mem_map=True, is_array=True)
        texts = LazyLoader(path, data_type='text', map_fn=map_fn, mem_map=True, is_array=pre_tokenize)
        text = corpora.KeyDataset(mask_loader=masks, text_loader=texts, tokenizer=tokenizer,
                                  to_tokenize=not pre_tokenize)
        return text
|
gets dataset object based on keyword args and file at `path`
|
get_dataset
|
python
|
THUDM/GLM
|
data_utils/__init__.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/__init__.py
|
MIT
|
def make_dataset(path, seq_length, mem_length, shuffle=True, split=None, tokenizer=None,
                 sample_one_document=False, pre_tokenize=False, ds_type='', save_splits=None, load_splits=None,
                 save_test_data=None, no_lazy_loader=False, loader_scatter=None, data_parallel_rank=None,
                 filter_english=False, non_sentence_start=0.0, half_lazy_loader=False, **kwargs):
    """function to create datasets+tokenizers for common options

    Loads one or more named corpora (concatenating when `path` is a list),
    optionally splits them into train/val/test per `split`, and wraps each
    split in the task dataset selected by `ds_type` ('bert', 'gpt-xl',
    'gpt2', 'block'; any other value leaves the dataset unwrapped).
    When `save_test_data` is set, rank 0 additionally writes the decoded
    test split to that file, one document per line.
    """
    if split is None:
        split = [1.]
    # get one or multiple datasets and concatenate
    if isinstance(path, str):
        ds = get_dataset(path, tokenizer=tokenizer, pre_tokenize=pre_tokenize, no_lazy_loader=no_lazy_loader,
                         loader_scatter=loader_scatter, data_parallel_rank=data_parallel_rank,
                         half_lazy_loader=half_lazy_loader)
    else:
        ds = [get_dataset(p, tokenizer=tokenizer, pre_tokenize=pre_tokenize, no_lazy_loader=no_lazy_loader,
                          loader_scatter=loader_scatter, data_parallel_rank=data_parallel_rank,
                          half_lazy_loader=half_lazy_loader) for p in path]
        ds = ConcatDataset(ds)
    # Split dataset into train/val/test (and wrap bert dataset)
    def wrap_dataset(dataset):
        # wraps a raw document dataset in the task-specific class for ds_type
        if ds_type.lower() == 'bert':
            presplit_sentences = kwargs['presplit_sentences'] if 'presplit_sentences' in kwargs else False
            dataset = BertSentencepairDataset(dataset, max_seq_len=seq_length, presplit_sentences=presplit_sentences)
        elif ds_type.lower() == 'gpt-xl':
            # XL-style training requires pre-tokenized data
            assert pre_tokenize
            dataset = XLDataset(dataset, tokenizer, max_seq_len=seq_length, mem_len=mem_length,
                                sample_across_doc=not sample_one_document)
        elif ds_type.lower() == 'gpt2':
            dataset = GPT2Dataset(dataset, tokenizer, max_seq_len=seq_length, sample_across_doc=not sample_one_document)
        elif ds_type.lower() == 'block':
            dataset = BlockDataset(dataset, tokenizer, max_seq_len=seq_length,
                                   sample_across_doc=not sample_one_document, filter_english=filter_english,
                                   non_sentence_start=non_sentence_start)
        return dataset
    if should_split(split):
        ds = split_ds(ds, split, shuffle=shuffle, save_splits=save_splits, load_splits=load_splits)
        if save_test_data is not None and torch.distributed.get_rank() == 0:
            # dump the decoded test split (last element) to disk, rank 0 only
            test_ds = ds[-1]
            with open(save_test_data, "w", encoding='utf-8') as output:
                for data in test_ds:
                    text = data['tokens']
                    text = tokenizer.DecodeIds(text)
                    output.write(text)
                    output.write("\n")
            print(f"Write test data to {save_test_data}")
        ds = [wrap_dataset(d) if d is not None else None for d in ds]
    else:
        ds = wrap_dataset(ds)
    return ds
|
function to create datasets+tokenizers for common options
|
make_dataset
|
python
|
THUDM/GLM
|
data_utils/__init__.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/__init__.py
|
MIT
|
def conversion_helper(val, conversion):
    """Apply `conversion` to `val`, descending recursively into nested tuple/list structures.

    Returns a structure of the same shape (tuples stay tuples, lists stay lists)
    with `conversion` applied to every leaf value.
    """
    if isinstance(val, (tuple, list)):
        converted = [conversion_helper(item, conversion) for item in val]
        return tuple(converted) if isinstance(val, tuple) else converted
    return conversion(val)
|
Apply conversion to val. Recursively apply conversion if `val` is a nested tuple/list structure.
|
conversion_helper
|
python
|
THUDM/GLM
|
fp16/fp16.py
|
https://github.com/THUDM/GLM/blob/master/fp16/fp16.py
|
MIT
|
def clip_master_grads(self, max_norm, norm_type=2):
    """
    Clips fp32 master gradients via ``torch.nn.utils.clip_grad_norm``.
    Args:
        max_norm (float or int): max norm of the gradients
        norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for
            infinity norm.
    Returns:
        Total norm of the current fp32 gradients (viewed as a single vector).
    .. warning::
        Returns -1 if the most recently computed fp16 gradients overflowed (that is, if ``self.overflow`` is ``True``).
    """
    # Gradients produced by an overflowed step are invalid; signal with -1.
    if self.overflow:
        return -1
    # Gather every master (fp32) parameter across all optimizer groups.
    master_params = [p for group in self.optimizer.param_groups for p in group['params']]
    return self.clip_grad_norm(master_params, max_norm, norm_type)
|
Clips fp32 master gradients via ``torch.nn.utils.clip_grad_norm``.
Args:
max_norm (float or int): max norm of the gradients
norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for
infinity norm.
Returns:
Total norm of the current fp32 gradients (viewed as a single vector).
.. warning::
Returns -1 if the most recently computed fp16 gradients overflowed (that is, if ``self.overflow`` is ``True``).
|
clip_master_grads
|
python
|
THUDM/GLM
|
fp16/fp16.py
|
https://github.com/THUDM/GLM/blob/master/fp16/fp16.py
|
MIT
|
def state_dict(self):
    """
    Returns a dict containing the current state of this :class:`FP16_Optimizer` instance.
    This dict contains attributes of :class:`FP16_Optimizer`, as well as the state_dict
    of the contained Pytorch optimizer.
    Example::
        checkpoint = {}
        checkpoint['model'] = model.state_dict()
        checkpoint['optimizer'] = optimizer.state_dict()
        torch.save(checkpoint, "saved.pth")
    """
    # fp32_from_fp16 is saved explicitly so that reloading does not lose
    # precision by re-deriving masters from the fp16 model weights.
    return {
        'loss_scaler': self.loss_scaler,
        'dynamic_loss_scale': self.dynamic_loss_scale,
        'overflow': self.overflow,
        'first_closure_call_this_step': self.first_closure_call_this_step,
        'optimizer_state_dict': self.optimizer.state_dict(),
        'fp32_from_fp16': self.fp32_from_fp16_groups,
    }
|
Returns a dict containing the current state of this :class:`FP16_Optimizer` instance.
This dict contains attributes of :class:`FP16_Optimizer`, as well as the state_dict
of the contained Pytorch optimizer.
Example::
checkpoint = {}
checkpoint['model'] = model.state_dict()
checkpoint['optimizer'] = optimizer.state_dict()
torch.save(checkpoint, "saved.pth")
|
state_dict
|
python
|
THUDM/GLM
|
fp16/fp16.py
|
https://github.com/THUDM/GLM/blob/master/fp16/fp16.py
|
MIT
|
def load_state_dict(self, state_dict):
    """
    Loads a state_dict created by an earlier call to state_dict().
    If ``fp16_optimizer_instance`` was constructed from some ``init_optimizer``,
    whose parameters in turn came from ``model``, it is expected that the user
    will call ``model.load_state_dict()`` before
    ``fp16_optimizer_instance.load_state_dict()`` is called.
    Example::
        model = torch.nn.Linear(D_in, D_out).cuda().half()
        optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
        optimizer = FP16_Optimizer(optimizer, static_loss_scale = 128.0)
        ...
        checkpoint = torch.load("saved.pth")
        model.load_state_dict(checkpoint['model'])
        optimizer.load_state_dict(checkpoint['optimizer'])
    """
    # Restoring the optimizer before the model should also be fine in practice.
    for attr in ('loss_scaler', 'dynamic_loss_scale', 'overflow',
                 'first_closure_call_this_step'):
        setattr(self, attr, state_dict[attr])
    self.optimizer.load_state_dict(state_dict['optimizer_state_dict'])
    # The inner optimizer's references and buffers are now current, but the
    # fp32 master copies of the fp16 params are still stale. Two options:
    #   1. Refresh masters from the model's fp16 params (loses precision).
    #   2. Restore the separately-saved fp32 masters (chosen here).
    # As long as this FP16_Optimizer was constructed the same way as the one
    # whose state we are loading, the master params line up one-to-one, so a
    # simple element-wise copy_() is sufficient.
    for master_group, saved_group in zip(self.fp32_from_fp16_groups, state_dict['fp32_from_fp16']):
        for master, saved in zip(master_group, saved_group):
            master.data.copy_(saved.data)
|
Loads a state_dict created by an earlier call to state_dict().
If ``fp16_optimizer_instance`` was constructed from some ``init_optimizer``,
whose parameters in turn came from ``model``, it is expected that the user
will call ``model.load_state_dict()`` before
``fp16_optimizer_instance.load_state_dict()`` is called.
Example::
model = torch.nn.Linear(D_in, D_out).cuda().half()
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
optimizer = FP16_Optimizer(optimizer, static_loss_scale = 128.0)
...
checkpoint = torch.load("saved.pth")
model.load_state_dict(checkpoint['model'])
optimizer.load_state_dict(checkpoint['optimizer'])
|
load_state_dict
|
python
|
THUDM/GLM
|
fp16/fp16.py
|
https://github.com/THUDM/GLM/blob/master/fp16/fp16.py
|
MIT
|
def step(self, closure=None):  # could add clip option.
    """
    Update the fp32 master params via the wrapped optimizer, then copy the
    result back into the fp16 model params.

    With no closure, call after ``fp16_optimizer_obj.backward(loss)``.
    With a closure, :attr:`step` may be called without a prior
    :attr:`backward(loss)`; the closure must call :attr:`zero_grad()`,
    compute the loss, call :attr:`backward(loss)`, and return the loss —
    exactly as with an ordinary Pytorch optimizer, except that
    ``loss.backward()`` is replaced by ``fp16_optimizer_obj.backward(loss)``.

    Args:
        closure (optional): Closure forwarded to the underlying optimizer.

    .. warning::
        Currently, calling :attr:`step` with a closure is not compatible with
        dynamic loss scaling.
    """
    attempted_scale = self.loss_scaler.loss_scale
    self._update_scale(self.overflow)
    # An overflowed iteration is discarded entirely: no optimizer step,
    # no master->model copy.
    if self.overflow:
        self.maybe_print("OVERFLOW! Skipping step. Attempted loss scale: {}, reducing to {}"
                         .format(attempted_scale, self.loss_scale))
        return
    retval = self._step_with_closure(closure) if closure is not None else self.optimizer.step()
    self._master_params_to_model_params()
    return retval
|
If no closure is supplied, :attr:`step` should be called after
``fp16_optimizer_obj.backward(loss)``.
:attr:`step` updates the fp32 master copy of parameters using the optimizer supplied to
:class:`FP16_Optimizer`'s constructor, then copies the updated fp32 params into the fp16 params
originally referenced by :class:`FP16_Optimizer`'s constructor, so the user may immediately run
another forward pass using their model.
If a closure is supplied, :attr:`step` may be called without a prior call to
:attr:`backward(loss)`.
This control flow is identical to `ordinary Pytorch optimizer use`_ with closures.
However, the user should take care that any ``loss.backward()`` call within the closure
has been replaced by ``fp16_optimizer_obj.backward(loss)``.
Args:
closure (optional): Closure that will be supplied to the underlying optimizer originally passed to :class:`FP16_Optimizer`'s constructor. closure should call :attr:`zero_grad()` on the :class:`FP16_Optimizer` object, compute the loss, call :attr:`backward(loss)`, and return the loss.
Example with closure::
# optimizer is assumed to be an FP16_Optimizer object, previously constructed from an
# existing pytorch optimizer.
for input, target in dataset:
def closure():
optimizer.zero_grad()
output = model(input)
loss = loss_fn(output, target)
# loss.backward() becomes:
optimizer.backward(loss)
return loss
optimizer.step(closure)
.. warning::
Currently, calling :attr:`step` with a closure is not compatible with dynamic loss scaling.
.. _`ordinary Pytorch optimizer use`:
http://pytorch.org/docs/master/optim.html#optimizer-step-closure
|
step
|
python
|
THUDM/GLM
|
fp16/fp16.py
|
https://github.com/THUDM/GLM/blob/master/fp16/fp16.py
|
MIT
|
def backward(self, loss, update_master_grads=True, retain_graph=False):
    """
    Scaled backward pass. Conceptually:

    1. fp32_loss = loss.float()  (fp32 conversion adds some overflow safety;
       for maximum safety compute the loss criterion itself in fp32)
    2. scaled_loss = fp32_loss * loss_scale
    3. scaled_loss.backward(), accumulating scaled grads into the model
       leaves' ``.grad`` attributes (fp16, fp32, or mixed)
    4. fp16 grads are copied to the fp32 master params' ``.grad`` attributes
    5. master grads are divided by loss_scale

    Afterwards the master params have fresh gradients and :attr:`step` may be
    called.

    .. warning::
        The model leaves' ``.grad`` attributes may be scaled after this call
        and should not be regarded as valid; inspect master gradients via
        :attr:`inspect_master_grad_data()` instead.

    Args:
        loss: The loss output by the user's model; float or half.
        update_master_grads (bool, optional, default=True): Copy fp16 grads to
            fp32 grads on this call. Pass False to delay the copy when calling
            :attr:`backward` on several losses in one iteration (then call
            :attr:`update_master_grads` yourself before :attr:`step`).
        retain_graph (bool, optional, default=False): Forwarded to the internal
            ``loss.backward`` call.

    Example::
        # Ordinary operation:
        optimizer.backward(loss)
        # Efficient handling of multiple losses (single fp16->fp32 grad copy):
        optimizer.backward(loss1, update_master_grads=False)
        optimizer.backward(loss2, update_master_grads=False)
        optimizer.update_master_grads()
    """
    # Possible future refinement: probe loss scales with retain_graph=True
    # backward passes, then tear the graph down with a final dummy pass;
    # this would avoid discarding overflowed iterations.
    self.loss_scaler.backward(loss.float(), retain_graph=retain_graph)
    if update_master_grads:
        self.update_master_grads()
|
:attr:`backward` performs the following conceptual steps:
1. fp32_loss = loss.float() (see first Note below)
2. scaled_loss = fp32_loss*loss_scale
3. scaled_loss.backward(), which accumulates scaled gradients into the ``.grad`` attributes of the model's leaves (which may be fp16, fp32, or a mixture, depending how your model was defined).
4. fp16 grads are then copied to the master params' ``.grad`` attributes (see second Note), which are guaranteed to be fp32.
5. Finally, master grads are divided by loss_scale.
In this way, after :attr:`backward`, the master params have fresh gradients,
and :attr:`step` may be called.
.. note::
:attr:`backward` internally converts the loss to fp32 before applying the loss scale.
This provides some additional safety against overflow if the user has supplied an
fp16 loss value.
However, for maximum overflow safety, the user should
compute the loss criterion (MSE, cross entropy, etc) in fp32 before supplying it to
:attr:`backward`.
.. warning::
The gradients found in a model's leaves after the call to
:attr:`backward` should not be regarded as valid in general,
because it's possible
they have been scaled (and in the case of dynamic loss scaling,
the scale factor may change over time).
If the user wants to inspect gradients after a call to :attr:`backward`,
only the master gradients should be regarded as valid. These can be retrieved via
:attr:`inspect_master_grad_data()`.
Args:
loss: The loss output by the user's model. loss may be either float or half (but see first Note above).
update_master_grads (bool, optional, default=True): Option to copy fp16 grads to fp32 grads on this call. By setting this to False, the user can delay the copy, which is useful to eliminate redundant fp16->fp32 grad copies if :attr:`backward` is being called on multiple losses in one iteration. If set to False, the user becomes responsible for calling :attr:`update_master_grads` before calling :attr:`step`.
retain_graph (bool, optional, default=False): Forwards the usual ``retain_graph=True`` option to the internal call to ``loss.backward``. If ``retain_graph`` is being used to accumulate gradient values from multiple backward passes before calling ``optimizer.step``, passing ``update_master_grads=False`` is also recommended (see Example below).
Example::
# Ordinary operation:
optimizer.backward(loss)
# Naive operation with multiple losses (technically valid, but less efficient):
# fp32 grads will be correct after the second call, but
# the first call incurs an unnecessary fp16->fp32 grad copy.
optimizer.backward(loss1)
optimizer.backward(loss2)
# More efficient way to handle multiple losses:
# The fp16->fp32 grad copy is delayed until fp16 grads from all
# losses have been accumulated.
optimizer.backward(loss1, update_master_grads=False)
optimizer.backward(loss2, update_master_grads=False)
optimizer.update_master_grads()
|
backward
|
python
|
THUDM/GLM
|
fp16/fp16.py
|
https://github.com/THUDM/GLM/blob/master/fp16/fp16.py
|
MIT
|
def update_master_grads(self):
    """
    Copy the ``.grad`` attribute from stored references to fp16 parameters to
    the ``.grad`` attribute of the fp32 master parameters that are directly
    updated by the optimizer. :attr:`update_master_grads` only needs to be called if
    ``fp16_optimizer_obj.backward`` was called with ``update_master_grads=False``.
    """
    # With dynamic scaling, detect overflow first; _check_overflow may flip
    # self.overflow, in which case the (invalid) grads are not propagated.
    if self.dynamic_loss_scale:
        self._check_overflow()
    if not self.overflow:
        self._model_grads_to_master_grads()
        self._downscale_master()
|
Copy the ``.grad`` attribute from stored references to fp16 parameters to
the ``.grad`` attribute of the fp32 master parameters that are directly
updated by the optimizer. :attr:`update_master_grads` only needs to be called if
``fp16_optimizer_obj.backward`` was called with ``update_master_grads=False``.
|
update_master_grads
|
python
|
THUDM/GLM
|
fp16/fp16.py
|
https://github.com/THUDM/GLM/blob/master/fp16/fp16.py
|
MIT
|
def inspect_master_grad_data(self):
    """
    When running with :class:`FP16_Optimizer`,
    ``.grad`` attributes of a model's fp16 leaves should not be
    regarded as truthful, because they might be scaled.
    After a call to :attr:`fp16_optimizer_obj.backward(loss)`, if no overflow was encountered,
    the fp32 master params' ``.grad``
    attributes will contain valid gradients properly divided by the loss scale. However,
    because :class:`FP16_Optimizer` flattens some parameters, accessing them may be
    nonintuitive. :attr:`inspect_master_grad_data`
    allows those gradients to be viewed with shapes corresponding to their associated model leaves.
    Returns:
        List of lists (one list for each parameter group). The list for each parameter group
        is a list of the ``.grad.data`` attributes of the fp32 master params belonging to that group.
    """
    if self.overflow:
        print("Warning: calling FP16_Optimizer.inspect_master_grad_data while in an overflow state. "
              "Gradients are currently invalid (may be inf, nan, or stale). Returning None.")
        return None
    # The optimizer only holds references to the master params; mirror its
    # group structure, substituting None where a param has no gradient.
    return [[param.grad.data if param.grad is not None else None
             for param in group['params']]
            for group in self.optimizer.param_groups]
|
When running with :class:`FP16_Optimizer`,
``.grad`` attributes of a model's fp16 leaves should not be
regarded as truthful, because they might be scaled.
After a call to :attr:`fp16_optimizer_obj.backward(loss)`, if no overflow was encountered,
the fp32 master params' ``.grad``
attributes will contain valid gradients properly divided by the loss scale. However,
because :class:`FP16_Optimizer` flattens some parameters, accessing them may be
nonintuitive. :attr:`inspect_master_grad_data`
allows those gradients to be viewed with shapes corresponding to their associated model leaves.
Returns:
List of lists (one list for each parameter group). The list for each parameter group
is a list of the ``.grad.data`` attributes of the fp32 master params belonging to that group.
|
inspect_master_grad_data
|
python
|
THUDM/GLM
|
fp16/fp16.py
|
https://github.com/THUDM/GLM/blob/master/fp16/fp16.py
|
MIT
|
def BN_convert_float(module):
    """
    Utility function for network_to_half().
    Retained for legacy purposes.

    Recursively converts affine batch-norm submodules back to fp32 (batch
    norm statistics are numerically fragile in half precision); all other
    modules are left untouched. Returns the (mutated) module.
    """
    is_affine_bn = (isinstance(module, torch.nn.modules.batchnorm._BatchNorm)
                    and module.affine is True)
    if is_affine_bn:
        module.float()
    for submodule in module.children():
        BN_convert_float(submodule)
    return module
|
Utility function for network_to_half().
Retained for legacy purposes.
|
BN_convert_float
|
python
|
THUDM/GLM
|
fp16/fp16util.py
|
https://github.com/THUDM/GLM/blob/master/fp16/fp16util.py
|
MIT
|
def convert_module(module, dtype):
    """
    Converts a module's immediate parameters and buffers to dtype.

    Only floating-point tensors are converted (integer buffers such as step
    counters are preserved); child modules are not visited.
    """
    for p in module.parameters(recurse=False):
        if p is None:
            continue
        if p.data.dtype.is_floating_point:
            p.data = p.data.to(dtype=dtype)
        grad = p._grad
        if grad is not None and grad.data.dtype.is_floating_point:
            grad.data = grad.data.to(dtype=dtype)
    for buf in module.buffers(recurse=False):
        if buf is None:
            continue
        if buf.data.dtype.is_floating_point:
            buf.data = buf.data.to(dtype=dtype)
|
Converts a module's immediate parameters and buffers to dtype.
|
convert_module
|
python
|
THUDM/GLM
|
fp16/fp16util.py
|
https://github.com/THUDM/GLM/blob/master/fp16/fp16util.py
|
MIT
|
def convert_network(network, dtype):
    """
    Converts a network's parameters and buffers to dtype.

    Affine batch-norm modules are skipped so their parameters and running
    statistics stay in their original precision. Returns the network.
    """
    for submodule in network.modules():
        skip = (isinstance(submodule, torch.nn.modules.batchnorm._BatchNorm)
                and submodule.affine is True)
        if skip:
            continue
        convert_module(submodule, dtype)
    return network
|
Converts a network's parameters and buffers to dtype.
|
convert_network
|
python
|
THUDM/GLM
|
fp16/fp16util.py
|
https://github.com/THUDM/GLM/blob/master/fp16/fp16util.py
|
MIT
|
def prep_param_lists(model, flat_master=False):
    """
    Creates a list of FP32 master parameters for a given model, as in
    `Training Neural Networks with Mixed Precision: Real Examples`_.
    Args:
        model (torch.nn.Module): Existing Pytorch model
        flat_master (bool, optional, default=False): Flatten the master parameters into a single tensor, as a performance optimization.
    Returns:
        A tuple (``model_params``, ``master_params``). ``model_params`` is a list of the model's parameters for later use with :func:`model_grads_to_master_grads` and :func:`master_params_to_model_params`. ``master_params`` is a list of FP32 master gradients. If ``flat_master=True``, ``master_params`` will be a list with one element.
    Example::
        model_params, master_params = prep_param_lists(model)
    .. warning::
        Currently, if ``flat_master=True``, all the model's parameters must be the same type. If the model has parameters of different types, use ``flat_master=False``, or use :class:`FP16_Optimizer`.
    .. _`Training Neural Networks with Mixed Precision: Real Examples`:
        http://on-demand.gputechconf.com/gtc/2018/video/S81012/
    """
    # Only trainable parameters get master copies.
    model_params = [param for param in model.parameters() if param.requires_grad]

    if flat_master:
        # Give the user some more useful error messages
        try:
            # flatten_dense_tensors returns a contiguous flat array.
            # http://pytorch.org/docs/master/_modules/torch/_utils.html
            master_params = _flatten_dense_tensors([param.data for param in model_params]).float()
        except Exception:  # narrowed from a bare except; the hint is printed, then re-raised
            print("Error in prep_param_lists: model may contain a mixture of parameters "
                  "of different types. Use flat_master=False, or use FP16_Optimizer.")
            raise
        master_params = torch.nn.Parameter(master_params)
        master_params.requires_grad = True
        # master_params.register_hook(backwards_debug_hook)
        # Pre-allocate the (uninitialized) flat grad buffer so later copies
        # can assume it exists.
        if master_params.grad is None:
            master_params.grad = master_params.new(*master_params.size())
        return model_params, [master_params]
    else:
        # One detached fp32 clone per model parameter.
        master_params = [param.clone().float().detach() for param in model_params]
        for param in master_params:
            param.requires_grad = True
        return model_params, master_params
|
Creates a list of FP32 master parameters for a given model, as in
`Training Neural Networks with Mixed Precision: Real Examples`_.
Args:
model (torch.nn.Module): Existing Pytorch model
flat_master (bool, optional, default=False): Flatten the master parameters into a single tensor, as a performance optimization.
Returns:
A tuple (``model_params``, ``master_params``). ``model_params`` is a list of the model's parameters for later use with :func:`model_grads_to_master_grads` and :func:`master_params_to_model_params`. ``master_params`` is a list of FP32 master gradients. If ``flat_master=True``, ``master_params`` will be a list with one element.
Example::
model_params, master_params = prep_param_lists(model)
.. warning::
Currently, if ``flat_master=True``, all the model's parameters must be the same type. If the model has parameters of different types, use ``flat_master=False``, or use :class:`FP16_Optimizer`.
.. _`Training Neural Networks with Mixed Precision: Real Examples`:
http://on-demand.gputechconf.com/gtc/2018/video/S81012/
|
prep_param_lists
|
python
|
THUDM/GLM
|
fp16/fp16util.py
|
https://github.com/THUDM/GLM/blob/master/fp16/fp16util.py
|
MIT
|
def model_grads_to_master_grads(model_params, master_params, flat_master=False):
    """
    Copy model gradients to master gradients.
    Args:
        model_params: List of model parameters created by :func:`prep_param_lists`.
        master_params: List of FP32 master parameters created by :func:`prep_param_lists`. If ``master_params`` was created with ``flat_master=True``, ``flat_master=True`` should also be supplied to :func:`model_grads_to_master_grads`.
    """
    if flat_master:
        # Flatten into the single pre-allocated master grad buffer.
        # (The flattening may incur one more deep copy than is necessary.)
        flat_grads = _flatten_dense_tensors([p.grad.data for p in model_params])
        master_params[0].grad.data.copy_(flat_grads)
        return
    for model_p, master_p in zip(model_params, master_params):
        if model_p.grad is None:
            # Mirror "no gradient" onto the master side.
            master_p.grad = None
        else:
            if master_p.grad is None:
                # Lazily allocate the master grad buffer on first use.
                master_p.grad = Variable(master_p.data.new(*master_p.data.size()))
            master_p.grad.data.copy_(model_p.grad.data)
|
Copy model gradients to master gradients.
Args:
model_params: List of model parameters created by :func:`prep_param_lists`.
master_params: List of FP32 master parameters created by :func:`prep_param_lists`. If ``master_params`` was created with ``flat_master=True``, ``flat_master=True`` should also be supplied to :func:`model_grads_to_master_grads`.
|
model_grads_to_master_grads
|
python
|
THUDM/GLM
|
fp16/fp16util.py
|
https://github.com/THUDM/GLM/blob/master/fp16/fp16util.py
|
MIT
|
def master_params_to_model_params(model_params, master_params, flat_master=False):
    """
    Copy master parameters to model parameters.
    Args:
        model_params: List of model parameters created by :func:`prep_param_lists`.
        master_params: List of FP32 master parameters created by :func:`prep_param_lists`. If ``master_params`` was created with ``flat_master=True``, ``flat_master=True`` should also be supplied to :func:`master_params_to_model_params`.
    """
    if flat_master:
        # Unflatten the single master tensor back into per-parameter views.
        unflat_master = _unflatten_dense_tensors(master_params[0].data, model_params)
        for model_p, master_chunk in zip(model_params, unflat_master):
            model_p.data.copy_(master_chunk)
    else:
        for model_p, master_p in zip(model_params, master_params):
            model_p.data.copy_(master_p.data)
|
Copy master parameters to model parameters.
Args:
model_params: List of model parameters created by :func:`prep_param_lists`.
master_params: List of FP32 master parameters created by :func:`prep_param_lists`. If ``master_params`` was created with ``flat_master=True``, ``flat_master=True`` should also be supplied to :func:`master_params_to_model_params`.
|
master_params_to_model_params
|
python
|
THUDM/GLM
|
fp16/fp16util.py
|
https://github.com/THUDM/GLM/blob/master/fp16/fp16util.py
|
MIT
|
def scaled_init_method(mean, std, num_layers):
    """Init method based on N(0, sigma/sqrt(2*num_layers))."""
    # Depth-scaled std, computed once and captured by the closure.
    scaled_std = std / math.sqrt(2.0 * num_layers)

    def init_(tensor):
        return torch.nn.init.normal_(tensor, mean=mean, std=scaled_std)

    return init_
|
Init method based on N(0, sigma/sqrt(2*num_layers)).
|
scaled_init_method
|
python
|
THUDM/GLM
|
model/modeling_bert.py
|
https://github.com/THUDM/GLM/blob/master/model/modeling_bert.py
|
MIT
|
def load_tf_weights_in_bert(model, tf_checkpoint_path):
    """ Load tf checkpoints in a pytorch model

    Copies every variable from the TensorFlow checkpoint at
    `tf_checkpoint_path` into the matching attribute of `model`, translating
    TF naming conventions (kernel/gamma -> weight, output_bias/beta -> bias)
    and transposing dense-layer kernels. Returns the mutated `model`.
    Raises ImportError if TensorFlow is not installed, and AssertionError
    (augmented with both shapes) on any shape mismatch.
    """
    try:
        import re
        import numpy as np
        import tensorflow as tf
    except ImportError:
        print("Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
              "https://www.tensorflow.org/install/ for installation instructions.")
        raise
    tf_path = os.path.abspath(tf_checkpoint_path)
    print("Converting TensorFlow checkpoint from {}".format(tf_path))
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    for name, shape in init_vars:
        print("Loading TF weight {} with shape {}".format(name, shape))
        array = tf.train.load_variable(tf_path, name)
        names.append(name)
        arrays.append(array)
    for name, array in zip(names, arrays):
        # TF variable names are slash-separated scope paths, e.g.
        # "bert/encoder/layer_0/attention/self/query/kernel".
        name = name.split('/')
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v
        # which are not required for using pretrained model
        if any(n in ["adam_v", "adam_m"] for n in name):
            print("Skipping {}".format("/".join(name)))
            continue
        # Walk the PyTorch module tree one path segment at a time.
        pointer = model
        for m_name in name:
            if re.fullmatch(r'[A-Za-z]+_\d+', m_name):
                # Indexed scope like "layer_0" -> attribute "layer", index 0.
                l = re.split(r'_(\d+)', m_name)
            else:
                l = [m_name]
            # Map TF parameter names onto PyTorch attribute names.
            if l[0] == 'kernel' or l[0] == 'gamma':
                pointer = getattr(pointer, 'weight')
            elif l[0] == 'output_bias' or l[0] == 'beta':
                pointer = getattr(pointer, 'bias')
            elif l[0] == 'output_weights':
                pointer = getattr(pointer, 'weight')
            else:
                pointer = getattr(pointer, l[0])
            if len(l) >= 2:
                # Descend into the indexed submodule (e.g. encoder layer list).
                num = int(l[1])
                pointer = pointer[num]
        # NOTE: m_name here is the last path segment from the loop above.
        if m_name[-11:] == '_embeddings':
            pointer = getattr(pointer, 'weight')
        elif m_name == 'kernel':
            # TF dense kernels are (in, out); PyTorch Linear weights are (out, in).
            array = np.transpose(array)
        try:
            assert pointer.shape == array.shape
        except AssertionError as e:
            # Attach both shapes so the mismatch is visible in the traceback.
            e.args += (pointer.shape, array.shape)
            raise
        print("Initialize PyTorch weight {}".format(name))
        pointer.data = torch.from_numpy(array)
    return model
|
Load tf checkpoints in a pytorch model
|
load_tf_weights_in_bert
|
python
|
THUDM/GLM
|
model/modeling_bert.py
|
https://github.com/THUDM/GLM/blob/master/model/modeling_bert.py
|
MIT
|
def __init__(self,
             vocab_size_or_config_json_file,
             hidden_size=768,
             num_hidden_layers=12,
             num_attention_heads=12,
             intermediate_size=3072,
             hidden_act="gelu",
             hidden_dropout_prob=0.1,
             attention_probs_dropout_prob=0.1,
             max_position_embeddings=512,
             type_vocab_size=2,
             initializer_range=0.02,
             deep_init=False,
             fp32_layernorm=False,
             fp32_embedding=False,
             fp32_tokentypes=False,
             layernorm_epsilon=1e-12):
    """Constructs BertConfig.
    Args:
        vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `BertModel`,
            or a path to a JSON config file whose keys become attributes.
        hidden_size: Size of the encoder layers and the pooler layer.
        num_hidden_layers: Number of hidden layers in the Transformer encoder.
        num_attention_heads: Number of attention heads for each attention layer in
            the Transformer encoder.
        intermediate_size: The size of the "intermediate" (i.e., feed-forward)
            layer in the Transformer encoder.
        hidden_act: The non-linear activation function (function or string) in the
            encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
        hidden_dropout_prob: The dropout probability for all fully connected
            layers in the embeddings, encoder, and pooler.
        attention_probs_dropout_prob: The dropout ratio for the attention
            probabilities.
        max_position_embeddings: The maximum sequence length that this model might
            ever be used with. Typically set this to something large just in case
            (e.g., 512 or 1024 or 2048).
        type_vocab_size: The vocabulary size of the `token_type_ids` passed into
            `BertModel`.
        initializer_range: The stddev of the truncated_normal_initializer for
            initializing all weight matrices.
    """
    if isinstance(vocab_size_or_config_json_file, str):
        # A string argument is a path to a JSON config file: every key/value
        # pair in the file becomes an instance attribute.
        with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader:
            json_config = json.loads(reader.read())
        self.__dict__.update(json_config)
    elif isinstance(vocab_size_or_config_json_file, int):
        # An int argument is the vocabulary size; all remaining settings come
        # from the keyword arguments.
        self.__dict__.update(dict(
            vocab_size=vocab_size_or_config_json_file,
            hidden_size=hidden_size,
            num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads,
            hidden_act=hidden_act,
            intermediate_size=intermediate_size,
            hidden_dropout_prob=hidden_dropout_prob,
            attention_probs_dropout_prob=attention_probs_dropout_prob,
            max_position_embeddings=max_position_embeddings,
            type_vocab_size=type_vocab_size,
            initializer_range=initializer_range,
            deep_init=deep_init,
            fp32_layernorm=fp32_layernorm,
            fp32_embedding=fp32_embedding,
            layernorm_epsilon=layernorm_epsilon,
            fp32_tokentypes=fp32_tokentypes,
        ))
    else:
        raise ValueError("First argument must be either a vocabulary size (int)"
                         "or the path to a pretrained model config file (str)")
|
Constructs BertConfig.
Args:
vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `BertModel`.
hidden_size: Size of the encoder layers and the pooler layer.
num_hidden_layers: Number of hidden layers in the Transformer encoder.
num_attention_heads: Number of attention heads for each attention layer in
the Transformer encoder.
intermediate_size: The size of the "intermediate" (i.e., feed-forward)
layer in the Transformer encoder.
hidden_act: The non-linear activation function (function or string) in the
encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
hidden_dropout_prob: The dropout probabilitiy for all fully connected
layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob: The dropout ratio for the attention
probabilities.
max_position_embeddings: The maximum sequence length that this model might
ever be used with. Typically set this to something large just in case
(e.g., 512 or 1024 or 2048).
type_vocab_size: The vocabulary size of the `token_type_ids` passed into
`BertModel`.
initializer_range: The sttdev of the truncated_normal_initializer for
initializing all weight matrices.
|
__init__
|
python
|
THUDM/GLM
|
model/modeling_bert.py
|
https://github.com/THUDM/GLM/blob/master/model/modeling_bert.py
|
MIT
|
def from_dict(cls, json_object):
    """Build a `BertConfig` whose attributes mirror *json_object*.

    Every key/value pair from the dictionary is copied verbatim onto a
    fresh config instance (created with a sentinel vocab size of -1).
    """
    config = BertConfig(vocab_size_or_config_json_file=-1)
    config.__dict__.update(json_object)
    return config
|
Constructs a `BertConfig` from a Python dictionary of parameters.
|
from_dict
|
python
|
THUDM/GLM
|
model/modeling_bert.py
|
https://github.com/THUDM/GLM/blob/master/model/modeling_bert.py
|
MIT
|
def from_json_file(cls, json_file):
    """Load a `BertConfig` from a JSON file of parameters on disk."""
    with open(json_file, "r", encoding='utf-8') as handle:
        contents = handle.read()
    parsed = json.loads(contents)
    return cls.from_dict(parsed)
|
Constructs a `BertConfig` from a json file of parameters.
|
from_json_file
|
python
|
THUDM/GLM
|
model/modeling_bert.py
|
https://github.com/THUDM/GLM/blob/master/model/modeling_bert.py
|
MIT
|
def __init__(self, hidden_size, eps=1e-12):
    """Construct a layernorm module in the TF style (epsilon inside the square root).

    Args:
        hidden_size: size of the normalized (last) dimension; sets the
            length of the learned scale and shift vectors.
        eps: small constant added to the variance for numerical stability.
    """
    super(BertLayerNorm, self).__init__()
    # Learned affine parameters: scale starts at 1 and shift at 0, so the
    # freshly constructed module applies an identity affine transform.
    self.weight = nn.Parameter(torch.ones(hidden_size))
    self.bias = nn.Parameter(torch.zeros(hidden_size))
    self.variance_epsilon = eps
|
Construct a layernorm module in the TF style (epsilon inside the square root).
|
__init__
|
python
|
THUDM/GLM
|
model/modeling_bert.py
|
https://github.com/THUDM/GLM/blob/master/model/modeling_bert.py
|
MIT
|
def from_pretrained(cls, pretrained_model_name, state_dict=None, cache_dir=None,
                    fp32_layernorm=False, fp32_embedding=False, layernorm_epsilon=1e-12,
                    fp32_tokentypes=False, *inputs, **kwargs):
    """
    Instantiate a PreTrainedBertModel from a pre-trained model file or a pytorch state dict.
    Download and cache the pre-trained model file if needed.

    Params:
        pretrained_model_name: either:
            - a str with the name of a pre-trained model to load selected in the list of:
                . `bert-base-uncased`
                . `bert-large-uncased`
                . `bert-base-cased`
                . `bert-large-cased`
                . `bert-base-multilingual-uncased`
                . `bert-base-multilingual-cased`
                . `bert-base-chinese`
            - a path or url to a pretrained model archive containing:
                . `bert_config.json` a configuration file for the model
                . `pytorch_model.bin` a PyTorch dump of a BertForPreTraining instance
        cache_dir: an optional path to a folder in which the pre-trained models will be cached.
        state_dict: an optional state dictionary (collections.OrderedDict object) to use instead of Google pre-trained models
        fp32_layernorm / fp32_embedding / fp32_tokentypes: precision flags copied onto the config.
        layernorm_epsilon: layernorm epsilon copied onto the config.
        *inputs, **kwargs: additional input for the specific Bert class
            (ex: num_labels for BertForSequenceClassification)

    Returns:
        The instantiated model with weights loaded, or None when the
        archive could not be resolved.
    """
    if pretrained_model_name in PRETRAINED_MODEL_ARCHIVE_MAP:
        archive_file = PRETRAINED_MODEL_ARCHIVE_MAP[pretrained_model_name]
    else:
        # Not a known shortcut name: treat the argument as a path or url.
        archive_file = pretrained_model_name
    # redirect to the cache, if necessary
    try:
        resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir)
    except FileNotFoundError:
        logger.error(
            "Model name '{}' was not found in model name list ({}). "
            "We assumed '{}' was a path or url but couldn't find any file "
            "associated to this path or url.".format(
                pretrained_model_name,
                ', '.join(PRETRAINED_MODEL_ARCHIVE_MAP.keys()),
                archive_file))
        return None
    if resolved_archive_file == archive_file:
        logger.info("loading archive file {}".format(archive_file))
    else:
        logger.info("loading archive file {} from cache at {}".format(
            archive_file, resolved_archive_file))
    tempdir = None
    if os.path.isdir(resolved_archive_file):
        serialization_dir = resolved_archive_file
    else:
        # Extract archive to temp dir
        tempdir = tempfile.mkdtemp()
        logger.info("extracting archive file {} to temp dir {}".format(
            resolved_archive_file, tempdir))
        with tarfile.open(resolved_archive_file, 'r:gz') as archive:
            # Guard against tar path traversal (members escaping the
            # extraction directory via '..' or absolute names).
            def is_within_directory(directory, target):
                abs_directory = os.path.abspath(directory)
                abs_target = os.path.abspath(target)
                prefix = os.path.commonprefix([abs_directory, abs_target])
                return prefix == abs_directory
            def safe_extract(tar, path=".", members=None, *, numeric_owner=False):
                for member in tar.getmembers():
                    member_path = os.path.join(path, member.name)
                    if not is_within_directory(path, member_path):
                        raise Exception("Attempted Path Traversal in Tar File")
                tar.extractall(path, members, numeric_owner=numeric_owner)
            safe_extract(archive, tempdir)
        serialization_dir = tempdir
    # Load config
    config_file = os.path.join(serialization_dir, CONFIG_NAME)
    config = BertConfig.from_json_file(config_file)
    config.fp32_layernorm = fp32_layernorm
    config.fp32_embedding = fp32_embedding
    config.layernorm_epsilon = layernorm_epsilon
    config.fp32_tokentypes = fp32_tokentypes
    logger.info("Model config {}".format(config))
    # Instantiate model.
    model = cls(config, *inputs, **kwargs)
    if state_dict is None:
        weights_path = os.path.join(serialization_dir, WEIGHTS_NAME)
        state_dict = torch.load(weights_path)
    # Rename TF-style parameter names to PyTorch conventions:
    # 'gamma' -> 'weight', 'beta' -> 'bias'.
    old_keys = []
    new_keys = []
    for key in state_dict.keys():
        new_key = None
        if 'gamma' in key:
            new_key = key.replace('gamma', 'weight')
        if 'beta' in key:
            new_key = key.replace('beta', 'bias')
        if new_key:
            old_keys.append(key)
            new_keys.append(new_key)
    for old_key, new_key in zip(old_keys, new_keys):
        state_dict[new_key] = state_dict.pop(old_key)
    missing_keys = []
    unexpected_keys = []
    error_msgs = []
    # copy state_dict so _load_from_state_dict can modify it
    metadata = getattr(state_dict, '_metadata', None)
    state_dict = state_dict.copy()
    if metadata is not None:
        state_dict._metadata = metadata
    # Recursively load each submodule's slice of the state dict.
    def load(module, prefix=''):
        local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
        module._load_from_state_dict(
            state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)
        for name, child in module._modules.items():
            if child is not None:
                load(child, prefix + name + '.')
    # If the target model has no 'bert' attribute, keys must be prefixed
    # with 'bert.' to line up with the checkpoint layout.
    load(model, prefix='' if hasattr(model, 'bert') else 'bert.')
    if len(missing_keys) > 0:
        print("Weights of {} not initialized from pretrained model: {}".format(
            model.__class__.__name__, missing_keys))
    if len(unexpected_keys) > 0:
        print("Weights from pretrained model not used in {}: {}".format(
            model.__class__.__name__, unexpected_keys))
    if tempdir:
        # Clean up temp dir
        shutil.rmtree(tempdir)
    return model
|
Instantiate a PreTrainedBertModel from a pre-trained model file or a pytorch state dict.
Download and cache the pre-trained model file if needed.
Params:
pretrained_model_name: either:
- a str with the name of a pre-trained model to load selected in the list of:
. `bert-base-uncased`
. `bert-large-uncased`
. `bert-base-cased`
. `bert-large-cased`
. `bert-base-multilingual-uncased`
. `bert-base-multilingual-cased`
. `bert-base-chinese`
- a path or url to a pretrained model archive containing:
. `bert_config.json` a configuration file for the model
. `pytorch_model.bin` a PyTorch dump of a BertForPreTraining instance
cache_dir: an optional path to a folder in which the pre-trained models will be cached.
state_dict: an optional state dictionnary (collections.OrderedDict object) to use instead of Google pre-trained models
*inputs, **kwargs: additional input for the specific Bert class
(ex: num_labels for BertForSequenceClassification)
|
from_pretrained
|
python
|
THUDM/GLM
|
model/modeling_bert.py
|
https://github.com/THUDM/GLM/blob/master/model/modeling_bert.py
|
MIT
|
def init_method_normal(std=0.02):
    """Return an initializer that fills a tensor in place from N(0, std).

    This is only used for embeddings; the transformer layers carry their
    own initializer.
    """
    def _apply(tensor):
        return torch.nn.init.normal_(tensor, mean=0.0, std=std)
    return _apply
|
Init method based on normal distribution.
This is only used for embeddings. The transformer has its
own initializer.
|
init_method_normal
|
python
|
THUDM/GLM
|
model/modeling_glm.py
|
https://github.com/THUDM/GLM/blob/master/model/modeling_glm.py
|
MIT
|
def _check_data_types(keys, data, target_dtype):
    """Assert that every tensor in *data* selected by *keys* has *target_dtype*."""
    for name in keys:
        actual_dtype = data[name].dtype
        assert actual_dtype == target_dtype, '{} has data type {} which '\
            'is different than {}'.format(name, actual_dtype, target_dtype)
|
Check that all the keys have the same target data type.
|
_check_data_types
|
python
|
THUDM/GLM
|
mpu/data.py
|
https://github.com/THUDM/GLM/blob/master/mpu/data.py
|
MIT
|
def _build_key_size_numel_dictionaries(keys, data):
    """Compute per-key tensor sizes on rank 0 and broadcast them.

    Rank 0 of the model parallel group packs the shape of data[key] for
    every key into a flat integer buffer; all ranks receive that buffer
    so they can allocate matching receive tensors before the actual data
    broadcast in `broadcast_data`.

    Returns:
        key_size: dict mapping key -> list of dimension sizes.
        key_numel: dict mapping key -> number of elements.
        total_numel: total element count across all keys.
    """
    max_dim = _MAX_DATA_DIM
    # Flat buffer with max_dim slots per key; unused slots stay 0 and act
    # as terminators when unpacking below.
    sizes = [0 for _ in range(max_dim) for _ in keys]
    # Pack the sizes on rank zero.
    if get_model_parallel_rank() == 0:
        offset = 0
        for key in keys:
            # dim() must stay strictly below max_dim so at least one
            # zero terminator survives per key.
            assert data[key].dim() < max_dim, 'you should increase MAX_DATA_DIM'
            size = data[key].size()
            for i, s in enumerate(size):
                sizes[i + offset] = s
            offset += max_dim
    # Move to GPU and broadcast.
    sizes_cuda = torch.cuda.LongTensor(sizes)
    torch.distributed.broadcast(sizes_cuda, get_model_parallel_src_rank(),
                                group=get_model_parallel_group())
    # Move back to cpu and unpack.
    sizes_cpu = sizes_cuda.cpu()
    key_size = {}
    key_numel = {}
    total_numel = 0
    offset = 0
    for key in keys:
        i = 0
        size = []
        numel = 1
        # Read dimension sizes until the 0 terminator for this key.
        while sizes_cpu[offset + i] > 0:
            this_size = sizes_cpu[offset + i]
            size.append(this_size)
            numel *= this_size
            i += 1
        key_size[key] = size
        key_numel[key] = numel
        total_numel += numel
        offset += max_dim
    return key_size, key_numel, total_numel
|
Build the size on rank 0 and broadcast.
|
_build_key_size_numel_dictionaries
|
python
|
THUDM/GLM
|
mpu/data.py
|
https://github.com/THUDM/GLM/blob/master/mpu/data.py
|
MIT
|
def broadcast_data(keys, data, datatype):
    """Broadcast data from rank zero of each model parallel group to the
    members of the same model parallel group.

    Arguments:
        keys: list of keys in the data dictionary to be broadcasted.
        data: data dictionary of string keys and cpu tensor values
            (only needs to be populated on model-parallel rank 0).
        datatype: torch data type of all tensors in data associated
            with keys.

    Returns:
        dict mapping each key to a GPU tensor (a view into one shared
        flat buffer) holding the broadcasted data.
    """
    # Build (key, size) and (key, number of elements) dictionaries along
    # with the total number of elements on all ranks.
    key_size, key_numel, total_numel = _build_key_size_numel_dictionaries(keys,
                                                                          data)
    # Pack on rank zero.
    if get_model_parallel_rank() == 0:
        # Check that all keys have the same data type.
        _check_data_types(keys, data, datatype)
        # Flatten the data associated with the keys into one contiguous buffer.
        flatten_data = torch.cat(
            [data[key].contiguous().view(-1) for key in keys], dim=0).cuda()
    else:
        # Non-source ranks allocate an empty receive buffer of the
        # broadcast size learned above.
        flatten_data = torch.empty(total_numel,
                                   device=torch.cuda.current_device(),
                                   dtype=datatype)
    # Broadcast from the group's source rank.
    torch.distributed.broadcast(flatten_data, get_model_parallel_src_rank(),
                                group=get_model_parallel_group())
    # Unpack back into per-key tensors (narrow/view: no copies).
    output = {}
    offset = 0
    for key in keys:
        size = key_size[key]
        numel = key_numel[key]
        output[key] = flatten_data.narrow(0, offset, numel).view(size)
        offset += numel
    return output
|
Broadcast data from rank zero of each model parallel group to the
members of the same model parallel group.
Arguments:
keys: list of keys in the data disctionary to be broadcasted
data: data dictionary of string keys and cpu tensor values.
datatype: torch data type of all tensors in data associated
with keys.
|
broadcast_data
|
python
|
THUDM/GLM
|
mpu/data.py
|
https://github.com/THUDM/GLM/blob/master/mpu/data.py
|
MIT
|
def clip_grad_norm(parameters, max_norm, norm_type=2):
    """Clips gradient norm of an iterable of parameters.

    This is adapted from torch.nn.utils.clip_grad.clip_grad_norm_ and
    added functionality to handle model parallel parameters. Note that
    the gradients are modified in place.

    Arguments:
        parameters (Iterable[Tensor] or Tensor): an iterable of Tensors or a
            single Tensor that will have gradients normalized
        max_norm (float or int): max norm of the gradients
        norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for
            infinity norm.
    Returns:
        Total norm of the parameters (viewed as a single vector).
    """
    if isinstance(parameters, torch.Tensor):
        parameters = [parameters]
    # Only parameters that actually received gradients participate.
    parameters = list(filter(lambda p: p.grad is not None, parameters))
    max_norm = float(max_norm)
    norm_type = float(norm_type)
    if norm_type == inf:
        total_norm = max(p.grad.data.abs().max() for p in parameters)
        total_norm_cuda = torch.cuda.FloatTensor([float(total_norm)])
        # Take max across all GPUs.
        torch.distributed.all_reduce(total_norm_cuda,
                                     op=torch.distributed.ReduceOp.MAX,
                                     group=get_model_parallel_group())
        total_norm = total_norm_cuda[0].item()
    else:
        total_norm = 0
        for p in parameters:
            # Count each parameter once globally: model-parallel params
            # are unique per rank, while replicated params are only
            # counted on model-parallel rank 0.
            if p.model_parallel or (get_model_parallel_rank() == 0):
                param_norm = p.grad.data.norm(norm_type)
                total_norm += param_norm.item() ** norm_type
        # Sum across all model parallel GPUs.
        total_norm_cuda = torch.cuda.FloatTensor([float(total_norm)])
        torch.distributed.all_reduce(total_norm_cuda,
                                     op=torch.distributed.ReduceOp.SUM,
                                     group=get_model_parallel_group())
        total_norm = total_norm_cuda[0].item() ** (1. / norm_type)
    # Scale gradients down only when the norm exceeds max_norm.
    clip_coef = max_norm / (total_norm + 1e-6)
    if clip_coef < 1:
        for p in parameters:
            p.grad.data.mul_(clip_coef)
    return total_norm
|
Clips gradient norm of an iterable of parameters.
This is adapted from torch.nn.utils.clip_grad.clip_grad_norm_ and
added functionality to handle model parallel parameters. Note that
the gradients are modified in place.
Arguments:
parameters (Iterable[Tensor] or Tensor): an iterable of Tensors or a
single Tensor that will have gradients normalized
max_norm (float or int): max norm of the gradients
norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for
infinity norm.
Returns:
Total norm of the parameters (viewed as a single vector).
|
clip_grad_norm
|
python
|
THUDM/GLM
|
mpu/grads.py
|
https://github.com/THUDM/GLM/blob/master/mpu/grads.py
|
MIT
|
def initialize_model_parallel(model_parallel_size_):
    """
    Initialize model data parallel groups.

    Arguments:
        model_parallel_size: number of GPUs used to parallelize model.

    Let's say we have a total of 8 GPUs denoted by g0 ... g7 and we
    use 2 GPUs to parallelize the model. The present function will
    create 4 model parallel groups and 2 data parallel groups as:
        4 model parallel groups:
            [g0, g1], [g2, g3], [g4, g5], [g6, g7]
        2 data parallel groups:
            [g0, g2, g4, g6], [g1, g3, g5, g7]
    Note that for efficiency, the caller should make sure adjacent ranks
    are on the same DGX box. For example if we are using 2 DGX-1 boxes
    with a total of 16 GPUs, rank 0 to 7 belong to the first box and
    ranks 8 to 15 belong to the second box.
    """
    if torch.distributed.get_rank() == 0:
        print('> initializing model parallel with size {}'.format(
            model_parallel_size_))
    # Get world size and rank. Ensure some consistencies.
    assert torch.distributed.is_initialized()
    world_size = torch.distributed.get_world_size()
    # Cap the requested size at the actual world size.
    model_parallel_size = min(model_parallel_size_, world_size)
    ensure_divisibility(world_size, model_parallel_size)
    rank = torch.distributed.get_rank()
    # Build the data parallel groups: ranks holding the same position
    # inside their model parallel group form one data parallel group.
    global _DATA_PARALLEL_GROUP
    assert _DATA_PARALLEL_GROUP is None, \
        'data parallel group is already initialized'
    for i in range(model_parallel_size):
        ranks = range(i, world_size, model_parallel_size)
        # new_group is a collective, so every rank creates every group;
        # each rank only remembers the group it belongs to.
        group = torch.distributed.new_group(ranks)
        if i == (rank % model_parallel_size):
            _DATA_PARALLEL_GROUP = group
    # Build the model parallel groups from consecutive rank blocks.
    global _MODEL_PARALLEL_GROUP
    assert _MODEL_PARALLEL_GROUP is None, \
        'model parallel group is already initialized'
    for i in range(world_size // model_parallel_size):
        ranks = range(i * model_parallel_size,
                      (i + 1) * model_parallel_size)
        group = torch.distributed.new_group(ranks)
        if i == (rank // model_parallel_size):
            _MODEL_PARALLEL_GROUP = group
|
Initialize model data parallel groups.
Arguments:
model_parallel_size: number of GPUs used to parallelize model.
Let's say we have a total of 8 GPUs denoted by g0 ... g7 and we
use 2 GPUs to parallelize the model. The present function will
create 4 model parallel groups and 2 data parallel grous as:
4 model parallel groups:
[g0, g1], [g2, g3], [g4, g5], [g6, g7]
2 data parallel groups:
[g0, g2, g4, g6], [g1, g3, g5, g7]
Note that for efficiency, the caller should make sure adjacent ranks
are on the same DGX box. For example if we are using 2 DGX-1 boxes
with a total of 16 GPUs, rank 0 to 7 belong to the first box and
ranks 8 to 15 belong to the second box.
|
initialize_model_parallel
|
python
|
THUDM/GLM
|
mpu/initialize.py
|
https://github.com/THUDM/GLM/blob/master/mpu/initialize.py
|
MIT
|
def model_parallel_is_initialized():
    """Return True when both the model and data parallel groups exist."""
    return _MODEL_PARALLEL_GROUP is not None and _DATA_PARALLEL_GROUP is not None
|
Check if model and data parallel groups are initialized.
|
model_parallel_is_initialized
|
python
|
THUDM/GLM
|
mpu/initialize.py
|
https://github.com/THUDM/GLM/blob/master/mpu/initialize.py
|
MIT
|
def get_model_parallel_group():
    """Return the model parallel process group this rank belongs to."""
    group = _MODEL_PARALLEL_GROUP
    assert group is not None, \
        'model parallel group is not initialized'
    return group
|
Get the model parallel group the caller rank belongs to.
|
get_model_parallel_group
|
python
|
THUDM/GLM
|
mpu/initialize.py
|
https://github.com/THUDM/GLM/blob/master/mpu/initialize.py
|
MIT
|
def get_data_parallel_group():
    """Return the data parallel process group this rank belongs to."""
    group = _DATA_PARALLEL_GROUP
    assert group is not None, \
        'data parallel group is not initialized'
    return group
|
Get the data parallel group the caller rank belongs to.
|
get_data_parallel_group
|
python
|
THUDM/GLM
|
mpu/initialize.py
|
https://github.com/THUDM/GLM/blob/master/mpu/initialize.py
|
MIT
|
def get_model_parallel_src_rank():
    """Return the global rank holding local rank zero of the caller's
    model parallel group."""
    my_rank = torch.distributed.get_rank()
    group_size = get_model_parallel_world_size()
    # Drop the within-group offset to land on the group's first rank.
    return my_rank - my_rank % group_size
|
Calculate the global rank corresponding to a local rank zeor
in the model parallel group.
|
get_model_parallel_src_rank
|
python
|
THUDM/GLM
|
mpu/initialize.py
|
https://github.com/THUDM/GLM/blob/master/mpu/initialize.py
|
MIT
|
def _initialize_affine_weight(weight, output_size, input_size,
                              per_partition_size, partition_dim, init_method,
                              stride=1, return_master_weight=False):
    """Initialize affine weight for model parallel.

    Build the master weight on all processes and scatter
    the relevant chunk.

    Args:
        weight: already-allocated partition tensor to fill in place.
        output_size, input_size: shape of the full (unpartitioned) weight.
        per_partition_size: this rank's extent along partition_dim.
        partition_dim: dimension along which the weight is partitioned.
        init_method: callable that initializes a tensor in place.
        stride: number of interleaved stripes per partition.
        return_master_weight: if True, return the full master weight.
    """
    # If we only use 1 process for model parallelism, bypass scatter.
    world_size = get_model_parallel_world_size()
    if world_size == 1:
        init_method(weight)
        if return_master_weight:
            return weight
        return None
    # Initialize master weight on every rank so each rank can slice out
    # its own chunk without communication.
    # NOTE(review): this assumes init_method yields identical values on
    # all ranks (shared rng state) -- confirm against the rng setup.
    master_weight = torch.empty(output_size, input_size,
                                dtype=weight.dtype,
                                requires_grad=False)
    init_method(master_weight)
    # Split and copy
    per_partition_per_stride_size = divide(per_partition_size, stride)
    weight_list = torch.split(master_weight, per_partition_per_stride_size,
                              dim=partition_dim)
    rank = get_model_parallel_rank()
    # Every world_size-th stripe belongs to this rank (stride support).
    my_weight_list = weight_list[rank::world_size]
    with torch.no_grad():
        torch.cat(my_weight_list, dim=partition_dim, out=weight)
    if return_master_weight:
        return master_weight
    return None
|
Initialize affine weight for model parallel.
Build the master weight on all processes and scatter
the relevant chunk.
|
_initialize_affine_weight
|
python
|
THUDM/GLM
|
mpu/layers.py
|
https://github.com/THUDM/GLM/blob/master/mpu/layers.py
|
MIT
|
def _reduce(input_):
    """All-reduce *input_* across the model parallel group (in place)."""
    group = get_model_parallel_group()
    # A single-member group needs no communication.
    if torch.distributed.get_world_size(group=group) > 1:
        torch.distributed.all_reduce(input_, group=group)
    return input_
|
All-reduce the the input tensor across model parallel group.
|
_reduce
|
python
|
THUDM/GLM
|
mpu/mappings.py
|
https://github.com/THUDM/GLM/blob/master/mpu/mappings.py
|
MIT
|
def _split(input_):
    """Slice *input_* along its last dimension and keep this rank's shard."""
    group = get_model_parallel_group()
    world_size = torch.distributed.get_world_size(group=group)
    # A single-member group keeps the tensor whole.
    if world_size == 1:
        return input_
    shards = split_tensor_along_last_dim(input_, world_size)
    rank = torch.distributed.get_rank(group=group)
    # torch.split returns views that are generally not contiguous.
    return shards[rank].contiguous()
|
Split the tensor along its last dimension and keep the
corresponding slice.
|
_split
|
python
|
THUDM/GLM
|
mpu/mappings.py
|
https://github.com/THUDM/GLM/blob/master/mpu/mappings.py
|
MIT
|
def _gather(input_):
    """All-gather shards and concatenate them along the last dimension."""
    group = get_model_parallel_group()
    world_size = torch.distributed.get_world_size(group=group)
    # A single-member group already holds the full tensor.
    if world_size == 1:
        return input_
    rank = torch.distributed.get_rank(group=group)
    last_dim = input_.dim() - 1
    gathered = [torch.empty_like(input_) for _ in range(world_size)]
    gathered[rank] = input_
    torch.distributed.all_gather(gathered, input_, group=group)
    # torch.cat produces a contiguous result; .contiguous() keeps parity
    # with the original implementation.
    return torch.cat(gathered, dim=last_dim).contiguous()
|
Gather tensors and concatinate along the last dimension.
|
_gather
|
python
|
THUDM/GLM
|
mpu/mappings.py
|
https://github.com/THUDM/GLM/blob/master/mpu/mappings.py
|
MIT
|
def _set_cuda_rng_state(new_state, device=-1):
    """Sets the random number generator state of the current GPU.

    Arguments:
        new_state (torch.ByteTensor): The desired state
        device: GPU index, device string, or -1 for the default cuda device.

    This function is adapted from PyTorch repo (torch.cuda.set_rng_state)
    with a single change: the input state is not cloned. Cloning caused
    major performance issues for +4 GPU cases.
    """
    if hasattr(_C, '_cuda_setRNGState') and callable(_C._cuda_setRNGState):
        # older PyTorch: set the state through the private C binding,
        # switching to the target device first.
        def cb():
            with device_ctx_manager(device):
                _C._cuda_setRNGState(new_state)
    else:
        # newer PyTorch: normalize `device` to a torch.device, then set
        # the state on that device's default generator.
        if device == -1:
            device = torch.device('cuda')
        elif isinstance(device, str):
            device = torch.device(device)
        elif isinstance(device, int):
            device = torch.device('cuda', device)
        def cb():
            idx = device.index
            if idx is None:
                idx = torch.cuda.current_device()
            default_generator = torch.cuda.default_generators[idx]
            default_generator.set_state(new_state)
    # Defer the callback until CUDA is initialized, mirroring
    # torch.cuda.set_rng_state.
    _lazy_call(cb)
|
Sets the random number generator state of the current GPU.
Argumentss:
new_state (torch.ByteTensor): The desired state
This function is adapted from PyTorch repo (torch.cuda.set_rng_state)
with a single change: the input state is not cloned. Cloning caused
major performance issues for +4 GPU cases.
|
_set_cuda_rng_state
|
python
|
THUDM/GLM
|
mpu/random.py
|
https://github.com/THUDM/GLM/blob/master/mpu/random.py
|
MIT
|
def reset(self):
    """Return the tracker to its initial state: no states, no seeds."""
    self.seeds_ = set()
    self.states_ = {}
|
Set to the initial state (no tracker).
|
reset
|
python
|
THUDM/GLM
|
mpu/random.py
|
https://github.com/THUDM/GLM/blob/master/mpu/random.py
|
MIT
|
def get_states(self):
    """Return a shallow copy of the tracked rng states.

    Copying keeps direct references to the state objects themselves
    while protecting the internal dictionary from caller mutation.
    """
    return dict(self.states_)
|
Get rng states. Copy the dictionary so we have direct
pointers to the states, not just a pointer to the dictionary.
|
get_states
|
python
|
THUDM/GLM
|
mpu/random.py
|
https://github.com/THUDM/GLM/blob/master/mpu/random.py
|
MIT
|
def fork(self, name=_MODEL_PARALLEL_RNG_TRACKER_NAME):
    """Fork the cuda rng state, perform operations, and exit with
    the original state.

    NOTE(review): this is a generator that yields once; presumably it is
    wrapped with contextlib.contextmanager at the class level so it can
    be used as `with tracker.fork(): ...` -- confirm at the class
    definition, which is outside this view.
    """
    # Check if we have added the state
    if name not in self.states_:
        raise Exception('cuda rng state {} is not added'.format(name))
    # Store current rng state.
    orig_cuda_rng_state = torch.cuda.get_rng_state()
    # Set rng state to the desired one
    _set_cuda_rng_state(self.states_[name])
    # Do the stuff we wanted to do.
    try:
        yield
    finally:
        # Update the current rng state for later use.
        self.states_[name] = torch.cuda.get_rng_state()
        # And set the state to the original state we started with.
        _set_cuda_rng_state(orig_cuda_rng_state)
|
Fork the cuda rng state, perform operations, and exit with
the original state.
|
fork
|
python
|
THUDM/GLM
|
mpu/random.py
|
https://github.com/THUDM/GLM/blob/master/mpu/random.py
|
MIT
|
def model_parallel_cuda_manual_seed(seed):
    """Initialize model parallel cuda seed.

    This function should be called after the model parallel is
    initialized. Also, no torch.cuda.manual_seed should be called
    after this function. Basically, this is replacement for that
    function.

    Two sets of RNG states are tracked:
        default state: This is for data parallelism and is the same among a
                       set of model parallel GPUs but different across
                       different model parallel groups. This is used for
                       example for dropout in the non-model-parallel regions.
        model-parallel state: This state is different among a set of model
                              parallel GPUs, but the same across data parallel
                              groups. This is used for example for dropout in
                              model parallel regions.
    """
    # 2718 is just for fun and any POSITIVE value will work.
    offset = seed + 2718
    # Offsetting by the model parallel rank makes this state differ
    # within a model parallel group.
    model_parallel_seed = offset + get_model_parallel_rank()
    # Data parallel gets the original seed.
    data_parallel_seed = seed
    if torch.distributed.get_rank() == 0:
        print('> initializing model parallel cuda seeds on global rank {}, '
              'model parallel rank {}, and data parallel rank {} with '
              'model parallel seed: {} and data parallel seed: {}'.format(
                  torch.distributed.get_rank(), get_model_parallel_rank(),
                  get_data_parallel_rank(), model_parallel_seed,
                  data_parallel_seed), flush=True)
    _CUDA_RNG_STATE_TRACKER.reset()
    # Set the default state.
    torch.cuda.manual_seed(data_parallel_seed)
    # and model parallel state.
    _CUDA_RNG_STATE_TRACKER.add(_MODEL_PARALLEL_RNG_TRACKER_NAME,
                                model_parallel_seed)
|
Initialize model parallel cuda seed.
This function should be called after the model parallel is
initialized. Also, no torch.cuda.manual_seed should be called
after this function. Basically, this is replacement for that
function.
Two set of RNG states are tracked:
default state: This is for data parallelism and is the same among a
set of model parallel GPUs but different across
different model paralle groups. This is used for
example for dropout in the non-model-parallel regions.
model-parallel state: This state is different among a set of model
parallel GPUs, but the same across data parallel
groups. This is used for example for dropout in
model parallel regions.
|
model_parallel_cuda_manual_seed
|
python
|
THUDM/GLM
|
mpu/random.py
|
https://github.com/THUDM/GLM/blob/master/mpu/random.py
|
MIT
|
def _transpose_for_scores(self, tensor):
    """Reshape a 3D tensor [b, s, np*hn] into the 4D attention layout
    [b, np, s, hn]."""
    head_split_shape = tensor.size()[:-1] + \
                       (self.num_attention_heads_per_partition,
                        self.hidden_size_per_attention_head)
    reshaped = tensor.view(*head_split_shape)
    # Move the head axis in front of the sequence axis.
    return reshaped.permute(0, 2, 1, 3)
|
Transpose a 3D tensor [b, s, np*hn] into a 4D tensor with
size [b, np, s, hn].
|
_transpose_for_scores
|
python
|
THUDM/GLM
|
mpu/transformer.py
|
https://github.com/THUDM/GLM/blob/master/mpu/transformer.py
|
MIT
|
def divide(numerator, denominator):
    """Return numerator // denominator after asserting exact divisibility."""
    ensure_divisibility(numerator, denominator)
    quotient = numerator // denominator
    return quotient
|
Ensure that numerator is divisible by the denominator and return
the division value.
|
divide
|
python
|
THUDM/GLM
|
mpu/utils.py
|
https://github.com/THUDM/GLM/blob/master/mpu/utils.py
|
MIT
|
def split_tensor_along_last_dim(tensor, num_partitions,
                                contiguous_split_chunks=False):
    """Split a tensor into equal chunks along its last dimension.

    Arguments:
        tensor: input tensor.
        num_partitions: number of partitions (must evenly divide the
            last dimension).
        contiguous_split_chunks: If True, make each chunk contiguous
            in memory.
    """
    last_dim = tensor.dim() - 1
    chunk_size = divide(tensor.size()[last_dim], num_partitions)
    chunks = torch.split(tensor, chunk_size, dim=last_dim)
    # torch.split hands back views that are generally not contiguous.
    if contiguous_split_chunks:
        chunks = tuple(piece.contiguous() for piece in chunks)
    return chunks
|
Split a tensor along its last dimension.
Arguments:
tensor: input tensor.
num_partitions: number of partitions to split the tensor
contiguous_split_chunks: If True, make each chunk contiguous
in memory.
|
split_tensor_along_last_dim
|
python
|
THUDM/GLM
|
mpu/utils.py
|
https://github.com/THUDM/GLM/blob/master/mpu/utils.py
|
MIT
|
def update_cmd(cmd, config):
    """Append command-line options from *config* onto *cmd*.

    Args:
        cmd (str): the command string built so far (should end with a space).
        config (dict): mapping of option name -> value. ``None`` values
            are skipped entirely; booleans become bare ``--flag`` switches
            (emitted only when True); everything else becomes
            ``--key value``.

    Returns:
        str: the extended command string.
    """
    for key, value in config.items():
        if value is None:
            continue  # unset options are omitted entirely
        # isinstance is the idiomatic type test (was: type(v) == bool).
        if isinstance(value, bool):
            # Booleans are flags: present when True, absent when False.
            if value:
                cmd += "--{} ".format(key)
        else:
            cmd += "--{} {} ".format(key, value)
    return cmd
|
@param cmd str
@param configs list of dicts
|
update_cmd
|
python
|
THUDM/GLM
|
scripts/dispatcher.py
|
https://github.com/THUDM/GLM/blob/master/scripts/dispatcher.py
|
MIT
|
def clean_text(text):
    """Collapse newlines and whitespace runs and tidy sentence-final dots."""
    # \s+ also matches newlines, so the explicit replace is belt-and-braces.
    collapsed = re.sub(r'\s+', ' ', text.replace("\n", " "))
    # Up to three passes to pull ' . ' back into '. '.
    for _ in range(3):
        collapsed = collapsed.replace(' . ', '. ')
    return collapsed
|
Remove new lines and multiple spaces and adjust end of sentence dot.
|
clean_text
|
python
|
THUDM/GLM
|
tasks/data_utils.py
|
https://github.com/THUDM/GLM/blob/master/tasks/data_utils.py
|
MIT
|
def __init__(self, guid, text_a, text_b=None, label=None, logits=None, meta: Optional[Dict] = None, idx=-1,
             num_choices=1):
    """
    Create a new InputExample.

    :param guid: a unique textual identifier
    :param text_a: the sequence of text
    :param text_b: an optional, second sequence of text
    :param label: an optional label
    :param logits: an optional list of per-class logits
    :param meta: an optional dictionary to store arbitrary meta information
    :param idx: an optional numeric index
    :param num_choices: number of answer choices (for multiple-choice tasks)
    """
    self.guid = guid
    self.text_a = text_a
    self.text_b = text_b
    self.label = label
    self.logits = logits
    self.idx = idx
    self.num_choices = num_choices
    # Falsy meta (None or {}) is normalized to a fresh empty dict so
    # callers never share a mutable default.
    self.meta = meta if meta else {}
|
Create a new InputExample.
:param guid: a unique textual identifier
:param text_a: the sequence of text
:param text_b: an optional, second sequence of text
:param label: an optional label
:param logits: an optional list of per-class logits
:param meta: an optional dictionary to store arbitrary meta information
:param idx: an optional numeric index
|
__init__
|
python
|
THUDM/GLM
|
tasks/data_utils.py
|
https://github.com/THUDM/GLM/blob/master/tasks/data_utils.py
|
MIT
|
def build_sample(ids, types=None, paddings=None, positions=None, masks=None, label=None, unique_id=None, target=None,
                 logit_mask=None, segment_ids=None, prompt_ids=None):
    """Convert to numpy and return a sample consumed by the batch producer.

    Args:
        ids: token ids for the sample (required).
        types / paddings / positions / masks / target / logit_mask /
            segment_ids / prompt_ids: optional sequences; each is stored
            as an int64 numpy array only when provided.
        label: class label; stored as int when given. (Fix: previously
            ``int(label)`` raised TypeError whenever label was left at
            its ``None`` default.)
        unique_id: optional sample identifier, stored as-is under 'uid'.

    Returns:
        dict of numpy arrays plus scalar 'label' (and optional 'uid').
    """
    sample = {'text': np.array(ids, dtype=np.int64),
              'label': int(label) if label is not None else None}
    # (output key, input value) pairs for all optional int64 fields.
    optional_fields = [('types', types),
                       ('padding_mask', paddings),
                       ('position', positions),
                       ('mask', masks),
                       ('target', target),
                       ('logit_mask', logit_mask),
                       ('segment_id', segment_ids),
                       ('prompt_pos', prompt_ids)]
    for field_name, value in optional_fields:
        if value is not None:
            sample[field_name] = np.array(value, dtype=np.int64)
    if unique_id is not None:
        sample['uid'] = unique_id
    return sample
|
Convert to numpy and return a sample consumed by the batch producer.
|
build_sample
|
python
|
THUDM/GLM
|
tasks/data_utils.py
|
https://github.com/THUDM/GLM/blob/master/tasks/data_utils.py
|
MIT
|
def build_data_loader(dataset, batch_size, num_workers, drop_last, shuffle=True, only_rank0=False):
    """Data loader. Note that batch-size is the local (per GPU) batch-size."""
    # Pick the replica layout: a single replica when only rank 0 consumes the
    # data, otherwise the data-parallel group's rank and world size.
    if only_rank0:
        rank, world_size = 0, 1
    else:
        rank = mpu.get_data_parallel_rank()
        world_size = mpu.get_data_parallel_world_size()
    sampler = torch.utils.data.distributed.DistributedSampler(
        dataset, num_replicas=world_size, rank=rank, shuffle=shuffle)
    # The distributed sampler handles shuffling, so the loader itself must not.
    return torch.utils.data.DataLoader(
        dataset,
        batch_size=batch_size,
        sampler=sampler,
        shuffle=False,
        num_workers=num_workers,
        drop_last=drop_last,
        pin_memory=True,
        collate_fn=my_collate,
    )
|
Data loader. Note that batch-size is the local (per GPU) batch-size.
|
build_data_loader
|
python
|
THUDM/GLM
|
tasks/data_utils.py
|
https://github.com/THUDM/GLM/blob/master/tasks/data_utils.py
|
MIT
|
def multichoice_evaluate(model, dataloader, example_dict, args):
    """Calculate correct over total answers and return prediction if the
    `output_predictions` is true.

    Runs the model over `dataloader`, records (prediction, label) per example
    uid, all-gathers results across the data-parallel group, and returns
    (predictions, labels, examples) aligned with `example_dict` order.
    """
    model.eval()
    results = {}
    with torch.no_grad():
        # For all the batches in the dataset.
        for _, batch in enumerate(dataloader):
            # Run the model forward.
            data = process_batch(batch, args)
            # Assemble model inputs depending on the evaluation mode:
            # plain BERT classification, cloze-style (PET) evaluation with or
            # without fast decoding, or standard classification head.
            if args.pretrained_bert:
                tokens, types, labels_, attention_mask = data['text'], data['types'], data['label'], data[
                    'padding_mask']
                inputs = [tokens, types, attention_mask]
            elif args.cloze_eval:
                tokens, labels_, position_ids = data['text'], data['label'], data['position']
                attention_mask, target_ids, logit_mask = data['mask'], data['target'], data['logit_mask']
                if not args.fast_decode:
                    inputs = [tokens, position_ids, attention_mask, target_ids, logit_mask]
                    if args.continuous_prompt:
                        prompt_pos = data["prompt_pos"]
                        inputs.append(prompt_pos)
                else:
                    # Fast decode keeps encoder inputs fixed and scores the
                    # answer candidates with a separate decoder pass.
                    dec_input_ids, dec_position_ids, dec_attention_mask = data['dec_text'], data['dec_position'], data[
                        'dec_mask']
                    dec_target_ids, dec_logit_mask = data['dec_target'], data['dec_logit_mask']
                    inputs = [tokens, position_ids, attention_mask, dec_input_ids, dec_position_ids, dec_attention_mask,
                              dec_target_ids, dec_logit_mask]
            else:
                tokens, labels_, position_ids, attention_mask = data['text'], data['label'], data['position'], data[
                    'mask']
                inputs = [tokens, position_ids, attention_mask]
            # NOTE(review): segment_length is presumably a module-level
            # constant defined elsewhere in this file — confirm.
            # When the choice dimension is too large, evaluate it in chunks of
            # segment_length choices and stitch the logits back together.
            if len(inputs[0].shape) == 3 and inputs[0].size(1) > segment_length:
                logit_list = []
                for i in range((inputs[0].size(1) - 1) // segment_length + 1):
                    input_batch = [arg[:, i * segment_length: (i + 1) * segment_length] for arg in inputs]
                    if args.pretrained_bert:
                        logits = model(*input_batch)
                    else:
                        logits, *mems = model(*input_batch)
                    logit_list.append(logits)
                logits = torch.cat(logit_list, dim=1)
            elif args.cloze_eval and args.fast_decode:
                # Same chunking idea, but only decoder-side tensors are sliced.
                logit_list = []
                num_choices = inputs[3].size(1)
                for i in range((num_choices - 1) // segment_length + 1):
                    input_batch = inputs[:3] + [arg[:, i * segment_length: (i + 1) * segment_length] for arg in
                                                inputs[3:]]
                    logits, *mems = model(*input_batch)
                    logit_list.append(logits)
                logits = torch.cat(logit_list, dim=1)
            else:
                if args.pretrained_bert:
                    logits = model(*inputs)
                else:
                    logits, *mems = model(*inputs)
            # Multi-segment answers: sum per-token logits back into one score
            # per answer via the segment ids.
            if "segment_id" in data:
                from torch_scatter import scatter_sum
                if "loss_mask" in data:
                    logits = logits * data["loss_mask"]
                logits = scatter_sum(logits, data["segment_id"], dim=1)
            elif "loss_mask" in data:
                # Mask out invalid choices with a large negative value so they
                # never win the argmax.
                loss_mask = data["loss_mask"]
                logits = logits * loss_mask - 10000.0 * (1.0 - loss_mask)
            uid_list = batch['uid']
            if isinstance(uid_list, torch.Tensor):
                uid_list = uid_list.cpu().numpy().tolist()
            predicted = torch.argmax(logits, dim=-1).tolist()
            labels = labels_.tolist()
            # WSC labels are inverted relative to the model's choice ordering.
            if args.task.lower() == 'wsc':
                predicted = [1 if pred == 0 else 0 for pred in predicted]
            for uid, prediction, label in zip(uid_list, predicted, labels):
                results[uid] = (prediction, label)
    model.train()
    # Merge per-rank result dicts so every rank sees all predictions.
    torch.distributed.barrier()
    results_gathered = [None for _ in range(mpu.get_data_parallel_world_size())]
    torch.distributed.all_gather_object(results_gathered, results, group=mpu.get_data_parallel_group())
    results = {}
    for result in results_gathered:
        results.update(result)
    predictions, labels, examples = [], [], []
    for uid, example in example_dict.items():
        prediction, label = results[uid]
        predictions.append(prediction)
        labels.append(label)
        examples.append(example)
    torch.distributed.barrier()
    return predictions, labels, examples
|
Calculate correct over total answers and return prediction if the
`output_predictions` is true.
|
multichoice_evaluate
|
python
|
THUDM/GLM
|
tasks/eval_utils.py
|
https://github.com/THUDM/GLM/blob/master/tasks/eval_utils.py
|
MIT
|
def evaluate_and_print_results(data_loader, model, eval_metric, args):
    """Evaluate the model on `data_loader`, print a summary line, and return
    the score dictionary for the requested metric ('loss' or 'accuracy')."""
    output, _ = evaluate(model, data_loader, eval_metric, args)
    parts = []
    if eval_metric == 'loss':
        total_loss = output['loss']
        num_tokenized_tokens = data_loader.dataset.num_tokenized_tokens
        num_original_tokens = data_loader.dataset.num_original_tokens
        # Per-token loss and perplexity; the exponent is clamped at 20 to
        # avoid overflow for very bad models.
        val_loss = total_loss / (num_tokenized_tokens - 1)
        ppl = math.exp(min(20, val_loss))
        # Adjusted perplexity accounts for the tokenizer splitting words.
        token_ratio = (num_tokenized_tokens - 1) / (num_original_tokens - 1)
        adjusted_ppl = math.exp(min(20, val_loss * token_ratio))
        parts.append('avg loss: {:.4E} | '.format(val_loss))
        parts.append('ppl: {:.4E} | '.format(ppl))
        parts.append('adjusted ppl: {:.4E} | '.format(adjusted_ppl))
        parts.append('token ratio: {} |'.format(token_ratio))
        score_dict = {"avg loss": val_loss, "ppl": ppl, "adjusted ppl": adjusted_ppl}
    elif eval_metric == 'accuracy':
        num_correct = output['accuracy']
        num_examples = len(data_loader.dataset)
        acc = num_correct / num_examples * 100
        parts.append('number correct: {} | '.format(num_correct))
        parts.append('total examples: {} | '.format(num_examples))
        parts.append('avg accuracy: {:.2f}'.format(acc))
        score_dict = {"accuracy": acc}
    else:
        raise NotImplementedError('evaluation method for {} metric is not '
                                  'implemented yet.'.format(eval_metric))
    summary = ''.join(parts)
    border = '-' * (len(summary) + 1)
    print_rank_0(border)
    print_rank_0(summary)
    print_rank_0(border)
    return score_dict
|
Evaluate and print results on screen.
|
evaluate_and_print_results
|
python
|
THUDM/GLM
|
tasks/language_model/finetune.py
|
https://github.com/THUDM/GLM/blob/master/tasks/language_model/finetune.py
|
MIT
|
def evaluate(self, model, dataloader, example_dict, args):
    """Calculate correct over total answers and return prediction if the
    `output_predictions` is true.

    Performs beam-search generation for every batch, decodes the finalized
    beams to text, and all-gathers {uid: prediction} across ranks.  Returns
    (predictions, [], examples) aligned with `example_dict` order; the label
    list is intentionally empty for this generative task.
    """
    model.eval()
    local_predictions = {}
    print_rank_0("Distributed store created")
    with torch.no_grad():
        # For all the batches in the dataset.
        for idx, data in enumerate(dataloader):
            tokens, attention_mask, position_ids = process_batch(data, args)
            batch_size = tokens.size(0)
            beam_scorer = BeamSearchScorer(
                batch_size=batch_size,
                max_length=args.out_seq_length,
                num_beams=args.num_beams,
                device=tokens.device,
                length_penalty=args.length_penalty,
                do_early_stopping=False,
            )
            # Only beam 0 starts alive (score 0); the rest get -1e9 so the
            # first step effectively expands a single hypothesis per example.
            beam_scores = torch.zeros((batch_size, args.num_beams), dtype=torch.float, device=tokens.device)
            beam_scores[:, 1:] = -1e9
            beam_scores = beam_scores.view((batch_size * args.num_beams,))
            # Run the model forward.
            counter = 0
            context_length = tokens.size(1)
            while counter < args.tgt_seq_length:
                if counter == 0:
                    # First step: encode the full context once, then replicate
                    # logits/memories/positions across the beam dimension.
                    next_token_logits, *mems = model(tokens, position_ids, attention_mask, return_memory=True)
                    seq_length = next_token_logits.size(1)
                    next_token_logits = next_token_logits[:, -1]
                    next_token_logits = next_token_logits.unsqueeze(1).repeat(1, args.num_beams, 1).view(
                        batch_size * args.num_beams, -1)
                    mems = [mem.unsqueeze(1).repeat(1, args.num_beams, 1, 1).view(batch_size * args.num_beams,
                                                                                  seq_length, -1) for mem in mems]
                    # GLM 2-row positions: row 0 is the mask position in the
                    # context, row 1 the offset within the generated block.
                    position_ids = tokens.new_ones(batch_size, args.num_beams, 2, 1)
                    for i, text in enumerate(tokens.tolist()):
                        mask_pos = text.index(self.mask_token)
                        position_ids[i, :, 0] = mask_pos
                    position_ids = position_ids.reshape(batch_size * args.num_beams, 2, 1)
                    # From here on, `tokens` holds only the generated suffix.
                    tokens = tokens.new_zeros(batch_size * args.num_beams, 0)
                else:
                    # Incremental step: feed only the last token plus memories.
                    if not args.no_block_position:
                        position_ids[:, 1] = counter + 1
                    last_token = tokens[:, -1:]
                    if self.mask_pad_token:
                        cur_attention_mask = attention_mask[:, :, -1:, :].unsqueeze(1).expand(-1, args.num_beams, -1,
                                                                                              -1, -1).reshape(
                            batch_size * args.num_beams, 1, 1, context_length)
                        cur_attention_mask = torch.cat(
                            (cur_attention_mask, attention_mask.new_ones((batch_size * args.num_beams, 1, 1, counter))),
                            dim=-1)
                    else:
                        cur_attention_mask = tokens.new_zeros([batch_size * args.num_beams])
                    next_token_logits, *mems = model(last_token, position_ids, cur_attention_mask, *mems,
                                                     return_memory=True)
                    next_token_logits = next_token_logits[:, -1]
                next_token_logits = top_k_logits(next_token_logits, top_k=args.top_k, top_p=args.top_p)
                next_token_scores = F.log_softmax(next_token_logits, dim=-1)
                next_token_scores = self.processors(tokens, next_token_scores)
                # Cumulative beam score = running score + this token's logprob.
                next_token_scores = next_token_scores + beam_scores[:, None].expand_as(next_token_scores)
                vocab_size = next_token_scores.shape[-1]
                next_token_scores = next_token_scores.view(batch_size, args.num_beams * vocab_size)
                probs = F.softmax(next_token_scores, dim=-1)
                # Draw 2*num_beams candidates (topk or sampled) so the scorer
                # still has enough alternatives after EOS hypotheses retire.
                if args.select_topk:
                    _, next_tokens = torch.topk(probs, k=2 * args.num_beams, dim=-1, largest=True)
                else:
                    next_tokens = torch.multinomial(probs, num_samples=2 * args.num_beams)
                next_token_scores = torch.gather(next_token_scores, -1, next_tokens)
                next_token_scores, _indices = torch.sort(next_token_scores, descending=True, dim=1)
                next_tokens = torch.gather(next_tokens, -1, _indices)
                # Flattened index -> (source beam, token id).
                next_indices = next_tokens // vocab_size
                next_tokens = next_tokens % vocab_size
                # stateless
                beam_outputs = beam_scorer.process(
                    tokens,
                    next_token_scores,
                    next_tokens,
                    next_indices,
                    eos_token_id=self.end_token,
                    pad_token_id=self.pad_token
                )
                beam_scores = beam_outputs["next_beam_scores"]
                beam_next_tokens = beam_outputs["next_beam_tokens"]
                beam_idx = beam_outputs["next_beam_indices"]
                beam_next_tokens = beam_next_tokens.unsqueeze(-1)
                # Reorder sequences and memories to follow surviving beams.
                tokens = torch.cat([tokens[beam_idx, :], beam_next_tokens], dim=-1)
                mems = [mem[beam_idx] for mem in mems] if mems else []
                if beam_scorer.is_done:
                    break
                counter += 1
            tokens, _, scores = beam_scorer.finalize(tokens, beam_scores, next_tokens, next_indices,
                                                     eos_token_id=self.end_token, pad_token_id=self.pad_token)
            uid_list = data['uid']
            if isinstance(uid_list, torch.Tensor):
                uid_list = uid_list.cpu().numpy().tolist()
            predictions = []
            for i, text in enumerate(tokens.tolist()):
                # Strip end/pad tokens before decoding to text.
                text = [token for token in text if token not in [self.end_token, self.pad_token]]
                if args.task in ['squad', 'squad_v1'] and args.tokenizer_model_type.startswith('bert'):
                    uid = uid_list[i]
                    example = example_dict[uid]
                    text = squad_decode(example, text, self.tokenizer)
                else:
                    text = self.tokenizer.DecodeIds(text)
                predictions.append(text)
            for uid, prediction in zip(uid_list, predictions):
                local_predictions[uid] = prediction
            if (idx + 1) % args.log_interval == 0:
                print_rank_0(f"Iteration {idx + 1} / {len(dataloader)}")
    model.train()
    torch.distributed.barrier()
    print_rank_0("Evaluation completed")
    # Merge the per-rank prediction dicts so every rank has the full set.
    gathered_predictions = [None for i in range(torch.distributed.get_world_size())]
    torch.distributed.all_gather_object(gathered_predictions, local_predictions)
    gathered_predictions = {uid: pred for preds in gathered_predictions for uid, pred in preds.items() }
    predictions, examples, scores = [], [], []
    for uid, example in example_dict.items():
        prediction = gathered_predictions[uid]
        predictions.append(prediction)
        examples.append(example)
    torch.distributed.barrier()
    return predictions, [], examples
|
Calculate correct over total answers and return prediction if the
`output_predictions` is true.
|
evaluate
|
python
|
THUDM/GLM
|
tasks/seq2seq/evaluate.py
|
https://github.com/THUDM/GLM/blob/master/tasks/seq2seq/evaluate.py
|
MIT
|
def multirc_em(predictions, labels, examples: "List[InputExample]"):
    """Compute the exact match (EM) for a sequence of predictions and actual labels.

    A MultiRC question counts as an exact match only when every one of its
    answer predictions equals the corresponding gold label.

    Args:
        predictions: per-answer predicted labels, aligned with `examples`.
        labels: per-answer gold labels, aligned with `examples`.
        examples: examples whose meta carries the grouping key "question_idx".

    Returns:
        Fraction of questions answered exactly right (0.0 for empty input).
    """
    question_ids = [example.meta["question_idx"] for example in examples]
    unique_questions = set(question_ids)
    # Guard: the original divided by len(unique_questions) unconditionally,
    # raising ZeroDivisionError on an empty example list.
    if not unique_questions:
        return 0.0
    actuals_per_question = defaultdict(list)
    predictions_per_question = defaultdict(list)
    for qid, actual in zip(question_ids, labels):
        actuals_per_question[qid].append(actual)
    for qid, predicted in zip(question_ids, predictions):
        predictions_per_question[qid].append(predicted)
    em = sum(
        1 for qid in unique_questions
        if actuals_per_question[qid] == predictions_per_question[qid]
    )
    return em / len(unique_questions)
|
Compute the exact match (EM) for a sequence of predictions and actual labels
|
multirc_em
|
python
|
THUDM/GLM
|
tasks/superglue/evaluate.py
|
https://github.com/THUDM/GLM/blob/master/tasks/superglue/evaluate.py
|
MIT
|
def __init__(self, args, tokenizer, label_list, max_seq_length, pattern_id: int = 0, verbalizer_file: str = None,
             seed: int = 42, is_multi_token=False, max_segment_length=0, fast_decode: bool = False, split='train',
             num_prompt_tokens=0):
    """
    Create a new PVP (pattern-verbalizer pair).
    :param args: the command-line args namespace
    :param tokenizer: the tokenizer
    :param label_list: the list of labels
    :param max_seq_length: the maximum length of the sequence
    :param pattern_id: the pattern id to use
    :param verbalizer_file: optional file to load a custom verbalizer from
    :param seed: a seed to be used for generating random numbers if necessary
    :param is_multi_token: if the verbalizers contain multiple tokens
    :param max_segment_length: maximum answer segment length (0 disables segmenting)
    :param fast_decode: whether to use the fast decode mode for multi-token tasks
    :param split: dataset split this PVP serves (e.g. 'train')
    :param num_prompt_tokens: number of continuous prompt tokens
    """
    self.args = args
    self.tokenizer = tokenizer
    self.label_list = label_list
    self.max_seq_length = max_seq_length
    self.pattern_id = pattern_id
    self.num_prompt_tokens = num_prompt_tokens
    # Dedicated RNG so shuffling/choices are reproducible per PVP instance.
    self.rng = random.Random(seed)
    # Counter of examples that had to be truncated during encoding.
    self.num_truncated = 0
    self.fast_decode = fast_decode
    self.split = split
    self.max_dec_seq_length = 16
    self._is_multi_token = is_multi_token
    self.max_segment_length = max_segment_length
    # Flags copied from args for convenient access in encode().
    self.task_mask = args.task_mask
    self.continuous_prompt = args.continuous_prompt
    self.prefix_prompt = args.prefix_prompt
    if self.continuous_prompt:
        print_rank_0(f"Prompt tokens in pvp {self.num_prompt_tokens} spell length {self.spell_length}")
    # A verbalizer file overrides the class's default verbalize() method.
    if verbalizer_file:
        self.verbalize = PVP._load_verbalizer_from_file(verbalizer_file, self.pattern_id)
|
Create a new PVP.
:param args: the args
:param tokenizer: the tokenizer
:param label_list: the list of labels
:param max_seq_length: the maximum length of the sequence
:param pattern_id: the pattern id to use
:param seed: a seed to be used for generating random numbers if necessary
:param is_multi_token: if the verbalizers contain multiple tokens
:param fast_decode: whether to use the fast decode mode for multi-token tasks
:param continuous_prompt: whether to use continuous prompt optimization
|
__init__
|
python
|
THUDM/GLM
|
tasks/superglue/pvp.py
|
https://github.com/THUDM/GLM/blob/master/tasks/superglue/pvp.py
|
MIT
|
def encode(self, example: InputExample, priming: bool = False, labeled: bool = False):
    """
    Encode an input example using this pattern-verbalizer pair.
    :param example: the input example to encode
    :param priming: whether to use this example for priming
    :param labeled: if ``priming=True``, whether the label should be appended to this example
    :return: A tuple, consisting of a list of input ids and a list of token type ids
    """
    if not priming:
        assert not labeled, "'labeled' can only be set to true if 'priming' is also set to true"
    tokenizer = self.tokenizer
    raw_parts_a, raw_parts_b = self.get_parts(example)
    # Normalize each part to a (content, shortenable) pair.
    raw_parts_a = [x if isinstance(x, tuple) else (x, False) for x in raw_parts_a]
    # Sentinel id used as a placeholder for continuous-prompt tokens; it is
    # outside the vocabulary and replaced with 0 after positions are recorded.
    prompt_id = tokenizer.num_tokens
    def encode_input(raw_parts):
        # Turn strings into token ids and ints into runs of prompt sentinels.
        parts = []
        for x, s in raw_parts:
            if isinstance(x, str):
                x = tokenizer.EncodeAsIds(x)
            elif isinstance(x, int):
                x = [prompt_id] * x
            else:
                pass
            parts.append((x, s))
        return parts
    parts_a = encode_input(raw_parts_a)
    if self.prefix_prompt > 0:
        parts_a = [([prompt_id] * self.prefix_prompt, False)] + parts_a
    parts_b = None
    if raw_parts_b:
        raw_parts_b = [x if isinstance(x, tuple) else (x, False) for x in raw_parts_b]
        parts_b = encode_input(raw_parts_b)
    if self.is_multi_token:
        # Multi-token verbalizers: build one candidate sequence per answer.
        answers = self.get_answers(example)
        if example.label is not None:
            label = self.label_list.index(example.label)
        else:
            label = 0
        if not self.fast_decode:
            ids_list, positions_list, sep_list, mask_list, target_list, prompt_list = [], [], [], [], [], []
            segment_id_list = []
            if priming:
                # Priming: splice the gold answer into the mask position and
                # return raw input ids only.
                answer = answers[label]
                answer_ids = get_verbalization_ids(answer, tokenizer, force_single_token=False)
                self.num_truncated += self.truncate(parts_a, parts_b, answer_ids, max_length=self.max_seq_length)
                tokens_a = [token_id for part, _ in parts_a for token_id in part]
                tokens_b = [token_id for part, _ in parts_b for token_id in part] if parts_b else None
                input_ids = tokens_a
                if tokens_b:
                    input_ids += tokens_b
                if labeled:
                    mask_idx = input_ids.index(self.mask_id)
                    input_ids = input_ids[:mask_idx] + answer_ids + input_ids[mask_idx + 1:]
                return input_ids
            else:
                for idx, answer in enumerate(answers):
                    # Deep-copy the pattern parts so per-answer truncation
                    # does not leak across candidates.
                    this_parts_a, this_parts_b = copy.deepcopy(parts_a), copy.deepcopy(parts_b)
                    answer_ids = get_verbalization_ids(answer, tokenizer, force_single_token=False)
                    answer_ids = answer_ids + [tokenizer.get_command('eop').Id]
                    self.num_truncated += self.truncate(this_parts_a, this_parts_b, answer_ids,
                                                        max_length=self.max_seq_length)
                    tokens_a = [token_id for part, _ in this_parts_a for token_id in part]
                    tokens_b = [token_id for part, _ in this_parts_b for token_id in part] if parts_b else None
                    # Long answers are split into fixed-size segments, each
                    # scored separately and later re-aggregated by segment id.
                    if self.max_segment_length > 0:
                        num_segments = (len(answer_ids) - 1) // self.max_segment_length + 1
                        segments = [
                            answer_ids[index * self.max_segment_length: (index + 1) * self.max_segment_length]
                            for
                            index in range(num_segments)]
                        segment_id_list += [idx] * len(segments)
                    else:
                        segments = [answer_ids]
                    for segment in segments:
                        data = build_input_from_ids(tokens_a, tokens_b, segment, self.max_seq_length,
                                                    self.tokenizer,
                                                    args=self.args, add_cls=True, add_sep=False, add_piece=True,
                                                    mask_id=self.mask_id)
                        ids, types, paddings, position_ids, sep, target_ids, loss_masks = data
                        # Record prompt positions, then neutralize the
                        # out-of-vocab sentinel ids.  (The comprehension's
                        # `idx` shadows the answer index only inside the
                        # comprehension's own scope.)
                        prompt_pos = [idx for idx, token in enumerate(ids) if token == prompt_id]
                        ids = [idx if idx != prompt_id else 0 for idx in ids]
                        prompt_list.append(prompt_pos)
                        ids_list.append(ids)
                        positions_list.append(position_ids)
                        sep_list.append(sep)
                        target_list.append(target_ids)
                        mask_list.append(loss_masks)
                        # Feed already-consumed segments back into the context
                        # so the next segment conditions on them.
                        if self.mask in tokens_a:
                            mask_pos = tokens_a.index(self.mask)
                            tokens_a = tokens_a[:mask_pos] + segment + tokens_a[mask_pos:]
                        else:
                            mask_pos = tokens_b.index(self.mask)
                            tokens_b = tokens_b[:mask_pos] + segment + tokens_b[mask_pos:]
                segment_id_list = segment_id_list if segment_id_list else None
                sample = build_sample(ids_list, positions=positions_list, masks=sep_list, label=label,
                                      logit_mask=mask_list, target=target_list,
                                      unique_id=example.guid, segment_ids=segment_id_list, prompt_ids=prompt_list)
                return sample
        else:
            # Fast decode: encode the context once, then build one decoder
            # input per candidate answer.
            this_parts_a, this_parts_b = copy.deepcopy(parts_a), copy.deepcopy(parts_b)
            self.num_truncated += self.truncate(this_parts_a, this_parts_b, None, max_length=self.max_seq_length)
            tokens_a = [token_id for part, _ in this_parts_a for token_id in part]
            tokens_b = [token_id for part, _ in this_parts_b for token_id in part] if parts_b else None
            data = build_input_from_ids(tokens_a, tokens_b, None, self.max_seq_length, self.tokenizer,
                                        args=self.args, add_cls=True, add_sep=False, add_piece=False)
            ids, types, paddings, position_ids, sep, target_ids, loss_masks = data
            sample = build_sample(ids, positions=position_ids, masks=sep, label=label, unique_id=example.guid)
            ids_list, positions_list, mask_list, target_list, logit_mask_list = [], [], [], [], []
            for answer in answers:
                answer_ids = get_verbalization_ids(answer, tokenizer, force_single_token=False)
                answer_ids = answer_ids + [tokenizer.get_command('eop').Id]
                answer_ids = answer_ids[:self.max_dec_seq_length]
                data = build_decoder_input(ids, answer_ids, self.max_seq_length, self.max_dec_seq_length, tokenizer)
                dec_ids, _, _, dec_position_ids, _, dec_target_ids, dec_loss_masks = data
                ids_list.append(dec_ids)
                positions_list.append(dec_position_ids)
                mask_list.append(sep)
                target_list.append(dec_target_ids)
                logit_mask_list.append(dec_loss_masks)
            sample = build_decoder_sample(sample, ids_list, positions_list, mask_list, target_list, logit_mask_list)
            return sample
    else:
        # Single-token verbalizers: one encoded sequence, candidates are
        # scored at the mask position via target verbalizer ids.
        self.num_truncated += self.truncate(parts_a, parts_b, [], max_length=self.max_seq_length)
        tokens_a = [token_id for part, _ in parts_a for token_id in part]
        tokens_b = [token_id for part, _ in parts_b for token_id in part] if parts_b else None
        if priming:
            input_ids = tokens_a
            if tokens_b:
                input_ids += tokens_b
            if labeled:
                mask_idx = input_ids.index(self.mask_id)
                verbalizer = self.verbalize(example.label)
                assert len(verbalizer) == 1, 'priming only supports one verbalization per label'
                verbalizer = verbalizer[0]
                verbalizer_id = get_verbalization_ids(verbalizer, self.tokenizer, force_single_token=True)
                input_ids[mask_idx] = verbalizer_id
            return input_ids
        data = build_input_from_ids(tokens_a, tokens_b, None, self.max_seq_length, self.tokenizer, args=self.args,
                                    add_cls=True, add_sep=False, add_piece=True)
        ids, types, paddings, position_ids, sep, target_ids, loss_masks = data
        prompt_pos = [idx for idx, token in enumerate(ids) if token == prompt_id]
        ids = [token if token != prompt_id else 0 for token in ids]
        target_ids = self.get_verbalizer_ids()
        if example.label is not None:
            label = self.label_list.index(example.label)
        else:
            label = 0
        sample = build_sample(ids=ids, positions=position_ids, target=target_ids, masks=sep, logit_mask=loss_masks,
                              label=label, unique_id=example.guid, prompt_ids=prompt_pos)
        return sample
|
Encode an input example using this pattern-verbalizer pair.
:param example: the input example to encode
:param priming: whether to use this example for priming
:param labeled: if ``priming=True``, whether the label should be appended to this example
:return: A tuple, consisting of a list of input ids and a list of token type ids
|
encode
|
python
|
THUDM/GLM
|
tasks/superglue/pvp.py
|
https://github.com/THUDM/GLM/blob/master/tasks/superglue/pvp.py
|
MIT
|
def truncate(self, parts_a: List[Tuple[List[int], bool]], parts_b: List[Tuple[List[int], bool]], answer: List[int],
             max_length: int):
    """Truncate two sequences of text to a predefined total maximum length.

    Returns True when any truncation was necessary, False otherwise.
    """
    # Total length including the answer and the special tokens to be added.
    total_len = self._seq_length(parts_a) + self._seq_length(parts_b)
    if answer:
        total_len += len(answer)
    total_len += num_special_tokens_to_add(parts_a, parts_b, answer, add_cls=True, add_sep=False, add_piece=True)
    excess = total_len - max_length
    if excess <= 0:
        return False
    # Repeatedly shorten whichever side currently has more shortenable tokens.
    for _ in range(excess):
        shortenable_a = self._seq_length(parts_a, only_shortenable=True)
        shortenable_b = self._seq_length(parts_b, only_shortenable=True)
        if shortenable_a > shortenable_b:
            self._remove_last(parts_a)
        else:
            self._remove_last(parts_b)
    return True
|
Truncate two sequences of text to a predefined total maximum length
|
truncate
|
python
|
THUDM/GLM
|
tasks/superglue/pvp.py
|
https://github.com/THUDM/GLM/blob/master/tasks/superglue/pvp.py
|
MIT
|
def get_verbalization_ids(word: str, tokenizer, force_single_token: bool) -> Union[int, List[int]]:
    """
    Get the token ids corresponding to a verbalization.
    :param word: the verbalization
    :param tokenizer: the tokenizer to use
    :param force_single_token: whether it should be enforced that the verbalization corresponds to a single token.
           If set to true, this method returns a single int instead of a list and throws an error if the word
           corresponds to multiple tokens.
    :return: either the list of token ids or the single token id corresponding to this word
    """
    token_ids = tokenizer.EncodeAsIds(word).tokenization
    if not force_single_token:
        return token_ids
    # Single-token mode: the verbalizer must map to exactly one ordinary token.
    assert len(token_ids) == 1, \
        f'Verbalization "{word}" does not correspond to a single token, got {tokenizer.DecodeIds(token_ids)}'
    single_id = token_ids[0]
    assert single_id not in tokenizer.command_id_map, \
        f'Verbalization {word} is mapped to a special token {tokenizer.IdToToken(single_id)}'
    return single_id
|
Get the token ids corresponding to a verbalization
:param word: the verbalization
:param tokenizer: the tokenizer to use
:param force_single_token: whether it should be enforced that the verbalization corresponds to a single token.
If set to true, this method returns a single int instead of a list and throws an error if the word
corresponds to multiple tokens.
:return: either the list of token ids or the single token id corresponding to this word
|
get_verbalization_ids
|
python
|
THUDM/GLM
|
tasks/superglue/pvp.py
|
https://github.com/THUDM/GLM/blob/master/tasks/superglue/pvp.py
|
MIT
|
def search_github_code_byapi(token: str, peer_page: int = 50, page: int = 1, excludes: list = None) -> list[str]:
    """Search GitHub code via the REST API for subscription-token links.

    Equivalent curl:
        curl -Ls -o response.json -H "Authorization: Bearer <token>" \
            "https://api.github.com/search/code?q=%22%2Fapi%2Fv1%2Fclient%2Fsubscribe%3Ftoken%3D%22&sort=indexed&order=desc&per_page=30&page=1"

    Args:
        token: GitHub personal access token; blank token returns [].
        peer_page: results per page, clamped to [1, 100] (API limit).
        page: 1-based page index, clamped to >= 1.
        excludes: repository-name fragments to filter out (default: none).

    Returns:
        De-duplicated list of matching file URLs; [] on any failure.
    """
    if utils.isblank(token):
        return []
    # Fixed: mutable default argument (excludes=[]) replaced with None.
    excludes = list(set(excludes or []))
    peer_page, page = min(max(peer_page, 1), 100), max(1, page)
    url = f"https://api.github.com/search/code?q=%22%2Fapi%2Fv1%2Fclient%2Fsubscribe%3Ftoken%3D%22&sort=indexed&order=desc&per_page={peer_page}&page={page}"
    headers = {
        "Accept": "application/vnd.github+json",
        "Authorization": f"Bearer {token}",
        # "X-GitHub-Api-Version": "2022-11-28"
    }
    content, links = utils.http_get(url=url, headers=headers), set()
    if utils.isblank(content):
        return []
    try:
        items = json.loads(content).get("items", [])
        for item in items:
            # Fixed: isinstance() instead of type() comparison.
            if not item or not isinstance(item, dict):
                continue
            link = item.get("html_url", "")
            if utils.isblank(link):
                continue
            reponame = item.get("repository", {}).get("full_name", "") + "/"
            if not intercept(text=reponame, excludes=excludes):
                links.add(link)
        return list(links)
    # Fixed: narrowed the bare except so SystemExit/KeyboardInterrupt pass through.
    except Exception:
        return []
|
curl -Ls -o response.json -H "Authorization: Bearer <token>" https://api.github.com/search/code?q=%22%2Fapi%2Fv1%2Fclient%2Fsubscribe%3Ftoken%3D%22&sort=indexed&order=desc&per_page=30&page=1
|
search_github_code_byapi
|
python
|
wzdnzd/aggregator
|
subscribe/crawl.py
|
https://github.com/wzdnzd/aggregator/blob/master/subscribe/crawl.py
|
Apache-2.0
|
def download_mmdb(repo: str, target: str, filepath: str, retry: int = 3) -> bool:
    """
    Download GeoLite2-City.mmdb from a GitHub release.

    Args:
        repo: GitHub repository in "owner/name" form.
        target: asset file name to look for in the latest release.
        filepath: local directory to save the asset into.
        retry: retry count forwarded to download().

    Returns:
        True if the asset was found and downloaded successfully.
    """
    repo = utils.trim(text=repo)
    if not repo or len(repo.split("/", maxsplit=1)) != 2:
        logger.error(f"invalid github repo name: {repo}")
        return False
    target = utils.trim(text=target)
    if not target:
        logger.error("invalid download target")
        return False
    # extract download url from github release page
    release_api = f"https://api.github.com/repos/{repo}/releases/latest?per_page=1"
    assets, content = None, utils.http_get(url=release_api)
    try:
        data = json.loads(content)
        assets = data.get("assets", [])
    # Fixed: narrowed the bare except; a failed parse leaves assets=None,
    # which the next check turns into a clean False return.
    except Exception:
        logger.error(f"failed download {target} due to cannot extract download url through Github API")
    if not assets or not isinstance(assets, list):
        logger.error(f"no assets found for {target} in github release")
        return False
    # Find the asset whose name matches the requested target exactly.
    download_url = ""
    for asset in assets:
        if asset.get("name", "") == target:
            download_url = asset.get("browser_download_url", "")
            break
    if not download_url:
        logger.error(f"no download url found for {target} in github release")
        return False
    return download(download_url, filepath, target, retry)
|
Download GeoLite2-City.mmdb from github release
|
download_mmdb
|
python
|
wzdnzd/aggregator
|
subscribe/location.py
|
https://github.com/wzdnzd/aggregator/blob/master/subscribe/location.py
|
Apache-2.0
|
def download(url: str, filepath: str, filename: str, retry: int = 3) -> bool:
    """Download file from url to filepath with filename.

    Retries up to ``retry`` additional times on any download failure.

    Returns:
        True on success; False on invalid arguments or exhausted retries.
    """
    if retry < 0:
        # Fixed typo in log message ("archieved" -> "reached").
        logger.error(f"reached max retry count for download, url: {url}")
        return False
    url = utils.trim(text=url)
    if not url:
        logger.error("invalid download url")
        return False
    filepath = utils.trim(text=filepath)
    if not filepath:
        logger.error(f"invalid save filepath, url: {url}")
        return False
    filename = utils.trim(text=filename)
    if not filename:
        logger.error(f"invalid save filename, url: {url}")
        return False
    # exist_ok avoids a race between the existence check and creation.
    os.makedirs(filepath, exist_ok=True)
    fullpath = os.path.join(filepath, filename)
    if os.path.exists(fullpath) and os.path.isfile(fullpath):
        os.remove(fullpath)
    # download target file from github release to fullpath
    try:
        urllib.request.urlretrieve(url=url, filename=fullpath)
    except Exception:
        # Best-effort retry on any network/filesystem error.
        return download(url, filepath, filename, retry - 1)
    # Fixed: log the actual filename instead of the mangled "(unknown)".
    logger.info(f"download file {filename} to {fullpath} success")
    return True
|
Download file from url to filepath with filename
|
download
|
python
|
wzdnzd/aggregator
|
subscribe/location.py
|
https://github.com/wzdnzd/aggregator/blob/master/subscribe/location.py
|
Apache-2.0
|
def query_ip_country(ip: str, reader: database.Reader) -> str:
    """
    Query country information for an IP address using mmdb database
    Args:
        ip: The IP address to query
        reader: The mmdb database reader
    Returns:
        The country name in Chinese
    """
    if not ip or not reader:
        return ""
    try:
        # fake ip
        if ip.startswith("198.18.0."):
            logger.warning("cannot get geolocation because IP address is faked")
            return ""
        record = reader.country(ip)
        # Prefer the Chinese localized name; otherwise map the ISO code.
        name = record.country.names.get("zh-CN", "")
        iso = record.country.iso_code
        if not name and iso:
            name = ISO_TO_CHINESE.get(iso, iso)
        # Special handling for well-known IPs
        if not name:
            if ip in ("1.1.1.1", "1.0.0.1"):
                name = "Cloudflare"
            elif ip.startswith(("8.8.8.", "8.8.4.")):
                name = "Google"
        return name
    except Exception as e:
        logger.error(f"query ip country failed, ip: {ip}, error: {str(e)}")
        return ""
|
Query country information for an IP address using mmdb database
Args:
ip: The IP address to query
reader: The mmdb database reader
Returns:
The country name in Chinese
|
query_ip_country
|
python
|
wzdnzd/aggregator
|
subscribe/location.py
|
https://github.com/wzdnzd/aggregator/blob/master/subscribe/location.py
|
Apache-2.0
|
def _add_ports_from_columns(line: str, ports: set) -> None:
    """Collect the port of every host:port column of *line* into *ports*."""
    for part in line.split():
        if ":" in part:
            try:
                ports.add(int(part.split(":")[-1]))
            except ValueError:
                pass


def _collect_windows_ports(ports: set) -> None:
    """Parse ``netstat -an`` output on Windows into *ports*."""
    try:
        # Use 'cp437' encoding to handle Windows command line output
        output = subprocess.check_output("netstat -an", shell=True).decode("cp437", errors="replace")
        for line in output.split("\n"):
            if "LISTENING" in line:
                parts = line.split()
                # Second column holds the local address, e.g. 0.0.0.0:135
                if len(parts) >= 2 and ":" in parts[1]:
                    try:
                        ports.add(int(parts[1].split(":")[-1]))
                    except ValueError:
                        pass
    except Exception as e:
        logger.warning(f"Windows netstat command failed: {str(e)}")


def _collect_macos_ports(ports: set) -> None:
    """Parse ``lsof`` output on macOS into *ports*."""
    try:
        output = subprocess.check_output("lsof -i -P -n | grep LISTEN", shell=True).decode(
            "utf-8", errors="replace"
        )
        for line in output.split("\n"):
            if ":" in line:
                try:
                    ports.add(int(line.split(":")[-1].split(" ")[0]))
                except (ValueError, IndexError):
                    pass
    except Exception as e:
        logger.warning(f"macOS lsof command failed: {str(e)}")


def _collect_linux_ports(ports: set) -> None:
    """Parse ``ss`` (preferred) or ``netstat`` output on Linux into *ports*."""
    # Try using ss command (newer Linux systems)
    try:
        output = subprocess.check_output("ss -tuln", shell=True).decode("utf-8", errors="replace")
        for line in output.split("\n"):
            if "LISTEN" in line:
                _add_ports_from_columns(line, ports)
    except Exception as e:
        logger.warning(f"Linux ss command failed, trying netstat: {str(e)}")
        # Fall back to netstat command (older Linux systems)
        try:
            output = subprocess.check_output("netstat -tuln", shell=True).decode("utf-8", errors="replace")
            for line in output.split("\n"):
                if "LISTEN" in line:
                    _add_ports_from_columns(line, ports)
        except Exception as e:
            logger.warning(f"Linux netstat command also failed: {str(e)}")


def get_listening_ports() -> set:
    """Get the set of listening ports in the system, cross-platform compatible.

    Decomposed into per-OS helpers; failures are logged and yield the ports
    collected so far (possibly empty), never an exception.
    """
    listening_ports = set()
    try:
        if os.name == "nt":
            _collect_windows_ports(listening_ports)
        elif sys.platform == "darwin":
            _collect_macos_ports(listening_ports)
        else:
            _collect_linux_ports(listening_ports)
    except Exception as e:
        logger.warning(f"Failed to get listening ports: {str(e)}")
    return listening_ports
|
Get the set of listening ports in the system, cross-platform compatible
|
get_listening_ports
|
python
|
wzdnzd/aggregator
|
subscribe/location.py
|
https://github.com/wzdnzd/aggregator/blob/master/subscribe/location.py
|
Apache-2.0
|
def scan_ports_batch(start_port: int, count: int = 100) -> dict:
    """Batch-scan port statuses and return a mapping of port -> in-use flag."""
    global _PORT_STATUS_CACHE, _AVAILABLE_PORTS

    # Only probe ports whose status is not cached yet.
    pending = [port for port in range(start_port, start_port + count) if port not in _PORT_STATUS_CACHE]
    if not pending:
        # Everything in the window is already cached; answer straight from the cache.
        return {port: _PORT_STATUS_CACHE.get(port, True) for port in range(start_port, start_port + count)}

    scanned = {}
    try:
        # One system-wide snapshot of listening ports is cheaper than per-port probes.
        active = get_listening_ports()
        for port in pending:
            occupied = port in active
            scanned[port] = occupied
            _PORT_STATUS_CACHE[port] = occupied
            if not occupied:
                _AVAILABLE_PORTS.add(port)
    except Exception as e:
        logger.warning(f"Batch port scanning failed, falling back to individual port checks: {str(e)}")
        # Degrade gracefully: probe each pending port individually.
        for port in pending:
            occupied = check_single_port(port)
            scanned[port] = occupied
            _PORT_STATUS_CACHE[port] = occupied
            if not occupied:
                _AVAILABLE_PORTS.add(port)

    # Cached entries first, then overlay the freshly scanned results.
    merged = {
        port: _PORT_STATUS_CACHE.get(port, True)
        for port in range(start_port, start_port + count)
        if port in _PORT_STATUS_CACHE
    }
    merged.update(scanned)
    return merged
|
Batch scan port statuses, return a dictionary of port statuses
|
scan_ports_batch
|
python
|
wzdnzd/aggregator
|
subscribe/location.py
|
https://github.com/wzdnzd/aggregator/blob/master/subscribe/location.py
|
Apache-2.0
|
def check_single_port(port: int) -> bool:
    """Check whether *port* is listening on the local host.

    Attempts a TCP connection to the IPv4 loopback first, then the IPv6
    loopback; a successful connect on either family means something is
    listening there.

    Args:
        port: TCP port number to probe.

    Returns:
        True if a connection succeeds, False otherwise (including when the
        probe itself errors out, e.g. IPv6 being unavailable).
    """
    # Was two near-identical blocks with bare `except:` and sockets leaked on
    # error paths; `with` guarantees closure and OSError covers socket errors
    # without swallowing KeyboardInterrupt/SystemExit.
    for family, host in ((socket.AF_INET, "127.0.0.1"), (socket.AF_INET6, "::1")):
        try:
            with socket.socket(family, socket.SOCK_STREAM) as sock:
                sock.settimeout(0.2)
                if sock.connect_ex((host, port)) == 0:
                    return True
        except OSError:
            # Treat probe errors as "not listening" on this address family.
            continue
    return False
|
Helper function for checking a single port, checks if the port is listening
|
check_single_port
|
python
|
wzdnzd/aggregator
|
subscribe/location.py
|
https://github.com/wzdnzd/aggregator/blob/master/subscribe/location.py
|
Apache-2.0
|
def is_port_in_use(port: int) -> bool:
    """Check if a port is in use, consulting the module-level caches first."""
    global _PORT_STATUS_CACHE, _AVAILABLE_PORTS
    # Fast path: port was already confirmed free.
    if port in _AVAILABLE_PORTS:
        return False
    # Fast path: a cached status exists for this port.
    cached = _PORT_STATUS_CACHE.get(port)
    if cached is not None:
        return cached
    # Slow path: probe the port and remember the outcome.
    occupied = check_single_port(port)
    _PORT_STATUS_CACHE[port] = occupied
    if not occupied:
        _AVAILABLE_PORTS.add(port)
    return occupied
|
Check if a port is in use (using cache)
|
is_port_in_use
|
python
|
wzdnzd/aggregator
|
subscribe/location.py
|
https://github.com/wzdnzd/aggregator/blob/master/subscribe/location.py
|
Apache-2.0
|
def generate_mihomo_config(proxies: list[dict]) -> tuple[dict, dict]:
    """Generate a mihomo configuration for the given proxies.

    Returns the config dict (with one HTTP listener per proxy) and a mapping
    of proxy name -> assigned local listener port.
    """
    # Skeleton configuration; listeners are appended per proxy below.
    config = {
        "mixed-port": 7890,
        "allow-lan": True,
        "mode": "global",
        "log-level": "error",
        "proxies": proxies,
        "dns": {
            "enable": True,
            "enhanced-mode": "fake-ip",
            "fake-ip-range": "198.18.0.1/16",
            "default-nameserver": ["114.114.114.114", "223.5.5.5", "8.8.8.8"],
            "nameserver": ["https://doh.pub/dns-query"],
        },
        "listeners": [],
    }

    # Maps each proxy's name to its assigned local port.
    records = dict()
    if not proxies:
        return config, records

    # Pre-scan a generous window of ports; batch probing is far cheaper.
    start_port = 32001
    port_count = len(proxies) * 2
    status = scan_ports_batch(start_port, port_count)
    free_ports = [port for port, used in status.items() if not used]

    # Widen the scan if the first window did not yield enough free ports.
    if len(free_ports) < len(proxies):
        extra = scan_ports_batch(start_port + port_count, port_count * 2)
        free_ports.extend(port for port, used in extra.items() if not used)

    for index, proxy in enumerate(proxies):
        if index < len(free_ports):
            port = free_ports[index]
        else:
            # Pre-scanned pool exhausted; probe sequentially as a last resort.
            port = start_port + port_count + index
            max_attempts = 1000
            attempts = 0
            while is_port_in_use(port) and attempts < max_attempts:
                port += 1
                attempts += 1
            if attempts >= max_attempts:
                logger.warning(
                    f"Could not find an available port for proxy {proxy['name']} after {max_attempts} attempts"
                )
                continue
        config["listeners"].append(
            {
                "name": f"http-{index}",
                "type": "http",
                "port": port,
                "proxy": proxy["name"],
                "listen": "127.0.0.1",
                "users": [],
            }
        )
        records[proxy["name"]] = port
    return config, records
|
Generate mihomo configuration for the given proxies
|
generate_mihomo_config
|
python
|
wzdnzd/aggregator
|
subscribe/location.py
|
https://github.com/wzdnzd/aggregator/blob/master/subscribe/location.py
|
Apache-2.0
|
def make_proxy_request(port: int, url: str, max_retries: int = 5, timeout: int = 10) -> tuple[bool, dict]:
    """Fetch *url* through the local HTTP proxy on *port* and parse the JSON reply.

    Args:
        port: Local proxy port to tunnel the request through.
        url: The URL to request.
        max_retries: Upper bound on attempts (at least one is always made).
        timeout: Per-request timeout in seconds.

    Returns:
        A tuple of (success, data) where success flags whether a 200 JSON
        response was read and data is the decoded payload (empty dict on
        failure).
    """
    if not port:
        logger.warning("No port provided for proxy")
        return False, {}

    # Route both http and https traffic through the local proxy endpoint.
    endpoint = f"http://127.0.0.1:{port}"
    handler = urllib.request.ProxyHandler({"http": endpoint, "https": endpoint})
    opener = urllib.request.build_opener(handler)
    opener.addheaders = [
        ("User-Agent", utils.USER_AGENT),
        ("Accept", "application/json"),
        ("Connection", "close"),
    ]

    success, data = False, {}
    for attempt in range(max(max_retries, 1)):
        # Exponential backoff with jitter before every retry, capped at 6s.
        if attempt > 0:
            time.sleep(min(2**attempt * random.uniform(0.5, 1.5), 6))
        try:
            response = opener.open(url, timeout=timeout)
            if response.getcode() == 200:
                data = json.loads(response.read().decode("utf-8"))
                success = True
                break
        except Exception as e:
            logger.warning(f"Attempt {attempt+1} failed to request {url} through proxy port {port}: {str(e)}")
    return success, data
|
Make an HTTP request through a proxy and return the response
Args:
port: The port of the proxy
url: The URL to request
max_retries: Maximum number of retry attempts
timeout: Timeout for the request in seconds
Returns:
A tuple of (success, data) where:
- success: Whether the request was successful
- data: The parsed JSON data (empty dict if request failed)
|
make_proxy_request
|
python
|
wzdnzd/aggregator
|
subscribe/location.py
|
https://github.com/wzdnzd/aggregator/blob/master/subscribe/location.py
|
Apache-2.0
|
def get_ipv4(port: int, max_retries: int = 5) -> str:
    """Resolve the proxy's public IPv4 address via https://api.ipify.org?format=json.

    Args:
        port: Local proxy port to tunnel the request through.
        max_retries: Maximum number of request attempts.

    Returns:
        The public IPv4 address, or an empty string on failure.
    """
    if not port:
        logger.warning("No port provided for proxy")
        return ""
    ok, payload = make_proxy_request(port=port, url="https://api.ipify.org?format=json", max_retries=max_retries)
    if not ok:
        return ""
    return payload.get("ip", "")
|
Get the IPv4 address by accessing https://api.ipify.org?format=json through a proxy
Args:
port: The port of the proxy
max_retries: Maximum number of retry attempts
Returns:
The IPv4 address or empty string if failed
|
get_ipv4
|
python
|
wzdnzd/aggregator
|
subscribe/location.py
|
https://github.com/wzdnzd/aggregator/blob/master/subscribe/location.py
|
Apache-2.0
|
def locate_by_ipinfo(name: str, port: int, reader: database.Reader = None) -> dict:
    """Determine the exit country of a single proxy by requesting through it."""
    result = {"name": name, "country": ""}
    if not port:
        logger.warning(f"No port found for proxy {name}")
        return result

    if reader:
        # Offline path: resolve the exit IP and query the local GeoIP database.
        ip = get_ipv4(port=port, max_retries=2)
        if ip:
            country = query_ip_country(ip, reader)
            if country:
                result["country"] = country
        return result

    # Online path: jittered pause to avoid hammering the geo-IP APIs.
    time.sleep(random.uniform(0.01, 0.5))
    api_services = [
        {"url": "https://ipinfo.io", "country_key": "country"},
        {"url": "https://ipapi.co/json/", "country_key": "country_code"},
        {"url": "https://ipwho.is", "country_key": "country_code"},
        {"url": "https://freeipapi.com/api/json", "country_key": "countryCode"},
        {"url": "https://api.country.is", "country_key": "country"},
        {"url": "https://api.ip.sb/geoip", "country_key": "country_code"},
    ]

    max_retries = 3
    for attempt in range(max_retries):
        # Pick a random service each round; retrying is handled by this loop.
        service = random.choice(api_services)
        success, data = make_proxy_request(port=port, url=service["url"], max_retries=1, timeout=12)
        if success:
            code = data.get(service["country_key"], "")
            if code:
                # Map the ISO code to its Chinese country name when known.
                result["country"] = ISO_TO_CHINESE.get(code, code)
                break
        if attempt < max_retries - 1:
            # Back off before switching to another service.
            wait_time = min(2**attempt * random.uniform(1, 2), 6)
            logger.warning(
                f"Attempt {attempt+1} failed for proxy {name} with {service['url']}, waiting {wait_time:.2f}s"
            )
            time.sleep(wait_time)
    return result
|
Check the location of a single proxy by making a request through it
|
locate_by_ipinfo
|
python
|
wzdnzd/aggregator
|
subscribe/location.py
|
https://github.com/wzdnzd/aggregator/blob/master/subscribe/location.py
|
Apache-2.0
|
def get_messages(self, account: Account) -> list:
    """Download the list of messages currently in the account.

    Fetches the first page of the message index, then retrieves each
    message's full body individually (the index entries only carry a
    summary).

    Args:
        account: The mailbox account to read; falsy values short-circuit.

    Returns:
        A list of Message objects; empty when the account/auth is missing,
        the index fetch fails, or parsing raises.
    """
    if not account or not self.auth_headers:
        return []

    # Only the first page of the index is retrieved.
    content = utils.http_get(
        url="{}/messages?page={}".format(self.api_address, 1),
        headers=self.auth_headers,
        retry=2,
    )
    messages = []
    if not content:
        return messages

    try:
        dataset = json.loads(content).get("hydra:member", [])
        for message_data in dataset:
            # Fetch the full message body for each summary entry.
            content = utils.http_get(
                url=f"{self.api_address}/messages/{message_data['id']}",
                headers=self.auth_headers,
            )
            if not content:
                continue
            data = json.loads(content)
            messages.append(
                Message(
                    id=message_data["id"],
                    sender=message_data["from"],
                    to=message_data["to"],
                    subject=message_data["subject"],
                    intro=message_data["intro"],
                    text=data.get("text", ""),
                    html=data.get("html", ""),
                    data=message_data,
                )
            )
    except Exception:
        # Was a bare `except:`; narrowed so KeyboardInterrupt/SystemExit
        # still propagate while JSON/key errors are logged as before.
        logger.error(f"failed to list messages, email: {self.address}")
    return messages
|
download a list of messages currently in the account.
|
get_messages
|
python
|
wzdnzd/aggregator
|
subscribe/mailtm.py
|
https://github.com/wzdnzd/aggregator/blob/master/subscribe/mailtm.py
|
Apache-2.0
|
def delete_account(self, account: Account) -> bool:
    """Try to delete the account via HTTP DELETE.

    Args:
        account: The account to delete; None short-circuits to False.

    Returns:
        True only when the API answers 204 No Content.
    """
    if account is None or not self.auth_headers:
        return False
    try:
        request = urllib.request.Request(
            url=f"{self.api_address}/accounts/{account.id}",
            headers=self.auth_headers,
            method="DELETE",
        )
        response = urllib.request.urlopen(request, timeout=10, context=utils.CTX)
        try:
            return response.getcode() == 204
        finally:
            # The response was previously never closed, leaking the connection.
            response.close()
    except Exception:
        logger.info(f"[MailTMError] delete account failed, domain: {self.api_address}, address: {account.address}")
        return False
|
try to delete the account. returns True if it succeeds.
|
delete_account
|
python
|
wzdnzd/aggregator
|
subscribe/mailtm.py
|
https://github.com/wzdnzd/aggregator/blob/master/subscribe/mailtm.py
|
Apache-2.0
|
def test_synthetic_arange_random_n_data():
    """Test if correct data quantity is generated by synthetic_arange_random."""
    for n in (10, 20):
        y_pred, y_std, y_true, x = synthetic_arange_random(n)
        # Every returned array must match the requested sample count.
        for arr in (y_pred, y_std, y_true, x):
            assert len(arr) == n
|
Test if correct data quantity is generated by synthetic_arange_random.
|
test_synthetic_arange_random_n_data
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
tests/test_data.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/tests/test_data.py
|
MIT
|
def test_synthetic_sine_heteroscedastic_n_data():
    """Test if correct data quantity is generated by synthetic_sine_heteroscedastic."""
    for n in (10, 20):
        y_pred, y_std, y_true, x = synthetic_sine_heteroscedastic(n)
        # Every returned array must match the requested sample count.
        for arr in (y_pred, y_std, y_true, x):
            assert len(arr) == n
|
Test if correct data quantity is generated by synthetic_sine_heteroscedastic.
|
test_synthetic_sine_heteroscedastic_n_data
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
tests/test_data.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/tests/test_data.py
|
MIT
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.