id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
161,587 | import sys
from collections import Counter
import numpy as np
def save_word_dict(dict_data, save_path):
    """Write a word->value mapping to a TSV file, one "key<TAB>value" pair per line."""
    with open(save_path, 'w', encoding='utf-8') as out:
        out.writelines(f'{word}\t{value}\n' for word, value in dict_data.items())
161,588 | import sys
from collections import Counter
import numpy as np
def load_word_dict(save_path):
    """Read a TSV vocabulary file ("word<TAB>count" per line) into a dict of str -> int."""
    result = dict()
    with open(save_path, 'r', encoding='utf-8') as fin:
        for raw_line in fin:
            fields = raw_line.strip('\n').split('\t')
            result[fields[0]] = int(fields[1])
    return result
161,589 | import sys
from collections import Counter
import numpy as np
# Special vocabulary markers; they always occupy the first ids (0-3).
SOS_TOKEN = '<sos>'
EOS_TOKEN = '<eos>'
UNK_TOKEN = '<unk>'
PAD_TOKEN = '<pad>'


def read_vocab(input_texts, max_size=None, min_count=0):
    """Build a token -> id vocabulary from tokenized texts.

    :param input_texts: iterable of tokenized texts (each a sequence of tokens)
    :param max_size: keep at most this many entries (including special tokens)
    :param min_count: drop tokens occurring fewer than this many times
    :return: dict mapping token to integer id, most frequent tokens first
    """
    token_counts = Counter()
    special_tokens = [PAD_TOKEN, UNK_TOKEN, SOS_TOKEN, EOS_TOKEN]
    for texts in input_texts:
        for token in texts:
            # Count the token as a whole unit. The previous
            # `token_counts.update(token)` iterated over the token's
            # characters, mis-counting any multi-character token.
            token_counts[token] += 1
    # Sort word count by value (most frequent first)
    count_pairs = token_counts.most_common()
    vocab = [k for k, v in count_pairs if v >= min_count]
    # Insert the special tokens to the beginning
    vocab[0:0] = special_tokens
    full_token_id = list(zip(vocab, range(len(vocab))))[:max_size]
    vocab2id = dict(full_token_id)
    return vocab2id
161,590 | import sys
from collections import Counter
import numpy as np
def max_length(tensor):
    """Return the length of the longest sequence in *tensor*."""
    return max(map(len, tensor))
161,591 | import sys
from collections import Counter
import numpy as np
def preprocess_sentence(sentence):
    """Lowercase *sentence*, split it into characters, and wrap it with the
    start/end markers so the model knows when to start and stop predicting."""
    return [SOS_TOKEN, *sentence.lower(), EOS_TOKEN]
The provided code snippet includes necessary dependencies for implementing the `create_dataset` function. Write a Python function `def create_dataset(path, num_examples=None)` to solve the following problem:
# 1. Remove the accents # 2. Clean the sentences # 3. Return word pairs in the format: [ENGLISH, SPANISH] :param path: :param num_examples: :return:
Here is the function:
def create_dataset(path, num_examples=None):
    """
    # 1. Remove the accents
    # 2. Clean the sentences
    # 3. Return word pairs in the format: [ENGLISH, SPANISH]
    :param path: TSV file with one tab-separated sentence pair per line
    :param num_examples: optional cap on the number of lines read
    :return: tuple of token-sequence lists, one per column
    """
    # Use a context manager so the file handle is always closed
    # (the original `open(...).read()` leaked the handle).
    with open(path, 'r', encoding='utf-8') as f:
        lines = f.read().strip().split('\n')
    word_pairs = [[preprocess_sentence(s) for s in l.split('\t')] for l in lines[:num_examples]]
    return zip(*word_pairs)
161,592 | import sys
from collections import Counter
import numpy as np
def show_progress(curr, total, time=""):
    """Render a one-line console progress bar for step *curr* of *total*."""
    percent = int(round(100.0 * float(curr) / float(total)))
    filled = int(round(percent / 4))  # 25-slot bar, 4% per slot
    bar = '[{}{}]'.format('>' * filled, ' ' * (25 - filled))
    sys.stdout.write('{}{}%{}\r'.format(bar, percent, time))
    sys.stdout.flush()
161,593 | import sys
from collections import Counter
import numpy as np
def get_minibatches(n, minibatch_size, shuffle=True):
    """Split range(n) into index arrays of at most *minibatch_size* items.

    :param n: total number of samples
    :param minibatch_size: samples per batch
    :param shuffle: randomize the order of the batches (items inside a batch stay contiguous)
    :return: list of np.ndarray index blocks
    """
    starts = np.arange(0, n, minibatch_size)
    if shuffle:
        np.random.shuffle(starts)
    return [np.arange(start, min(start + minibatch_size, n)) for start in starts]
def prepare_data(seqs, max_length=None):
    """Right-pad integer sequences with zeros into a dense int32 matrix.

    :param seqs: list of integer sequences
    :param max_length: optional truncation length
    :return: (padded matrix of shape [n, longest], int32 array of true lengths)
    """
    if max_length:
        seqs = [s[:max_length] for s in seqs]
    lengths = [len(s) for s in seqs]
    longest = np.max(lengths)
    padded = np.zeros((len(seqs), longest)).astype('int32')
    for row, seq in enumerate(seqs):
        padded[row, :lengths[row]] = seq
    return padded, np.array(lengths).astype("int32")
def gen_examples(src_sentences, trg_sentences, batch_size, max_length=None):
    """Batch parallel source/target sentences into padded minibatch tuples
    of (src, src_lengths, trg, trg_lengths)."""
    examples = []
    for batch_idx in get_minibatches(len(src_sentences), batch_size):
        batch_src = [src_sentences[i] for i in batch_idx]
        batch_trg = [trg_sentences[i] for i in batch_idx]
        mb_x, mb_x_len = prepare_data(batch_src, max_length)
        mb_y, mb_y_len = prepare_data(batch_trg, max_length)
        examples.append((mb_x, mb_x_len, mb_y, mb_y_len))
    return examples
161,594 | import sys
from collections import Counter
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `one_hot` function. Write a Python function `def one_hot(src_sentences, trg_sentences, src_dict, trg_dict, sort_by_len=True)` to solve the following problem:
Vectorize the sequences (map tokens to ids).
Here is the function:
def one_hot(src_sentences, trg_sentences, src_dict, trg_dict, sort_by_len=True):
    """vector the sequences.

    Map tokens to ids (unknown tokens map to id 0); optionally reorder both
    sides together by ascending source-sentence length.
    """
    src_ids = [[src_dict.get(tok, 0) for tok in sent] for sent in src_sentences]
    trg_ids = [[trg_dict.get(tok, 0) for tok in sent] for sent in trg_sentences]
    if sort_by_len:
        # sort sentences by english (source) lengths
        order = sorted(range(len(src_ids)), key=lambda i: len(src_ids[i]))
        src_ids = [src_ids[i] for i in order]
        trg_ids = [trg_ids[i] for i in order]
    return src_ids, trg_ids
161,595 | import os
import pickle
from multiprocessing import Pool
from functools import partial
import torch
from torch.utils.data import Dataset
from tqdm.auto import tqdm
from transformers.models.bart.modeling_bart import shift_tokens_right
from datasets import Features, Sequence, Value, load_dataset
from datasets import Dataset as HFDataset
from transformers import (
DPRContextEncoder,
DPRContextEncoderTokenizerFast,
)
from loguru import logger
def preprocess_batch_for_hf_dataset(
    dataset, encoder_tokenizer, decoder_tokenizer, args
):
    """Batch-tokenize a Hugging Face dataset slice for seq2seq training.

    Dispatches on ``args.model_type``; all tokenizer calls use
    ``return_tensors="np"``. Returned keys per model family:

    * ``bart``: source_ids / source_mask / target_ids
    * ``mbart``: input_ids / attention_mask / decoder_input_ids / labels
    * ``rag-token`` / ``rag-sequence``: input_ids / attention_mask / decoder_input_ids
    * anything else: same keys as RAG, but targets go through ``decoder_tokenizer``
    """
    if args.model_type == "bart":
        # BART: one tokenizer for both source and target text.
        input_ids = encoder_tokenizer.batch_encode_plus(
            dataset["input_text"],
            max_length=args.max_seq_length,
            padding="max_length",
            return_tensors="np",
            truncation=True,
        )
        target_ids = encoder_tokenizer.batch_encode_plus(
            dataset["target_text"],
            max_length=args.max_seq_length,
            padding="max_length",
            return_tensors="np",
            truncation=True,
        )
        return {
            "source_ids": input_ids["input_ids"].squeeze(),
            "source_mask": input_ids["attention_mask"].squeeze(),
            "target_ids": target_ids["input_ids"].squeeze(),
        }
    elif args.model_type == "mbart":
        tokenized_example = encoder_tokenizer.prepare_seq2seq_batch(
            src_texts=dataset["input_text"],
            tgt_texts=dataset["target_text"],
            src_lang=args.src_lang,
            tgt_lang=args.tgt_lang,
            max_length=args.max_seq_length,
            padding="max_length",  # pad_to_max_length=True won't work in this case
            return_tensors="np",
            truncation=True,
        )
        # NOTE(review): return_tensors="np" yields numpy arrays, but .clone()
        # and transformers' shift_tokens_right are torch APIs — confirm this
        # branch is actually exercised (the sibling per-example helpers use
        # return_tensors="pt" here).
        decoder_input_ids = tokenized_example["labels"].clone()
        # Decoder input = labels shifted right, starting with the target
        # language code token.
        decoder_input_ids = shift_tokens_right(
            decoder_input_ids,
            encoder_tokenizer.pad_token_id,
            encoder_tokenizer.lang_code_to_id[args.tgt_lang],
        )
        labels = tokenized_example["labels"]
        # -100 marks padding positions to be ignored by the loss.
        labels[labels == encoder_tokenizer.pad_token_id] = -100
        return {
            "input_ids": tokenized_example["input_ids"].squeeze(),
            "attention_mask": tokenized_example["attention_mask"].squeeze(),
            "decoder_input_ids": decoder_input_ids.squeeze(),
            "labels": labels.squeeze(),
        }
    elif args.model_type in ["rag-token", "rag-sequence"]:
        source_inputs = encoder_tokenizer(
            dataset["input_text"],
            max_length=args.max_seq_length,
            padding="max_length",
            return_tensors="np",
            truncation=True,
        )
        try:
            # RAG: targets are tokenized with the generator's tokenizer.
            target_inputs = encoder_tokenizer.generator(
                dataset["target_text"],
                max_length=args.max_seq_length,
                padding="max_length",
                return_tensors="np",
                truncation=True,
            )
        except (TypeError, ValueError) as e:
            logger.warning(e)
            logger.warning(
                """Error encountered while converting target_text.
All target_text values have been manually cast to String as a workaround.
This may have been caused by NaN values present in the data."""
            )
            # Non-string values (e.g. NaN from pandas) break tokenization;
            # cast everything to str and retry once.
            dataset["target_text"] = [str(d) for d in dataset["target_text"]]
            target_inputs = encoder_tokenizer.generator(
                dataset["target_text"],
                max_length=args.max_seq_length,
                padding="max_length",
                return_tensors="np",
                truncation=True,
            )
        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }
    else:
        # Generic encoder-decoder: separate tokenizers for each side.
        source_inputs = encoder_tokenizer(
            dataset["input_text"],
            max_length=args.max_seq_length,
            padding="max_length",
            return_tensors="np",
            truncation=True,
        )
        target_inputs = decoder_tokenizer(
            dataset["target_text"],
            max_length=args.max_seq_length,
            padding="max_length",
            return_tensors="np",
            truncation=True,
        )
        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }
def load_hf_dataset(data, encoder_tokenizer, decoder_tokenizer, args):
    """Load a TSV file path or pandas DataFrame as a tokenized HF dataset,
    with torch-formatted columns appropriate for args.model_type."""
    if isinstance(data, str):
        download_mode = (
            "force_redownload"
            if args.reprocess_input_data
            else "reuse_dataset_if_exists"
        )
        dataset = load_dataset(
            "csv",
            data_files=data,
            delimiter="\t",
            download_mode=download_mode,
            cache_dir=args.dataset_cache_dir,
        )
    else:
        dataset = HFDataset.from_pandas(data)
    dataset = dataset.map(
        lambda batch: preprocess_batch_for_hf_dataset(
            batch,
            encoder_tokenizer=encoder_tokenizer,
            decoder_tokenizer=decoder_tokenizer,
            args=args,
        ),
        batched=True,
    )
    if args.model_type == "bart":
        column_names = ["source_ids", "source_mask", "target_ids"]
    elif args.model_type == "mbart":
        column_names = ["input_ids", "attention_mask", "decoder_input_ids", "labels"]
    else:
        column_names = ["input_ids", "attention_mask", "decoder_input_ids"]
    dataset.set_format(type="pt", columns=column_names)
    if isinstance(data, str):
        # This is not necessarily a train dataset. The datasets library insists on calling it train.
        return dataset["train"]
    return dataset
161,596 | import os
import pickle
from multiprocessing import Pool
from functools import partial
import torch
from torch.utils.data import Dataset
from tqdm.auto import tqdm
from transformers.models.bart.modeling_bart import shift_tokens_right
from datasets import Features, Sequence, Value, load_dataset
from datasets import Dataset as HFDataset
from transformers import (
DPRContextEncoder,
DPRContextEncoderTokenizerFast,
)
from loguru import logger
def preprocess_data(data):
    """Tokenize a single (input, target) example.

    *data* is (input_text, target_text, encoder_tokenizer, decoder_tokenizer,
    args). RAG models return a dict of squeezed tensors; all other models
    return a (input_ids, target_ids) tuple of flattened tensors.
    """
    input_text, target_text, encoder_tokenizer, decoder_tokenizer, args = data
    if args.model_type in ["rag-token", "rag-sequence"]:
        source_inputs = encoder_tokenizer(
            input_text,
            max_length=args.max_seq_length,
            padding="max_length",
            return_tensors="pt",
            truncation=True,
        )
        # Targets use args.max_length (generation length), not max_seq_length.
        target_inputs = encoder_tokenizer.generator(
            target_text,
            max_length=args.max_length,
            padding="max_length",
            return_tensors="pt",
            truncation=True,
        )
        return {
            "input_ids": source_inputs["input_ids"].squeeze(),
            "attention_mask": source_inputs["attention_mask"].squeeze(),
            "decoder_input_ids": target_inputs["input_ids"].squeeze(),
        }
    encoded_input = encoder_tokenizer.encode(
        input_text,
        max_length=args.max_seq_length,
        padding="max_length",
        return_tensors="pt",
        truncation=True,
    )
    encoded_target = decoder_tokenizer.encode(
        target_text,
        max_length=args.max_length,
        padding="max_length",
        return_tensors="pt",
        truncation=True,
    )
    return (torch.flatten(encoded_input), torch.flatten(encoded_target))
161,597 | import os
import pickle
from multiprocessing import Pool
from functools import partial
import torch
from torch.utils.data import Dataset
from tqdm.auto import tqdm
from transformers.models.bart.modeling_bart import shift_tokens_right
from datasets import Features, Sequence, Value, load_dataset
from datasets import Dataset as HFDataset
from transformers import (
DPRContextEncoder,
DPRContextEncoderTokenizerFast,
)
from loguru import logger
def preprocess_data_bart(data):
    """Tokenize one (input, target) pair for BART fine-tuning; returns a dict
    of squeezed source_ids / source_mask / target_ids tensors."""
    input_text, target_text, tokenizer, args = data
    encode = tokenizer.batch_encode_plus
    source = encode(
        [input_text],
        max_length=args.max_seq_length,
        padding="max_length",
        return_tensors="pt",
        truncation=True,
    )
    # Target side uses the generation length, not the encoder length.
    target = encode(
        [target_text],
        max_length=args.max_length,
        padding="max_length",
        return_tensors="pt",
        truncation=True,
    )
    return {
        "source_ids": source["input_ids"].squeeze(),
        "source_mask": source["attention_mask"].squeeze(),
        "target_ids": target["input_ids"].squeeze(),
    }
161,598 | import os
import pickle
from multiprocessing import Pool
from functools import partial
import torch
from torch.utils.data import Dataset
from tqdm.auto import tqdm
from transformers.models.bart.modeling_bart import shift_tokens_right
from datasets import Features, Sequence, Value, load_dataset
from datasets import Dataset as HFDataset
from transformers import (
DPRContextEncoder,
DPRContextEncoderTokenizerFast,
)
from loguru import logger
def preprocess_data_mbart(data):
    """Tokenize one (input, target) pair for mBART: builds right-shifted
    decoder inputs and pad-masked labels (-100 is ignored by the loss)."""
    input_text, target_text, tokenizer, args = data
    batch = tokenizer.prepare_seq2seq_batch(
        src_texts=[input_text],
        tgt_texts=[target_text],
        src_lang=args.src_lang,
        tgt_lang=args.tgt_lang,
        max_length=args.max_seq_length,
        max_target_length=args.max_length,
        padding="max_length",  # pad_to_max_length=True won't work in this case
        return_tensors="pt",
        truncation=True,
    )
    # Decoder input = labels shifted right, starting with the target-language code.
    shifted = shift_tokens_right(
        batch["labels"].clone(),
        tokenizer.pad_token_id,
        tokenizer.lang_code_to_id[args.tgt_lang],
    )
    labels = batch["labels"]
    labels[labels == tokenizer.pad_token_id] = -100  # mask padding from the loss
    return {
        "input_ids": batch["input_ids"].squeeze(),
        "attention_mask": batch["attention_mask"].squeeze(),
        "decoder_input_ids": shifted.squeeze(),
        "labels": labels.squeeze(),
    }
161,599 | import os
import pickle
from multiprocessing import Pool
from functools import partial
import torch
from torch.utils.data import Dataset
from tqdm.auto import tqdm
from transformers.models.bart.modeling_bart import shift_tokens_right
from datasets import Features, Sequence, Value, load_dataset
from datasets import Dataset as HFDataset
from transformers import (
DPRContextEncoder,
DPRContextEncoderTokenizerFast,
)
from loguru import logger
def split_documents(
    documents, split_text_n=100, split_text_character=" ", include_title=True
):
    """Split documents into passages"""
    titles, texts = [], []
    if include_title:
        pairs = zip(documents["title"], documents["text"])
    else:
        pairs = ((None, text) for text in documents["text"])
    for title, text in pairs:
        if text is None:
            # Skip missing documents entirely.
            continue
        for passage in split_text(text, n=split_text_n, character=split_text_character):
            # Missing titles (and the no-title mode) become empty strings.
            titles.append(title if (include_title and title is not None) else "")
            texts.append(passage)
    return {"title": titles, "text": texts}
def embed(documents, ctx_encoder, ctx_tokenizer, device):
    """Compute the DPR embeddings of document passages"""
    encoded = ctx_tokenizer(
        documents["title"],
        documents["text"],
        truncation=True,
        padding="longest",
        return_tensors="pt",
    )
    # Detach and move to CPU so the result can be stored in the dataset.
    outputs = ctx_encoder(encoded["input_ids"].to(device=device), return_dict=True)
    return {"embeddings": outputs.pooler_output.detach().cpu().numpy()}
The provided code snippet includes necessary dependencies for implementing the `generate_faiss_index_dataset` function. Write a Python function `def generate_faiss_index_dataset(data, ctx_encoder_name, args, device)` to solve the following problem:
Adapted from Huggingface example script at https://github.com/huggingface/transformers/blob/master/examples/research_projects/rag/use_own_knowledge_dataset.py
Here is the function:
def generate_faiss_index_dataset(data, ctx_encoder_name, args, device):
    """
    Adapted from Huggingface example script at https://github.com/huggingface/transformers/blob/master/examples/research_projects/rag/use_own_knowledge_dataset.py

    Builds a RAG knowledge dataset: loads TSV/pandas passages, splits them
    into short chunks, embeds them with a DPR context encoder, and attaches
    a Faiss HNSW index over the "embeddings" column.
    """
    try:
        import faiss
    except ImportError:
        raise ImportError("Please install Faiss to use this feature.")
    if isinstance(data, str):
        # File path: load as TSV; columns depend on whether titles are kept.
        if args.include_title_in_knowledge_dataset:
            dataset = load_dataset(
                "csv",
                data_files=data,
                delimiter="\t",
                column_names=["title", "text"],
                cache_dir=args.dataset_cache_dir,
            )
        else:
            dataset = load_dataset(
                "csv",
                data_files=data,
                delimiter="\t",
                column_names=["text"],
                cache_dir=args.dataset_cache_dir,
            )
    else:
        dataset = HFDataset.from_pandas(data)
    # Split each document into passages of split_text_n units.
    dataset = dataset.map(
        partial(
            split_documents,
            split_text_n=args.split_text_n,
            split_text_character=args.split_text_character,
            include_title=args.include_title_in_knowledge_dataset,
        ),
        batched=True,
        num_proc=args.process_count,
    )
    ctx_encoder = DPRContextEncoder.from_pretrained(ctx_encoder_name).to(device=device)
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(ctx_encoder_name)
    new_features = Features(
        {
            "text": Value("string"),
            "title": Value("string"),
            "embeddings": Sequence(Value("float32")),
        }
    )  # optional, save as float32 instead of float64 to save space
    # Embed every passage with the DPR context encoder.
    dataset = dataset.map(
        partial(
            embed, ctx_encoder=ctx_encoder, ctx_tokenizer=ctx_tokenizer, device=device
        ),
        batched=True,
        batch_size=args.rag_embed_batch_size,
        features=new_features,
    )
    if isinstance(data, str):
        # File-backed datasets come back wrapped in a "train" split.
        dataset = dataset["train"]
    if args.save_knowledge_dataset:
        output_dataset_directory = os.path.join(args.output_dir, "knowledge_dataset")
        os.makedirs(output_dataset_directory, exist_ok=True)
        dataset.save_to_disk(output_dataset_directory)
    # HNSW index over inner-product similarity, for retrieval at train/infer time.
    index = faiss.IndexHNSWFlat(args.faiss_d, args.faiss_m, faiss.METRIC_INNER_PRODUCT)
    dataset.add_faiss_index("embeddings", custom_index=index)
    return dataset
161,600 | import os
import pickle
from multiprocessing import Pool
from functools import partial
import torch
from torch.utils.data import Dataset
from tqdm.auto import tqdm
from transformers.models.bart.modeling_bart import shift_tokens_right
from datasets import Features, Sequence, Value, load_dataset
from datasets import Dataset as HFDataset
from transformers import (
DPRContextEncoder,
DPRContextEncoderTokenizerFast,
)
from loguru import logger
def add_faiss_index_to_dataset(dataset):
    """Attach an HNSW inner-product Faiss index to *dataset*'s "embeddings" column."""
    try:
        import faiss
    except ImportError:
        raise ImportError("Please install Faiss to use this feature.")
    # 768 = embedding width, 128 = HNSW graph connectivity.
    hnsw_index = faiss.IndexHNSWFlat(768, 128, faiss.METRIC_INNER_PRODUCT)
    dataset.add_faiss_index("embeddings", custom_index=hnsw_index)
    return dataset
161,601 | import collections
import copy
import math
import numpy as np
from loguru import logger
The provided code snippet includes necessary dependencies for implementing the `get_data_idf` function. Write a Python function `def get_data_idf(tokenized_sentence_list)` to solve the following problem:
Compute the IDF score for each word. Then compute the TF-IDF score.
Here is the function:
def get_data_idf(tokenized_sentence_list):
    """Compute the IDF score for each word. Then compute the TF-IDF score."""
    n_docs = len(tokenized_sentence_list)
    # Document frequency: in how many sentences does each word appear?
    word_doc_freq = collections.defaultdict(int)
    for sent in tokenized_sentence_list:
        # dict.fromkeys keeps first-occurrence order and dedupes within a sentence.
        for word in dict.fromkeys(sent):
            word_doc_freq[word] += 1
    idf = {word: math.log(n_docs * 1. / freq) for word, freq in word_doc_freq.items()}
    # TF-IDF accumulated over all sentences, term frequency normalized per sentence.
    tf_idf = collections.defaultdict(float)
    for sent in tokenized_sentence_list:
        for word in sent:
            tf_idf[word] += 1. / len(sent) * idf[word]
    return {
        "idf": idf,
        "tf_idf": dict(tf_idf),
    }
161,602 | import jieba
import re
jieba.setLogLevel(log_level="ERROR")
def split_2_short_text(text, include_symbol=True):
    """
    长句切分为短句 (split a long sentence into word/symbol segments)
    :param text: str
    :param include_symbol: bool, also keep non-word (symbol) segments
    :return: list of (sentence, idx) tuples, idx = start offset within *text*
    """
    # Fix: the class upper bound had been corrupted to \u9F5a, whose escape
    # also swallowed the 'a' of 'a-z', dropping lowercase a-y from the class.
    # \u4E00-\u9FD5 is the intended CJK unified ideograph range.
    re_han = re.compile("([\u4E00-\u9FD5a-zA-Z0-9+#&]+)", re.U)
    result = []
    blocks = re_han.split(text)
    start_idx = 0
    for blk in blocks:
        if not blk:
            continue
        if include_symbol:
            result.append((blk, start_idx))
        else:
            # Keep only "word" blocks (CJK/alphanumeric), skip punctuation.
            if re_han.match(blk):
                result.append((blk, start_idx))
        start_idx += len(blk)
    return result
def is_chinese_string(string):
    """判断是否全为汉字 (True only if every character is Chinese)."""
    for ch in string:
        if not is_chinese(ch):
            return False
    return True
def whitespace_tokenize(text):
    """Run basic whitespace cleaning and splitting on a piece of text."""
    stripped = text.strip()
    return stripped.split() if stripped else []
The provided code snippet includes necessary dependencies for implementing the `tokenize_words` function. Write a Python function `def tokenize_words(text)` to solve the following problem:
Word segmentation
Here is the function:
def tokenize_words(text):
    """Word segmentation: jieba for Chinese segments, whitespace for the rest."""
    output = []
    for segment, _idx in split_2_short_text(text, include_symbol=True):
        splitter = jieba.lcut if is_chinese_string(segment) else whitespace_tokenize
        output.extend(splitter(segment))
    return output
161,603 | import collections
from loguru import logger
The provided code snippet includes necessary dependencies for implementing the `convert_tokens_to_ids` function. Write a Python function `def convert_tokens_to_ids(vocab, tokens)` to solve the following problem:
Converts a sequence of tokens into ids using the vocab.
Here is the function:
def convert_tokens_to_ids(vocab, tokens):
    """Converts a sequence of tokens into ids using the vocab.

    Raises KeyError on out-of-vocabulary tokens (plain dict indexing).
    """
    return [vocab[token] for token in tokens]
161,604 | import collections
from loguru import logger
The provided code snippet includes necessary dependencies for implementing the `convert_ids_to_tokens` function. Write a Python function `def convert_ids_to_tokens(inv_vocab, ids)` to solve the following problem:
Converts a sequence of ids into tokens using the vocab.
Here is the function:
def convert_ids_to_tokens(inv_vocab, ids):
    """Converts a sequence of ids into tokens using the vocab.

    Raises KeyError for ids missing from the inverse vocab.
    """
    return [inv_vocab[i] for i in ids]
161,605 | import collections
from loguru import logger
The provided code snippet includes necessary dependencies for implementing the `load_vocab` function. Write a Python function `def load_vocab(vocab_file)` to solve the following problem:
Loads a vocabulary file into a dictionary.
Here is the function:
def load_vocab(vocab_file):
    """Loads a vocabulary file into a dictionary (token -> line index)."""
    vocab = collections.OrderedDict()
    with open(vocab_file, 'r', encoding='utf-8') as reader:
        for index, token in enumerate(reader):
            vocab[token.strip()] = index
    return vocab
161,606 | import collections
from loguru import logger
def build_vocab(word_list, vocab_file=None):
    """Assign consecutive ids to words in first-seen order; optionally write
    the word list (one word per line) to *vocab_file*."""
    vocab = {}
    for word in word_list:
        # setdefault evaluates len(vocab) before the insert, so the first
        # occurrence of each word gets the next free id.
        vocab.setdefault(word, len(vocab))
    if vocab_file:
        with open(vocab_file, 'w', encoding='utf-8') as out:
            out.writelines(w + '\n' for w in vocab)
        logger.info('save vocab to %s' % vocab_file)
    return vocab
161,607 | import math
from textgen.augment import translate_api
from loguru import logger
def replace_with_length_check(
        ori_text,
        new_text,
        use_min_length=10,
        use_max_length_diff_ratio=0.5):
    """Use new_text if the text length satisfies several constraints."""
    ori_len, new_len = len(ori_text), len(new_text)
    # Both texts must be long enough to be trusted.
    if min(ori_len, new_len) < use_min_length:
        logger.debug("not replacing due to short text: ori: {} new: {}".format(ori_text, new_text))
        return ori_text
    # The relative length change must stay within the allowed ratio.
    ratio = 1.0 * (new_len - ori_len) / ori_len
    if math.fabs(ratio) > use_max_length_diff_ratio:
        logger.debug("not replacing due to too different text length: ori: {} new: {}".format(ori_text, new_text))
        return ori_text
    return new_text
The provided code snippet includes necessary dependencies for implementing the `back_translation` function. Write a Python function `def back_translation(sentence, from_lang='zh', use_min_length=10, use_max_length_diff_ratio=0.5)` to solve the following problem:
Run back translation with prob :param sentence: :param from_lang: :param use_min_length: :param use_max_length_diff_ratio: :return:
Here is the function:
def back_translation(sentence, from_lang='zh', use_min_length=10, use_max_length_diff_ratio=0.5):
    """
    Run back translation with prob
    :param sentence:
    :param from_lang:
    :param use_min_length:
    :param use_max_length_diff_ratio:
    :return:
    """
    bt_result = translate_api.back_translate(sentence, from_lang=from_lang)
    if not bt_result:
        # API returned nothing usable; keep the original sentence.
        return sentence
    candidate = bt_result[0][0]
    return replace_with_length_check(
        sentence, candidate, use_min_length, use_max_length_diff_ratio)
161,608 | import os
import pickle
from multiprocessing import Pool
from datasets import Dataset as HFDataset
from datasets import load_dataset
from loguru import logger
from torch.utils.data import Dataset
from tqdm.auto import tqdm
def preprocess_batch_for_hf_dataset(dataset, tokenizer, args):
def load_hf_dataset(data, tokenizer, args):
    """Load a TSV file path or pandas DataFrame as a tokenized HF dataset
    with torch-formatted input_ids / attention_mask columns."""
    if isinstance(data, str):
        mode = (
            "force_redownload"
            if args.reprocess_input_data
            else "reuse_dataset_if_exists"
        )
        dataset = load_dataset(
            "csv", data_files=data, delimiter="\t", download_mode=mode
        )
    else:
        dataset = HFDataset.from_pandas(data)
    dataset = dataset.map(
        lambda batch: preprocess_batch_for_hf_dataset(batch, tokenizer=tokenizer, args=args),
        batched=True,
    )
    dataset.set_format(type="pt", columns=["input_ids", "attention_mask"])
    if isinstance(data, str):
        # This is not necessarily a train dataset. The datasets library insists on calling it train.
        return dataset["train"]
    return dataset
161,609 | import os
import pickle
from multiprocessing import Pool
from datasets import Dataset as HFDataset
from datasets import load_dataset
from loguru import logger
from torch.utils.data import Dataset
from tqdm.auto import tqdm
def preprocess_data(data):
    """Tokenize one (prefix, input, target) example for T5.

    When ``args.preprocess_inputs`` is set, the prefix is joined to the input
    with ``": "``; otherwise the raw concatenation is used, matching the
    batched ``preprocess_batch_for_hf_dataset`` helper.
    :return: (input_ids, attention_mask, labels) tensors for one example
    """
    prefix, input_text, target_text, tokenizer, args = data
    # Add EOS again if truncated?
    if args.preprocess_inputs:
        src_texts = [prefix + ": " + input_text]
    else:
        # Fix: both branches previously built identical inputs; the
        # non-preprocessed path should not insert the ": " separator,
        # consistent with preprocess_batch_for_hf_dataset.
        src_texts = [prefix + input_text]
    batch = tokenizer.prepare_seq2seq_batch(
        src_texts=src_texts,
        tgt_texts=[target_text],
        max_length=args.max_seq_length,
        padding="max_length",
        return_tensors="pt",
        truncation=True,
    )
    input_ids = batch["input_ids"][0]
    attention_mask = batch["attention_mask"][0]
    labels = batch["labels"][0]
    return (input_ids, attention_mask, labels)
161,610 | import math
import os
import random
import warnings
from dataclasses import asdict
from multiprocessing import Pool
import numpy as np
import pandas as pd
import torch
from loguru import logger
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
from torch.utils.tensorboard import SummaryWriter
from tqdm.auto import tqdm, trange
from transformers import ByT5Tokenizer
from transformers import MT5Config, MT5ForConditionalGeneration
from transformers import T5Config, T5ForConditionalGeneration, T5Tokenizer
from transformers.optimization import AdamW, Adafactor
from transformers.optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from textgen.config.model_args import T5Args
from textgen.t5.t5_utils import T5Dataset, load_hf_dataset
The provided code snippet includes necessary dependencies for implementing the `chunks` function. Write a Python function `def chunks(lst, n)` to solve the following problem:
Yield successive n-sized chunks from lst.
Here is the function:
def chunks(lst, n):
    """Yield successive n-sized chunks from lst."""
    start = 0
    while start < len(lst):
        yield lst[start:start + n]
        start += n
161,611 | import os
import pickle
from multiprocessing import Pool
import jieba
import torch.nn.functional as F
from datasets import Dataset as HFDataset
from datasets import load_dataset
from loguru import logger
from torch.utils.data import Dataset
from tqdm.auto import tqdm
from transformers import BertTokenizer
def mask_select(inputs, mask):
    """Select elements (or rows) of *inputs* where the flattened *mask* is true."""
    keep = mask.reshape(-1).bool()
    if inputs.ndim > mask.ndim:
        # inputs has extra trailing dims: select whole rows, one per mask slot.
        return inputs.reshape((int(keep.size(-1)), -1))[keep]
    return inputs.reshape(-1)[keep]
def copy_loss(inputs, targets, mask, eps=1e-6):
    """NLL loss over copy probabilities, aligned one step ahead (prediction at
    t scored against target at t+1) and restricted to unmasked positions."""
    shifted_mask = mask[:, 1:]
    preds = mask_select(inputs[:, :-1], shifted_mask)
    gold = mask_select(targets[:, 1:], shifted_mask)
    # eps guards log(0) for zero probabilities.
    return F.nll_loss((preds + eps).log(), gold)
161,612 | import os
import pickle
from multiprocessing import Pool
import jieba
import torch.nn.functional as F
from datasets import Dataset as HFDataset
from datasets import load_dataset
from loguru import logger
from torch.utils.data import Dataset
from tqdm.auto import tqdm
from transformers import BertTokenizer
def mask_select(inputs, mask):
    """Pick the entries of *inputs* at positions where *mask* is nonzero."""
    flat_mask = mask.reshape(-1).bool()
    if inputs.ndim <= mask.ndim:
        return inputs.reshape(-1)[flat_mask]
    # Extra trailing dims on inputs: keep them intact, select whole rows.
    rows = inputs.reshape((int(flat_mask.size(-1)), -1))
    return rows[flat_mask]
def ce_loss(inputs, targets, mask):
    """Cross-entropy between logits at step t and targets at step t+1,
    computed only over unmasked positions."""
    shifted_mask = mask[:, 1:]
    logits = mask_select(inputs[:, :-1], shifted_mask)
    gold = mask_select(targets[:, 1:], shifted_mask)
    return F.cross_entropy(logits, gold)
161,613 | import os
import pickle
from multiprocessing import Pool
import jieba
import torch.nn.functional as F
from datasets import Dataset as HFDataset
from datasets import load_dataset
from loguru import logger
from torch.utils.data import Dataset
from tqdm.auto import tqdm
from transformers import BertTokenizer
def preprocess_batch_for_hf_dataset(dataset, tokenizer, args):
    """Batch-tokenize a HF dataset for T5: prefix+input as source, target as
    labels. The ": " separator is inserted only when args.preprocess_inputs."""
    if args.preprocess_inputs:
        sources = [
            prefix + ": " + input_text
            for prefix, input_text in zip(dataset["prefix"], dataset["input_text"])
        ]
    else:
        sources = [
            prefix + input_text
            for prefix, input_text in zip(dataset["prefix"], dataset["input_text"])
        ]
    return tokenizer.prepare_seq2seq_batch(
        src_texts=sources,
        tgt_texts=dataset["target_text"],
        max_length=args.max_seq_length,
        max_target_length=args.max_length,
        padding="max_length",
        return_tensors="np",
        truncation=True,
    )
def load_hf_dataset(data, tokenizer, args):
    """Load a TSV file path or pandas DataFrame into a tokenized HF dataset
    with torch-formatted input_ids / attention_mask columns."""
    if isinstance(data, str):
        dataset = load_dataset(
            "csv",
            data_files=data,
            delimiter="\t",
            download_mode=(
                "force_redownload"
                if args.reprocess_input_data
                else "reuse_dataset_if_exists"
            ),
        )
    else:
        dataset = HFDataset.from_pandas(data)
    dataset = dataset.map(
        lambda batch: preprocess_batch_for_hf_dataset(batch, tokenizer=tokenizer, args=args),
        batched=True,
    )
    dataset.set_format(type="pt", columns=["input_ids", "attention_mask"])
    # A file-backed dataset comes wrapped in a "train" split regardless of use.
    return dataset["train"] if isinstance(data, str) else dataset
161,614 | import os
import pickle
from multiprocessing import Pool
import jieba
import torch.nn.functional as F
from datasets import Dataset as HFDataset
from datasets import load_dataset
from loguru import logger
from torch.utils.data import Dataset
from tqdm.auto import tqdm
from transformers import BertTokenizer
def preprocess_data(data):
    """Tokenize a single (prefix, input, target) example, returning
    (input_ids, attention_mask, labels, decoder_attention_mask, decoder_input_ids)."""
    prefix, input_text, target_text, tokenizer, args = data
    src = tokenizer(
        [prefix + ": " + input_text],
        padding="max_length",
        return_tensors='pt',
        max_length=args.max_seq_length,
        truncation=True,
        return_attention_mask=True,
        return_token_type_ids=False,
    )
    # The target side must be tokenized in target mode.
    with tokenizer.as_target_tokenizer():
        tgt = tokenizer(
            [target_text],
            padding="max_length",
            return_tensors='pt',
            max_length=args.max_seq_length,
            truncation=True,
            return_attention_mask=True,
            return_token_type_ids=False,
        )
    # labels and decoder_input_ids are both the target ids.
    return (
        src["input_ids"][0],
        src["attention_mask"][0],
        tgt["input_ids"][0],
        tgt["attention_mask"][0],
        tgt['input_ids'][0],
    )
161,615 | import json
import os
import sys
from dataclasses import asdict, dataclass, field
from multiprocessing import cpu_count
from typing import Optional
from loguru import logger
from torch.utils.data import Dataset
def get_default_process_count():
    """Return a sensible default worker count: all CPUs minus two, at least one.

    Windows caps multiprocessing pools at 61 handles, so clamp there.
    """
    count = max(cpu_count() - 2, 1)
    if sys.platform == "win32":
        count = min(count, 61)
    return count
161,616 | import json
import os
import sys
from dataclasses import asdict, dataclass, field
from multiprocessing import cpu_count
from typing import Optional
from loguru import logger
from torch.utils.data import Dataset
def get_special_tokens():
    """Return the special tokens used when building a tokenizer vocabulary."""
    special = ("<s>", "<pad>", "</s>", "<unk>", "<mask>")
    return list(special)
161,617 | import os
import sys
import random
import numpy as np
import torch
import shutil
import tarfile
import zipfile
import six
import requests
from tqdm.autonotebook import tqdm
def lists2tensor(xs, tokenizer=None):
    """Right-pad a list of sequences to equal length.

    Fix: the original header was truncated to "ists2tensor(...)" (missing
    "def l"), making the file unparseable; the name `lists2tensor` is what
    s2t/batchify below call.

    If *tokenizer* is given, each token sequence is converted to ids via
    tokenizer.token2idx and padded with tokenizer.padding_idx; otherwise the
    sequences are assumed numeric already and are padded with 0.
    Returns a list of equal-length lists.
    """
    max_len = max(len(x) for x in xs)
    padded = []
    for x in xs:
        if tokenizer is not None:
            row = tokenizer.token2idx(x) + [tokenizer.padding_idx] * (max_len - len(x))
        else:
            row = x + [0] * (max_len - len(x))
        padded.append(row)
    return padded
from typing import Dict, Optional, Union, List
from pathlib import Path
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE
from huggingface_hub import HfApi, hf_hub_url, cached_download
import fnmatch
def s2t(strs, tokenizer):
    """Convert a batch of token sequences into (input, mask) tensors.

    Both returned tensors are padded and transposed to (seq_len, batch);
    the mask is 1.0 over real tokens and 0.0 over padding.
    """
    token_rows = [x for x in strs]
    mask_rows = [[1 for _ in row] for row in token_rows]
    inp = torch.LongTensor(lists2tensor(token_rows, tokenizer)).t_().contiguous()
    msk = torch.FloatTensor(lists2tensor(mask_rows)).t_().contiguous()
    return inp, msk
161,618 | import os
import sys
import random
import numpy as np
import torch
import shutil
import tarfile
import zipfile
import six
import requests
from tqdm.autonotebook import tqdm
def batchify(data, tokenizer):
    """Collate parsed examples into padded, transposed (seq_len, batch) tensors.

    Fix: the original header was truncated to "batchify(data, tokenizer):"
    (missing the "def " keyword), which is a syntax error; s2xy below calls
    this as a function.

    Each element of *data* is the 7-tuple produced by parse_line. The decoder
    input is the target shifted right with a leading BOS (teacher forcing).
    """
    xs_tpl, xs_seg, xs_pos, \
    ys_truth, ys_inp, \
    ys_tpl, ys_seg, ys_pos, msk = [], [], [], [], [], [], [], [], []
    for xs_tpl_i, xs_seg_i, xs_pos_i, ys_i, ys_tpl_i, ys_seg_i, ys_pos_i in data:
        xs_tpl.append(xs_tpl_i)
        xs_seg.append(xs_seg_i)
        xs_pos.append(xs_pos_i)
        ys_truth.append(ys_i)
        # Teacher forcing: decoder sees BOS + target[:-1].
        ys_inp.append([BOS] + ys_i[:-1])
        ys_tpl.append(ys_tpl_i)
        ys_seg.append(ys_seg_i)
        ys_pos.append(ys_pos_i)
        msk.append([1 for i in range(len(ys_i))])
    xs_tpl = torch.LongTensor(lists2tensor(xs_tpl, tokenizer)).t_().contiguous()
    xs_seg = torch.LongTensor(lists2tensor(xs_seg, tokenizer)).t_().contiguous()
    xs_pos = torch.LongTensor(lists2tensor(xs_pos, tokenizer)).t_().contiguous()
    ys_truth = torch.LongTensor(lists2tensor(ys_truth, tokenizer)).t_().contiguous()
    ys_inp = torch.LongTensor(lists2tensor(ys_inp, tokenizer)).t_().contiguous()
    ys_tpl = torch.LongTensor(lists2tensor(ys_tpl, tokenizer)).t_().contiguous()
    ys_seg = torch.LongTensor(lists2tensor(ys_seg, tokenizer)).t_().contiguous()
    ys_pos = torch.LongTensor(lists2tensor(ys_pos, tokenizer)).t_().contiguous()
    msk = torch.FloatTensor(lists2tensor(msk)).t_().contiguous()
    return xs_tpl, xs_seg, xs_pos, ys_truth, ys_inp, ys_tpl, ys_seg, ys_pos, msk
def parse_line(line, max_len, min_len=2):
    """Parse one '<author><s1><cipai><s2><sentences>' line into SongNet features.

    Returns (xs_tpl, xs_seg, xs_pos, ys, ys_tpl, ys_seg, ys_pos) token lists,
    or None for empty/too-short input. Relies on module-level markers
    EOC, EOS, RS and the CS/SS/PS marker vocabularies defined elsewhere in
    this file.
    """
    line = line.strip()
    if not line:
        return None
    fs = line.split("<s2>")
    author, cipai = fs[0].split("<s1>")
    sents = fs[1].strip()
    # Truncate over-long inputs, reject too-short ones.
    if len(sents) > max_len:
        sents = sents[:max_len]
    if len(sents) < min_len:
        return None
    sents = sents.split("</s>")
    ys = []
    xs_tpl = []
    xs_seg = []
    xs_pos = []
    # The context is the cipai (tune title): one char per position.
    ctx = cipai
    ws = [w for w in ctx]
    xs_tpl = ws + [EOC]
    xs_seg = [SS[0] for w in ws] + [EOC]
    # Offset 300 into SS for context positions — presumably reserves the low
    # SS ids for sentence-segment markers; TODO confirm.
    xs_pos = [SS[i + 300] for i in range(len(ws))] + [EOC]
    ys_tpl = []
    ys_seg = []
    ys_pos = []
    for si, sent in enumerate(sents):
        ws = []
        sent = sent.strip()
        if not sent:
            continue
        for w in sent:
            ws.append(w)
            if w.strip() and w not in PUNCS:
                # CS[2]: ordinary character slot in the template.
                ys_tpl.append(CS[2])
            else:
                # CS[1]: punctuation/whitespace slot.
                ys_tpl.append(CS[1])
        ys += ws + [RS]
        # CS[3] marks the last non-punctuation char of the sentence
        # (the rhyme position): second-to-last slot if the sentence ends
        # with punctuation, last slot otherwise.
        if ws[-1] in PUNCS:
            ys_tpl[-2] = CS[3]
        else:
            ys_tpl[-1] = CS[3]
        ys_tpl += [RS]
        # Segment marker = 1-based sentence index; position marker counts
        # down to the sentence end.
        ys_seg += [SS[si + 1] for w in ws] + [RS]
        ys_pos += [PS[len(ws) - i] for i in range(len(ws))] + [RS]
    ys += [EOS]
    ys_tpl += [EOS]
    ys_seg += [EOS]
    ys_pos += [EOS]
    # Encoder input = context markers followed by the full target template.
    xs_tpl += ys_tpl
    xs_seg += ys_seg
    xs_pos += ys_pos
    if len(ys) < min_len:
        return None
    return xs_tpl, xs_seg, xs_pos, ys, ys_tpl, ys_seg, ys_pos
from typing import Dict, Optional, Union, List
from pathlib import Path
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE
from huggingface_hub import HfApi, hf_hub_url, cached_download
import fnmatch
def s2xy(lines, tokenizer, max_len, min_len):
    """Parse raw lines and collate the successfully parsed ones into tensors."""
    parsed = (parse_line(line, max_len, min_len) for line in lines)
    data = [item for item in parsed if item]
    return batchify(data, tokenizer)
161,619 | import os
import sys
import random
import numpy as np
import torch
import shutil
import tarfile
import zipfile
import six
import requests
from tqdm.autonotebook import tqdm
def batchify(data, tokenizer):
    """Collate parsed examples into padded, transposed (seq_len, batch) tensors.

    Fix: the original header was truncated to "batchify(data, tokenizer):"
    (missing the "def " keyword), which is a syntax error; s2xy_polish below
    calls this as a function.

    Each element of *data* is the 7-tuple produced by parse_line_polish. The
    decoder input is the target shifted right with a leading BOS.
    """
    xs_tpl, xs_seg, xs_pos, \
    ys_truth, ys_inp, \
    ys_tpl, ys_seg, ys_pos, msk = [], [], [], [], [], [], [], [], []
    for xs_tpl_i, xs_seg_i, xs_pos_i, ys_i, ys_tpl_i, ys_seg_i, ys_pos_i in data:
        xs_tpl.append(xs_tpl_i)
        xs_seg.append(xs_seg_i)
        xs_pos.append(xs_pos_i)
        ys_truth.append(ys_i)
        # Teacher forcing: decoder sees BOS + target[:-1].
        ys_inp.append([BOS] + ys_i[:-1])
        ys_tpl.append(ys_tpl_i)
        ys_seg.append(ys_seg_i)
        ys_pos.append(ys_pos_i)
        msk.append([1 for i in range(len(ys_i))])
    xs_tpl = torch.LongTensor(lists2tensor(xs_tpl, tokenizer)).t_().contiguous()
    xs_seg = torch.LongTensor(lists2tensor(xs_seg, tokenizer)).t_().contiguous()
    xs_pos = torch.LongTensor(lists2tensor(xs_pos, tokenizer)).t_().contiguous()
    ys_truth = torch.LongTensor(lists2tensor(ys_truth, tokenizer)).t_().contiguous()
    ys_inp = torch.LongTensor(lists2tensor(ys_inp, tokenizer)).t_().contiguous()
    ys_tpl = torch.LongTensor(lists2tensor(ys_tpl, tokenizer)).t_().contiguous()
    ys_seg = torch.LongTensor(lists2tensor(ys_seg, tokenizer)).t_().contiguous()
    ys_pos = torch.LongTensor(lists2tensor(ys_pos, tokenizer)).t_().contiguous()
    msk = torch.FloatTensor(lists2tensor(msk)).t_().contiguous()
    return xs_tpl, xs_seg, xs_pos, ys_truth, ys_inp, ys_tpl, ys_seg, ys_pos, msk
def parse_line_polish(line, max_len, min_len):
    """Parse one polish-task line into SongNet features.

    Same input format as parse_line, but the template keeps the original
    characters and only the '_' placeholder positions are masked (CS[2]),
    i.e. the model must fill in blanks rather than generate everything.
    Returns (xs_tpl, xs_seg, xs_pos, ys, ys_tpl, ys_seg, ys_pos) or None
    for empty/too-short input.
    """
    line = line.strip()
    if not line:
        return None
    fs = line.split("<s2>")
    author, cipai = fs[0].split("<s1>")
    sents = fs[1].strip()
    # Truncate over-long inputs, reject too-short ones.
    if len(sents) > max_len:
        sents = sents[:max_len]
    if len(sents) < min_len:
        return None
    sents = sents.split("</s>")
    ys = []
    xs_tpl = []
    xs_seg = []
    xs_pos = []
    # The context is the cipai (tune title): one char per position.
    ctx = cipai
    ws = [w for w in ctx]
    xs_tpl = ws + [EOC]
    xs_seg = [SS[0] for w in ws] + [EOC]
    # Offset 300 into SS for context positions — presumably reserves the low
    # SS ids for sentence-segment markers; TODO confirm.
    xs_pos = [SS[i + 300] for i in range(len(ws))] + [EOC]
    ys_tpl = []
    ys_seg = []
    ys_pos = []
    for si, sent in enumerate(sents):
        ws = []
        sent = sent.strip()
        if not sent:
            continue
        for w in sent:
            ws.append(w)
            if w == "_":
                # Blank to be filled by the model.
                ys_tpl.append(CS[2])
            else:
                # Known character: kept verbatim in the template.
                ys_tpl.append(w)
        ys += ws + [RS]
        ys_tpl += [RS]
        # Segment marker = 1-based sentence index; position marker counts
        # down to the sentence end.
        ys_seg += [SS[si + 1] for w in ws] + [RS]
        ys_pos += [PS[len(ws) - i] for i in range(len(ws))] + [RS]
    ys += [EOS]
    ys_tpl += [EOS]
    ys_seg += [EOS]
    ys_pos += [EOS]
    # Encoder input = context markers followed by the full target template.
    xs_tpl += ys_tpl
    xs_seg += ys_seg
    xs_pos += ys_pos
    if len(ys) < min_len:
        return None
    return xs_tpl, xs_seg, xs_pos, ys, ys_tpl, ys_seg, ys_pos
from typing import Dict, Optional, Union, List
from pathlib import Path
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE
from huggingface_hub import HfApi, hf_hub_url, cached_download
import fnmatch
def s2xy_polish(lines, tokenizer, max_len, min_len=2):
    """Parse polish-task lines and collate them into model tensors.

    Fix: parse_line_polish returns None for empty or too-short lines, and the
    original appended those Nones, making batchify crash while unpacking the
    7-tuple. Skip failed parses instead (mirrors s2xy above).
    """
    data = []
    for line in lines:
        res = parse_line_polish(line, max_len, min_len)
        if not res:
            continue
        data.append(res)
    return batchify(data, tokenizer)
161,620 | import os
import sys
import random
import numpy as np
import torch
import shutil
import tarfile
import zipfile
import six
import requests
from tqdm.autonotebook import tqdm
# Marker vocabularies for the SongNet data format (index 0 is the '-1' filler).
CS = ['<c-1>'] + ['<c' + str(i) + '>' for i in range(32)]  # content-type/template markers
SS = ['<s-1>'] + ['<s' + str(i) + '>' for i in range(512)]  # segment (sentence-index) markers
PS = ['<p-1>'] + ['<p' + str(i) + '>' for i in range(512)]  # intra-sentence position markers
PUNCS = {",", ".", "?", "!", ":", ",", "。", "?", "!", ":"}  # ASCII + full-width punctuation
from typing import Dict, Optional, Union, List
from pathlib import Path
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE
from huggingface_hub import HfApi, hf_hub_url, cached_download
import fnmatch
The provided code snippet includes necessary dependencies for implementing the `preprocess_data` function. Write a Python function `def preprocess_data(line, max_length, min_length)` to solve the following problem:
Convert line text to idx
Here is the function:
def preprocess_data(line, max_length, min_length):
    """Convert one raw '<author><s1><cipai><s2><sentences>' line to marker/token lists.

    Same feature construction as parse_line, but splits with maxsplit=1 and
    does not pre-strip the line. Returns
    (xs_tpl, xs_seg, xs_pos, ys, ys_tpl, ys_seg, ys_pos) or None when the
    truncated sentence text is shorter than *min_length*.
    """
    fs = line.split("<s2>", 1)
    author, cipai = fs[0].split("<s1>", 1)
    sents = fs[1].strip()
    # Truncate over-long inputs, reject too-short ones.
    if len(sents) > max_length:
        sents = sents[:max_length]
    if len(sents) < min_length:
        return None
    sents = sents.split("</s>")
    ys = []
    xs_tpl = []
    xs_seg = []
    xs_pos = []
    # The context is the cipai (tune title): one char per position.
    ctx = cipai
    ws = [w for w in ctx]
    xs_tpl = ws + [EOC]
    xs_seg = [SS[0] for w in ws] + [EOC]
    # Offset 300 into SS for context positions — presumably reserves the low
    # SS ids for sentence-segment markers; TODO confirm.
    xs_pos = [SS[i + 300] for i in range(len(ws))] + [EOC]
    ys_tpl = []
    ys_seg = []
    ys_pos = []
    for si, sent in enumerate(sents):
        ws = []
        sent = sent.strip()
        if not sent:
            continue
        for w in sent:
            ws.append(w)
            if w.strip() and w not in PUNCS:
                # CS[2]: ordinary character slot in the template.
                ys_tpl.append(CS[2])
            else:
                # CS[1]: punctuation/whitespace slot.
                ys_tpl.append(CS[1])
        ys += ws + [RS]
        # CS[3] marks the last non-punctuation char of the sentence
        # (the rhyme position).
        if ws[-1] in PUNCS:
            ys_tpl[-2] = CS[3]
        else:
            ys_tpl[-1] = CS[3]
        ys_tpl += [RS]
        # Segment marker = 1-based sentence index; position marker counts
        # down to the sentence end.
        ys_seg += [SS[si + 1] for w in ws] + [RS]
        ys_pos += [PS[len(ws) - i] for i in range(len(ws))] + [RS]
    ys += [EOS]
    ys_tpl += [EOS]
    ys_seg += [EOS]
    ys_pos += [EOS]
    # Encoder input = context markers followed by the full target template.
    xs_tpl += ys_tpl
    xs_seg += ys_seg
    xs_pos += ys_pos
    if len(ys) < min_length:
        return None
    return xs_tpl, xs_seg, xs_pos, ys, ys_tpl, ys_seg, ys_pos
161,621 | import os
import sys
import random
import numpy as np
import torch
import shutil
import tarfile
import zipfile
import six
import requests
from tqdm.autonotebook import tqdm
def _extract_archive(file_path, path='.', archive_format='auto'):
"""
Extracts an archive if it matches tar, tar.gz, tar.bz, or zip formats.
:param file_path: path to the archive file
:param path: path to extract the archive file
:param archive_format: Archive format to try for extracting the file.
Options are 'auto', 'tar', 'zip', and None.
'tar' includes tar, tar.gz, and tar.bz files.
The default 'auto' is ['tar', 'zip'].
None or an empty list will return no matches found.
:return: True if a match was found and an archive extraction was completed,
False otherwise.
"""
if archive_format is None:
return False
if archive_format == 'auto':
archive_format = ['tar', 'zip']
if isinstance(archive_format, six.string_types):
archive_format = [archive_format]
for archive_type in archive_format:
if archive_type == 'tar':
open_fn = tarfile.open
is_match_fn = tarfile.is_tarfile
if archive_type == 'zip':
open_fn = zipfile.ZipFile
is_match_fn = zipfile.is_zipfile
if is_match_fn(file_path):
with open_fn(file_path) as archive:
try:
archive.extractall(path)
except (tarfile.TarError, RuntimeError,
KeyboardInterrupt):
if os.path.exists(path):
if os.path.isfile(path):
os.remove(path)
else:
shutil.rmtree(path)
raise
return True
return False
from typing import Dict, Optional, Union, List
from pathlib import Path
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE
from huggingface_hub import HfApi, hf_hub_url, cached_download
import fnmatch
The provided code snippet includes necessary dependencies for implementing the `http_get` function. Write a Python function `def http_get(url, path, extract: bool = True)` to solve the following problem:
Downloads a URL to a given path on disc
Here is the function:
def http_get(url, path, extract: bool = True):
    """
    Downloads a URL to a given path on disc.

    Streams the response into ``path + "_part"`` first and renames on
    completion, so a half-finished download never occupies the final path.
    When *extract* is true, the downloaded file is also unpacked (tar/zip)
    into its containing directory via _extract_archive.
    """
    if os.path.dirname(path) != '':
        os.makedirs(os.path.dirname(path), exist_ok=True)
    req = requests.get(url, stream=True)
    if req.status_code != 200:
        print("Exception when trying to download {}. Response {}".format(url, req.status_code), file=sys.stderr)
        # raise_for_status() raises only for 4xx/5xx; for other non-200
        # statuses this falls through to the plain return below.
        req.raise_for_status()
        return
    download_filepath = path + "_part"
    with open(download_filepath, "wb") as file_binary:
        content_length = req.headers.get('Content-Length')
        total = int(content_length) if content_length is not None else None
        progress = tqdm(unit="B", total=total, unit_scale=True)
        for chunk in req.iter_content(chunk_size=1024):
            if chunk:  # filter out keep-alive new chunks
                progress.update(len(chunk))
                file_binary.write(chunk)
    # Atomically move the finished download into place.
    os.rename(download_filepath, path)
    progress.close()
    if extract:
        data_dir = os.path.dirname(os.path.abspath(path))
        _extract_archive(path, data_dir, 'auto')
161,622 | import os
import sys
import random
import numpy as np
import torch
import shutil
import tarfile
import zipfile
import six
import requests
from tqdm.autonotebook import tqdm
from typing import Dict, Optional, Union, List
from pathlib import Path
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE
from huggingface_hub import HfApi, hf_hub_url, cached_download
import fnmatch
The provided code snippet includes necessary dependencies for implementing the `snapshot_download` function. Write a Python function `def snapshot_download( repo_id: str, revision: Optional[str] = None, cache_dir: Union[str, Path, None] = None, library_name: Optional[str] = None, library_version: Optional[str] = None, user_agent: Union[Dict, str, None] = None, ignore_files: Optional[List[str]] = None, use_auth_token: Union[bool, str, None] = None ) -> str` to solve the following problem:
Method derived from huggingface_hub. Adds a new parameters 'ignore_files', which allows to ignore certain files / file-patterns
Here is the function:
def snapshot_download(
    repo_id: str,
    revision: Optional[str] = None,
    cache_dir: Union[str, Path, None] = None,
    library_name: Optional[str] = None,
    library_version: Optional[str] = None,
    user_agent: Union[Dict, str, None] = None,
    ignore_files: Optional[List[str]] = None,
    use_auth_token: Union[bool, str, None] = None
) -> str:
    """
    Method derived from huggingface_hub.
    Adds a new parameters 'ignore_files', which allows to ignore certain files / file-patterns

    Downloads every file of the given repo revision into
    ``cache_dir/<repo_id with '/' replaced by '_'>`` and returns that folder.
    Files whose repo path matches any fnmatch pattern in *ignore_files* are
    skipped.
    """
    if cache_dir is None:
        cache_dir = HUGGINGFACE_HUB_CACHE
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)

    _api = HfApi()
    # Resolve the revision to a concrete commit sha so all files come from
    # one consistent snapshot.
    model_info = _api.model_info(repo_id=repo_id, revision=revision)

    storage_folder = os.path.join(
        cache_dir, repo_id.replace("/", "_")
    )

    for model_file in model_info.siblings:
        if ignore_files is not None:
            skip_download = False
            for pattern in ignore_files:
                if fnmatch.fnmatch(model_file.rfilename, pattern):
                    skip_download = True
                    break
            if skip_download:
                continue

        url = hf_hub_url(
            repo_id, filename=model_file.rfilename, revision=model_info.sha
        )
        # Map the repo-relative POSIX path to a local OS path.
        relative_filepath = os.path.join(*model_file.rfilename.split("/"))

        # Create potential nested dir
        nested_dirname = os.path.dirname(
            os.path.join(storage_folder, relative_filepath)
        )
        os.makedirs(nested_dirname, exist_ok=True)

        path = cached_download(
            url,
            cache_dir=storage_folder,
            force_filename=relative_filepath,
            library_name=library_name,
            library_version=library_version,
            user_agent=user_agent,
            use_auth_token=use_auth_token,
        )

        # cached_download leaves a stale lock file beside the download.
        if os.path.exists(path + ".lock"):
            os.remove(path + ".lock")

    return storage_folder
161,623 | import math
import os
import random
from typing import List, Dict, Tuple, Iterable, Type, Union, Callable, Optional
from collections import defaultdict
import numpy as np
import pandas as pd
import torch
import torch.nn.functional as F
from loguru import logger
from torch import nn
from tqdm.auto import tqdm, trange
from textgen.config.model_args import SongNetArgs
from textgen.language_modeling.songnet_utils import (
SongNetTokenizer, s2t, s2xy, s2xy_polish,
SongNetDataLoader,
BOS,
EOS,
Optim,
snapshot_download,
)
def _get_full_incremental_state_key(module_instance, key):
module_name = module_instance.__class__.__name__
INCREMENTAL_STATE_INSTANCE_ID = defaultdict(lambda: 0)
# assign a unique ID to each module instance, so that incremental state is
# not shared across module instances
if not hasattr(module_instance, '_guyu_instance_id'):
INCREMENTAL_STATE_INSTANCE_ID[module_name] += 1
module_instance._guyu_instance_id = INCREMENTAL_STATE_INSTANCE_ID[module_name]
return '{}.{}.{}'.format(module_name, module_instance._guyu_instance_id, key)
The provided code snippet includes necessary dependencies for implementing the `get_incremental_state` function. Write a Python function `def get_incremental_state(module, incremental_state, key)` to solve the following problem:
Helper for getting incremental state for an nn.Module.
Here is the function:
def get_incremental_state(module, incremental_state, key):
    """Fetch *key*'s value from *incremental_state*, scoped to this module instance.

    Returns None when no state dict is given or the scoped key is absent.
    """
    full_key = _get_full_incremental_state_key(module, key)
    if incremental_state is None:
        return None
    return incremental_state.get(full_key)
161,624 | import math
import os
import random
from typing import List, Dict, Tuple, Iterable, Type, Union, Callable, Optional
from collections import defaultdict
import numpy as np
import pandas as pd
import torch
import torch.nn.functional as F
from loguru import logger
from torch import nn
from tqdm.auto import tqdm, trange
from textgen.config.model_args import SongNetArgs
from textgen.language_modeling.songnet_utils import (
SongNetTokenizer, s2t, s2xy, s2xy_polish,
SongNetDataLoader,
BOS,
EOS,
Optim,
snapshot_download,
)
def _get_full_incremental_state_key(module_instance, key):
module_name = module_instance.__class__.__name__
INCREMENTAL_STATE_INSTANCE_ID = defaultdict(lambda: 0)
# assign a unique ID to each module instance, so that incremental state is
# not shared across module instances
if not hasattr(module_instance, '_guyu_instance_id'):
INCREMENTAL_STATE_INSTANCE_ID[module_name] += 1
module_instance._guyu_instance_id = INCREMENTAL_STATE_INSTANCE_ID[module_name]
return '{}.{}.{}'.format(module_name, module_instance._guyu_instance_id, key)
The provided code snippet includes necessary dependencies for implementing the `set_incremental_state` function. Write a Python function `def set_incremental_state(module, incremental_state, key, value)` to solve the following problem:
Helper for setting incremental state for an nn.Module.
Here is the function:
def set_incremental_state(module, incremental_state, key, value):
    """Store *value* in *incremental_state* under a module-instance-scoped key.

    No-op when *incremental_state* is None.
    """
    if incremental_state is None:
        return
    incremental_state[_get_full_incremental_state_key(module, key)] = value
161,625 | import math
import os
import random
from typing import List, Dict, Tuple, Iterable, Type, Union, Callable, Optional
from collections import defaultdict
import numpy as np
import pandas as pd
import torch
import torch.nn.functional as F
from loguru import logger
from torch import nn
from tqdm.auto import tqdm, trange
from textgen.config.model_args import SongNetArgs
from textgen.language_modeling.songnet_utils import (
SongNetTokenizer, s2t, s2xy, s2xy_polish,
SongNetDataLoader,
BOS,
EOS,
Optim,
snapshot_download,
)
def gelu(x):
    """Exact GELU activation: x * Phi(x), with Phi the standard normal CDF."""
    phi = 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
    return x * phi
161,626 | import math
import os
import random
from typing import List, Dict, Tuple, Iterable, Type, Union, Callable, Optional
from collections import defaultdict
import numpy as np
import pandas as pd
import torch
import torch.nn.functional as F
from loguru import logger
from torch import nn
from tqdm.auto import tqdm, trange
from textgen.config.model_args import SongNetArgs
from textgen.language_modeling.songnet_utils import (
SongNetTokenizer, s2t, s2xy, s2xy_polish,
SongNetDataLoader,
BOS,
EOS,
Optim,
snapshot_download,
)
def Embedding(num_embeddings, embedding_dim, padding_idx):
    """Create an nn.Embedding initialized N(0, 0.02) with a zeroed padding row."""
    emb = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
    nn.init.normal_(emb.weight, std=0.02)
    nn.init.constant_(emb.weight[padding_idx], 0)
    return emb
161,627 | import os
import pickle
from multiprocessing import Pool
from typing import Tuple
import torch
from datasets import load_dataset
from torch.utils.data import Dataset
from tqdm.auto import tqdm
from transformers import PreTrainedTokenizer
from loguru import logger
def encode(data):
    """Unpack a (tokenizer, line) pair and return the tokenizer-encoded line."""
    tokenizer, text = data
    return tokenizer.encode(text)
161,628 | import os
import pickle
from multiprocessing import Pool
from typing import Tuple
import torch
from datasets import load_dataset
from torch.utils.data import Dataset
from tqdm.auto import tqdm
from transformers import PreTrainedTokenizer
from loguru import logger
def encode_sliding_window(data):
    """Tokenize one line into (possibly overlapping) windows of input ids.

    *data* is a (tokenizer, line, max_seq_length, special_tokens_count,
    stride, no_padding) tuple, where *stride* is a fraction of
    max_seq_length. Returns a list of input-id lists; unless *no_padding*
    is set, each window gets CLS/SEP added and is padded to max_seq_length.
    """
    tokenizer, line, max_seq_length, special_tokens_count, stride, no_padding = data

    tokens = tokenizer.tokenize(line)
    # Convert the fractional stride into a token count.
    stride = int(max_seq_length * stride)
    token_sets = []
    if len(tokens) > max_seq_length - special_tokens_count:
        # Slide a window over the token list; windows overlap when
        # stride < window length.
        token_sets = [tokens[i: i + max_seq_length - special_tokens_count] for i in range(0, len(tokens), stride)]
    else:
        token_sets.append(tokens)

    features = []
    if not no_padding:
        # NOTE(review): these hold token *ids* despite the _token names, and
        # they get concatenated with string tokens before
        # convert_tokens_to_ids below — verify against the tokenizer's
        # behavior for mixed id/str input.
        sep_token = tokenizer.sep_token_id
        cls_token = tokenizer.cls_token_id
        pad_token = tokenizer.pad_token_id

        for tokens in token_sets:
            tokens = [cls_token] + tokens + [sep_token]

            input_ids = tokenizer.convert_tokens_to_ids(tokens)

            padding_length = max_seq_length - len(input_ids)
            input_ids = input_ids + ([pad_token] * padding_length)

            assert len(input_ids) == max_seq_length

            features.append(input_ids)
    else:
        for tokens in token_sets:
            input_ids = tokenizer.convert_tokens_to_ids(tokens)
            features.append(input_ids)

    return features
161,629 | import os
import pickle
from multiprocessing import Pool
from typing import Tuple
import torch
from datasets import load_dataset
from torch.utils.data import Dataset
from tqdm.auto import tqdm
from transformers import PreTrainedTokenizer
from loguru import logger
def preprocess_batch_for_hf_dataset(dataset, tokenizer, max_seq_length):
    """Tokenize a batch of 'text' examples, truncated/padded to max_seq_length."""
    return tokenizer(
        text=dataset["text"],
        truncation=True,
        padding="max_length",
        max_length=max_seq_length,
    )
def load_hf_dataset(data, tokenizer, args):
    """Load a plain-text dataset (from a path or in-memory files) and tokenize it."""
    dataset = load_dataset("text", data_files=data)
    dataset = dataset.map(
        lambda batch: preprocess_batch_for_hf_dataset(
            batch, tokenizer=tokenizer, max_seq_length=args.max_seq_length
        ),
        batched=True,
    )
    dataset.set_format(type="pt", columns=["input_ids"])
    # The datasets library names the single split "train" even for eval files.
    return dataset["train"] if isinstance(data, str) else dataset
161,630 | import os
import pickle
from multiprocessing import Pool
from typing import Tuple
import torch
from datasets import load_dataset
from torch.utils.data import Dataset
from tqdm.auto import tqdm
from transformers import PreTrainedTokenizer
from loguru import logger
The provided code snippet includes necessary dependencies for implementing the `mask_tokens` function. Write a Python function `def mask_tokens(inputs: torch.Tensor, tokenizer: PreTrainedTokenizer, args) -> Tuple[torch.Tensor, torch.Tensor]` to solve the following problem:
Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original.
Here is the function:
def mask_tokens(inputs: torch.Tensor, tokenizer: PreTrainedTokenizer, args) -> Tuple[torch.Tensor, torch.Tensor]:
    """ Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original.

    NOTE: *inputs* is modified in place (masked positions are overwritten)
    and also returned. Labels are -100 everywhere except masked positions,
    so the loss is computed on masked tokens only.
    """
    if tokenizer.mask_token is None:
        raise ValueError(
            "This tokenizer does not have a mask token which is necessary for masked language modeling."
            "Set 'mlm' to False in args if you want to use this tokenizer."
        )

    labels = inputs.clone()
    # We sample a few tokens in each sequence for masked-LM training
    # (with probability args.mlm_probability defaults to 0.15 in Bert/RoBERTa)
    probability_matrix = torch.full(labels.shape, args.mlm_probability)
    # Never mask special tokens or padding: zero their selection probability.
    special_tokens_mask = [
        tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()
    ]
    probability_matrix.masked_fill_(torch.tensor(special_tokens_mask, dtype=torch.bool), value=0.0)
    # NOTE(review): uses the private tokenizer._pad_token attribute; the
    # public equivalent is tokenizer.pad_token.
    if tokenizer._pad_token is not None:
        padding_mask = labels.eq(tokenizer.pad_token_id)
        probability_matrix.masked_fill_(padding_mask, value=0.0)
    masked_indices = torch.bernoulli(probability_matrix).bool()
    labels[~masked_indices] = -100  # We only compute loss on masked tokens

    if args.model_type == "electra":
        # For ELECTRA, we replace all masked input tokens with tokenizer.mask_token
        inputs[masked_indices] = tokenizer.convert_tokens_to_ids(tokenizer.mask_token)
    else:
        # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
        indices_replaced = torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices
        inputs[indices_replaced] = tokenizer.convert_tokens_to_ids(tokenizer.mask_token)

        # 10% of the time, we replace masked input tokens with random word
        # (0.5 of the remaining 20% of masked positions = 10% overall).
        indices_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced
        random_words = torch.randint(len(tokenizer), labels.shape, dtype=torch.long)
        inputs[indices_random] = random_words[indices_random]

        # The rest of the time (10% of the time) we keep the masked input tokens unchanged
    return inputs, labels
161,631 | import random
from loguru import logger
import os
import jieba
import jieba.posseg
import math
import re
E
def clean(line):
    """Strip meaningless characters from a line (清洗无意义字符).

    Fix: the original header was truncated to "an(line):" (missing "def cle"),
    a syntax error; clean_review below calls this as clean().

    Keeps CJK characters, ASCII alphanumerics and whitelisted punctuation;
    maps spaces to ',' (users often use spaces as punctuation); collapses
    runs of punctuation to at most one; lower-cases the result. Returns None
    for an empty input line.

    NOTE(review): relies on module-level convert() and keep_p defined
    elsewhere in this file.
    """
    if line == "":
        return
    line = convert(line)
    kept = []
    for char in line:
        if re.search("[\u4e00-\u9fa5]", char):  # CJK character
            kept.append(char)
        elif re.search("[a-zA-Z0-9]", char):    # ASCII alphanumeric
            kept.append(char)
        elif char in keep_p:                    # whitelisted punctuation
            kept.append(char)
        elif char == ' ':                       # treat a space as a comma
            kept.append(',')
        else:
            kept.append('')
    deduped = []
    run = 0
    for char in kept:
        if char in keep_p:
            run += 1
        else:
            run = 0
        if run < 2:  # drop the 2nd+ punctuation char in a row
            deduped.append(char)
    result = ''.join(deduped).strip()
    return result.lower()
The provided code snippet includes necessary dependencies for implementing the `clean_review` function. Write a Python function `def clean_review(text)` to solve the following problem:
对原始评论进行清理,删去非法字符,统一标点,删去无用评论
Here is the function:
def clean_review(text):
    """
    对原始评论进行清理,删去非法字符,统一标点,删去无用评论

    Cleans each raw review line, dropping too-short lines and boilerplate
    placeholder reviews. Returns the surviving cleaned lines.

    Fix: clean() returns None for an empty line, and the original then
    crashed on len(None); blank lines are now skipped safely.
    """
    review_set = []
    for raw in text:
        line = clean(raw.strip())
        if line is None or len(line) < 7:  # drop empty/too-short reviews
            continue
        if line in ['该用户没有填写评论。', '用户晒单。']:  # boilerplate placeholders
            continue
        review_set.append(line)
    return review_set
161,632 | import random
from loguru import logger
import os
import jieba
import jieba.posseg
import math
import re
E + ENG_MARK = NOUN_MARK + VERB_MARK
The provided code snippet includes necessary dependencies for implementing the `text2review` function. Write a Python function `def text2review(seg_pos_text)` to solve the following problem:
经过分词的文档,得到原始用户的每条评论
Here is the function:
def text2review(seg_pos_text):
    """
    经过分词的文档,得到原始用户的每条评论

    Returns (review_list, all_word): each review as a list of words, plus the
    set of words whose POS flag is in the module-level RESERVED_MARK.
    """
    review_list = []
    all_word = set()
    for seg_pos in seg_pos_text:
        words = []
        for term in seg_pos:
            word, flag = term.split('/')
            words.append(word)
            if flag in RESERVED_MARK:
                all_word.add(word)
        review_list.append(words)
    return review_list, all_word
161,633 | import random
from loguru import logger
import os
import jieba
import jieba.posseg
import math
import re
E
The provided code snippet includes necessary dependencies for implementing the `find_word_phrase` function. Write a Python function `def find_word_phrase(all_word, seg_list)` to solve the following problem:
根据点互信息以及信息熵发现词组,主要目的是提升分词效果
Here is the function:
def find_word_phrase(all_word, seg_list):
    """
    根据点互信息以及信息熵发现词组,主要目的是提升分词效果

    Scores every adjacent word pair (bi-gram over words in *all_word*) by
    PMI + left entropy + right entropy and returns 'phrase:score' strings
    sorted best-first. Only pairs seen at least 5 times are considered.
    """
    res = []
    word_count = {k: 0 for k in all_word}  # occurrence count of every candidate word
    all_word_count = 0
    all_bi_gram_count = 0
    for sentence in seg_list:
        all_word_count += len(sentence)
        all_bi_gram_count += len(sentence) - 1
        for idx, word in enumerate(sentence):
            if word in word_count:
                word_count[word] += 1
    bi_gram_count = {}   # (first, second) -> co-occurrence count
    bi_gram_lcount = {}  # (first, second) -> counts of left-neighbor words
    bi_gram_rcount = {}  # (first, second) -> counts of right-neighbor words
    for sentence in seg_list:
        for idx, _ in enumerate(sentence):
            left_word = sentence[idx - 1] if idx != 0 else ''
            right_word = sentence[idx + 2] if idx < len(sentence) - 2 else ''
            first = sentence[idx]
            second = sentence[idx + 1] if idx + 1 < len(sentence) else ''
            if first in word_count and second in word_count:
                if (first, second) in bi_gram_count:
                    bi_gram_count[(first, second)] += 1
                else:
                    bi_gram_count[(first, second)] = 1
                    bi_gram_lcount[(first, second)] = {}
                    bi_gram_rcount[(first, second)] = {}
                if left_word in bi_gram_lcount[(first, second)]:
                    bi_gram_lcount[(first, second)][left_word] += 1
                elif left_word != '':
                    bi_gram_lcount[(first, second)][left_word] = 1
                if right_word in bi_gram_rcount[(first, second)]:
                    bi_gram_rcount[(first, second)][right_word] += 1
                elif right_word != '':
                    bi_gram_rcount[(first, second)][right_word] = 1
    # Keep only bi-grams with a minimum frequency of 5.
    bi_gram_count = dict(filter(lambda x: x[1] >= 5, bi_gram_count.items()))
    bi_gram_le = {}  # left entropy of each bi-gram
    bi_gram_re = {}  # right entropy of each bi-gram
    for phrase in bi_gram_count:
        le = 0
        for l_word in bi_gram_lcount[phrase]:
            p_aw_w = bi_gram_lcount[phrase][l_word] / bi_gram_count[phrase]  # P(aW | W)
            le += p_aw_w * math.log2(p_aw_w)
        le = -le
        bi_gram_le[phrase] = le
    for phrase in bi_gram_count:
        re = 0
        for r_word in bi_gram_rcount[phrase]:
            p_wa_w = bi_gram_rcount[phrase][r_word] / bi_gram_count[phrase]  # P(Wa | W)
            re += p_wa_w * math.log2(p_wa_w)
        re = -re
        bi_gram_re[phrase] = re
    # Pointwise mutual information of each surviving bi-gram.
    PMI = {}
    for phrase in bi_gram_count:
        p_first = word_count[phrase[0]] / all_word_count
        p_second = word_count[phrase[1]] / all_word_count
        p_bi_gram = bi_gram_count[phrase] / all_bi_gram_count
        PMI[phrase] = math.log2(p_bi_gram / (p_first * p_second))
    # Final score = PMI + left entropy + right entropy, sorted descending.
    phrase_score = []
    for phrase in PMI:
        le = bi_gram_le[phrase]
        re = bi_gram_re[phrase]
        score = PMI[phrase] + le + re
        phrase_score.append((phrase, score))
    phrase_score = sorted(phrase_score, key=lambda x: x[1], reverse=True)
    for item in phrase_score:
        res.append('{}:{}'.format(''.join(item[0]), item[1]))
    return res
161,634 | import random
from loguru import logger
import os
import jieba
import jieba.posseg
import math
import re
E
def load_list(path):
    """Read a whitespace-separated token list from a UTF-8 text file.

    Fix: the original opened the file without ever closing it (handle leak);
    it also wrapped split() in a redundant identity comprehension.
    """
    with open(path, 'r', encoding='utf-8') as f:
        return f.read().split()
161,635 | import random
from loguru import logger
import os
import jieba
import jieba.posseg
import math
import re
E
def get_seg_pos(line, type='word'):
    """
    Segment a line and attach POS tags; splitting is by word or by character.

    For type='word' returns 'word/flag' strings from jieba POS tagging;
    for any other type returns the stripped line as a list of single
    characters.
    """
    if type != 'word':
        return list(line.strip())
    return ['%s/%s' % (term.word, term.flag) for term in jieba.posseg.cut(line.strip())]
The provided code snippet includes necessary dependencies for implementing the `caculate_word_idf` function. Write a Python function `def caculate_word_idf(docs, stopwords)` to solve the following problem:
计算所有文档中的每个词的idf docs: list(list(str)), 数据集 stop_word: list, 停用词list return: 所有词的idf值
Here is the function:
def caculate_word_idf(docs, stopwords):
    """
    Compute each word's IDF across all documents.

    docs: list(list(str)) — each document is a list of review lines
    stopwords: collection of words to exclude from the IDF table
    Returns (word_IDF, seg_pos_text): a word -> log10(num_doc / doc_freq)
    mapping, and the per-line 'word/flag' segmentations produced on the way.
    """
    word_IDF = {}  # word -> number of docs containing it, converted to IDF at the end
    num_doc = len(docs)  # total number of documents (products)
    seg_pos_text = []
    for doc in docs:
        cur_doc_word_set = set()  # distinct words seen in the current document
        for line in doc:
            line = line.strip()
            seg_pos_list = get_seg_pos(line, type='word')
            seg_pos_text.append(seg_pos_list)
            word_list = [term.split('/')[0] for term in seg_pos_list]
            for w in word_list:
                # Stopwords are excluded from the IDF table entirely.
                if w in stopwords:
                    continue
                cur_doc_word_set.add(w)
        # Document frequency: count each word once per document.
        for w in cur_doc_word_set:
            if w in word_IDF:
                word_IDF[w] += 1
            else:
                word_IDF[w] = 1
    for w in word_IDF:
        word_IDF[w] = math.log10(num_doc / word_IDF[w])
    return word_IDF, seg_pos_text
161,636 | import random
from loguru import logger
import os
import jieba
import jieba.posseg
import math
import re
E
The provided code snippet includes necessary dependencies for implementing the `text2seg_pos` function. Write a Python function `def text2seg_pos(seg_pos_text, pattern='[。!?]')` to solve the following problem:
经过分词的文档,原始一条用户评论通过指定的标点符号分成多个句子
Here is the function:
def text2seg_pos(seg_pos_text, pattern='[。!?]'):
    """
    Split segmented/POS-tagged documents into sentences.

    Each input item is a list of "word/flag" tokens.  Whenever a word is
    one of the characters of `pattern` (membership is a char-in-string
    test, so '[' and ']' also split), the buffered words/flags are flushed
    as one sentence.  A trailing partial sentence is discarded; the full
    untagged review is always kept.
    """
    seg_list = []         # all punctuation-delimited word sentences
    pos_list = []         # matching POS-flag sentences
    seg_review_list = []  # one complete word list per review
    for seg_pos in seg_pos_text:
        words_buf, flags_buf, full_review = [], [], []
        for term in seg_pos:
            word, flag = term.split('/')
            full_review.append(word)
            words_buf.append(word)
            flags_buf.append(flag)
            if word in pattern:
                seg_list.append(list(words_buf))
                pos_list.append(list(flags_buf))
                words_buf, flags_buf = [], []
        seg_review_list.append(list(full_review))
    return seg_list, pos_list, seg_review_list
161,637 | import random
from loguru import logger
import os
import jieba
import jieba.posseg
import math
import re
WINDOW_SIZE = 5
The provided code snippet includes necessary dependencies for implementing the `get_candidate_aspect` function. Write a Python function `def get_candidate_aspect(seg_list, pos_list, adj_word, stop_word, word_idf)` to solve the following problem:
输入的数据为用逗号隔开的短句, 利用开窗口的方式,根据情感词典抽名词得到候选的aspect
Here is the function:
def get_candidate_aspect(seg_list, pos_list, adj_word, stop_word, word_idf):
    """
    Extract candidate aspect words from punctuation-delimited clauses.

    For every sentiment adjective found, look back over a WINDOW_SIZE
    window and collect tokens whose POS is in ASPECT_MARK, then filter
    out single-character words, stop words, and missing/zero-IDF words.
    """
    aspect_dict = {}
    for i, sentence in enumerate(seg_list):
        for j, word in enumerate(sentence):
            if word in adj_word and pos_list[i][j] in ADJECTIVE_MARK:  # current token is a sentiment adjective
                startpoint = j - WINDOW_SIZE
                startpoint = startpoint if startpoint >= 0 else 0
                for k in range(startpoint, j):
                    if pos_list[i][k] in ASPECT_MARK:
                        if seg_list[i][k] in aspect_dict:
                            aspect_dict[seg_list[i][k]] += 1
                        else:
                            aspect_dict[seg_list[i][k]] = 1
    candidates = aspect_dict.items()
    candidates = list(filter(lambda x: len(x[0]) > 1, candidates))  # drop single-character words
    candidates = [item[0] for item in candidates if item[0] not in stop_word]  # drop stop words
    # Drop words whose IDF is missing or zero.  The original expression
    # (`item if cond else item`) returned the item in both branches, so
    # no filtering ever happened despite the comment saying it should.
    candidates = [item for item in candidates if item in word_idf and word_idf[item] != 0]
    logger.debug(f"Extract {len(candidates)} aspect candidates, top10: {candidates[:10]}")
    return candidates
161,638 | import random
from loguru import logger
import os
import jieba
import jieba.posseg
import math
import re
WINDOW_SIZE = 5
The provided code snippet includes necessary dependencies for implementing the `get_aspect_express` function. Write a Python function `def get_aspect_express(seg_review_list, pair_useful)` to solve the following problem:
抽取原始评论中的aspect作为输入,完整的评论作为输出
Here is the function:
def get_aspect_express(seg_review_list, pair_useful):
    """
    Collect raw opinion expressions for each aspect.

    Each review is split on punctuation into short clauses; for every
    clause that contains an aspect word, a WINDOW_SIZE window around the
    aspect is searched for one of its opinion words.  Matching clauses
    (joined length <= 30) are recorded, and aspects with fewer than 5
    recorded expressions are dropped.
    """
    def check_sentence(sentence):
        """
        Check whether a short clause is acceptable (joined length <= 30).
        """
        _s = ''.join(sentence)
        legal = True
        if len(_s) > 30:
            legal = False
        return legal
    raw_aspect_express = {k: [] for k in pair_useful}  # aspect -> raw user expressions about it
    raw_aspect_express_count = {k: 0 for k in pair_useful}  # aspect -> number of expressions found
    for review in seg_review_list:  # each element is one complete review (list of words)
        if review[-1] not in PUNCTUATION:
            review.append('。')
        # split the review into clauses at punctuation marks
        cur_review = []
        pre_end = 0
        for i, _ in enumerate(review):
            if review[i] in ['。', '!', '?', ',', '~']:
                cur_review.append(review[pre_end:i + 1])
                pre_end = i + 1
            elif i == len(review) - 1:
                cur_review.append(review[pre_end:])
        for sentence in cur_review:  # sentence is a short clause between two punctuation marks
            if sentence[-1] not in PUNCTUATION:
                sentence.append('。')
            find_opinion_flag = False
            for idx, word in enumerate(sentence):
                if find_opinion_flag:  # stop scanning this clause once one opinion expression is found
                    break
                if word in pair_useful:  # current word is an aspect
                    # look backwards over the window; only accept "opinion + 的" patterns
                    startpoint = idx - WINDOW_SIZE if idx - WINDOW_SIZE > 0 else 0
                    for i in range(startpoint, idx):  # search for an opinion word
                        cur_word = sentence[i]
                        if cur_word in pair_useful[word] and sentence[i + 1] == "的":  # e.g. 超赞的一款面膜
                            if check_sentence(sentence):
                                raw_aspect_express[word].append(sentence)
                                raw_aspect_express_count[word] += 1
                            find_opinion_flag = True  # one opinion word is enough to accept the clause
                    # look forwards over the window
                    endpoint = idx + WINDOW_SIZE if idx + WINDOW_SIZE < len(sentence) else len(sentence)
                    for i in range(idx + 1, endpoint):
                        cur_word = sentence[i]
                        if cur_word in pair_useful[word]:
                            if check_sentence(sentence):
                                raw_aspect_express[word].append(sentence)
                                raw_aspect_express_count[word] += 1
                            find_opinion_flag = True  # one opinion word is enough to accept the clause
    # keep only aspects with enough supporting expressions
    aspect_express = {}
    for aspect in raw_aspect_express:
        if raw_aspect_express_count[aspect] < 5:
            continue
        aspect_express[aspect] = raw_aspect_express[aspect]
    return aspect_express
161,639 | import random
from loguru import logger
import os
import jieba
import jieba.posseg
import math
import re
E
The provided code snippet includes necessary dependencies for implementing the `merge_aspect_express` function. Write a Python function `def merge_aspect_express(aspect_express, pair_useful)` to solve the following problem:
对相似的观点表达进行合并, 同时输出最终的aspect_opinion_pair
Here is the function:
def merge_aspect_express(aspect_express, pair_useful):
    """
    Merge similar opinion expressions and build the final opinion set.

    Aspects are sorted lexicographically and greedily grouped: an aspect
    joins the current group when it shares at least one character with
    the group's last member.  Each group becomes one comma-joined key
    whose value pools the members' expressions; all of the members'
    opinion words are returned as a set.
    """
    def shares_char(left, right):
        # True when the two words have at least one character in common.
        return any(ch in right for ch in left)

    sorted_aspects = sorted(aspect_express.keys())
    groups = [[sorted_aspects[0]]] if sorted_aspects else [[]]
    for aspect in sorted_aspects[1:]:
        if shares_char(groups[-1][-1], aspect):
            groups[-1].append(aspect)
        else:
            groups.append([aspect])

    merged_express = {}
    opinions = []
    for group in groups:
        # pool every member's opinion words
        for aspect in group:
            opinions.extend(pair_useful[aspect])
        key = ','.join(group)
        merged_express[key] = []
        for aspect in group:
            merged_express[key].extend(aspect_express[aspect])
    return merged_express, set(opinions)
161,640 | import random
from loguru import logger
import os
import jieba
import jieba.posseg
import math
import re
E
The provided code snippet includes necessary dependencies for implementing the `build_dataset_express` function. Write a Python function `def build_dataset_express(seg_review_list, pair_useful)` to solve the following problem:
抽取原始评论中的aspect作为输入,完整的评论作为输出
Here is the function:
def build_dataset_express(seg_review_list, pair_useful):
    """
    Build (aspect-words, full-review) training pairs.

    For each review, the source is the list of aspect words found (at
    most one per punctuation-delimited clause) and the target is the
    full review.  Samples with no aspect, more than 5 aspects, duplicate
    aspects, or targets longer than 60 chars are filtered out.
    """
    train_data = []  # collected (source, target) samples before filtering
    for review in seg_review_list:  # each element is one complete review (list of words)
        source = []  # training src: aspect words found in the review
        if review[-1] not in PUNCTUATION:
            review.append('。')
        target = review  # training tgt: the full review
        # split the review into clauses at punctuation marks
        cur_review = []
        pre_end = 0
        for i, _ in enumerate(review):
            if review[i] in ['。', '!', '?', ',', '~']:
                cur_review.append(review[pre_end:i + 1])
                pre_end = i + 1
            elif i == len(review) - 1:
                cur_review.append(review[pre_end:])
        for sentence in cur_review:  # sentence is a short clause between two punctuation marks
            if sentence[-1] not in PUNCTUATION:
                sentence.append('。')
            find_opinion_flag = False
            for idx, word in enumerate(sentence):
                if find_opinion_flag:  # record at most one aspect per clause
                    break
                if word in pair_useful:  # current word is an aspect
                    source.append(word)
                    find_opinion_flag = True  # one aspect word is enough for this clause
        train_data.append((list(source), target))
    max_source_length = 0
    # filter the raw training samples
    def check_review(item):
        """
        Check whether the current sample is acceptable.
        """
        source = item[0]
        tgt = item[1]
        legal = True
        _s = ''.join(tgt)
        if len(source) == 0 or len(source) > 5:  # no aspect at all, or too many aspects
            legal = False
        unique_source = set(source)
        if len(unique_source) != len(source):
            legal = False
        if len(_s) > 60:
            legal = False
        return legal
    legal_train_data = []
    for item in train_data:
        if check_review(item):
            max_source_length = max(max_source_length, len(item[0]))
            legal_train_data.append(item)
    logger.debug(f'max source length: {max_source_length}')
    return legal_train_data
161,641 | import random
from loguru import logger
import os
import jieba
import jieba.posseg
import math
import re
E
The provided code snippet includes necessary dependencies for implementing the `generate_reviews` function. Write a Python function `def generate_reviews(aspect_express, num_steps=1000)` to solve the following problem:
根据候选集合生成假评论
Here is the function:
def generate_reviews(aspect_express, num_steps=1000):
    """
    Generate synthetic reviews by sampling aspect expressions.

    Aspects are sampled proportionally to how many expressions they have.
    Each generated review concatenates 1-6 sampled expressions, with '#'
    replacing each expression's trailing punctuation as a separator.
    """
    res = []
    all_aspect = list(aspect_express.keys())
    logger.debug(f'Aspect: {all_aspect}')
    # weight each aspect by how often it occurs
    aspect_length_dict = {}
    for a in aspect_express:
        aspect_length_dict[a] = len(aspect_express[a])
    weight_aspect_list = []
    for aspect in aspect_length_dict:
        weight_aspect_list += [aspect] * aspect_length_dict[aspect]
    if not weight_aspect_list:
        return res
    for _ in range(num_steps):
        num_aspect = random.choice([1, 2, 3, 4, 5, 6])
        review = []
        used_aspect = []
        for _ in range(num_aspect):
            a = random.choice(weight_aspect_list)
            # best-effort de-duplication: re-draw once if already used
            if a in used_aspect and len(all_aspect) > 1:
                a = random.choice(weight_aspect_list)
            used_aspect.append(a)
            a_s = random.choice(aspect_express[a])
            a_s = a_s[:-1] + ['#']  # drop trailing punctuation, use '#' as the split point
            review += a_s
        res.append(review)
    return res
161,642 | import random
from loguru import logger
import os
import jieba
import jieba.posseg
import math
import re
EMOJI = ['😀', '😁', '😂', '😃', '😄', '😆', '😉', '😊',
'😋', '😎', '😍', '😘', '😗', '😙', '😚', '😇',
'😏', '😝']
YANWENZI = ['ヽ(✿゚▽゚)ノ', 'φ(≧ω≦*)♪', '╰(*°▽°*)╯', 'o( ̄▽ ̄)d', 'o( =•ω•= )m']
ILLEGAL_WORD = ['考拉', '网易', '淘宝', '京东', '拼多多', '不过', '因为', '而且', '但是', '但', '所以', '因此', '如果'] # 过滤词
RESERVED_MARK = NOUN_MARK + VERB_MARK + ADJE
The provided code snippet includes necessary dependencies for implementing the `fake_review_filter` function. Write a Python function `def fake_review_filter(reviews, opinion_set, is_uniq=True)` to solve the following problem:
筛去评论中不像人写的句子:如果同一个形容词重复出现两次就判定为假评论,同时筛去长度超过60的评论
Here is the function:
def fake_review_filter(reviews, opinion_set, is_uniq=True):
    """
    Filter generated reviews that do not read like human text.

    A review is dropped when it contains a blacklisted word or repeats
    the same opinion word.  Kept reviews are re-joined on '#' with a
    randomly chosen separator (mostly commas, occasionally '~'/'!' or an
    emoji/kaomoji) and terminated with '。'.

    NOTE(review): the original docstring also claimed reviews longer
    than 60 chars are dropped, but no length check exists here — confirm
    whether that filter was lost.
    """
    results = []
    for review in reviews:
        opinion_used = {k: 0 for k in opinion_set}
        flag = True
        for word in review:
            if word in ILLEGAL_WORD:
                flag = False
            if word in opinion_used:
                opinion_used[word] += 1
                if opinion_used[word] >= 2:
                    flag = False
                    break
        if flag:
            _s = ''.join(review)
            _s = _s.split('#')  # last element is an empty string
            review = ''
            # separator pool weighted heavily towards ','
            pu = [','] * 100 + ['~'] * 20 + ['!'] * 20 + EMOJI + YANWENZI
            random.shuffle(pu)
            for a_s in _s:
                if a_s:
                    review += a_s + random.choice(pu)
            if not review:
                logger.warning(f'error: {review}')
            review = review[:-1] + '。'  # replace the final separator with a full stop
            if is_uniq:
                if review not in results:
                    results.append(review)
            else:
                results.append(review)
    return results
161,643 | import os
import pickle
from dataclasses import dataclass
from typing import List, Optional, Dict, Sequence
import datasets
from datasets import Dataset as HFDataset
from datasets import load_dataset
from loguru import logger
from torch.utils.data import Dataset
from transformers.trainer_pt_utils import LabelSmoother
class Conversation:
    """A class that manages prompt templates and keeps all conversation history."""
    # NOTE(review): the bare annotated fields (one with a default) suggest this
    # class is used with @dataclass; the decorator is not visible in this chunk
    # — confirm before constructing instances directly.
    # The name of this template
    name: str
    # The system prompt
    system_prompt: str
    # All messages. format: list of [question, answer]
    messages: Optional[List[Sequence[str]]]
    # The roles of the speakers
    roles: Optional[Sequence[str]]
    # Conversation prompt
    prompt: str
    # Separator
    sep: str
    # Stop token, default is tokenizer.eos_token
    stop_str: Optional[str] = "</s>"
    def get_prompt(
            self,
            messages: Optional[List[Sequence[str]]] = None,
            system_prompt: Optional[str] = ""
    ) -> str:
        """
        Returns a string containing prompt without response.
        """
        return "".join(self._format_example(messages, system_prompt))
    def get_dialog(
            self,
            messages: Optional[List[Sequence[str]]] = None,
            system_prompt: Optional[str] = ""
    ) -> List[str]:
        """
        Returns a list containing 2 * n elements where the 2k-th is a query and the (2k+1)-th is a response.
        """
        return self._format_example(messages, system_prompt)
    def _format_example(
            self,
            messages: Optional[List[Sequence[str]]] = None,
            system_prompt: Optional[str] = ""
    ) -> List[str]:
        # Fall back to the template's own system prompt / messages when
        # the caller does not supply them.
        system_prompt = system_prompt or self.system_prompt
        system_prompt = system_prompt + self.sep if system_prompt else ""  # add separator for non-empty system prompt
        messages = messages or self.messages
        convs = []
        for turn_idx, [user_query, bot_resp] in enumerate(messages):
            # The system prompt is prepended to the first turn only.
            if turn_idx == 0:
                convs.append(system_prompt + self.prompt.format(query=user_query))
                convs.append(bot_resp)
            else:
                convs.append(self.sep + self.prompt.format(query=user_query))
                convs.append(bot_resp)
        return convs
    def append_message(self, query: str, answer: str):
        """Append a new message."""
        self.messages.append([query, answer])
conv_templates: Dict[str, Conversation] = {}
The provided code snippet includes necessary dependencies for implementing the `register_conv_template` function. Write a Python function `def register_conv_template(template: Conversation)` to solve the following problem:
Register a new conversation template.
Here is the function:
def register_conv_template(template: Conversation):
    """Register a conversation template in the module registry under its name."""
    conv_templates.update({template.name: template})
161,644 | import os
import pickle
from dataclasses import dataclass
from typing import List, Optional, Dict, Sequence
import datasets
from datasets import Dataset as HFDataset
from datasets import load_dataset
from loguru import logger
from torch.utils.data import Dataset
from transformers.trainer_pt_utils import LabelSmoother
def preprocess_function(examples, tokenizer, args):
    """
    Preprocessing the datasets.
    part of code modified from https://github.com/lm-sys/FastChat

    Converts sharegpt-style multi-turn conversations into (input_ids,
    labels) pairs: prompt tokens are masked with IGNORE_INDEX so only
    assistant responses (plus a per-turn eos) contribute to the loss.
    """
    input_ids_list = []
    targets_list = []
    roles = ["human", "gpt"]
    prompt_template = get_conv_template(args.prompt_template_name)
    max_source_length = args.max_seq_length
    max_target_length = args.max_length
    max_full_length = max_source_length + max_target_length
    def get_dialog(examples):
        # Yield rendered dialogs: alternating [prompted query, response, ...].
        for i, source in enumerate(examples['conversations']):
            if len(source) < 2:
                continue
            data_role = source[0].get("from", "")
            if data_role not in roles or data_role != roles[0]:
                # Skip the first one if it is not from human
                break
            messages = []
            for j, sentence in enumerate(source):
                data_role = sentence.get("from", "")
                if data_role not in roles:
                    logger.warning(f"unknown role: {data_role}, {i}. (ignored)")
                    break
                # keep only messages whose role alternates human/gpt correctly
                if data_role == roles[j % 2]:
                    messages.append(sentence["value"])
            if len(messages) % 2 != 0:
                continue
            # Convert the list to pairs of elements
            history_messages = [[messages[k], messages[k + 1]] for k in range(0, len(messages), 2)]
            yield prompt_template.get_dialog(history_messages)
    for dialog in get_dialog(examples):
        input_ids, labels = [], []
        for i in range(len(dialog) // 2):
            source_ids = tokenizer.encode(text=dialog[2 * i], add_special_tokens=(i == 0))
            target_ids = tokenizer.encode(text=dialog[2 * i + 1], add_special_tokens=False)
            # truncate each side to its own budget (reserve one slot for eos on the target)
            if len(source_ids) > max_source_length:
                source_ids = source_ids[:max_source_length]
            if len(target_ids) > max_target_length - 1:  # eos token
                target_ids = target_ids[:max_target_length - 1]
            # avoid duplicated eos tokens at segment boundaries
            if len(source_ids) > 0 and source_ids[0] == tokenizer.eos_token_id:
                source_ids = source_ids[1:]
            if len(target_ids) > 0 and target_ids[-1] == tokenizer.eos_token_id:
                target_ids = target_ids[:-1]
            # stop adding turns once the full-sequence budget would be exceeded
            if len(input_ids) + len(source_ids) + len(target_ids) + 1 > max_full_length:
                break
            input_ids += source_ids + target_ids + [tokenizer.eos_token_id]  # add eos token for each turn
            labels += [IGNORE_INDEX] * len(source_ids) + target_ids + [tokenizer.eos_token_id]
        input_ids_list.append(input_ids)
        targets_list.append(labels)
    return dict(
        input_ids=input_ids_list,
        labels=targets_list,
    )
def filter_empty_labels(example):
    """Keep only examples with at least one non-ignored label token."""
    return any(label != IGNORE_INDEX for label in example["labels"])
def load_supervised_dataset(tokenizer, args, data, mode):
    """
    Load and tokenize a supervised fine-tuning dataset.

    `data` may be a json/jsonl file path, a datasets save_to_disk dir, a
    hub dataset name, or a pandas DataFrame.  Returns a shuffled,
    tokenized HF dataset with empty-label rows removed.
    """
    if isinstance(data, str):
        if data.endswith('.json') or data.endswith('.jsonl'):
            dataset = load_dataset("json", data_files=data)
        elif os.path.isdir(data):
            dataset = datasets.load_from_disk(data)
        else:
            # treat as a hub dataset name
            dataset = load_dataset(
                data,
                download_mode="force_redownload"
                if args.reprocess_input_data
                else "reuse_dataset_if_exists",
            )
        # This is not necessarily a train dataset. The datasets library insists on calling it train.
        dataset = dataset['train']
        if mode == 'dev' and args.max_eval_samples is not None:
            # cap the size of the eval split
            max_eval_samples = min(len(dataset), args.max_eval_samples)
            dataset = dataset.select(range(max_eval_samples))
    else:
        dataset = HFDataset.from_pandas(data)
    dataset = dataset.shuffle().map(
        lambda x: preprocess_function(x, tokenizer=tokenizer, args=args),
        batched=True,
        num_proc=args.preprocessing_num_workers,
        remove_columns=dataset.column_names,
        desc="Running tokenizer on dataset",
    )
    dataset = dataset.filter(filter_empty_labels, num_proc=args.preprocessing_num_workers)
    return dataset
161,645 | import json
import os
from collections import OrderedDict
from typing import Any, Dict
import fire
import torch
from transformers.modeling_utils import shard_checkpoint, WEIGHTS_NAME, WEIGHTS_INDEX_NAME
def save_weight(
        input_dir: str,
        output_dir: str,
        shard_size: str
):
    """
    Convert Baichuan2 .bin weights to LLaMA-2 layout and save shards.

    The fused W_pack projection is split into q/k/v projections; the
    lm_head weight is L2-normalized (presumably to match Baichuan2's
    NormHead behavior — confirm against the original model).
    """
    baichuan2_state_dict: Dict[str, torch.Tensor] = OrderedDict()
    for filepath in os.listdir(input_dir):
        if os.path.isfile(os.path.join(input_dir, filepath)) and filepath.endswith(".bin"):
            shard_weight = torch.load(os.path.join(input_dir, filepath), map_location="cpu")
            baichuan2_state_dict.update(shard_weight)
    llama2_state_dict: Dict[str, torch.Tensor] = OrderedDict()
    for key, value in baichuan2_state_dict.items():
        if "W_pack" in key:
            # fused qkv: rows are [q; k; v], each proj_size tall
            proj_size = value.size(0) // 3
            llama2_state_dict[key.replace("W_pack", "q_proj")] = value[:proj_size, :]
            llama2_state_dict[key.replace("W_pack", "k_proj")] = value[proj_size:2 * proj_size, :]
            llama2_state_dict[key.replace("W_pack", "v_proj")] = value[2 * proj_size:, :]
        elif "lm_head" in key:
            llama2_state_dict[key] = torch.nn.functional.normalize(value)
        else:
            llama2_state_dict[key] = value
    shards, index = shard_checkpoint(llama2_state_dict, max_shard_size=shard_size, weights_name=WEIGHTS_NAME)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))
    if index is None:
        print("Model weights saved in {}".format(os.path.join(output_dir, WEIGHTS_NAME)))
    else:
        # sharded save: also write the weight index json
        with open(os.path.join(output_dir, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
            json.dump(index, f, indent=2, sort_keys=True)
        print("Model weights saved in {}".format(output_dir))
def save_config(
        input_dir: str,
        output_dir: str
):
    """Rewrite a Baichuan2 config.json so it loads as a LLaMA model."""
    src_path = os.path.join(input_dir, CONFIG_NAME)
    with open(src_path, "r", encoding="utf-8") as f:
        config: Dict[str, Any] = json.load(f)
    # strip Baichuan-specific entries and relabel the architecture
    config["architectures"] = ["LlamaForCausalLM"]
    config.pop("auto_map", None)
    config.pop("tokenizer_class", None)
    config["model_type"] = "llama"
    dst_path = os.path.join(output_dir, CONFIG_NAME)
    with open(dst_path, "w", encoding="utf-8") as f:
        json.dump(config, f, indent=2)
    print("Model config saved in {}".format(dst_path))
def llamafy_baichuan2(
        input_dir: str,
        output_dir: str,
        shard_size: str
):
    """
    Convert a Baichuan2 checkpoint directory into LLaMA-2 format.

    Raises:
        RuntimeError: if `output_dir` cannot be created (e.g. it already
            exists).  The original `raise print(...)` raised
            ``TypeError: exceptions must derive from BaseException``
            because ``print`` returns ``None``.
    """
    try:
        os.makedirs(output_dir, exist_ok=False)
    except Exception as e:
        raise RuntimeError("Output dir already exists") from e
    save_weight(input_dir, output_dir, shard_size)
    save_config(input_dir, output_dir)
161,646 | import json
import os
from collections import OrderedDict
from typing import Any, Dict
import fire
import torch
from safetensors import safe_open
from transformers.modeling_utils import shard_checkpoint, WEIGHTS_NAME, WEIGHTS_INDEX_NAME
from transformers.utils import check_min_version
def save_weight(
        input_dir: str,
        output_dir: str,
        shard_size: str
) -> str:
    """
    Convert Qwen .safetensors weights to LLaMA-2 layout and save shards.

    Returns the detected torch dtype name (e.g. "bfloat16") so the
    caller can record it in the converted config.
    """
    qwen_state_dict: Dict[str, torch.Tensor] = OrderedDict()
    for filepath in os.listdir(input_dir):
        if os.path.isfile(os.path.join(input_dir, filepath)) and filepath.endswith(".safetensors"):
            with safe_open(os.path.join(input_dir, filepath), framework="pt", device="cpu") as f:
                for key in f.keys():
                    qwen_state_dict[key] = f.get_tensor(key)
    llama2_state_dict: Dict[str, torch.Tensor] = OrderedDict()
    torch_dtype = None
    for key, value in qwen_state_dict.items():
        if torch_dtype is None:
            # remember the dtype of the first tensor seen
            torch_dtype = value.dtype
        if "wte" in key:
            llama2_state_dict["model.embed_tokens.weight"] = value
        elif "ln_f" in key:
            llama2_state_dict["model.norm.weight"] = value
        else:
            key = key.replace("transformer.h", "model.layers")
            if "attn.c_attn" in key:
                # fused qkv: rows are [q; k; v], each proj_size tall
                proj_size = value.size(0) // 3
                llama2_state_dict[key.replace("attn.c_attn", "self_attn.q_proj")] = value[:proj_size, ...]
                llama2_state_dict[key.replace("attn.c_attn", "self_attn.k_proj")] = value[proj_size:2 * proj_size, ...]
                llama2_state_dict[key.replace("attn.c_attn", "self_attn.v_proj")] = value[2 * proj_size:, ...]
            elif "attn.c_proj" in key:
                llama2_state_dict[key.replace("attn.c_proj", "self_attn.o_proj")] = value
                # synthesize an all-zero o_proj bias to satisfy the LLaMA layout
                llama2_state_dict[key.replace("attn.c_proj.weight", "self_attn.o_proj.bias")] = (
                    torch.zeros_like(value[:, 0]).squeeze()
                )
            elif "ln_1" in key:
                llama2_state_dict[key.replace("ln_1", "input_layernorm")] = value
            elif "ln_2" in key:
                llama2_state_dict[key.replace("ln_2", "post_attention_layernorm")] = value
            elif "mlp.w1" in key:
                llama2_state_dict[key.replace("mlp.w1", "mlp.up_proj")] = value
            elif "mlp.w2" in key:
                llama2_state_dict[key.replace("mlp.w2", "mlp.gate_proj")] = value
            elif "mlp.c_proj" in key:
                llama2_state_dict[key.replace("mlp.c_proj", "mlp.down_proj")] = value
            elif "lm_head" in key:
                llama2_state_dict[key] = value
            else:
                raise KeyError("Unable to process key {}".format(key))
    shards, index = shard_checkpoint(llama2_state_dict, max_shard_size=shard_size, weights_name=WEIGHTS_NAME)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))
    if index is None:
        print("Model weights saved in {}".format(os.path.join(output_dir, WEIGHTS_NAME)))
    else:
        # sharded save: also write the weight index json
        with open(os.path.join(output_dir, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
            json.dump(index, f, indent=2, sort_keys=True)
        print("Model weights saved in {}".format(output_dir))
    return str(torch_dtype).replace("torch.", "")
def save_config(
        input_dir: str,
        output_dir: str,
        torch_dtype: str
):
    """
    Build a LLaMA-2 config.json from a Qwen config and save it.

    `torch_dtype` is the dtype string detected during weight conversion.
    """
    with open(os.path.join(input_dir, CONFIG_NAME), "r", encoding="utf-8") as f:
        qwen_config_dict: Dict[str, Any] = json.load(f)
    llama2_config_dict: Dict[str, Any] = OrderedDict()
    llama2_config_dict["architectures"] = ["LlamaForCausalLM"]
    llama2_config_dict["hidden_act"] = "silu"
    llama2_config_dict["hidden_size"] = qwen_config_dict["hidden_size"]
    llama2_config_dict["initializer_range"] = qwen_config_dict["initializer_range"]
    # halved — presumably because Qwen counts both halves of its gated MLP; confirm
    llama2_config_dict["intermediate_size"] = qwen_config_dict["intermediate_size"] // 2
    llama2_config_dict["max_position_embeddings"] = qwen_config_dict["max_position_embeddings"]
    llama2_config_dict["model_type"] = "llama"
    llama2_config_dict["num_attention_heads"] = qwen_config_dict["num_attention_heads"]
    llama2_config_dict["num_hidden_layers"] = qwen_config_dict["num_hidden_layers"]
    llama2_config_dict["num_key_value_heads"] = qwen_config_dict["hidden_size"] // qwen_config_dict["kv_channels"]
    llama2_config_dict["pretraining_tp"] = 1
    llama2_config_dict["rms_norm_eps"] = qwen_config_dict["layer_norm_epsilon"]
    llama2_config_dict["rope_scaling"] = None
    llama2_config_dict["tie_word_embeddings"] = qwen_config_dict["tie_word_embeddings"]
    llama2_config_dict["torch_dtype"] = torch_dtype
    llama2_config_dict["transformers_version"] = "4.34.0"
    llama2_config_dict["use_cache"] = True
    llama2_config_dict["vocab_size"] = qwen_config_dict["vocab_size"]
    llama2_config_dict["attention_bias"] = True
    with open(os.path.join(output_dir, CONFIG_NAME), "w", encoding="utf-8") as f:
        json.dump(llama2_config_dict, f, indent=2)
    print("Model config saved in {}".format(os.path.join(output_dir, CONFIG_NAME)))
def llamafy_qwen(
        input_dir: str,
        output_dir: str,
        shard_size: str
):
    """
    Convert a Qwen checkpoint directory into LLaMA-2 format.

    Raises:
        RuntimeError: if `output_dir` cannot be created (e.g. it already
            exists).  The original `raise print(...)` raised
            ``TypeError: exceptions must derive from BaseException``.
    """
    try:
        os.makedirs(output_dir, exist_ok=False)
    except Exception as e:
        raise RuntimeError("Output dir already exists") from e
    torch_dtype = save_weight(input_dir, output_dir, shard_size)
    save_config(input_dir, output_dir, torch_dtype)
161,647 | import argparse
import json
import os
import time
import openai
from loguru import logger
from tenacity import (
retry,
stop_after_attempt,
wait_random_exponential,
)
from tqdm import tqdm
REQ_TIME_GAP = 3
def completion_with_backoff(**kwargs):
    # Retry interval 1-3 seconds, 3 attempts.
    # NOTE(review): this comment implies a tenacity @retry decorator (imported
    # above) that is not visible on this def — confirm it was not lost.
    return openai_reply(**kwargs)
def get_chatgpt_response(content, model_name):
    """Query the chat model and return the reply text, or '' on any failure."""
    try:
        logger.debug(f"input openai_reply, content:{content}")
        response = str(completion_with_backoff(content=content, model_name=model_name))
        logger.debug(f"Successfully get chatgpt response, content:{content}, res:{response}")
    except Exception as e:
        # swallow API errors and fall back to an empty reply
        logger.error(e)
        response = ''
    # throttle between requests
    time.sleep(REQ_TIME_GAP)
    return response
161,648 | import argparse
import json
import os
import time
import openai
from loguru import logger
from tenacity import (
retry,
stop_after_attempt,
wait_random_exponential,
)
from tqdm import tqdm
def save_jsonl(data_list, json_path):
    """Write JSON-serializable objects to `json_path`, one JSON object per line.

    Creates the parent directory if needed.  Replaces the original's
    exists-then-create race with `os.makedirs(..., exist_ok=True)`,
    removes the stray debug `print(dir)`, and stops shadowing the
    builtin `dir`.
    """
    parent_dir = os.path.dirname(os.path.abspath(json_path))
    os.makedirs(parent_dir, exist_ok=True)
    with open(json_path, 'w', encoding='utf-8') as f:
        for entry in data_list:
            json.dump(entry, f, ensure_ascii=False)
            f.write('\n')
    print(f'save to {json_path}, size: {len(data_list)}')
161,649 | import argparse
import json
import os
import time
import openai
from loguru import logger
from tenacity import (
retry,
stop_after_attempt,
wait_random_exponential,
)
from tqdm import tqdm
def load_jsonl(json_path):
    """Read a JSON-lines file and return the parsed records as a list."""
    with open(json_path, 'r', encoding='utf-8') as reader:
        return [json.loads(row) for row in reader]
161,650 | import argparse
import json
import os
import time
import openai
from loguru import logger
from tenacity import (
retry,
stop_after_attempt,
wait_random_exponential,
)
from tqdm import tqdm
def read_data(file_path):
    """Return the lines of a UTF-8 file (trailing newlines kept), skipping empties.

    Uses a context manager so the file handle is closed deterministically
    (the original leaked it).
    """
    with open(file_path, 'r', encoding='utf-8') as f:
        return [line for line in f.readlines() if line]
161,651 | import argparse
import json
import os
import time
import openai
from loguru import logger
from tenacity import (
retry,
stop_after_attempt,
wait_random_exponential,
)
from tqdm import tqdm
REQ_TIME_GAP = 3
def completion_with_backoff(**kwargs):
    # Retry interval 1-3 seconds, 3 attempts.
    # NOTE(review): this comment implies a tenacity @retry decorator (imported
    # above) that is not visible on this def — confirm it was not lost.
    return openai_reply(**kwargs)
def get_chatgpt_response(content, model_name, max_tokens, temperature):
    """Query the chat model and return the reply text, or '' on any failure."""
    try:
        response = str(completion_with_backoff(content=content, model_name=model_name, max_tokens=max_tokens,
                                               temperature=temperature))
        logger.debug(f"Successfully get chatgpt response, content:{content}, res:{response}")
    except Exception as e:
        # swallow API errors and fall back to an empty reply
        logger.error(e)
        response = ''
    # throttle between requests
    time.sleep(REQ_TIME_GAP)
    return response
161,655 | import argparse
import json
import os
import time
import openai
from loguru import logger
from tenacity import (
retry,
stop_after_attempt,
wait_random_exponential,
)
from tqdm import tqdm
def generate_prompt(data_list, prefix):
    """Build one scoring prompt per record: prefix + instruction/input/answer."""
    return [
        prefix + f"{item['instruction']}\n{item['input']}\n模型回答:{item['output']}\n请针对模型回答给出得分,顺便给出理由:"
        for item in data_list
    ]
161,656 | import argparse
from datasets import load_dataset
def process_alpaca(examples):
    """Convert alpaca-format columns into sharegpt-style conversations.

    An input field is appended to the instruction only when its stripped
    length exceeds 1 character (quirk preserved from the original).
    """
    conversations = []
    for instruction, inp, output in zip(examples['instruction'], examples['input'], examples['output']):
        question = instruction
        if len(inp.strip()) > 1:
            question = question + '\n\n' + inp
        conversations.append([
            {"from": "human", "value": question},
            {"from": "gpt", "value": output},
        ])
    return {"conversations": conversations}
161,657 | import argparse
import json
import os
import re
import time
from multiprocessing.dummy import Pool
from random import choices
import openai
from loguru import logger
from tenacity import (
retry,
stop_after_attempt,
wait_random_exponential,
)
from tqdm import tqdm
def save_jsonl(data_list, json_path):
    """Write JSON-serializable objects to `json_path`, one JSON object per line.

    Creates the parent directory if needed.  Replaces the original's
    exists-then-create race with `os.makedirs(..., exist_ok=True)`,
    removes the stray debug `print(dir)`, and stops shadowing the
    builtin `dir`.
    """
    parent_dir = os.path.dirname(os.path.abspath(json_path))
    os.makedirs(parent_dir, exist_ok=True)
    with open(json_path, 'w', encoding='utf-8') as f:
        for entry in data_list:
            json.dump(entry, f, ensure_ascii=False)
            f.write('\n')
    print(f'save to {json_path}, size: {len(data_list)}')
161,658 | import argparse
import json
import os
import re
import time
from multiprocessing.dummy import Pool
from random import choices
import openai
from loguru import logger
from tenacity import (
retry,
stop_after_attempt,
wait_random_exponential,
)
from tqdm import tqdm
def load_jsonl(json_path):
    """Read a JSON-lines file and return the parsed records as a list."""
    with open(json_path, 'r', encoding='utf-8') as reader:
        return [json.loads(row) for row in reader]
161,659 | import argparse
import json
import os
import re
import time
from multiprocessing.dummy import Pool
from random import choices
import openai
from loguru import logger
from tenacity import (
retry,
stop_after_attempt,
wait_random_exponential,
)
from tqdm import tqdm
REQ_TIME_GAP = 3
def parse_response(responses):
    """
    Parse a model reply into a list of {instruction, input, output} dicts.

    The reply is split on '###'; each part is scanned with lookbehind /
    lookahead regexes for the 指令/输入/输出 (instruction/input/output)
    markers, accepting both full-width and half-width colons.
    """
    data_list = []
    response_list = [i for i in re.split("###", responses) if i]
    for response in response_list:
        intruction_pattern = re.compile(
            r"(?<=(?:" + '|'.join(['指令:', '指令:']) + "))[\s\S]*?(?=" + '|'.join(['输入:', '输入:']) + ")")
        input_pattern = re.compile(
            r"(?<=(?:" + '|'.join(['输入:', '输入:']) + "))[\s\S]*?(?=" + '|'.join(['输出:', '输出:']) + ")")
        output_pattern = re.compile(r"(?<=(?:" + '|'.join(['输出:', '输出:']) + "))[\s\S]*?(?=$)")
        intruction_match = intruction_pattern.search(response)
        input_match = input_pattern.search(response)
        output_match = output_pattern.search(response)
        if intruction_match and input_match and output_match:
            # strip trailing "N." numbering artifacts and surrounding whitespace
            inst = re.sub(r'\d+\.$', '', intruction_match.group().strip()).strip('\n').rstrip()
            input = re.sub(r'\d+\.$', '', input_match.group().strip()).strip('\n').rstrip()
            input = "" if "无输入" in input else input
            output = output_match.group().strip().strip('\n')
            if '指令:' in output and '输入:' in output and '输出:' in output:  # reply not separated by ###: keep only the first record
                # NOTE(review): '(?<=(?:))' is an empty lookbehind that matches at
                # position 0 — this looks like a truncated prefix; confirm intent.
                output_pattern_new = re.compile(r"(?<=(?:" + "))[\s\S]*?(?=" + '|'.join(['指令:', '指令:']) + ")")
                output_match_new = output_pattern_new.search(output)
                if output_match_new:
                    output = re.sub(r'\d+\.$', '', output_match_new.group().strip()).strip('\n').rstrip()
            out = {"instruction": inst, "input": input, "output": output}
            data_list.append(out)
    return data_list
def completion_with_backoff(**kwargs):
    # Retry interval 1-3 seconds, 3 attempts.
    # NOTE(review): this comment implies a tenacity @retry decorator (imported
    # above) that is not visible on this def — confirm it was not lost.
    return openai_reply(**kwargs)
The provided code snippet includes necessary dependencies for implementing the `get_chatgpt_response` function. Write a Python function `def get_chatgpt_response(i)` to solve the following problem:
Get response from chatgpt model
Here is the function:
def get_chatgpt_response(i):
    """Get response from chatgpt model

    Builds a few-shot prompt from 3 randomly chosen seed tasks, queries
    the model, parses the reply into instruction records, and appends
    them to the shared `data_result` list.
    NOTE(review): relies on module globals (seed_task_list, prompt,
    model_name, pbar, data_result); apparently intended to run inside a
    thread pool — confirm thread-safety of the shared list/bar.
    """
    task_subset = choices(seed_task_list, k=3)
    prompt_ = prompt
    for idx, task_dict in enumerate(task_subset):
        (instruction, input, output) = task_dict["instruction"], task_dict["input"], task_dict["output"]
        input = "<无输入>" if input.lower() == "" else input
        # each seed example is preceded by a '###' separator
        prompt_ = prompt_ + f"###\n"
        prompt_ += f"{idx + 1}. 指令: {instruction}\n"
        prompt_ += f"{idx + 1}. 输入:\n{input}\n"
        prompt_ += f"{idx + 1}. 输出:\n{output}\n"
    prompt_ += f"###\n"
    messages = [{"role": "assistant", "content": prompt_}]
    logger.debug(f'prompt: {messages}')
    try:
        response = completion_with_backoff(messages=messages, model_name=model_name)
        logger.debug(f"Successfully get chatgpt response, content:{prompt_}, res:{response}")
    except Exception as e:
        # swallow API errors; parse_response('') yields no records
        logger.error(e)
        response = ''
    instruction_list = parse_response(response)
    time.sleep(REQ_TIME_GAP)
    pbar.update(1)
    if instruction_list:
        data_result.extend(instruction_list)
161,660 | import argparse
import os
import re
import pandas as pd
import torch
from thefuzz import process
from tqdm import tqdm
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers import GenerationConfig
from transformers.trainer_utils import set_seed
def load_models_tokenizer(args):
    """Load a causal-LM checkpoint and its tokenizer for evaluation.

    The model is loaded in float16 with device_map="auto" and put in
    eval mode; generation is forced to greedy decoding when a
    GenerationConfig is available.
    """
    tokenizer = AutoTokenizer.from_pretrained(args.checkpoint_path, trust_remote_code=True)
    model = AutoModelForCausalLM.from_pretrained(
        args.checkpoint_path, device_map="auto", trust_remote_code=True, torch_dtype=torch.float16
    ).eval()
    try:
        model.generation_config = GenerationConfig.from_pretrained(args.checkpoint_path, trust_remote_code=True)
        model.generation_config.do_sample = False  # use greedy decoding
    except Exception:
        # narrowed from a bare `except:`, which would also swallow
        # KeyboardInterrupt/SystemExit
        print("GenerationConfig not found, use default config.")
    return model, tokenizer
161,661 | import argparse
import os
import re
import pandas as pd
import torch
from thefuzz import process
from tqdm import tqdm
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers import GenerationConfig
from transformers.trainer_utils import set_seed
def count_substr(gen, pattern):
    """Return the number of non-overlapping regex *pattern* matches in *gen*."""
    return sum(1 for _ in re.finditer(pattern, gen))
161,662 | import argparse
import os
import re
import pandas as pd
import torch
from thefuzz import process
from tqdm import tqdm
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers import GenerationConfig
from transformers.trainer_utils import set_seed
def format_example(line):
def extract_answer(response, row):
def model_predict(model, tokenizer, question, max_new_tokens=512, **kwargs):
def eval_subject(
        model,
        tokenizer,
        subject_name,
        test_df,
        save_result_dir=None,
        overwrite=False,
        **kwargs
):
    """Evaluate one subject with free-form generation and answer extraction.

    When a cached result CSV exists (and ``overwrite`` is false) the cached
    responses are re-scored instead of re-querying the model.

    :return: accuracy percentage (0 when the split has no ``answer`` column).
    """
    result_path = os.path.join(save_result_dir, f'{subject_name}_result.csv')
    if not overwrite and os.path.exists(result_path):
        print(f"{result_path} existed, skip!")
        score = []
        # Re-extract predictions from the cached responses and re-score them.
        for (_, datarow), (_, resultrow) in zip(test_df.iterrows(), pd.read_csv(result_path).iterrows()):
            pred = extract_answer(resultrow['model_response'], datarow)
            correct = 1 if pred == datarow['answer'] else 0
            score.append(correct)
        correct_ratio = 100 * sum(score) / len(score)
        return correct_ratio
    responses = []
    result = []
    score = []
    for _, row in tqdm(test_df.iterrows(), total=len(test_df)):
        question = format_example(row)
        response = model_predict(model, tokenizer, question, **kwargs)
        print('\nquestion: ', question)
        print('response: ', response)
        # Map the free-form response back onto one of the option letters.
        pred = extract_answer(response, row)
        print('extract_answer: ', pred)
        print("=" * 20)
        if 'answer' in row:
            correct = 1 if pred == row['answer'] else 0
            score.append(correct)
            if args.debug:
                print(f'{question} pred: {pred} ref: {row["answer"]}')
                print("=" * 30)
        responses.append(response)
        result.append(pred)
    if score:
        correct_ratio = 100 * sum(score) / len(score)
        if args.debug: print(subject_name, correct_ratio)
    else:
        correct_ratio = 0
    if save_result_dir:
        # Persist raw responses and extracted answers for cached re-scoring.
        test_df['model_response'] = responses
        test_df['model_output'] = result
        if score:
            test_df["correctness"] = score
        os.makedirs(save_result_dir, exist_ok=True)
        test_df.to_csv(result_path, encoding="utf-8", index=False)
    return correct_ratio
161,663 | import argparse
import os
import re
import pandas as pd
import torch
from thefuzz import process
from tqdm import tqdm
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers import GenerationConfig
from transformers.trainer_utils import set_seed
TASK_NAME_MAPPING = {
"computer_network": ["Computer Network", "\u8ba1\u7b97\u673a\u7f51\u7edc", "STEM"],
"operating_system": ["Operating System", "\u64cd\u4f5c\u7cfb\u7edf", "STEM"],
"computer_architecture": ["Computer Architecture", "\u8ba1\u7b97\u673a\u7ec4\u6210", "STEM"],
"college_programming": ["College Programming", "\u5927\u5b66\u7f16\u7a0b", "STEM"],
"college_physics": ["College Physics", "\u5927\u5b66\u7269\u7406", "STEM"],
"college_chemistry": ["College Chemistry", "\u5927\u5b66\u5316\u5b66", "STEM"],
"advanced_mathematics": ["Advanced Mathematics", "\u9ad8\u7b49\u6570\u5b66", "STEM"],
"probability_and_statistics": ["Probability and Statistics", "\u6982\u7387\u7edf\u8ba1", "STEM"],
"discrete_mathematics": ["Discrete Mathematics", "\u79bb\u6563\u6570\u5b66", "STEM"],
"electrical_engineer": ["Electrical Engineer", "\u6ce8\u518c\u7535\u6c14\u5de5\u7a0b\u5e08", "STEM"],
"metrology_engineer": ["Metrology Engineer", "\u6ce8\u518c\u8ba1\u91cf\u5e08", "STEM"],
"high_school_mathematics": ["High School Mathematics", "\u9ad8\u4e2d\u6570\u5b66", "STEM"],
"high_school_physics": ["High School Physics", "\u9ad8\u4e2d\u7269\u7406", "STEM"],
"high_school_chemistry": ["High School Chemistry", "\u9ad8\u4e2d\u5316\u5b66", "STEM"],
"high_school_biology": ["High School Biology", "\u9ad8\u4e2d\u751f\u7269", "STEM"],
"middle_school_mathematics": ["Middle School Mathematics", "\u521d\u4e2d\u6570\u5b66", "STEM"],
"middle_school_biology": ["Middle School Biology", "\u521d\u4e2d\u751f\u7269", "STEM"],
"middle_school_physics": ["Middle School Physics", "\u521d\u4e2d\u7269\u7406", "STEM"],
"middle_school_chemistry": ["Middle School Chemistry", "\u521d\u4e2d\u5316\u5b66", "STEM"],
"veterinary_medicine": ["Veterinary Medicine", "\u517d\u533b\u5b66", "STEM"],
"college_economics": ["College Economics", "\u5927\u5b66\u7ecf\u6d4e\u5b66", "Social Science"],
"business_administration": ["Business Administration", "\u5de5\u5546\u7ba1\u7406", "Social Science"],
"marxism": ["Marxism", "\u9a6c\u514b\u601d\u4e3b\u4e49\u57fa\u672c\u539f\u7406", "Social Science"],
"mao_zedong_thought": ["Mao Zedong Thought",
"\u6bdb\u6cfd\u4e1c\u601d\u60f3\u548c\u4e2d\u56fd\u7279\u8272\u793e\u4f1a\u4e3b\u4e49\u7406\u8bba\u4f53\u7cfb\u6982\u8bba",
"Social Science"],
"education_science": ["Education Science", "\u6559\u80b2\u5b66", "Social Science"],
"teacher_qualification": ["Teacher Qualification", "\u6559\u5e08\u8d44\u683c", "Social Science"],
"high_school_politics": ["High School Politics", "\u9ad8\u4e2d\u653f\u6cbb", "Social Science"],
"high_school_geography": ["High School Geography", "\u9ad8\u4e2d\u5730\u7406", "Social Science"],
"middle_school_politics": ["Middle School Politics", "\u521d\u4e2d\u653f\u6cbb", "Social Science"],
"middle_school_geography": ["Middle School Geography", "\u521d\u4e2d\u5730\u7406", "Social Science"],
"modern_chinese_history": ["Modern Chinese History", "\u8fd1\u4ee3\u53f2\u7eb2\u8981", "Humanities"],
"ideological_and_moral_cultivation": ["Ideological and Moral Cultivation",
"\u601d\u60f3\u9053\u5fb7\u4fee\u517b\u4e0e\u6cd5\u5f8b\u57fa\u7840",
"Humanities"],
"logic": ["Logic", "\u903b\u8f91\u5b66", "Humanities"],
"law": ["Law", "\u6cd5\u5b66", "Humanities"],
"chinese_language_and_literature": ["Chinese Language and Literature", "\u4e2d\u56fd\u8bed\u8a00\u6587\u5b66",
"Humanities"],
"art_studies": ["Art Studies", "\u827a\u672f\u5b66", "Humanities"],
"professional_tour_guide": ["Professional Tour Guide", "\u5bfc\u6e38\u8d44\u683c", "Humanities"],
"legal_professional": ["Legal Professional", "\u6cd5\u5f8b\u804c\u4e1a\u8d44\u683c", "Humanities"],
"high_school_chinese": ["High School Chinese", "\u9ad8\u4e2d\u8bed\u6587", "Humanities"],
"high_school_history": ["High School History", "\u9ad8\u4e2d\u5386\u53f2", "Humanities"],
"middle_school_history": ["Middle School History", "\u521d\u4e2d\u5386\u53f2", "Humanities"],
"civil_servant": ["Civil Servant", "\u516c\u52a1\u5458", "Other"],
"sports_science": ["Sports Science", "\u4f53\u80b2\u5b66", "Other"],
"plant_protection": ["Plant Protection", "\u690d\u7269\u4fdd\u62a4", "Other"],
"basic_medicine": ["Basic Medicine", "\u57fa\u7840\u533b\u5b66", "Other"],
"clinical_medicine": ["Clinical Medicine", "\u4e34\u5e8a\u533b\u5b66", "Other"],
"urban_and_rural_planner": ["Urban and Rural Planner", "\u6ce8\u518c\u57ce\u4e61\u89c4\u5212\u5e08", "Other"],
"accountant": ["Accountant", "\u6ce8\u518c\u4f1a\u8ba1\u5e08", "Other"],
"fire_engineer": ["Fire Engineer", "\u6ce8\u518c\u6d88\u9632\u5de5\u7a0b\u5e08", "Other"],
"environmental_impact_assessment_engineer": ["Environmental Impact Assessment Engineer",
"\u73af\u5883\u5f71\u54cd\u8bc4\u4ef7\u5de5\u7a0b\u5e08", "Other"],
"tax_accountant": ["Tax Accountant", "\u7a0e\u52a1\u5e08", "Other"],
"physician": ["Physician", "\u533b\u5e08\u8d44\u683c", "Other"]
}
# Subjects counted toward the C-Eval "hard" subset score.
hard_list = ['advanced_mathematics', 'discrete_mathematics', 'probability_and_statistics', 'college_physics',
             'college_chemistry', 'high_school_mathematics', 'high_school_physics', 'high_school_chemistry']


def cal_ceval(res):
    """Aggregate per-subject C-Eval accuracies and print category/hard/overall scores.

    :param res: mapping whose keys end in ``-<subject>`` (a ``TASK_NAME_MAPPING``
        key) and whose values are accuracy percentages.
    Prints per-category accuracy, "hard" subset accuracy, and the overall average.
    """
    # Removed dead `acc_norm_sum_dict`: it was initialized but never accumulated or read.
    acc_sum_dict = dict()
    cnt_dict = dict()
    acc_sum = 0.
    cnt = 0
    hard_cnt = 0
    hard_acc_sum = 0.
    for tt in res.keys():
        name = tt.split('-')[-1]
        acc_sum += float(res[tt])
        cnt += 1
        class_ = TASK_NAME_MAPPING[name][2]  # third field is the category label
        if class_ not in acc_sum_dict:
            acc_sum_dict[class_] = 0.
            cnt_dict[class_] = 0.
        if name in hard_list:
            hard_cnt += 1
            hard_acc_sum += float(res[tt])
        acc_sum_dict[class_] += float(res[tt])
        cnt_dict[class_] += 1
    print('\n\n\n')
    for k in ['STEM', 'Social Science', 'Humanities', 'Other']:
        if k in cnt_dict:
            print('%s acc: %.2f ' % (
                k, acc_sum_dict[k] / cnt_dict[k]))
    if hard_cnt > 0:
        print('Hard acc:%.2f ' % (hard_acc_sum / hard_cnt))
    print('AVERAGE acc:%.2f ' % (acc_sum / cnt))
161,664 | import argparse
import os
import re
import pandas as pd
import torch
from thefuzz import process
from tqdm import tqdm
from transformers.trainer_utils import set_seed
def load_models_tokenizer(args):
    """Load the causal-LM checkpoint and tokenizer from ``args.checkpoint_path``.

    Imports transformers lazily so the module can be inspected without it
    installed. Greedy decoding is forced for deterministic evaluation.

    :return: ``(model, tokenizer)``; eval mode, device-mapped, fp16.
    """
    from transformers import AutoModelForCausalLM, AutoTokenizer
    from transformers.generation import GenerationConfig
    tokenizer = AutoTokenizer.from_pretrained(args.checkpoint_path, trust_remote_code=True)
    model = AutoModelForCausalLM.from_pretrained(args.checkpoint_path, device_map="auto", trust_remote_code=True,
                                                 torch_dtype=torch.float16).eval()
    try:
        model.generation_config = GenerationConfig.from_pretrained(args.checkpoint_path, trust_remote_code=True)
        model.generation_config.do_sample = False  # use greedy decoding
    except Exception:
        # Was a bare `except:`, which also swallowed KeyboardInterrupt/SystemExit.
        print("GenerationConfig not found, use default config.")
    return model, tokenizer
161,665 | import argparse
import os
import re
import pandas as pd
import torch
from thefuzz import process
from tqdm import tqdm
from transformers.trainer_utils import set_seed
def format_example(line):
    """Render one MMLU row as an instruction header, the question, and lettered options."""
    header = ('The following is a multiple-choice question. Please choose the most suitable one among A, B, C and D as the answer to this question.\n\n'
              + line['question'] + "\n")
    option_lines = [f'{c}. {line[f"{c}"]}\n' for c in choices]
    return header + ''.join(option_lines)
def extract_answer(response, row):
    # Normalize the raw response first (process_before_extraction is defined
    # elsewhere in this file), then match it against the four option texts to
    # recover the chosen letter.
    gen = process_before_extraction(response, {choice: row[choice] for choice in choices})
    pred = extract_choice(gen, [row[choice] for choice in choices])
    return pred
def model_predict(model, tokenizer, question, max_new_tokens=512, **kwargs):
    """Generate a completion for *question* and return only the new text, stripped."""
    encoded = tokenizer(question, return_tensors="pt")
    prompt_ids = encoded['input_ids'].to(model.device)
    generated = model.generate(input_ids=prompt_ids, max_new_tokens=max_new_tokens, **kwargs)
    # Drop the prompt tokens so only the continuation is decoded.
    continuation = generated[0][len(prompt_ids[0]):]
    decoded = tokenizer.decode(continuation, skip_special_tokens=True)
    return decoded.strip()
def eval_subject(
        model,
        tokenizer,
        subject_name,
        test_df,
        save_result_dir=None,
        overwrite=False,
        **kwargs
):
    """Evaluate one subject with generation + answer extraction.

    When a cached result CSV exists (and ``overwrite`` is false), the cached
    ``model_output`` column is re-scored instead of re-querying the model.

    :return: list of per-row 0/1 correctness scores.
    """
    result_path = os.path.join(save_result_dir, f'{subject_name}_result.csv')
    if not overwrite and os.path.exists(result_path):
        print(f"{result_path} existed, skip!")
        score = []
        for (_, datarow), (_, resultrow) in zip(test_df.iterrows(), pd.read_csv(result_path).iterrows()):
            # pred = extract_answer(resultrow['model_response'], datarow)
            pred = resultrow['model_output']
            correct = 1 if pred == datarow['answer'] else 0
            score.append(correct)
        return score
    # BUG FIX: collect every response. Previously `test_df['model_response'] = response`
    # broadcast only the *last* loop iteration's response to every row of the CSV.
    responses = []
    result = []
    score = []
    for _, row in tqdm(test_df.iterrows(), total=len(test_df)):
        question = format_example(row)
        response = model_predict(model, tokenizer, question, **kwargs)
        print(question)
        print(response)
        pred = extract_answer(response, row)
        print(pred)
        print("=======")
        if 'answer' in row:
            correct = 1 if pred == row['answer'] else 0
            score.append(correct)
            if args.debug:
                print(f'{question} pred: {pred} ref: {row["answer"]}')
                print("============================")
        responses.append(response)
        result.append(pred)
    if save_result_dir:
        test_df['model_output'] = result
        test_df['model_response'] = responses
        if score:
            test_df["correctness"] = score
        os.makedirs(save_result_dir, exist_ok=True)
        test_df.to_csv(os.path.join(
            save_result_dir, f'{subject_name}_result.csv'), encoding="utf-8", index=False)
    return score
161,666 | import argparse
import os
import re
import pandas as pd
import torch
from thefuzz import process
from tqdm import tqdm
from transformers.trainer_utils import set_seed
# MMLU subject -> category grouping used for score aggregation.
TASK_NAME_MAPPING = {'stem': ['abstract_algebra', 'anatomy', 'astronomy', 'college_biology', 'college_chemistry',
                              'college_computer_science', 'college_mathematics', 'college_physics', 'computer_security',
                              'conceptual_physics', 'electrical_engineering', 'elementary_mathematics',
                              'high_school_biology', 'high_school_chemistry', 'high_school_computer_science',
                              'high_school_mathematics', 'high_school_physics', 'high_school_statistics',
                              'machine_learning'],
                     'Humanities': ['formal_logic', 'high_school_european_history', 'high_school_us_history',
                                    'high_school_world_history', 'international_law', 'jurisprudence',
                                    'logical_fallacies', 'moral_disputes', 'moral_scenarios', 'philosophy',
                                    'prehistory', 'professional_law', 'world_religions'],
                     'other': ['business_ethics', 'college_medicine', 'human_aging', 'management', 'marketing',
                               'medical_genetics', 'miscellaneous', 'nutrition', 'professional_accounting',
                               'professional_medicine', 'virology', 'global_facts', 'clinical_knowledge'],
                     'social': ['econometrics', 'high_school_geography', 'high_school_government_and_politics',
                                'high_school_macroeconomics', 'high_school_microeconomics', 'high_school_psychology',
                                'human_sexuality', 'professional_psychology', 'public_relations', 'security_studies',
                                'sociology', 'us_foreign_policy']}


def cal_mmlu(res):
    """Aggregate per-subject 0/1 score lists into category and overall MMLU accuracy.

    :param res: mapping of subject name -> list of per-question 0/1 scores,
        covering every subject in ``TASK_NAME_MAPPING``.
    Prints percentage accuracy per category and overall. Returns ``None``.
    """
    # Removed dead locals `acc_norm_sum_dict`, `hard_cnt`, `hard_acc_sum`:
    # they were initialized but never used.
    acc_sum_dict = dict()
    cnt_dict = dict()
    acc_sum = 0.
    cnt = 0
    for class_ in TASK_NAME_MAPPING.keys():
        acc_sum_dict[class_] = 0.
        cnt_dict[class_] = 0.
        for tt in TASK_NAME_MAPPING[class_]:
            acc_sum += sum(res[tt])
            cnt += len(res[tt])
            acc_sum_dict[class_] += sum(res[tt])
            cnt_dict[class_] += len(res[tt])
    print('\n\n\n')
    for k in TASK_NAME_MAPPING.keys():
        if k in cnt_dict:
            print('%s ACC: %.2f ' % (
                k, acc_sum_dict[k] * 100 / cnt_dict[k]))
    print('AVERAGE ACC:%.2f ' % (acc_sum * 100 / cnt))
161,667 | import argparse
import os
from collections import defaultdict
from typing import List
import numpy as np
import pandas as pd
import torch
from tqdm import tqdm
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers import GenerationConfig
from transformers.trainer_utils import set_seed
def load_models_tokenizer(args):
    """Load the causal-LM checkpoint and tokenizer from ``args.checkpoint_path``.

    :return: ``(model, tokenizer)``; eval mode, device-mapped, fp16. Falls back
        to the default generation config when none ships with the checkpoint.
    """
    tokenizer = AutoTokenizer.from_pretrained(args.checkpoint_path, trust_remote_code=True)
    model = AutoModelForCausalLM.from_pretrained(
        args.checkpoint_path, device_map="auto", trust_remote_code=True, torch_dtype=torch.float16).eval()
    try:
        model.generation_config = GenerationConfig.from_pretrained(args.checkpoint_path, trust_remote_code=True)
    except Exception:
        # Was a bare `except:`, which also swallowed KeyboardInterrupt/SystemExit.
        print("GenerationConfig not found, use default config.")
    return model, tokenizer
161,668 | import argparse
import os
from collections import defaultdict
from typing import List
import numpy as np
import pandas as pd
import torch
from tqdm import tqdm
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers import GenerationConfig
from transformers.trainer_utils import set_seed
def format_example(line, include_answer=True):
    """Format one C-Eval row as 问题/options/答案; optionally append the gold answer."""
    parts = ['问题:' + line['Question']]
    for opt in choices:
        parts.append(f'\n{opt}. {line[f"{opt}"]}')
    if include_answer:
        parts.append('\n答案:' + line["Answer"] + '\n\n')
    else:
        parts.append('\n答案:')
    return ''.join(parts)
def generate_few_shot_prompt(k, subject, dev_df):
    """Concatenate the first *k* dev rows (all rows when k == -1) as in-context examples."""
    shot_count = dev_df.shape[0] if k == -1 else k
    examples = [
        format_example(dev_df.iloc[i, :], include_answer=True)
        for i in range(shot_count)
    ]
    return ''.join(examples)
def get_logits(tokenizer, model, inputs: List[str]):
    # NOTE(review): despite the local name `log_probs`, this returns *softmax
    # probabilities* over the vocabulary at the last position, not log-probs.
    input_ids = tokenizer(inputs, padding=False)['input_ids']
    input_ids = torch.tensor(input_ids, device=model.device)
    tokens = {'input_ids': input_ids}
    outputs = model(input_ids)['logits']
    logits = outputs[:, -1, :]  # last-token logits for each sequence
    log_probs = torch.nn.functional.softmax(logits, dim=-1)
    return log_probs, {'tokens': tokens}
choices = ["A", "B", "C", "D"]
def eval_subject(
        model,
        tokenizer,
        subject_name,
        test_df,
        k=5,
        dev_df=None,
        few_shot=False,
        save_result_dir=None,
        **kwargs
):
    """Score one C-Eval subject by comparing the option-letter probabilities.

    For each test row a (optionally few-shot) prompt is built, the model's
    next-token distribution is read, and the argmax over the A/B/C/D token
    probabilities is the prediction.

    :return: accuracy percentage (0 when the split has no ``Answer`` column).
    """
    result = []
    score = []
    # BUG FIX: the zero-shot fallback must be an empty *string*. With the old
    # `else []`, `few_shot_prompt + question` raised TypeError (list + str)
    # whenever few_shot was False (the sibling implementation uses '' here).
    few_shot_prompt = generate_few_shot_prompt(
        k, subject_name, dev_df) if few_shot else ''
    all_probs = {'prob_A': [], 'prob_B': [], 'prob_C': [], 'prob_D': []}
    if args.debug:
        print(f"few_shot_prompt: {few_shot_prompt}")
    for _, row in tqdm(test_df.iterrows(), total=len(test_df)):
        question = format_example(row, include_answer=False)
        full_prompt = few_shot_prompt + question
        output, input_info = get_logits(tokenizer, model, [full_prompt])
        assert output.shape[0] == 1
        logits = output.flatten()
        # Re-normalize over only the four option-letter token logits.
        softval = torch.nn.functional.softmax(
            torch.tensor(
                [
                    logits[tokenizer("A")['input_ids']],
                    logits[tokenizer("B")['input_ids']],
                    logits[tokenizer("C")['input_ids']],
                    logits[tokenizer("D")['input_ids']],
                ]
            ),
            dim=0,
        )
        # Upcast half precision before converting to numpy.
        if softval.dtype in {torch.bfloat16, torch.float16}:
            softval = softval.to(dtype=torch.float32)
        probs = softval.detach().cpu().numpy()
        for i, choice in enumerate(choices):
            all_probs[f'prob_{choice}'].append(probs[i])
        pred = {0: "A", 1: "B", 2: "C", 3: "D"}[np.argmax(probs)]
        if 'Answer' in row:
            correct = 1 if pred == row['Answer'] else 0
            score.append(correct)
            if args.debug:
                print(f'{question} pred: {pred} ref: {row["Answer"]}')
        result.append(pred)
    if score:
        correct_ratio = 100 * sum(score) / len(score)
        if args.debug:
            print(subject_name, correct_ratio)
    else:
        correct_ratio = 0
    if save_result_dir:
        test_df['model_output'] = result
        for i, choice in enumerate(choices):
            test_df[f'prob_{choice}'] = (all_probs[f'prob_{choice}'])
        if score:
            test_df["correctness"] = score
        os.makedirs(save_result_dir, exist_ok=True)
        test_df.to_csv(os.path.join(
            save_result_dir, f'{subject_name}_result.csv'), encoding="utf-8", index=False)
    return correct_ratio
161,669 | import argparse
import os
from collections import defaultdict
from typing import List
import numpy as np
import pandas as pd
import torch
from tqdm import tqdm
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers import GenerationConfig
from transformers.trainer_utils import set_seed
# Invert the CMMLU metadata (categories/subcategories are imported elsewhere in
# this file): for each top-level category, collect every subject whose
# subcategory belongs to that category.
TASK_NAME_MAPPING = defaultdict(list)
for k, v in categories.items():
    for subject, subcat in subcategories.items():
        for c in subcat:
            if c in v:
                TASK_NAME_MAPPING[k].append(subject)
def cal_cmmlu(res):
    """Print per-category and overall average CMMLU accuracy from *res*."""
    print('\n\n\n')
    # Strip any run prefix so keys become plain subject names.
    subject_acc = {key.split('-')[-1]: float(val) for key, val in res.items()}
    for category, subjects in TASK_NAME_MAPPING.items():
        category_mean = np.mean([subject_acc[s] for s in subjects])
        print(f"{category} acc: {category_mean:.2f}")
    overall_mean = np.mean(list(subject_acc.values()))
    print(f"AVERAGE acc: {overall_mean:.2f}")
161,670 | import argparse
import json
import os
import pprint
import json5
import jsonlines
from rouge_score import rouge_scorer
from tqdm import tqdm
from transformers import Agent, AutoModelForCausalLM, AutoTokenizer
from transformers.generation import GenerationConfig
from transformers.tools.evaluate_agent import evaluate_agent
from transformers.trainer_utils import set_seed
def is_callable(response, golden):
    """True when both parsed results name the same tool (case/whitespace-insensitive)."""
    predicted = response['action'].strip().lower()
    expected = golden['action'].strip().lower()
    return predicted == expected
def process_res(response):
    """Split a ReAct-style completion into its labelled sections.

    Slices the text between the ``Thought:`` / ``Action:`` / ``Action Input:`` /
    ``Observation:`` / ``Final Answer:`` markers. The action input is
    canonicalized to sorted-key JSON when it parses as JSON5; otherwise the raw
    text is kept.

    :return: dict with keys thought, action, action_input, observation,
        thought_last, final_answer.
    """
    # parse response
    response += '\n'  # fix not-find bug
    thought = response[:response.find('Action:')].strip()
    action = response[response.find('Action:') +
                      len('Action:'):response.find('Action Input:')].strip()
    action_input = response[response.find('Action Input:') +
                            len('Action Input:'):response.find('Observation:'
                                                               )].strip()
    observation = response[response.find('Observation:') +
                           len('Observation:'):response.rfind('Thought:'
                                                              )].strip()
    thought_last = response[response.rfind('Thought:') +
                            len('Thought:'):response.find('Final Answer:'
                                                          )].strip()
    final_answer = response[response.find('Final Answer:') +
                            len('Final Answer:'):].strip()
    try:
        action_input = json.dumps(json5.loads(action_input),
                                  ensure_ascii=False,
                                  sort_keys=True)
    except Exception:
        # Was a bare `except:` (also caught KeyboardInterrupt/SystemExit).
        # Deliberately best-effort: keep the raw text when it is not valid JSON5.
        pass
    res_dict = {
        'thought': thought,
        'action': action,
        'action_input': action_input,
        'observation': observation,
        'thought_last': thought_last,
        'final_answer': final_answer
    }
    return res_dict
def eval_action(job):
    """Return True when the generated response calls the same tool as the reference."""
    generated = job['gen'][0]
    reference = job['response']
    if 'Action:' not in generated:
        return False
    parsed_gen, parsed_ref = process_res(generated), process_res(reference)
    return is_callable(parsed_gen, parsed_ref)
161,671 | import argparse
import json
import os
import pprint
import json5
import jsonlines
from rouge_score import rouge_scorer
from tqdm import tqdm
from transformers import Agent, AutoModelForCausalLM, AutoTokenizer
from transformers.generation import GenerationConfig
from transformers.tools.evaluate_agent import evaluate_agent
from transformers.trainer_utils import set_seed
def process_res(response):
    """Split a ReAct-style completion into its labelled sections.

    Slices the text between the ``Thought:`` / ``Action:`` / ``Action Input:`` /
    ``Observation:`` / ``Final Answer:`` markers. The action input is
    canonicalized to sorted-key JSON when it parses as JSON5; otherwise the raw
    text is kept.

    :return: dict with keys thought, action, action_input, observation,
        thought_last, final_answer.
    """
    # parse response
    response += '\n'  # fix not-find bug
    thought = response[:response.find('Action:')].strip()
    action = response[response.find('Action:') +
                      len('Action:'):response.find('Action Input:')].strip()
    action_input = response[response.find('Action Input:') +
                            len('Action Input:'):response.find('Observation:'
                                                               )].strip()
    observation = response[response.find('Observation:') +
                           len('Observation:'):response.rfind('Thought:'
                                                              )].strip()
    thought_last = response[response.rfind('Thought:') +
                            len('Thought:'):response.find('Final Answer:'
                                                          )].strip()
    final_answer = response[response.find('Final Answer:') +
                            len('Final Answer:'):].strip()
    try:
        action_input = json.dumps(json5.loads(action_input),
                                  ensure_ascii=False,
                                  sort_keys=True)
    except Exception:
        # Was a bare `except:` (also caught KeyboardInterrupt/SystemExit).
        # Deliberately best-effort: keep the raw text when it is not valid JSON5.
        pass
    res_dict = {
        'thought': thought,
        'action': action,
        'action_input': action_input,
        'observation': observation,
        'thought_last': thought_last,
        'final_answer': final_answer
    }
    return res_dict
class _DummyTokenizer:
def tokenize(self, text: str):
return text.split()
def _get_tokenized_string(tokenizer, text_list):
    # For each text: encode to token ids, then build a space-joined string of the
    # decoded token pieces (fed to the ROUGE scorer downstream).
    # NOTE(review): assumes convert_ids_to_tokens returns *bytes* pieces
    # (Qwen-style tokenizer) — confirm before using with other tokenizers.
    token_ids_list, tokenized_string_list = [], []
    for text in text_list:
        assert tokenizer is not None
        token_ids = tokenizer.encode(text)
        tokens_bytes = tokenizer.convert_ids_to_tokens(token_ids)
        tokens = [
            token.decode('utf-8', errors='replace') for token in tokens_bytes
        ]
        tokenized_string = ' '.join(tokens)
        token_ids_list.append(token_ids)
        tokenized_string_list.append(tokenized_string)
    return token_ids_list, tokenized_string_list
def eval_action_input(job, tokenizer):
    """Score the generated Action Input against the reference with ROUGE-L.

    Both completions are parsed with ``process_res``; their action-input strings
    are tokenized with *tokenizer* and compared, returning the ROUGE-L F-measure.
    """
    response = job['gen'][0]
    golden = job['response']
    response, golden = process_res(response), process_res(golden)
    query = job['prompt']
    # Rebuild a minimal job dict holding only what the scorer needs
    # (the parameter name is deliberately reused; the caller's dict is not mutated).
    job = {}
    job['prompt'] = query
    job['gen'] = response['action_input']
    job['response'] = golden['action_input']
    job['_gen_tok'], job['_gen_tok_str'] = _get_tokenized_string(
        tokenizer, [response['action_input']])
    job['_reference_tok'], job['_reference_tok_str'] = _get_tokenized_string(
        tokenizer, [golden['action_input']])
    # Texts are pre-tokenized space-joined strings, so a whitespace tokenizer suffices.
    scorer = rouge_scorer.RougeScorer(['rouge1', 'rouge2', 'rougeL'],
                                      tokenizer=_DummyTokenizer())
    score = scorer.score(job['_reference_tok_str'][0], job['_gen_tok_str'][0])
    rouge = score['rougeL'].fmeasure
    return rouge
161,672 | import argparse
import json
import os
import pprint
import json5
import jsonlines
from rouge_score import rouge_scorer
from tqdm import tqdm
from transformers import Agent, AutoModelForCausalLM, AutoTokenizer
from transformers.generation import GenerationConfig
from transformers.tools.evaluate_agent import evaluate_agent
from transformers.trainer_utils import set_seed
def load_models_tokenizer(args):
    """Load the agent-eval checkpoint and tokenizer from ``args.checkpoint_path``.

    Uses bf16 weights with flash attention and forces greedy decoding so the
    agent evaluation is deterministic.

    :return: ``(model, tokenizer)``; the model is in eval mode, device-mapped.
    """
    tokenizer = AutoTokenizer.from_pretrained(args.checkpoint_path,
                                              trust_remote_code=True)
    model = AutoModelForCausalLM.from_pretrained(args.checkpoint_path,
                                                 device_map='auto',
                                                 trust_remote_code=True,
                                                 bf16=True,
                                                 use_flash_attn=True).eval()
    try:
        model.generation_config = GenerationConfig.from_pretrained(args.checkpoint_path, trust_remote_code=True)
        model.generation_config.do_sample = False  # use greedy decoding
    except Exception:
        # Was a bare `except:`, which also swallowed KeyboardInterrupt/SystemExit.
        print("GenerationConfig not found, use default config.")
    return model, tokenizer
161,673 | import argparse
import json
import os
import pprint
import json5
import jsonlines
from rouge_score import rouge_scorer
from tqdm import tqdm
from transformers import Agent, AutoModelForCausalLM, AutoTokenizer
from transformers.generation import GenerationConfig
from transformers.tools.evaluate_agent import evaluate_agent
from transformers.trainer_utils import set_seed
data_root_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'data'
)
def load_jobs(filename):
    """Read every JSON-lines record from *filename* under ``data_root_path``."""
    path = os.path.join(data_root_path, filename)
    with jsonlines.open(path, mode='r') as reader:
        records = [record for record in reader]
    return records
def react_inference(filename, model, tokenizer):
    """Run (or reload cached) ReAct-style chat inference over the jobs in *filename*.

    Results are cached next to the input as ``<filename>.cache`` in JSON-lines
    form; when the cache file exists it is loaded wholesale and the model is not
    queried again.
    """
    filename_cache = filename + '.cache'
    if os.path.exists(os.path.join(data_root_path, filename_cache)):
        jobs = load_jobs(filename=filename_cache)
        print('Loaded from', filename_cache)
    else:
        # NOTE(review): the cache file is created *before* inference finishes, so
        # an interrupted run leaves a partial cache that is trusted on the next
        # call — delete the .cache file to force a clean re-run.
        with open(os.path.join(data_root_path, filename_cache), 'w') as f:
            jobs = load_jobs(filename=filename)
            print('Inference:', filename)
            for job in tqdm(jobs):
                response, history = model.chat(tokenizer,
                                               job['prompt'],
                                               history=None)
                job['gen'] = [response]
                # Stream each finished job to disk immediately.
                f.writelines(json.dumps(job, ensure_ascii=False) + '\n')
            print(filename_cache, 'is saved.')
    return jobs
161,674 | import argparse
import os
from typing import List
import numpy as np
import pandas as pd
import torch
from tqdm import tqdm
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers import GenerationConfig
from transformers.trainer_utils import set_seed
def load_models_tokenizer(args):
    """Load the causal-LM checkpoint and tokenizer from ``args.checkpoint_path``.

    :return: ``(model, tokenizer)``; eval mode, device-mapped, fp16. Falls back
        to the default generation config when none ships with the checkpoint.
    """
    tokenizer = AutoTokenizer.from_pretrained(args.checkpoint_path, trust_remote_code=True)
    model = AutoModelForCausalLM.from_pretrained(
        args.checkpoint_path, device_map="auto", trust_remote_code=True, torch_dtype=torch.float16).eval()
    try:
        model.generation_config = GenerationConfig.from_pretrained(args.checkpoint_path, trust_remote_code=True)
    except Exception:
        # Was a bare `except:`, which also swallowed KeyboardInterrupt/SystemExit.
        print("GenerationConfig not found, use default config.")
    return model, tokenizer
161,675 | import argparse
import os
from typing import List
import numpy as np
import pandas as pd
import torch
from tqdm import tqdm
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers import GenerationConfig
from transformers.trainer_utils import set_seed
def format_example(line, include_answer=True):
    """Format one row as 问题/options/答案; optionally append the gold answer."""
    parts = ['问题:' + line['question']]
    for opt in choices:
        parts.append(f'\n{opt}. {line[f"{opt}"]}')
    if include_answer:
        parts.append('\n答案:' + line["answer"] + '\n\n')
    else:
        parts.append('\n答案:')
    return ''.join(parts)
def generate_few_shot_prompt(k, subject, dev_df):
    """Concatenate the first *k* dev rows (all rows when k == -1) as in-context examples."""
    shot_count = dev_df.shape[0] if k == -1 else k
    examples = [
        format_example(dev_df.iloc[i, :], include_answer=True)
        for i in range(shot_count)
    ]
    return ''.join(examples)
def get_logits(tokenizer, model, inputs: List[str]):
    # NOTE(review): despite the local name `log_probs`, this returns *softmax
    # probabilities* over the vocabulary at the last position, not log-probs.
    input_ids = tokenizer(inputs, padding=False)['input_ids']
    input_ids = torch.tensor(input_ids, device=model.device)
    tokens = {'input_ids': input_ids}
    outputs = model(input_ids)['logits']
    logits = outputs[:, -1, :]  # last-token logits for each sequence
    log_probs = torch.nn.functional.softmax(logits, dim=-1)
    return log_probs, {'tokens': tokens}
choices = ["A", "B", "C", "D"]
def eval_subject(
        model,
        tokenizer,
        subject_name,
        test_df,
        k=5,
        dev_df=None,
        few_shot=False,
        save_result_dir=None,
        **kwargs
):
    """Score one subject by comparing the model's A/B/C/D token probabilities.

    For each test row, builds an (optionally few-shot) prompt, reads the model's
    next-token distribution, softmaxes the four option-letter logits against
    each other, and takes the argmax as the prediction.

    :return: accuracy percentage (0 when the split has no ``answer`` column).
    """
    result = []
    score = []
    few_shot_prompt = generate_few_shot_prompt(
        k, subject_name, dev_df) if few_shot else ''
    all_probs = {'prob_A': [], 'prob_B': [], 'prob_C': [], 'prob_D': []}
    if args.debug: print(f"few_shot_prompt: {few_shot_prompt}")
    for _, row in tqdm(test_df.iterrows(), total=len(test_df)):
        question = format_example(row, include_answer=False)
        full_prompt = few_shot_prompt + question
        output, input_info = get_logits(tokenizer, model, [full_prompt])
        assert output.shape[0] == 1
        logits = output.flatten()
        # Re-normalize over only the four option-letter token logits.
        # NOTE(review): assumes tokenizer("A")['input_ids'] selects the intended
        # option-letter token(s) — confirm for the tokenizer in use.
        softval = torch.nn.functional.softmax(
            torch.tensor(
                [
                    logits[tokenizer("A")['input_ids']],
                    logits[tokenizer("B")['input_ids']],
                    logits[tokenizer("C")['input_ids']],
                    logits[tokenizer("D")['input_ids']],
                ]
            ),
            dim=0,
        )
        # Upcast half precision before converting to numpy.
        if softval.dtype in {torch.bfloat16, torch.float16}:
            softval = softval.to(dtype=torch.float32)
        probs = softval.detach().cpu().numpy()
        for i, choice in enumerate(choices):
            all_probs[f'prob_{choice}'].append(probs[i])
        pred = {0: "A", 1: "B", 2: "C", 3: "D"}[np.argmax(probs)]
        if 'answer' in row:
            correct = 1 if pred == row['answer'] else 0
            score.append(correct)
            if args.debug:
                print(f'{question} pred: {pred} ref: {row["answer"]}')
        result.append(pred)
    if score:
        correct_ratio = 100 * sum(score) / len(score)
        if args.debug:
            print(subject_name, correct_ratio)
    else:
        correct_ratio = 0
    if save_result_dir:
        # Persist predictions and per-option probabilities for later inspection.
        test_df['model_output'] = result
        for i, choice in enumerate(choices):
            test_df[f'prob_{choice}'] = (all_probs[f'prob_{choice}'])
        if score:
            test_df["correctness"] = score
        os.makedirs(save_result_dir, exist_ok=True)
        test_df.to_csv(os.path.join(
            save_result_dir, f'{subject_name}_result.csv'), encoding="utf-8", index=False)
    return correct_ratio
161,676 | import argparse
import os
from typing import List
import numpy as np
import pandas as pd
import torch
from tqdm import tqdm
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers import GenerationConfig
from transformers.trainer_utils import set_seed
TASK_NAME_MAPPING = {
"computer_network": ["Computer Network", "\u8ba1\u7b97\u673a\u7f51\u7edc", "STEM"],
"operating_system": ["Operating System", "\u64cd\u4f5c\u7cfb\u7edf", "STEM"],
"computer_architecture": ["Computer Architecture", "\u8ba1\u7b97\u673a\u7ec4\u6210", "STEM"],
"college_programming": ["College Programming", "\u5927\u5b66\u7f16\u7a0b", "STEM"],
"college_physics": ["College Physics", "\u5927\u5b66\u7269\u7406", "STEM"],
"college_chemistry": ["College Chemistry", "\u5927\u5b66\u5316\u5b66", "STEM"],
"advanced_mathematics": ["Advanced Mathematics", "\u9ad8\u7b49\u6570\u5b66", "STEM"],
"probability_and_statistics": ["Probability and Statistics", "\u6982\u7387\u7edf\u8ba1", "STEM"],
"discrete_mathematics": ["Discrete Mathematics", "\u79bb\u6563\u6570\u5b66", "STEM"],
"electrical_engineer": ["Electrical Engineer", "\u6ce8\u518c\u7535\u6c14\u5de5\u7a0b\u5e08", "STEM"],
"metrology_engineer": ["Metrology Engineer", "\u6ce8\u518c\u8ba1\u91cf\u5e08", "STEM"],
"high_school_mathematics": ["High School Mathematics", "\u9ad8\u4e2d\u6570\u5b66", "STEM"],
"high_school_physics": ["High School Physics", "\u9ad8\u4e2d\u7269\u7406", "STEM"],
"high_school_chemistry": ["High School Chemistry", "\u9ad8\u4e2d\u5316\u5b66", "STEM"],
"high_school_biology": ["High School Biology", "\u9ad8\u4e2d\u751f\u7269", "STEM"],
"middle_school_mathematics": ["Middle School Mathematics", "\u521d\u4e2d\u6570\u5b66", "STEM"],
"middle_school_biology": ["Middle School Biology", "\u521d\u4e2d\u751f\u7269", "STEM"],
"middle_school_physics": ["Middle School Physics", "\u521d\u4e2d\u7269\u7406", "STEM"],
"middle_school_chemistry": ["Middle School Chemistry", "\u521d\u4e2d\u5316\u5b66", "STEM"],
"veterinary_medicine": ["Veterinary Medicine", "\u517d\u533b\u5b66", "STEM"],
"college_economics": ["College Economics", "\u5927\u5b66\u7ecf\u6d4e\u5b66", "Social Science"],
"business_administration": ["Business Administration", "\u5de5\u5546\u7ba1\u7406", "Social Science"],
"marxism": ["Marxism", "\u9a6c\u514b\u601d\u4e3b\u4e49\u57fa\u672c\u539f\u7406", "Social Science"],
"mao_zedong_thought": ["Mao Zedong Thought",
"\u6bdb\u6cfd\u4e1c\u601d\u60f3\u548c\u4e2d\u56fd\u7279\u8272\u793e\u4f1a\u4e3b\u4e49\u7406\u8bba\u4f53\u7cfb\u6982\u8bba",
"Social Science"],
"education_science": ["Education Science", "\u6559\u80b2\u5b66", "Social Science"],
"teacher_qualification": ["Teacher Qualification", "\u6559\u5e08\u8d44\u683c", "Social Science"],
"high_school_politics": ["High School Politics", "\u9ad8\u4e2d\u653f\u6cbb", "Social Science"],
"high_school_geography": ["High School Geography", "\u9ad8\u4e2d\u5730\u7406", "Social Science"],
"middle_school_politics": ["Middle School Politics", "\u521d\u4e2d\u653f\u6cbb", "Social Science"],
"middle_school_geography": ["Middle School Geography", "\u521d\u4e2d\u5730\u7406", "Social Science"],
"modern_chinese_history": ["Modern Chinese History", "\u8fd1\u4ee3\u53f2\u7eb2\u8981", "Humanities"],
"ideological_and_moral_cultivation": ["Ideological and Moral Cultivation",
"\u601d\u60f3\u9053\u5fb7\u4fee\u517b\u4e0e\u6cd5\u5f8b\u57fa\u7840",
"Humanities"],
"logic": ["Logic", "\u903b\u8f91\u5b66", "Humanities"],
"law": ["Law", "\u6cd5\u5b66", "Humanities"],
"chinese_language_and_literature": ["Chinese Language and Literature", "\u4e2d\u56fd\u8bed\u8a00\u6587\u5b66",
"Humanities"],
"art_studies": ["Art Studies", "\u827a\u672f\u5b66", "Humanities"],
"professional_tour_guide": ["Professional Tour Guide", "\u5bfc\u6e38\u8d44\u683c", "Humanities"],
"legal_professional": ["Legal Professional", "\u6cd5\u5f8b\u804c\u4e1a\u8d44\u683c", "Humanities"],
"high_school_chinese": ["High School Chinese", "\u9ad8\u4e2d\u8bed\u6587", "Humanities"],
"high_school_history": ["High School History", "\u9ad8\u4e2d\u5386\u53f2", "Humanities"],
"middle_school_history": ["Middle School History", "\u521d\u4e2d\u5386\u53f2", "Humanities"],
"civil_servant": ["Civil Servant", "\u516c\u52a1\u5458", "Other"],
"sports_science": ["Sports Science", "\u4f53\u80b2\u5b66", "Other"],
"plant_protection": ["Plant Protection", "\u690d\u7269\u4fdd\u62a4", "Other"],
"basic_medicine": ["Basic Medicine", "\u57fa\u7840\u533b\u5b66", "Other"],
"clinical_medicine": ["Clinical Medicine", "\u4e34\u5e8a\u533b\u5b66", "Other"],
"urban_and_rural_planner": ["Urban and Rural Planner", "\u6ce8\u518c\u57ce\u4e61\u89c4\u5212\u5e08", "Other"],
"accountant": ["Accountant", "\u6ce8\u518c\u4f1a\u8ba1\u5e08", "Other"],
"fire_engineer": ["Fire Engineer", "\u6ce8\u518c\u6d88\u9632\u5de5\u7a0b\u5e08", "Other"],
"environmental_impact_assessment_engineer": ["Environmental Impact Assessment Engineer",
"\u73af\u5883\u5f71\u54cd\u8bc4\u4ef7\u5de5\u7a0b\u5e08", "Other"],
"tax_accountant": ["Tax Accountant", "\u7a0e\u52a1\u5e08", "Other"],
"physician": ["Physician", "\u533b\u5e08\u8d44\u683c", "Other"]
}
hard_list = ['advanced_mathematics', 'discrete_mathematics', 'probability_and_statistics', 'college_physics',
'college_chemistry', 'high_school_mathematics', 'high_school_physics', 'high_school_chemistry']
def cal_ceval(res):
    """Aggregate per-task C-Eval accuracies and print category / hard / overall averages.

    Args:
        res: mapping whose keys end in '-<task_name>' and whose values are
            per-task accuracies (numbers or numeric strings).

    Fix: removed `acc_norm_sum_dict`, which was initialized (twice) but
    never read — dead state left over from a copy-paste.
    """
    acc_sum_dict = dict()
    cnt_dict = dict()
    acc_sum = 0.
    cnt = 0
    hard_cnt = 0
    hard_acc_sum = 0.
    for tt in res.keys():
        # Key format is '<prefix>-<task_name>'; recover the task name.
        name = tt.split('-')[-1]
        acc_sum += float(res[tt])
        cnt += 1
        # Category is the third entry of TASK_NAME_MAPPING: STEM / Social
        # Science / Humanities / Other.
        class_ = TASK_NAME_MAPPING[name][2]
        if class_ not in acc_sum_dict:
            acc_sum_dict[class_] = 0.
            cnt_dict[class_] = 0.
        if name in hard_list:
            hard_cnt += 1
            hard_acc_sum += float(res[tt])
        acc_sum_dict[class_] += float(res[tt])
        cnt_dict[class_] += 1
    print('\n\n\n')
    for k in ['STEM', 'Social Science', 'Humanities', 'Other']:
        if k in cnt_dict:
            print('%s acc: %.2f ' % (
                k, acc_sum_dict[k] / cnt_dict[k]))
    if hard_cnt > 0:
        print('Hard acc:%.2f ' % (hard_acc_sum / hard_cnt))
    print('AVERAGE acc:%.2f ' % (acc_sum / cnt))
161,677 | import argparse
import jsonlines
import torch
import tqdm
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import GenerationConfig
def post_decode(tokens_list, tokenizer, raw_text_len):
    """Decode each generated sequence, dropping the prompt tokens and
    truncating at the first known stop marker."""
    stop_markers = ('<|endoftext|>', '\n\n\n', "\n\n", "def ")
    decoded_texts = []
    for token_tensor in tokens_list:
        token_ids = token_tensor.cpu().numpy().tolist()
        # Skip the prompt portion; only decode newly generated tokens.
        text = tokenizer.decode(token_ids[raw_text_len:])
        for marker in stop_markers:
            text = text.split(marker)[0]
        decoded_texts.append(text)
    return decoded_texts
def generate_sample(model, tokenizer, input_txt):
    """Generate a completion for `input_txt` and return the post-processed text."""
    encoded = tokenizer.encode(input_txt)
    prompt_len = len(encoded)
    batch = torch.tensor([encoded]).to(model.device)
    print(f"Input text: {input_txt}\n")
    generated = model.generate(batch)
    cleaned = post_decode(generated, tokenizer, prompt_len)[0]
    print(f"\nOutput text: \n{cleaned}\n")
    return cleaned
161,678 | import argparse
import json
import re
from pathlib import Path
import numpy as np
import torch
import tqdm
from datasets import load_from_disk, load_dataset
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers import GenerationConfig
def doc_to_text(doc, use_fewshot):
    """Build the GSM8K prompt for one document.

    When `use_fewshot` is true, four fixed chain-of-thought exemplars
    (question + step-by-step solution + "The answer is N") are prepended
    before the target question; otherwise the bare question is returned.
    """
    if use_fewshot:
        # Hand-written CoT exemplars, kept verbatim so generations stay
        # comparable across runs.
        context = "Question: Angelo and Melanie want to plan how many hours over the next week they should study together for their test next week. They have 2 chapters of their textbook to study and 4 worksheets to memorize. They figure out that they should dedicate 3 hours to each chapter of their textbook and 1.5 hours for each worksheet. If they plan to study no more than 4 hours each day, how many days should they plan to study total over the next week if they take a 10-minute break every hour, include 3 10-minute snack breaks each day, and 30 minutes for lunch each day?\nLet's think step by step\n" \
                  "Angelo and Melanie think they should dedicate 3 hours to each of the 2 chapters, 3 hours x 2 chapters = 6 hours total.\nFor the worksheets they plan to dedicate 1.5 hours for each worksheet, 1.5 hours x 4 worksheets = 6 hours total.\nAngelo and Melanie need to start with planning 12 hours to study, at 4 hours a day, 12 / 4 = 3 days.\nHowever, they need to include time for breaks and lunch. Every hour they want to include a 10-minute break, so 12 total hours x 10 minutes = 120 extra minutes for breaks.\nThey also want to include 3 10-minute snack breaks, 3 x 10 minutes = 30 minutes.\nAnd they want to include 30 minutes for lunch each day, so 120 minutes for breaks + 30 minutes for snack breaks + 30 minutes for lunch = 180 minutes, or 180 / 60 minutes per hour = 3 extra hours.\nSo Angelo and Melanie want to plan 12 hours to study + 3 hours of breaks = 15 hours total.\nThey want to study no more than 4 hours each day, 15 hours / 4 hours each day = 3.75\nThey will need to plan to study 4 days to allow for all the time they need.\nThe answer is 4\n\n" \
                  "Question: Mark's basketball team scores 25 2 pointers, 8 3 pointers and 10 free throws. Their opponents score double the 2 pointers but half the 3 pointers and free throws. What's the total number of points scored by both teams added together?\nLet's think step by step\n" \
                  "Mark's team scores 25 2 pointers, meaning they scored 25*2= 50 points in 2 pointers.\nHis team also scores 6 3 pointers, meaning they scored 8*3= 24 points in 3 pointers\nThey scored 10 free throws, and free throws count as one point so they scored 10*1=10 points in free throws.\nAll together his team scored 50+24+10= 84 points\nMark's opponents scored double his team's number of 2 pointers, meaning they scored 50*2=100 points in 2 pointers.\nHis opponents scored half his team's number of 3 pointers, meaning they scored 24/2= 12 points in 3 pointers.\nThey also scored half Mark's team's points in free throws, meaning they scored 10/2=5 points in free throws.\nAll together Mark's opponents scored 100+12+5=117 points\nThe total score for the game is both team's scores added together, so it is 84+117=201 points\nThe answer is 201\n\n" \
                  "Question: Bella has two times as many marbles as frisbees. She also has 20 more frisbees than deck cards. If she buys 2/5 times more of each item, what would be the total number of the items she will have if she currently has 60 marbles?\nLet's think step by step\n" \
                  "When Bella buys 2/5 times more marbles, she'll have increased the number of marbles by 2/5*60 = 24\nThe total number of marbles she'll have is 60+24 = 84\nIf Bella currently has 60 marbles, and she has two times as many marbles as frisbees, she has 60/2 = 30 frisbees.\nIf Bella buys 2/5 times more frisbees, she'll have 2/5*30 = 12 more frisbees.\nThe total number of frisbees she'll have will increase to 30+12 = 42\nBella also has 20 more frisbees than deck cards, meaning she has 30-20 = 10 deck cards\nIf she buys 2/5 times more deck cards, she'll have 2/5*10 = 4 more deck cards.\nThe total number of deck cards she'll have is 10+4 = 14\nTogether, Bella will have a total of 14+42+84 = 140 items\nThe answer is 140\n\n" \
                  "Question: A group of 4 fruit baskets contains 9 apples, 15 oranges, and 14 bananas in the first three baskets and 2 less of each fruit in the fourth basket. How many fruits are there?\nLet's think step by step\n" \
                  "For the first three baskets, the number of apples and oranges in one basket is 9+15=24\nIn total, together with bananas, the number of fruits in one basket is 24+14=38 for the first three baskets.\nSince there are three baskets each having 38 fruits, there are 3*38=114 fruits in the first three baskets.\nThe number of apples in the fourth basket is 9-2=7\nThere are also 15-2=13 oranges in the fourth basket\nThe combined number of oranges and apples in the fourth basket is 13+7=20\nThe fourth basket also contains 14-2=12 bananas.\nIn total, the fourth basket has 20+12=32 fruits.\nThe four baskets together have 32+114=146 fruits.\nThe answer is 146\n\n" \
                  f"Question: {doc['question']}\nLet's think step by step"
    else:
        context = doc['question']
    return context
161,679 | import argparse
import json
import re
from pathlib import Path
import numpy as np
import torch
import tqdm
from datasets import load_from_disk, load_dataset
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers import GenerationConfig
def model_predict(model, tokenizer, question, max_new_tokens=512, **kwargs):
    """Generate a completion for `question` with `model` and return it stripped.

    Fix: the original definition had no body at all (a syntax error —
    `def generate_sample` followed it directly). Restored to match the
    identical helper used elsewhere in this codebase: tokenize the prompt,
    generate up to `max_new_tokens`, and decode only the newly generated
    tokens.
    """
    inputs = tokenizer(question, return_tensors="pt")
    input_ids = inputs['input_ids'].to(model.device)
    outputs = model.generate(input_ids=input_ids, max_new_tokens=max_new_tokens, **kwargs)
    # Drop the prompt portion before decoding.
    generated_sequence = outputs[0][len(input_ids[0]):]
    output_text = tokenizer.decode(generated_sequence, skip_special_tokens=True)
    return output_text.strip()


def generate_sample(model, tokenizer, question):
    """Run one prediction, log the question/response pair, and return the response."""
    response = model_predict(model, tokenizer, question)
    print(question)
    print("-------------")
    print(response)
    print("=============")
    return response
161,680 | import argparse
import json
import re
from pathlib import Path
import numpy as np
import torch
import tqdm
from datasets import load_from_disk, load_dataset
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers import GenerationConfig
INVALID_ANS = "[invalid]"
def extract_answer_hf(completion):
def _get_last_digit(s):
_PAT_LAST_DIGIT = re.compile(
r"(?<=(\s|[\$%#{]))([+-])?(?=(\S))(0|([1-9](\d*|\d{0,2}(,\d{3})*)))?(\.\d*[1-9])?(?=(\s|[.,}]|$))")
match = list(_PAT_LAST_DIGIT.finditer(s))
if match:
last_digit = match[-1].group().replace(",", "").replace("+", "")
# print(f"The last digit in {s} is {last_digit}")
else:
last_digit = None
print(f"No digits found in {s!r}")
return last_digit
job_gen = completion.strip('.').replace('\n', '\\n')
last_digit = _get_last_digit(job_gen)
if last_digit is not None:
return eval(last_digit)
else:
return INVALID_ANS | null |
161,681 | import argparse
import json
import re
from pathlib import Path
import numpy as np
import torch
import tqdm
from datasets import load_from_disk, load_dataset
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers import GenerationConfig
INVALID_ANS = "[invalid]"


def extract_answer(completion):
    """Return the last integer appearing in `completion`, or INVALID_ANS if none.

    Fixes: the bare `except:` is narrowed to IndexError (the only expected
    failure: no digits found), and eval() is replaced with int() — safe
    because findall(r'\d+') yields only digit runs.
    """
    try:
        return int(re.findall(r'\d+', completion)[-1])
    except IndexError:
        return INVALID_ANS


def is_correct(completion, answer):
    """Check whether the completion's final number matches the gold answer."""
    gold = extract_answer(answer)
    assert gold != INVALID_ANS, "No ground truth answer found in the document."
    return extract_answer(completion) == gold
161,682 | import argparse
import re
import textwrap
from pathlib import Path
import jsonlines
import torch
import tqdm
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import GenerationConfig
def extract_code(text, entry_point):
    """Pull the body of `entry_point` out of a model completion.

    Tries progressively looser regexes: a fenced code block containing the
    function, then a bare definition of the function, then any definition.
    If none match, assumes the model emitted only the body and indents it.
    """
    patterns = (
        re.compile(rf"```(?:[Pp]ython\n)?.*?def\s+{entry_point}.*?:\n(.*?)\n```", re.DOTALL),
        re.compile(rf"def\s+{entry_point}.*?:\n(.*?)(?:\n(?!\n*(?: |\t))|$)", re.DOTALL),
        re.compile(rf"def.*?:\n(.*?)(?:\n(?!\n*(?: |\t))|$)", re.DOTALL),
    )
    for pattern in patterns:
        found = pattern.search(text)
        if found is not None:
            return found.group(1)
    return textwrap.indent(text, ' ' * 4)
def model_predict(model, tokenizer, question, max_new_tokens=512, **kwargs):
    """Tokenize `question`, generate a continuation, and decode only the new tokens."""
    encoded = tokenizer(question, return_tensors="pt")
    prompt_ids = encoded['input_ids'].to(model.device)
    generated = model.generate(input_ids=prompt_ids, max_new_tokens=max_new_tokens, **kwargs)
    # Everything after the prompt is the model's answer.
    new_token_ids = generated[0][len(prompt_ids[0]):]
    decoded = tokenizer.decode(new_token_ids, skip_special_tokens=True)
    return decoded.strip()


def generate_sample(model, tokenizer, question, entry_point):
    """Generate a completion for a HumanEval task and extract the code body."""
    response = model_predict(model, tokenizer, question)
    print(question)
    print(response)
    answer = extract_code(response, entry_point)
    return answer, response
161,684 | import argparse
import os
from typing import List
import numpy as np
import pandas as pd
import torch
from tqdm import tqdm
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers import GenerationConfig
from transformers.trainer_utils import set_seed
def format_example(line, include_answer=True):
    """Render one MMLU row as a 'Question / choices / Answer' prompt string."""
    parts = ['Question: ' + line['question']]
    parts.extend(f'{c}. {line[c]}' for c in choices)
    if include_answer:
        # Trailing blank line separates stacked few-shot examples.
        parts.append('Answer: ' + line["answer"] + '\n\n')
    else:
        parts.append('Answer:')
    return '\n'.join(parts)
def generate_few_shot_prompt(k, subject, dev_df):
    """Build a k-shot prompt of answered dev examples for `subject`.

    k == -1 means use every row of `dev_df`.
    """
    # Turn 'college_biology' into 'college biology' for the header.
    readable_subject = ' '.join(subject.split("_")).strip()
    prompt = ("The following are multiple choice questions (with answers) "
              "about {}.\n\n").format(readable_subject)
    shots = dev_df.shape[0] if k == -1 else k
    for idx in range(shots):
        prompt += format_example(dev_df.iloc[idx, :], include_answer=True)
    return prompt
def get_logits(tokenizer, model, inputs: List[str]):
    """Tokenize `inputs`, run the model, and return next-token probabilities.

    NOTE(review): despite the original local name `log_probs`, plain softmax
    (not log-softmax) is applied, so the first return value holds
    probabilities over the vocabulary for the final position.
    """
    token_ids = tokenizer(inputs, padding=False)['input_ids']
    token_ids = torch.tensor(token_ids, device=model.device)
    # Left-truncate so the prompt fits within args.max_seq_len.
    if token_ids.shape[1] > args.max_seq_len:
        token_ids = token_ids[:, token_ids.shape[1] - args.max_seq_len + 1:]
    last_logits = model(token_ids)['logits'][:, -1, :]
    probs = torch.nn.functional.softmax(last_logits, dim=-1)
    return probs, {'tokens': {'input_ids': token_ids}}
# The four answer-option labels, shared by format_example and eval_subject.
choices = ["A", "B", "C", "D"]
def eval_subject(
        model,
        tokenizer,
        subject_name,
        test_df,
        k=5,
        dev_df=None,
        few_shot=False,
        save_result_dir=None,
        **kwargs
):
    """Evaluate `model` on one MMLU subject by comparing answer-letter probabilities.

    For every row of `test_df`, builds a prompt (optionally prefixed with k
    few-shot dev examples), reads the model's next-token distribution, and
    predicts whichever of " A"/" B"/" C"/" D" carries the most probability.

    Args:
        model: causal LM returning logits for a batch of token ids.
        tokenizer: matching tokenizer; also used to find the answer-token ids.
        subject_name: subject key (used in the few-shot header and CSV name).
        test_df: pandas DataFrame with 'question', choice columns A-D, and
            optionally an 'answer' column.
        k: number of few-shot examples (-1 means all of `dev_df`).
        dev_df: DataFrame of few-shot examples, required when few_shot=True.
        few_shot: prepend the few-shot prompt when True.
        save_result_dir: if set, write per-question predictions and
            probabilities to '<subject_name>_result.csv' in that directory.
        **kwargs: accepted for interface compatibility; currently unused.

    Returns:
        List of per-question 0/1 correctness flags (empty when `test_df`
        has no 'answer' column).
    """
    result = []
    score = []
    # Bug fix: the non-few-shot branch previously produced [] (a list), which
    # made `few_shot_prompt + question` raise TypeError; use '' instead.
    few_shot_prompt = generate_few_shot_prompt(
        k, subject_name, dev_df) if few_shot else ''
    all_probs = {'prob_A': [], 'prob_B': [], 'prob_C': [], 'prob_D': []}
    if args.debug: print(f"few_shot_prompt: {few_shot_prompt}")
    for _, row in tqdm(test_df.iterrows(), total=len(test_df)):
        question = format_example(row, include_answer=False)
        full_prompt = few_shot_prompt + question
        # `input_info` (token ids actually fed to the model) is unused here.
        output, input_info = get_logits(tokenizer, model, [full_prompt])
        assert output.shape[0] == 1
        logits = output.flatten()
        # Renormalize the probability mass over just the four answer tokens.
        softval = torch.nn.functional.softmax(
            torch.tensor(
                [
                    logits[tokenizer(" A")['input_ids']],
                    logits[tokenizer(" B")['input_ids']],
                    logits[tokenizer(" C")['input_ids']],
                    logits[tokenizer(" D")['input_ids']],
                ]
            ),
            dim=0,
        )
        if softval.dtype in {torch.bfloat16, torch.float16}:
            softval = softval.to(dtype=torch.float32)
        probs = softval.detach().cpu().numpy()
        for i, choice in enumerate(choices):
            all_probs[f'prob_{choice}'].append(probs[i])
        pred = {0: "A", 1: "B", 2: "C", 3: "D"}[np.argmax(probs)]
        if 'answer' in row:
            correct = 1 if pred == row['answer'] else 0
            score.append(correct)
            if args.debug: print(f'{question} pred: {pred} ref: {row["answer"]}')
        result.append(pred)
    if save_result_dir:
        test_df['model_output'] = result
        for i, choice in enumerate(choices):
            test_df[f'prob_{choice}'] = (all_probs[f'prob_{choice}'])
        if score:
            test_df["correctness"] = score
        os.makedirs(save_result_dir, exist_ok=True)
        test_df.to_csv(os.path.join(
            save_result_dir, f'{subject_name}_result.csv'), encoding="utf-8", index=False)
    return score
161,685 | import argparse
import os
from typing import List
import numpy as np
import pandas as pd
import torch
from tqdm import tqdm
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers import GenerationConfig
from transformers.trainer_utils import set_seed
# MMLU task names grouped by category, used by cal_mmlu for per-category averages.
TASK_NAME_MAPPING = {'stem': ['abstract_algebra', 'anatomy', 'astronomy', 'college_biology', 'college_chemistry',
                              'college_computer_science', 'college_mathematics', 'college_physics', 'computer_security',
                              'conceptual_physics', 'electrical_engineering', 'elementary_mathematics',
                              'high_school_biology', 'high_school_chemistry', 'high_school_computer_science',
                              'high_school_mathematics', 'high_school_physics', 'high_school_statistics',
                              'machine_learning'],
                     'Humanities': ['formal_logic', 'high_school_european_history', 'high_school_us_history',
                                    'high_school_world_history', 'international_law', 'jurisprudence',
                                    'logical_fallacies', 'moral_disputes', 'moral_scenarios', 'philosophy',
                                    'prehistory', 'professional_law', 'world_religions'],
                     'other': ['business_ethics', 'college_medicine', 'human_aging', 'management', 'marketing',
                               'medical_genetics', 'miscellaneous', 'nutrition', 'professional_accounting',
                               'professional_medicine', 'virology', 'global_facts', 'clinical_knowledge'],
                     'social': ['econometrics', 'high_school_geography', 'high_school_government_and_politics',
                                'high_school_macroeconomics', 'high_school_microeconomics', 'high_school_psychology',
                                'human_sexuality', 'professional_psychology', 'public_relations', 'security_studies',
                                'sociology', 'us_foreign_policy']}


def cal_mmlu(res):
    """Aggregate per-task MMLU results into per-category and overall accuracy (%).

    Args:
        res: mapping from task name to a list of 0/1 correctness flags,
            covering every task listed in TASK_NAME_MAPPING.

    Fix: removed `acc_norm_sum_dict`, `hard_cnt` and `hard_acc_sum`, which
    were initialized but never used — dead copy-paste from the C-Eval
    variant of this function.
    """
    acc_sum_dict = dict()
    cnt_dict = dict()
    acc_sum = 0.
    cnt = 0
    for class_ in TASK_NAME_MAPPING.keys():
        acc_sum_dict[class_] = 0.
        cnt_dict[class_] = 0.
        for tt in TASK_NAME_MAPPING[class_]:
            acc_sum += sum(res[tt])
            cnt += len(res[tt])
            acc_sum_dict[class_] += sum(res[tt])
            cnt_dict[class_] += len(res[tt])
    print('\n\n\n', 'total cnt:', cnt, '\n')
    for k in TASK_NAME_MAPPING.keys():
        if k in cnt_dict:
            print('%s ACC: %.2f ' % (
                k, acc_sum_dict[k] / cnt_dict[k] * 100))
    print('AVERAGE ACC:%.2f ' % (acc_sum / cnt * 100))
161,686 | import argparse
import re
import datasets
import jsonlines
import numpy as np
import torch
from datasets import load_from_disk, load_dataset
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import GenerationConfig
def doc_to_text(doc):
    """Prepend the shared few-shot prompt to this document's question."""
    return f"{fewshot_prompt}\nQuestion: {doc['question']}\nLet's think step by step\n"
161,687 | import argparse
import re
import datasets
import jsonlines
import numpy as np
import torch
from datasets import load_from_disk, load_dataset
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import GenerationConfig
def post_decode(tokens_list, tokenizer, raw_text_len):
    """Decode each generated sequence, dropping the prompt tokens and
    truncating at the first known stop marker (including a new 'Question:')."""
    stop_markers = ('<|endoftext|>', '\n\n\n', "\n\n", "Question:")
    sentences = []
    for token_tensor in tokens_list:
        ids = token_tensor.cpu().numpy().tolist()
        # Skip the prompt; decode only the newly generated tokens.
        text = tokenizer.decode(ids[raw_text_len:])
        for marker in stop_markers:
            text = text.split(marker)[0]
        sentences.append(text)
    return sentences
def generate_sample(model, tokenizer, input_txt):
    """Generate a completion for `input_txt` and return the cleaned-up text."""
    encoded = tokenizer.encode(input_txt)
    prompt_len = len(encoded)
    batch = torch.tensor([encoded]).to(model.device)
    print(f"Input text: {input_txt}\n")
    raw_outputs = model.generate(batch)
    text = post_decode(raw_outputs, tokenizer, prompt_len)[0]
    print(f"\nOutput text: {text}\n")
    return text
161,688 | import argparse
import re
import datasets
import jsonlines
import numpy as np
import torch
from datasets import load_from_disk, load_dataset
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import GenerationConfig
INVALID_ANS = "[invalid]"


def extract_answer_hf(completion):
    """Parse the gold answer captured by the ANS_RE pattern.

    Fix: the captured string was previously eval()'d; it is now parsed
    numerically (int first, float fallback) — same value for numeric
    matches, without executing arbitrary text.
    """
    match = ANS_RE.search(completion)
    if match:
        match_str = match.group(1).strip().replace(",", "")
        try:
            return int(match_str)
        except ValueError:
            return float(match_str)
    return INVALID_ANS


def extract_answer(completion):
    """Return the last integer appearing in `completion`, or INVALID_ANS if none.

    Fix: bare `except:` narrowed to IndexError, eval() replaced with int()
    (safe because findall(r'\d+') yields only digit runs).
    """
    try:
        return int(re.findall(r'\d+', completion)[-1])
    except IndexError:
        return INVALID_ANS


def is_correct(completion, answer):
    """Compare the completion's last number against the gold answer from ANS_RE."""
    gold = extract_answer_hf(answer)
    assert gold != INVALID_ANS, "No ground truth answer found in the document."
    return extract_answer(completion) == gold
161,689 | import os
def Calendar():
    """Return today's date as a human-readable sentence.

    Fix: this function used `datetime` and `calendar` without importing
    them (the surrounding file only imports `os`), so every call raised
    NameError; the imports are now done locally.
    """
    import calendar
    import datetime
    now = datetime.datetime.now()
    return f'Today is {calendar.day_name[now.weekday()]}, {calendar.month_name[now.month]} {now.day}, {now.year}.'
161,690 | import os
def colbertv2_get_request(url: str, query: str, k: int):
    """Query a ColBERTv2 retrieval server and return its top-k passages."""
    response = requests.get(url, params={'query': query, 'k': k})
    # Server may return more than k entries; trim client-side as well.
    return response.json()['topk'][:k]
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.