id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
164,917 | import json
import pdb
from map_subword_serialize import schema_subword_matrix
import argparse
from transformers import AutoTokenizer
import pickle
def schema_subword_matrix(db_sep, init_idx, tables, tokenizer, table_items=None, column_items=None):
def schema_subword_dataset(seq2seq_dataset, tokenizer, tables, output_path=None):
    """Attach subword-level schema relation matrices to every example.

    Iterates over ``seq2seq_dataset`` (a dict keyed by example-id string)
    and, for each example, expands its serialized schema to subword
    granularity via ``schema_subword_matrix``, storing the results back
    onto the example dict in place.

    Args:
        seq2seq_dataset: dict mapping example-id strings to example dicts;
            each example must provide 'db_table_names', 'db_column_names',
            'db_id' and 'serialized_schema'.
        tokenizer: HuggingFace tokenizer used to split schema names.
        tables: dict mapping db_id to that database's schema relations.
        output_path: optional path; when given, the augmented dataset is
            pickled there.

    Returns:
        The same ``seq2seq_dataset`` dict, mutated in place.
    """
    processed = 0
    for _, example in seq2seq_dataset.items():
        relations = tables[example['db_id']]
        matrix, mapping, struct_in, to_ids = schema_subword_matrix(
            db_sep=example['serialized_schema'],
            table_items=example['db_table_names'],
            tokenizer=tokenizer,
            column_items=example['db_column_names']['column_name'],
            init_idx=0,
            tables=tables,
        )
        example['schema_subword_relations'] = matrix
        example['schema_relations'] = relations
        example['schema_subword_mapping_dict'] = mapping
        example['new_struct_in'] = struct_in
        example['schema_to_ids'] = to_ids
        processed += 1
        if processed % 1000 == 0:
            print("******************************* processing {}th datasets *******************************".format(processed))
    if output_path:
        pickle.dump(seq2seq_dataset, open(output_path, "wb"))
    return seq2seq_dataset
164,918 | import json
import pdb
from map_subword_serialize import schema_linking_subword
import argparse
from transformers import AutoTokenizer
import pickle
def schema_linking_subword(question_subword_dict: dict, schema_2_ids: dict, schema_linking: tuple, question_subword_len: int, schema_subword_len: int):
    """Expand word-level question<->schema linking matrices to subword level.

    Every relation tag between a question word and a schema item is copied
    to all (question-subword, schema-subword) position pairs the two sides
    map to, in both directions.

    Args:
        question_subword_dict: question word index -> list of subword
            indices (holds one extra entry beyond the linking matrix,
            hence the ``+ 1`` in the sanity check).
        schema_2_ids: schema item index -> list of subword indices.
        schema_linking: (question-to-schema matrix, schema-to-question
            matrix) at word level.
        question_subword_len: total number of question subword positions.
        schema_subword_len: total number of schema subword positions.

    Returns:
        Tuple of the two subword-level matrices as nested lists of strings
        (the numpy '<U100' round-trip coerces untouched 0 fillers to "0").
    """
    q_to_s, s_to_q = schema_linking
    assert len(question_subword_dict) == len(q_to_s) + 1
    assert len(schema_2_ids) == len(s_to_q)
    q_s_sub = [[0] * schema_subword_len for _ in range(question_subword_len)]
    s_q_sub = [[0] * question_subword_len for _ in range(schema_subword_len)]
    # question -> schema direction
    for q_idx, row in enumerate(q_to_s):
        for s_idx in range(len(schema_2_ids)):
            relation = row[s_idx]
            for q_sub in question_subword_dict[q_idx]:
                for s_sub in schema_2_ids[s_idx]:
                    q_s_sub[q_sub][s_sub] = relation
    # schema -> question direction
    for s_idx, row in enumerate(s_to_q):
        for q_idx in range(len(q_to_s)):
            relation = row[q_idx]
            for s_sub in schema_2_ids[s_idx]:
                for q_sub in question_subword_dict[q_idx]:
                    s_q_sub[s_sub][q_sub] = relation
    q_s_sub = np.array(q_s_sub, dtype='<U100')
    s_q_sub = np.array(s_q_sub, dtype='<U100')
    return (q_s_sub.tolist(), s_q_sub.tolist())
def schema_linking_subword_dataset(seq2seq_dataset, output_path=None):
    """Add subword-level schema-linking matrices to every dataset example.

    For each example, the word-level 'schema_linking' matrices are
    expanded to subword granularity with ``schema_linking_subword`` and
    stored back on the example under 'schema_linking_subword'.

    Args:
        seq2seq_dataset: dict mapping example-id strings to example dicts;
            each example must already carry 'question_subword_dict',
            'schema_to_ids', 'question_subword_matrix',
            'schema_subword_relations' and 'schema_linking'.
        output_path: optional path; when given, the augmented dataset is
            pickled there.

    Returns:
        The same ``seq2seq_dataset`` dict, mutated in place.
    """
    done = 0
    for _, example in seq2seq_dataset.items():
        example['schema_linking_subword'] = schema_linking_subword(
            question_subword_dict=example['question_subword_dict'],
            schema_2_ids=example['schema_to_ids'],
            question_subword_len=len(example['question_subword_matrix']),
            schema_subword_len=len(example['schema_subword_relations']),
            schema_linking=example['schema_linking'],
        )
        done += 1
        if done % 1000 == 0:
            print("******************************* processing {}th datasets *******************************".format(done))
    if output_path:
        pickle.dump(seq2seq_dataset, open(output_path, "wb"))
    return seq2seq_dataset
164,919 | import os, json, pickle, argparse, sys, time
import pdb
import torch
from collections import defaultdict
import numpy as np
import re
from transformers import AutoModel, AutoTokenizer
The provided code snippet includes necessary dependencies for implementing the `quote_normalization` function. Write a Python function `def quote_normalization(question)` to solve the following problem:
Normalize all usage of quotation marks into a separate \"
Here is the function:
def quote_normalization(question):
    """Split quotes off tokens, emitting each as a standalone ``\"`` token.

    Args:
        question: list of word tokens.

    Returns:
        New token list where any leading/trailing quotation mark (single,
        double, backtick, or curly variants) is detached and normalized
        to a plain ``\"``; other tokens pass through unchanged.
    """
    marks = ["'", '"', '`', '‘', '’', '“', '”', '``', "''", "‘‘", "’’"]
    normalized = []
    for idx, tok in enumerate(question):
        if len(tok) > 2:
            # long tokens: peel quotes off either or both ends
            starts = tok[0] in marks
            ends = tok[-1] in marks
            if starts and ends:
                normalized.extend(["\"", tok[1:-1], "\""])
            elif starts:
                normalized.extend(["\"", tok[1:]])
            elif ends:
                normalized.extend([tok[:-1], "\""])
            else:
                normalized.append(tok)
        elif tok in marks:
            normalized.append("\"")
        elif len(tok) == 2 and tok[0] in marks:
            # special case: the quoted entity value is a single character
            if idx + 1 < len(question) and question[idx + 1] in marks:
                normalized.extend(["\"", tok[1]])
            else:
                normalized.append(tok)
        else:
            normalized.append(tok)
    return normalized
164,920 | import os, json, pickle, argparse, sys, time
import pdb
import torch
from collections import defaultdict
import numpy as np
import re
from transformers import AutoModel, AutoTokenizer
def subword_dict(input_ids):
    """Group subword positions by the word id they belong to.

    Args:
        input_ids: sequence of word ids, one per subword position, as
            produced by a HuggingFace tokenizer's ``word_ids()``; scanning
            stops at the first ``None`` (special/padding token).

    Returns:
        defaultdict mapping word id -> ordered list of subword positions.
    """
    mapping = defaultdict()
    for sub_pos, word_id in enumerate(input_ids):
        if word_id is None:
            break
        mapping.setdefault(word_id, []).append(sub_pos)
    return mapping
def reduce_redundancy_idx(schema_dict_split: dict, schema_dict_ori: dict):
    """Re-pack per-schema-item subword indices into one compact index space.

    Walks ``schema_dict_ori`` in insertion order and assigns each original
    schema item a contiguous run of indices whose length equals the number
    of subwords its tokenized position produced, so runs for successive
    items sit back-to-back starting at 0.

    Args:
        schema_dict_split: tokenized word index -> list of subword indices.
        schema_dict_ori: tokenized word index -> original schema item index.

    Returns:
        dict mapping original schema item index -> list of compact,
        consecutive subword indices.
    """
    cross_index = 0
    # BUG FIX: this was initialized to 0 (an int), so the item assignment
    # below raised TypeError on the very first iteration; it must be a dict.
    compact_schema_dict = {}
    for tokenized_idx, original_idx in schema_dict_ori.items():
        subword_indices = schema_dict_split[tokenized_idx]
        temp_lst = [offset + cross_index for offset in range(len(subword_indices))]
        cross_index = temp_lst[-1] + 1
        compact_schema_dict[original_idx] = temp_lst
    return compact_schema_dict
def index_schema(db_seq, ori_tables_name=None, ori_columns_name=None, init_index=0):
    """Locate table- and column-name positions in a serialized schema string.

    The schema string is expected to look like
    ``"schema: t1 | t2 ; c1 , c2"``: a token right after ``schema:`` or
    ``|`` is a table name, a token right after ``;`` or ``,`` is a column
    name.  Only the token immediately following a delimiter is indexed, so
    multi-word names contribute just their first token's position.

    Args:
        db_seq: space-separated serialized schema string.
        ori_tables_name: original table names (not used for indexing; kept
            for interface compatibility with existing callers).
        ori_columns_name: original column names (not used for indexing;
            kept for interface compatibility with existing callers).
        init_index: offset added to every returned position.

    Returns:
        (table_idx_lst, column_idx_lst): token positions (plus
        ``init_index``) of table and column names within ``db_seq``.
    """
    seq_lst = db_seq.split(" ")
    special_token = ["|", ':', ',', 'schema:', '(', ')', ';']
    table_idx_lst = []
    column_idx_lst = []
    for i, item in enumerate(seq_lst):
        if item in special_token:
            continue
        prev = seq_lst[i - 1]
        if prev in ("schema:", "|"):
            table_idx_lst.append(i + init_index)
        elif prev in (";", ","):
            column_idx_lst.append(i + init_index)
    # NOTE: the original built lowercased copies of the name arguments and
    # then asserted each list's length against itself (always true).  That
    # dead code also crashed when the name arguments were left at their
    # None defaults, so it has been removed; indexing is unaffected.
    return table_idx_lst, column_idx_lst
def schema_subword_matrix(ori_tables_name, ori_columns_name, tokenizer, schema_relations):
    """Expand the word-level schema relation matrix to subword level.

    Serializes the schema as ``"schema: <tables> ; <columns>"``, tokenizes
    it, maps every table/column to the subword positions it occupies, and
    copies relation ``schema_relations[r][c]`` to every pair of subword
    positions belonging to schema items ``r`` and ``c``.

    Args:
        ori_tables_name: list of table names.
        ori_columns_name: list of (table_idx, column_name) pairs — note
            the ``column[1]`` access below.
        tokenizer: HuggingFace tokenizer (must support ``word_ids()``).
        schema_relations: square matrix over tables+columns of relation
            tags.

    Returns:
        (subword_matrix, schema_original_dict): the subword-level relation
        matrix as nested lists of strings, and the mapping from schema
        item index to its compact subword index list.
    """
    # tokenized schema names with split since to tell the cases with " " in single column name
    ori_columns_str = " , ".join([column[1].lower() for column in ori_columns_name])
    ori_tables_str = " | ".join([table.lower() for table in ori_tables_name])
    ori_schema_str = "schema: {} ; {} ".format(ori_tables_str, ori_columns_str)
    tokenized_schema_split = tokenizer(ori_schema_str)
    word_ids_split = tokenized_schema_split.word_ids()
    # drop the final entry (presumably the tokenizer's trailing special
    # token, e.g. </s>) — TODO confirm for the tokenizer in use
    word_ids_split = word_ids_split[:-1]
    subword_matrix = [[0] * len(word_ids_split) for _ in range(len(word_ids_split))]
    # tokenize schema names only with " "
    # NOTE(review): the joined STRINGS (not the name lists) are passed
    # here; index_schema's name parameters look like they expect lists.
    # It currently only reads them in self-comparing asserts, so this is
    # harmless, but confirm it was intended.
    table_idx_lst, column_idx_lst = index_schema(ori_schema_str, ori_tables_str, ori_columns_str)
    schema_idx_lst = table_idx_lst + column_idx_lst
    schema_dict_split = subword_dict(word_ids_split)
    # map each indexed token position back to its schema item ordinal
    schema_to_original = {}
    for i, tokenized_schema in enumerate(schema_idx_lst):
        schema_to_original[tokenized_schema] = i
    schema_original_dict = reduce_redundancy_idx(schema_dict_split=schema_dict_split, schema_dict_ori=schema_to_original)
    assert len(schema_original_dict) == len(ori_tables_name + ori_columns_name)
    # fully-connected subwords as new matrix:
    for r in range(len(ori_tables_name + ori_columns_name)):
        for c in range(len(ori_tables_name + ori_columns_name)):
            for sub_idx_r in schema_original_dict[r]:
                for sub_idx_c in schema_original_dict[c]:
                    subword_matrix[sub_idx_r][sub_idx_c] = schema_relations[r][c]
    # the '<U100' round-trip coerces every entry (including 0 fillers) to str
    subword_matrix = np.array(subword_matrix, dtype='<U100')
    subword_matrix = subword_matrix.tolist()
    return subword_matrix, schema_original_dict
164,921 | import datetime
import hashlib
import math
import json
import os
import shutil
import time
import codecs
import numpy as np
import pickle
import tensorflow as tf
from data import data_provider_bert
from model import bert
from model import dse_cl_bert
from config_bert import get_parser
import wrapper
The provided code snippet includes necessary dependencies for implementing the `average_gradients` function. Write a Python function `def average_gradients(tower_grads)` to solve the following problem:
多卡梯度求平均
Here is the function:
def average_gradients(tower_grads):
    """Average per-tower gradients across GPUs.

    Args:
        tower_grads: list (one entry per tower) of (gradient, variable)
            pair lists, as produced by ``Optimizer.compute_gradients``.

    Returns:
        (average_grad, variable_name): per-variable averaged gradients
        (``None`` when no tower produced a gradient for that variable)
        and the matching variables, taken from the first tower.
    """
    averaged = []
    variables = []
    for grad_and_vars in zip(*tower_grads):
        # stack the non-None per-tower gradients along a new leading axis
        stacked = [tf.expand_dims(g, 0) for g, _ in grad_and_vars if g is not None]
        if stacked:
            mean_grad = tf.reduce_mean(tf.concat(axis=0, values=stacked), 0)
        else:
            mean_grad = None
        averaged.append(mean_grad)
        # the variable is shared across towers; take it from the first
        variables.append(grad_and_vars[0][1])
    return averaged, variables
164,922 | import argparse
The provided code snippet includes necessary dependencies for implementing the `get_parser` function. Write a Python function `def get_parser()` to solve the following problem:
从命令行获取parser
Here is the function:
def get_parser():
    """Build and parse command-line arguments for the QA-representation model.

    Returns:
        argparse.Namespace with all known arguments; unrecognized
        command-line arguments are silently ignored (``parse_known_args``).
    """
    parser = argparse.ArgumentParser()
    # Paths and experiment naming
    parser.add_argument("--model_dir", type=str, default="./sentence_embeddings/")
    parser.add_argument("--serving_dir", type=str, default="./outputs/qa_representation_model/")
    parser.add_argument("--model_name", type=str, default="qa_representation")
    parser.add_argument("--train_file", type=str, default="/data/qa_representation/train_data.txt.rs")
    parser.add_argument("--test_file", type=str, default="/data/qa_representation/test_data.txt.rs")
    parser.add_argument("--env_name", type=str, default="qa_representation")
    parser.add_argument("--load_step", type=str, default="")
    # Model hyper-parameters
    # NOTE(review): type=int with a bool default — "--do_lower 0/1" works on
    # the command line, but non-numeric values crash; confirm intended.
    parser.add_argument("--do_lower", type=int, default=True)
    parser.add_argument("--model_type", type=str, default="matching_transformer")
    # NOTE(review): type=str but an int default — the attribute's type
    # differs depending on whether the flag is passed.
    parser.add_argument("--bidirectional", type=str, default=1)
    parser.add_argument("--block_num", type=int, default=1)
    parser.add_argument("--head_num", type=int, default=8)
    parser.add_argument("--embedding_size", type=int, default=256)
    parser.add_argument("--hidden_size", type=int, default=256)
    parser.add_argument("--kernel_size", type=int, default=3)
    parser.add_argument("--filter_num", type=int, default=256)
    parser.add_argument("--class_num", type=int, default=2)
    parser.add_argument("--enc_num", type=int, default=1)
    # Data-related parameters
    # NOTE(review): argparse type=bool is truthy for ANY non-empty string
    # ("False" -> True); only the defaults behave as expected.
    parser.add_argument("--train_multi_mode", type=bool, default=True)
    parser.add_argument("--test_multi_mode", type=bool, default=True)
    parser.add_argument("--line_sep_char", type=str, default="\t")
    parser.add_argument("--turn_sep_char", type=str, default="|")
    parser.add_argument("--token_sep_char", type=str, default="")
    parser.add_argument("--round_num", type=int, default=3)
    parser.add_argument("--train_pack_label", type=bool, default=False)
    parser.add_argument("--test_pack_label", type=bool, default=False)
    parser.add_argument("--train_response_num", type=int, default=10)
    parser.add_argument("--test_response_num", type=int, default=10)
    parser.add_argument("--max_seq_len", type=int, default=100)
    parser.add_argument("--min_df", type=int, default=0)
    parser.add_argument("--column_num", type=int, default=-1)
    parser.add_argument("--partition_num", type=int, default=10)
    parser.add_argument("--load_batch", type=int, default=10)
    # Training parameters
    parser.add_argument("--train_gpu", type=str, default="0,1,2,3")
    parser.add_argument("--test_gpu", type=str, default="0,1,2,3")
    # parser.add_argument("--pretrain_embedding_path", type=str, default="")
    parser.add_argument("--pretrain_embedding_path", type=str, default="")
    parser.add_argument("--trainable_embedding", action="store_true", default=True)
    parser.add_argument("--keep_prob", type=float, default=0.8)
    parser.add_argument("--train_batch_size", type=int, default=70)
    parser.add_argument("--test_batch_size", type=int, default=150)
    parser.add_argument("--epoches", type=int, default=50)
    parser.add_argument("--lr", type=float, default=1e-3)
    parser.add_argument("--beta1", type=float, default=0.9)
    parser.add_argument("--beta2", type=float, default=0.999)
    parser.add_argument("--min_lr", type=float, default=1e-4)
    parser.add_argument("--lr_decay_rate", type=float, default=0.98)
    parser.add_argument("--lr_warmup_steps", type=int, default=2000)
    parser.add_argument("--lr_decay_steps", type=int, default=10000)
    parser.add_argument("--print_per_steps", type=int, default=50)
    parser.add_argument("--eval_per_steps", type=int, default=2000)
    parser.add_argument("--l2_reg", type=float, default=0.0003)
    # BERT-family model parameters
    parser.add_argument("--init_checkpoint", type=str, default=None)
    parser.add_argument("--vocab_file", type=str, default="")
    parser.add_argument("--bert_config_file", type=str, default="")
    # Mode selection (train / test / ...)
    parser.add_argument("--stage", type=str, default="train")
    args = parser.parse_known_args()[0]
    return args
164,923 | import argparse
The provided code snippet includes necessary dependencies for implementing the `get_parser` function. Write a Python function `def get_parser()` to solve the following problem:
从命令行获取parser
Here is the function:
def get_parser():
    """Build and parse command-line arguments for the DSE/CL-BERT model.

    Returns:
        argparse.Namespace with all known arguments; unrecognized
        command-line arguments are silently ignored (``parse_known_args``).
    """
    parser = argparse.ArgumentParser()
    # Paths and experiment naming
    parser.add_argument("--model_save_dir", type=str, default="./output_dse_model/")
    parser.add_argument("--serving_dir", type=str, default="./serving_dse_model/")
    parser.add_argument("--model_name", type=str, default="dse")
    parser.add_argument("--train_file", type=str, default="./data/train.txt")
    parser.add_argument("--test_file", type=str, default="./data/test.txt")
    parser.add_argument("--env_name", type=str, default="dse")
    parser.add_argument("--load_step", type=str, default="1000")
    # Model hyper-parameters
    # NOTE(review): argparse type=bool is truthy for ANY non-empty string
    # ("False" -> True); only the defaults behave as expected.
    parser.add_argument("--do_lower", type=bool, default=True)
    parser.add_argument("--model_type", type=str, default="cl_bert")
    # NOTE(review): type=str but an int default — the attribute's type
    # differs depending on whether the flag is passed.
    parser.add_argument("--bidirectional", type=str, default=1)
    # Data-related parameters
    parser.add_argument("--train_multi_mode", type=bool, default=True)
    parser.add_argument("--test_multi_mode", type=bool, default=True)
    parser.add_argument("--line_sep_char", type=str, default="\t")
    parser.add_argument("--turn_sep_char", type=str, default="|")
    parser.add_argument("--token_sep_char", type=str, default="")
    parser.add_argument("--layer_num", type=int, default=6)
    parser.add_argument("--round_num", type=int, default=3)
    parser.add_argument("--class_num", type=int, default=2)
    # NOTE(review): type=int with a float default — passing "--keep_prob
    # 0.8" crashes (int("0.8")); probably meant type=float.
    parser.add_argument("--keep_prob", type=int, default=0.8)
    parser.add_argument("--train_pack_label", type=bool, default=False)
    parser.add_argument("--test_pack_label", type=bool, default=False)
    parser.add_argument("--train_response_num", type=int, default=10)
    parser.add_argument("--test_response_num", type=int, default=10)
    parser.add_argument("--hidden_size", type=int, default=768)
    parser.add_argument("--max_seq_len", type=int, default=100)
    parser.add_argument("--min_df", type=int, default=0)
    parser.add_argument("--column_num", type=int, default=-1)
    parser.add_argument("--partition_num", type=int, default=2)
    parser.add_argument("--load_batch", type=int, default=4)
    # Training parameters
    parser.add_argument("--train_gpu", type=str, default="0,1,2,3")
    parser.add_argument("--test_gpu", type=str, default="0,1,2,3")
    parser.add_argument("--train_batch_size", type=int, default=20)
    parser.add_argument("--test_batch_size", type=int, default=50)
    parser.add_argument("--epoches", type=int, default=50)
    parser.add_argument("--lr", type=float, default=5e-5)
    parser.add_argument("--beta1", type=float, default=0.9)
    parser.add_argument("--beta2", type=float, default=0.999)
    parser.add_argument("--min_lr", type=float, default=1e-5)
    parser.add_argument("--lr_decay_rate", type=float, default=0.98)
    parser.add_argument("--lr_warmup_steps", type=int, default=2000)
    parser.add_argument("--lr_decay_steps", type=int, default=10000)
    parser.add_argument("--print_per_steps", type=int, default=1)
    parser.add_argument("--eval_per_steps", type=int, default=10)
    parser.add_argument("--l2_reg", type=float, default=0.0003)
    # BERT-family model parameters
    parser.add_argument("--use_init_model", type=bool, default=True)
    parser.add_argument("--bert_init_dir", type=str, default="./pretrain_model/") # download from BERT github
    parser.add_argument("--init_checkpoint", type=str, default="bert_model")
    parser.add_argument("--vocab_file", type=str, default="vocab.txt")
    parser.add_argument("--bert_config_file", type=str, default="bert_config.json")
    # Mode selection (train / test / ...)
    parser.add_argument("--stage", type=str, default="train")
    args = parser.parse_known_args()[0]
    return args
164,924 | import datetime
import hashlib
import math
import json
import os
import shutil
import time
import codecs
import numpy as np
import pickle
import tensorflow as tf
from common import dump_script
from data import data_provider
from config import get_parser
The provided code snippet includes necessary dependencies for implementing the `average_gradients` function. Write a Python function `def average_gradients(tower_grads)` to solve the following problem:
多卡梯度求平均
Here is the function:
def average_gradients(tower_grads):
    """Average gradients collected from multiple GPU towers.

    Args:
        tower_grads: one (gradient, variable) pair list per tower, as
            returned by ``Optimizer.compute_gradients``.

    Returns:
        (average_grad, variable_name): the averaged gradient per variable
        (``None`` if every tower's gradient was None) and the variables,
        read from the first tower since they are shared.
    """
    average_grad, variable_name = [], []
    for grad_and_vars in zip(*tower_grads):
        valid = [g for g, _ in grad_and_vars if g is not None]
        if not valid:
            average_grad.append(None)
        else:
            expanded = [tf.expand_dims(g, 0) for g in valid]
            average_grad.append(tf.reduce_mean(tf.concat(axis=0, values=expanded), 0))
        variable_name.append(grad_and_vars[0][1])
    return average_grad, variable_name
164,925 |
The provided code snippet includes necessary dependencies for implementing the `add_path_suffix` function. Write a Python function `def add_path_suffix(path_variable, suffix)` to solve the following problem:
增加分区后缀
Here is the function:
def add_path_suffix(path_variable, suffix):
    """Append a partition suffix, producing ``"<path>.<suffix>"``.

    Args:
        path_variable: base path (any object; converted with ``str``).
        suffix: partition suffix (any object; converted with ``str``).

    Returns:
        The dot-joined string form of the two arguments.
    """
    return "{}.{}".format(str(path_variable), str(suffix))
164,926 | import codecs
The provided code snippet includes necessary dependencies for implementing the `dump_script` function. Write a Python function `def dump_script(module)` to solve the following problem:
将当前文件打印, 供模型分析. pyc文件无法正常打印,打印对应的python文件
Here is the function:
def dump_script(module):
    """Print a module's source file so it is captured in training logs.

    ``.pyc`` paths are mapped back to the matching ``.py`` file, since a
    compiled file cannot be printed as text.

    Args:
        module: an imported module object (its ``__file__`` is read).
    """
    file_name = module.__file__
    if file_name.endswith("pyc"):
        file_name = file_name[:-1]
    with codecs.open(file_name, "r", "utf-8") as f_in:
        print("\n===================dump_begin====================\n")
        print("file_path: %s" % file_name)
        for line in f_in.readlines():
            # BUG FIX: the old code printed line.rstrip().encode("utf-8"),
            # which under Python 3 prints the bytes repr (b'...').
            print(line.rstrip())
        print("\n===================dump_finish====================\n")
164,927 | import os
The provided code snippet includes necessary dependencies for implementing the `line_statistics` function. Write a Python function `def line_statistics(file_name)` to solve the following problem:
统计文件行数
Here is the function:
def line_statistics(file_name):
    """Count the number of lines (newline characters) in a file.

    Args:
        file_name: path to the file, or None.

    Returns:
        Number of newline characters in the file (matching ``wc -l``),
        or 0 when ``file_name`` is None.

    Notes:
        The original implementation shelled out to ``wc -l`` and parsed
        its stdout with ``split(" ")[0]``, which breaks on platforms where
        wc left-pads the count and needlessly depends on a POSIX shell.
        The count is now done in-process with buffered binary reads.
    """
    if file_name is None:
        return 0
    line_number = 0
    with open(file_name, "rb") as f_in:
        # 1 MiB chunks keep memory bounded for arbitrarily large files
        for chunk in iter(lambda: f_in.read(1 << 20), b""):
            line_number += chunk.count(b"\n")
    return line_number
164,928 | import collections
import copy
import json
import math
import re
import numpy as np
import six
import tensorflow as tf
def gelu(x):
    """Gaussian Error Linear Unit (tanh approximation).

    A smoother alternative to ReLU; see the original paper:
    https://arxiv.org/abs/1606.08415

    Args:
        x: float Tensor to activate.

    Returns:
        Tensor of the same shape with GELU applied elementwise.
    """
    inner = np.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3))
    gaussian_cdf = 0.5 * (1.0 + tf.tanh(inner))
    return x * gaussian_cdf
The provided code snippet includes necessary dependencies for implementing the `get_activation` function. Write a Python function `def get_activation(activation_string)` to solve the following problem:
Maps a string to a Python function, e.g., "relu" => `tf.nn.relu`. Args: activation_string: String name of the activation function. Returns: A Python function corresponding to the activation function. If `activation_string` is None, empty, or "linear", this will return None. If `activation_string` is not a string, it will return `activation_string`. Raises: ValueError: The `activation_string` does not correspond to a known activation.
Here is the function:
def get_activation(activation_string):
    """Map an activation name to the corresponding function.

    Args:
        activation_string: name of the activation ("linear", "relu",
            "gelu" or "tanh"), case-insensitive.  If it is not a string
            it is assumed to already be an activation callable and is
            returned unchanged.

    Returns:
        The activation function, or None for None / "" / "linear".

    Raises:
        ValueError: for an unknown activation name.
    """
    # Anything that is not a string is assumed to already be an activation
    # function.  isinstance(..., str) replaces the former
    # six.string_types check — equivalent under Python 3, and drops the
    # unnecessary six dependency.
    if not isinstance(activation_string, str):
        return activation_string
    if not activation_string:
        return None
    act = activation_string.lower()
    if act == "linear":
        return None
    if act == "relu":
        return tf.nn.relu
    if act == "gelu":
        return gelu
    if act == "tanh":
        return tf.tanh
    raise ValueError("Unsupported activation: %s" % act)
164,929 | import collections
import copy
import json
import math
import re
import numpy as np
import six
import tensorflow as tf
def create_initializer(initializer_range=0.02):
    """Creates a `truncated_normal_initializer` with the given range.

    Args:
        initializer_range: float stddev of the truncated normal.

    Returns:
        A TF initializer suitable for ``tf.get_variable``.
    """
    return tf.truncated_normal_initializer(stddev=initializer_range)
def get_shape_list(tensor, expected_rank=None, name=None):
    """Return the shape of ``tensor``, preferring static dimensions.

    Dimensions known at graph-build time come back as Python ints; any
    unknown dimension is replaced with the matching ``tf.shape`` scalar
    so the result is always usable for reshapes.

    Args:
        tensor: tf.Tensor whose shape is wanted.
        expected_rank: optional int; when given, the tensor's rank is
            validated via ``assert_rank`` (defined elsewhere) first.
        name: tensor name for error messages (defaults to ``tensor.name``).

    Returns:
        List mixing Python ints (static dims) and scalar Tensors
        (dynamic dims).
    """
    if name is None:
        name = tensor.name
    if expected_rank is not None:
        assert_rank(tensor, expected_rank, name)
    shape = tensor.shape.as_list()
    dynamic_positions = [idx for idx, dim in enumerate(shape) if dim is None]
    if not dynamic_positions:
        # fully static shape — no graph ops needed
        return shape
    runtime_shape = tf.shape(tensor)
    for idx in dynamic_positions:
        shape[idx] = runtime_shape[idx]
    return shape
The provided code snippet includes necessary dependencies for implementing the `embedding_lookup` function. Write a Python function `def embedding_lookup(input_ids, vocab_size, embedding_size=128, initializer_range=0.02, word_embedding_name="word_embeddings", use_one_hot_embeddings=False)` to solve the following problem:
Looks up words embeddings for id tensor. Args: input_ids: int32 Tensor of shape [batch_size, seq_length] containing word ids. vocab_size: int. Size of the embedding vocabulary. embedding_size: int. Width of the word embeddings. initializer_range: float. Embedding initialization range. word_embedding_name: string. Name of the embedding table. use_one_hot_embeddings: bool. If True, use one-hot method for word embeddings. If False, use `tf.gather()`. Returns: float Tensor of shape [batch_size, seq_length, embedding_size].
Here is the function:
def embedding_lookup(input_ids,
                     vocab_size,
                     embedding_size=128,
                     initializer_range=0.02,
                     word_embedding_name="word_embeddings",
                     use_one_hot_embeddings=False):
    """Looks up words embeddings for id tensor.

    Args:
        input_ids: int32 Tensor of shape [batch_size, seq_length]
            containing word ids.
        vocab_size: int. Size of the embedding vocabulary.
        embedding_size: int. Width of the word embeddings.
        initializer_range: float. Embedding initialization range.
        word_embedding_name: string. Name of the embedding table.
        use_one_hot_embeddings: bool. If True, use one-hot method for
            word embeddings (faster on TPU). If False, use `tf.gather()`.

    Returns:
        (output, embedding_table): float Tensor of shape
        [batch_size, seq_length, embedding_size] and the table variable.
    """
    # This function assumes that the input is of shape [batch_size,
    # seq_length, num_inputs].  If the input is a 2D tensor of shape
    # [batch_size, seq_length], we reshape to [batch_size, seq_length, 1].
    if input_ids.shape.ndims == 2:
        input_ids = tf.expand_dims(input_ids, axis=[-1])
    embedding_table = tf.get_variable(
        name=word_embedding_name,
        shape=[vocab_size, embedding_size],
        initializer=create_initializer(initializer_range))
    # flatten all leading dims so a single gather/matmul handles the lookup
    flat_input_ids = tf.reshape(input_ids, [-1])
    if use_one_hot_embeddings:
        one_hot_input_ids = tf.one_hot(flat_input_ids, depth=vocab_size)
        output = tf.matmul(one_hot_input_ids, embedding_table)
    else:
        output = tf.gather(embedding_table, flat_input_ids)
    input_shape = get_shape_list(input_ids)
    # restore leading dims; the last axis fuses num_inputs * embedding_size
    output = tf.reshape(output,
                        input_shape[0:-1] + [input_shape[-1] * embedding_size])
    return (output, embedding_table)
164,930 | import collections
import copy
import json
import math
import re
import numpy as np
import six
import tensorflow as tf
def layer_norm_and_dropout(input_tensor, dropout_prob, name=None):
    """Apply layer normalization, then dropout, in that order.

    Args:
        input_tensor: float Tensor to normalize.
        dropout_prob: probability of dropping an element.
        name: optional variable-scope name forwarded to ``layer_norm``.

    Returns:
        The normalized-and-dropped-out tensor.
    """
    normalized = layer_norm(input_tensor, name)
    return dropout(normalized, dropout_prob)
def create_initializer(initializer_range=0.02):
    """Creates a `truncated_normal_initializer` with the given range.

    Args:
        initializer_range: float stddev of the truncated normal.

    Returns:
        A TF initializer suitable for ``tf.get_variable``.
    """
    return tf.truncated_normal_initializer(stddev=initializer_range)
def get_shape_list(tensor, expected_rank=None, name=None):
    """Returns a list of the shape of tensor, preferring static dimensions.

    Args:
        tensor: A tf.Tensor object to find the shape of.
        expected_rank: (optional) int. The expected rank of `tensor`. If
            this is specified and the `tensor` has a different rank, an
            exception will be thrown.
        name: Optional name of the tensor for the error message.

    Returns:
        A list of dimensions of the shape of tensor. All static dimensions
        will be returned as python integers, and dynamic dimensions will
        be returned as tf.Tensor scalars.
    """
    if name is None:
        name = tensor.name
    if expected_rank is not None:
        # assert_rank is defined elsewhere in this module
        assert_rank(tensor, expected_rank, name)
    shape = tensor.shape.as_list()
    # collect the positions whose size is unknown at graph-build time
    non_static_indexes = []
    for (index, dim) in enumerate(shape):
        if dim is None:
            non_static_indexes.append(index)
    if not non_static_indexes:
        # fully static shape: plain Python ints, no graph ops needed
        return shape
    # fill each unknown dimension with the runtime scalar from tf.shape
    dyn_shape = tf.shape(tensor)
    for index in non_static_indexes:
        shape[index] = dyn_shape[index]
    return shape
The provided code snippet includes necessary dependencies for implementing the `embedding_postprocessor` function. Write a Python function `def embedding_postprocessor(input_tensor, use_token_type=False, token_type_ids=None, token_type_vocab_size=16, token_type_embedding_name="token_type_embeddings", use_position_embeddings=True, position_embedding_name="position_embeddings", initializer_range=0.02, max_position_embeddings=512, dropout_prob=0.1)` to solve the following problem:
Performs various post-processing on a word embedding tensor. Args: input_tensor: float Tensor of shape [batch_size, seq_length, embedding_size]. use_token_type: bool. Whether to add embeddings for `token_type_ids`. token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length]. Must be specified if `use_token_type` is True. token_type_vocab_size: int. The vocabulary size of `token_type_ids`. token_type_embedding_name: string. The name of the embedding table variable for token type ids. use_position_embeddings: bool. Whether to add position embeddings for the position of each token in the sequence. position_embedding_name: string. The name of the embedding table variable for positional embeddings. initializer_range: float. Range of the weight initialization. max_position_embeddings: int. Maximum sequence length that might ever be used with this model. This can be longer than the sequence length of input_tensor, but cannot be shorter. dropout_prob: float. Dropout probability applied to the final output tensor. Returns: float tensor with same shape as `input_tensor`. Raises: ValueError: One of the tensor shapes or input values is invalid.
Here is the function:
def embedding_postprocessor(input_tensor,
                            use_token_type=False,
                            token_type_ids=None,
                            token_type_vocab_size=16,
                            token_type_embedding_name="token_type_embeddings",
                            use_position_embeddings=True,
                            position_embedding_name="position_embeddings",
                            initializer_range=0.02,
                            max_position_embeddings=512,
                            dropout_prob=0.1):
  """Performs various post-processing on a word embedding tensor.

  Adds (optionally) token-type embeddings and learned absolute position
  embeddings to the word embeddings, then applies layer norm + dropout.
  NOTE: uses TF1 graph-mode APIs (tf.get_variable, tf.control_dependencies).

  Args:
    input_tensor: float Tensor of shape [batch_size, seq_length,
      embedding_size].
    use_token_type: bool. Whether to add embeddings for `token_type_ids`.
    token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length].
      Must be specified if `use_token_type` is True.
    token_type_vocab_size: int. The vocabulary size of `token_type_ids`.
    token_type_embedding_name: string. The name of the embedding table variable
      for token type ids.
    use_position_embeddings: bool. Whether to add position embeddings for the
      position of each token in the sequence.
    position_embedding_name: string. The name of the embedding table variable
      for positional embeddings.
    initializer_range: float. Range of the weight initialization.
    max_position_embeddings: int. Maximum sequence length that might ever be
      used with this model. This can be longer than the sequence length of
      input_tensor, but cannot be shorter.
    dropout_prob: float. Dropout probability applied to the final output tensor.

  Returns:
    float tensor with same shape as `input_tensor`.

  Raises:
    ValueError: One of the tensor shapes or input values is invalid.
  """
  input_shape = get_shape_list(input_tensor, expected_rank=3)
  batch_size = input_shape[0]
  seq_length = input_shape[1]
  width = input_shape[2]

  # Embedding additions below accumulate onto the word embeddings.
  output = input_tensor

  if use_token_type:
    if token_type_ids is None:
      raise ValueError("`token_type_ids` must be specified if"
                       "`use_token_type` is True.")
    token_type_table = tf.get_variable(
        name=token_type_embedding_name,
        shape=[token_type_vocab_size, width],
        initializer=create_initializer(initializer_range))
    # This vocab will be small so we always do one-hot here, since it is always
    # faster for a small vocabulary.
    flat_token_type_ids = tf.reshape(token_type_ids, [-1])
    one_hot_ids = tf.one_hot(flat_token_type_ids, depth=token_type_vocab_size)
    token_type_embeddings = tf.matmul(one_hot_ids, token_type_table)
    token_type_embeddings = tf.reshape(token_type_embeddings,
                                       [batch_size, seq_length, width])
    output += token_type_embeddings

  if use_position_embeddings:
    # Graph-mode runtime check that the sequence fits the position table.
    assert_op = tf.assert_less_equal(seq_length, max_position_embeddings)
    with tf.control_dependencies([assert_op]):
      full_position_embeddings = tf.get_variable(
          name=position_embedding_name,
          shape=[max_position_embeddings, width],
          initializer=create_initializer(initializer_range))
      # Since the position embedding table is a learned variable, we create it
      # using a (long) sequence length `max_position_embeddings`. The actual
      # sequence length might be shorter than this, for faster training of
      # tasks that do not have long sequences.
      #
      # So `full_position_embeddings` is effectively an embedding table
      # for position [0, 1, 2, ..., max_position_embeddings-1], and the current
      # sequence has positions [0, 1, 2, ... seq_length-1], so we can just
      # perform a slice.
      position_embeddings = tf.slice(full_position_embeddings, [0, 0],
                                     [seq_length, -1])
      num_dims = len(output.shape.as_list())

      # Only the last two dimensions are relevant (`seq_length` and `width`), so
      # we broadcast among the first dimensions, which is typically just
      # the batch size.
      position_broadcast_shape = []
      for _ in range(num_dims - 2):
        position_broadcast_shape.append(1)
      position_broadcast_shape.extend([seq_length, width])
      position_embeddings = tf.reshape(position_embeddings,
                                       position_broadcast_shape)
      output += position_embeddings

  # `layer_norm_and_dropout` is defined elsewhere in this file.
  output = layer_norm_and_dropout(output, dropout_prob)
  return output
164,931 | import collections
import copy
import json
import math
import re
import numpy as np
import six
import tensorflow as tf
def get_shape_list(tensor, expected_rank=None, name=None):
  """Return the shape of `tensor` as a Python list, preferring static dims.

  Args:
    tensor: A tf.Tensor object to find the shape of.
    expected_rank: (optional) int. If given and the tensor's rank differs,
      `assert_rank` raises.
    name: Optional name of the tensor for the error message.

  Returns:
    A list of dimensions: static dimensions as Python ints, dynamic
    (unknown) dimensions as scalar tf.Tensor values.
  """
  if name is None:
    name = tensor.name
  if expected_rank is not None:
    assert_rank(tensor, expected_rank, name)

  static_shape = tensor.shape.as_list()
  dynamic_positions = [idx for idx, dim in enumerate(static_shape)
                       if dim is None]
  if not dynamic_positions:
    # Fully static shape: no graph ops needed.
    return static_shape

  runtime_shape = tf.shape(tensor)
  for idx in dynamic_positions:
    static_shape[idx] = runtime_shape[idx]
  return static_shape
The provided code snippet includes necessary dependencies for implementing the `create_attention_mask_from_input_mask` function. Write a Python function `def create_attention_mask_from_input_mask(from_tensor, to_mask)` to solve the following problem:
Create 3D attention mask from a 2D tensor mask. Args: from_tensor: 2D or 3D Tensor of shape [batch_size, from_seq_length, ...]. to_mask: int32 Tensor of shape [batch_size, to_seq_length]. Returns: float Tensor of shape [batch_size, from_seq_length, to_seq_length].
Here is the function:
def create_attention_mask_from_input_mask(from_tensor, to_mask):
  """Create 3D attention mask from a 2D tensor mask.

  NOTE: unlike stock BERT (which broadcasts ones over the `from` axis),
  this variant takes the outer product of the padding mask with itself,
  so attention *from* padding positions is masked as well.

  Args:
    from_tensor: 2D or 3D Tensor of shape [batch_size, from_seq_length, ...].
    to_mask: int32 Tensor of shape [batch_size, to_seq_length].

  Returns:
    float Tensor of shape [batch_size, to_seq_length, to_seq_length].
  """
  # Rank check on `from_tensor` (its length is otherwise unused here).
  from_shape = get_shape_list(from_tensor, expected_rank=[2, 3])
  batch_size = from_shape[0]
  from_seq_length = from_shape[1]

  to_seq_length = get_shape_list(to_mask, expected_rank=2)[1]

  mask_as_column = tf.cast(
      tf.reshape(to_mask, [batch_size, to_seq_length, 1]), tf.float32)
  mask_as_row = tf.cast(
      tf.reshape(to_mask, [batch_size, 1, to_seq_length]), tf.float32)

  # Outer product: entry (i, j) is 1 only when both i and j are real tokens.
  mask = tf.matmul(mask_as_column, mask_as_row)
  return mask
164,932 | import collections
import copy
import json
import math
import re
import numpy as np
import six
import tensorflow as tf
def gelu(x):
  """Gaussian Error Linear Unit (tanh approximation).

  A smoother alternative to ReLU. Original paper:
  https://arxiv.org/abs/1606.08415

  Args:
    x: float Tensor to perform activation.

  Returns:
    `x` with the GELU activation applied.
  """
  inner = np.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3))
  return x * (0.5 * (1.0 + tf.tanh(inner)))
def dropout(input_tensor, dropout_prob):
  """Apply dropout, treating `dropout_prob` as the *drop* probability.

  Args:
    input_tensor: float Tensor.
    dropout_prob: Python float. Probability of dropping a value (NOT the
      keep probability used by `tf.nn.dropout` in TF1).

  Returns:
    `input_tensor` unchanged when dropout is disabled, otherwise a
    dropped-out copy.
  """
  if dropout_prob is None or dropout_prob == 0.0:
    # No-op fast path: return the input untouched.
    return input_tensor
  return tf.nn.dropout(input_tensor, 1.0 - dropout_prob)
def layer_norm(input_tensor, name=None):
  """Layer-normalize `input_tensor` over its last dimension (TF1 contrib)."""
  normalized = tf.contrib.layers.layer_norm(
      inputs=input_tensor,
      begin_norm_axis=-1,
      begin_params_axis=-1,
      scope=name)
  return normalized
def create_initializer(initializer_range=0.02):
  """Return a truncated-normal initializer with stddev `initializer_range`."""
  return tf.truncated_normal_initializer(stddev=initializer_range)
def attention_layer(from_tensor,
                    to_tensor,
                    attention_mask=None,
                    num_attention_heads=1,
                    size_per_head=512,
                    query_act=None,
                    key_act=None,
                    value_act=None,
                    attention_probs_dropout_prob=0.0,
                    initializer_range=0.02,
                    do_return_2d_tensor=False,
                    batch_size=None,
                    from_seq_length=None,
                    to_seq_length=None):
  """Performs multi-headed attention from `from_tensor` to `to_tensor`.

  This is an implementation of multi-headed attention based on "Attention
  is all you Need". If `from_tensor` and `to_tensor` are the same, then
  this is self-attention. Each timestep in `from_tensor` attends to the
  corresponding sequence in `to_tensor`, and returns a fixed-width vector.

  This function first projects `from_tensor` into a "query" tensor and
  `to_tensor` into "key" and "value" tensors. These are (effectively) a list
  of tensors of length `num_attention_heads`, where each tensor is of shape
  [batch_size, seq_length, size_per_head].

  Then, the query and key tensors are dot-producted and scaled. These are
  softmaxed to obtain attention probabilities. The value tensors are then
  interpolated by these probabilities, then concatenated back to a single
  tensor and returned.

  In practice, the multi-headed attention are done with transposes and
  reshapes rather than actual separate tensors.

  Args:
    from_tensor: float Tensor of shape [batch_size, from_seq_length,
      from_width].
    to_tensor: float Tensor of shape [batch_size, to_seq_length, to_width].
    attention_mask: (optional) int32 Tensor of shape [batch_size,
      from_seq_length, to_seq_length]. The values should be 1 or 0. The
      attention scores will effectively be set to -infinity for any positions in
      the mask that are 0, and will be unchanged for positions that are 1.
    num_attention_heads: int. Number of attention heads.
    size_per_head: int. Size of each attention head.
    query_act: (optional) Activation function for the query transform.
    key_act: (optional) Activation function for the key transform.
    value_act: (optional) Activation function for the value transform.
    attention_probs_dropout_prob: (optional) float. Dropout probability of the
      attention probabilities.
    initializer_range: float. Range of the weight initializer.
    do_return_2d_tensor: bool. If True, the output will be of shape [batch_size
      * from_seq_length, num_attention_heads * size_per_head]. If False, the
      output will be of shape [batch_size, from_seq_length, num_attention_heads
      * size_per_head].
    batch_size: (Optional) int. If the input is 2D, this might be the batch size
      of the 3D version of the `from_tensor` and `to_tensor`.
    from_seq_length: (Optional) If the input is 2D, this might be the seq length
      of the 3D version of the `from_tensor`.
    to_seq_length: (Optional) If the input is 2D, this might be the seq length
      of the 3D version of the `to_tensor`.

  Returns:
    float Tensor of shape [batch_size, from_seq_length,
      num_attention_heads * size_per_head]. (If `do_return_2d_tensor` is
      true, this will be of shape [batch_size * from_seq_length,
      num_attention_heads * size_per_head]).

  Raises:
    ValueError: Any of the arguments or tensor shapes are invalid.
  """

  def transpose_for_scores(input_tensor, batch_size, num_attention_heads,
                           seq_length, width):
    # [B*S, N*H] -> [B, S, N, H] -> [B, N, S, H] so heads become a batch dim.
    output_tensor = tf.reshape(
        input_tensor, [batch_size, seq_length, num_attention_heads, width])
    output_tensor = tf.transpose(output_tensor, [0, 2, 1, 3])
    return output_tensor

  from_shape = get_shape_list(from_tensor, expected_rank=[2, 3])
  to_shape = get_shape_list(to_tensor, expected_rank=[2, 3])

  if len(from_shape) != len(to_shape):
    raise ValueError(
        "The rank of `from_tensor` must match the rank of `to_tensor`.")

  if len(from_shape) == 3:
    batch_size = from_shape[0]
    from_seq_length = from_shape[1]
    to_seq_length = to_shape[1]
  elif len(from_shape) == 2:
    # With 2D inputs the batch/sequence structure is not recoverable from the
    # tensors themselves, so the caller must supply it explicitly.
    if (batch_size is None or from_seq_length is None or to_seq_length is None):
      raise ValueError(
          "When passing in rank 2 tensors to attention_layer, the values "
          "for `batch_size`, `from_seq_length`, and `to_seq_length` "
          "must all be specified.")

  # Scalar dimensions referenced here:
  #   B = batch size (number of sequences)
  #   F = `from_tensor` sequence length
  #   T = `to_tensor` sequence length
  #   N = `num_attention_heads`
  #   H = `size_per_head`

  from_tensor_2d = reshape_to_matrix(from_tensor)
  to_tensor_2d = reshape_to_matrix(to_tensor)

  # `query_layer` = [B*F, N*H]
  query_layer = tf.layers.dense(
      from_tensor_2d,
      num_attention_heads * size_per_head,
      activation=query_act,
      name="query",
      kernel_initializer=create_initializer(initializer_range))

  # `key_layer` = [B*T, N*H]
  key_layer = tf.layers.dense(
      to_tensor_2d,
      num_attention_heads * size_per_head,
      activation=key_act,
      name="key",
      kernel_initializer=create_initializer(initializer_range))

  # `value_layer` = [B*T, N*H]
  value_layer = tf.layers.dense(
      to_tensor_2d,
      num_attention_heads * size_per_head,
      activation=value_act,
      name="value",
      kernel_initializer=create_initializer(initializer_range))

  # `query_layer` = [B, N, F, H]
  query_layer = transpose_for_scores(query_layer, batch_size,
                                     num_attention_heads, from_seq_length,
                                     size_per_head)

  # `key_layer` = [B, N, T, H]
  key_layer = transpose_for_scores(key_layer, batch_size, num_attention_heads,
                                   to_seq_length, size_per_head)

  # Take the dot product between "query" and "key" to get the raw
  # attention scores.
  # `attention_scores` = [B, N, F, T]
  attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)
  # Scale by 1/sqrt(H) as in "Attention is All You Need".
  attention_scores = tf.multiply(attention_scores,
                                 1.0 / math.sqrt(float(size_per_head)))

  if attention_mask is not None:
    # `attention_mask` = [B, 1, F, T] (broadcast over the head dimension)
    attention_mask = tf.expand_dims(attention_mask, axis=[1])

    # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
    # masked positions, this operation will create a tensor which is 0.0 for
    # positions we want to attend and -10000.0 for masked positions.
    adder = (1.0 - tf.cast(attention_mask, tf.float32)) * -10000.0

    # Since we are adding it to the raw scores before the softmax, this is
    # effectively the same as removing these entirely.
    attention_scores += adder

  # Normalize the attention scores to probabilities.
  # `attention_probs` = [B, N, F, T]
  attention_probs = tf.nn.softmax(attention_scores)

  # This is actually dropping out entire tokens to attend to, which might
  # seem a bit unusual, but is taken from the original Transformer paper.
  attention_probs = dropout(attention_probs, attention_probs_dropout_prob)

  # `value_layer` = [B, T, N, H]
  value_layer = tf.reshape(
      value_layer,
      [batch_size, to_seq_length, num_attention_heads, size_per_head])

  # `value_layer` = [B, N, T, H]
  value_layer = tf.transpose(value_layer, [0, 2, 1, 3])

  # `context_layer` = [B, N, F, H]
  context_layer = tf.matmul(attention_probs, value_layer)

  # `context_layer` = [B, F, N, H]
  context_layer = tf.transpose(context_layer, [0, 2, 1, 3])

  if do_return_2d_tensor:
    # `context_layer` = [B*F, N*H]
    context_layer = tf.reshape(
        context_layer,
        [batch_size * from_seq_length, num_attention_heads * size_per_head])
  else:
    # `context_layer` = [B, F, N*H]
    context_layer = tf.reshape(
        context_layer,
        [batch_size, from_seq_length, num_attention_heads * size_per_head])

  return context_layer
def get_shape_list(tensor, expected_rank=None, name=None):
  """Return `tensor`'s shape as a list, with static dims as Python ints.

  Args:
    tensor: A tf.Tensor object to find the shape of.
    expected_rank: (optional) int. If provided, `assert_rank` verifies the
      tensor has this rank and raises otherwise.
    name: Optional name of the tensor for the error message.

  Returns:
    A list of dimensions; unknown dimensions are replaced by scalar
    tf.Tensor values from `tf.shape(tensor)`.
  """
  if name is None:
    name = tensor.name
  if expected_rank is not None:
    assert_rank(tensor, expected_rank, name)

  shape = tensor.shape.as_list()
  if all(dim is not None for dim in shape):
    # Entirely static shape; avoid emitting a tf.shape op.
    return shape

  dyn_shape = tf.shape(tensor)
  return [dim if dim is not None else dyn_shape[idx]
          for idx, dim in enumerate(shape)]
def reshape_to_matrix(input_tensor):
  """Collapse a rank >= 2 tensor to rank 2 (a matrix), keeping the last dim.

  Raises:
    ValueError: if `input_tensor` has rank < 2.
  """
  rank = input_tensor.shape.ndims
  if rank < 2:
    raise ValueError("Input tensor must have at least rank 2. Shape = %s" %
                     (input_tensor.shape))
  if rank == 2:
    # Already a matrix; return as-is.
    return input_tensor

  last_dim = input_tensor.shape[-1]
  return tf.reshape(input_tensor, [-1, last_dim])
def reshape_from_matrix(output_tensor, orig_shape_list):
  """Inverse of `reshape_to_matrix`: restore the original rank >= 2 shape."""
  if len(orig_shape_list) == 2:
    # Original was already a matrix; nothing to undo.
    return output_tensor

  width = get_shape_list(output_tensor)[-1]
  return tf.reshape(output_tensor, orig_shape_list[0:-1] + [width])
The provided code snippet includes necessary dependencies for implementing the `transformer_model` function. Write a Python function `def transformer_model(input_tensor, attention_mask=None, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, intermediate_act_fn=gelu, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, initializer_range=0.02, do_return_all_layers=False)` to solve the following problem:
Multi-headed, multi-layer Transformer from "Attention is All You Need". This is almost an exact implementation of the original Transformer encoder. See the original paper: https://arxiv.org/abs/1706.03762 Also see: https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/models/transformer.py Args: input_tensor: float Tensor of shape [batch_size, seq_length, hidden_size]. attention_mask: (optional) int32 Tensor of shape [batch_size, seq_length, seq_length], with 1 for positions that can be attended to and 0 in positions that should not be. hidden_size: int. Hidden size of the Transformer. num_hidden_layers: int. Number of layers (blocks) in the Transformer. num_attention_heads: int. Number of attention heads in the Transformer. intermediate_size: int. The size of the "intermediate" (a.k.a., feed forward) layer. intermediate_act_fn: function. The non-linear activation function to apply to the output of the intermediate/feed-forward layer. hidden_dropout_prob: float. Dropout probability for the hidden layers. attention_probs_dropout_prob: float. Dropout probability of the attention probabilities. initializer_range: float. Range of the initializer (stddev of truncated normal). do_return_all_layers: Whether to also return all layers or just the final layer. Returns: float Tensor of shape [batch_size, seq_length, hidden_size], the final hidden layer of the Transformer. Raises: ValueError: A Tensor shape or parameter is invalid.
Here is the function:
def transformer_model(input_tensor,
                      attention_mask=None,
                      hidden_size=768,
                      num_hidden_layers=12,
                      num_attention_heads=12,
                      intermediate_size=3072,
                      intermediate_act_fn=gelu,
                      hidden_dropout_prob=0.1,
                      attention_probs_dropout_prob=0.1,
                      initializer_range=0.02,
                      do_return_all_layers=False):
  """Multi-headed, multi-layer Transformer from "Attention is All You Need".

  This is almost an exact implementation of the original Transformer encoder.

  See the original paper:
  https://arxiv.org/abs/1706.03762

  Also see:
  https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/models/transformer.py

  Args:
    input_tensor: float Tensor of shape [batch_size, seq_length, hidden_size].
    attention_mask: (optional) int32 Tensor of shape [batch_size, seq_length,
      seq_length], with 1 for positions that can be attended to and 0 in
      positions that should not be.
    hidden_size: int. Hidden size of the Transformer.
    num_hidden_layers: int. Number of layers (blocks) in the Transformer.
    num_attention_heads: int. Number of attention heads in the Transformer.
    intermediate_size: int. The size of the "intermediate" (a.k.a., feed
      forward) layer.
    intermediate_act_fn: function. The non-linear activation function to apply
      to the output of the intermediate/feed-forward layer.
    hidden_dropout_prob: float. Dropout probability for the hidden layers.
    attention_probs_dropout_prob: float. Dropout probability of the attention
      probabilities.
    initializer_range: float. Range of the initializer (stddev of truncated
      normal).
    do_return_all_layers: Whether to also return all layers or just the final
      layer.

  Returns:
    float Tensor of shape [batch_size, seq_length, hidden_size], the final
    hidden layer of the Transformer (or a list of all layers' outputs when
    `do_return_all_layers` is True).

  Raises:
    ValueError: A Tensor shape or parameter is invalid.
  """
  if hidden_size % num_attention_heads != 0:
    raise ValueError(
        "The hidden size (%d) is not a multiple of the number of attention "
        "heads (%d)" % (hidden_size, num_attention_heads))

  attention_head_size = int(hidden_size / num_attention_heads)
  input_shape = get_shape_list(input_tensor, expected_rank=3)
  batch_size = input_shape[0]
  seq_length = input_shape[1]
  input_width = input_shape[2]

  # The Transformer performs sum residuals on all layers so the input needs
  # to be the same as the hidden size.
  if input_width != hidden_size:
    raise ValueError("The width of the input tensor (%d) != hidden size (%d)" %
                     (input_width, hidden_size))

  # We keep the representation as a 2D tensor to avoid re-shaping it back and
  # forth from a 3D tensor to a 2D tensor. Re-shapes are normally free on
  # the GPU/CPU but may not be free on the TPU, so we want to minimize them to
  # help the optimizer.
  prev_output = reshape_to_matrix(input_tensor)

  all_layer_outputs = []
  for layer_idx in range(num_hidden_layers):
    # Each layer gets its own variable scope so weights are not shared.
    with tf.variable_scope("layer_%d" % layer_idx):
      layer_input = prev_output

      with tf.variable_scope("attention"):
        attention_heads = []
        with tf.variable_scope("self"):
          attention_head = attention_layer(
              from_tensor=layer_input,
              to_tensor=layer_input,
              attention_mask=attention_mask,
              num_attention_heads=num_attention_heads,
              size_per_head=attention_head_size,
              attention_probs_dropout_prob=attention_probs_dropout_prob,
              initializer_range=initializer_range,
              do_return_2d_tensor=True,
              batch_size=batch_size,
              from_seq_length=seq_length,
              to_seq_length=seq_length)
          attention_heads.append(attention_head)

        attention_output = None
        if len(attention_heads) == 1:
          attention_output = attention_heads[0]
        else:
          # In the case where we have other sequences, we just concatenate
          # them to the self-attention head before the projection.
          attention_output = tf.concat(attention_heads, axis=-1)

        # Run a linear projection of `hidden_size` then add a residual
        # with `layer_input`.
        with tf.variable_scope("output"):
          attention_output = tf.layers.dense(
              attention_output,
              hidden_size,
              kernel_initializer=create_initializer(initializer_range))
          attention_output = dropout(attention_output, hidden_dropout_prob)
          attention_output = layer_norm(attention_output + layer_input)

      # The activation is only applied to the "intermediate" hidden layer.
      with tf.variable_scope("intermediate"):
        intermediate_output = tf.layers.dense(
            attention_output,
            intermediate_size,
            activation=intermediate_act_fn,
            kernel_initializer=create_initializer(initializer_range))

      # Down-project back to `hidden_size` then add the residual.
      with tf.variable_scope("output"):
        layer_output = tf.layers.dense(
            intermediate_output,
            hidden_size,
            kernel_initializer=create_initializer(initializer_range))
        layer_output = dropout(layer_output, hidden_dropout_prob)
        layer_output = layer_norm(layer_output + attention_output)
        prev_output = layer_output
        all_layer_outputs.append(layer_output)

  if do_return_all_layers:
    final_outputs = []
    for layer_output in all_layer_outputs:
      final_output = reshape_from_matrix(layer_output, input_shape)
      final_outputs.append(final_output)
    return final_outputs
  else:
    final_output = reshape_from_matrix(prev_output, input_shape)
    return final_output
164,933 | import tensorflow as tf
from model import operations
The provided code snippet includes necessary dependencies for implementing the `attentive_pooling` function. Write a Python function `def attentive_pooling(inputs, attention_size, sequence_mask=None, return_alphas=False, scope="", temperature=1.0)` to solve the following problem:
Attention mechanism layer which reduces RNN/Bi-RNN outputs with Attention vector.
Here is the function:
def attentive_pooling(inputs, attention_size, sequence_mask=None, return_alphas=False, scope="", temperature=1.0):
    """Attention-based pooling of RNN/Bi-RNN outputs into a single vector.

    Projects each timestep through a tanh layer, scores it against a learned
    context vector `u`, softmaxes the (optionally masked, temperature-scaled)
    scores, and returns the weighted sum over time.
    """
    if isinstance(inputs, tuple):
        # Bi-RNN case: merge forward and backward outputs by summation.
        inputs = inputs[0] + inputs[1]

    static_shape = inputs.shape  # (batch_size, seq_len, hidden_size)
    seq_len = static_shape[1].value
    hidden_size = static_shape[2].value

    # Attention parameters live in `scope`; AUTO_REUSE lets repeated calls share them.
    with tf.variable_scope(scope, reuse=tf.AUTO_REUSE) as _:
        w = tf.get_variable("w", shape=[hidden_size, attention_size],
                            initializer=tf.random_normal_initializer(mean=0.0, stddev=0.1))
        b = tf.get_variable("b", shape=[attention_size],
                            initializer=tf.random_normal_initializer(mean=0.0, stddev=0.1))
        u = tf.get_variable("u", shape=[attention_size],
                            initializer=tf.random_normal_initializer(mean=0.0, stddev=0.1))

        projected = tf.tanh(tf.matmul(tf.reshape(inputs, [-1, hidden_size]), w) + tf.reshape(b, [1, -1]))
        logits = tf.matmul(projected, tf.reshape(u, [-1, 1])) / temperature
        logits = tf.reshape(logits, [-1, seq_len])

        if sequence_mask is not None:
            neg_inf = tf.ones_like(logits) * (-2 ** 32 + 1)
            # Exact float comparison is fine here: the mask carries literal 0/1 values.
            logits = tf.where(sequence_mask > 0.0, logits, neg_inf)
        score = tf.nn.softmax(logits)

    output = tf.reduce_sum(inputs * tf.reshape(score, [-1, seq_len, 1]), 1)
    if not return_alphas:
        return output
    return output, score
164,934 | import tensorflow as tf
from model import operations
The provided code snippet includes necessary dependencies for implementing the `average_pooling` function. Write a Python function `def average_pooling(inputs, sequence_mask=None)` to solve the following problem:
Masked mean (average) pooling which reduces RNN/Bi-RNN outputs over the sequence dimension to a single vector.
Here is the function:
def average_pooling(inputs, sequence_mask=None):
    """Masked mean pooling of RNN/Bi-RNN outputs over the time dimension.

    NOTE(review): despite its former docstring, this is average pooling, not
    attention. The small epsilon guards against division by zero for
    all-padding rows.
    """
    if sequence_mask is None:
        sequence_mask = operations.get_mask_from_tensor(inputs)
    masked_sum = tf.reduce_sum(inputs * tf.expand_dims(sequence_mask, dim=2), axis=1)
    lengths = tf.expand_dims(tf.reduce_sum(sequence_mask, axis=1), dim=1)
    return masked_sum / (lengths + 1e-3)
164,935 | import tensorflow as tf
from model import bert as modeling
from util.bert import tokenization
The provided code snippet includes necessary dependencies for implementing the `create_model` function. Write a Python function `def create_model(bert_config, is_training, input_ids, input_mask, segment_ids, scope="", init_checkpoint=None, use_seq_feature=False)` to solve the following problem:
Creates a classification model.
Here is the function:
def create_model(bert_config, is_training, input_ids, input_mask, segment_ids, scope="", init_checkpoint=None,
                 use_seq_feature=False):
    """Build a BERT encoder and optionally restore its weights from a checkpoint.

    Returns, per encoder layer, either the [CLS] (first-token) vector
    (`use_seq_feature=False`) or the full sequence output.
    """
    model = modeling.BertModel(config=bert_config, is_training=is_training, input_ids=input_ids,
                               input_mask=input_mask, token_type_ids=segment_ids, use_one_hot_embeddings=False)

    if init_checkpoint is not None:
        # Admit both vanilla "bert/..." variables and re-scoped "<scope>/bert/..." variants.
        tvars = [var for var in tf.trainable_variables()
                 if var.name.startswith("bert") or var.name.startswith(scope + "/bert")]
        assignment_map, variable_name = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
        scope = scope + "/" if scope != "" else scope
        # Remap checkpoint names so they land under the target variable scope.
        tf.train.init_from_checkpoint(init_checkpoint,
                                      dict((k, scope + v) for k, v in assignment_map.items()))
        for var in tvars:
            init_string = ", *INIT_FROM_CKPT*" if var.name in variable_name else ""
            print("  name = %s, shape = %s%s" % (var.name, var.shape, init_string))

    # If you want the token-level output, use model.get_sequence_output() instead.
    if use_seq_feature is False:
        # [CLS] representation from every encoder layer.
        output_layer = [tf.squeeze(t[:, 0:1, :], axis=1) for t in model.get_all_encoder_layers()]
    else:
        output_layer = model.get_all_encoder_layers()
    return output_layer
164,936 | import tensorflow as tf
from model import bert as modeling
from util.bert import tokenization
def convert_single_example(ex_index, example, label_list, max_seq_length, tokenizer):
    """Converts a single `InputExample` into a single `InputFeatures`.

    Tokenizes text_a (and text_b, if present), truncates to fit
    `max_seq_length`, inserts the BERT special tokens, and zero-pads:

      pair:   [CLS] A... [SEP] B... [SEP]   segment_ids 0...0 1...1
      single: [CLS] A... [SEP]              segment_ids 0...0

    The [CLS] vector is what classification heads read; segment ids 0/1 pick
    the learned type embeddings for the first and second sequence.
    """
    tokens_a = tokenizer.tokenize(example.text_a)
    tokens_b = tokenizer.tokenize(example.text_b) if example.text_b else None

    if tokens_b:
        # Truncate the pair in place, reserving room for [CLS], [SEP], [SEP].
        _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
    elif len(tokens_a) > max_seq_length - 2:
        # Reserve room for [CLS] and [SEP].
        tokens_a = tokens_a[0:(max_seq_length - 2)]

    tokens = ["[CLS]"] + tokens_a + ["[SEP]"]
    segment_ids = [0] * len(tokens)
    if tokens_b:
        tokens = tokens + tokens_b + ["[SEP]"]
        segment_ids = segment_ids + [1] * (len(tokens_b) + 1)

    input_ids = tokenizer.convert_tokens_to_ids(tokens)
    # 1 marks real tokens, 0 marks padding; only real tokens are attended to.
    input_mask = [1] * len(input_ids)

    # Zero-pad all three sequences up to max_seq_length.
    pad_len = max_seq_length - len(input_ids)
    input_ids = input_ids + [0] * pad_len
    input_mask = input_mask + [0] * pad_len
    segment_ids = segment_ids + [0] * pad_len

    assert len(input_ids) == max_seq_length
    assert len(input_mask) == max_seq_length
    assert len(segment_ids) == max_seq_length

    return InputFeatures(guid=example.guid, input_ids=input_ids, input_mask=input_mask,
                         segment_ids=segment_ids, labels=example.label)
The provided code snippet includes necessary dependencies for implementing the `convert_examples_to_features` function. Write a Python function `def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)` to solve the following problem:
Convert a set of `InputExample`s to a list of `InputFeatures`.
Here is the function:
def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer):
    """Convert a set of `InputExample`s to a list of `InputFeatures`."""
    # Delegates all per-example work to convert_single_example; the index is
    # passed through for progress logging inside that helper.
    return [
        convert_single_example(idx, example, label_list, max_seq_length, tokenizer)
        for idx, example in enumerate(examples)
    ]
return features | Convert a set of `InputExample`s to a list of `InputFeatures`. |
164,937 | import re
import tensorflow as tf
class AdamWeightDecayOptimizer(tf.train.Optimizer):
    """A basic Adam optimizer that includes "correct" L2 weight decay."""

    def __init__(self,
                 learning_rate,
                 weight_decay_rate=0.0,
                 beta_1=0.9,
                 beta_2=0.999,
                 epsilon=1e-6,
                 exclude_from_weight_decay=None,
                 name="AdamWeightDecayOptimizer"):
        """Constructs a AdamWeightDecayOptimizer.

        Args:
          learning_rate: scalar or tensor learning rate applied to each update.
          weight_decay_rate: decoupled weight-decay coefficient (applied to the
            parameter directly, not folded into the gradient/loss).
          beta_1: exponential decay rate for the first-moment estimate.
          beta_2: exponential decay rate for the second-moment estimate.
          epsilon: small constant added to the denominator for stability.
          exclude_from_weight_decay: list of regex patterns; variables whose
            names match any pattern are not weight-decayed.
          name: optional name for the optimizer.
        """
        # First arg False == use_locking disabled (tf.train.Optimizer API).
        super(AdamWeightDecayOptimizer, self).__init__(False, name)
        self.learning_rate = learning_rate
        self.weight_decay_rate = weight_decay_rate
        self.beta_1 = beta_1
        self.beta_2 = beta_2
        self.epsilon = epsilon
        self.exclude_from_weight_decay = exclude_from_weight_decay

    def apply_gradients(self, grads_and_vars, global_step=None, name=None):
        """See base class."""
        assignments = []
        for (grad, param) in grads_and_vars:
            if grad is None or param is None:
                continue
            param_name = self._get_variable_name(param.name)
            # Non-trainable slot variables holding this parameter's Adam
            # first (m) and second (v) moment estimates.
            m = tf.get_variable(
                name=param_name + "/adam_m",
                shape=param.shape.as_list(),
                dtype=tf.float32,
                trainable=False,
                initializer=tf.zeros_initializer())
            v = tf.get_variable(
                name=param_name + "/adam_v",
                shape=param.shape.as_list(),
                dtype=tf.float32,
                trainable=False,
                initializer=tf.zeros_initializer())
            # Standard Adam update.
            next_m = (tf.multiply(self.beta_1, m) + tf.multiply(1.0 - self.beta_1, grad))
            next_v = (tf.multiply(self.beta_2, v) + tf.multiply(1.0 - self.beta_2, tf.square(grad)))
            update = next_m / (tf.sqrt(next_v) + self.epsilon)
            # Just adding the square of the weights to the loss function is *not*
            # the correct way of using L2 regularization/weight decay with Adam,
            # since that will interact with the m and v parameters in strange ways.
            #
            # Instead we want to decay the weights in a manner that doesn't interact
            # with the m/v parameters. This is equivalent to adding the square
            # of the weights to the loss with plain (non-momentum) SGD.
            if self._do_use_weight_decay(param_name):
                update += self.weight_decay_rate * param
            update_with_lr = self.learning_rate * update
            next_param = param - update_with_lr
            # NOTE(review): unlike the upstream BERT optimizer, this variant also
            # assigns global_step here; create_optimizer() increments it again,
            # so the step counter may advance twice per training step — confirm.
            next_step = global_step + 1
            assignments.extend([param.assign(next_param), m.assign(next_m), v.assign(next_v),
                                global_step.assign(next_step)])
        return tf.group(*assignments, name=name)

    def _do_use_weight_decay(self, param_name):
        """Whether to use L2 weight decay for `param_name`."""
        if not self.weight_decay_rate:
            return False
        if self.exclude_from_weight_decay:
            for r in self.exclude_from_weight_decay:
                # Any regex match (e.g. "LayerNorm", "bias") exempts the variable.
                if re.search(r, param_name) is not None:
                    return False
        return True

    def _get_variable_name(self, param_name):
        """Get the variable name from the tensor name."""
        # Strips the trailing ":0"-style output index from a tensor name.
        m = re.match("^(.*):\\d+$", param_name)
        if m is not None:
            param_name = m.group(1)
        return param_name
The provided code snippet includes necessary dependencies for implementing the `create_optimizer` function. Write a Python function `def create_optimizer(loss, init_lr, num_train_steps, num_warmup_steps, use_tpu)` to solve the following problem:
Creates an optimizer training op.
Here is the function:
def create_optimizer(loss, init_lr, num_train_steps, num_warmup_steps, use_tpu):
    """Creates an optimizer training op.

    Args:
      loss: scalar loss tensor to minimize.
      init_lr: peak learning rate reached at the end of warmup.
      num_train_steps: total number of training steps (decay horizon).
      num_warmup_steps: steps of linear warmup; 0/None disables warmup.
      use_tpu: if True, wraps the optimizer in a CrossShardOptimizer.

    Returns:
      A training op that applies one optimization step and advances the
      global step.
    """
    global_step = tf.train.get_or_create_global_step()
    learning_rate = tf.constant(value=init_lr, shape=[], dtype=tf.float32)
    # Implements linear decay of the learning rate.
    learning_rate = tf.train.polynomial_decay(
        learning_rate,
        global_step,
        num_train_steps,
        end_learning_rate=0.0,
        power=1.0,
        cycle=False)
    # Implements linear warmup. I.e., if global_step < num_warmup_steps, the
    # learning rate will be `global_step/num_warmup_steps * init_lr`.
    if num_warmup_steps:
        global_steps_int = tf.cast(global_step, tf.int32)
        warmup_steps_int = tf.constant(num_warmup_steps, dtype=tf.int32)
        global_steps_float = tf.cast(global_steps_int, tf.float32)
        warmup_steps_float = tf.cast(warmup_steps_int, tf.float32)
        warmup_percent_done = global_steps_float / warmup_steps_float
        warmup_learning_rate = init_lr * warmup_percent_done
        # Select warmup vs. decayed rate without a conditional op.
        is_warmup = tf.cast(global_steps_int < warmup_steps_int, tf.float32)
        learning_rate = (
            (1.0 - is_warmup) * learning_rate + is_warmup * warmup_learning_rate)
    # It is recommended that you use this optimizer for fine tuning, since this
    # is how the model was trained (note that the Adam m/v variables are NOT
    # loaded from init_checkpoint.)
    optimizer = AdamWeightDecayOptimizer(
        learning_rate=learning_rate,
        weight_decay_rate=0.01,
        beta_1=0.9,
        beta_2=0.999,
        epsilon=1e-6,
        exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"])
    if use_tpu:
        optimizer = tf.contrib.tpu.CrossShardOptimizer(optimizer)
    tvars = tf.trainable_variables()
    grads = tf.gradients(loss, tvars)
    # This is how the model was pre-trained.
    (grads, _) = tf.clip_by_global_norm(grads, clip_norm=1.0)
    train_op = optimizer.apply_gradients(
        list(zip(grads, tvars)), global_step=global_step)
    # Normally the global step update is done inside of `apply_gradients`.
    # However, `AdamWeightDecayOptimizer` doesn't do this. But if you use
    # a different optimizer, you should probably take this line out.
    # NOTE(review): in THIS file AdamWeightDecayOptimizer.apply_gradients DOES
    # assign global_step, so the counter likely advances twice per step — verify.
    new_global_step = global_step + 1
    train_op = tf.group(train_op, [global_step.assign(new_global_step)])
    return train_op
164,938 | import collections
import re
import unicodedata
import six
import tensorflow as tf
The provided code snippet includes necessary dependencies for implementing the `validate_case_matches_checkpoint` function. Write a Python function `def validate_case_matches_checkpoint(do_lower_case, init_checkpoint)` to solve the following problem:
Checks whether the casing config is consistent with the checkpoint name.
Here is the function:
def validate_case_matches_checkpoint(do_lower_case, init_checkpoint):
    """Checks whether the casing config is consistent with the checkpoint name."""
    # Casing isn't recorded in bert_config.json, so we heuristically infer it
    # from the checkpoint's directory name and compare against the user flag.
    if not init_checkpoint:
        return

    match = re.match("^.*?([A-Za-z0-9_-]+)/bert_model.ckpt", init_checkpoint)
    if match is None:
        return
    model_name = match.group(1)

    lower_models = [
        "uncased_L-24_H-1024_A-16", "uncased_L-12_H-768_A-12",
        "multilingual_L-12_H-768_A-12", "chinese_L-12_H-768_A-12"
    ]
    cased_models = [
        "cased_L-12_H-768_A-12", "cased_L-24_H-1024_A-16",
        "multi_cased_L-12_H-768_A-12"
    ]

    # A model name appears in at most one of the two lists, so a single
    # elif chain is equivalent to the pair of independent checks.
    if model_name in lower_models and not do_lower_case:
        actual_flag, case_name, opposite_flag = "False", "lowercased", "True"
    elif model_name in cased_models and do_lower_case:
        actual_flag, case_name, opposite_flag = "True", "cased", "False"
    else:
        return

    raise ValueError(
        "You passed in `--do_lower_case=%s` with `--init_checkpoint=%s`. "
        "However, `%s` seems to be a %s model, so you "
        "should pass in `--do_lower_case=%s` so that the fine-tuning matches "
        "how the model was pre-training. If this error is wrong, please "
        "just comment out this check." % (actual_flag, init_checkpoint,
                                          model_name, case_name, opposite_flag))
164,939 | import collections
import re
import unicodedata
import six
import tensorflow as tf
The provided code snippet includes necessary dependencies for implementing the `printable_text` function. Write a Python function `def printable_text(text)` to solve the following problem:
Returns text encoded in a way suitable for print or `tf.logging`.
Here is the function:
def printable_text(text):
    """Returns text encoded in a way suitable for print or `tf.logging`.

    Args:
      text: a `str`, or UTF-8 encoded `bytes`.

    Returns:
      `text` as a native `str`; bytes are decoded with undecodable
      sequences ignored.

    Raises:
      ValueError: if `text` is neither `str` nor `bytes`.
    """
    # Fix: the six-based original had an unreachable duplicate
    # `isinstance(text, str)` branch on its Python 2 path (a 2to3 artifact
    # that replaced `unicode` with `str`, making the encode branch dead and
    # the final ValueError wrong for py2 unicode input). Python 2 is no
    # longer supported, so the version dispatch is dropped entirely;
    # Python 3 behavior is byte-for-byte unchanged.
    if isinstance(text, str):
        return text
    if isinstance(text, bytes):
        return text.decode("utf-8", "ignore")
    raise ValueError("Unsupported string type: %s" % (type(text)))
164,940 | import collections
import re
import unicodedata
import six
import tensorflow as tf
def convert_to_unicode(text):
    """Converts `text` to Unicode (if it's not already), assuming utf-8 input.

    Args:
      text: a `str` (returned as-is) or UTF-8 encoded `bytes`.

    Returns:
      A `str`; bytes are decoded with undecodable sequences ignored.

    Raises:
      ValueError: if `text` is neither `str` nor `bytes`.
    """
    # Fix: the six-based original's Python 2 branch tested
    # `isinstance(text, str)` twice (2to3 artifact replacing `unicode`),
    # making the second branch unreachable. Python 2 is unsupported, so the
    # version dispatch is removed; Python 3 behavior is unchanged.
    if isinstance(text, str):
        return text
    if isinstance(text, bytes):
        return text.decode("utf-8", "ignore")
    raise ValueError("Unsupported string type: %s" % (type(text)))
The provided code snippet includes necessary dependencies for implementing the `load_vocab` function. Write a Python function `def load_vocab(vocab_file)` to solve the following problem:
Loads a vocabulary file into a dictionary.
Here is the function:
def load_vocab(vocab_file):
    """Loads a vocabulary file into a dictionary.

    Each line becomes one token; values are consecutive 0-based indices in
    file order. Reading stops at EOF (readline() returning "").
    """
    vocab = collections.OrderedDict()
    with tf.gfile.GFile(vocab_file, "r") as reader:
        index = 0
        while True:
            line = convert_to_unicode(reader.readline())
            if not line:
                break
            vocab[line.strip()] = index
            index += 1
    return vocab
164,941 | import collections
import re
import unicodedata
import six
import tensorflow as tf
def convert_by_vocab(vocab, items):
    """Converts a sequence of [tokens|ids] using the vocab."""
    # KeyError on unknown items, exactly like the original explicit loop.
    return [vocab[item] for item in items]


def convert_ids_to_tokens(inv_vocab, ids):
    # Inverse lookup: ids -> tokens via the inverted vocabulary.
    return convert_by_vocab(inv_vocab, ids)
164,942 | import collections
import re
import unicodedata
import six
import tensorflow as tf
The provided code snippet includes necessary dependencies for implementing the `whitespace_tokenize` function. Write a Python function `def whitespace_tokenize(text)` to solve the following problem:
Runs basic whitespace cleaning and splitting on a piece of text.
Here is the function:
def whitespace_tokenize(text):
    """Runs basic whitespace cleaning and splitting on a piece of text."""
    stripped = text.strip()
    # str.split() with no argument collapses whitespace runs and never
    # yields empty tokens; an all-whitespace input produces [].
    return stripped.split() if stripped else []
164,943 | import collections
import re
import unicodedata
import six
import tensorflow as tf
The provided code snippet includes necessary dependencies for implementing the `_is_whitespace` function. Write a Python function `def _is_whitespace(char)` to solve the following problem:
Checks whether `chars` is a whitespace character.
Here is the function:
def _is_whitespace(char):
"""Checks whether `chars` is a whitespace character."""
# \t, \n, and \r are technically contorl characters but we treat them
# as whitespace since they are generally considered as such.
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False | Checks whether `chars` is a whitespace character. |
164,944 | import collections
import re
import unicodedata
import six
import tensorflow as tf
The provided code snippet includes necessary dependencies for implementing the `_is_control` function. Write a Python function `def _is_control(char)` to solve the following problem:
Checks whether `chars` is a control character.
Here is the function:
def _is_control(char):
"""Checks whether `chars` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char == "\t" or char == "\n" or char == "\r":
return False
cat = unicodedata.category(char)
if cat in ("Cc", "Cf"):
return True
return False | Checks whether `chars` is a control character. |
164,945 | import collections
import re
import unicodedata
import six
import tensorflow as tf
The provided code snippet includes necessary dependencies for implementing the `_is_punctuation` function. Write a Python function `def _is_punctuation(char)` to solve the following problem:
Checks whether `chars` is a punctuation character.
Here is the function:
def _is_punctuation(char):
"""Checks whether `chars` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
(cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False | Checks whether `chars` is a punctuation character. |
164,946 | import os
import codecs
import math
import numpy as np
import argparse
from tqdm import tqdm
from model import bert
from eval import bert_dse_server
The provided code snippet includes necessary dependencies for implementing the `compute_kernel_bias` function. Write a Python function `def compute_kernel_bias(vecs)` to solve the following problem:
计算kernel和bias vecs.shape = [num_samples, embedding_size], 最后的变换:y = (x + bias).dot(kernel)
Here is the function:
def compute_kernel_bias(vecs):
    """Compute the whitening kernel and bias.

    vecs.shape = [num_samples, embedding_size]; the final transform is
    y = (x + bias).dot(kernel), which centers and whitens the embeddings.
    """
    mean = vecs.mean(axis=0, keepdims=True)
    covariance = np.cov(vecs.T)
    # cov is symmetric, so SVD doubles as an eigendecomposition here.
    u, s, _ = np.linalg.svd(covariance)
    kernel = u @ np.diag(1 / np.sqrt(s))
    return kernel, -mean
164,947 | import os
import codecs
import math
import numpy as np
import argparse
from collections import OrderedDict
from tqdm import tqdm
import bert_dse_server
The provided code snippet includes necessary dependencies for implementing the `cos_similarity` function. Write a Python function `def cos_similarity(vec1, vec2)` to solve the following problem:
:param matrix: (n,d) :param vec: (d) :return: (n)
Here is the function:
def cos_similarity(vec1, vec2):
    """
    Cosine similarity of two 1-D vectors, rescaled from [-1, 1] into [0, 1].

    :param vec1: (d)
    :param vec2: (d)
    :return: scalar in [0, 1]
    """
    a = np.array(vec1, dtype=np.float32)
    b = np.array(vec2, dtype=np.float32)
    dot = np.sum(a * b, axis=0)
    norm_a = np.sqrt(np.sum(a ** 2, axis=0))
    norm_b = np.sqrt(np.sum(b ** 2))
    # Shift-and-scale so identical vectors give 1.0 and opposite give 0.0.
    return (dot / (norm_a * norm_b) + 1.0) / 2.0
164,948 | import os
import codecs
import numpy as np
import argparse
from scipy import stats
def pearson_r(x, y):
    # Pearson correlation: off-diagonal entry of the 2x2 correlation matrix.
    assert x.ndim == y.ndim == 1
    return np.corrcoef(x, y)[0, 1]
164,949 | import os
import codecs
import numpy as np
import argparse
from scipy import stats
def spearman_r(x, y):
    # Spearman rank correlation; scipy returns a result object, we only
    # need its .correlation field.
    assert x.ndim == y.ndim == 1
    result = stats.spearmanr(x, y)
    return result.correlation
164,950 | import os
import codecs
import numpy as np
import argparse
from scipy import stats
def get_args():
    # Command-line options for the semantic-textual-similarity evaluation.
    arg_parser = argparse.ArgumentParser(description="Semantic Textual Similarity")
    arg_parser.add_argument(
        "--dataset", "-g",
        help=r"指定的数据集目录,包括语义相似度数据和其结果数据",
        default='')
    return arg_parser.parse_args()
164,951 | import os
import codecs
import numpy as np
import pandas as pd
import argparse
from tqdm import tqdm
from gensim import corpora
from gensim.summarization.bm25 import BM25
import jieba
from typing import *
def check_rank_matrix(rank_matrix):
    # Sanity checks: a 2-D int32 matrix whose every row contains both a
    # relevant (1) and a non-relevant (0) entry.
    assert rank_matrix.ndim == 2, rank_matrix.shape
    assert rank_matrix.dtype == np.int32
    for row in rank_matrix:
        labels = set(row)
        assert len(labels) == 2
        assert 0 in labels and 1 in labels, "全为0或全为1"


def mean_average_precision(rank_matrix):
    # mAP: for each row, average the precision measured at every relevant
    # position; then average those APs over all rows.
    check_rank_matrix(rank_matrix)
    ap_list = []
    for row in rank_matrix:
        hits = 0
        precisions = []
        for position, relevant in enumerate(row, start=1):
            if relevant == 1:
                hits += 1
                precisions.append(hits / position)
        assert len(precisions) > 0
        ap_list.append(np.mean(precisions))
    mAP = np.mean(ap_list)
    return mAP, ap_list
164,952 | import os
import codecs
import numpy as np
import pandas as pd
import argparse
from tqdm import tqdm
from gensim import corpora
from gensim.summarization.bm25 import BM25
import jieba
from typing import *
def check_rank_matrix(rank_matrix):
    # Sanity checks: a 2-D int32 matrix whose every row contains both a
    # relevant (1) and a non-relevant (0) entry.
    assert rank_matrix.ndim == 2, rank_matrix.shape
    assert rank_matrix.dtype == np.int32
    for row in rank_matrix:
        labels = set(row)
        assert len(labels) == 2
        assert 0 in labels and 1 in labels, "全为0或全为1"


def recall_at_k(rank_matrix, k):
    # Recall@k per row: relevant items in the top k / total relevant items;
    # averaged over rows.
    check_rank_matrix(rank_matrix)
    per_row = [row[:k].sum() / row.sum() for row in rank_matrix]
    return np.mean(per_row)
164,953 | import os
import codecs
import numpy as np
import pandas as pd
import argparse
from tqdm import tqdm
from gensim import corpora
from gensim.summarization.bm25 import BM25
import jieba
from typing import *
def check_rank_matrix(rank_matrix):
def precision_at_k(rank_matrix, k):
    # Precision@k per row: relevant items in the top k / k; averaged over rows.
    check_rank_matrix(rank_matrix)
    per_row = [row[:k].sum() / k for row in rank_matrix]
    return np.mean(per_row)
164,954 | import os
import codecs
import numpy as np
import pandas as pd
import argparse
from tqdm import tqdm
from gensim import corpora
from gensim.summarization.bm25 import BM25
import jieba
from typing import *
def check_rank_matrix(rank_matrix):
    # Sanity checks: a 2-D int32 matrix whose every row contains both a
    # relevant (1) and a non-relevant (0) entry.
    assert rank_matrix.ndim == 2, rank_matrix.shape
    assert rank_matrix.dtype == np.int32
    for row in rank_matrix:
        labels = set(row)
        assert len(labels) == 2
        assert 0 in labels and 1 in labels, "全为0或全为1"


def mean_reciprocal_rank(rank_matrix):
    # MRR: reciprocal rank (1-based) of the first relevant item per row,
    # averaged over rows.
    check_rank_matrix(rank_matrix)
    rr_list = []
    for row in rank_matrix:
        for position, relevant in enumerate(row, start=1):
            if relevant == 1:
                rr_list.append(1 / position)
                break
    # Every row is guaranteed a relevant item by check_rank_matrix.
    assert len(rr_list) == len(rank_matrix)
    mrr = np.mean(rr_list)
    return mrr, rr_list
164,955 | import os
import codecs
import numpy as np
import pandas as pd
import argparse
from tqdm import tqdm
from gensim import corpora
from gensim.summarization.bm25 import BM25
import jieba
from typing import *
The provided code snippet includes necessary dependencies for implementing the `cos_similarity` function. Write a Python function `def cos_similarity(matrix, vec)` to solve the following problem:
:param matrix: (n,d) :param vec: (d) :return: (n)
Here is the function:
def cos_similarity(matrix, vec):
    """
    Cosine similarity of one query vector against each row of a matrix.

    :param matrix: (n,d)
    :param vec: (d)
    :return: (n)
    """
    query = vec[None, :]  # (1, d) so it broadcasts against the matrix
    dots = (query * matrix).sum(axis=1)  # (n,)
    row_norms = np.sqrt((matrix ** 2).sum(axis=1))  # (n,)
    query_norm = np.sqrt((query ** 2).sum())  # scalar
    return dots / (row_norms * query_norm)
164,956 | import os
import codecs
import numpy as np
import pandas as pd
import argparse
from tqdm import tqdm
from gensim import corpora
from gensim.summarization.bm25 import BM25
import jieba
from typing import *
def pad_rank_label_list(rank_label_list):
    """Right-pad rank-label rows with 0 so every row has the same length.

    Fix over the original: rows already at the maximum length were appended
    unconverted (list or array, whatever came in) while shorter rows came
    back as np.int32 arrays, giving the caller a mixed-type result. Now
    every row in a padded result is an np.int32 array.

    Note: when all rows already share one length the input is returned
    unchanged (original fast-path behavior preserved).
    """
    lengths = [len(row) for row in rank_label_list]
    if len(set(lengths)) == 1:
        return rank_label_list
    max_len = max(lengths)
    padded = []
    for row in rank_label_list:
        values = list(row) + [0] * (max_len - len(row))
        padded.append(np.array(values, dtype=np.int32))
    return padded
164,957 | import os
import codecs
import numpy as np
import pandas as pd
import argparse
from tqdm import tqdm
from gensim import corpora
from gensim.summarization.bm25 import BM25
import jieba
from typing import *
def get_args():
    """Command-line options for the retrieval-evaluation script."""
    # Fix: `type` previously was `lambda s: map(int, s.split(","))`, which
    # returns a one-shot iterator — silently empty after its first
    # traversal. Returning a real list also matches the defaults' type.
    def _int_list(s):
        return [int(part) for part in s.split(",")]

    parser = argparse.ArgumentParser()
    parser.add_argument("--dataset", help="数据集", default="")
    parser.add_argument("--select_file", default="",help="query_id\tquery\tcls\tpositive_id,...\tnegative_id,...")
    parser.add_argument("--embed_file", default="", help="query\tfloat,...")
    parser.add_argument("--result_file", help="检索结果输出到文件", default="")
    parser.add_argument("--bm25", choices=["1", "0"], default="0", help="用于测试,若为1则根据bm25排序")
    parser.add_argument("--precision_k", default=[1, 2, 5, 10], type=_int_list)
    parser.add_argument("--recall_k", default=[10, 20, 50], type=_int_list)
    return parser.parse_args()
164,958 | import random, re, os
from data.prompt_dataset import *
from data.plot_dataset import *
from data.arxiv_dataset import *
from data.yelp_dataset import *
import torch
import torch.utils.data as data
from torch.utils.data.distributed import DistributedSampler
from unidecode import unidecode
import functools
from rake_nltk import Rake
import urllib, sys
import urllib.request
import json, re
import numpy as np
from scipy.spatial.distance import cdist
from bert_serving.client import BertClient
from tqdm import trange
from random import shuffle
The provided code snippet includes necessary dependencies for implementing the `compose` function. Write a Python function `def compose(*functions)` to solve the following problem:
Executes a list of functions in order
Here is the function:
def compose(*functions):
    """Executes a list of functions in order (left to right)."""
    def composed(value):
        for fn in functions:
            value = fn(value)
        return value
    # With no functions, `composed` is the identity — same as the original
    # reduce() with an identity initializer.
    return composed
164,959 | import random, re, os
from data.prompt_dataset import *
from data.plot_dataset import *
from data.arxiv_dataset import *
from data.yelp_dataset import *
import torch
import torch.utils.data as data
from torch.utils.data.distributed import DistributedSampler
from unidecode import unidecode
import functools
from rake_nltk import Rake
import urllib, sys
import urllib.request
import json, re
import numpy as np
from scipy.spatial.distance import cdist
from bert_serving.client import BertClient
from tqdm import trange
from random import shuffle
The provided code snippet includes necessary dependencies for implementing the `prefix_truncate` function. Write a Python function `def prefix_truncate(window)` to solve the following problem:
truncates text to the prefix window size
Here is the function:
def prefix_truncate(window):
    """Truncates text to the prefix window size."""
    def truncate(text):
        # Works on any sliceable sequence (str, list of token ids, ...).
        return text[:window] if len(text) > window else text
    return truncate
164,960 | import random, re, os
from data.prompt_dataset import *
from data.plot_dataset import *
from data.arxiv_dataset import *
from data.yelp_dataset import *
import torch
import torch.utils.data as data
from torch.utils.data.distributed import DistributedSampler
from unidecode import unidecode
import functools
from rake_nltk import Rake
import urllib, sys
import urllib.request
import json, re
import numpy as np
from scipy.spatial.distance import cdist
from bert_serving.client import BertClient
from tqdm import trange
from random import shuffle
def encode_tuple(tokenizer, t):
    # Encode each of the three text fields of a sample with the tokenizer.
    first, second, third = t
    return tokenizer.encode(first), tokenizer.encode(second), tokenizer.encode(third)
164,961 | import random, re, os
from data.prompt_dataset import *
from data.plot_dataset import *
from data.arxiv_dataset import *
from data.yelp_dataset import *
import torch
import torch.utils.data as data
from torch.utils.data.distributed import DistributedSampler
from unidecode import unidecode
import functools
from rake_nltk import Rake
import urllib, sys
import urllib.request
import json, re
import numpy as np
from scipy.spatial.distance import cdist
from bert_serving.client import BertClient
from tqdm import trange
from random import shuffle
def truncate_tuple(truncator, t):
    # Apply the same truncation function to all three elements of a sample.
    first, second, third = t
    return truncator(first), truncator(second), truncator(third)
164,962 | import random, re, os
from data.prompt_dataset import *
from data.plot_dataset import *
from data.arxiv_dataset import *
from data.yelp_dataset import *
import torch
import torch.utils.data as data
from torch.utils.data.distributed import DistributedSampler
from unidecode import unidecode
import functools
from rake_nltk import Rake
import urllib, sys
import urllib.request
import json, re
import numpy as np
from scipy.spatial.distance import cdist
from bert_serving.client import BertClient
from tqdm import trange
from random import shuffle
def extract_keywords(text, r):
    # Rank phrases with RAKE, keeping roughly one extra keyword per two
    # sentences (~228 chars), clamped to the range [2, 5].
    r.extract_keywords_from_text(text)
    # 114 2, +1 per 228, add one key per 2 sentences, which is 114 in length
    num = min(5, max(2, int(len(text) / 228.0 + 1.5)))
    cleaned = []
    for phrase in r.get_ranked_phrases()[:num]:
        trimmed = phrase.strip('\'.,:?!;" ')
        # Remove the space RAKE leaves before punctuation inside a phrase.
        cleaned.append(re.sub(' (\'|\.|\,|\:|\?|\!|;)', '\g<1>', trimmed))
    return cleaned
def insert_keywords(tokenizer, data_type):
    """Return a closure mapping a raw example dict to a (source, target, full-input) text tuple.

    data_type selects the format (x = summary, y = story, o = RAKE keywords):
      t0: x, y, y                    t1: x, x+y, x+y
      t2/t3: x, x+o+y (append/insert keywords)
      t4: x+o, y                     t5/t6: x+o, x+o+y (append/insert)
      t7/t8: like t5/t6 but return a LIST of progressively extended samples.
    NOTE(review): keywords are wrapped as cls_token + sep-joined keys +
    mask_token per paragraph — presumably special markers the downstream
    model is trained on; confirm against the tokenizer config.
    """
    def f(text_raw_dict):
        # 'prompt' in text_raw_dict --> wp dataset; 'title' in text_raw_dict --> wi dataset and other well preprocessed dataset
        summary = text_raw_dict['prompt'] if 'prompt' in text_raw_dict else text_raw_dict['title']
        story = text_raw_dict['story']
        if data_type == 't0': # x, y, y
            # wp stories need paragraph re-segmentation; wi stories carry
            # explicit <newline><newline> separators.
            if 'prompt' in text_raw_dict:
                pp = get_paragraph(story)
                story = '\n\n'.join(pp)
            else:
                pp = story.split('<newline><newline>')
                story = '\n\n'.join(pp)
            return summary + tokenizer.eos_token, story + tokenizer.eos_token, tokenizer.eos_token + story + tokenizer.eos_token
        elif data_type == 't1': # x, x + y, x + y
            if 'prompt' in text_raw_dict:
                pp = get_paragraph(story)
                story = '\n\n'.join(pp)
            else:
                pp = story.split('<newline><newline>')
                story = '\n\n'.join(pp)
            summary_story = summary + tokenizer.eos_token + story + tokenizer.eos_token
            return summary + tokenizer.eos_token, summary_story, tokenizer.eos_token + summary_story
        elif data_type == 't2': # x, x + o + y, x + o + y, append
            if 'title' in text_raw_dict:
                pp = story.split('<newline><newline>')
            else:
                pp = get_paragraph(story)
            story = '\n\n'.join(pp)
            # extract keywords
            r = Rake(min_length=1, max_length=4)
            keys = [extract_keywords(text, r) for text in pp]
            keys_str = [tokenizer.cls_token + tokenizer.sep_token.join(key) + tokenizer.mask_token for key in keys]
            # All keyword groups are appended before the story text.
            story_appended = summary + ''.join(keys_str) + tokenizer.eos_token + '\n\n'.join(pp)
            return summary + tokenizer.eos_token, story_appended + tokenizer.eos_token, tokenizer.eos_token + story_appended + tokenizer.eos_token
        elif data_type == 't3': # x, x + o + y, x + o + y, insert
            if 'title' in text_raw_dict:
                pp = story.split('<newline><newline>')
            else:
                pp = get_paragraph(story)
            story = '\n\n'.join(pp)
            # extract keywords
            r = Rake(min_length=1, max_length=4)
            keys = [extract_keywords(text, r) for text in pp]
            keys_str = [tokenizer.cls_token + tokenizer.sep_token.join(key) + tokenizer.mask_token for key in keys]
            keys_str[0] += tokenizer.eos_token
            # Each keyword group is interleaved before its own paragraph.
            story_inserted = summary + ''.join([k + pt for k, pt in zip(keys_str, pp)])
            return summary + tokenizer.eos_token, story_inserted + tokenizer.eos_token, tokenizer.eos_token + story_inserted + tokenizer.eos_token
        elif data_type == 't4': # x + o, y, x + o + y
            if 'title' in text_raw_dict:
                pp = story.split('<newline><newline>')
            else:
                pp = get_paragraph(story)
            story = '\n\n'.join(pp)
            # extract keywords
            r = Rake(min_length=1, max_length=4)
            keys = [extract_keywords(text, r) for text in pp]
            keys_str = [tokenizer.cls_token + tokenizer.sep_token.join(key) + tokenizer.mask_token for key in keys]
            summary_story = tokenizer.eos_token + summary + ''.join(keys_str) + tokenizer.eos_token + story + tokenizer.eos_token
            return summary + ''.join(keys_str) + tokenizer.eos_token, story + tokenizer.eos_token, summary_story
        elif data_type == 't5': # x + o, x + o + y, x + o + y, append
            if 'title' in text_raw_dict:
                pp = story.split('<newline><newline>')
            else:
                pp = get_paragraph(story)
            story = '\n\n'.join(pp)
            # extract keywords
            r = Rake(min_length=1, max_length=4)
            keys = [extract_keywords(text, r) for text in pp]
            keys_str = [tokenizer.cls_token + tokenizer.sep_token.join(key) + tokenizer.mask_token for key in keys]
            story_appended = summary + ''.join(keys_str) + tokenizer.eos_token + '\n\n'.join(pp)
            return summary + ''.join(keys_str) + tokenizer.eos_token, story_appended + tokenizer.eos_token, tokenizer.eos_token + story_appended + tokenizer.eos_token
        elif data_type == 't6': # x + o, x + o + y, x + o + y, insert
            if 'title' in text_raw_dict:
                pp = story.split('<newline><newline>')
            else:
                pp = get_paragraph(story)
            story = '\n\n'.join(pp)
            # extract keywords
            r = Rake(min_length=1, max_length=4)
            keys = [extract_keywords(text, r) for text in pp]
            keys_str = [tokenizer.cls_token + tokenizer.sep_token.join(key) + tokenizer.mask_token for key in keys]
            keys_str[0] += tokenizer.eos_token
            story_inserted = summary + ''.join([k + pt for k, pt in zip(keys_str, pp)])
            return summary + ''.join(keys_str) + tokenizer.eos_token, story_inserted + tokenizer.eos_token, tokenizer.eos_token + story_inserted + tokenizer.eos_token
        elif data_type == 't7': # x + o, x + o + y, x + o + y, append, extend
            if 'title' in text_raw_dict:
                pp = story.split('<newline><newline>')
            else:
                pp = get_paragraph(story)
            story = '\n\n'.join(pp)
            # extract keywords
            r = Rake(min_length=1, max_length=4)
            keys = [extract_keywords(text, r) for text in pp]
            keys_str = [tokenizer.cls_token + tokenizer.sep_token.join(key) + tokenizer.mask_token for key in keys]
            # One sample per paragraph prefix: the i-th sample sees the first
            # i keyword groups and the first i paragraphs.
            extended_res = []
            for i in range(len(pp)):
                k_i, p_i = keys_str[:i], pp[:i]
                out_i = summary + ''.join(k_i) + tokenizer.eos_token
                story_appended_i = summary + ''.join(k_i) + tokenizer.eos_token + '\n\n'.join(p_i) + tokenizer.eos_token
                story_i = tokenizer.eos_token + summary + ''.join(k_i) + tokenizer.eos_token + '\n\n'.join(p_i) + tokenizer.eos_token
                extended_res.append((out_i, story_appended_i, story_i))
            return extended_res
        elif data_type == 't8': # x + o, x + o + y, x + o + y, insert, extend
            if 'title' in text_raw_dict:
                pp = story.split('<newline><newline>')
            else:
                pp = get_paragraph(story)
            story = '\n\n'.join(pp)
            # extract keywords
            r = Rake(min_length=1, max_length=4)
            keys = [extract_keywords(text, r) for text in pp]
            keys_str = [tokenizer.cls_token + tokenizer.sep_token.join(key) + tokenizer.mask_token for key in keys]
            keys_str[0] += tokenizer.eos_token
            extended_res = []
            for i in range(len(pp)):
                k_i, p_i = keys_str[:i], pp[:i]
                # The eos appended to keys_str[0] is stripped from the source
                # side and re-added once at the end.
                out_i = summary + ''.join(k_i).replace(tokenizer.eos_token, '') + tokenizer.eos_token
                story_inserted_i = summary + ''.join([k + pt for k, pt in zip(k_i, p_i)]) + tokenizer.eos_token
                story_i = tokenizer.eos_token + summary + ''.join([k + pt for k, pt in zip(k_i, p_i)]) + tokenizer.eos_token
                extended_res.append((out_i, story_inserted_i, story_i))
            return extended_res
        else:
            raise Exception('Data type not implemented.')
    return f
164,963 | import random, re, os
from data.prompt_dataset import *
from data.plot_dataset import *
from data.arxiv_dataset import *
from data.yelp_dataset import *
import torch
import torch.utils.data as data
from torch.utils.data.distributed import DistributedSampler
from unidecode import unidecode
import functools
from rake_nltk import Rake
import urllib, sys
import urllib.request
import json, re
import numpy as np
from scipy.spatial.distance import cdist
from bert_serving.client import BertClient
from tqdm import trange
from random import shuffle
class Preprocessor(Preprocessor_base):
    """Builds the raw-example -> (source, target, full-input) id pipeline."""

    def __init__(self, tokenizer, seq_len, data_type):
        super().__init__()
        self.tokenizer = tokenizer
        self.seq_len = seq_len
        self.data_type = data_type

    def make_fn(self):
        # Stage 2: tokenize one (x, y, input) tuple, or each tuple of an
        # extended sample list (data types t7/t8 yield lists of tuples).
        def _encode(item):
            if isinstance(item, tuple):
                return encode_tuple(self.tokenizer, item)
            return [encode_tuple(self.tokenizer, part) for part in item]

        # Stage 3: clip every encoded sequence to the prefix window.
        def _truncate(item):
            clip = prefix_truncate(self.seq_len)
            if isinstance(item, tuple):
                return truncate_tuple(clip, item)
            return [truncate_tuple(clip, part) for part in item]

        return compose(
            insert_keywords(self.tokenizer, self.data_type),
            _encode,
            _truncate,
        )
def collate_fn(samples):
""" Creates a batch out of samples """
# each sample=[source, target, ?]
x_max_len = max(map(lambda s: len(s[0]), samples))
# Zero pad mask
x_mask = torch.ByteTensor([[1] * len(ss[0]) + [0] * (x_max_len - len(ss[0])) for ss in samples])
# tokenizer.convert_tokens_to_ids('<|startoftext|>') = 50257, endoftext 50256, use 50257 here causes errors!!
x = torch.LongTensor([ss[0] + [50256] * (x_max_len - len(ss[0])) for ss in samples])
max_len = max(map(lambda s: len(s[1]), samples))
# Zero pad mask
y_mask = torch.ByteTensor([[1] * len(ss[1]) + [0] * (max_len - len(ss[1])) for ss in samples])
# tokenizer.convert_tokens_to_ids('<|startoftext|>') = 50257
y = torch.LongTensor([ss[1] + [50256] * (max_len - len(ss[1])) for ss in samples])
max_len = max(map(lambda s: len(s[2]), samples))
# Zero pad mask
input_mask = torch.ByteTensor([[1] * len(ip[2]) + [0] * (max_len - len(ip[2])) for ip in samples])
# tokenizer.convert_tokens_to_ids('<|startoftext|>') = 50257
input = torch.LongTensor([ip[2] + [50256] * (max_len - len(ip[2])) for ip in samples])
return x_mask, x, y_mask, y, input[:, :-1], input[:, 1:].contiguous(), input_mask[:, 1:]
# x_mask, x_tokens, y_mask, y_tokens, input_tokens, target_tokens, mask
def prepare_dataset(data_dir, dataset_name, tokenizer, train_bsz, train_seq_len, val_bsz, val_seq_len, test_bsz=1,
test_seq_len=1024, data_type='t0', num_workers=1, make_train=True, make_val=True, make_test=False):
# data_dir, dataset_name, tokenizer, train_bsz, train_seq_len, val_bsz, val_seq_len, num_workers = args.data_dir, args.dataset, tokenizer, batch_schedule[cur_b_schedule][0], batch_schedule[cur_b_schedule][1], batch_schedule[-1][0], batch_schedule[-1][1], args.workers
loaders = []
if dataset_name == 'wp':
train_collate_fn = collate_fn
val_collate_fn = collate_fn
test_collate_fn = collate_fn
if make_train:
train_preproc = Preprocessor(tokenizer, train_seq_len, data_type)
d_train = PromptDataset(
os.path.join(data_dir, 'writingPrompts/train.wp_source'),
os.path.join(data_dir, 'writingPrompts/train.wp_target'),
train_preproc)
if data_type == 't7' or data_type == 't8':
d_train = [t for lt in d_train for t in lt]
print('Train dataset size', len(d_train))
loaders.append(data.DataLoader(d_train,
# sampler=DistributedSampler(d_train) if distributed else None,
batch_size=train_bsz,
pin_memory=True,
drop_last=True,
num_workers=num_workers,
collate_fn=train_collate_fn) if d_train else None)
if make_val:
val_preproc = Preprocessor(tokenizer, val_seq_len, data_type)
d_val = PromptDataset(
os.path.join(data_dir, 'writingPrompts/valid.wp_source'),
os.path.join(data_dir, 'writingPrompts/valid.wp_target'),
val_preproc)
if data_type == 't7' or data_type == 't8':
d_val = [t for lt in d_val for t in lt]
print('Val dataset size', len(d_val))
loaders.append(data.DataLoader(d_val,
# sampler=DistributedSampler(d_val),
batch_size=val_bsz,
pin_memory=True,
drop_last=True,
num_workers=num_workers,
collate_fn=val_collate_fn) if d_val else None)
if make_test:
test_preproc = Preprocessor(tokenizer, test_seq_len, data_type)
d_test = PromptDataset(
os.path.join(data_dir, 'writingPrompts/test.wp_source'),
os.path.join(data_dir, 'writingPrompts/test.wp_target'),
test_preproc)
if data_type == 't7' or data_type == 't8':
d_test = [t for lt in d_test for t in lt]
print('Test dataset size', len(d_test))
loaders.append(data.DataLoader(d_test,
# sampler=DistributedSampler(d_val),
batch_size=test_bsz,
pin_memory=True,
drop_last=True,
num_workers=num_workers,
collate_fn=test_collate_fn) if d_test else None)
elif dataset_name == 'wi':
train_collate_fn = collate_fn
val_collate_fn = collate_fn
test_collate_fn = collate_fn
print('Loading wikiplot dataset...')
data_plots = os.path.join(data_dir, 'wikiPlots/plots_paragraph')
data_titles = os.path.join(data_dir, 'wikiPlots/titles')
with open(data_plots, errors='ignore') as fp:
plots = fp.readlines()
with open(data_titles, errors='ignore') as ft:
titles = ft.readlines()
texts = [(t, p) for t, p in zip(titles, plots) if t.strip() != '' and p.strip() != '']
print('Done.')
train_text = texts[:int(len(texts) * 0.9)]
val_text = texts[int(len(texts) * 0.9):int(len(texts) * 0.95)]
test_text = texts[int(len(texts) * 0.95):]
if make_train:
train_preproc = Preprocessor(tokenizer, train_seq_len, data_type)
d_train = PlotDataset(train_text, train_preproc)
if data_type == 't7' or data_type == 't8':
d_train = [t for lt in d_train for t in lt]
print('Train dataset size', len(d_train))
loaders.append(data.DataLoader(d_train,
# sampler=DistributedSampler(d_train) if distributed else None,
batch_size=train_bsz,
pin_memory=True,
drop_last=True,
num_workers=num_workers,
collate_fn=train_collate_fn) if d_train else None)
if make_val:
val_preproc = Preprocessor(tokenizer, val_seq_len, data_type)
d_val = PlotDataset(val_text, val_preproc)
if data_type == 't7' or data_type == 't8':
d_val = [t for lt in d_val for t in lt]
print('Val dataset size', len(d_val))
loaders.append(data.DataLoader(d_val,
# sampler=DistributedSampler(d_val),
batch_size=val_bsz,
pin_memory=True,
drop_last=True,
num_workers=num_workers,
collate_fn=val_collate_fn) if d_val else None)
if make_test:
test_preproc = Preprocessor(tokenizer, test_seq_len, data_type)
d_test = PlotDataset(test_text, test_preproc)
if data_type == 't7' or data_type == 't8':
d_test = [t for lt in d_test for t in lt]
print('Test dataset size', len(d_test))
loaders.append(data.DataLoader(d_test,
# sampler=DistributedSampler(d_val),
batch_size=test_bsz,
pin_memory=True,
drop_last=True,
num_workers=num_workers,
collate_fn=test_collate_fn) if d_test else None)
elif dataset_name == 'ax':
train_collate_fn = collate_fn
val_collate_fn = collate_fn
test_collate_fn = collate_fn
print('Loading arxiv dataset...')
data_abs = os.path.join(data_dir, 'arxiv/artificial intelligence_10047_15000_15_abs.txt')
data_titles = os.path.join(data_dir, 'arxiv/artificial intelligence_10047_15000_15_title.txt')
with open(data_abs, errors='ignore') as fp:
abs = fp.readlines()
with open(data_titles, errors='ignore') as ft:
titles = ft.readlines()
assert len(titles) == len(abs)
ai_data = [('ai', t.strip(), p.strip()) for t, p in zip(titles, abs) if t.strip() != '' and p.strip() != '']
data_abs = os.path.join(data_dir, 'arxiv/computer vision_14582_15000_15_abs.txt')
data_titles = os.path.join(data_dir, 'arxiv/computer vision_14582_15000_15_title.txt')
with open(data_abs, errors='ignore') as fp:
abs = fp.readlines()
with open(data_titles, errors='ignore') as ft:
titles = ft.readlines()
assert len(titles) == len(abs)
cv_data = [('cv', t.strip(), p.strip()) for t, p in zip(titles, abs) if t.strip() != '' and p.strip() != '']
data_abs = os.path.join(data_dir, 'arxiv/language generation_14514_15000_15_abs.txt')
data_titles = os.path.join(data_dir, 'arxiv/language generation_14514_15000_15_title.txt')
with open(data_abs, errors='ignore') as fp:
abs = fp.readlines()
with open(data_titles, errors='ignore') as ft:
titles = ft.readlines()
assert len(titles) == len(abs)
lg_data = [('lg', t.strip(), p.strip()) for t, p in zip(titles, abs) if t.strip() != '' and p.strip() != '']
texts = ai_data + cv_data + lg_data
shuffle(texts)
print('Done.')
train_text = texts[:int(len(texts) * 0.9)]
val_text = texts[int(len(texts) * 0.9):int(len(texts) * 0.95)]
test_text = texts[int(len(texts) * 0.95):]
if make_train:
train_preproc = Preprocessor(tokenizer, train_seq_len, data_type)
d_train = ArxivDataset(train_text, train_preproc)
print('Train dataset size', len(d_train))
loaders.append(data.DataLoader(d_train,
# sampler=DistributedSampler(d_train) if distributed else None,
batch_size=train_bsz,
pin_memory=True,
drop_last=True,
num_workers=num_workers,
collate_fn=train_collate_fn) if d_train else None)
if make_val:
val_preproc = Preprocessor(tokenizer, val_seq_len, data_type)
d_val = ArxivDataset(val_text, val_preproc)
print('Val dataset size', len(d_val))
loaders.append(data.DataLoader(d_val,
# sampler=DistributedSampler(d_val),
batch_size=val_bsz,
pin_memory=True,
drop_last=True,
num_workers=num_workers,
collate_fn=val_collate_fn) if d_val else None)
if make_test:
test_preproc = Preprocessor(tokenizer, test_seq_len, data_type)
d_test = ArxivDataset(test_text, test_preproc)
print('Test dataset size', len(d_test))
loaders.append(data.DataLoader(d_test,
# sampler=DistributedSampler(d_val),
batch_size=test_bsz,
pin_memory=True,
drop_last=True,
num_workers=num_workers,
collate_fn=test_collate_fn) if d_test else None)
elif dataset_name == 'yp':
train_collate_fn = collate_fn
val_collate_fn = collate_fn
test_collate_fn = collate_fn
if make_train:
train_preproc = Preprocessor(tokenizer, train_seq_len, data_type)
d_train = YelpDataset(os.path.join(data_dir, 'yelp/yelp.train.txt'), train_preproc)
print('Train dataset size', len(d_train))
loaders.append(data.DataLoader(d_train,
# sampler=DistributedSampler(d_train) if distributed else None,
batch_size=train_bsz,
pin_memory=True,
drop_last=True,
num_workers=num_workers,
collate_fn=train_collate_fn) if d_train else None)
if make_val:
val_preproc = Preprocessor(tokenizer, val_seq_len, data_type)
d_val = YelpDataset(os.path.join(data_dir, 'yelp/yelp.valid.txt'), val_preproc)
print('Val dataset size', len(d_val))
loaders.append(data.DataLoader(d_val,
# sampler=DistributedSampler(d_val),
batch_size=val_bsz,
pin_memory=True,
drop_last=True,
num_workers=num_workers,
collate_fn=val_collate_fn) if d_val else None)
if make_test:
test_preproc = Preprocessor(tokenizer, test_seq_len, data_type)
d_test = YelpDataset(os.path.join(data_dir, 'yelp/yelp.test.txt'), test_preproc)
print('Test dataset size', len(d_test))
loaders.append(data.DataLoader(d_test,
# sampler=DistributedSampler(d_val),
batch_size=test_bsz,
pin_memory=True,
drop_last=True,
num_workers=num_workers,
collate_fn=test_collate_fn) if d_test else None)
else:
raise Exception('Invalid dataset')
return loaders | null |
164,964 | import logging
import random
import torch
import numpy as np
from torch.utils.data import DataLoader
from dataset import PadBatchSeq, pad_seq
import json
from tqdm import tqdm
from sklearn.metrics import accuracy_score, f1_score
import torch.distributed as dist
import os, time, gc, json, pickle, argparse, math, re
import torch.nn as nn
import torch.utils.data as data
import torch.distributed as dist
import torch.multiprocessing as mp
import copy
from generate import *
loggers = {}
def get_logger(filename, level=logging.INFO, print2screen=True):
global loggers
import logging
if os.path.exists(filename):
os.remove(filename)
if loggers.get(filename):
return loggers.get(filename)
else:
logger = logging.getLogger(filename)
logger.setLevel(level)
fh = logging.FileHandler(filename, encoding='utf-8')
fh.setLevel(level)
ch = logging.StreamHandler()
ch.setLevel(level)
formatter = logging.Formatter('[%(asctime)s][%(filename)s][line: %(lineno)d][%(levelname)s] >> %(message)s')
# formatter = logging.Formatter('[%(asctime)s][%(thread)d][%(filename)s][line: %(lineno)d][%(levelname)s] >> %(message)s')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
logger.addHandler(fh)
if print2screen:
logger.addHandler(ch)
loggers[filename] = logger
return logger | null |
164,965 | import logging
import random
import torch
import numpy as np
from torch.utils.data import DataLoader
from dataset import PadBatchSeq, pad_seq
import json
from tqdm import tqdm
from sklearn.metrics import accuracy_score, f1_score
import torch.distributed as dist
import os, time, gc, json, pickle, argparse, math, re
import torch.nn as nn
import torch.utils.data as data
import torch.distributed as dist
import torch.multiprocessing as mp
import copy
from generate import *
def num_params(model):
return sum([np.prod(p.size()) for p in model.parameters() if p.requires_grad]) | null |
164,966 | import logging
import random
import torch
import numpy as np
from torch.utils.data import DataLoader
from dataset import PadBatchSeq, pad_seq
import json
from tqdm import tqdm
from sklearn.metrics import accuracy_score, f1_score
import torch.distributed as dist
import os, time, gc, json, pickle, argparse, math, re
import torch.nn as nn
import torch.utils.data as data
import torch.distributed as dist
import torch.multiprocessing as mp
import copy
from generate import *
def init_para_frompretrained(m, pm, share_para=False):
# m.wte.weight = pm.wte.weight
# m.wpe.weight = pm.wpe.weight
m.wte.weight = copy.deepcopy(pm.wte.weight)
m.wpe.weight = copy.deepcopy(pm.wpe.weight)
for i in range(min(len(m.h), len(pm.h))):
m.h[i].ln_1.weight = pm.h[i].ln_1.weight if share_para else copy.deepcopy(pm.h[i].ln_1.weight)
# print('ln_1',type(m.h[i].ln_1.weight),m.h[i].ln_1.weight,flush=True)
m.h[i].ln_1.bias = pm.h[i].ln_1.bias if share_para else copy.deepcopy(pm.h[i].ln_1.bias)
m.h[i].attn.c_attn.weight = pm.h[i].attn.c_attn.weight if share_para else copy.deepcopy(pm.h[i].attn.c_attn.weight)
m.h[i].attn.c_attn.bias = pm.h[i].attn.c_attn.bias if share_para else copy.deepcopy(pm.h[i].attn.c_attn.bias)
m.h[i].attn.c_proj.weight = pm.h[i].attn.c_proj.weight if share_para else copy.deepcopy(pm.h[i].attn.c_proj.weight)
m.h[i].attn.c_proj.bias = pm.h[i].attn.c_proj.bias if share_para else copy.deepcopy(pm.h[i].attn.c_proj.bias)
m.h[i].ln_2.weight = pm.h[i].ln_2.weight if share_para else copy.deepcopy(pm.h[i].ln_2.weight)
m.h[i].ln_2.bias = pm.h[i].ln_2.bias if share_para else copy.deepcopy(pm.h[i].ln_2.bias)
m.h[i].mlp.c_fc.weight = pm.h[i].mlp.c_fc.weight if share_para else copy.deepcopy(pm.h[i].mlp.c_fc.weight)
m.h[i].mlp.c_fc.bias = pm.h[i].mlp.c_fc.bias if share_para else copy.deepcopy(pm.h[i].mlp.c_fc.bias)
m.h[i].mlp.c_proj.weight = pm.h[i].mlp.c_proj.weight if share_para else copy.deepcopy(pm.h[i].mlp.c_proj.weight)
m.h[i].mlp.c_proj.bias = pm.h[i].mlp.c_proj.bias if share_para else copy.deepcopy(pm.h[i].mlp.c_proj.bias)
# if isdecoder:
# m.h_ori[i].ln_1.weight = pm.h[i].ln_1.weight if share_para else copy.copy(pm.h[i].ln_1.weight)
# m.h_ori[i].ln_1.bias = pm.h[i].ln_1.bias if share_para else copy.copy(pm.h[i].ln_1.bias)
# m.h_ori[i].attn.c_attn.weight = pm.h[i].attn.c_attn.weight if share_para else copy.copy(pm.h[i].attn.c_attn.weight)
# m.h_ori[i].attn.c_attn.bias = pm.h[i].attn.c_attn.bias if share_para else copy.copy(pm.h[i].attn.c_attn.bias)
# m.h_ori[i].attn.c_proj.weight = pm.h[i].attn.c_proj.weight if share_para else copy.copy(pm.h[i].attn.c_proj.weight)
# m.h_ori[i].attn.c_proj.bias = pm.h[i].attn.c_proj.bias if share_para else copy.copy(pm.h[i].attn.c_proj.bias)
# m.h_ori[i].ln_2.weight = pm.h[i].ln_2.weight if share_para else copy.copy(pm.h[i].ln_2.weight)
# m.h_ori[i].ln_2.bias = pm.h[i].ln_2.bias if share_para else copy.copy(pm.h[i].ln_2.bias)
# m.h_ori[i].mlp.c_fc.weight = pm.h[i].mlp.c_fc.weight if share_para else copy.copy(pm.h[i].mlp.c_fc.weight)
# m.h_ori[i].mlp.c_fc.bias = pm.h[i].mlp.c_fc.bias if share_para else copy.copy(pm.h[i].mlp.c_fc.bias)
# m.h_ori[i].mlp.c_proj.weight = pm.h[i].mlp.c_proj.weight if share_para else copy.copy(pm.h[i].mlp.c_proj.weight)
# m.h_ori[i].mlp.c_proj.bias = pm.h[i].mlp.c_proj.bias if share_para else copy.copy(pm.h[i].mlp.c_proj.bias)
m.ln_f.weight = pm.ln_f.weight if share_para else copy.deepcopy(pm.ln_f.weight)
# m.ln_f.weight = pm.ln_f.weight if share_para else copy.copy(pm.ln_f.weight)
m.ln_f.bias = pm.ln_f.bias if share_para else copy.deepcopy(pm.ln_f.bias)
# m.ln_f.bias = pm.ln_f.bias if share_para else copy.copy(pm.ln_f.bias) | null |
164,967 | import logging
import random
import torch
import numpy as np
from torch.utils.data import DataLoader
from dataset import PadBatchSeq, pad_seq
import json
from tqdm import tqdm
from sklearn.metrics import accuracy_score, f1_score
import torch.distributed as dist
import os, time, gc, json, pickle, argparse, math, re
import torch.nn as nn
import torch.utils.data as data
import torch.distributed as dist
import torch.multiprocessing as mp
import copy
from generate import *
def init_params(m, pm, share_para=False):
m.wte.weight = pm.wte.weight
m.wpe.weight = pm.wpe.weight
# for i in range(min(len(m.h), len(pm.h))):
# m.h[i].ln_1.weight = pm.h[i].ln_1.weight if share_para else copy.copy(pm.h[i].ln_1.weight)
# m.h[i].ln_1.bias = pm.h[i].ln_1.bias if share_para else copy.copy(pm.h[i].ln_1.bias)
# m.h[i].attn.c_attn.weight = pm.h[i].attn.c_attn.weight if share_para else copy.copy(pm.h[i].attn.c_attn.weight)
# m.h[i].attn.c_attn.bias = pm.h[i].attn.c_attn.bias if share_para else copy.copy(pm.h[i].attn.c_attn.bias)
# m.h[i].attn.c_proj.weight = pm.h[i].attn.c_proj.weight if share_para else copy.copy(pm.h[i].attn.c_proj.weight)
# m.h[i].attn.c_proj.bias = pm.h[i].attn.c_proj.bias if share_para else copy.copy(pm.h[i].attn.c_proj.bias)
# m.h[i].ln_2.weight = pm.h[i].ln_2.weight if share_para else copy.copy(pm.h[i].ln_2.weight)
# m.h[i].ln_2.bias = pm.h[i].ln_2.bias if share_para else copy.copy(pm.h[i].ln_2.bias)
# m.h[i].mlp.c_fc.weight = pm.h[i].mlp.c_fc.weight if share_para else copy.copy(pm.h[i].mlp.c_fc.weight)
# m.h[i].mlp.c_fc.bias = pm.h[i].mlp.c_fc.bias if share_para else copy.copy(pm.h[i].mlp.c_fc.bias)
# m.h[i].mlp.c_proj.weight = pm.h[i].mlp.c_proj.weight if share_para else copy.copy(pm.h[i].mlp.c_proj.weight)
# m.h[i].mlp.c_proj.bias = pm.h[i].mlp.c_proj.bias if share_para else copy.copy(pm.h[i].mlp.c_proj.bias)
# if isdecoder:
# m.h_ori[i].ln_1.weight = pm.h[i].ln_1.weight if share_para else copy.copy(pm.h[i].ln_1.weight)
# m.h_ori[i].ln_1.bias = pm.h[i].ln_1.bias if share_para else copy.copy(pm.h[i].ln_1.bias)
# m.h_ori[i].attn.c_attn.weight = pm.h[i].attn.c_attn.weight if share_para else copy.copy(pm.h[i].attn.c_attn.weight)
# m.h_ori[i].attn.c_attn.bias = pm.h[i].attn.c_attn.bias if share_para else copy.copy(pm.h[i].attn.c_attn.bias)
# m.h_ori[i].attn.c_proj.weight = pm.h[i].attn.c_proj.weight if share_para else copy.copy(pm.h[i].attn.c_proj.weight)
# m.h_ori[i].attn.c_proj.bias = pm.h[i].attn.c_proj.bias if share_para else copy.copy(pm.h[i].attn.c_proj.bias)
# m.h_ori[i].ln_2.weight = pm.h[i].ln_2.weight if share_para else copy.copy(pm.h[i].ln_2.weight)
# m.h_ori[i].ln_2.bias = pm.h[i].ln_2.bias if share_para else copy.copy(pm.h[i].ln_2.bias)
# m.h_ori[i].mlp.c_fc.weight = pm.h[i].mlp.c_fc.weight if share_para else copy.copy(pm.h[i].mlp.c_fc.weight)
# m.h_ori[i].mlp.c_fc.bias = pm.h[i].mlp.c_fc.bias if share_para else copy.copy(pm.h[i].mlp.c_fc.bias)
# m.h_ori[i].mlp.c_proj.weight = pm.h[i].mlp.c_proj.weight if share_para else copy.copy(pm.h[i].mlp.c_proj.weight)
# m.h_ori[i].mlp.c_proj.bias = pm.h[i].mlp.c_proj.bias if share_para else copy.copy(pm.h[i].mlp.c_proj.bias)
m.ln_f.weight = pm.ln_f.weight if share_para else copy.copy(pm.ln_f.weight)
m.ln_f.bias = pm.ln_f.bias if share_para else copy.copy(pm.ln_f.bias) | null |
164,968 | import logging
import random
import torch
import numpy as np
from torch.utils.data import DataLoader
from dataset import PadBatchSeq, pad_seq
import json
from tqdm import tqdm
from sklearn.metrics import accuracy_score, f1_score
import torch.distributed as dist
import os, time, gc, json, pickle, argparse, math, re
import torch.nn as nn
import torch.utils.data as data
import torch.distributed as dist
import torch.multiprocessing as mp
import copy
from generate import *
The provided code snippet includes necessary dependencies for implementing the `switch_schedule` function. Write a Python function `def switch_schedule(schedule, mult, switch)` to solve the following problem:
Apply LR multiplier before iteration "switch"
Here is the function:
def switch_schedule(schedule, mult, switch):
""" Apply LR multiplier before iteration "switch" """
def f(e):
s = schedule(e)
if e < switch:
return s * mult
return s
return f | Apply LR multiplier before iteration "switch" |
164,969 | import logging
import random
import torch
import numpy as np
from torch.utils.data import DataLoader
from dataset import PadBatchSeq, pad_seq
import json
from tqdm import tqdm
from sklearn.metrics import accuracy_score, f1_score
import torch.distributed as dist
import os, time, gc, json, pickle, argparse, math, re
import torch.nn as nn
import torch.utils.data as data
import torch.distributed as dist
import torch.multiprocessing as mp
import copy
from generate import *
def linear_schedule(args):
def f(e):
if e <= args.warmup:
return e / args.warmup
return max((e - args.iterations) / (args.warmup - args.iterations), 0)
return f | null |
164,970 | import logging
import random
import torch
import numpy as np
from torch.utils.data import DataLoader
from dataset import PadBatchSeq, pad_seq
import json
from tqdm import tqdm
from sklearn.metrics import accuracy_score, f1_score
import torch.distributed as dist
import os, time, gc, json, pickle, argparse, math, re
import torch.nn as nn
import torch.utils.data as data
import torch.distributed as dist
import torch.multiprocessing as mp
import copy
from generate import *
def get_mask(x_len):
mask = torch.arange(max(x_len), device=x_len.device)[None, :] < x_len[:, None] # [bs, max_len]
return mask.bool() | null |
164,971 | import logging
import random
import torch
import numpy as np
from torch.utils.data import DataLoader
from dataset import PadBatchSeq, pad_seq
import json
from tqdm import tqdm
from sklearn.metrics import accuracy_score, f1_score
import torch.distributed as dist
import os, time, gc, json, pickle, argparse, math, re
import torch.nn as nn
import torch.utils.data as data
import torch.distributed as dist
import torch.multiprocessing as mp
import copy
from generate import *
def compare_tokens(x, y, eos_id):
if eos_id in x:
x = x[:x.index(eos_id)]
if eos_id in y:
y = y[:y.index(eos_id)]
return x == y | null |
164,972 | import logging
import random
import torch
import numpy as np
from torch.utils.data import DataLoader
from dataset import PadBatchSeq, pad_seq
import json
from tqdm import tqdm
from sklearn.metrics import accuracy_score, f1_score
import torch.distributed as dist
import os, time, gc, json, pickle, argparse, math, re
import torch.nn as nn
import torch.utils.data as data
import torch.distributed as dist
import torch.multiprocessing as mp
import copy
from generate import *
def seed_everything(seed=42):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
os.environ['PYTHONHASHSEED'] = str(seed)
torch.cuda.manual_seed_all(seed) | null |
164,973 | import logging
import random
import torch
import numpy as np
from torch.utils.data import DataLoader
from dataset import PadBatchSeq, pad_seq
import json
from tqdm import tqdm
from sklearn.metrics import accuracy_score, f1_score
import torch.distributed as dist
import os, time, gc, json, pickle, argparse, math, re
import torch.nn as nn
import torch.utils.data as data
import torch.distributed as dist
import torch.multiprocessing as mp
import copy
from generate import *
def pad_tensor(tensor, length, pad_token=0):
return torch.cat([tensor, tensor.new(tensor.size(0), length - tensor.size()[1]).fill_(pad_token)], dim=1)
The provided code snippet includes necessary dependencies for implementing the `communicate_tensor` function. Write a Python function `def communicate_tensor(tensor_list, pad_token=0)` to solve the following problem:
collect tensors from all processes
Here is the function:
def communicate_tensor(tensor_list, pad_token=0):
'''
collect tensors from all processes
'''
if len(tensor_list) == 0:
return None
device = tensor_list[0].device
max_len = torch.tensor(max([i.shape[1] for i in tensor_list]), dtype=torch.int64, device=device)
if dist.is_initialized(): # Obtain the max_len of the second dim of each tensor
dist.all_reduce(max_len, op=dist.ReduceOp.MAX)
# Pad tensors to the max_len
tensor = torch.cat([pad_tensor(i, max_len, pad_token) for i in tensor_list], dim=0)
tensor_bs = torch.tensor(tensor.shape[0], dtype=torch.int64, device=device)
max_tensor_bs = torch.tensor(tensor.shape[0], dtype=torch.int64, device=device)
if dist.is_initialized():
dist.all_reduce(max_tensor_bs, op=dist.ReduceOp.MAX) # Obtain the max_tensor_bs of each tensor
if max_tensor_bs != tensor_bs:
tensor = torch.cat([tensor, tensor.new(max_tensor_bs-tensor_bs, tensor.shape[1]).fill_(pad_token)], dim=0)
# Gather padded tensors and the bs of each tensor
tensor_list = [torch.ones_like(tensor).fill_(pad_token) for _ in range(dist.get_world_size())]
tensor_bs_list = [torch.ones_like(tensor_bs).fill_(pad_token) for _ in range(dist.get_world_size())]
dist.all_gather(tensor_list=tensor_list, tensor=tensor.contiguous())
dist.all_gather(tensor_list=tensor_bs_list, tensor=tensor_bs)
# Cut the padded batch
for i in range(dist.get_world_size()):
tensor_list[i] = tensor_list[i][:tensor_bs_list[i]]
tensor = torch.cat(tensor_list, dim=0)
return tensor | collect tensors from all processes |
164,974 | import logging
import random
import torch
import numpy as np
from torch.utils.data import DataLoader
from dataset import PadBatchSeq, pad_seq
import json
from tqdm import tqdm
from sklearn.metrics import accuracy_score, f1_score
import torch.distributed as dist
import os, time, gc, json, pickle, argparse, math, re
import torch.nn as nn
import torch.utils.data as data
import torch.distributed as dist
import torch.multiprocessing as mp
import copy
from generate import *
def get_reverse_mask(x_len):
def cut_eos(seq, eos_id):
class PadBatchSeq:
def __init__(self, pad_id=0):
def __call__(self, batch):
def infer_model_pred(model, tokz, dataset, outfile, batch_size=30):
max_ans_len = dataset.max_ans_len + 1
data_loader = DataLoader(dataset, batch_size=batch_size, num_workers=3, pin_memory=True, collate_fn=PadBatchSeq(0))
device = model.device
with open(outfile, 'w', encoding='utf-8') as f:
with torch.no_grad():
model.eval()
for i, data in enumerate(data_loader):
bs = data['context_id'].shape[0]
context = data['context_id'].to(device, non_blocking=True)
context_lens = data['context_lens'].to(device, non_blocking=True)
mask = get_reverse_mask(context_lens)
output_sequence = model.generate(
input_ids=context, attention_mask=mask, do_sample=False, eos_token_id=tokz.eos_token_id,
pad_token_id=tokz.eos_token_id, max_length=context.shape[1] + max_ans_len, early_stopping=True)
cls_res = output_sequence[:,context.shape[1]:].tolist()
ans = data['ans_id'].tolist()
for i in range(bs):
res = {}
res['context'] = tokz.decode(context[i][-context_lens[i]:])
res['ans_gold'] = tokz.decode(ans[i][:data['ans_lens'][i]-1])
res['ans_pred'] = tokz.decode(cut_eos(cls_res[i], tokz.eos_token_id))
print(json.dumps(res), file=f) | null |
164,975 | import logging
import random
import torch
import numpy as np
from torch.utils.data import DataLoader
from dataset import PadBatchSeq, pad_seq
import json
from tqdm import tqdm
from sklearn.metrics import accuracy_score, f1_score
import torch.distributed as dist
import os, time, gc, json, pickle, argparse, math, re
import torch.nn as nn
import torch.utils.data as data
import torch.distributed as dist
import torch.multiprocessing as mp
import copy
from generate import *
def cal_metrics_from_pred_files(res_file):
with open(res_file, 'r', encoding='utf-8') as f:
res = [json.loads(i) for i in f.readlines()]
y_true = [i['ans_gold'] for i in res]
y_pred = [i['ans_pred'] for i in res]
return {
"accuracy": accuracy_score(y_true, y_pred),
} | null |
164,976 | import logging
import random
import torch
import numpy as np
from torch.utils.data import DataLoader
from dataset import PadBatchSeq, pad_seq
import json
from tqdm import tqdm
from sklearn.metrics import accuracy_score, f1_score
import torch.distributed as dist
import os, time, gc, json, pickle, argparse, math, re
import torch.nn as nn
import torch.utils.data as data
import torch.distributed as dist
import torch.multiprocessing as mp
import copy
from generate import *
The provided code snippet includes necessary dependencies for implementing the `slot_f1_score` function. Write a Python function `def slot_f1_score(pred_slots, true_slots)` to solve the following problem:
pred_slots, true_slots are like [['from_location:10-11', 'leaving_date:12-13']]
Here is the function:
def slot_f1_score(pred_slots, true_slots):
'''
pred_slots, true_slots are like [['from_location:10-11', 'leaving_date:12-13']]
'''
slot_types = set([slot.split(":")[0] for row in true_slots for slot in row])
slot_type_f1_scores = []
for slot_type in slot_types:
predictions_for_slot = [[p for p in prediction if slot_type in p] for prediction in pred_slots] # [['from_location'],[],[],['from_location']]
labels_for_slot = [[l for l in label if slot_type in l] for label in true_slots]
proposal_made = [len(p) > 0 for p in predictions_for_slot]
has_label = [len(l) > 0 for l in labels_for_slot]
prediction_correct = [prediction == label for prediction, label in zip(predictions_for_slot, labels_for_slot)]
true_positives = sum([
int(proposed and correct)
for proposed, correct in zip(proposal_made, prediction_correct)])
num_predicted = sum([int(proposed) for proposed in proposal_made])
num_to_recall = sum([int(hl) for hl in has_label])
precision = true_positives / (1e-5 + num_predicted)
recall = true_positives / (1e-5 + num_to_recall)
f1_score = 2 * precision * recall / (1e-5 + precision + recall)
slot_type_f1_scores.append(f1_score)
return np.mean(slot_type_f1_scores) | pred_slots, true_slots are like [['from_location:10-11', 'leaving_date:12-13']] |
164,977 | import logging
import random
import torch
import numpy as np
from torch.utils.data import DataLoader
from dataset import PadBatchSeq, pad_seq
import json
from tqdm import tqdm
from sklearn.metrics import accuracy_score, f1_score
import torch.distributed as dist
import os, time, gc, json, pickle, argparse, math, re
import torch.nn as nn
import torch.utils.data as data
import torch.distributed as dist
import torch.multiprocessing as mp
import copy
from generate import *
def get_answer(tokz ,model, example, example_lens, max_ans_len, sampling=True, temperature=0.95, top_k=100, top_p=0.95):
def textid_decode(text, eos, tokz):
def padding_convert(text_list, eos):
def gen_pseudo_data(model, task, dataset, max_output_len=90, batch_size=30, target_count=100, output_file=None,
top_k=100, top_p=0.95, temperature=1, only_decoder=False):
device = 'cuda'
prompt_id = [dataset.tokz.bos_token_id] + dataset.pseudo_data_prompt_id + [dataset.tokz.eos_token_id]
prompt_id = torch.LongTensor([prompt_id for _ in range(batch_size)]).to(device)
# prompt length is 15 for intent detection atis dataset.
prompt_mask, prompt_lens = None, None
ans_prompt_id = dataset.pseudo_ans_prompt_id
ans_prompt_id = torch.LongTensor([ans_prompt_id for _ in range(batch_size)]).to(device)
res = []
utter_set = set()
eos_token = dataset.tokz.eos_token_id
if os.path.exists(output_file):
os.remove(output_file)
if output_file is not None:
if not os.path.isdir(os.path.dirname(output_file)):
os.makedirs(os.path.dirname(output_file), exist_ok=True)
genefile = open(output_file,'w', encoding='utf8')
while len(res) < target_count:
with torch.no_grad():
model.eval()
output_seq = sample_sequence(model, dataset.tokz, length=max_output_len, batch_size=batch_size,
x_tokens=prompt_id, x_mask=prompt_mask, x_lens=prompt_lens, temperature=temperature,
top_k=top_k, top_p=top_p, sampling=True, only_decoder=only_decoder)
output_list = output_seq.tolist()
# print(prompt_id.size(),ans_prompt_id.size(),output_seq.size(),flush=True)
# * prompt (w/o eos) + generated utterence + answer_prompt as the pseudo input for the LM.
# lm_input_list = torch.cat((prompt_id[..., :-1], output_seq, ans_prompt_id), dim=1).tolist()
# Change right padding to left padding.
# lm_input, lm_input_lens = padding_convert(lm_input_list, eos_token)
# if not only_decoder:
# label_batch = get_answer(eos_token, model, lm_input, lm_input_lens, max_ans_len=10).tolist()
for i in range(batch_size):
# print('LM INPUT::', dataset.tokz.decode(lm_input[i]), flush=True)
# print('UTTER BEFORE::', dataset.tokz.decode(output_list[i]), flush=True)
output = textid_decode(output_list[i], eos_token, dataset.tokz)
if 'Answer:' in output:
regex = re.compile('(.+) \"\? Answer:')
utter1 = regex.search(output)
if utter1 is not None:
utter = utter1.group(1)
utter_id = dataset.tokz.encode(utter)
else:
utter = output
utter_id = output_list[i]
# * Get labels.
if not only_decoder:
lm_input = [torch.cat((prompt_id[i,:-1], output_seq[i,:], ans_prompt_id[i,:])).tolist()]
lm_input, lm_input_lens = padding_convert(lm_input, eos_token)
label_id = get_answer(tokz, model, lm_input, lm_input_lens, max_ans_len=10).tolist()[0] # ? Not finished.
# label_id = label_batch[i]
label = textid_decode(label_id, eos_token, dataset.tokz)
elif 'Answer:' in output:
label = re.findall('(?<=Answer: ).*$', output)[0]
if utter is not None and label != '':
print('UTTER::', utter,'====>> LABEL::', label, flush=True)
if utter not in utter_set:
utter_set.add(utter) # avoid duplicate utterance
res.append([utter, label])
print(json.dumps({'Utterence': utter, 'Label': label}, ensure_ascii=False), file=genefile, flush=True)
res = res[:target_count] # only output the first target_count utterances
return res[:target_count] | null |
164,978 | import logging
import random
import torch
import numpy as np
from torch.utils.data import DataLoader
from dataset import PadBatchSeq, pad_seq
import json
from tqdm import tqdm
from sklearn.metrics import accuracy_score, f1_score
import torch.distributed as dist
import os, time, gc, json, pickle, argparse, math, re
import torch.nn as nn
import torch.utils.data as data
import torch.distributed as dist
import torch.multiprocessing as mp
import copy
from generate import *
def infer_batch_pseudo_data(model, dataset, max_output_len=90, batch_size=30,
                            temperature=1, top_k=100, top_p=0.95):
    """Sample one batch of pseudo examples from the model's pseudo-data prompt.

    Args:
        model: language model used for sampling (must expose ``.device``).
        dataset: dataset object providing ``tokz``, ``pseudo_data_prompt_id``
            and ``parse_pseudo_data()``.
        max_output_len: number of tokens to generate beyond the prompt.
        batch_size: number of sequences sampled in one call.
        temperature, top_k, top_p: sampling hyper-parameters forwarded to
            ``sample_sequence`` (new keyword params with the usual defaults,
            so existing call sites are unaffected).

    Returns:
        list: parsed pseudo examples; samples for which
        ``parse_pseudo_data`` returned ``None`` are dropped.
    """
    prompt_id = [dataset.tokz.bos_token_id] + dataset.pseudo_data_prompt_id
    max_output_len += len(prompt_id)
    prompt_id = torch.LongTensor(
        [prompt_id for _ in range(batch_size)]).to(model.device)
    with torch.no_grad():
        model.eval()
        # FIX: the original referenced undefined locals (x_mask, x_tokens,
        # temperature, top_k, top_p, device) and raised NameError. The batched
        # prompt is the conditioning input, so pass it explicitly; the sampling
        # knobs are now parameters; device comes from the model.
        # NOTE(review): assumes generate.sample_sequence accepts the
        # x_tokens/x_mask/eos_token/device kwargs and returns a (sequences, _)
        # pair — confirm against the generate module.
        output_seq, _ = sample_sequence(model, dataset.tokz, length=max_output_len, batch_size=batch_size,
                                        x_mask=None, x_tokens=prompt_id, temperature=temperature,
                                        top_k=top_k, top_p=top_p, eos_token=dataset.tokz.eos_token_id,
                                        device=model.device)
    output_seq = output_seq.tolist()
    res = []
    for i in range(batch_size):
        output = output_seq[i][1:]  # drop the leading BOS token
        if dataset.tokz.eos_token_id in output:
            output = output[:output.index(dataset.tokz.eos_token_id)]
        output = dataset.tokz.decode(output)
        output = dataset.parse_pseudo_data(output)
        if output is not None:
            res.append(output)
    return res
164,979 | import logging
import random
import torch
import numpy as np
from torch.utils.data import DataLoader
from dataset import PadBatchSeq, pad_seq
import json
from tqdm import tqdm
from sklearn.metrics import accuracy_score, f1_score
import torch.distributed as dist
import os, time, gc, json, pickle, argparse, math, re
import torch.nn as nn
import torch.utils.data as data
import torch.distributed as dist
import torch.multiprocessing as mp
import copy
from generate import *
def strip_list(seq, eos_id):
    """Strip leading and trailing `eos_id` padding tokens from a token list.

    FIX: the original initialised l=0 / r=len(seq)-1 and returned
    seq[l+1:r], so a sequence with no leading (or trailing) eos_id lost its
    first (or last) real token. Scan past the padding explicitly instead;
    fully padded (or empty) input yields [].
    """
    left = 0
    while left < len(seq) and seq[left] == eos_id:
        left += 1
    right = len(seq)
    while right > left and seq[right - 1] == eos_id:
        right -= 1
    return seq[left:right]
164,980 | import json
import numpy as np
import os
def get_slot_list(data_path):
    """Collect the sorted list of unique slot names across all data splits.

    Reads train/valid/test JSON-lines files under `data_path`; each line is a
    sample whose optional 'labels' entries carry a 'slot' field.
    """
    splits = ['train.json', 'valid.json', 'test.json']
    slots = set()
    for split in splits:
        split_path = os.path.join(data_path, split)
        with open(split_path, 'r') as f:
            samples = [json.loads(line) for line in f.readlines()]
        for sample in samples:
            for label in sample.get('labels', []):
                slots.add(label['slot'])
    # * Record all slot labels with BIO tagging.
    return sorted(slots)
164,981 | import logging
import random
import torch
import numpy as np
from torch.utils.data import DataLoader
from dataset import PadBatchSeq, pad_seq
import json
from tqdm import tqdm
from sklearn.metrics import accuracy_score, f1_score
import torch.distributed as dist
import os, time, gc, json, pickle, argparse, math, re
import torch.nn as nn
import torch.utils.data as data
import torch.distributed as dist
import torch.multiprocessing as mp
import copy
from transformers import GPT2Config, GPT2Model, GPT2LMHeadModel
import torch.nn.functional as F
# Cache of already-configured loggers, keyed by log-file path.
loggers = {}
def get_logger(filename, level=logging.INFO, print2screen=True):
    """Create (or fetch the cached) logger writing to `filename`.

    Args:
        filename: path of the log file; also used as logger name / cache key.
        level: logging level applied to the logger and both handlers.
        print2screen: when True, also echo records to the console.

    Returns:
        logging.Logger: configured logger, cached across calls.
    """
    global loggers
    # FIX: check the cache BEFORE deleting the file. The original removed the
    # file first, which unlinked the log a cached logger's FileHandler was
    # still writing to, silently discarding all subsequent records.
    if loggers.get(filename):
        return loggers.get(filename)
    if os.path.exists(filename):
        os.remove(filename)  # start a fresh log for a newly created logger
    logger = logging.getLogger(filename)
    logger.setLevel(level)
    fh = logging.FileHandler(filename, encoding='utf-8')
    fh.setLevel(level)
    ch = logging.StreamHandler()
    ch.setLevel(level)
    formatter = logging.Formatter('[%(asctime)s][%(filename)s][line: %(lineno)d][%(levelname)s] >> %(message)s')
    fh.setFormatter(formatter)
    ch.setFormatter(formatter)
    logger.addHandler(fh)
    if print2screen:
        logger.addHandler(ch)
    loggers[filename] = logger
    return logger
164,982 | import logging
import random
import torch
import numpy as np
from torch.utils.data import DataLoader
from dataset import PadBatchSeq, pad_seq
import json
from tqdm import tqdm
from sklearn.metrics import accuracy_score, f1_score
import torch.distributed as dist
import os, time, gc, json, pickle, argparse, math, re
import torch.nn as nn
import torch.utils.data as data
import torch.distributed as dist
import torch.multiprocessing as mp
import copy
from transformers import GPT2Config, GPT2Model, GPT2LMHeadModel
import torch.nn.functional as F
def frange_cycle_linear(n_iter, start=0.01, stop=1.0, n_cycle=4, ratio=0.5):
    """Cyclical linear annealing schedule over `n_iter` steps.

    Each of the `n_cycle` cycles ramps linearly from `start` to `stop` over
    the first `ratio` fraction of the cycle, then holds at `stop` for the
    remainder. Returns a numpy array of length `n_iter`.
    """
    schedule = np.ones(n_iter) * stop
    period = n_iter / n_cycle
    step = (stop - start) / (period * ratio)  # linear ramp increment
    for cycle in range(n_cycle):
        offset = cycle * period
        value = start
        idx = 0
        while value <= stop:
            pos = int(idx + offset)
            if pos >= n_iter:
                break
            schedule[pos] = value
            value += step
            idx += 1
    return schedule
164,983 | import logging
import random
import torch
import numpy as np
from torch.utils.data import DataLoader
from dataset import PadBatchSeq, pad_seq
import json
from tqdm import tqdm
from sklearn.metrics import accuracy_score, f1_score
import torch.distributed as dist
import os, time, gc, json, pickle, argparse, math, re
import torch.nn as nn
import torch.utils.data as data
import torch.distributed as dist
import torch.multiprocessing as mp
import copy
from transformers import GPT2Config, GPT2Model, GPT2LMHeadModel
import torch.nn.functional as F
def num_params(model):
    """Return the total number of trainable (requires_grad) parameters in `model`."""
    total = 0
    for param in model.parameters():
        if param.requires_grad:
            total += np.prod(param.size())
    return total
164,984 | import logging
import random
import torch
import numpy as np
from torch.utils.data import DataLoader
from dataset import PadBatchSeq, pad_seq
import json
from tqdm import tqdm
from sklearn.metrics import accuracy_score, f1_score
import torch.distributed as dist
import os, time, gc, json, pickle, argparse, math, re
import torch.nn as nn
import torch.utils.data as data
import torch.distributed as dist
import torch.multiprocessing as mp
import copy
from transformers import GPT2Config, GPT2Model, GPT2LMHeadModel
import torch.nn.functional as F
The provided code snippet includes necessary dependencies for implementing the `switch_schedule` function. Write a Python function `def switch_schedule(schedule, mult, switch)` to solve the following problem:
Apply LR multiplier before iteration "switch"
Here is the function:
def switch_schedule(schedule, mult, switch):
    """Apply LR multiplier before iteration "switch" """
    def wrapped(e):
        base = schedule(e)
        return base * mult if e < switch else base
    return wrapped
164,985 | import logging
import random
import torch
import numpy as np
from torch.utils.data import DataLoader
from dataset import PadBatchSeq, pad_seq
import json
from tqdm import tqdm
from sklearn.metrics import accuracy_score, f1_score
import torch.distributed as dist
import os, time, gc, json, pickle, argparse, math, re
import torch.nn as nn
import torch.utils.data as data
import torch.distributed as dist
import torch.multiprocessing as mp
import copy
from transformers import GPT2Config, GPT2Model, GPT2LMHeadModel
import torch.nn.functional as F
def linear_schedule(args):
    """Linear warmup to 1.0 over `args.warmup` steps, then linear decay to 0 at `args.iterations`."""
    def schedule(step):
        if step <= args.warmup:
            return step / args.warmup
        decayed = (step - args.iterations) / (args.warmup - args.iterations)
        return max(decayed, 0)
    return schedule
164,986 | import logging
import random
import torch
import numpy as np
from torch.utils.data import DataLoader
from dataset import PadBatchSeq, pad_seq
import json
from tqdm import tqdm
from sklearn.metrics import accuracy_score, f1_score
import torch.distributed as dist
import os, time, gc, json, pickle, argparse, math, re
import torch.nn as nn
import torch.utils.data as data
import torch.distributed as dist
import torch.multiprocessing as mp
import copy
from transformers import GPT2Config, GPT2Model, GPT2LMHeadModel
import torch.nn.functional as F
def get_mask(x_len):
    """Right-padding attention mask: row i is True for the first x_len[i] positions.

    Returns a bool tensor of shape [bs, max_len].
    """
    positions = torch.arange(max(x_len), device=x_len.device)
    mask = positions.unsqueeze(0) < x_len.unsqueeze(1)
    return mask.bool()
164,987 | import logging
import random
import torch
import numpy as np
from torch.utils.data import DataLoader
from dataset import PadBatchSeq, pad_seq
import json
from tqdm import tqdm
from sklearn.metrics import accuracy_score, f1_score
import torch.distributed as dist
import os, time, gc, json, pickle, argparse, math, re
import torch.nn as nn
import torch.utils.data as data
import torch.distributed as dist
import torch.multiprocessing as mp
import copy
from transformers import GPT2Config, GPT2Model, GPT2LMHeadModel
import torch.nn.functional as F
def compare_tokens(x, y, eos_id):
    """True when token lists x and y are equal up to (excluding) the first eos_id."""
    def trim(seq):
        return seq[:seq.index(eos_id)] if eos_id in seq else seq
    return trim(x) == trim(y)
164,988 | import logging
import random
import torch
import numpy as np
from torch.utils.data import DataLoader
from dataset import PadBatchSeq, pad_seq
import json
from tqdm import tqdm
from sklearn.metrics import accuracy_score, f1_score
import torch.distributed as dist
import os, time, gc, json, pickle, argparse, math, re
import torch.nn as nn
import torch.utils.data as data
import torch.distributed as dist
import torch.multiprocessing as mp
import copy
from transformers import GPT2Config, GPT2Model, GPT2LMHeadModel
import torch.nn.functional as F
def seed_everything(seed=42):
    """Seed every RNG source (hash, random, numpy, torch CPU/CUDA) and force deterministic cuDNN."""
    os.environ['PYTHONHASHSEED'] = str(seed)
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # Trade cuDNN autotuning for reproducible kernels.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
164,989 | import logging
import random
import torch
import numpy as np
from torch.utils.data import DataLoader
from dataset import PadBatchSeq, pad_seq
import json
from tqdm import tqdm
from sklearn.metrics import accuracy_score, f1_score
import torch.distributed as dist
import os, time, gc, json, pickle, argparse, math, re
import torch.nn as nn
import torch.utils.data as data
import torch.distributed as dist
import torch.multiprocessing as mp
import copy
from transformers import GPT2Config, GPT2Model, GPT2LMHeadModel
import torch.nn.functional as F
def pad_tensor(tensor, length, pad_token=0):
    """Right-pad dim 1 of a 2-D tensor to `length` with `pad_token` (same dtype/device)."""
    pad_width = length - tensor.size(1)
    filler = tensor.new_full((tensor.size(0), pad_width), pad_token)
    return torch.cat([tensor, filler], dim=1)
The provided code snippet includes necessary dependencies for implementing the `communicate_tensor` function. Write a Python function `def communicate_tensor(tensor_list, pad_token=0)` to solve the following problem:
collect tensors from all processes
Here is the function:
def communicate_tensor(tensor_list, pad_token=0):
    '''
    collect tensors from all processes

    Concatenates a list of 2-D [bs, len] tensors into one batch and, when
    torch.distributed is initialized, all-gathers the batches from every
    rank into a single tensor.

    Args:
        tensor_list: list of 2-D tensors; dim 1 may differ per tensor.
        pad_token: fill value used for both length- and batch-dim padding.

    Returns:
        One 2-D tensor with all rows (from all ranks when distributed), or
        None when tensor_list is empty.
    '''
    if len(tensor_list) == 0:
        return None
    device = tensor_list[0].device
    # Local max sequence length (dim 1) across this rank's tensors.
    max_len = torch.tensor(max([i.shape[1] for i in tensor_list]), dtype=torch.int64, device=device)
    if dist.is_initialized(): # Agree on the global max_len across ranks so shapes match for all_gather
        dist.all_reduce(max_len, op=dist.ReduceOp.MAX)
    # Pad tensors to the max_len
    tensor = torch.cat([pad_tensor(i, max_len, pad_token) for i in tensor_list], dim=0)
    tensor_bs = torch.tensor(tensor.shape[0], dtype=torch.int64, device=device)
    max_tensor_bs = torch.tensor(tensor.shape[0], dtype=torch.int64, device=device)
    if dist.is_initialized():
        dist.all_reduce(max_tensor_bs, op=dist.ReduceOp.MAX) # Agree on the global max batch size
        if max_tensor_bs != tensor_bs:
            # Pad this rank's batch dim so every rank contributes the same shape.
            tensor = torch.cat([tensor, tensor.new(max_tensor_bs-tensor_bs, tensor.shape[1]).fill_(pad_token)], dim=0)
        # Gather padded tensors and the true (unpadded) batch size of each rank
        tensor_list = [torch.ones_like(tensor).fill_(pad_token) for _ in range(dist.get_world_size())]
        tensor_bs_list = [torch.ones_like(tensor_bs).fill_(pad_token) for _ in range(dist.get_world_size())]
        dist.all_gather(tensor_list=tensor_list, tensor=tensor.contiguous())
        dist.all_gather(tensor_list=tensor_bs_list, tensor=tensor_bs)
        # Cut each gathered batch back to its true size before concatenating
        for i in range(dist.get_world_size()):
            tensor_list[i] = tensor_list[i][:tensor_bs_list[i]]
        tensor = torch.cat(tensor_list, dim=0)
    return tensor
164,990 | import logging
import random
import torch
import numpy as np
from torch.utils.data import DataLoader
from dataset import PadBatchSeq, pad_seq
import json
from tqdm import tqdm
from sklearn.metrics import accuracy_score, f1_score
import torch.distributed as dist
import os, time, gc, json, pickle, argparse, math, re
import torch.nn as nn
import torch.utils.data as data
import torch.distributed as dist
import torch.multiprocessing as mp
import copy
from transformers import GPT2Config, GPT2Model, GPT2LMHeadModel
import torch.nn.functional as F
def get_reverse_mask(x_len):
    """Left-padding attention mask: row i is True for the LAST x_len[i] positions.

    Returns a bool tensor of shape [bs, max_len].
    """
    positions = torch.arange(max(x_len) - 1, -1, -1, device=x_len.device)
    mask = positions.unsqueeze(0) < x_len.unsqueeze(1)
    return mask.bool()
def cut_eos(seq, eos_id):
    """Return `seq` truncated before the first `eos_id` (unchanged if absent)."""
    try:
        return seq[:seq.index(eos_id)]
    except ValueError:
        return seq
class PadBatchSeq:
    """DataLoader collate callable: pads every id field of a batch to its
    per-batch max length and builds the matching attention and loss masks.

    Fields come in task-specific and general ("gene_"/"general_") variants.
    Context ids are LEFT-padded (for generation); all other ids are
    right-padded. Label masks select only the tokens losses are computed on.
    """
    def __init__(self, pad_id=0):
        # pad_id: token id used to fill all padded positions.
        self.pad_id = pad_id
    def __call__(self, batch):
        """Collate a list of example dicts into padded LongTensors + masks,
        returned wrapped in a PinnedBatch.

        NOTE(review): PinnedBatch is defined elsewhere in this project —
        presumably a dict wrapper enabling pinned-memory transfer; confirm.
        """
        # Fetch all ids.
        utter_id = [i['utter_id'] for i in batch]
        input_id = [i['input_id'] for i in batch]
        posterior_id = [i['posterior_id'] for i in batch]
        prompt_id = [i['prompt_id'] for i in batch]
        gene_prompt_id = [i['gene_prompt_id'] for i in batch]
        gene_input_id = [i['gene_input_id'] for i in batch]
        gene_posterior_id = [i['gene_posterior_id'] for i in batch]
        context_id = [i['context_id'] for i in batch]
        general_context_id = [i['general_context_id'] for i in batch]
        ans_id = [i['ans_id'] for i in batch]
        all_id = [i['all_id'] for i in batch]
        gene_all_id = [i['gene_all_id'] for i in batch]
        # Store lengths (pre-padding) for each field.
        all_lens = [len(i) for i in all_id]
        gene_all_lens = [len(i) for i in gene_all_id]
        context_lens = [len(i) for i in context_id]
        general_context_lens = [len(i) for i in general_context_id]
        ans_lens = [len(i) for i in ans_id]
        prompt_lens = [len(i) for i in prompt_id]
        gene_prompt_lens = [len(i) for i in gene_prompt_id]
        input_lens = [len(i) for i in input_id]
        gene_input_lens = [len(i) for i in gene_input_id]
        posterior_lens = [len(i) for i in posterior_id]
        gene_posterior_lens = [len(i) for i in gene_posterior_id]
        utter_lens = [len(i) for i in utter_id]
        # Construct attention masks: 1 over real tokens, 0 over padding.
        ans_mask = torch.ByteTensor([[1] * ans_lens[i] + [0] * (max(ans_lens)-ans_lens[i]) for i in range(len(ans_id))])
        context_mask = torch.ByteTensor([[1] * context_lens[i] + [0] * (max(context_lens)-context_lens[i]) for i in range(len(context_id))])
        general_context_mask = torch.ByteTensor([[1] * general_context_lens[i] + [0] * (max(general_context_lens)-general_context_lens[i]) for i in range(len(general_context_id))])
        prompt_mask = torch.ByteTensor([[1] * prompt_lens[i] + [0] * (max(prompt_lens)-prompt_lens[i]) for i in range(len(prompt_id))])
        gene_prompt_mask = torch.ByteTensor([[1] * gene_prompt_lens[i] + [0] * (max(gene_prompt_lens)-gene_prompt_lens[i]) for i in range(len(gene_prompt_id))])
        input_mask = torch.ByteTensor([[1] * input_lens[i] + [0] * (max(input_lens)-input_lens[i]) for i in range(len(input_id))])
        gene_input_mask = torch.ByteTensor([[1] * gene_input_lens[i] + [0] * (max(gene_input_lens)-gene_input_lens[i]) for i in range(len(gene_input_id))])
        utter_mask = torch.ByteTensor([[1] * utter_lens[i] + [0] * (max(utter_lens)-utter_lens[i]) for i in range(len(utter_id))])
        posterior_mask = torch.ByteTensor([[1] * posterior_lens[i] + [0] * (max(posterior_lens)-posterior_lens[i]) for i in range(len(posterior_id))])
        gene_posterior_mask = torch.ByteTensor([[1] * gene_posterior_lens[i] + [0] * (max(gene_posterior_lens)-gene_posterior_lens[i]) for i in range(len(gene_posterior_id))])
        all_mask = torch.ByteTensor([[1] * all_lens[i] + [0] * (max(all_lens)-all_lens[i]) for i in range(len(all_id))])
        gene_all_mask = torch.ByteTensor([[1] * gene_all_lens[i] + [0] * (max(gene_all_lens)-gene_all_lens[i]) for i in range(len(gene_all_id))])
        # * Label masks: zero out prompt/context tokens so the LM loss is only
        # * computed on the continuation (utterance + answer) tokens.
        all_label_mask = torch.ByteTensor([[0] * (context_lens[i]) + [1] * (all_lens[i] - context_lens[i]) + [0] * (max(all_lens)-all_lens[i]) for i in range(len(all_id))]) # Only calculate losses on answer tokens.
        gene_all_label_mask = torch.ByteTensor([[0] * (general_context_lens[i]) + [1] * (gene_all_lens[i] - general_context_lens[i]) + [0] * (max(gene_all_lens)-gene_all_lens[i]) for i in range(len(gene_all_id))]) # Only calculate losses on answer tokens.
        input_label_mask = torch.ByteTensor([[0] * (prompt_lens[i]-1) +[1] * (input_lens[i]-prompt_lens[i]+1) + [0] * (max(input_lens)-input_lens[i]) for i in range(len(input_id))]) # Skip losses on prompt tokens (offset by 1 for the shifted LM target).
        gene_input_label_mask = torch.ByteTensor([[0] * (gene_prompt_lens[i]-1) +[1] * (gene_input_lens[i]-gene_prompt_lens[i]+1) + [0] * (max(gene_input_lens)-gene_input_lens[i]) for i in range(len(gene_input_id))]) # Skip losses on prompt tokens (offset by 1 for the shifted LM target).
        # Return ids and masks. Contexts are left-padded so generation starts
        # at the sequence end; everything else is right-padded.
        res = {}
        res['prompt_id'] = torch.tensor([pad_seq(i, self.pad_id, max(prompt_lens)) for i in prompt_id], dtype=torch.long)
        res['gene_prompt_id'] = torch.tensor([pad_seq(i, self.pad_id, max(gene_prompt_lens)) for i in gene_prompt_id], dtype=torch.long)
        res['input_id'] = torch.tensor([pad_seq(i, self.pad_id, max(input_lens)) for i in input_id], dtype=torch.long)
        res['gene_input_id'] = torch.tensor([pad_seq(i, self.pad_id, max(gene_input_lens)) for i in gene_input_id], dtype=torch.long)
        res['posterior_id'] = torch.tensor([pad_seq(i, self.pad_id, max(posterior_lens)) for i in posterior_id], dtype=torch.long)
        res['gene_posterior_id'] = torch.tensor([pad_seq(i, self.pad_id, max(gene_posterior_lens)) for i in gene_posterior_id], dtype=torch.long)
        res["ans_id"] = torch.tensor([pad_seq(i, self.pad_id, max(ans_lens)) for i in ans_id], dtype=torch.long)
        res["context_id"] = torch.tensor([pad_seq(i, self.pad_id, max(context_lens), pad_left=True) for i in context_id], dtype=torch.long)
        res["general_context_id"] = torch.tensor([pad_seq(i, self.pad_id, max(general_context_lens), pad_left=True) for i in general_context_id], dtype=torch.long)
        res["all_id"] = torch.tensor([pad_seq(i, self.pad_id, max(all_lens)) for i in all_id], dtype=torch.long)
        res["gene_all_id"] = torch.tensor([pad_seq(i, self.pad_id, max(gene_all_lens)) for i in gene_all_id], dtype=torch.long)
        res["utter_id"] = torch.tensor([pad_seq(i, self.pad_id, max(utter_lens)) for i in utter_id], dtype=torch.long)
        res["all_lens"] = torch.tensor(all_lens, dtype=torch.long)
        res["context_lens"] = torch.tensor(context_lens, dtype=torch.long)
        res["general_context_lens"] = torch.tensor(general_context_lens, dtype=torch.long)
        res["ans_lens"] = torch.tensor(ans_lens, dtype=torch.long)
        res["prompt_lens"] = torch.tensor(prompt_lens, dtype=torch.long)
        res["all_mask"], res["context_mask"], res['prompt_mask'], res['ans_mask'], res['input_mask'] = all_mask, context_mask, prompt_mask, ans_mask, input_mask
        res['utter_mask'] = utter_mask
        res['posterior_mask'] = posterior_mask
        res['input_label_mask'] = input_label_mask
        res['all_label_mask'] = all_label_mask
        res['general_context_mask'] = general_context_mask
        res['gene_all_mask'] = gene_all_mask
        res['gene_input_mask'] = gene_input_mask
        res['gene_input_label_mask'] = gene_input_label_mask
        res['gene_prompt_mask'] = gene_prompt_mask
        res['gene_all_label_mask'] = gene_all_label_mask
        # NOTE(review): duplicate assignment — 'gene_input_label_mask' was
        # already set above with the same value; harmless but redundant.
        res['gene_input_label_mask'] = gene_input_label_mask
        res['gene_posterior_mask'] = gene_posterior_mask
        return PinnedBatch(res)
def infer_model_pred(model, tokz, dataset, outfile, batch_size=30):
    """Greedy-decode answers for every example in `dataset` and dump results.

    Writes one JSON object per example to `outfile` with keys
    'context', 'ans_gold' and 'ans_pred'.

    Args:
        model: generative LM exposing .generate() and .device.
        tokz: tokenizer with eos_token_id and decode().
        dataset: dataset providing max_ans_len and items collated by PadBatchSeq.
        outfile: path of the JSON-lines output file.
        batch_size: DataLoader batch size.
    """
    max_ans_len = dataset.max_ans_len + 1  # +1 leaves room for the eos token
    data_loader = DataLoader(dataset, batch_size=batch_size, num_workers=3, pin_memory=True, collate_fn=PadBatchSeq(0))
    device = model.device
    with open(outfile, 'w', encoding='utf-8') as f:
        with torch.no_grad():
            model.eval()
            # FIX: the original iterated `for i, data in enumerate(...)` and then
            # reused `i` as the inner per-sample index, shadowing the batch index.
            # The outer index was never used, so drop enumerate entirely.
            for data in data_loader:
                bs = data['context_id'].shape[0]
                context = data['context_id'].to(device, non_blocking=True)
                context_lens = data['context_lens'].to(device, non_blocking=True)
                # Contexts are left-padded, hence the reverse (left-padding) mask.
                mask = get_reverse_mask(context_lens)
                output_sequence = model.generate(
                    input_ids=context, attention_mask=mask, do_sample=False, eos_token_id=tokz.eos_token_id,
                    pad_token_id=tokz.eos_token_id, max_length=context.shape[1] + max_ans_len, early_stopping=True)
                # Keep only the newly generated tokens after the context.
                cls_res = output_sequence[:, context.shape[1]:].tolist()
                ans = data['ans_id'].tolist()
                for j in range(bs):
                    res = {}
                    res['context'] = tokz.decode(context[j][-context_lens[j]:])
                    res['ans_gold'] = tokz.decode(ans[j][:data['ans_lens'][j]-1])
                    res['ans_pred'] = tokz.decode(cut_eos(cls_res[j], tokz.eos_token_id))
                    print(json.dumps(res), file=f)
164,991 | import logging
import random
import torch
import numpy as np
from torch.utils.data import DataLoader
from dataset import PadBatchSeq, pad_seq
import json
from tqdm import tqdm
from sklearn.metrics import accuracy_score, f1_score
import torch.distributed as dist
import os, time, gc, json, pickle, argparse, math, re
import torch.nn as nn
import torch.utils.data as data
import torch.distributed as dist
import torch.multiprocessing as mp
import copy
from transformers import GPT2Config, GPT2Model, GPT2LMHeadModel
import torch.nn.functional as F
def cal_metrics_from_pred_files(res_file):
    """Compute accuracy from a JSON-lines prediction file.

    Each line must contain 'ans_gold' and 'ans_pred' string fields.
    Returns a dict with a single "accuracy" entry.
    """
    with open(res_file, 'r', encoding='utf-8') as f:
        records = [json.loads(line) for line in f.readlines()]
    gold = [r['ans_gold'] for r in records]
    pred = [r['ans_pred'] for r in records]
    return {
        "accuracy": accuracy_score(gold, pred),
    }
164,992 | import logging
import random
import torch
import numpy as np
from torch.utils.data import DataLoader
from dataset import PadBatchSeq, pad_seq
import json
from tqdm import tqdm
from sklearn.metrics import accuracy_score, f1_score
import torch.distributed as dist
import os, time, gc, json, pickle, argparse, math, re
import torch.nn as nn
import torch.utils.data as data
import torch.distributed as dist
import torch.multiprocessing as mp
import copy
from transformers import GPT2Config, GPT2Model, GPT2LMHeadModel
import torch.nn.functional as F
The provided code snippet includes necessary dependencies for implementing the `slot_f1_score` function. Write a Python function `def slot_f1_score(pred_slots, true_slots)` to solve the following problem:
pred_slots, true_slots are like [['from_location:10-11', 'leaving_date:12-13']]
Here is the function:
def slot_f1_score(pred_slots, true_slots):
    '''
    pred_slots, true_slots are like [['from_location:10-11', 'leaving_date:12-13']]

    Computes a per-slot-type F1 (with 1e-5 smoothing) over the types seen in
    true_slots and returns the unweighted mean.
    '''
    slot_types = {slot.split(":")[0] for row in true_slots for slot in row}
    per_type_f1 = []
    for slot_type in slot_types:
        # Keep only entries mentioning this type (substring match, as before).
        preds = [[p for p in prediction if slot_type in p] for prediction in pred_slots]
        labels = [[l for l in label if slot_type in l] for label in true_slots]
        true_positives = 0
        num_predicted = 0
        num_to_recall = 0
        for pred, gold in zip(preds, labels):
            proposed = len(pred) > 0
            if proposed:
                num_predicted += 1
            if len(gold) > 0:
                num_to_recall += 1
            # A proposal counts as correct only if it matches the gold exactly.
            if proposed and pred == gold:
                true_positives += 1
        precision = true_positives / (1e-5 + num_predicted)
        recall = true_positives / (1e-5 + num_to_recall)
        per_type_f1.append(2 * precision * recall / (1e-5 + precision + recall))
    return np.mean(per_type_f1)
164,993 | import logging
import random
import torch
import numpy as np
from torch.utils.data import DataLoader
from dataset import PadBatchSeq, pad_seq
import json
from tqdm import tqdm
from sklearn.metrics import accuracy_score, f1_score
import torch.distributed as dist
import os, time, gc, json, pickle, argparse, math, re
import torch.nn as nn
import torch.utils.data as data
import torch.distributed as dist
import torch.multiprocessing as mp
import copy
from transformers import GPT2Config, GPT2Model, GPT2LMHeadModel
import torch.nn.functional as F
def get_answer(tokz, lm_model, example, example_lens, max_ans_len, sampling=False, args=None):
    """Greedy-decode up to `max_ans_len` answer tokens after each (left-padded) example.

    Args:
        tokz: tokenizer providing eos_token_id.
        lm_model: model whose .decoder supports HuggingFace .generate().
        example: [bs, len] LongTensor of left-padded input ids.
        example_lens: per-row true lengths, or None for no attention mask.
        max_ans_len: maximum number of new tokens to decode.
        sampling, args: kept for interface compatibility; decoding is always
            greedy (do_sample=False), so no sampling hyper-parameters are read.

    Returns:
        [bs, <=max_ans_len] tensor of generated token ids (input stripped).
    """
    # FIX: the original unconditionally read args.temperature/top_k/top_p,
    # which crashed with the default args=None — and the values were never
    # used since generation is greedy. The reads are removed.
    lm_model.eval()
    device = 'cuda'
    if example_lens is None:  # idiom fix: `is None`, not `== None`
        mask = None
    else:
        # Examples are left-padded, hence the reverse (left-padding) mask.
        mask = get_reverse_mask(example_lens).to(device)
    eos_token = tokz.eos_token_id
    pad_token = tokz.eos_token_id
    # * Get sequence outputs.
    output_seq = lm_model.decoder.generate(input_ids=example, attention_mask=mask, do_sample=False, eos_token_id=eos_token, pad_token_id=pad_token,
                                           max_length=example.shape[1]+max_ans_len, early_stopping=True)
    return output_seq[:, example.shape[1]:]
def textid_decode(text, eos, tokz):
    """Decode token ids to a stripped string, cutting everything from the first `eos` onwards."""
    try:
        cut = text[:text.index(eos)]
    except ValueError:
        cut = text
    return tokz.decode(cut).strip()
def padding_convert(text_list, eos):
    """Truncate each id sequence after its second `eos`, then left-pad to a CUDA batch.

    Keeping the first eos (the prompt separator) and cutting at the second
    drops any over-generated tail. Returns (padded_ids, lengths), both on CUDA.
    """
    truncated = []
    for seq in text_list:
        if eos in seq:
            eos_positions = [idx for idx, tok in enumerate(seq) if tok == eos]
            if len(eos_positions) > 1:
                seq = seq[:eos_positions[1]]
        truncated.append(seq)
    lens = [len(seq) for seq in truncated]
    lens = torch.tensor(lens, dtype=torch.long).to('cuda')
    padded = torch.tensor([pad_seq(seq, eos, max(lens), pad_left=True) for seq in truncated], dtype=torch.long).to('cuda')
    return padded, lens
def sample_sequence(model, tokenizer, length, batch_size=None, p_mask=None, p_tokens=None, p_lens=None,
                    temperature=1, top_k=100, top_p=0.95, sampling=True, only_decoder=False, memory=None, args=None, task=None, use_prior=False):
    """Sample token sequences conditioned on prompt `p_tokens`, optionally
    injecting a CVAE latent projection into the decoder.

    Latent selection (when not only_decoder):
      - memory is None: encode the prompt and sample z from the model's prior.
      - memory given and use_prior: reuse the stored prior for `task`.
      - memory given, not use_prior: sample from a randomly chosen stored
        posterior for `task`.
    With only_decoder=True no latent is injected (z_proj=None).

    NOTE(review): batch_size, p_reverse_mask, sampling, temperature, top_k and
    top_p are computed/accepted but never forwarded to generate() — sampling
    runs with HuggingFace defaults; confirm this is intended.

    Returns:
        Tensor of generated ids, prompt included (shape [bs, <=length]).
    """
    device = 'cuda'
    p_tokens = p_tokens.to(device)
    if p_mask is not None: p_mask = p_mask.to(device)
    if p_lens is not None:
        p_reverse_mask = get_reverse_mask(p_lens)
    else:
        p_reverse_mask = None
    eos_token = tokenizer.eos_token_id
    with torch.no_grad():
        if not only_decoder:
            if memory is None:
                # Fresh prior from the encoder over the prompt.
                prior_out = model.encoder(input_ids=p_tokens, attention_mask=p_mask)
                prior_emb, _ = model.avg_attn(prior_out[0])
                prior_mean, prior_logvar = model.prior_mean(prior_emb), model.prior_logvar(prior_emb)
                z = model.reparameterize(prior_mean, prior_logvar)
                z_proj = model.latent_mlp(z) * args.alpha_z
                assert not torch.isnan(z).any(), 'training get nan z'
            elif use_prior:
                # Replay the stored prior for this task (either a cached z or
                # cached (mean, logvar) to re-sample from).
                if args.save_z:
                    z_proj = memory.memory[task][1]['prior_z']
                else:
                    old_prior_mean, old_prior_logvar = memory.memory[task][1]['prior']
                    z = model.reparameterize(old_prior_mean, old_prior_logvar)
                    z_proj = model.latent_mlp(z) * args.alpha_z
                    assert not torch.isnan(z).any(), 'training get nan z'
            else: # * use posterior: draw one stored posterior at random
                if args.save_z:
                    z_proj = random.choice(memory.memory[task][1]['posterior_z'])
                else:
                    prev_post_mean, prev_post_logvar = random.choice(memory.memory[task][1]['posterior'])
                    z = model.reparameterize(prev_post_mean, prev_post_logvar)
                    z_proj = model.latent_mlp(z) * args.alpha_z
                    assert not torch.isnan(z).any(), 'training get nan z'
        else:
            z_proj = None
        # The latent projection is threaded into the decoder via generate kwargs.
        model_kwargs = {'latent_proj':z_proj} # !
        output_seq = model.decoder.generate(input_ids=p_tokens, attention_mask=p_mask, do_sample=True,
                                            eos_token_id=eos_token, pad_token_id=eos_token, max_length=length, early_stopping=True,
                                            **model_kwargs)
    return output_seq
def gen_pseudo_data(model, task, dataset, max_output_len=90, batch_size=30, target_count=100, output_file=None,
                    top_k=100, top_p=0.95, temperature=1, only_decoder=False, memory=None, args=None):
    """Generate `target_count` pseudo (utterance, label) pairs for `task`.

    Repeatedly samples from the model with the dataset's pseudo-data prompt,
    extracts the utterance from each sample via task-specific regexes, then
    queries the model again (prompt + utterance + answer prompt) to obtain a
    label. Deduplicated pairs are written to `output_file` as JSON lines and
    returned as a list of [utter, label].

    Raises:
        ValueError: if output_file is None.
    """
    device = 'cuda'
    # NOTE(review): both branches are identical — the general_prompt flag has
    # no effect here; confirm whether a different prompt was intended.
    if not args.general_prompt:
        prompt_id = [dataset.tokz.bos_token_id] + dataset.pseudo_data_prompt_id
    else:
        prompt_id = [dataset.tokz.bos_token_id] + dataset.pseudo_data_prompt_id
    prompt_mask, prompt_lens = None, None
    ans_prompt_id_ls = dataset.pseudo_ans_prompt_id
    max_output_len += len(prompt_id) + len(ans_prompt_id_ls)
    prompt_id = torch.LongTensor([prompt_id for _ in range(batch_size)]).to(device)
    ans_prompt_id = torch.LongTensor([ans_prompt_id_ls for _ in range(batch_size)]).to(device)
    pseudo_list = []
    utter_set = set()  # dedup set of accepted utterances
    eos_token = dataset.tokz.eos_token_id
    if output_file is None:
        raise ValueError("Pseudo output file is not specified.")
    if output_file is not None:
        if not os.path.isdir(os.path.dirname(output_file)):
            os.makedirs(os.path.dirname(output_file), exist_ok=True)
    while len(pseudo_list) < target_count:
        # First ~20% of the target is sampled from the stored prior, the rest
        # from stored posteriors (see sample_sequence).
        if len(pseudo_list) <= target_count // 5:
            use_prior = True
        else: use_prior = False
        with torch.no_grad():
            model.eval()
            output_seq = sample_sequence(model, dataset.tokz, length=max_output_len, batch_size=batch_size,
                                         p_tokens=prompt_id, p_mask=prompt_mask, p_lens=prompt_lens, temperature=temperature,
                                         top_k=top_k, top_p=top_p, sampling=True, only_decoder=only_decoder, memory=memory,
                                         args=args, task=task, use_prior=use_prior)
        output_list = output_seq.tolist()
        for i in range(batch_size):
            output_id = output_list[i][1:]  # drop BOS
            if eos_token in output_id:
                output_id = output_id[:output_id.index(eos_token)]
            output = dataset.tokz.decode(output_id)
            # Extract the utterance; pattern depends on whether the sample
            # contains the full answer prompt, just the question mark, or
            # only the task prefix.
            if ' "? Answer: ' in output:
                if args.data_type == 'intent':
                    utter = re.findall(r'task, which intent category best describes: " (.+?) "\? Answer: ', output)
                else:
                    utter = re.findall(r'task, what are slots and values: " (.+?) "\? Answer: ', output)
                if len(utter) > 0:
                    utter = utter[0]
                else: continue
            elif ' "? ' in output:
                if args.data_type == 'intent':
                    utter = re.findall(r'task, which intent category best describes: " (.+?) "\? ', output)
                else:
                    utter = re.findall(r'task, what are slots and values: " (.+?) "\? ', output)
                if len(utter) > 0:
                    utter = utter[0]
                else: continue
            else:
                if args.data_type == 'intent':
                    utter = output.replace(f"In the \"{task}\" task, which intent category best describes: \" ", "")
                else:
                    utter = output.replace(f"In the \"{task}\" task, what are slots and values: \" ", "")
                if len(utter) <= 1: continue
            # * Get labels: re-run the model on (generation + answer prompt)
            # * and greedily decode the answer. Utterances of <=2 words are skipped.
            if len(utter.split())>2:
                lm_input_id = output_id + ans_prompt_id_ls
                lm_input, lm_input_lens = padding_convert([lm_input_id], eos_token)
                label_id = get_answer(dataset.tokz, model, lm_input, lm_input_lens, max_ans_len=10, args=args).tolist()[0]
                label = textid_decode(label_id, eos_token, dataset.tokz)
                res = {'task_name': task,'utter': utter, 'label': label}
            else:
                res = None
            if res is not None:
                utter = res['utter']
                label = res['label']
                print('UTTER::', utter,'====>> LABEL::', label, flush=True)
                if utter not in utter_set and res['task_name']==task and label!='':
                    # Select pseudo slot data based on rules.
                    if args.data_type == 'slot':
                        # NOTE(review): rule-based filtering is disabled —
                        # select is hard-wired to True.
                        select = True
                        if select:
                            utter_set.add(utter)
                            if label[-1] == ';': label = label[:-1]  # strip trailing separator
                            pseudo_list.append([utter,label])
                    else: # for intent pseudo data.
                        utter_set.add(utter) # avoid duplicate utterance
                        pseudo_list.append([utter, label])
    pseudo_list = pseudo_list[:target_count] # only output the first target_count utterances
    with open(output_file, 'w', encoding='utf8') as f:
        for utter, label in pseudo_list:
            print(json.dumps({'Utterence': utter, 'Label': label}, ensure_ascii=False), file=f)
    return pseudo_list
164,994 | import logging
import random
import torch
import numpy as np
from torch.utils.data import DataLoader
from dataset import PadBatchSeq, pad_seq
import json
from tqdm import tqdm
from sklearn.metrics import accuracy_score, f1_score
import torch.distributed as dist
import os, time, gc, json, pickle, argparse, math, re
import torch.nn as nn
import torch.utils.data as data
import torch.distributed as dist
import torch.multiprocessing as mp
import copy
from transformers import GPT2Config, GPT2Model, GPT2LMHeadModel
import torch.nn.functional as F
def sample_sequence(model, tokenizer, length, batch_size=None, p_mask=None, p_tokens=None, p_lens=None,
temperature=1, top_k=100, top_p=0.95, sampling=True, only_decoder=False, memory=None, args=None, task=None, use_prior=False):
def infer_batch_pseudo_data(model, dataset, max_output_len=90, batch_size=30):
    """Generate one batch of pseudo training samples by sampling from `model`.

    Builds a `<bos> + pseudo-data-prompt` prefix, samples continuations with
    `sample_sequence`, truncates each sequence at the first eos token, decodes
    it, and keeps only outputs that `dataset.parse_pseudo_data` can parse.

    NOTE(review): `x_mask`, `x_tokens`, `temperature`, `top_k`, `top_p` and
    `device` are not defined in this scope — this snippet was apparently
    extracted from a larger module where they exist (and the `prompt_id`
    tensor built below was presumably meant to be passed as `x_tokens`).
    Confirm before use.
    """
    prompt_id = [dataset.tokz.bos_token_id] + dataset.pseudo_data_prompt_id
    # The length budget includes the prompt tokens themselves.
    max_output_len += len(prompt_id)
    prompt_id = torch.LongTensor(
        [prompt_id for _ in range(batch_size)]).to(model.device)
    with torch.no_grad():
        model.eval()
        output_seq, _ = sample_sequence(model, dataset.tokz, length=max_output_len, batch_size=batch_size,
                                        x_mask=x_mask, x_tokens=x_tokens, temperature=temperature,
                                        top_k=top_k, top_p=top_p, eos_token=dataset.tokz.eos_token_id, device=device)
    output_seq = output_seq.tolist()
    res = []
    for i in range(batch_size):
        # Drop the leading bos token, then cut at the first eos (if any).
        output = output_seq[i][1:]
        if dataset.tokz.eos_token_id in output:
            output = output[:output.index(dataset.tokz.eos_token_id)]
        output = dataset.tokz.decode(output)
        output = dataset.parse_pseudo_data(output)
        if output is not None:
            res.append(output)
    return res
164,995 | import logging
import random
import torch
import numpy as np
from torch.utils.data import DataLoader
from dataset import PadBatchSeq, pad_seq
import json
from tqdm import tqdm
from sklearn.metrics import accuracy_score, f1_score
import torch.distributed as dist
import os, time, gc, json, pickle, argparse, math, re
import torch.nn as nn
import torch.utils.data as data
import torch.distributed as dist
import torch.multiprocessing as mp
import copy
from transformers import GPT2Config, GPT2Model, GPT2LMHeadModel
import torch.nn.functional as F
def strip_list(seq, eos_id):
    """Strip leading and trailing `eos_id` tokens from a token-id list.

    Bug fix: the original returned `seq[l+1:r]` with `l`/`r` initialised to
    the sequence ends, so when the sequence did NOT start (or end) with
    `eos_id` it silently dropped a genuine first (or last) token.  When the
    sequence is eos-padded on both sides — the common case — behaviour is
    unchanged.
    """
    left = 0
    while left < len(seq) and seq[left] == eos_id:
        left += 1
    right = len(seq)
    while right > left and seq[right - 1] == eos_id:
        right -= 1
    return seq[left:right]
164,996 | import logging
import random
import torch
import numpy as np
from torch.utils.data import DataLoader
from dataset import PadBatchSeq, pad_seq
import json
from tqdm import tqdm
from sklearn.metrics import accuracy_score, f1_score
import torch.distributed as dist
import os, time, gc, json, pickle, argparse, math, re
import torch.nn as nn
import torch.utils.data as data
import torch.distributed as dist
import torch.multiprocessing as mp
import copy
from transformers import GPT2Config, GPT2Model, GPT2LMHeadModel
import torch.nn.functional as F
def get_all_priors(model, tokz, args):
    """Return {task: (prior_mean, prior_logvar)} for every task in args.tasks.

    Each task's pseudo prompt is tokenised as <bos> prompt <eos>, encoded,
    attention-pooled, and projected to the prior Gaussian parameters.
    """
    def _prior_params(task):
        prompt = f"In the \"{task}\" task, which intent category best describes: \""
        ids = torch.LongTensor(
            [tokz.bos_token_id] + tokz.encode(prompt) + [tokz.eos_token_id]
        ).to('cuda')
        hidden = model.encoder(input_ids=ids)
        pooled, _ = model.avg_attn(hidden[0])
        return model.prior_mean(pooled), model.prior_logvar(pooled)

    return {task: _prior_params(task) for task in args.tasks}
164,997 | import logging
import random
import torch
import numpy as np
from torch.utils.data import DataLoader
from dataset import PadBatchSeq, pad_seq
import json
from tqdm import tqdm
from sklearn.metrics import accuracy_score, f1_score
import torch.distributed as dist
import os, time, gc, json, pickle, argparse, math, re
import torch.nn as nn
import torch.utils.data as data
import torch.distributed as dist
import torch.multiprocessing as mp
import copy
from transformers import GPT2Config, GPT2Model, GPT2LMHeadModel
import torch.nn.functional as F
def kl_divergence(mean1, logvar1, mean2, logvar2):
    """KL( N(mean1, e^logvar1) || N(mean2, e^logvar2) ), summed over all
    non-batch dimensions and averaged over the batch dimension."""
    var_ratio = torch.exp(logvar1 - logvar2)
    mean_term = torch.pow(mean1 - mean2, 2) / logvar2.exp()
    per_elem = 0.5 * (logvar2 - logvar1 + var_ratio + mean_term - 1.0)
    reduce_dims = tuple(range(1, len(per_elem.shape)))
    return torch.sum(per_elem, reduce_dims).mean()
def get_nearest_task(model, tokz, sample, all_prior_info, args):
    """Identify which learned task a batch most likely belongs to.

    For every task, the task's pseudo prompt is prepended to the batch
    utterances, the posterior Gaussian (mean, logvar) is computed from the
    encoder, and the KL divergence to that task's stored prior (from
    `all_prior_info`) is measured.  The task with the smallest KL wins.

    Returns the selected task name (falls back to args.tasks[0]).
    """
    def pseudo_prompt(task):
        return f"In the \"{task}\" task, which intent category best describes: \""
    all_posteriors = {}
    batch_size = len(sample['utter_id'])
    for task in args.tasks:
        # <bos> + prompt tokens, replicated for the whole batch.
        prompt_id = [tokz.bos_token_id] + tokz.encode(pseudo_prompt(task))
        bt_prompt_id = torch.LongTensor([prompt_id for _ in range(batch_size)]).to('cuda')
        bt_px_id = torch.cat((bt_prompt_id, sample['utter_id'].to('cuda')), dim=1)
        bt_px_id = bt_px_id.to('cuda')
        if len(bt_px_id) != batch_size:
            raise ValueError('Tensor concatenate is wrong.')
        post_out = model.encoder(input_ids=bt_px_id)
        # Attention-pool the encoder states, then project to Gaussian params.
        post_emb, _ = model.avg_attn(post_out[0])
        post_mean, post_logvar = model.post_mean(post_emb), model.post_logvar(post_emb)
        all_posteriors[task] = (post_mean, post_logvar)
    min_kl = 1e10
    res_task = args.tasks[0]
    all_kl_dist = []
    for task in all_prior_info.keys():
        prior_mean, prior_logvar = all_prior_info[task]
        post_mean, post_logvar = all_posteriors[task]
        # NOTE(review): kl_divergence returns a 0-dim tensor; the `<` compare
        # against the running float works elementwise — confirm scalar output.
        kl_dist = kl_divergence(post_mean, post_logvar, prior_mean, prior_logvar)
        all_kl_dist.append(kl_dist)
        if kl_dist < min_kl:
            min_kl = kl_dist
            res_task = task
    print(all_kl_dist, flush=True)
    return res_task
164,998 | import logging
import random
import torch
import numpy as np
from torch.utils.data import DataLoader
from dataset import PadBatchSeq, pad_seq
import json
from tqdm import tqdm
from sklearn.metrics import accuracy_score, f1_score
import torch.distributed as dist
import os, time, gc, json, pickle, argparse, math, re
import torch.nn as nn
import torch.utils.data as data
import torch.distributed as dist
import torch.multiprocessing as mp
import copy
from transformers import GPT2Config, GPT2Model, GPT2LMHeadModel
import torch.nn.functional as F
def pad_seq(seq, pad, max_len, pad_left=False):
def get_pred_context(tokz, pred_task_name, gt_task_name, sample):
    """Rewrite batch contexts so the ground-truth task name is replaced by the
    predicted one, then re-tokenise and left-pad them to equal length.

    Args:
        tokz: tokenizer with encode/decode and `eos_token_id`.
        pred_task_name: task name to substitute in.
        gt_task_name: task name to substitute out (used as a regex pattern).
        sample: batch dict containing a `context_id` LongTensor.

    Returns:
        (new_res, new_lens): padded context id tensor and per-row true lengths,
        both on CUDA.

    Fix: removed the `context_mask` ByteTensor the original built and never
    used (dead allocation on every call).
    """
    new_list = []
    for ss in sample['context_id'].tolist():
        context = tokz.decode(ss)
        # NOTE(review): gt_task_name is treated as a regex pattern; a task name
        # containing metacharacters would misbehave — consider re.escape().
        new_context = re.sub(gt_task_name, pred_task_name, context)
        new_list.append(tokz.encode(new_context))
    context_lens = [len(i) for i in new_list]
    max_len = max(context_lens)  # hoisted: was recomputed per row
    # Left-pad with eos so generation prompts stay right-aligned.
    new_res = torch.tensor(
        [pad_seq(i, tokz.eos_token_id, max_len, pad_left=True) for i in new_list],
        dtype=torch.long).to('cuda')
    new_lens = torch.tensor(context_lens, dtype=torch.long).to('cuda')
    return new_res, new_lens
164,999 | import logging
import random
import torch
import numpy as np
from torch.utils.data import DataLoader
from dataset import PadBatchSeq, pad_seq
import json
from tqdm import tqdm
from sklearn.metrics import accuracy_score, f1_score
import torch.distributed as dist
import os, time, gc, json, pickle, argparse, math, re
import torch.nn as nn
import torch.utils.data as data
import torch.distributed as dist
import torch.multiprocessing as mp
import copy
from transformers import GPT2Config, GPT2Model, GPT2LMHeadModel
import torch.nn.functional as F
# Map each task name to its list of valid slot labels, loaded once at import
# time from the current working directory.
# NOTE(review): the file handle from open() is never closed and the relative
# path assumes CWD contains slot_label_dict.json — confirm this is intended.
all_slot_dict = json.load(open('slot_label_dict.json'))
def slot_select_pseudo(utter, answer, task_name):
    """Rule-based filter for pseudo slot-tagging samples.

    Accept the sample only when every "slot: value" pair in `answer` names a
    slot known for `task_name` and its value is a non-empty substring of
    `utter`.
    """
    valid_slots = all_slot_dict[task_name]
    pairs = answer.split('; ')
    if not pairs:
        return False
    for pair in pairs:
        parts = pair.split(': ')
        if len(parts) != 2:
            return False
        slot, value = parts
        if value == '' or slot not in valid_slots or value not in utter:
            return False
    return True
165,000 | from mycvae.utils import *
from mycvae.model import *
import threading
import torch
import os, shutil
from torch.utils.data import DataLoader
from dataset import PadBatchSeq, TASK2INFO, MixedCLSDataset, MixedSlotTaggingDataset, PromptCLSDataset, PromptSlotTaggingDataset
from tqdm import tqdm
import json
import torch.distributed as dist
import os, time, gc, json, pickle, argparse, math
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data as data
from torch.nn import DataParallel
import torch.distributed as dist
import torch.multiprocessing as mp
import numpy as np
import transformers
from transformers import get_linear_schedule_with_warmup, Conv1D, AdamW
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
import importlib
import copy
from apex.optimizers import FusedAdam
from apex import amp
from apex.fp16_utils import FP16_Optimizer
from collections import Counter
from nltk.translate.bleu_score import sentence_bleu
from nltk.translate.bleu_score import SmoothingFunction
from rouge import Rouge
from sklearn.manifold import TSNE
import matplotlib
import matplotlib.pyplot as plt
from dataset import get_datasets, get_dataclass_dict
from transformers import GPT2Config, GPT2LMHeadModel, GPT2Model
def compute_vae_loss(device, model, loss_fn, beta, vae_total, distill=False, prev_model=None):
def compute_lm_loss(device, model, loss_fn, lm_total, distill=False, prev_model=None):
def train_step(device, model, optimizer, loss_fn, beta, vae_total, lm_total, distill=False, only_decoder=False, only_vae=False, prev_model=None):
    """Run one optimisation step mixing the CVAE objective and the LM objective.

    Args:
        device: target device for the loss helpers.
        model: the CVAE/LM model under training.
        optimizer: optimiser stepped once here.
        loss_fn: token-level criterion passed to the loss helpers.
        beta: KL annealing weight for the VAE loss.
        vae_total / lm_total: pre-assembled input tuples for each objective.
        distill, prev_model: enable knowledge distillation from a frozen teacher.
        only_decoder: skip the VAE objective entirely (decoder LM only).
        only_vae: optimise the VAE loss only.

    Returns:
        A list with one tuple of floats:
        (vae_loss, vae_ce_loss, vae_kl_loss, lm_loss).
    """
    output = []
    optimizer.zero_grad()
    if only_decoder:
        vae_loss, vae_ce_loss, vae_kl_loss = torch.tensor(0.0), torch.tensor(0.0), torch.tensor(0.0)
    else:
        vae_loss, vae_ce_loss, vae_kl_loss = compute_vae_loss(device, model, loss_fn, beta, vae_total=vae_total, distill=distill, prev_model=prev_model)
    lm_loss = compute_lm_loss(device, model, loss_fn, lm_total, distill=distill, prev_model=prev_model)
    if not only_decoder and not only_vae:
        total_loss = vae_loss + 0.5 * lm_loss
    elif only_vae:
        total_loss = vae_loss
    else:
        total_loss = lm_loss
    total_loss.backward()
    # Bug fix: gradient clipping must run AFTER backward() — the original
    # clipped before any gradients existed, making the call a no-op.
    torch.nn.utils.clip_grad_norm_(model.parameters(), 5.0)
    optimizer.step()
    output.append((vae_loss.item(), vae_ce_loss.mean().item(), vae_kl_loss.item(), lm_loss.item()))
    return output
165,001 | from mycvae.utils import *
from mycvae.model import *
import threading
import torch
import os, shutil
from torch.utils.data import DataLoader
from dataset import PadBatchSeq, TASK2INFO, MixedCLSDataset, MixedSlotTaggingDataset, PromptCLSDataset, PromptSlotTaggingDataset
from tqdm import tqdm
import json
import torch.distributed as dist
import os, time, gc, json, pickle, argparse, math
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data as data
from torch.nn import DataParallel
import torch.distributed as dist
import torch.multiprocessing as mp
import numpy as np
import transformers
from transformers import get_linear_schedule_with_warmup, Conv1D, AdamW
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
import importlib
import copy
from apex.optimizers import FusedAdam
from apex import amp
from apex.fp16_utils import FP16_Optimizer
from collections import Counter
from nltk.translate.bleu_score import sentence_bleu
from nltk.translate.bleu_score import SmoothingFunction
from rouge import Rouge
from sklearn.manifold import TSNE
import matplotlib
import matplotlib.pyplot as plt
from dataset import get_datasets, get_dataclass_dict
from transformers import GPT2Config, GPT2LMHeadModel, GPT2Model
def get_model_input(batch, input_type='vae', step=100, tokz=None):
    """Slice a padded batch into model-ready tensors.

    Returns a tuple whose layout depends on `input_type`:
      * 'vae':  (prompt_mask, prompt_tokens, posterior_mask, posterior_tokens,
                 decoder_tokens, decoder_mask, targets, label_mask)
      * else:   (all_tokens, all_mask, all_label_mask, all_targets)

    Inputs are shifted for teacher forcing (drop last token for inputs, drop
    first token for targets).  For the first few steps (step < 3) the decoded
    tensors are printed for debugging via `tokz`.
    """
    if input_type == 'vae':
        post_tokens = batch['posterior_id'][..., :-1]
        post_mask = batch['posterior_mask'][..., :-1].contiguous()
        prompt_tokens = batch['prompt_id']
        prompt_mask = batch['prompt_mask'].contiguous()
        dec_tokens = batch['input_id'][..., :-1].contiguous()
        dec_mask = batch['input_mask'][..., :-1].contiguous()
        label_mask = batch['input_label_mask'][..., 1:].contiguous()
        targets = batch['input_id'][..., 1:].contiguous()
        if step < 3:
            print('*'*10,'VAE','*'*10, flush=True)
            print('prefix', tokz.decode(prompt_tokens[0]), flush=True)
            print("input_id", post_tokens[0], flush=True)
            print('input', tokz.decode(post_tokens[0]), flush=True)
            print('input_mask', post_mask[0], flush=True)
            print('target_id', targets[0], flush=True)
            print('target', tokz.decode(targets[0]), flush=True)
        return (prompt_mask, prompt_tokens, post_mask, post_tokens,
                dec_tokens, dec_mask, targets, label_mask)

    seq_tokens = batch['all_id'][..., :-1]
    seq_mask = batch['all_mask'][..., :-1].contiguous()
    seq_targets = batch['all_id'][..., 1:].contiguous()
    seq_label_mask = batch['all_label_mask'][..., 1:].contiguous()
    if step < 3:
        print('*'*10, 'LM', '*'*10, flush=True)
        print('all id (utterance and answer)', seq_tokens[0], flush=True)
        print('all', tokz.decode(seq_tokens[0]), flush=True)
        print('all mask', seq_mask[0], flush=True)
        print('all target tokens', seq_targets[0], flush=True)
        print('all tgt', tokz.decode(seq_targets[0]), flush=True)
        print('all label mask (qa)', seq_label_mask[0], flush=True)
        print('\n', flush=True)
    return (seq_tokens, seq_mask, seq_label_mask, seq_targets)
165,002 | import os
import argparse
import torch
import shutil
def parse_args():
    """Parse CLI arguments, create output directories, and pick the device.

    Also converts `num_train_epochs` into a per-task dict.

    NOTE(review): arguments declared with ``type=bool`` (e.g. --test_all,
    --do_train) hit the classic argparse pitfall — any non-empty string,
    including "False", parses as True.  Fixing this would change the CLI
    contract, so it is only flagged here.  The --seed argument is parsed but
    not applied in this function.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--eval_during_train', type=bool, default=False)
    parser.add_argument('--test_all', type=bool, default=True)
    parser.add_argument('--z_dim', type=int, default=768, help='Dimension of the latent variable z.')
    parser.add_argument('--save_z', type=bool, default=False, help='Directly save the latent variable z into the memory.')
    parser.add_argument('--share_params', type=bool, default=False)
    parser.add_argument('--general_prompt', type=bool, default=False)
    parser.add_argument('--add_kd', type=bool, default=True)
    parser.add_argument('--data_type', type=str, default='intent')
    parser.add_argument('--KD_term', type=float, default=0.5, help="Control how many teacher model signal is passed to student.")
    parser.add_argument('--KD_temperature', type=float, default=1.0, help="Temperature used to calculate KD loss.")
    parser.add_argument('--exact_replay', default=False, type=bool, help='Whether to do exact replay by storing some real samples of old tasks into the memory.')
    parser.add_argument('--memory_size', default=10, type=int, help='Number of posterior information of old tasks stored in the memory.')
    parser.add_argument("--num_cycle", default=1, type=int, help="Number of cycles for annealing")
    parser.add_argument('--cycle_ratio', default=0.9, type=float, help="Ratio for cycle annearling.")
    parser.add_argument("--classIL", default=False, type=bool, help="Whether use class incremental learning during testing.")
    parser.add_argument('--use_memory', default=False, type=bool, help="Whether store the learned latent variables z and other info into the memory.")
    parser.add_argument('--memory_path', default=None, type=str)
    parser.add_argument('--experiment', type=str)
    parser.add_argument('--tasks', nargs='+', default=['banking'])
    parser.add_argument("--data_dir", default="./PLL_DATA/", type=str, help="The path to train/dev/test data files.")
    parser.add_argument("--output_dir", default="./output/dstc", type=str, help="The output directory where the model checkpoints and predictions will be written.")
    parser.add_argument("--tb_log_dir", default="./tb_logs/dstc", type=str, help="The tensorboard output directory.")
    parser.add_argument("--res_dir", default="./res", type=str, help="The path to save scores of experiments.")
    # Generation parameters.
    parser.add_argument('--nsamples', type=int, default=64)
    parser.add_argument("--gene_batch_size", default=64, type=int)
    parser.add_argument('--top_k', type=int, default=100)
    parser.add_argument('--top_p', type=float, default=0.95)
    parser.add_argument('--do_train', type=bool, default=True)
    parser.add_argument('--gen_replay', type=bool, default=False, help='Whether use generative replay to avoid forgetting.')
    parser.add_argument('--model_path', type=str, help='pretrained model path to local checkpoint')
    parser.add_argument('--generate_dur_train', type=bool, help='Generate reconstructed input utterances during training with CVAE.')
    parser.add_argument('--temperature', type=float, default=0.95)
    parser.add_argument('--pseudo_data_ratio', type=float, default=0.05, help="How many pseudo data to generate for each learned task")
    parser.add_argument("--only_decoder", type=bool, help="Not use latent code z, only use prompt to generate pseudo data.")
    parser.add_argument("--only_vae", type=bool, help="Not use lm ce loss to update the model.")
    parser.add_argument('--latent_size', type=int, default=32, help='dimension of the latent variable z in CVAE.')
    parser.add_argument('--alpha_z', type=float, default=0.1, help='Multiply alpha when adding the latent z embedding onto the original embeddings.')
    parser.add_argument('--add_input', type=bool, default=False)
    parser.add_argument('--add_attn', type=bool, default=False)
    parser.add_argument('--add_softmax', type=bool, default=False)
    parser.add_argument('--attn_proj_vary', type=bool, default=False)
    parser.add_argument('--learn_prior', default=True, type=bool)
    parser.add_argument('--lr', type=float, default=5e-5)
    parser.add_argument('--model_type', type=str, default='cvae', choices=['cvae', 'ae_vae_fusion'])
    parser.add_argument("--train_batch_size", default=64, type=int, help="Batch size per GPU/CPU for training.")
    parser.add_argument("--eval_batch_size", default=64, type=int, help="Batch size per GPU/CPU for evaluation.")
    parser.add_argument('--switch-time', type=float, default=0,
                        help="Percentage of iterations to spend on short sequence training.")
    parser.add_argument('--load', type=str, help='path to load model from')
    parser.add_argument('--workers', default=1, type=int, metavar='N',
                        help='number of data loading workers')
    # GPU selection.
    parser.add_argument('--gpu', default=0, type=int)
    parser.add_argument('--no_gpu', action="store_true")
    parser.add_argument('--fp16_opt_level', default='O0', type=str, required=False)
    # KL cost annealing: increase beta from beta_0 to 1 over beta_warmup steps.
    parser.add_argument('--beta_0', default=1.00, type=float)
    parser.add_argument('--beta_warmup', type=int, default=50000)
    # Cyclical VAE annealing.
    parser.add_argument('--cycle', type=int, default=1000)
    parser.add_argument("--filename", type=str, help="Data original file to be preprocessed.")
    parser.add_argument("--init_model_name_or_path", default="./dir_model/gpt2", type=str, help="Path to init pre-trained model")
    parser.add_argument("--num_workers", default=1, type=int, help="workers used to process data")
    parser.add_argument("--local_rank", help='used for distributed training', type=int, default=-1)
    # Other parameters.
    parser.add_argument("--ctx_max_len", default=128, type=int, help="Maximum input length for the sequence")
    parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
    parser.add_argument('--gradient_accumulation_steps', type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.")
    parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight deay if we apply some.")
    parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
    parser.add_argument("--num_train_epochs", default=10, type=int, help="Total number of training epochs for each task.")
    parser.add_argument("--warmup_steps", default=-1, type=int, help="Linear warmup step. Will overwrite warmup_proportion.")
    parser.add_argument("--warmup_proportion", default=0.1, type=float, help="Linear warmup over warmup_proportion * steps.")
    parser.add_argument("--nouse_scheduler", action='store_true', help="dont use get_linear_schedule_with_warmup, use unchanged lr")
    parser.add_argument('--logging_steps', type=int, default=10, help="Log every X updates steps.")
    parser.add_argument('--save_epochs', type=float, default=2, help="Save checkpoint every X epochs.")
    parser.add_argument('--eval_steps', type=int, default=20, help="Eval current model every X steps.")
    parser.add_argument('--eval_times_per_task', type=float, default=10, help="How many times to eval in each task, will overwrite eval_steps.")
    parser.add_argument('--seed', type=int, default=42, help="Seed for everything")
    parser.add_argument("--log_eval_res", default=False, help="Whether to log out results in evaluation process")
    parser.add_argument("--fp16", action="store_true", help="Whether to use 16-bit (mixed) precision (through pytorch implementation) instead of 32-bit")
    parser.add_argument("--debug", action="store_true", help="Use debug mode")
    args = parser.parse_args()

    # Derived paths / output directories (only rank 0 or single-process creates).
    args.log_file = os.path.join(args.output_dir, 'log.txt')
    if args.local_rank in [0, -1]:
        os.makedirs(args.output_dir, exist_ok=True)
        os.makedirs(args.tb_log_dir, exist_ok=True)
    else:
        # Non-master ranks busy-wait until the master has created the dirs.
        # NOTE(review): consider torch.distributed.barrier() instead of spinning.
        while (not os.path.isdir(args.output_dir)) or (not os.path.isdir(args.tb_log_dir)):
            pass

    if args.debug:
        args.logging_steps = 1
        torch.manual_seed(0)
        # Bug fix: the attribute was misspelled "deterministric", which
        # silently set a nonexistent attribute and left cuDNN non-deterministic.
        torch.backends.cudnn.deterministic = True

    # Set up (optionally distributed) device.
    distributed = (args.local_rank != -1)
    if distributed:
        print(args.local_rank)
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        torch.distributed.init_process_group(backend='nccl', init_method='env://')
    else:
        args.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # Expand the epoch budget into a per-task mapping.
    args.num_train_epochs = {task: args.num_train_epochs for task in args.tasks}
    return args
165,003 | import torch
import csv
import os
import re
import json
import numpy as np
from settings import parse_args
class PseudoCLSDataset(PromptCLSDataset):
    """Classification dataset built from pseudo-labelled samples for one task.

    Reuses the parent's `data_tokenization`; only the construction differs.
    """

    def __init__(self, taskname, data, tokz, ctx_max_len=100):
        # Tokenizer and limits must be in place before tokenization runs.
        self.tokz = tokz
        self.ctx_max_len = ctx_max_len
        self.max_ans_len = 0
        self.data = self.data_tokenization(taskname, data)
The provided code snippet includes necessary dependencies for implementing the `get_dataclass_dict` function. Write a Python function `def get_dataclass_dict(task2data, curr_task, curr_data, tokz, ctx_max_len)` to solve the following problem:
task2data : {'task_name': [data1, data2, data3, ...]}
Here is the function:
def get_dataclass_dict(task2data, curr_task, curr_data, tokz, ctx_max_len):
    """Build a mapping from task name to dataset object.

    task2data: {'task_name': [data1, data2, data3, ...]} raw pseudo samples.
    The current task keeps its ready-made dataset `curr_data`; every task in
    task2data is wrapped in a PseudoCLSDataset (which therefore wins if
    curr_task also appears in task2data — matches original behaviour).
    """
    result = {curr_task: curr_data}
    for name, samples in task2data.items():
        result[name] = PseudoCLSDataset(name, samples, tokz, ctx_max_len=ctx_max_len)
    return result
165,004 | import torch
import csv
import os
import re
import json
import numpy as np
from settings import parse_args
def rolling_window(a, window):
    """Return all length-`window` sliding windows of `a`'s last axis as a
    zero-copy strided view of shape (..., len - window + 1, window)."""
    out_shape = a.shape[:-1] + (a.shape[-1] - window + 1, window)
    out_strides = a.strides + (a.strides[-1],)
    return np.lib.stride_tricks.as_strided(a, shape=out_shape, strides=out_strides)
def vview(a):
    """View each row of 2-D array `a` as a single opaque void scalar, so whole
    rows can be compared with one elementwise `==`."""
    contig = np.ascontiguousarray(a)
    row_dtype = np.dtype((np.void, contig.dtype.itemsize * contig.shape[1]))
    return contig.view(row_dtype)
def sublist_start_index(a, b):
    """Return the start index of sub-sequence `b` inside sequence `a`.

    If `b` is not found, retry after dropping its last element (utterance
    tails are sometimes truncated/mangled), and raise ValueError once `b`
    is exhausted.

    Fixes in this revision:
    * the original `for i in range(1, k): return ...` loop always returned on
      its first iteration, so `k` was dead — the one-token-at-a-time retry is
      now explicit;
    * recursing with an empty `b` crashed with an obscure numpy error instead
      of the intended ValueError;
    * the windowed comparison is done with a plain broadcasted `==` instead of
      the void-dtype trick, so no sibling helpers are needed.
    """
    if len(a) == 0 or len(b) == 0:
        raise ValueError('The utterance is not in the input.')
    n = min(len(b), len(a))
    arr = np.asarray(a)
    # All contiguous length-n windows of `a`, as a zero-copy strided view.
    windows = np.lib.stride_tricks.as_strided(
        arr,
        shape=(arr.shape[0] - n + 1, n),
        strides=(arr.strides[0], arr.strides[0]))
    matches = np.flatnonzero((windows == np.asarray(b[:n])).all(axis=1))
    if matches.size:
        return matches[0]
    if len(b) > 1:
        # Not found: drop the last token of b and try again.
        return sublist_start_index(a, b[:-1])
    raise ValueError('The utterance is not in the input.')
165,005 | from mycvae.utils import *
from mycvae.model import *
import pickle
import os
import math
import torch
import torch.nn.functional as F
from torch.nn import DataParallel
import numpy as np
import argparse
from transformers import GPT2Tokenizer, GPT2LMHeadModel, GPT2Config
from tqdm import tqdm
from tqdm import trange
import importlib
import copy
from collections import Counter
from nltk.translate.bleu_score import sentence_bleu
from nltk.translate.bleu_score import SmoothingFunction
from rouge import Rouge
from dataset import get_datasets
from torch.utils.data import DataLoader
from dataset import PadBatchSeq, TASK2INFO
def repeat_score(text, ngram=(3, 4, 5, 6)):
    """Score how repetitive a token sequence is (higher = more repetitive).

    For each n-gram size, the most frequent n-gram count is normalised by
    (len(text)/n + n); the maximum over sizes is returned.  Returns 1.0 when
    the text is too short to form any n-gram.

    Fixes: mutable default argument replaced with a tuple; the bare
    `except: pass` around `max()` replaced with an explicit empty check; the
    score is now paired with its own n-gram size instead of relying on empty
    sizes only occurring at the tail of `ngram`.
    """
    scores = []
    for ng in ngram:
        # NOTE(review): range(len(text) - ng - 1) skips the last two n-grams;
        # kept as-is to preserve the original scoring.
        grams = [' '.join(text[idx:idx + ng]) for idx in range(len(text) - ng - 1)]
        if not grams:
            continue
        top = max(Counter(grams).values())
        scores.append(top / ((len(text) / ng) + ng))
    return max(scores) if scores else 1.0
165,006 | from mycvae.utils import *
from mycvae.model import *
import pickle
import os
import math
import torch
import torch.nn.functional as F
from torch.nn import DataParallel
import numpy as np
import argparse
from transformers import GPT2Tokenizer, GPT2LMHeadModel, GPT2Config
from tqdm import tqdm
from tqdm import trange
import importlib
import copy
from collections import Counter
from nltk.translate.bleu_score import sentence_bleu
from nltk.translate.bleu_score import SmoothingFunction
from rouge import Rouge
from dataset import get_datasets
from torch.utils.data import DataLoader
from dataset import PadBatchSeq, TASK2INFO
def sample_sequence(model, tokenizer, length, batch_size=None, x_mask=None, x_tokens=None, x_lens=None,
                    temperature=1, top_k=100, top_p=0.95, sampling=True, only_decoder=False):
    """Autoregressively sample up to `length` tokens conditioned on prompt `x_tokens`.

    Unless `only_decoder` is set, a latent z is drawn from the model's prior
    (conditioned on the prompt) and injected into the transformer via
    cross-attention (`add_attn`).  Tokens are drawn with temperature +
    top-k/top-p filtering; generation stops early once every sequence in the
    batch has emitted eos.

    Returns the generated ids without the seed token, shape
    (batch_size, <= length).

    NOTE(review): `x_reverse_mask` is computed but never passed to the
    transformer (its uses are commented out), and the per-step `probability`
    tensor is accumulated but not returned — confirm both are intentional.
    """
    device = 'cuda'
    x_tokens = x_tokens.to(device)
    if x_mask is not None: x_mask = x_mask.to(device)
    if x_lens is not None:
        x_reverse_mask = get_reverse_mask(x_lens)
    else:
        x_reverse_mask = None
    eos_token = tokenizer.eos_token_id
    with torch.no_grad():
        if not only_decoder:
            # Sample z from the prior conditioned on the prompt.
            prior_mean, prior_logvar = model.encoder_prior(input_ids=x_tokens, attention_mask=x_mask)[:2]
            latent_mean, latent_logvar = prior_mean, prior_logvar
            z = model.reparameterize(latent_mean, latent_logvar)
            assert not torch.isnan(z).any(), 'training get nan z'
            add_attn = True
        else:
            z = None
            add_attn = False
        # Prime the transformer cache with all prompt tokens but the last,
        # then seed generation with the final prompt token.
        _, mem = model.transformer(input_ids=x_tokens[:, :-1], past=None, representations=z, add_attn=add_attn)
        prev = x_tokens[..., -2].view(batch_size, -1)
        output = prev
        probability = torch.tensor([], dtype=torch.float, device=device)
        if_end = torch.tensor([False] * batch_size, dtype=torch.bool, device=device)
        for i in range(length):
            logits, mem = model.transformer(input_ids=prev, past=mem, representations=z, add_attn=add_attn)
            logits = model.lm_head(logits)
            # Temperature scaling + nucleus/top-k filtering on the last step.
            logits = logits[:, -1, :] / temperature
            logits = top_k_top_p_filtering(logits, top_k, top_p)
            probs = F.softmax(logits, dim=-1)
            if sampling:
                next_token = torch.multinomial(probs, num_samples=1)
            else:
                _, next_token = torch.topk(probs, k=1, dim=-1)
            probability = torch.cat((probability, probs.gather(1, next_token)), dim=1)
            output = torch.cat((output, next_token), dim=1)
            prev = next_token
            # Stop early once every sequence has produced eos at least once.
            if_end[next_token.view(-1).eq(eos_token)] = True
            if if_end.all(): break
    return output[:, 1:]
def decode_text(text, eos, tokenizer):
    """Decode the token ids located after the first `eos` marker.

    Everything up to and including the first occurrence of `eos` is discarded;
    if another `eos` follows, the ids from it onward are discarded too.
    Raises ValueError when `eos` does not appear in `text` at all.
    """
    start = text.index(eos) + 1
    tail = text[start:]
    try:
        tail = tail[:tail.index(eos)]
    except ValueError:
        pass  # no second eos — keep the whole tail
    return tokenizer.decode(tail).strip()
class PadBatchSeq:
    """Collate callable that pads a batch of variable-length id sequences.

    For every id field it produces a padded LongTensor (right-padded, except
    the context fields which are left-padded so generation starts right after
    the context), a ByteTensor attention mask marking real tokens, and — for
    the loss-bearing fields — a label mask that is 1 only on the tokens whose
    loss should be computed (everything after the prompt/context prefix).

    Fix vs. original: ``res['gene_input_label_mask']`` was assigned twice
    with the same value; the duplicated mask-building boilerplate is now
    factored into two helpers.
    """

    def __init__(self, pad_id=0):
        # Token id used to fill padded positions (typically the eos/pad id).
        self.pad_id = pad_id

    @staticmethod
    def _attn_mask(lens):
        """ByteTensor mask: 1 for the first ``lens[i]`` positions, 0 after."""
        width = max(lens)
        return torch.ByteTensor([[1] * l + [0] * (width - l) for l in lens])

    @staticmethod
    def _label_mask(prefix_lens, lens):
        """ByteTensor mask: 0 over the prefix, 1 to the sequence end, 0 pad."""
        width = max(lens)
        return torch.ByteTensor(
            [[0] * p + [1] * (l - p) + [0] * (width - l)
             for p, l in zip(prefix_lens, lens)])

    def __call__(self, batch):
        fields = ['utter_id', 'input_id', 'posterior_id', 'prompt_id',
                  'gene_prompt_id', 'gene_input_id', 'gene_posterior_id',
                  'context_id', 'general_context_id', 'ans_id',
                  'all_id', 'gene_all_id']
        ids = {f: [item[f] for item in batch] for f in fields}
        lens = {f: [len(seq) for seq in ids[f]] for f in fields}

        res = {}
        # Right-padded id tensors.
        for f in ['prompt_id', 'gene_prompt_id', 'input_id', 'gene_input_id',
                  'posterior_id', 'gene_posterior_id', 'ans_id',
                  'all_id', 'gene_all_id', 'utter_id']:
            res[f] = torch.tensor(
                [pad_seq(seq, self.pad_id, max(lens[f])) for seq in ids[f]],
                dtype=torch.long)
        # Context fields are left-padded so decoding continues from the end.
        for f in ['context_id', 'general_context_id']:
            res[f] = torch.tensor(
                [pad_seq(seq, self.pad_id, max(lens[f]), pad_left=True)
                 for seq in ids[f]],
                dtype=torch.long)
        # Raw length tensors.
        for f, key in [('all_id', 'all_lens'),
                       ('context_id', 'context_lens'),
                       ('general_context_id', 'general_context_lens'),
                       ('ans_id', 'ans_lens'),
                       ('prompt_id', 'prompt_lens')]:
            res[key] = torch.tensor(lens[f], dtype=torch.long)
        # Attention masks (1 on real tokens, 0 on padding).
        for f, key in [('all_id', 'all_mask'),
                       ('context_id', 'context_mask'),
                       ('prompt_id', 'prompt_mask'),
                       ('ans_id', 'ans_mask'),
                       ('input_id', 'input_mask'),
                       ('utter_id', 'utter_mask'),
                       ('posterior_id', 'posterior_mask'),
                       ('general_context_id', 'general_context_mask'),
                       ('gene_all_id', 'gene_all_mask'),
                       ('gene_input_id', 'gene_input_mask'),
                       ('gene_prompt_id', 'gene_prompt_mask'),
                       ('gene_posterior_id', 'gene_posterior_mask')]:
            res[key] = self._attn_mask(lens[f])
        # Label masks: losses are only computed past the prompt/context prefix.
        res['all_label_mask'] = self._label_mask(lens['context_id'], lens['all_id'])
        res['gene_all_label_mask'] = self._label_mask(
            lens['general_context_id'], lens['gene_all_id'])
        # For the *_input fields the prefix is the prompt minus its last token,
        # so the loss also covers the token right at the prompt boundary.
        res['input_label_mask'] = self._label_mask(
            [p - 1 for p in lens['prompt_id']], lens['input_id'])
        res['gene_input_label_mask'] = self._label_mask(
            [p - 1 for p in lens['gene_prompt_id']], lens['gene_input_id'])
        return PinnedBatch(res)
def get_datasets(path, tasks, tokz, num_workers=8, ctx_max_len=100):
    """Build train/val/test datasets for each task.

    Args:
        path: root data directory; each task lives under
            ``<path>/<dataset_folder>/ripe_data/{train,valid,test}.json``.
        tasks: iterable of task names (keys into ``TASK2INFO``).
        tokz: tokenizer forwarded to the dataset class.
        num_workers: worker count forwarded to the dataset class.
        ctx_max_len: maximum context length forwarded to the dataset class.

    Returns:
        dict mapping task -> {'train': ..., 'val': ..., 'test': ...}.
    """
    res = {}
    for task in tasks:
        # Fix: the original looked up TASK2INFO[task] again for val/test even
        # though ``info`` was already bound — use it consistently.
        info = TASK2INFO[task]
        cls = info['dataset_class']
        folder = os.path.join(path, info['dataset_folder'], 'ripe_data')
        res[task] = {
            split: cls(task, tokz, os.path.join(folder, fname),
                       num_workers=num_workers, ctx_max_len=ctx_max_len)
            for split, fname in (('train', 'train.json'),
                                 ('val', 'valid.json'),
                                 ('test', 'test.json'))
        }
    return res
def run_model():
    """CLI entry point: restore a trained (C)VAE-GPT2 checkpoint and sample.

    For every task's test split this draws several stochastic generations per
    prompt (to expose the effect of the latent code) and writes them to
    ``<output_dir>/<experiment>/check.txt`` for manual inspection.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--experiment', type=str)
    parser.add_argument("--data_dir", default="./PLL_DATA/", type=str,
                        help="The path to train/dev/test data files.")
    parser.add_argument('--tasks', nargs='+', default=['banking'])
    parser.add_argument('--model_path', type=str,
                        help='pretrained model path to local checkpoint')
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--batch_size", type=int, default=50)
    parser.add_argument("--length", type=int, default=-1)
    # Fix: temperature is fractional; ``type=int`` would truncate/reject any
    # value supplied on the command line (and contradicted the 0.95 default).
    parser.add_argument("--temperature", type=float, default=0.95)
    parser.add_argument('--top_p', type=float, default=0.95)
    parser.add_argument('--top_k', type=int, default=100)
    parser.add_argument('--output_dir', type=str, default='out')
    parser.add_argument('--data_type', type=str, default='t1',
                        choices=['t' + str(i) for i in range(9)], help="t: type")
    parser.add_argument('--model_type', type=str, default='cvae',
                        choices=['cvae', 'ae_vae_fusion'])
    parser.add_argument('--gpu', default=0, type=int)
    parser.add_argument('--no_gpu', action="store_true")
    parser.add_argument('--add_input', action="store_true")
    parser.add_argument('--add_attn', action="store_true")
    parser.add_argument('--add_softmax', action="store_true")
    parser.add_argument('--attn_proj_vary', action="store_true")
    parser.add_argument('--learn_prior', action="store_true")
    parser.add_argument("--eval_batch_size", default=100, type=int,
                        help="Batch size per GPU/CPU for evaluation.")
    args = parser.parse_args()
    args.log_file = os.path.join(args.output_dir, 'log.txt')
    print(args)

    # This script only evaluates the CVAE variant with a learned prior.
    args.model_type = 'cvae'
    args.learn_prior = True

    # Device selection.
    if not torch.cuda.is_available():
        args.no_gpu = True
    gpu = not args.no_gpu
    if gpu:
        torch.cuda.set_device(args.gpu)
    device = torch.device(args.gpu if gpu else "cpu")

    # Reproducibility.
    np.random.seed(args.seed)
    torch.random.manual_seed(args.seed)
    if gpu:
        torch.cuda.manual_seed(args.seed)
    if args.batch_size == -1:
        args.batch_size = 1

    # Logging / output folders.
    save_folder = os.path.join(args.output_dir, args.experiment)
    os.makedirs(save_folder, exist_ok=True)
    logger = get_logger(args.log_file)
    logger.info('\n----------------------------------------------------------------------')

    print('Loading models...')
    cache_dir = os.path.join(args.output_dir, 'model_cache')
    os.makedirs(cache_dir, exist_ok=True)
    tokenizer = GPT2Tokenizer.from_pretrained('gpt2', cache_dir=cache_dir)
    tokenizer.max_len = int(1e12)  # effectively disable length truncation warnings
    gpt2_model = GPT2LMHeadModel.from_pretrained('gpt2', cache_dir=cache_dir)
    print('gpt2_params:', num_params(gpt2_model))  # gpt2: 124439808
    config = GPT2Config()

    # Restore the trained VAE weights.
    VAE = VAEModel(config, add_input=args.add_input, add_attn=args.add_attn,
                   add_softmax=args.add_softmax,
                   attn_proj_vary=args.attn_proj_vary, learn_prior=args.learn_prior)
    args.load = args.model_path
    print('Loading model weights...')
    state = torch.load(args.load)
    VAE.load_state_dict(state)
    print('=' * 25, 'Load trained model successfully.', '=' * 25, flush=True)
    gc.collect()
    print('VAE_params:', num_params(VAE))  # 286694400

    print('Setup data...', flush=True)
    seq_len = VAE.config.n_ctx
    VAE.config.n_ctx = 100  # cap the context window for this evaluation run
    print('VAE config n_ctx: sequence length', seq_len, flush=True)

    datasets = get_datasets(args.data_dir, args.tasks, tokenizer,
                            num_workers=1, ctx_max_len=100)
    for task in datasets:
        test_loader = DataLoader(datasets[task]['test'],
                                 batch_size=args.eval_batch_size, sampler=None,
                                 num_workers=1, pin_memory=True,
                                 collate_fn=PadBatchSeq(tokenizer.eos_token_id))
        VAE.eval()  # sampling only: no dropout, no gradient updates
        VAE.to(device)
        logger.info('\n----------------------------------------------------------------------')
        logger.info("Testing loop. batches: %d" % len(test_loader))
        endoftext = tokenizer.convert_tokens_to_ids("<|endoftext|>")
        n_samples = 0
        check_file = open(os.path.join(save_folder, 'check.txt'), 'w', encoding='utf8')
        with tqdm(total=len(test_loader)) as pbar:
            for i, data in enumerate(test_loader):
                x_mask = data['prompt_mask']
                x_tokens = data['prompt_id']
                # NOTE(review): these lengths come from padded rows, so every
                # entry is (padded_width - 1) — confirm that is intended.
                x_lens = torch.tensor([len(row) - 1 for row in x_tokens],
                                      dtype=torch.long).to(device)
                length = args.length
                if length == -1:
                    length = VAE.config.n_ctx - x_tokens.size(1) - 1
                elif length > VAE.config.n_ctx - x_tokens.size(1) - 1:
                    raise ValueError("Can't get samples longer than window size: %s"
                                     % VAE.config.n_ctx)
                target_tokens = data['input_id'][..., 1:].contiguous()
                eff_samples = []
                n, l = target_tokens.size()
                for _ in range(1):
                    all_out = []
                    sample_time = 5  # draw 5 alternative generations per prompt
                    for zz in range(sample_time):
                        out = sample_sequence(
                            model=VAE,
                            tokenizer=tokenizer,
                            length=length,
                            batch_size=x_tokens.size()[0],
                            x_mask=x_mask,
                            x_tokens=x_tokens,
                            x_lens=x_lens,
                            temperature=args.temperature,
                            top_k=args.top_k,
                            top_p=args.top_p,
                        )
                        out = out.tolist()
                        all_out.append(out)
                    # Write the alternative generations side by side so the
                    # latent code's influence can be inspected by eye.
                    for ss in range(len(all_out[0])):
                        check_file.write('\n')
                        check_file.write('=' * 20 + 'SAMPLE: %d' % n_samples + '=' * 20)
                        check_file.write('\n')
                        iii = 1
                        for oout in all_out:
                            text = decode_text(oout[ss], endoftext, tokenizer)
                            check_file.write(str(iii) + '==:')
                            check_file.write(text)
                            check_file.write('\n')
                            iii += 1
                        check_file.write('-' * 100)
                        check_file.flush()
                        n_samples += 1
                        eff_samples.append((text, 0))
                        # Only evaluate part of the test data.
                        # NOTE(review): this break leaves the sample loop but
                        # the batch loop keeps running — confirm intended.
                        if n_samples >= 100:
                            break
                logger.info('batch %d finished.' % n_samples)
                pbar.update(1)
        print('Test complete with %d samples.' % n_samples)
        logger.info("Test complete with %d samples." % n_samples)
165,007 | import sys
def uniq_n_gram_length(words, n):
    """Return the number of distinct n-grams in ``words``."""
    # ``set`` already deduplicates; no need to materialize a list first.
    return len(set(make_n_gram(words, n)))
def distinct0(words):
    """Distinct-1..4 statistics for a flat token list.

    Returns (raw distinct n-gram counts, token count, counts normalised by
    the token count); empty results when there are no tokens.
    """
    counts = [uniq_n_gram_length(words, order) for order in range(1, 5)]
    total = len(words)
    if total == 0:
        return [], 0, []
    return counts, total, [c * 1.0 / total for c in counts]
165,008 | import sys
def readinputall():
    """Read every stdin line, drop ' </s>' markers, return token lists."""
    return [line.strip("\n").replace(' </s>', '').split()
            for line in sys.stdin]
165,009 | import sys
def readinput(k):
    """Read stdin, keeping at most ``k`` non-empty lines per blank-line-separated group."""
    kept = []
    taken = 0
    for line in sys.stdin:
        words = line.strip("\n").replace(' </s>', '').split()
        if not words:
            taken = 0  # a blank line starts a new group
            continue
        if taken < k:
            taken += 1
            kept.append(words)
    return kept
165,010 | import sys
def round_for_list(x, precision):
    """Round each value in ``x`` to ``precision`` decimal places."""
    rounded = []
    for value in x:
        rounded.append(round(value, precision))
    return rounded
def make_all_n_gram(datas):
    """Compute 1..4-gram lists, their distinct counts, and distinct/token ratios.

    Returns (ngram lists, distinct counts, token count, normalised scores).
    """
    token_count = len(datas)
    ngrams = [make_n_gram(datas, order) for order in range(1, 5)]
    uniq_counts = [get_uniq_length(grams) for grams in ngrams]
    scores = [count * 1.0 / token_count for count in uniq_counts]
    return ngrams, uniq_counts, token_count, scores
def distinct(datas):
    """Corpus-level distinct-n: flatten ``datas`` (list of token lists) and score."""
    flat = []
    for sentence in datas:
        flat.extend(sentence)
    _, uniq_counts, token_count, scores = make_all_n_gram(flat)
    scores = round_for_list(scores, 4)
    return scores, token_count, uniq_counts
165,011 | import json
import logging
import os
import sys
import time
from dataclasses import asdict, dataclass, field
from datasets import load_dataset, load_metric
from enum import Enum
from itertools import chain
from pathlib import Path
from typing import Dict, List, Optional
import numpy as np
from datasets import load_dataset
from tqdm import tqdm
import flax
import jax
import jax.numpy as jnp
import optax
from flax import jax_utils, traverse_util
from flax.training import train_state
from flax.training.common_utils import get_metrics, onehot, shard
from huggingface_hub import Repository
import torch
from torch.utils.data import DataLoader
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data as data
from torch.nn import DataParallel
import torch.distributed as dist
import torch.multiprocessing as mp
import numpy as np
import transformers
from transformers import get_linear_schedule_with_warmup, Conv1D, AdamW
from torch.utils.tensorboard import SummaryWriter
from transformers import (
CONFIG_MAPPING,
FLAX_MODEL_FOR_MASKED_LM_MAPPING,
AutoTokenizer,
BatchEncoding,
FlaxT5ForConditionalGeneration,
HfArgumentParser,
PreTrainedTokenizerBase,
T5Config,
is_tensorboard_available,
set_seed,
T5ForConditionalGeneration,
Trainer
)
from transformers.models.t5.modeling_flax_t5 import shift_tokens_right
from transformers import (
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
TrainingArguments,
is_torch_tpu_available,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
The provided code snippet includes necessary dependencies for implementing the `compute_input_and_target_lengths` function. Write a Python function `def compute_input_and_target_lengths(inputs_length, noise_density, mean_noise_span_length)` to solve the following problem:
This function is a copy of `random_spans_helper <https://github.com/google-research/text-to-text-transfer-transformer/blob/84f8bcc14b5f2c03de51bd3587609ba8f6bbd1cd/t5/data/preprocessors.py#L2466>`__ . Training parameters to avoid padding with random_spans_noise_mask. When training a model with random_spans_noise_mask, we would like to set the other training hyperparameters in a way that avoids padding. This function helps us compute these hyperparameters. We assume that each noise span in the input is replaced by extra_tokens_per_span_inputs sentinel tokens, and each non-noise span in the targets is replaced by extra_tokens_per_span_targets sentinel tokens. This function tells us the required number of tokens in the raw example (for split_tokens()) as well as the length of the encoded targets. Note that this function assumes the inputs and targets will have EOS appended and includes that in the reported length. Args: inputs_length: an integer - desired length of the tokenized inputs sequence noise_density: a float mean_noise_span_length: a float Returns: tokens_length: length of original text in tokens targets_length: an integer - length in tokens of encoded targets sequence
Here is the function:
def compute_input_and_target_lengths(inputs_length, noise_density, mean_noise_span_length):
    """Pick raw-token and target lengths so span corruption needs no padding.

    Mirrors T5's ``random_spans_helper``: each noise span contributes one
    sentinel token to both the inputs and the targets, and both sequences get
    an EOS token appended.

    Args:
        inputs_length: desired length of the tokenized inputs sequence.
        noise_density: fraction of tokens to corrupt.
        mean_noise_span_length: average length of a corrupted span.

    Returns:
        (tokens_length, targets_length): raw text length in tokens and the
        length in tokens of the encoded target sequence.
    """

    def _split_lengths(raw_len):
        # Number of corrupted tokens / spans implied by raw_len.
        noise_tokens = int(round(raw_len * noise_density))
        spans = int(round(noise_tokens / mean_noise_span_length))
        # Inputs keep the clean tokens plus one sentinel per span plus EOS;
        # targets hold the noise tokens plus one sentinel per span plus EOS.
        return raw_len - noise_tokens + spans + 1, noise_tokens + spans + 1

    # Grow the raw length as far as possible without the encoded inputs
    # exceeding the requested inputs_length.
    tokens_length = inputs_length
    while _split_lengths(tokens_length + 1)[0] <= inputs_length:
        tokens_length += 1
    inputs_length, targets_length = _split_lengths(tokens_length)

    # Minor hack: with 50% noise, nudge the lengths so the targets match the
    # (usually round) input size.
    if noise_density == 0.5 and targets_length > inputs_length:
        tokens_length -= 1
        targets_length -= 1
    return tokens_length, targets_length
165,012 | import json
import logging
import os
import sys
import time
from dataclasses import asdict, dataclass, field
from datasets import load_dataset, load_metric
from enum import Enum
from itertools import chain
from pathlib import Path
from typing import Dict, List, Optional
import numpy as np
from datasets import load_dataset
from tqdm import tqdm
import flax
import jax
import jax.numpy as jnp
import optax
from flax import jax_utils, traverse_util
from flax.training import train_state
from flax.training.common_utils import get_metrics, onehot, shard
from huggingface_hub import Repository
import torch
from torch.utils.data import DataLoader
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data as data
from torch.nn import DataParallel
import torch.distributed as dist
import torch.multiprocessing as mp
import numpy as np
import transformers
from transformers import get_linear_schedule_with_warmup, Conv1D, AdamW
from torch.utils.tensorboard import SummaryWriter
from transformers import (
CONFIG_MAPPING,
FLAX_MODEL_FOR_MASKED_LM_MAPPING,
AutoTokenizer,
BatchEncoding,
FlaxT5ForConditionalGeneration,
HfArgumentParser,
PreTrainedTokenizerBase,
T5Config,
is_tensorboard_available,
set_seed,
T5ForConditionalGeneration,
Trainer
)
from transformers.models.t5.modeling_flax_t5 import shift_tokens_right
from transformers import (
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
TrainingArguments,
is_torch_tpu_available,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
def generate_batch_splits(samples_idx: np.ndarray, batch_size: int) -> List[np.ndarray]:
    """Split ``samples_idx`` into equally sized batches of ``batch_size``.

    Trailing samples that do not fill a complete batch are dropped.

    Fix: the original annotated the return as ``jnp.ndarray`` although
    ``np.split`` returns a Python list of arrays.

    Args:
        samples_idx: 1-D array of sample indices (typically a permutation).
        batch_size: number of indices per batch.

    Returns:
        A list of arrays, each of length ``batch_size``.
    """
    num_samples = len(samples_idx)
    # Drop the ragged tail so every split has exactly ``batch_size`` entries.
    samples_to_remove = num_samples % batch_size
    if samples_to_remove != 0:
        samples_idx = samples_idx[:-samples_to_remove]
    sections_split = num_samples // batch_size
    return np.split(samples_idx, sections_split)
165,013 | import json
import logging
import os
import sys
import time
from dataclasses import asdict, dataclass, field
from datasets import load_dataset, load_metric
from enum import Enum
from itertools import chain
from pathlib import Path
from typing import Dict, List, Optional
import numpy as np
from datasets import load_dataset
from tqdm import tqdm
import flax
import jax
import jax.numpy as jnp
import optax
from flax import jax_utils, traverse_util
from flax.training import train_state
from flax.training.common_utils import get_metrics, onehot, shard
from huggingface_hub import Repository
import torch
from torch.utils.data import DataLoader
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data as data
from torch.nn import DataParallel
import torch.distributed as dist
import torch.multiprocessing as mp
import numpy as np
import transformers
from transformers import get_linear_schedule_with_warmup, Conv1D, AdamW
from torch.utils.tensorboard import SummaryWriter
from transformers import (
CONFIG_MAPPING,
FLAX_MODEL_FOR_MASKED_LM_MAPPING,
AutoTokenizer,
BatchEncoding,
FlaxT5ForConditionalGeneration,
HfArgumentParser,
PreTrainedTokenizerBase,
T5Config,
is_tensorboard_available,
set_seed,
T5ForConditionalGeneration,
Trainer
)
from transformers.models.t5.modeling_flax_t5 import shift_tokens_right
from transformers import (
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
TrainingArguments,
is_torch_tpu_available,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
def write_train_metric(summary_writer, train_metrics, train_time, step):
    """Log accumulated per-step training metrics (and wall time) to tensorboard."""
    summary_writer.scalar("train_time", train_time, step)
    stacked = get_metrics(train_metrics)
    for name, values in stacked.items():
        # Entry ``i`` belongs to step ``step - len(values) + 1 + i``.
        first_step = step - len(values) + 1
        for offset, value in enumerate(values):
            summary_writer.scalar(f"train_{name}", value, first_step + offset)
165,014 | import json
import logging
import os
import sys
import time
from dataclasses import asdict, dataclass, field
from datasets import load_dataset, load_metric
from enum import Enum
from itertools import chain
from pathlib import Path
from typing import Dict, List, Optional
import numpy as np
from datasets import load_dataset
from tqdm import tqdm
import flax
import jax
import jax.numpy as jnp
import optax
from flax import jax_utils, traverse_util
from flax.training import train_state
from flax.training.common_utils import get_metrics, onehot, shard
from huggingface_hub import Repository
import torch
from torch.utils.data import DataLoader
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data as data
from torch.nn import DataParallel
import torch.distributed as dist
import torch.multiprocessing as mp
import numpy as np
import transformers
from transformers import get_linear_schedule_with_warmup, Conv1D, AdamW
from torch.utils.tensorboard import SummaryWriter
from transformers import (
CONFIG_MAPPING,
FLAX_MODEL_FOR_MASKED_LM_MAPPING,
AutoTokenizer,
BatchEncoding,
FlaxT5ForConditionalGeneration,
HfArgumentParser,
PreTrainedTokenizerBase,
T5Config,
is_tensorboard_available,
set_seed,
T5ForConditionalGeneration,
Trainer
)
from transformers.models.t5.modeling_flax_t5 import shift_tokens_right
from transformers import (
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
TrainingArguments,
is_torch_tpu_available,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
def write_eval_metric(summary_writer, eval_metrics, step):
    """Log each evaluation metric under an ``eval_`` tag at ``step``."""
    for name in eval_metrics:
        summary_writer.scalar(f"eval_{name}", eval_metrics[name], step)
165,015 | import nltk
import re
import random
from random import shuffle
random.seed(1)
def get_only_chars(line):
    """Lower-case ``line`` and keep only ascii letters separated by single spaces.

    Apostrophes are removed; hyphens, tabs and newlines become spaces; every
    other non-letter becomes a space; runs of spaces are collapsed and one
    leading space is stripped.  The empty string is returned unchanged
    (the original implementation raised IndexError on empty input).
    """
    line = line.replace("’", "")
    line = line.replace("'", "")
    line = line.replace("-", " ")  # replace hyphens with spaces
    line = line.replace("\t", " ")
    line = line.replace("\n", " ")
    line = line.lower()

    clean_line = ""
    for char in line:
        if char in 'qwertyuiopasdfghjklzxcvbnm ':
            clean_line += char
        else:
            clean_line += ' '

    clean_line = re.sub(' +', ' ', clean_line)  # delete extra spaces
    # Fix: guard against empty input; the original indexed clean_line[0] blindly.
    if clean_line.startswith(' '):
        clean_line = clean_line[1:]
    return clean_line
def synonym_replacement(words, n):
    """Return a copy of *words* with up to *n* non-stopword tokens swapped for synonyms."""
    new_words = words.copy()
    candidates = list(set(w for w in words if w not in stop_words))
    random.shuffle(candidates)

    replaced = 0
    for candidate in candidates:
        synonyms = get_synonyms(candidate)
        if synonyms:
            chosen = random.choice(list(synonyms))
            new_words = [chosen if w == candidate else w for w in new_words]
            #print("replaced", candidate, "with", chosen)
            replaced += 1
            if replaced >= n:  # only replace up to n words
                break

    # Re-join and re-split so multi-word synonyms become separate tokens.
    return ' '.join(new_words).split(' ')
def random_deletion(words, p):
    """Drop each word independently with probability *p*; never return an empty list."""
    # A single-word input is returned untouched.
    if len(words) == 1:
        return words

    # Keep a word whenever its draw exceeds the deletion probability.
    kept = [word for word in words if random.uniform(0, 1) > p]

    # If everything was deleted, fall back to one randomly chosen word.
    if len(kept) == 0 and len(words) > 1:
        return [words[random.randint(0, len(words) - 1)]]
    return kept
def random_swap(words, n):
    """Apply *n* random position swaps (via swap_word) to a copy of *words*."""
    swapped = words.copy()
    for _ in range(n):
        # swap_word needs at least two tokens to operate on; otherwise reset
        # to a fresh copy of the input, matching the original behavior.
        swapped = swap_word(swapped) if len(swapped) > 1 else words.copy()
    return swapped
def random_insertion(words, n):
    """Return a copy of *words* after *n* in-place add_word insertions."""
    augmented = list(words)
    for _ in range(n):
        add_word(augmented)
    return augmented
def eda(sentence, alpha_sr=0.0, alpha_ri=0.0, alpha_rs=0.1, p_rd=0.05, num_aug=5):
    """Easy Data Augmentation over a single sentence.

    Args:
        sentence: raw input sentence.
        alpha_sr: fraction of words for synonym replacement (0 disables).
        alpha_ri: fraction of words for random insertion (0 disables).
        alpha_rs: fraction of words for random swap (0 disables).
        p_rd: per-word random-deletion probability (0 disables).
        num_aug: desired number of augmented sentences; values < 1 are
            treated as a keep-probability over the generated pool.

    Returns:
        A shuffled list of augmented sentences that always includes the
        cleaned original sentence.
    """
    sentence = get_only_chars(sentence)
    words = sentence.split(' ')
    words = [word for word in words if word != '']
    num_words = len(words)

    augmented_sentences = []
    # Each enabled technique contributes roughly a quarter of the requested total.
    num_new_per_technique = int(num_aug / 4) + 1

    # sr: synonym replacement
    if (alpha_sr > 0):
        n_sr = max(1, int(alpha_sr * num_words))
        for _ in range(num_new_per_technique):
            a_words = synonym_replacement(words, n_sr)
            augmented_sentences.append(' '.join(a_words))

    # ri: random insertion
    if (alpha_ri > 0):
        n_ri = max(1, int(alpha_ri * num_words))
        for _ in range(num_new_per_technique):
            a_words = random_insertion(words, n_ri)
            augmented_sentences.append(' '.join(a_words))

    # rs: random swap
    if (alpha_rs > 0):
        n_rs = max(1, int(alpha_rs * num_words))
        for _ in range(num_new_per_technique):
            a_words = random_swap(words, n_rs)
            augmented_sentences.append(' '.join(a_words))

    # rd: random deletion
    if (p_rd > 0):
        for _ in range(num_new_per_technique):
            a_words = random_deletion(words, p_rd)
            augmented_sentences.append(' '.join(a_words))

    augmented_sentences = [get_only_chars(s) for s in augmented_sentences]
    shuffle(augmented_sentences)

    # trim so that we have the desired number of augmented sentences
    if num_aug >= 1:
        augmented_sentences = augmented_sentences[:num_aug]
    elif augmented_sentences:
        # Bug fix: the original divided by len(augmented_sentences) without
        # checking for an empty pool, raising ZeroDivisionError when every
        # technique is disabled and num_aug < 1.
        keep_prob = num_aug / len(augmented_sentences)
        augmented_sentences = [
            s for s in augmented_sentences if random.uniform(0, 1) < keep_prob]

    # append the original (cleaned) sentence
    augmented_sentences.append(sentence)
    shuffle(augmented_sentences)

    return augmented_sentences
165,016 | from unifymodel.dataset import PadBatchSeq, pad_seq, get_unlabel_data
import logging
import random
import torch
import numpy as np
from torch.utils.data import DataLoader
import json
from tqdm import tqdm
from sklearn.metrics import accuracy_score, f1_score
import torch.distributed as dist
import os, time, gc, json, pickle, argparse, math, re
import torch.nn as nn
import torch.utils.data as data
import torch.distributed as dist
import torch.multiprocessing as mp
import copy
from transformers import GPT2Config, GPT2Model, GPT2LMHeadModel, GPT2Tokenizer
import torch.nn.functional as F
from tools.eda import *
# Cache of already-created loggers, keyed by log filename.
loggers = {}

def get_logger(filename, level=logging.INFO, print2screen=True):
    """Create (or fetch from cache) a logger that appends to *filename*.

    Args:
        filename: path of the log file; also used as the logger's name and
            as the cache key.
        level: logging level applied to the logger and both handlers.
        print2screen: when True, also echo records to the console via a
            StreamHandler.

    Returns:
        A configured ``logging.Logger``.

    Note:
        The original implementation deleted an existing log file *before*
        consulting the cache, which orphaned the cached logger's open
        FileHandler (silently losing output on POSIX; raising on Windows).
        The file is now only removed when a fresh logger is created.
    """
    global loggers

    cached = loggers.get(filename)
    if cached:
        return cached

    # Start from a clean log file for a brand-new logger.
    if os.path.exists(filename):
        os.remove(filename)

    logger = logging.getLogger(filename)
    logger.setLevel(level)

    fh = logging.FileHandler(filename, encoding='utf-8')
    fh.setLevel(level)
    ch = logging.StreamHandler()
    ch.setLevel(level)

    formatter = logging.Formatter(
        '[%(asctime)s][%(filename)s][line: %(lineno)d][%(levelname)s] >> %(message)s')
    fh.setFormatter(formatter)
    ch.setFormatter(formatter)

    logger.addHandler(fh)
    if print2screen:
        logger.addHandler(ch)

    loggers[filename] = logger
    return logger
# NOTE(review): the trailing lines "Subsets and Splits" / "No community queries
# yet" / "The top public SQL queries from the community will appear here once
# available." were web-page scrape residue, not code; commented out so the file
# stays syntactically valid.