id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
164,296 | import os
import re
import json
import pickle
import random
from template_config import *
from collections import defaultdict
from nltk.stem.porter import PorterStemmer
from nltk.stem.wordnet import WordNetLemmatizer
import nltk
def get_all_question_query_pairs(data):
    """Collect (question tokens, query, db id) triples from every example.

    Args:
        data: iterable of dicts with 'question_toks', 'query', 'db_id' keys.

    Returns:
        list of (question token list, SQL query string, database id string).
    """
    return [(sample['question_toks'], sample['query'], sample['db_id'])
            for sample in data]
164,297 | import os
import re
import json
import pickle
import random
from template_config import *
from collections import defaultdict
from nltk.stem.porter import PorterStemmer
from nltk.stem.wordnet import WordNetLemmatizer
import nltk
The provided code snippet includes necessary dependencies for implementing the `is_value` function. Write a Python function `def is_value(token)` to solve the following problem:
as values can either be a numerical digit or a string literal, then we can detect if a token is a value by matching with regex
Here is the function:
def is_value(token):
    """
    Return True when *token* looks like a SQL value: either a numerical
    digit (anything float() accepts) or a string literal (it begins or
    ends with a single or double quote).
    """
    try:
        float(token)
        return True
    except ValueError:
        pass
    quote_chars = ("\"", "\'")
    return token.startswith(quote_chars) or token.endswith(quote_chars)
164,298 | import os
import re
import json
import pickle
import random
from template_config import *
from collections import defaultdict
from nltk.stem.porter import PorterStemmer
from nltk.stem.wordnet import WordNetLemmatizer
import nltk
def filter_string(cs):
    """Uppercase *cs*, keeping only alphabetic characters and spaces."""
    kept = (ch.upper() for ch in cs if ch.isalpha() or ch == ' ')
    return "".join(kept)
164,299 | import os
import re
import json
import pickle
import random
from template_config import *
from collections import defaultdict
from nltk.stem.porter import PorterStemmer
from nltk.stem.wordnet import WordNetLemmatizer
import nltk
def tune_pattern_with_index(pattern):
    """Keep only the ``{COLUMN...`` tokens of *pattern*, appending a closing
    ``}`` to each and dropping every other token."""
    columns = [tok + "}" for tok in pattern.split(" ") if "{COLUMN" in tok]
    return " ".join(columns)
164,300 | import os
import re
import json
import pickle
import random
from template_config import *
from collections import defaultdict
from nltk.stem.porter import PorterStemmer
from nltk.stem.wordnet import WordNetLemmatizer
import nltk
def clean_select(clause, table_dict):
    """Rewrite a SELECT-clause token list into its "OLD" placeholder form.

    Keeps only AGG/COLUMN placeholder tokens (re-tagging each with ``OLD``),
    drops commas that neither start the list nor directly follow a COLUMN
    token, then appends an ``of <table , table ...>`` suffix built from the
    first MAX_TABLE_USED entries of *table_dict*.
    """
    tagged = []
    for tok in clause:
        if 'AGG' not in tok and 'COLUMN' not in tok:
            continue
        if tok[-1] == '}':
            tagged.append(tok[:-1] + "OLD}")
        else:
            tagged.append(tok + 'OLD}')
    tokens = ' , '.join(tagged).split(' ')
    filtered = []
    for idx, tok in enumerate(tokens):
        # keep non-commas, the first token, and commas right after a COLUMN
        if tok != ',' or not idx or 'COLUMN' in tokens[idx - 1]:
            filtered.append(tok)
    suffix = 'of ' + ' , '.join(table_dict[:MAX_TABLE_USED])
    return filtered + suffix.split(' ')
164,301 | import json
import re
from nltk.metrics import accuracy
import os
from .utils import *
eval_type = 'dev'
import os
import json
def get_pattern_question(train_qq_pairs, tables):
    """Cluster (question, query) pairs by their tuned SQL pattern.

    Args:
        train_qq_pairs: list of (question token list, query string, db id).
        tables: dict mapping db id -> schema/table dict.

    Returns:
        (pattern_question_dict, detailed_pattern_question_dict), both sorted
        with the largest clusters first; the detailed variant additionally
        sorts each cluster by descending placeholder count in the template.

    NOTE(review): relies on strip_query_full_dict, process_question_full_dict,
    general_pattern, sub_pattern and tune_pattern from the surrounding module
    (not visible in this snippet).
    """
    pattern_question_dict = defaultdict(list)
    detailed_pattern_question_dict = defaultdict(list)
    # train_qq_pairs
    for eid, (question, query, bd_id) in enumerate(train_qq_pairs):
        table = tables[bd_id]
        if eid % 500 == 0:
            print("processing eid: ", eid)  # progress heartbeat
        # # for debuging
        # if ' '.join(question) != 'Find the number of followers for each user .':
        #     continue
        # Abstract the query into a pattern plus placeholder dictionaries.
        pattern, *dicts = strip_query_full_dict(query, table)
        question_template = process_question_full_dict(question, *dicts)
        # Merge the four placeholder dicts into one name -> value mapping.
        name_dicts = {**dicts[0], **dicts[1], **dicts[2], **dicts[3]}
        gen_pattern = general_pattern(pattern)  # NOTE(review): unused below
        more_pattern = sub_pattern(pattern)
        tu_pattern = tune_pattern(pattern)
        # tu_pattern = tune_pattern(pattern[pattern.index("WHERE"):] if "WHERE" in pattern else pattern)
        pattern_question_dict[tu_pattern].append(' '.join(question) + " ||| " + query + " ||| " +
                                                 question_template + " ||| " + more_pattern
                                                 + " ||| " + query)
        detailed_pattern_question_dict[tu_pattern].append(
            {
                'question': ' '.join(question),
                'query': query,
                'template': question_template,
                'concise pattern': more_pattern,
                'name dict': name_dicts
            }
        )
        # print("\n--------------------------------------")
        # print("original question: {}".format(' '.join(question).encode('utf-8')))
        # print("question: {}".format(question_template.encode('utf-8')))
        # print("query: {}".format(query))
        # print("pattern: {}".format(pattern))
        # print("values: {}".format(values))
        # print("nums: {}".format(nums))
        # print("columns: {}".format(columns))
    # In[10]:
    print("total pattern number: {}".format(len(pattern_question_dict)))
    # Sort clusters by size, largest first.
    pattern_question_dict = sorted(pattern_question_dict.items(), key=lambda kv: len(kv[1]), reverse=True)
    # [(pattern(str), [question(str)])]
    detailed_pattern_question_dict = sorted(detailed_pattern_question_dict.items(),
                                            key=lambda kv: (len(kv[1]), kv[0]), reverse=True)
    # Within each cluster, samples with more placeholders come first.
    detailed_pattern_question_dict = [(x, sorted(y, key=lambda z: -z['template'].count('{')))
                                      for x, y in detailed_pattern_question_dict]
    return pattern_question_dict, detailed_pattern_question_dict
def data_to_components(data_path):
    """Load the ``<eval_type>.json`` split and ``tables.json`` from
    *data_path*, pair each example as (question tokens, query, db id),
    and cluster the pairs by SQL pattern.

    Returns:
        (question_query_pairs, detailed pattern -> question dictionary).
    """
    split_file = os.path.join(data_path, "{}.json".format(eval_type))
    with open(split_file) as handle:
        examples = json.load(handle)
    with open(os.path.join(data_path, "tables.json")) as handle:
        schemas = json.load(handle)
    tables = {schema['db_id']: schema for schema in schemas}
    # question query pairs: [([question_tok], query(str), db_id(str)]
    question_query_pairs = [
        (example['question_toks'], example['query'], example['db_id'])
        for example in examples
    ]
    _, detailed_train_pdq = get_pattern_question(question_query_pairs, tables)
    return question_query_pairs, detailed_train_pdq
164,302 | import json
import re
from nltk.metrics import accuracy
import os
from .utils import *
import os
def test_path(path, verbose=True):
    """Yield paths of readable ``test/epoch_10.json`` result files.

    Scans ``<path>/<subdir>/generator/<0..5>/test/epoch_10.json`` for every
    subdirectory of *path*, yielding each path that can actually be opened.

    Args:
        path: root directory containing one subdirectory per run.
        verbose: when True, print the exception raised for unreadable files.

    Yields:
        str: path of each readable epoch_10.json file.
    """
    for subdir in os.listdir(path):
        generator_dir = os.path.join(path, subdir, 'generator')
        # FIX: loop variable renamed from `iter`, which shadowed the builtin.
        for iteration in range(6):
            working_path = os.path.join(generator_dir, str(iteration))
            file_path = os.path.join(working_path, 'test/epoch_10.json')
            try:
                # Open purely to verify the file exists and is readable.
                with open(file_path, 'r', encoding='utf-8') as _:
                    pass
            except Exception as e:
                if verbose:
                    print(e)
                continue
            yield file_path
164,303 | import json
import re
from nltk.metrics import accuracy
import os
from .utils import *
import json
def load_jsonl(path):
    """Read a JSON-lines file (one JSON document per line).

    Returns:
        list of parsed objects, in file order.
    """
    with open(path, 'r', encoding='utf-8') as handle:
        return [json.loads(line) for line in handle]
164,304 | import json
import re
from nltk.metrics import accuracy
import os
from .utils import *
import json
def load_json(path):
    """Parse and return the JSON document stored at *path*."""
    with open(path, 'r', encoding='utf-8') as handle:
        return json.load(handle)
164,305 | import json
import re
from nltk.metrics import accuracy
import os
from .utils import *
def statis_eval_json(data):
    """Print how many samples are labeled true vs. false.

    Each element of *data* is a dict whose 'label' value converts to int
    (1 = true, 0 = false).
    """
    labels = [int(sample['label']) for sample in data]
    positives = sum(labels)
    print('True:', positives)
    print('False:', len(labels) - positives)
164,306 | import json
import re
from nltk.metrics import accuracy
import os
from .utils import *
import re
def extract_coponents(data, clean_number=True, clean_coma=True, lower_case=True, strip_all=True):
    """Flatten template-grouped samples into a dict keyed by SQL query.

    *data* is ``[[template, [sample, ...]], ...]`` where each sample dict
    carries 'query' and 'name dict' entries.  Samples are normalized IN
    PLACE:

    - clean_number: drop digit-valued placeholders whose value never occurs
      as a whole word in the query (earlier matches are masked so each
      value is consumed at most once);
    - clean_coma: strip single/double quotes from placeholder values;
    - lower_case: lowercase placeholder values;
    - strip_all: remove '%' wildcards from placeholder values.

    Returns:
        dict mapping each sample's query string to the (mutated) sample.
    """
    sql_dict = {}
    for group in data:
        for sample in group[1]:
            if clean_number:
                remaining = sample['query']
                kept = dict(sample['name dict'])
                for key, value in sample['name dict'].items():
                    if not str.isdigit(value):
                        continue
                    word = r"\b{}\b".format(value)
                    if re.search(word, remaining):
                        # mask the match so the same digits aren't reused
                        remaining = re.sub(word, '__FOUND__', remaining)
                    else:
                        del kept[key]
                sample['name dict'] = kept
            names = sample['name dict']
            if clean_coma:
                for key in names:
                    names[key] = names[key].replace('\'', '').replace('\"', '')
            if lower_case:
                for key in names:
                    names[key] = names[key].lower()
            if strip_all:
                for key in names:
                    names[key] = names[key].replace('%', '')
            sql_dict[sample['query']] = sample
    return sql_dict
164,307 | import json
import re
from nltk.metrics import accuracy
import os
from .utils import *
def debug(sam):
    # Debug helper: dump the question/SQL pair for positively-labeled samples.
    # NOTE(review): `question` and `sql` are not defined in this function or
    # anywhere visible in this module -- presumably notebook globals at
    # extraction time; calling this as-is raises NameError.  Verify intent.
    if sam['label'] == 1:
        print(question)
        print(sql)
        print()
164,308 | import json
import re
from nltk.metrics import accuracy
import os
from .utils import *
import json
def extract_translated_sqls(mapping_path, orgin_templates):
    """Map translated SQL strings to their placeholder name-dicts.

    Args:
        mapping_path: JSON file mapping original SQL -> translated SQL.
        orgin_templates: dict mapping original SQL -> sample dict that
            contains a 'name dict' entry.

    Returns:
        dict mapping translated SQL -> the sample's name dict.  Originals
        missing from the mapping (or samples lacking 'name dict') are
        reported and skipped.
    """
    ret = {}
    with open(mapping_path, 'r', encoding='utf-8') as file:
        data = json.load(file)
    # BUG FIX: previously iterated `origin_templates`, an undefined name
    # (the parameter is spelled `orgin_templates`), raising NameError.
    # Also narrowed the bare except to the KeyError it is meant to catch.
    for x, y in orgin_templates.items():
        try:
            ret[data[x]] = y['name dict']
        except KeyError:
            print(x, "not found")
    return ret
164,309 | import json
import re
from nltk.metrics import accuracy
import os
from .utils import *
def template_analysis(temp_dict):
    """Report problem templates.

    *temp_dict* maps a path to a list of (template, 1/0 correctness flag)
    pairs.  First prints every entry list that contains failures, then every
    ordered pair of paths whose template sets partially (but not fully)
    overlap.
    """
    # entry lists with at least one failing template
    for entries in temp_dict.values():
        failures = sum(entry[1] == 0 for entry in entries)
        if failures:
            print(entries, failures)
    # partial overlaps between any two paths' template sets
    for left in temp_dict.keys():
        left_set = set(temp_dict[left])
        for right in temp_dict.keys():
            shared = len(left_set & set(temp_dict[right]))
            if shared and shared != len(left_set):
                print(left, right)
                print(shared, len(left_set))
164,310 | import json
import re
from nltk.metrics import accuracy
import os
from .utils import *
import json
def pred_analysis(pred_dict, name):
    """Compare successive prediction files whose path contains *name*.

    *pred_dict* maps a result-file path (JSON-lines) to its list of 0/1
    predictions.  Files are visited in sorted path order; for each file
    after the first, samples that flipped away from a previously-correct
    prediction (1) are printed next to their previous version.

    NOTE(review): `trans_sql_to_names` must be a module-level mapping
    defined elsewhere -- NameError without it.  The loop variable also
    shadows the `name` parameter after the first iteration; presumably
    intentional, but verify.  The 'Previous Ture:' typo is in a runtime
    string and is preserved here.
    """
    selected_dict = [(x,y) for x,y in pred_dict.items() if name in x]
    selected_dict.sort(key=lambda x: x[0])
    previous_pred = None
    previous_data = []
    for i, (name, pred) in enumerate(selected_dict):
        # reload the raw JSON-lines samples for this prediction file
        data = []
        with open(name, 'r', encoding='utf-8') as infile:
            for line in infile:
                data.append(json.loads(line))
        if i == 0 :
            previous_data = data
            previous_pred = pred
        else:
            # samples whose prediction changed away from a previous 1
            diff = [(data[x], previous_data[x]) for x in range(len(pred)) if pred[x]!=previous_pred[x] and previous_pred[x] == 1]
            previous_pred = pred
            previous_data = data
            print('new false:',len(diff))
            for sample, old_sample in diff:
                print('False:')
                for x, y in sample.items():
                    print(x, y)
                print(trans_sql_to_names[sample['logic']])
                print()
                print('Previous Ture:')
                for x, y in old_sample.items():
                    print(x, y)
                print(trans_sql_to_names[sample['logic']])
                print()
164,311 | import json
import re
from nltk.metrics import accuracy
import os
from .utils import *
# Map numeric value strings to their spelled-out English forms.
number_dict = {'2': 'two', '3': 'three', '4': 'four', '5': 'five', '6': 'six'}
# Question phrases that signal each SQL aggregate function.
agg_dict = {'count': ['more', 'number', 'how many', 'most', 'one', 'at least', 'only', 'more than', 'fewer than'],
            'avg': ['average', 'mean'],
            'max': ['maximum', 'largest', 'longest', 'oldest', 'top', 'best', 'most', 'highest', 'latest', 'larger than any', 'lowest rank','predominantly'],
            'min':['minimum', 'smallest', 'shortest', 'worst', 'youngest', 'least', 'lowest', 'earliest', 'any', 'highest rank']}
# Phrases signalling ascending / descending ORDER BY direction.
sc_dict = {'asc': ['least', 'smallest', 'lowest', 'ascending', 'fewest', 'alphabetical order','lexicographical order'],
           'desc': ['most', 'largest', 'highest', 'descending', 'greatest','reverse alphabetical order','reversed lexicographical order']}
# Phrases signalling > / < comparison operators.
op_dict = {'>': ['more than', 'older than', 'bigger than', 'larger than', 'higher than', 'more', 'after', 'greater', 'above', 'over', 'at least'],
           '<': ['less than', 'fewer than', 'younger than', 'smaller than', 'lower than', 'less', 'before', 'shorter', 'below', 'under', 'lighter'],}
import re
def question_test(sql, name_dict, question):
    """Heuristically check whether *question* is consistent with *sql*.

    Args:
        sql: the SQL query string.
        name_dict: placeholder name (AGG/SC/OP/VALUE/COLUMN...) -> concrete
            value used in the query.
        question: the natural-language question text.

    Returns:
        (label, error): label starts at 1; the negation check zeroes it and
        every other failed check decrements it.  error collects one tag per
        failed check.
    """
    label = 1
    error = []
    # not issues: question negates but SQL has no negation construct
    if ('not' in question or 'n\'t' in question or 'without' in question) and \
            '!=' not in name_dict.values() and \
            'except' not in sql.lower() and 'not' not in sql.lower():
        label = 0
        error.append('not')
    # reverse number: question spells a number whose digit is absent from SQL
    if any(y in question and x not in sql for x, y in number_dict.items()):
        label -= 1
        error.append('number')
    # DISTINCT
    if 'distinct' in sql.lower() and ('different' not in question or 'distinct' not in question):
        # doesn't matters in spider dataset
        ...
    # components issues
    for key, val in name_dict.items():
        # agg issues: aggregate used in SQL but no cue phrase in the question
        if 'AGG' in key:
            for agg in agg_dict:
                if agg in val and not any(x in question for x in agg_dict[agg]):
                    label -= 1
                    error.append(agg)
        # sc issues: sort direction without a matching cue phrase
        if 'SC' in key:
            val = val.lower()
            for sc in sc_dict:
                if sc in val and not any(x in question for x in sc_dict[sc]):
                    label -= 1
                    error.append(sc)
        # op issues: comparison operator without a matching cue phrase
        if 'OP' in key:
            for op in op_dict:
                if op == val and not any(x in question for x in op_dict[op]):
                    label -= 1
                    error.append(op)
        # deal with value: literals should appear in the question
        if 'VALUE' in key:
            if val == '1' or val == 't':
                # trivial values are not required to appear
                continue
            if val in number_dict:
                # spelled-out form counts; mask it so it is consumed once
                tem = number_dict[val]
                if re.search(r"{}".format(tem), question):
                    question = re.sub(r"{}".format(tem), '__FOUND__', question)
                    continue
            val = val.strip('\'').strip('\"')
            if re.search(r"{}".format(val), question):
                question = re.sub(r"{}".format(val), '__FOUND__', question)
            else:
                label -= 1
                error.append(val)
        # deal with columns before FROM
        if 'COLUMN' in key:
            # not useful
            # pos = sql.lower().find(val)
            # pos_from = sql.lower().find('from')
            # if pos<pos_from:
            #     val = val.split('_')
            #     if any(x not in question for x in val):
            #         label -= 1
            #         error.append('_'.join(val))
            ...
    return label, error
164,312 | import os
from config import *
import random
import json
from tqdm import tqdm
from sql_formatter.formatting import translate_sql
import sqlite3
import multiprocessing
from multiprocessing import Manager
import time
random.seed(33)
def mkdir(path):
def read_json(path):
def write_json(path, data):
def preprocess_spider(rawdata, t):
def get_dbschema(path):
def mutate_sql(index, data, time_out, sql_dict, db_schema, db_dir):
def create_output(t, idir, odir):
    """Build raw and preprocessed dataset splits under *odir*.

    Args:
        t: dataset type; only 'spider' is handled fully.
        idir: input directory holding the Spider release files.
        odir: output directory; Raw/ and Preprocess/ subdirs are created.

    Side effects: writes train/dev/test/out-of-domain JSON files and, for
    spider, a SQL-mutation dictionary.

    NOTE(review): the else-branch references `rawtrain`, which is assigned
    only inside the spider branch -- calling with any other type raises
    NameError as written.  The directory/file-name constants (Raw,
    Preprocess, Spider_*, *json) come from `config`.
    """
    rawdir = os.path.join(odir, Raw)
    preprocessdir = os.path.join(odir, Preprocess)
    mkdir(rawdir)
    mkdir(preprocessdir)
    if t == 'spider':
        traindata = read_json(os.path.join(idir, Spider_train))
        otherdata = read_json(os.path.join(idir, Spider_others))
        devdata = read_json(os.path.join(idir, Spider_dev))
        rawtrain = []
        rawdev = []
        rawtest = devdata
        rawoutofdomain = otherdata
        # 80/20 random train/dev split of the official training data
        random.shuffle(traindata)
        train_len = round(len(traindata) * 0.8)
        print("spider raw starts")
        for i, data in enumerate(tqdm(traindata)):
            if i < train_len:
                rawtrain.append(data)
            else:
                rawdev.append(data)
        print("spider raw done")
        write_json(os.path.join(rawdir, Trainjson), rawtrain)
        write_json(os.path.join(rawdir, Devjson), rawdev)
        write_json(os.path.join(rawdir, Testjson), rawtest)
        write_json(os.path.join(rawdir, Outofdomainjson), rawoutofdomain)
        print("spider preprocess starts")
        preprocesstrain = preprocess_spider(rawtrain, 'train')
        write_json(os.path.join(preprocessdir, Trainjson), preprocesstrain)
        preprocessdev = preprocess_spider(rawdev, 'dev')
        write_json(os.path.join(preprocessdir, Devjson), preprocessdev)
        preprocesstest = preprocess_spider(rawtest, 'test')
        write_json(os.path.join(preprocessdir, Testjson), preprocesstest)
        preprocessoutofdomain = preprocess_spider(rawoutofdomain, 'outofdomain')
        write_json(os.path.join(preprocessdir, Outofdomainjson), preprocessoutofdomain)
        print("spider preprocess done")
        print("mutate starts")
        db_schema = get_dbschema(os.path.join(idir, Spider_table))
        total_data = []
        total_data += traindata + devdata + otherdata
        sql_dict = {}
        for index, data in enumerate(tqdm(total_data)):
            time_out = 3  # per-query mutation budget (seconds)
            mutate_sql(index, data, time_out, sql_dict, db_schema, os.path.join(idir, Spider_database))
        write_json(os.path.join(preprocessdir, Mutationjson), sql_dict)
        print("mutate done")
    else:
        print("spider preprocess starts")
        preprocesstrain = preprocess_spider(rawtrain, 'train')
        write_json(os.path.join(preprocessdir, Trainjson), preprocesstrain)
        print("spider preprocess done")
        """print("mutate starts")
        db_schema = get_dbschema(os.path.join(idir, Spider_table))
        total_data = []
        total_data += traindata + devdata + otherdata
        sql_dict = {}
        for index, data in enumerate(tqdm(total_data)):
            time_out = 3
            mutate_sql(index, data, time_out, sql_dict, db_schema, os.path.join(idir, Spider_database))
        write_json(os.path.join(preprocessdir, Mutationjson), sql_dict)
        print("mutate done")"""
164,313 | import argparse
from utils import *
def get_arg():
    """Parse command-line arguments for the dataset-preparation script.

    Required flags: --type (dataset type, e.g. spider), --input (input
    dir), --output (output dir).

    Returns:
        argparse.Namespace with `type`, `input`, and `output` attributes.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--type', type=str, required=True, help='dataset type, ie. spider')
    parser.add_argument('--input', type=str, required=True, help='input dir')
    parser.add_argument('--output', type=str, required=True, help='output dir')
    # FIX: parse once and reuse -- the original called parse_args() twice
    # (once for the debug print, once for the return), re-parsing sys.argv.
    args = parser.parse_args()
    print(args)
    return args
164,314 | import json
import random
import csv
def load_json(path):
    """Deserialize and return the JSON content of the file at *path*."""
    with open(path, 'r', encoding='utf-8') as source:
        raw = source.read()
    return json.loads(raw)
164,315 | import json
import random
import csv
def random_choose(data, max_size):
    """Randomly keep at most *max_size* mutations per original query.

    Args:
        data: dict mapping an original SQL string to a dict of mutations.
        max_size: upper bound on mutations retained per original.

    Returns:
        a new dict with the same keys and down-sampled mutation dicts.
    """
    sampled = {}
    for origin, mutations in data.items():
        keep = min(len(mutations), max_size)
        chosen = random.sample(list(mutations.items()), keep)
        sampled[origin] = dict(chosen)
    return sampled
164,316 | import json
import random
import csv
def write_json(path, data):
    """Serialize *data* as JSON into the file at *path* (overwriting it)."""
    serialized = json.dumps(data)
    with open(path, 'w', encoding='utf-8') as sink:
        sink.write(serialized)
164,317 | import json
import random
import csv
def load_csv(path):
    """Read a CSV file, skipping its header row.

    Args:
        path: CSV file path.

    Returns:
        list of rows (each a list of strings), header excluded.
    """
    data = []
    # BUG FIX: previously opened the global `labeled_path` instead of the
    # `path` argument (NameError unless such a global happened to exist).
    with open(path, encoding='utf-8') as handle:
        for i, line in enumerate(csv.reader(handle)):
            if i == 0:
                continue  # skip header row
            data.append(line)
    return data
164,318 | from tqdm import tqdm
from sql_formatter.formatting import translate_sql
import json
import random
import multiprocessing
from multiprocessing import Manager
def translate_sql(sql):
    """Recursively translate a SQL string into a natural-language form.

    Handles three structural cases before falling back to the formatter:
    1. the whole string is a subquery wrapped in the sentinels ``"l`` / ``r"``;
    2. an embedded ``( SELECT ... )`` subquery, translated recursively and
       re-wrapped with the sentinels;
    3. EXCEPT / INTERSECT compounds, translated clause by clause.

    Returns:
        (translated_sql, translated_struct_sql): the flat translation and a
        variant keeping parenthesis structure.

    NOTE(review): relies on `Formatter` and `parse` from the surrounding
    module (not defined in this snippet).
    """
    formatter = Formatter()
    # Case 1: sentinel-wrapped subquery -- restore parentheses and flatten.
    if sql.split()[0] == '\"l' and sql.split()[-1] == 'r\"':
        print("2:", sql)
        translated_struct_sql = sql.replace('\"l','(').replace('r\"',')')
        print("2:", translated_struct_sql)
        translated_sql = ' '.join(translated_struct_sql.replace('(','').replace(')','').split())
        return translated_sql, translated_struct_sql
    # Case 2: translate an embedded "( SELECT ... )" subquery first.
    if " (SELECT " in sql or " ( SELECT " in sql:
        if " (SELECT " in sql:
            start_pos = sql.index("(SELECT") + 1
        if " ( SELECT " in sql:
            start_pos = sql.index("( SELECT") + 1
        # find the matching closing parenthesis of the subquery
        parenthesis = ['(']
        clause_len = len(sql[start_pos:])
        for i, char in enumerate(sql[start_pos:]):
            if char == ')':
                parenthesis.pop()
                if not parenthesis:
                    clause_len = i
                    break
            elif char == '(':
                parenthesis.append(char)
        sub_sql = sql[start_pos : start_pos + clause_len]
        # wrap the translated subquery in sentinels, then retranslate the whole
        translated_sub_sql = "\"l {} r\"".format(translate_sql(sub_sql)[1])
        sql = sql[ : start_pos - 1] + translated_sub_sql + sql[start_pos + clause_len + 1:]
        _, translated_struct_sql = translate_sql(sql)
        translated_struct_sql = translated_struct_sql.replace('( l ','').replace(' r )','')
        translated_sql = ' '.join(translated_struct_sql.replace('(','').replace(')','').split())
        return translated_sql, translated_struct_sql
    # Case 3a: EXCEPT compound -- translate each side, join with a connective.
    if " EXCEPT " in sql:
        translated_sqls = []
        translated_struct_sqls = []
        sqls = sql.split(" EXCEPT ")
        for index, statement in enumerate(sqls):
            translated_sql, translated_struct_sql = translate_sql(statement)
            translated_sqls.append(translated_sql)
            if index > 0:
                translated_struct_sql = "( " + translated_struct_sql + ")"
            translated_struct_sqls.append(translated_struct_sql)
        translated_sql = ', and except that '.join(translated_sqls)
        translated_struct_sql = ', and except that '.join(translated_struct_sqls)
        return translated_sql, translated_struct_sql
    # Case 3b: INTERSECT compound, handled the same way.
    if " INTERSECT " in sql:
        translated_sqls = []
        translated_struct_sqls = []
        sqls = sql.split(" INTERSECT ")
        for index, statement in enumerate(sqls):
            translated_sql, translated_struct_sql = translate_sql(statement)
            translated_sqls.append(translated_sql)
            if index > 0:
                translated_struct_sql = "( " + translated_struct_sql + ")"
            translated_struct_sqls.append(translated_struct_sql)
        translated_sql = ', and intersect with '.join(translated_sqls)
        translated_struct_sql = ', and intersect with '.join(translated_struct_sqls)
        return translated_sql, translated_struct_sql
    # Base case: hand the simple statement to the SQL formatter.
    # BUG FIX: the original called parse(sql)/formatter.format(stmt)
    # unprotected immediately before the identical try/except below, so any
    # parse error escaped and the preprocess() fallback was unreachable.
    try:
        stmt = parse(sql)
        translated_struct_sql = formatter.format(stmt)
    except Exception as e:
        print("Error:", e, '\n')
        print("Error:", sql, '\n')
        translated_struct_sql = formatter.preprocess(sql)
    translated_sql = ' '.join(translated_struct_sql.replace('(','').replace(')','').split())
    return translated_sql, translated_struct_sql
def trans_sql(raw_sql, sql):
    """Best-effort wrapper around translate_sql.

    Args:
        raw_sql: original SQL (kept for interface compatibility; unused here).
        sql: SQL string to translate.

    Returns:
        the structured translation, or None when translation fails -- the
        failure is logged rather than raised (deliberate best-effort).
    """
    try:
        translated_sql, translated_struct_sql = translate_sql(sql)
        #return_dict[raw_sql] = (raw_sql, translated_struct_sql)
        return translated_struct_sql
    except:
        print("ERROR with Processing SQL: {}".format(sql))
        return
164,319 | import numpy
import re
import math
import pandas as pd
import numpy as np
import datetime
def fuzzy_match_filter(t, col, val, negate=False):
    """Filter rows of *t* whose *col* contains *val*, ignoring all spaces.

    Args:
        t: pandas DataFrame with a string column *col*.
        col: column name to match against.
        val: substring to look for (spaces stripped before matching).
        negate: when True keep the NON-matching rows instead.

    Returns:
        the filtered DataFrame with its index reset.
    """
    haystack = t[col].str.replace(" ", "")
    needle = val.replace(" ", "")
    mask = haystack.str.contains(needle, regex=False)
    if negate:
        mask = ~mask
    return t[mask].reset_index(drop=True)
164,320 | import numpy
import re
import math
import pandas as pd
import numpy as np
import datetime
# Month-name -> month-number lookup (full names and 3-letter abbreviations).
month_map = {'january': 1, 'february': 2, 'march': 3, 'april': 4, 'may': 5, 'june': 6,
             'july': 7, 'august': 8, 'september': 9, 'october': 10, 'november': 11, 'december': 12,
             'jan': 1, 'feb': 2, 'mar': 3, 'apr': 4, 'may': 5, 'jun': 6, 'jul': 7, 'aug': 8, 'sep': 9, 'oct': 10,
             'nov': 11, 'dec': 12}
# Numeric token: signed decimals with , : . separators, or ordinals (1st, 2nd, ...).
pat_num = r"([-+]?\s?\d*(?:\s?[:,.]\s?\d+)+\b|[-+]?\s?\d+\b|\d+\s?(?=st|nd|rd|th))"
# Number immediately following "= " (annotated cell values).
pat_add = r"((?<==\s)\d+)"
# Four-digit year / one-or-two-digit day.
pat_year = r"\b(\d\d\d\d)\b"
pat_day = r"\b(\d\d?)\b"
# Month name or abbreviation.  BUG FIX: was `mar(?:rch)?`, which matched
# "mar"/"marrch" but never "march"; corrected to `mar(?:ch)?`.
pat_month = r"\b((?:jan(?:uary)?|feb(?:ruary)?|mar(?:ch)?|apr(?:il)?|may|jun(?:e)?|jul(?:y)?|aug(?:ust)?|sep(?:tember)?|oct(?:ober)?|nov(?:ember)?|dec(?:ember)?))\b"
The provided code snippet includes necessary dependencies for implementing the `fuzzy_compare_filter` function. Write a Python function `def fuzzy_compare_filter(t, col, val, type)` to solve the following problem:
fuzzy compare and filter out rows. return empty pd if invalid type: eq, not_eq, greater, greater_eq, less, less_eq
Here is the function:
def fuzzy_compare_filter(t, col, val, type):
    '''
    fuzzy compare and filter out rows.
    return empty pd if invalid
    type: eq, not_eq, greater, greater_eq, less, less_eq
    '''
    # NOTE: mutates the caller's frame -- col is coerced to str in place.
    t[col] = t[col].astype('str')
    # dates: val mentions a month name, so compare as calendar dates
    if len(re.findall(pat_month, val)) > 0:
        year_list = t[col].str.extract(pat_year, expand=False)
        day_list = t[col].str.extract(pat_day, expand=False)
        month_list = t[col].str.extract(pat_month, expand=False)
        month_num_list = month_list.map(month_map)
        # pandas at most 2262: missing years default far in the future
        year_list = year_list.fillna("2260").astype("int")
        day_list = day_list.fillna("1").astype("int")
        month_num_list = month_num_list.fillna("1").astype("int")
        date_frame = pd.to_datetime(pd.DataFrame({'year': year_list, 'month': month_num_list, 'day': day_list}))
        # parse val into year/month/day, borrowing missing parts from row 0
        year_val = re.findall(pat_year, val)
        year_val = year_list.iloc[0] if len(year_val) == 0 else int(year_val[0])
        day_val = re.findall(pat_day, val)
        day_val = day_list.iloc[0] if len(day_val) == 0 else int(day_val[0])
        month_val = re.findall(pat_month, val)
        month_val = month_num_list.iloc[0] if len(month_val) == 0 else month_map[month_val[0]]
        # BUG FIX: pd.datetime was removed in pandas 2.0; use the stdlib class.
        date_val = datetime.datetime(year_val, month_val, day_val)
        if type == "greater":
            res = t[date_frame > date_val]
        elif type == "greater_eq":
            res = t[date_frame >= date_val]
        elif type == "less":
            res = t[date_frame < date_val]
        elif type == "less_eq":
            res = t[date_frame <= date_val]
        elif type == "eq":
            res = t[date_frame == date_val]
        elif type == "not_eq":
            # BUG FIX: was `t[~date_frame != date_val]` -- the stray `~`
            # inverted the series before comparing instead of negating it.
            res = t[date_frame != date_val]
        else:
            return pd.DataFrame(columns=list(t.columns))
        return res.reset_index(drop=True)
    # numbers, or mixed numbers and strings
    val_pat = re.findall(pat_num, val)
    if len(val_pat) == 0:
        # no numeric token in val: fall back to full substring matching
        if type == "eq":
            return t[t[col].str.contains(val, regex=False)]
        elif type == "not_eq":
            return t[~t[col].str.contains(val, regex=False)]
        else:
            return pd.DataFrame(columns=list(t.columns))
    # normalize the comparison value: strip separators, then parse as float
    num = val_pat[0].replace(",", "").replace(":", "").replace(" ", "")
    try:
        num = float(num)
    except ValueError:
        # e.g. "1.234.567": the dots are separators, not a decimal point
        num = float(num.replace(".", ""))
    # extract a numeric token per row, preferring the "= N" annotation
    pats = t[col].str.extract(pat_add, expand=False)
    if pats.isnull().all():
        pats = t[col].str.extract(pat_num, expand=False)
        if pats.isnull().all():
            return pd.DataFrame(columns=list(t.columns))
    # BUG FIX: regex=False keeps "." a literal dot (it is a regex wildcard
    # under the old default), and makes the intent explicit for , and :.
    nums = pats.str.replace(",", "", regex=False)
    nums = nums.str.replace(":", "", regex=False)
    nums = nums.str.replace(" ", "", regex=False)
    try:
        nums = nums.astype("float")
    except ValueError:
        nums = nums.str.replace(".", "", regex=False)
        nums = nums.astype("float")
    if type == "greater":
        res = t[np.greater(nums, num)]
    elif type == "greater_eq":
        res = t[np.greater_equal(nums, num)]
    elif type == "less":
        res = t[np.less(nums, num)]
    elif type == "less_eq":
        res = t[np.less_equal(nums, num)]
    elif type == "eq":
        res = t[np.isclose(nums, num)]
    elif type == "not_eq":
        res = t[~np.isclose(nums, num)]
    else:
        # BUG FIX: an invalid type previously left `res` unbound (NameError);
        # honor the docstring and return an empty frame instead.
        return pd.DataFrame(columns=list(t.columns))
    return res.reset_index(drop=True)
164,321 | import numpy
import re
import math
import pandas as pd
import numpy as np
import datetime
# Month-name -> month-number lookup (full names and 3-letter abbreviations).
month_map = {'january': 1, 'february': 2, 'march': 3, 'april': 4, 'may': 5, 'june': 6,
             'july': 7, 'august': 8, 'september': 9, 'october': 10, 'november': 11, 'december': 12,
             'jan': 1, 'feb': 2, 'mar': 3, 'apr': 4, 'may': 5, 'jun': 6, 'jul': 7, 'aug': 8, 'sep': 9, 'oct': 10,
             'nov': 11, 'dec': 12}
# Numeric token: signed decimals with , : . separators, or ordinals (1st, 2nd, ...).
pat_num = r"([-+]?\s?\d*(?:\s?[:,.]\s?\d+)+\b|[-+]?\s?\d+\b|\d+\s?(?=st|nd|rd|th))"
# Four-digit year / one-or-two-digit day.
pat_year = r"\b(\d\d\d\d)\b"
pat_day = r"\b(\d\d?)\b"
# Month name or abbreviation.  BUG FIX: was `mar(?:rch)?`, which matched
# "mar"/"marrch" but never "march"; corrected to `mar(?:ch)?`.
pat_month = r"\b((?:jan(?:uary)?|feb(?:ruary)?|mar(?:ch)?|apr(?:il)?|may|jun(?:e)?|jul(?:y)?|aug(?:ust)?|sep(?:tember)?|oct(?:ober)?|nov(?:ember)?|dec(?:ember)?))\b"
class ExeError(object):
    """Sentinel returned (not raised) when a table operation cannot execute."""
    def __init__(self, message="exe error"):
        # human-readable description of the failure
        self.message = message
def obj_compare(num1, num2, round=False, type="eq"):
    """Fuzzily compare two cell values.

    Tries, in order: pure numeric comparison; date comparison (when num1
    mentions a month name); comparison of the first numeric token embedded
    in each string; and finally substring matching.

    Args:
        num1, num2: values to compare (numbers or strings).
        round: when True use a loose 15% relative tolerance for (in)equality.
        type: "eq", "not_eq", "greater", "less", or "diff".

    Returns:
        bool for the comparison types; a number (or "<n> days" string for
        dates) for "diff"; an ExeError sentinel when incomparable.
    """
    tolerance = 0.15 if round else 1e-9
    # both numeric
    try:
        num_1 = float(num1)
        num_2 = float(num2)
        if type == "eq":
            return math.isclose(num_1, num_2, rel_tol=tolerance)
        elif type == "not_eq":
            return not math.isclose(num_1, num_2, rel_tol=tolerance)
        elif type == "greater":
            return num_1 > num_2
        elif type == "less":
            return num_1 < num_2
        elif type == "diff":
            return num_1 - num_2
    except ValueError:
        # strings / mixed numbers and strings
        num1 = str(num1)
        num2 = str(num2)
        # dates: num1 mentions a month name
        if len(re.findall(pat_month, num1)) > 0:
            # parse num1; missing parts default to sentinel year 2260 / day 1
            year_val1 = re.findall(pat_year, num1)
            year_val1 = int("2260") if len(year_val1) == 0 else int(year_val1[0])
            day_val1 = re.findall(pat_day, num1)
            day_val1 = int("1") if len(day_val1) == 0 else int(day_val1[0])
            month_val1 = re.findall(pat_month, num1)
            month_val1 = int("1") if len(month_val1) == 0 else month_map[month_val1[0]]
            try:
                # BUG FIX: pd.datetime was removed in pandas 2.0.
                date_val1 = datetime.datetime(year_val1, month_val1, day_val1)
            except Exception:
                # BUG FIX: return an ExeError *instance*, not the class,
                # consistent with the other failure paths.
                return ExeError()
            # parse num2 the same way
            year_val2 = re.findall(pat_year, num2)
            year_val2 = int("2260") if len(year_val2) == 0 else int(year_val2[0])
            day_val2 = re.findall(pat_day, num2)
            day_val2 = int("1") if len(day_val2) == 0 else int(day_val2[0])
            month_val2 = re.findall(pat_month, num2)
            month_val2 = int("1") if len(month_val2) == 0 else month_map[month_val2[0]]
            try:
                date_val2 = datetime.datetime(year_val2, month_val2, day_val2)
            except Exception:
                return ExeError()
            if type == "eq":
                return date_val1 == date_val2
            elif type == "not_eq":
                return date_val1 != date_val2
            elif type == "greater":
                return date_val1 > date_val2
            elif type == "less":
                return date_val1 < date_val2
            # for diff return string
            elif type == "diff":
                return str((date_val1 - date_val2).days) + " days"
        # mixed string and numerical: first numeric token of each side
        val_pat1 = re.findall(pat_num, num1)
        val_pat2 = re.findall(pat_num, num2)
        if len(val_pat1) == 0 or len(val_pat2) == 0:
            # no numbers at all: fall back to substring matching
            if type == "not_eq":
                return (num1 not in num2) and (num2 not in num1)
            elif type == "eq":
                return num1 in num2 or num2 in num1
            else:
                return ExeError()
        num_1 = val_pat1[0].replace(",", "").replace(":", "").replace(" ", "")
        try:
            num_1 = float(num_1)
        except ValueError:
            # dots were separators, not a decimal point
            num_1 = float(num_1.replace(".", ""))
        num_2 = val_pat2[0].replace(",", "").replace(":", "").replace(" ", "")
        try:
            num_2 = float(num_2)
        except ValueError:
            num_2 = float(num_2.replace(".", ""))
        if type == "eq":
            return math.isclose(num_1, num_2, rel_tol=tolerance)
        elif type == "not_eq":
            return not math.isclose(num_1, num_2, rel_tol=tolerance)
        elif type == "greater":
            return num_1 > num_2
        elif type == "less":
            return num_1 < num_2
        elif type == "diff":
            return num_1 - num_2
164,322 | import numpy
import re
import math
import pandas as pd
import numpy as np
import datetime
# Numeric token: signed decimals with , : . separators, or ordinals (1st, 2nd, ...).
pat_num = r"([-+]?\s?\d*(?:\s?[:,.]\s?\d+)+\b|[-+]?\s?\d+\b|\d+\s?(?=st|nd|rd|th))"
# Number immediately following "= " (annotated cell values).
pat_add = r"((?<==\s)\d+)"
The provided code snippet includes necessary dependencies for implementing the `agg` function. Write a Python function `def agg(t, col, type)` to solve the following problem:
sum or avg for aggregation
Here is the function:
def agg(t, col, type):
    '''
    sum or avg for aggregation

    Numeric columns are aggregated directly; string columns first have a
    numeric token extracted per row (pat_add, then pat_num) and cleaned of
    , : and space separators.

    Args:
        t: pandas DataFrame.
        col: column to aggregate.
        type: "sum", "avg", or "mean" ("avg" and "mean" are synonyms).

    Returns:
        the aggregated value, or 0.0 when no numeric token can be found.
    '''
    # unused
    if t.dtypes[col] == np.int64 or t.dtypes[col] == np.float64:
        if type == "sum":
            return t[col].sum()
        elif type == "avg" or type == "mean":
            return t[col].mean()
    else:
        pats = t[col].str.extract(pat_add, expand=False)
        if pats.isnull().all():
            pats = t[col].str.extract(pat_num, expand=False)
        if pats.isnull().all():
            return 0.0
        # BUG FIX: fillna is not in-place; the original discarded its result.
        pats = pats.fillna("0.0")
        nums = pats.str.replace(",", "", regex=False)
        nums = nums.str.replace(":", "", regex=False)
        nums = nums.str.replace(" ", "", regex=False)
        try:
            nums = nums.astype("float")
        except ValueError:
            # dots were separators, not decimal points
            nums = nums.str.replace(".", "", regex=False)
            nums = nums.astype("float")
        if type == "sum":
            return nums.sum()
        # BUG FIX: the numeric branch accepted "avg" but this branch only
        # accepted "mean"; accept both consistently.
        elif type == "avg" or type == "mean":
            return nums.mean()
164,323 | import numpy
import re
import math
import pandas as pd
import numpy as np
import datetime
class ExeError(object):
    """Sentinel returned (not raised) when a table operation cannot execute."""
    def __init__(self, message="exe error"):
        # human-readable description of the failure
        self.message = message
def hop_op(t, col):
    """Return the first value in column *col* of *t*.

    Returns an ExeError sentinel instead when the table has no rows.
    """
    if len(t) == 0:
        return ExeError()
    first_value = t[col].values[0]
    return first_value
164,324 | import numpy
import re
import math
import pandas as pd
import numpy as np
import datetime
# Month-name -> month-number lookup (full names and 3-letter abbreviations).
month_map = {'january': 1, 'february': 2, 'march': 3, 'april': 4, 'may': 5, 'june': 6,
             'july': 7, 'august': 8, 'september': 9, 'october': 10, 'november': 11, 'december': 12,
             'jan': 1, 'feb': 2, 'mar': 3, 'apr': 4, 'may': 5, 'jun': 6, 'jul': 7, 'aug': 8, 'sep': 9, 'oct': 10,
             'nov': 11, 'dec': 12}
# Numeric token: signed decimals with , : . separators, or ordinals (1st, 2nd, ...).
pat_num = r"([-+]?\s?\d*(?:\s?[:,.]\s?\d+)+\b|[-+]?\s?\d+\b|\d+\s?(?=st|nd|rd|th))"
# Number immediately following "= " (annotated cell values).
pat_add = r"((?<==\s)\d+)"
# Four-digit year / one-or-two-digit day.
pat_year = r"\b(\d\d\d\d)\b"
pat_day = r"\b(\d\d?)\b"
# Month name or abbreviation.  BUG FIX: was `mar(?:rch)?`, which matched
# "mar"/"marrch" but never "march"; corrected to `mar(?:ch)?`.
pat_month = r"\b((?:jan(?:uary)?|feb(?:ruary)?|mar(?:ch)?|apr(?:il)?|may|jun(?:e)?|jul(?:y)?|aug(?:ust)?|sep(?:tember)?|oct(?:ober)?|nov(?:ember)?|dec(?:ember)?))\b"
class ExeError(object):
    """Sentinel returned (not raised) when a table operation cannot execute."""
    def __init__(self, message="exe error"):
        # human-readable description of the failure
        self.message = message
The provided code snippet includes necessary dependencies for implementing the `nth_maxmin` function. Write a Python function `def nth_maxmin(t, col, order=1, max_or_min="max", arg=False)` to solve the following problem:
for max, min, argmax, argmin, nth_max, nth_min, nth_argmax, nth_argmin return string or rows
Here is the function:
def nth_maxmin(t, col, order=1, max_or_min="max", arg=False):
    '''
    for max, min, argmax, argmin,
    nth_max, nth_min, nth_argmax, nth_argmin
    return string or rows

    t: pandas DataFrame of table rows (cells are strings).
    col: column to rank on.
    order: 1-based rank (1 = the max/min itself, 2 = second largest/smallest, ...).
    max_or_min: "max" ranks descending, "min" ranks ascending.
    arg: True returns the selected row(s) (arg* semantics); False returns
         the original cell value.
    Returns an ExeError instance when nothing numeric/date-like can be parsed.
    '''
    order = int(order)
    ### return the original content for max,min
    # dates
    # If any cell contains a month name, first try to rank the column as dates.
    date_pats = t[col].str.extract(pat_month, expand=False)
    if not date_pats.isnull().all():
        year_list = t[col].str.extract(pat_year, expand=False)
        day_list = t[col].str.extract(pat_day, expand=False)
        month_list = t[col].str.extract(pat_month, expand=False)
        month_num_list = month_list.map(month_map)
        # pandas at most 2262
        # Missing components get sentinels: year 2260 / month 1 / day 1
        # (pandas Timestamp only supports years up to ~2262).
        year_list = year_list.fillna("2260").astype("int")
        day_list = day_list.fillna("1").astype("int")
        month_num_list = month_num_list.fillna("1").astype("int")
        # print (year_list)
        # print (day_list)
        # print (month_num_list)
        try:
            date_series = pd.to_datetime(pd.DataFrame({'year': year_list, 'month': month_num_list, 'day': day_list}))
            # print (date_series)
            # nlargest/nsmallest(order) keeps the top `order` entries; the
            # last of them (iloc[[-1]]) is the nth-ranked value.
            if max_or_min == "max":
                tar_row = date_series.nlargest(order).iloc[[-1]]
            elif max_or_min == "min":
                tar_row = date_series.nsmallest(order).iloc[[-1]]
            ind = list(tar_row.index.values)
            if arg:
                res = t.iloc[ind]
            else:
                res = t.iloc[ind][col].values[0]
            return res
        except:
            # Date ranking failed: fall through to numeric handling below.
            pass
    # mixed string and numerical
    # Prefer a number following "=" (pat_add); otherwise take the first
    # general numeric token (pat_num) from each cell.
    pats = t[col].str.extract(pat_add, expand=False)
    if pats.isnull().all():
        pats = t[col].str.extract(pat_num, expand=False)
    if pats.isnull().all():
        return ExeError()
    # Strip thousands separators and stray punctuation before casting.
    nums = pats.str.replace(",", "")
    nums = nums.str.replace(":", "")
    nums = nums.str.replace(" ", "")
    try:
        nums = nums.astype("float")
    except:
        # e.g. tokens with multiple dots — drop the dots and retry.
        nums = nums.str.replace(".", "")
        nums = nums.astype("float")
    try:
        if max_or_min == "max":
            tar_row = nums.nlargest(order).iloc[[-1]]
        elif max_or_min == "min":
            tar_row = nums.nsmallest(order).iloc[[-1]]
        ind = list(tar_row.index.values)
        # print (ind)
        # print (t.iloc[ind][col].values)
        if arg:
            res = t.iloc[ind]
        else:
            res = t.iloc[ind][col].values[0]
    except:
        return ExeError()
    # print (res)
    return res
164,325 | import numpy
import re
import math
import pandas as pd
import numpy as np
import datetime
def is_ascii(s):
    """Return True iff every character of ``s`` is 7-bit ASCII (ordinal < 128)."""
    for ch in s:
        if ord(ch) >= 128:
            return False
    return True
164,326 | import csv
from collections import defaultdict
import re
from .APIs import *
import nltk
from nltk.corpus import stopwords
from sklearn.metrics import classification_report, accuracy_score
def load_data():
    """Load the labeled rows from ``logic2text_labeled.csv``.

    Returns:
        list[list[str]]: every CSV row except the first (header) row.
    """
    # Use a context manager so the file handle is always closed — the
    # original passed a bare open() into csv.reader and leaked the handle.
    # newline='' is the csv-module-documented way to open the file so that
    # quoted embedded newlines are parsed correctly.
    with open("logic2text_labeled.csv", encoding='utf-8', newline='') as f:
        reader = csv.reader(f)
        # Skip the header row (i == 0), keep everything else.
        return [row for i, row in enumerate(reader) if i > 0]
return data | null |
164,327 | import csv
from collections import defaultdict
import re
from .APIs import *
import nltk
from nltk.corpus import stopwords
from sklearn.metrics import classification_report, accuracy_score
def digit_match(x, nl):
    """Check whether the number ``x`` (a digit string) is mentioned in text ``nl``.

    Small numbers (<= 10) are matched via the ``order_dict`` / ``number_dict``
    regex tables (presumably ordinal/cardinal word patterns from the
    ``.APIs`` star-import — TODO confirm); larger numbers are matched
    literally, also trying the comma-grouped form (e.g. "1,000").
    Returns 1 when a mention is found, else 0.
    """
    found = 0
    # "1" is always treated as matched.
    if x == '1':
        found = 1
    if int(x) <= 10:
        # e.g. 3 -> "third" / "three" style patterns via the regex tables.
        if re.search(order_dict[int(x)], nl) or re.search(number_dict[str(int(x))], nl):
            found = 1
    elif format(int(x), ',') in nl or x in nl:
        # Literal match for numbers > 10, with or without thousands separators.
        found = 1
    return found
import re
APIs = {}
APIs['count'] = {"argument": ['row'], 'output': 'num',
'function': lambda t: len(t),
'tostr': lambda t: "count {{ {} }}".format(t),
'to_nl': lambda t: "the number of ( {} )".format(t),
'append': True}
APIs['only'] = {"argument": ['row'], 'output': 'bool',
"function": lambda t: len(t) == 1,
"tostr": lambda t: "only {{ {} }}".format(t),
"to_nl": lambda t: "( {} ) only contains one value".format(t),
'alias': ['only', 'one'],
'append': None}
APIs['str_hop'] = {"argument": ['row', 'header'], 'output': 'str',
'function': lambda t, col: hop_op(t, col),
'tostr': lambda t, col: "hop {{ {} ; {} }}".format(t, col),
'to_nl': lambda t, col: "( {} ) of ( {} )".format(col, t),
'append': True}
APIs['num_hop'] = {"argument": ['row', 'header'], 'output': 'obj',
'function': lambda t, col: hop_op(t, col),
'tostr': lambda t, col: "hop {{ {} ; {} }}".format(t, col),
'to_nl': lambda t, col: "( {} ) of ( {} )".format(col, t),
'append': True}
APIs['avg'] = {"argument": ['row', 'header'], 'output': 'num',
"function": lambda t, col: agg(t, col, "mean"),
"tostr": lambda t, col: "avg {{ {} ; {} }}".format(t, col),
"to_nl": lambda t, col: "average value of ( {} ) of ( {} )".format(col, t), # TODO
'alias': ['average', 'mean'],
'append': True}
APIs['sum'] = {"argument": ['row', 'header'], 'output': 'num',
"function": lambda t, col: agg(t, col, "sum"),
"tostr": lambda t, col: "sum {{ {} ; {} }}".format(t, col),
"to_nl": lambda t, col: "sum of ( {} ) of ( {} )".format(col, t),
'alias': ['sum', 'total'],
'append': True}
APIs['max'] = {"argument": ['row', 'header'], 'output': 'obj',
"function": lambda t, col: nth_maxmin(t, col, order=1, max_or_min="max", arg=False),
"tostr": lambda t, col: "max {{ {} ; {} }}".format(t, col),
"to_nl": lambda t, col: "maximum of ( {} ) of ( {} )".format(col, t),
'alias': max_dict,
'append': True}
APIs['min'] = {"argument": ['row', 'header'], 'output': 'obj',
"function": lambda t, col: nth_maxmin(t, col, order=1, max_or_min="min", arg=False),
"tostr": lambda t, col: "min {{ {} ; {} }}".format(t, col),
"to_nl": lambda t, col: "minimum of ( {} ) of ( {} )".format(col, t),
'alias': min_dict,
'append': True}
APIs['argmax'] = {"argument": ['row', 'header'], 'output': 'row',
'function': lambda t, col: nth_maxmin(t, col, order=1, max_or_min="max", arg=True),
'tostr': lambda t, col: "argmax {{ {} ; {} }}".format(t, col),
'to_nl': lambda t, col: "the row with maximum of ( {} ) of ( {} )".format(col, t),
'alias': max_dict,
'append': False}
APIs['argmin'] = {"argument": ['row', 'header'], 'output': 'row',
'function': lambda t, col: nth_maxmin(t, col, order=1, max_or_min="min", arg=True),
'tostr': lambda t, col: "argmin {{ {} ; {} }}".format(t, col),
'to_nl': lambda t, col: "the row with minimum of ( {} ) of ( {} )".format(col, t),
'alias': min_dict,
'append': False}
APIs['nth_argmax'] = {"argument": ['row', 'header', 'num'], 'output': 'row',
'function': lambda t, col, ind: nth_maxmin(t, col, order=ind, max_or_min="max", arg=True),
'tostr': lambda t, col, ind: "nth_argmax {{ {} ; {} ; {} }}".format(t, col, ind),
'to_nl': lambda t, col, ind: "the row with the ( {} ) largest of ( {} ) of ( {} )".format(ind, col, t),
'alias': max_dict,
'append': False}
APIs['nth_argmin'] = {"argument": ['row', 'header', 'num'], 'output': 'row',
'function': lambda t, col, ind: nth_maxmin(t, col, order=ind, max_or_min="min", arg=True),
'tostr': lambda t, col, ind: "nth_argmin {{ {} ; {} ; {} }}".format(t, col, ind),
'to_nl': lambda t, col, ind: "the row with the ( {} ) smallest of ( {} ) of ( {} )".format(ind, col, t),
'alias': min_dict,
'append': False}
APIs['nth_max'] = {"argument": ['row', 'header', 'num'], 'output': 'num',
"function": lambda t, col, ind: nth_maxmin(t, col, order=ind, max_or_min="max", arg=False),
"tostr": lambda t, col, ind: "nth_max {{ {} ; {} ; {} }}".format(t, col, ind),
"to_nl": lambda t, col, ind: "the ( {} ) maximum value of ( {} ) of ( {} )".format(ind, col, t),
'alias': max_dict,
'append': True}
APIs['nth_min'] = {"argument": ['row', 'header', 'num'], 'output': 'num',
"function": lambda t, col, ind: nth_maxmin(t, col, order=ind, max_or_min="min", arg=False),
"tostr": lambda t, col, ind: "nth_min {{ {} ; {} ; {} }}".format(t, col, ind),
"to_nl": lambda t, col, ind: "the ( {} ) minimum value of ( {} ) of ( {} )".format(ind, col, t),
'alias': min_dict,
'append': True}
APIs['diff'] = {"argument": ['obj', 'obj'], 'output': 'str',
'function': lambda t1, t2: obj_compare(t1, t2, type="diff"),
'tostr': lambda t1, t2: "diff {{ {} ; {} }}".format(t1, t2),
'to_nl': lambda t1, t2: "difference between ( {} ) and ( {} )".format(t1, t2),
'append': True}
APIs['greater'] = {"argument": ['obj', 'obj'], 'output': 'bool',
'function': lambda t1, t2: obj_compare(t1, t2, type="greater"),
'tostr': lambda t1, t2: "greater {{ {} ; {} }}".format(t1, t2),
'to_nl': lambda t1, t2: "( {} ) is greater than ( {} )".format(t1, t2),
'alias': greater_dict,
'append': False}
APIs['less'] = {"argument": ['obj', 'obj'], 'output': 'bool',
'function': lambda t1, t2: obj_compare(t1, t2, type="less"),
'tostr': lambda t1, t2: "less {{ {} ; {} }}".format(t1, t2),
'to_nl': lambda t1, t2: "( {} ) is less than ( {} )".format(t1, t2),
'alias': less_dict,
'append': True}
APIs['eq'] = {"argument": ['obj', 'obj'], 'output': 'bool',
'function': lambda t1, t2: obj_compare(t1, t2, type="eq"),
'tostr': lambda t1, t2: "eq {{ {} ; {} }}".format(t1, t2),
'to_nl': lambda t1, t2: "( {} ) is equal to ( {} )".format(t1, t2),
'append': None}
APIs['not_eq'] = {"argument": ['obj', 'obj'], 'output': 'bool',
'function': lambda t1, t2: obj_compare(t1, t2, type="not_eq"),
'tostr': lambda t1, t2: "not_eq {{ {} ; {} }}".format(t1, t2),
'to_nl': lambda t1, t2: "( {} ) is not equal to ( {} )".format(t1, t2),
"append": None}
APIs['str_eq'] = {"argument": ['str', 'str'], 'output': 'bool',
'function': lambda t1, t2: t1 in t2 or t2 in t1,
'tostr': lambda t1, t2: "eq {{ {} ; {} }}".format(t1, t2),
'to_nl': lambda t1, t2: "( {} ) is the same as ( {} )".format(t1, t2),
"append": None}
APIs['not_str_eq'] = {"argument": ['str', 'str'], 'output': 'bool',
'function': lambda t1, t2: t1 not in t2 and t2 not in t1,
'tostr': lambda t1, t2: "not_eq {{ {} ; {} }}".format(t1, t2),
'to_nl': lambda t1, t2: "( {} ) is not the same as ( {} )".format(t1, t2),
"append": None}
APIs['round_eq'] = {"argument": ['obj', 'obj'], 'output': 'bool',
'function': lambda t1, t2: obj_compare(t1, t2, round=True, type="eq"),
'tostr': lambda t1, t2: "round_eq {{ {} ; {} }}".format(t1, t2),
'to_nl': lambda t1, t2: "( {} ) is about ( {} )".format(t1, t2),
'append': None}
APIs['and'] = {"argument": ['bool', 'bool'], 'output': 'bool',
'function': lambda t1, t2: t1 and t2,
'tostr': lambda t1, t2: "and {{ {} ; {} }}".format(t1, t2),
'to_nl': lambda t1, t2: "both ( {} ) and ( {} ) are true".format(t1, t2),
"append": None}
APIs["filter_str_eq"] = {"argument": ['row', 'header', 'str'], "output": "row",
"function": lambda t, col, value: fuzzy_match_filter(t, col, value),
"tostr": lambda t, col, value: "filter_eq {{ {} ; {} ; {} }}".format(t, col, value),
"to_nl": lambda t, col, value: "( {} ) of ( {} ) that fuzzy matches ( {} )".format(col, t, value),
'append': False}
APIs["filter_str_not_eq"] = {"argument": ['row', 'header', 'str'], "output": "row",
"function": lambda t, col, value: fuzzy_match_filter(t, col, value, negate=True),
"tostr": lambda t, col, value: "filter_not_eq {{ {} ; {} ; {} }}".format(t, col, value),
"to_nl": lambda t, col, value: "( {} ) of ( {} ) that does not fuzzy matches ( {} )".format(col, t, value),
'append': False}
APIs["filter_eq"] = {"argument": ['row', 'header', 'obj'], "output": "row",
"function": lambda t, col, value: fuzzy_compare_filter(t, col, value, type="eq"),
"tostr": lambda t, col, value: "filter_eq {{ {} ; {} ; {} }}".format(t, col, value),
"to_nl": lambda t, col, value: "( {} ) of ( {} ) that fuzzy equals ( {} )".format(col, t, value),
'append': False}
APIs["filter_not_eq"] = {"argument": ['row', 'header', 'obj'], "output": "row",
"function": lambda t, col, value: fuzzy_compare_filter(t, col, value, type="not_eq"),
"tostr": lambda t, col, value: "filter_not_eq {{ {} ; {} ; {} }}".format(t, col, value),
"to_nl": lambda t, col, value: "( {} ) of ( {} ) that does not fuzzy equals ( {} )".format(col, t, value),
'append': False}
APIs["filter_less"] = {"argument": ['row', 'header', 'obj'], "output": "row",
"function": lambda t, col, value: fuzzy_compare_filter(t, col, value, type="less"),
"tostr": lambda t, col, value: "filter_less {{ {} ; {} ; {} }}".format(t, col, value),
"to_nl": lambda t, col, value: "( {} ) of ( {} ) that is fuzzy less than ( {} )".format(col, t, value),
'alias': less_dict,
"append": False}
APIs["filter_greater"] = {"argument": ['row', 'header', 'obj'], "output": "row",
"function": lambda t, col, value: fuzzy_compare_filter(t, col, value, type="greater"),
"tostr": lambda t, col, value: "filter_greater {{ {} ; {} ; {} }}".format(t, col, value),
"to_nl": lambda t, col, value: "( {} ) of ( {} ) that is fuzzy greater than ( {} )".format(col, t, value),
'alias': greater_dict,
"append": False}
APIs["filter_greater_eq"] = {"argument": ['row', 'header', 'obj'], "output": "row",
"function": lambda t, col, value: fuzzy_compare_filter(t, col, value, type="greater_eq"),
"tostr": lambda t, col, value: "filter_greater_eq {{ {} ; {} ; {} }}".format(t, col,value),
'alias': greater_dict,
"to_nl": lambda t, col, value: "( {} ) of ( {} ) that is fuzzy great than or equal to ( {} )".format(col, t, value),
"append": False}
APIs["filter_less_eq"] = {"argument": ['row', 'header', 'obj'], "output": "row",
"function": lambda t, col, value: fuzzy_compare_filter(t, col, value, type="less_eq"),
"tostr": lambda t, col, value: "filter_less_eq {{ {} ; {} ; {} }}".format(t, col, value),
"to_nl": lambda t, col, value: "( {} ) of ( {} ) that is fuzzy less than or equal to ( {} )".format(col, t, value),
"alias": less_dict,
"append": False}
APIs["filter_all"] = {"argument": ['row', 'header'], "output": "row",
"function": lambda t, col: t,
"tostr": lambda t, col: "filter_all {{ {} ; {} }}".format(t, col),
"to_nl": lambda t, col: "all ( {} ) of ( {} )".format(col, t),
'append': False}
APIs["all_str_eq"] = {"argument": ['row', 'header', 'str'], "output": "bool",
"function": lambda t, col, value: len(t) == len(fuzzy_match_filter(t, col, value)),
"tostr": lambda t, col, value: "all_eq {{ {} ; {} ; {} }}".format(t, col, value),
"to_nl": lambda t, col, value: "all ( {} ) of ( {} ) is ( {} )".format(col, t, value),
"append": None}
# Fix: this entry had the "to_nl" key twice; the first lambda (producing the
# "all_not_eq { ... }" logic-form string) is the "tostr" entry in every other
# API definition.  The duplicate key silently shadowed it, leaving this
# operator without a "tostr" serializer (KeyError on APIs[...]["tostr"]).
APIs["all_str_not_eq"] = {"argument": ['row', 'header', 'str'], "output": "bool",
                          # true iff no row of `t` fuzzy-matches `value` in column `col`
                          "function": lambda t, col, value: 0 == len(fuzzy_match_filter(t, col, value)),
                          # logic-form string representation (was mistakenly keyed "to_nl")
                          "tostr": lambda t, col, value: "all_not_eq {{ {} ; {} ; {} }}".format(t, col, value),
                          # natural-language template
                          "to_nl": lambda t, col, value: "all ( {} ) of ( {} ) is not ( {} )".format(col, t, value),
                          "append": None}
APIs["all_eq"] = {"argument": ['row', 'header', 'obj'], "output": "bool",
"function": lambda t, col, value: len(t) == len(fuzzy_compare_filter(t, col, value, type="eq")),
"tostr": lambda t, col, value: "all_eq {{ {} ; {} ; {} }}".format(t, col, value),
"to_nl": lambda t, col, value: "all ( {} ) of ( {} ) is about ( {} )".format(col, t, value),
"append": None}
APIs["all_not_eq"] = {"argument": ['row', 'header', 'obj'], "output": "bool",
"function": lambda t, col, value: 0 == len(fuzzy_compare_filter(t, col, value, type="eq")),
"tostr": lambda t, col, value: "all_not_eq {{ {} ; {} ; {} }}".format(t, col, value),
"to_nl": lambda t, col, value: "all ( {} ) of ( {} ) is not about ( {} )".format(col, t, value),
"append": None}
APIs["all_less"] = {"argument": ['row', 'header', 'obj'], "output": "bool",
"function": lambda t, col, value: len(t) == len(fuzzy_compare_filter(t, col, value, type="less")),
"tostr": lambda t, col, value: "all_less {{ {} ; {} ; {} }}".format(t, col, value),
"to_nl": lambda t, col, value: "all ( {} ) of ( {} ) is less than ( {} )".format(col, t, value),
"alias": less_dict,
"append": None}
APIs["all_less_eq"] = {"argument": ['row', 'header', 'obj'], "output": "bool",
"function": lambda t, col, value: len(t) == len(
fuzzy_compare_filter(t, col, value, type="less_eq")),
"tostr": lambda t, col, value: "all_less_eq {{ {} ; {} ; {} }}".format(t, col, value),
"to_nl": lambda t, col, value: "all ( {} ) of ( {} ) is less than or equal to ( {} )".format(col, t, value),
"alias": less_dict,
"append": None}
APIs["all_greater"] = {"argument": ['row', 'header', 'obj'], "output": "bool",
"function": lambda t, col, value: len(t) == len(
fuzzy_compare_filter(t, col, value, type="greater")),
"tostr": lambda t, col, value: "all_greater {{ {} ; {} ; {} }}".format(t, col, value),
"to_nl": lambda t, col, value: "all ( {} ) of ( {} ) is greater than ( {} )".format(col, t, value),
"alias": greater_dict,
"append": None}
APIs["all_greater_eq"] = {"argument": ['row', 'header', 'obj'], "output": "bool",
"function": lambda t, col, value: len(t) == len(
fuzzy_compare_filter(t, col, value, type="greater_eq")),
"tostr": lambda t, col, value: "all_greater_eq {{ {} ; {} ; {} }}".format(t, col, value),
"to_nl": lambda t, col, value: "all ( {} ) of ( {} ) is greater or equal to ( {} )".format(col, t, value),
"alias": greater_dict,
"append": None}
APIs["most_str_eq"] = {"argument": ['row', 'header', 'str'], "output": "bool",
"function": lambda t, col, value: len(t) // 3 <= len(fuzzy_match_filter(t, col, value)),
"tostr": lambda t, col, value: "most_eq {{ {} ; {} ; {} }}".format(t, col, value),
"to_nl": lambda t, col, value: "most of ( {} ) of ( {} ) is ( {} )".format(col, t, value),
"alias": most_dict,
"append": None}
APIs["most_str_not_eq"] = {"argument": ['row', 'header', 'str'], "output": "bool",
"function": lambda t, col, value: len(t) // 3 > len(fuzzy_match_filter(t, col, value)),
"tostr": lambda t, col, value: "most_not_eq {{ {} ; {} ; {} }}".format(t, col, value),
"to_nl": lambda t, col, value: "most ( {} ) of ( {} ) is not ( {} )".format(col, t, value),
"alias": most_dict,
"append": None}
APIs["most_eq"] = {"argument": ['row', 'header', 'obj'], "output": "bool",
"function": lambda t, col, value: len(t) // 3 <= len(fuzzy_compare_filter(t, col, value, type="eq")),
"tostr": lambda t, col, value: "most_eq {{ {} ; {} ; {} }}".format(t, col, value),
"alias": most_dict,
"to_nl": lambda t, col, value: "most of ( {} ) of ( {} ) is ( {} )".format(col, t, value),
"append": None}
APIs["most_not_eq"] = {"argument": ['row', 'header', 'obj'], "output": "bool",
"function": lambda t, col, value: len(t) // 3 > len(
fuzzy_compare_filter(t, col, value, type="eq")),
"tostr": lambda t, col, value: "most_not_eq {{ {} ; {} ; {} }}".format(t, col, value),
"to_nl": lambda t, col, value: "most of ( {} ) of ( {} ) is not ( {} )".format(col, t, value),
"alias": most_dict,
"append": None}
APIs["most_less"] = {"argument": ['row', 'header', 'obj'], "output": "bool",
"function": lambda t, col, value: len(t) // 3 <= len(
fuzzy_compare_filter(t, col, value, type="less")),
"tostr": lambda t, col, value: "most_less {{ {} ; {} ; {} }}".format(t, col, value),
"to_nl": lambda t, col, value: "most of ( {} ) of ( {} ) is less than ( {} )".format(col, t, value),
"alias": less_dict + most_dict,
"append": None}
APIs["most_less_eq"] = {"argument": ['row', 'header', 'obj'], "output": "bool",
"function": lambda t, col, value: len(t) // 3 <= len(
fuzzy_compare_filter(t, col, value, type="less_eq")),
"tostr": lambda t, col, value: "most_less_eq {{ {} ; {} ; {} }}".format(t, col, value),
"to_nl": lambda t, col, value: "most of ( {} ) of ( {} ) is less than or equal to ( {} )".format(col, t, value),
"alias": most_dict + less_dict,
"append": None}
APIs["most_greater"] = {"argument": ['row', 'header', 'obj'], "output": "bool",
"function": lambda t, col, value: len(t) // 3 <= len(
fuzzy_compare_filter(t, col, value, type="greater")),
"tostr": lambda t, col, value: "most_greater {{ {} ; {} ; {} }}".format(t, col, value),
"to_nl": lambda t, col, value: "most of ( {} ) of ( {} ) is greater than ( {} )".format(col, t, value),
"alias": most_dict + greater_dict,
"append": None}
APIs["most_greater_eq"] = {"argument": ['row', 'header', 'obj'], "output": "bool",
"function": lambda t, col, value: len(t) // 3 <= len(
fuzzy_compare_filter(t, col, value, type="greater_eq")),
"tostr": lambda t, col, value: "most_greater_eq {{ {} ; {} ; {} }}".format(t, col, value),
"to_nl": lambda t, col, value: "most of ( {} ) of ( {} ) is greater than or equal to ( {} )".format(col, t, value),
"alias": most_dict + greater_dict,
"append": None}
def logic_matching(logic, nl, truth=None, forbid_content = False):
    """Collect the tokens of logic string ``logic`` that are NOT reflected in
    the sentence ``nl``.

    The logic form is flattened by splitting on "{", "}" and ";".  For each
    resulting token:
      * a known API operator name matches iff one of its ``alias`` regexes
        hits ``nl`` (operators with no alias list always count as matched);
      * a digit token matches via ``digit_match`` against ``nl``, with a
        fallback check against ``truth`` (reference text) when provided;
      * any other token (column names, cell contents) always counts as matched.
    If ``nl`` contains "not" but the logic does not, "not" is prepended to
    the result as a reversed-judgement marker.

    Returns the list of unmatched tokens (empty means fully matched).
    NOTE(review): ``forbid_content`` and ``origin_logic`` are currently unused.
    """
    origin_logic = logic
    # Flatten the "{ } ;" structure into a flat token list.
    logic = logic.replace('{', '|')
    logic = logic.replace('}', '|').replace(';', '|')
    logic = [x.strip() for x in logic.split('|') if len(x.strip())]
    processed_logic = []
    if 'not' in nl and 'not' not in logic: processed_logic.append('not')
    # In this version, we only add "not" as the reversed judgement
    for x in logic:
        # white_list = ['eq', 'hop', 'count', 'and', 'nth', 'diff', 'filter_all']
        # black_list = ['most_eq']
        # if x in APIs.keys() and all(y not in x for y in black_list) and \
        #         any(y in x for y in white_list) : continue
        found = 0
        if x in APIs.keys():
            if 'alias' in APIs[x].keys():
                # Operator is matched iff any alias regex appears in nl.
                for regex in APIs[x]['alias']:
                    if re.search(regex, nl) is not None:
                        # nl = re.sub(regex, ' _ ', nl)
                        found = 1
                        break
            else:
                # if not in the list, found = 1
                found = 1
            # regex = x.split('_')
            # regex = ['equal' if x == 'eq' else x for x in regex]
            # for token in regex:
            #
            #     if re.search(token, nl) is not None:
            #         # nl = re.sub(token, ' _ ', nl)
            #         found = 1
        elif x.isdigit():
            if digit_match(x, nl):
                found = 1
            elif truth is not None and not digit_match(x, truth):
                found = 1
        else:
            found = 1
            # if x in truth and x not in nl:
            #     found = 0
        if found == 0:
            processed_logic.append(x)
    return processed_logic
return processed_logic | null |
164,328 | import csv
from collections import defaultdict
import re
from .APIs import *
import nltk
from nltk.corpus import stopwords
from sklearn.metrics import classification_report, accuracy_score
def count_label(data):
    """Tally the label (last field) of each row in ``data`` and print the tally."""
    tally = defaultdict(int)
    for row in data:
        tally[row[-1]] += 1
    print(tally)
164,329 | import json
from tqdm import tqdm
from collections import defaultdict
from .TreeNode import *
class Node(object):
def __init__(self, full_table, dict_in):
def eval(self):
def to_nl(self):
def to_code(self):
def _mutate_dict(self, dict_in, alpha=0.5, beta=0.5, gamma=0.6, theta=0.15, omega=0.2):
def mutate(self, mutate_num_max=500, alpha=0.5, beta=0.5, gamma=0.6, theta=0.15, omega=0.2):
def load_node_from_json(json_in):
    """Yield ``(Node, logic_str)`` pairs built from every record in ``json_in``.

    Each JSON record supplies a logic form plus the table header/content; the
    table is rebuilt column-major as a pandas DataFrame (dropping a trailing
    "all"/"total"/"sum"-style summary row) and wrapped in a Node.  Records
    whose table cannot be assembled are skipped silently.
    """
    with open(json_in) as f:
        data_in = json.load(f)
    for data in tqdm(data_in):
        nl = data['logic_str']
        logic = data["logic"]
        table_header = data["table_header"]
        table_cont = data["table_cont"]
        try:
            # Rebuild the table column-major: header -> list of cell strings.
            pd_in = defaultdict(list)
            for ind, header in enumerate(table_header):
                for inr, row in enumerate(table_cont):
                    # remove last summarization row
                    if inr == len(table_cont) - 1 \
                            and ("all" in row[0] or "total" in row[0] or "sum" in row[0] or \
                            "a l l" in row[0] or "t o t a l" in row[0] or "s u m" in row[0]):
                        continue
                    pd_in[header].append(row[ind])
            pd_table = pd.DataFrame(pd_in)
        except Exception:
            # Malformed/ragged table — skip this record.
            continue
        root = Node(pd_table, logic)
        yield root, nl
164,334 | import numpy
import re
import math
import pandas as pd
import numpy as np
import datetime
class ExeError(object):
def __init__(self, message="exe error"):
def hop_op(t, col):
if len(t) == 0:
return ExeError()
return t[col].values[0] | null |
164,337 | import sys
import os
import json
import csv
import random
from tqdm import tqdm
from collections import defaultdict
import numpy as np
import pandas as pd
from logictools.TreeNode import *
The provided code snippet includes necessary dependencies for implementing the `execute_all` function. Write a Python function `def execute_all(json_in)` to solve the following problem:
execute all logic forms
Here is the function:
def execute_all(json_in):
    '''
    execute all logic forms

    Reads the records in ``json_in``, rebuilds each table as a pandas
    DataFrame (dropping a trailing "all"/"total"/"sum"-style summary row),
    wraps it in a Node, and prints the natural-language rendering of each
    logic form.

    Returns (num_all, num_correct, nl_list).
    NOTE(review): ``num_correct`` is initialized but never incremented here,
    so it is always 0.
    '''
    nl_list = []
    with open(json_in) as f:
        data_in = json.load(f)
    num_all = 0
    num_correct = 0
    for data in tqdm(data_in):
        num_all += 1
        logic = data["logic"]
        table_header = data["table_header"]
        table_cont = data["table_cont"]
        try:
            # Rebuild the table column-major: header -> list of cell strings.
            pd_in = defaultdict(list)
            for ind, header in enumerate(table_header):
                for inr, row in enumerate(table_cont):
                    # remove last summarization row
                    if inr == len(table_cont) - 1 \
                            and ("all" in row[0] or "total" in row[0] or "sum" in row[0] or \
                            "a l l" in row[0] or "t o t a l" in row[0] or "s u m" in row[0]):
                        continue
                    pd_in[header].append(row[ind])
            pd_table = pd.DataFrame(pd_in)
        except Exception:
            # Malformed/ragged table — skip this record.
            continue
        root = Node(pd_table, logic)
        nl = root.to_nl()
        nl_list.append(nl)
        print("Question:", data['interpret'])
        print("Code:", data['logic_str'])
        print("Translated:", nl)
        print('\n')
    print("All: ", num_all)
    return num_all, num_correct, nl_list
164,345 | import logging
import math
import os
from dataclasses import dataclass, field
from typing import Optional
import torch
import json
from transformers import (
MODEL_WITH_LM_HEAD_MAPPING,
AutoTokenizer,
HfArgumentParser,
PreTrainedTokenizer,
set_seed,
)
from generator.models.relogic import RelogicModel
from generator.datasets.text_generation.relogic import RelogicDataset, DataCollatorForRelogic
from generator.scorers.text_generation import TextGenerationScorer
from generator.trainer_refer import Generator_Trainer
from generator.training_args import Generator_TrainingArguments
from evaluator.models.adversarial_evaluator import AdversarialModel
from evaluator.datasets.evaluator.adversarial import AdversarialDataset, DataCollatorForAdversarial
from evaluator.scorers.adv_eval import EvalScorer
from evaluator.trainer import Evaluator_Trainer
from evaluator.training_args import Evaluator_TrainingArguments
class RelogicModel(nn.Module):
"""
output: tuple: (loss, ) in training
"""
def __init__(self, pretrain_config):
super().__init__()
self.bert = BartForConditionalGeneration.from_pretrained(pretrain_config)
self.tokenizer = BartTokenizer.from_pretrained(pretrain_config)
special_tokens_dict = {'additional_special_tokens': ['<SQL>', '<LOGIC>']}
self.tokenizer.add_special_tokens(special_tokens_dict)
self.bert.resize_token_embeddings(len(self.tokenizer))
def forward(self, *input, **kwargs):
input_ids = kwargs.pop("input_ids")
pad_token_id = kwargs.pop("pad_token_id")
attention_mask = (input_ids != pad_token_id).long()
num_return_sequences = 5
if self.training:
output_ids = kwargs.pop('labels')
y_ids = output_ids[:, :-1].contiguous()
lm_labels = output_ids[:, 1:].clone()
lm_labels[output_ids[:, 1:] == pad_token_id] = -100
outputs = self.bert(input_ids,
attention_mask=attention_mask, decoder_input_ids=y_ids, lm_labels=lm_labels, )
return (outputs[0],)
else:
reranker = kwargs.pop("reranker")
label_eos_id = kwargs.pop("label_eos_id")
label_bos_id = kwargs.pop("label_bos_id")
label_padding_id = kwargs.pop("label_padding_id")
if not reranker:
generated_ids = self.bert.generate(
input_ids=input_ids,
attention_mask=attention_mask,
num_beams=5,
max_length=60,
length_penalty=3.0,
early_stopping=True,
use_cache=True,
decoder_start_token_id=label_bos_id,
eos_token_id=label_eos_id,
pad_token_id=label_padding_id
)
output_ids = kwargs.pop('labels')
y_ids = output_ids[:, :-1].contiguous()
lm_labels = output_ids[:, 1:].clone()
lm_labels[output_ids[:, 1:] == pad_token_id] = -100
outputs = self.bert(input_ids,
attention_mask=attention_mask, decoder_input_ids=y_ids, lm_labels=lm_labels, )
return (outputs[0].detach(), generated_ids)
else:
generated_ids = self.bert.generate(
input_ids=input_ids,
attention_mask=attention_mask,
num_beams=5,
max_length=60,
length_penalty=4.0,
early_stopping=True,
use_cache=True,
decoder_start_token_id=label_bos_id,
eos_token_id=label_eos_id,
pad_token_id=label_padding_id,
num_return_sequences=num_return_sequences
)
reranker_inputs = []
for i in range(input_ids.shape[0]):
input_logic = self.tokenizer.decode(input_ids[i], skip_special_tokens=True)
for j in range(num_return_sequences):
pred_text = self.tokenizer.decode(generated_ids[num_return_sequences*i + j], skip_special_tokens=True)
reranker_input_token = [self.tokenizer.cls_token] + self.tokenizer.tokenize(input_logic) + [self.tokenizer.eos_token] + self.tokenizer.tokenize(pred_text) + [self.tokenizer.sep_token]
reranker_input_token_ids = self.tokenizer.convert_tokens_to_ids(reranker_input_token)
reranker_inputs.append(reranker_input_token_ids)
reranker_input_ids = pad_and_tensorize_sequence(reranker_inputs, padding_value=self.tokenizer.pad_token_id)
reranker_input_ids = reranker_input_ids.to(input_ids.device)
reranker_labels = torch.zeros(reranker_input_ids.shape[0],dtype=torch.long).to(input_ids.device)
rerank_inputs = {
"encoder_input_ids":reranker_input_ids,
"pad_token_id":pad_token_id,
"labels":reranker_labels,
}
rerank_result = reranker(**rerank_inputs)
seq_len = generated_ids.shape[-1]
rerank_score = rerank_result[1]
rerank_mat = rerank_score.view(-1, num_return_sequences)
batch_num = rerank_mat.shape[0]
max_rerank_id = rerank_mat.argmax(dim=1).view(batch_num, 1, 1).repeat(1, 1, seq_len)
generated_ids = generated_ids.view(-1, num_return_sequences, seq_len)
reranked_generated_ids = generated_ids.gather(1, max_rerank_id).squeeze()
loss = torch.FloatTensor(0).to(generated_ids.device).detach()
return (loss, reranked_generated_ids)
def save_pretrained(self, save_directory):
""" Save a model and its configuration file to a directory, so that it
can be re-loaded using the `:func:`~transformers.PreTrainedModel.from_pretrained`` class method.
Arguments:
save_directory: directory to which to save.
"""
assert os.path.isdir(
save_directory
), "Saving path should be a directory where the model and configuration can be saved"
# Only save the model itself if we are using distributed training
model_to_save = self.module if hasattr(self, "module") else self
# Attach architecture to the config
# model_to_save.config.architectures = [model_to_save.__class__.__name__]
# If we save using the predefined names, we can load using `from_pretrained`
output_model_file = os.path.join(save_directory, WEIGHTS_NAME)
torch.save(model_to_save.state_dict(), output_model_file)
logger.info("Model weights saved in {}".format(output_model_file))
class DataCollatorForRelogic:
"""
"""
tokenizer: PreTrainedTokenizer
def __post_init__(self):
self.label_bos_id = self.tokenizer.cls_token_id
self.label_eos_id = self.tokenizer.sep_token_id
def collate_batch(self, examples):
logics = [example["logic"] for example in examples]
original_logic = [example["original_logic"] for example in examples]
text_ids_sequences = [example["text_token_ids"] for example in examples]
logic_ids_sequences = [example["logic_token_ids"] for example in examples]
padded_text_ids_tensor = pad_and_tensorize_sequence(
text_ids_sequences, padding_value=self.tokenizer.pad_token_id)
padded_logic_ids_tensor = pad_and_tensorize_sequence(
logic_ids_sequences, padding_value=self.tokenizer.pad_token_id)
return {
"logics": logics,
"original_logic": original_logic,
"input_ids": padded_logic_ids_tensor,
"labels": padded_text_ids_tensor,
"pad_token_id": self.tokenizer.pad_token_id,
"label_eos_id": self.label_eos_id,
"label_bos_id": self.label_bos_id,
"label_padding_id": self.tokenizer.pad_token_id
}
class TextGenerationScorer:
    """Score generated token sequences against references.

    Computes corpus BLEU and exact-match accuracy, and (on rank 0) dumps
    per-example predictions to a JSON-lines file under ``output_path``.

    Args:
        tokenizer: tokenizer used to decode id sequences back to text.
        bos_id: begin-of-sequence id; occurrences are stripped from sequences.
        eos_id: end-of-sequence id; truncates a sequence when encountered.
        output_path: base directory (or path prefix in snow_ball mode) for dumps.
    """
    def __init__(self, tokenizer, bos_id, eos_id, output_path):
        self.bos_id = bos_id
        self.eos_id = eos_id
        self.output_path = output_path
        self.tokenizer = tokenizer
    def __call__(self, prediction, epoch = 0, snow_ball = False, mode_name='eval'):
        """Score a flattened prediction batch.

        Args:
            prediction: EvalPredictionWithSize-like object with flattened
                ``predictions``/``label_ids`` plus per-example sizes, and the
                source ``logics`` / ``original_logic`` strings.
            epoch: epoch number used to name the dump file (0 if falsy).
            snow_ball: if True, write an augmentation-format dump instead.
            mode_name: subdirectory name for the dump ('eval', 'test', ...).
        Returns:
            dict with keys "bleu", "accuracy", "correct", "total".
        """
        epoch = 0 if not epoch else epoch
        if snow_ball:
            output_path = self.output_path + 'augmentation.json'
        else:
            output_dir = os.path.join(self.output_path, mode_name)
            if not os.path.exists(output_dir):
                os.makedirs(output_dir)
            output_path = output_dir + os.sep + 'epoch_{}'.format(int(epoch)) + '.json'
        preds = prediction.predictions
        preds_size = prediction.predictions_size
        label_ids = prediction.label_ids
        label_size = prediction.label_size
        logics = prediction.logics
        original_logic = prediction.original_logic
        p_start, l_start = 0, 0
        correct, total = 0, 0
        ref = []
        hyp = []
        # Only rank 0 writes the dump file; other ranks still accumulate metrics.
        fout = open(output_path, "w") if is_rank_0() else None
        try:
            for idx, (p_size, l_size) in enumerate(zip(preds_size, label_size)):
                p_end = p_start + p_size
                l_end = l_start + l_size
                pred = self.get_sequence(preds[p_start: p_end])
                label = self.get_sequence(label_ids[l_start: l_end])
                p_start = p_end
                l_start = l_end
                if pred == label:
                    correct += 1
                total += 1
                if fout is not None:
                    pred_text = self.tokenizer.decode(pred, skip_special_tokens=True, clean_up_tokenization_spaces=True).strip()
                    label_text = self.tokenizer.decode(label, skip_special_tokens=True, clean_up_tokenization_spaces=True).strip()
                    ref.append(label_text)
                    hyp.append(pred_text)
                    if snow_ball:
                        fout.write(
                            json.dumps({
                                "idx": idx,
                                "mutated_logic": logics[idx],
                                "mutated_text": pred_text,
                                "original_logic":original_logic[idx],
                                "original_text": label_text}) + "\n")
                    else:
                        fout.write(
                            json.dumps({
                                "idx": idx,
                                "logic": logics[idx],
                                "pred": pred_text,
                                "label": label_text}) + "\n")
        finally:
            # FIX: the file handle was previously never closed, which leaked the
            # descriptor and could leave the dump file truncated/unflushed.
            if fout is not None:
                fout.close()
        # score = list_bleu([ref], hyp, tmp_dir='tmp/tmp_bleu')
        score = list_bleu([ref], hyp)
        return {
            "bleu": score,
            # FIX: guard against an empty evaluation set (ZeroDivisionError before).
            "accuracy": correct / total if total else 0.0,
            "correct": correct,
            "total": total
        }
    def get_sequence(self, seq):
        """Strip bos ids and truncate at the first eos id, returning a list of ints."""
        processed_seq = []
        for idx in seq:
            if idx == self.bos_id:
                continue
            if idx == self.eos_id:
                break
            processed_seq.append(int(idx))
        return processed_seq
class Generator_Trainer:
    """
    Trainer is a simple but feature-complete training and eval loop for PyTorch,
    optimized for Transformers.
    Handles distributed (DDP), TPU (torch_xla) and fp16 (apex) execution paths,
    TensorBoard/W&B logging, checkpoint saving/rotation, and an evaluation loop
    that aggregates variable-length predictions across workers.
    """
    # Class-level attribute annotations; the values given here act as class
    # defaults and are shadowed by instance attributes set in __init__.
    model: PreTrainedModel
    args: Generator_TrainingArguments
    data_collator: DataCollator
    train_dataset: Optional[Dataset]
    eval_dataset: Optional[Dataset]
    compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None
    prediction_loss_only: bool
    tb_writer: Optional["SummaryWriter"] = None
    optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = None
    global_step: Optional[int] = None
    epoch: Optional[float] = None
    def __init__(
        self,
        model: PreTrainedModel,
        args: Generator_TrainingArguments,
        data_collator: Optional[DataCollator] = None,
        train_dataset: Optional[Dataset] = None,
        eval_dataset: Optional[Dataset] = None,
        compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None,
        prediction_loss_only=False,
        tb_writer: Optional["SummaryWriter"] = None,
        optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = None,
        model_name = "",
        output_dump_dir = '',
        reranker = None
    ):
        """
        Trainer is a simple but feature-complete training and eval loop for PyTorch,
        optimized for Transformers.
        Args:
            prediction_loss_only:
                (Optional) in evaluation and prediction, only return the loss
            model_name: display/name string stored for the model (not used to load).
            output_dump_dir: directory for checkpoints, logs and tokenizer dumps.
            reranker: optional reranker model forwarded to the model at prediction
                time (see _prediction_loop).
        """
        self.model = model.to(args.device)
        self.model_name = model_name
        self.args = args
        self.data_collator = data_collator if data_collator is not None else default_data_collator
        self.train_dataset = train_dataset
        self.eval_dataset = eval_dataset
        self.compute_metrics = compute_metrics
        self.prediction_loss_only = prediction_loss_only
        self.optimizers = optimizers
        self.output_dir = output_dump_dir
        self.reranker = reranker
        if tb_writer is not None:
            self.tb_writer = tb_writer
        elif is_tensorboard_available() and self.is_world_master():
            self.tb_writer = SummaryWriter(log_dir=self.args.gen_logging_dir)
        if not is_tensorboard_available():
            logger.warning(
                "You are instantiating a Trainer but Tensorboard is not installed. You should consider installing it."
            )
        if is_wandb_available():
            self._setup_wandb()
        else:
            logger.info(
                "You are instantiating a Trainer but W&B is not installed. To use wandb logging, "
                "run `pip install wandb; wandb login` see https://docs.wandb.com/huggingface."
            )
        set_seed(self.args.gen_seed)
        # Create output directory if needed
        if self.is_world_master():
            os.makedirs(self.output_dir, exist_ok=True)
        #set logger
        # NOTE(review): the file handler is added to a (module-level) logger on
        # every construction — repeated instantiation duplicates log lines.
        LOG_FILE = os.path.join(self.output_dir, 'log.txt')
        file_handler = logging.FileHandler(LOG_FILE)
        file_handler.setLevel(level=logging.DEBUG)
        logger.addHandler(file_handler)
        if is_torch_tpu_available():
            # Set an xla_device flag on the model's config.
            # We'll find a more elegant and not need to do this in the future.
            self.model.config.xla_device = True
        if not callable(self.data_collator) and callable(getattr(self.data_collator, "collate_batch", None)):
            self.data_collator = self.data_collator.collate_batch
            warnings.warn(
                (
                    "The `data_collator` should now be a simple callable (function, class with `__call__`), classes "
                    + "with a `collate_batch` are deprecated and won't be supported in a future version."
                ),
                FutureWarning,
            )
    def get_train_dataloader(self) -> DataLoader:
        """Build the training DataLoader with a TPU/distributed-aware sampler."""
        if self.train_dataset is None:
            raise ValueError("Trainer: training requires a train_dataset.")
        if is_torch_tpu_available():
            train_sampler = get_tpu_sampler(self.train_dataset)
        else:
            train_sampler = (
                RandomSampler(self.train_dataset)
                if self.args.local_rank == -1
                else DistributedSampler(self.train_dataset)
            )
        data_loader = DataLoader(
            self.train_dataset,
            batch_size=self.args.train_batch_size,
            sampler=train_sampler,
            collate_fn=self.data_collator,
            drop_last=self.args.gen_dataloader_drop_last,
        )
        return data_loader
    def get_eval_dataloader(self, eval_dataset: Optional[Dataset] = None) -> DataLoader:
        """Build the evaluation DataLoader (sequential sampling, sharded if distributed)."""
        if eval_dataset is None and self.eval_dataset is None:
            raise ValueError("Trainer: evaluation requires an eval_dataset.")
        eval_dataset = eval_dataset if eval_dataset is not None else self.eval_dataset
        if is_torch_tpu_available():
            sampler = SequentialDistributedSampler(
                eval_dataset, num_replicas=xm.xrt_world_size(), rank=xm.get_ordinal()
            )
        elif self.args.local_rank != -1:
            sampler = SequentialDistributedSampler(eval_dataset)
        else:
            sampler = SequentialSampler(eval_dataset)
        data_loader = DataLoader(
            eval_dataset,
            sampler=sampler,
            batch_size=self.args.eval_batch_size,
            collate_fn=self.data_collator,
            drop_last=self.args.gen_dataloader_drop_last,
        )
        return data_loader
    def get_test_dataloader(self, test_dataset: Dataset) -> DataLoader:
        """Build the test DataLoader; mirrors get_eval_dataloader."""
        # We use the same batch_size as for eval.
        if is_torch_tpu_available():
            sampler = SequentialDistributedSampler(
                test_dataset, num_replicas=xm.xrt_world_size(), rank=xm.get_ordinal()
            )
        elif self.args.local_rank != -1:
            sampler = SequentialDistributedSampler(test_dataset)
        else:
            sampler = SequentialSampler(test_dataset)
        data_loader = DataLoader(
            test_dataset,
            sampler=sampler,
            batch_size=self.args.eval_batch_size,
            collate_fn=self.data_collator,
            drop_last=self.args.gen_dataloader_drop_last,
        )
        return data_loader
    def get_optimizers(
        self, num_training_steps: int
    ) -> Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]:
        """
        Setup the optimizer and the learning rate scheduler.
        We provide a reasonable default that works well.
        If you want to use something else, you can pass a tuple in the Trainer's init,
        or override this method in a subclass.
        """
        if self.optimizers is not None:
            return self.optimizers
        # Prepare optimizer and schedule (linear warmup and decay)
        no_decay = ["bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
                "weight_decay": self.args.gen_weight_decay,
            },
            {
                "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
                "weight_decay": 0.0,
            },
        ]
        optimizer = AdamW(optimizer_grouped_parameters, lr=self.args.gen_learning_rate, eps=self.args.gen_adam_epsilon)
        scheduler = get_linear_schedule_with_warmup(
            optimizer, num_warmup_steps=self.args.gen_warmup_steps, num_training_steps=num_training_steps
        )
        return optimizer, scheduler
    def _setup_wandb(self):
        """
        Setup the optional Weights & Biases (`wandb`) integration.
        One can override this method to customize the setup if needed. Find more information at https://docs.wandb.com/huggingface
        You can also override the following environment variables:
        Environment:
            WANDB_WATCH:
                (Optional, ["gradients", "all", "false"]) "gradients" by default, set to "false" to disable gradient logging
                or "all" to log gradients and parameters
            WANDB_PROJECT:
                (Optional): str - "huggingface" by default, set this to a custom string to store results in a different project
            WANDB_DISABLED:
                (Optional): boolean - defaults to false, set to "true" to disable wandb entirely
        """
        if self.is_world_master():
            logger.info(
                'Automatic Weights & Biases logging enabled, to disable set os.environ["WANDB_DISABLED"] = "true"'
            )
            wandb.init(project=os.getenv("WANDB_PROJECT", "huggingface"), config=vars(self.args))
            # keep track of model topology and gradients, unsupported on TPU
            if not is_torch_tpu_available() and os.getenv("WANDB_WATCH") != "false":
                wandb.watch(
                    self.model, log=os.getenv("WANDB_WATCH", "gradients"), log_freq=max(100, self.args.gen_logging_steps)
                )
    def num_examples(self, dataloader: DataLoader) -> int:
        """
        Helper to get num of examples from a DataLoader, by accessing its Dataset.
        """
        return len(dataloader.dataset)
    def train(self, model_path: Optional[str] = None):
        """
        Main training entry point.
        Args:
            model_path:
                (Optional) Local path to model if model to train has been instantiated from a local path
                If present, we will try reloading the optimizer/scheduler states from there.
        """
        train_dataloader = self.get_train_dataloader()
        if self.args.gen_max_steps > 0:
            t_total = self.args.gen_max_steps
            num_train_epochs = (
                self.args.gen_max_steps // (len(train_dataloader) // self.args.gen_gradient_accumulation_steps) + 1
            )
        else:
            t_total = int(len(train_dataloader) // self.args.gen_gradient_accumulation_steps * self.args.gen_num_train_epochs)
            num_train_epochs = self.args.gen_num_train_epochs
        optimizer, scheduler = self.get_optimizers(num_training_steps=t_total)
        # Check if saved optimizer or scheduler states exist
        if (
            model_path is not None
            and os.path.isfile(os.path.join(model_path, "optimizer.pt"))
            and os.path.isfile(os.path.join(model_path, "scheduler.pt"))
        ):
            # Load in optimizer and scheduler states
            optimizer.load_state_dict(
                torch.load(os.path.join(model_path, "optimizer.pt"), map_location=self.args.gen_device)
            )
            scheduler.load_state_dict(torch.load(os.path.join(model_path, "scheduler.pt")))
        model = self.model
        if self.args.gen_fp16:
            if not is_apex_available():
                raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
            model, optimizer = amp.initialize(model, optimizer, opt_level=self.args.gen_fp16_opt_level)
        # multi-gpu training (should be after apex fp16 initialization)
        if self.args.n_gpu > 1:
            model = torch.nn.DataParallel(model)
        # Distributed training (should be after apex fp16 initialization)
        if self.args.local_rank != -1:
            model = torch.nn.parallel.DistributedDataParallel(
                model,
                device_ids=[self.args.local_rank],
                output_device=self.args.local_rank,
                find_unused_parameters=True,
            )
        # if self.tb_writer is not None:
        #     self.tb_writer.add_text("args", self.args.gen_to_json_string())
        #     self.tb_writer.add_hparams(self.args.gen_to_sanitized_dict(), metric_dict={})
        # Train!
        if is_torch_tpu_available():
            total_train_batch_size = self.args.gen_train_batch_size * xm.xrt_world_size()
        else:
            total_train_batch_size = (
                self.args.train_batch_size
                * self.args.gen_gradient_accumulation_steps
                * (torch.distributed.get_world_size() if self.args.local_rank != -1 else 1)
            )
        logger.info("***** Running training *****")
        logger.info("  Num examples = %d", self.num_examples(train_dataloader))
        logger.info("  Num Epochs = %d", num_train_epochs)
        logger.info("  Instantaneous batch size per device = %d", self.args.gen_per_device_train_batch_size)
        logger.info("  Total train batch size (w. parallel, distributed & accumulation) = %d", total_train_batch_size)
        logger.info("  Gradient Accumulation steps = %d", self.args.gen_gradient_accumulation_steps)
        logger.info("  Total optimization steps = %d", t_total)
        self.global_step = 0
        self.epoch = 0
        epochs_trained = 0
        steps_trained_in_current_epoch = 0
        # Check if continuing training from a checkpoint
        if model_path is not None:
            # set global_step to global_step of last saved checkpoint from model path
            try:
                self.global_step = int(model_path.split("-")[-1].split("/")[0])
                epochs_trained = self.global_step // (len(train_dataloader) // self.args.gen_gradient_accumulation_steps)
                steps_trained_in_current_epoch = self.global_step % (
                    len(train_dataloader) // self.args.gen_gradient_accumulation_steps
                )
                logger.info("  Continuing training from checkpoint, will skip to saved global_step")
                logger.info("  Continuing training from epoch %d", epochs_trained)
                logger.info("  Continuing training from global step %d", self.global_step)
                logger.info("  Will skip the first %d steps in the first epoch", steps_trained_in_current_epoch)
            except ValueError:
                self.global_step = 0
                logger.info("  Starting fine-tuning.")
        tr_loss = 0.0
        logging_loss = 0.0
        model.zero_grad()
        train_iterator = trange(
            epochs_trained, int(num_train_epochs), desc="Epoch", disable=not self.is_local_master() or not self.args.gen_logging_tqdm
        )
        for epoch in train_iterator:
            if isinstance(train_dataloader, DataLoader) and isinstance(train_dataloader.sampler, DistributedSampler):
                train_dataloader.sampler.set_epoch(epoch)
            if is_torch_tpu_available():
                parallel_loader = pl.ParallelLoader(train_dataloader, [self.args.gen_device]).per_device_loader(
                    self.args.gen_device
                )
                epoch_iterator = tqdm(parallel_loader, desc="Iteration", disable=not self.is_local_master() or not self.args.gen_logging_tqdm)
            else:
                epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=not self.is_local_master() or not self.args.gen_logging_tqdm)
            for step, inputs in enumerate(epoch_iterator):
                # Skip past any already trained steps if resuming training
                if steps_trained_in_current_epoch > 0:
                    steps_trained_in_current_epoch -= 1
                    continue
                tr_loss += self._training_step(model, inputs, optimizer)
                if (step + 1) % self.args.gen_gradient_accumulation_steps == 0 or (
                    # last step in epoch but step is always smaller than gradient_accumulation_steps
                    len(epoch_iterator) <= self.args.gen_gradient_accumulation_steps
                    and (step + 1) == len(epoch_iterator)
                ):
                    if self.args.gen_fp16:
                        torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), self.args.gen_max_grad_norm)
                    else:
                        torch.nn.utils.clip_grad_norm_(model.parameters(), self.args.gen_max_grad_norm)
                    if is_torch_tpu_available():
                        xm.optimizer_step(optimizer)
                    else:
                        optimizer.step()
                    scheduler.step()
                    model.zero_grad()
                    self.global_step += 1
                    self.epoch = epoch + (step + 1) / len(epoch_iterator)
                    if (self.args.gen_logging_steps > 0 and self.global_step % self.args.gen_logging_steps == 0) or (
                        self.global_step == 1 and self.args.gen_logging_first_step
                    ):
                        logs: Dict[str, float] = {}
                        logs["loss"] = (tr_loss - logging_loss) / self.args.gen_logging_steps
                        # backward compatibility for pytorch schedulers
                        logs["learning_rate"] = (
                            scheduler.get_last_lr()[0]
                            if version.parse(torch.__version__) >= version.parse("1.4")
                            else scheduler.get_lr()[0]
                        )
                        logging_loss = tr_loss
                        self._log(logs)
                    # NOTE(review): self.epoch is fractional mid-epoch, so these
                    # modulo checks only hit exactly 0 at epoch boundaries — confirm
                    # this is the intended eval/save cadence.
                    if (self.args.gen_eval_epochs > 0 and self.epoch % self.args.gen_eval_epochs == 0):
                        if self.args.gen_evaluate_during_training:
                            self.evaluate()
                    if self.args.gen_save_epochs > 0 and self.epoch % self.args.gen_save_epochs == 0:
                        # In all cases (even distributed/parallel), self.model is always a reference
                        # to the model we want to save.
                        if hasattr(model, "module"):
                            assert model.module is self.model
                        else:
                            assert model is self.model
                        # Save model checkpoint
                        output_dir = os.path.join(self.output_dir, f"{PREFIX_CHECKPOINT_DIR}-epoch-{self.epoch}")
                        self.save_model(output_dir)
                        if self.is_world_master():
                            self._rotate_checkpoints()
                        if is_torch_tpu_available():
                            xm.rendezvous("saving_optimizer_states")
                            xm.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
                            xm.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
                        elif self.is_world_master():
                            torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
                            torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
                if self.args.gen_max_steps > 0 and self.global_step > self.args.gen_max_steps:
                    epoch_iterator.close()
                    break
            if self.args.gen_max_steps > 0 and self.global_step > self.args.gen_max_steps:
                train_iterator.close()
                break
            if self.args.gen_tpu_metrics_debug:
                # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
                xm.master_print(met.metrics_report())
        if self.tb_writer:
            self.tb_writer.close()
        logger.info("\n\nTraining completed. Do not forget to share your model on huggingface.co/models =)\n\n")
        return TrainOutput(self.global_step, tr_loss / self.global_step)
    def _log(self, logs: Dict[str, float], iterator: Optional[tqdm] = None) -> None:
        """Send `logs` to TensorBoard and W&B (scalars only) and echo to console/iterator."""
        if self.epoch is not None:
            logs["epoch"] = self.epoch
        if self.global_step is None:
            # when logging evaluation metrics without training
            self.global_step = 0
        if self.tb_writer:
            for k, v in logs.items():
                if isinstance(v, (int, float)):
                    self.tb_writer.add_scalar(k, v, self.global_step)
                else:
                    logger.warning(
                        "Trainer is attempting to log a value of "
                        '"%s" of type %s for key "%s" as a scalar. '
                        "This invocation of Tensorboard's writer.add_scalar() "
                        "is incorrect so we dropped this attribute.",
                        v,
                        type(v),
                        k,
                    )
            self.tb_writer.flush()
        if is_wandb_available():
            if self.is_world_master():
                wandb.log(logs, step=self.global_step)
        output = {**logs, **{"step": self.global_step}}
        if iterator is not None:
            iterator.write(output)
        else:
            logger.info(output)
    def _training_step(
        self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]], optimizer: torch.optim.Optimizer
    ) -> float:
        """Run one forward/backward pass and return the (scaled) loss as a float."""
        model.train()
        for k, v in inputs.items():
            if isinstance(v, torch.Tensor):
                inputs[k] = v.to(self.args.device)
        outputs = model(**inputs)
        loss = outputs[0]  # model outputs are always tuple in transformers (see doc)
        if self.args.n_gpu > 1:
            loss = loss.mean()  # mean() to average on multi-gpu parallel training
        if self.args.gen_gradient_accumulation_steps > 1:
            loss = loss / self.args.gen_gradient_accumulation_steps
        if self.args.gen_fp16:
            with amp.scale_loss(loss, optimizer) as scaled_loss:
                scaled_loss.backward()
        else:
            loss.backward()
        return loss.item()
    def is_local_master(self) -> bool:
        """True on the master process of the local node (rank -1 or 0)."""
        if is_torch_tpu_available():
            return xm.is_master_ordinal(local=True)
        else:
            return self.args.local_rank in [-1, 0]
    def is_world_master(self) -> bool:
        """
        This will be True only in one process, even in distributed mode,
        even when training on multiple machines.
        """
        if is_torch_tpu_available():
            return xm.is_master_ordinal(local=False)
        else:
            return self.args.local_rank == -1 or torch.distributed.get_rank() == 0
    def save_model(self, output_dir: Optional[str] = None):
        """
        Saving best-practices: if you use default names for the model,
        you can reload it using from_pretrained().
        Will only save from the world_master process (unless in TPUs).
        """
        if is_torch_tpu_available():
            self._save_tpu(output_dir)
        elif self.is_world_master():
            self._save(output_dir)
    def _save_tpu(self, output_dir: Optional[str] = None):
        """TPU-specific checkpoint save (rendezvous so all ordinals sync before saving)."""
        output_dir = output_dir if output_dir is not None else self.output_dir
        logger.info("Saving model checkpoint to %s", output_dir)
        if xm.is_master_ordinal():
            os.makedirs(output_dir, exist_ok=True)
            torch.save(self.args, os.path.join(output_dir, "training_args.bin"))
        # Save a trained model and configuration using `save_pretrained()`.
        # They can then be reloaded using `from_pretrained()`
        if not isinstance(self.model, PreTrainedModel):
            raise ValueError("Trainer.model appears to not be a PreTrainedModel")
        xm.rendezvous("saving_checkpoint")
        self.model.save_pretrained(output_dir)
    def _save(self, output_dir: Optional[str] = None):
        """CPU/GPU checkpoint save; writes model weights and the training args."""
        output_dir = output_dir if output_dir is not None else self.output_dir
        os.makedirs(output_dir, exist_ok=True)
        logger.info("Saving model checkpoint to %s", output_dir)
        # Save a trained model and configuration using `save_pretrained()`.
        # They can then be reloaded using `from_pretrained()`
        # if not isinstance(self.model, PreTrainedModel):
        #     raise ValueError("Trainer.model appears to not be a PreTrainedModel")
        self.model.save_pretrained(output_dir)
        # Good practice: save your training arguments together with the trained model
        torch.save(self.args, os.path.join(output_dir, "training_args.bin"))
    def _sorted_checkpoints(self, checkpoint_prefix=PREFIX_CHECKPOINT_DIR, use_mtime=False) -> List[str]:
        """Return checkpoint dirs under output_dir sorted by step number (or mtime)."""
        # NOTE(review): train() names checkpoints "{prefix}-epoch-{float}", which this
        # integer regex does not match — with use_mtime=False the list may come back
        # empty and rotation becomes a no-op. Confirm intended naming.
        ordering_and_checkpoint_path = []
        glob_checkpoints = [str(x) for x in Path(self.output_dir).glob(f"{checkpoint_prefix}-*")]
        for path in glob_checkpoints:
            if use_mtime:
                ordering_and_checkpoint_path.append((os.path.getmtime(path), path))
            else:
                regex_match = re.match(f".*{checkpoint_prefix}-([0-9]+)", path)
                if regex_match and regex_match.groups():
                    ordering_and_checkpoint_path.append((int(regex_match.groups()[0]), path))
        checkpoints_sorted = sorted(ordering_and_checkpoint_path)
        checkpoints_sorted = [checkpoint[1] for checkpoint in checkpoints_sorted]
        return checkpoints_sorted
    def _rotate_checkpoints(self, use_mtime=False) -> None:
        """Delete the oldest checkpoints so at most gen_save_total_limit remain."""
        if self.args.gen_save_total_limit is None or self.args.gen_save_total_limit <= 0:
            return
        # Check if we should delete older checkpoint(s)
        checkpoints_sorted = self._sorted_checkpoints(use_mtime=use_mtime)
        if len(checkpoints_sorted) <= self.args.gen_save_total_limit:
            return
        number_of_checkpoints_to_delete = max(0, len(checkpoints_sorted) - self.args.gen_save_total_limit)
        checkpoints_to_be_deleted = checkpoints_sorted[:number_of_checkpoints_to_delete]
        for checkpoint in checkpoints_to_be_deleted:
            logger.info("Deleting older checkpoint [{}] due to args.save_total_limit".format(checkpoint))
            shutil.rmtree(checkpoint)
    def evaluate(
        self, eval_dataset: Optional[Dataset] = None, prediction_loss_only: Optional[bool] = None, snow_ball: Optional[bool] = False
    ) -> Dict[str, float]:
        """
        Run evaluation and return metrics.
        The calling script will be responsible for providing a method to compute metrics, as they are
        task-dependent.
        Args:
            eval_dataset: (Optional) Pass a dataset if you wish to override
                the one on the instance.
        Returns:
            A dict containing:
                - the eval loss
                - the potential metrics computed from the predictions
        """
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        output = self._prediction_loop(eval_dataloader, description="Evaluation", prediction_loss_only = prediction_loss_only, snow_ball=snow_ball)
        self._log(output.metrics)
        if self.args.gen_tpu_metrics_debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())
        return output.metrics
    def predict(self, test_dataset: Dataset, mode_name = 'test') -> PredictionOutput:
        """
        Run prediction and return predictions and potential metrics.
        Depending on the dataset and your use case, your test dataset may contain labels.
        In that case, this method will also return metrics, like in evaluate().
        """
        test_dataloader = self.get_test_dataloader(test_dataset)
        output = self._prediction_loop(test_dataloader, description="Prediction", mode_name=mode_name)
        return output
    def _prediction_loop(
        self, dataloader: DataLoader, description: str, prediction_loss_only: Optional[bool] = None, snow_ball: Optional[bool] = False, mode_name: Optional[str] = 'eval'
    ) -> PredictionOutput:
        """
        Prediction/evaluation loop, shared by `evaluate()` and `predict()`.
        Works both with or without labels.
        NOTE: One issue is on the size of prediction and labels.
        For current code, it considers all the prediction and labels in different batch have same length of sequence.
        This is not true for our application. To make this more general, I will reformat the predictions and labels.
        Predictions/labels are therefore stored flattened (1-D) alongside per-example
        size tensors, and re-split downstream by the metrics scorer.
        """
        prediction_loss_only = prediction_loss_only if prediction_loss_only is not None else self.prediction_loss_only
        model = self.model
        # multi-gpu eval
        if self.args.n_gpu > 1:
            model = torch.nn.DataParallel(model)
        else:
            model = self.model
        # Note: in torch.distributed mode, there's no point in wrapping the model
        # inside a DistributedDataParallel as we'll be under `no_grad` anyways.
        batch_size = dataloader.batch_size
        logger.info("***** Running {}: {} *****".format(description, mode_name))
        logger.info("  Num examples = %d", self.num_examples(dataloader))
        logger.info("  Batch size = %d", batch_size)
        eval_losses: List[float] = []
        logics : List[str] = []
        original_logic: List[str] = []
        probabilities: List[float] = []
        preds: torch.Tensor = None
        preds_size: torch.Tensor = None
        label_ids: torch.Tensor = None
        label_size: torch.Tensor = None
        model.eval()
        if is_torch_tpu_available():
            dataloader = pl.ParallelLoader(dataloader, [self.args.gen_device]).per_device_loader(self.args.gen_device)
        for inputs in tqdm(dataloader, desc=description):
            has_labels = any(inputs.get(k) is not None for k in ["labels", "lm_labels", "masked_lm_labels"])
            for k, v in inputs.items():
                if isinstance(v, torch.Tensor):
                    inputs[k] = v.to(self.args.device)
            # Forward the (optional) reranker so the model can rescore candidates.
            inputs['reranker'] = self.reranker
            with torch.no_grad():
                outputs = model(**inputs)
                if has_labels:
                    step_eval_loss, logits = outputs[:2]
                    eval_losses += [step_eval_loss.mean().item()]
                    logics += inputs["logics"]
                    original_logic += inputs["original_logic"]
                else:
                    logits = outputs[0]
                if len(logits.shape) == 1:
                    logits = logits.view(1,-1)
            if not prediction_loss_only:
                # Change the way of concat
                # We need to make sure that the size of preds and labels is (batch_size, sequence_length)
                if preds is None:
                    preds = logits.detach()
                    preds_size = preds.new_full(size=preds.size()[:1], fill_value=preds.size(1)).detach()
                    preds = preds.view(-1)
                else:
                    preds_size = torch.cat((preds_size, logits.new_full(size=logits.size()[:1], fill_value=logits.size(1)).detach()), dim=0)
                    preds = torch.cat((preds, logits.detach().view(-1)), dim=0)
                if inputs.get("labels") is not None:
                    if label_ids is None:
                        label_ids = inputs["labels"].detach()
                        label_size = label_ids.new_full(size=label_ids.size()[:1], fill_value=label_ids.size(1)).detach()
                        label_ids = label_ids.view(-1)
                    else:
                        label_size = torch.cat((label_size, inputs["labels"].new_full(size=inputs["labels"].size()[:1], fill_value=inputs["labels"].size(1)).detach()), dim=0)
                        label_ids = torch.cat((label_ids, inputs["labels"].detach().view(-1)), dim=0)
        if self.args.local_rank != -1:
            # In distributed mode, concatenate all results from all nodes:
            if preds is not None:
                # preds = self.distributed_concat(preds, num_total_examples=self.num_examples(dataloader))
                preds, preds_size = self.distributed_concat_with_size(preds, preds_size, num_total_examples=self.num_examples(dataloader))
            if label_ids is not None:
                # label_ids = self.distributed_concat(label_ids, num_total_examples=self.num_examples(dataloader))
                label_ids, label_size = self.distributed_concat_with_size(label_ids, label_size, num_total_examples=self.num_examples(dataloader))
        elif is_torch_tpu_available():
            # tpu-comment: Get all predictions and labels from all worker shards of eval dataset
            # NOTE: We do not modify this for now.
            if preds is not None:
                preds = xm.mesh_reduce("eval_preds", preds, torch.cat)
            if label_ids is not None:
                label_ids = xm.mesh_reduce("eval_label_ids", label_ids, torch.cat)
        # Finally, turn the aggregated tensors into numpy arrays.
        if preds is not None:
            preds = preds.cpu().numpy()
            preds_size = preds_size.cpu().numpy()
        if label_ids is not None:
            label_ids = label_ids.cpu().numpy()
            label_size = label_size.cpu().numpy()
        if self.compute_metrics is not None and preds is not None and label_ids is not None:
            metrics = self.compute_metrics(EvalPredictionWithSize(predictions=preds, predictions_size=preds_size, label_ids=label_ids, label_size=label_size , logics=logics, original_logic=original_logic), epoch = self.epoch, snow_ball=snow_ball, mode_name=mode_name)
        else:
            metrics = {}
        if len(eval_losses) > 0:
            metrics["eval_loss"] = np.mean(eval_losses)
        # Prefix all keys with eval_
        for key in list(metrics.keys()):
            if not key.startswith("eval_"):
                metrics[f"eval_{key}"] = metrics.pop(key)
        # NOTE(review): message says "Batch size" but logs the predictions array —
        # looks like leftover debug logging.
        logger.info("  Batch size = %s", preds)
        # return PredictionOutput(predictions=preds, label_ids=label_ids, metrics=metrics)
        return PredictionOutputWithSize(predictions=preds, predictions_size=preds_size, label_ids=label_ids, label_size=label_size, metrics=metrics)
    def distributed_concat(self, tensor: torch.Tensor, num_total_examples: int) -> torch.Tensor:
        """All-gather equal-sized tensors across ranks and trim sampler padding."""
        assert self.args.local_rank != -1
        output_tensors = [tensor.clone() for _ in range(torch.distributed.get_world_size())]
        torch.distributed.all_gather(output_tensors, tensor)
        concat = torch.cat(output_tensors, dim=0)
        # truncate the dummy elements added by SequentialDistributedSampler
        output = concat[:num_total_examples]
        return output
    def distributed_concat_tensor(self, tensor: torch.Tensor):
        """All-gather equal-sized tensors across ranks without trimming."""
        assert self.args.local_rank != -1
        output_tensors = [tensor.clone() for _ in range(torch.distributed.get_world_size())]
        torch.distributed.all_gather(output_tensors, tensor)
        concat = torch.cat(output_tensors, dim=0)
        return concat
    def distributed_concat_varsize_tensor(self, tensor: torch.Tensor):
        """All-gather 1-D tensors of differing lengths by padding to the max length,
        gathering, then slicing each rank's true-size segment back out."""
        assert self.args.local_rank != -1
        sizes = self.distributed_concat_tensor(tensor.new_full(size=(1,), fill_value=tensor.size(0)))
        max_size = sizes.max().item()
        padded = tensor.new_zeros(max_size)
        padded[:tensor.size(0)] = tensor
        padded_agg = self.distributed_concat_tensor(padded)
        slices = []
        for i, size in enumerate(sizes):
            start_idx = i * max_size
            end_idx = start_idx + size.item()
            slices.append(padded_agg[start_idx: end_idx])
        ret = torch.cat(slices, dim=0)
        return ret
    def distributed_concat_with_size(self, tensor: torch.Tensor, size: torch.Tensor, num_total_examples: int) -> torch.Tensor:
        """Gather a flattened variable-size tensor plus its per-example size tensor
        across ranks; returns (concatenated values, trimmed sizes)."""
        assert self.args.local_rank != -1
        # output_tensors = [tensor.clone() for _ in range(torch.distributed.get_world_size())]
        # output_sizes = [size.clone() for _ in range(torch.distributed.get_world_size())]
        # torch.distributed.all_gather(output_tensors, tensor)
        # torch.distributed.all_gather(output_sizes, size)
        # concat = torch.cat(output_tensors, dim=0)
        # concat_sizes = torch.cat(output_sizes, dim=0)
        concat_sizes = self.distributed_concat_tensor(size)
        concat = self.distributed_concat_varsize_tensor(tensor)
        output_sizes = concat_sizes[:num_total_examples]
        assert output_sizes.sum() == concat.size(0)
        return concat, output_sizes
def train_generator(config_name, data_path, preprocess_path, tokenizer, data_args,training_args, model_args, output_dump_dir, eval_dataset, test_dataset, outdomain_test_dataset, snow_ball=False, generator = None, reranker = None, augmented=False, snowball_iteration = 0, total_snowball_iteration = 1, multi_task=False):
    """Build (or reuse) a generator model, wrap it in a Generator_Trainer and run
    the requested prediction passes, returning the generator.

    Note: the actual training-dataset construction and trainer.train() call are
    currently disabled upstream; this function saves the tokenizer and runs
    in-domain / out-of-domain prediction dumps when the corresponding flags
    (`gen_do_test`, `gen_do_out_domain_test`) are set.

    Args:
        config_name: model config/checkpoint name for RelogicModel.
        tokenizer: tokenizer shared by collator, scorer and trainer.
        training_args / model_args / data_args: argument dataclasses.
        output_dump_dir: directory for checkpoints/scorer dumps.
        eval_dataset / test_dataset / outdomain_test_dataset: evaluation splits.
        generator: existing generator to reuse (rebuilt if None or refresh_model).
        reranker: optional reranker (dropped when gen_wo_gen_rerank is set).
        Remaining parameters are forwarded knobs for the (disabled) training path.
    Returns:
        The generator model (newly built or the one passed in).
    """
    #logger.info("Start trainning generator on dataset: {}".format(data_path))
    if not generator or model_args.refresh_model:
        generator = RelogicModel(config_name)
    if training_args.gen_wo_gen_rerank:
        reranker = None
    # Training-dataset construction (RelogicDataset) is currently disabled;
    # the trainer is built without a train_dataset and train() is not called.
    train_dataset = None
    data_collator = DataCollatorForRelogic(tokenizer=tokenizer)
    label_bos_id = data_collator.label_bos_id
    label_eos_id = data_collator.label_eos_id
    scorer = TextGenerationScorer(bos_id=label_bos_id, eos_id=label_eos_id, tokenizer=tokenizer, output_path=output_dump_dir)
    # Initialize our Trainer
    trainer = Generator_Trainer(
        model=generator,
        args=training_args,
        data_collator=data_collator,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=scorer,
        prediction_loss_only = False,
        model_name=training_args.gen_model,
        output_dump_dir = output_dump_dir,
        reranker = reranker
    )
    # Training (disabled): model_path would seed optimizer/scheduler reload.
    model_path = (
        model_args.model_name_or_path
        if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path)
        else None
    )
    # trainer.train(model_path=model_path)
    # trainer.save_model()
    # For convenience, we also re-save the tokenizer to the same directory,
    # so that you can share your model easily on huggingface.co/models =)
    if trainer.is_world_master():
        tokenizer.save_pretrained(output_dump_dir)
    if training_args.gen_do_test:
        output_test = trainer.predict(test_dataset=test_dataset,
                                      mode_name='test').predictions
        # FIX: dump inside the flag guard — previously this write ran
        # unconditionally and raised NameError when gen_do_test was off.
        os.makedirs('generator', exist_ok=True)
        with open('generator/test.json', 'w') as f:
            json.dump(output_test.tolist(), f)
    if training_args.gen_do_out_domain_test:
        output_out_test = trainer.predict(test_dataset=outdomain_test_dataset,
                                          mode_name='out_domain_test').predictions
        # FIX: same guard for the out-of-domain dump.
        os.makedirs('generator', exist_ok=True)
        with open('generator/out_test.json', 'w') as f:
            json.dump(output_out_test.tolist(), f)
    torch.cuda.empty_cache()
    return generator
164,346 | import logging
import math
import os
from dataclasses import dataclass, field
from typing import Optional
import torch
import json
from transformers import (
MODEL_WITH_LM_HEAD_MAPPING,
AutoTokenizer,
HfArgumentParser,
PreTrainedTokenizer,
set_seed,
)
from generator.models.relogic import RelogicModel
from generator.datasets.text_generation.relogic import RelogicDataset, DataCollatorForRelogic
from generator.scorers.text_generation import TextGenerationScorer
from generator.trainer_refer import Generator_Trainer
from generator.training_args import Generator_TrainingArguments
from evaluator.models.adversarial_evaluator import AdversarialModel
from evaluator.datasets.evaluator.adversarial import AdversarialDataset, DataCollatorForAdversarial
from evaluator.scorers.adv_eval import EvalScorer
from evaluator.trainer import Evaluator_Trainer
from evaluator.training_args import Evaluator_TrainingArguments
class AdversarialModel(nn.Module):
    """BART-based discriminator scoring whether a (logic, text) pair matches.

    forward() returns a tuple ``(loss, score)``; in eval mode the loss is
    detached so callers cannot backprop through it.
    """
    def __init__(self, config_name):
        # config_name: HuggingFace model name/path for the pretrained BART.
        super().__init__()
        self.bart = BartForSequenceClassification.from_pretrained(config_name)
        self.tokenizer = BartTokenizer.from_pretrained(config_name)
        # Marker tokens distinguishing the two data sources (SQL vs. logic
        # forms); the embedding table is resized to cover them.
        special_tokens_dict = {'additional_special_tokens': ['<SQL>', '<LOGIC>']}
        self.tokenizer.add_special_tokens(special_tokens_dict)
        self.bart.resize_token_embeddings(len(self.tokenizer))
        # NOTE(review): prelu/fc are not used by forward() as written (the
        # logits->score projection below is commented out); confirm whether
        # they are kept only for checkpoint compatibility before removing.
        self.prelu = nn.PReLU()
        self.fc = nn.Linear(3, 1)
        self.sigmoid = nn.Sigmoid()
        self.loss = nn.BCELoss()
    def label_smoothing(self, labels, epsilon=0.1):
        """Smooth hard 0/1 labels: with K=2, 1 -> 0.95 and 0 -> 0.05 at the default epsilon."""
        K = 2 # number of channels
        return ((1-epsilon) * labels) + (epsilon / K)
    def forward(self, *input, **kwargs):
        """Score a batch of padded token-id sequences.

        Expected kwargs: ``encoder_input_ids``, ``labels`` (0/1 per example),
        ``pad_token_id``.  Returns ``(loss, score)`` with score in (0, 1).
        """
        encoder_inputs = kwargs.pop("encoder_input_ids").contiguous()
        labels = kwargs.pop('labels').unsqueeze(1)
        pad_token_id = kwargs.pop("pad_token_id")
        # Attend only to non-padding positions.
        attention_mask = (encoder_inputs != pad_token_id).long()
        outputs = self.bart(encoder_inputs,
                            attention_mask=attention_mask)
        # Alternative projection head (currently disabled):
        # score = self.prelu(outputs[0])
        # score = self.fc(score)
        # score = self.sigmoid(score)
        # Collapse the classification logits to one scalar per example,
        # then squash to (0, 1) for BCE.
        score = torch.sum(outputs[0], 1).view(-1, 1)
        score = self.sigmoid(score)
        labels = labels.float()
        labels = self.label_smoothing(labels)
        loss = self.loss(score, labels)
        if self.training:
            return (loss, score)
        else:
            # Detach so eval-time callers cannot backprop through the loss.
            return (loss.detach(), score)
    def save_pretrained(self, save_directory):
        """ Save a model and its configuration file to a directory, so that it
        can be re-loaded using the `:func:`~transformers.PreTrainedModel.from_pretrained`` class method.

        Arguments:
            save_directory: directory to which to save.
        """
        assert os.path.isdir(
            save_directory
        ), "Saving path should be a directory where the model and configuration can be saved"
        # Only save the model itself if we are using distributed training
        model_to_save = self.module if hasattr(self, "module") else self
        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(save_directory, WEIGHTS_NAME)
        torch.save(model_to_save.state_dict(), output_model_file)
        logger.info("Model weights saved in {}".format(output_model_file))
class AdversarialDataset(Dataset):
    """Paired (logic, text) examples with binary match labels.

    Reads a JSON-lines file.  In training mode each line supplies an
    (original, mutated) logic/text pair from which one negative (mismatched)
    and one positive (matched) example are sampled; in ``evaluate`` mode the
    labels come straight from the file.  With ``multi_task`` the sibling
    corpus (spider <-> logic2text) is loaded as well, each corpus tagged
    with its own marker token (<SQL> / <LOGIC>).
    """
    def __init__(self, tokenizer: PreTrainedTokenizer, file_path, block_size, translated_logic=False, local_rank=-1, evaluate=False, multi_task=False):
        assert os.path.isfile(file_path)
        logger.info("Creating features from dataset file at {}".format(file_path))
        # Work out which corpus this file belongs to and which marker token
        # tags it; the "replacer" values describe the sibling corpus used
        # in multi-task mode.
        replacee = 'spider'
        replacer = 'logic2text'
        replaceetoken = ['<SQL>']
        replacertoken = ['<LOGIC>']
        if replacee not in file_path:
            replacee = 'logic2text'
            replacer = 'spider'
            replaceetoken = ['<LOGIC>']
            replacertoken = ['<SQL>']
        if multi_task:
            print('replacee: {}, replacer: {}'.format(replacee, replacer))
        # Accumulate examples here; preprocess_data() appends to this list.
        # (Previously preprocess_data() reset self.examples on every call,
        # so in multi_task mode the first corpus was silently discarded.)
        self.examples = []
        # Close input files deterministically instead of leaking them.
        with open(file_path, encoding='utf-8') as raw_file:
            self.preprocess_data(translated_logic, tokenizer, raw_file, evaluate, replaceetoken)
        if multi_task and os.path.exists(file_path.replace(replacee, replacer)):
            with open(file_path.replace(replacee, replacer), encoding='utf-8') as raw_file:
                self.preprocess_data(translated_logic, tokenizer, raw_file, evaluate, replacertoken)

    def preprocess_data(self, translated_logic, tokenizer, raw_file, evaluate, datastart):
        """Tokenize one JSON-lines file and append its examples to self.examples.

        Args:
            translated_logic: read the 'translated_sql' field instead of 'sql'.
            raw_file: open file handle yielding one JSON object per line.
            evaluate: labels come from the file instead of being sampled.
            datastart: marker-token prefix (e.g. ['<SQL>']) for this corpus.
        """
        logic_key = 'sql'
        text_key = 'question'
        if translated_logic:
            logic_key = 'translated_sql'
        # Keep appending across calls (multi-task loads several corpora);
        # only create the list if the caller has not done so already.
        if not hasattr(self, "examples"):
            self.examples = []
        add_prefix_space = isinstance(tokenizer, BartTokenizer) or isinstance(tokenizer, RobertaTokenizer)

        def encode_pair(logic, text):
            # Layout: <marker> <cls> logic <eos> text <sep>  ->  token ids.
            tokens = (datastart + [tokenizer.cls_token]
                      + tokenizer.tokenize(logic, add_prefix_space=add_prefix_space)
                      + [tokenizer.eos_token]
                      + tokenizer.tokenize(text, add_prefix_space=add_prefix_space)
                      + [tokenizer.sep_token])
            return tokenizer.convert_tokens_to_ids(tokens)

        for idx, line in tqdm(enumerate(raw_file)):
            example = json.loads(line)
            if evaluate:
                logic = example[logic_key]
                text = example[text_key]
                self.examples.append({
                    "sql_question_token_ids": encode_pair(logic, text),
                    "label": int(example['label']),
                    "logic": logic,
                    "text": text,
                    "remark": example['remark']})
            else:
                mutated_logic = example["mutated_logic"]
                mutated_text = example['mutated_text']
                original_logic = example['original_logic']
                original_text = example['original_text']
                # Negative (mismatched) example; 30% pair the mutated logic
                # with the original text, otherwise the reverse.
                if random.random() < 0.3:
                    neg_logic, neg_text = mutated_logic, original_text
                else:
                    neg_logic, neg_text = original_logic, mutated_text
                self.examples.append({
                    "sql_question_token_ids": encode_pair(neg_logic, neg_text),
                    "label": 0,
                    "logic": neg_logic,
                    "text": neg_text,
                    "remark": 'negative'})
                # Positive (matched) example; 80% original/original,
                # 20% mutated/mutated (positive up-sampling).
                if random.random() < 0.8:
                    pos_logic, pos_text = original_logic, original_text
                else:
                    pos_logic, pos_text = mutated_logic, mutated_text
                self.examples.append({
                    "sql_question_token_ids": encode_pair(pos_logic, pos_text),
                    "label": 1,
                    "logic": pos_logic,
                    "text": pos_text,
                    "remark": 'positive'})

    def __len__(self):
        return len(self.examples)

    def __getitem__(self, i):
        return self.examples[i]
class DataCollatorForAdversarial:
    """Batch collator for AdversarialDataset examples.

    Pads the concatenated logic/text token-id sequences, tensorizes the
    labels, and passes the raw strings through for later dumping.
    Dataclass-style: ``tokenizer`` is the single annotated field and
    ``__post_init__`` caches the special-token ids.
    """
    tokenizer: PreTrainedTokenizer

    def __post_init__(self):
        # Cache label BOS/EOS ids once instead of looking them up per batch.
        self.label_bos_id = self.tokenizer.cls_token_id
        self.label_eos_id = self.tokenizer.sep_token_id

    def collate_batch(self, examples):
        """Collate a list of example dicts into a model-ready batch dict."""
        sql_question_ids_sequences = [example["sql_question_token_ids"] for example in examples]
        logics = [example["logic"] for example in examples]
        texts = [example["text"] for example in examples]
        labels = [example["label"] for example in examples]
        remarks = [example["remark"] for example in examples]
        padded_sql_question_ids_tensor = pad_and_tensorize_sequence(
            sql_question_ids_sequences, padding_value=self.tokenizer.pad_token_id)
        try:
            label_tensor = pad_and_tensorize_sequence(
                labels, tensorize = True, padding_value=self.tokenizer.pad_token_id)
        except Exception:
            # Surface the offending labels for debugging, but re-raise: the
            # original bare `except: print(labels)` swallowed the error and
            # left `label_tensor` unbound, crashing with a confusing
            # NameError at the return below.
            print(labels)
            raise
        return {
            "encoder_input_ids": padded_sql_question_ids_tensor,
            "labels": label_tensor,
            "logics": logics,
            "texts": texts,
            "remarks": remarks,
            "pad_token_id": self.tokenizer.pad_token_id,
            "label_eos_id": self.label_eos_id,
            "label_bos_id": self.label_bos_id,
            "label_padding_id": self.tokenizer.pad_token_id
        }
class EvalScorer:
    """Computes classification metrics for the adversarial evaluator and
    optionally dumps per-example predictions to a JSON-lines file."""

    def __init__(self, tokenizer, bos_id, eos_id, output_path):
        # tokenizer / bos_id / eos_id are accepted for interface
        # compatibility with the generator-side scorer but are unused here.
        self.output_path = output_path

    # compute the Precision, Recall, F1 and AUC
    def compute_score(self, scores, labels, threshold=0.5):
        """Return (accuracy, precision, recall, F1, AUC).

        `scores` are sigmoid outputs thresholded at `threshold`; tensors
        are moved to CPU before handing them to sklearn.
        """
        scores = scores.cpu()
        labels = labels.cpu()
        predictions = (scores > threshold).long()
        accuracy = accuracy_score(labels, predictions)
        precision = precision_score(labels, predictions)
        recall = recall_score(labels, predictions)
        f1 = f1_score(labels, predictions)
        fpr, tpr, _ = roc_curve(labels, scores)
        auc_score = auc(fpr, tpr)
        return accuracy, precision, recall, f1, auc_score

    def __call__(self, prediction, epoch = 0, dump_output=True, mode_name='eval'):
        """Score a prediction dict; on rank 0, dump per-example rows to
        <output_path>/<mode_name>/epoch_<epoch>.json (one JSON per line)."""
        output_dir = os.path.join(self.output_path, mode_name)
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)
        output_path = output_dir + os.sep + 'epoch_{}'.format(int(epoch)) + '.json'
        # `auc_value`, not `auc`: avoid shadowing the sklearn `auc` function.
        acc, p, r, f1, auc_value = self.compute_score(prediction["pred_scores"], prediction["pred_labels"])
        if dump_output and is_rank_0():
            # `with` ensures the dump file is flushed/closed (the original
            # left `fout` open).
            with open(output_path, "w") as fout:
                for logic, text, remark, label, score in zip(
                        prediction["logics"], prediction["texts"], prediction["remarks"],
                        prediction["pred_labels"], prediction["pred_scores"]):
                    fout.write(
                        json.dumps({
                            "logic": logic,
                            "text": text,
                            "remark": remark,
                            "label": str(label.item()),
                            "score": str(score.item())
                        }) + '\n'
                    )
        return {
            "eval_accuracy": acc,
            "eval_precision": p,
            "eval_recall": r,
            "eval_F1": f1,
            "eval_AUC": auc_value
        }
class Evaluator_Trainer:
    """
    Trainer is a simple but feature-complete training and eval loop for PyTorch,
    optimized for Transformers.

    Hyper-parameters are read from Evaluator_TrainingArguments (the
    ``eval_``-prefixed fields); early stopping tracks ``self.auc_score``.
    """
    # Class-level annotations describing trainer state; actual values are
    # assigned in __init__ (or lazily during train()).
    model: PreTrainedModel
    args: Evaluator_TrainingArguments
    data_collator: DataCollator
    train_dataset: Optional[Dataset]
    eval_dataset: Optional[Dataset]
    compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None
    prediction_loss_only: bool
    tb_writer: Optional["SummaryWriter"] = None
    optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = None
    global_step: Optional[int] = None  # optimizer updates taken so far
    epoch: Optional[float] = None  # fractional epochs completed
def __init__(
    self,
    model: PreTrainedModel,
    args: Evaluator_TrainingArguments,
    data_collator: Optional[DataCollator] = None,
    train_dataset: Optional[Dataset] = None,
    eval_dataset: Optional[Dataset] = None,
    compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None,
    prediction_loss_only=False,
    tb_writer: Optional["SummaryWriter"] = None,
    optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = None,
    model_name = "",
    output_dump_dir = ''
):
    """
    Set up the evaluator trainer.

    Args:
        model: model to train; moved to ``args.device`` immediately.
        args: Evaluator_TrainingArguments carrying all ``eval_``-prefixed knobs.
        data_collator: batch collator; defaults to ``default_data_collator``.
        prediction_loss_only:
            (Optional) in evaluation and prediction, only return the loss
        model_name: bookkeeping name for the model.
        output_dump_dir: directory for checkpoints, logs and dumps.
    """
    self.model = model.to(args.device)
    self.model_name = model_name
    self.args = args
    self.data_collator = data_collator if data_collator is not None else default_data_collator
    self.train_dataset = train_dataset
    self.eval_dataset = eval_dataset
    self.compute_metrics = compute_metrics
    self.prediction_loss_only = prediction_loss_only
    self.optimizers = optimizers
    self.output_dir = output_dump_dir
    # Current AUC; train() feeds this into early stopping.
    self.auc_score = 0
    self.early_stopping = EarlyStopping(patience=7, output_dir=output_dump_dir)
    if tb_writer is not None:
        self.tb_writer = tb_writer
    elif is_tensorboard_available() and self.is_world_master():
        # Only the world-master process writes TensorBoard events.
        self.tb_writer = SummaryWriter(log_dir=self.args.eval_logging_dir)
    if not is_tensorboard_available():
        logger.warning(
            "You are instantiating a Trainer but Tensorboard is not installed. You should consider installing it."
        )
    if is_wandb_available():
        self._setup_wandb()
    else:
        logger.info(
            "You are instantiating a Trainer but W&B is not installed. To use wandb logging, "
            "run `pip install wandb; wandb login` see https://docs.wandb.com/huggingface."
        )
    set_seed(self.args.eval_seed)
    # Create output directory if needed
    if self.is_world_master():
        os.makedirs(self.output_dir, exist_ok=True)
    # Mirror all log output into <output_dir>/log.txt.
    LOG_FILE = os.path.join(self.output_dir, 'log.txt')
    file_handler = logging.FileHandler(LOG_FILE)
    file_handler.setLevel(level=logging.DEBUG)
    logger.addHandler(file_handler)
    if is_torch_tpu_available():
        # Set an xla_device flag on the model's config.
        # We'll find a more elegant and not need to do this in the future.
        self.model.config.xla_device = True
    # Back-compat: accept collators exposing the deprecated collate_batch().
    if not callable(self.data_collator) and callable(getattr(self.data_collator, "collate_batch", None)):
        self.data_collator = self.data_collator.collate_batch
        warnings.warn(
            (
                "The `data_collator` should now be a simple callable (function, class with `__call__`), classes "
                + "with a `collate_batch` are deprecated and won't be supported in a future version."
            ),
            FutureWarning,
        )
def get_train_dataloader(self) -> DataLoader:
    """Build the training DataLoader (TPU-, DDP- and single-process aware)."""
    if self.train_dataset is None:
        raise ValueError("Trainer: training requires a train_dataset.")
    # Pick the sampler for the current execution mode.
    if is_torch_tpu_available():
        sampler = get_tpu_sampler(self.train_dataset)
    elif self.args.eval_local_rank == -1:
        sampler = RandomSampler(self.train_dataset)
    else:
        sampler = DistributedSampler(self.train_dataset)
    return DataLoader(
        self.train_dataset,
        batch_size=self.args.train_batch_size,
        sampler=sampler,
        collate_fn=self.data_collator,
        drop_last=self.args.eval_dataloader_drop_last,
    )
def get_eval_dataloader(self, eval_dataset: Optional[Dataset] = None) -> DataLoader:
    """Build a sequential evaluation DataLoader over `eval_dataset`
    (falls back to the dataset supplied at construction time)."""
    if eval_dataset is None:
        if self.eval_dataset is None:
            raise ValueError("Trainer: evaluation requires an eval_dataset.")
        eval_dataset = self.eval_dataset
    # Sequential sampling: distributed variant on TPU / DDP, plain otherwise.
    if is_torch_tpu_available():
        sampler = SequentialDistributedSampler(
            eval_dataset, num_replicas=xm.xrt_world_size(), rank=xm.get_ordinal()
        )
    elif self.args.eval_local_rank != -1:
        sampler = SequentialDistributedSampler(eval_dataset)
    else:
        sampler = SequentialSampler(eval_dataset)
    return DataLoader(
        eval_dataset,
        sampler=sampler,
        batch_size=self.args.eval_batch_size,
        collate_fn=self.data_collator,
        drop_last=self.args.eval_dataloader_drop_last,
    )
def get_test_dataloader(self, test_dataset: Dataset) -> DataLoader:
    """Build a sequential DataLoader over `test_dataset`.

    Uses the same batch size and sampling strategy as evaluation.
    """
    if is_torch_tpu_available():
        test_sampler = SequentialDistributedSampler(
            test_dataset, num_replicas=xm.xrt_world_size(), rank=xm.get_ordinal()
        )
    elif self.args.eval_local_rank != -1:
        test_sampler = SequentialDistributedSampler(test_dataset)
    else:
        test_sampler = SequentialSampler(test_dataset)
    return DataLoader(
        test_dataset,
        sampler=test_sampler,
        batch_size=self.args.eval_batch_size,
        collate_fn=self.data_collator,
        drop_last=self.args.eval_dataloader_drop_last,
    )
def get_optimizers(
    self, num_training_steps: int
) -> Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]:
    """
    Setup the optimizer and the learning rate scheduler (AdamW + linear
    warmup/decay over ``num_training_steps``).

    We provide a reasonable default that works well.
    If you want to use something else, you can pass a tuple in the Trainer's init,
    or override this method in a subclass.
    """
    if self.optimizers is not None:
        return self.optimizers
    # Prepare optimizer and schedule (linear warmup and decay);
    # biases and LayerNorm weights are excluded from weight decay.
    no_decay = ["bias", "LayerNorm.weight"]
    optimizer_grouped_parameters = [
        {
            "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
            "weight_decay": self.args.eval_weight_decay,
        },
        {
            "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
            "weight_decay": 0.0,
        },
    ]
    optimizer = AdamW(optimizer_grouped_parameters, lr=self.args.eval_learning_rate, eps=self.args.eval_adam_epsilon)
    scheduler = get_linear_schedule_with_warmup(
        optimizer, num_warmup_steps=self.args.eval_warmup_steps, num_training_steps=num_training_steps
    )
    return optimizer, scheduler
def _setup_wandb(self):
    """
    Setup the optional Weights & Biases (`wandb`) integration.

    One can override this method to customize the setup if needed. Find more information at https://docs.wandb.com/huggingface
    You can also override the following environment variables:

    Environment:
        WANDB_WATCH:
            (Optional, ["gradients", "all", "false"]) "gradients" by default, set to "false" to disable gradient logging
            or "all" to log gradients and parameters
        WANDB_PROJECT:
            (Optional): str - "huggingface" by default, set this to a custom string to store results in a different project
        WANDB_DISABLED:
            (Optional): boolean - defaults to false, set to "true" to disable wandb entirely
    """
    # Only the world-master process initializes wandb.
    if self.is_world_master():
        logger.info(
            'Automatic Weights & Biases logging enabled, to disable set os.environ["WANDB_DISABLED"] = "true"'
        )
        wandb.init(project=os.getenv("WANDB_PROJECT", "huggingface"), config=vars(self.args))
        # keep track of model topology and gradients, unsupported on TPU
        if not is_torch_tpu_available() and os.getenv("WANDB_WATCH") != "false":
            wandb.watch(
                self.model, log=os.getenv("WANDB_WATCH", "gradients"), log_freq=max(100, self.args.eval_logging_steps)
            )
def num_examples(self, dataloader: DataLoader) -> int:
    """Number of examples in `dataloader`, read from its underlying Dataset."""
    dataset = dataloader.dataset
    return len(dataset)
def train(self, model_path: Optional[str] = None):
    """
    Main training entry point.

    Args:
        model_path:
            (Optional) Local path to model if model to train has been instantiated from a local path
            If present, we will try reloading the optimizer/scheduler states from there.

    Runs the full loop: gradient accumulation, clipping, linear LR schedule,
    periodic logging/eval/checkpointing, and AUC-based early stopping.
    Returns a TrainOutput(global_step, average training loss).
    """
    train_dataloader = self.get_train_dataloader()
    # Derive total optimizer steps / epochs from max_steps or num_train_epochs.
    if self.args.eval_max_steps > 0:
        t_total = self.args.eval_max_steps
        num_train_epochs = (
            self.args.eval_max_steps // (len(train_dataloader) // self.args.eval_gradient_accumulation_steps) + 1
        )
    else:
        t_total = int(len(train_dataloader) // self.args.eval_gradient_accumulation_steps * self.args.eval_num_train_epochs)
        num_train_epochs = self.args.eval_num_train_epochs
    optimizer, scheduler = self.get_optimizers(num_training_steps=t_total)
    # Check if saved optimizer or scheduler states exist
    if (
        model_path is not None
        and os.path.isfile(os.path.join(model_path, "optimizer.pt"))
        and os.path.isfile(os.path.join(model_path, "scheduler.pt"))
    ):
        # Load in optimizer and scheduler states
        optimizer.load_state_dict(
            torch.load(os.path.join(model_path, "optimizer.pt"), map_location=self.args.eval_device)
        )
        scheduler.load_state_dict(torch.load(os.path.join(model_path, "scheduler.pt")))
    model = self.model
    if self.args.eval_fp16:
        if not is_apex_available():
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
        model, optimizer = amp.initialize(model, optimizer, opt_level=self.args.eval_fp16_opt_level)
    # multi-gpu training (should be after apex fp16 initialization)
    if self.args.n_gpu > 1:
        model = torch.nn.DataParallel(model)
    # Distributed training (should be after apex fp16 initialization)
    if self.args.eval_local_rank != -1:
        model = torch.nn.parallel.DistributedDataParallel(
            model,
            device_ids=[self.args.eval_local_rank],
            output_device=self.args.eval_local_rank,
            find_unused_parameters=True,
        )
    if self.tb_writer is not None:
        self.tb_writer.add_text("args", self.args.eval_to_json_string())
        self.tb_writer.add_hparams(self.args.eval_to_sanitized_dict(), metric_dict={})
    # Train!
    if is_torch_tpu_available():
        total_train_batch_size = self.args.eval_train_batch_size * xm.xrt_world_size()
    else:
        total_train_batch_size = (
            self.args.train_batch_size
            * self.args.eval_gradient_accumulation_steps
            * (torch.distributed.get_world_size() if self.args.eval_local_rank != -1 else 1)
        )
    logger.info("***** Running training *****")
    logger.info(" Num examples = %d", self.num_examples(train_dataloader))
    logger.info(" Num Epochs = %d", num_train_epochs)
    logger.info(" Instantaneous batch size per device = %d", self.args.eval_per_device_train_batch_size)
    logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d", total_train_batch_size)
    logger.info(" Gradient Accumulation steps = %d", self.args.eval_gradient_accumulation_steps)
    logger.info(" Total optimization steps = %d", t_total)
    self.global_step = 0
    self.epoch = 0
    epochs_trained = 0
    steps_trained_in_current_epoch = 0
    # Check if continuing training from a checkpoint
    if model_path is not None:
        # set global_step to global_step of last saved checkpoint from model path
        try:
            self.global_step = int(model_path.split("-")[-1].split("/")[0])
            epochs_trained = self.global_step // (len(train_dataloader) // self.args.eval_gradient_accumulation_steps)
            steps_trained_in_current_epoch = self.global_step % (
                len(train_dataloader) // self.args.eval_gradient_accumulation_steps
            )
            logger.info(" Continuing training from checkpoint, will skip to saved global_step")
            logger.info(" Continuing training from epoch %d", epochs_trained)
            logger.info(" Continuing training from global step %d", self.global_step)
            logger.info(" Will skip the first %d steps in the first epoch", steps_trained_in_current_epoch)
        except ValueError:
            self.global_step = 0
            logger.info(" Starting fine-tuning.")
    tr_loss = 0.0
    logging_loss = 0.0
    model.zero_grad()
    train_iterator = trange(
        epochs_trained, int(num_train_epochs), desc="Epoch", disable=not self.is_local_master() or not self.args.eval_logging_tqdm
    )
    for epoch in train_iterator:
        if isinstance(train_dataloader, DataLoader) and isinstance(train_dataloader.sampler, DistributedSampler):
            # Re-shuffle deterministically per epoch in distributed mode.
            train_dataloader.sampler.set_epoch(epoch)
        if is_torch_tpu_available():
            parallel_loader = pl.ParallelLoader(train_dataloader, [self.args.eval_device]).per_device_loader(
                self.args.eval_device
            )
            epoch_iterator = tqdm(parallel_loader, desc="Iteration", disable=not self.is_local_master() or not self.args.eval_logging_tqdm)
        else:
            epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=not self.is_local_master() or not self.args.eval_logging_tqdm)
        for step, inputs in enumerate(epoch_iterator):
            # Skip past any already trained steps if resuming training
            if steps_trained_in_current_epoch > 0:
                steps_trained_in_current_epoch -= 1
                continue
            tr_loss += self._training_step(model, inputs, optimizer)
            if (step + 1) % self.args.eval_gradient_accumulation_steps == 0 or (
                # last step in epoch but step is always smaller than gradient_accumulation_steps
                len(epoch_iterator) <= self.args.eval_gradient_accumulation_steps
                and (step + 1) == len(epoch_iterator)
            ):
                if self.args.eval_fp16:
                    torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), self.args.eval_max_grad_norm)
                else:
                    torch.nn.utils.clip_grad_norm_(model.parameters(), self.args.eval_max_grad_norm)
                if is_torch_tpu_available():
                    xm.optimizer_step(optimizer)
                else:
                    optimizer.step()
                scheduler.step()
                model.zero_grad()
                self.global_step += 1
                # Track fractional progress through the epoch.
                self.epoch = epoch + (step + 1) / len(epoch_iterator)
                if (self.args.eval_logging_steps > 0 and self.global_step % self.args.eval_logging_steps == 0) or (
                    self.global_step == 1 and self.args.eval_logging_first_step
                ):
                    logs: Dict[str, float] = {}
                    logs["loss"] = (tr_loss - logging_loss) / self.args.eval_logging_steps
                    # backward compatibility for pytorch schedulers
                    logs["learning_rate"] = (
                        scheduler.get_last_lr()[0]
                        if version.parse(torch.__version__) >= version.parse("1.4")
                        else scheduler.get_lr()[0]
                    )
                    logging_loss = tr_loss
                    self._log(logs)
                # Epoch-based evaluation cadence.
                if (self.args.eval_eval_epochs > 0 and self.epoch % self.args.eval_eval_epochs == 0):
                    if self.args.eval_evaluate_during_training:
                        self.evaluate()
                # Every 500 optimizer steps: dump eval output and feed the
                # tracked AUC to early stopping.
                if self.global_step % 500 == 0:
                    if self.args.eval_evaluate_during_training:
                        self.evaluate(dump_output=True)
                    self.early_stopping(self.auc_score, self.model)
                    # early stopping
                    if self.early_stopping.early_stop:
                        logger.info("Early stopping with the AUC: {}".format(self.auc_score))
                        break
                if self.args.eval_save_epochs > 0 and self.epoch % self.args.eval_save_epochs == 0:
                    # In all cases (even distributed/parallel), self.model is always a reference
                    # to the model we want to save.
                    if hasattr(model, "module"):
                        assert model.module is self.model
                    else:
                        assert model is self.model
                    # Save model checkpoint
                    output_dir = os.path.join(self.output_dir, f"{PREFIX_CHECKPOINT_DIR}-epoch-{self.epoch}")
                    self.save_model(output_dir)
                    if self.is_world_master():
                        self._rotate_checkpoints()
                    if is_torch_tpu_available():
                        xm.rendezvous("saving_optimizer_states")
                        xm.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
                        xm.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
                    elif self.is_world_master():
                        torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
                        torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
            if self.args.eval_max_steps > 0 and self.global_step > self.args.eval_max_steps:
                epoch_iterator.close()
                break
        # load model from early stopping checkpoint
        # NOTE(review): this runs at the end of EVERY epoch and assumes
        # <output_dir>/checkpoint.pt already exists (written by
        # EarlyStopping) — confirm the intended placement; it looks like it
        # should run only after early stopping actually triggers.
        self.model.load_state_dict(torch.load(os.path.join(self.output_dir, 'checkpoint.pt')))
        if self.args.eval_max_steps > 0 and self.global_step > self.args.eval_max_steps:
            train_iterator.close()
            break
        if self.args.eval_tpu_metrics_debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())
    if self.tb_writer:
        self.tb_writer.close()
    logger.info("\n\nTraining completed. Do not forget to share your model on huggingface.co/models =)\n\n")
    return TrainOutput(self.global_step, tr_loss / self.global_step)
def _log(self, logs: Dict[str, float], iterator: Optional[tqdm] = None) -> None:
    """Send `logs` to TensorBoard/W&B and the logger (or the tqdm iterator).

    Non-numeric values are dropped from TensorBoard with a warning; the
    current epoch and global step are attached automatically.
    """
    if self.epoch is not None:
        logs["epoch"] = self.epoch
    if self.global_step is None:
        # when logging evaluation metrics without training
        self.global_step = 0
    if self.tb_writer:
        for k, v in logs.items():
            if isinstance(v, (int, float)):
                self.tb_writer.add_scalar(k, v, self.global_step)
            else:
                logger.warning(
                    "Trainer is attempting to log a value of "
                    '"%s" of type %s for key "%s" as a scalar. '
                    "This invocation of Tensorboard's writer.add_scalar() "
                    "is incorrect so we dropped this attribute.",
                    v,
                    type(v),
                    k,
                )
        self.tb_writer.flush()
    if is_wandb_available():
        # Only the world-master process reports to W&B.
        if self.is_world_master():
            wandb.log(logs, step=self.global_step)
    output = {**logs, **{"step": self.global_step}}
    if iterator is not None:
        iterator.write(output)
    else:
        logger.info(output)
def _training_step(
    self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]], optimizer: torch.optim.Optimizer
) -> float:
    """Run one forward/backward pass and return the loss value as a float.

    Does NOT step the optimizer; gradient accumulation and the update
    itself are handled by train().
    """
    model.train()
    # NOTE(review): tensors are moved to self.args.device here while most
    # other methods use self.args.eval_device — confirm both name the same
    # device on Evaluator_TrainingArguments.
    for k, v in inputs.items():
        if isinstance(v, torch.Tensor):
            inputs[k] = v.to(self.args.device)
    outputs = model(**inputs)
    loss = outputs[0] # model outputs are always tuple in transformers (see doc)
    if self.args.n_gpu > 1:
        loss = loss.mean() # mean() to average on multi-gpu parallel training
    if self.args.eval_gradient_accumulation_steps > 1:
        # Scale so the accumulated gradient matches the full-batch gradient.
        loss = loss / self.args.eval_gradient_accumulation_steps
    if self.args.eval_fp16:
        with amp.scale_loss(loss, optimizer) as scaled_loss:
            scaled_loss.backward()
    else:
        loss.backward()
    return loss.item()
def is_local_master(self) -> bool:
    """True for the rank-0 process on this machine (or when not distributed)."""
    if is_torch_tpu_available():
        return xm.is_master_ordinal(local=True)
    return self.args.eval_local_rank in [-1, 0]
def is_world_master(self) -> bool:
    """
    This will be True only in one process, even in distributed mode,
    even when training on multiple machines.
    """
    if is_torch_tpu_available():
        return xm.is_master_ordinal(local=False)
    return self.args.eval_local_rank == -1 or torch.distributed.get_rank() == 0
def save_model(self, output_dir: Optional[str] = None):
    """
    Saving best-practices: if you use default names for the model,
    you can reload it using from_pretrained().

    Will only save from the world_master process (unless in TPUs).
    """
    if is_torch_tpu_available():
        self._save_tpu(output_dir)
        return
    if self.is_world_master():
        self._save(output_dir)
def _save_tpu(self, output_dir: Optional[str] = None):
    """TPU variant of _save: only the master ordinal touches the filesystem,
    and all processes synchronize before the checkpoint is written."""
    output_dir = output_dir if output_dir is not None else self.output_dir
    logger.info("Saving model checkpoint to %s", output_dir)
    if xm.is_master_ordinal():
        os.makedirs(output_dir, exist_ok=True)
        # Keep the training arguments next to the weights.
        torch.save(self.args, os.path.join(output_dir, "training_args.bin"))
    # Save a trained model and configuration using `save_pretrained()`.
    # They can then be reloaded using `from_pretrained()`
    if not isinstance(self.model, PreTrainedModel):
        raise ValueError("Trainer.model appears to not be a PreTrainedModel")
    # Barrier: all ordinals must arrive before the save proceeds.
    xm.rendezvous("saving_checkpoint")
    self.model.save_pretrained(output_dir)
def _save(self, output_dir: Optional[str] = None):
    """Write model weights and training args into `output_dir`
    (defaults to self.output_dir)."""
    target_dir = self.output_dir if output_dir is None else output_dir
    os.makedirs(target_dir, exist_ok=True)
    logger.info("Saving model checkpoint to %s", target_dir)
    # The model implements save_pretrained() itself; no PreTrainedModel
    # type check is performed here.
    self.model.save_pretrained(target_dir)
    # Good practice: save your training arguments together with the trained model
    torch.save(self.args, os.path.join(target_dir, "training_args.bin"))
def _sorted_checkpoints(self, checkpoint_prefix=PREFIX_CHECKPOINT_DIR, use_mtime=False) -> List[str]:
    """Return checkpoint directories under self.output_dir, oldest first.

    Ordered by modification time when `use_mtime`, otherwise by the numeric
    suffix of the directory name.
    """
    ordering_and_checkpoint_path = []
    glob_checkpoints = [str(x) for x in Path(self.output_dir).glob(f"{checkpoint_prefix}-*")]
    # train() saves checkpoints as f"{prefix}-epoch-{epoch}" where epoch can
    # be fractional; accept both plain "-<n>" and "-epoch-<n.m>" suffixes.
    # The original pattern only matched an integer suffix, so rotation never
    # recognized (and never deleted) the "-epoch-" checkpoints.
    pattern = re.compile(f".*{checkpoint_prefix}-(?:epoch-)?([0-9]+(?:\\.[0-9]+)?)$")
    for path in glob_checkpoints:
        if use_mtime:
            ordering_and_checkpoint_path.append((os.path.getmtime(path), path))
        else:
            regex_match = pattern.match(path)
            if regex_match and regex_match.groups():
                ordering_and_checkpoint_path.append((float(regex_match.groups()[0]), path))
    checkpoints_sorted = sorted(ordering_and_checkpoint_path)
    return [checkpoint[1] for checkpoint in checkpoints_sorted]
def _rotate_checkpoints(self, use_mtime=False) -> None:
    """Delete the oldest checkpoints beyond args.eval_save_total_limit."""
    limit = self.args.eval_save_total_limit
    if limit is None or limit <= 0:
        return
    # Check if we should delete older checkpoint(s)
    checkpoints_sorted = self._sorted_checkpoints(use_mtime=use_mtime)
    excess = len(checkpoints_sorted) - limit
    if excess <= 0:
        return
    for stale_checkpoint in checkpoints_sorted[:excess]:
        logger.info("Deleting older checkpoint [{}] due to args.save_total_limit".format(stale_checkpoint))
        shutil.rmtree(stale_checkpoint)
def evaluate(
    self, eval_dataset: Optional[Dataset] = None, prediction_loss_only: Optional[bool] = None, dump_output:bool = True
) -> Dict[str, float]:
    """
    Run evaluation and return metrics.

    The calling script will be responsible for providing a method to compute metrics, as they are
    task-dependent.

    Args:
        eval_dataset: (Optional) Pass a dataset if you wish to override
            the one on the instance.
        dump_output: forwarded to the prediction loop; when True, per-example
            outputs are dumped by the scorer.

    Returns:
        A dict containing:
            - the eval loss
            - the potential metrics computed from the predictions
    """
    eval_dataloader = self.get_eval_dataloader(eval_dataset)
    output = self._prediction_loop(eval_dataloader, description="Evaluation", prediction_loss_only = prediction_loss_only, dump_output=dump_output)
    self._log(output.metrics)
    if self.args.eval_tpu_metrics_debug:
        # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
        xm.master_print(met.metrics_report())
    return output.metrics
def predict(self, test_dataset: Dataset, mode_name='test') -> Dict[str, float]:
    """
    Run prediction over `test_dataset` and return the computed metrics.

    Depending on the dataset and your use case, your test dataset may contain labels.
    In that case, this method will also return metrics, like in evaluate().

    Note: despite the name, this returns ``output.metrics`` (a metrics
    dict), not the raw predictions — the previous ``PredictionOutput``
    return annotation did not match the actual return value.
    """
    test_dataloader = self.get_test_dataloader(test_dataset)
    output = self._prediction_loop(test_dataloader, description="Prediction", mode_name=mode_name)
    self._log(output.metrics)
    return output.metrics
    def _prediction_loop(
        self, dataloader: DataLoader, description: str, prediction_loss_only: Optional[bool] = None, dump_output:bool = True, mode_name: str ='pred'
    ) -> PredictionOutput:
        """
        Prediction/evaluation loop, shared by `evaluate()` and `predict()`.
        Works both with or without labels.
        NOTE: One issue is on the size of prediction and labels.
        For current code, it considers all the prediction and labels in different batch have same length of sequence.
        This is not true for our application. To make this more general, I will reformat the predictions and labels.
        """
        prediction_loss_only = prediction_loss_only if prediction_loss_only is not None else self.prediction_loss_only
        model = self.model
        # multi-gpu eval
        if self.args.n_gpu > 1:
            model = torch.nn.DataParallel(model)
        else:
            model = self.model
        # Note: in torch.distributed mode, there's no point in wrapping the model
        # inside a DistributedDataParallel as we'll be under `no_grad` anyways.
        batch_size = dataloader.batch_size
        logger.info("***** Running {}: {} *****".format(description, mode_name))
        logger.info("  Num examples = %d", self.num_examples(dataloader))
        logger.info("  Batch size = %d", batch_size)
        # Accumulators across the whole eval set.
        eval_losses: List[float] = []
        logics: List[str] = []            # per-example logic strings (metadata for the scorer)
        pred_scores: torch.Tensor = None  # flattened model scores, grown batch by batch
        pred_labels: torch.Tensor = None  # flattened gold labels, grown batch by batch
        texts: List[str] = []
        remarks: List[str] = []
        probabilities: List[float] = []   # NOTE(review): declared but never populated below
        # NOTE(review): preds/label_ids (+ their sizes) are never assigned in this
        # loop, so the distributed/TPU aggregation branches below are effectively
        # no-ops and the returned PredictionOutputWithSize carries None predictions.
        preds: torch.Tensor = None
        preds_size: torch.Tensor = None
        label_ids: torch.Tensor = None
        label_size: torch.Tensor = None
        model.eval()
        if is_torch_tpu_available():
            dataloader = pl.ParallelLoader(dataloader, [self.args.eval_device]).per_device_loader(self.args.eval_device)
        for inputs in tqdm(dataloader, desc=description):
            has_labels = any(inputs.get(k) is not None for k in ["labels", "lm_labels", "masked_lm_labels"])
            # Move tensor inputs to the eval device; non-tensor metadata stays as-is.
            for k, v in inputs.items():
                if isinstance(v, torch.Tensor):
                    inputs[k] = v.to(self.args.device)
            with torch.no_grad():
                outputs = model(**inputs)
                if has_labels:
                    step_eval_loss, logits = outputs[:2]
                    eval_losses += [step_eval_loss.mean().item()]
            # Collect per-example metadata for the scorer.
            logics += inputs["logics"]
            texts += inputs["texts"]
            remarks += inputs["remarks"]
            # Flatten and accumulate scores/labels across batches.
            # NOTE(review): `logits` is only bound when has_labels is True — this
            # assumes every eval batch carries labels; confirm against the collator.
            if pred_labels is None:
                pred_labels= inputs["labels"].detach().view(-1)
                pred_scores = logits.detach().view(-1)
            else:
                pred_labels = torch.cat((pred_labels, inputs["labels"].detach().view(-1)), dim=0)
                pred_scores = torch.cat((pred_scores, logits.detach().view(-1)), dim=0)
        if self.args.eval_local_rank != -1:
            # In distributed mode, concatenate all results from all nodes:
            if preds is not None:
                preds, preds_size = self.distributed_concat_with_size(preds, preds_size, num_total_examples=self.num_examples(dataloader))
            if label_ids is not None:
                label_ids, label_size = self.distributed_concat_with_size(label_ids, label_size, num_total_examples=self.num_examples(dataloader))
        elif is_torch_tpu_available():
            # tpu-comment: Get all predictions and labels from all worker shards of eval dataset
            # NOTE: We do not modify this for now.
            if preds is not None:
                preds = xm.mesh_reduce("eval_preds", preds, torch.cat)
            if label_ids is not None:
                label_ids = xm.mesh_reduce("eval_label_ids", label_ids, torch.cat)
        # Finally, turn the aggregated tensors into numpy arrays.
        if preds is not None:
            preds = preds.cpu().numpy()
            preds_size = preds_size.cpu().numpy()
        if label_ids is not None:
            label_ids = label_ids.cpu().numpy()
            label_size = label_size.cpu().numpy()
        if self.compute_metrics is not None:
            # Hand everything to the scorer (an EvalScorer instance).
            eval_predictions ={
                "pred_scores": pred_scores,
                "pred_labels": pred_labels,
                "logics" : logics,
                "texts"  : texts,
                "remarks" : remarks
            }
            metrics = self.compute_metrics(eval_predictions, epoch = self.epoch, dump_output=dump_output, mode_name=mode_name)
            # Cache AUC so other components (e.g. checkpoint selection) can read it.
            self.auc_score = float(metrics['eval_AUC'])
        else:
            metrics = {}
        if len(eval_losses) > 0:
            metrics["eval_loss"] = np.mean(eval_losses)
        # Prefix all keys with eval_
        for key in list(metrics.keys()):
            if not key.startswith("eval_"):
                metrics[f"eval_{key}"] = metrics.pop(key)
        return PredictionOutputWithSize(predictions=preds, predictions_size=preds_size, label_ids=label_ids, label_size=label_size, metrics=metrics)
def distributed_concat(self, tensor: torch.Tensor, num_total_examples: int) -> torch.Tensor:
assert self.args.eval_local_rank != -1
output_tensors = [tensor.clone() for _ in range(torch.distributed.get_world_size())]
torch.distributed.all_gather(output_tensors, tensor)
concat = torch.cat(output_tensors, dim=0)
# truncate the dummy elements added by SequentialDistributedSampler
output = concat[:num_total_examples]
return output
def distributed_concat_tensor(self, tensor: torch.Tensor):
assert self.args.eval_local_rank != -1
output_tensors = [tensor.clone() for _ in range(torch.distributed.get_world_size())]
torch.distributed.all_gather(output_tensors, tensor)
concat = torch.cat(output_tensors, dim=0)
return concat
def distributed_concat_varsize_tensor(self, tensor: torch.Tensor):
assert self.args.eval_local_rank != -1
sizes = self.distributed_concat_tensor(tensor.new_full(size=(1,), fill_value=tensor.size(0)))
max_size = sizes.max().item()
padded = tensor.new_zeros(max_size)
padded[:tensor.size(0)] = tensor
padded_agg = self.distributed_concat_tensor(padded)
slices = []
for i, size in enumerate(sizes):
start_idx = i * max_size
end_idx = start_idx + size.item()
slices.append(padded_agg[start_idx: end_idx])
ret = torch.cat(slices, dim=0)
return ret
def distributed_concat_with_size(self, tensor: torch.Tensor, size: torch.Tensor, num_total_examples: int) -> torch.Tensor:
assert self.args.eval_local_rank != -1
# output_tensors = [tensor.clone() for _ in range(torch.distributed.get_world_size())]
# output_sizes = [size.clone() for _ in range(torch.distributed.get_world_size())]
# torch.distributed.all_gather(output_tensors, tensor)
# torch.distributed.all_gather(output_sizes, size)
# concat = torch.cat(output_tensors, dim=0)
# concat_sizes = torch.cat(output_sizes, dim=0)
concat_sizes = self.distributed_concat_tensor(size)
concat = self.distributed_concat_varsize_tensor(tensor)
output_sizes = concat_sizes[:num_total_examples]
assert output_sizes.sum() == concat.size(0)
return concat, output_sizes
def train_evaluator(config_name, data_path, tokenizer, data_args,training_args, model_args, output_dump_dir, eval_dataset, test_dataset, snow_ball=True, evaluator = None, multi_task=False):
    """Train (or continue training) the adversarial evaluator on ``data_path``.

    Args:
        config_name: model config identifier passed to AdversarialModel.
        data_path: path to the training data file.
        tokenizer: shared pretrained tokenizer; re-saved next to the model.
        data_args, training_args, model_args: parsed argument dataclasses.
        output_dump_dir: directory for checkpoints, tokenizer and eval dumps.
        eval_dataset, test_dataset: pre-built datasets for eval/test phases.
        snow_ball: NOTE(review) accepted but never used inside this function.
        evaluator: existing model to continue training; a fresh one is built
            when absent or when ``model_args.refresh_model`` is set.
        multi_task: forwarded to the dataset to enable multi-corpus loading.

    Returns:
        The trained AdversarialModel instance.
    """
    # Reuse the passed-in evaluator unless a refresh is requested.
    if not evaluator or model_args.refresh_model:
        evaluator = AdversarialModel(config_name)
    train_dataset = AdversarialDataset(tokenizer=tokenizer,
                                  file_path=data_path,
                                  block_size=data_args.block_size,
                                  translated_logic=data_args.translated_logic,
                                  evaluate = False, multi_task=multi_task)
    data_collator = DataCollatorForAdversarial(tokenizer=tokenizer)
    # The scorer needs the label BOS/EOS ids to strip sequence framing.
    label_bos_id = data_collator.label_bos_id
    label_eos_id = data_collator.label_eos_id
    scorer = EvalScorer(bos_id=label_bos_id, eos_id=label_eos_id, tokenizer=tokenizer, output_path=output_dump_dir)
    # Initialize our Trainer
    trainer = Evaluator_Trainer(
        model=evaluator,
        args=training_args,
        data_collator=data_collator,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=scorer,
        prediction_loss_only = False,
        model_name=training_args.eval_model,
        output_dump_dir = output_dump_dir
    )
    # Training: resume from model_name_or_path only when it is an existing directory.
    model_path = (
        model_args.model_name_or_path
        if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path)
        else None
    )
    trainer.train(model_path=model_path)
    trainer.save_model()
    # For convenience, we also re-save the tokenizer to the same directory,
    # so that you can share your model easily on huggingface.co/models =)
    if trainer.is_world_master():
        tokenizer.save_pretrained(output_dump_dir)
    if training_args.eval_do_test:
        trainer.predict(test_dataset=test_dataset,
                        mode_name='test')
    # Free cached GPU memory before the next pipeline stage.
    torch.cuda.empty_cache()
    return evaluator
164,347 | import logging
import math
import os
from dataclasses import dataclass, field
from typing import Optional
import torch
import json
from transformers import (
MODEL_WITH_LM_HEAD_MAPPING,
AutoTokenizer,
HfArgumentParser,
PreTrainedTokenizer,
set_seed,
)
from generator.models.relogic import RelogicModel
from generator.datasets.text_generation.relogic import RelogicDataset, DataCollatorForRelogic
from generator.scorers.text_generation import TextGenerationScorer
from generator.trainer_refer import Generator_Trainer
from generator.training_args import Generator_TrainingArguments
from evaluator.models.adversarial_evaluator import AdversarialModel
from evaluator.datasets.evaluator.adversarial import AdversarialDataset, DataCollatorForAdversarial
from evaluator.scorers.adv_eval import EvalScorer
from evaluator.trainer import Evaluator_Trainer
from evaluator.training_args import Evaluator_TrainingArguments
class RelogicDataset(Dataset):
    """Dataset for the generation task: logic form (+ schema) -> natural-language text.

    Each example is a dict with keys ``logic`` (possibly a mutated logic string),
    ``original_logic``, ``text_token_ids`` and ``logic_token_ids``; token id
    sequences are framed as ``<marker> [CLS] tokens [SEP]``.

    Fixes over the previous version: file handles are closed (they leaked),
    the three bare ``except:`` clauses now catch ``Exception`` only (a bare
    except also swallows KeyboardInterrupt/SystemExit), the unused local
    ``total`` was removed, and the repeated tokenize-and-frame code is
    factored into ``_encode``.
    """

    def __init__(self, tokenizer: PreTrainedTokenizer, file_path, block_size, local_rank=-1,translated_logic=False, snow_ball=False, preprocess_path = None,
                 mutation_data_path = None, aug_sample_num = 5, augmented=False, snowball_iteration = 0, total_snowball_iteration = 1, multi_task = False):
        assert os.path.isfile(file_path)
        logger.info("Creating features from dataset file at {}".format(file_path))
        # BART/RoBERTa BPE needs add_prefix_space to tokenize mid-sentence words correctly.
        add_prefix_space = isinstance(tokenizer, BartTokenizer) or isinstance(tokenizer, RobertaTokenizer)
        self.examples = []
        invalid_idx = []
        logic_key = 'query'
        text_key = 'question'
        # The corpus this file belongs to decides the marker token prepended to
        # every sequence; in multi-task mode the sibling corpus gets the other marker.
        replacee = 'spider'
        replacer = 'logic2text'
        replaceetoken = ['<SQL>']
        replacertoken = ['<LOGIC>']
        if replacee not in file_path:
            replacee = 'logic2text'
            replacer = 'spider'
            replaceetoken = ['<LOGIC>']
            replacertoken = ['<SQL>']
        if multi_task:
            print('replacee: {}, replacer: {}'.format(replacee, replacer))
        with open(file_path, encoding='utf-8') as raw_file:
            self.process_data(augmented, snowball_iteration, total_snowball_iteration, aug_sample_num, raw_file,
                              tokenizer, add_prefix_space, translated_logic, preprocess_path, snow_ball,
                              mutation_data_path, logic_key, text_key, invalid_idx, replaceetoken)
        # Multi-task: additionally load the sibling corpus with its own marker token.
        if multi_task and os.path.exists(file_path.replace(replacee, replacer)):
            if translated_logic and preprocess_path:
                preprocess_path = preprocess_path.replace(replacee, replacer)
            if snow_ball and mutation_data_path:
                mutation_data_path = mutation_data_path.replace(replacee, replacer)
            with open(file_path.replace(replacee, replacer), encoding='utf-8') as raw_file:
                self.process_data(augmented, snowball_iteration, total_snowball_iteration, aug_sample_num, raw_file,
                                  tokenizer, add_prefix_space, translated_logic, preprocess_path, snow_ball,
                                  mutation_data_path, logic_key, text_key, invalid_idx, replacertoken)
        logger.info('Invalid examples ids: {}'.format(invalid_idx))

    def _encode(self, tokenizer, datastart, text, add_prefix_space):
        """Tokenize ``text`` as ``datastart + [CLS] + tokens + [SEP]`` and return token ids."""
        tokens = datastart + [tokenizer.cls_token] + tokenizer.tokenize(
            text, add_prefix_space=add_prefix_space) + [tokenizer.sep_token]
        return tokenizer.convert_tokens_to_ids(tokens)

    def process_data(self, augmented, snowball_iteration, total_snowball_iteration, aug_sample_num, raw_file, tokenizer, add_prefix_space,
                     translated_logic, preprocess_path, snow_ball, mutation_data_path, logic_key, text_key, invalid_idx, datastart):
        """Populate ``self.examples`` from an open data file.

        Three modes:
          * augmented: JSON-lines of (original, mutated) pairs from a previous
            snowball round; mutated pairs per original logic are capped, the cap
            growing linearly with the snowball iteration.
          * snow_ball: JSON list; each example is expanded with up to
            ``aug_sample_num`` logic mutations drawn from ``mutation_data_path``.
          * plain: JSON list; one example per entry.
        Indices of examples that could not be processed are appended to
        ``invalid_idx``.
        """
        if augmented:
            logic_dict = {}
            # Cap of mutated samples kept per original logic, ramped up with the
            # snowball iteration.
            max_sample_num = int((float(snowball_iteration) / float(total_snowball_iteration)) * aug_sample_num)
            logger.info("Max sampling number for generator augmentation is {}".format(max_sample_num))
            for line in raw_file:
                example = json.loads(line)
                mutated_logic = example["mutated_logic"]
                mutated_text = example["mutated_text"]
                original_logic = example["original_logic"]
                original_text = example["original_text"]
                if original_logic not in logic_dict:
                    logic_dict[original_logic] = 0
                # Always keep the original pair (paired with the mutated logic string).
                self.examples.append({
                    "logic": mutated_logic,
                    "original_logic": original_logic,
                    "text_token_ids": self._encode(tokenizer, datastart, original_text, add_prefix_space),
                    "logic_token_ids": self._encode(tokenizer, datastart, original_logic, add_prefix_space)})
                # Keep the mutated pair only while under the per-logic cap.
                if logic_dict[original_logic] <= max_sample_num:
                    logic_dict[original_logic] += 1
                    self.examples.append({
                        "logic": mutated_logic,
                        "original_logic": original_logic,
                        "text_token_ids": self._encode(tokenizer, datastart, mutated_text, add_prefix_space),
                        "logic_token_ids": self._encode(tokenizer, datastart, mutated_logic, add_prefix_space)})
        else:
            raw_examples = json.load(raw_file)
            if translated_logic and preprocess_path:
                with open(preprocess_path, encoding='utf-8') as preprocess_file:
                    preprocess_mapping = json.load(preprocess_file)
            if snow_ball and mutation_data_path:
                with open(mutation_data_path, encoding='utf-8') as mutation_file:
                    mutation_mapping = json.load(mutation_file)
            for idx, example in tqdm(enumerate(raw_examples)):
                logic = example[logic_key]
                text = example[text_key]
                if snow_ball:
                    try:
                        sample_num = min(aug_sample_num, len(mutation_mapping[logic]))
                        if translated_logic:
                            mutations = random.sample(list(mutation_mapping[logic].values()), sample_num)
                        else:
                            mutations = random.sample(list(mutation_mapping[logic].keys()), sample_num)
                        if translated_logic:
                            logic = preprocess_mapping[logic]
                    except Exception:  # missing mutation/preprocess entry -> record and skip
                        invalid_idx.append(idx)
                        continue
                    for mutated_logic in mutations:
                        try:
                            self.examples.append({
                                "logic": mutated_logic,
                                "original_logic": logic,
                                "text_token_ids": self._encode(tokenizer, datastart, text, add_prefix_space),
                                "logic_token_ids": self._encode(tokenizer, datastart, mutated_logic, add_prefix_space)})
                        except Exception:  # tokenization failure for this mutation -> skip it
                            continue
                else:
                    if translated_logic:
                        try:
                            logic = preprocess_mapping[logic]
                        except Exception:  # no preprocessed form -> record and skip
                            invalid_idx.append(idx)
                            continue
                    self.examples.append({
                        "logic": logic,
                        "original_logic": logic,
                        "text_token_ids": self._encode(tokenizer, datastart, text, add_prefix_space),
                        "logic_token_ids": self._encode(tokenizer, datastart, logic, add_prefix_space)})

    def __len__(self):
        return len(self.examples)

    def __getitem__(self, i):
        return self.examples[i]
@dataclass
class DataCollatorForRelogic:
    """Collate RelogicDataset examples into padded tensors for the generator.

    Fix: the ``tokenizer`` bare annotation together with ``__post_init__``
    only works under ``@dataclass``; without the decorator (as before),
    ``DataCollatorForRelogic(tokenizer=...)`` raised TypeError and
    ``__post_init__`` never ran.
    """
    tokenizer: PreTrainedTokenizer

    def __post_init__(self):
        # Target sequences are framed by the tokenizer's CLS/SEP ids.
        self.label_bos_id = self.tokenizer.cls_token_id
        self.label_eos_id = self.tokenizer.sep_token_id

    def collate_batch(self, examples):
        """Pad the text/logic id sequences in ``examples`` and bundle batch metadata.

        Returns a dict ready for the generator: logic ids as encoder input,
        text ids as decoder labels, plus the special-token ids the model needs.
        """
        logics = [example["logic"] for example in examples]
        original_logic = [example["original_logic"] for example in examples]
        text_ids_sequences = [example["text_token_ids"] for example in examples]
        logic_ids_sequences = [example["logic_token_ids"] for example in examples]
        padded_text_ids_tensor = pad_and_tensorize_sequence(
            text_ids_sequences, padding_value=self.tokenizer.pad_token_id)
        padded_logic_ids_tensor = pad_and_tensorize_sequence(
            logic_ids_sequences, padding_value=self.tokenizer.pad_token_id)
        return {
            "logics": logics,
            "original_logic": original_logic,
            "input_ids": padded_logic_ids_tensor,  # logic is the encoder input
            "labels": padded_text_ids_tensor,      # text is the decoder target
            "pad_token_id": self.tokenizer.pad_token_id,
            "label_eos_id": self.label_eos_id,
            "label_bos_id": self.label_bos_id,
            "label_padding_id": self.tokenizer.pad_token_id
        }
class TextGenerationScorer:
    """Score generated text against references (BLEU + exact-match accuracy)
    and dump per-example predictions to a JSON-lines file.

    Predictions and labels arrive flattened; the per-example ``*_size`` arrays
    are used to slice each example back out.
    """

    def __init__(self, tokenizer, bos_id, eos_id, output_path):
        # BOS/EOS ids used to strip sequence framing before decoding.
        self.bos_id = bos_id
        self.eos_id = eos_id
        self.output_path = output_path
        self.tokenizer = tokenizer

    def __call__(self, prediction, epoch = 0, snow_ball = False, mode_name='eval'):
        """Compute metrics for ``prediction`` and dump the decoded pairs.

        In snow_ball mode the dump is written as augmentation pairs to
        ``<output_path>augmentation.json``; otherwise to
        ``<output_path>/<mode_name>/epoch_<n>.json``.
        Returns a dict with ``bleu``, ``accuracy``, ``correct`` and ``total``.
        """
        epoch = 0 if not epoch else epoch
        if snow_ball:
            output_path = self.output_path + 'augmentation.json'
        else:
            output_dir = os.path.join(self.output_path, mode_name)
            if not os.path.exists(output_dir):
                os.makedirs(output_dir)
            output_path = output_dir + os.sep + 'epoch_{}'.format(int(epoch)) + '.json'
        preds = prediction.predictions
        preds_size = prediction.predictions_size
        label_ids = prediction.label_ids
        label_size = prediction.label_size
        logics = prediction.logics
        original_logic = prediction.original_logic
        # Running offsets into the flattened prediction/label arrays.
        p_start, l_start = 0, 0
        correct, total = 0, 0
        ref = []
        hyp = []
        if is_rank_0():
            # NOTE(review): fout is never closed or flushed explicitly — confirm
            # the dump file is complete before anything reads it.
            fout = open(output_path, "w")
        for idx, (p_size, l_size) in enumerate(zip(preds_size, label_size)):
            p_end = p_start + p_size
            l_end = l_start + l_size
            # Slice this example out of the flattened arrays and strip BOS/EOS.
            pred = self.get_sequence(preds[p_start: p_end])
            label = self.get_sequence(label_ids[l_start: l_end])
            p_start = p_end
            l_start = l_end
            if pred == label:
                correct += 1
            total += 1
            # Only rank 0 decodes and writes (it opened the file above).
            if is_rank_0():
                pred_text = self.tokenizer.decode(pred, skip_special_tokens=True, clean_up_tokenization_spaces=True).strip()
                label_text = self.tokenizer.decode(label, skip_special_tokens=True, clean_up_tokenization_spaces=True).strip()
                ref.append(label_text)
                hyp.append(pred_text)
                if snow_ball:
                    fout.write(
                        json.dumps({
                            "idx": idx,
                            "mutated_logic": logics[idx],
                            "mutated_text": pred_text,
                            "original_logic":original_logic[idx],
                            "original_text": label_text}) + "\n")
                else:
                    fout.write(
                        json.dumps({
                            "idx": idx,
                            "logic": logics[idx],
                            "pred": pred_text,
                            "label": label_text}) + "\n")
        # NOTE(review): on non-rank-0 processes ref/hyp stay empty, and with zero
        # examples `correct / total` divides by zero — confirm this is only
        # invoked on rank 0 with a non-empty eval set.
        score = list_bleu([ref], hyp)
        return {
            "bleu": score,
            "accuracy": correct / total,
            "correct": correct,
            "total": total
        }

    def get_sequence(self, seq):
        """Return ``seq`` as plain ints, skipping BOS tokens and truncating at EOS."""
        processed_seq = []
        for idx in seq:
            if idx == self.bos_id:
                continue
            if idx == self.eos_id:
                break
            processed_seq.append(int(idx))
        return processed_seq
class Generator_Trainer:
    """
    Trainer is a simple but feature-complete training and eval loop for PyTorch,
    optimized for Transformers.
    """
    # Class-level attribute declarations; the ones without defaults are
    # assigned in __init__.
    model: PreTrainedModel                  # generator model being trained
    args: Generator_TrainingArguments       # gen_*-prefixed hyper-parameters and runtime flags
    data_collator: DataCollator             # callable batching examples into tensors
    train_dataset: Optional[Dataset]
    eval_dataset: Optional[Dataset]
    compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None  # scorer, e.g. TextGenerationScorer
    prediction_loss_only: bool
    tb_writer: Optional["SummaryWriter"] = None          # tensorboard writer (master process only)
    optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = None  # injected pair, else built lazily
    global_step: Optional[int] = None       # number of optimizer updates performed so far
    epoch: Optional[float] = None           # fractional epoch progress (epoch + step fraction)
    def __init__(
        self,
        model: PreTrainedModel,
        args: Generator_TrainingArguments,
        data_collator: Optional[DataCollator] = None,
        train_dataset: Optional[Dataset] = None,
        eval_dataset: Optional[Dataset] = None,
        compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None,
        prediction_loss_only=False,
        tb_writer: Optional["SummaryWriter"] = None,
        optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = None,
        model_name = "",
        output_dump_dir = '',
        reranker = None
    ):
        """
        Trainer is a simple but feature-complete training and eval loop for PyTorch,
        optimized for Transformers.

        Args:
            prediction_loss_only:
                (Optional) in evaluation and prediction, only return the loss
            model_name: human-readable tag for this generator run.
            output_dump_dir: directory receiving checkpoints, logs and eval dumps.
            reranker: optional model held for reranking generated candidates.
        """
        self.model = model.to(args.device)
        self.model_name = model_name
        self.args = args
        # Fall back to the library default collator when none is supplied.
        self.data_collator = data_collator if data_collator is not None else default_data_collator
        self.train_dataset = train_dataset
        self.eval_dataset = eval_dataset
        self.compute_metrics = compute_metrics
        self.prediction_loss_only = prediction_loss_only
        self.optimizers = optimizers
        self.output_dir = output_dump_dir
        self.reranker = reranker
        # Tensorboard: prefer the injected writer, else create one on the master process.
        if tb_writer is not None:
            self.tb_writer = tb_writer
        elif is_tensorboard_available() and self.is_world_master():
            self.tb_writer = SummaryWriter(log_dir=self.args.gen_logging_dir)
        if not is_tensorboard_available():
            logger.warning(
                "You are instantiating a Trainer but Tensorboard is not installed. You should consider installing it."
            )
        if is_wandb_available():
            self._setup_wandb()
        else:
            logger.info(
                "You are instantiating a Trainer but W&B is not installed. To use wandb logging, "
                "run `pip install wandb; wandb login` see https://docs.wandb.com/huggingface."
            )
        set_seed(self.args.gen_seed)
        # Create output directory if needed
        if self.is_world_master():
            os.makedirs(self.output_dir, exist_ok=True)
            # Mirror all log output into a file inside the run directory.
            LOG_FILE = os.path.join(self.output_dir, 'log.txt')
            file_handler = logging.FileHandler(LOG_FILE)
            file_handler.setLevel(level=logging.DEBUG)
            logger.addHandler(file_handler)
        if is_torch_tpu_available():
            # Set an xla_device flag on the model's config.
            # We'll find a more elegant and not need to do this in the future.
            self.model.config.xla_device = True
        # Back-compat: accept collators exposing the deprecated `collate_batch` method.
        if not callable(self.data_collator) and callable(getattr(self.data_collator, "collate_batch", None)):
            self.data_collator = self.data_collator.collate_batch
            warnings.warn(
                (
                    "The `data_collator` should now be a simple callable (function, class with `__call__`), classes "
                    + "with a `collate_batch` are deprecated and won't be supported in a future version."
                ),
                FutureWarning,
            )
def get_train_dataloader(self) -> DataLoader:
if self.train_dataset is None:
raise ValueError("Trainer: training requires a train_dataset.")
if is_torch_tpu_available():
train_sampler = get_tpu_sampler(self.train_dataset)
else:
train_sampler = (
RandomSampler(self.train_dataset)
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset)
)
data_loader = DataLoader(
self.train_dataset,
batch_size=self.args.train_batch_size,
sampler=train_sampler,
collate_fn=self.data_collator,
drop_last=self.args.gen_dataloader_drop_last,
)
return data_loader
def get_eval_dataloader(self, eval_dataset: Optional[Dataset] = None) -> DataLoader:
if eval_dataset is None and self.eval_dataset is None:
raise ValueError("Trainer: evaluation requires an eval_dataset.")
eval_dataset = eval_dataset if eval_dataset is not None else self.eval_dataset
if is_torch_tpu_available():
sampler = SequentialDistributedSampler(
eval_dataset, num_replicas=xm.xrt_world_size(), rank=xm.get_ordinal()
)
elif self.args.local_rank != -1:
sampler = SequentialDistributedSampler(eval_dataset)
else:
sampler = SequentialSampler(eval_dataset)
data_loader = DataLoader(
eval_dataset,
sampler=sampler,
batch_size=self.args.eval_batch_size,
collate_fn=self.data_collator,
drop_last=self.args.gen_dataloader_drop_last,
)
return data_loader
def get_test_dataloader(self, test_dataset: Dataset) -> DataLoader:
# We use the same batch_size as for eval.
if is_torch_tpu_available():
sampler = SequentialDistributedSampler(
test_dataset, num_replicas=xm.xrt_world_size(), rank=xm.get_ordinal()
)
elif self.args.local_rank != -1:
sampler = SequentialDistributedSampler(test_dataset)
else:
sampler = SequentialSampler(test_dataset)
data_loader = DataLoader(
test_dataset,
sampler=sampler,
batch_size=self.args.eval_batch_size,
collate_fn=self.data_collator,
drop_last=self.args.gen_dataloader_drop_last,
)
return data_loader
def get_optimizers(
self, num_training_steps: int
) -> Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]:
"""
Setup the optimizer and the learning rate scheduler.
We provide a reasonable default that works well.
If you want to use something else, you can pass a tuple in the Trainer's init,
or override this method in a subclass.
"""
if self.optimizers is not None:
return self.optimizers
# Prepare optimizer and schedule (linear warmup and decay)
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": self.args.gen_weight_decay,
},
{
"params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
"weight_decay": 0.0,
},
]
optimizer = AdamW(optimizer_grouped_parameters, lr=self.args.gen_learning_rate, eps=self.args.gen_adam_epsilon)
scheduler = get_linear_schedule_with_warmup(
optimizer, num_warmup_steps=self.args.gen_warmup_steps, num_training_steps=num_training_steps
)
return optimizer, scheduler
def _setup_wandb(self):
"""
Setup the optional Weights & Biases (`wandb`) integration.
One can override this method to customize the setup if needed. Find more information at https://docs.wandb.com/huggingface
You can also override the following environment variables:
Environment:
WANDB_WATCH:
(Optional, ["gradients", "all", "false"]) "gradients" by default, set to "false" to disable gradient logging
or "all" to log gradients and parameters
WANDB_PROJECT:
(Optional): str - "huggingface" by default, set this to a custom string to store results in a different project
WANDB_DISABLED:
(Optional): boolean - defaults to false, set to "true" to disable wandb entirely
"""
if self.is_world_master():
logger.info(
'Automatic Weights & Biases logging enabled, to disable set os.environ["WANDB_DISABLED"] = "true"'
)
wandb.init(project=os.getenv("WANDB_PROJECT", "huggingface"), config=vars(self.args))
# keep track of model topology and gradients, unsupported on TPU
if not is_torch_tpu_available() and os.getenv("WANDB_WATCH") != "false":
wandb.watch(
self.model, log=os.getenv("WANDB_WATCH", "gradients"), log_freq=max(100, self.args.gen_logging_steps)
)
def num_examples(self, dataloader: DataLoader) -> int:
"""
Helper to get num of examples from a DataLoader, by accessing its Dataset.
"""
return len(dataloader.dataset)
    def train(self, model_path: Optional[str] = None):
        """
        Main training entry point.

        Args:
            model_path:
                (Optional) Local path to model if model to train has been instantiated from a local path
                If present, we will try reloading the optimizer/scheduler states from there.
        """
        train_dataloader = self.get_train_dataloader()
        # Derive total optimization steps / epochs either from gen_max_steps or
        # from the epoch count.
        if self.args.gen_max_steps > 0:
            t_total = self.args.gen_max_steps
            num_train_epochs = (
                self.args.gen_max_steps // (len(train_dataloader) // self.args.gen_gradient_accumulation_steps) + 1
            )
        else:
            t_total = int(len(train_dataloader) // self.args.gen_gradient_accumulation_steps * self.args.gen_num_train_epochs)
            num_train_epochs = self.args.gen_num_train_epochs
        optimizer, scheduler = self.get_optimizers(num_training_steps=t_total)
        # Check if saved optimizer or scheduler states exist
        if (
            model_path is not None
            and os.path.isfile(os.path.join(model_path, "optimizer.pt"))
            and os.path.isfile(os.path.join(model_path, "scheduler.pt"))
        ):
            # Load in optimizer and scheduler states
            optimizer.load_state_dict(
                torch.load(os.path.join(model_path, "optimizer.pt"), map_location=self.args.gen_device)
            )
            scheduler.load_state_dict(torch.load(os.path.join(model_path, "scheduler.pt")))
        model = self.model
        if self.args.gen_fp16:
            if not is_apex_available():
                raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
            model, optimizer = amp.initialize(model, optimizer, opt_level=self.args.gen_fp16_opt_level)
        # multi-gpu training (should be after apex fp16 initialization)
        if self.args.n_gpu > 1:
            model = torch.nn.DataParallel(model)
        # Distributed training (should be after apex fp16 initialization)
        if self.args.local_rank != -1:
            model = torch.nn.parallel.DistributedDataParallel(
                model,
                device_ids=[self.args.local_rank],
                output_device=self.args.local_rank,
                find_unused_parameters=True,
            )
        # Train!
        if is_torch_tpu_available():
            total_train_batch_size = self.args.gen_train_batch_size * xm.xrt_world_size()
        else:
            # NOTE(review): reads args.train_batch_size here but gen_train_batch_size
            # on the TPU path — confirm the two attributes agree.
            total_train_batch_size = (
                self.args.train_batch_size
                * self.args.gen_gradient_accumulation_steps
                * (torch.distributed.get_world_size() if self.args.local_rank != -1 else 1)
            )
        logger.info("***** Running training *****")
        logger.info("  Num examples = %d", self.num_examples(train_dataloader))
        logger.info("  Num Epochs = %d", num_train_epochs)
        logger.info("  Instantaneous batch size per device = %d", self.args.gen_per_device_train_batch_size)
        logger.info("  Total train batch size (w. parallel, distributed & accumulation) = %d", total_train_batch_size)
        logger.info("  Gradient Accumulation steps = %d", self.args.gen_gradient_accumulation_steps)
        logger.info("  Total optimization steps = %d", t_total)
        self.global_step = 0
        self.epoch = 0
        epochs_trained = 0
        steps_trained_in_current_epoch = 0
        # Check if continuing training from a checkpoint
        if model_path is not None:
            # set global_step to global_step of last saved checkpoint from model path
            try:
                self.global_step = int(model_path.split("-")[-1].split("/")[0])
                epochs_trained = self.global_step // (len(train_dataloader) // self.args.gen_gradient_accumulation_steps)
                steps_trained_in_current_epoch = self.global_step % (
                    len(train_dataloader) // self.args.gen_gradient_accumulation_steps
                )
                logger.info("  Continuing training from checkpoint, will skip to saved global_step")
                logger.info("  Continuing training from epoch %d", epochs_trained)
                logger.info("  Continuing training from global step %d", self.global_step)
                logger.info("  Will skip the first %d steps in the first epoch", steps_trained_in_current_epoch)
            except ValueError:
                self.global_step = 0
                logger.info("  Starting fine-tuning.")
        tr_loss = 0.0
        logging_loss = 0.0
        model.zero_grad()
        train_iterator = trange(
            epochs_trained, int(num_train_epochs), desc="Epoch", disable=not self.is_local_master() or not self.args.gen_logging_tqdm
        )
        for epoch in train_iterator:
            # Reshuffle shards per epoch in distributed mode.
            if isinstance(train_dataloader, DataLoader) and isinstance(train_dataloader.sampler, DistributedSampler):
                train_dataloader.sampler.set_epoch(epoch)
            if is_torch_tpu_available():
                parallel_loader = pl.ParallelLoader(train_dataloader, [self.args.gen_device]).per_device_loader(
                    self.args.gen_device
                )
                epoch_iterator = tqdm(parallel_loader, desc="Iteration", disable=not self.is_local_master() or not self.args.gen_logging_tqdm)
            else:
                epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=not self.is_local_master() or not self.args.gen_logging_tqdm)
            for step, inputs in enumerate(epoch_iterator):
                # Skip past any already trained steps if resuming training
                if steps_trained_in_current_epoch > 0:
                    steps_trained_in_current_epoch -= 1
                    continue
                tr_loss += self._training_step(model, inputs, optimizer)
                # Only step the optimizer on gradient-accumulation boundaries
                # (or at the final batch of a short epoch).
                if (step + 1) % self.args.gen_gradient_accumulation_steps == 0 or (
                    # last step in epoch but step is always smaller than gradient_accumulation_steps
                    len(epoch_iterator) <= self.args.gen_gradient_accumulation_steps
                    and (step + 1) == len(epoch_iterator)
                ):
                    if self.args.gen_fp16:
                        torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), self.args.gen_max_grad_norm)
                    else:
                        torch.nn.utils.clip_grad_norm_(model.parameters(), self.args.gen_max_grad_norm)
                    if is_torch_tpu_available():
                        xm.optimizer_step(optimizer)
                    else:
                        optimizer.step()
                    scheduler.step()
                    model.zero_grad()
                    self.global_step += 1
                    # Fractional epoch progress; integral only at the epoch's last step.
                    self.epoch = epoch + (step + 1) / len(epoch_iterator)
                    if (self.args.gen_logging_steps > 0 and self.global_step % self.args.gen_logging_steps == 0) or (
                        self.global_step == 1 and self.args.gen_logging_first_step
                    ):
                        logs: Dict[str, float] = {}
                        logs["loss"] = (tr_loss - logging_loss) / self.args.gen_logging_steps
                        # backward compatibility for pytorch schedulers
                        logs["learning_rate"] = (
                            scheduler.get_last_lr()[0]
                            if version.parse(torch.__version__) >= version.parse("1.4")
                            else scheduler.get_lr()[0]
                        )
                        logging_loss = tr_loss
                        self._log(logs)
                    # NOTE(review): `self.epoch % gen_eval_epochs == 0` compares a
                    # float, so this fires only when progress is exactly integral
                    # (i.e. at epoch boundaries) — confirm that is the intent.
                    if (self.args.gen_eval_epochs > 0 and self.epoch % self.args.gen_eval_epochs == 0):
                        if self.args.gen_evaluate_during_training:
                            self.evaluate()
                    if self.args.gen_save_epochs > 0 and self.epoch % self.args.gen_save_epochs == 0:
                        # In all cases (even distributed/parallel), self.model is always a reference
                        # to the model we want to save.
                        if hasattr(model, "module"):
                            assert model.module is self.model
                        else:
                            assert model is self.model
                        # Save model checkpoint
                        output_dir = os.path.join(self.output_dir, f"{PREFIX_CHECKPOINT_DIR}-epoch-{self.epoch}")
                        self.save_model(output_dir)
                        if self.is_world_master():
                            self._rotate_checkpoints()
                        if is_torch_tpu_available():
                            xm.rendezvous("saving_optimizer_states")
                            xm.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
                            xm.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
                        elif self.is_world_master():
                            torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
                            torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
                if self.args.gen_max_steps > 0 and self.global_step > self.args.gen_max_steps:
                    epoch_iterator.close()
                    break
            if self.args.gen_max_steps > 0 and self.global_step > self.args.gen_max_steps:
                train_iterator.close()
                break
            if self.args.gen_tpu_metrics_debug:
                # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
                xm.master_print(met.metrics_report())
        if self.tb_writer:
            self.tb_writer.close()
        logger.info("\n\nTraining completed. Do not forget to share your model on huggingface.co/models =)\n\n")
        return TrainOutput(self.global_step, tr_loss / self.global_step)
def _log(self, logs: Dict[str, float], iterator: Optional[tqdm] = None) -> None:
if self.epoch is not None:
logs["epoch"] = self.epoch
if self.global_step is None:
# when logging evaluation metrics without training
self.global_step = 0
if self.tb_writer:
for k, v in logs.items():
if isinstance(v, (int, float)):
self.tb_writer.add_scalar(k, v, self.global_step)
else:
logger.warning(
"Trainer is attempting to log a value of "
'"%s" of type %s for key "%s" as a scalar. '
"This invocation of Tensorboard's writer.add_scalar() "
"is incorrect so we dropped this attribute.",
v,
type(v),
k,
)
self.tb_writer.flush()
if is_wandb_available():
if self.is_world_master():
wandb.log(logs, step=self.global_step)
output = {**logs, **{"step": self.global_step}}
if iterator is not None:
iterator.write(output)
else:
logger.info(output)
def _training_step(
self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]], optimizer: torch.optim.Optimizer
) -> float:
model.train()
for k, v in inputs.items():
if isinstance(v, torch.Tensor):
inputs[k] = v.to(self.args.device)
outputs = model(**inputs)
loss = outputs[0] # model outputs are always tuple in transformers (see doc)
if self.args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel training
if self.args.gen_gradient_accumulation_steps > 1:
loss = loss / self.args.gen_gradient_accumulation_steps
if self.args.gen_fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
return loss.item()
def is_local_master(self) -> bool:
if is_torch_tpu_available():
return xm.is_master_ordinal(local=True)
else:
return self.args.local_rank in [-1, 0]
def is_world_master(self) -> bool:
"""
This will be True only in one process, even in distributed mode,
even when training on multiple machines.
"""
if is_torch_tpu_available():
return xm.is_master_ordinal(local=False)
else:
return self.args.local_rank == -1 or torch.distributed.get_rank() == 0
    def save_model(self, output_dir: Optional[str] = None):
        """
        Saving best-practices: if you use default names for the model,
        you can reload it using from_pretrained().

        Will only save from the world_master process (unless in TPUs).
        """
        # TPU saving needs its own rendezvous logic; otherwise only the
        # global rank-0 process writes to disk.
        if is_torch_tpu_available():
            self._save_tpu(output_dir)
        elif self.is_world_master():
            self._save(output_dir)
    def _save_tpu(self, output_dir: Optional[str] = None):
        """Save the model from a TPU process, synchronizing all cores first."""
        output_dir = output_dir if output_dir is not None else self.output_dir
        logger.info("Saving model checkpoint to %s", output_dir)

        # Only the master ordinal creates the directory and stores the args.
        if xm.is_master_ordinal():
            os.makedirs(output_dir, exist_ok=True)
            torch.save(self.args, os.path.join(output_dir, "training_args.bin"))

        # Save a trained model and configuration using `save_pretrained()`.
        # They can then be reloaded using `from_pretrained()`
        if not isinstance(self.model, PreTrainedModel):
            raise ValueError("Trainer.model appears to not be a PreTrainedModel")
        # Barrier: every core reaches this point before the synchronized save.
        xm.rendezvous("saving_checkpoint")
        self.model.save_pretrained(output_dir)
def _save(self, output_dir: Optional[str] = None):
output_dir = output_dir if output_dir is not None else self.output_dir
os.makedirs(output_dir, exist_ok=True)
logger.info("Saving model checkpoint to %s", output_dir)
# Save a trained model and configuration using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
# if not isinstance(self.model, PreTrainedModel):
# raise ValueError("Trainer.model appears to not be a PreTrainedModel")
self.model.save_pretrained(output_dir)
# Good practice: save your training arguments together with the trained model
torch.save(self.args, os.path.join(output_dir, "training_args.bin"))
def _sorted_checkpoints(self, checkpoint_prefix=PREFIX_CHECKPOINT_DIR, use_mtime=False) -> List[str]:
ordering_and_checkpoint_path = []
glob_checkpoints = [str(x) for x in Path(self.output_dir).glob(f"{checkpoint_prefix}-*")]
for path in glob_checkpoints:
if use_mtime:
ordering_and_checkpoint_path.append((os.path.getmtime(path), path))
else:
regex_match = re.match(f".*{checkpoint_prefix}-([0-9]+)", path)
if regex_match and regex_match.groups():
ordering_and_checkpoint_path.append((int(regex_match.groups()[0]), path))
checkpoints_sorted = sorted(ordering_and_checkpoint_path)
checkpoints_sorted = [checkpoint[1] for checkpoint in checkpoints_sorted]
return checkpoints_sorted
def _rotate_checkpoints(self, use_mtime=False) -> None:
if self.args.gen_save_total_limit is None or self.args.gen_save_total_limit <= 0:
return
# Check if we should delete older checkpoint(s)
checkpoints_sorted = self._sorted_checkpoints(use_mtime=use_mtime)
if len(checkpoints_sorted) <= self.args.gen_save_total_limit:
return
number_of_checkpoints_to_delete = max(0, len(checkpoints_sorted) - self.args.gen_save_total_limit)
checkpoints_to_be_deleted = checkpoints_sorted[:number_of_checkpoints_to_delete]
for checkpoint in checkpoints_to_be_deleted:
logger.info("Deleting older checkpoint [{}] due to args.save_total_limit".format(checkpoint))
shutil.rmtree(checkpoint)
    def evaluate(
        self, eval_dataset: Optional[Dataset] = None, prediction_loss_only: Optional[bool] = None, snow_ball: Optional[bool] = False
    ) -> Dict[str, float]:
        """
        Run evaluation and return metrics.

        The calling script will be responsible for providing a method to compute metrics, as they are
        task-dependent.

        Args:
            eval_dataset: (Optional) Pass a dataset if you wish to override
                the one on the instance.
            prediction_loss_only: when set, forwarded to the prediction loop to
                skip collecting predictions.
            snow_ball: forwarded to the prediction loop / metric computation
                (snowball self-training mode — TODO confirm semantics with caller).

        Returns:
            A dict containing:
                - the eval loss
                - the potential metrics computed from the predictions
        """
        eval_dataloader = self.get_eval_dataloader(eval_dataset)

        output = self._prediction_loop(eval_dataloader, description="Evaluation", prediction_loss_only = prediction_loss_only, snow_ball=snow_ball)

        # Push the metrics to TensorBoard/wandb/console as well as returning them.
        self._log(output.metrics)

        if self.args.gen_tpu_metrics_debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        return output.metrics
def predict(self, test_dataset: Dataset, mode_name = 'test') -> PredictionOutput:
"""
Run prediction and return predictions and potential metrics.
Depending on the dataset and your use case, your test dataset may contain labels.
In that case, this method will also return metrics, like in evaluate().
"""
test_dataloader = self.get_test_dataloader(test_dataset)
output = self._prediction_loop(test_dataloader, description="Prediction", mode_name=mode_name)
return output
    def _prediction_loop(
        self, dataloader: DataLoader, description: str, prediction_loss_only: Optional[bool] = None, snow_ball: Optional[bool] = False, mode_name: Optional[str] = 'eval'
    ) -> PredictionOutput:
        """
        Prediction/evaluation loop, shared by `evaluate()` and `predict()`.

        Works both with or without labels.

        NOTE: One issue is on the size of prediction and labels.
        For current code, it considers all the prediction and labels in different batch have same length of sequence.
        This is not true for our application. To make this more general, I will reformat the predictions and labels.
        """

        prediction_loss_only = prediction_loss_only if prediction_loss_only is not None else self.prediction_loss_only

        model = self.model
        # multi-gpu eval
        if self.args.n_gpu > 1:
            model = torch.nn.DataParallel(model)
        else:
            model = self.model
        # Note: in torch.distributed mode, there's no point in wrapping the model
        # inside a DistributedDataParallel as we'll be under `no_grad` anyways.

        batch_size = dataloader.batch_size
        logger.info("***** Running {}: {} *****".format(description, mode_name))
        logger.info(" Num examples = %d", self.num_examples(dataloader))
        logger.info(" Batch size = %d", batch_size)
        eval_losses: List[float] = []
        logics : List[str] = []
        original_logic: List[str] = []
        probabilities: List[float] = []
        # Predictions/labels are kept flattened, with a parallel per-example
        # length tensor, because sequence lengths differ across batches.
        preds: torch.Tensor = None
        preds_size: torch.Tensor = None
        label_ids: torch.Tensor = None
        label_size: torch.Tensor = None
        model.eval()

        if is_torch_tpu_available():
            dataloader = pl.ParallelLoader(dataloader, [self.args.gen_device]).per_device_loader(self.args.gen_device)

        for inputs in tqdm(dataloader, desc=description):
            has_labels = any(inputs.get(k) is not None for k in ["labels", "lm_labels", "masked_lm_labels"])

            for k, v in inputs.items():
                if isinstance(v, torch.Tensor):
                    inputs[k] = v.to(self.args.device)
            # The reranker (if any) is passed through the model's forward call.
            inputs['reranker'] = self.reranker
            with torch.no_grad():
                outputs = model(**inputs)
                if has_labels:
                    step_eval_loss, logits = outputs[:2]
                    eval_losses += [step_eval_loss.mean().item()]
                    logics += inputs["logics"]
                    original_logic += inputs["original_logic"]
                else:
                    logits = outputs[0]
                # Normalize a 1-D logits tensor to (1, seq_len).
                if len(logits.shape) == 1:
                    logits = logits.view(1,-1)

            if not prediction_loss_only:
                # Change the way of concat
                # We need to make sure that the size of preds and labels is (batch_size, sequence_length)
                if preds is None:
                    preds = logits.detach()
                    preds_size = preds.new_full(size=preds.size()[:1], fill_value=preds.size(1)).detach()
                    preds = preds.view(-1)
                else:
                    preds_size = torch.cat((preds_size, logits.new_full(size=logits.size()[:1], fill_value=logits.size(1)).detach()), dim=0)
                    preds = torch.cat((preds, logits.detach().view(-1)), dim=0)
                if inputs.get("labels") is not None:
                    if label_ids is None:
                        label_ids = inputs["labels"].detach()
                        label_size = label_ids.new_full(size=label_ids.size()[:1], fill_value=label_ids.size(1)).detach()
                        label_ids = label_ids.view(-1)
                    else:
                        label_size = torch.cat((label_size, inputs["labels"].new_full(size=inputs["labels"].size()[:1], fill_value=inputs["labels"].size(1)).detach()), dim=0)
                        label_ids = torch.cat((label_ids, inputs["labels"].detach().view(-1)), dim=0)

        if self.args.local_rank != -1:
            # In distributed mode, concatenate all results from all nodes:
            if preds is not None:
                preds, preds_size = self.distributed_concat_with_size(preds, preds_size, num_total_examples=self.num_examples(dataloader))
            if label_ids is not None:
                label_ids, label_size = self.distributed_concat_with_size(label_ids, label_size, num_total_examples=self.num_examples(dataloader))
        elif is_torch_tpu_available():
            # tpu-comment: Get all predictions and labels from all worker shards of eval dataset
            # NOTE: We do not modify this for now.
            if preds is not None:
                preds = xm.mesh_reduce("eval_preds", preds, torch.cat)
            if label_ids is not None:
                label_ids = xm.mesh_reduce("eval_label_ids", label_ids, torch.cat)

        # Finally, turn the aggregated tensors into numpy arrays.
        if preds is not None:
            preds = preds.cpu().numpy()
            preds_size = preds_size.cpu().numpy()
        if label_ids is not None:
            label_ids = label_ids.cpu().numpy()
            label_size = label_size.cpu().numpy()

        if self.compute_metrics is not None and preds is not None and label_ids is not None:
            metrics = self.compute_metrics(EvalPredictionWithSize(predictions=preds, predictions_size=preds_size, label_ids=label_ids, label_size=label_size , logics=logics, original_logic=original_logic), epoch = self.epoch, snow_ball=snow_ball, mode_name=mode_name)
        else:
            metrics = {}
        if len(eval_losses) > 0:
            metrics["eval_loss"] = np.mean(eval_losses)

        # Prefix all keys with eval_
        for key in list(metrics.keys()):
            if not key.startswith("eval_"):
                metrics[f"eval_{key}"] = metrics.pop(key)
        # NOTE(review): this logs the raw prediction array under a misleading
        # "Batch size" label — looks like leftover debugging; confirm intent.
        logger.info(" Batch size = %s", preds)

        return PredictionOutputWithSize(predictions=preds, predictions_size=preds_size, label_ids=label_ids, label_size=label_size, metrics=metrics)
def distributed_concat(self, tensor: torch.Tensor, num_total_examples: int) -> torch.Tensor:
assert self.args.local_rank != -1
output_tensors = [tensor.clone() for _ in range(torch.distributed.get_world_size())]
torch.distributed.all_gather(output_tensors, tensor)
concat = torch.cat(output_tensors, dim=0)
# truncate the dummy elements added by SequentialDistributedSampler
output = concat[:num_total_examples]
return output
def distributed_concat_tensor(self, tensor: torch.Tensor):
assert self.args.local_rank != -1
output_tensors = [tensor.clone() for _ in range(torch.distributed.get_world_size())]
torch.distributed.all_gather(output_tensors, tensor)
concat = torch.cat(output_tensors, dim=0)
return concat
    def distributed_concat_varsize_tensor(self, tensor: torch.Tensor):
        """All-gather 1-D tensors of *different* lengths across ranks.

        Each rank pads its tensor to the global maximum length, all-gathers the
        padded tensors, then slices each rank's real (unpadded) portion back out
        and concatenates them in rank order.
        """
        assert self.args.local_rank != -1
        # Gather every rank's true length first.
        sizes = self.distributed_concat_tensor(tensor.new_full(size=(1,), fill_value=tensor.size(0)))
        max_size = sizes.max().item()

        # Pad to a common length so all_gather sees equally-shaped tensors.
        padded = tensor.new_zeros(max_size)
        padded[:tensor.size(0)] = tensor
        padded_agg = self.distributed_concat_tensor(padded)
        slices = []
        for i, size in enumerate(sizes):
            # Rank i's real data occupies the first `size` slots of its max_size-wide slot.
            start_idx = i * max_size
            end_idx = start_idx + size.item()
            slices.append(padded_agg[start_idx: end_idx])
        ret = torch.cat(slices, dim=0)
        return ret
def distributed_concat_with_size(self, tensor: torch.Tensor, size: torch.Tensor, num_total_examples: int) -> torch.Tensor:
assert self.args.local_rank != -1
# output_tensors = [tensor.clone() for _ in range(torch.distributed.get_world_size())]
# output_sizes = [size.clone() for _ in range(torch.distributed.get_world_size())]
# torch.distributed.all_gather(output_tensors, tensor)
# torch.distributed.all_gather(output_sizes, size)
# concat = torch.cat(output_tensors, dim=0)
# concat_sizes = torch.cat(output_sizes, dim=0)
concat_sizes = self.distributed_concat_tensor(size)
concat = self.distributed_concat_varsize_tensor(tensor)
output_sizes = concat_sizes[:num_total_examples]
assert output_sizes.sum() == concat.size(0)
return concat, output_sizes
def augment_data(data_path, preprocess_path, mutation_data_path, tokenizer, data_args,training_args, model_args, output_dump_dir, snow_ball, generator = None, reranker = None, multi_task=False):
    """Run the generator (optionally with a reranker) over a dataset to produce
    augmented data, dumping the generations under ``output_dump_dir``.

    The work happens as a side effect of ``trainer.evaluate``: the scorer writes
    its outputs to ``output_dump_dir``. Nothing is returned.
    """
    # Optionally disable reranking during augmentation.
    if training_args.gen_wo_aug_rerank:
        reranker = None
    aug_dataset = RelogicDataset(tokenizer=tokenizer,
                file_path=data_path,
                preprocess_path=preprocess_path,
                mutation_data_path=mutation_data_path,
                block_size=data_args.block_size,
                translated_logic=data_args.translated_logic,
                snow_ball=snow_ball, multi_task=multi_task)
    data_collator = DataCollatorForRelogic(tokenizer=tokenizer)
    label_bos_id = data_collator.label_bos_id
    label_eos_id = data_collator.label_eos_id
    # The scorer both computes metrics and dumps generations to output_dump_dir.
    scorer = TextGenerationScorer(bos_id=label_bos_id, eos_id=label_eos_id, tokenizer=tokenizer, output_path=output_dump_dir)
    # Initialize our Trainer
    trainer = Generator_Trainer(
        model=generator,
        args=training_args,
        data_collator=data_collator,
        eval_dataset=aug_dataset,
        compute_metrics=scorer,
        prediction_loss_only = False,
        model_name=training_args.gen_model,
        output_dump_dir = output_dump_dir,
        reranker = reranker
    )
    eval_output = trainer.evaluate(snow_ball=snow_ball)
    # Free cached GPU memory once augmentation is done.
    torch.cuda.empty_cache()
164,348 | import logging
import math
import os
from dataclasses import dataclass, field
from typing import Optional
import torch
import json
from transformers import (
MODEL_WITH_LM_HEAD_MAPPING,
AutoTokenizer,
HfArgumentParser,
PreTrainedTokenizer,
set_seed,
)
from generator.models.relogic import RelogicModel
from generator.datasets.text_generation.relogic import RelogicDataset, DataCollatorForRelogic
from generator.scorers.text_generation import TextGenerationScorer
from generator.trainer_refer import Generator_Trainer
from generator.training_args import Generator_TrainingArguments
from evaluator.models.adversarial_evaluator import AdversarialModel
from evaluator.datasets.evaluator.adversarial import AdversarialDataset, DataCollatorForAdversarial
from evaluator.scorers.adv_eval import EvalScorer
from evaluator.trainer import Evaluator_Trainer
from evaluator.training_args import Evaluator_TrainingArguments
def create_save_dir(model_args, snowball_iteration = 0):
    """Create (if missing) the per-iteration output directories for the snowball loop.

    Args:
        model_args: object with an ``output_dir`` attribute (root output path).
        snowball_iteration: iteration index used as the leaf directory name.

    Returns:
        Tuple ``(generator_dir, evaluator_dir, augmentation_dir)``, each ending
        with a trailing slash.
    """
    generator_output_dump_dir = model_args.output_dir + '/generator/{}/'.format(snowball_iteration)
    # exist_ok avoids the check-then-create race of `if not exists: makedirs`.
    os.makedirs(generator_output_dump_dir, exist_ok=True)
    evaluator_output_dump_dir = model_args.output_dir + '/evaluator/{}/'.format(snowball_iteration)
    os.makedirs(evaluator_output_dump_dir, exist_ok=True)
    aug_output_dump_dir = model_args.output_dir + '/augmentation/{}/'.format(snowball_iteration)
    os.makedirs(aug_output_dump_dir, exist_ok=True)
    return generator_output_dump_dir, evaluator_output_dump_dir, aug_output_dump_dir
def reverse(sql,table):
    """Convert a parsed (Spider-style) SQL dict into an intermediate clause dict.

    Each clause is translated by its dedicated helper (``selectl``, ``froml``,
    ...); clauses that are empty in the input become ``None`` in the output.
    """
    temp = {}
    temp['select'] = selectl(sql['select'],table)
    temp['from'] = froml(sql['from'],table)
    if len(sql['groupBy']) > 0:
        temp['groupBy'] = groupbyl(sql['groupBy'],table)
    else:
        temp['groupBy'] = None
    if len(sql['orderBy']) > 0:
        # ORDER BY also needs the LIMIT value to render "top-k" style queries.
        temp['orderBy'] = orderbyl(sql['orderBy'],table,sql['limit'])
    else:
        temp['orderBy'] = None
    if len(sql['where']) > 0:
        temp['where'] = wherel(sql['where'],table)
    else:
        temp['where'] = None
    if len(sql['having']) > 0:
        temp['having'] = havingl(sql['having'],table)
    else:
        temp['having'] = None
    if sql['intersect'] == None:
        temp['intersect'] = None
    else:
        temp['intersect'] = intersectl(sql['intersect'],table)
    if sql['union'] == None:
        temp['union'] = None
    else:
        temp['union'] = unionl(sql['union'],table)
    if sql['except'] == None:
        temp['except'] = None
    else:
        # NOTE(review): EXCEPT is routed through `intersectl` even though an
        # `exceptl` helper exists below — confirm whether this is intentional.
        temp['except'] = intersectl(sql['except'],table)
    return temp
def exceptl(texcept,table):
    """Wrap an EXCEPT sub-query: ``[[reverse(...)]]`` for a dict input, else ``[]``."""
    if not isinstance(texcept, dict):
        return []
    return [[reverse(texcept, table)]]
164,354 | import traceback
import re
import sys
import json
import sqlite3
import sqlparse
import random
from os import listdir, makedirs
from collections import OrderedDict
from nltk import word_tokenize, tokenize
from os.path import isfile, isdir, join, split, exists, splitext
from utils.process_sql import get_sql
def get_schemas_from_json(fpath):
    """Load a Spider-style ``tables.json`` file.

    Returns:
        (schemas, db_names, tables): ``schemas`` maps db_id -> {table: [columns]}
        with all names lowercased; ``db_names`` lists every db_id; ``tables``
        keeps the raw original/natural table and column name lists per db_id.
    """
    with open(fpath) as f:
        data = json.load(f)

    db_names = [db['db_id'] for db in data]
    tables = {}
    schemas = {}
    for db in data:
        db_id = db['db_id']
        column_names_original = db['column_names_original']
        table_names_original = db['table_names_original']
        tables[db_id] = {
            'column_names_original': column_names_original,
            'table_names_original': table_names_original,
            'column_names': db['column_names'],
            'table_names': db['table_names'],
        }
        # {'table': [col.lower, ...]}; columns are grouped by table index.
        schema = {}
        for i, tabn in enumerate(table_names_original):
            schema[str(tabn.lower())] = [
                str(col.lower()) for td, col in column_names_original if td == i
            ]
        schemas[db_id] = schema
    return schemas, db_names, tables
def get_labels(sql_struct,slot,cur_nest):
    """Walk every clause of a parsed SQL dict and record clause labels into ``slot``.

    ``cur_nest`` carries the enclosing clause path; nested calls prepend it to
    the clause name (e.g. "INTERSECT SELECT"), top-level calls use the bare name.
    """
    # Pre-compute the "<cur_nest> " prefix once instead of duplicating each branch.
    prefix = cur_nest + ' ' if cur_nest != '' else ''
    if len(sql_struct['select']) > 0:
        slot = get_select_labels(sql_struct['select'], slot, prefix + 'SELECT')
    if sql_struct['from']:
        # Mirrors the original behaviour: FROM ignores the nesting prefix.
        slot = get_from_labels(sql_struct['from'], slot, 'FROM')
    if len(sql_struct['where']) > 0:
        slot = get_where_labels(sql_struct['where'], slot, prefix + 'WHERE')
    if len(sql_struct['groupBy']) > 0:
        slot = get_groupby_labels(sql_struct['groupBy'], slot, prefix + 'GROUP_BY')
    if len(sql_struct['having']) > 0:
        slot = get_having_labels(sql_struct['having'], slot, prefix + 'HAVING')
    if len(sql_struct['orderBy']) > 0:
        slot = get_orderby_labels(sql_struct['orderBy'], sql_struct['limit'], slot, prefix + 'ORDER_BY')
    if sql_struct['intersect']:
        slot = get_intersect_labels(sql_struct['intersect'], slot, prefix + 'INTERSECT')
    if sql_struct['except']:
        slot = get_except_labels(sql_struct['except'], slot, prefix + 'EXCEPT')
    if sql_struct['union']:
        slot = get_union_labels(sql_struct['union'], slot, prefix + 'UNION')
    return slot
def get_label(sql,column_len):
    """Return the per-column label list for ``sql`` over ``column_len`` columns."""
    # Start every column with an empty label, then let get_labels fill them in.
    slot = {idx: "" for idx in range(column_len)}
    # dict preserves insertion order, so values() yields labels in column order.
    return list(get_labels(sql, slot, '').values())
def get_table_labels(sql_struct,slot,cur_nest):
    """Record table-level labels for every clause of a parsed SQL dict into ``slot``.

    Unlike the column variant, ``cur_nest`` is forwarded unchanged to every
    helper (the original if/else branches were byte-for-byte identical).
    """
    if sql_struct['from']:
        slot = get_from_table_labels(sql_struct['from'], slot, cur_nest)
    if len(sql_struct['where']) > 0:
        slot = get_where_table_labels(sql_struct['where'], slot, cur_nest)
    if len(sql_struct['having']) > 0:
        slot = get_having_table_labels(sql_struct['having'], slot, cur_nest)
    if sql_struct['intersect']:
        slot = get_intersect_table_labels(sql_struct['intersect'], slot, cur_nest)
    if sql_struct['except']:
        slot = get_except_table_labels(sql_struct['except'], slot, cur_nest)
    if sql_struct['union']:
        slot = get_union_table_labels(sql_struct['union'], slot, cur_nest)
    return slot
def get_table_label(sql,table_len):
    """Return the per-table label list for ``sql`` over ``table_len`` tables."""
    # Initialize one empty label per table index, then let the walker fill them.
    slot = {idx: "" for idx in range(table_len)}
    return list(get_table_labels(sql, slot, '').values())
164,372 | import math
import os
import numpy as np
from space.args import str2bool
from space.data.batch import batch
from space.data.dataset import LazyDataset
from space.data.sampler import RandomSampler
from space.data.sampler import SequentialSampler
from space.data.sampler import SortedSampler
def get_data_loader(batch_size, reader, hparams, file, collate_fn, is_test):
class SequentialDataLoaderWrapper:
def __init__(self, data_loaders):
def __iter__(self):
def __len__(self):
def get_sequential_data_loader(batch_size, reader, hparams, data_paths, collate_fn, data_type):
    """Build one data loader per path in ``data_paths`` and chain them sequentially.

    Every split other than 'train' is created in test mode. Files are expected
    to follow the "<split>.<tokenizer_type>.jsonl" naming convention.
    """
    data_loaders = []
    for data_path in data_paths:
        file = os.path.join(data_path, f'{data_type}.{hparams.tokenizer_type}.jsonl')
        data_loaders.append(get_data_loader(batch_size=batch_size, reader=reader, hparams=hparams, file=file,
                                            collate_fn=collate_fn, is_test=(data_type != 'train')))
    # Wrap so the loaders are iterated one after another as a single loader.
    data_loader = SequentialDataLoaderWrapper(data_loaders)
    return data_loader
164,373 | import multiprocessing
import random
from itertools import chain
import os
import glob
import json
import numpy as np
import time
import re
from tqdm import tqdm
from space.args import str2bool
from space.data.tokenizer import Tokenizer
from space.utils import ontology
from space.utils.scores import tree_edit_score
def max_lens(X):
    """Return the maximum length at each nesting depth of a (possibly nested) list.

    ``max_lens([[1, 2], [3]])`` -> ``[2, 2]``: two rows, longest row has 2 items.
    """
    lens = [len(X)]
    while isinstance(X[0], list):
        lens.append(max(len(sub) for sub in X))
        # Flatten one level and continue with the next depth.
        X = [item for sub in X for item in sub]
    return lens
def list2np(X, padding=0, dtype="int64"):
    """Convert a (possibly ragged) nested list into a right-padded numpy array.

    Supports 1-, 2- and 3-level nesting; shorter rows are padded with
    ``padding``. The result is cast to ``dtype``.
    """
    shape = max_lens(X)
    ret = np.full(shape, padding, dtype=np.int32)
    depth = len(shape)
    if depth == 1:
        ret = np.array(X)
    elif depth == 2:
        for i, row in enumerate(X):
            ret[i, :len(row)] = np.array(row)
    elif depth == 3:
        for i, mat in enumerate(X):
            for j, row in enumerate(mat):
                ret[i, j, :len(row)] = np.array(row)
    return ret.astype(dtype)
164,374 | import os
import random
from collections import OrderedDict, defaultdict
from itertools import chain
import json
import sqlite3 as sql
import numpy as np
import spacy
from tqdm import tqdm
from nltk.tokenize import word_tokenize as nltk_word_tokenize
from nltk.stem import WordNetLemmatizer
from space.args import str2bool
from space.data.tokenizer import Tokenizer
from space.utils import ontology, utils
from space.utils.db_ops import MultiWozDB
from space.utils.ontologies import CamRest676Ontology, KvretOntology
def max_lens(X):
    """Return ``[len at depth 0, max len at depth 1, ...]`` for a nested list."""
    lens = [len(X)]
    while isinstance(X[0], list):
        lens.append(max(map(len, X)))
        # Flatten one level and continue with the next depth.
        X = [x for xs in X for x in xs]
    return lens
def list2np(X, padding=0, dtype="int64"):
    """Convert a (possibly ragged) nested list into a right-padded numpy array.

    Handles 1-, 2- and 3-level nesting; shorter rows are padded with
    ``padding``, and the result is cast to ``dtype``.
    """
    shape = max_lens(X)
    # Build in int32 first, cast to the requested dtype on return.
    ret = np.full(shape, padding, dtype=np.int32)
    if len(shape) == 1:
        ret = np.array(X)
    elif len(shape) == 2:
        for i, x in enumerate(X):
            ret[i, :len(x)] = np.array(x)
    elif len(shape) == 3:
        for i, xs in enumerate(X):
            for j, x in enumerate(xs):
                ret[i, j, :len(x)] = np.array(x)
    return ret.astype(dtype)
164,377 | import json
import logging
import os
import sys
import time
from collections import OrderedDict
import torch
import numpy as np
from tqdm import tqdm
from transformers.optimization import AdamW, get_linear_schedule_with_warmup
from space.args import str2bool
from space.data.data_loader import DataLoader
from space.metrics.metrics_tracker import MetricsTracker
from space.metrics.metrics import bleu
from space.metrics.metrics import distinct
def get_logger(log_path, name="default"):
    """Build a DEBUG-level logger mirroring plain messages to stdout and ``log_path``.

    The logger does not propagate to the root logger, and the file at
    ``log_path`` is truncated on every call (mode "w").
    """
    logger = logging.getLogger(name)
    logger.propagate = False
    logger.setLevel(logging.DEBUG)

    formatter = logging.Formatter("%(message)s")
    stream_handler = logging.StreamHandler(sys.stdout)
    file_handler = logging.FileHandler(log_path, mode="w")
    for handler in (stream_handler, file_handler):
        handler.setFormatter(formatter)
        logger.addHandler(handler)
    return logger
164,378 | import json
import logging
import os
import sys
import time
from collections import OrderedDict
import torch
import numpy as np
from tqdm import tqdm
from transformers.optimization import AdamW, get_linear_schedule_with_warmup
from space.args import str2bool
from space.data.data_loader import DataLoader
from space.metrics.metrics_tracker import MetricsTracker
from space.metrics.metrics import bleu
from space.metrics.metrics import distinct
class MetricsTracker(object):
    """Tracks per-batch metric values and their sample-weighted running averages."""

    def __init__(self):
        # Latest metric values reported by the most recent batch.
        self.metrics_val = defaultdict(float)
        # Sample-weighted running averages over the batches seen so far.
        self.metrics_avg = defaultdict(float)
        self.num_samples = 0

    def update(self, metrics, num_samples):
        """Fold a batch of ``metrics`` (weighted by ``num_samples``) into the averages.

        ``None`` values are skipped.
        """
        for name, value in metrics.items():
            if value is None:
                continue
            value = float(value)  # unwrap tensors/arrays into plain floats
            self.metrics_val[name] = value
            prev_total = self.metrics_avg.get(name, 0) * self.num_samples
            self.metrics_avg[name] = (prev_total + value * num_samples) / (self.num_samples + num_samples)
        self.num_samples += num_samples

    def clear(self):
        """Reset all tracked values and the sample counter."""
        self.metrics_val = defaultdict(float)
        self.metrics_avg = defaultdict(float)
        self.num_samples = 0

    def items(self):
        return self.metrics_avg.items()

    def get(self, name):
        """Return the running average for ``name``; raises if nothing was tracked."""
        if self.num_samples == 0:
            raise ValueError("There is no data in Metrics.")
        return self.metrics_avg.get(name)

    def state_dict(self):
        return {
            "metrics_val": self.metrics_val,
            "metrics_avg": self.metrics_avg,
            "num_samples": self.num_samples,
        }

    def load_state_dict(self, state_dict):
        self.metrics_val = state_dict["metrics_val"]
        self.metrics_avg = state_dict["metrics_avg"]
        self.num_samples = state_dict["num_samples"]

    def value(self):
        """Format the latest batch metrics (plus token perplexity) as one string."""
        return self._format(self.metrics_val)

    def summary(self):
        """Format the running averages (plus token perplexity) as one string."""
        return self._format(self.metrics_avg)

    def _format(self, metrics):
        # Shared pretty-printer for value()/summary().
        parts = [f"{key.upper()}-{val:.3f}" for key, val in metrics.items()]
        if "token_nll" in metrics:
            parts.append(f"TOKEN_PPL-{math.exp(metrics['token_nll']):.3f}")
        return " ".join(parts)
def distinct(seqs):
    """Calculate intra/inter distinct-1/2 diversity scores for token sequences.

    Returns ``(intra_dist1, intra_dist2, inter_dist1, inter_dist2)``: intra
    scores are per-sequence type/token ratios averaged over the batch; inter
    scores are corpus-level ratios over all sequences.
    """
    intra_dist1, intra_dist2 = [], []
    unigrams_all, bigrams_all = Counter(), Counter()
    for seq in seqs:
        uni = Counter(seq)
        bi = Counter(zip(seq, seq[1:]))
        # Tiny epsilons guard against empty / single-token sequences.
        intra_dist1.append((len(uni) + 1e-12) / (len(seq) + 1e-5))
        intra_dist2.append((len(bi) + 1e-12) / (max(0, len(seq) - 1) + 1e-5))
        unigrams_all.update(uni)
        bigrams_all.update(bi)

    inter_dist1 = (len(unigrams_all) + 1e-12) / (sum(unigrams_all.values()) + 1e-5)
    inter_dist2 = (len(bigrams_all) + 1e-12) / (sum(bigrams_all.values()) + 1e-5)
    return np.average(intra_dist1), np.average(intra_dist2), inter_dist1, inter_dist2
def bleu(hyps, refs):
    """ Calculate bleu 1/2.

    ``hyps`` and ``refs`` are parallel lists of token lists. Uses NLTK's
    sentence BLEU with smoothing method 7; BLEU-1 weights unigrams only,
    BLEU-2 averages unigram and bigram precision. Any scoring failure for a
    pair (bare except — including an unavailable ``bleu_score`` module) is
    silently recorded as 0 for that pair.
    """
    bleu_1 = []
    bleu_2 = []
    for hyp, ref in zip(hyps, refs):
        try:
            score = bleu_score.sentence_bleu(
                [ref], hyp,
                smoothing_function=SmoothingFunction().method7,
                weights=[1, 0, 0, 0])
        except:
            score = 0
        bleu_1.append(score)
        try:
            score = bleu_score.sentence_bleu(
                [ref], hyp,
                smoothing_function=SmoothingFunction().method7,
                weights=[0.5, 0.5, 0, 0])
        except:
            score = 0
        bleu_2.append(score)
    # Average the per-sentence scores over the batch.
    bleu_1 = np.average(bleu_1)
    bleu_2 = np.average(bleu_2)
    return bleu_1, bleu_2
def evaluate_generation_result(results):
    """Score generation ``results`` (dicts with "tgt", "preds" and "scores").

    Picks each sample's highest-scoring prediction (when several are given),
    then computes BLEU-1/2, intra/inter distinct-1/2 and the average length.
    Returns a MetricsTracker holding the metrics.
    """
    tgt = [item["tgt"].split(" ") for item in results]
    pred = []
    for item in results:
        best = item["preds"]
        if isinstance(best, list):
            # Multiple candidates: keep the one with the highest score.
            best = best[np.argmax(item["scores"])]
        pred.append(best)
    pred = [p.split(" ") for p in pred]

    metrics = {}
    bleu1, bleu2 = bleu(pred, tgt)
    metrics["bleu_1"] = bleu1
    metrics["bleu_2"] = bleu2

    intra_dist1, intra_dist2, inter_dist1, inter_dist2 = distinct(pred)
    metrics["intra_dist_1"] = intra_dist1
    metrics["intra_dist_2"] = intra_dist2
    metrics["inter_dist_1"] = inter_dist1
    metrics["inter_dist_2"] = inter_dist2
    metrics["len"] = sum(map(len, pred)) / len(pred)

    tracker = MetricsTracker()
    # One bulk update: the metrics already cover the whole result set,
    # so num_samples is 1 (no incremental accumulation).
    tracker.update(metrics, num_samples=1)
    return tracker
164,379 | import json
import logging
import os
import sys
import time
from collections import OrderedDict
import torch
import numpy as np
from tqdm import tqdm
from transformers.optimization import AdamW, get_linear_schedule_with_warmup
from space.args import str2bool
from space.data.data_loader import DataLoader
from space.metrics.metrics_tracker import MetricsTracker
def get_logger(log_path, name="default"):
    """Create a DEBUG logger writing plain messages to stdout and to ``log_path``.

    The file handler truncates ``log_path`` on every call (mode "w"); the
    logger does not propagate to the root logger.
    """
    logger = logging.getLogger(name)
    logger.propagate = False  # avoid double-logging through the root logger
    logger.setLevel(logging.DEBUG)

    formatter = logging.Formatter("%(message)s")

    sh = logging.StreamHandler(sys.stdout)
    sh.setFormatter(formatter)
    logger.addHandler(sh)

    fh = logging.FileHandler(log_path, mode="w")
    fh.setFormatter(formatter)
    logger.addHandler(fh)

    return logger
164,380 | import math
import torch
import numpy as np
from space.args import str2bool
def repeat(var, times):
    """Tile ``var`` ``times`` times along a new second axis, then flatten back.

    For a tensor of shape (B, ...) the result has shape (B*times, ...), with
    each row repeated ``times`` consecutive times. Recurses through lists and
    dicts; any other value is returned unchanged.
    """
    if isinstance(var, list):
        return [repeat(x, times) for x in var]
    elif isinstance(var, dict):
        return {k: repeat(v, times) for k, v in var.items()}
    elif isinstance(var, torch.Tensor):
        var = var.unsqueeze(1)
        expand_times = [1] * len(var.shape)
        expand_times[1] = times
        dtype = var.dtype
        # The repeat is done in float; the original dtype is restored below.
        var = var.float()
        var = var.repeat(*expand_times)
        shape = [var.shape[0] * var.shape[1]] + list(var.shape[2:])
        var = var.reshape(*shape)
        # `.to(dtype)` restores the dtype without the extra copy and the
        # UserWarning that `torch.tensor(existing_tensor, ...)` triggers.
        var = var.to(dtype)
        return var
    else:
        return var
164,381 | import math
import torch
import numpy as np
from space.args import str2bool
def gather(var, idx):
    """Select rows of ``var`` along dim 0 at positions ``idx``, recursing into lists/dicts."""
    if isinstance(var, list):
        return [gather(item, idx) for item in var]
    if isinstance(var, dict):
        return {key: gather(value, idx) for key, value in var.items()}
    if isinstance(var, torch.Tensor):
        return var.index_select(dim=0, index=idx)
    # Non-tensor leaves are passed through untouched.
    return var
164,382 | import logging
import json
import numpy as np
from collections import OrderedDict
from space.utils import ontology
def clean_replace(s, r, t, forward=True, backward=False):
    """Replace every occurrence of ``r`` in ``s`` with ``t``, with word-boundary control.

    With ``forward=True`` the replacement also swallows any alphanumeric run
    immediately after the match; with ``forward=False`` a match followed by an
    alphanumeric character is rejected. ``backward`` analogously controls the
    left boundary. Repeats until no further occurrence is found.
    """
    def clean_replace_single(s, r, t, forward, backward, sidx=0):
        # Performs one replacement; returns the new string and the index just
        # past the replaced span, or -1 when no occurrence remains.
        # idx = s[sidx:].find(r)
        idx = s.find(r)
        if idx == -1:
            return s, -1
        idx_r = idx + len(r)
        if backward:
            # NOTE(review): `s[idx - 1]` is truthy for any character, so this
            # walks idx all the way to 0 — looks like it meant a class test
            # such as `.isalnum()`; confirm intended behaviour.
            while idx > 0 and s[idx - 1]:
                idx -= 1
        elif idx > 0 and s[idx - 1] != ' ':
            return s, -1

        if forward:
            # Extend the replaced span over any trailing alphanumeric run.
            while idx_r < len(s) and (s[idx_r].isalpha() or s[idx_r].isdigit()):
                idx_r += 1
        elif idx_r != len(s) and (s[idx_r].isalpha() or s[idx_r].isdigit()):
            return s, -1
        return s[:idx] + t + s[idx_r:], idx_r

    # source, replace, target = s, r, t
    # count = 0
    sidx = 0
    while sidx != -1:
        s, sidx = clean_replace_single(s, r, t, forward, backward, sidx)
        # count += 1
        # print(s, sidx)
        # if count == 20:
        #     print(source, '\n', replace, '\n', target)
        #     quit()
    return s
164,383 | import logging
import json
import numpy as np
from collections import OrderedDict
from space.utils import ontology
def py2np(list):
    """Convert a Python sequence into a freshly allocated numpy array."""
    arr = np.array(list)
    return arr
164,384 | import logging
import json
import numpy as np
from collections import OrderedDict
from space.utils import ontology
def write_dict(fn, dic):
    """Serialize *dic* to file *fn* as pretty-printed (indent=2) JSON."""
    with open(fn, 'w') as handle:
        handle.write(json.dumps(dic, indent=2))
164,388 | db_tokens = ['<sos_db>', '<eos_db>',
'[book_nores]', '[book_fail]', '[book_success]',
'[db_nores]', '[db_0]', '[db_1]', '[db_2]', '[db_3]']
def get_special_tokens(other_tokens):
    """Return the fixed sentinel tokens (go/eos/sos markers), followed by the
    module-level DB tokens and any caller-supplied extras, in a stable order."""
    base = ['<go_%s>' % s for s in ('r', 'b', 'a', 'd')]
    base += ['<eos_%s>' % s for s in ('u', 'r', 'b', 'a', 'd', 'q')]
    base += ['<sos_%s>' % s for s in ('u', 'r', 'b', 'a', 'd', 'q')]
    return base + db_tokens + other_tokens
164,390 | import re
from space.utils import ontology
def my_clean_text(text):
    """Pad sentence-internal periods with spaces: 'abc.xyz' -> 'abc . xyz',
    'abc. ' -> 'abc . '."""
    # NOTE: the stray 'T' in the character class is kept from the original.
    dotted_pair = re.compile(r'([a-zT]+)\.([a-z])')
    word_dot_space = re.compile(r'(\w+)\.\.? ')
    text = dotted_pair.sub(r'\1 . \2', text)
    return word_dot_space.sub(r'\1 . ', text)
164,391 | import re
from space.utils import ontology
def clean_text(text):
    """Normalise a MultiWOZ utterance: lowercase, fix quotes/punctuation,
    apply a table of known typo/abbreviation fixes, pad internal periods,
    then apply the token substitutions from ``tools/mapping.pair``.

    NOTE(review): ``clean_time`` is defined elsewhere in the module, and the
    mapping file is re-read on every call — hoisting it would be cheaper.
    """
    text = text.strip()
    text = text.lower()
    text = text.replace(u"’", "'")
    text = text.replace(u"‘", "'")
    text = text.replace(';', ',')
    text = text.replace('"', ' ')
    text = text.replace('/', ' and ')
    text = text.replace("don't", "do n't")
    text = clean_time(text)
    # Hand-curated fixes for known typos, postcodes and contractions.
    baddata = { r'c\.b (\d), (\d) ([a-z])\.([a-z])': r'cb\1\2\3\4',
                'c.b. 1 7 d.y': 'cb17dy',
                'c.b.1 7 d.y': 'cb17dy',
                'c.b 25, 9 a.q': 'cb259aq',
                'isc.b 25, 9 a.q': 'is cb259aq',
                'c.b2, 1 u.f': 'cb21uf',
                'c.b 1,2 q.a':'cb12qa',
                '0-122-336-5664': '01223365664',
                'postcodecb21rs': 'postcode cb21rs',
                r'i\.d': 'id',
                ' i d ': 'id',
                'Telephone:01223358966': 'Telephone: 01223358966',
                'depature': 'departure',
                'depearting': 'departing',
                '-type': ' type',
                r"b[\s]?&[\s]?b": "bed and breakfast",
                "b and b": "bed and breakfast",
                r"guesthouse[s]?": "guest house",
                r"swimmingpool[s]?": "swimming pool",
                "wo n\'t": "will not",
                " \'d ": " would ",
                " \'m ": " am ",
                " \'re' ": " are ",
                " \'ll' ": " will ",
                " \'ve ": " have ",
                r'^\'': '',
                r'\'$': '',
                }
    for tmpl, good in baddata.items():
        text = re.sub(tmpl, good, text)
    text = re.sub(r'([a-zT]+)\.([a-z])', r'\1 . \2', text)  # 'abc.xyz' -> 'abc . xyz'
    text = re.sub(r'(\w+)\.\.? ', r'\1 . ', text)  # if 'abc. ' -> 'abc . '
    # Whole-token substitutions from the mapping file (tab-separated pairs).
    with open('tools/mapping.pair', 'r') as fin:
        for line in fin.readlines():
            fromx, tox = line.replace('\n', '').split('\t')
            text = ' ' + text + ' '
            text = text.replace(' ' + fromx + ' ', ' ' + tox + ' ')[1:-1]
    return text
def clean_slot_values(domain, slot, value):
    """Canonicalise one (domain, slot, value) triple from raw annotations.

    Applies ``clean_text`` to the value, then a hand-curated per-domain /
    per-slot table of typo fixes, alias merges and time normalisations
    (MultiWOZ-style data). Slot names themselves may also be renamed
    ('price range' -> 'pricerange', 'arrive by' -> 'arriveby', ...), and a
    final pass maps slot names through ``ontology.normlize_slot_names``.

    Returns the (possibly renamed) slot and the cleaned value; '' means the
    value was deemed unusable.
    """
    value = clean_text(value)
    if not value:
        value = ''
    elif value == 'not mentioned':
        value = ''
        # value = 'not mentioned' # if in DST setting
    # --- attraction domain fixes ---
    elif domain == 'attraction':
        if slot == 'name':
            if value == 't':
                value = ''
            if value=='trinity':
                value = 'trinity college'
        elif slot == 'area':
            if value in ['town centre', 'cent', 'center', 'ce']:
                value = 'centre'
            elif value in ['ely', 'in town', 'museum', 'norwich', 'same area as hotel']:
                value = ""
            elif value in ['we']:
                value = "west"
        elif slot == 'type':
            if value in ['m', 'mus', 'musuem']:
                value = 'museum'
            elif value in ['art', 'architectural']:
                value = "architecture"
            elif value in ['churches']:
                value = "church"
            elif value in ['coll']:
                value = "college"
            elif value in ['concert', 'concerthall']:
                value = 'concert hall'
            elif value in ['night club']:
                value = 'nightclub'
            elif value in ['mutiple sports', 'mutliple sports', 'sports', 'galleria']:
                value = 'multiple sports'
            elif value in ['ol', 'science', 'gastropub', 'la raza']:
                value = ''
            elif value in ['swimmingpool', 'pool']:
                value = 'swimming pool'
            elif value in ['fun']:
                value = 'entertainment'
    # --- hotel domain fixes ---
    elif domain == 'hotel':
        if slot == 'area':
            if value in ['cen', 'centre of town', 'near city center', 'center']:
                value = 'centre'
            elif value in ['east area', 'east side']:
                value = 'east'
            elif value in ['in the north', 'north part of town']:
                value = 'north'
            elif value in ['we']:
                value = "west"
        elif slot == "day":
            if value == "monda":
                value = "monday"
            elif value == "t":
                value = "tuesday"
        elif slot == 'name':
            if value == 'uni':
                value = 'university arms hotel'
            elif value == 'university arms':
                value = 'university arms hotel'
            elif value == 'acron':
                value = 'acorn guest house'
            elif value == 'ashley':
                value = 'ashley hotel'
            elif value == 'arbury lodge guesthouse':
                value = 'arbury lodge guest house'
            elif value == 'la':
                value = 'la margherit'
            elif value == 'no':
                value = ''
        elif slot == 'internet':
            if value == 'does not':
                value = 'no'
            elif value in ['y', 'free', 'free internet']:
                value = 'yes'
            elif value in ['4']:
                value = ''
        elif slot == 'parking':
            if value == 'n':
                value = 'no'
            elif value in ['free parking']:
                value = 'yes'
            elif value in ['y']:
                value = 'yes'
        elif slot in ['pricerange', 'price range']:
            slot = 'pricerange'
            if value == 'moderately':
                value = 'moderate'
            elif value in ['any']:
                value = "do n't care"
            # NOTE(review): duplicate of the branch above — unreachable.
            elif value in ['any']:
                value = "do n't care"
            elif value in ['inexpensive']:
                value = "cheap"
            elif value in ['2', '4']:
                value = ''
        elif slot == 'stars':
            if value == 'two':
                value = '2'
            elif value == 'three':
                value = '3'
            elif value in ['4-star', '4 stars', '4 star', 'four star', 'four stars']:
                value= '4'
        elif slot == 'type':
            if value == '0 star rarting':
                value = ''
            elif value == 'guesthouse':
                value = 'guest house'
            elif value not in ['hotel', 'guest house', "do n't care"]:
                value = ''
    # --- restaurant domain fixes ---
    elif domain == 'restaurant':
        if slot == "area":
            if value in ["center", 'scentre', "center of town", "city center", "cb30aq", "town center", 'centre of cambridge', 'city centre']:
                value = "centre"
            elif value == "west part of town":
                value = "west"
            elif value == "n":
                value = "north"
            elif value in ['the south']:
                value = 'south'
            elif value not in ['centre', 'south', "do n't care", 'west', 'east', 'north']:
                value = ''
        elif slot == "day":
            if value == "monda":
                value = "monday"
            elif value == "t":
                value = "tuesday"
        elif slot in ['pricerange', 'price range']:
            slot = 'pricerange'
            if value in ['moderately', 'mode', 'mo']:
                value = 'moderate'
            elif value in ['not']:
                value = ''
            elif value in ['inexpensive', 'ch']:
                value = "cheap"
        elif slot == "food":
            if value == "barbecue":
                value = "barbeque"
        # NOTE(review): unreachable — 'pricerange' is caught earlier above.
        elif slot == "pricerange":
            if value == "moderately":
                value = "moderate"
        elif slot == "time":
            if value == "9:00":
                value = "09:00"
            elif value == "9:45":
                value = "09:45"
            elif value == "1330":
                value = "13:30"
            elif value == "1430":
                value = "14:30"
            elif value == "9:15":
                value = "09:15"
            elif value == "9:30":
                value = "09:30"
            elif value == "1830":
                value = "18:30"
            elif value == "9":
                value = "09:00"
            elif value == "2:00":
                value = "14:00"
            elif value == "1:00":
                value = "13:00"
            elif value == "3:00":
                value = "15:00"
    # --- taxi domain fixes (times normalised to HH:MM) ---
    elif domain == 'taxi':
        if slot in ['arriveBy', 'arrive by']:
            slot = 'arriveby'
            if value == '1530':
                value = '15:30'
            elif value == '15 minutes':
                value = ''
        elif slot in ['leaveAt', 'leave at']:
            slot = 'leaveat'
            if value == '1:00':
                value = '01:00'
            elif value == '21:4':
                value = '21:04'
            elif value == '4:15':
                value = '04:15'
            elif value == '5:45':
                value = '05:45'
            elif value == '0700':
                value = '07:00'
            elif value == '4:45':
                value = '04:45'
            elif value == '8:30':
                value = '08:30'
            elif value == '9:30':
                value = '09:30'
            value = value.replace(".", ":")
    # --- train domain fixes (times normalised to HH:MM) ---
    elif domain == 'train':
        if slot in ['arriveBy', 'arrive by']:
            slot = 'arriveby'
            if value == '1':
                value = '01:00'
            elif value in ['does not care', 'doesnt care', "doesn't care"]:
                value = "do n't care"
            elif value == '8:30':
                value = '08:30'
            elif value == 'not 15:45':
                value = ''
            value = value.replace(".", ":")
        elif slot == 'day':
            if value =='doesnt care' or value == "doesn't care":
                value = "do n't care"
        elif slot in ['leaveAt', 'leave at']:
            slot = 'leaveat'
            if value == '2:30':
                value = '02:30'
            elif value == '7:54':
                value = '07:54'
            elif value == 'after 5:45 pm':
                value = '17:45'
            elif value in ['early evening', 'friday', 'sunday', 'tuesday', 'afternoon']:
                value = ''
            elif value == '12':
                value = '12:00'
            elif value == '1030':
                value = '10:30'
            elif value == '1700':
                value = '17:00'
            elif value in ['does not care', 'doesnt care', 'do nt care', "doesn't care"]:
                value = "do n't care"
            value = value.replace(".", ":")
    # Final domain-independent canonicalisation of "don't care" and slot names.
    if value in ['dont care', "don't care", "do nt care", "doesn't care"]:
        value = "do n't care"
    if ontology.normlize_slot_names.get(slot):
        slot = ontology.normlize_slot_names[slot]
    return slot, value
164,392 | from space.utils.decorators import ignore_nodes
def jaccard_dis_sim(x, y):
    """
    Jaccard similarity of two iterables.

    Returns ``(score, 1)`` when the union is non-empty, else ``(0., 0)``;
    the second element flags whether the score is meaningful.
    """
    xs, ys = set(x), set(y)
    union = xs | ys
    if not union:
        return 0., 0
    return len(xs & ys) / float(len(union)), 1
def clean_frame(frame):
    """Normalise domain keys (strip + lowercase) and clean every per-domain
    sub-frame via ``clean_domain_frame``."""
    return {
        domain.strip().lower(): clean_domain_frame(frame=domain_frame)
        for domain, domain_frame in frame.items()
    }
def construct_frame_graph(frame):
    """Flatten a {domain: domain_frame} dialog-act frame into n-gram lists.

    Per-domain pieces come from ``construct_domain_frame_graph`` (defined
    elsewhere); domain-prefixed edges/paths are built here. Returns ten
    lists: 1-gram nodes, 2-gram edges, 3-gram paths and the 4-gram
    domain-act-slot-value paths, in the order unpacked by ``tree_edit_score``.
    """
    assert frame
    domain_nodes, act_nodes, slot_nodes, value_nodes = [], [], [], []  # 1-gram list (nodes)
    domain_act_edges, act_slot_edges, slot_value_edges = [], [], []  # 2-gram list (edges)
    domain_act_slot_paths, act_slot_value_paths = [], []  # 3-gram list (paths)
    domain_act_slot_value_paths = []  # 4-gram list (paths)
    domain_nodes.extend(list(frame.keys()))
    for domain, domain_frame in frame.items():
        single_act_nodes, single_slot_nodes, single_value_nodes, single_act_slot_edges, single_slot_value_edges, \
            single_act_slot_value_paths = construct_domain_frame_graph(frame=domain_frame)
        act_nodes.extend(single_act_nodes)
        slot_nodes.extend(single_slot_nodes)
        value_nodes.extend(single_value_nodes)
        act_slot_edges.extend(single_act_slot_edges)
        slot_value_edges.extend(single_slot_value_edges)
        act_slot_value_paths.extend(single_act_slot_value_paths)
        # Prefix per-domain grams with the domain to form cross-level paths.
        domain_act_edges.extend([f'{domain}-{act}' for act in single_act_nodes])
        domain_act_slot_paths.extend([f'{domain}-{act_slot}' for act_slot in single_act_slot_edges])
        domain_act_slot_value_paths.extend([f'{domain}-{act_slot_value}'
                                            for act_slot_value in single_act_slot_value_paths])
    return domain_nodes, act_nodes, slot_nodes, value_nodes, domain_act_edges, act_slot_edges, slot_value_edges, \
        domain_act_slot_paths, act_slot_value_paths, domain_act_slot_value_paths
def tree_edit_score(frame1, frame2):
    """Similarity in [0, 1] between two dialog-act frames.

    Both frames are cleaned, flattened into n-gram node/edge/path lists via
    ``construct_frame_graph``, and scored as the mean Jaccard similarity over
    the ten gram levels (levels whose union is empty are excluded from the
    mean via the second tuple element of ``jaccard_dis_sim``).
    """
    # deal with empty frame
    if not (frame1 and frame2):
        return 0.
    # clean frame
    frame1 = clean_frame(frame=frame1)
    frame2 = clean_frame(frame=frame2)
    if frame1 == frame2:
        return 1.
    # construct frame graph
    domain_nodes1, act_nodes1, slot_nodes1, value_nodes1, domain_act_edges1, act_slot_edges1, slot_value_edges1, \
        domain_act_slot_paths1, act_slot_value_paths1, domain_act_slot_value_paths1 = \
        construct_frame_graph(frame=frame1)
    domain_nodes2, act_nodes2, slot_nodes2, value_nodes2, domain_act_edges2, act_slot_edges2, slot_value_edges2, \
        domain_act_slot_paths2, act_slot_value_paths2, domain_act_slot_value_paths2 = \
        construct_frame_graph(frame=frame2)
    # compute individual score
    domain_score = jaccard_dis_sim(domain_nodes1, domain_nodes2)
    act_score = jaccard_dis_sim(act_nodes1, act_nodes2)
    slot_score = jaccard_dis_sim(slot_nodes1, slot_nodes2)
    value_score = jaccard_dis_sim(value_nodes1, value_nodes2)
    domain_act_score = jaccard_dis_sim(domain_act_edges1, domain_act_edges2)
    act_slot_score = jaccard_dis_sim(act_slot_edges1, act_slot_edges2)
    slot_value_score = jaccard_dis_sim(slot_value_edges1, slot_value_edges2)
    domain_act_slot_score = jaccard_dis_sim(domain_act_slot_paths1, domain_act_slot_paths2)
    act_slot_value_score = jaccard_dis_sim(act_slot_value_paths1, act_slot_value_paths2)
    domain_act_slot_value_score = jaccard_dis_sim(domain_act_slot_value_paths1, domain_act_slot_value_paths2)
    # compute combined score: average only over levels with a non-empty union
    score, num_score = 0., 0
    for single_score in (domain_score, act_score, slot_score, value_score, domain_act_score, act_slot_score,
                         slot_value_score, domain_act_slot_score, act_slot_value_score,
                         domain_act_slot_value_score):
        score += single_score[0]
        num_score += single_score[1]
    score = score / num_score
    return score
164,393 | import json
import math
from collections import Counter
import numpy as np
from nltk.util import ngrams
from sklearn.metrics import f1_score
from space.utils import ontology, utils
from space.utils.clean_dataset import clean_slot_values
def setsub(a,b):
    """Fuzzy subset test.

    Returns True iff every element of *a* either matches some element of *b*
    via the external ``similar`` predicate, or contains one of the
    ``useless_constraint`` substrings (in which case the mismatch is ignored).
    Returns False at the first non-ignorable leftover.
    """
    junks_a = []  # elements of *a* with no similar counterpart in *b*
    useless_constraint = ['temperature','week','est ','quick','reminder','near']
    for i in a:
        flg = False
        for j in b:
            if similar(i,j):
                flg = True
        if not flg:
            junks_a.append(i)
    for junk in junks_a:
        flg = False
        for item in useless_constraint:
            if item in junk:
                flg = True
        if not flg:
            return False
    return True
def setsim(a, b):
    """Symmetric fuzzy set equality: each side must fuzzily contain the other."""
    set_a, set_b = set(a), set(b)
    return setsub(set_a, set_b) and setsub(set_b, set_a)
164,394 | import json
import math
from collections import Counter
import numpy as np
from nltk.util import ngrams
from sklearn.metrics import f1_score
from space.utils import ontology, utils
from space.utils.clean_dataset import clean_slot_values
def DAEvaluate(preds, labels):
    """Micro-averaged F1 between predicted and gold dialog-act label arrays.

    Returns ``{'f1_micro': score}``; other averaging schemes ('macro',
    'weighted', 'samples') were deliberately dropped upstream.
    """
    y_pred = np.array(preds)
    y_true = np.array(labels)
    averages = ['micro']
    return {
        "f1_{}".format(avg): f1_score(y_true=y_true, y_pred=y_pred, average=avg)
        for avg in averages
    }
164,403 | import argparse
import logging
import os
import random
import glob
import json
import math
import re
import numpy as np
import torch
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler)
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from transformers import (WEIGHTS_NAME, BertConfig, BertTokenizer)
from transformers import AdamW
from transformers import get_linear_schedule_with_warmup
from bert_models import BertPretrain
from modeling_bert_dst import (BertForDST)
from data_processors import PROCESSORS
from utils_dst import (convert_examples_to_features)
from tensorlistdataset import (TensorListDataset)
logger = logging.getLogger(__name__)
def batch_to_device(batch, device):
def eval_metric(model, features, total_loss, per_slot_per_example_loss, per_slot_class_logits, per_slot_start_logits, per_slot_end_logits, per_slot_refer_logits):
def predict_and_format(model, tokenizer, features, per_slot_class_logits, per_slot_start_logits, per_slot_end_logits, per_slot_refer_logits, ids, input_ids_unmasked, values, inform, prefix, ds):
def load_and_cache_examples(args, model, tokenizer, processor, evaluate=False):
def evaluate(args, model, tokenizer, processor, prefix=""):
    """Run DST evaluation over the eval split.

    Streams batches sequentially (dialog turns in order), threads the
    per-slot ``diag_state`` across turns of the same dialog, accumulates
    metric tensors from ``eval_metric`` and formatted predictions from
    ``predict_and_format``, writes predictions to a JSON file in
    ``args.output_dir``, and returns the averaged metrics.
    """
    dataset, features = load_and_cache_examples(args, model, tokenizer, processor, evaluate=True)
    if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
        os.makedirs(args.output_dir)
    args.eval_batch_size = args.per_gpu_eval_batch_size
    eval_sampler = SequentialSampler(dataset) # Note that DistributedSampler samples randomly
    eval_dataloader = DataLoader(dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
    # Eval!
    logger.info("***** Running evaluation {} *****".format(prefix))
    logger.info("  Num examples = %d", len(dataset))
    logger.info("  Batch size = %d", args.eval_batch_size)
    all_results = []
    all_preds = []
    ds = {slot: 'none' for slot in model.slot_list}
    with torch.no_grad():
        # One running dialog-state slot value per example slot in the batch.
        diag_state = {slot: torch.tensor([0 for _ in range(args.eval_batch_size)]).to(args.device) for slot in model.slot_list}
        for batch in tqdm(eval_dataloader, desc="Evaluating"):
            model.eval()
            batch = batch_to_device(batch, args.device)
            # Reset dialog state if turn is first in the dialog.
            turn_itrs = [features[i.item()].guid.split('-')[2] for i in batch[9]]
            reset_diag_state = np.where(np.array(turn_itrs) == '0')[0]
            for slot in model.slot_list:
                for i in reset_diag_state:
                    diag_state[slot][i] = 0
            with torch.no_grad():
                inputs = {'input_ids': batch[0],
                          'input_mask': batch[1],
                          'segment_ids': batch[2],
                          'start_pos': batch[3],
                          'end_pos': batch[4],
                          'inform_slot_id': batch[5],
                          'refer_id': batch[6],
                          'diag_state': diag_state,
                          'class_label_id': batch[8]}
                unique_ids = [features[i.item()].guid for i in batch[9]]
                values = [features[i.item()].values for i in batch[9]]
                input_ids_unmasked = [features[i.item()].input_ids_unmasked for i in batch[9]]
                inform = [features[i.item()].inform for i in batch[9]]
                outputs = model(**inputs)
                # Update dialog state for next turn.
                for slot in model.slot_list:
                    updates = outputs[2][slot].max(1)[1]
                    for i, u in enumerate(updates):
                        if u != 0:
                            diag_state[slot][i] = u
            results = eval_metric(model, inputs, outputs[0], outputs[1], outputs[2], outputs[3], outputs[4], outputs[5])
            preds, ds = predict_and_format(model, tokenizer, inputs, outputs[2], outputs[3], outputs[4], outputs[5], unique_ids, input_ids_unmasked, values, inform, prefix, ds)
            all_results.append(results)
            all_preds.append(preds)
    all_preds = [item for sublist in all_preds for item in sublist] # Flatten list
    # Generate final results
    final_results = {}
    for k in all_results[0].keys():
        final_results[k] = torch.stack([r[k] for r in all_results]).mean()
    # Write final predictions (for evaluation with external tool)
    output_prediction_file = os.path.join(args.output_dir, "pred_res.%s.%s.json" % (args.predict_type, prefix))
    with open(output_prediction_file, "w") as f:
        json.dump(all_preds, f, indent=2)
    return final_results
164,406 | import os
import torch
from collections import OrderedDict
def get_match_value(name, state_dict_numpy):
    """Map a HuggingFace BERT parameter name to the matching tensor from a
    SPACE/UnifiedTransformer state dict.

    Returns ``None`` for the pooler (no counterpart in the source model);
    QKV weights/biases are carved out of the fused ``linear_qkv`` tensor.
    Raises ValueError for unrecognised per-layer parameter names.
    """
    # Embedding / MLM-head parameters map one-to-one.
    direct = {
        'bert.embeddings.word_embeddings.weight': 'embedder.token_embedding.weight',
        'bert.embeddings.position_embeddings.weight': 'embedder.pos_embedding.weight',
        'bert.embeddings.token_type_embeddings.weight': 'embedder.type_embedding.weight',
        'bert.embeddings.LayerNorm.weight': 'embed_layer_norm.weight',
        'bert.embeddings.LayerNorm.bias': 'embed_layer_norm.bias',
        'cls.predictions.transform.dense.weight': 'mlm_transform.0.weight',
        'cls.predictions.transform.dense.bias': 'mlm_transform.0.bias',
        'cls.predictions.transform.LayerNorm.weight': 'mlm_transform.2.weight',
        'cls.predictions.transform.LayerNorm.bias': 'mlm_transform.2.bias',
        'cls.predictions.bias': 'mlm_bias',
        'cls.predictions.decoder.weight': 'embedder.token_embedding.weight',
        'cls.predictions.decoder.bias': 'mlm_bias',
    }
    if name in direct:
        return state_dict_numpy[direct[name]]
    if name in ('bert.pooler.dense.weight', 'bert.pooler.dense.bias'):
        return None  # the source model has no pooler counterpart
    # Per-layer parameters: 'bert.encoder.layer.<num>.<suffix>'.
    num = name.split('.')[3]
    assert num in [str(i) for i in range(12)]
    suffix = name[len(f'bert.encoder.layer.{num}.'):]
    # Q/K/V are packed along dim 0 of the fused qkv tensor, 768 each.
    qkv_slices = {
        'attention.self.query': slice(0, 768),
        'attention.self.key': slice(768, 1536),
        'attention.self.value': slice(1536, None),
    }
    for part, piece in qkv_slices.items():
        for kind in ('weight', 'bias'):
            if suffix == f'{part}.{kind}':
                return state_dict_numpy[f'layers.{num}.attn.linear_qkv.{kind}'][piece]
    # Remaining per-layer parameters map one-to-one.
    per_layer = {
        'attention.output.dense.weight': f'layers.{num}.attn.linear_out.weight',
        'attention.output.dense.bias': f'layers.{num}.attn.linear_out.bias',
        'attention.output.LayerNorm.weight': f'layers.{num}.attn_norm.weight',
        'attention.output.LayerNorm.bias': f'layers.{num}.attn_norm.bias',
        'intermediate.dense.weight': f'layers.{num}.ff.linear_hidden.0.weight',
        'intermediate.dense.bias': f'layers.{num}.ff.linear_hidden.0.bias',
        'output.dense.weight': f'layers.{num}.ff.linear_out.weight',
        'output.dense.bias': f'layers.{num}.ff.linear_out.bias',
        'output.LayerNorm.weight': f'layers.{num}.ff_norm.weight',
        'output.LayerNorm.bias': f'layers.{num}.ff_norm.bias',
    }
    if suffix in per_layer:
        return state_dict_numpy[per_layer[suffix]]
    raise ValueError('No matched name in state_dict_numpy!')
def space2hug(input_template, input_pt, output_pt, restore=True):
    """Convert a SPACE checkpoint into HuggingFace-BERT layout.

    Walks the parameter names of the template checkpoint, pulls the matching
    tensor from the SPACE checkpoint via ``get_match_value`` and writes the
    converted OrderedDict to *output_pt*. Parameters without a counterpart
    keep the template value when ``restore`` is True, else are dropped.
    """
    converted = OrderedDict()
    template = torch.load(input_template, map_location=lambda storage, loc: storage)
    source = torch.load(input_pt, map_location=lambda storage, loc: storage)
    # Strip a DataParallel 'module.' prefix if the checkpoint carries one.
    if 'module.' in list(source.keys())[0]:
        stripped = OrderedDict()
        for key, tensor in source.items():
            assert key[:7] == 'module.'
            stripped[key[7:]] = tensor
        source = stripped
    for name, value in template.items():
        match_value = get_match_value(name, source)
        if match_value is None:
            print(f'Parm {name} is not existed! Restore: [{restore}]')
            if restore:
                converted[name] = value
            continue
        # Converted tensor must agree with the template in shape and dtype.
        assert match_value.shape == value.shape
        assert match_value.dtype == value.dtype
        converted[name] = match_value
    torch.save(converted, output_pt)
164,407 | import re
import ast
import json
import random
import bisect
import argparse
import pandas as pd
from tqdm import tqdm
from collections import defaultdict
from sacrebleu import corpus_bleu, sentence_bleu
import numpy as np
def get_turn_dst(test):
    """Extract the cumulative belief state after every user turn.

    For each user turn the following system turn's metadata is scanned; every
    non-empty 'book' (except the 'booked' list) and 'semi' slot is recorded as
    '<domain>-<slot>'. Slots accumulate across turns of one dialog, and a
    snapshot is stored under '<dialog_id>-<user_turn_index + 1>'.
    """
    turn_dst = {}
    for dial_id, dialog in test.items():
        slots = {}
        for idx, turn in enumerate(dialog['log']):
            if turn['tag'] != 'user':
                continue
            metadata = dialog['log'][idx + 1]['metadata']
            for domain, frame in metadata.items():
                for slot, value in frame['book'].items():
                    # 'booked' holds confirmed bookings, not a slot value.
                    if slot != 'booked' and value:
                        slots[f'{domain}-{slot}'] = value
                for slot, value in frame['semi'].items():
                    if value:
                        slots[f'{domain}-{slot}'] = value
            turn_dst[f'{dial_id}-{idx + 1}'] = slots.copy()
    return turn_dst
164,408 | import re
import ast
import json
import random
import bisect
import argparse
import pandas as pd
from tqdm import tqdm
from collections import defaultdict
from sacrebleu import corpus_bleu, sentence_bleu
import numpy as np
# Natural-language prompt templates per (domain, slot): DB_PROMPT[domain][slot]
# prefixes the slot's value when a DB record is verbalised. The slot keys also
# define which DB columns are matched/kept by the query helpers below.
DB_PROMPT = {'restaurant': {'area': 'The area of restaurant is ',
                            'pricerange': 'The pricerange of restaurant is ',
                            'name': 'The name of restaurant is ',
                            'food': 'The food of restaurant is ',
                            'phone': 'The phone of restaurant is ',
                            'postcode': 'The postcode of restaurant is ',
                            'address': 'The address of restaurant is ',
                            'id': 'The id is '},
             'attraction': {'name': 'The name of attraction is',
                            'type': 'The type of attraction is',
                            'area': 'The area of attraction is',
                            'phone': 'The phone of attraction is ',
                            'postcode': 'The postcode of attraction is ',
                            'address': 'The address of attraction is ',
                            'openhours': 'The openhours of attraction is ',
                            'entrance fee': 'The entrance fee of attraction is ',
                            'id': 'The id is '},
             'train': {'departure': 'The departure of train is',
                       'destination': 'The destination of train is',
                       'leaveat': 'The leaveAt of train is',
                       'day': 'The day of train is',
                       'arriveby': 'The arriveBy of train is',
                       'trainid': 'The trainID of train is',
                       'duration': 'The duration of train is',
                       'price': 'The price of per ticket is '},
             'hotel': {'stars': 'The stars of hotel is',
                       'pricerange': 'The pricerange of hotel is',
                       'name': 'The name of hotel is',
                       'area': 'The area of hotel is',
                       'type': 'The type of hotel is',
                       'address': 'The address of hotel is',
                       'postcode': 'The postcode of hotel is ',
                       'phone': 'The phone of hotel is ',
                       'internet': 'Whether hotel has internet ',
                       'parking': 'Whether hotel has parking ',
                       'id': 'The id is '},
             'taxi': {'taxi_types': 'The car type of taxi is '},
             'police': {'name': 'The name of police is ',
                        'address': 'The address of police is ',
                        'phone': 'The phone of police is '},
             'hospital': {'department': 'The department of hospital is ',
                          'phone': 'The phone of hospital is '}}
def get_db_query(turn_dst):
    """Decide at which turns each dialog needs a fresh DB query.

    *turn_dst* maps '<dial>-<turn>' to that turn's belief state (insertion
    order must follow dialog order). A query is recorded at a turn whenever
    any DB-relevant slot changed versus the previous turn of the same dialog;
    the last turn of every dialog is always recorded (the `else` branch fires
    when the dialog id changes).
    """
    key_list = list(turn_dst.keys())
    db_query = defaultdict(list)
    for idx, k in enumerate(tqdm(key_list)):
        dial, turn = k.split('-')[0], k.split('-')[1]
        if idx > 0:
            last_key = key_list[idx - 1]
            if dial == last_key.split('-')[0]:
                for key, v in turn_dst[last_key].items():
                    domain = key.split('-')[0]
                    slot = key.split('-')[1]
                    # 'profile' slots and non-DB slots never trigger a query.
                    if domain != 'profile' and slot in DB_PROMPT[domain] and turn_dst[k][key] != v:
                        db_query[dial].append(int(turn))
                        break
                        # print(turn_dst[last_key][key], turn_dst[k][key], key, k)
            else:
                db_query[last_key.split('-')[0]].append(int(last_key.split('-')[1]))
    # NOTE(review): hard-coded patch for one dialog — presumably a known
    # annotation quirk in the test split; confirm before reuse on other data.
    db_query['SNG0877'] = [25]
    return db_query
164,409 | import re
import ast
import json
import random
import bisect
import argparse
import pandas as pd
from tqdm import tqdm
from collections import defaultdict
from sacrebleu import corpus_bleu, sentence_bleu
import numpy as np
def get_query_db(db, query):
    """Look up DB rows matching a flat '<domain>-<slot>' -> value query.

    Returns ``(result, db_id)``: matching entries per domain (restricted to
    DB_PROMPT slots) and, for INFORM_DOMAIN domains, the matching entity ids.
    leaveat/arriveby are compared as times; other slots by string equality.

    NOTE(review): relies on module-level globals `args`, `AliGN` and
    `time_str_to_minutes` defined elsewhere; `db_entry` is referenced after
    the slot loop and would raise NameError if a domain query has no slots,
    and the bare `except` silently skips unparsable times — confirm intended.
    """
    query_dict = defaultdict(dict)
    for k, v in query.items():
        cur = k.split('-')
        domain, attribute = cur[0], cur[1]
        query_dict[domain][attribute] = AliGN.get(v, v)
    # print(query_dict)
    result, db_id = {}, {}
    for domain in query_dict.keys():
        domain_data, domain_data_id = [], []
        if domain != 'profile':
            if domain == 'taxi':
                # Taxi has no DB; return a fixed placeholder car.
                domain_data.append({'taxi_colors': 'white',
                                    'car types': "toyota",
                                    'taxi_phone': '000000000000'})
            else:
                for line in db[domain].iterrows():
                    match = 1
                    for k in query_dict[domain]:
                        # print(query_dict[domain][k])
                        db_entry = {k.lower(): v for k, v in dict(line[1]).items()}
                        if k in ['leaveat', 'arriveby']:
                            try:
                                time_str_to_minutes(query_dict[domain][k])
                            except:
                                continue
                            # leaveat: train must depart at or after the request.
                            if k == 'leaveat' and time_str_to_minutes(query_dict[domain][k]) > time_str_to_minutes(
                                    db_entry[k]):
                                # print(time_str_to_minutes(query_dict[domain][k]), time_str_to_minutes(db_entry[k])
                                # print(query_dict[domain][k], db_entry[k])
                                match = 0
                                continue
                            # arriveby: train must arrive at or before the request.
                            elif k == 'arriveby' and time_str_to_minutes(query_dict[domain][k]) < time_str_to_minutes(
                                    db_entry[k]):
                                # print(time_str_to_minutes(query_dict[domain][k]), time_str_to_minutes(db_entry[k]))
                                # print(query_dict[domain][k], db_entry[k])
                                match = 0
                                continue
                        else:
                            if k in DB_PROMPT[domain] and query_dict[domain][k] != str(db_entry[k]):
                                print(k, query_dict[domain][k], db_entry[k])
                                match = 0
                    if match:
                        # print({k:v for k, v in dict(line[1]).items() if k in DB_PROMPT[domain]})
                        entry = {}
                        for k, v in dict(db_entry).items():
                            if k in DB_PROMPT[domain]:
                                entry[k] = str(v).replace('.', '') if k == 'phone' else v
                        if domain in INFORM_DOMAIN:
                            entry_id = entry['trainid'] if domain == 'train' else entry['id']
                            domain_data_id.append(entry_id)
                            if args.e2e:
                                # e2e mode: stop at the first full match.
                                domain_data.append(entry)
                                break
                            else:
                                if not domain_data:
                                    domain_data.append(entry)
        if domain_data:
            result[domain] = domain_data
            db_id[domain] = domain_data_id
            # result[domain] = domain_data[0]
    return result, db_id
def get_db_result(args, db, turn_dst, db_query):
    """Run the DB lookup for every turn at which a fresh query is required.

    *db_query* maps dialog id -> list of query-turn indices; the belief state
    for '<dial>-<turn>' is taken from *turn_dst* and passed to
    ``get_query_db``. Returns {'<dial>-<turn>': (entries, entity_ids)}.

    Bug fix: the call used to be ``get_query_db(args, db, dst)`` but the
    sibling signature is ``get_query_db(db, query)``, so every call raised
    TypeError; ``get_query_db`` reads ``args`` as a module-level global
    instead of taking it as a parameter.
    """
    result = {}
    for dial_id, query_turns in tqdm(db_query.items()):
        for turn in query_turns:
            dst_key = dial_id + '-' + str(turn)
            dst = turn_dst[dst_key]
            db_res = get_query_db(db, dst)
            # print(db_res)
            result[dst_key] = db_res
        # break
    return result
164,410 | import re
import ast
import json
import random
import bisect
import argparse
import pandas as pd
from tqdm import tqdm
from collections import defaultdict
from sacrebleu import corpus_bleu, sentence_bleu
import numpy as np
# Domains whose DB entities must be offered ("informed") for the inform metric.
INFORM_DOMAIN = ['restaurant', 'hotel', 'attraction', 'train']
# Delexicalised placeholder whose presence in a response counts as informing
# the domain's entity (venue name / train id).
inform_special_token = {'restaurant': '[res_name]', 'hotel': '[hot_name]',
                        'attraction': '[att_name]', 'train': '[tra_trainid]'}
def delex(db, resp, dial_id, turn_id, db_query):
    """Delexicalise a response: replace DB values found in *resp* with
    '[<dom3>_<slot>]' placeholders (longest values first, skipping yes/no).

    Returns the sentinel '[]' when there is no query history or, in e2e mode,
    when the informed entity contradicts the ground-truth DB result.

    NOTE(review): reads module-level globals `args`, `gt_db_query`,
    `gt_db_result_idset` and `db_result_id` — defined elsewhere; confirm
    they are populated before calling.
    """
    if not db_query:
        return '[]'
    resp = resp.lower()
    for domain, entry in db.items():
        v2k, values = {}, []
        for k, v in entry.items():
            v2k[str(v).lower()] = k
            values.append(v)
        # Longest values first so e.g. a full address wins over a substring.
        values.sort(key=lambda x:len(str(x)), reverse=True)
        # print(v2k)
        for v in values:
            v = str(v).lower()
            if v in ['yes', 'no']:
                continue
            start = resp.find(v)
            if start != -1:
                slot = v2k[v]
                resp = resp[:start] + f'[{domain[:3]}_{slot}]' + resp[len(v)+start:]
        if domain in inform_special_token and inform_special_token[domain] in resp:
            if args.e2e:
                # e2e: informed entity must appear in the gold DB id set.
                gt_db_key = get_db_key(dial_id, turn_id, gt_db_query)
                gt_db = gt_db_result_idset[gt_db_key]
                db_key = get_db_key(dial_id, turn_id, db_query)
                pred_db = db_result_id[db_key]
                if domain not in gt_db or pred_db[domain][0] not in gt_db[domain]:
                    return '[]'
            else:
                pass
    return resp
def get_db_key(dial_id, turn_id, db_query):
    """Return '<dial_id>-<turn>' for the first recorded DB query at or after
    *turn_id*.

    *db_query* maps dialog ids to a sorted list of query-turn indices;
    ``bisect_left`` finds the first one >= ``int(turn_id)``. Raises
    IndexError if every recorded query happened before *turn_id*.

    Fix: dropped the dead initial ``db_key = dial_id + '-' + str(turn_id)``
    assignment and stopped reusing the same name for the bisect index.
    """
    dial_db = db_query[dial_id]
    pos = bisect.bisect_left(dial_db, int(turn_id))
    return dial_id + '-' + str(dial_db[pos])
def inform_success(dials, db_result, db_query):
    """Compute corpus-level Inform and Success rates over delexicalised
    responses, plus a per-dialog breakdown of matched vs. required sets.

    NOTE(review): `inform_success_result`, `gt_db_result` and `gt_db_query`
    are read as module-level globals; `inform_success_result` is never
    initialised here and `db_wrong`/`turn_num`/`db_wrong_domains` are unused
    — confirm the surrounding script defines them.
    """
    sum_match = sum_succ = 0
    db_wrong = 0
    dials_num, turn_num = len(dials), 0
    for dial_id, dial in dials.items():
        inform_domain, success_slots, db_wrong_domains = set(), set(), set()
        # collect the domains to be informed and the slots required for success
        for domain in dial['goal']:
            if dial['goal'][domain]:
                # print(domain)
                if domain in INFORM_DOMAIN:
                    inform_domain.add(domain)
                if 'reqt' in dial['goal'][domain]:
                    for i in dial['goal'][domain]['reqt']:
                        success_slots.add(f'{domain[:3]}_{i.lower()}')
        # print(inform_domain, success_slots)
        match_inform, match_success = set(), set()
        for turn_id, turn in enumerate(dial['log']):
            if turn['tag'] == 'system' and 'result' in turn:
                # print(turn_id)
                db_key = get_db_key(dial_id, turn_id, db_query)
                gt_db_key = get_db_key(dial_id, turn_id, gt_db_query)
                if gt_db_result[gt_db_key]:
                    gen = ' '.join(turn['result'].split(' '))
                    gen_resp = delex(db_result[db_key], gen, dial_id, turn_id, db_query)  # delexicalised response
                    if gen_resp == '[]':
                        continue
                        # break
                else:
                    continue
                for domain in inform_domain:
                    if inform_special_token[domain] in gen_resp :  # entity placeholder present -> domain informed
                        match_inform.add(domain)
                for success_slot in success_slots:  # requested slot placeholder present -> slot satisfied
                    if success_slot in gen_resp:
                        match_success.add(success_slot)
        inform = 1 if inform_domain == match_inform else 0  # inform iff every goal domain was matched
        success = 1 if inform and success_slots == match_success else 0  # success additionally requires inform
        inform_success_result[dial_id] = [match_inform, inform_domain, match_success, success_slots]
        sum_match += inform
        sum_succ += success
        # break
    return sum_match / dials_num, sum_succ / dials_num, inform_success_result
164,411 | import re
import ast
import json
import random
import bisect
import argparse
import pandas as pd
from tqdm import tqdm
from collections import defaultdict
from sacrebleu import corpus_bleu, sentence_bleu
import numpy as np
def get_db_key(dial_id, turn_id, db_query):
    """Key ('<dial>-<turn>') of the nearest recorded DB query at or after
    *turn_id* for this dialog (query turn lists are kept sorted)."""
    query_turns = db_query[dial_id]
    nearest = bisect.bisect_left(query_turns, int(turn_id))
    return '-'.join([dial_id, str(query_turns[nearest])])
def bleu_delex(db, resp, dial_id, turn_id):
    """Delexicalise *resp* for BLEU: replace DB values found in the lowercased
    response with '[value_<slot>]' placeholders (yes/no values skipped)."""
    resp = resp.lower()
    for domain, entry in db.items():
        for slot, value in entry.items():
            value = str(value).lower()
            if value in ('yes', 'no'):
                continue
            pos = resp.find(value)
            if pos >= 0:
                resp = resp[:pos] + f'[value_{slot}]' + resp[pos + len(value):]
        # Entity placeholders need no further handling for BLEU.
        if domain in inform_special_token and inform_special_token[domain] in resp:
            pass
    return resp
def get_bleu(dials, db_result, db_query):
    """Collect gold and (delexicalized) generated responses per dialogue.

    Gold responses come from 'bleu_gold.json' (keyed by lower-cased dialogue
    id, one entry per system turn). Generated responses are delexicalized
    with bleu_delex when the turn has a non-empty DB result.
    Returns (all_pred_resps, all_gold_resps), both keyed by dialogue id.
    """
    gold_bleu = json.load(open('bleu_gold.json'))
    all_gold_resps, all_pred_resps = {}, {}
    for dial_id, dial in dials.items():
        gold_log = gold_bleu[dial_id.lower()]['log']
        gold_resps, pred_resps = [], []
        for turn_id, turn in enumerate(dial['log']):
            if turn['tag'] != 'system' or 'result' not in turn:
                continue
            # System turn t corresponds to gold entry (t-1)//2.
            gold_resps.append(gold_log[(turn_id - 1) // 2]['resp'])
            db_key = get_db_key(dial_id, turn_id, db_query)
            if db_result[db_key]:
                pred_resps.append(bleu_delex(db_result[db_key], turn['result'], dial_id, turn_id))
            else:
                pred_resps.append(turn['result'])
        all_gold_resps[dial_id] = gold_resps
        all_pred_resps[dial_id] = pred_resps
    return all_pred_resps, all_gold_resps
164,412 | import re
import ast
import json
import random
import bisect
import argparse
import pandas as pd
from tqdm import tqdm
from collections import defaultdict
from sacrebleu import corpus_bleu, sentence_bleu
import numpy as np
def calculate_bleu(input_data, reference_dialogs):
    """Average per-turn sentence BLEU over all dialogues (tqdm progress)."""
    score_sum = 0
    turn_count = 0
    for dialog_id, dialog in tqdm(input_data.items()):
        turn_count += len(dialog)
        for turn_idx in range(len(dialog)):
            hyp = input_data[dialog_id][turn_idx]
            ref = reference_dialogs[dialog_id][turn_idx]
            score_sum += sentence_bleu(hyp, [ref]).score
    return score_sum / turn_count
164,413 | import re
import ast
import json
import random
import bisect
import argparse
import pandas as pd
from tqdm import tqdm
from collections import defaultdict
from sacrebleu import corpus_bleu, sentence_bleu
import numpy as np
# Domains whose entity matches count toward the Inform/Success metrics.
INFORM_DOMAIN = ['restaurant', 'hotel', 'attraction', 'train']
def case_delex(db, resp, dial_id, turn_id):
    """Replace DB values in a response with [<dom3>_<slot>] placeholders.

    Only the first occurrence of each value is substituted and literal
    'yes'/'no' values are left untouched. The response is lower-cased first.
    """
    resp = resp.lower()
    for domain, entry in db.items():
        tag = domain[:3]
        for slot, raw_value in entry.items():
            value = str(raw_value).lower()
            if value in ('yes', 'no'):
                continue
            pos = resp.find(value)
            if pos != -1:
                resp = f'{resp[:pos]}[{tag}_{slot}]{resp[pos + len(value):]}'
    return resp
def case_study(dials, db_query, db_result, turn_dst, inform_success_result):
    """Build a readable per-dialogue transcript for error analysis.

    Each dialogue id maps to a list: the inform/success bookkeeping entry for
    that dialogue, then alternating 'USER:...' utterances and delexicalized
    'SYSTEM:...' responses, each system turn followed by the DB entry used.
    """
    transcripts = {}
    for dial_id, dial in dials.items():
        # Mirrors the inform/success computation; collected for reference.
        inform_domain, success_slots = set(), set()
        for domain, goal in dial['goal'].items():
            if not goal:
                continue
            if domain in INFORM_DOMAIN:
                inform_domain.add(domain)
            if 'reqt' in goal:
                for requested in goal['reqt']:
                    success_slots.add(f'{domain[:3]}_{requested}')
        utts = [inform_success_result[dial_id]]
        for turn_id, turn in enumerate(dial['log']):
            if turn['tag'] == 'user':
                utts.append('USER:' + turn['text'])
            else:
                # Pick the DB result of the nearest query at/after this turn.
                query_turns = db_query[dial_id]
                pos = bisect.bisect_left(query_turns, int(turn_id))
                db_key = dial_id + '-' + str(query_turns[pos])
                utts.append('SYSTEM:' + case_delex(db_result[db_key], turn['result'], dial_id, turn_id))
                utts.append('db:' + str(db_result[db_key]))
        transcripts[dial_id] = utts
    return transcripts
164,414 | from transformers.optimization import AdamW, get_linear_schedule_with_warmup
from transformers import GPT2Tokenizer, GPT2LMHeadModel, GPT2Model
from eval import MultiWozEvaluator
from damd_net import DAMD, cuda_, get_one_hot_input
from reader import MultiWozReader
import utils
from torch.optim import Adam
import torch
import torch.nn as nn
import os
import random
import argparse
import time
import logging
import json
import tqdm
import numpy as np
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
from config import global_config as cfg
import warnings
def parse_arg_cfg(args):
    """Overlay 'key=value' pairs from args.cfg onto the global cfg object.

    Each value is coerced to the type of the existing cfg attribute: bools
    accept the literal 'False' (anything else is True), lists are split on
    commas ('cuda_device' entries are further cast to int). Raises
    ValueError when the current attribute is None (no type to coerce to).
    """
    if not args.cfg:
        return
    for pair in args.cfg:
        key, raw = tuple(pair.split('='))
        attr_type = type(getattr(cfg, key))
        if attr_type == type(None):
            raise ValueError()
        if attr_type is bool:
            value = False if raw == 'False' else True
        elif attr_type is list:
            value = raw.split(',')
            if key == 'cuda_device':
                value = [int(no) for no in value]
        else:
            value = attr_type(raw)
        setattr(cfg, key, value)
    return
164,415 | import json
from sklearn.metrics import f1_score, accuracy_score
import sys
import numpy as np
from dst import ignore_none, default_cleaning, IGNORE_TURNS_TYPE2, paser_bs
import argparse
# Dialogue id -> turn indices with known "Type 2" annotation noise; these
# turns may be excused when scoring joint accuracy (see compute_jacc).
IGNORE_TURNS_TYPE2 = \
    {
        'PMUL1812': [1, 2]
    }
def paser_bs(sent):
    """Split a compacted belief-state span into 'domain slot value' strings.

    The span looks like '<sos_b> [domain] slot value ... <eos_b>'; domain and
    slot tokens are recognized via the module-level all_domain / all_slots
    vocabularies. Returns a de-duplicated list of triples.
    """
    tokens = sent.strip('<sos_b>').strip('<eos_b>').split()
    belief_state = []
    domain_positions = [i for i, tok in enumerate(tokens) if tok in all_domain]
    for n, d_pos in enumerate(domain_positions):
        # The domain's span runs up to the next domain token (or the end).
        d_end = len(tokens) if n + 1 == len(domain_positions) else domain_positions[n + 1]
        domain = tokens[d_pos]
        span = tokens[d_pos + 1:d_end]
        slot_positions = [i for i, tok in enumerate(span) if tok in all_slots]
        for m, s_pos in enumerate(slot_positions):
            s_end = len(span) if m == len(slot_positions) - 1 else slot_positions[m + 1]
            value = ' '.join(span[s_pos + 1:s_end])
            belief_state.append(' '.join([domain, span[s_pos], value]))
    return list(set(belief_state))
def ignore_none(pred_belief, target_belief):
    """Drop empty belief triples and normalize the 'catherine s' spelling.

    Removes every triple containing 'not mentioned' or 'none' from both
    lists, and rewrites 'catherine s' to 'catherines' in predictions so both
    sides use the same spelling.

    Returns the cleaned (pred_belief, target_belief) pair.
    """
    # Bug fix: str.replace returns a new string; the original discarded the
    # result, making the normalization a silent no-op.
    pred_belief = [
        pred.replace('catherine s', 'catherines') if 'catherine s' in pred else pred
        for pred in pred_belief
    ]
    target_belief = [bs for bs in target_belief
                     if 'not mentioned' not in bs and 'none' not in bs]
    pred_belief = [bs for bs in pred_belief
                   if 'not mentioned' not in bs and 'none' not in bs]
    # The original also built a 'dontcare_slots' list here that was never
    # used afterwards; removed as dead code.
    return pred_belief, target_belief
def default_cleaning(pred_belief, target_belief):
    """Apply the standard MultiWOZ slot/value normalization to both lists.

    Each 'domain slot value' (or 'domain book slot value') triple is split,
    passed through the GENERAL_TYPO map and fix_mismatch_jason, then
    re-joined. Returns the normalized (pred, target) pair.
    """
    def _normalize(triples, skip_empty):
        # skip_empty mirrors the original asymmetry: only the predicted
        # list filters out '' / ' ' entries.
        cleaned = []
        for bs in triples:
            if skip_empty and bs in ['', ' ']:
                continue
            parts = bs.split()
            domain = parts[0]
            if 'book' in bs:
                slot, val = ' '.join(parts[1:3]), ' '.join(parts[3:])
            else:
                slot, val = parts[1], ' '.join(parts[2:])
            # NOTE(review): this keys GENERAL_TYPO by *slot* but assigns the
            # mapped entry to *val*; it looks like it should key by val.
            # Kept as-is to preserve behavior — confirm against
            # GENERAL_TYPO's keys.
            if slot in GENERAL_TYPO:
                val = GENERAL_TYPO[slot]
            slot, val = fix_mismatch_jason(slot, val)
            cleaned.append('{} {} {}'.format(domain, slot, val))
        return cleaned

    return _normalize(pred_belief, True), _normalize(target_belief, False)
def compute_jacc(data, path, default_cleaning_flag=True, type2_cleaning_flag=False):
    """Score belief-state predictions against references.

    data maps file_name -> {turn_id: {'bspn': gold span, 'bspn_gen':
    predicted span, ...}}. Computes joint goal accuracy (all triples of a
    turn match exactly), the same metric ignoring '[profile]' triples, and
    per 'domain-slot' accuracy measured on each dialogue's final turn.

    Args:
        data: nested dict of turns with 'bspn' / 'bspn_gen' strings.
        path: unused; kept for interface compatibility with callers.
        default_cleaning_flag: apply the standard MultiWOZ normalization.
        type2_cleaning_flag: excuse turns listed in IGNORE_TURNS_TYPE2.

    Returns:
        (joint_acc, joint_acc_wo_cross, dict_rate)

    Side effects: prints both accuracies and writes every mismatched turn
    to 'bs_error.json'.
    """
    num_turns = 0
    joint_acc = 0
    joint_acc_wo_cross = 0
    error = {}
    clean_tokens = ['<|endoftext|>', ]
    dict_slot_acc_right = {}
    dict_slot_acc_all = {}
    dict_rate = {}
    for file_name in data:
        for turn_id, turn_data in data[file_name].items():
            turn_target = paser_bs(turn_data['bspn'])
            turn_pred = paser_bs(turn_data['bspn_gen'])
            # Bug fix: the original removed items from turn_pred while
            # iterating over it, which silently skips elements after each
            # removal; rebuild the list instead.
            turn_pred = [bs for bs in turn_pred
                         if bs not in clean_tokens + ['', ' '] and bs.split()[-1] != 'none']
            new_turn_pred = []
            for bs in turn_pred:
                for tok in clean_tokens:
                    bs = bs.replace(tok, '').strip()
                new_turn_pred.append(bs)
            turn_pred = new_turn_pred
            turn_pred, turn_target = ignore_none(turn_pred, turn_target)
            # MultiWOZ default cleaning
            if default_cleaning_flag:
                turn_pred, turn_target = default_cleaning(turn_pred, turn_target)
            # Per-slot counts are collected on each dialogue's last turn only.
            if turn_id + 1 not in data[file_name].keys():
                for domain_slot_value in turn_target:
                    domain = domain_slot_value.split()[0]
                    slot = domain_slot_value.split()[1]
                    key = domain + '-' + slot
                    dict_slot_acc_all[key] = dict_slot_acc_all.get(key, 0) + 1
                for pred_domain_slot_value in turn_pred:
                    if pred_domain_slot_value in set(turn_target):
                        domain = pred_domain_slot_value.split()[0]
                        slot = pred_domain_slot_value.split()[1]
                        key = domain + '-' + slot
                        dict_slot_acc_right[key] = dict_slot_acc_right.get(key, 0) + 1
            join_flag = False
            # Joint accuracy ignoring cross-domain '[profile]' triples.
            turn_pred_wo_cross = [item for item in turn_pred if '[profile]' not in item]
            turn_target_wo_cross = [item for item in turn_target if '[profile]' not in item]
            if set(turn_target_wo_cross) == set(turn_pred_wo_cross):
                joint_acc_wo_cross += 1
                join_flag = True
            elif type2_cleaning_flag:  # check for possible Type 2 noisy annotations
                flag = (all(bs in turn_pred_wo_cross for bs in turn_target_wo_cross)
                        and all(bs in turn_target_wo_cross for bs in turn_pred_wo_cross))
                if flag:
                    # Bug fix: the original referenced the undefined name
                    # 'dial' here, raising NameError whenever this branch ran.
                    dial_name = file_name.split('.')[0]
                    if dial_name in IGNORE_TURNS_TYPE2 and turn_id in IGNORE_TURNS_TYPE2[dial_name]:
                        pass  # known noisy annotation: ignore this turn
                    else:
                        joint_acc_wo_cross += 1
            # Full joint accuracy over all triples.
            if set(turn_target) == set(turn_pred):
                joint_acc += 1
                join_flag = True
            elif type2_cleaning_flag:  # check for possible Type 2 noisy annotations
                flag = (all(bs in turn_pred for bs in turn_target)
                        and all(bs in turn_target for bs in turn_pred))
                if flag:
                    dial_name = file_name.split('.')[0]  # bug fix: was the undefined name 'dial'
                    if dial_name in IGNORE_TURNS_TYPE2 and turn_id in IGNORE_TURNS_TYPE2[dial_name]:
                        pass  # known noisy annotation: ignore this turn
                    else:
                        joint_acc += 1
                        join_flag = True
            if not join_flag:
                # Record the mismatch for offline inspection.
                if file_name not in error:
                    error[file_name] = {}
                turn_data['gtbs'] = turn_target
                turn_data['predbs'] = turn_pred
                error[file_name][turn_id] = turn_data
            num_turns += 1
    # The original recomputed this inside the turn loop; computing it once
    # at the end yields the same final dict.
    for domain_slot in dict_slot_acc_right.keys():
        dict_rate[domain_slot] = dict_slot_acc_right[domain_slot] / dict_slot_acc_all[domain_slot]
    joint_acc /= num_turns
    joint_acc_wo_cross /= num_turns
    print('joint accuracy: {}'.format(joint_acc))
    print('joint accuracy_wo_cross: {}'.format(joint_acc_wo_cross))
    with open('bs_error.json', "w") as f:
        json.dump(error, f, indent=2)
    return joint_acc, joint_acc_wo_cross, dict_rate
164,416 | import logging
import json
import numpy as np
from collections import OrderedDict
import ontology
The provided code snippet includes necessary dependencies for implementing the `top_k_top_p_filtering` function. Write a Python function `def top_k_top_p_filtering(logits, top_k=0, top_p=0.0, filter_value=-float('Inf'))` to solve the following problem:
Filter a distribution of logits using top-k and/or nucleus (top-p) filtering Args: logits: logits distribution shape (vocabulary size) top_k > 0: keep only top k tokens with highest probability (top-k filtering). top_p > 0.0: keep the top tokens with cumulative probability >= top_p (nucleus filtering). Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751) From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317
Here is the function:
def top_k_top_p_filtering(logits, top_k=0, top_p=0.0, filter_value=-float('Inf')):
    """ Filter a distribution of logits using top-k and/or nucleus (top-p) filtering
        Args:
            logits: logits distribution shape (vocabulary size)
            top_k > 0: keep only top k tokens with highest probability (top-k filtering).
            top_p > 0.0: keep the top tokens with cumulative probability >= top_p (nucleus filtering).
                Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751)
        From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317
    """
    # Only a 1-D (single sequence) logits vector is supported here.
    assert logits.dim() == 1  # batch size 1 for now - could be updated for more but the code would be less clear
    top_k = min(top_k, logits.size(-1))  # never ask for more entries than exist
    if top_k > 0:
        # Mask every logit strictly smaller than the k-th largest one.
        kth_best = torch.topk(logits, top_k)[0][..., -1, None]
        logits[logits < kth_best] = filter_value
    if top_p > 0.0:
        # Sort descending, then mask the tail whose cumulative probability
        # exceeds top_p — shifted right so the most likely token survives.
        sorted_logits, sorted_indices = torch.sort(logits, descending=True)
        cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
        tail_mask = cumulative_probs > top_p
        tail_mask[..., 1:] = tail_mask[..., :-1].clone()
        tail_mask[..., 0] = 0
        logits[sorted_indices[tail_mask]] = filter_value
    return logits
164,417 | import logging
import json
import numpy as np
from collections import OrderedDict
import ontology
def py2np(seq):
    """Convert a Python sequence to a numpy array.

    The parameter was renamed from 'list' (which shadowed the builtin);
    existing callers pass it positionally, so the rename is safe.
    """
    return np.array(seq)
164,418 | import logging
import json
import numpy as np
from collections import OrderedDict
import ontology
def write_dict(fn, dic):
    """Serialize *dic* to file *fn* as pretty-printed (indent=2) JSON."""
    with open(fn, 'w') as fout:
        json.dump(dic, fout, indent=2)
164,419 | import logging
import json
import numpy as np
from collections import OrderedDict
import ontology
def f1_score(label_list, pred_list):
    """F1 between predicted and gold token lists (epsilon-smoothed).

    A prediction counts as a true positive when it appears anywhere in the
    label list; the 1e-10 terms guard against zero denominators.
    """
    true_pos = sum(1 for item in pred_list if item in label_list)
    false_pos = max(0, len(pred_list) - true_pos)
    false_neg = max(0, len(label_list) - true_pos)
    precision = true_pos / (true_pos + false_pos + 1e-10)
    recall = true_pos / (true_pos + false_neg + 1e-10)
    return 2 * precision * recall / (precision + recall + 1e-10)
164,420 | import logging
import json
import numpy as np
from collections import OrderedDict
import ontology
def padSeqs_gpt(sequences, pad_id, maxlen=None):
    """Right-pad token-id sequences with pad_id into one (N, L) float matrix.

    L is the longest sequence length, capped at 1024 (GPT-2's n_ctx); longer
    sequences keep only their last 1024 tokens. Returns (matrix, lengths)
    where lengths holds the original (uncapped) sequence lengths.

    Note: the maxlen parameter is ignored (the original overwrote it too);
    kept for interface compatibility.
    """
    lengths = [len(seq) for seq in sequences]
    # Cap the padded width at GPT-2's context size.
    maxlen = min(np.max(lengths), 1024)
    padded = np.ones((len(sequences), maxlen)) * pad_id
    for row, seq in enumerate(sequences):
        if not len(seq):
            print('empty list was found in padSeqs')
        tail = np.asarray(seq[-maxlen:])  # truncate from the front ('pre')
        padded[row, :len(tail)] = tail    # pad at the end ('post')
    return padded, lengths
164,421 | import logging
import json
import numpy as np
from collections import OrderedDict
import ontology
def padSeqs(sequences, maxlen=None, truncated=False, pad_method='post',
            trunc_method='pre', dtype='int32', value=0.):
    """Pad (and optionally truncate) a batch of sequences to equal length.

    pad_method / trunc_method choose which end is padded / cut ('pre' or
    'post'). When truncated is True, maxlen caps the width; otherwise the
    longest sequence wins. Returns an array of shape (N, L, *sample_shape).
    Raises ValueError for non-iterable input or inconsistent sample shapes.
    """
    if not hasattr(sequences, '__len__'):
        raise ValueError('`sequences` must be iterable.')
    lengths = []
    for seq in sequences:
        if not hasattr(seq, '__len__'):
            raise ValueError('`sequences` must be a list of iterables. '
                             'Found non-iterable: ' + str(seq))
        lengths.append(len(seq))
    num_samples = len(sequences)
    longest = np.max(lengths)
    maxlen = min(longest, maxlen) if (maxlen is not None and truncated) else longest
    # Infer the trailing sample shape from the first non-empty sequence;
    # consistency is verified in the main loop below.
    sample_shape = tuple()
    for seq in sequences:
        if len(seq) > 0:
            sample_shape = np.asarray(seq).shape[1:]
            break
    out = (np.ones((num_samples, maxlen) + sample_shape) * value).astype(dtype)
    for row, seq in enumerate(sequences):
        if not len(seq):
            print('empty list/array was found')
            continue  # empty list/array was found
        if trunc_method == 'pre':
            kept = seq[-maxlen:]
        elif trunc_method == 'post':
            kept = seq[:maxlen]
        else:
            raise ValueError('Truncating type "%s" not understood' % trunc_method)
        kept = np.asarray(kept, dtype=dtype)
        if kept.shape[1:] != sample_shape:
            raise ValueError('Shape of sample %s of sequence at position %s is different from expected shape %s' %
                             (kept.shape[1:], row, sample_shape))
        if pad_method == 'post':
            out[row, :len(kept)] = kept
        elif pad_method == 'pre':
            out[row, -len(kept):] = kept
        else:
            raise ValueError('Padding type "%s" not understood' % pad_method)
    return out
164,422 | import logging
import json
import numpy as np
from collections import OrderedDict
import ontology
The provided code snippet includes necessary dependencies for implementing the `get_glove_matrix` function. Write a Python function `def get_glove_matrix(glove_path, vocab, initial_embedding_np)` to solve the following problem:
return a glove embedding matrix :param self: :param glove_file: :param initial_embedding_np: :return: np array of [V,E]
Here is the function:
def get_glove_matrix(glove_path, vocab, initial_embedding_np):
    """
    Return a glove embedding matrix: rows of the initial matrix are replaced
    by GloVe vectors for every word the vocabulary knows; unknown words keep
    their initial embeddings. Logs how many rows were updated together with
    the before/after matrix statistics.

    :param glove_path: path to the GloVe .txt file (word followed by floats)
    :param vocab: object exposing has_word(w), encode(w) and vocab_size
    :param initial_embedding_np: np array of [V, E] initial embeddings
    :return: np array of [V, E] (float32 copy)
    """
    cnt = 0
    vec_array = initial_embedding_np
    old_avg = np.average(vec_array)
    old_std = np.std(vec_array)
    vec_array = vec_array.astype(np.float32)
    new_avg, new_std = 0, 0
    # Fix: use a context manager so the file is closed even if a line
    # fails to parse (the original leaked the handle on exceptions).
    with open(glove_path, 'r', encoding='UTF-8') as ef:
        for line in ef:
            line = line.strip().split(' ')
            word, vec = line[0], line[1:]
            vec = np.array(vec, np.float32)
            if not vocab.has_word(word):
                continue
            word_idx = vocab.encode(word)
            if word_idx < vocab.vocab_size:
                cnt += 1
                vec_array[word_idx] = vec
                new_avg += np.average(vec)
                new_std += np.std(vec)
    # Fix: guard against ZeroDivisionError when no GloVe word matched.
    if cnt:
        new_avg /= cnt
        new_std /= cnt
    logging.info('%d known embedding. old mean: %f new mean %f, old std %f new std %f' % (cnt, old_avg,
                                                                                          new_avg, old_std, new_std))
    return vec_array
164,423 | import logging
import json
import numpy as np
from collections import OrderedDict
import ontology
def position_encoding_init(self, n_position, d_pos_vec):
    """Sinusoidal position encodings as an (n_position, d_pos_vec) array.

    Row 0 (the padding position) is all zeros; for the remaining rows, even
    dimensions get sin and odd dimensions cos of pos / 10000^(2i/d).
    """
    angles = np.array([
        [pos / np.power(10000, 2 * (dim // 2) / d_pos_vec) for dim in range(d_pos_vec)]
        if pos != 0 else np.zeros(d_pos_vec)
        for pos in range(n_position)
    ])
    angles[1:, 0::2] = np.sin(angles[1:, 0::2])  # even dims -> sin
    angles[1:, 1::2] = np.cos(angles[1:, 1::2])  # odd dims -> cos
    return angles
164,424 | import json, os, re, copy, zipfile
import spacy
import ontology, utils
from collections import OrderedDict
from tqdm import tqdm
from config import global_config as cfg
from db_ops import MultiWozDB
from clean_dataset import clean_slot_values, clean_text
def clean_slot_values(domain, slot, value):
    """Normalize one (domain, slot, value) annotation triple.

    Applies MultiWOZ-specific typo/alias corrections per domain, canonicalizes
    time formats, maps the various "don't care" spellings to "do n't care",
    and renames the slot via ontology.normlize_slot_names.

    Returns (slot, value); an empty value string means "drop this annotation".
    """
    value = clean_text(value)
    if not value:
        value = ''
    elif value == 'not mentioned':
        value = ''
        # value = 'not mentioned' # if in DST setting
    # --- attraction-specific corrections ---
    elif domain == 'attraction':
        if slot == 'name':
            if value == 't':
                value = ''
            if value=='trinity':
                value = 'trinity college'
        elif slot == 'area':
            if value in ['town centre', 'cent', 'center', 'ce']:
                value = 'centre'
            elif value in ['ely', 'in town', 'museum', 'norwich', 'same area as hotel']:
                value = ""
            elif value in ['we']:
                value = "west"
        elif slot == 'type':
            if value in ['m', 'mus', 'musuem']:
                value = 'museum'
            elif value in ['art', 'architectural']:
                value = "architecture"
            elif value in ['churches']:
                value = "church"
            elif value in ['coll']:
                value = "college"
            elif value in ['concert', 'concerthall']:
                value = 'concert hall'
            elif value in ['night club']:
                value = 'nightclub'
            elif value in ['mutiple sports', 'mutliple sports', 'sports', 'galleria']:
                value = 'multiple sports'
            elif value in ['ol', 'science', 'gastropub', 'la raza']:
                value = ''
            elif value in ['swimmingpool', 'pool']:
                value = 'swimming pool'
            elif value in ['fun']:
                value = 'entertainment'
    # --- hotel-specific corrections ---
    elif domain == 'hotel':
        if slot == 'area':
            if value in ['cen', 'centre of town', 'near city center', 'center']:
                value = 'centre'
            elif value in ['east area', 'east side']:
                value = 'east'
            elif value in ['in the north', 'north part of town']:
                value = 'north'
            elif value in ['we']:
                value = "west"
        elif slot == "day":
            if value == "monda":
                value = "monday"
            elif value == "t":
                value = "tuesday"
        elif slot == 'name':
            if value == 'uni':
                value = 'university arms hotel'
            elif value == 'university arms':
                value = 'university arms hotel'
            elif value == 'acron':
                value = 'acorn guest house'
            elif value == 'ashley':
                value = 'ashley hotel'
            elif value == 'arbury lodge guesthouse':
                value = 'arbury lodge guest house'
            elif value == 'la':
                value = 'la margherit'
            elif value == 'no':
                value = ''
        elif slot == 'internet':
            if value == 'does not':
                value = 'no'
            elif value in ['y', 'free', 'free internet']:
                value = 'yes'
            elif value in ['4']:
                value = ''
        elif slot == 'parking':
            if value == 'n':
                value = 'no'
            elif value in ['free parking']:
                value = 'yes'
            elif value in ['y']:
                value = 'yes'
        elif slot in ['pricerange', 'price range']:
            slot = 'pricerange'
            if value == 'moderately':
                value = 'moderate'
            elif value in ['any']:
                value = "do n't care"
            # NOTE(review): duplicate of the branch above — unreachable dead code.
            elif value in ['any']:
                value = "do n't care"
            elif value in ['inexpensive']:
                value = "cheap"
            elif value in ['2', '4']:
                value = ''
        elif slot == 'stars':
            if value == 'two':
                value = '2'
            elif value == 'three':
                value = '3'
            elif value in ['4-star', '4 stars', '4 star', 'four star', 'four stars']:
                value= '4'
        elif slot == 'type':
            if value == '0 star rarting':
                value = ''
            elif value == 'guesthouse':
                value = 'guest house'
            elif value not in ['hotel', 'guest house', "do n't care"]:
                value = ''
    # --- restaurant-specific corrections ---
    elif domain == 'restaurant':
        if slot == "area":
            if value in ["center", 'scentre', "center of town", "city center", "cb30aq", "town center", 'centre of cambridge', 'city centre']:
                value = "centre"
            elif value == "west part of town":
                value = "west"
            elif value == "n":
                value = "north"
            elif value in ['the south']:
                value = 'south'
            elif value not in ['centre', 'south', "do n't care", 'west', 'east', 'north']:
                value = ''
        elif slot == "day":
            if value == "monda":
                value = "monday"
            elif value == "t":
                value = "tuesday"
        elif slot in ['pricerange', 'price range']:
            slot = 'pricerange'
            if value in ['moderately', 'mode', 'mo']:
                value = 'moderate'
            elif value in ['not']:
                value = ''
            elif value in ['inexpensive', 'ch']:
                value = "cheap"
        elif slot == "food":
            if value == "barbecue":
                value = "barbeque"
        # NOTE(review): unreachable — 'pricerange' is caught by the
        # `slot in ['pricerange', 'price range']` branch above.
        elif slot == "pricerange":
            if value == "moderately":
                value = "moderate"
        elif slot == "time":
            if value == "9:00":
                value = "09:00"
            elif value == "9:45":
                value = "09:45"
            elif value == "1330":
                value = "13:30"
            elif value == "1430":
                value = "14:30"
            elif value == "9:15":
                value = "09:15"
            elif value == "9:30":
                value = "09:30"
            elif value == "1830":
                value = "18:30"
            elif value == "9":
                value = "09:00"
            elif value == "2:00":
                value = "14:00"
            elif value == "1:00":
                value = "13:00"
            elif value == "3:00":
                value = "15:00"
    # --- taxi-specific corrections (times normalized to HH:MM) ---
    elif domain == 'taxi':
        if slot in ['arriveBy', 'arrive by']:
            slot = 'arriveby'
            if value == '1530':
                value = '15:30'
            elif value == '15 minutes':
                value = ''
        elif slot in ['leaveAt', 'leave at']:
            slot = 'leaveat'
            if value == '1:00':
                value = '01:00'
            elif value == '21:4':
                value = '21:04'
            elif value == '4:15':
                value = '04:15'
            elif value == '5:45':
                value = '05:45'
            elif value == '0700':
                value = '07:00'
            elif value == '4:45':
                value = '04:45'
            elif value == '8:30':
                value = '08:30'
            elif value == '9:30':
                value = '09:30'
            value = value.replace(".", ":")
    # --- train-specific corrections (times normalized to HH:MM) ---
    elif domain == 'train':
        if slot in ['arriveBy', 'arrive by']:
            slot = 'arriveby'
            if value == '1':
                value = '01:00'
            elif value in ['does not care', 'doesnt care', "doesn't care"]:
                value = "do n't care"
            elif value == '8:30':
                value = '08:30'
            elif value == 'not 15:45':
                value = ''
            value = value.replace(".", ":")
        elif slot == 'day':
            if value =='doesnt care' or value == "doesn't care":
                value = "do n't care"
        elif slot in ['leaveAt', 'leave at']:
            slot = 'leaveat'
            if value == '2:30':
                value = '02:30'
            elif value == '7:54':
                value = '07:54'
            elif value == 'after 5:45 pm':
                value = '17:45'
            elif value in ['early evening', 'friday', 'sunday', 'tuesday', 'afternoon']:
                value = ''
            elif value == '12':
                value = '12:00'
            elif value == '1030':
                value = '10:30'
            elif value == '1700':
                value = '17:00'
            elif value in ['does not care', 'doesnt care', 'do nt care', "doesn't care"]:
                value = "do n't care"
            value = value.replace(".", ":")
    # Canonical "don't care" spelling, applied across all domains.
    if value in ['dont care', "don't care", "do nt care", "doesn't care"]:
        value = "do n't care"
    # Final slot-name normalization via the ontology mapping.
    if ontology.normlize_slot_names.get(slot):
        slot = ontology.normlize_slot_names[slot]
    return slot, value
def get_db_values(value_set_path): # value_set.json, all the domain[slot] values in datasets
    """Build the cleaned DB value set and the belief-span word collection.

    Reads the raw value set and db/ontology.json, cleans every informable
    slot value with clean_slot_values, tokenizes with spaCy, and writes:
      - <value_set_path minus .json>_processed.json  (cleaned values)
      - data/multi-woz-processed/bspn_word_collection.json (bspn vocabulary)
    """
    processed = {}
    bspn_word = []
    nlp = spacy.load('en_core_web_sm')
    with open(value_set_path, 'r') as f: # read value set file in lower
        value_set = json.loads(f.read().lower())
    with open('db/ontology.json', 'r') as f: # read ontology in lower, all the domain-slot values
        otlg = json.loads(f.read().lower())
    for domain, slots in value_set.items(): # add all informable slots to bspn_word, create lists holder for values
        processed[domain] = {}
        bspn_word.append('['+domain+']')
        for slot, values in slots.items():
            s_p = ontology.normlize_slot_names.get(slot, slot)
            if s_p in ontology.informable_slots[domain]:
                bspn_word.append(s_p)
                processed[domain][s_p] = []
    for domain, slots in value_set.items(): # add all words of values of informable slots to bspn_word
        for slot, values in slots.items():
            s_p = ontology.normlize_slot_names.get(slot, slot)
            if s_p in ontology.informable_slots[domain]:
                for v in values:
                    _, v_p = clean_slot_values(domain, slot, v)
                    # spaCy tokenize, then rejoin with single spaces.
                    v_p = ' '.join([token.text for token in nlp(v_p)]).strip()
                    processed[domain][s_p].append(v_p)
                    for x in v_p.split():
                        if x not in bspn_word:
                            bspn_word.append(x)
    for domain_slot, values in otlg.items(): # split domain-slots to domains and slots
        domain, slot = domain_slot.split('-')
        if domain == 'profile':
            continue
        if domain == 'bus':
            domain = 'taxi'
        # Canonicalize ontology slot names to the project's short forms.
        if slot == 'price range':
            slot = 'pricerange'
        if slot == 'book stay':
            slot = 'stay'
        if slot == 'book day':
            slot = 'day'
        if slot == 'book people':
            slot = 'people'
        if slot == 'book time':
            slot = 'time'
        if slot == 'arrive by':
            slot = 'arrive'
        if slot == 'leave at':
            slot = 'leave'
        if slot == 'leaveat':
            slot = 'leave'
        if slot not in processed[domain]: # add all slots and words of values if not already in processed and bspn_word
            processed[domain][slot] = []
            bspn_word.append(slot)
        for v in values:
            _, v_p = clean_slot_values(domain, slot, v)
            v_p = ' '.join([token.text for token in nlp(v_p)]).strip()
            if v_p not in processed[domain][slot]:
                processed[domain][slot].append(v_p)
                for x in v_p.split():
                    if x not in bspn_word:
                        bspn_word.append(x)
    with open(value_set_path.replace('.json', '_processed.json'), 'w') as f:
        json.dump(processed, f, indent=2) # save processed.json
    with open('data/multi-woz-processed/bspn_word_collection.json', 'w') as f:
        json.dump(bspn_word, f, indent=2) # save bspn_word
    print('DB value set processed! ')
164,425 | import json, os, re, copy, zipfile
import spacy
import ontology, utils
from collections import OrderedDict
from tqdm import tqdm
from config import global_config as cfg
from db_ops import MultiWozDB
from clean_dataset import clean_slot_values, clean_text
def clean_slot_values(domain, slot, value):
    """Normalize one (domain, slot, value) annotation triple.

    Applies MultiWOZ-specific typo/alias corrections per domain, canonicalizes
    time formats, maps the various "don't care" spellings to "do n't care",
    and renames the slot via ontology.normlize_slot_names.

    Returns (slot, value); an empty value string means "drop this annotation".
    """
    value = clean_text(value)
    if not value:
        value = ''
    elif value == 'not mentioned':
        value = ''
        # value = 'not mentioned' # if in DST setting
    # --- attraction-specific corrections ---
    elif domain == 'attraction':
        if slot == 'name':
            if value == 't':
                value = ''
            if value=='trinity':
                value = 'trinity college'
        elif slot == 'area':
            if value in ['town centre', 'cent', 'center', 'ce']:
                value = 'centre'
            elif value in ['ely', 'in town', 'museum', 'norwich', 'same area as hotel']:
                value = ""
            elif value in ['we']:
                value = "west"
        elif slot == 'type':
            if value in ['m', 'mus', 'musuem']:
                value = 'museum'
            elif value in ['art', 'architectural']:
                value = "architecture"
            elif value in ['churches']:
                value = "church"
            elif value in ['coll']:
                value = "college"
            elif value in ['concert', 'concerthall']:
                value = 'concert hall'
            elif value in ['night club']:
                value = 'nightclub'
            elif value in ['mutiple sports', 'mutliple sports', 'sports', 'galleria']:
                value = 'multiple sports'
            elif value in ['ol', 'science', 'gastropub', 'la raza']:
                value = ''
            elif value in ['swimmingpool', 'pool']:
                value = 'swimming pool'
            elif value in ['fun']:
                value = 'entertainment'
    # --- hotel-specific corrections ---
    elif domain == 'hotel':
        if slot == 'area':
            if value in ['cen', 'centre of town', 'near city center', 'center']:
                value = 'centre'
            elif value in ['east area', 'east side']:
                value = 'east'
            elif value in ['in the north', 'north part of town']:
                value = 'north'
            elif value in ['we']:
                value = "west"
        elif slot == "day":
            if value == "monda":
                value = "monday"
            elif value == "t":
                value = "tuesday"
        elif slot == 'name':
            if value == 'uni':
                value = 'university arms hotel'
            elif value == 'university arms':
                value = 'university arms hotel'
            elif value == 'acron':
                value = 'acorn guest house'
            elif value == 'ashley':
                value = 'ashley hotel'
            elif value == 'arbury lodge guesthouse':
                value = 'arbury lodge guest house'
            elif value == 'la':
                value = 'la margherit'
            elif value == 'no':
                value = ''
        elif slot == 'internet':
            if value == 'does not':
                value = 'no'
            elif value in ['y', 'free', 'free internet']:
                value = 'yes'
            elif value in ['4']:
                value = ''
        elif slot == 'parking':
            if value == 'n':
                value = 'no'
            elif value in ['free parking']:
                value = 'yes'
            elif value in ['y']:
                value = 'yes'
        elif slot in ['pricerange', 'price range']:
            slot = 'pricerange'
            if value == 'moderately':
                value = 'moderate'
            elif value in ['any']:
                value = "do n't care"
            # NOTE(review): duplicate of the branch above — unreachable dead code.
            elif value in ['any']:
                value = "do n't care"
            elif value in ['inexpensive']:
                value = "cheap"
            elif value in ['2', '4']:
                value = ''
        elif slot == 'stars':
            if value == 'two':
                value = '2'
            elif value == 'three':
                value = '3'
            elif value in ['4-star', '4 stars', '4 star', 'four star', 'four stars']:
                value= '4'
        elif slot == 'type':
            if value == '0 star rarting':
                value = ''
            elif value == 'guesthouse':
                value = 'guest house'
            elif value not in ['hotel', 'guest house', "do n't care"]:
                value = ''
    # --- restaurant-specific corrections ---
    elif domain == 'restaurant':
        if slot == "area":
            if value in ["center", 'scentre', "center of town", "city center", "cb30aq", "town center", 'centre of cambridge', 'city centre']:
                value = "centre"
            elif value == "west part of town":
                value = "west"
            elif value == "n":
                value = "north"
            elif value in ['the south']:
                value = 'south'
            elif value not in ['centre', 'south', "do n't care", 'west', 'east', 'north']:
                value = ''
        elif slot == "day":
            if value == "monda":
                value = "monday"
            elif value == "t":
                value = "tuesday"
        elif slot in ['pricerange', 'price range']:
            slot = 'pricerange'
            if value in ['moderately', 'mode', 'mo']:
                value = 'moderate'
            elif value in ['not']:
                value = ''
            elif value in ['inexpensive', 'ch']:
                value = "cheap"
        elif slot == "food":
            if value == "barbecue":
                value = "barbeque"
        # NOTE(review): unreachable — 'pricerange' is caught by the
        # `slot in ['pricerange', 'price range']` branch above.
        elif slot == "pricerange":
            if value == "moderately":
                value = "moderate"
        elif slot == "time":
            if value == "9:00":
                value = "09:00"
            elif value == "9:45":
                value = "09:45"
            elif value == "1330":
                value = "13:30"
            elif value == "1430":
                value = "14:30"
            elif value == "9:15":
                value = "09:15"
            elif value == "9:30":
                value = "09:30"
            elif value == "1830":
                value = "18:30"
            elif value == "9":
                value = "09:00"
            elif value == "2:00":
                value = "14:00"
            elif value == "1:00":
                value = "13:00"
            elif value == "3:00":
                value = "15:00"
    # --- taxi-specific corrections (times normalized to HH:MM) ---
    elif domain == 'taxi':
        if slot in ['arriveBy', 'arrive by']:
            slot = 'arriveby'
            if value == '1530':
                value = '15:30'
            elif value == '15 minutes':
                value = ''
        elif slot in ['leaveAt', 'leave at']:
            slot = 'leaveat'
            if value == '1:00':
                value = '01:00'
            elif value == '21:4':
                value = '21:04'
            elif value == '4:15':
                value = '04:15'
            elif value == '5:45':
                value = '05:45'
            elif value == '0700':
                value = '07:00'
            elif value == '4:45':
                value = '04:45'
            elif value == '8:30':
                value = '08:30'
            elif value == '9:30':
                value = '09:30'
            value = value.replace(".", ":")
    # --- train-specific corrections (times normalized to HH:MM) ---
    elif domain == 'train':
        if slot in ['arriveBy', 'arrive by']:
            slot = 'arriveby'
            if value == '1':
                value = '01:00'
            elif value in ['does not care', 'doesnt care', "doesn't care"]:
                value = "do n't care"
            elif value == '8:30':
                value = '08:30'
            elif value == 'not 15:45':
                value = ''
            value = value.replace(".", ":")
        elif slot == 'day':
            if value =='doesnt care' or value == "doesn't care":
                value = "do n't care"
        elif slot in ['leaveAt', 'leave at']:
            slot = 'leaveat'
            if value == '2:30':
                value = '02:30'
            elif value == '7:54':
                value = '07:54'
            elif value == 'after 5:45 pm':
                value = '17:45'
            elif value in ['early evening', 'friday', 'sunday', 'tuesday', 'afternoon']:
                value = ''
            elif value == '12':
                value = '12:00'
            elif value == '1030':
                value = '10:30'
            elif value == '1700':
                value = '17:00'
            elif value in ['does not care', 'doesnt care', 'do nt care', "doesn't care"]:
                value = "do n't care"
            value = value.replace(".", ":")
    # Canonical "don't care" spelling, applied across all domains.
    if value in ['dont care', "don't care", "do nt care", "doesn't care"]:
        value = "do n't care"
    # Final slot-name normalization via the ontology mapping.
    if ontology.normlize_slot_names.get(slot):
        slot = ontology.normlize_slot_names[slot]
    return slot, value
def preprocess_db(db_paths): # apply clean_slot_values to all dbs
    """Normalize every domain DB json: lowercase it, clean each string slot
    via clean_slot_values, re-tokenize values with spaCy, and write the result
    to '<db>_processed.json'.

    db_paths: dict mapping domain name -> path of that domain's DB json file.
    Returns nothing; output is written to disk next to each input file.
    """
    dbs = {}
    nlp = spacy.load('en_core_web_sm')
    for domain in ontology.all_domains:
        if domain != 'profile': # skip 'profile': it has no DB json file to process
            with open(db_paths[domain], 'r') as f: # for every db_domain, read json file
                dbs[domain] = json.loads(f.read().lower())
            for idx, entry in enumerate(dbs[domain]): # entry has information about slots of said domain
                new_entry = copy.deepcopy(entry)
                for key, value in entry.items(): # key = slot
                    if type(value) is not str:
                        continue
                    # Drop the old key first: clean_slot_values may rename the slot.
                    del new_entry[key]
                    key, value = clean_slot_values(domain, key, value)
                    # Round-trip through spaCy so values match the corpus tokenization.
                    tokenize_and_back = ' '.join([token.text for token in nlp(value)]).strip()
                    new_entry[key] = tokenize_and_back
                    dbs[domain][idx] = new_entry
            with open(db_paths[domain].replace('.json', '_processed.json'), 'w') as f:
                json.dump(dbs[domain], f, indent=2)
        # print('[%s] DB processed! '%domain)
164,426 | import re
import ontology
def my_clean_text(text):
    """Detach abbreviation-style periods so they become separate tokens.

    'abc.xyz' -> 'abc . xyz' and 'abc. ' (or 'abc.. ') -> 'abc . '.
    """
    # Period sandwiched between letters: split it out with spaces.
    spaced = re.sub(r'([a-zT]+)\.([a-z])', r'\1 . \2', text)
    # Word followed by '.' or '..' and a space: detach the period.
    return re.sub(r'(\w+)\.\.? ', r'\1 . ', spaced)
164,427 | import re
import ontology
def _load_mapping_pairs():
    # Parse mapping.pair once per process and memoize it on the function object.
    # clean_text() is called per utterance/value; re-reading and re-splitting
    # the file on every call was pure overhead.
    cache = getattr(_load_mapping_pairs, '_cache', None)
    if cache is None:
        cache = []
        with open('data/multi-woz/mapping.pair', 'r') as fin:
            for line in fin.readlines():
                fromx, tox = line.replace('\n', '').split('\t')
                cache.append((fromx, tox))
        _load_mapping_pairs._cache = cache
    return cache

def clean_text(text):
    """Normalize a raw MultiWOZ utterance/value.

    Lowercases, canonicalizes quotes/punctuation, fixes known dataset typos,
    detaches abbreviation periods, then applies whole-token substitutions
    from data/multi-woz/mapping.pair. Returns the cleaned string.
    """
    text = text.strip()
    text = text.lower()
    text = text.replace(u"’", "'")
    text = text.replace(u"‘", "'")
    text = text.replace(';', ',')
    text = text.replace('"', ' ')
    text = text.replace('/', ' and ')
    text = text.replace("don't", "do n't")
    text = clean_time(text)
    # Dataset-specific typo/formatting fixes (regex pattern -> replacement).
    # NOTE(review): entries containing uppercase letters (e.g. 'Telephone:...')
    # cannot match after lower() above — kept verbatim for fidelity.
    baddata = { r'c\.b (\d), (\d) ([a-z])\.([a-z])': r'cb\1\2\3\4',
                'c.b. 1 7 d.y': 'cb17dy',
                'c.b.1 7 d.y': 'cb17dy',
                'c.b 25, 9 a.q': 'cb259aq',
                'isc.b 25, 9 a.q': 'is cb259aq',
                'c.b2, 1 u.f': 'cb21uf',
                'c.b 1,2 q.a':'cb12qa',
                '0-122-336-5664': '01223365664',
                'postcodecb21rs': 'postcode cb21rs',
                r'i\.d': 'id',
                ' i d ': 'id',
                'Telephone:01223358966': 'Telephone: 01223358966',
                'depature': 'departure',
                'depearting': 'departing',
                '-type': ' type',
                r"b[\s]?&[\s]?b": "bed and breakfast",
                "b and b": "bed and breakfast",
                r"guesthouse[s]?": "guest house",
                r"swimmingpool[s]?": "swimming pool",
                "wo n\'t": "will not",
                " \'d ": " would ",
                " \'m ": " am ",
                " \'re' ": " are ",
                " \'ll' ": " will ",
                " \'ve ": " have ",
                r'^\'': '',
                r'\'$': '',
              }
    for tmpl, good in baddata.items():
        text = re.sub(tmpl, good, text)
    text = re.sub(r'([a-zT]+)\.([a-z])', r'\1 . \2', text) # 'abc.xyz' -> 'abc . xyz'
    text = re.sub(r'(\w+)\.\.? ', r'\1 . ', text) # if 'abc. ' -> 'abc . '
    for fromx, tox in _load_mapping_pairs():
        # Pad with spaces so only whole-token occurrences are replaced.
        text = ' ' + text + ' '
        text = text.replace(' ' + fromx + ' ', ' ' + tox + ' ')[1:-1]
    return text
def clean_slot_values(domain, slot, value):
    """Canonicalize one (domain, slot, value) belief-state triple.

    Cleans the value text, then applies per-domain lookup tables that fold
    annotation variants/typos into canonical values (times to HH:MM, area
    abbreviations, 'dont care' variants, etc.), and finally normalizes the
    slot name via ontology.normlize_slot_names. Returns (slot, value).

    Two dead branches from the original were removed: a duplicated
    `value in ['any']` check (hotel pricerange) and an unreachable
    `slot == "pricerange"` branch (restaurant) shadowed by the earlier
    `slot in ['pricerange', 'price range']` branch. Behavior is unchanged.
    """
    value = clean_text(value)
    if not value:
        value = ''
    elif value == 'not mentioned':
        value = ''
        # value = 'not mentioned' # if in DST setting
    elif domain == 'attraction':
        if slot == 'name':
            if value == 't':
                value = ''
            if value=='trinity':
                value = 'trinity college'
        elif slot == 'area':
            if value in ['town centre', 'cent', 'center', 'ce']:
                value = 'centre'
            elif value in ['ely', 'in town', 'museum', 'norwich', 'same area as hotel']:
                value = ""
            elif value in ['we']:
                value = "west"
        elif slot == 'type':
            if value in ['m', 'mus', 'musuem']:
                value = 'museum'
            elif value in ['art', 'architectural']:
                value = "architecture"
            elif value in ['churches']:
                value = "church"
            elif value in ['coll']:
                value = "college"
            elif value in ['concert', 'concerthall']:
                value = 'concert hall'
            elif value in ['night club']:
                value = 'nightclub'
            elif value in ['mutiple sports', 'mutliple sports', 'sports', 'galleria']:
                value = 'multiple sports'
            elif value in ['ol', 'science', 'gastropub', 'la raza']:
                value = ''
            elif value in ['swimmingpool', 'pool']:
                value = 'swimming pool'
            elif value in ['fun']:
                value = 'entertainment'
    elif domain == 'hotel':
        if slot == 'area':
            if value in ['cen', 'centre of town', 'near city center', 'center']:
                value = 'centre'
            elif value in ['east area', 'east side']:
                value = 'east'
            elif value in ['in the north', 'north part of town']:
                value = 'north'
            elif value in ['we']:
                value = "west"
        elif slot == "day":
            if value == "monda":
                value = "monday"
            elif value == "t":
                value = "tuesday"
        elif slot == 'name':
            if value == 'uni':
                value = 'university arms hotel'
            elif value == 'university arms':
                value = 'university arms hotel'
            elif value == 'acron':
                value = 'acorn guest house'
            elif value == 'ashley':
                value = 'ashley hotel'
            elif value == 'arbury lodge guesthouse':
                value = 'arbury lodge guest house'
            elif value == 'la':
                value = 'la margherit'
            elif value == 'no':
                value = ''
        elif slot == 'internet':
            if value == 'does not':
                value = 'no'
            elif value in ['y', 'free', 'free internet']:
                value = 'yes'
            elif value in ['4']:
                value = ''
        elif slot == 'parking':
            if value == 'n':
                value = 'no'
            elif value in ['free parking']:
                value = 'yes'
            elif value in ['y']:
                value = 'yes'
        elif slot in ['pricerange', 'price range']:
            slot = 'pricerange'
            if value == 'moderately':
                value = 'moderate'
            elif value in ['any']:
                value = "do n't care"
            elif value in ['inexpensive']:
                value = "cheap"
            elif value in ['2', '4']:
                value = ''
        elif slot == 'stars':
            if value == 'two':
                value = '2'
            elif value == 'three':
                value = '3'
            elif value in ['4-star', '4 stars', '4 star', 'four star', 'four stars']:
                value= '4'
        elif slot == 'type':
            if value == '0 star rarting':
                value = ''
            elif value == 'guesthouse':
                value = 'guest house'
            elif value not in ['hotel', 'guest house', "do n't care"]:
                value = ''
    elif domain == 'restaurant':
        if slot == "area":
            if value in ["center", 'scentre', "center of town", "city center", "cb30aq", "town center", 'centre of cambridge', 'city centre']:
                value = "centre"
            elif value == "west part of town":
                value = "west"
            elif value == "n":
                value = "north"
            elif value in ['the south']:
                value = 'south'
            elif value not in ['centre', 'south', "do n't care", 'west', 'east', 'north']:
                value = ''
        elif slot == "day":
            if value == "monda":
                value = "monday"
            elif value == "t":
                value = "tuesday"
        elif slot in ['pricerange', 'price range']:
            slot = 'pricerange'
            if value in ['moderately', 'mode', 'mo']:
                value = 'moderate'
            elif value in ['not']:
                value = ''
            elif value in ['inexpensive', 'ch']:
                value = "cheap"
        elif slot == "food":
            if value == "barbecue":
                value = "barbeque"
        elif slot == "time":
            if value == "9:00":
                value = "09:00"
            elif value == "9:45":
                value = "09:45"
            elif value == "1330":
                value = "13:30"
            elif value == "1430":
                value = "14:30"
            elif value == "9:15":
                value = "09:15"
            elif value == "9:30":
                value = "09:30"
            elif value == "1830":
                value = "18:30"
            elif value == "9":
                value = "09:00"
            elif value == "2:00":
                value = "14:00"
            elif value == "1:00":
                value = "13:00"
            elif value == "3:00":
                value = "15:00"
    elif domain == 'taxi':
        if slot in ['arriveBy', 'arrive by']:
            slot = 'arriveby'
            if value == '1530':
                value = '15:30'
            elif value == '15 minutes':
                value = ''
        elif slot in ['leaveAt', 'leave at']:
            slot = 'leaveat'
            if value == '1:00':
                value = '01:00'
            elif value == '21:4':
                value = '21:04'
            elif value == '4:15':
                value = '04:15'
            elif value == '5:45':
                value = '05:45'
            elif value == '0700':
                value = '07:00'
            elif value == '4:45':
                value = '04:45'
            elif value == '8:30':
                value = '08:30'
            elif value == '9:30':
                value = '09:30'
            value = value.replace(".", ":")
    elif domain == 'train':
        if slot in ['arriveBy', 'arrive by']:
            slot = 'arriveby'
            if value == '1':
                value = '01:00'
            elif value in ['does not care', 'doesnt care', "doesn't care"]:
                value = "do n't care"
            elif value == '8:30':
                value = '08:30'
            elif value == 'not 15:45':
                value = ''
            value = value.replace(".", ":")
        elif slot == 'day':
            if value =='doesnt care' or value == "doesn't care":
                value = "do n't care"
        elif slot in ['leaveAt', 'leave at']:
            slot = 'leaveat'
            if value == '2:30':
                value = '02:30'
            elif value == '7:54':
                value = '07:54'
            elif value == 'after 5:45 pm':
                value = '17:45'
            elif value in ['early evening', 'friday', 'sunday', 'tuesday', 'afternoon']:
                value = ''
            elif value == '12':
                value = '12:00'
            elif value == '1030':
                value = '10:30'
            elif value == '1700':
                value = '17:00'
            elif value in ['does not care', 'doesnt care', 'do nt care', "doesn't care"]:
                value = "do n't care"
            value = value.replace(".", ":")
    # Domain-independent: fold remaining "don't care" spellings.
    if value in ['dont care', "don't care", "do nt care", "doesn't care"]:
        value = "do n't care"
    if ontology.normlize_slot_names.get(slot):
        slot = ontology.normlize_slot_names[slot]
    return slot, value
164,428 | import os, json, copy, re, zipfile
from collections import OrderedDict
from ontology import all_domains
# Input/output locations for the raw MultiWOZ dump and the analysis artifacts.
data_path = './data/multi-woz/'
save_path = './data/multi-woz-analysis/'
save_path_exp = './data/multi-woz-processed/'
data_file = 'data.json'
# All dialogue domains, imported from the shared ontology.
domains = all_domains
def analysis():
    """Scan the zipped raw MultiWOZ data.json and write analysis artifacts.

    Produces, under save_path / save_path_exp:
      - compressed_data.json: per-dialogue goals + turns with empty
        book/semi metadata stripped
      - goal_of_each_dials.json: per-dialogue goals only
      - req_slots.json / info_slots.json / all_domain_specific_info_slots.json:
        requestable/informable slot inventories per domain
      - domain_count.json / domain_files.json: dialogue counts and file lists
        per domain (split by single- vs multi-domain dialogues)
      - reference_no.json: all booking reference numbers found in the dump
    """
    compressed_raw_data = {}
    goal_of_dials = {}
    req_slots = {}
    info_slots = {}
    dom_count = {}
    dom_fnlist = {}
    all_domain_specific_slots = set()
    for domain in domains:
        req_slots[domain] = []
        info_slots[domain] = []
    archive = zipfile.ZipFile(data_path+data_file+'.zip', 'r')
    # The whole corpus is lowercased up front; all later matching relies on this.
    data = archive.open(data_file, 'r').read().decode('utf-8').lower()
    # Harvest booking reference numbers from the raw text before JSON parsing.
    ref_nos = list(set(re.findall(r'\"reference\"\: \"(\w+)\"', data)))
    data = json.loads(data)
    for fn, dial in data.items():
        goals = dial['goal']
        if 'log' in dial.keys():
            pass
        else:
            continue
        logs = dial['log']
        # get compressed_raw_data and goal_of_dials
        compressed_raw_data[fn] = {'goal': {}, 'log': []}
        goal_of_dials[fn] = {}
        for dom, goal in goals.items(): # get goal of domains that are in demmand
            # print(dom)
            if dom != 'topic' and dom != 'message' and goal:
                compressed_raw_data[fn]['goal'][dom] = goal
                goal_of_dials[fn][dom] = goal
        for turn in logs:
            if not turn['metadata']: # user's turn
                compressed_raw_data[fn]['log'].append({'text': turn['text']})
            else: # system's turn
                meta = turn['metadata']
                turn_dict = {'text': turn['text'], 'metadata': {}}
                for dom, book_semi in meta.items(): # for every domain, sys updates "book" and "semi"
                    book, semi = book_semi['book'], book_semi['semi']
                    record = False
                    for slot, value in book.items(): # record indicates non-empty-book domain
                        if value not in ['', []]:
                            record = True
                    if record:
                        turn_dict['metadata'][dom] = {}
                        turn_dict['metadata'][dom]['book'] = book # add that domain's book
                    record = False
                    for slot, value in semi.items(): # here record indicates non-empty-semi domain
                        if value not in ['', []]:
                            record = True
                            break
                    if record:
                        # Iterate over a deepcopy because 'not mentioned' slots
                        # are deleted from semi while scanning it.
                        for s, v in copy.deepcopy(semi).items():
                            if v == 'not mentioned':
                                del semi[s]
                        if not turn_dict['metadata'].get(dom):
                            turn_dict['metadata'][dom] = {}
                        turn_dict['metadata'][dom]['semi'] = semi # add that domain's semi
                compressed_raw_data[fn]['log'].append(turn_dict) # add to log the compressed turn_dict
        # get domain statistics
        # NOTE(review): fn is lowercased above, so the 'MUL' test can never fire.
        dial_type = 'multi' if 'mul' in fn or 'MUL' in fn else 'single' # determine the dialog's type: sinle or multi
        if fn in ['pmul2756.json', 'pmul4958.json', 'pmul3599.json']:
            dial_type = 'single'
        dial_domains = [dom for dom in domains if goals[dom]] # domains that are in demmand
        dom_str = ''
        for dom in dial_domains:
            if not dom_count.get(dom+'_'+dial_type): # count each domain type, with single or multi considered
                dom_count[dom+'_'+dial_type] = 1
            else:
                dom_count[dom+'_'+dial_type] += 1
            if not dom_fnlist.get(dom+'_'+dial_type): # keep track the file number of each domain type
                dom_fnlist[dom+'_'+dial_type] = [fn]
            else:
                dom_fnlist[dom+'_'+dial_type].append(fn)
            dom_str += '%s_'%dom
        dom_str = dom_str[:-1] # substract the last char in dom_str
        if dial_type=='multi': # count multi-domains
            if not dom_count.get(dom_str):
                dom_count[dom_str] = 1
            else:
                dom_count[dom_str] += 1
            if not dom_fnlist.get(dom_str):
                dom_fnlist[dom_str] = [fn]
            else:
                dom_fnlist[dom_str].append(fn)
        ######
        # get informable and requestable slots statistics
        # print(domains)
        for domain in domains:
            info_ss = goals[domain].get('info', {})
            book_ss = goals[domain].get('book', {})
            req_ss = goals[domain].get('reqt', {})
            # profile_ss = goal
            for info_s in info_ss:
                all_domain_specific_slots.add(domain+'-'+info_s)
                if info_s not in info_slots[domain]:
                    info_slots[domain]+= [info_s]
            for book_s in book_ss:
                if 'book_' + book_s not in info_slots[domain] and book_s not in ['invalid', 'pre_invalid']:
                    all_domain_specific_slots.add(domain+'-'+book_s)
                    info_slots[domain]+= ['book_' + book_s]
            for req_s in req_ss:
                if req_s not in req_slots[domain]:
                    req_slots[domain]+= [req_s]
    # result statistics
    if not os.path.exists(save_path):
        os.mkdir(save_path)
    if not os.path.exists(save_path_exp):
        os.mkdir(save_path_exp)
    with open(save_path+'req_slots.json', 'w') as sf:
        json.dump(req_slots,sf,indent=2)
    with open(save_path+'info_slots.json', 'w') as sf:
        json.dump(info_slots,sf,indent=2)
    with open(save_path+'all_domain_specific_info_slots.json', 'w') as sf:
        json.dump(list(all_domain_specific_slots),sf,indent=2)
    print("slot num:", len(list(all_domain_specific_slots)))
    with open(save_path+'goal_of_each_dials.json', 'w') as sf:
        json.dump(goal_of_dials, sf, indent=2)
    with open(save_path+'compressed_data.json', 'w') as sf:
        json.dump(compressed_raw_data, sf, indent=2)
    with open(save_path + 'domain_count.json', 'w') as sf:
        # Order the report: single-domain counts, multi-domain counts, combos.
        single_count = [d for d in dom_count.items() if 'single' in d[0]]
        multi_count = [d for d in dom_count.items() if 'multi' in d[0]]
        other_count = [d for d in dom_count.items() if 'multi' not in d[0] and 'single' not in d[0]]
        dom_count_od = OrderedDict(single_count+multi_count+other_count)
        json.dump(dom_count_od, sf, indent=2)
    with open(save_path_exp + 'reference_no.json', 'w') as sf:
        json.dump(ref_nos,sf,indent=2)
    with open(save_path_exp + 'domain_files.json', 'w') as sf:
        json.dump(dom_fnlist, sf, indent=2)
164,429 | from eval import MultiWozEvaluator
from damd_net import DAMD, cuda_, get_one_hot_input
from dst_reader import MultiWozReader
from config import global_config as cfg
import utils
from torch.optim import Adam
import torch
import torch.nn as nn
import os
import random
import argparse
import time
import logging
import json
import tqdm
import numpy as np
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
from compute_joint_acc import compute_jacc
import warnings
import collections
from dst import default_cleaning, IGNORE_TURNS_TYPE2, paser_bs
from transformers.optimization import AdamW, get_linear_schedule_with_warmup
from transformers import GPT2Tokenizer, GPT2LMHeadModel, GPT2Model, AutoTokenizer,AutoModel,AutoConfig
def parse_arg_cfg(args):
    """Apply 'key=value' overrides from args.cfg onto the global cfg object.

    Each override's string value is coerced to the type of the existing cfg
    attribute. Raises ValueError when the attribute's current value is None,
    since the target type cannot then be inferred.
    """
    # add args to cfg
    if args.cfg:
        for pair in args.cfg:
            # Split on the first '=' only, so values may themselves contain '='.
            k, v = pair.split('=', 1)
            dtype = type(getattr(cfg, k))
            if dtype is type(None):
                raise ValueError(
                    "cannot override cfg.%s: its default is None, so the target type is unknown" % k)
            if dtype is bool:
                # NOTE: any string other than exactly 'False' (including
                # 'false') parses as True — preserved from the original.
                v = False if v == 'False' else True
            elif dtype is list:
                v = v.split(',')
                if k == 'cuda_device':
                    v = [int(no) for no in v]
            else:
                v = dtype(v)
            setattr(cfg, k, v)
    return
164,430 | import copy, operator
from queue import PriorityQueue
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
from torch.autograd import Variable
from torch.distributions import Categorical
import utils
from config import global_config as cfg
def init_gru(gru):
def weight_reset(m):
if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
m.reset_parameters()
gru.apply(weight_reset)
# gru.reset_parameters()
for _, hh, _, _ in gru.all_weights:
for i in range(0, hh.size(0), gru.hidden_size):
torch.nn.init.orthogonal_(hh[i : i + gru.hidden_size], gain=1) | null |
164,431 | import copy, operator
from queue import PriorityQueue
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
from torch.autograd import Variable
from torch.distributions import Categorical
import utils
from config import global_config as cfg
def cuda_(var):
    """Move `var` to the GPU when cfg.cuda is set; otherwise return it unchanged."""
    # cfg.cuda_device[0]
    if cfg.cuda:
        return var.cuda()
    return var
def label_smoothing(labels, smoothing_rate, vocab_size_oov):
    """Build label-smoothed one-hot targets over the OOV-extended vocabulary.

    The gold index of each label gets probability (1 - smoothing_rate); the
    remaining mass is spread uniformly over the other vocab_size_oov - 1
    entries. Returns a tensor of shape labels.shape + (vocab_size_oov,),
    moved to GPU via cuda_ when enabled. No gradients are tracked.
    """
    with torch.no_grad():
        on_value = 1.0 - smoothing_rate
        # Residual mass split evenly across all non-gold entries.
        off_value = (1.0 - on_value) / labels.new_tensor(vocab_size_oov - 1)
        indices = labels.data if isinstance(labels, Variable) else labels
        indices = indices.type(torch.LongTensor).contiguous().view(-1, 1)
        smoothed = torch.zeros(indices.size(0), vocab_size_oov)
        smoothed.fill_(off_value)
        smoothed.scatter_(1, indices, on_value)
        smoothed = cuda_(smoothed.view(*labels.shape, -1))
    return smoothed
164,432 | import copy, operator
from queue import PriorityQueue
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
from torch.autograd import Variable
from torch.distributions import Categorical
import utils
from config import global_config as cfg
def cuda_(var):
    """Return `var` on the GPU if cfg.cuda is enabled, else unchanged."""
    # cfg.cuda_device[0]
    return var.cuda() if cfg.cuda else var
The provided code snippet includes necessary dependencies for implementing the `get_final_scores` function. Write a Python function `def get_final_scores(raw_scores, word_onehot_input, input_idx_oov, vocab_size_oov)` to solve the following problem:
:param raw_scores: list of tensor of size [B, Tdec, V], [B, Tdec, Tenc1], [B, Tdec, Tenc1] ... :param word_onehot_input: list of nparray of size [B, Tenci, V+Tenci] :param input_idx_oov: list of nparray of size [B, Tenc] :param vocab_size_oov: :returns: tensor of size [B, Tdec, vocab_size_oov]
Here is the function:
def get_final_scores(raw_scores, word_onehot_input, input_idx_oov, vocab_size_oov):
    """
    Merge generation scores with copy-attention scores into one log-prob
    distribution over the OOV-extended vocabulary (copy-net style).

    :param raw_scores: list of tensor of size [B, Tdec, V], [B, Tdec, Tenc1], [B, Tdec, Tenc1] ...
    :param word_onehot_input: list of nparray of size [B, Tenci, V+Tenci]
    :param input_idx_oov: list of nparray of size [B, Tenc]
    :param vocab_size_oov:
    :returns: tensor of size [B, Tdec, vocab_size_oov]
    """
    # Project each copy score from encoder positions onto vocab+position axes
    # via the per-source one-hot matrices (entry 0 is the generation score).
    for idx, raw_sc in enumerate(raw_scores):
        if idx==0: continue
        one_hot = word_onehot_input[idx-1] #[B, Tenc_i, V+Tenc_i]
        cps = torch.einsum('imj,ijn->imn', raw_sc, one_hot) #[B, Tdec, V+Tenc_i]
        # cps[cps==0] = -1e20 # zero prob -> -inf log prob
        raw_scores[idx] = cps
    # Running offsets of each score segment inside the concatenated axis.
    cum_idx = [score.size(2) for score in raw_scores]
    for i in range(len(cum_idx) - 1):
        cum_idx[i + 1] += cum_idx[i]
    cum_idx.insert(0, 0)
    # One softmax over gen + all copy segments so they compete for mass.
    logsoftmax = torch.nn.LogSoftmax(dim=2)
    normalized_scores = logsoftmax(torch.cat(raw_scores, dim=2)) #[B,Tdec, V+V+Tenc1+V+Tenc2+...]
    # print(normalized_scores.size())
    # print('normalized_gen_scores:' , normalized_scores.cpu().detach().numpy()[0,:5, 0:40])
    gen_score = normalized_scores[:, :, cum_idx[0]:cum_idx[1]] # [B, Tdec, V]
    Tdec = gen_score.size(1)
    B = gen_score.size(0)
    V = gen_score.size(2)
    # Start from -inf log-probs; only copied OOV positions and the in-vocab
    # block get real mass below.
    total_score = cuda_(torch.zeros(B, Tdec, vocab_size_oov)).fill_(-1e20) # [B, Tdec, vocab_size_oov]
    c_to_g_scores = []
    for i in range(1, len(cum_idx) - 1):
        cps = normalized_scores[:, :, cum_idx[i]:cum_idx[i+1]] #[B, Tdec, V+Tenc_i]
        # print('normalized_cp_scores:' , cps.cpu().detach().numpy()[0,:5, 0:40])
        # First V columns are copy-mass landing on in-vocab words; the rest is
        # per-encoder-position mass for possibly-OOV tokens.
        c_to_g_scores.append(cps[:, :, :V])
        cp_score = cps[:, :, V:]
        # Scatter copy mass of OOV tokens (id >= V) into the extended vocab,
        # log-sum-exp-accumulating when the same OOV id appears repeatedly.
        avail_copy_idx = (input_idx_oov[i-1]>=V).nonzero()
        # print(len(copy_idx))
        for idx in avail_copy_idx:
            b, t = idx[0], idx[1]
            ts = total_score[b, :, input_idx_oov[i-1][b, t]].view(Tdec,1)
            cs = cp_score[b, :, t].view(Tdec,1)
            total_score[b, :, input_idx_oov[i-1][b, t]] = torch.logsumexp(torch.cat([ts, cs], 1), 1)
    # Combine generation scores with in-vocab copy mass from every source.
    gen_score = torch.logsumexp(torch.stack([gen_score] + c_to_g_scores, 3), 3)
    total_score[:, :, :V] = gen_score
    # print('total_score:' , total_score.cpu().detach().numpy()[0,:3, 0:40])
    return total_score.contiguous() #[B, Tdec, vocab_size_oov]
164,433 | import copy, operator
from queue import PriorityQueue
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
from torch.autograd import Variable
from torch.distributions import Categorical
import utils
from config import global_config as cfg
def cuda_(var):
def get_one_hot_input(x_input_np):
def update_input(name, inputs):
    # Derive the tensor variants the model consumes from inputs[name+'_np']:
    #   *_unk_np : copy with OOV ids (>= cfg.vocab_size) mapped to 2 (<unk>)
    #   *_onehot : one-hot encoding of the unk-mapped ids
    #   name     : LongTensor of the unk-mapped ids (moved to GPU via cuda_)
    #   *_nounk  : LongTensor of the raw ids with OOV ids preserved
    # Mutates `inputs` in place; assumes inputs[name+'_np'] is an int ndarray
    # of token ids — TODO confirm at call sites.
    inputs[name+'_unk_np'] = copy.deepcopy(inputs[name+'_np'])
    inputs[name+'_unk_np'][inputs[name+'_unk_np']>=cfg.vocab_size] = 2 # <unk>
    inputs[name+'_onehot'] = get_one_hot_input(inputs[name+'_unk_np'])
    inputs[name] = cuda_(torch.from_numpy(inputs[name+'_unk_np']).long())
    inputs[name+'_nounk'] = cuda_(torch.from_numpy(inputs[name+'_np']).long())
164,451 | import os
import random
from collections import OrderedDict, defaultdict
from itertools import chain
import json
import sqlite3 as sql
import spacy
import numpy as np
from tqdm import tqdm
from nltk.tokenize import word_tokenize as nltk_word_tokenize
from nltk.stem import WordNetLemmatizer
from space.args import str2bool
from space.data.tokenizer import Tokenizer
from space.utils import ontology, utils
from space.utils.db_ops import MultiWozDB
from space.utils.ontologies import CamRest676Ontology, KvretOntology
def max_lens(X):
    """Return the maximum length found at each nesting depth of a nested list.

    e.g. [1, 2, 3] -> [3]; [[1, 2], [3]] -> [2, 2]. Depth is probed via the
    first element, so nesting is assumed uniform across siblings.
    """
    lens = [len(X)]
    # Guard with `X and` so an empty level returns instead of X[0] raising
    # IndexError (the original crashed on empty input).
    while X and isinstance(X[0], list):
        lens.append(max(map(len, X)))
        # Flatten one level and continue probing.
        X = [x for xs in X for x in xs]
    return lens
def list2np(X, padding=0, dtype="int64"):
    """Pad a nested list (depth 1-3) into a rectangular numpy array.

    Shorter inner sequences are right-padded with `padding`; the result is
    cast to `dtype` before returning. Depth-1 input is converted directly
    (no padding applies).
    """
    shape = max_lens(X)
    depth = len(shape)
    out = np.full(shape, padding, dtype=np.int32)
    if depth == 1:
        out = np.array(X)
    elif depth == 2:
        for row, seq in enumerate(X):
            out[row, :len(seq)] = np.array(seq)
    elif depth == 3:
        for row, seqs in enumerate(X):
            for col, seq in enumerate(seqs):
                out[row, col, :len(seq)] = np.array(seq)
    return out.astype(dtype)
164,456 | import json
import logging
import os
import sys
import time
from collections import OrderedDict
import torch
import torch.nn as nn
import math
import numpy as np
from tqdm import tqdm
from transformers.optimization import AdamW, get_linear_schedule_with_warmup
from dst import default_cleaning, IGNORE_TURNS_TYPE2, paser_bs,ignore_none
from space.args import str2bool
from space.data.data_loader import DataLoader
from space.metrics.metrics_tracker import MetricsTracker
from transformers import Wav2Vec2Processor
# Dialogues (file id -> turn indices) whose gold belief annotations are known
# to be noisy ("Type 2"); these turns may be excluded from joint-accuracy
# scoring when type2 cleaning is enabled.
IGNORE_TURNS_TYPE2 = \
    {
        'PMUL1812': [1, 2]
    }
def paser_bs(sent):
    """Parse a compacted belief-state span into unique "domain slot value" strings.

    Tokens matching `all_domain` open a domain segment; within it, tokens
    matching `all_slots` open a slot whose value is everything up to the next
    slot/domain token. Order of the returned list is unspecified (set-based).

    NOTE(review): str.strip('<sos_b>') strips *characters* from {<,s,o,_,b,>},
    not the literal marker token — behavior preserved as-is.
    """
    tokens = sent.strip('<sos_b>').strip('<eos_b>').split()
    triples = []
    dom_positions = [i for i, tok in enumerate(tokens) if tok in all_domain]
    dom_bounds = dom_positions + [len(tokens)]
    for d_pos, d_end in zip(dom_positions, dom_bounds[1:]):
        domain = tokens[d_pos]
        span = tokens[d_pos + 1:d_end]
        slot_positions = [i for i, tok in enumerate(span) if tok in all_slots]
        slot_bounds = slot_positions + [len(span)]
        for s_pos, s_end in zip(slot_positions, slot_bounds[1:]):
            value = ' '.join(span[s_pos + 1:s_end])
            triples.append(" ".join([domain, span[s_pos], value]))
    return list(set(triples))
def ignore_none(pred_belief, target_belief):
    """Normalize belief triples for comparison.

    Folds the tokenization variant 'catherine s' -> 'catherines' in the
    predictions, then drops any triple (on either side) containing 'none'
    or 'not mentioned'. Returns (cleaned_pred, cleaned_target).
    """
    # BUG FIX: str.replace returns a new string; the original called it and
    # discarded the result, so the normalization never took effect.
    pred_belief = [bs.replace('catherine s', 'catherines') if 'catherine s' in bs else bs
                   for bs in pred_belief]
    # Substring match, mirroring the original behavior.
    clean_target_belief = [bs for bs in target_belief
                           if 'not mentioned' not in bs and 'none' not in bs]
    clean_pred_belief = [bs for bs in pred_belief
                         if 'not mentioned' not in bs and 'none' not in bs]
    # (The original also built an unused `dontcare_slots` list; removed.)
    return clean_pred_belief, clean_target_belief
def default_cleaning(pred_belief, target_belief):
    """Apply the standard MultiWOZ normalization to both belief lists.

    Each triple is split into domain / slot / value ('book X' counts as a
    two-token slot), passed through GENERAL_TYPO and fix_mismatch_jason,
    and re-joined. Returns (cleaned_pred, cleaned_target).
    """
    pred_belief_jason = []
    target_belief_jason = []
    for pred in pred_belief:
        if pred in ['', ' ']:
            continue
        domain = pred.split()[0]
        if 'book' in pred:
            # 'domain book slot value...' -> slot = 'book slot'
            slot = ' '.join(pred.split()[1:3])
            val = ' '.join(pred.split()[3:])
        else:
            slot = pred.split()[1]
            val = ' '.join(pred.split()[2:])
        # NOTE(review): this keys GENERAL_TYPO by *slot* and overwrites val —
        # the common variant keys by val (`val in GENERAL_TYPO`); confirm intended.
        if slot in GENERAL_TYPO:
            val = GENERAL_TYPO[slot]
        slot, val = fix_mismatch_jason(slot, val)
        pred_belief_jason.append('{} {} {}'.format(domain, slot, val))
    for tgt in target_belief:
        domain = tgt.split()[0]
        if 'book' in tgt:
            slot = ' '.join(tgt.split()[1:3])
            val = ' '.join(tgt.split()[3:])
        else:
            slot = tgt.split()[1]
            val = ' '.join(tgt.split()[2:])
        if slot in GENERAL_TYPO:
            val = GENERAL_TYPO[slot]
        slot, val = fix_mismatch_jason(slot, val)
        target_belief_jason.append('{} {} {}'.format(domain, slot, val))
    turn_pred = pred_belief_jason
    turn_target = target_belief_jason
    return turn_pred, turn_target
def compute_jacc(data,default_cleaning_flag=True,type2_cleaning_flag=False):
    """Compute joint goal accuracy over generated belief states.

    data: {file_name: {turn_id: {'bspn': gold span, 'bspn_gen': predicted span, ...}}}
    Returns (joint_acc, dict_rate) where dict_rate maps 'domain-slot' to its
    final-turn slot accuracy. Mismatching turns are dumped to bs_error.json.
    """
    num_turns = 0
    joint_acc = 0
    error = {}
    clean_tokens = ['<|endoftext|>', ]
    dict_slot_acc_right = {}
    dict_slot_acc_all = {}
    dict_rate = {}
    for file_name in data:
        for turn_id, turn_data in data[file_name].items():
            turn_target = turn_data['bspn']
            turn_pred = turn_data['bspn_gen']
            turn_target = paser_bs(turn_target)
            turn_pred = paser_bs(turn_pred)
            # Drop empty/marker/'none' predictions.
            # BUG FIX: the original removed items from turn_pred while
            # iterating it, which skips the element after each removal;
            # rebuild the list instead.
            turn_pred = [bs for bs in turn_pred
                         if not (bs in clean_tokens + ['', ' '] or bs.split()[-1] == 'none')]
            new_turn_pred = []
            for bs in turn_pred:
                for tok in clean_tokens:
                    bs = bs.replace(tok, '').strip()
                new_turn_pred.append(bs)
            turn_pred = new_turn_pred
            turn_pred, turn_target = ignore_none(turn_pred, turn_target)
            # MultiWOZ default cleaning
            if default_cleaning_flag:
                turn_pred, turn_target = default_cleaning(turn_pred, turn_target)
            # Per-slot accuracy bookkeeping on the final turn of each dialogue
            # (assumes consecutive integer turn ids — TODO confirm).
            if turn_id + 1 not in data[file_name].keys():
                for domain_slot_value in turn_target:
                    domain = domain_slot_value.split()[0]
                    slot = domain_slot_value.split()[1]
                    if domain + '-' + slot in dict_slot_acc_all.keys():
                        dict_slot_acc_all[domain + '-' + slot] = dict_slot_acc_all[domain + '-' + slot] + 1
                    else:
                        dict_slot_acc_all[domain + '-' + slot] = 1
                for pred_domain_slot_value in turn_pred:
                    if pred_domain_slot_value in set(turn_target):
                        domain = pred_domain_slot_value.split()[0]
                        slot = pred_domain_slot_value.split()[1]
                        if domain + '-' + slot in dict_slot_acc_right.keys():
                            dict_slot_acc_right[domain + '-' + slot] = dict_slot_acc_right[domain + '-' + slot] + 1
                        else:
                            dict_slot_acc_right[domain + '-' + slot] = 1
                    else:
                        pass
            # Recomputed every turn as in the original; only the final
            # contents are reported, so the result is unchanged.
            for domain_slot in dict_slot_acc_right.keys():
                dict_rate[domain_slot] = dict_slot_acc_right[domain_slot] / dict_slot_acc_all[domain_slot]
            join_flag = False
            # Exclude triples containing known-bad hallucinated names on either side.
            turn_pred_wo_wrong = []
            turn_target_wo_wrong = []
            for instance_pred, instance_target in zip(turn_pred,turn_target):
                if 'emma' not in instance_pred and 'jerry' not in instance_pred and 'john' not in instance_pred and 'micheal' not in instance_pred and 'emma' not in instance_target and 'jerry' not in instance_target and 'john' not in instance_target and 'micheal' not in instance_target:
                    turn_pred_wo_wrong.append(instance_pred)
                    turn_target_wo_wrong.append(instance_target)
            if set(turn_target_wo_wrong) == set(turn_pred_wo_wrong):
                joint_acc += 1
                join_flag = True
            elif type2_cleaning_flag: # check for possible Type 2 noisy annotations
                flag = True
                for bs in turn_target_wo_wrong:
                    if bs not in turn_pred_wo_wrong:
                        flag = False
                        break
                if flag:
                    for bs in turn_pred_wo_wrong:
                        if bs not in turn_target_wo_wrong:
                            flag = False
                            break
                if flag: # model prediction might be correct if found in Type 2 list of noisy annotations
                    # BUG FIX: the original referenced an undefined name `dial`
                    # here (NameError whenever this path ran); use file_name.
                    dial_name = file_name.split('.')[0]
                    if dial_name in IGNORE_TURNS_TYPE2 and turn_id in IGNORE_TURNS_TYPE2[dial_name]: # ignore these turns
                        pass
                    else:
                        joint_acc += 1
            if not join_flag:
                if file_name not in error:
                    error[file_name] = {}
                turn_data['gtbs'] = turn_target
                turn_data['predbs'] = turn_pred
                error[file_name][turn_id] = turn_data
            num_turns += 1
    # Guard against empty input instead of dividing by zero.
    joint_acc = joint_acc / num_turns if num_turns else 0.0
    print('joint accuracy: {}'.format(joint_acc))
    print('dict_rate: {}'.format(dict_rate))
    with open('bs_error.json',"w") as f:
        json.dump(error,f,indent=2)
    return joint_acc, dict_rate
164,457 | import json
import logging
import os
import sys
import time
from collections import OrderedDict
import torch
import torch.nn as nn
import math
import numpy as np
from tqdm import tqdm
from transformers.optimization import AdamW, get_linear_schedule_with_warmup
from dst import default_cleaning, IGNORE_TURNS_TYPE2, paser_bs,ignore_none
from space.args import str2bool
from space.data.data_loader import DataLoader
from space.metrics.metrics_tracker import MetricsTracker
from transformers import Wav2Vec2Processor
def get_logger(log_path, name="default"):
    """Create a DEBUG-level logger writing bare messages to stdout and to
    `log_path` (truncated on open). Propagation to ancestor loggers is off.

    Note: calling this twice with the same `name` adds handlers again
    (same as the original behavior).
    """
    logger = logging.getLogger(name)
    logger.propagate = False
    logger.setLevel(logging.DEBUG)
    plain = logging.Formatter("%(message)s")
    stream_handler = logging.StreamHandler(sys.stdout)
    file_handler = logging.FileHandler(log_path, mode="w")
    for handler in (stream_handler, file_handler):
        handler.setFormatter(plain)
        logger.addHandler(handler)
    return logger
164,458 | import math
import random
import warnings
import numpy as np
import torch
import torch.utils.checkpoint
from torch import nn
from typing import Optional, Tuple, Union
from transformers.activations import ACT2FN
from transformers.deepspeed import is_deepspeed_zero3_enabled
from transformers.modeling_outputs import BaseModelOutput, CausalLMOutput
from transformers.modeling_utils import PreTrainedModel
from transformers.pytorch_utils import torch_int_div
from transformers import WavLMConfig, WavLMModel
The provided code snippet includes necessary dependencies for implementing the `_compute_mask_indices` function. Write a Python function `def _compute_mask_indices(shape: Tuple[int, int], mask_prob: float, mask_length: int, attention_mask: Optional[torch.LongTensor] = None, min_masks: int = 0) -> np.ndarray` to solve the following problem:
Computes random mask spans for a given shape. Used to implement [SpecAugment: A Simple Data Augmentation Method for ASR](https://arxiv.org/abs/1904.08779). Note that this method is not optimized to run on TPU and should be run on CPU as part of the preprocessing during training. Args: shape: The shape for which to compute masks. This should be of a tuple of size 2 where the first element is the batch size and the second element is the length of the axis to span. mask_prob: The percentage of the whole axis (between 0 and 1) which will be masked. The number of independently generated mask spans of length `mask_length` is computed by `mask_prob*shape[1]/mask_length`. Note that due to overlaps, `mask_prob` is an upper bound and the actual percentage will be smaller. mask_length: size of the mask min_masks: minimum number of masked spans attention_mask: A (right-padded) attention mask which independently shortens the feature axis of each batch dimension.
Here is the function:
def _compute_mask_indices(shape: Tuple[int, int], mask_prob: float, mask_length: int,
attention_mask: Optional[torch.LongTensor] = None, min_masks: int = 0) -> np.ndarray:
"""
Computes random mask spans for a given shape. Used to implement [SpecAugment: A Simple Data Augmentation Method for
ASR](https://arxiv.org/abs/1904.08779). Note that this method is not optimized to run on TPU and should be run on
CPU as part of the preprocessing during training.
Args:
shape: The shape for which to compute masks. This should be of a tuple of size 2 where
the first element is the batch size and the second element is the length of the axis to span.
mask_prob: The percentage of the whole axis (between 0 and 1) which will be masked. The number of
independently generated mask spans of length `mask_length` is computed by
`mask_prob*shape[1]/mask_length`. Note that due to overlaps, `mask_prob` is an upper bound and the
actual percentage will be smaller.
mask_length: size of the mask
min_masks: minimum number of masked spans
attention_mask: A (right-padded) attention mask which independently shortens the feature axis of
each batch dimension.
"""
batch_size, sequence_length = shape
if mask_length < 1:
raise ValueError("`mask_length` has to be bigger than 0.")
if mask_length > sequence_length:
raise ValueError(
f"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length}"
f" and `sequence_length`: {sequence_length}`"
)
# epsilon is used for probabilistic rounding
epsilon = np.random.rand(1).item()
def compute_num_masked_span(input_length):
"""Given input length, compute how many spans should be masked"""
num_masked_span = int(mask_prob * input_length / mask_length + epsilon)
num_masked_span = max(num_masked_span, min_masks)
# make sure num masked span <= sequence_length
if num_masked_span * mask_length > sequence_length:
num_masked_span = sequence_length // mask_length
# make sure num_masked span is also <= input_length - (mask_length - 1)
if input_length - (mask_length - 1) < num_masked_span:
num_masked_span = max(input_length - (mask_length - 1), 0)
return num_masked_span
# compute number of masked spans in batch
input_lengths = (
attention_mask.sum(-1).detach().tolist()
if attention_mask is not None
else [sequence_length for _ in range(batch_size)]
)
# SpecAugment mask to fill
spec_aug_mask = np.zeros((batch_size, sequence_length), dtype=np.bool)
spec_aug_mask_idxs = []
max_num_masked_span = compute_num_masked_span(sequence_length)
if max_num_masked_span == 0:
return spec_aug_mask
for input_length in input_lengths:
# compute num of masked spans for this input
num_masked_span = compute_num_masked_span(input_length)
# get random indices to mask
spec_aug_mask_idx = np.random.choice(
np.arange(input_length - (mask_length - 1)), num_masked_span, replace=False
)
# pick first sampled index that will serve as a dummy index to pad vector
# to ensure same dimension for all batches due to probabilistic rounding
# Picking first sample just pads those vectors twice.
if len(spec_aug_mask_idx) == 0:
# this case can only happen if `input_length` is strictly smaller then
# `sequence_length` in which case the last token has to be a padding
# token which we can use as a dummy mask id
dummy_mask_idx = sequence_length - 1
else:
dummy_mask_idx = spec_aug_mask_idx[0]
spec_aug_mask_idx = np.concatenate(
[spec_aug_mask_idx, np.ones(max_num_masked_span - num_masked_span, dtype=np.int32) * dummy_mask_idx]
)
spec_aug_mask_idxs.append(spec_aug_mask_idx)
spec_aug_mask_idxs = np.array(spec_aug_mask_idxs)
# expand masked indices to masked spans
spec_aug_mask_idxs = np.broadcast_to(
spec_aug_mask_idxs[:, :, None], (batch_size, max_num_masked_span, mask_length)
)
spec_aug_mask_idxs = spec_aug_mask_idxs.reshape(batch_size, max_num_masked_span * mask_length)
# add offset to the starting indexes so that indexes now create a span
offsets = np.arange(mask_length)[None, None, :]
offsets = np.broadcast_to(offsets, (batch_size, max_num_masked_span, mask_length)).reshape(
batch_size, max_num_masked_span * mask_length
)
spec_aug_mask_idxs = spec_aug_mask_idxs + offsets
# ensure that we cannot have indices larger than sequence_length
if spec_aug_mask_idxs.max() > sequence_length - 1:
spec_aug_mask_idxs[spec_aug_mask_idxs > sequence_length - 1] = sequence_length - 1
# scatter indices to mask
np.put_along_axis(spec_aug_mask, spec_aug_mask_idxs, 1, -1)
return spec_aug_mask | Computes random mask spans for a given shape. Used to implement [SpecAugment: A Simple Data Augmentation Method for ASR](https://arxiv.org/abs/1904.08779). Note that this method is not optimized to run on TPU and should be run on CPU as part of the preprocessing during training. Args: shape: The shape for which to compute masks. This should be of a tuple of size 2 where the first element is the batch size and the second element is the length of the axis to span. mask_prob: The percentage of the whole axis (between 0 and 1) which will be masked. The number of independently generated mask spans of length `mask_length` is computed by `mask_prob*shape[1]/mask_length`. Note that due to overlaps, `mask_prob` is an upper bound and the actual percentage will be smaller. mask_length: size of the mask min_masks: minimum number of masked spans attention_mask: A (right-padded) attention mask which independently shortens the feature axis of each batch dimension. |
164,459 | import math
import random
import warnings
import numpy as np
import torch
import torch.utils.checkpoint
from torch import nn
from typing import Optional, Tuple, Union
from transformers.activations import ACT2FN
from transformers.deepspeed import is_deepspeed_zero3_enabled
from transformers.modeling_outputs import BaseModelOutput, CausalLMOutput
from transformers.modeling_utils import PreTrainedModel
from transformers.pytorch_utils import torch_int_div
from transformers import WavLMConfig, WavLMModel
# Time-masking hyper-parameters for MAM (masked acoustic model) sample
# creation: each masked span covers between MASK_CONSECUTIVE_MIN and
# MASK_CONSECUTIVE_MAX consecutive frames, and roughly MASK_PROPORTION of
# each utterance's valid frames is selected for masking.
MASK_CONSECUTIVE_MIN = 20
MASK_CONSECUTIVE_MAX = 50
MASK_PROPORTION = 0.15
def create_mam_samples(audio, audio_len, mask_consecutive_min=20, mask_consecutive_max=50,
                       mask_proportion=0.15):
    """Build masked-acoustic-model (MAM) training samples in place.

    For each utterance a random span length is drawn, roughly
    ``mask_proportion`` of the valid frames are selected as mask spans
    (spans may overlap), and -- BERT-style -- each selected frame is zeroed
    with 80% probability, replaced by a randomly chosen frame of the same
    utterance with 10% probability, and left unchanged with 10% probability.

    Args:
        audio: ``(batch, time, feat)`` tensor; corrupted in place.
        audio_len: per-utterance valid lengths (sequence of ints).
        mask_consecutive_min: minimum masked-span length (default mirrors
            the module-level ``MASK_CONSECUTIVE_MIN``).
        mask_consecutive_max: maximum masked-span length (mirrors
            ``MASK_CONSECUTIVE_MAX``).
        mask_proportion: fraction of frames to select (mirrors
            ``MASK_PROPORTION``).

    Returns:
        ``(masked_audio, mask, labels)``: the corrupted input, a boolean
        ``(batch, time, 1)`` tensor marking every selected frame, and the
        uncorrupted reconstruction targets.
    """
    dtype = audio.dtype
    labels = audio.clone()  # uncorrupted targets, captured before masking
    masked = torch.zeros(labels.shape[:2] + (1,), dtype=torch.uint8).to(audio.device)

    def starts_to_intervals(starts, consecutive):
        # Expand each start index into `consecutive` successive indices.
        tiled = starts.expand(consecutive, starts.size(0)).permute(1, 0)
        offset = torch.arange(consecutive).expand_as(tiled)
        return (tiled + offset).view(-1)

    for idx in range(labels.shape[0]):
        # Draw the span length, then how many spans are needed to cover
        # roughly `mask_proportion` of this utterance's valid frames.
        mask_consecutive = random.randint(mask_consecutive_min, mask_consecutive_max)
        valid_start_max = max(audio_len[idx] - mask_consecutive - 1, 0)
        proportion = round(audio_len[idx] * mask_proportion / mask_consecutive)
        chosen_starts = torch.randperm(valid_start_max + 1)[:proportion]  # overlapping spans allowed
        chosen_intervals = starts_to_intervals(chosen_starts, mask_consecutive)
        # Fix: mark every chosen frame unconditionally; the original only
        # set the mask labels when at least one zeroed frame was drawn.
        masked[idx, chosen_intervals, :] = 1
        # Per chosen frame: 80% zero out, 10% swap in a random frame, 10% keep.
        dice = np.random.uniform(0, 1, len(chosen_intervals))
        zero_intervals = torch.masked_select(chosen_intervals, torch.BoolTensor(dice < 0.8))
        rand_intervals = torch.masked_select(chosen_intervals, torch.BoolTensor((dice >= 0.8) * (dice < 0.9)))
        if len(zero_intervals) > 0:
            audio[idx, zero_intervals, :] = 0
        if len(rand_intervals) > 0:
            random_intervals = torch.randperm(audio_len[idx])[:len(rand_intervals)]
            audio[idx, rand_intervals, :] = labels[idx, random_intervals, :]
    return audio.to(dtype=dtype), masked.to(dtype=torch.bool), labels.to(dtype=dtype)
164,461 | import math
import torch
import numpy as np
from space.args import str2bool
def gather(var, idx):
    """Recursively select rows ``idx`` (dim 0) from every tensor in `var`.

    Lists and dicts are traversed recursively; tensors are moved to CUDA
    and row-selected with ``index_select``; any other value is returned
    unchanged (``idx`` is ignored for non-tensor leaves).

    Args:
        var: a tensor, or an arbitrarily nested list/dict of tensors and
            other values.
        idx: a 1-D LongTensor of row indices (only used for tensor leaves).

    Returns:
        The same structure with each tensor replaced by its selected rows
        on the CUDA device.
    """
    if isinstance(var, list):
        return [gather(x, idx) for x in var]
    elif isinstance(var, dict):
        return {k: gather(v, idx) for k, v in var.items()}
    elif isinstance(var, torch.Tensor):
        # index_select on a CUDA tensor already yields a CUDA result; the
        # original chained a third, redundant .cuda() on the output.
        return var.cuda().index_select(dim=0, index=idx.cuda())
    else:
        return var
164,465 | import logging
import json
import numpy as np
from collections import OrderedDict
from space.utils import ontology
def f1_score(label_list, pred_list):
    """Return the F1 score of `pred_list` against `label_list`.

    A prediction counts as a true positive when it appears anywhere in
    `label_list` (membership test, duplicates each count). A small
    epsilon keeps every division well-defined, so disjoint or empty
    inputs yield 0.0 rather than raising.
    """
    eps = 1e-10
    true_pos = sum(1 for pred in pred_list if pred in label_list)
    false_pos = max(0, len(pred_list) - true_pos)
    false_neg = max(0, len(label_list) - true_pos)
    precision = true_pos / (true_pos + false_pos + eps)
    recall = true_pos / (true_pos + false_neg + eps)
    return 2 * precision * recall / (precision + recall + eps)
164,472 | import re
from space.utils import ontology
def clean_text(text):
    """Normalise a raw MultiWOZ utterance string.

    Lower-cases the text, unifies quotes/punctuation, repairs known
    corpus typos and postcode fragments via ordered regex substitutions,
    splits 'abc.xyz' into 'abc . xyz', and finally applies whole-token
    replacements read from the external ``mapping.pair`` file.
    The substitution order is significant.
    """
    text = text.strip()
    text = text.lower()
    # Unify curly quotes and troublesome punctuation before tokenisation.
    text = text.replace(u"’", "'")
    text = text.replace(u"‘", "'")
    text = text.replace(';', ',')
    text = text.replace('"', ' ')
    text = text.replace('/', ' and ')
    text = text.replace("don't", "do n't")
    # clean_time is defined elsewhere in this module; presumably it
    # normalises clock-time expressions -- confirm at its definition.
    text = clean_time(text)
    # Known bad spans observed in the corpus -> canonical replacements
    # (applied as regexes, in insertion order).
    baddata = { r'c\.b (\d), (\d) ([a-z])\.([a-z])': r'cb\1\2\3\4',
                'c.b. 1 7 d.y': 'cb17dy',
                'c.b.1 7 d.y': 'cb17dy',
                'c.b 25, 9 a.q': 'cb259aq',
                'isc.b 25, 9 a.q': 'is cb259aq',
                'c.b2, 1 u.f': 'cb21uf',
                'c.b 1,2 q.a':'cb12qa',
                '0-122-336-5664': '01223365664',
                'postcodecb21rs': 'postcode cb21rs',
                r'i\.d': 'id',
                ' i d ': 'id',
                'Telephone:01223358966': 'Telephone: 01223358966',
                'depature': 'departure',
                'depearting': 'departing',
                '-type': ' type',
                r"b[\s]?&[\s]?b": "bed and breakfast",
                "b and b": "bed and breakfast",
                r"guesthouse[s]?": "guest house",
                r"swimmingpool[s]?": "swimming pool",
                "wo n\'t": "will not",
                " \'d ": " would ",
                " \'m ": " am ",
                " \'re' ": " are ",
                " \'ll' ": " will ",
                " \'ve ": " have ",
                r'^\'': '',
                r'\'$': '',
                }
    for tmpl, good in baddata.items():
        text = re.sub(tmpl, good, text)
    text = re.sub(r'([a-zT]+)\.([a-z])', r'\1 . \2', text)  # 'abc.xyz' -> 'abc . xyz'
    text = re.sub(r'(\w+)\.\.? ', r'\1 . ', text)  # 'abc. ' / 'abc.. ' -> 'abc . '
    # NOTE(review): hard-coded absolute path, re-read on every call --
    # consider caching the mapping and making the path configurable.
    with open('/data/nt12_hdd_gluster/myself/space3/tools/mapping.pair', 'r') as fin:
        for line in fin.readlines():
            fromx, tox = line.replace('\n', '').split('\t')
            # Pad with spaces so only whole-token occurrences are replaced.
            text = ' ' + text + ' '
            text = text.replace(' ' + fromx + ' ', ' ' + tox + ' ')[1:-1]
    return text
def clean_slot_values(domain, slot, value):
    """Normalise a MultiWOZ (domain, slot, value) annotation.

    Runs the value through `clean_text`, then applies a hand-curated,
    per-domain table of fixes for typos, abbreviations and formatting
    inconsistencies observed in the corpus (e.g. 'guesthouse' -> 'guest
    house', '9:00' -> '09:00').  Slot names are also canonicalised
    ('arriveBy' -> 'arriveby', plus `ontology.normlize_slot_names`).

    Returns:
        The (possibly renamed) slot and the normalised value; an empty
        string marks values that should be dropped.
    """
    value = clean_text(value)
    # Empty / "not mentioned" values are blanked out before domain fixes.
    if not value:
        value = ''
    elif value == 'not mentioned':
        value = ''
        # value = 'not mentioned' # if in DST setting
    elif domain == 'attraction':
        if slot == 'name':
            if value == 't':
                value = ''
            if value=='trinity':
                value = 'trinity college'
        elif slot == 'area':
            if value in ['town centre', 'cent', 'center', 'ce']:
                value = 'centre'
            elif value in ['ely', 'in town', 'museum', 'norwich', 'same area as hotel']:
                value = ""
            elif value in ['we']:
                value = "west"
        elif slot == 'type':
            if value in ['m', 'mus', 'musuem']:
                value = 'museum'
            elif value in ['art', 'architectural']:
                value = "architecture"
            elif value in ['churches']:
                value = "church"
            elif value in ['coll']:
                value = "college"
            elif value in ['concert', 'concerthall']:
                value = 'concert hall'
            elif value in ['night club']:
                value = 'nightclub'
            elif value in ['mutiple sports', 'mutliple sports', 'sports', 'galleria']:
                value = 'multiple sports'
            elif value in ['ol', 'science', 'gastropub', 'la raza']:
                value = ''
            elif value in ['swimmingpool', 'pool']:
                value = 'swimming pool'
            elif value in ['fun']:
                value = 'entertainment'
    # Hotel fixes: areas, day typos, venue-name aliases, yes/no flags,
    # price ranges, star counts and hotel types.
    elif domain == 'hotel':
        if slot == 'area':
            if value in ['cen', 'centre of town', 'near city center', 'center']:
                value = 'centre'
            elif value in ['east area', 'east side']:
                value = 'east'
            elif value in ['in the north', 'north part of town']:
                value = 'north'
            elif value in ['we']:
                value = "west"
        elif slot == "day":
            if value == "monda":
                value = "monday"
            elif value == "t":
                value = "tuesday"
        elif slot == 'name':
            if value == 'uni':
                value = 'university arms hotel'
            elif value == 'university arms':
                value = 'university arms hotel'
            elif value == 'acron':
                value = 'acorn guest house'
            elif value == 'ashley':
                value = 'ashley hotel'
            elif value == 'arbury lodge guesthouse':
                value = 'arbury lodge guest house'
            elif value == 'la':
                value = 'la margherit'
            elif value == 'no':
                value = ''
        elif slot == 'internet':
            if value == 'does not':
                value = 'no'
            elif value in ['y', 'free', 'free internet']:
                value = 'yes'
            elif value in ['4']:
                value = ''
        elif slot == 'parking':
            if value == 'n':
                value = 'no'
            elif value in ['free parking']:
                value = 'yes'
            elif value in ['y']:
                value = 'yes'
        elif slot in ['pricerange', 'price range']:
            slot = 'pricerange'
            if value == 'moderately':
                value = 'moderate'
            elif value in ['any']:
                value = "do n't care"
            elif value in ['any']:
                value = "do n't care"
            elif value in ['inexpensive']:
                value = "cheap"
            elif value in ['2', '4']:
                value = ''
        elif slot == 'stars':
            if value == 'two':
                value = '2'
            elif value == 'three':
                value = '3'
            elif value in ['4-star', '4 stars', '4 star', 'four star', 'four stars']:
                value= '4'
        elif slot == 'type':
            if value == '0 star rarting':
                value = ''
            elif value == 'guesthouse':
                value = 'guest house'
            elif value not in ['hotel', 'guest house', "do n't care"]:
                value = ''
    # Restaurant fixes: areas, day typos, price ranges, cuisine typos
    # and booking-time zero-padding.
    elif domain == 'restaurant':
        if slot == "area":
            if value in ["center", 'scentre', "center of town", "city center", "cb30aq", "town center", 'centre of cambridge', 'city centre']:
                value = "centre"
            elif value == "west part of town":
                value = "west"
            elif value == "n":
                value = "north"
            elif value in ['the south']:
                value = 'south'
            elif value not in ['centre', 'south', "do n't care", 'west', 'east', 'north']:
                value = ''
        elif slot == "day":
            if value == "monda":
                value = "monday"
            elif value == "t":
                value = "tuesday"
        elif slot in ['pricerange', 'price range']:
            slot = 'pricerange'
            if value in ['moderately', 'mode', 'mo']:
                value = 'moderate'
            elif value in ['not']:
                value = ''
            elif value in ['inexpensive', 'ch']:
                value = "cheap"
        elif slot == "food":
            if value == "barbecue":
                value = "barbeque"
        elif slot == "pricerange":
            if value == "moderately":
                value = "moderate"
        elif slot == "time":
            if value == "9:00":
                value = "09:00"
            elif value == "9:45":
                value = "09:45"
            elif value == "1330":
                value = "13:30"
            elif value == "1430":
                value = "14:30"
            elif value == "9:15":
                value = "09:15"
            elif value == "9:30":
                value = "09:30"
            elif value == "1830":
                value = "18:30"
            elif value == "9":
                value = "09:00"
            elif value == "2:00":
                value = "14:00"
            elif value == "1:00":
                value = "13:00"
            elif value == "3:00":
                value = "15:00"
    # Taxi fixes: canonical slot names and HH:MM time normalisation.
    elif domain == 'taxi':
        if slot in ['arriveBy', 'arrive by']:
            slot = 'arriveby'
            if value == '1530':
                value = '15:30'
            elif value == '15 minutes':
                value = ''
        elif slot in ['leaveAt', 'leave at']:
            slot = 'leaveat'
            if value == '1:00':
                value = '01:00'
            elif value == '21:4':
                value = '21:04'
            elif value == '4:15':
                value = '04:15'
            elif value == '5:45':
                value = '05:45'
            elif value == '0700':
                value = '07:00'
            elif value == '4:45':
                value = '04:45'
            elif value == '8:30':
                value = '08:30'
            elif value == '9:30':
                value = '09:30'
            value = value.replace(".", ":")
    # Train fixes: canonical slot names, don't-care variants and times.
    elif domain == 'train':
        if slot in ['arriveBy', 'arrive by']:
            slot = 'arriveby'
            if value == '1':
                value = '01:00'
            elif value in ['does not care', 'doesnt care', "doesn't care"]:
                value = "do n't care"
            elif value == '8:30':
                value = '08:30'
            elif value == 'not 15:45':
                value = ''
            value = value.replace(".", ":")
        elif slot == 'day':
            if value =='doesnt care' or value == "doesn't care":
                value = "do n't care"
        elif slot in ['leaveAt', 'leave at']:
            slot = 'leaveat'
            if value == '2:30':
                value = '02:30'
            elif value == '7:54':
                value = '07:54'
            elif value == 'after 5:45 pm':
                value = '17:45'
            elif value in ['early evening', 'friday', 'sunday', 'tuesday', 'afternoon']:
                value = ''
            elif value == '12':
                value = '12:00'
            elif value == '1030':
                value = '10:30'
            elif value == '1700':
                value = '17:00'
            elif value in ['does not care', 'doesnt care', 'do nt care', "doesn't care"]:
                value = "do n't care"
            value = value.replace(".", ":")
    # Domain-independent "don't care" canonicalisation.
    if value in ['dont care', "don't care", "do nt care", "doesn't care"]:
        value = "do n't care"
    # Final slot-name canonicalisation via the ontology lookup table.
    if ontology.normlize_slot_names.get(slot):
        slot = ontology.normlize_slot_names[slot]
    return slot, value
164,473 | from space.utils.decorators import ignore_nodes
def jaccard_dis_sim(x, y):
def clean_frame(frame):
def construct_frame_graph(frame):
def tree_edit_score(frame1, frame2):
    """Similarity score between two dialogue frames.

    Cleans both frames, short-circuits on empty input (0.) and exact
    equality (1.), then decomposes each frame into its graph parts
    (domain/act/slot/value nodes, edges and paths) and returns the
    weighted average of the per-part Jaccard similarities.
    """
    # An empty frame on either side yields zero similarity.
    if not (frame1 and frame2):
        return 0.
    # Clean both frames before any comparison.
    frame1 = clean_frame(frame=frame1)
    frame2 = clean_frame(frame=frame2)
    # Identical cleaned frames are maximally similar.
    if frame1 == frame2:
        return 1.
    # Decompose each frame into its ten graph collections: node sets,
    # edge sets, and multi-hop paths (domain->act->slot->value).
    graph1 = construct_frame_graph(frame=frame1)
    graph2 = construct_frame_graph(frame=frame2)
    # Jaccard similarity of every corresponding collection pair; each
    # call yields a (score, weight) tuple.
    pair_scores = [jaccard_dis_sim(part1, part2) for part1, part2 in zip(graph1, graph2)]
    total = sum(pair[0] for pair in pair_scores)
    weight = sum(pair[1] for pair in pair_scores)
    return total / weight
164,476 | import json
# Domain tokens recognised inside compacted belief-state spans.
all_domain = [
    "[taxi]","[police]","[hospital]","[hotel]","[attraction]","[train]","[restaurant]",'[profile]'
]
# NOTE(review): all_reqslot / all_infslot are defined elsewhere in this
# module -- confirm they exist before this line executes.
all_slots = all_reqslot + all_infslot
all_slots = set(all_slots)
The provided code snippet includes necessary dependencies for implementing the `paser_bs_old` function. Write a Python function `def paser_bs_old(sent)` to solve the following problem:
Convert a compacted belief-state span (e.g. "<sos_b> [hotel] area centre <eos_b>") into a de-duplicated list of "domain slot value" triples.
Here is the function:
def paser_bs_old(sent):
    """Convert a compacted belief-state span to a list of triples.

    Parses a span like ``'<sos_b> [hotel] area centre stars 4 <eos_b>'``
    into de-duplicated ``"domain slot value"`` strings, by locating
    domain tokens (from `all_domain`) and slot tokens (from `all_slots`)
    and joining each slot with the value tokens that follow it.

    Note: the original used ``sent.strip('<sos_b>')``, but `str.strip`
    treats its argument as a *character set* -- it could eat content
    characters ('s', 'o', 'b', 'e', ...) at either end and left an
    ``<e`` residue from ``<eos_b>``.  The markers are now removed as
    whole tokens, and leftover debug prints are dropped.
    """
    sent = sent.strip()
    if sent.startswith('<sos_b>'):
        sent = sent[len('<sos_b>'):]
    if sent.endswith('<eos_b>'):
        sent = sent[:-len('<eos_b>')]
    sent = sent.split()
    belief_state = []
    domain_idx = [idx for idx, token in enumerate(sent) if token in all_domain]
    for i, d_idx in enumerate(domain_idx):
        # Each domain's sub-span runs up to the next domain token (or EOS).
        next_d_idx = len(sent) if i + 1 == len(domain_idx) else domain_idx[i + 1]
        domain = sent[d_idx]
        sub_span = sent[d_idx + 1:next_d_idx]
        sub_s_idx = [idx for idx, token in enumerate(sub_span) if token in all_slots]
        for j, s_idx in enumerate(sub_s_idx):
            # A slot's value is everything up to the next slot token.
            next_s_idx = len(sub_span) if j == len(sub_s_idx) - 1 else sub_s_idx[j + 1]
            slot = sub_span[s_idx]
            value = ' '.join(sub_span[s_idx + 1:next_s_idx])
            belief_state.append(" ".join([domain, slot, value]))
    # De-duplicate; note set() makes the output order unspecified.
    return list(set(belief_state))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.