id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
163,622 | import argparse
import fnmatch
import json
import os
import pdb
import pickle
import re
import sqlite3
from typing import Dict, List, Tuple
import backoff
import openai
import pandas as pd
import sqlparse
from tqdm import tqdm
def question_package(data_json, knowledge=False):
    """Collect the 'question' field from every record in data_json.

    `knowledge` is accepted for signature symmetry with sibling helpers
    and is not used here.
    """
    return [record['question'] for record in data_json]
163,623 | import argparse
import fnmatch
import json
import os
import pdb
import pickle
import re
import sqlite3
from typing import Dict, List, Tuple
import backoff
import openai
import pandas as pd
import sqlparse
from tqdm import tqdm
def knowledge_package(data_json, knowledge=False):
    """Collect the 'evidence' (external knowledge) field from each record.

    `knowledge` is accepted for signature symmetry and is not used.
    """
    return [record['evidence'] for record in data_json]
163,624 | import argparse
import fnmatch
import json
import os
import pdb
import pickle
import re
import sqlite3
from typing import Dict, List, Tuple
import backoff
import openai
import pandas as pd
import sqlparse
from tqdm import tqdm
def decouple_question_schema(datasets, db_root_path):
    """Split BIRD records into parallel lists: questions, sqlite paths, evidence.

    The sqlite path is built as `<db_root_path><db_id>/<db_id>.sqlite`, so
    db_root_path is expected to end with a path separator.
    """
    questions = [item['question'] for item in datasets]
    knowledge = [item['evidence'] for item in datasets]
    db_paths = [
        db_root_path + item['db_id'] + '/' + item['db_id'] + '.sqlite'
        for item in datasets
    ]
    return questions, db_paths, knowledge
163,625 | import argparse
import fnmatch
import json
import os
import pdb
import pickle
import re
import sqlite3
from typing import Dict, List, Tuple
import backoff
import openai
import pandas as pd
import sqlparse
from tqdm import tqdm
def new_directory(path):
    """Create `path` (including parents) if it does not already exist.

    Uses exist_ok=True instead of the original exists()-then-makedirs()
    pattern, which had a check-then-act race when two processes created
    the same directory concurrently.
    """
    os.makedirs(path, exist_ok=True)
def generate_sql_file(sql_lst, output_path=None):
    """Package SQL strings into an {index: sql} dict and optionally dump JSON.

    Args:
        sql_lst: iterable of SQL strings.
        output_path: if truthy, the dict is also written there as JSON
            (parent directories are created via new_directory).

    Returns:
        dict mapping 0-based index -> SQL string.

    Fix: the original passed open(...) directly to json.dump, leaking the
    file handle; a context manager now guarantees it is closed.
    """
    result = {i: sql for i, sql in enumerate(sql_lst)}
    if output_path:
        directory_path = os.path.dirname(output_path)
        new_directory(directory_path)
        with open(output_path, 'w') as f:
            json.dump(result, f, indent=4)
    return result
163,626 | import sys
import json
import argparse
import sqlite3
import multiprocessing as mp
from func_timeout import func_timeout, FunctionTimedOut
def package_sqls(sql_path, db_root_path, mode='gpt', data_mode='dev'):
    """Load predicted ('gpt') or gold ('gt') SQL plus matching sqlite paths.

    gpt mode reads `sql_path + 'predict_<data_mode>.json'` (a dict whose
    string values look like '<sql>\t----- bird -----\t<db_name>'); non-string
    values fall back to an empty query against the 'financial' db.
    gt mode reads `sql_path + '<data_mode>_gold.sql'` with tab-separated
    '<sql>\t<db_name>' lines.

    Returns:
        (clean_sqls, db_path_list); both empty for an unknown mode.

    Fix: both open() calls were never closed — now wrapped in context
    managers; type(x) == str replaced with isinstance.
    """
    clean_sqls = []
    db_path_list = []
    if mode == 'gpt':
        with open(sql_path + 'predict_' + data_mode + '.json', 'r') as f:
            sql_data = json.load(f)
        for idx, sql_str in sql_data.items():
            if isinstance(sql_str, str):
                sql, db_name = sql_str.split('\t----- bird -----\t')
            else:
                sql, db_name = " ", "financial"
            clean_sqls.append(sql)
            db_path_list.append(db_root_path + db_name + '/' + db_name + '.sqlite')
    elif mode == 'gt':
        with open(sql_path + data_mode + '_gold.sql') as sqls:
            sql_txt = sqls.readlines()
        for idx, sql_str in enumerate(sql_txt):
            sql, db_name = sql_str.strip().split('\t')
            clean_sqls.append(sql)
            db_path_list.append(db_root_path + db_name + '/' + db_name + '.sqlite')
    return clean_sqls, db_path_list
163,627 | import sys
import json
import argparse
import sqlite3
import multiprocessing as mp
from func_timeout import func_timeout, FunctionTimedOut
def result_callback(result):
    # apply_async completion hook: collects each worker's result dict into
    # the module-level `exec_result` list (defined elsewhere in this file).
    exec_result.append(result)
def execute_model(predicted_sql, ground_truth, db_place, idx, meta_time_out):
    """Run execute_sql on one query pair under a wall-clock timeout.

    Args:
        predicted_sql / ground_truth: SQL strings to compare.
        db_place: path to the sqlite database file.
        idx: query index, echoed back so async results can be re-ordered.
        meta_time_out: per-query timeout in seconds.

    Returns:
        {'sql_idx': idx, 'res': r} where r is execute_sql's result, or 0 on
        timeout / execution error.

    Fix: the original assigned `result = [('timeout',)]` / `[('error',)]`
    in the except branches and then unconditionally overwrote `result`
    before returning — that dead code is removed.
    """
    try:
        res = func_timeout(meta_time_out, execute_sql,
                           args=(predicted_sql, ground_truth, db_place))
    except KeyboardInterrupt:
        sys.exit(0)
    except FunctionTimedOut:
        # query exceeded meta_time_out seconds
        res = 0
    except Exception:
        # possibly len(query) > 512 or not executable
        res = 0
    return {'sql_idx': idx, 'res': res}
def run_sqls_parallel(sqls, db_places, num_cpus=1, meta_time_out=30.0):
    # Fan the (predicted, ground-truth) SQL pairs out over a process pool.
    # Results are gathered asynchronously through result_callback into the
    # module-level `exec_result` list; this call blocks until all finish.
    pool = mp.Pool(processes=num_cpus)
    for i,sql_pair in enumerate(sqls):
        predicted_sql, ground_truth = sql_pair
        pool.apply_async(execute_model, args=(predicted_sql, ground_truth, db_places[i], i, meta_time_out), callback=result_callback)
    # no more tasks; wait for every async job to complete
    pool.close()
    pool.join()
163,628 | import sys
import json
import argparse
import sqlite3
import multiprocessing as mp
from func_timeout import func_timeout, FunctionTimedOut
def sort_results(list_of_dicts):
    """Order execution-result dicts by their original query index."""
    def by_index(entry):
        return entry['sql_idx']
    return sorted(list_of_dicts, key=by_index)
163,629 | import sys
import json
import argparse
import sqlite3
import multiprocessing as mp
from func_timeout import func_timeout, FunctionTimedOut
def load_json(dir):
    """Read the JSON file at path `dir` and return the parsed contents.

    The body was elided in this dump (an empty `def` is a syntax error);
    restored to match the identical helper defined later in this file.
    """
    with open(dir, 'r') as j:
        contents = json.loads(j.read())
    return contents
def compute_acc_by_diff(exec_results, diff_json_path):
    """Execution accuracy (%) overall and per difficulty bucket.

    Args:
        exec_results: list of {'sql_idx', 'res'} dicts aligned with the
            question order in the difficulty file.
        diff_json_path: JSON file whose i-th entry carries a 'difficulty'
            label ('simple' / 'moderate' / 'challenging').

    Returns:
        (simple, moderate, challenging, all) accuracies in percent plus a
        [counts...] list.

    Fix: an empty difficulty bucket previously raised ZeroDivisionError;
    empty buckets (and an empty result set) now report 0.0.
    """
    num_queries = len(exec_results)
    results = [res['res'] for res in exec_results]
    contents = load_json(diff_json_path)
    simple_results, moderate_results, challenging_results = [], [], []
    for i, content in enumerate(contents):
        if content['difficulty'] == 'simple':
            simple_results.append(exec_results[i])
        elif content['difficulty'] == 'moderate':
            moderate_results.append(exec_results[i])
        elif content['difficulty'] == 'challenging':
            challenging_results.append(exec_results[i])

    def _acc(bucket):
        # mean of 'res' over the bucket; 0.0 for an empty bucket
        return sum(res['res'] for res in bucket) / len(bucket) if bucket else 0.0

    simple_acc = _acc(simple_results)
    moderate_acc = _acc(moderate_results)
    challenging_acc = _acc(challenging_results)
    all_acc = sum(results) / num_queries if num_queries else 0.0
    count_lists = [len(simple_results), len(moderate_results), len(challenging_results), num_queries]
    return simple_acc * 100, moderate_acc * 100, challenging_acc * 100, all_acc * 100, count_lists
163,630 | import sys
import json
import argparse
import sqlite3
import multiprocessing as mp
from func_timeout import func_timeout, FunctionTimedOut
def print_data(score_lists, count_lists):
    """Pretty-print per-difficulty counts followed by accuracy percentages."""
    levels = ['simple', 'moderate', 'challenging', 'total']
    header_fmt = "{:20} {:20} {:20} {:20} {:20}"
    count_fmt = "{:20} {:<20} {:<20} {:<20} {:<20}"
    score_fmt = "{:20} {:<20.2f} {:<20.2f} {:<20.2f} {:<20.2f}"
    print(header_fmt.format("", *levels))
    print(count_fmt.format('count', *count_lists))
    print('====================================== ACCURACY =====================================')
    print(score_fmt.format('accuracy', *score_lists))
163,631 | import os
import pdb
import sys
import json
import numpy as np
import argparse
import sqlite3
import multiprocessing as mp
from func_timeout import func_timeout, FunctionTimedOut
import time
import math
def package_sqls(sql_path, db_root_path, mode='gpt', data_mode='dev'):
    """Load predicted ('gpt') or gold ('gt') SQL plus matching sqlite paths.

    gpt mode reads `sql_path + 'predict_<data_mode>.json'` (a dict whose
    string values look like '<sql>\t----- bird -----\t<db_name>'); non-string
    values fall back to an empty query against the 'financial' db.
    gt mode reads `sql_path + '<data_mode>_gold.sql'` with tab-separated
    '<sql>\t<db_name>' lines.

    Returns:
        (clean_sqls, db_path_list); both empty for an unknown mode.

    Fix: both open() calls were never closed — now wrapped in context
    managers; type(x) == str replaced with isinstance.
    """
    clean_sqls = []
    db_path_list = []
    if mode == 'gpt':
        with open(sql_path + 'predict_' + data_mode + '.json', 'r') as f:
            sql_data = json.load(f)
        for idx, sql_str in sql_data.items():
            if isinstance(sql_str, str):
                sql, db_name = sql_str.split('\t----- bird -----\t')
            else:
                sql, db_name = " ", "financial"
            clean_sqls.append(sql)
            db_path_list.append(db_root_path + db_name + '/' + db_name + '.sqlite')
    elif mode == 'gt':
        with open(sql_path + data_mode + '_gold.sql') as sqls:
            sql_txt = sqls.readlines()
        for idx, sql_str in enumerate(sql_txt):
            sql, db_name = sql_str.strip().split('\t')
            clean_sqls.append(sql)
            db_path_list.append(db_root_path + db_name + '/' + db_name + '.sqlite')
    return clean_sqls, db_path_list
163,632 | import os
import pdb
import sys
import json
import numpy as np
import argparse
import sqlite3
import multiprocessing as mp
from func_timeout import func_timeout, FunctionTimedOut
import time
import math
def result_callback(result):
    # apply_async completion hook: collects each worker's result dict into
    # the module-level `exec_result` list (defined elsewhere in this file).
    exec_result.append(result)
def execute_model(predicted_sql, ground_truth, db_place, idx, iterate_num, meta_time_out):
    """Measure the predicted/gold execution-time ratio for one query pair.

    Calls iterated_execute_sql (defined elsewhere) `iterate_num` times under
    a total timeout of meta_time_out * iterate_num seconds.

    Returns:
        {'sql_idx': idx, 'time_ratio': r} with r = 0 on timeout or error.

    Fix: the original assigned `result = [('timeout',)]` / `[('error',)]`
    in the except branches and then unconditionally overwrote `result`
    before returning — that dead code is removed.
    """
    try:
        # you can personalize the total timeout number:
        # a larger timeout leads to a more stable VES, but needs more patience
        time_ratio = func_timeout(meta_time_out * iterate_num, iterated_execute_sql,
                                  args=(predicted_sql, ground_truth, db_place, iterate_num))
    except KeyboardInterrupt:
        sys.exit(0)
    except FunctionTimedOut:
        time_ratio = 0
    except Exception:
        # possibly len(query) > 512 or not executable
        time_ratio = 0
    return {'sql_idx': idx, 'time_ratio': time_ratio}
def run_sqls_parallel(sqls, db_places, num_cpus=1, iterate_num=100, meta_time_out=30.0):
    # Fan the (predicted, ground-truth) SQL pairs out over a process pool for
    # VES timing. Results are gathered asynchronously via result_callback into
    # the module-level `exec_result` list; this call blocks until all finish.
    pool = mp.Pool(processes=num_cpus)
    for i,sql_pair in enumerate(sqls):
        predicted_sql, ground_truth = sql_pair
        pool.apply_async(execute_model, args=(predicted_sql, ground_truth, db_places[i], i, iterate_num, meta_time_out), callback=result_callback)
    # no more tasks; wait for every async job to complete
    pool.close()
    pool.join()
163,633 | import os
import pdb
import sys
import json
import numpy as np
import argparse
import sqlite3
import multiprocessing as mp
from func_timeout import func_timeout, FunctionTimedOut
import time
import math
def sort_results(list_of_dicts):
    """Return the result dicts sorted ascending by 'sql_idx'."""
    return sorted(list_of_dicts, key=lambda entry: entry['sql_idx'])
163,634 | import os
import pdb
import sys
import json
import numpy as np
import argparse
import sqlite3
import multiprocessing as mp
from func_timeout import func_timeout, FunctionTimedOut
import time
import math
def compute_ves(exec_results):
    """Valid Efficiency Score: mean of sqrt(time_ratio) * 100 over results.

    Entries with time_ratio == 0 (failed/timed-out queries) contribute 0 to
    the numerator but still count in the denominator.

    Fixes: removed the unused `count` accumulator; an empty input now
    returns 0 instead of raising ZeroDivisionError.
    """
    num_queries = len(exec_results)
    if num_queries == 0:
        return 0
    total_ratio = 0
    for result in exec_results:
        if result['time_ratio'] != 0:
            total_ratio += math.sqrt(result['time_ratio']) * 100
    return total_ratio / num_queries
def load_json(dir):
    """Parse and return the JSON document stored at path `dir`."""
    with open(dir, 'r') as handle:
        return json.loads(handle.read())
def compute_ves_by_diff(exec_results, diff_json_path):
    """Compute VES overall and per difficulty bucket.

    The i-th entry of the difficulty file labels the i-th exec result as
    'simple', 'moderate' or 'challenging'. Returns the three bucket scores,
    the overall score, and the bucket sizes.
    """
    contents = load_json(diff_json_path)
    buckets = {'simple': [], 'moderate': [], 'challenging': []}
    for i, content in enumerate(contents):
        level = content['difficulty']
        if level in buckets:
            buckets[level].append(exec_results[i])
    simple_results = buckets['simple']
    moderate_results = buckets['moderate']
    challenging_results = buckets['challenging']
    count_lists = [len(simple_results), len(moderate_results),
                   len(challenging_results), len(exec_results)]
    return (compute_ves(simple_results),
            compute_ves(moderate_results),
            compute_ves(challenging_results),
            compute_ves(exec_results),
            count_lists)
163,635 | import os
import pdb
import sys
import json
import numpy as np
import argparse
import sqlite3
import multiprocessing as mp
from func_timeout import func_timeout, FunctionTimedOut
import time
import math
def print_data(score_lists, count_lists):
    """Pretty-print per-difficulty counts followed by VES scores."""
    levels = ['simple', 'moderate', 'challenging', 'total']
    header_fmt = "{:20} {:20} {:20} {:20} {:20}"
    count_fmt = "{:20} {:<20} {:<20} {:<20} {:<20}"
    score_fmt = "{:20} {:<20.2f} {:<20.2f} {:<20.2f} {:<20.2f}"
    print(header_fmt.format("", *levels))
    print(count_fmt.format('count', *count_lists))
    print('========================================= VES ========================================')
    print(score_fmt.format('ves', *score_lists))
163,636 | import json
import re
import pprint
import os
import tqdm
import random
def gen_global_index():
    """Yield 0, 1, 2, ... forever — a running global id generator."""
    counter = 0
    while True:
        yield counter
        counter += 1
163,637 | import json
import re
import pprint
import os
import tqdm
import random
random.seed(42)
def split_trans(split):
    """Normalize a raw split label onto the canonical train/dev/test names."""
    mapping = {
        'train': 'train', 'test': 'test', 'dev': 'dev',
        'valid': 'dev', 'valid1': 'dev', 'valid2': 'test',
    }
    if split not in mapping:
        raise Exception('guaiguaidigai')
    return mapping[split]
def summarize_from_feedback_preprocess(path, index_generator):
    """Convert OpenAI summarize-from-feedback comparison files under `path`
    into train/dev/test JSONL files under the mirrored preprocessed_data dir.

    Each raw line holds one comparison (two candidate summaries plus the
    annotator's 'choice'); the chosen summary is written first in 'target'
    so index 0 is the preferred candidate. Only Reddit-post items
    ('post' in info) are kept; article-style items are skipped. The raw dev
    pool is downsampled to 1000 items and the remainder moved into train
    (random is seeded at module level for reproducibility).
    """
    files = os.listdir(path)
    files = [filename for filename in files if filename.endswith('.json')]
    target_samples = {
        'train': [],
        'dev': [],
        'test': []
    }
    for filename in files:
        with open(os.path.join(path, filename), 'r', encoding="utf-8") as f:
            raw = f.readlines()
        data = []
        for line in raw:
            line = json.loads(line)
            data.append(line)
        samples = []  # NOTE(review): accumulated nowhere — appears unused
        bar = tqdm.tqdm(data)
        for index, sample in enumerate(bar):
            bar.set_description(os.path.join(path, filename))
            # every comparison is expected to offer exactly two candidates
            assert len(sample['summaries']) == 2
            if 'post' in sample['info']:
                prefix = "SUBREDDIT: r/{}\nTITLE: {}\nPOST: {}\nTL;DR:".format(sample['info']['subreddit'], sample['info']['title'], sample['info']['post']).strip()
                one_sample = {
                    'available': [
                        {
                            'id': next(index_generator),
                            'prefix': prefix,
                            'target_num': 2,
                            # chosen summary first, rejected second
                            'target': [
                                " {}".format(sample['summaries'][sample['choice']]['text'].strip()),
                                " {}".format(sample['summaries'][1 - sample['choice']]['text'].strip()),
                            ]
                        },
                    ],
                    'split': split_trans(sample['split']),
                    'source': {
                        'path': os.path.join(path, filename),
                        'line_num': index + 1,
                    }
                }
                target_samples[one_sample['split']].append(one_sample)
            else:
                # article-style items are formatted but deliberately dropped
                prefix = "Article: {}\nTL;DR:".format(sample['info']['article'])
                pass
    os.makedirs(path.replace('raw_data', 'preprocessed_data'), exist_ok=True)
    # keep a random subset of 1000 dev items as the true dev set; the rest
    # are folded back into the training split
    true_dev_index = random.sample(list(range(len(target_samples['dev']))), 1000)
    true_dev = []
    for index, sample in enumerate(target_samples['dev']):
        if index in true_dev_index:
            sample['split'] = 'dev'
            true_dev.append(sample)
        else:
            sample['split'] = 'train'
            target_samples['train'].append(sample)
    target_samples['dev'] = true_dev
    with open(os.path.join(path.replace('raw_data', 'preprocessed_data'), "train.json"), 'w', encoding='utf-8') as f:
        for sample in target_samples['train']:
            f.write(json.dumps(sample, ensure_ascii=False) + '\n')
    print("{}: {}".format(os.path.join(path.replace('raw_data', 'preprocessed_data'), "train.json"), len(target_samples['train'])))
    with open(os.path.join(path.replace('raw_data', 'preprocessed_data'), "dev.json"), 'w', encoding='utf-8') as f:
        for sample in target_samples['dev']:
            f.write(json.dumps(sample, ensure_ascii=False) + '\n')
    print("{}: {}".format(os.path.join(path.replace('raw_data', 'preprocessed_data'), "dev.json"), len(target_samples['dev'])))
    with open(os.path.join(path.replace('raw_data', 'preprocessed_data'), "test.json"), 'w', encoding='utf-8') as f:
        for sample in target_samples['test']:
            f.write(json.dumps(sample, ensure_ascii=False) + '\n')
    print("{}: {}".format(os.path.join(path.replace('raw_data', 'preprocessed_data'), "test.json"), len(target_samples['test'])))
163,638 | import os
import sys
import json
import random
import numpy as np
import tqdm
from utils.metrics_summarize import create_reward_fn
def split_trans(split):
    """Map dataset split aliases onto the canonical train/dev/test names."""
    if split in ('train', 'test', 'dev'):
        return split
    if split in ('valid', 'valid1'):
        return 'dev'
    if split == 'valid2':
        return 'test'
    raise Exception('guaiguaidigai')
def concat_wo_ranker(prefixes, suffixes):
    """Score every (prefix, suffix) pair without reordering the candidates.

    prefixes / suffixes are [batch][stage] nested lists, e.g.
    [[a, b, c], [d, e, f]]. Returns the inputs unchanged together with a
    [batch][stage] reward matrix from the module-level `get_score`.
    """
    stage_num = len(prefixes[0])
    batch = len(prefixes)
    flat_prefixes = [p for group in prefixes for p in group]
    flat_suffixes = [s for group in suffixes for s in group]
    # get_score is bound at module scope by create_reward_fn
    scores = get_score(flat_prefixes, flat_suffixes).view(batch, stage_num)
    rewards = scores.cpu().detach().numpy().tolist()  # [batch, ranking]
    return prefixes, suffixes, rewards
def reward_model_ranker(prefixes, suffixes):
    """Score all candidates and reorder each sample by descending reward.

    prefixes / suffixes are [batch][stage] nested lists. Returns the
    reordered prefixes, suffixes and reward lists so that index 0 holds the
    highest-scoring candidate of each sample.
    """
    stage_num = len(prefixes[0])
    batch = len(prefixes)
    flat_prefixes = [p for group in prefixes for p in group]
    flat_suffixes = [s for group in suffixes for s in group]
    # get_score is bound at module scope by create_reward_fn
    scores = get_score(flat_prefixes, flat_suffixes).view(batch, stage_num)
    scores = scores.cpu().detach().numpy()  # [batch, ranking]
    order = np.argsort(-scores, axis=1)  # descending reward per sample
    ranked_prefixes = [[prefixes[i][j] for j in order[i]] for i in range(batch)]
    ranked_suffixes = [[suffixes[i][j] for j in order[i]] for i in range(batch)]
    ranked_rewards = [[float(scores[i][j]) for j in order[i]] for i in range(batch)]
    return ranked_prefixes, ranked_suffixes, ranked_rewards
def extract_train_data(root_dir, if_score, if_rerank, training_stage_num=None, split='train'):
    """Build PRO training samples for `split` from the JSONL file at root_dir.

    Each retained line contributes one sample holding parallel prefix/suffix
    candidate lists; when if_score is set the candidates are scored (and,
    with if_rerank, reordered) by the reward model in batches, otherwise a
    flat reward of 1.0 is assigned.

    Relies on module-level names `data_aug`, `reward_batch_size` and the
    ranking helpers — TODO confirm these are bound before this is called.

    Fixes: `reward_batch_size / 2` produced a float, which range() rejects —
    now floor division; `== None` replaced by `is None`.
    """
    training_data = []
    with open(root_dir, 'r', encoding='utf-8') as f:
        raw_data = f.readlines()
    for line in raw_data:
        sample = json.loads(line)
        if split_trans(sample['split']) != split:
            continue
        new_sample = {'meta': sample['source'], 'prefix': [], 'suffix': []}
        # data_aug additionally pulls in the model-generated 'extended' candidates
        sources = sample['extended'] + sample['available'] if data_aug else sample['available']
        for s in sources:
            for suffix in s['target']:
                assert isinstance(suffix, str)
                new_sample['prefix'].append(s['prefix'])
                new_sample['suffix'].append(suffix)
        training_data.append(new_sample)
        if training_stage_num is None:
            training_stage_num = len(new_sample['prefix'])
        # every sample must expose the same number of candidates
        assert training_stage_num == len(new_sample['prefix'])
    if if_score:
        batch_size = reward_batch_size // 2  # default
        for index in tqdm.tqdm(range(0, len(training_data), batch_size), desc="rewarding"):
            prefixes = []
            suffixes = []
            # shrink the final batch if the data does not divide evenly
            if len(training_data) - index < batch_size:
                batch_size = len(training_data) - index
            for sub_index in range(batch_size):
                prefixes.append(training_data[index + sub_index]['prefix'])
                suffixes.append(training_data[index + sub_index]['suffix'])
            if if_rerank:
                prefixes, suffixes, rewards = reward_model_ranker(prefixes, suffixes)
            else:
                prefixes, suffixes, rewards = concat_wo_ranker(prefixes, suffixes)
            for sub_index in range(batch_size):
                training_data[index + sub_index]['prefix'] = prefixes[sub_index]
                training_data[index + sub_index]['suffix'] = suffixes[sub_index]
                training_data[index + sub_index]['reward'] = rewards[sub_index]
    else:
        for l in training_data:
            l['reward'] = [1.0] * len(l['suffix'])
    for l in training_data:
        # supervised-finetuning target defaults to the first (best) candidate
        l['sft_index'] = 0
    return training_data
163,639 | import json
import re
import pprint
import os
import tqdm
def gen_global_index():
    """Infinite generator of sequential ids starting at 0."""
    next_id = 0
    while True:
        yield next_id
        next_id += 1
163,640 | import json
import re
import pprint
import os
import tqdm
Roles = {
"Human": "<|prompter|>",
"Assistant": "<|assistant|>"
}
def hhrlhf_preprocess(path, filename, index_generator, split='train'):
    """Convert one Anthropic HH-RLHF JSONL file into this project's sample
    format and write it to the mirrored preprocessed_data directory.

    train/dev: keeps only chosen/rejected pairs whose parsed dialogues have
    equal length, storing [good_reply, bad_reply] as ranked targets with the
    shared history (plus the final role token) as the prefix.
    Any other split (test): emits one evaluation sample per assistant turn,
    for both the chosen and the rejected dialogue, keeping only the sample
    for the completed final turn.
    Returns the list of samples written.
    """
    with open(os.path.join(path, filename), 'r', encoding='utf-8') as f:
        raw = f.readlines()
    data = []
    for line in raw:
        line = json.loads(line)
        data.append(line)
    # Thank OpenAssistant for their helpful public code:
    # https://github.com/LAION-AI/Open-Assistant/blob/c6591bd04dd337c716d097b2d267b92403850396/model/model_training/custom_datasets/rank_datasets.py
    def _split_dialogue(text: str):
        # Parse a "Human: ...\n\nAssistant: ..." transcript into a list of
        # (role-token, message) pairs, merging consecutive same-role chunks.
        lines = text.split("\n\n")
        dialogue: list[tuple[str, str]] = []
        role = None
        messages = []
        for line in lines:
            if line.startswith("Human:"):
                speaker = "Human"
                message = line[7:]
            elif line.startswith("Assistant:"):
                speaker = "Assistant"
                message = line[11:]
            else:
                # chunks without a recognized role prefix are dropped
                continue
            if role != speaker:
                if role is not None:
                    dialogue.append((Roles[role], "\n".join(messages)))
                messages = []
                role = speaker
            messages.append(message.strip())
        if role is not None and len(messages) > 0:
            dialogue.append([Roles[role], "\n".join(messages)])
        return dialogue
    if split == "train" or split == "dev":
        samples = []
        bar = tqdm.tqdm(data)
        for index, sample in enumerate(bar):
            bar.set_description(os.path.join(path, filename))
            if "Assistant" not in sample["chosen"]:
                # no assistant turn at all -> nothing to rank; skip the record
                print("Flag1", index + 1)
                continue
            chosen = _split_dialogue(sample["chosen"])  # [(Role, Post), (Role, Post), ... ]
            rejected = _split_dialogue(sample["rejected"])
            assert rejected[0][0] == "<|prompter|>" and chosen[0][0] == "<|prompter|>"
            # only very few items have non matching lengths
            if len(rejected) == len(chosen):
                assert chosen[-1][0] == rejected[-1][0]
                # shared history plus the final bare role token forms the prefix
                prefix = [role + text for role, text in chosen[:-1]]
                prefix.append(chosen[-1][0])
                good_reply = chosen[-1][1]  # last part of dialog, the text
                bad_reply = rejected[-1][1]  # last part of dialog, the text
                one_sample = {
                    # 'extended' is an empty slot for model-generated candidates
                    'extended': [
                        {'id': next(index_generator), 'prefix': prefix, 'target_num': 2, 'target': []}
                    ],
                    # ranked targets: preferred reply first
                    'available': [
                        {'id': next(index_generator), 'prefix': prefix, 'target_num': 2, 'target': [good_reply, bad_reply]}
                    ],
                    'available_for_test': [],
                    'split': split,
                    'source': {
                        'path': os.path.join(path, filename),
                        'line_num': index + 1,
                        'task': "dialogue"
                    }
                }
                samples.append(one_sample)
    else:
        # evaluation export: one sample per assistant turn, chosen & rejected
        samples = []
        bar = tqdm.tqdm(data)
        for index, sample in enumerate(bar):
            bar.set_description(os.path.join(path, filename))
            assert "Assistant" in sample["chosen"] and "Assistant" in sample["rejected"]
            chosen = _split_dialogue(sample["chosen"])  # [(Role, Post), (Role, Post), ... ]
            rejected = _split_dialogue(sample["rejected"])  # [(Role, Post), (Role, Post), ... ]
            assert chosen[0][0] == "<|prompter|>" and rejected[0][0] == "<|prompter|>"
            # prepare chosen sample
            prefix = []
            step = 0
            for role, text in chosen:
                step += 1
                if role == "<|prompter|>":
                    prefix.append([role, text])
                elif role == "<|assistant|>":
                    temp_prefix = [temp_role + temp_text for temp_role, temp_text in prefix]
                    temp_prefix.append(role)
                    temp_reply = text  # last part of dialog, the text
                    chosen_sample = {
                        'extended': [
                            {'id': next(index_generator), 'prefix': temp_prefix, 'target_num': 2, 'target': []}
                        ],
                        'available': [],
                        'available_for_test': [{
                            'id': next(index_generator),
                            'prefix': temp_prefix,
                            'target_num': 1,
                            'target': [temp_reply]
                        }],
                        'split': split,
                        'source': {
                            'path': os.path.join(path, filename),
                            'line_num': index + 1,
                            'task': 'dialogue',
                            'selected': 'chosen',
                            'completed': step == len(chosen)
                        },
                    }
                    # only the sample for the dialogue's final turn is kept
                    if chosen_sample['source']['completed']:
                        samples.append(chosen_sample)
                    prefix.append([role, text])
                else:
                    raise Exception("???")
            # prepare rejected sample
            prefix = []
            step = 0
            for role, text in rejected:
                step += 1
                if role == "<|prompter|>":
                    prefix.append([role, text])
                elif role == "<|assistant|>":
                    temp_prefix = [temp_role + temp_text for temp_role, temp_text in prefix]
                    temp_prefix.append(role)
                    temp_reply = text  # last part of dialog, the text
                    rejected_sample = {
                        'extended': [
                            {'id': next(index_generator), 'prefix': temp_prefix, 'target_num': 2, 'target': []}
                        ],
                        'available': [],
                        'available_for_test': [{
                            'id': next(index_generator),
                            'prefix': temp_prefix,
                            'target_num': 1,
                            'target': [temp_reply]
                        }],
                        'split': split,
                        'source': {
                            'path': os.path.join(path, filename),
                            'line_num': index + 1,
                            'task': 'dialogue',
                            'selected': 'rejected',
                            'completed': step == len(rejected)
                        },
                    }
                    # only the sample for the dialogue's final turn is kept
                    if rejected_sample['source']['completed']:
                        samples.append(rejected_sample)
                    prefix.append([role, text])
                else:
                    raise Exception("???")
    os.makedirs(path.replace('raw_data', 'preprocessed_data'), exist_ok=True)
    with open(os.path.join(path.replace('raw_data', 'preprocessed_data'), "{}.json".format(split)), 'w', encoding='utf-8') as f:
        for sample in samples:
            f.write(json.dumps(sample, ensure_ascii=False) + '\n')
    print("{}: {}".format(os.path.join(path.replace('raw_data', 'preprocessed_data'), "{}.json".format(split)), len(samples)))
    return samples
163,641 | import os
import sys
import json
import random
import numpy as np
import tqdm
from utils.metrics_hh import create_reward_fn
def split_trans(split):
    """Normalize split aliases to train/dev/test.

    NOTE(review): the bodies of these three helpers were elided in this dump
    (empty `def` statements are a syntax error); restored to match the
    identical definitions that appear earlier in this file.
    """
    if split == 'train' or split == 'test' or split == 'dev':
        return split
    elif split == 'valid':
        return 'dev'
    elif split == 'valid1':
        return 'dev'
    elif split == 'valid2':
        return 'test'
    else:
        raise Exception('guaiguaidigai')
def concat_wo_ranker(prefixes, suffixes):
    """Score all (prefix, suffix) pairs with the module-level get_score,
    keeping the candidate order unchanged."""
    training_stage_num = len(prefixes[0])
    batch_size = len(prefixes)
    new_prefixes = sum(prefixes, [])
    new_suffixes = sum(suffixes, [])
    rewards = get_score(new_prefixes, new_suffixes).view(batch_size, training_stage_num).cpu().detach().numpy().tolist()  # [batch_size, ranking]
    return prefixes, suffixes, rewards
def reward_model_ranker(prefixes, suffixes):
    """Score all candidates and reorder each sample by descending reward."""
    training_stage_num = len(prefixes[0])
    batch_size = len(prefixes)
    new_prefixes = sum(prefixes, [])
    new_suffixes = sum(suffixes, [])
    rewards = get_score(new_prefixes, new_suffixes).view(batch_size, training_stage_num).cpu().detach().numpy()  # [batch_size, ranking]
    indices = np.argsort(-rewards, axis=1)
    prefixes = [[prefixes[i][index] for index in indices[i]] for i in range(batch_size)]
    suffixes = [[suffixes[i][index] for index in indices[i]] for i in range(batch_size)]
    rewards = [[float(rewards[i][index]) for index in indices[i]] for i in range(batch_size)]
    return prefixes, suffixes, rewards
def extract_train_data(root_dir, if_score, if_rerank, training_stage_num=None, split='train'):
    """Collect PRO training samples for `split` from every *.json under root_dir.

    Walks root_dir recursively, keeps lines whose split matches, and builds
    parallel prefix/suffix candidate lists per sample. When if_score is set
    the candidates are scored (and, with if_rerank, reordered) by the reward
    model in batches; otherwise a flat reward of 1.0 is assigned.

    Relies on module-level names `data_aug`, `reward_batch_size` and the
    ranking helpers — TODO confirm these are bound before this is called.

    Fixes: `reward_batch_size / 2` produced a float, which range() rejects —
    now floor division; `== None` replaced by `is None`.
    """
    file_list = []
    for root, dirs, files in os.walk(root_dir):
        for file in files:
            if not file.endswith("json"):
                continue
            file_list.append(os.path.join(root, file))
    training_data = []
    for file in tqdm.tqdm(file_list, desc="re-formating"):
        with open(file, 'r', encoding='utf-8') as f:
            raw_data = f.readlines()
        for line in raw_data:
            sample = json.loads(line)
            if split_trans(sample['split']) != split:
                continue
            new_sample = {'meta': sample['source'], 'prefix': [], 'suffix': []}
            # data_aug additionally pulls in model-generated 'extended' candidates
            sources = sample['extended'] + sample['available'] if data_aug else sample['available']
            for s in sources:
                for suffix in s['target']:
                    assert isinstance(suffix, str)
                    new_sample['prefix'].append(s['prefix'])
                    new_sample['suffix'].append(suffix)
            training_data.append(new_sample)
            if training_stage_num is None:
                training_stage_num = len(new_sample['prefix'])
            # every sample must expose the same number of candidates
            assert training_stage_num == len(new_sample['prefix'])
    if if_score:
        batch_size = reward_batch_size // 2  # default
        for index in tqdm.tqdm(range(0, len(training_data), batch_size), desc="rewarding"):
            prefixes = []
            suffixes = []
            # shrink the final batch if the data does not divide evenly
            if len(training_data) - index < batch_size:
                batch_size = len(training_data) - index
            for sub_index in range(batch_size):
                prefixes.append(training_data[index + sub_index]['prefix'])
                suffixes.append(training_data[index + sub_index]['suffix'])
            if if_rerank:
                prefixes, suffixes, rewards = reward_model_ranker(prefixes, suffixes)
            else:
                prefixes, suffixes, rewards = concat_wo_ranker(prefixes, suffixes)
            for sub_index in range(batch_size):
                training_data[index + sub_index]['prefix'] = prefixes[sub_index]
                training_data[index + sub_index]['suffix'] = suffixes[sub_index]
                training_data[index + sub_index]['reward'] = rewards[sub_index]
    else:
        for l in training_data:
            l['reward'] = [1.0] * len(l['suffix'])
    for l in training_data:
        # supervised-finetuning target defaults to the first (best) candidate
        l['sft_index'] = 0
    return training_data
163,642 | import sys
import os
import torch
import torch.nn as nn
from transformers import AutoTokenizer, AutoModelForSequenceClassification
from dataclasses import dataclass
import nltk
from nltk.stem.porter import PorterStemmer
from nltk.stem.wordnet import WordNetLemmatizer
import nltk
nltk.download('wordnet')
def get_bleu(hyp, ref):
    """Sentence-level BLEU of hypothesis `hyp` against the single reference `ref`.

    Both strings are whitespace-trimmed before scoring.
    """
    candidate = hyp.strip()
    reference = ref.strip()
    return nltk.translate.bleu_score.sentence_bleu([reference], candidate)
163,643 | import sys
import os
import torch
import torch.nn as nn
from transformers import AutoTokenizer, AutoModelForSequenceClassification
from dataclasses import dataclass
import nltk
def create_reward_fn_2():
    """Load the OpenAssistant deberta-v3-large-v2 reward model and return
    (scoring_fn, eval_batch_size).

    The model is placed on the highest-indexed CUDA device.
    """
    checkpoint = "OpenAssistant/reward-model-deberta-v3-large-v2"
    device = "cuda:{}".format(torch.cuda.device_count() - 1)
    tok = AutoTokenizer.from_pretrained(checkpoint)
    tok.truncation_side = "right"
    model = AutoModelForSequenceClassification.from_pretrained(checkpoint).to(device)
    model.eval()

    def get_score(prefixes, suffixes):
        """Return one scalar reward per (prefix, suffix) pair."""
        batch = tok(
            prefixes,
            suffixes,
            padding=True,
            truncation=True,
            max_length=1024,
            return_tensors="pt",
        ).to(device)
        with torch.no_grad():
            logits = model(**batch).logits
        return logits.view(-1)

    return get_score, 140
163,644 | import sys
import os
import torch
import torch.nn as nn
from transformers import AutoTokenizer, AutoModelForSequenceClassification
from dataclasses import dataclass
import nltk
def create_reward_fn_3():
    """Load the OpenAssistant deberta-v3-large reward model and return
    (scoring_fn, eval_batch_size).

    The model is placed on the highest-indexed CUDA device.
    """
    checkpoint = "OpenAssistant/reward-model-deberta-v3-large"
    device = "cuda:{}".format(torch.cuda.device_count() - 1)
    tok = AutoTokenizer.from_pretrained(checkpoint)
    tok.truncation_side = "right"
    model = AutoModelForSequenceClassification.from_pretrained(checkpoint).to(device)
    model.eval()

    def get_score(prefixes, suffixes):
        """Return one scalar reward per (prefix, suffix) pair."""
        batch = tok(
            prefixes,
            suffixes,
            padding=True,
            truncation=True,
            max_length=1024,
            return_tensors="pt",
        ).to(device)
        with torch.no_grad():
            logits = model(**batch).logits
        return logits.view(-1)

    return get_score, 140
163,645 | import random
import numpy as np
import torch
import argparse
from transformers import SchedulerType
args = parse_args()
def parse_args():
    """Build and parse the command-line arguments for PRO training.

    Same flags, types and defaults as before, written compactly.
    """
    parser = argparse.ArgumentParser(description="Preference Ranking Optimization For Human Alignment")
    parser.add_argument("--task", type=str, default="hh")
    parser.add_argument("--do_train", action="store_true")
    parser.add_argument("--do_validation", action="store_true")
    parser.add_argument("--sft_weight", type=float, default=2)
    parser.add_argument("--index", type=str, default="100")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--temperature", type=float, default=1)
    parser.add_argument("--training_stage_num", type=int, default=1)
    parser.add_argument("--train_file_path", type=str, default=None)
    parser.add_argument("--validation_file_path", type=str, default=None)
    parser.add_argument("--validation_file_name", type=str, default=None)
    parser.add_argument("--model_name_or_path", type=str, required=False)
    parser.add_argument("--per_device_train_batch_size", type=int, default=1)
    parser.add_argument("--per_device_eval_batch_size", type=int, default=1)
    parser.add_argument("--learning_rate", type=float, default=1e-6)
    parser.add_argument("--block_size", type=int, default=20)
    parser.add_argument("--num_train_epochs", type=int, default=1)
    parser.add_argument("--max_train_steps", type=int, default=None)
    parser.add_argument("--gradient_accumulation_steps", type=int, default=2)
    parser.add_argument("--output_dir", type=str, default="checkpoints")
    parser.add_argument("--checkpointing_step", type=int, default=600)
    parser.add_argument("--log_path", type=str, default="logs")
    return parser.parse_args()
163,646 | import random
import numpy as np
import torch
import argparse
from transformers import SchedulerType
import random
random.seed(33)
def setup_seed(seed):
    """Seed random, numpy and torch (CPU + all CUDA devices) and make cuDNN
    deterministic for reproducible runs."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
163,647 | import sys
import os
import torch
import torch.nn as nn
from transformers import AutoTokenizer, AutoModelForSequenceClassification
from dataclasses import dataclass
import utils.reward_model
import nltk
from nltk.stem.porter import PorterStemmer
from nltk.stem.wordnet import WordNetLemmatizer
import nltk
nltk.download('wordnet')
def get_bleu(hyp, ref):
    """BLEU score of a single hypothesis/reference pair (whitespace-trimmed)."""
    return nltk.translate.bleu_score.sentence_bleu([ref.strip()], hyp.strip())
163,648 | import sys
import os
import torch
import torch.nn as nn
from transformers import AutoTokenizer, AutoModelForSequenceClassification
from dataclasses import dataclass
import utils.reward_model
import nltk
def create_reward_fn_2():
    """Load the OASST pythia-6.9b reward model on the last CUDA device and
    return (scoring_fn, eval_batch_size)."""
    checkpoint = "OpenAssistant/oasst-rm-2-pythia-6.9b-epoch-1"
    device = "cuda:{}".format(torch.cuda.device_count() - 1)
    tok = AutoTokenizer.from_pretrained(checkpoint)
    tok.truncation_side = "left"
    model = AutoModelForSequenceClassification.from_pretrained(checkpoint).to(device)
    model.eval()

    def get_score(prefixes, suffixes):
        """Score dialogues: attach each reply to its final role token and
        join all turns with the tokenizer's eos token."""
        texts = []
        for p, s in zip(prefixes, suffixes):
            # the prefix must end with a bare role token the reply attaches to
            assert p[-1] == "<|prompter|>" or p[-1] == "<|assistant|>", p[-1]
            merged = p[:-1] + [p[-1] + s]
            texts.append("".join(turn + tok.eos_token for turn in merged))
        batch = tok(
            texts,
            padding=True,
            truncation=True,
            max_length=1024,
            return_tensors="pt",
        ).to(device)
        with torch.no_grad():
            logits = model(**batch).logits
        return logits.view(-1)

    return get_score, 16
163,649 | import sys
import os
import torch
import torch.nn as nn
from transformers import AutoTokenizer, AutoModelForSequenceClassification
from dataclasses import dataclass
import utils.reward_model
import nltk
def create_reward_fn_3():
    """Load the OASST pythia-1.4b reward model on the last CUDA device and
    return (scoring_fn, eval_batch_size)."""
    checkpoint = "OpenAssistant/oasst-rm-2.1-pythia-1.4b-epoch-2.5"
    device = "cuda:{}".format(torch.cuda.device_count() - 1)
    tok = AutoTokenizer.from_pretrained(checkpoint)
    tok.truncation_side = "left"
    model = AutoModelForSequenceClassification.from_pretrained(checkpoint).to(device)
    model.eval()

    def get_score(prefixes, suffixes):
        """Score dialogues: attach each reply to its final role token and
        join all turns with the tokenizer's eos token."""
        texts = []
        for p, s in zip(prefixes, suffixes):
            # the prefix must end with a bare role token the reply attaches to
            assert p[-1] == "<|prompter|>" or p[-1] == "<|assistant|>", p[-1]
            merged = p[:-1] + [p[-1] + s]
            texts.append("".join(turn + tok.eos_token for turn in merged))
        batch = tok(
            texts,
            padding=True,
            truncation=True,
            max_length=1024,
            return_tensors="pt",
        ).to(device)
        with torch.no_grad():
            logits = model(**batch).logits
        return logits.view(-1)

    return get_score, 40
163,650 | import torch
import torch.nn.functional as F
import tqdm
import numpy as np
import random
def setup_seed(seed=42):
    """Seed random, numpy and torch (CPU + all CUDA devices) and make cuDNN
    deterministic; defaults to 42 for reproducibility."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
163,651 | import torch
import torch.nn.functional as F
import tqdm
import numpy as np
import random
def generate_pipeline(model, tokenizer, prompts, add_special_tokens=False, gen_kwarg={"max_new_tokens": 64, "num_beams": 1, "do_sample": False,}, batch_size = 28):
    """Batched generation for summarization prompts ending in "\nTL;DR:".

    Each prompt has the "\nTL;DR:" suffix stripped, is truncated to 512
    tokens, gets the suffix re-appended, and is generated in batches; the
    echoed prompt and anything after the first blank line are stripped from
    the decoded output. Returns (generated_texts, truncated_prompts).
    NOTE(review): gen_kwarg is a mutable default argument — harmless here
    only because it is never mutated.
    """
    def pipeline(prompts):
        # Tokenize with left padding (decoder-only generation) and rebuild the
        # truncated prompt strings so their echo can be removed from outputs.
        tokenizer.padding_side = "left"
        tokenizer.truncation_side = "right"
        new_prompts = []
        for p in prompts:
            assert p[-7:] == "\nTL;DR:", p[-7:]
            p = p[:-7]
            new_prompts.append(p)
        model_inputs = tokenizer(
            new_prompts,
            max_length=512,
            truncation=True,
            add_special_tokens=add_special_tokens,
        )
        truncated_prompts = tokenizer.batch_decode(model_inputs['input_ids'], skip_special_tokens=True)
        truncated_prompts = [p + "\nTL;DR:" for p in truncated_prompts]
        model_inputs = tokenizer(
            truncated_prompts,
            add_special_tokens=add_special_tokens,
            padding=True,
            return_tensors="pt"
        )
        truncated_prompts = tokenizer.batch_decode(model_inputs['input_ids'], skip_special_tokens=True)
        prompts_size = [len(s) for s in truncated_prompts]
        return model_inputs, prompts_size, truncated_prompts
    model_inputs, prompts_size, truncated_prompts = pipeline(prompts)
    text_res = []
    for index in tqdm.tqdm(range(0, len(model_inputs["input_ids"]), batch_size)):
        # shrink the final batch if the data does not divide evenly
        if len(model_inputs["input_ids"]) - index < batch_size:
            batch_size = len(model_inputs["input_ids"]) - index
        batch = {key: model_inputs[key][index:index+batch_size].to(model.device) for key in model_inputs}
        with torch.no_grad():
            ts = model.generate(
                **batch,
                **gen_kwarg,
                pad_token_id=tokenizer.pad_token_id,
            ).cpu().detach()
        text_res.append(ts)
    for index in range(len(text_res)):
        text_res[index] = tokenizer.batch_decode(
            text_res[index],
            skip_special_tokens=True
        )
    text_res = sum(text_res, [])
    for index in range(len(text_res)):
        # drop the echoed prompt, then cut the reply at the first blank line
        text = text_res[index]
        assert truncated_prompts[index].rstrip() in text
        text = text.replace(truncated_prompts[index].rstrip(), "").strip()
        for stop in ["\n\n"]:
            stop_ix = text.find(stop)
            if stop_ix >= 0:
                text = text[:stop_ix].rstrip()
        text_res[index] = text
    return text_res, truncated_prompts
163,652 | import os
import argparse
import json
import tqdm
import torch
import torch.nn.functional as F
import metrics2
from transformers import (
AutoConfig,
AutoTokenizer,
LlamaTokenizer,
AutoModelForCausalLM
)
from infer_func_now import setup_seed, generate_pipeline
from accelerate import Accelerator
from accelerate.utils import InitProcessGroupKwargs
from datetime import timedelta
def get_args():
    """Parse the command line: --index, --stage, --directory."""
    parser = argparse.ArgumentParser(description="")
    parser.add_argument('--index', type=str)
    parser.add_argument('--stage', type=int)
    parser.add_argument('--directory', default="best_checkpoint", type=str)
    return parser.parse_args()
163,653 | import os
import argparse
import json
import tqdm
import evaluate
def get_args():
    """Build and run the CLI parser (--index, --stage, --directory)."""
    parser = argparse.ArgumentParser(description="")
    for name, kwargs in [
        ('--index', dict(type=str)),
        ('--stage', dict(type=int)),
        ('--directory', dict(default="best_checkpoint", type=str)),
    ]:
        parser.add_argument(name, **kwargs)
    return parser.parse_args()
163,654 | import sys
import os
import math
import torch
import torch.nn as nn
from transformers import AutoTokenizer, AutoConfig, AutoModelForSequenceClassification
from transformers.models.gpt_neox.modeling_gpt_neox import GPTNeoXConfig, GPTNeoXModel, GPTNeoXPreTrainedModel
from transformers.utils import ModelOutput
from dataclasses import dataclass
from typing import Literal, Optional
import tqdm
import nltk
def get_bleu(hyp, ref):
    """Sentence-level BLEU of hypothesis *hyp* against the single reference
    *ref*; both strings are whitespace-stripped before scoring."""
    return nltk.translate.bleu_score.sentence_bleu([ref.strip()], hyp.strip())
163,655 | import sys
import os
import math
import torch
import torch.nn as nn
from transformers import AutoTokenizer, AutoConfig, AutoModelForSequenceClassification
from transformers.models.gpt_neox.modeling_gpt_neox import GPTNeoXConfig, GPTNeoXModel, GPTNeoXPreTrainedModel
from transformers.utils import ModelOutput
from dataclasses import dataclass
from typing import Literal, Optional
import tqdm
import nltk
rank = int(os.environ['RANK'])
def create_reward_fn_2():
    """Load the OASST DeBERTa-v3-large-v2 reward model; return a scorer.

    Returns:
        (get_score, batch_size): ``get_score(prefixes, suffixes)`` scores
        each (prompt, response) string pair as a 1-D tensor of reward
        logits; 140 is the suggested scoring batch size.
    """
    checkpoint = "OpenAssistant/reward-model-deberta-v3-large-v2"
    device = "cuda:{}".format(rank)
    tokenizer = AutoTokenizer.from_pretrained(checkpoint)
    # Drop overflow from the end of the encoded pair.
    tokenizer.truncation_side = "right"
    reward_model = AutoModelForSequenceClassification.from_pretrained(checkpoint).to(device)
    reward_model.eval()

    def get_score(prefixes, suffixes):
        # DeBERTa scores (question, answer) pairs via the paired-text API.
        encoded = tokenizer(
            prefixes,
            suffixes,
            padding=True,
            truncation=True,
            max_length=1024,
            return_tensors="pt",
        ).to(device)
        with torch.no_grad():
            logits = reward_model(**encoded).logits
        return logits.view(-1)

    return get_score, 140
163,656 | import sys
import os
import math
import torch
import torch.nn as nn
from transformers import AutoTokenizer, AutoConfig, AutoModelForSequenceClassification
from transformers.models.gpt_neox.modeling_gpt_neox import GPTNeoXConfig, GPTNeoXModel, GPTNeoXPreTrainedModel
from transformers.utils import ModelOutput
from dataclasses import dataclass
from typing import Literal, Optional
import tqdm
import nltk
rank = int(os.environ['RANK'])
def create_reward_fn_3():
    """Load the OASST DeBERTa-v3-large reward model; return a scorer.

    Returns:
        (get_score, batch_size): ``get_score(prefixes, suffixes)`` scores
        each (prompt, response) string pair as a 1-D tensor of reward
        logits; 140 is the suggested scoring batch size.
    """
    checkpoint = "OpenAssistant/reward-model-deberta-v3-large"
    device = "cuda:{}".format(rank)
    tokenizer = AutoTokenizer.from_pretrained(checkpoint)
    # Drop overflow from the end of the encoded pair.
    tokenizer.truncation_side = "right"
    reward_model = AutoModelForSequenceClassification.from_pretrained(checkpoint).to(device)
    reward_model.eval()

    def get_score(prefixes, suffixes):
        encoded = tokenizer(
            prefixes,
            suffixes,
            padding=True,
            truncation=True,
            max_length=1024,
            return_tensors="pt",
        ).to(device)
        with torch.no_grad():
            logits = reward_model(**encoded).logits
        return logits.view(-1)

    return get_score, 140
163,657 | import os
import argparse
import json
import tqdm
import torch
import torch.nn.functional as F
import metrics2
from transformers import (
AutoConfig,
AutoTokenizer,
LlamaTokenizer,
AutoModelForCausalLM
)
from peft import PeftConfig, PeftModel
from infer_func_now import setup_seed
from accelerate import Accelerator
from accelerate.utils import InitProcessGroupKwargs
from datetime import timedelta
def get_args():
    """Parse CLI options --index (str), --stage (int), --directory (str)."""
    arg_parser = argparse.ArgumentParser(description="")
    arg_parser.add_argument('--index', type=str)
    arg_parser.add_argument('--stage', type=int)
    arg_parser.add_argument('--directory', default="best_checkpoint", type=str)
    parsed = arg_parser.parse_args()
    return parsed
163,659 | import torch
import torch.nn.functional as F
import tqdm
import numpy as np
import random
def generate_pipeline(model, tokenizer, prompts, add_special_tokens=False, gen_kwarg=None, batch_size=28):
    """Batched generation for dialogue prompts.

    Prompts are left-truncated to 512-128 tokens (leaving room for the 128
    new tokens), generated in micro-batches, then post-processed to strip
    the prompt and anything after the first hallucinated role marker.

    Args:
        model: causal LM exposing ``generate`` and ``device``.
        tokenizer: matching tokenizer; padding/truncation sides are set here.
        prompts: list of dialogue prompt strings.
        add_special_tokens: forwarded to the tokenizer.
        gen_kwarg: generation kwargs; defaults to greedy decoding with
            ``max_new_tokens=128``. Built fresh per call — the previous
            shared mutable default dict was a latent bug.
        batch_size: generation micro-batch size.

    Returns:
        (generations, truncated_prompts).
    """
    if gen_kwarg is None:
        gen_kwarg = {"max_new_tokens": 128, "num_beams": 1, "do_sample": False}

    def pipeline(prompts):
        # Left padding + left truncation: keep the most recent dialogue turns.
        tokenizer.padding_side = "left"
        tokenizer.truncation_side = "left"
        model_inputs = tokenizer(
            prompts,
            max_length=512 - 128,
            truncation=True,
            add_special_tokens=add_special_tokens,
        )
        truncated_prompts = tokenizer.batch_decode(model_inputs['input_ids'], skip_special_tokens=True)
        model_inputs = tokenizer(
            truncated_prompts,
            max_length=512 - 128,
            truncation=True,
            add_special_tokens=add_special_tokens,
            padding=True,
            return_tensors="pt"
        )
        truncated_prompts = tokenizer.batch_decode(model_inputs['input_ids'], skip_special_tokens=True)
        prompts_size = [len(s) for s in truncated_prompts]
        return model_inputs, prompts_size, truncated_prompts

    model_inputs, prompts_size, truncated_prompts = pipeline(prompts)
    text_res = []
    for index in tqdm.tqdm(range(0, len(model_inputs["input_ids"]), batch_size)):
        # Shrink the final batch instead of padding it out.
        if len(model_inputs["input_ids"]) - index < batch_size:
            batch_size = len(model_inputs["input_ids"]) - index
        batch = {key: model_inputs[key][index:index + batch_size].to(model.device) for key in model_inputs}
        with torch.no_grad():
            ts = model.generate(
                **batch,
                **gen_kwarg,
                pad_token_id=tokenizer.pad_token_id,
            ).cpu().detach()
        text_res.append(ts)
    for index in range(len(text_res)):
        text_res[index] = tokenizer.batch_decode(
            text_res[index],
            skip_special_tokens=True
        )
    text_res = sum(text_res, [])
    for index in range(len(text_res)):
        text = text_res[index]
        assert truncated_prompts[index].rstrip() in text
        text = text.replace(truncated_prompts[index].rstrip(), "").strip()
        # Cut the generation at the first fabricated next turn.
        for stop in ["Human:", "human:", "Assistant:", "assistant:"]:
            stop_ix = text.find(stop)
            if stop_ix >= 0:
                text = text[:stop_ix].rstrip()
        text_res[index] = text
    return text_res, truncated_prompts
163,661 | import os
import argparse
import json
import tqdm
def get_args():
    """Return parsed CLI namespace with index, stage and directory."""
    p = argparse.ArgumentParser(description="")
    p.add_argument('--index', type=str)
    p.add_argument('--stage', type=int)
    p.add_argument('--directory', default="best_checkpoint", type=str)
    return p.parse_args()
163,662 | import sys
import os
import math
import torch
import torch.nn as nn
from transformers import AutoTokenizer, AutoConfig, AutoModelForSequenceClassification
from transformers.models.gpt_neox.modeling_gpt_neox import GPTNeoXConfig, GPTNeoXModel, GPTNeoXPreTrainedModel
from transformers.utils import ModelOutput
from dataclasses import dataclass
from typing import Literal, Optional
import tqdm
import reward_model
from reward_model import TrainRewardModel
import nltk
def get_bleu(hyp, ref):
    """BLEU of a single hypothesis against one reference (nltk sentence_bleu);
    inputs are stripped of surrounding whitespace first."""
    hypothesis = hyp.strip()
    reference = ref.strip()
    return nltk.translate.bleu_score.sentence_bleu([reference], hypothesis)
163,663 | import sys
import os
import math
import torch
import torch.nn as nn
from transformers import AutoTokenizer, AutoConfig, AutoModelForSequenceClassification
from transformers.models.gpt_neox.modeling_gpt_neox import GPTNeoXConfig, GPTNeoXModel, GPTNeoXPreTrainedModel
from transformers.utils import ModelOutput
from dataclasses import dataclass
from typing import Literal, Optional
import tqdm
import reward_model
from reward_model import TrainRewardModel
import nltk
rank = int(os.environ['RANK'])
def create_reward_fn_2():
    """Load the OASST pythia-6.9b reward model and build a scoring closure.

    Returns:
        (get_score, batch_size): ``get_score(prefixes, suffixes)`` maps
        dialogue prefixes (turn lists ending in "<|prompter|>" or
        "<|assistant|>") and suffixes to a 1-D tensor of reward logits;
        16 is the suggested scoring batch size.
    """
    checkpoint = "OpenAssistant/oasst-rm-2-pythia-6.9b-epoch-1"
    device = "cuda:{}".format(rank)
    tokenizer = AutoTokenizer.from_pretrained(checkpoint)
    # Keep the most recent dialogue turns when the input overflows.
    tokenizer.truncation_side = "left"
    reward_model = AutoModelForSequenceClassification.from_pretrained(checkpoint).to(device)
    reward_model.eval()

    def get_score(prefixes, suffixes):
        # Append each suffix to its final role tag and join all turns with EOS.
        batch_texts = []
        for prefix, suffix in zip(prefixes, suffixes):
            assert prefix[-1] == "<|prompter|>" or prefix[-1] == "<|assistant|>", prefix[-1]
            merged = prefix[:-1] + [prefix[-1] + suffix]
            batch_texts.append("".join(turn + tokenizer.eos_token for turn in merged))
        encoded = tokenizer(
            batch_texts,
            padding=True,
            truncation=True,
            max_length=1024,
            return_tensors="pt",
        ).to(device)
        with torch.no_grad():
            logits = reward_model(**encoded).logits
        return logits.view(-1)

    return get_score, 16
163,664 | import sys
import os
import math
import torch
import torch.nn as nn
from transformers import AutoTokenizer, AutoConfig, AutoModelForSequenceClassification
from transformers.models.gpt_neox.modeling_gpt_neox import GPTNeoXConfig, GPTNeoXModel, GPTNeoXPreTrainedModel
from transformers.utils import ModelOutput
from dataclasses import dataclass
from typing import Literal, Optional
import tqdm
import reward_model
from reward_model import TrainRewardModel
import nltk
rank = int(os.environ['RANK'])
def create_reward_fn_3():
    """Load the OASST pythia-1.4b reward model and build a scoring closure.

    Returns:
        (get_score, batch_size): ``get_score(prefixes, suffixes)`` maps
        dialogue prefixes (turn lists ending in "<|prompter|>" or
        "<|assistant|>") and suffixes to a 1-D tensor of reward logits;
        40 is the suggested scoring batch size.
    """
    checkpoint = "OpenAssistant/oasst-rm-2.1-pythia-1.4b-epoch-2.5"
    device = "cuda:{}".format(rank)
    tokenizer = AutoTokenizer.from_pretrained(checkpoint)
    # Keep the most recent dialogue turns when the input overflows.
    tokenizer.truncation_side = "left"
    reward_model = AutoModelForSequenceClassification.from_pretrained(checkpoint).to(device)
    reward_model.eval()

    def get_score(prefixes, suffixes):
        batch_texts = []
        for prefix, suffix in zip(prefixes, suffixes):
            assert prefix[-1] == "<|prompter|>" or prefix[-1] == "<|assistant|>", prefix[-1]
            merged = prefix[:-1] + [prefix[-1] + suffix]
            batch_texts.append("".join(turn + tokenizer.eos_token for turn in merged))
        encoded = tokenizer(
            batch_texts,
            padding=True,
            truncation=True,
            max_length=1024,
            return_tensors="pt",
        ).to(device)
        with torch.no_grad():
            logits = reward_model(**encoded).logits
        return logits.view(-1)

    return get_score, 40
163,665 | import os
import argparse
import json
import tqdm
import torch
import torch.nn.functional as F
import metrics2
from transformers import (
AutoConfig,
AutoTokenizer,
LlamaTokenizer,
AutoModelForCausalLM
)
from peft import PeftConfig, PeftModel
from infer_func_now import setup_seed, generate_pipeline, ranking_pipeline
from accelerate import Accelerator
from accelerate.utils import InitProcessGroupKwargs
from datetime import timedelta
def get_args():
    """Command-line interface: --index, --stage, --directory (default
    'best_checkpoint')."""
    cli = argparse.ArgumentParser(description="")
    cli.add_argument('--index', type=str)
    cli.add_argument('--stage', type=int)
    cli.add_argument('--directory', default="best_checkpoint", type=str)
    return cli.parse_args()
163,666 | import argparse
import os
import pickle
import time
import torch
from tqdm import tqdm
from data_loader import DataLoader
from config import Config
from ECDMetric import ECDMetric
from model import BertForMatching
from transformers import BertTokenizer, BertConfig
from tensorboardX import SummaryWriter
from transformers import AdamW
import json
import random
import numpy as np
import pprint
import logging
def set_seed(seed):
    """Make runs reproducible: seed python's random module, numpy, and
    torch (CPU generator plus the current CUDA device)."""
    for seeder in (random.seed, np.random.seed, torch.manual_seed, torch.cuda.manual_seed):
        seeder(seed)
163,667 | import argparse
import os
import pickle
import time
import torch
from tqdm import tqdm
from data_loader import DataLoader
from config import Config
from ECDMetric import ECDMetric
from model import BertForMatching
from transformers import BertTokenizer, BertConfig
from tensorboardX import SummaryWriter
from transformers import AdamW
import json
import random
import numpy as np
import pprint
import logging
def evaluate(dataloader, device, model, metric, dataset='dev'):
    """Run response-matching evaluation over the dev or test split.

    Args:
        dataloader: provides ``dev_examples``/``test_examples`` and
            ``get_batch_iterator``.
        device: torch device the model lives on.
        model: matching model returning (loss, logits).
        metric: object exposing ``compute(CAND_NUM=..., logits=..., hard_ids=...)``.
        dataset: 'dev' (2 candidates per example); anything else selects the
            test split (10 candidates per example).

    Returns:
        (results, pred_tags): the metric result dict and the raw predicted
        logits collected over the split.
    """
    true_tags = []
    pred_tags = []
    if dataset == 'dev':
        dataset = dataloader.dev_examples
        NUM_CAND = 2
    else:
        dataset = dataloader.test_examples
        NUM_CAND = 10
    dataset_iter = dataloader.get_batch_iterator(dataset, batch_size=50)
    for batch in tqdm(dataset_iter):
        batch = {k: v.to(device) for k, v in batch.items()}
        with torch.no_grad():
            _, logits = model(
                input_ids=batch['input_ids'],
                input_mask=batch['input_mask'],
                input_type_ids=batch['input_type_ids'],
                matching_label_id=batch['matching_label_id'])
        # BUG FIX: move tensors to CPU before .numpy() — calling .numpy() on
        # a CUDA tensor raises, so the original crashed on any GPU device.
        # (The sibling NLU evaluate already used .cpu().numpy().)
        pred_tags.extend(logits.cpu().numpy().tolist())
        true_tags.extend(batch['matching_label_id'].cpu().numpy().tolist())
    results = metric.compute(
        CAND_NUM=NUM_CAND,
        logits=pred_tags,
        hard_ids=true_tags)
    return results, pred_tags
163,668 | import argparse
import os
import pickle
import time
import torch
from tqdm import tqdm
import numpy as np
from data_cls import DataProcessor
from model import BertForNLU
from transformers import BertTokenizer, BertConfig
from tensorboardX import SummaryWriter
from transformers import AdamW
import json
import random
import numpy as np
import pprint
import logging
def set_seed(seed):
    """Seed all RNG sources used by training: stdlib random, numpy, and
    torch on both CPU and the current CUDA device."""
    random.seed(seed)
    np.random.seed(seed)
    # torch keeps separate CPU and CUDA generators; seed both.
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
163,669 | import argparse
import os
import pickle
import time
import torch
from tqdm import tqdm
import numpy as np
from data_cls import DataProcessor
from model import BertForNLU
from transformers import BertTokenizer, BertConfig
from tensorboardX import SummaryWriter
from transformers import AdamW
import json
import random
import numpy as np
import pprint
import logging
def compute(dialogs):
def evaluate(dataloader, device, model, eval_batch_size, dataset='dev'):
    """Run NLU intent prediction over a split and annotate the dialogues.

    For every instance, the predicted and gold intent names are written back
    into the matching dialogue turn under 'pred_label'/'true_label', keyed by
    the instance id taken from the guid.

    Args:
        dataloader: provides {dev,test}_instances / {dev,test}_dialogs,
            get_batch_iterator, and intent_dic_inv (intent id -> intent name).
        device: torch device the model lives on.
        model: NLU model returning (loss, predicted intent ids).
        eval_batch_size: batch size for evaluation.
        dataset: 'dev' or 'test'; anything else raises ValueError.

    Returns:
        (results, dialogs): metrics from the module-level ``compute`` plus
        the annotated dialogue dict.
    """
    if dataset == 'dev':
        dataset = dataloader.dev_instances
        dialogs = dataloader.dev_dialogs
    elif dataset == 'test':
        dataset = dataloader.test_instances
        dialogs = dataloader.test_dialogs
    else:
        raise ValueError('wrong eval set')
    dataset_iter = dataloader.get_batch_iterator(dataset, batch_size=eval_batch_size)
    for batch_raw in tqdm(dataset_iter):
        batch = {}
        # Lists (e.g. guids) stay on the host; tensors move to the model device.
        for k, v in batch_raw.items():
            if isinstance(v, list):
                batch[k] = v
            else:
                batch[k] = v.to(device)
        with torch.no_grad():
            _, preds = model(
                input_ids=batch['input_ids'],
                input_mask=batch['input_mask'],
                input_type_ids=batch['input_type_ids'],
                matching_label_id=batch['matching_label_id'])
        guids = batch['guids']
        preds = preds.cpu().numpy().tolist()
        trues = batch['matching_label_id'].cpu().numpy().tolist()
        for idx, guid in enumerate(guids):
            # guid format (inferred from the parsing below):
            # "<dial_id>||turn_<n>||<instance_id>" — confirm against the data loader.
            dial_id, turn_id, ins_id = guid.split('||')
            turn_id = int(turn_id.split('_')[1])
            if 'pred_label' not in dialogs[dial_id][turn_id]:
                dialogs[dial_id][turn_id]['pred_label'] = {}
            dialogs[dial_id][turn_id]['pred_label'][ins_id] = dataloader.intent_dic_inv[preds[idx]]
            if 'true_label' not in dialogs[dial_id][turn_id]:
                dialogs[dial_id][turn_id]['true_label'] = {}
            dialogs[dial_id][turn_id]['true_label'][ins_id] = dataloader.intent_dic_inv[trues[idx]]
    # `compute` is expected at module level; it aggregates the annotations.
    results = compute(dialogs)
    return results, dialogs
163,670 | import numpy as np
import json
def compute(dialogs):
    """Intent-classification accuracy over a dict of dialogues.

    Every turn carrying a 'usr_query' is scored. 'turn_acc:' and 'dialACC'
    use only the first prediction per turn; 'turnACCALL' scores every
    prediction and treats the three fallback intents as interchangeable.
    """
    per_dialog_correct = []
    all_pred_hits = []
    first_pred_hits = []
    # 基本通用.重听 基本通用.简短词 拒识
    interchangeable = ['基本通用.重听', '基本通用.简短词', '拒识']
    for dialog in dialogs.values():
        turn_flags = []
        for turn in dialog:
            if 'usr_query' not in turn:
                continue
            gold = turn['usr_intent']
            assert gold == turn['true_label']['origin']
            predictions = list(turn['pred_label'].values())
            for predicted in predictions:
                if predicted == gold:
                    all_pred_hits.append(1)
                elif predicted in interchangeable and gold in interchangeable:
                    # The fallback intents are considered equivalent.
                    all_pred_hits.append(1)
                else:
                    all_pred_hits.append(0)
            hit = 1 if gold == predictions[0] else 0
            turn_flags.append(hit)
            first_pred_hits.append(hit)
        per_dialog_correct.append(all(turn_flags))
    return {'turn_acc:': np.mean(first_pred_hits),
            'turnACCALL': np.mean(all_pred_hits),
            'dialACC': np.mean(per_dialog_correct)}
163,671 | import numpy as np
import json
def compute_with_hard_data(dialogs, robust_ids):
    """Accuracy restricted to the perturbed ("hard") utterances.

    robust_ids maps dialogue id -> (turn index -> perturbation key); only
    dialogues listed there are scored, and for each user turn only the
    prediction under the selected perturbation key counts.
    """
    per_dialog_correct = []
    turn_hits = []
    for dialog_id, dialog in dialogs.items():
        if dialog_id not in robust_ids:
            continue
        turn_flags = []
        # 基本通用.重听 基本通用.简短词 拒识
        for turn_index, turn in enumerate(dialog):
            if 'usr_query' not in turn:
                continue
            gold = turn['usr_intent']
            assert gold == turn['true_label']['origin']
            selected_key = robust_ids[dialog_id][turn_index]
            hit = 1 if turn['pred_label'][selected_key] == gold else 0
            turn_flags.append(hit)
            turn_hits.append(hit)
        per_dialog_correct.append(all(turn_flags))
    return {'turn_acc:': np.mean(turn_hits),
            'dialACC': np.mean(per_dialog_correct)}
163,672 | from transformers.optimization import AdamW, get_linear_schedule_with_warmup
from transformers import GPT2LMHeadModel, BertTokenizer
from reader import RisaWOZReader
from eval import RisaWOZEvaluator
import torch
import torch.nn as nn
import os
import random
import argparse
import time
import logging
import json
import numpy as np
from torch.utils.tensorboard import SummaryWriter
from config import global_config as cfg
import warnings
def parse_arg_cfg(args):
    """Overlay command-line "key=value" pairs from args.cfg onto the global cfg.

    Each value is coerced to the type of the existing cfg attribute: bools
    are True unless the literal string is 'False'; lists are comma-split
    (and cast to int for 'cuda_device'); everything else uses the type's
    constructor. Raises ValueError for attributes currently set to None.
    """
    if not args.cfg:
        return
    for pair in args.cfg:
        key, raw = tuple(pair.split('='))
        attr_type = type(getattr(cfg, key))
        if attr_type == type(None):
            raise ValueError()
        if attr_type is bool:
            # Only the exact literal 'False' parses as False.
            value = False if raw == 'False' else True
        elif attr_type is list:
            value = raw.split(',')
            if key == 'cuda_device':
                value = [int(no) for no in value]
        else:
            value = attr_type(raw)
        setattr(cfg, key, value)
    return
163,673 | import logging
import json
import torch
import numpy as np
from collections import OrderedDict
import ontology as ontology
The provided code snippet includes necessary dependencies for implementing the `top_k_top_p_filtering` function. Write a Python function `def top_k_top_p_filtering(logits, top_k=0, top_p=0.0, filter_value=-float('Inf'))` to solve the following problem:
Filter a distribution of logits using top-k and/or nucleus (top-p) filtering Args: logits: logits distribution shape (vocabulary size) top_k > 0: keep only top k tokens with highest probability (top-k filtering). top_p > 0.0: keep the top tokens with cumulative probability >= top_p (nucleus filtering). Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751) From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317
Here is the function:
def top_k_top_p_filtering(logits, top_k=0, top_p=0.0, filter_value=-float('Inf')):
    """ Filter a distribution of logits using top-k and/or nucleus (top-p) filtering
        Args:
            logits: logits distribution shape (vocabulary size)
            top_k > 0: keep only top k tokens with highest probability (top-k filtering).
            top_p > 0.0: keep the top tokens with cumulative probability >= top_p (nucleus filtering).
                Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751)
        From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317
        Note: filters in place and returns the (mutated) logits tensor.
    """
    assert logits.dim() == 1  # batch size 1 for now - could be updated for more but the code would be less clear
    top_k = min(top_k, logits.size(-1))  # Safety check
    if top_k > 0:
        # Remove all tokens with a probability less than the last token of the top-k.
        # torch.topk() returns the top_k largest values of the last dim as (values, indices).
        indices_to_remove = logits < torch.topk(logits, top_k)[
            0][..., -1, None]
        logits[indices_to_remove] = filter_value  # mask everything outside the top-k
    if top_p > 0.0:
        sorted_logits, sorted_indices = torch.sort(
            logits, descending=True)  # sort logits descending
        # BUG FIX: the original called F.softmax, but this module never imports
        # torch.nn.functional as F, so the top_p branch raised NameError.
        cumulative_probs = torch.cumsum(
            torch.softmax(sorted_logits, dim=-1), dim=-1)
        # Remove tokens with cumulative probability above the threshold
        sorted_indices_to_remove = cumulative_probs > top_p
        # Shift the indices to the right to keep also the first token above the threshold
        sorted_indices_to_remove[...,
                                 1:] = sorted_indices_to_remove[..., :-1].clone()
        sorted_indices_to_remove[..., 0] = 0
        indices_to_remove = sorted_indices[sorted_indices_to_remove]
        logits[indices_to_remove] = filter_value
    return logits
163,674 | import logging
import json
import torch
import numpy as np
from collections import OrderedDict
import ontology as ontology
def py2np(list):
    """Convert a (possibly nested) python list into a numpy ndarray."""
    as_array = np.array(list)
    return as_array
163,675 | import logging
import json
import torch
import numpy as np
from collections import OrderedDict
import ontology as ontology
def write_dict(fn, dic):
    """Write *dic* to path *fn* as pretty-printed (indent=2) JSON."""
    serialized = json.dumps(dic, indent=2)
    with open(fn, 'w') as handle:
        handle.write(serialized)
163,676 | import logging
import json
import torch
import numpy as np
from collections import OrderedDict
import ontology as ontology
def f1_score(label_list, pred_list):
    """F1 between a predicted and a gold item list.

    Duplicates in pred_list each count as a hit if present in label_list;
    a small epsilon guards every division, so empty inputs yield 0.0.
    """
    true_positives = sum(1 for item in pred_list if item in label_list)
    false_positives = max(0, len(pred_list) - true_positives)
    false_negatives = max(0, len(label_list) - true_positives)
    precision = true_positives / (true_positives + false_positives + 1e-10)
    recall = true_positives / (true_positives + false_negatives + 1e-10)
    return 2 * precision * recall / (precision + recall + 1e-10)
163,677 | import logging
import json
import torch
import numpy as np
from collections import OrderedDict
import ontology as ontology
def padSeqs_gpt(sequences, pad_id, maxlen=None):
    """Right-pad token-id sequences with *pad_id* into an (N, L) float array.

    L is the longest sequence length, capped at 1024 (gpt2.n_ctx); longer
    sequences keep their *last* L tokens. The *maxlen* argument is accepted
    for signature compatibility but the cap is always recomputed here.

    Returns:
        (padded, lengths): the padded array and the original lengths.
    """
    lengths = [len(seq) for seq in sequences]
    num_samples = len(sequences)
    longest = np.max(lengths)
    # gpt2.n_ctx caps the usable context at 1024 tokens.
    if longest > 1024:
        maxlen = 1024
    else:
        maxlen = longest
    # Every position defaults to pad_id; downstream loss code masks
    # labels set to -100 anyway (see modeling_gpt2.GPT2LMHeadModel).
    x = (np.ones((num_samples, maxlen)) * pad_id)
    for row, seq in enumerate(sequences):
        if not len(seq):
            print('empty list was found in padSeqs')
        # Truncate from the front ('pre'), pad at the back ('post').
        kept = np.asarray(seq[-maxlen:])
        x[row, :len(kept)] = kept
    return x, lengths
163,678 | import logging
import json
import torch
import numpy as np
from collections import OrderedDict
import ontology as ontology
def padSeqs(sequences, maxlen=None, truncated = False, pad_method='post',
            trunc_method='pre', dtype='int32', value=0.):
    """Pad (and optionally truncate) a list of sequences into one array.

    Args:
        sequences: list of iterables (token ids or per-step feature rows).
        maxlen: cap on the padded length; only honored when ``truncated``
            is True, otherwise the observed maximum length is used.
        truncated: whether *maxlen* may shorten sequences.
        pad_method: 'post' pads at the end, 'pre' pads at the front.
        trunc_method: 'pre' keeps the last tokens, 'post' keeps the first.
        dtype: dtype of the returned array.
        value: fill value for padded positions.

    Returns:
        np.ndarray of shape (num_samples, maxlen) + per-step sample shape.

    Raises:
        ValueError: on non-iterable input, an unknown pad/trunc method, or
            inconsistent per-step shapes across sequences.
    """
    if not hasattr(sequences, '__len__'):
        raise ValueError('`sequences` must be iterable.')
    lengths = []
    for x in sequences:
        if not hasattr(x, '__len__'):
            raise ValueError('`sequences` must be a list of iterables. '
                             'Found non-iterable: ' + str(x))
        lengths.append(len(x))
    num_samples = len(sequences)
    seq_maxlen = np.max(lengths)
    if maxlen is not None and truncated:
        maxlen = min(seq_maxlen, maxlen)
    else:
        # maxlen is ignored unless truncation was requested.
        maxlen = seq_maxlen
    # take the sample shape from the first non empty sequence
    # checking for consistency in the main loop below.
    sample_shape = tuple()
    for s in sequences:
        if len(s) > 0:
            sample_shape = np.asarray(s).shape[1:]
            break
    x = (np.ones((num_samples, maxlen) + sample_shape) * value).astype(dtype)
    for idx, s in enumerate(sequences):
        if not len(s):
            print('empty list/array was found')
            continue  # empty list/array was found
        if trunc_method == 'pre':
            trunc = s[-maxlen:]
        elif trunc_method == 'post':
            trunc = s[:maxlen]
        else:
            raise ValueError('Truncating type "%s" not understood' % trunc_method)
        # check `trunc` has expected shape
        trunc = np.asarray(trunc, dtype=dtype)
        if trunc.shape[1:] != sample_shape:
            raise ValueError('Shape of sample %s of sequence at position %s is different from expected shape %s' %
                             (trunc.shape[1:], idx, sample_shape))
        if pad_method == 'post':
            x[idx, :len(trunc)] = trunc
        elif pad_method == 'pre':
            x[idx, -len(trunc):] = trunc
        else:
            raise ValueError('Padding type "%s" not understood' % pad_method)
    return x
163,679 | import logging
import json
import torch
import numpy as np
from collections import OrderedDict
import ontology as ontology
The provided code snippet includes necessary dependencies for implementing the `get_glove_matrix` function. Write a Python function `def get_glove_matrix(glove_path, vocab, initial_embedding_np)` to solve the following problem:
return a glove embedding matrix :param self: :param glove_file: :param initial_embedding_np: :return: np array of [V,E]
Here is the function:
def get_glove_matrix(glove_path, vocab, initial_embedding_np):
    """
    return a glove embedding matrix
    :param glove_path: path to a text GloVe file ("word v1 v2 ..." per line)
    :param vocab: vocabulary object with has_word / encode / vocab_size
    :param initial_embedding_np: [V, E] init matrix; rows of known words are overwritten
    :return: np array of [V,E] (a float32 copy; the input array is not mutated)
    """
    cnt = 0
    vec_array = initial_embedding_np
    old_avg = np.average(vec_array)
    old_std = np.std(vec_array)
    # astype copies, so updates below never touch the caller's array.
    vec_array = vec_array.astype(np.float32)
    new_avg, new_std = 0, 0
    # BUG FIX: use a context manager so the file is closed even if parsing
    # raises (the original leaked the handle on error), and stream line by
    # line instead of materializing everything with readlines().
    with open(glove_path, 'r', encoding='UTF-8') as ef:
        for line in ef:
            line = line.strip().split(' ')
            word, vec = line[0], line[1:]
            vec = np.array(vec, np.float32)
            if not vocab.has_word(word):
                continue
            word_idx = vocab.encode(word)
            if word_idx < vocab.vocab_size:
                cnt += 1
                vec_array[word_idx] = vec
                new_avg += np.average(vec)
                new_std += np.std(vec)
    # BUG FIX: guard against zero overlap with the vocabulary — the original
    # raised ZeroDivisionError when no GloVe word matched.
    if cnt:
        new_avg /= cnt
        new_std /= cnt
    logging.info('%d known embedding. old mean: %f new mean %f, old std %f new std %f' % (cnt, old_avg,
                                                                                          new_avg, old_std, new_std))
    return vec_array
163,680 | import logging
import json
import torch
import numpy as np
from collections import OrderedDict
import ontology as ontology
def position_encoding_init(self, n_position, d_pos_vec):
    """Sinusoidal position-encoding table of shape (n_position, d_pos_vec).

    Row 0 stays all zeros (padding position); even columns receive sin,
    odd columns cos, of pos / 10000^(2*(j//2)/d_pos_vec).
    """
    def angle_row(pos):
        return [pos / np.power(10000, 2 * (j // 2) / d_pos_vec) for j in range(d_pos_vec)]
    table = np.array([np.zeros(d_pos_vec) if pos == 0 else angle_row(pos)
                      for pos in range(n_position)])
    table[1:, 0::2] = np.sin(table[1:, 0::2])  # dim 2i
    table[1:, 1::2] = np.cos(table[1:, 1::2])  # dim 2i+1
    return table
163,681 | import json, random
import re
def normalize_slot(s):
    """Canonicalize a slot name: drop a leading "3.0T" marker, lowercase,
    strip slashes and spaces, and remove a trailing parenthesized suffix."""
    s = re.sub(r"3\.0\s?[Tt]\s?", "", s)
    s = s.lower()
    s = s.replace('/', '').replace(' ', '')
    s = re.sub(r"\(.*\)$", "", s)
    # Already lower-cased above — the original's second .lower() was redundant.
    return s
163,682 | import math, logging, json
from collections import Counter
from nltk.util import ngrams
import copy
import pprint
import numpy as np
import ontology as ontology
The provided code snippet includes necessary dependencies for implementing the `my_lcs` function. Write a Python function `def my_lcs(string, sub)` to solve the following problem:
Calculates longest common subsequence for a pair of tokenized strings :param string : list of str : tokens from a string split using whitespace :param sub : list of str : shorter string, also split using whitespace :returns: length (list of int): length of the longest common subsequence between the two strings Note: my_lcs only gives length of the longest common subsequence, not the actual LCS This function is copied from https://github.com/Maluuba/nlg-eval/blob/master/nlgeval/pycocoevalcap/rouge/rouge.py
Here is the function:
def my_lcs(string, sub):
    """Length of the longest common subsequence of two token lists.

    *string* and *sub* are whitespace-tokenized strings (lists of str);
    if *sub* is the longer one the two are swapped first. Classic
    O(len(string) * len(sub)) dynamic program; only the LCS *length* is
    returned, never the subsequence itself.
    Adapted from nlg-eval's pycocoevalcap rouge implementation.
    """
    if len(string) < len(sub):
        sub, string = string, sub
    rows = len(string) + 1
    cols = len(sub) + 1
    dp = [[0] * cols for _ in range(rows)]
    for j in range(1, cols):
        for i in range(1, rows):
            if string[i - 1] == sub[j - 1]:
                dp[i][j] = dp[i - 1][j - 1] + 1
            else:
                dp[i][j] = max(dp[i - 1][j], dp[i][j - 1])
    return dp[-1][-1]
163,683 | import copy
import os, random, argparse, time, logging, json, tqdm
import numpy as np
from copy import deepcopy
from collections import OrderedDict
import torch
import pprint
from utils import RisaWOZT5Reader
from config import global_config as cfg
from transformers import (AdamW, BertTokenizer, WEIGHTS_NAME,CONFIG_NAME, get_linear_schedule_with_warmup)
from T5 import MiniT5
from torch.utils.tensorboard import SummaryWriter
from eval import RisaWOZEvaluator
def parse_arg_cfg(args):
    """Apply "key=value" overrides from args.cfg to the global cfg object,
    coercing each value to the type of the attribute it replaces (bool:
    only the literal 'False' is false; list: comma-split, int-cast for
    'cuda_device'; None-typed attributes raise ValueError)."""
    if args.cfg:
        for pair in args.cfg:
            key, raw = tuple(pair.split('='))
            target_type = type(getattr(cfg, key))
            if target_type == type(None):
                raise ValueError()
            if target_type is bool:
                value = raw != 'False'
            elif target_type is list:
                value = raw.split(',')
                if key == 'cuda_device':
                    value = [int(item) for item in value]
            else:
                value = target_type(raw)
            setattr(cfg, key, value)
    return
163,686 | import json
from tool_manager import ToolManager
import re
from rouge import Rouge
import os
from utils import ChatGPTWrapper, DavinciWrapper, GPT4Wrapper
import logging
from tqdm import tqdm
from api_call_extraction import parse_api_call
from datetime import datetime
import numpy as np
class Rouge(datasets.Metric):
    # NOTE(review): this class depends on names not defined in this file
    # (datasets, rouge_scorer, scoring, _DESCRIPTION, _CITATION,
    # _KWARGS_DESCRIPTION) — presumably supplied by the HF `rouge` metric
    # script this was copied from; confirm the surrounding module provides them.
    def _info(self):
        # Metric metadata: predictions and references are both plain strings.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/google-research/google-research/tree/master/rouge"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/ROUGE_(metric)",
                "https://github.com/google-research/google-research/tree/master/rouge",
            ],
        )
    def _compute(self, predictions, references, rouge_types=None, use_agregator=True, use_stemmer=False):
        # Score each (reference, prediction) pair; with use_agregator (sic,
        # upstream spelling) results are bootstrap-aggregated, otherwise
        # returned as per-pair lists keyed by rouge type.
        if rouge_types is None:
            rouge_types = ["rouge1", "rouge2", "rougeL", "rougeLsum"]
        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
        if use_agregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []
        for ref, pred in zip(references, predictions):
            score = scorer.score(ref, pred)
            if use_agregator:
                aggregator.add_scores(score)
            else:
                scores.append(score)
        if use_agregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = list(score[key] for score in scores)
        return result
def calculate_rouge_l_score(reference, hypothesis):
    """Return the ROUGE-L F1 score of *hypothesis* against *reference*.

    Returns 0 for an empty hypothesis: rouge.Rouge.get_scores raises
    ValueError on empty input, and an empty prediction has no overlap
    anyway. This matches the guarded copy of this helper used by the
    level-3 evaluator elsewhere in this project.
    """
    if hypothesis == '':
        return 0
    rouge = Rouge()
    scores = rouge.get_scores(hypothesis, reference)
    rouge_l_score = scores[0]['rouge-l']['f']
    return rouge_l_score
163,687 | import json
from tool_manager import ToolManager
import re
from rouge import Rouge
import os
from utils import ChatGPTWrapper, DavinciWrapper, GPT4Wrapper
import logging
from tqdm import tqdm
from api_call_extraction import parse_api_call
from datetime import datetime
import numpy as np
def get_api_call(model_output):
    """Return the first bracketed API call "[Name(args)]" in *model_output*, or None."""
    pattern = re.compile(r"\[(\w+)\((.*)\)\]")
    found = pattern.search(model_output)
    return found.group(0) if found else None
163,688 | import json
from tool_manager import ToolManager
from api_call_extraction import parse_api_call, get_api_call
import logging
from rouge import Rouge
def split_by_uppercase(s):
    """Insert a space before every uppercase char ("ToolSearcher" -> "Tool Searcher")."""
    spaced = [' ' + ch if ch.isupper() else ch for ch in s]
    return ''.join(spaced).strip()
def calculate_rouge_l_score(reference, hypothesis):
    """ROUGE-L F1 of *hypothesis* vs *reference*; an empty hypothesis scores 0."""
    if hypothesis == '':
        return 0
    scorer = Rouge()
    all_scores = scorer.get_scores(hypothesis, reference)
    return all_scores[0]['rouge-l']['f']
class ToolManager:
    """Registry and dispatcher for the tool/API classes of this benchmark.

    Discovers API subclasses in *apis_dir*, loads their seed databases from
    ./init_database, and exposes lookup, initialization, and parameter-checked
    call entry points.
    """
    def __init__(self, apis_dir='./apis') -> None:
        """Scan *apis_dir* for API subclasses and pre-load their init databases."""
        import importlib.util
        all_apis = []
        # import all the file in the apis folder, and load all the classes
        # NOTE(review): relies on `os`, `json`, and a project `API` base class
        # being importable at module level — confirm against the real file header.
        except_files = ['__init__.py', 'api.py']
        for file in os.listdir(apis_dir):
            if file.endswith('.py') and file not in except_files:
                api_file = file.split('.')[0]
                basename = os.path.basename(apis_dir)
                # import as package.module, e.g. "apis.add_meeting"
                module = importlib.import_module(f'{basename}.{api_file}')
                classes = [getattr(module, x) for x in dir(module) if isinstance(getattr(module, x), type)]
                for cls in classes:
                    if issubclass(cls, API) and cls is not API:
                        all_apis.append(cls)
        classes = all_apis
        # database_name -> parsed JSON seed data
        self.init_databases = {}
        init_database_dir = './init_database'
        for file in os.listdir(init_database_dir):
            if file.endswith('.json'):
                database_name = file.split('.')[0]
                with open(os.path.join(init_database_dir, file), 'r') as f:
                    self.init_databases[database_name] = json.load(f)
        # Get the description parameter for each class
        apis = []
        for cls in classes:
            if issubclass(cls, object) and cls is not object:
                name = cls.__name__
                cls_info = {
                    'name': name,
                    'class': cls,
                    'description': cls.description,
                    'input_parameters': cls.input_parameters,
                    'output_parameters': cls.output_parameters,
                }
                # attach the seed database when the API declares one
                if hasattr(cls, 'database_name') and cls.database_name in self.init_databases:
                    cls_info['init_database'] = self.init_databases[cls.database_name]
                apis.append(cls_info)
        self.apis = apis
        # cache of instantiated tools, keyed by tool name
        self.inited_tools = {}
        # CheckToken (when present) is shared with every token-guarded tool
        if 'CheckToken' in self.list_all_apis():
            self.token_checker = self.init_tool('CheckToken')
    def get_api_by_name(self, name: str):
        """
        Gets the API with the given name.
        Parameters:
        - name (str): the name of the API to get.
        Returns:
        - api (dict): the API with the given name.
        """
        for api in self.apis:
            if api['name'] == name:
                return api
        raise Exception('invalid tool name.')
    def get_api_description(self, name: str):
        """
        Gets the description of the API with the given name.
        Parameters:
        - name (str): the name of the API to get the description of.
        Returns:
        - desc (str): the description of the API with the given name.
        """
        api_info = self.get_api_by_name(name).copy()
        # strip non-serializable / bulky fields before dumping to JSON
        api_info.pop('class')
        if 'init_database' in api_info:
            api_info.pop('init_database')
        return json.dumps(api_info)
    def init_tool(self, tool_name: str, *args, **kwargs):
        """
        Initializes a tool with the given name and parameters.
        Parameters:
        - tool_name (str): the name of the tool to initialize.
        - args (list): the positional arguments to initialize the tool with.
        - kwargs (dict): the parameters to initialize the tool with.
        Returns:
        - tool (object): the initialized tool.
        """
        if tool_name in self.inited_tools:
            return self.inited_tools[tool_name]
        # Get the class for the tool
        api_class = self.get_api_by_name(tool_name)['class']
        temp_args = []
        if 'init_database' in self.get_api_by_name(tool_name):
            # Initialize the tool with the init database
            temp_args.append(self.get_api_by_name(tool_name)['init_database'])
        # token-guarded tools receive the shared CheckToken instance
        if tool_name != 'CheckToken' and 'token' in self.get_api_by_name(tool_name)['input_parameters']:
            temp_args.append(self.token_checker)
        args = temp_args + list(args)
        tool = api_class(*args, **kwargs)
        self.inited_tools[tool_name] = tool
        return tool
    def process_parameters(self, tool_name: str, parameters: list):
        """Coerce a positional list of string parameters to the tool's declared types.

        Raises AssertionError on count/type mismatch; only int/float/str are
        supported here (the keyword path in api_call supports more types).
        """
        input_parameters = self.get_api_by_name(tool_name)['input_parameters'].values()
        assert len(parameters) == len(input_parameters), 'invalid number of parameters.'
        processed_parameters = []
        for this_para, input_para in zip(parameters, input_parameters):
            para_type = input_para['type']
            if para_type == 'int':
                assert this_para.isdigit(), 'invalid parameter type. parameter: {}'.format(this_para)
                processed_parameters.append(int(this_para))
            elif para_type == 'float':
                assert this_para.replace('.', '', 1).isdigit(), 'invalid parameter type.'
                processed_parameters.append(float(this_para))
            elif para_type == 'str':
                processed_parameters.append(this_para)
            else:
                raise Exception('invalid parameter type.')
        return processed_parameters
    def api_call(self, tool_name: str, **kwargs):
        """
        Calls the API with the given name and parameters.
        """
        input_parameters = self.get_api_by_name(tool_name)['input_parameters']  # {'username': {'type': 'str', 'description': 'The username of the user.'}, 'password': {'type': 'str', 'description': 'The password of the user.'}}
        # assert len(kwargs) == len(input_parameters), 'invalid number of parameters. expected: {}, got: {}'.format(len(input_parameters), len(kwargs))
        processed_parameters = {}
        for input_key in kwargs:
            input_value = kwargs[input_key]
            assert input_key in input_parameters, 'invalid parameter name. parameter: {}'.format(input_key)
            required_para = input_parameters[input_key]
            required_type = required_para['type']
            if required_type == 'int':
                if isinstance(input_value, str):
                    assert input_value.isdigit(), 'invalid parameter type. parameter: {}'.format(input_value)
                    processed_parameters[input_key] = int(input_value)
            elif required_type == 'float':
                if isinstance(input_value, str):
                    assert input_value.replace('.', '', 1).isdigit(), 'invalid parameter type.'
                    processed_parameters[input_key] = float(input_value)
            elif required_type == 'str':
                processed_parameters[input_key] = input_value
            elif required_type == 'list(str)':
                # input_value = input_value.replace('\'', '"')
                processed_parameters[input_key] = input_value
            elif required_type == 'list':
                # input_value = input_value.replace('\'', '"')
                processed_parameters[input_key] = input_value
            elif required_type == 'bool':
                # NOTE(review): only the literal string 'True' maps to True;
                # a real bool True argument becomes False here — confirm intent.
                processed_parameters[input_key] = input_value == 'True'
            else:
                raise Exception('invalid parameter type.')
        tool = self.init_tool(tool_name)
        result = tool.call(**processed_parameters)
        return result
    def command_line(self):
        """
        Starts the command line interface for the tool manager.
        """
        mode = 'function_call'  # 'function_call' or 'qa'
        if mode == 'qa':
            while True:
                tool_keywords = input('Please enter the keywords for the tool you want to use (\'exit\' to exit):\n')
                tool_searcher = self.init_tool('ToolSearcher')
                response = tool_searcher.call(tool_keywords)
                tool = self.init_tool(response['output']['name'])
                print('The tool you want to use is: \n' + self.get_api_description(response['output']['name']))
                while True:
                    command = input('Please enter the parameters for the tool you want to use (\'exit\' to exit): \n')
                    if command == 'exit':
                        break
                    else:
                        command = command.replace(' ', '')
                        processed_parameters = self.process_parameters(response['output']['name'], command.split(','))
                        print(tool.call(*processed_parameters))
        elif mode == 'function_call':
            while True:
                command = input('Please enter the command for the tool you want to use: \n')
                if command == 'exit':
                    break
                api_name, param_dict = parse_api_call(command)
                print(self.api_call(api_name, **param_dict))
    def list_all_apis(self):
        """
        Lists all the APIs.
        Returns:
        - apis (list): a list of all the APIs.
        """
        return [api['name'] for api in self.apis]
def get_api_call(model_output):
    """Extract the first "[ApiName(...)]" substring from model output, or None."""
    match = re.search(r"\[(\w+)\((.*)\)\]", model_output)
    if match is None:
        return None
    return match.group(0)
def parse_api_call(text):
    """Parse "[ApiName(k1='v1', k2=[...], k3=bare)]" into (api_name, {param: raw_str}).

    All parameter values are returned as strings exactly as written
    (quotes stripped, list/bare tokens kept verbatim).
    """
    call = re.search(r"\[(\w+)\((.*)\)\]", text, re.MULTILINE)
    api_name = call.group(1)
    arg_text = call.group(2)
    # three alternatives: quoted value | bracketed list | bare word
    arg_re = r"(\w+)\s*=\s*['\"](.+?)['\"]|(\w+)\s*=\s*(\[.*\])|(\w+)\s*=\s*(\w+)"
    parsed = {}
    for piece in re.finditer(arg_re, arg_text):
        if piece.group(1):
            parsed[piece.group(1)] = piece.group(2)
        elif piece.group(3):
            parsed[piece.group(3)] = piece.group(4)
        elif piece.group(5):
            parsed[piece.group(5)] = piece.group(6)
    return api_name, parsed
def test_json():
    """Evaluate a predictions file against the level-3 ground truth.

    Two modes, switched by the hard-coded `if_api` flag:
    - API mode: re-executes each predicted API call through ToolManager and
      checks it with the ground-truth API's correctness checker.
    - Response mode: scores free-text responses with ROUGE-L.
    """
    # if_api = True
    if_api = False
    # NOTE(review): pred_path is a placeholder — set to the real predictions file.
    pred_path = 'path-to-json'
    gt_path = 'test_data/level-3.json'
    tool_manager = ToolManager('./lv3_apis')
    with open(pred_path, 'r') as f:
        # predictions are stored as JSON lines
        preds = [json.loads(line) for line in f.readlines()]
        # preds = json.load(f)
    with open(gt_path, 'r') as f:
        gts = json.load(f)
    # if_api = 'API-Request:' in preds[0]['output']
    if if_api:
        total_num = len(preds)
        correct_num = 0
        errored_sample_ids = []
        tool_search_error_num = 0
    else:
        rougel_scores = []
    for pred_id, pred in enumerate(preds):
        if if_api:
            sample_id = pred['sample_id']
            # if sample_id in errored_sample_ids:
            #     continue
            api_id = pred['api_id']
            gt = gts[sample_id]['apis'][api_id]
            gt_api_name = gt['api_name']
            gt_result = gt['output']
            pred_api_call = get_api_call(pred['pred'])
            if not pred_api_call:
                logging.warning('No api call found in pred: {}'.format(pred_id))
                errored_sample_ids.append(sample_id)
                continue
            try:
                pred_api_name, pred_param_dict = parse_api_call(pred_api_call)
            except Exception as e:
                logging.warning('Parse api call error: {} {}'.format(str(e), pred_id))
                errored_sample_ids.append(sample_id)
                continue
            try:
                if pred_api_name == 'ToolSearcher':
                    # ToolSearcher keywords are CamelCase tool names; split them
                    pred_param_dict['keywords'] = split_by_uppercase(pred_param_dict['keywords'])
                pred_result = tool_manager.api_call(pred_api_name, **pred_param_dict)
            except TypeError as e:
                logging.warning('TypeError: {} {}'.format(str(e), pred_id))
                errored_sample_ids.append(sample_id)
                continue
            except AssertionError as e:
                logging.warning('AssertionError: {} {}'.format(str(e), pred_id))
                errored_sample_ids.append(sample_id)
                continue
            except Exception as e:
                # only a bad tool name is tolerated; everything else is a real bug
                if str(e) == 'invalid tool name.':
                    logging.warning('invalid tool name: {} {}'.format(str(e), pred_id))
                    errored_sample_ids.append(sample_id)
                    continue
                else:
                    raise e
            gt_api = tool_manager.init_tool(gt_api_name)
            try:
                correct = gt_api.check_api_call_correctness(pred_result, gt_result)
            except KeyError:
                correct = False
                logging.warning('KeyError: {}'.format(pred_id))
            except AssertionError as e:
                correct = False
                logging.warning('AssertionError: {} {}'.format(str(e), pred_id))
            if correct:
                correct_num += 1
            else:
                # for test toolsearcher
                errored_sample_ids.append(sample_id)
                if gt_api_name != 'ToolSearcher':
                    pass
                else:
                    tool_search_error_num += 1
                logging.warning('Incorrect: {}'.format(pred_id))
        else:
            gt_response = pred['output']
            # strip speaker tags before scoring
            pred_response = pred['pred'].replace('User:', '').replace('AI:', '').strip()
            rouge_l_score = calculate_rouge_l_score(gt_response, pred_response)
            rougel_scores.append(rouge_l_score)
    if if_api:
        print('Accuracy: {}'.format(correct_num / total_num))
        print('Total: {}'.format(total_num))
        print('Correct: {}'.format(correct_num))
        # NOTE(review): 50 appears to be the fixed number of level-3 samples — confirm.
        print('Sample Accuracy: {}'.format((50 - len(set(errored_sample_ids))) / 50))
        print('Total: {}'.format(50))
        print('Correct: {}'.format(50 - len(set(errored_sample_ids))))
        print('ToolSearcher Error Num: {}'.format(tool_search_error_num))
    else:
        print('Rouge-L: {}'.format(sum(rougel_scores) / len(rougel_scores)))
163,689 | import json
from tool_manager import ToolManager
import re
from rouge import Rouge
import os
from utils import ChatGPTWrapper, DavinciWrapper
import logging
from tqdm import tqdm
from api_call_extraction import parse_api_call
from datetime import datetime
import numpy as np
class Rouge(datasets.Metric):
    """HuggingFace-datasets metric wrapper around Google's rouge_score package."""
    def _info(self):
        """Declare features (string predictions/references) and reference URLs."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/google-research/google-research/tree/master/rouge"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/ROUGE_(metric)",
                "https://github.com/google-research/google-research/tree/master/rouge",
            ],
        )
    def _compute(self, predictions, references, rouge_types=None, use_agregator=True, use_stemmer=False):
        """Score each (reference, prediction) pair; bootstrap-aggregate when requested.

        Note: 'use_agregator' (sic) is kept misspelled for API compatibility.
        """
        if rouge_types is None:
            rouge_types = ["rouge1", "rouge2", "rougeL", "rougeLsum"]
        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
        if use_agregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []
        for ref, pred in zip(references, predictions):
            score = scorer.score(ref, pred)
            if use_agregator:
                aggregator.add_scores(score)
            else:
                scores.append(score)
        if use_agregator:
            result = aggregator.aggregate()
        else:
            # one list of per-pair scores per rouge type
            result = {}
            for key in scores[0]:
                result[key] = list(score[key] for score in scores)
        return result
def calculate_rouge_l_score(reference, hypothesis):
    """Return the ROUGE-L F1 score of *hypothesis* against *reference*.

    Returns 0 for an empty hypothesis: rouge.Rouge.get_scores raises
    ValueError on empty input, and an empty prediction has no overlap
    anyway. Matches the guarded copy of this helper used elsewhere in
    this project.
    """
    if hypothesis == '':
        return 0
    rouge = Rouge()
    scores = rouge.get_scores(hypothesis, reference)
    rouge_l_score = scores[0]['rouge-l']['f']
    return rouge_l_score
163,690 | import json
from tool_manager import ToolManager
import re
from rouge import Rouge
import os
from utils import ChatGPTWrapper, DavinciWrapper
import logging
from tqdm import tqdm
from api_call_extraction import parse_api_call
from datetime import datetime
import numpy as np
def get_api_call(model_output):
    """Find an API invocation of the form [Name(args)] in the text; None if absent."""
    hit = re.search(r"\[(\w+)\((.*)\)\]", model_output)
    return None if hit is None else hit.group(0)
163,691 | import json
from tool_manager import ToolManager
import re
from rouge import Rouge
import os
from utils import ChatGPTWrapper, DavinciWrapper
import logging
from tqdm import tqdm
from api_call_extraction import parse_api_call
from datetime import datetime
import numpy as np
def print_error_samples(sample):
    """Dump one failed evaluation sample (instruction, input, prediction, gold) to stdout."""
    pred = sample['pred']
    print(f"Instruction: \n{pred['instruction']}\n")
    print(f"Input: \n{pred['input']}\n")
    print(f"Output: \n{sample['model_output']}\n")
    print(f"Ground truth: \n{pred['expected_output']}\n")
163,692 | import gradio as gr
import os
import json
import requests
from tool_manager import ToolManager
from utils import ChatGPTWrapper, GPT4Wrapper
from api_call_extraction import get_api_call, parse_api_call
from api_call_extraction import parse_api_call
import logging
# Root logger config plus module-level singletons shared by the demo callbacks.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")  # read once at startup; update_api_key can override
tool_manager = ToolManager()  # loads API classes from the default ./apis directory
proxies = None  # optional requests-style proxy mapping for OpenAI calls
def history_to_chat(history):
    """Convert role-tagged messages into (user, assistant) pairs for gr.Chatbot.

    A new message flushes the running pair when its slot is already filled,
    so unanswered turns appear with None in the missing slot.
    """
    pairs = []
    current = (None, None)  # (user message, assistant message)
    for message in history:
        role = message['role']
        if role == 'user':
            if current[0] is not None:
                # previous user turn never got a reply; flush it
                pairs.append(current)
                current = (None, None)
            current = (message['content'], current[1])
        elif role == 'assistant':
            if current[1] is not None:
                pairs.append(current)
                current = (None, None)
            current = (current[0], message['content'])
    if current != (None, None):
        pairs.append(current)
    return pairs
def history_to_backend(history):
    """Pair system (API result) and assistant (API call) messages for the backend pane.

    The 'placeholder' seed lets a leading assistant message pair with None;
    it is blanked out of the first pair before returning.
    """
    pairs = []
    current = ('placeholder', None)  # (system/API result, assistant/API call)
    for message in history:
        role = message['role']
        if role == 'system':
            if current[0] is not None:
                pairs.append(current)
                current = (None, None)
            current = (message['content'], current[1])
        elif role == 'assistant':
            if current[1] is not None:
                pairs.append(current)
                current = (None, None)
            current = (current[0], message['content'])
    if current[0] is not None or current[1] is not None:
        pairs.append(current)
    if pairs[0][0] == 'placeholder':
        pairs[0] = (None, pairs[0][1])
    return pairs
class ChatGPTWrapper:
    """Thin HTTP client for the OpenAI /v1/chat/completions endpoint (gpt-3.5-turbo)."""
    def __init__(self, api_key='', proxies=None) -> None:
        # Set the request parameters
        self.url = 'https://api.openai.com/v1/chat/completions'
        # Set the header
        self.header = {
            "Content-Type": "application/json",
            "Authorization": 'Bearer {}'.format(api_key)
        }
        # optional requests-style proxy mapping, e.g. {'https': 'http://host:port'}
        self.proxies = proxies
    def call(self, messages, **kwargs):
        """POST *messages* plus sampling kwargs; return the parsed JSON response.

        Raises RateLimitReached / RecoverableError / OfficialError (project
        exception types defined elsewhere) depending on the error payload.
        """
        query = {
            "model": "gpt-3.5-turbo",
            "messages": messages
        }
        query.update(kwargs)
        # Make the request
        if self.proxies:
            response = requests.post(self.url, headers=self.header, data=json.dumps(query), proxies=self.proxies)
        else:
            response = requests.post(self.url, headers=self.header, data=json.dumps(query))
        response = response.json()
        if 'error' in response and 'Rate limit reached' in response['error']['message']:
            raise RateLimitReached()
        elif 'choices' in response:
            return response
        else:
            if 'error' in response:
                print(response['error']['message'])
                # the exact server-overload message is retryable; everything else is fatal
                if response['error']['message'] == 'The server had an error while processing your request. Sorry about that!':
                    raise RecoverableError(response['error']['message'])
                else:
                    raise OfficialError(response['error']['message'])
            else:
                raise Exception('Unknown error occured. Json: {}'.format(response))
class GPT4Wrapper(ChatGPTWrapper):
    """ChatGPTWrapper variant that targets the gpt-4-0314 model.

    NOTE(review): duplicates the parent's call() except for the model name —
    could be collapsed by parameterizing the model on the base class.
    """
    def call(self, messages, **kwargs):
        """POST *messages* to gpt-4-0314; error handling mirrors ChatGPTWrapper.call."""
        query = {
            "model": "gpt-4-0314",
            "messages": messages
        }
        query.update(kwargs)
        # Make the request
        if self.proxies:
            response = requests.post(self.url, headers=self.header, data=json.dumps(query), proxies=self.proxies)
        else:
            response = requests.post(self.url, headers=self.header, data=json.dumps(query))
        response = response.json()
        if 'error' in response and 'Rate limit reached' in response['error']['message']:
            raise RateLimitReached()
        elif 'choices' in response:
            return response
        else:
            if 'error' in response:
                print(response['error']['message'])
                if response['error']['message'] == 'The server had an error while processing your request. Sorry about that!':
                    raise RecoverableError(response['error']['message'])
                else:
                    raise OfficialError(response['error']['message'])
            else:
                raise Exception('Unknown error occured. Json: {}'.format(response))
def get_api_call(model_output):
    """Return the first "[Name(args)]" API-call substring in *model_output*, else None."""
    compiled = re.compile(r"\[(\w+)\((.*)\)\]")
    result = compiled.search(model_output)
    if result:
        return result.group(0)
    return None
def parse_api_call(text):
    """Split "[ApiName(k='v', ...)]" into the API name and a dict of raw string params."""
    outer = re.search(r"\[(\w+)\((.*)\)\]", text, re.MULTILINE)
    name, raw_args = outer.group(1), outer.group(2)
    # quoted value | bracketed list | bare word — values stay strings
    token_re = re.compile(
        r"(\w+)\s*=\s*['\"](.+?)['\"]|(\w+)\s*=\s*(\[.*\])|(\w+)\s*=\s*(\w+)"
    )
    params = {}
    for token in token_re.finditer(raw_args):
        for key_idx, val_idx in ((1, 2), (3, 4), (5, 6)):
            if token.group(key_idx):
                params[token.group(key_idx)] = token.group(val_idx)
                break
    return name, params
def predict(model_type, system_msg, inputs, top_p, temperature, chatbot=[], backend=[], history=[], backend_history=[], all_history=[]):
    """Gradio generator callback: stream the chat, executing any API calls the model emits.

    Yields (chat_pairs, backend_pairs, history, backend_history, all_history,
    raw_response) after each state change so the UI updates incrementally.

    NOTE(review): the list defaults are mutable — safe only because Gradio
    always passes explicit State values; consider None-defaults regardless.
    NOTE(review): `gpt` is unbound if model_type is neither 'GPT-4' nor
    'GPT-3.5' (NameError on first use).
    """
    if model_type == 'GPT-4':
        logging.info(f"model_type is {model_type}")
        gpt = GPT4Wrapper(api_key=OPENAI_API_KEY, proxies=proxies)
    elif model_type == 'GPT-3.5':
        logging.info(f"model_type is {model_type}")
        gpt = ChatGPTWrapper(api_key=OPENAI_API_KEY, proxies=proxies)
    logging.info(f"system message is ^^ {system_msg}")
    # With no system message, honor the UI sliders; otherwise use defaults.
    if system_msg.strip() == '':
        system_prompt = None
        payload = {
            "temperature" : temperature, #1.0,
            "top_p": top_p, #1.0,
            "n" : 1,
        }
    else:
        system_prompt = {"role": "system", "content": system_msg}
        payload = {
            "temperature" : 1.0,
            "top_p":1.0,
            "n" : 1,
        }
    if system_prompt:
        all_history.append(system_prompt)
    logging.info(f"Logging : user input is - {inputs}")
    all_history.append({"role": "user", "content": "(User) " + inputs})
    history.append({"role": "user", "content": "(User) " + inputs})
    messages = all_history.copy()
    backend = history_to_backend(backend_history)
    chat = history_to_chat(history)
    # show the user's message immediately, before the model responds
    yield chat, backend, history, backend_history, all_history, ''
    logging.info(f"Logging : payload is - {payload}")
    response = gpt.call(messages, **payload)
    model_output = response['choices'][0]['message']['content']
    # Keep executing API calls until the model produces a plain reply.
    while get_api_call(model_output):
        logging.info(f"Logging : api detected model_output is - {model_output}")
        backend_history.append({"role": "assistant", "content": "(API Call) " + get_api_call(model_output)})
        all_history.append({"role": "assistant", "content": "(API Call) " + get_api_call(model_output)})
        backend = history_to_backend(backend_history)
        chat = history_to_chat(history)
        yield chat, backend, history, backend_history, all_history, response
        api_name, param_dict = parse_api_call(model_output)
        try:
            result = tool_manager.api_call(api_name, **param_dict)
        except Exception as e:
            # dispatch failure (bad name/params) is fed back to the model as text
            api_result = '(API) ' + str(e)
        else:
            if result['exception']:
                api_result = '(API) Exception: ' + str(result['exception'])
            else:
                api_result = '(API) ' + str(result['output'])
        # print(api_result)
        logging.info(api_result)
        backend_history.append({"role": "system", "content": api_result})
        all_history.append({"role": "system", "content": api_result})
        backend = history_to_backend(backend_history)
        chat = history_to_chat(history)
        yield chat, backend, history, backend_history, all_history, response
        messages = all_history.copy()
        response = gpt.call(messages, **payload)
        model_output = response['choices'][0]['message']['content']
    logging.info(f"Logging : model_output is - {model_output}")
    history.append({"role": "assistant", "content": model_output})
    all_history.append({"role": "assistant", "content": model_output})
    logging.info(f"Logging : history is - {history}")
    backend = history_to_backend(backend_history)
    chat = history_to_chat(history)
    logging.info(f"Logging : all_history is - {all_history}")
    yield chat, backend, history, backend_history, all_history, response # resembles {chatbot: chat, state: history}
163,693 | import gradio as gr
import os
import json
import requests
from tool_manager import ToolManager
from utils import ChatGPTWrapper, GPT4Wrapper
from api_call_extraction import get_api_call, parse_api_call
from api_call_extraction import parse_api_call
import logging
# --- Gradio UI wiring: user-facing chat pane plus a backend API-trace pane ---
with gr.Blocks(css = """#col_container { margin-left: auto; margin-right: auto;} #chatbot {height: 520px; overflow: auto;}""",
               theme=theme) as demo:
    gr.HTML(title)
    gr.HTML(intro)
    # gr.HTML('''<center><a href="https://huggingface.co/spaces/ysharma/ChatGPT4?duplicate=true"><img src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>Duplicate the Space and run securely with your OpenAI API Key</center>''')
    with gr.Column(elem_id = "col_container"):
        #GPT4 API Key is provided by Huggingface
        api_key = gr.Textbox(label="OpenAI API Key", info="You can get your API key from https://platform.openai.com/account/api-keys", value=OPENAI_API_KEY, visible=False)
        model_type = gr.Radio(["GPT-3.5", "GPT-4"], label="Model", info="Which model to use for the chatbot. GPT-3.5 is the default.", value="GPT-3.5")
        with gr.Accordion(label="System message:", open=False):
            system_msg = gr.Textbox(label="Instruct the AI Assistant to set its beaviour", info = system_msg_info, value=prompt)
            accordion_msg = gr.HTML(value="🚧 To set System message you will have to refresh the app", visible=False)
        with gr.Row():
            chatbot = gr.Chatbot(label='GPT', elem_id="chatbot")
            backend = gr.Chatbot(label='Backend', elem_id="backend")
        inputs = gr.Textbox(placeholder= "Hi there!", label= "Type an input and press Enter")
        # Session state: user-visible history, backend trace, combined transcript
        state = gr.State([])
        backend_state = gr.State([])
        all_state = gr.State([])
        with gr.Row():
            with gr.Column(scale=7):
                b1 = gr.Button().style(full_width=True)
            with gr.Column(scale=3):
                server_status_code = gr.Textbox(label="Status code from OpenAI server", )
        # top_p, temperature
        with gr.Accordion("Parameters", open=False):
            top_p = gr.Slider( minimum=-0, maximum=1.0, value=1.0, step=0.05, interactive=True, label="Top-p (nucleus sampling)",)
            temperature = gr.Slider( minimum=-0, maximum=5.0, value=1.0, step=0.1, interactive=True, label="Temperature",)
    #Event handling
    api_key.change(update_api_key, [api_key], [])
    inputs.submit( predict, [model_type, system_msg, inputs, top_p, temperature, chatbot, backend, state, backend_state, all_state], [chatbot, backend, state, backend_state, all_state, server_status_code],) #openai_api_key
    b1.click( predict, [model_type, system_msg, inputs, top_p, temperature, chatbot, backend, state, backend_state, all_state], [chatbot, backend, state, backend_state, all_state, server_status_code],) #openai_api_key
    inputs.submit(set_visible_false, [], [system_msg])
    b1.click(set_visible_false, [], [system_msg])
    inputs.submit(set_visible_true, [], [accordion_msg])
    b1.click(set_visible_true, [], [accordion_msg])
    b1.click(reset_textbox, [], [inputs])
    inputs.submit(reset_textbox, [], [inputs])
    b1.click(reset_textbox, [], [system_msg])
    inputs.submit(reset_textbox, [], [system_msg])
def reset_textbox():
    """Clear the bound textbox component."""
    cleared = gr.update(value='')
    return cleared
163,694 | import gradio as gr
import os
import json
import requests
from tool_manager import ToolManager
from utils import ChatGPTWrapper, GPT4Wrapper
from api_call_extraction import get_api_call, parse_api_call
from api_call_extraction import parse_api_call
import logging
# --- Gradio UI wiring (duplicate chunk): chat pane + backend API-trace pane ---
with gr.Blocks(css = """#col_container { margin-left: auto; margin-right: auto;} #chatbot {height: 520px; overflow: auto;}""",
               theme=theme) as demo:
    gr.HTML(title)
    gr.HTML(intro)
    # gr.HTML('''<center><a href="https://huggingface.co/spaces/ysharma/ChatGPT4?duplicate=true"><img src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>Duplicate the Space and run securely with your OpenAI API Key</center>''')
    with gr.Column(elem_id = "col_container"):
        #GPT4 API Key is provided by Huggingface
        api_key = gr.Textbox(label="OpenAI API Key", info="You can get your API key from https://platform.openai.com/account/api-keys", value=OPENAI_API_KEY, visible=False)
        model_type = gr.Radio(["GPT-3.5", "GPT-4"], label="Model", info="Which model to use for the chatbot. GPT-3.5 is the default.", value="GPT-3.5")
        with gr.Accordion(label="System message:", open=False):
            system_msg = gr.Textbox(label="Instruct the AI Assistant to set its beaviour", info = system_msg_info, value=prompt)
            accordion_msg = gr.HTML(value="🚧 To set System message you will have to refresh the app", visible=False)
        with gr.Row():
            chatbot = gr.Chatbot(label='GPT', elem_id="chatbot")
            backend = gr.Chatbot(label='Backend', elem_id="backend")
        inputs = gr.Textbox(placeholder= "Hi there!", label= "Type an input and press Enter")
        # Session state: user-visible history, backend trace, combined transcript
        state = gr.State([])
        backend_state = gr.State([])
        all_state = gr.State([])
        with gr.Row():
            with gr.Column(scale=7):
                b1 = gr.Button().style(full_width=True)
            with gr.Column(scale=3):
                server_status_code = gr.Textbox(label="Status code from OpenAI server", )
        # top_p, temperature
        with gr.Accordion("Parameters", open=False):
            top_p = gr.Slider( minimum=-0, maximum=1.0, value=1.0, step=0.05, interactive=True, label="Top-p (nucleus sampling)",)
            temperature = gr.Slider( minimum=-0, maximum=5.0, value=1.0, step=0.1, interactive=True, label="Temperature",)
    #Event handling
    api_key.change(update_api_key, [api_key], [])
    inputs.submit( predict, [model_type, system_msg, inputs, top_p, temperature, chatbot, backend, state, backend_state, all_state], [chatbot, backend, state, backend_state, all_state, server_status_code],) #openai_api_key
    b1.click( predict, [model_type, system_msg, inputs, top_p, temperature, chatbot, backend, state, backend_state, all_state], [chatbot, backend, state, backend_state, all_state, server_status_code],) #openai_api_key
    inputs.submit(set_visible_false, [], [system_msg])
    b1.click(set_visible_false, [], [system_msg])
    inputs.submit(set_visible_true, [], [accordion_msg])
    b1.click(set_visible_true, [], [accordion_msg])
    b1.click(reset_textbox, [], [inputs])
    inputs.submit(reset_textbox, [], [inputs])
    b1.click(reset_textbox, [], [system_msg])
    inputs.submit(reset_textbox, [], [system_msg])
def set_visible_false():
    """Hide the bound component."""
    hidden = gr.update(visible=False)
    return hidden
163,695 | import gradio as gr
import os
import json
import requests
from tool_manager import ToolManager
from utils import ChatGPTWrapper, GPT4Wrapper
from api_call_extraction import get_api_call, parse_api_call
from api_call_extraction import parse_api_call
import logging
# --- Gradio UI wiring (duplicate chunk): chat pane + backend API-trace pane ---
with gr.Blocks(css = """#col_container { margin-left: auto; margin-right: auto;} #chatbot {height: 520px; overflow: auto;}""",
               theme=theme) as demo:
    gr.HTML(title)
    gr.HTML(intro)
    # gr.HTML('''<center><a href="https://huggingface.co/spaces/ysharma/ChatGPT4?duplicate=true"><img src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>Duplicate the Space and run securely with your OpenAI API Key</center>''')
    with gr.Column(elem_id = "col_container"):
        #GPT4 API Key is provided by Huggingface
        api_key = gr.Textbox(label="OpenAI API Key", info="You can get your API key from https://platform.openai.com/account/api-keys", value=OPENAI_API_KEY, visible=False)
        model_type = gr.Radio(["GPT-3.5", "GPT-4"], label="Model", info="Which model to use for the chatbot. GPT-3.5 is the default.", value="GPT-3.5")
        with gr.Accordion(label="System message:", open=False):
            system_msg = gr.Textbox(label="Instruct the AI Assistant to set its beaviour", info = system_msg_info, value=prompt)
            accordion_msg = gr.HTML(value="🚧 To set System message you will have to refresh the app", visible=False)
        with gr.Row():
            chatbot = gr.Chatbot(label='GPT', elem_id="chatbot")
            backend = gr.Chatbot(label='Backend', elem_id="backend")
        inputs = gr.Textbox(placeholder= "Hi there!", label= "Type an input and press Enter")
        # Session state: user-visible history, backend trace, combined transcript
        state = gr.State([])
        backend_state = gr.State([])
        all_state = gr.State([])
        with gr.Row():
            with gr.Column(scale=7):
                b1 = gr.Button().style(full_width=True)
            with gr.Column(scale=3):
                server_status_code = gr.Textbox(label="Status code from OpenAI server", )
        # top_p, temperature
        with gr.Accordion("Parameters", open=False):
            top_p = gr.Slider( minimum=-0, maximum=1.0, value=1.0, step=0.05, interactive=True, label="Top-p (nucleus sampling)",)
            temperature = gr.Slider( minimum=-0, maximum=5.0, value=1.0, step=0.1, interactive=True, label="Temperature",)
    #Event handling
    api_key.change(update_api_key, [api_key], [])
    inputs.submit( predict, [model_type, system_msg, inputs, top_p, temperature, chatbot, backend, state, backend_state, all_state], [chatbot, backend, state, backend_state, all_state, server_status_code],) #openai_api_key
    b1.click( predict, [model_type, system_msg, inputs, top_p, temperature, chatbot, backend, state, backend_state, all_state], [chatbot, backend, state, backend_state, all_state, server_status_code],) #openai_api_key
    inputs.submit(set_visible_false, [], [system_msg])
    b1.click(set_visible_false, [], [system_msg])
    inputs.submit(set_visible_true, [], [accordion_msg])
    b1.click(set_visible_true, [], [accordion_msg])
    b1.click(reset_textbox, [], [inputs])
    inputs.submit(reset_textbox, [], [inputs])
    b1.click(reset_textbox, [], [system_msg])
    inputs.submit(reset_textbox, [], [system_msg])
def set_visible_true():
    """Show the bound component."""
    shown = gr.update(visible=True)
    return shown
163,696 | import gradio as gr
import os
import json
import requests
from tool_manager import ToolManager
from utils import ChatGPTWrapper, GPT4Wrapper
from api_call_extraction import get_api_call, parse_api_call
from api_call_extraction import parse_api_call
import logging
# Configure root logging and read the OpenAI key from the environment once.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
def update_api_key(api_key):
    """Replace the module-level OpenAI API key with the value from the UI.

    Parameters:
    - api_key (str): the new key entered in the API-key textbox.
    """
    global OPENAI_API_KEY
    OPENAI_API_KEY = api_key
    # Security fix: never write the raw secret to the log — emit only a
    # redacted fingerprint so the key cannot leak through log files.
    redacted = f"{api_key[:6]}...({len(api_key)} chars)" if api_key else "<empty>"
    logging.info(f"Logging : OPENAI_API_KEY updated - {redacted}")
    return
163,697 | import re
def fn(**params):
    """Collect the keyword arguments into a dict (helper for eval-based parsing)."""
    return params
163,698 | import os
import pickle
import torch.distributed as dist
from transformers import PretrainedConfig, WavLMConfig, RobertaConfig
def get_rank():
    """Distributed rank of this process; 0 when torch.distributed is unavailable or uninitialized."""
    if dist.is_available() and dist.is_initialized():
        return dist.get_rank()
    return 0
163,699 | import os
import pickle
import torch.distributed as dist
from transformers import PretrainedConfig, WavLMConfig, RobertaConfig
def read_processed_pretrain(combined_path):
    """Load pickled pre-training data.

    If ``combined_path`` is a file, unpickle and return it directly.  If it is
    a directory, walk the tree and concatenate (with ``+``) the pickles found
    in every leaf directory (directories that contain no subdirectories).
    """
    if not os.path.isdir(combined_path):
        with open(combined_path, "rb") as f:
            return pickle.load(f)
    datas = None
    for root, subdirs, files in os.walk(combined_path):
        if subdirs:
            # Only leaf directories hold data shards.
            continue
        for name in files:
            with open(os.path.join(root, name), "rb") as fp:
                shard = pickle.load(fp)
            datas = shard if datas is None else datas + shard
    return datas
163,700 | import os
import pickle
import torch.distributed as dist
from transformers import PretrainedConfig, WavLMConfig, RobertaConfig
def get_ds_config(args, num_gpus):
    """Assemble a DeepSpeed engine configuration dict.

    Args:
        args: parsed CLI namespace providing batch_size, grad_acc, ds_stage
            and apex_level.
        num_gpus: number of GPUs; scales the global train batch size.
    """
    zero_optimization = {
        "stage": args.ds_stage,
        "stage3_param_persistence_threshold": 1e4,
        "stage3_max_live_parameters": 3e7,
        "stage3_prefetch_bucket_size": 3e7,
        "memory_efficient_linear": False,
    }
    fp16 = {
        "enabled": True,
        "opt_level": f"O{args.apex_level}",
        "loss_scale_window": 200,
    }
    return {
        # Global batch = per-GPU micro-batch x GPUs x accumulation steps.
        "train_batch_size": args.batch_size * num_gpus * args.grad_acc,
        "train_micro_batch_size_per_gpu": args.batch_size,
        "zero_optimization": zero_optimization,
        "fp16": fp16,
        "gradient_clipping": 1.0,
        "prescale_gradients": False,
        "wall_clock_breakdown": False,
    }
163,701 | import json
import math
import time
import tqdm
import random
import argparse
import numpy as np
from utils import *
from modeling_spectra.model import *
from dataset import PretrainDataset, DataCollatorForPreTraining, DownstreamDataset, DataCollatorForDownstream
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.data import DataLoader, DistributedSampler, RandomSampler
from transformers import RobertaTokenizerFast, AdamW, get_scheduler
from downstream_metrics import downstream_metrics
from sklearn.metrics import accuracy_score
os.environ["NCCL_DEBUG"] = "WARN"
def step(args, loss, model, optimizer, scheduler, grad_acc_bound):
    """Backpropagate ``loss`` and, at accumulation boundaries, step the optimizer.

    Three execution modes:
      * DeepSpeed (``args.ds_config`` set): the engine owns backward/step/accumulation.
      * Apex AMP (``args.apex_level > 0``): scaled backward, clip FP32 master params.
      * Plain PyTorch: ordinary backward, clip the model's own parameters.

    Args:
        args: namespace with ds_config, grad_acc, apex_level, grad_norm.
        loss: scalar loss tensor for the current micro-batch.
        model: model (or DeepSpeed engine) being trained.
        optimizer, scheduler: optimizer / LR-scheduler pair.
        grad_acc_bound: True when this micro-batch completes an accumulation
            window and the optimizer should actually step.
    """
    if args.ds_config:
        # DeepSpeed handles scaling, accumulation and stepping internally.
        model.backward(loss)
        model.step()
        return
    # Average over the accumulation window so gradient magnitudes match a
    # single large batch.
    loss = loss / args.grad_acc
    if args.apex_level > 0:
        with amp.scale_loss(loss, optimizer) as scaled_loss:
            scaled_loss.backward()
        if args.grad_norm > 0:
            torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.grad_norm)
    else:
        loss.backward()
        if args.grad_norm > 0:
            # BUG FIX: the original clipped amp.master_params(optimizer) here,
            # but apex is not in use on this branch (apex_level <= 0), so amp
            # is uninitialized; clip the model's parameters directly.
            torch.nn.utils.clip_grad_norm_(model.parameters(), args.grad_norm)
    if grad_acc_bound:
        optimizer.step()
        scheduler.step()
        optimizer.zero_grad()
def configure_training_engine(args, model, config, tokenizer, train_data, valid_data=None, test_data=None):
    """Build optimizer, LR scheduler, collator and data loaders, and wrap the
    model for the selected backend (DeepSpeed, apex AMP + DDP, or plain DDP).

    Returns:
        (model, optimizer, scheduler, train_loader, valid_loader, test_loader);
        valid/test loaders are None when the corresponding dataset is None.
    """
    # Parameters whose names contain these substrings get no weight decay.
    no_decay = ['bias', 'LayerNorm.weight', 'LayerNorm.bias']
    decay = [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)]
    # NOTE: `no_decay` is rebound from a list of name fragments to a list of
    # parameters here — intentional reuse, but easy to misread.
    no_decay = [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)]
    ogp = [{"params": decay, "weight_decay": args.weight_decay}, {"params": no_decay, "weight_decay": 0.0}]
    # Total optimizer steps across all epochs, accounting for accumulation
    # and the per-rank split of the dataset.
    num_train_steps = args.epochs * math.ceil(len(train_data) / args.batch_size / args.grad_acc / dist.get_world_size())
    if args.apex_level > 0:
        optimizer = FusedAdam(ogp, lr=args.lr, bias_correction=False)
    else:
        optimizer = AdamW(ogp, lr=args.lr, eps=1e-8)
    warmup_steps = int(args.warmup * num_train_steps)
    # Cosine schedule when there is no warmup, linear decay otherwise.
    scheduler = get_scheduler("cosine" if warmup_steps == 0 else "linear", optimizer, warmup_steps, num_train_steps)
    if args.mode == "pretrain":
        c = DataCollatorForPreTraining(tokenizer, config, args.apex_level > 0)
    else:
        # MOSI/MOSEI are regression-style tasks; the collator needs to know.
        c = DataCollatorForDownstream(args.audio_length, args.task in ["mosi", "mosei"])
    if args.ds_config:
        import deepspeed
        # DeepSpeed re-wraps model/optimizer/scheduler; the returned engine
        # replaces the raw model from here on.
        model, optimizer, _, scheduler = deepspeed.initialize(model=model, optimizer=optimizer, config=args.ds_config,
                                                              lr_scheduler=scheduler, dist_init_required=True)
    else:
        model.to(args.device)
        if args.apex_level > 0:
            model, optimizer = amp.initialize(model, optimizer, opt_level=f"O{args.apex_level}",
                                              keep_batchnorm_fp32=False if args.apex_level >= 2 else None,
                                              loss_scale="dynamic" if args.loss_scale == 0. else args.loss_scale)
        model = DDP(model, find_unused_parameters=True, device_ids=[args.local_rank], output_device=[args.local_rank])
    train_loader = DataLoader(train_data, batch_size=args.batch_size, collate_fn=c, pin_memory=True, num_workers=20,
                              sampler=DistributedSampler(train_data, seed=args.seed) if args.local_rank >= 0 else RandomSampler(train_data))
    # Validation/test are evaluated with plain random sampling (no sharding).
    valid_loader = DataLoader(valid_data, batch_size=args.batch_size, collate_fn=c, pin_memory=True, num_workers=20,
                              sampler=RandomSampler(valid_data)) if valid_data else None
    test_loader = DataLoader(test_data, batch_size=args.batch_size, collate_fn=c, pin_memory=True, num_workers=20,
                             sampler=RandomSampler(test_data)) if test_data else None
    return model, optimizer, scheduler, train_loader, valid_loader, test_loader
class PretrainDataset(Dataset):
    """Dialogue-turn dataset for audio/text pre-training with negative sampling.

    Each element of ``datas`` appears to be a sequence of
    [audio_path, words, ..., prev_turn_index], where the last field links a
    turn to its predecessor (-1 when there is none) — TODO confirm against
    the preprocessing script.
    """
    def __init__(self, datas, num_turns, prefix):
        self.datas = datas
        self.n = len(datas)
        # Root directory joined onto each stored audio feature path.
        self.prefix = prefix
        # Maximum number of history turns to stitch together.
        self.num_turns = num_turns
        # Only turns that HAVE a predecessor can serve as anchors.
        self.has_positive = [i for i, d in enumerate(datas) if d[-1] >= 0]
    def __len__(self):
        return len(self.has_positive)
    def __getitem__(self, idx):
        anchor_idx = self.has_positive[idx]  # the current (anchor) turn
        prev_idx = self.datas[anchor_idx][-1]  # the immediately preceding turn
        # Sample negatives from [0, n-3] and shift past the anchor; presumably
        # this avoids drawing the anchor or its neighbour — TODO confirm.
        negative_idx_audio = random.randint(0, self.n - 3)
        if negative_idx_audio >= anchor_idx:
            negative_idx_audio += 2
        negative_idx_text = random.randint(0, self.n - 3)
        if negative_idx_text >= anchor_idx:
            negative_idx_text += 2
        history = []  # turns earlier than the preceding one
        curr_idx = prev_idx
        # Walk the predecessor chain up to num_turns, prepending word lists
        # (dropping each turn's first token) to build the textual history.
        for i in range(2, self.num_turns):
            if self.datas[curr_idx][-1] == -1:
                break
            curr_idx = self.datas[curr_idx][-1]
            history = self.datas[curr_idx][1][1:] + history
        af, aw = self.datas[anchor_idx][:2]
        at = self.datas[anchor_idx][2:-1]
        pf, pw = self.datas[prev_idx][:2]
        pt = self.datas[prev_idx][2:-1]
        # Audio negative and text negative are sampled independently.
        nf = self.datas[negative_idx_audio][0]
        nw = self.datas[negative_idx_text][1]
        af, pf, nf = map(lambda x: os.path.join(self.prefix, x), [af, pf, nf])
        return np.load(pf), pw, pt, np.load(af), aw, at, np.load(nf), nw, [0] + history
def pretrain(args, config, tokenizer):
    """Run the pre-training loop: MLM + MAM + response-selection + span losses.

    Args:
        args: parsed CLI namespace (paths, epochs, grad_acc, save options...).
        config: model configuration passed to ATForPreTraining.
        tokenizer: tokenizer handed to the training-engine/collator setup.

    Saves an unwrapped checkpoint every ``args.save_interval`` epochs on rank 0.
    """
    train_data = PretrainDataset(read_processed_pretrain(args.transcripts), args.num_turns, args.data_path)
    # Model priority: full checkpoint > separate audio/text encoders > scratch.
    if args.model_path:
        model = ATForPreTraining.from_pretrained(args.model_path, config=config)
    elif args.audio_path and args.text_path:
        model = ATForPreTraining(config, args.audio_path, args.text_path)
    else:
        model = ATForPreTraining(config)
    model, optimizer, scheduler, train_loader, _, _ = configure_training_engine(args, model, config, tokenizer, train_data)
    if args.grad_ckpt:
        model.gradient_checkpointing_enable()
    model.train()
    outer_it = tqdm.trange(args.epochs)
    for i in outer_it:
        # BUG FIX: the original condition was `... and get_rank()`, which
        # created the tqdm bar only on NON-zero ranks, while the
        # set_postfix_str call below runs on rank 0 — where inner_it would be
        # a plain DataLoader without set_postfix_str. Show the bar on rank 0.
        inner_it = tqdm.tqdm(train_loader, desc="Inner") if args.show_inner_progress and get_rank() == 0 else train_loader
        le = len(inner_it)
        if isinstance(train_loader.sampler, DistributedSampler):
            # Reshuffle shards each epoch in distributed mode.
            train_loader.sampler.set_epoch(i)
        for j, batch in enumerate(inner_it):
            batch = tuple(t.to(args.device) for t in batch)
            a_input, a_mask, t_input, t_label, t_mask, s_valid, e_valid, token_type, starts, ends = batch
            mlm, mam, rs, span = model(a_input, t_input, a_mask, t_mask, t_label, token_type, s_valid, e_valid, starts, ends)
            loss = mlm + mam + rs + span
            if args.show_inner_progress and get_rank() == 0:
                inner_it.set_postfix_str(f"MLM: {mlm:.4f} MAM: {mam:.4f} R-S: {rs:.4f} SPAN: {span:.4f}")
            step(args, loss, model, optimizer, scheduler, (j + 1) % args.grad_acc == 0 or j + 1 == le)
        if get_rank() == 0 and (i + 1) % args.save_interval == 0 and args.model_save_path:
            save_path = os.path.join(args.model_save_path, f"{args.model_name}-{i + 1}")
            # Unwrap DDP/engine wrappers before saving.
            temp = model
            while hasattr(temp, "module"):
                temp = temp.module
            temp.save_pretrained(save_path)
163,702 | import json
import math
import time
import tqdm
import random
import argparse
import numpy as np
from utils import *
from modeling_spectra.model import *
from dataset import PretrainDataset, DataCollatorForPreTraining, DownstreamDataset, DataCollatorForDownstream
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.data import DataLoader, DistributedSampler, RandomSampler
from transformers import RobertaTokenizerFast, AdamW, get_scheduler
from downstream_metrics import downstream_metrics
from sklearn.metrics import accuracy_score
LABEL_NUM = {'mosi': 1, 'mosei': 1, 'mintrec': 20, 'iemocap': 6}
def step(args, loss, model, optimizer, scheduler, grad_acc_bound):
    """Backpropagate ``loss`` and, at accumulation boundaries, step the optimizer.

    Three execution modes:
      * DeepSpeed (``args.ds_config`` set): the engine owns backward/step/accumulation.
      * Apex AMP (``args.apex_level > 0``): scaled backward, clip FP32 master params.
      * Plain PyTorch: ordinary backward, clip the model's own parameters.

    Args:
        args: namespace with ds_config, grad_acc, apex_level, grad_norm.
        loss: scalar loss tensor for the current micro-batch.
        model: model (or DeepSpeed engine) being trained.
        optimizer, scheduler: optimizer / LR-scheduler pair.
        grad_acc_bound: True when this micro-batch completes an accumulation
            window and the optimizer should actually step.
    """
    if args.ds_config:
        # DeepSpeed handles scaling, accumulation and stepping internally.
        model.backward(loss)
        model.step()
        return
    # Average over the accumulation window so gradient magnitudes match a
    # single large batch.
    loss = loss / args.grad_acc
    if args.apex_level > 0:
        with amp.scale_loss(loss, optimizer) as scaled_loss:
            scaled_loss.backward()
        if args.grad_norm > 0:
            torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.grad_norm)
    else:
        loss.backward()
        if args.grad_norm > 0:
            # BUG FIX: the original clipped amp.master_params(optimizer) here,
            # but apex is not in use on this branch (apex_level <= 0), so amp
            # is uninitialized; clip the model's parameters directly.
            torch.nn.utils.clip_grad_norm_(model.parameters(), args.grad_norm)
    if grad_acc_bound:
        optimizer.step()
        scheduler.step()
        optimizer.zero_grad()
def evaluate(model, dataloader, args):
    """Run one evaluation pass over ``dataloader``.

    Returns:
        (losses, predictions, ground_truth) — losses is the per-batch loss
        list (empty entries skipped when the model returns None), predictions
        are floats for regression tasks (args.label_num == 1) and argmax class
        ids otherwise.
    """
    model.eval()
    losses = []
    predictions = []
    ground_truth = []
    with torch.no_grad():
        time.sleep(1)
        for batch in dataloader:
            batch = {k: (v.to(args.device) if v is not None else None) for k, v in batch.items()}
            logits, loss = model(batch["audio"], batch["text"], batch["aam"], batch["tam"],
                                 batch["turn_id"], batch["label"])
            if args.label_num == 1:
                # Single-output regression head: raw values are the predictions.
                outputs = logits.view(-1).cpu().detach().numpy().astype(float)
            else:
                # Classification head: take the argmax class id.
                outputs = torch.argmax(logits, dim=1).cpu().detach().numpy().astype(int)
            predictions.extend(outputs.tolist())
            ground_truth.extend(batch["label"].detach().cpu().numpy().tolist())
            if loss is not None:
                losses.append(float(loss.detach().cpu()))
    return losses, predictions, ground_truth
def configure_training_engine(args, model, config, tokenizer, train_data, valid_data=None, test_data=None):
    """Build optimizer, LR scheduler, collator and data loaders, and wrap the
    model for the selected backend (DeepSpeed, apex AMP + DDP, or plain DDP).

    Returns:
        (model, optimizer, scheduler, train_loader, valid_loader, test_loader);
        valid/test loaders are None when the corresponding dataset is None.
    """
    # Parameters whose names contain these substrings get no weight decay.
    no_decay = ['bias', 'LayerNorm.weight', 'LayerNorm.bias']
    decay = [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)]
    # NOTE: `no_decay` is rebound from a list of name fragments to a list of
    # parameters here — intentional reuse, but easy to misread.
    no_decay = [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)]
    ogp = [{"params": decay, "weight_decay": args.weight_decay}, {"params": no_decay, "weight_decay": 0.0}]
    # Total optimizer steps across all epochs, accounting for accumulation
    # and the per-rank split of the dataset.
    num_train_steps = args.epochs * math.ceil(len(train_data) / args.batch_size / args.grad_acc / dist.get_world_size())
    if args.apex_level > 0:
        optimizer = FusedAdam(ogp, lr=args.lr, bias_correction=False)
    else:
        optimizer = AdamW(ogp, lr=args.lr, eps=1e-8)
    warmup_steps = int(args.warmup * num_train_steps)
    # Cosine schedule when there is no warmup, linear decay otherwise.
    scheduler = get_scheduler("cosine" if warmup_steps == 0 else "linear", optimizer, warmup_steps, num_train_steps)
    if args.mode == "pretrain":
        c = DataCollatorForPreTraining(tokenizer, config, args.apex_level > 0)
    else:
        # MOSI/MOSEI are regression-style tasks; the collator needs to know.
        c = DataCollatorForDownstream(args.audio_length, args.task in ["mosi", "mosei"])
    if args.ds_config:
        import deepspeed
        # DeepSpeed re-wraps model/optimizer/scheduler; the returned engine
        # replaces the raw model from here on.
        model, optimizer, _, scheduler = deepspeed.initialize(model=model, optimizer=optimizer, config=args.ds_config,
                                                              lr_scheduler=scheduler, dist_init_required=True)
    else:
        model.to(args.device)
        if args.apex_level > 0:
            model, optimizer = amp.initialize(model, optimizer, opt_level=f"O{args.apex_level}",
                                              keep_batchnorm_fp32=False if args.apex_level >= 2 else None,
                                              loss_scale="dynamic" if args.loss_scale == 0. else args.loss_scale)
        model = DDP(model, find_unused_parameters=True, device_ids=[args.local_rank], output_device=[args.local_rank])
    train_loader = DataLoader(train_data, batch_size=args.batch_size, collate_fn=c, pin_memory=True, num_workers=20,
                              sampler=DistributedSampler(train_data, seed=args.seed) if args.local_rank >= 0 else RandomSampler(train_data))
    # Validation/test are evaluated with plain random sampling (no sharding).
    valid_loader = DataLoader(valid_data, batch_size=args.batch_size, collate_fn=c, pin_memory=True, num_workers=20,
                              sampler=RandomSampler(valid_data)) if valid_data else None
    test_loader = DataLoader(test_data, batch_size=args.batch_size, collate_fn=c, pin_memory=True, num_workers=20,
                             sampler=RandomSampler(test_data)) if test_data else None
    return model, optimizer, scheduler, train_loader, valid_loader, test_loader
class DownstreamDataset(Dataset):
    """Dataset for downstream fine-tuning tasks, loaded from <root>/<task>/<op>.pkl.

    When ``audio_multi_turn`` is True, the pickle is assumed to be a pair
    [audio_features, items] and each item is rewritten into
    [audio, words, label, turn_ids, prev_audio] with the previous turn's
    words/audio stitched in (item layout inferred from indexing — TODO
    confirm against the preprocessing script).
    """
    def __init__(self, root, task, op, audio_multi_turn=False):
        # The original had identical if/else branches on task == "iemocap";
        # a single load is equivalent.
        with open(f"{root}/{task}/{op}.pkl", "rb") as f:
            self.data_list = pickle.load(f)
        if audio_multi_turn:
            for i, item in enumerate(self.data_list[1]):
                if item[3] >= 0:
                    # item[3] links to the previous turn: prepend its words
                    # (turn id 0) before the current turn's words (turn id 1).
                    word = item[4] + item[1][1:]
                    turn_id = [0 for _ in item[4]] + [1 for _ in range(len(word) - len(item[4]))]
                    audio = self.data_list[0][item[3]]
                else:
                    word = item[1]
                    turn_id = [1 for _ in item[1]]
                    audio = []
                self.data_list[1][i] = [self.data_list[0][item[0]], word, item[2], turn_id, audio]
            self.data_list = self.data_list[1]

    def __len__(self):
        return len(self.data_list)

    def __getitem__(self, index):
        return self.data_list[index]
def downstream_metrics(pred_label, true_label, task):
    """Compute task-appropriate evaluation metrics.

    Args:
        pred_label, true_label: sequences of predictions / ground truth.
        task: task name; selects the metric suite.

    Returns:
        dict of metric name -> value.
    """
    preds, trues = np.array(pred_label), np.array(true_label)
    if task in ('mosi', 'mosei'):
        # Regression-style sentiment tasks use the MOSEI metric suite.
        return eval_mosei_classification(preds, trues)
    if task in ('meld', 'test', 'iemocap', 'snipsmart'):
        return {
            'accuracy': accuracy_score(trues, preds),
            'weighted f1': f1_score(trues, preds, average='weighted'),
        }
    # Remaining tasks (e.g. mintrec): 20-class metrics plus a derived binary
    # split at class id 10.
    report_metric = {
        'acc_20': accuracy_score(trues, preds),
        'f1_20': f1_score(trues, preds, average='macro'),
    }
    pred_2class = (preds > 10).astype(int)
    true_2class = (trues > 10).astype(int)
    report_metric['acc_2'] = accuracy_score(true_2class, pred_2class)
    report_metric['f1_2'] = f1_score(true_2class, pred_2class, average='macro')
    return report_metric
def finetune(args, config, tokenizer):
    """Fine-tune a pre-trained checkpoint on a downstream task with early stopping.

    Tracks several validation criteria simultaneously (neg. loss, accuracy,
    a combined score) and stops when none improves for ``args.patience``
    epochs, or when the combined score is still 0.0 at epoch 2.
    """
    model = ATForSequenceClassification.from_pretrained(args.model_path)
    train_data = DownstreamDataset(args.data_path, args.task, "train", args.multi_audio)
    valid_data = DownstreamDataset(args.data_path, args.task, "valid", args.multi_audio)
    test_data = DownstreamDataset(args.data_path, args.task, "test", args.multi_audio)
    model, optimizer, scheduler, train_loader, valid_loader, test_loader = configure_training_engine(args, model, config, tokenizer, train_data, valid_data, test_data)
    n_gpu = dist.get_world_size()
    args.label_num = LABEL_NUM[args.task]
    # MOSI/MOSEI track one extra criterion (binary accuracy).
    early_stop_metric = [-10.0, 0.0, 0.0, 0.0] if args.task in ["mosi", "mosei"] else [-10.0, 0.0, 0.0]
    # NOTE(review): `equal` is maintained below but never read afterwards.
    equal = [False for _ in early_stop_metric]
    best_epoch = 0
    if args.cl_mode == "step":
        # Convert curriculum-learning epochs to step counts.
        args.cl_steps = args.cl_steps * len(train_loader)
    for epoch in range(args.epochs):
        model.train()
        epoch_train_loss = []
        time.sleep(1)
        if isinstance(train_loader.sampler, DistributedSampler):
            train_loader.sampler.set_epoch(epoch)
        train_it = train_loader if args.dont_show else tqdm.tqdm(train_loader)
        le = len(train_it)
        for (count, batch) in enumerate(train_it):
            batch = {k: (v.to(args.device) if v is not None else None) for k, v in batch.items()}
            _, loss = model(batch["audio"], batch["text"], batch["aam"], batch["tam"], batch["turn_id"], batch["label"])
            if n_gpu <= 1:
                epoch_train_loss.append(float(loss.detach().cpu()))
                if not args.dont_show:
                    train_it.set_postfix_str(f"loss: {loss:.4f}")
            # Step only at accumulation boundaries or on the last batch.
            step(args, loss, model, optimizer, scheduler, (count + 1) % args.grad_acc == 0 or count + 1 == le)
        if not args.dont_show and n_gpu <= 1:
            print(f"Epoch {epoch:03d} average loss {torch.mean(torch.tensor(epoch_train_loss)):.4f}")
        epoch_val_loss, pred_y, true_y = evaluate(model, valid_loader, args)
        average_valid_loss = torch.mean(torch.tensor(epoch_val_loss))
        if args.task in ["mosi", "mosei"]:
            # NOTE(review): expects keys "acc_a7"/"acc_a2_non0" from
            # downstream_metrics — confirm those exist for these tasks; the
            # MOSEI metric helper is defined elsewhere in the project.
            m = downstream_metrics(pred_y, true_y, args.task)
            val_acc, val_acc_2 = m["acc_a7"], m["acc_a2_non0"]
            metrics = [-average_valid_loss, val_acc, val_acc_2, val_acc * 5 - average_valid_loss]
        else:
            val_acc = accuracy_score(true_y, pred_y)
            metrics = [-average_valid_loss, val_acc, val_acc * 5 - average_valid_loss]
        # An epoch counts as "best" if ANY tracked criterion improves (ties included).
        for i in range(len(metrics)):
            if metrics[i] >= early_stop_metric[i]:
                equal[i] = (metrics[i] == early_stop_metric[i])
                early_stop_metric[i] = metrics[i]
                best_epoch = epoch
            else:
                equal[i] = False
        if get_rank() == 0:
            print(f"Epoch {epoch:03d} average valid loss {average_valid_loss:.4f} valid accuracy {val_acc:.4f}")
        # Test-set metrics are reported every epoch (model selection happens
        # via the validation criteria above).
        _, pred_y, true_y = evaluate(model, test_loader, args)
        metric = downstream_metrics(pred_y, true_y, args.task)
        if get_rank() == 0:
            print("Test Metric: {}".format(' - '.join(['{}: {:.4f}'.format(k, v) for k, v in metric.items()])))
        if epoch - best_epoch == args.patience or (early_stop_metric[-1] == 0.0 and epoch == 2):
            if get_rank() == 0:
                print(f"early stopping at {epoch + 1}")
            break
163,703 | import random
import numpy as np
from tqdm import tqdm
import torch
import torch.nn.functional as F
from sklearn.metrics import accuracy_score, f1_score
def get_centroids(embeddings):
    """Average a [speakers, utterances, dim] tensor over the utterance axis,
    yielding one centroid embedding per speaker."""
    return torch.mean(embeddings, dim=1)
def get_cossim(embeddings, centroids):
    """Build the [speakers, utterances, speakers] cosine-similarity matrix
    between each utterance embedding and every speaker centroid.

    For an utterance compared against its OWN speaker, the centroid excluding
    that utterance (from get_utterance_centroids, defined elsewhere) is used
    instead of the full centroid — this appears to follow the GE2E similarity
    matrix; confirm against the speaker-verification loss being trained.
    """
    num_utterances = embeddings.shape[1]
    utterance_centroids = get_utterance_centroids(embeddings)
    # Flatten [speakers, utterances, dim] -> [speakers*utterances, dim] so a
    # single cosine_similarity call covers every (utterance, own-centroid) pair.
    utterance_centroids_flat = utterance_centroids.view(utterance_centroids.shape[0] * utterance_centroids.shape[1], -1)
    embeddings_flat = embeddings.reshape(embeddings.shape[0] * num_utterances, -1)
    cos_same = F.cosine_similarity(embeddings_flat, utterance_centroids_flat)
    # Pair every flattened utterance with every full speaker centroid.
    centroids_expand = centroids.repeat((num_utterances * embeddings.shape[0], 1))
    embeddings_expand = embeddings_flat.unsqueeze(1).repeat(1, embeddings.shape[0], 1)
    embeddings_expand = embeddings_expand.view(embeddings_expand.shape[0] * embeddings_expand.shape[1],
                                               embeddings_expand.shape[-1])
    cos_diff = F.cosine_similarity(embeddings_expand, centroids_expand)
    cos_diff = cos_diff.view(
        embeddings.size(0),
        num_utterances,
        centroids.size(0))
    # Overwrite the diagonal (own-speaker) entries with the leave-one-out
    # similarities computed above.
    same_idx = list(range(embeddings.size(0)))
    cos_diff[same_idx, :, same_idx] = cos_same.view(embeddings.shape[0], num_utterances)
    # Small epsilon keeps downstream log/ratio computations finite.
    cos_diff = cos_diff + 1e-6
    return cos_diff
def get_eer(preds, targets, debug=False):
    """Estimate the speaker-verification Equal Error Rate by repeated sampling.

    Args:
        preds: per-utterance embedding vectors.
        targets: per-utterance speaker labels (parallel to ``preds``).
        debug: when True, print EER/threshold/FAR/FRR for each sampled batch.

    Returns:
        The EER averaged over 10 rounds of random enrollment/verification splits.
    """
    # Group embeddings by speaker.
    speaker2embeddings = {}
    for i in range(len(targets)):
        sp = targets[i]
        embedding = preds[i]
        if sp not in speaker2embeddings:
            speaker2embeddings[sp] = []
        speaker2embeddings[sp].append(embedding)
    for sp in speaker2embeddings:
        speaker2embeddings[sp] = np.stack(speaker2embeddings[sp], axis=0)
    N = 4   # speakers per trial
    M = 50  # utterances sampled per speaker (half enroll, half verify)
    avg_EER = 0
    for _ in tqdm(range(10)):
        batch_avg_EER = 0
        for batch_id, _ in enumerate(speaker2embeddings):
            # BUG FIX: random.sample() no longer accepts dict views
            # (TypeError since Python 3.11) — materialize the keys first.
            speakers = random.sample(list(speaker2embeddings.keys()), N)
            all_utterances = []
            for speaker in speakers:
                speaker_npy = np.array(speaker2embeddings[speaker])
                # Sample M utterance indices with replacement.
                utter_index = np.random.randint(0, speaker_npy.shape[0], M)
                utterance = speaker_npy[utter_index]  # [M, hidden_dim]
                all_utterances.append(utterance)
            all_utterances = np.stack(all_utterances, axis=0)  # [N, M, hidden_dim]
            all_utterances = torch.from_numpy(all_utterances)
            enrollment_embeddings, verification_embeddings = torch.split(all_utterances, int(M / 2), dim=1)
            enrollment_centroids = get_centroids(enrollment_embeddings)
            sim_matrix = get_cossim(verification_embeddings, enrollment_centroids)
            # Sweep thresholds and keep the one where FAR and FRR are closest.
            diff = 1
            EER = 0
            EER_thresh = 0
            EER_FAR = 0
            EER_FRR = 0
            for thres in [0.01 * i for i in range(101)]:
                sim_matrix_thresh = sim_matrix > thres
                # FAR: impostor similarities above threshold (off-diagonal speakers).
                FAR = (sum([sim_matrix_thresh[i].float().sum() - sim_matrix_thresh[i, :, i].float().sum() for i in
                            range(int(N))]) / (N - 1.0) / (float(M / 2)) / N)
                # FRR: genuine similarities below threshold (diagonal speaker).
                FRR = (sum([M / 2 - sim_matrix_thresh[i, :, i].float().sum() for i in range(int(N))])
                       / (float(M / 2)) / N)
                if diff > abs(FAR - FRR):
                    diff = abs(FAR - FRR)
                    EER = (FAR + FRR) / 2
                    EER_thresh = thres
                    EER_FAR = FAR
                    EER_FRR = FRR
            batch_avg_EER += EER
            if debug:
                print("\nEER : %0.2f (thres:%0.2f, FAR:%0.2f, FRR:%0.2f)" % (EER, EER_thresh, EER_FAR, EER_FRR))
        avg_EER += batch_avg_EER / (batch_id + 1)
    avg_EER = avg_EER / 10
    return avg_EER
163,704 | import math
import random
import warnings
import numpy as np
import torch
import torch.utils.checkpoint
from torch import nn
from typing import Optional, Tuple
from transformers.activations import ACT2FN
from transformers.deepspeed import is_deepspeed_zero3_enabled
from transformers.modeling_outputs import BaseModelOutput, CausalLMOutput
from transformers.modeling_utils import PreTrainedModel
from transformers.pytorch_utils import torch_int_div
from transformers import WavLMConfig
The provided code snippet includes necessary dependencies for implementing the `_compute_mask_indices` function. Write a Python function `def _compute_mask_indices(shape: Tuple[int, int], mask_prob: float, mask_length: int, attention_mask: Optional[torch.LongTensor] = None, min_masks: int = 0) -> np.ndarray` to solve the following problem:
Computes random mask spans for a given shape. Used to implement [SpecAugment: A Simple Data Augmentation Method for ASR](https://arxiv.org/abs/1904.08779). Note that this method is not optimized to run on TPU and should be run on CPU as part of the preprocessing during training. Args: shape: The shape for which to compute masks. This should be of a tuple of size 2 where the first element is the batch size and the second element is the length of the axis to span. mask_prob: The percentage of the whole axis (between 0 and 1) which will be masked. The number of independently generated mask spans of length `mask_length` is computed by `mask_prob*shape[1]/mask_length`. Note that due to overlaps, `mask_prob` is an upper bound and the actual percentage will be smaller. mask_length: size of the mask min_masks: minimum number of masked spans attention_mask: A (right-padded) attention mask which independently shortens the feature axis of each batch dimension.
Here is the function:
def _compute_mask_indices(shape: Tuple[int, int], mask_prob: float, mask_length: int,
attention_mask: Optional[torch.LongTensor] = None, min_masks: int = 0) -> np.ndarray:
"""
Computes random mask spans for a given shape. Used to implement [SpecAugment: A Simple Data Augmentation Method for
ASR](https://arxiv.org/abs/1904.08779). Note that this method is not optimized to run on TPU and should be run on
CPU as part of the preprocessing during training.
Args:
shape: The shape for which to compute masks. This should be of a tuple of size 2 where
the first element is the batch size and the second element is the length of the axis to span.
mask_prob: The percentage of the whole axis (between 0 and 1) which will be masked. The number of
independently generated mask spans of length `mask_length` is computed by
`mask_prob*shape[1]/mask_length`. Note that due to overlaps, `mask_prob` is an upper bound and the
actual percentage will be smaller.
mask_length: size of the mask
min_masks: minimum number of masked spans
attention_mask: A (right-padded) attention mask which independently shortens the feature axis of
each batch dimension.
"""
batch_size, sequence_length = shape
if mask_length < 1:
raise ValueError("`mask_length` has to be bigger than 0.")
if mask_length > sequence_length:
raise ValueError(
f"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length}"
f" and `sequence_length`: {sequence_length}`"
)
# epsilon is used for probabilistic rounding
epsilon = np.random.rand(1).item()
def compute_num_masked_span(input_length):
"""Given input length, compute how many spans should be masked"""
num_masked_span = int(mask_prob * input_length / mask_length + epsilon)
num_masked_span = max(num_masked_span, min_masks)
# make sure num masked span <= sequence_length
if num_masked_span * mask_length > sequence_length:
num_masked_span = sequence_length // mask_length
# make sure num_masked span is also <= input_length - (mask_length - 1)
if input_length - (mask_length - 1) < num_masked_span:
num_masked_span = max(input_length - (mask_length - 1), 0)
return num_masked_span
# compute number of masked spans in batch
input_lengths = (
attention_mask.sum(-1).detach().tolist()
if attention_mask is not None
else [sequence_length for _ in range(batch_size)]
)
# SpecAugment mask to fill
spec_aug_mask = np.zeros((batch_size, sequence_length), dtype=np.bool)
spec_aug_mask_idxs = []
max_num_masked_span = compute_num_masked_span(sequence_length)
if max_num_masked_span == 0:
return spec_aug_mask
for input_length in input_lengths:
# compute num of masked spans for this input
num_masked_span = compute_num_masked_span(input_length)
# get random indices to mask
spec_aug_mask_idx = np.random.choice(
np.arange(input_length - (mask_length - 1)), num_masked_span, replace=False
)
# pick first sampled index that will serve as a dummy index to pad vector
# to ensure same dimension for all batches due to probabilistic rounding
# Picking first sample just pads those vectors twice.
if len(spec_aug_mask_idx) == 0:
# this case can only happen if `input_length` is strictly smaller then
# `sequence_length` in which case the last token has to be a padding
# token which we can use as a dummy mask id
dummy_mask_idx = sequence_length - 1
else:
dummy_mask_idx = spec_aug_mask_idx[0]
spec_aug_mask_idx = np.concatenate(
[spec_aug_mask_idx, np.ones(max_num_masked_span - num_masked_span, dtype=np.int32) * dummy_mask_idx]
)
spec_aug_mask_idxs.append(spec_aug_mask_idx)
spec_aug_mask_idxs = np.array(spec_aug_mask_idxs)
# expand masked indices to masked spans
spec_aug_mask_idxs = np.broadcast_to(
spec_aug_mask_idxs[:, :, None], (batch_size, max_num_masked_span, mask_length)
)
spec_aug_mask_idxs = spec_aug_mask_idxs.reshape(batch_size, max_num_masked_span * mask_length)
# add offset to the starting indexes so that indexes now create a span
offsets = np.arange(mask_length)[None, None, :]
offsets = np.broadcast_to(offsets, (batch_size, max_num_masked_span, mask_length)).reshape(
batch_size, max_num_masked_span * mask_length
)
spec_aug_mask_idxs = spec_aug_mask_idxs + offsets
# ensure that we cannot have indices larger than sequence_length
if spec_aug_mask_idxs.max() > sequence_length - 1:
spec_aug_mask_idxs[spec_aug_mask_idxs > sequence_length - 1] = sequence_length - 1
# scatter indices to mask
np.put_along_axis(spec_aug_mask, spec_aug_mask_idxs, 1, -1)
return spec_aug_mask | Computes random mask spans for a given shape. Used to implement [SpecAugment: A Simple Data Augmentation Method for ASR](https://arxiv.org/abs/1904.08779). Note that this method is not optimized to run on TPU and should be run on CPU as part of the preprocessing during training. Args: shape: The shape for which to compute masks. This should be of a tuple of size 2 where the first element is the batch size and the second element is the length of the axis to span. mask_prob: The percentage of the whole axis (between 0 and 1) which will be masked. The number of independently generated mask spans of length `mask_length` is computed by `mask_prob*shape[1]/mask_length`. Note that due to overlaps, `mask_prob` is an upper bound and the actual percentage will be smaller. mask_length: size of the mask min_masks: minimum number of masked spans attention_mask: A (right-padded) attention mask which independently shortens the feature axis of each batch dimension. |
163,705 | import math
import random
import warnings
import numpy as np
import torch
import torch.utils.checkpoint
from torch import nn
from typing import Optional, Tuple
from transformers.activations import ACT2FN
from transformers.deepspeed import is_deepspeed_zero3_enabled
from transformers.modeling_outputs import BaseModelOutput, CausalLMOutput
from transformers.modeling_utils import PreTrainedModel
from transformers.pytorch_utils import torch_int_div
from transformers import WavLMConfig
def select_interval(audio_length, mode="round"):
    """Pick the frame positions to mask within an audio sequence.

    Relies on module-level constants MASK_CONSECUTIVE_MIN/MAX, MASK_PROPORTION
    and MASK_BLOCK defined elsewhere in the file — confirm their values there.

    Args:
        audio_length: number of frames in the sequence.
        mode: "round" pre-computes the span count then samples start positions
            (overlaps allowed); any other value (callers pass "mlm") walks the
            sequence deciding per-position whether to start a span.

    Returns:
        A flat LongTensor of every masked frame index.
    """
    mask_consecutive = random.randint(MASK_CONSECUTIVE_MIN, MASK_CONSECUTIVE_MAX)  # length of each masked span
    valid_start_max = max(audio_length - mask_consecutive - 1, 0)  # latest valid start for a span
    if mode == "round":
        # First decide how many spans to mask, then choose their positions.
        proportion = round(audio_length * MASK_PROPORTION / mask_consecutive)  # span count given span length and mask ratio
        chosen_starts = torch.randperm(valid_start_max + 1)[:proportion]  # overlapping spans are allowed
    else:
        # Don't fix the span count up front: walk the sequence and start a
        # span at each position with probability MASK_PROPORTION, skipping
        # ahead after each accepted span.
        chosen_starts = []
        i = 0
        while i < audio_length - mask_consecutive:
            r = random.random()
            if r < MASK_PROPORTION:
                chosen_starts.append(i)
                i += round(mask_consecutive * MASK_BLOCK)
            i += 1
        chosen_starts = torch.LongTensor(chosen_starts)
    # Expand each start index into the full run of consecutive frame indices.
    tiled = chosen_starts.expand(mask_consecutive, chosen_starts.size(0)).permute(1, 0)
    offset = torch.arange(mask_consecutive).expand_as(tiled)
    intervals = tiled + offset
    return intervals.view(-1)  # every masked frame position, flattened
def create_mam_samples(audio, audio_len):
    """Create masked-acoustic-modeling inputs (corrupted audio) and targets.

    Args:
        audio: batch of frame features; mutated in place to become the masked input.
        audio_len: per-example valid frame counts.

    Returns:
        (masked_audio, mask, labels) — labels is a clone of the original audio.
    """
    dtype = audio.dtype
    # Keep an untouched copy as the reconstruction target before corrupting.
    labels = audio.clone()
    masked = torch.zeros(labels.shape[:2] + (1,), dtype=torch.uint8).to(audio.device)
    for idx in range(labels.shape[0]):
        chosen_intervals = select_interval(audio_len[idx], "mlm")
        dice = np.random.uniform(0, 1, len(chosen_intervals))
        # With probability 0.8 zero the frame, 0.1 replace it with another
        # frame from the same sequence, 0.1 leave it unchanged. Audio masking
        # always covers whole frames.
        zero_intervals = torch.BoolTensor(dice < 0.8)
        zero_intervals = torch.masked_select(chosen_intervals, zero_intervals)
        rand_intervals = torch.BoolTensor((dice >= 0.8) * (dice < 0.9))
        rand_intervals = torch.masked_select(chosen_intervals, rand_intervals)
        if len(zero_intervals) > 0:
            audio[idx, zero_intervals, :] = 0
            # NOTE(review): `masked` is only flagged when some frames were
            # zeroed — if no interval fell in the zero bucket, none of this
            # example's chosen intervals are marked. Confirm this is intended.
            masked[idx, chosen_intervals, :] = 1
        if len(rand_intervals) > 0:
            random_intervals = torch.randperm(audio_len[idx])[:len(rand_intervals)]
            audio[idx, rand_intervals, :] = labels[idx, random_intervals, :]
    return audio.to(dtype=dtype), masked.to(dtype=torch.bool), labels.to(dtype=dtype)
163,706 | import os
import torch
import pickle
import torch.distributed as dist
from transformers import PretrainedConfig, WavLMConfig, RobertaConfig
def get_rank():
    """Return this process's rank in the default distributed group.

    Falls back to 0 when torch.distributed is unavailable in this build or
    has not been initialized (single-process runs).
    """
    if dist.is_available() and dist.is_initialized():
        return dist.get_rank()
    return 0
163,707 | import os
import torch
import pickle
import torch.distributed as dist
from transformers import PretrainedConfig, WavLMConfig, RobertaConfig
def read_processed_pretrain(combined_path):
    """Load pickled pre-training data.

    If ``combined_path`` is a file, unpickle and return it directly.  If it is
    a directory, walk the tree and concatenate (with ``+``) the pickles found
    in every leaf directory (directories that contain no subdirectories).
    """
    if not os.path.isdir(combined_path):
        with open(combined_path, "rb") as f:
            return pickle.load(f)
    datas = None
    for root, subdirs, files in os.walk(combined_path):
        if subdirs:
            # Only leaf directories hold data shards.
            continue
        for name in files:
            with open(os.path.join(root, name), "rb") as fp:
                shard = pickle.load(fp)
            datas = shard if datas is None else datas + shard
    return datas
163,708 | import glob
import json
import sys
import numpy as np
import re
import os
from tqdm import tqdm
def load_dataset_config(dataset_config):
    """Read a dataset-config JSON file.

    Args:
        dataset_config: path to the JSON file.

    Returns:
        (class_types, slots, label_maps) pulled from the top-level keys.
    """
    with open(dataset_config, "r", encoding='utf-8') as f:
        cfg = json.load(f)
    return cfg['class_types'], cfg['slots'], cfg['label_maps']
163,709 | import glob
import json
import sys
import numpy as np
import re
import os
from tqdm import tqdm
def tokenize(text):
    """Normalize byte-level BPE markers and split into space-separated tokens.

    '\\u0120' (the GPT-2/RoBERTa word-boundary marker) is converted back into
    a real space (dropping any literal spaces first); the text is then split
    on non-word runs, keeping punctuation as separate tokens.
    """
    if "\u0120" in text:
        text = re.sub(" ", "", text)
        text = re.sub("\u0120", " ", text)
        text = text.strip()
    pieces = (piece.strip() for piece in re.split(r"(\W+)", text))
    return ' '.join(piece for piece in pieces if len(piece) > 0)
def check_slot_inform(value_label, inform_label, label_maps):
    """Reconcile a ground-truth slot value with a system-informed value.

    Returns the ground-truth ``value_label`` when the informed value matches
    it exactly, by substring containment (via is_in_list, defined elsewhere
    in this file), or through any of the ``label_maps`` synonym variants of
    either side; otherwise returns the informed value unchanged. Branch order
    matters: the first successful match wins.
    """
    # Default: keep the informed value if nothing below matches.
    value = inform_label
    if value_label == inform_label:
        value = value_label
    elif is_in_list(inform_label, value_label):
        value = value_label
    elif is_in_list(value_label, inform_label):
        value = value_label
    elif inform_label in label_maps:
        # Try every synonym variant of the informed value.
        for inform_label_variant in label_maps[inform_label]:
            if value_label == inform_label_variant:
                value = value_label
                break
            elif is_in_list(inform_label_variant, value_label):
                value = value_label
                break
            elif is_in_list(value_label, inform_label_variant):
                value = value_label
                break
    elif value_label in label_maps:
        # Try every synonym variant of the ground-truth value.
        for value_label_variant in label_maps[value_label]:
            if value_label_variant == inform_label:
                value = value_label
                break
            elif is_in_list(inform_label, value_label_variant):
                value = value_label
                break
            elif is_in_list(value_label_variant, inform_label):
                value = value_label
                break
    return value
def get_joint_slot_correctness(fp, class_types, label_maps,
                               key_class_label_id='class_label_id',
                               key_class_prediction='class_prediction',
                               key_start_pos='start_pos',
                               key_start_prediction='start_prediction',
                               key_end_pos='end_pos',
                               key_end_prediction='end_prediction',
                               key_refer_id='refer_id',
                               key_refer_prediction='refer_prediction',
                               key_slot_groundtruth='slot_groundtruth',
                               key_slot_prediction='slot_prediction'):
    """Compute joint and per-turn correctness statistics for one slot.

    Reads a prediction JSON file at *fp* (one record per dialogue turn, keys
    selected via the ``key_*`` parameters) and accumulates, turn by turn, the
    predicted dialog-state value for the slot, then compares it to the
    ground truth, accepting any value variant listed in *label_maps*.

    Returns ``(total_correctness, val_correctness, class_correctness,
    pos_correctness, refer_correctness, confusion_matrix)`` as numpy arrays,
    followed by the per-class TP/TN/FP/FN count dicts ``c_tp, c_tn, c_fp,
    c_fn``.
    """
    with open(fp) as f:
        preds = json.load(f)
        # Per-class correctness lists; the extra last entry is the overall total.
        class_correctness = [[] for cl in range(len(class_types) + 1)]
        confusion_matrix = [[[] for cl_b in range(len(class_types))] for cl_a in range(len(class_types))]
        pos_correctness = []
        refer_correctness = []
        val_correctness = []
        total_correctness = []
        c_tp = {ct: 0 for ct in range(len(class_types))}
        c_tn = {ct: 0 for ct in range(len(class_types))}
        c_fp = {ct: 0 for ct in range(len(class_types))}
        c_fn = {ct: 0 for ct in range(len(class_types))}
        for pred in preds:
            guid = pred['guid']  # List: set_type, dialogue_idx, turn_idx
            turn_gt_class = pred[key_class_label_id]
            turn_pd_class = pred[key_class_prediction]
            gt_start_pos = pred[key_start_pos]
            pd_start_pos = pred[key_start_prediction]
            gt_end_pos = pred[key_end_pos]
            pd_end_pos = pred[key_end_prediction]
            gt_refer = pred[key_refer_id]
            pd_refer = pred[key_refer_prediction]
            gt_slot = pred[key_slot_groundtruth]
            pd_slot = pred[key_slot_prediction]
            gt_slot = tokenize(gt_slot)
            pd_slot = tokenize(pd_slot)
            # Make sure the true turn labels are contained in the prediction json file!
            joint_gt_slot = gt_slot
            if guid[-1] == '0': # First turn, reset the slots
                joint_pd_slot = 'none'
            # If turn_pd_class or a value to be copied is "none", do not update the dialog state.
            if turn_pd_class == class_types.index('none'):
                pass
            elif turn_pd_class == class_types.index('dontcare'):
                joint_pd_slot = 'dontcare'
            elif turn_pd_class == class_types.index('copy_value'):
                joint_pd_slot = pd_slot
            elif 'true' in class_types and turn_pd_class == class_types.index('true'):
                joint_pd_slot = 'true'
            elif 'false' in class_types and turn_pd_class == class_types.index('false'):
                joint_pd_slot = 'false'
            elif 'refer' in class_types and turn_pd_class == class_types.index('refer'):
                # "§§" prefix — presumably marks values copied from system
                # informs; strip the marker before matching. TODO confirm
                # against the data generation code.
                if pd_slot[0:3] == "§§ ":
                    if pd_slot[3:] != 'none':
                        joint_pd_slot = check_slot_inform(joint_gt_slot, pd_slot[3:], label_maps)
                elif pd_slot[0:2] == "§§":
                    if pd_slot[2:] != 'none':
                        joint_pd_slot = check_slot_inform(joint_gt_slot, pd_slot[2:], label_maps)
                elif pd_slot != 'none':
                    joint_pd_slot = pd_slot
            elif 'inform' in class_types and turn_pd_class == class_types.index('inform'):
                if pd_slot[0:3] == "§§ ":
                    if pd_slot[3:] != 'none':
                        joint_pd_slot = check_slot_inform(joint_gt_slot, pd_slot[3:], label_maps)
                elif pd_slot[0:2] == "§§":
                    if pd_slot[2:] != 'none':
                        joint_pd_slot = check_slot_inform(joint_gt_slot, pd_slot[2:], label_maps)
                else:
                    print("ERROR: Unexpected slot value format. Aborting.")
                    exit()
            else:
                print("ERROR: Unexpected class_type. Aborting.")
                exit()
            total_correct = True
            # Check the per turn correctness of the class_type prediction
            if turn_gt_class == turn_pd_class:
                class_correctness[turn_gt_class].append(1.0)
                class_correctness[-1].append(1.0)
                c_tp[turn_gt_class] += 1
                # Only where there is a span, we check its per turn correctness
                if turn_gt_class == class_types.index('copy_value'):
                    if gt_start_pos == pd_start_pos and gt_end_pos == pd_end_pos:
                        pos_correctness.append(1.0)
                    else:
                        pos_correctness.append(0.0)
                # Only where there is a referral, we check its per turn correctness
                if 'refer' in class_types and turn_gt_class == class_types.index('refer'):
                    if gt_refer == pd_refer:
                        refer_correctness.append(1.0)
                        print(" [%s] Correct referral: %s | %s" % (guid, gt_refer, pd_refer))
                    else:
                        refer_correctness.append(0.0)
                        print(" [%s] Incorrect referral: %s | %s" % (guid, gt_refer, pd_refer))
            else:
                if turn_gt_class == class_types.index('copy_value'):
                    pos_correctness.append(0.0)
                if 'refer' in class_types and turn_gt_class == class_types.index('refer'):
                    refer_correctness.append(0.0)
                class_correctness[turn_gt_class].append(0.0)
                class_correctness[-1].append(0.0)
                confusion_matrix[turn_gt_class][turn_pd_class].append(1.0)
                c_fn[turn_gt_class] += 1
                c_fp[turn_pd_class] += 1
                for cc in range(len(class_types)):
                    if cc != turn_gt_class and cc != turn_pd_class:
                        c_tn[cc] += 1
            # Check the joint slot correctness.
            # If the value label is not none, then we need to have a value prediction.
            # Even if the class_type is 'none', there can still be a value label,
            # it might just not be pointable in the current turn. It might however
            # be referrable and thus predicted correctly.
            if joint_gt_slot == joint_pd_slot:
                val_correctness.append(1.0)
            elif joint_gt_slot != 'none' and joint_gt_slot != 'dontcare' and joint_gt_slot != 'true' and joint_gt_slot != 'false' and joint_gt_slot in label_maps:
                # Ground truth has known variants: accept a prediction that
                # matches any of them exactly.
                no_match = True
                for variant in label_maps[joint_gt_slot]:
                    if variant == joint_pd_slot:
                        no_match = False
                        break
                if no_match:
                    val_correctness.append(0.0)
                    total_correct = False
                    # print(" [%s] Incorrect value (variant): %s (turn class: %s) | %s (turn class: %s)" % (guid, joint_gt_slot, turn_gt_class, joint_pd_slot, turn_pd_class))
                else:
                    val_correctness.append(1.0)
            else:
                val_correctness.append(0.0)
                total_correct = False
                # print(" [%s] Incorrect value: %s (turn class: %s) | %s (turn class: %s)" % (guid, joint_gt_slot, turn_gt_class, joint_pd_slot, turn_pd_class))
            total_correctness.append(1.0 if total_correct else 0.0)
        # Account for empty lists (due to no instances of spans or referrals being seen)
        if pos_correctness == []:
            pos_correctness.append(1.0)
        if refer_correctness == []:
            refer_correctness.append(1.0)
        for ct in range(len(class_types)):
            if c_tp[ct] + c_fp[ct] > 0:
                precision = c_tp[ct] / (c_tp[ct] + c_fp[ct])
            else:
                precision = 1.0
            if c_tp[ct] + c_fn[ct] > 0:
                recall = c_tp[ct] / (c_tp[ct] + c_fn[ct])
            else:
                recall = 1.0
            if precision + recall > 0:
                f1 = 2 * ((precision * recall) / (precision + recall))
            else:
                f1 = 1.0
            if c_tp[ct] + c_tn[ct] + c_fp[ct] + c_fn[ct] > 0:
                acc = (c_tp[ct] + c_tn[ct]) / (c_tp[ct] + c_tn[ct] + c_fp[ct] + c_fn[ct])
            else:
                acc = 1.0
            # print("Performance for class '%s' (%s): Recall: %.2f (%d of %d), Precision: %.2f, F1: %.2f, Accuracy: %.2f (TP/TN/FP/FN: %d/%d/%d/%d)" %
            # (class_types[ct], ct, recall, np.sum(class_correctness[ct]), len(class_correctness[ct]), precision, f1, acc, c_tp[ct], c_tn[ct], c_fp[ct], c_fn[ct]))
        # print("Confusion matrix:")
        # for cl in range(len(class_types)):
        # print(" %s" % (cl), end="")
        # print("")
        # for cl_a in range(len(class_types)):
        # print("%s " % (cl_a), end="")
        # for cl_b in range(len(class_types)):
        # if len(class_correctness[cl_a]) > 0:
        # print("%.2f " % (np.sum(confusion_matrix[cl_a][cl_b]) / len(class_correctness[cl_a])), end="")
        # else:
        # print("---- ", end="")
        # print("")
        return np.asarray(total_correctness), np.asarray(val_correctness), np.asarray(class_correctness), np.asarray(pos_correctness), np.asarray(refer_correctness), np.asarray(confusion_matrix), c_tp, c_tn, c_fp, c_fn
163,710 | import os
import re
import glob
import json
import math
import torch
import pickle
import random
import logging
import argparse
import numpy as np
from apex import amp
from model import DSTModel
from tqdm import tqdm, trange
from utils_dst import InputFeatures
from torch.nn.utils.rnn import pad_sequence
from tensorlistdataset import TensorListDataset
from utils import ATConfig, get_rank, EarlyStopping
from torch.utils.data.distributed import DistributedSampler
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
from transformers import WEIGHTS_NAME, RobertaTokenizerFast, WavLMConfig, RobertaConfig, Wav2Vec2Processor
from transformers import AdamW, get_linear_schedule_with_warmup, BertTokenizer
def to_list(tensor):
    """Detach *tensor* from the graph, move it to CPU, and return a nested Python list."""
    detached = tensor.detach().cpu()
    return detached.tolist()
163,711 | import os
import re
import glob
import json
import math
import torch
import pickle
import random
import logging
import argparse
import numpy as np
from apex import amp
from model import DSTModel
from tqdm import tqdm, trange
from utils_dst import InputFeatures
from torch.nn.utils.rnn import pad_sequence
from tensorlistdataset import TensorListDataset
from utils import ATConfig, get_rank, EarlyStopping
from torch.utils.data.distributed import DistributedSampler
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
from transformers import WEIGHTS_NAME, RobertaTokenizerFast, WavLMConfig, RobertaConfig, Wav2Vec2Processor
from transformers import AdamW, get_linear_schedule_with_warmup, BertTokenizer
def fetch_args():
    """Define and parse all command-line arguments for DST training/evaluation.

    Arguments are grouped into model, training, path, and other parameters;
    defaults and semantics are given inline below. Parses ``sys.argv`` and
    returns the resulting ``argparse.Namespace``.
    """
    parser = argparse.ArgumentParser()
    # model parameters
    parser.add_argument("--model", type=str)
    parser.add_argument("--pool", action='store_true')
    parser.add_argument("--hidden_size", default=768, type=int)
    parser.add_argument("--model_type", default='roberta', type=str)
    parser.add_argument("--max_token_length", default=512, type=int)
    parser.add_argument("--max_audio_length", default=320000, type=int)
    parser.add_argument("--dropout_rate", default=0.1, type=float)
    parser.add_argument("--heads_dropout", default=0.0, type=float)
    parser.add_argument("--class_loss_ratio", default=0.8, type=float)
    parser.add_argument("--no_audio", action='store_true')
    # training parameters
    parser.add_argument("--resume", action='store_true')
    parser.add_argument("--per_gpu_train_batch_size", default=1, type=int)
    parser.add_argument("--per_gpu_eval_batch_size", default=24, type=int)
    parser.add_argument("--lr", default=2e-5, type=float)
    parser.add_argument('--accum', type=int, default=2)  # gradient accumulation steps
    parser.add_argument("--weight_decay", default=0.0, type=float)
    parser.add_argument("--adam_epsilon", default=1e-8, type=float)
    parser.add_argument("--max_grad_norm", default=1.0, type=float)
    parser.add_argument("--num_train_epochs", default=12, type=int)
    parser.add_argument("--max_steps", default=-1, type=int)
    parser.add_argument("--warmup_proportion", default=0.1, type=float)
    parser.add_argument("--svd", default=0.0, type=float)
    parser.add_argument('--seed', type=int, default=3407)
    # path parameters
    parser.add_argument('--model_dir', default='./model')
    parser.add_argument("--data_dir", default='./data', type=str)
    parser.add_argument("--dataset_config", default='./data/spokenwoz.json', type=str)
    parser.add_argument("--output_dir", default='./model', type=str)
    parser.add_argument("--model_path", default="./saved_models", type=str)
    parser.add_argument("--text_path", default="./models/roberta-base", type=str)
    parser.add_argument("--audio_path", default="./models/wavlm-base-plus", type=str)
    # other parameters
    parser.add_argument('--ckpt', type=str)
    parser.add_argument("--debug", action='store_true')
    parser.add_argument('--no_amp', action='store_true')
    parser.add_argument("--evaluate", action='store_true')
    parser.add_argument("--no_cuda", action='store_true')
    parser.add_argument('--save_steps', type=int, default=200)
    parser.add_argument("--evaluate_all", action='store_true')
    parser.add_argument("--token_loss_for_nonpointable", action='store_true',
                        help="Whether the token loss for classes other than copy_value contribute towards total loss.")
    parser.add_argument("--refer_loss_for_nonpointable", action='store_true',
                        help="Whether the refer loss for classes other than refer contribute towards total loss.")
    parser.add_argument("--evaluate_during_training", action='store_true',
                        help="Rul evaluation during training at each logging step.")
    parser.add_argument("--class_aux_feats_inform", action='store_true',
                        help="Whether or not to use the identity of informed slots as auxiliary featurs for class prediction.")
    parser.add_argument("--class_aux_feats_ds", action='store_true',
                        help="Whether or not to use the identity of slots in the current dialog state as auxiliary featurs for class prediction.")
    parser.add_argument("--do_lower_case", action='store_true',
                        help="Set this flag if you are using an uncased model.")
    parser.add_argument('--logging_steps', type=int, default=10,
                        help="Log every X updates steps.")
    parser.add_argument('--save_epochs', type=int, default=0,
                        help="Save checkpoint every X epochs. Overrides --save_steps.")
    parser.add_argument("--eval_all_checkpoints", action='store_true',
                        help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number")
    parser.add_argument('--overwrite_output_dir', action='store_true',
                        help="Overwrite the content of the output directory")
    parser.add_argument('--overwrite_cache', action='store_true',
                        help="Overwrite the cached training and evaluation sets")
    parser.add_argument("--local_rank", type=int, default=-1,
                        help="local_rank for distributed training on gpus")
    parser.add_argument('--amp_opt_level', type=str, default='O1')  # apex AMP optimization level
    args = parser.parse_args()
    return args
163,712 | import os
import re
import glob
import json
import math
import torch
import pickle
import random
import logging
import argparse
import numpy as np
from apex import amp
from model import DSTModel
from tqdm import tqdm, trange
from utils_dst import InputFeatures
from torch.nn.utils.rnn import pad_sequence
from tensorlistdataset import TensorListDataset
from utils import ATConfig, get_rank, EarlyStopping
from torch.utils.data.distributed import DistributedSampler
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
from transformers import WEIGHTS_NAME, RobertaTokenizerFast, WavLMConfig, RobertaConfig, Wav2Vec2Processor
from transformers import AdamW, get_linear_schedule_with_warmup, BertTokenizer
logger = logging.getLogger(__name__)
def set_seed(args):
    """Seed the python, numpy and torch RNGs (and all CUDA devices when GPUs are in use)."""
    seed = args.seed
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(seed)
def batch_to_device(batch, device):
    """Move every element of *batch* to *device*; dict elements are moved value-wise.

    Returns the moved elements as a tuple.
    """
    moved = []
    for item in batch:
        if isinstance(item, dict):
            moved.append({key: value.to(device) for key, value in item.items()})
        else:
            moved.append(item.to(device))
    return tuple(moved)
def load_and_cache_examples(args, config, split, tokenizer, evaluate=False):
    """Load preprocessed features for *split* and build a TensorListDataset.

    Features are unpickled from
    ``{data_dir}/{split}_feature_{model_type}_nohistory.pkl``. Per-slot
    labels are converted to long tensors keyed by slot name; audio inputs
    are returned separately (as stored paths/arrays) so waveforms can be
    loaded per batch instead of up front.

    Returns ``(dataset, features, audio_inputs)``.
    """
    # In distributed runs, non-zero ranks wait here so rank 0 can build the
    # cache first; they then read the same file after the barrier.
    if args.local_rank not in [-1, 0] and not evaluate:
        torch.distributed.barrier()
    # Load data features from cache or dataset file
    cached_file = f'{args.data_dir}/{split}_feature_{args.model_type}_nohistory.pkl'
    logger.info("Loading features from cached file %s", cached_file)
    # Context manager closes the cache file promptly (the original
    # pickle.load(open(...)) leaked the handle until garbage collection).
    with open(cached_file, 'rb') as cache_fp:
        features = pickle.load(cache_fp)
    if args.local_rank == 0 and not evaluate:
        torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache
    # Convert to Tensors and build dataset
    text_inputs = torch.tensor([f.text_inputs for f in features], dtype=torch.long)
    text_masks = torch.tensor([f.text_mask for f in features], dtype=torch.long)
    # Pad role ids up to the 512-token maximum; presumably 1 is the padding
    # role id — TODO confirm against the preprocessing script.
    role_token_ids = torch.tensor([f.role_token_ids + [1]*(512-len(f.role_token_ids)) for f in features], dtype=torch.long)
    turn_ids = torch.tensor([f.turn_ids for f in features], dtype=torch.long)
    audio_inputs = [f.audio_inputs for f in features]
    f_start_pos = [f.start_pos for f in features]
    f_end_pos = [f.end_pos for f in features]
    f_inform_slot_ids = [f.inform_slot for f in features]
    f_refer_ids = [f.refer_id for f in features]
    f_diag_state = [f.diag_state for f in features]
    f_class_label_ids = [f.class_label_id for f in features]
    all_example_index = torch.arange(text_inputs.size(0), dtype=torch.long) # (0, 1, ..., b)
    # Per-slot label tensors: {slot:(b)}
    all_start_positions = {}
    all_end_positions = {}
    all_inform_slot_ids = {}
    all_refer_ids = {}
    all_diag_state = {}
    all_class_label_ids = {}
    for s in config.slot_list:
        all_start_positions[s] = torch.tensor([f[s] for f in f_start_pos], dtype=torch.long)
        all_end_positions[s] = torch.tensor([f[s] for f in f_end_pos], dtype=torch.long)
        all_inform_slot_ids[s] = torch.tensor([f[s] for f in f_inform_slot_ids], dtype=torch.long)
        all_refer_ids[s] = torch.tensor([f[s] for f in f_refer_ids], dtype=torch.long)
        all_diag_state[s] = torch.tensor([f[s] for f in f_diag_state], dtype=torch.long)
        all_class_label_ids[s] = torch.tensor([f[s] for f in f_class_label_ids], dtype=torch.long)
    dataset = TensorListDataset(text_inputs, text_masks, role_token_ids, turn_ids,
                                all_start_positions, all_end_positions,
                                all_inform_slot_ids, all_refer_ids,
                                all_diag_state, all_class_label_ids, all_example_index)
    return dataset, features, audio_inputs
The code snippet above provides the dependencies required to implement the `train` function. Write a Python function `def train(args, config, model, tokenizer, processor, continue_from_global_step=0)` that solves the following problem:
Train the model.
Here is the function:
def train(args, config, model, tokenizer, processor, continue_from_global_step=0):
    """Train the DST model.

    Builds the train dataloader from cached features, sets up AdamW with a
    linear warmup/decay schedule, optionally wraps the model with apex AMP
    and DistributedDataParallel, and runs the epoch/step loop with gradient
    accumulation. Audio is loaded from disk per batch and encoded with
    *processor*. Checkpoints are written every ``args.save_steps`` optimizer
    steps. When ``continue_from_global_step`` > 0, the scheduler is fast
    forwarded to that step before any parameter updates.

    Returns ``(global_step, tr_loss / global_step)``.
    """
    # if args.local_rank in [-1, 0]:
    #     tb_writer = SummaryWriter()
    if args.debug:
        train_dataset, train_features, train_audio = load_and_cache_examples(args, config, 'debug', tokenizer)
    else:
        train_dataset, train_features, train_audio = load_and_cache_examples(args, config, 'train', tokenizer)
    # log = {'train loss': [], 'valid loss': [], 'valid metric': []}
    args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
    train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
    # Total number of optimizer steps (accumulation shrinks it).
    t_total = len(train_dataloader) // args.accum * args.num_train_epochs
    num_warmup_steps = int(t_total * args.warmup_proportion)
    # Prepare optimizer and schedule (linear warmup and decay)
    no_decay = ['bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [
        {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
         'weight_decay': args.weight_decay},
        {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
    ]
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.lr, eps=args.adam_epsilon)
    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=num_warmup_steps,
                                                num_training_steps=t_total)
    if not args.no_amp:
        model, optimizer = amp.initialize(model, optimizer, opt_level=args.amp_opt_level)
    # multi-gpu training (should be after apex amp initialization)
    model_single_gpu = model  # NOTE(review): kept but never used below
    # Distributed training (should be after apex amp initialization)
    if args.local_rank != -1:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],
                                                          output_device=args.local_rank,
                                                          find_unused_parameters=True)
    # Train!
    logger.info("***** Running training *****")
    logger.info(" Num examples = %d", len(train_dataset))
    logger.info(" Num Epochs = %d", args.num_train_epochs)
    logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
    logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d",
                args.train_batch_size * args.accum * (
                    torch.distributed.get_world_size() if args.local_rank != -1 else 1))
    logger.info(" Gradient Accumulation steps = %d", args.accum)
    logger.info(" Total optimization steps = %d", t_total)
    logger.info(" Warmup steps = %d", num_warmup_steps)
    if continue_from_global_step > 0:
        logger.info("Fast forwarding to global step %d to resume training from latest checkpoint...",
                    continue_from_global_step)
    global_step = 0
    tr_loss, logging_loss = 0.0, 0.0
    model.zero_grad()
    train_iterator = trange(int(args.num_train_epochs), desc="Epoch")
    set_seed(args) # Added here for reproductibility (even between python 2 and 3)
    for epoch in train_iterator:
        epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])
        model.train()
        # NOTE(review): both counters start at 1 — presumably to avoid a
        # division by zero in the running-loss display; confirm intended.
        batch_loss = batch_step = 1
        for step, batch in enumerate(epoch_iterator):
            # Resume support: replay scheduler steps without touching weights
            # until the checkpointed global step is reached.
            if global_step < continue_from_global_step:
                if (step + 1) % args.accum == 0:
                    scheduler.step()
                    global_step += 1
                continue
            batch = batch_to_device(batch, args.device)
            # batch[-1] holds the example indices; use them to fetch the
            # matching audio file pairs (one per speaker channel).
            audio = [train_audio[i] for i in batch[-1]]
            audio_a = [np.load(args.data_dir+'/'+i[0]) for i in audio]
            audio_b = [np.load(args.data_dir+'/'+i[1]) for i in audio]
            audio_a = processor(audio_a, sampling_rate=16000, padding=True, return_attention_mask=True,
                                return_tensors="pt")
            audio_b = processor(audio_b, sampling_rate=16000, padding=True, return_attention_mask=True,
                                return_tensors="pt")
            inputs = {'text_input': batch[0],
                      'text_mask': batch[1],
                      'role_token_id': batch[2],
                      'turn_id':batch[3],
                      'audio_input': (audio_a['input_values'].to(args.device), audio_b['input_values'].to(args.device)),
                      'audio_mask':(audio_a['attention_mask'].to(args.device), audio_b['attention_mask'].to(args.device)),
                      'start_pos': batch[4],
                      'end_pos': batch[5],
                      'inform_slot_id': batch[6],
                      'refer_id': batch[7],
                      'diag_state': batch[8],
                      'class_label_id': batch[9]}
            # print(batch[-1])
            # print(audio_a, audio_b)
            outputs = model(**inputs)
            loss = outputs[0]
            if args.n_gpu > 1:
                loss = loss.mean() # mean() to average on multi-gpu parallel (not distributed) training
            if args.accum > 1:
                loss = loss / args.accum
            if not args.no_amp:
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
                torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
            else:
                loss.backward()
                torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
            tr_loss += loss.item()
            batch_loss += loss.item()
            if (step + 1) % args.accum == 0:
                optimizer.step()
                scheduler.step() # Update learning rate schedule
                model.zero_grad()
                global_step += 1
                batch_step += 1
                # Log metrics
                if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
                    print(batch_loss / batch_step)
                    # tb_writer.add_scalar('lr', scheduler.get_lr()[0], global_step)
                    # tb_writer.add_scalar('loss', (tr_loss - logging_loss) / args.logging_steps, global_step)
                    # logging_loss = tr_loss
                # Save model checkpoint
                if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:
                    output_dir = f'{args.ckpt_path}/{global_step}.pt'
                    model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training
                    torch.save(model_to_save.state_dict(), output_dir)
                    logger.info("Saving model checkpoint to %s", output_dir)
            # epoch_iterator.set_description("Epoch {:0>3d} - Loss {:.4f} - Step {:}".format(epoch, batch_loss / batch_step, global_step))
        train_iterator.set_description("Epoch {:0>3d} - Loss {:.4f} - Step {:}".format(epoch, batch_loss / batch_step, global_step))
    return global_step, tr_loss / global_step
163,713 | import os
import re
import glob
import json
import math
import torch
import pickle
import random
import logging
import argparse
import numpy as np
from apex import amp
from model import DSTModel
from tqdm import tqdm, trange
from utils_dst import InputFeatures
from torch.nn.utils.rnn import pad_sequence
from tensorlistdataset import TensorListDataset
from utils import ATConfig, get_rank, EarlyStopping
from torch.utils.data.distributed import DistributedSampler
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
from transformers import WEIGHTS_NAME, RobertaTokenizerFast, WavLMConfig, RobertaConfig, Wav2Vec2Processor
from transformers import AdamW, get_linear_schedule_with_warmup, BertTokenizer
logger = logging.getLogger(__name__)
def batch_to_device(batch, device):
    """Return *batch* as a tuple with every tensor (or dict of tensors) moved to *device*."""
    def _move(element):
        if isinstance(element, dict):
            return {name: tensor.to(device) for name, tensor in element.items()}
        return element.to(device)
    return tuple(_move(element) for element in batch)
def predict_and_format(args, model, tokenizer, features, per_slot_class_logits, per_slot_start_logits, per_slot_end_logits,
                       per_slot_refer_logits, ids, input_ids_unmasked, values, inform, prefix, ds):
    """Decode per-slot logits into predictions and update the dialog state.

    For each example in the batch, records per-slot class/span/refer
    predictions and labels, and maintains a running dialog state *ds* that is
    reset whenever a first dialogue turn (turn index 0 in the guid) is seen.
    Span values are recovered from the unmasked input ids, merging RoBERTa
    ('Ġ'-prefixed) or WordPiece ('##'-prefixed) subwords back into words.
    Referral slots are resolved last, after all other slot values are known.

    Returns ``(prediction_list, dialog_state)``.
    """
    prediction_list = []
    dialog_state = ds
    for i in range(len(ids)):
        # guid format: set_type-dialogue_idx-turn_idx; reset state on turn 0.
        if int(ids[i].split("-")[2]) == 0:
            dialog_state = {slot: 'none' for slot in model.slot_list}
        prediction = {}
        prediction_addendum = {}
        for slot in model.slot_list:
            class_logits = per_slot_class_logits[slot][i]
            start_logits = per_slot_start_logits[slot][i]
            end_logits = per_slot_end_logits[slot][i]
            refer_logits = per_slot_refer_logits[slot][i]
            # input_ids = features['text_input'][i].tolist()
            class_label_id = int(features['class_label_id'][slot][i])
            start_pos = int(features['start_pos'][slot][i])
            end_pos = int(features['end_pos'][slot][i])
            refer_id = int(features['refer_id'][slot][i])
            class_prediction = int(class_logits.argmax())
            start_prediction = int(start_logits.argmax())
            end_prediction = int(end_logits.argmax())
            refer_prediction = int(refer_logits.argmax())
            prediction['guid'] = ids[i].split("-")
            prediction['class_prediction_%s' % slot] = class_prediction
            prediction['class_label_id_%s' % slot] = class_label_id
            prediction['start_prediction_%s' % slot] = start_prediction
            prediction['start_pos_%s' % slot] = start_pos
            prediction['end_prediction_%s' % slot] = end_prediction
            prediction['end_pos_%s' % slot] = end_pos
            prediction['refer_prediction_%s' % slot] = refer_prediction
            prediction['refer_id_%s' % slot] = refer_id
            # prediction['input_ids_%s' % slot] = input_ids
            if class_prediction == model.class_types.index('dontcare'):
                dialog_state[slot] = 'dontcare'
            elif class_prediction == model.class_types.index('copy_value'):
                # Recover the predicted span and merge subword pieces.
                pred = tokenizer.convert_ids_to_tokens(input_ids_unmasked[i])[start_prediction:end_prediction + 1]
                if args.model_type == 'roberta':
                    # RoBERTa: 'Ġ' prefix starts a new word; otherwise append
                    # the piece to the previous token.
                    tokens = []
                    for idx in range(len(pred)):
                        if pred[idx][0] == 'Ġ':
                            tokens.append(pred[idx][1:])
                        else:
                            if tokens:
                                tokens[-1] = tokens[-1]+pred[idx]
                            else:
                                tokens.append(pred[idx])
                else:
                    # WordPiece: '##' prefix continues the previous word.
                    tokens = []
                    for idx in range(len(pred)):
                        if pred[idx][0] == '#':
                            if tokens:
                                tokens[-1] = tokens[-1]+pred[idx][2:]
                            else:
                                tokens.append(pred[idx][2:])
                        else:
                            tokens.append(pred[idx])
                # print(tokens)
                # tokens = pred
                dialog_state[slot] = ' '.join(tokens)
                dialog_state[slot] = re.sub("(^| )##", "", dialog_state[slot])
            elif 'true' in model.class_types and class_prediction == model.class_types.index('true'):
                dialog_state[slot] = 'true'
            elif 'false' in model.class_types and class_prediction == model.class_types.index('false'):
                dialog_state[slot] = 'false'
            elif class_prediction == model.class_types.index('inform'):
                dialog_state[slot] = inform[i][slot]
            # Referral case is handled below
            prediction_addendum['slot_prediction_%s' % slot] = dialog_state[slot]
            prediction_addendum['slot_groundtruth_%s' % slot] = values[i][slot]
        # Referral case. All other slot values need to be seen first in order
        # to be able to do this correctly.
        for slot in model.slot_list:
            class_logits = per_slot_class_logits[slot][i]
            refer_logits = per_slot_refer_logits[slot][i]
            class_prediction = int(class_logits.argmax())
            refer_prediction = int(refer_logits.argmax())
            if 'refer' in model.class_types and class_prediction == model.class_types.index('refer'):
                # Only slots that have been mentioned before can be referred to.
                # One can think of a situation where one slot is referred to in the same utterance.
                # This phenomenon is however currently not properly covered in the training data
                # label generation process.
                dialog_state[slot] = dialog_state[model.slot_list[refer_prediction - 1]]
                prediction_addendum['slot_prediction_%s' % slot] = dialog_state[slot] # Value update
        prediction.update(prediction_addendum)
        prediction_list.append(prediction)
    return prediction_list, dialog_state
def evaluate(args, dataset, features, audio, processor, model, tokenizer, prefix=""):
args.eval_batch_size = args.per_gpu_eval_batch_size
eval_sampler = SequentialSampler(dataset) # Note that DistributedSampler samples randomly
eval_dataloader = DataLoader(dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
# Eval!
logger.info("***** Running evaluation {} *****".format(prefix))
logger.info(" Num examples = %d", len(dataset))
logger.info(" Batch size = %d", args.eval_batch_size)
all_results = []
all_preds = []
ds = {slot: 'none' for slot in model.slot_list}
with torch.no_grad():
diag_state = {slot: torch.tensor([0 for _ in range(args.eval_batch_size)]).to(args.device) for slot in
model.slot_list}
for batch in tqdm(eval_dataloader, desc="Evaluating"):
model.eval()
batch = batch_to_device(batch, args.device)
# Reset dialog state if turn is first in the dialog.
turn_itrs = [features[i.item()].guid.split('-')[2] for i in batch[-1]]
reset_diag_state = np.where(np.array(turn_itrs) == '0')[0]
for slot in model.slot_list:
for i in reset_diag_state:
diag_state[slot][i] = 0
with torch.no_grad():
all_audio = [audio[i] for i in batch[-1]]
audio_a = [np.load(args.data_dir+'/'+i[0]) for i in all_audio]
audio_b = [np.load(args.data_dir+'/'+i[1]) for i in all_audio]
audio_a = processor(audio_a, sampling_rate=16000, padding=True, return_attention_mask=True,
return_tensors="pt")
audio_b = processor(audio_b, sampling_rate=16000, padding=True, return_attention_mask=True,
return_tensors="pt")
inputs = {'text_input': batch[0],
'text_mask': batch[1],
'role_token_id': batch[2],
'turn_id':batch[3],
'audio_input': (audio_a['input_values'].to(args.device), audio_b['input_values'].to(args.device)),
'audio_mask':(audio_a['attention_mask'].to(args.device), audio_b['attention_mask'].to(args.device)),
'start_pos': batch[4],
'end_pos': batch[5],
'inform_slot_id': batch[6],
'refer_id': batch[7],
'diag_state': batch[8],
'class_label_id': batch[9]}
unique_ids = [features[i.item()].guid for i in batch[-1]]
values = [features[i.item()].values for i in batch[-1]]
input_ids_unmasked = [features[i.item()].text_inputs for i in batch[-1]]
inform = [features[i.item()].inform for i in batch[-1]]
outputs = model(**inputs)
# Update dialog state for next turn.
for slot in model.slot_list:
updates = outputs[2][slot].max(1)[1]
for i, u in enumerate(updates):
if u != 0:
diag_state[slot][i] = u
# results = eval_metric(model, inputs, outputs[0], outputs[1], outputs[2], outputs[3], outputs[4], outputs[5])
preds, ds = predict_and_format(args, model, tokenizer, inputs, outputs[2], outputs[3], outputs[4], outputs[5],
unique_ids, input_ids_unmasked, values, inform, prefix, ds)
# all_results.append(results)
all_preds.append(preds)
all_preds = [item for sublist in all_preds for item in sublist] # Flatten list
# Generate final results
# final_results = {}
# for k in all_results[0].keys():
# final_results[k] = torch.stack([r[k] for r in all_results]).mean()
# Write final predictions (for evaluation with external tool)
output_prediction_file = f"{args.pred_path}/{prefix}.json"
with open(output_prediction_file, "w") as f:
json.dump(all_preds, f, indent=2)
# return final_results | null |
163,714 | import os
import re
import glob
import json
import math
import torch
import pickle
import random
import logging
import argparse
import numpy as np
from apex import amp
from model import DSTModel
from tqdm import tqdm, trange
from utils_dst import InputFeatures
from torch.nn.utils.rnn import pad_sequence
from tensorlistdataset import TensorListDataset
from utils import ATConfig, get_rank, EarlyStopping
from torch.utils.data.distributed import DistributedSampler
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
from transformers import WEIGHTS_NAME, RobertaTokenizerFast, WavLMConfig, RobertaConfig, Wav2Vec2Processor
from transformers import AdamW, get_linear_schedule_with_warmup, BertTokenizer
def eval_metric(model, features, total_loss, per_slot_per_example_loss, per_slot_class_logits, per_slot_start_logits,
                per_slot_end_logits, per_slot_refer_logits):
    """Compute per-slot and joint-goal evaluation metrics for one batch.

    Args:
        model: DST model exposing `slot_list` and `class_types`.
        features: batch dict with per-slot gold labels under
            'class_label_id', 'start_pos', 'end_pos' and 'refer_id'.
        total_loss: scalar batch loss (stored under key 'loss').
        per_slot_*: dicts mapping slot name -> per-example losses / logits.

    Returns:
        Dict of metric name -> scalar tensor, including
        'eval_accuracy_class_<slot>', 'eval_accuracy_token_<slot>',
        'eval_accuracy_refer_<slot>', 'eval_accuracy_<slot>',
        'eval_loss_<slot>', 'eval_accuracy_goal' and 'loss'.
    """
    metric_dict = {}
    per_slot_correctness = {}
    for slot in model.slot_list:
        per_example_loss = per_slot_per_example_loss[slot]
        class_logits = per_slot_class_logits[slot]
        start_logits = per_slot_start_logits[slot]
        end_logits = per_slot_end_logits[slot]
        refer_logits = per_slot_refer_logits[slot]
        # Gold labels for this slot.
        class_label_id = features['class_label_id'][slot]
        start_pos = features['start_pos'][slot]
        end_pos = features['end_pos'][slot]
        refer_id = features['refer_id'][slot]
        _, class_prediction = class_logits.max(1)
        class_correctness = torch.eq(class_prediction, class_label_id).float()
        class_accuracy = class_correctness.mean()
        # "is pointable" means whether class label is "copy_value",
        # i.e., that there is a span to be detected.
        token_is_pointable = torch.eq(class_label_id, model.class_types.index('copy_value')).float()
        _, start_prediction = start_logits.max(1)
        start_correctness = torch.eq(start_prediction, start_pos).float()
        _, end_prediction = end_logits.max(1)
        end_correctness = torch.eq(end_prediction, end_pos).float()
        # A span prediction counts only when both boundaries are correct.
        token_correctness = start_correctness * end_correctness
        token_accuracy = (token_correctness * token_is_pointable).sum() / token_is_pointable.sum()
        # NaNs mean that none of the examples in this batch contain spans. -> division by 0
        # The accuracy therefore is 1 by default. -> replace NaNs
        if math.isnan(token_accuracy):
            token_accuracy = torch.tensor(1.0, device=token_accuracy.device)
        # If 'refer' is absent from class_types, index -1 matches no label.
        token_is_referrable = torch.eq(class_label_id, model.class_types.index('refer') if 'refer' in model.class_types else -1).float()
        _, refer_prediction = refer_logits.max(1)
        refer_correctness = torch.eq(refer_prediction, refer_id).float()
        refer_accuracy = refer_correctness.sum() / token_is_referrable.sum()
        # NaNs mean that none of the examples in this batch contain referrals. -> division by 0
        # The accuracy therefore is 1 by default. -> replace NaNs
        if math.isnan(refer_accuracy) or math.isinf(refer_accuracy):
            refer_accuracy = torch.tensor(1.0, device=refer_accuracy.device)
        # An example is fully correct when the class is right and, where
        # applicable, the detected span / referral is right too.
        total_correctness = class_correctness * (token_is_pointable * token_correctness + (1 - token_is_pointable))\
            * (token_is_referrable * refer_correctness + (1 - token_is_referrable))
        total_accuracy = total_correctness.mean()
        loss = per_example_loss.mean()
        metric_dict['eval_accuracy_class_%s' % slot] = class_accuracy
        metric_dict['eval_accuracy_token_%s' % slot] = token_accuracy
        metric_dict['eval_accuracy_refer_%s' % slot] = refer_accuracy
        metric_dict['eval_accuracy_%s' % slot] = total_accuracy
        metric_dict['eval_loss_%s' % slot] = loss
        per_slot_correctness[slot] = total_correctness
    # Joint goal accuracy: every slot of an example must be fully correct.
    goal_correctness = torch.stack([c for c in per_slot_correctness.values()], 1).prod(1)
    goal_accuracy = goal_correctness.mean()
    metric_dict['eval_accuracy_goal'] = goal_accuracy
    metric_dict['loss'] = total_loss
    return metric_dict
163,715 | import re
import os
import json
import pickle
import librosa
import argparse
import numpy as np
from tqdm import tqdm
from joblib import Parallel, delayed
from utils_dst import (DSTExample, convert_to_unicode)
def is_in_list(tok, value):
    """Return True if the word sequence of *value* occurs as a contiguous
    sub-sequence of the word sequence of *tok*.

    Both strings are split on runs of non-word characters; the captured
    separators are stripped of whitespace and empty fragments discarded
    (so bare punctuation tokens like ',' are kept, matching the original
    tokenization).  An empty *value* trivially matches.
    """
    def _tokens(text):
        # Raw string fixes the invalid '\W' escape in the original pattern.
        return [item for item in map(str.strip, re.split(r"(\W+)", text)) if len(item) > 0]

    tok_list = _tokens(tok)
    value_list = _tokens(value)
    window = len(value_list)
    # Slide a window of len(value_list) over tok_list.
    return any(tok_list[i:i + window] == value_list
               for i in range(len(tok_list) + 1 - window))
163,716 | import re
import os
import json
import pickle
import librosa
import argparse
import numpy as np
from tqdm import tqdm
from joblib import Parallel, delayed
from utils_dst import (DSTExample, convert_to_unicode)
def load_acts(input_file, data_indexs, slot_list):
def normalize_label(slot, value_label):
def get_turn_label(value_label, usr_utt_tok, slot, seen_slots, slot_last_occurrence):
def tokenize(utt):
class DSTExample(object):
def __init__(self, guid, text_a, text_b,
audio_a, audio_b, history,text_a_label, text_b_label,
history_label=None,
values=None,
inform_label=None,
inform_slot_label=None,
refer_label=None,
diag_state=None,
class_label=None):
def __str__(self):
def __repr__(self):
def create_examples(args, input_data, data_indexs, slot_list, label_maps, short=False, save_audio=False):
    """Build DSTExample objects from raw MultiWOZ-style dialogs.

    Walks each dialog in *data_indexs*, pairs every user utterance with the
    following system utterance, tracks the cumulative dialog state, and
    derives per-slot span / class / referral supervision for each turn.
    The examples are pickled to '{args.output_path}/{split}_example.pkl'.

    Args:
        args: namespace with at least audio_path, root and output_path.
        input_data: dict dialog_id -> dialog entry (with a 'log' turn list).
        data_indexs: iterable of dialog ids to process.
        slot_list: tracked slot names.
        label_maps: value-normalization maps (kept for module-level helpers).
        short: stop after 100 dialogs (debugging aid).
        save_audio: slice per-utterance audio and save it as .npy files.

    Returns:
        Average utterance duration in seconds over all processed utterances.

    NOTE(review): `label_value_repetitions` and `split` are read from module
    scope — confirm they are defined before this function runs.
    """
    sys_inform_dict = load_acts(input_data, data_indexs, slot_list)
    LABEL_MAPS, examples, samples, avg_len, utts = label_maps, [], 0, 0, 0
    audios = os.listdir(args.audio_path)
    for dialog_id in tqdm(data_indexs):
        entry = input_data[dialog_id]
        utterances = entry['log']
        # Cumulative (dialog-level) value per slot; 'none' until first mention.
        cumulative_labels = {slot: 'none' for slot in slot_list}
        utt_tok_list = []
        utt_audio_list = []
        mod_slots_list = []
        if save_audio:
            audio, _ = librosa.load(f'{args.audio_path}/{dialog_id}/speech.wav', sr=16000)
        # Dialogs must strictly alternate user/system, starting with the user.
        usr_sys_switch = True
        turn_itr = 0
        for utt in utterances:
            is_sys_utt = utt['metadata'] != {}
            if usr_sys_switch == is_sys_utt:
                print("WARN: Wrong order of system and user utterances. Skipping rest of dialog %s" % (dialog_id))
                break
            usr_sys_switch = is_sys_utt
            if is_sys_utt:
                turn_itr += 1
            # Word timings are in ms; *16 converts to 16 kHz sample indices.
            start = utt['words'][0]['BeginTime'] * 16
            speaker = 'sys' if is_sys_utt else 'usr'
            save = f'audio/{dialog_id}{turn_itr}-{speaker}.npy'
            if save_audio:
                # BUGFIX: `audio` exists only when save_audio is set, so the
                # slice must live inside this guard (previously an
                # unconditional `cur_aud = audio[...]` raised NameError when
                # save_audio was False).
                cur_aud = audio[start:utt['words'][-1]['EndTime'] * 16]
                save_path = f'{args.root}/{save}'
                np.save(save_path, cur_aud)
            utt_tok_list.append(tokenize(utt['text']))  # normalize utterances
            utt_audio_list.append(save)
            utts += 1
            avg_len += (utt['words'][-1]['EndTime'] * 16 - utt['words'][0]['BeginTime'] * 16) / 16000
            modified_slots = {}
            # If sys utt, extract metadata (identify and collect modified slots)
            if is_sys_utt:
                for d in utt['metadata']:
                    booked = utt['metadata'][d]['book']['booked']
                    booked_slots = {}
                    if booked != []:
                        for s in booked[0]:
                            booked_slots[s] = normalize_label('%s-%s' % (d, s), booked[0][s])  # normalize labels
                    # Check the semi and the inform slots
                    for category in ['book', 'semi']:
                        for s in utt['metadata'][d][category]:
                            cs = '%s-book %s' % (d, s) if category == 'book' else '%s-%s' % (d, s)
                            value_label = normalize_label(cs, utt['metadata'][d][category][s])  # normalize labels
                            # A booked value overrides the tracked value.
                            if s in booked_slots:
                                value_label = booked_slots[s]
                            if cs in slot_list and cumulative_labels[cs] != value_label:
                                modified_slots[cs] = value_label
                                cumulative_labels[cs] = value_label
                mod_slots_list.append(modified_slots.copy())
        # Second pass: pair (user, system) utterances into turns and derive
        # per-slot supervision for each turn.
        turn_itr = 0
        diag_seen_slots_dict = {}
        diag_seen_slots_value_dict = {slot: 'none' for slot in slot_list}
        diag_state = {slot: 'none' for slot in slot_list}  # state accumulated over the whole dialog
        sys_utt_tok = []
        sys_utt_aud = []
        usr_utt_tok = []
        usr_utt_aud = []
        hst_utt_tok = []
        hst_utt_aud = []
        hst_utt_tok_label_dict = {slot: [] for slot in slot_list}
        for i in range(1, len(utt_tok_list), 2):
            sys_utt_tok_label_dict = {}
            usr_utt_tok_label_dict = {}
            value_dict = {}
            inform_dict = {}
            inform_slot_dict = {}
            referral_dict = {slot: 'none' for slot in slot_list}
            class_type_dict = {}  # state updates of the current turn only
            usr_utt_tok = utt_tok_list[i - 1]
            sys_utt_tok = utt_tok_list[i]
            turn_slots = mod_slots_list[turn_itr]
            usr_utt_aud = utt_audio_list[i - 1]
            sys_utt_aud = utt_audio_list[i]
            guid = '%s-%s-%s' % ('train', str(dialog_id), str(turn_itr))
            new_hst_utt_tok = hst_utt_tok.copy()
            new_hst_utt_tok_label_dict = hst_utt_tok_label_dict.copy()
            new_hst_utt_tok += usr_utt_tok + sys_utt_tok
            new_diag_state = diag_state.copy()
            for slot in slot_list:
                value_label = 'none'
                if slot in turn_slots:
                    value_label = turn_slots[slot]
                    value_dict[slot] = value_label
                elif label_value_repetitions and slot in diag_seen_slots_dict:
                    # Carry over a previously seen value for this slot.
                    value_label = diag_seen_slots_value_dict[slot]
                # Get dialog act annotations
                informed_value = 'none'
                inform_slot_dict[slot] = 0
                if (str(dialog_id), turn_itr, slot) in sys_inform_dict and slot in turn_slots:
                    inform_slot_dict[slot] = 1
                    informed_value = normalize_label(slot, sys_inform_dict[(str(dialog_id), turn_itr, slot)])
                (referred_slot, usr_utt_tok_label, class_type) = get_turn_label(value_label, usr_utt_tok, slot,
                                                                               diag_seen_slots_value_dict,
                                                                               slot_last_occurrence=True)
                inform_dict[slot] = informed_value
                sys_utt_tok_label = [0 for _ in sys_utt_tok]
                if label_value_repetitions and slot in diag_seen_slots_dict:
                    # A value repeated across several slots cannot be pointed
                    # to unambiguously -> drop the span supervision.
                    if class_type == 'copy_value' and list(diag_seen_slots_value_dict.values()).count(value_label) > 1:
                        class_type = 'none'
                        usr_utt_tok_label = [0 for _ in usr_utt_tok_label]
                sys_utt_tok_label_dict[slot] = sys_utt_tok_label
                usr_utt_tok_label_dict[slot] = usr_utt_tok_label
                new_hst_utt_tok_label_dict[slot] = usr_utt_tok_label + sys_utt_tok_label + new_hst_utt_tok_label_dict[slot]
                if inform_slot_dict[slot]:
                    class_type_dict[slot] = 'inform'
                    class_type = 'inform'
                    referral_dict[slot] = 'none'
                elif class_type == 'unpointable':
                    class_type_dict[slot] = 'none'
                    referral_dict[slot] = 'none'
                elif slot in diag_seen_slots_dict and class_type == diag_seen_slots_dict[
                        slot] and class_type != 'copy_value' and class_type != 'inform':
                    # Unchanged non-span state: nothing to predict this turn.
                    class_type_dict[slot] = 'none'
                    referral_dict[slot] = 'none'
                else:
                    class_type_dict[slot] = class_type
                    referral_dict[slot] = referred_slot
                if class_type != 'none':
                    diag_seen_slots_dict[slot] = class_type
                    diag_seen_slots_value_dict[slot] = value_label
                    new_diag_state[slot] = class_type
                    if class_type == 'unpointable':
                        new_diag_state[slot] = 'copy_value'
            txt_a = usr_utt_tok
            txt_b = sys_utt_tok
            aud_a = usr_utt_aud
            aud_b = sys_utt_aud
            txt_a_lbl = usr_utt_tok_label_dict
            txt_b_lbl = sys_utt_tok_label_dict
            examples.append(DSTExample(
                guid=guid,
                text_a=txt_a,
                text_b=txt_b,
                audio_a=aud_a,
                audio_b=aud_b,
                history=hst_utt_tok,
                text_a_label=txt_a_lbl,
                text_b_label=txt_b_lbl,
                history_label=hst_utt_tok_label_dict,
                values=diag_seen_slots_value_dict.copy(),
                inform_label=inform_dict,
                inform_slot_label=inform_slot_dict,
                refer_label=referral_dict,
                diag_state=diag_state,
                class_label=class_type_dict))
            hst_utt_tok_label_dict = new_hst_utt_tok_label_dict.copy()
            hst_utt_tok = new_hst_utt_tok.copy()
            diag_state = new_diag_state.copy()
            turn_itr += 1
        samples += 1
        if short and samples == 100: break
    # BUGFIX: close the output file deterministically (previously a bare
    # open() whose handle was never closed).
    with open(f'{args.output_path}/{split}_example.pkl', 'wb') as out_file:
        pickle.dump(examples, out_file)
    return avg_len / utts
163,717 | import six
import json
import torch
import pickle
import logging
import argparse
import numpy as np
from tqdm import tqdm
from collections import defaultdict
from joblib import Parallel, delayed
from transformers import Wav2Vec2Processor, RobertaTokenizerFast, BertTokenizer
def _truncate_seq_pair(tokens_a, tokens_b, history, max_length):
"""Truncates a sequence pair in place to the maximum length.
Copied from bert/run_classifier.py
"""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b) + len(history)
if total_length <= max_length:
break
if len(history) > 0:
history.pop()
elif len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
def _truncate_length_and_warn(tokens_a, tokens_b, history, max_seq_length, model_specs, guid):
    """Truncate the three token lists in place to fit the model input.

    The budget is max_seq_length minus the model's special-token count
    ('TOKEN_CORRECTION': 4 for BERT's [CLS]/[SEP]s, 6 for RoBERTa's
    <s>/</s> pairs).  Returns True when truncation was actually needed.
    """
    budget = max_seq_length - model_specs['TOKEN_CORRECTION']
    input_text_too_long = len(tokens_a) + len(tokens_b) + len(history) > budget
    _truncate_seq_pair(tokens_a, tokens_b, history, budget)
    return input_text_too_long
163,718 | import six
import json
import torch
import pickle
import logging
import argparse
import numpy as np
from tqdm import tqdm
from collections import defaultdict
from joblib import Parallel, delayed
from transformers import Wav2Vec2Processor, RobertaTokenizerFast, BertTokenizer
class InputFeatures(object):
    """A single set of features of data.

    Bundles the tokenized text inputs with their masks/ids, the audio
    inputs, and the per-slot supervision targets (span positions and
    inform/refer/class/diag-state labels).
    """

    def __init__(self, text_inputs, text_mask, role_token_ids, turn_ids,
                 audio_inputs, start_pos, end_pos, values=None, inform=None,
                 inform_slot=None,
                 refer_id=None,
                 diag_state=None,
                 class_label_id=None,
                 guid="NONE"):
        # Assignment order is load-bearing: __repr__ walks __dict__ in
        # insertion order.
        fields = (("guid", guid), ("text_inputs", text_inputs),
                  ("text_mask", text_mask), ("audio_inputs", audio_inputs),
                  ("role_token_ids", role_token_ids), ("turn_ids", turn_ids),
                  ("start_pos", start_pos), ("end_pos", end_pos),
                  ("values", values), ("inform", inform),
                  ("inform_slot", inform_slot), ("refer_id", refer_id),
                  ("diag_state", diag_state),
                  ("class_label_id", class_label_id))
        for name, value in fields:
            setattr(self, name, value)

    def __repr__(self):
        # One "key : value" line per attribute.
        return ''.join(f'{k} : {v} \n' for k, v in self.__dict__.items())
def get_start_end_pos(class_type, token_label_ids, max_seq_length):
    """Derive the (start, end) token span from a 0/1 label sequence.

    When the class claims a span ('copy_value') but no token is labeled,
    the class is downgraded to 'none'.  Otherwise the span covers the
    first contiguous run of 1s; (0, 0) when nothing is labeled.
    Returns (possibly adjusted class_type, start_pos, end_pos).
    """
    has_span = 1 in token_label_ids
    if class_type == 'copy_value' and not has_span:
        print("copy_value label, but token_label not detected. Setting label to 'none'.")
        class_type = 'none'
    if not has_span:
        return class_type, 0, 0
    start_pos = token_label_ids.index(1)
    try:
        # Span ends just before the first 0 that follows the start.
        end_pos = token_label_ids.index(0, start_pos) - 1
    except ValueError:
        # The run of 1s extends to the end of the sequence.
        end_pos = len(token_label_ids) - 1
    # Sanity check: the selected span is a solid run of 1s.
    assert all(token_label_ids[i] == 1 for i in range(start_pos, end_pos + 1))
    return class_type, start_pos, end_pos
def _tokenize_text_and_label(text, text_label_dict, slot, tokenizer, model_specs, slot_value_dropout):
    """Sub-tokenize *text* and replicate each word-level label of *slot*
    across the word's sub-tokens, keeping tokens and labels aligned.

    Returns (tokens, token_labels) of equal length.
    """
    tokens, token_labels = [], []
    for word, label in zip(text, text_label_dict[slot]):
        word = convert_to_unicode(word)
        # RoBERTa's BPE is whitespace-sensitive: prefix a space so each
        # word is tokenized as word-initial.
        if model_specs['MODEL_TYPE'] == 'roberta':
            word = ' ' + word
        pieces = tokenizer.tokenize(word)  # dominant cost of this function
        tokens += pieces
        token_labels += [label] * len(pieces)
    assert len(tokens) == len(token_labels)
    return tokens, token_labels
def _get_token_label_ids(token_labels_a, token_labels_b, token_labels_history, max_seq_length, model_specs):
token_label_ids = []
token_label_ids.append(0) # [CLS]/<s>
for token_label in token_labels_a:
token_label_ids.append(token_label)
token_label_ids.append(0) # [SEP]/</s></s>
if model_specs['MODEL_TYPE'] == 'roberta':
token_label_ids.append(0)
for token_label in token_labels_b:
token_label_ids.append(token_label)
token_label_ids.append(0) # [SEP]/</s></s>
# if model_specs['MODEL_TYPE'] == 'roberta':
# token_label_ids.append(0)
# for token_label in token_labels_history:
# token_label_ids.append(token_label)
# token_label_ids.append(0) # [SEP]/</s>
while len(token_label_ids) < max_seq_length:
token_label_ids.append(0) # padding
assert len(token_label_ids) == max_seq_length
return token_label_ids
def get_transformer_input(args, tokens_a, tokens_b, history, max_seq_length, tokenizer, model_specs):
    """Assemble padded input ids, attention mask, role ids and turn ids.

    Current-turn user/system tokens get turn id 0; the (optional) history
    and all padding get turn id 1.  Role ids mark user (0) vs system (1)
    tokens of the current turn only and are NOT padded to max_seq_length.
    A separator token is always appended after clamping to 511 positions.
    """
    if model_specs['MODEL_TYPE'] == 'roberta':
        # <s> a </s></s>  b </s></s>
        tokens_a = [0] + tokenizer.convert_tokens_to_ids(tokens_a) + [2, 2]
        tokens_b = tokenizer.convert_tokens_to_ids(tokens_b) + [2, 2]
    elif model_specs['MODEL_TYPE'] == 'bert':
        # [CLS] a [SEP]  b [SEP]
        tokens_a = [101] + tokenizer.convert_tokens_to_ids(tokens_a) + [102]
        tokens_b = tokenizer.convert_tokens_to_ids(tokens_b) + [102]
    current = tokens_a + tokens_b
    if args.his:
        hist_ids = tokenizer.convert_tokens_to_ids(history)
        tokens = current + hist_ids
        turn_ids = [0] * len(current) + [1] * len(hist_ids)
    else:
        tokens = current
        turn_ids = [0] * len(current)
    # Clamp to 511 positions and force a trailing separator.
    tokens = tokens[:511] + [model_specs['SEP_TOKEN']]
    turn_ids = turn_ids[:511] + [1]
    role_token_ids = [0] * len(tokens_a) + [1] * len(tokens_b)
    input_mask = [1] * len(tokens)
    gaplen = max_seq_length - len(tokens)
    tokens = tokens + [model_specs['PAD_TOKEN']] * gaplen
    input_mask = input_mask + [0] * gaplen
    turn_ids = turn_ids + [1] * gaplen
    assert len(tokens) == len(input_mask) == len(turn_ids) == max_seq_length
    return tokens, input_mask, role_token_ids, turn_ids
def convert_examples_to_feature(args, example, slot_list, class_types, model_type, tokenizer, max_seq_length, slot_value_dropout=0.0):
    """Convert one DSTExample into a single InputFeatures instance.

    Per slot: sub-tokenizes the user/system/(optional) history text, lays
    out span labels to match the encoded input, and collects supervision
    ids (class / refer / inform / diag-state).  NOTE: mutates
    example.class_label[slot] when a 'copy_value' label has no detectable
    span (downgraded to 'none' by get_start_end_pos).
    """
    if model_type == 'roberta':
        model_specs = {'MODEL_TYPE': 'roberta',
                       'CLS_TOKEN': '<s>',
                       'UNK_TOKEN': '<unk>',
                       'SEP_TOKEN': 2,
                       'PAD_TOKEN': 1,
                       'TOKEN_CORRECTION': 6}
    elif model_type == 'bert':
        model_specs = {'MODEL_TYPE': 'bert',
                       'CLS_TOKEN': '[CLS]',
                       'UNK_TOKEN': '[UNK]',
                       'SEP_TOKEN': 102,
                       'PAD_TOKEN': 0,
                       'TOKEN_CORRECTION': 4
                       }
    # Referral target space: index 0 is 'none', then one index per slot.
    refer_list = ['none'] + slot_list
    # Convert single example
    value_dict = {}
    inform_dict = {}
    inform_slot_dict = {}
    refer_id_dict = {}
    diag_state_dict = {}
    class_label_id_dict = {}
    start_pos_dict = {}
    end_pos_dict = {}
    for slot in slot_list:
        tokens_a, token_labels_a = _tokenize_text_and_label(
            example.text_a, example.text_a_label, slot, tokenizer, model_specs, slot_value_dropout)
        tokens_b, token_labels_b = _tokenize_text_and_label(
            example.text_b, example.text_b_label, slot, tokenizer, model_specs, slot_value_dropout)
        if not args.his:
            tokens_history, token_labels_history = [], []
        else:
            tokens_history, token_labels_history = _tokenize_text_and_label(
                example.history, example.history_label, slot, tokenizer, model_specs, slot_value_dropout)
        # input_text_too_long = _truncate_length_and_warn(
        #     tokens_a, tokens_b, tokens_history, max_seq_length, model_specs, example.guid)
        # if input_text_too_long:
        #     token_labels_a = token_labels_a[:len(tokens_a)]
        #     token_labels_b = token_labels_b[:len(tokens_b)]
        #     token_labels_history = token_labels_history[:len(tokens_history)]
        assert len(token_labels_a) == len(tokens_a)
        assert len(token_labels_b) == len(tokens_b)
        assert len(token_labels_history) == len(tokens_history)
        token_label_ids = _get_token_label_ids(token_labels_a, token_labels_b, token_labels_history, max_seq_length, model_specs)
        value_dict[slot] = example.values[slot]
        inform_dict[slot] = example.inform_label[slot]
        # May downgrade 'copy_value' to 'none' when no span is labeled.
        class_label_mod, start_pos_dict[slot], end_pos_dict[slot] = get_start_end_pos(
            example.class_label[slot], token_label_ids, max_seq_length)
        if class_label_mod != example.class_label[slot]:
            example.class_label[slot] = class_label_mod
        inform_slot_dict[slot] = example.inform_slot_label[slot]
        refer_id_dict[slot] = refer_list.index(example.refer_label[slot]) if slot in example.refer_label else 0
        diag_state_dict[slot] = class_types.index(example.diag_state[slot])
        class_label_id_dict[slot] = class_types.index(example.class_label[slot])
    # NOTE(review): uses tokens_a/b/history from the LAST slot iteration —
    # fine while tokenization does not depend on the slot; confirm if
    # slot_value_dropout ever becomes slot-dependent.
    tokens, input_mask, role_token_ids, turn_ids = get_transformer_input(args, tokens_a, tokens_b,
                                                                         tokens_history, max_seq_length,
                                                                         tokenizer, model_specs)
    # audio_inputs, audio_mask, audio_sep, role_audio_ids audio_a, audio_b, max_audio_length,
    # input_ids_unmasked = tokens
    feature = InputFeatures(guid=example.guid, text_inputs=tokens, text_mask=input_mask, role_token_ids=role_token_ids,
                            turn_ids=turn_ids, audio_inputs=(example.audio_a, example.audio_b), start_pos=start_pos_dict, end_pos=end_pos_dict,
                            values=value_dict, inform=inform_dict, inform_slot=inform_slot_dict, refer_id=refer_id_dict,
                            diag_state=diag_state_dict, class_label_id=class_label_id_dict
                            )
    # print(features[-1].audio_inputs[0].shape)
    # if example_index == 3:break
    # break
    return feature
163,719 | import math
import random
import warnings
import numpy as np
import torch
import torch.utils.checkpoint
from torch import nn
from typing import Optional, Tuple
from transformers.activations import ACT2FN
from transformers.deepspeed import is_deepspeed_zero3_enabled
from transformers.modeling_outputs import BaseModelOutput, CausalLMOutput
from transformers.modeling_utils import PreTrainedModel
from transformers import WavLMConfig
def select_interval(audio_length, mode="round"):
    """Pick frame indices to mask for masked-acoustic-model training.

    NOTE(review): reads module-level constants MASK_CONSECUTIVE_MIN,
    MASK_CONSECUTIVE_MAX, MASK_PROPORTION and MASK_BLOCK defined elsewhere
    in this file — confirm their values before reuse.
    """
    mask_consecutive = random.randint(MASK_CONSECUTIVE_MIN, MASK_CONSECUTIVE_MAX)  # length of each masked span
    valid_start_max = max(audio_length - mask_consecutive - 1, 0)  # last valid start index for a span
    if mode == "round":
        # Decide the number of spans first, then sample their start positions.
        proportion = round(audio_length * MASK_PROPORTION / mask_consecutive)  # span count given the span length
        chosen_starts = torch.randperm(valid_start_max + 1)[:proportion]  # spans are allowed to overlap
    else:
        # Walk the sequence and open a span stochastically, enforcing spacing
        # of MASK_BLOCK span-lengths between successive spans.
        chosen_starts = []
        i = 0
        while i < audio_length - mask_consecutive:
            r = random.random()
            if r < MASK_PROPORTION:
                chosen_starts.append(i)
                i += round(mask_consecutive * MASK_BLOCK)
            i += 1
        chosen_starts = torch.LongTensor(chosen_starts)
    # Expand each start index into its full run of consecutive positions.
    tiled = chosen_starts.expand(mask_consecutive, chosen_starts.size(0)).permute(1, 0)
    offset = torch.arange(mask_consecutive).expand_as(tiled)
    intervals = tiled + offset
    return intervals.view(-1)  # flat tensor of all masked positions
def create_mam_samples(audio, audio_len):
    """Create masked-acoustic-model samples (corrupted input vs. targets).

    Returns (masked_audio, mask, labels): `labels` is an untouched copy of
    the input, `mask` marks the frames chosen for masking, and `audio` is
    corrupted in place at those frames.
    """
    dtype = audio.dtype
    labels = audio.clone()
    # One flag per frame, broadcastable over the feature dimension.
    masked = torch.zeros(labels.shape[:2] + (1,), dtype=torch.uint8).to(audio.device)
    for idx in range(labels.shape[0]):
        chosen_intervals = select_interval(audio_len[idx], "mlm")
        dice = np.random.uniform(0, 1, len(chosen_intervals))
        # BERT-style corruption: 80% zeroed out, 10% replaced by other frames
        # of the same sequence, 10% left unchanged.  Audio masking always
        # covers whole consecutive spans (a full "token" at a time).
        zero_intervals = torch.BoolTensor(dice < 0.8)
        zero_intervals = torch.masked_select(chosen_intervals, zero_intervals)
        rand_intervals = torch.BoolTensor((dice >= 0.8) * (dice < 0.9))
        rand_intervals = torch.masked_select(chosen_intervals, rand_intervals)
        if len(zero_intervals) > 0:
            audio[idx, zero_intervals, :] = 0
        masked[idx, chosen_intervals, :] = 1
        if len(rand_intervals) > 0:
            # Replacement frames are drawn from the valid prefix of the clip.
            random_intervals = torch.randperm(audio_len[idx])[:len(rand_intervals)]
            audio[idx, rand_intervals, :] = labels[idx, random_intervals, :]
    return audio.to(dtype=dtype), masked.to(dtype=torch.bool), labels.to(dtype=dtype)
163,720 | import os
import sys
import json
import pickle
import librosa
import argparse
import numpy as np
from transformers import RobertaTokenizerFast as RTF
# Slice budget in 0.1 s frames (minus 1 keeps slices strictly under the limit).
max_len = args.max_speech_slice_length * 10 - 1
def cut_by_limit(words):
    """Group word-timing dicts into slices no longer than max_len frames.

    Yields pairs [timings, word_list] where timings is a list of
    [start_frame, end_frame_exclusive] per word and word_list holds the
    lowercased words.  Frames are 0.1 s units; 'startTime'/'endTime' are
    strings whose last character is stripped before parsing (presumably a
    trailing unit such as 's' — TODO confirm against the input format).
    """
    cut = []
    for j, word in enumerate(words):
        st = round(float(word['startTime'][:-1]) * 10)
        et = round(float(word['endTime'][:-1]) * 10)
        # cut long words to avoid BGM in it
        if et - st > 30:
            # Words over 3 s are clipped to 30 frames; capitalized words keep
            # their tail, others their head — TODO confirm rationale.
            if word['word'][0].isupper():
                st = et - 30
            else:
                et = st + 30
        # Adding this word would exceed the budget: emit the slice and reset.
        if cut and et - cut[0][0][0] > max_len:
            yield cut
            cut = []
        if cut:
            cut[0].append([st, et + 1])
            cut[1].append(word['word'].lower())
        else:
            cut = [[[st, et + 1]], [word['word'].lower()]]
    # Flush the trailing partial slice.
    if cut:
        yield cut
163,721 | import os
import sys
import json
import pickle
import librosa
import argparse
import numpy as np
from transformers import RobertaTokenizerFast as RTF
def get_path(f):
    """Return everything before the last '/' in *f* ('' when no slash)."""
    return f.rpartition('/')[0]
163,722 | import os
import sys
import json
import pickle
import librosa
import argparse
import numpy as np
from transformers import RobertaTokenizerFast as RTF
# --- script-level setup (relies on an argparse `args` defined earlier) ---
to = RTF.from_pretrained("roberta-base")  # RoBERTa tokenizer: word -> token ids
audio_path = args.speech_dir
target = args.save_processed_speech_dir
# Samples per 0.1 s frame (e.g. 16000 Hz -> 1600) — TODO confirm units
SAMPLE_RATE = args.sample_rate // 10
datas = []     # accumulated metadata records, one per saved audio slice
audio_id = 0   # monotonically increasing id used in output file names
length = 0     # total frame length of all collected slices
def collect(c, wf, audio=None):
    """Record one word-aligned audio slice and optionally save its samples.

    *c* is a pair [timings, words] as produced by cut_by_limit: timings is a
    list of [start_frame, end_frame] per word, words the lowercased word
    list.  Appends to the module-level `datas` a record of the form
    [npy_path, token_ids, per-word alignment entries..., audio_start,
    audio_end] and advances the global counters.
    """
    global length, audio_id, target, audio_path
    audio_start, audio_end = c[0][0][0], c[0][-1][1]
    l = audio_end - audio_start
    wfn = os.path.join(wf, f"{audio_id}.npy")
    data = [wfn, []]
    for (i, word) in enumerate(c[1]):
        st, et = c[0][i]
        tids = to.encode(word)
        # encode() wraps each word with CLS/SEP; keep only the inner pieces
        # here and re-add a single CLS/SEP pair around the whole slice below.
        CLS, SEP = tids[0], tids[-1]
        # [word, first token index, last token index, start speech frame index, end speech frame index]
        data.append([word, len(data[1]) + 1, len(data[1]) + len(tids) - 1,
                     (st - audio_start) * SAMPLE_RATE, (et - audio_start) * SAMPLE_RATE])
        data[1] += tids[1:-1]
    data[1] = [CLS] + data[1] + [SEP]
    data.extend([audio_start, audio_end])
    if audio is not None:
        audio_piece = audio[c[0][0][0] * SAMPLE_RATE: c[0][-1][1] * SAMPLE_RATE]
        np.save(wfn, audio_piece)
    datas.append(data)
    length += l
    audio_id += 1
163,723 | import json
import os
import torch
import pickle
import random
import numpy as np
from torch.utils.data import Dataset
def pad(sequence, length, pad_token=0):
    """Pad or truncate a 1-D tensor to *length*.

    Returns (sequence, attention).  When padding, the tail is filled with
    pad_token and the attention tail mirrors the fill value (0 for the
    default pad token).  When truncating, integer (token-id) sequences keep
    their first element (e.g. CLS) plus the last length-1 elements, while
    float sequences are simply cut; attention is then all ones.
    """
    seq_len = sequence.shape[0]
    if length <= seq_len:
        # Truncation branch.
        if sequence.dtype == torch.long:
            # Keep the leading special token, drop from the middle.
            sequence = torch.cat([sequence[:1], sequence[1 - length:]])
        else:
            sequence = sequence[:length]
        return sequence, torch.ones_like(sequence)
    # Padding branch.
    fill = torch.ones(length - seq_len, dtype=sequence.dtype) * pad_token
    attention = torch.cat([torch.ones_like(sequence), fill])
    sequence = torch.cat([sequence, fill])
    return sequence, attention
163,724 | import json
import os
import torch
import pickle
import random
import numpy as np
from torch.utils.data import Dataset
def compute_valid(transcript, offset, length):
    """Build start/end token-validity masks and time labels from a transcript.

    Each transcript item carries, in its last four slots, the word's first
    token index, its (exclusive) last token index, and its start/end sample
    positions.  Token indices are shifted by *offset*; sample positions are
    scaled by 1/160000 and rounded to three decimals.

    Returns (start_mask, end_mask, start_labels, end_labels) where the
    masks are BoolTensors of size *length*.
    """
    start_valid = [0] * length
    end_valid = [0] * length
    start_labels, end_labels = [], []
    for item in transcript:
        start_valid[offset + item[-4]] = 1
        end_valid[offset + item[-3] - 1] = 1
        # Keep the exact f-string rounding of the original implementation.
        start_labels.append(float(f"{item[-2] / 160000:.3f}"))
        end_labels.append(float(f"{item[-1] / 160000:.3f}"))
    return torch.BoolTensor(start_valid), torch.BoolTensor(end_valid), start_labels, end_labels
163,728 | import json
import sqlite3
from nltk import word_tokenize
def tokenize(string):
def get_tables_with_alias(schema, toks):
def parse_sql(toks, start_idx, tables_with_alias, schema):
def get_sql(schema, query):
    """Parse a raw SQL *query* string into the structured sql dict used
    downstream, resolving table aliases against *schema*."""
    query_toks = tokenize(query)
    alias_map, query_toks = get_tables_with_alias(schema.schema, query_toks)
    _, parsed = parse_sql(query_toks, 0, alias_map, schema)
    return parsed
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.