id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
164,477 | import json
# Domain markers that delimit per-domain segments inside a belief-state span.
all_domain = [
    "[taxi]","[police]","[hospital]","[hotel]","[attraction]","[train]","[restaurant]",'[profile]'
]
# Informable slot names per domain.  Spellings such as 'adress' intentionally
# match the corpus annotations; 'namestr' replaces profile 'name' to avoid
# clashing with venue-name slots.
informable_slots = {'restaurant': ['people','day','time','name', 'adress', 'pricerange', 'food', 'post', 'bookpeople', 'phone', 'bookday', 'area', 'booktime'],
                    # 'profile': ['idnumber', 'name', 'email', 'platenumber', 'phonenumber'],
                    'profile': ['idnumber', 'namestr', 'email', 'platenumber', 'phonenumber'],
                    'hotel': ['people','stay','day','name', 'adress', 'pricerange', 'post', 'stars', 'parking', 'bookpeople', 'internet', 'phone', 'bookstay', 'bookday', 'area', 'type'],
                    'taxi': ['car', 'destination', 'arriveby', 'leaveat', 'phone', 'departure'],
                    'train': ['people','day','destination', 'arriveby', 'duration', 'leaveat', 'ticket', 'id', 'bookpeople', 'bookday', 'departure'],
                    # 'booking': {'name', 'bookpeople', 'bookday', 'bookstay', 'booktime'},
                    'attraction': ['name', 'adress', 'pricerange', 'post', 'fee', 'phone', 'area', 'type', 'open'],
                    'police': ['adress', 'phone'],
                    'hospital': ['adress', 'phone', 'department']}
# NOTE(review): all_reqslot and all_infslot are defined elsewhere in this
# module — confirm they are in scope before this line executes.
all_slots = all_reqslot + all_infslot
all_slots = set(all_slots)  # set for O(1) membership tests in paser_bs
The provided code snippet includes necessary dependencies for implementing the `paser_bs` function. Write a Python function `def paser_bs(sent)` to solve the following problem:
Convert compacted bs span to triple list Ex:
Here is the function:
def paser_bs(sent):
    """Convert a compacted belief-state span into a list of unique
    "domain slot value" triples.

    Ex: '<sos_b> [hotel] area west <eos_b>' -> ['[hotel] area west']

    Args:
        sent: whitespace-separated belief span, optionally wrapped in
            <sos_b> / <eos_b> marker tokens.

    Returns:
        De-duplicated list of "domain slot value" strings (order is not
        guaranteed because a set is used for de-duplication).
    """
    # BUG FIX: the original used sent.strip('<sos_b>').strip('<eos_b>'),
    # but str.strip treats its argument as a *character set*, so it could
    # also eat legitimate leading/trailing characters of real tokens
    # (e.g. 'cambridge' -> 'cambridg').  Remove the markers as whole
    # prefix/suffix tokens instead.
    sent = sent.strip()
    if sent.startswith('<sos_b>'):
        sent = sent[len('<sos_b>'):]
    if sent.endswith('<eos_b>'):
        sent = sent[:-len('<eos_b>')]
    sent = sent.split()
    belief_state = []
    # Positions of domain markers such as '[hotel]'.
    domain_idx = [idx for idx, token in enumerate(sent) if token in all_domain]
    for i, d_idx in enumerate(domain_idx):
        next_d_idx = len(sent) if i + 1 == len(domain_idx) else domain_idx[i + 1]
        domain = sent[d_idx]
        sub_span = sent[d_idx + 1:next_d_idx]
        if domain == '[profile]':
            # Profile slot names may be glued to their values in one token;
            # split such tokens into slot + remainder.
            # NOTE(review): the slice token[len(profile_slot):] assumes the
            # slot name is a *prefix* of the token — confirm upstream format.
            sub_span_temp = []
            for token in sub_span:
                flag_append = 0
                for profile_slot in informable_slots['profile']:
                    if profile_slot != token and profile_slot in token:
                        sub_span_temp.append(profile_slot)
                        sub_span_temp.append(token[len(profile_slot):])
                        flag_append = 1
                if flag_append == 0:
                    sub_span_temp.append(token)
            sub_span = sub_span_temp
        # Positions of slot names inside this domain's segment.
        sub_s_idx = [idx for idx, token in enumerate(sub_span) if token in all_slots]
        for j, s_idx in enumerate(sub_s_idx):
            next_s_idx = len(sub_span) if j == len(sub_s_idx) - 1 else sub_s_idx[j + 1]
            slot = sub_span[s_idx]
            value = ' '.join(sub_span[s_idx + 1:next_s_idx])
            belief_state.append(" ".join([domain, slot, value]))
    return list(set(belief_state))
164,478 | import json
def ignore_none(pred_belief, target_belief):
    """Drop 'none' / 'not mentioned' entries from both belief lists and
    normalise the venue name 'catherine s' -> 'catherines' in predictions.

    Args:
        pred_belief: list of predicted "domain slot value" strings.
        target_belief: list of gold "domain slot value" strings.

    Returns:
        (cleaned_pred_belief, cleaned_target_belief)
    """
    # BUG FIX: str.replace returns a new string; the original discarded the
    # result ("pred.replace(...)" on its own line), so the normalisation
    # never took effect.
    pred_belief = [
        pred.replace('catherine s', 'catherines') if 'catherine s' in pred else pred
        for pred in pred_belief
    ]
    clean_pred_belief = [bs for bs in pred_belief
                         if 'not mentioned' not in bs and 'none' not in bs]
    clean_target_belief = [bs for bs in target_belief
                           if 'not mentioned' not in bs and 'none' not in bs]
    # The original also collected 'dontcare' target slots into a local that
    # was never used; that dead code has been removed.
    return clean_pred_belief, clean_target_belief
164,479 | import json
# Mapping of common annotation typos / surface variants to canonical values,
# applied while normalising belief-state values before comparison.
GENERAL_TYPO = {
    # type
    "guesthouse":"guest house", "guesthouses":"guest house", "guest":"guest house", "mutiple sports":"multiple sports",
    "sports":"multiple sports", "mutliple sports":"multiple sports","swimmingpool":"swimming pool", "concerthall":"concert hall",
    "concert":"concert hall", "pool":"swimming pool", "night club":"nightclub", "mus":"museum", "ol":"architecture",
    "colleges":"college", "coll":"college", "architectural":"architecture", "musuem":"museum", "churches":"church",
    # area
    "center":"centre", "center of town":"centre", "near city center":"centre", "in the north":"north", "cen":"centre", "east side":"east",
    "east area":"east", "west part of town":"west", "ce":"centre", "town center":"centre", "centre of cambridge":"centre",
    "city center":"centre", "the south":"south", "scentre":"centre", "town centre":"centre", "in town":"centre", "north part of town":"north",
    "centre of town":"centre", "cb30aq": "none",
    # price
    "mode":"moderate", "moderate -ly": "moderate", "mo":"moderate",
    # day
    "next friday":"friday", "monda": "monday",
    # parking
    "free parking":"free",
    # internet
    "free internet":"yes",
    # star
    "4 star":"4", "4 stars":"4", "0 star rarting":"none",
    # others
    "y":"yes", "any":"dontcare", "n":"no", "does not care":"dontcare", "not men":"none", "not":"none", "not mentioned":"none",
    '':"none", "not mendtioned":"none", "3 .":"3", "does not":"no", "fun":"none", "art":"none",
    }
def fix_mismatch_jason(slot, value):
def default_cleaning(pred_belief, target_belief):
    """Normalise predicted and gold belief strings before comparison.

    Each entry has the form "domain slot value" (or "domain book slot value"
    for booking slots).  Values are corrected via the GENERAL_TYPO table and
    then passed through fix_mismatch_jason.

    Args:
        pred_belief: list of predicted "domain slot value" strings.
        target_belief: list of gold "domain slot value" strings.

    Returns:
        (cleaned_pred_belief, cleaned_target_belief)
    """
    def _normalize(beliefs):
        # Single pass shared by predictions and targets (the original
        # duplicated this loop body for each list).
        cleaned = []
        for bs in beliefs:
            if bs in ['', ' ']:
                continue
            tokens = bs.split()
            domain = tokens[0]
            if 'book' in bs:
                # booking entries carry a two-token slot, e.g. "book people"
                slot = ' '.join(tokens[1:3])
                val = ' '.join(tokens[3:])
            else:
                slot = tokens[1]
                val = ' '.join(tokens[2:])
            # BUG FIX: GENERAL_TYPO is keyed by typo *values* (e.g.
            # "guesthouse"), but the original looked up the *slot* name,
            # which never matched, so typos were never corrected.
            if val in GENERAL_TYPO:
                val = GENERAL_TYPO[val]
            slot, val = fix_mismatch_jason(slot, val)
            cleaned.append('{} {} {}'.format(domain, slot, val))
        return cleaned

    return _normalize(pred_belief), _normalize(target_belief)
164,480 | import json, os, re, copy, zipfile
import spacy
import space.utils.ontology as ontology
import space.utils.utils as utils
from collections import OrderedDict
from tqdm import tqdm
from config import global_config as cfg
from db_ops import MultiWozDB
from clean_dataset import clean_slot_values, clean_text
def clean_slot_values(domain, slot, value):
    """Normalise a (domain, slot, value) annotation triple.

    Applies clean_text to the value, then a long table of hand-written,
    domain-specific typo/abbreviation fixes, and finally maps the slot name
    through ontology.normlize_slot_names.

    Returns:
        (slot, value) after normalisation; value becomes '' to mean
        "drop this annotation".
    """
    value = clean_text(value)
    if not value:
        value = ''
    elif value == 'not mentioned':
        value = ''
        # value = 'not mentioned' # if in DST setting
    elif domain == 'attraction':
        if slot == 'name':
            if value == 't':
                value = ''
            if value=='trinity':
                value = 'trinity college'
        elif slot == 'area':
            if value in ['town centre', 'cent', 'center', 'ce']:
                value = 'centre'
            elif value in ['ely', 'in town', 'museum', 'norwich', 'same area as hotel']:
                value = ""
            elif value in ['we']:
                value = "west"
        elif slot == 'type':
            if value in ['m', 'mus', 'musuem']:
                value = 'museum'
            elif value in ['art', 'architectural']:
                value = "architecture"
            elif value in ['churches']:
                value = "church"
            elif value in ['coll']:
                value = "college"
            elif value in ['concert', 'concerthall']:
                value = 'concert hall'
            elif value in ['night club']:
                value = 'nightclub'
            elif value in ['mutiple sports', 'mutliple sports', 'sports', 'galleria']:
                value = 'multiple sports'
            elif value in ['ol', 'science', 'gastropub', 'la raza']:
                value = ''
            elif value in ['swimmingpool', 'pool']:
                value = 'swimming pool'
            elif value in ['fun']:
                value = 'entertainment'
    elif domain == 'hotel':
        if slot == 'area':
            if value in ['cen', 'centre of town', 'near city center', 'center']:
                value = 'centre'
            elif value in ['east area', 'east side']:
                value = 'east'
            elif value in ['in the north', 'north part of town']:
                value = 'north'
            elif value in ['we']:
                value = "west"
        elif slot == "day":
            if value == "monda":
                value = "monday"
            elif value == "t":
                value = "tuesday"
        elif slot == 'name':
            if value == 'uni':
                value = 'university arms hotel'
            elif value == 'university arms':
                value = 'university arms hotel'
            elif value == 'acron':
                value = 'acorn guest house'
            elif value == 'ashley':
                value = 'ashley hotel'
            elif value == 'arbury lodge guesthouse':
                value = 'arbury lodge guest house'
            elif value == 'la':
                value = 'la margherit'
            elif value == 'no':
                value = ''
        elif slot == 'internet':
            if value == 'does not':
                value = 'no'
            elif value in ['y', 'free', 'free internet']:
                value = 'yes'
            elif value in ['4']:
                value = ''
        elif slot == 'parking':
            if value == 'n':
                value = 'no'
            elif value in ['free parking']:
                value = 'yes'
            elif value in ['y']:
                value = 'yes'
        elif slot in ['pricerange', 'price range']:
            slot = 'pricerange'
            if value == 'moderately':
                value = 'moderate'
            elif value in ['any']:
                value = "do n't care"
            # NOTE(review): duplicate of the branch above — this arm is unreachable.
            elif value in ['any']:
                value = "do n't care"
            elif value in ['inexpensive']:
                value = "cheap"
            elif value in ['2', '4']:
                value = ''
        elif slot == 'stars':
            if value == 'two':
                value = '2'
            elif value == 'three':
                value = '3'
            elif value in ['4-star', '4 stars', '4 star', 'four star', 'four stars']:
                value= '4'
        elif slot == 'type':
            if value == '0 star rarting':
                value = ''
            elif value == 'guesthouse':
                value = 'guest house'
            elif value not in ['hotel', 'guest house', "do n't care"]:
                value = ''
    elif domain == 'restaurant':
        if slot == "area":
            if value in ["center", 'scentre', "center of town", "city center", "cb30aq", "town center", 'centre of cambridge', 'city centre']:
                value = "centre"
            elif value == "west part of town":
                value = "west"
            elif value == "n":
                value = "north"
            elif value in ['the south']:
                value = 'south'
            elif value not in ['centre', 'south', "do n't care", 'west', 'east', 'north']:
                value = ''
        elif slot == "day":
            if value == "monda":
                value = "monday"
            elif value == "t":
                value = "tuesday"
        elif slot in ['pricerange', 'price range']:
            slot = 'pricerange'
            if value in ['moderately', 'mode', 'mo']:
                value = 'moderate'
            elif value in ['not']:
                value = ''
            elif value in ['inexpensive', 'ch']:
                value = "cheap"
        elif slot == "food":
            if value == "barbecue":
                value = "barbeque"
        # NOTE(review): unreachable — 'pricerange' is already consumed by the
        # ['pricerange', 'price range'] branch above.
        elif slot == "pricerange":
            if value == "moderately":
                value = "moderate"
        elif slot == "time":
            if value == "9:00":
                value = "09:00"
            elif value == "9:45":
                value = "09:45"
            elif value == "1330":
                value = "13:30"
            elif value == "1430":
                value = "14:30"
            elif value == "9:15":
                value = "09:15"
            elif value == "9:30":
                value = "09:30"
            elif value == "1830":
                value = "18:30"
            elif value == "9":
                value = "09:00"
            elif value == "2:00":
                value = "14:00"
            elif value == "1:00":
                value = "13:00"
            elif value == "3:00":
                value = "15:00"
    elif domain == 'taxi':
        if slot in ['arriveBy', 'arrive by']:
            slot = 'arriveby'
            if value == '1530':
                value = '15:30'
            elif value == '15 minutes':
                value = ''
        elif slot in ['leaveAt', 'leave at']:
            slot = 'leaveat'
            if value == '1:00':
                value = '01:00'
            elif value == '21:4':
                value = '21:04'
            elif value == '4:15':
                value = '04:15'
            elif value == '5:45':
                value = '05:45'
            elif value == '0700':
                value = '07:00'
            elif value == '4:45':
                value = '04:45'
            elif value == '8:30':
                value = '08:30'
            elif value == '9:30':
                value = '09:30'
            # '.' as a time separator becomes ':' (only for leaveat here)
            value = value.replace(".", ":")
    elif domain == 'train':
        if slot in ['arriveBy', 'arrive by']:
            slot = 'arriveby'
            if value == '1':
                value = '01:00'
            elif value in ['does not care', 'doesnt care', "doesn't care"]:
                value = "do n't care"
            elif value == '8:30':
                value = '08:30'
            elif value == 'not 15:45':
                value = ''
            value = value.replace(".", ":")
        elif slot == 'day':
            if value =='doesnt care' or value == "doesn't care":
                value = "do n't care"
        elif slot in ['leaveAt', 'leave at']:
            slot = 'leaveat'
            if value == '2:30':
                value = '02:30'
            elif value == '7:54':
                value = '07:54'
            elif value == 'after 5:45 pm':
                value = '17:45'
            elif value in ['early evening', 'friday', 'sunday', 'tuesday', 'afternoon']:
                value = ''
            elif value == '12':
                value = '12:00'
            elif value == '1030':
                value = '10:30'
            elif value == '1700':
                value = '17:00'
            elif value in ['does not care', 'doesnt care', 'do nt care', "doesn't care"]:
                value = "do n't care"
            value = value.replace(".", ":")
    # Final catch-all normalisations applied to every domain/slot.
    if value in ['dont care', "don't care", "do nt care", "doesn't care"]:
        value = "do n't care"
    if ontology.normlize_slot_names.get(slot):
        slot = ontology.normlize_slot_names[slot]
    return slot, value
def get_db_values(value_set_path): # value_set.json, all the domain[slot] values in datasets
    """Collect and normalise all DB / ontology values.

    Builds two artifacts and writes them to disk:
      * <value_set>_processed.json — cleaned values per domain/slot;
      * bspn_word_collection.json — vocabulary of domain markers, slot
        names, and all value tokens for belief-span generation.
    """
    processed = {}
    bspn_word = []
    nlp = spacy.load('en_core_web_sm')
    with open(value_set_path, 'r') as f: # read value set file in lower
        value_set = json.loads(f.read().lower())
    with open('db/ontology.json', 'r') as f: # read ontology in lower, all the domain-slot values
        otlg = json.loads(f.read().lower())
    for domain, slots in value_set.items(): # add all informable slots to bspn_word, create lists holder for values
        processed[domain] = {}
        bspn_word.append('['+domain+']')
        for slot, values in slots.items():
            if domain == 'profile':
                # profile's 'name' is renamed to 'namestr' to match the ontology
                if slot == 'name':
                    slot = 'namestr'
                else:
                    pass
            s_p = ontology.normlize_slot_names.get(slot, slot)
            if s_p in ontology.informable_slots[domain]:
                bspn_word.append(s_p)
                processed[domain][s_p] = []
    for domain, slots in value_set.items(): # add all words of values of informable slots to bspn_word
        for slot, values in slots.items():
            if domain == 'profile':
                if slot == 'name':
                    slot = 'namestr'
                else:
                    pass
            s_p = ontology.normlize_slot_names.get(slot, slot)
            # print(s_p)
            if s_p in ontology.informable_slots[domain]:
                for v in values:
                    _, v_p = clean_slot_values(domain, slot, v)
                    # re-tokenise the cleaned value with spacy
                    v_p = ' '.join([token.text for token in nlp(v_p)]).strip()
                    processed[domain][s_p].append(v_p)
                    for x in v_p.split():
                        if x not in bspn_word:
                            bspn_word.append(x)
    for domain_slot, values in otlg.items(): # split domain-slots to domains and slots
        domain, slot = domain_slot.split('-')
        if domain == 'profile':
            # NOTE(review): both branches continue, so every profile entry in
            # the ontology is skipped here — confirm this is intended.
            if slot == 'name':
                slot = 'namestr'
                continue
            else:
                continue
        if domain == 'bus':
            domain = 'taxi'
        if slot == 'price range':
            slot = 'pricerange'
        if slot == 'book stay':
            slot = 'stay'
        if slot == 'book day':
            slot = 'day'
        if slot == 'book people':
            slot = 'people'
        if slot == 'book time':
            slot = 'time'
        if slot == 'arrive by':
            slot = 'arrive'
        if slot == 'leave at':
            slot = 'leave'
        if slot == 'leaveat':
            slot = 'leave'
        if slot not in processed[domain]: # add all slots and words of values if not already in processed and bspn_word
            processed[domain][slot] = []
            bspn_word.append(slot)
        for v in values:
            _, v_p = clean_slot_values(domain, slot, v)
            v_p = ' '.join([token.text for token in nlp(v_p)]).strip()
            if v_p not in processed[domain][slot]:
                processed[domain][slot].append(v_p)
                for x in v_p.split():
                    if x not in bspn_word:
                        bspn_word.append(x)
    with open(value_set_path.replace('.json', '_processed.json'), 'w') as f:
        json.dump(processed, f, indent=2) # save processed.json
    with open('space/data/multiwoz2.0/bspn_word_collection.json', 'w') as f:
        json.dump(bspn_word, f, indent=2) # save bspn_word
    print('DB value set processed! ')
164,481 | import json, os, re, copy, zipfile
import spacy
import space.utils.ontology as ontology
import space.utils.utils as utils
from collections import OrderedDict
from tqdm import tqdm
from config import global_config as cfg
from db_ops import MultiWozDB
from clean_dataset import clean_slot_values, clean_text
def clean_slot_values(domain, slot, value):
    """Normalise a (domain, slot, value) annotation triple.

    Applies clean_text to the value, then a long table of hand-written,
    domain-specific typo/abbreviation fixes, and finally maps the slot name
    through ontology.normlize_slot_names.

    Returns:
        (slot, value) after normalisation; value becomes '' to mean
        "drop this annotation".
    """
    value = clean_text(value)
    if not value:
        value = ''
    elif value == 'not mentioned':
        value = ''
        # value = 'not mentioned' # if in DST setting
    elif domain == 'attraction':
        if slot == 'name':
            if value == 't':
                value = ''
            if value=='trinity':
                value = 'trinity college'
        elif slot == 'area':
            if value in ['town centre', 'cent', 'center', 'ce']:
                value = 'centre'
            elif value in ['ely', 'in town', 'museum', 'norwich', 'same area as hotel']:
                value = ""
            elif value in ['we']:
                value = "west"
        elif slot == 'type':
            if value in ['m', 'mus', 'musuem']:
                value = 'museum'
            elif value in ['art', 'architectural']:
                value = "architecture"
            elif value in ['churches']:
                value = "church"
            elif value in ['coll']:
                value = "college"
            elif value in ['concert', 'concerthall']:
                value = 'concert hall'
            elif value in ['night club']:
                value = 'nightclub'
            elif value in ['mutiple sports', 'mutliple sports', 'sports', 'galleria']:
                value = 'multiple sports'
            elif value in ['ol', 'science', 'gastropub', 'la raza']:
                value = ''
            elif value in ['swimmingpool', 'pool']:
                value = 'swimming pool'
            elif value in ['fun']:
                value = 'entertainment'
    elif domain == 'hotel':
        if slot == 'area':
            if value in ['cen', 'centre of town', 'near city center', 'center']:
                value = 'centre'
            elif value in ['east area', 'east side']:
                value = 'east'
            elif value in ['in the north', 'north part of town']:
                value = 'north'
            elif value in ['we']:
                value = "west"
        elif slot == "day":
            if value == "monda":
                value = "monday"
            elif value == "t":
                value = "tuesday"
        elif slot == 'name':
            if value == 'uni':
                value = 'university arms hotel'
            elif value == 'university arms':
                value = 'university arms hotel'
            elif value == 'acron':
                value = 'acorn guest house'
            elif value == 'ashley':
                value = 'ashley hotel'
            elif value == 'arbury lodge guesthouse':
                value = 'arbury lodge guest house'
            elif value == 'la':
                value = 'la margherit'
            elif value == 'no':
                value = ''
        elif slot == 'internet':
            if value == 'does not':
                value = 'no'
            elif value in ['y', 'free', 'free internet']:
                value = 'yes'
            elif value in ['4']:
                value = ''
        elif slot == 'parking':
            if value == 'n':
                value = 'no'
            elif value in ['free parking']:
                value = 'yes'
            elif value in ['y']:
                value = 'yes'
        elif slot in ['pricerange', 'price range']:
            slot = 'pricerange'
            if value == 'moderately':
                value = 'moderate'
            elif value in ['any']:
                value = "do n't care"
            # NOTE(review): duplicate of the branch above — this arm is unreachable.
            elif value in ['any']:
                value = "do n't care"
            elif value in ['inexpensive']:
                value = "cheap"
            elif value in ['2', '4']:
                value = ''
        elif slot == 'stars':
            if value == 'two':
                value = '2'
            elif value == 'three':
                value = '3'
            elif value in ['4-star', '4 stars', '4 star', 'four star', 'four stars']:
                value= '4'
        elif slot == 'type':
            if value == '0 star rarting':
                value = ''
            elif value == 'guesthouse':
                value = 'guest house'
            elif value not in ['hotel', 'guest house', "do n't care"]:
                value = ''
    elif domain == 'restaurant':
        if slot == "area":
            if value in ["center", 'scentre', "center of town", "city center", "cb30aq", "town center", 'centre of cambridge', 'city centre']:
                value = "centre"
            elif value == "west part of town":
                value = "west"
            elif value == "n":
                value = "north"
            elif value in ['the south']:
                value = 'south'
            elif value not in ['centre', 'south', "do n't care", 'west', 'east', 'north']:
                value = ''
        elif slot == "day":
            if value == "monda":
                value = "monday"
            elif value == "t":
                value = "tuesday"
        elif slot in ['pricerange', 'price range']:
            slot = 'pricerange'
            if value in ['moderately', 'mode', 'mo']:
                value = 'moderate'
            elif value in ['not']:
                value = ''
            elif value in ['inexpensive', 'ch']:
                value = "cheap"
        elif slot == "food":
            if value == "barbecue":
                value = "barbeque"
        # NOTE(review): unreachable — 'pricerange' is already consumed by the
        # ['pricerange', 'price range'] branch above.
        elif slot == "pricerange":
            if value == "moderately":
                value = "moderate"
        elif slot == "time":
            if value == "9:00":
                value = "09:00"
            elif value == "9:45":
                value = "09:45"
            elif value == "1330":
                value = "13:30"
            elif value == "1430":
                value = "14:30"
            elif value == "9:15":
                value = "09:15"
            elif value == "9:30":
                value = "09:30"
            elif value == "1830":
                value = "18:30"
            elif value == "9":
                value = "09:00"
            elif value == "2:00":
                value = "14:00"
            elif value == "1:00":
                value = "13:00"
            elif value == "3:00":
                value = "15:00"
    elif domain == 'taxi':
        if slot in ['arriveBy', 'arrive by']:
            slot = 'arriveby'
            if value == '1530':
                value = '15:30'
            elif value == '15 minutes':
                value = ''
        elif slot in ['leaveAt', 'leave at']:
            slot = 'leaveat'
            if value == '1:00':
                value = '01:00'
            elif value == '21:4':
                value = '21:04'
            elif value == '4:15':
                value = '04:15'
            elif value == '5:45':
                value = '05:45'
            elif value == '0700':
                value = '07:00'
            elif value == '4:45':
                value = '04:45'
            elif value == '8:30':
                value = '08:30'
            elif value == '9:30':
                value = '09:30'
            # '.' as a time separator becomes ':' (only for leaveat here)
            value = value.replace(".", ":")
    elif domain == 'train':
        if slot in ['arriveBy', 'arrive by']:
            slot = 'arriveby'
            if value == '1':
                value = '01:00'
            elif value in ['does not care', 'doesnt care', "doesn't care"]:
                value = "do n't care"
            elif value == '8:30':
                value = '08:30'
            elif value == 'not 15:45':
                value = ''
            value = value.replace(".", ":")
        elif slot == 'day':
            if value =='doesnt care' or value == "doesn't care":
                value = "do n't care"
        elif slot in ['leaveAt', 'leave at']:
            slot = 'leaveat'
            if value == '2:30':
                value = '02:30'
            elif value == '7:54':
                value = '07:54'
            elif value == 'after 5:45 pm':
                value = '17:45'
            elif value in ['early evening', 'friday', 'sunday', 'tuesday', 'afternoon']:
                value = ''
            elif value == '12':
                value = '12:00'
            elif value == '1030':
                value = '10:30'
            elif value == '1700':
                value = '17:00'
            elif value in ['does not care', 'doesnt care', 'do nt care', "doesn't care"]:
                value = "do n't care"
            value = value.replace(".", ":")
    # Final catch-all normalisations applied to every domain/slot.
    if value in ['dont care', "don't care", "do nt care", "doesn't care"]:
        value = "do n't care"
    if ontology.normlize_slot_names.get(slot):
        slot = ontology.normlize_slot_names[slot]
    return slot, value
def preprocess_db(db_paths): # apply clean_slot_values to all dbs
    """Clean every domain database and write *_processed.json next to it.

    Args:
        db_paths: mapping domain -> path of that domain's db json file.
    """
    dbs = {}
    nlp = spacy.load('en_core_web_sm')
    for domain in ontology.all_domains:
        if domain != 'profile': # profile has no db file, so skip it
            with open(db_paths[domain], 'r') as f: # for every db_domain, read json file
                dbs[domain] = json.loads(f.read().lower())
            for idx, entry in enumerate(dbs[domain]): # entry has information about slots of said domain
                new_entry = copy.deepcopy(entry)
                for key, value in entry.items(): # key = slot
                    # only string-valued fields are cleaned; nested values pass through
                    if type(value) is not str:
                        continue
                    del new_entry[key]
                    key, value = clean_slot_values(domain, key, value)
                    # re-tokenise the cleaned value with spacy
                    tokenize_and_back = ' '.join([token.text for token in nlp(value)]).strip()
                    new_entry[key] = tokenize_and_back
                dbs[domain][idx] = new_entry
            with open(db_paths[domain].replace('.json', '_processed.json'), 'w') as f:
                json.dump(dbs[domain], f, indent=2)
            # print('[%s] DB processed! '%domain)
164,482 | import re
import space.utils.ontology as ontology
def my_clean_text(text):
    """Surround sentence-internal periods with spaces.

    'abc.xyz'  -> 'abc . xyz'
    'abc. xyz' -> 'abc . xyz'
    """
    # period glued between word characters: 'abc.xyz' -> 'abc . xyz'
    spaced = re.sub(r'([a-zT]+)\.([a-z])', r'\1 . \2', text)
    # period (optionally doubled) before a space: 'abc. ' -> 'abc . '
    spaced = re.sub(r'(\w+)\.\.? ', r'\1 . ', spaced)
    return spaced
164,483 | import re
import space.utils.ontology as ontology
def clean_text(text):
def clean_slot_values(domain, slot, value):
    """Normalise a (domain, slot, value) annotation triple.

    Variant that also renames the profile 'name' slot to 'namestr'.
    Applies clean_text to the value, then a long table of hand-written,
    domain-specific typo/abbreviation fixes, and finally maps the slot name
    through ontology.normlize_slot_names.

    Returns:
        (slot, value) after normalisation; value becomes '' to mean
        "drop this annotation".
    """
    value = clean_text(value)
    if not value:
        value = ''
    elif value == 'not mentioned':
        value = ''
        # value = 'not mentioned' # if in DST setting
    elif domain == 'profile':
        # profile values are kept as-is; only the slot name is normalised
        if slot == 'name':
            slot = 'namestr'
        else:
            pass
    elif domain == 'attraction':
        if slot == 'name':
            if value == 't':
                value = ''
            if value=='trinity':
                value = 'trinity college'
        elif slot == 'area':
            if value in ['town centre', 'cent', 'center', 'ce']:
                value = 'centre'
            elif value in ['ely', 'in town', 'museum', 'norwich', 'same area as hotel']:
                value = ""
            elif value in ['we']:
                value = "west"
        elif slot == 'type':
            if value in ['m', 'mus', 'musuem']:
                value = 'museum'
            elif value in ['art', 'architectural']:
                value = "architecture"
            elif value in ['churches']:
                value = "church"
            elif value in ['coll']:
                value = "college"
            elif value in ['concert', 'concerthall']:
                value = 'concert hall'
            elif value in ['night club']:
                value = 'nightclub'
            elif value in ['mutiple sports', 'mutliple sports', 'sports', 'galleria']:
                value = 'multiple sports'
            elif value in ['ol', 'science', 'gastropub', 'la raza']:
                value = ''
            elif value in ['swimmingpool', 'pool']:
                value = 'swimming pool'
            elif value in ['fun']:
                value = 'entertainment'
    elif domain == 'hotel':
        if slot == 'area':
            if value in ['cen', 'centre of town', 'near city center', 'center']:
                value = 'centre'
            elif value in ['east area', 'east side']:
                value = 'east'
            elif value in ['in the north', 'north part of town']:
                value = 'north'
            elif value in ['we']:
                value = "west"
        elif slot == "day":
            if value == "monda":
                value = "monday"
            elif value == "t":
                value = "tuesday"
        elif slot == 'name':
            if value == 'uni':
                value = 'university arms hotel'
            elif value == 'university arms':
                value = 'university arms hotel'
            elif value == 'acron':
                value = 'acorn guest house'
            elif value == 'ashley':
                value = 'ashley hotel'
            elif value == 'arbury lodge guesthouse':
                value = 'arbury lodge guest house'
            elif value == 'la':
                value = 'la margherit'
            elif value == 'no':
                value = ''
        elif slot == 'internet':
            if value == 'does not':
                value = 'no'
            elif value in ['y', 'free', 'free internet']:
                value = 'yes'
            elif value in ['4']:
                value = ''
        elif slot == 'parking':
            if value == 'n':
                value = 'no'
            elif value in ['free parking']:
                value = 'yes'
            elif value in ['y']:
                value = 'yes'
        elif slot in ['pricerange', 'price range']:
            slot = 'pricerange'
            if value == 'moderately':
                value = 'moderate'
            elif value in ['any']:
                value = "do n't care"
            # NOTE(review): duplicate of the branch above — this arm is unreachable.
            elif value in ['any']:
                value = "do n't care"
            elif value in ['inexpensive']:
                value = "cheap"
            elif value in ['2', '4']:
                value = ''
        elif slot == 'stars':
            if value == 'two':
                value = '2'
            elif value == 'three':
                value = '3'
            elif value in ['4-star', '4 stars', '4 star', 'four star', 'four stars']:
                value= '4'
        elif slot == 'type':
            if value == '0 star rarting':
                value = ''
            elif value == 'guesthouse':
                value = 'guest house'
            elif value not in ['hotel', 'guest house', "do n't care"]:
                value = ''
    elif domain == 'restaurant':
        if slot == "area":
            if value in ["center", 'scentre', "center of town", "city center", "cb30aq", "town center", 'centre of cambridge', 'city centre']:
                value = "centre"
            elif value == "west part of town":
                value = "west"
            elif value == "n":
                value = "north"
            elif value in ['the south']:
                value = 'south'
            elif value not in ['centre', 'south', "do n't care", 'west', 'east', 'north']:
                value = ''
        elif slot == "day":
            if value == "monda":
                value = "monday"
            elif value == "t":
                value = "tuesday"
        elif slot in ['pricerange', 'price range']:
            slot = 'pricerange'
            if value in ['moderately', 'mode', 'mo']:
                value = 'moderate'
            elif value in ['not']:
                value = ''
            elif value in ['inexpensive', 'ch']:
                value = "cheap"
        elif slot == "food":
            if value == "barbecue":
                value = "barbeque"
        # NOTE(review): unreachable — 'pricerange' is already consumed by the
        # ['pricerange', 'price range'] branch above.
        elif slot == "pricerange":
            if value == "moderately":
                value = "moderate"
        elif slot == "time":
            if value == "9:00":
                value = "09:00"
            elif value == "9:45":
                value = "09:45"
            elif value == "1330":
                value = "13:30"
            elif value == "1430":
                value = "14:30"
            elif value == "9:15":
                value = "09:15"
            elif value == "9:30":
                value = "09:30"
            elif value == "1830":
                value = "18:30"
            elif value == "9":
                value = "09:00"
            elif value == "2:00":
                value = "14:00"
            elif value == "1:00":
                value = "13:00"
            elif value == "3:00":
                value = "15:00"
    elif domain == 'taxi':
        if slot in ['arriveBy', 'arrive by']:
            slot = 'arriveby'
            if value == '1530':
                value = '15:30'
            elif value == '15 minutes':
                value = ''
        elif slot in ['leaveAt', 'leave at']:
            slot = 'leaveat'
            if value == '1:00':
                value = '01:00'
            elif value == '21:4':
                value = '21:04'
            elif value == '4:15':
                value = '04:15'
            elif value == '5:45':
                value = '05:45'
            elif value == '0700':
                value = '07:00'
            elif value == '4:45':
                value = '04:45'
            elif value == '8:30':
                value = '08:30'
            elif value == '9:30':
                value = '09:30'
            # '.' as a time separator becomes ':' (only for leaveat here)
            value = value.replace(".", ":")
    elif domain == 'train':
        if slot in ['arriveBy', 'arrive by']:
            slot = 'arriveby'
            if value == '1':
                value = '01:00'
            elif value in ['does not care', 'doesnt care', "doesn't care"]:
                value = "do n't care"
            elif value == '8:30':
                value = '08:30'
            elif value == 'not 15:45':
                value = ''
            value = value.replace(".", ":")
        elif slot == 'day':
            if value =='doesnt care' or value == "doesn't care":
                value = "do n't care"
        elif slot in ['leaveAt', 'leave at']:
            slot = 'leaveat'
            if value == '2:30':
                value = '02:30'
            elif value == '7:54':
                value = '07:54'
            elif value == 'after 5:45 pm':
                value = '17:45'
            elif value in ['early evening', 'friday', 'sunday', 'tuesday', 'afternoon']:
                value = ''
            elif value == '12':
                value = '12:00'
            elif value == '1030':
                value = '10:30'
            elif value == '1700':
                value = '17:00'
            elif value in ['does not care', 'doesnt care', 'do nt care', "doesn't care"]:
                value = "do n't care"
            value = value.replace(".", ":")
    # Final catch-all normalisations applied to every domain/slot.
    if value in ['dont care', "don't care", "do nt care", "doesn't care"]:
        value = "do n't care"
    if ontology.normlize_slot_names.get(slot):
        slot = ontology.normlize_slot_names[slot]
    return slot, value
164,484 | import os, json, copy, re, zipfile
from collections import OrderedDict
from space.utils.ontology import all_domains
data_path = './space/data/multiwoz2.0/'  # raw MultiWOZ 2.0 data directory (contains data.json.zip)
save_path = './space/data/multiwoz2.0/'  # where analysis() writes its statistics
save_path_exp = './space/data/multiwoz2.0/'  # extra exports (reference numbers, domain file lists)
data_file = 'data.json'  # name of the dialogue file inside the zip archive
domains = all_domains  # dialogue domains imported from the ontology
def analysis():
    """Scan the raw MultiWOZ dump and write corpus statistics to disk.

    Produces: compressed_data.json (raw data with empty book/semi states
    stripped), goal_of_each_dials.json, per-domain requestable/informable
    slot inventories, domain/dialogue-type counts, and reference numbers.
    """
    compressed_raw_data = {}
    goal_of_dials = {}
    req_slots = {}
    info_slots = {}
    dom_count = {}
    dom_fnlist = {}
    all_domain_specific_slots = set()
    for domain in domains:
        req_slots[domain] = []
        info_slots[domain] = []
    archive = zipfile.ZipFile(data_path+data_file+'.zip', 'r')
    # whole corpus is lower-cased up front
    data = archive.open(data_file, 'r').read().decode('utf-8').lower()
    ref_nos = list(set(re.findall(r'\"reference\"\: \"(\w+)\"', data)))
    data = json.loads(data)
    for fn, dial in data.items():
        goals = dial['goal']
        if 'log' in dial.keys():
            pass
        else:
            continue
        logs = dial['log']
        # get compressed_raw_data and goal_of_dials
        compressed_raw_data[fn] = {'goal': {}, 'log': []}
        goal_of_dials[fn] = {}
        for dom, goal in goals.items(): # get goal of domains that are in demmand
            # print(dom)
            if dom != 'topic' and dom != 'message' and goal:
                compressed_raw_data[fn]['goal'][dom] = goal
                goal_of_dials[fn][dom] = goal
        for turn in logs:
            if not turn['metadata']: # user's turn
                compressed_raw_data[fn]['log'].append({'text': turn['text']})
            else: # system's turn
                meta = turn['metadata']
                turn_dict = {'text': turn['text'], 'metadata': {}}
                for dom, book_semi in meta.items(): # for every domain, sys updates "book" and "semi"
                    book, semi = book_semi['book'], book_semi['semi']
                    record = False
                    for slot, value in book.items(): # record indicates non-empty-book domain
                        if value not in ['', []]:
                            record = True
                    if record:
                        turn_dict['metadata'][dom] = {}
                        turn_dict['metadata'][dom]['book'] = book # add that domain's book
                    record = False
                    for slot, value in semi.items(): # here record indicates non-empty-semi domain
                        if value not in ['', []]:
                            record = True
                            break
                    if record:
                        # drop 'not mentioned' entries in-place before saving
                        for s, v in copy.deepcopy(semi).items():
                            if v == 'not mentioned':
                                del semi[s]
                        if not turn_dict['metadata'].get(dom):
                            turn_dict['metadata'][dom] = {}
                        turn_dict['metadata'][dom]['semi'] = semi # add that domain's semi
                compressed_raw_data[fn]['log'].append(turn_dict) # add to log the compressed turn_dict
        # get domain statistics
        dial_type = 'multi' if 'mul' in fn or 'MUL' in fn else 'single' # determine the dialog's type: sinle or multi
        if fn in ['pmul2756.json', 'pmul4958.json', 'pmul3599.json']:
            # known mislabelled files: forced to single-domain
            dial_type = 'single'
        dial_domains = [dom for dom in domains if goals[dom]] # domains that are in demmand
        dom_str = ''
        for dom in dial_domains:
            if not dom_count.get(dom+'_'+dial_type): # count each domain type, with single or multi considered
                dom_count[dom+'_'+dial_type] = 1
            else:
                dom_count[dom+'_'+dial_type] += 1
            if not dom_fnlist.get(dom+'_'+dial_type): # keep track the file number of each domain type
                dom_fnlist[dom+'_'+dial_type] = [fn]
            else:
                dom_fnlist[dom+'_'+dial_type].append(fn)
            dom_str += '%s_'%dom
        dom_str = dom_str[:-1] # substract the last char in dom_str
        if dial_type=='multi': # count multi-domains
            if not dom_count.get(dom_str):
                dom_count[dom_str] = 1
            else:
                dom_count[dom_str] += 1
            if not dom_fnlist.get(dom_str):
                dom_fnlist[dom_str] = [fn]
            else:
                dom_fnlist[dom_str].append(fn)
        ######
        # get informable and requestable slots statistics
        # print(domains)
        for domain in domains:
            info_ss = goals[domain].get('info', {})
            book_ss = goals[domain].get('book', {})
            req_ss = goals[domain].get('reqt', {})
            # profile_ss = goal
            for info_s in info_ss:
                all_domain_specific_slots.add(domain+'-'+info_s)
                if info_s not in info_slots[domain]:
                    info_slots[domain]+= [info_s]
            for book_s in book_ss:
                if 'book_' + book_s not in info_slots[domain] and book_s not in ['invalid', 'pre_invalid']:
                    all_domain_specific_slots.add(domain+'-'+book_s)
                    info_slots[domain]+= ['book_' + book_s]
            for req_s in req_ss:
                if req_s not in req_slots[domain]:
                    req_slots[domain]+= [req_s]
    # result statistics
    if not os.path.exists(save_path):
        os.mkdir(save_path)
    if not os.path.exists(save_path_exp):
        os.mkdir(save_path_exp)
    with open(save_path+'req_slots.json', 'w') as sf:
        json.dump(req_slots,sf,indent=2)
    with open(save_path+'info_slots.json', 'w') as sf:
        json.dump(info_slots,sf,indent=2)
    with open(save_path+'all_domain_specific_info_slots.json', 'w') as sf:
        json.dump(list(all_domain_specific_slots),sf,indent=2)
        print("slot num:", len(list(all_domain_specific_slots)))
    with open(save_path+'goal_of_each_dials.json', 'w') as sf:
        json.dump(goal_of_dials, sf, indent=2)
    with open(save_path+'compressed_data.json', 'w') as sf:
        json.dump(compressed_raw_data, sf, indent=2)
    with open(save_path + 'domain_count.json', 'w') as sf:
        # order counts: single-domain first, then multi, then combos
        single_count = [d for d in dom_count.items() if 'single' in d[0]]
        multi_count = [d for d in dom_count.items() if 'multi' in d[0]]
        other_count = [d for d in dom_count.items() if 'multi' not in d[0] and 'single' not in d[0]]
        dom_count_od = OrderedDict(single_count+multi_count+other_count)
        json.dump(dom_count_od, sf, indent=2)
    with open(save_path_exp + 'reference_no.json', 'w') as sf:
        json.dump(ref_nos,sf,indent=2)
    with open(save_path_exp + 'domain_files.json', 'w') as sf:
        json.dump(dom_fnlist, sf, indent=2)
164,501 | import multiprocessing
import random
from itertools import chain
import os
import glob
import json
import numpy as np
import time
import re
from tqdm import tqdm
from space.args import str2bool
from space.data.tokenizer import Tokenizer
from space.utils import ontology
from space.utils.scores import tree_edit_score
def max_lens(X):
def list2np(X, padding=0, dtype="int64"):
    """Pad a ragged nested list (depth 1-3) into a dense numpy array.

    The target shape is given by max_lens(X); shorter inner lists are
    right-padded with `padding`, and the result is cast to `dtype`.
    """
    shape = max_lens(X)
    # int32 scratch buffer, cast to the requested dtype on return
    out = np.full(shape, padding, dtype=np.int32)
    depth = len(shape)
    if depth == 1:
        # already rectangular: padding is irrelevant, convert directly
        out = np.array(X)
    elif depth == 2:
        for row, seq in enumerate(X):
            out[row, :len(seq)] = np.array(seq)
    elif depth == 3:
        for row, seqs in enumerate(X):
            for col, seq in enumerate(seqs):
                out[row, col, :len(seq)] = np.array(seq)
    return out.astype(dtype)
import os
import random
from collections import OrderedDict, defaultdict
from itertools import chain
import json
import sqlite3 as sql
import numpy as np
import spacy
from tqdm import tqdm
from nltk.tokenize import word_tokenize as nltk_word_tokenize
from nltk.stem import WordNetLemmatizer
from space.args import str2bool
from space.data.tokenizer import Tokenizer
from space.utils import ontology, utils
from space.utils.db_ops import MultiWozDB
from space.utils.ontologies import CamRest676Ontology, KvretOntology
def max_lens(X):
def list2np(X, padding=0, dtype="int64"):
    """Pad a ragged nested list (depth 1-3) into a dense numpy array.

    The target shape comes from max_lens(X); shorter inner lists are
    right-padded with `padding`, and the result is cast to `dtype`.
    """
    shape = max_lens(X)
    # int32 scratch buffer; cast to the requested dtype on return
    ret = np.full(shape, padding, dtype=np.int32)
    if len(shape) == 1:
        # 1-D input is already rectangular; padding is irrelevant
        ret = np.array(X)
    elif len(shape) == 2:
        for i, x in enumerate(X):
            ret[i, :len(x)] = np.array(x)
    elif len(shape) == 3:
        for i, xs in enumerate(X):
            for j, x in enumerate(xs):
                ret[i, j, :len(x)] = np.array(x)
    return ret.astype(dtype)
import json
import logging
import os
import sys
import time
from collections import OrderedDict
import torch
import numpy as np
from tqdm import tqdm
from transformers.optimization import AdamW, get_linear_schedule_with_warmup
from dst import default_cleaning, IGNORE_TURNS_TYPE2, paser_bs,ignore_none
from space.args import str2bool
from space.data.data_loader import DataLoader
from space.metrics.metrics_tracker import MetricsTracker
# Dialogues (keyed by file id without the '.json' suffix) whose listed turn
# indices are known "Type 2" noisy annotations; they can be skipped during
# joint-accuracy scoring when type2 cleaning is enabled.
IGNORE_TURNS_TYPE2 = \
    {
        'PMUL1812': [1, 2]
    }
def paser_bs(sent):
    """Convert a compacted belief-span string into a de-duplicated list of
    "domain slot value" strings.

    The span is expected to look like
    "<sos_b> [domain] slot v1 v2 slot v ... <eos_b>"; domain markers come
    from `all_domain` and slot names from `all_slots`.
    """
    tokens = sent.strip('<sos_b>').strip('<eos_b>').split()
    dom_positions = [idx for idx, tok in enumerate(tokens) if tok in all_domain]
    triples = []
    for n, start in enumerate(dom_positions):
        # slice runs up to the next domain marker (or the end of the span)
        end = dom_positions[n + 1] if n + 1 < len(dom_positions) else len(tokens)
        domain = tokens[start]
        span = tokens[start + 1:end]
        slot_positions = [idx for idx, tok in enumerate(span) if tok in all_slots]
        for m, s_start in enumerate(slot_positions):
            s_end = slot_positions[m + 1] if m + 1 < len(slot_positions) else len(span)
            value = ' '.join(span[s_start + 1:s_end])
            triples.append(' '.join([domain, span[s_start], value]))
    return list(set(triples))
def ignore_none(pred_belief, target_belief):
    """Filter out placeholder belief entries before comparison.

    Drops every "domain slot value" string containing 'none' or
    'not mentioned' from both the predicted and the gold belief lists, and
    normalizes the venue name 'catherine s' -> 'catherines' in predictions
    so spacing variants do not count as errors.

    Returns:
        (pred_belief, target_belief): the cleaned lists.
    """
    # Bug fix: str.replace returns a NEW string; the original call discarded
    # the result, so the normalization never took effect.
    pred_belief = [pred.replace('catherine s', 'catherines') if 'catherine s' in pred else pred
                   for pred in pred_belief]
    clean_target_belief = []
    clean_pred_belief = []
    for bs in target_belief:
        if 'not mentioned' in bs or 'none' in bs:
            continue
        clean_target_belief.append(bs)
    for bs in pred_belief:
        if 'not mentioned' in bs or 'none' in bs:
            continue
        clean_pred_belief.append(bs)
    # (The old `dontcare_slots` bookkeeping was computed but never used or
    # returned; it has been removed as dead code.)
    return clean_pred_belief, clean_target_belief
def default_cleaning(pred_belief, target_belief):
    """Apply the standard MultiWOZ label cleaning to both belief lists.

    Each entry is a "domain slot value" (or "domain book slot value")
    string.  Values are canonicalized through the GENERAL_TYPO map and
    fix_mismatch_jason, then the entries are re-serialized.

    Returns:
        (turn_pred, turn_target): cleaned prediction and gold lists.
    """
    pred_belief_jason = []
    target_belief_jason = []
    for pred in pred_belief:
        if pred in ['', ' ']:
            continue
        domain = pred.split()[0]
        if 'book' in pred:
            slot = ' '.join(pred.split()[1:3])
            val = ' '.join(pred.split()[3:])
        else:
            slot = pred.split()[1]
            val = ' '.join(pred.split()[2:])
        # Bug fix: GENERAL_TYPO maps noisy VALUES to canonical values; the
        # original looked the SLOT up, so no typo was ever corrected.
        if val in GENERAL_TYPO:
            val = GENERAL_TYPO[val]
        slot, val = fix_mismatch_jason(slot, val)
        pred_belief_jason.append('{} {} {}'.format(domain, slot, val))
    for tgt in target_belief:
        domain = tgt.split()[0]
        if 'book' in tgt:
            slot = ' '.join(tgt.split()[1:3])
            val = ' '.join(tgt.split()[3:])
        else:
            slot = tgt.split()[1]
            val = ' '.join(tgt.split()[2:])
        if val in GENERAL_TYPO:  # same value-lookup fix as above
            val = GENERAL_TYPO[val]
        slot, val = fix_mismatch_jason(slot, val)
        target_belief_jason.append('{} {} {}'.format(domain, slot, val))
    turn_pred = pred_belief_jason
    turn_target = target_belief_jason
    return turn_pred, turn_target
def compute_jacc(data, default_cleaning_flag=True, type2_cleaning_flag=False):
    """Compute joint goal accuracy of predicted vs. gold belief states.

    Args:
        data: {file_name: {turn_id: {'bspn': gold span, 'bspn_gen': predicted
            span, ...}}} — turn ids are assumed to be consecutive integers
            (the last turn is detected via `turn_id + 1`).
        default_cleaning_flag: apply the MultiWOZ default label cleaning.
        type2_cleaning_flag: additionally forgive turns listed in
            IGNORE_TURNS_TYPE2 (known noisy annotations).

    Side effects: prints the accuracies, and dumps all mismatching turns to
    'bs_error.json'.

    Returns:
        (joint_acc, joint_acc_wo_cross, dict_rate) where dict_rate is the
        per-domain-slot accuracy computed on each dialogue's last turn.
    """
    num_turns = 0
    joint_acc = 0
    joint_acc_wo_cross = 0
    joint_acc_wo_wrong = 0
    joint_acc_wo_namestr = 0
    error = {}
    clean_tokens = ['<|endoftext|>', ]
    dict_slot_acc_right = {}
    dict_slot_acc_all = {}
    dict_rate = {}
    for file_name in data:
        for turn_id, turn_data in data[file_name].items():
            turn_target = turn_data['bspn']
            turn_pred = turn_data['bspn_gen']
            turn_target = paser_bs(turn_target)
            turn_pred = paser_bs(turn_pred)
            # clean: drop empty / 'none' entries.  Bug fix: iterate over a
            # copy — removing from the list being iterated skips elements.
            for bs in list(turn_pred):
                if bs in clean_tokens + ['', ' '] or bs.split()[-1] == 'none':
                    turn_pred.remove(bs)
            new_turn_pred = []
            for bs in turn_pred:
                for tok in clean_tokens:
                    bs = bs.replace(tok, '').strip()
                new_turn_pred.append(bs)
            turn_pred = new_turn_pred
            turn_pred, turn_target = ignore_none(turn_pred, turn_target)
            # MultiWOZ default cleaning
            if default_cleaning_flag:
                turn_pred, turn_target = default_cleaning(turn_pred, turn_target)
            # per-slot statistics, collected only on each dialogue's last turn
            # (assumes integer, gap-free turn ids — TODO confirm upstream)
            if turn_id + 1 not in data[file_name]:
                for domain_slot_value in turn_target:
                    domain = domain_slot_value.split()[0]
                    slot = domain_slot_value.split()[1]
                    key = domain + '-' + slot
                    dict_slot_acc_all[key] = dict_slot_acc_all.get(key, 0) + 1
                for pred_domain_slot_value in turn_pred:
                    if pred_domain_slot_value in set(turn_target):
                        domain = pred_domain_slot_value.split()[0]
                        slot = pred_domain_slot_value.split()[1]
                        key = domain + '-' + slot
                        dict_slot_acc_right[key] = dict_slot_acc_right.get(key, 0) + 1
            for domain_slot in dict_slot_acc_right.keys():
                dict_rate[domain_slot] = dict_slot_acc_right[domain_slot] / dict_slot_acc_all[domain_slot]
            join_flag = False
            # --- accuracy ignoring the profile 'namestr' slot ---
            turn_pred_wo_namestr = [item for item in turn_pred if 'namestr' not in item]
            turn_target_wo_namestr = [item for item in turn_target if 'namestr' not in item]
            if set(turn_target_wo_namestr) == set(turn_pred_wo_namestr):
                joint_acc_wo_namestr += 1
                join_flag = True
            elif type2_cleaning_flag:  # check for possible Type 2 noisy annotations
                flag = True
                for bs in turn_target_wo_namestr:
                    if bs not in turn_pred_wo_namestr:
                        flag = False
                        break
                if flag:
                    for bs in turn_pred_wo_namestr:
                        if bs not in turn_target_wo_namestr:
                            flag = False
                            break
                if flag:  # model prediction might be correct if found in Type 2 list of noisy annotations
                    # Bug fix: `dial` was an undefined name (NameError); the
                    # dialogue id lives in `file_name`.
                    dial_name = file_name.split('.')[0]
                    if dial_name in IGNORE_TURNS_TYPE2 and turn_id in IGNORE_TURNS_TYPE2[dial_name]:  # ignore these turns
                        pass
                    else:
                        joint_acc_wo_namestr += 1
            join_flag = False
            # drop inexplicable (hallucinated) outputs before scoring
            turn_pred_wo_wrong = [item for item in turn_pred
                                  if 'emma' not in item and 'jerry' not in item and 'namestr' not in item]
            turn_target_wo_wrong = [item for item in turn_target
                                    if 'emma' not in item and 'jerry' not in item and 'namestr' not in item]
            if set(turn_target_wo_wrong) == set(turn_pred_wo_wrong):
                joint_acc_wo_wrong += 1
                join_flag = True
            elif type2_cleaning_flag:  # check for possible Type 2 noisy annotations
                flag = True
                for bs in turn_target_wo_wrong:
                    if bs not in turn_pred_wo_wrong:
                        flag = False
                        break
                if flag:
                    for bs in turn_pred_wo_wrong:
                        if bs not in turn_target_wo_wrong:
                            flag = False
                            break
                if flag:
                    dial_name = file_name.split('.')[0]  # fixed: was undefined `dial`
                    if dial_name in IGNORE_TURNS_TYPE2 and turn_id in IGNORE_TURNS_TYPE2[dial_name]:
                        pass
                    else:
                        joint_acc_wo_wrong += 1
            join_flag = False
            # --- accuracy ignoring the cross-domain [profile] entries ---
            turn_pred_wo_cross = [item for item in turn_pred if '[profile]' not in item]
            turn_target_wo_cross = [item for item in turn_target if '[profile]' not in item]
            if set(turn_target_wo_cross) == set(turn_pred_wo_cross):
                joint_acc_wo_cross += 1
                join_flag = True
            elif type2_cleaning_flag:  # check for possible Type 2 noisy annotations
                flag = True
                for bs in turn_target_wo_cross:
                    if bs not in turn_pred_wo_cross:
                        flag = False
                        break
                if flag:
                    for bs in turn_pred_wo_cross:
                        if bs not in turn_target_wo_cross:
                            flag = False
                            break
                if flag:
                    dial_name = file_name.split('.')[0]  # fixed: was undefined `dial`
                    if dial_name in IGNORE_TURNS_TYPE2 and turn_id in IGNORE_TURNS_TYPE2[dial_name]:
                        pass
                    else:
                        joint_acc_wo_cross += 1
            join_flag = False
            # --- full joint accuracy ---
            if set(turn_target) == set(turn_pred):
                joint_acc += 1
                join_flag = True
            elif type2_cleaning_flag:  # check for possible Type 2 noisy annotations
                flag = True
                for bs in turn_target:
                    if bs not in turn_pred:
                        flag = False
                        break
                if flag:
                    for bs in turn_pred:
                        if bs not in turn_target:
                            flag = False
                            break
                if flag:
                    dial_name = file_name.split('.')[0]  # fixed: was undefined `dial`
                    if dial_name in IGNORE_TURNS_TYPE2 and turn_id in IGNORE_TURNS_TYPE2[dial_name]:
                        pass
                    else:
                        joint_acc += 1
                        join_flag = True
            if not join_flag:
                if file_name not in error:
                    error[file_name] = {}
                turn_data['gtbs'] = turn_target
                turn_data['predbs'] = turn_pred
                error[file_name][turn_id] = turn_data
            num_turns += 1
    joint_acc /= num_turns
    joint_acc_wo_cross /= num_turns
    joint_acc_wo_namestr /= num_turns
    joint_acc_wo_wrong /= num_turns
    print('joint accuracy: {}'.format(joint_acc))
    print('joint accuracy_wo_cross: {}'.format(joint_acc_wo_cross))
    # Bug fix: this line previously printed joint_acc_wo_cross again.
    print('joint accuracy_wo_namestr: {}'.format(joint_acc_wo_namestr))
    print('joint_acc_wo_wrong: {}'.format(joint_acc_wo_wrong))
    print('dict_rate: {}'.format(dict_rate))
    with open('bs_error.json', "w") as f:
        json.dump(error, f, indent=2)
    return joint_acc, joint_acc_wo_cross, dict_rate
import json
import logging
import os
import sys
import time
from collections import OrderedDict
import torch
import numpy as np
from tqdm import tqdm
from transformers.optimization import AdamW, get_linear_schedule_with_warmup
from dst import default_cleaning, IGNORE_TURNS_TYPE2, paser_bs,ignore_none
from space.args import str2bool
from space.data.data_loader import DataLoader
from space.metrics.metrics_tracker import MetricsTracker
def get_logger(log_path, name="default"):
    """Build a DEBUG-level logger that writes bare messages ("%(message)s")
    to both stdout and the file at ``log_path`` (truncated on open).

    Note: propagation to the root logger is disabled.
    """
    fmt = logging.Formatter("%(message)s")
    logger = logging.getLogger(name)
    logger.propagate = False
    logger.setLevel(logging.DEBUG)
    stream_handler = logging.StreamHandler(sys.stdout)
    stream_handler.setFormatter(fmt)
    file_handler = logging.FileHandler(log_path, mode="w")
    file_handler.setFormatter(fmt)
    for handler in (stream_handler, file_handler):
        logger.addHandler(handler)
    return logger
import re
from space.utils import ontology
def clean_text(text):
def clean_slot_values(domain, slot, value):
    """Normalize a raw (domain, slot, value) annotation from MultiWOZ.

    Cleans the value text, canonicalizes domain-specific typos and
    abbreviations, maps empty/'not mentioned' values to '', and renames
    slots via ontology.normlize_slot_names.

    Returns:
        (slot, value): the normalized pair.

    Fixes vs. the original: removed a duplicate dead
    ``elif value in ['any']`` branch (hotel pricerange) and an unreachable
    ``elif slot == "pricerange"`` branch (restaurant) that was shadowed by
    the earlier ``slot in ['pricerange', 'price range']`` test.
    """
    value = clean_text(value)
    if not value:
        value = ''
    elif value == 'not mentioned':
        value = ''
        # value = 'not mentioned' # if in DST setting
    elif domain == 'attraction':
        if slot == 'name':
            if value == 't':
                value = ''
            if value == 'trinity':
                value = 'trinity college'
        elif slot == 'area':
            if value in ['town centre', 'cent', 'center', 'ce']:
                value = 'centre'
            elif value in ['ely', 'in town', 'museum', 'norwich', 'same area as hotel']:
                value = ""
            elif value in ['we']:
                value = "west"
        elif slot == 'type':
            if value in ['m', 'mus', 'musuem']:
                value = 'museum'
            elif value in ['art', 'architectural']:
                value = "architecture"
            elif value in ['churches']:
                value = "church"
            elif value in ['coll']:
                value = "college"
            elif value in ['concert', 'concerthall']:
                value = 'concert hall'
            elif value in ['night club']:
                value = 'nightclub'
            elif value in ['mutiple sports', 'mutliple sports', 'sports', 'galleria']:
                value = 'multiple sports'
            elif value in ['ol', 'science', 'gastropub', 'la raza']:
                value = ''
            elif value in ['swimmingpool', 'pool']:
                value = 'swimming pool'
            elif value in ['fun']:
                value = 'entertainment'
    elif domain == 'hotel':
        if slot == 'area':
            if value in ['cen', 'centre of town', 'near city center', 'center']:
                value = 'centre'
            elif value in ['east area', 'east side']:
                value = 'east'
            elif value in ['in the north', 'north part of town']:
                value = 'north'
            elif value in ['we']:
                value = "west"
        elif slot == "day":
            if value == "monda":
                value = "monday"
            elif value == "t":
                value = "tuesday"
        elif slot == 'name':
            if value == 'uni':
                value = 'university arms hotel'
            elif value == 'university arms':
                value = 'university arms hotel'
            elif value == 'acron':
                value = 'acorn guest house'
            elif value == 'ashley':
                value = 'ashley hotel'
            elif value == 'arbury lodge guesthouse':
                value = 'arbury lodge guest house'
            elif value == 'la':
                value = 'la margherit'
            elif value == 'no':
                value = ''
        elif slot == 'internet':
            if value == 'does not':
                value = 'no'
            elif value in ['y', 'free', 'free internet']:
                value = 'yes'
            elif value in ['4']:
                value = ''
        elif slot == 'parking':
            if value == 'n':
                value = 'no'
            elif value in ['free parking']:
                value = 'yes'
            elif value in ['y']:
                value = 'yes'
        elif slot in ['pricerange', 'price range']:
            slot = 'pricerange'
            if value == 'moderately':
                value = 'moderate'
            elif value in ['any']:
                value = "do n't care"
            elif value in ['inexpensive']:
                value = "cheap"
            elif value in ['2', '4']:
                value = ''
        elif slot == 'stars':
            if value == 'two':
                value = '2'
            elif value == 'three':
                value = '3'
            elif value in ['4-star', '4 stars', '4 star', 'four star', 'four stars']:
                value = '4'
        elif slot == 'type':
            if value == '0 star rarting':
                value = ''
            elif value == 'guesthouse':
                value = 'guest house'
            elif value not in ['hotel', 'guest house', "do n't care"]:
                value = ''
    elif domain == 'restaurant':
        if slot == "area":
            if value in ["center", 'scentre', "center of town", "city center", "cb30aq", "town center", 'centre of cambridge', 'city centre']:
                value = "centre"
            elif value == "west part of town":
                value = "west"
            elif value == "n":
                value = "north"
            elif value in ['the south']:
                value = 'south'
            elif value not in ['centre', 'south', "do n't care", 'west', 'east', 'north']:
                value = ''
        elif slot == "day":
            if value == "monda":
                value = "monday"
            elif value == "t":
                value = "tuesday"
        elif slot in ['pricerange', 'price range']:
            slot = 'pricerange'
            if value in ['moderately', 'mode', 'mo']:
                value = 'moderate'
            elif value in ['not']:
                value = ''
            elif value in ['inexpensive', 'ch']:
                value = "cheap"
        elif slot == "food":
            if value == "barbecue":
                value = "barbeque"
        elif slot == "time":
            if value == "9:00":
                value = "09:00"
            elif value == "9:45":
                value = "09:45"
            elif value == "1330":
                value = "13:30"
            elif value == "1430":
                value = "14:30"
            elif value == "9:15":
                value = "09:15"
            elif value == "9:30":
                value = "09:30"
            elif value == "1830":
                value = "18:30"
            elif value == "9":
                value = "09:00"
            elif value == "2:00":
                value = "14:00"
            elif value == "1:00":
                value = "13:00"
            elif value == "3:00":
                value = "15:00"
    elif domain == 'taxi':
        if slot in ['arriveBy', 'arrive by']:
            slot = 'arriveby'
            if value == '1530':
                value = '15:30'
            elif value == '15 minutes':
                value = ''
        elif slot in ['leaveAt', 'leave at']:
            slot = 'leaveat'
            if value == '1:00':
                value = '01:00'
            elif value == '21:4':
                value = '21:04'
            elif value == '4:15':
                value = '04:15'
            elif value == '5:45':
                value = '05:45'
            elif value == '0700':
                value = '07:00'
            elif value == '4:45':
                value = '04:45'
            elif value == '8:30':
                value = '08:30'
            elif value == '9:30':
                value = '09:30'
            value = value.replace(".", ":")
    elif domain == 'train':
        if slot in ['arriveBy', 'arrive by']:
            slot = 'arriveby'
            if value == '1':
                value = '01:00'
            elif value in ['does not care', 'doesnt care', "doesn't care"]:
                value = "do n't care"
            elif value == '8:30':
                value = '08:30'
            elif value == 'not 15:45':
                value = ''
            value = value.replace(".", ":")
        elif slot == 'day':
            if value == 'doesnt care' or value == "doesn't care":
                value = "do n't care"
        elif slot in ['leaveAt', 'leave at']:
            slot = 'leaveat'
            if value == '2:30':
                value = '02:30'
            elif value == '7:54':
                value = '07:54'
            elif value == 'after 5:45 pm':
                value = '17:45'
            elif value in ['early evening', 'friday', 'sunday', 'tuesday', 'afternoon']:
                value = ''
            elif value == '12':
                value = '12:00'
            elif value == '1030':
                value = '10:30'
            elif value == '1700':
                value = '17:00'
            elif value in ['does not care', 'doesnt care', 'do nt care', "doesn't care"]:
                value = "do n't care"
            value = value.replace(".", ":")
    # domain-independent normalization
    if value in ['dont care', "don't care", "do nt care", "doesn't care"]:
        value = "do n't care"
    if ontology.normlize_slot_names.get(slot):
        slot = ontology.normlize_slot_names[slot]
    return slot, value
import json
# Canonicalization map for noisy MultiWOZ slot VALUES: keys are common typos /
# paraphrases observed in annotations, values are their canonical forms.
GENERAL_TYPO = {
        # type
        "guesthouse":"guest house", "guesthouses":"guest house", "guest":"guest house", "mutiple sports":"multiple sports",
        "sports":"multiple sports", "mutliple sports":"multiple sports","swimmingpool":"swimming pool", "concerthall":"concert hall",
        "concert":"concert hall", "pool":"swimming pool", "night club":"nightclub", "mus":"museum", "ol":"architecture",
        "colleges":"college", "coll":"college", "architectural":"architecture", "musuem":"museum", "churches":"church",
        # area
        "center":"centre", "center of town":"centre", "near city center":"centre", "in the north":"north", "cen":"centre", "east side":"east",
        "east area":"east", "west part of town":"west", "ce":"centre", "town center":"centre", "centre of cambridge":"centre",
        "city center":"centre", "the south":"south", "scentre":"centre", "town centre":"centre", "in town":"centre", "north part of town":"north",
        "centre of town":"centre", "cb30aq": "none",
        # price
        "mode":"moderate", "moderate -ly": "moderate", "mo":"moderate",
        # day
        "next friday":"friday", "monda": "monday",
        # parking
        "free parking":"free",
        # internet
        "free internet":"yes",
        # star
        "4 star":"4", "4 stars":"4", "0 star rarting":"none",
        # others
        "y":"yes", "any":"dontcare", "n":"no", "does not care":"dontcare", "not men":"none", "not":"none", "not mentioned":"none",
        '':"none", "not mendtioned":"none", "3 .":"3", "does not":"no", "fun":"none", "art":"none",
    }
def fix_mismatch_jason(slot, value):
    """Repair (slot, value) pairs where the value clearly belongs to a
    different slot — a common MultiWOZ annotation mistake.

    Returns the corrected (slot, value) pair; the slot itself is never
    changed, only the value.
    """
    # values that, for these slots, can only be annotation noise
    nonsense_type_values = ["nigh", "moderate -ly priced", "bed and breakfast",
                            "centre", "venetian", "intern", "a cheap -er hotel",
                            "gastropub", "la raza", "galleria", "gallery",
                            "science", "m"]
    if ((slot == "type" and value in nonsense_type_values)
            or (slot == "internet" and value == "4")
            or (slot == "pricerange" and value == "2")
            or ("area" in slot and value == "moderate")
            or ("day" in slot and value == "t")):
        value = "none"
    elif slot == "type" and value in ["hotel with free parking and free wifi", "4",
                                      "3 star hotel"]:
        value = "hotel"
    elif slot == "star" and value == "3 star hotel":
        value = "3"
    elif "area" in slot:
        value = {"no": "north", "we": "west", "cent": "centre"}.get(value, value)
    elif "day" in slot:
        value = {"we": "wednesday", "no": "none"}.get(value, value)
    elif "price" in slot and value == "ch":
        value = "cheap"
    elif "internet" in slot and value == "free":
        value = "yes"
    # some out-of-define classification slot values
    if slot == "area" and value in ["stansted airport", "cambridge", "silver street",
                                    "norwich", "ely", "museum", "same area as hotel"]:
        value = "none"
    return slot, value
def default_cleaning(pred_belief, target_belief):
    """Apply the standard MultiWOZ label cleaning to both belief lists.

    Each entry is a "domain slot value" (or "domain book slot value")
    string.  Values are canonicalized through the GENERAL_TYPO map and
    fix_mismatch_jason, then the entries are re-serialized.

    Returns:
        (turn_pred, turn_target): cleaned prediction and gold lists.
    """
    pred_belief_jason = []
    target_belief_jason = []
    for pred in pred_belief:
        if pred in ['', ' ']:
            continue
        domain = pred.split()[0]
        if 'book' in pred:
            slot = ' '.join(pred.split()[1:3])
            val = ' '.join(pred.split()[3:])
        else:
            slot = pred.split()[1]
            val = ' '.join(pred.split()[2:])
        # Bug fix: GENERAL_TYPO maps noisy VALUES to canonical values; the
        # original looked the SLOT up, so no typo was ever corrected.
        if val in GENERAL_TYPO:
            val = GENERAL_TYPO[val]
        slot, val = fix_mismatch_jason(slot, val)
        pred_belief_jason.append('{} {} {}'.format(domain, slot, val))
    for tgt in target_belief:
        domain = tgt.split()[0]
        if 'book' in tgt:
            slot = ' '.join(tgt.split()[1:3])
            val = ' '.join(tgt.split()[3:])
        else:
            slot = tgt.split()[1]
            val = ' '.join(tgt.split()[2:])
        if val in GENERAL_TYPO:  # same value-lookup fix as above
            val = GENERAL_TYPO[val]
        slot, val = fix_mismatch_jason(slot, val)
        target_belief_jason.append('{} {} {}'.format(domain, slot, val))
    turn_pred = pred_belief_jason
    turn_target = target_belief_jason
    return turn_pred, turn_target
import json, os, re, copy, zipfile
import spacy
import space.utils.ontology as ontology
import space.utils.utils as utils
from collections import OrderedDict
from tqdm import tqdm
from config import global_config as cfg
from db_ops import MultiWozDB
from clean_dataset import clean_slot_values, clean_text
def clean_slot_values(domain, slot, value):
def get_db_values(value_set_path): # value_set.json, all the domain[slot] values in datasets
    """Build the processed DB value set and the belief-span vocabulary.

    Reads value_set.json and db/ontology.json (lower-cased), cleans and
    spaCy-tokenizes every informable slot value, then writes
    <value_set>_processed.json and bspn_word_collection.json.
    """
    processed = {}
    bspn_word = []
    nlp = spacy.load('en_core_web_sm')
    with open(value_set_path, 'r') as f: # read value set file in lower
        value_set = json.loads(f.read().lower())
    with open('db/ontology.json', 'r') as f: # read ontology in lower, all the domain-slot values
        otlg = json.loads(f.read().lower())
    for domain, slots in value_set.items(): # add all informable slots to bspn_word, create lists holder for values
        processed[domain] = {}
        bspn_word.append('['+domain+']')
        for slot, values in slots.items():
            # profile's 'name' slot is renamed to 'namestr' throughout
            if domain == 'profile':
                if slot == 'name':
                    slot = 'namestr'
                else:
                    pass
            s_p = ontology.normlize_slot_names.get(slot, slot)
            if s_p in ontology.informable_slots[domain]:
                bspn_word.append(s_p)
                processed[domain][s_p] = []
    for domain, slots in value_set.items(): # add all words of values of informable slots to bspn_word
        for slot, values in slots.items():
            if domain == 'profile':
                if slot == 'name':
                    slot = 'namestr'
                else:
                    pass
            s_p = ontology.normlize_slot_names.get(slot, slot)
            # print(s_p)
            if s_p in ontology.informable_slots[domain]:
                for v in values:
                    _, v_p = clean_slot_values(domain, slot, v)
                    v_p = ' '.join([token.text for token in nlp(v_p)]).strip()
                    processed[domain][s_p].append(v_p)
                    for x in v_p.split():
                        if x not in bspn_word:
                            bspn_word.append(x)
    for domain_slot, values in otlg.items(): # split domain-slots to domains and slots
        domain, slot = domain_slot.split('-')
        # profile entries from the ontology are skipped entirely
        if domain == 'profile':
            if slot == 'name':
                slot = 'namestr'
                continue
            else:
                continue
        if domain == 'bus':
            domain = 'taxi'
        # normalize the ontology's multi-word slot names
        if slot == 'price range':
            slot = 'pricerange'
        if slot == 'book stay':
            slot = 'stay'
        if slot == 'book day':
            slot = 'day'
        if slot == 'book people':
            slot = 'people'
        if slot == 'book time':
            slot = 'time'
        if slot == 'arrive by':
            slot = 'arrive'
        if slot == 'leave at':
            slot = 'leave'
        if slot == 'leaveat':
            slot = 'leave'
        if slot not in processed[domain]: # add all slots and words of values if not already in processed and bspn_word
            processed[domain][slot] = []
            bspn_word.append(slot)
        for v in values:
            _, v_p = clean_slot_values(domain, slot, v)
            v_p = ' '.join([token.text for token in nlp(v_p)]).strip()
            if v_p not in processed[domain][slot]:
                processed[domain][slot].append(v_p)
                for x in v_p.split():
                    if x not in bspn_word:
                        bspn_word.append(x)
    with open(value_set_path.replace('.json', '_processed.json'), 'w') as f:
        json.dump(processed, f, indent=2) # save processed.json
    with open('space/data/multiwoz2.0/bspn_word_collection.json', 'w') as f:
        json.dump(bspn_word, f, indent=2) # save bspn_word
    print('DB value set processed! ')
import json
import logging
import os
import sys
import time
from collections import OrderedDict
import torch
import numpy as np
from tqdm import tqdm
from transformers.optimization import AdamW, get_linear_schedule_with_warmup
from space.args import str2bool
from space.data.data_loader import DataLoader
from space.metrics.metrics_tracker import MetricsTracker
from space.metrics.metrics import bleu
from space.metrics.metrics import distinct
class MetricsTracker(object):
def __init__(self):
def update(self, metrics, num_samples):
def clear(self):
def items(self):
def get(self, name):
def state_dict(self):
def load_state_dict(self, state_dict):
def value(self):
def summary(self):
def distinct(seqs):
def bleu(hyps, refs):
def evaluate_generation_result(results):
    """Score generated responses against their references.

    Each result dict carries "tgt" (reference string) and "preds" (either a
    single string or a list of candidates scored by "scores"; the best-scored
    candidate is evaluated).  Returns a MetricsTracker holding BLEU-1/2,
    intra/inter distinct-1/2 and the average hypothesis length.
    """
    refs = [item["tgt"].split(" ") for item in results]
    hyps = []
    for item in results:
        # pick the highest-scoring candidate when several are provided
        best = (item["preds"][np.argmax(item["scores"])]
                if isinstance(item["preds"], list)
                else item["preds"])
        hyps.append(best.split(" "))
    bleu1, bleu2 = bleu(hyps, refs)
    intra_dist1, intra_dist2, inter_dist1, inter_dist2 = distinct(hyps)
    metrics = {
        "bleu_1": bleu1,
        "bleu_2": bleu2,
        "intra_dist_1": intra_dist1,
        "intra_dist_2": intra_dist2,
        "inter_dist_1": inter_dist1,
        "inter_dist_2": inter_dist2,
        "len": sum(map(len, hyps)) / len(hyps),
    }
    metrics_tracker = MetricsTracker()
    # one bulk update over the whole result set, so num_samples is 1
    metrics_tracker.update(metrics, num_samples=1)
    return metrics_tracker
import re
from space.utils import ontology
def clean_text(text):
    """Normalize raw MultiWOZ utterance text.

    Lower-cases, fixes unicode quotes and punctuation, expands a few
    contractions, canonicalizes postcodes/typos via regex, then applies the
    tab-separated word-mapping pairs from ../text_data/mapping.pair.
    """
    text = text.strip()
    text = text.lower()
    text = text.replace(u"’", "'")
    text = text.replace(u"‘", "'")
    text = text.replace(';', ',')
    text = text.replace('"', ' ')
    text = text.replace('/', ' and ')
    text = text.replace("don't", "do n't")
    # clean_time is defined elsewhere in this module — TODO confirm import
    text = clean_time(text)
    # regex/literal typo fixes, applied in declaration order
    baddata = { r'c\.b (\d), (\d) ([a-z])\.([a-z])': r'cb\1\2\3\4',
                'c.b. 1 7 d.y': 'cb17dy',
                'c.b.1 7 d.y': 'cb17dy',
                'c.b 25, 9 a.q': 'cb259aq',
                'isc.b 25, 9 a.q': 'is cb259aq',
                'c.b2, 1 u.f': 'cb21uf',
                'c.b 1,2 q.a':'cb12qa',
                '0-122-336-5664': '01223365664',
                'postcodecb21rs': 'postcode cb21rs',
                r'i\.d': 'id',
                ' i d ': 'id',
                'Telephone:01223358966': 'Telephone: 01223358966',
                'depature': 'departure',
                'depearting': 'departing',
                '-type': ' type',
                r"b[\s]?&[\s]?b": "bed and breakfast",
                "b and b": "bed and breakfast",
                r"guesthouse[s]?": "guest house",
                r"swimmingpool[s]?": "swimming pool",
                "wo n\'t": "will not",
                " \'d ": " would ",
                " \'m ": " am ",
                " \'re' ": " are ",
                " \'ll' ": " will ",
                " \'ve ": " have ",
                r'^\'': '',
                r'\'$': '',
    }
    for tmpl, good in baddata.items():
        text = re.sub(tmpl, good, text)
    text = re.sub(r'([a-zT]+)\.([a-z])', r'\1 . \2', text) # 'abc.xyz' -> 'abc . xyz'
    text = re.sub(r'(\w+)\.\.? ', r'\1 . ', text) # if 'abc. ' -> 'abc . '
    # NOTE(review): mapping.pair is re-read from disk on every call — consider
    # caching the pairs at module load if this shows up in profiling.
    with open('../text_data/mapping.pair', 'r') as fin:
        for line in fin.readlines():
            fromx, tox = line.replace('\n', '').split('\t')
            text = ' ' + text + ' '
            text = text.replace(' ' + fromx + ' ', ' ' + tox + ' ')[1:-1]
    return text
def clean_slot_values(domain, slot, value):
    """Normalize a raw (domain, slot, value) annotation from MultiWOZ.

    Cleans the value text, canonicalizes domain-specific typos and
    abbreviations, maps empty/'not mentioned' values to '', and renames
    slots via ontology.normlize_slot_names.  Returns (slot, value).
    """
    value = clean_text(value)
    if not value:
        value = ''
    elif value == 'not mentioned':
        value = ''
        # value = 'not mentioned' # if in DST setting
    elif domain == 'attraction':
        if slot == 'name':
            if value == 't':
                value = ''
            if value=='trinity':
                value = 'trinity college'
        elif slot == 'area':
            if value in ['town centre', 'cent', 'center', 'ce']:
                value = 'centre'
            elif value in ['ely', 'in town', 'museum', 'norwich', 'same area as hotel']:
                value = ""
            elif value in ['we']:
                value = "west"
        elif slot == 'type':
            if value in ['m', 'mus', 'musuem']:
                value = 'museum'
            elif value in ['art', 'architectural']:
                value = "architecture"
            elif value in ['churches']:
                value = "church"
            elif value in ['coll']:
                value = "college"
            elif value in ['concert', 'concerthall']:
                value = 'concert hall'
            elif value in ['night club']:
                value = 'nightclub'
            elif value in ['mutiple sports', 'mutliple sports', 'sports', 'galleria']:
                value = 'multiple sports'
            elif value in ['ol', 'science', 'gastropub', 'la raza']:
                value = ''
            elif value in ['swimmingpool', 'pool']:
                value = 'swimming pool'
            elif value in ['fun']:
                value = 'entertainment'
    elif domain == 'hotel':
        if slot == 'area':
            if value in ['cen', 'centre of town', 'near city center', 'center']:
                value = 'centre'
            elif value in ['east area', 'east side']:
                value = 'east'
            elif value in ['in the north', 'north part of town']:
                value = 'north'
            elif value in ['we']:
                value = "west"
        elif slot == "day":
            if value == "monda":
                value = "monday"
            elif value == "t":
                value = "tuesday"
        elif slot == 'name':
            if value == 'uni':
                value = 'university arms hotel'
            elif value == 'university arms':
                value = 'university arms hotel'
            elif value == 'acron':
                value = 'acorn guest house'
            elif value == 'ashley':
                value = 'ashley hotel'
            elif value == 'arbury lodge guesthouse':
                value = 'arbury lodge guest house'
            elif value == 'la':
                value = 'la margherit'
            elif value == 'no':
                value = ''
        elif slot == 'internet':
            if value == 'does not':
                value = 'no'
            elif value in ['y', 'free', 'free internet']:
                value = 'yes'
            elif value in ['4']:
                value = ''
        elif slot == 'parking':
            if value == 'n':
                value = 'no'
            elif value in ['free parking']:
                value = 'yes'
            elif value in ['y']:
                value = 'yes'
        elif slot in ['pricerange', 'price range']:
            slot = 'pricerange'
            if value == 'moderately':
                value = 'moderate'
            elif value in ['any']:
                value = "do n't care"
            # NOTE(review): the branch below duplicates the one above and is
            # unreachable dead code.
            elif value in ['any']:
                value = "do n't care"
            elif value in ['inexpensive']:
                value = "cheap"
            elif value in ['2', '4']:
                value = ''
        elif slot == 'stars':
            if value == 'two':
                value = '2'
            elif value == 'three':
                value = '3'
            elif value in ['4-star', '4 stars', '4 star', 'four star', 'four stars']:
                value= '4'
        elif slot == 'type':
            if value == '0 star rarting':
                value = ''
            elif value == 'guesthouse':
                value = 'guest house'
            elif value not in ['hotel', 'guest house', "do n't care"]:
                value = ''
    elif domain == 'restaurant':
        if slot == "area":
            if value in ["center", 'scentre', "center of town", "city center", "cb30aq", "town center", 'centre of cambridge', 'city centre']:
                value = "centre"
            elif value == "west part of town":
                value = "west"
            elif value == "n":
                value = "north"
            elif value in ['the south']:
                value = 'south'
            elif value not in ['centre', 'south', "do n't care", 'west', 'east', 'north']:
                value = ''
        elif slot == "day":
            if value == "monda":
                value = "monday"
            elif value == "t":
                value = "tuesday"
        elif slot in ['pricerange', 'price range']:
            slot = 'pricerange'
            if value in ['moderately', 'mode', 'mo']:
                value = 'moderate'
            elif value in ['not']:
                value = ''
            elif value in ['inexpensive', 'ch']:
                value = "cheap"
        elif slot == "food":
            if value == "barbecue":
                value = "barbeque"
        # NOTE(review): unreachable dead code — the earlier
        # `slot in ['pricerange', 'price range']` branch always matches first.
        elif slot == "pricerange":
            if value == "moderately":
                value = "moderate"
        elif slot == "time":
            if value == "9:00":
                value = "09:00"
            elif value == "9:45":
                value = "09:45"
            elif value == "1330":
                value = "13:30"
            elif value == "1430":
                value = "14:30"
            elif value == "9:15":
                value = "09:15"
            elif value == "9:30":
                value = "09:30"
            elif value == "1830":
                value = "18:30"
            elif value == "9":
                value = "09:00"
            elif value == "2:00":
                value = "14:00"
            elif value == "1:00":
                value = "13:00"
            elif value == "3:00":
                value = "15:00"
    elif domain == 'taxi':
        if slot in ['arriveBy', 'arrive by']:
            slot = 'arriveby'
            if value == '1530':
                value = '15:30'
            elif value == '15 minutes':
                value = ''
        elif slot in ['leaveAt', 'leave at']:
            slot = 'leaveat'
            if value == '1:00':
                value = '01:00'
            elif value == '21:4':
                value = '21:04'
            elif value == '4:15':
                value = '04:15'
            elif value == '5:45':
                value = '05:45'
            elif value == '0700':
                value = '07:00'
            elif value == '4:45':
                value = '04:45'
            elif value == '8:30':
                value = '08:30'
            elif value == '9:30':
                value = '09:30'
            value = value.replace(".", ":")
    elif domain == 'train':
        if slot in ['arriveBy', 'arrive by']:
            slot = 'arriveby'
            if value == '1':
                value = '01:00'
            elif value in ['does not care', 'doesnt care', "doesn't care"]:
                value = "do n't care"
            elif value == '8:30':
                value = '08:30'
            elif value == 'not 15:45':
                value = ''
            value = value.replace(".", ":")
        elif slot == 'day':
            if value =='doesnt care' or value == "doesn't care":
                value = "do n't care"
        elif slot in ['leaveAt', 'leave at']:
            slot = 'leaveat'
            if value == '2:30':
                value = '02:30'
            elif value == '7:54':
                value = '07:54'
            elif value == 'after 5:45 pm':
                value = '17:45'
            elif value in ['early evening', 'friday', 'sunday', 'tuesday', 'afternoon']:
                value = ''
            elif value == '12':
                value = '12:00'
            elif value == '1030':
                value = '10:30'
            elif value == '1700':
                value = '17:00'
            elif value in ['does not care', 'doesnt care', 'do nt care', "doesn't care"]:
                value = "do n't care"
            value = value.replace(".", ":")
    # domain-independent normalization
    if value in ['dont care', "don't care", "do nt care", "doesn't care"]:
        value = "do n't care"
    if ontology.normlize_slot_names.get(slot):
        slot = ontology.normlize_slot_names[slot]
    return slot, value
import json
import math
from collections import Counter
import numpy as np
from nltk.util import ngrams
from sklearn.metrics import f1_score
from space.utils import ontology, utils
from space.utils.clean_dataset import clean_slot_values
def setsub(a,b):
def setsim(a, b):
    """Return True iff `setsub` holds in both directions between set(a) and set(b)."""
    set_a, set_b = set(a), set(b)
    return setsub(set_a, set_b) and setsub(set_b, set_a)
164,583 | import json, os, re, copy, zipfile
import spacy
import space.utils.ontology as ontology
import space.utils.utils as utils
from collections import OrderedDict
from tqdm import tqdm
from config import global_config as cfg
from db_ops import MultiWozDB
from clean_dataset import clean_slot_values, clean_text
def clean_slot_values(domain, slot, value):
def preprocess_db(db_paths): # apply clean_slot_values to all dbs
    """Normalize every non-profile domain DB and write a *_processed.json copy.

    For each domain in ontology.all_domains (except 'profile'), loads the DB
    json lower-cased, cleans every string-valued slot via clean_slot_values,
    re-tokenizes the cleaned value with spaCy, and dumps the result next to
    the original file with a '_processed.json' suffix.

    Args:
        db_paths: mapping of domain name -> path of that domain's DB json.
    """
    dbs = {}
    nlp = spacy.load('en_core_web_sm')
    for domain in ontology.all_domains:
        if domain != 'profile': # the profile db is handled elsewhere, skip it here
            with open(db_paths[domain], 'r') as f: # for every db_domain, read json file
                dbs[domain] = json.loads(f.read().lower())
            for idx, entry in enumerate(dbs[domain]): # entry has information about slots of said domain
                # Work on a deep copy so non-string values survive untouched
                # while cleaned keys replace their originals.
                new_entry = copy.deepcopy(entry)
                for key, value in entry.items(): # key = slot
                    if type(value) is not str:
                        continue
                    # Remove the raw key first: clean_slot_values may rename it.
                    del new_entry[key]
                    key, value = clean_slot_values(domain, key, value)
                    # Round-trip through spaCy so DB values match utterance tokenization.
                    tokenize_and_back = ' '.join([token.text for token in nlp(value)]).strip()
                    new_entry[key] = tokenize_and_back
                dbs[domain][idx] = new_entry
            with open(db_paths[domain].replace('.json', '_processed.json'), 'w') as f:
                json.dump(dbs[domain], f, indent=2)
            # print('[%s] DB processed! '%domain)
164,591 | import os
import re
import glob
import json
import math
import torch
import pickle
import random
import logging
import argparse
import numpy as np
from model import DSTModel
from tqdm import tqdm, trange
from utils_dst import InputFeatures
from torch.nn.utils.rnn import pad_sequence
from tensorlistdataset import TensorListDataset
from torch.utils.data.distributed import DistributedSampler
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
from transformers import WEIGHTS_NAME, RobertaTokenizerFast, WavLMConfig, RobertaConfig, Wav2Vec2Processor
from transformers import AdamW, get_linear_schedule_with_warmup, BertTokenizer
def to_list(tensor):
    """Return *tensor* as a (possibly nested) Python list, detached and on the CPU."""
    detached = tensor.detach()
    return detached.cpu().tolist()
164,592 | import os
import re
import glob
import json
import math
import torch
import pickle
import random
import logging
import argparse
import numpy as np
from model import DSTModel
from tqdm import tqdm, trange
from utils_dst import InputFeatures
from torch.nn.utils.rnn import pad_sequence
from tensorlistdataset import TensorListDataset
from torch.utils.data.distributed import DistributedSampler
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
from transformers import WEIGHTS_NAME, RobertaTokenizerFast, WavLMConfig, RobertaConfig, Wav2Vec2Processor
from transformers import AdamW, get_linear_schedule_with_warmup, BertTokenizer
def fetch_args(argv=None):
    """Build and parse the command-line options for DST training/evaluation.

    Args:
        argv: optional list of argument strings. Defaults to None, in which
            case argparse reads ``sys.argv[1:]`` exactly as before — existing
            callers are unaffected, but tests can now pass an explicit list.

    Returns:
        argparse.Namespace holding all model, training, path and misc options.
    """
    parser = argparse.ArgumentParser()
    # model parameters
    parser.add_argument("--model", type=str)
    parser.add_argument("--pool", action='store_true')
    parser.add_argument("--hidden_size", default=768, type=int)
    parser.add_argument("--model_type", default='roberta', type=str)
    parser.add_argument("--max_token_length", default=512, type=int)
    parser.add_argument("--max_audio_length", default=320000, type=int)
    parser.add_argument("--dropout_rate", default=0.1, type=float)
    parser.add_argument("--heads_dropout", default=0.0, type=float)
    parser.add_argument("--class_loss_ratio", default=0.8, type=float)
    parser.add_argument("--no_audio", action='store_true')
    # training parameters
    parser.add_argument("--resume", action='store_true')
    parser.add_argument("--per_gpu_train_batch_size", default=1, type=int)
    parser.add_argument("--per_gpu_eval_batch_size", default=24, type=int)
    parser.add_argument("--lr", default=2e-5, type=float)
    parser.add_argument('--accum', type=int, default=2)
    parser.add_argument("--weight_decay", default=0.0, type=float)
    parser.add_argument("--adam_epsilon", default=1e-8, type=float)
    parser.add_argument("--max_grad_norm", default=1.0, type=float)
    parser.add_argument("--num_train_epochs", default=12, type=int)
    parser.add_argument("--max_steps", default=-1, type=int)
    parser.add_argument("--warmup_proportion", default=0.1, type=float)
    parser.add_argument("--svd", default=0.0, type=float)
    parser.add_argument('--seed', type=int, default=3407)
    # path parameters
    parser.add_argument('--model_dir')
    parser.add_argument("--data_dir")
    parser.add_argument("--dataset_config")
    parser.add_argument("--output_dir")
    # other parameters
    parser.add_argument('--ckpt', type=str)
    parser.add_argument("--debug", action='store_true')
    parser.add_argument('--no_amp', action='store_true')
    parser.add_argument("--evaluate", action='store_true')
    parser.add_argument("--no_cuda", action='store_true')
    parser.add_argument('--save_steps', type=int, default=200)
    parser.add_argument("--evaluate_all", action='store_true')
    parser.add_argument("--token_loss_for_nonpointable", action='store_true',
                        help="Whether the token loss for classes other than copy_value contribute towards total loss.")
    parser.add_argument("--refer_loss_for_nonpointable", action='store_true',
                        help="Whether the refer loss for classes other than refer contribute towards total loss.")
    parser.add_argument("--evaluate_during_training", action='store_true',
                        help="Run evaluation during training at each logging step.")  # typo fixed: "Rul" -> "Run"
    parser.add_argument("--class_aux_feats_inform", action='store_true',
                        help="Whether or not to use the identity of informed slots as auxiliary featurs for class prediction.")
    parser.add_argument("--class_aux_feats_ds", action='store_true',
                        help="Whether or not to use the identity of slots in the current dialog state as auxiliary featurs for class prediction.")
    parser.add_argument("--do_lower_case", action='store_true',
                        help="Set this flag if you are using an uncased model.")
    parser.add_argument('--logging_steps', type=int, default=10,
                        help="Log every X updates steps.")
    parser.add_argument('--save_epochs', type=int, default=0,
                        help="Save checkpoint every X epochs. Overrides --save_steps.")
    parser.add_argument("--eval_all_checkpoints", action='store_true',
                        help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number")
    parser.add_argument('--overwrite_output_dir', action='store_true',
                        help="Overwrite the content of the output directory")
    parser.add_argument('--overwrite_cache', action='store_true',
                        help="Overwrite the cached training and evaluation sets")
    parser.add_argument("--local_rank", type=int, default=-1,
                        help="local_rank for distributed training on gpus")
    parser.add_argument('--amp_opt_level', type=str, default='O1')
    args = parser.parse_args(argv)
    return args
164,593 | import os
import re
import glob
import json
import math
import torch
import pickle
import random
import logging
import argparse
import numpy as np
from model import DSTModel
from tqdm import tqdm, trange
from utils_dst import InputFeatures
from torch.nn.utils.rnn import pad_sequence
from tensorlistdataset import TensorListDataset
from torch.utils.data.distributed import DistributedSampler
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
from transformers import WEIGHTS_NAME, RobertaTokenizerFast, WavLMConfig, RobertaConfig, Wav2Vec2Processor
from transformers import AdamW, get_linear_schedule_with_warmup, BertTokenizer
logger = logging.getLogger(__name__)
def set_seed(args):
    """Seed python, numpy and torch RNGs (plus all CUDA devices when GPUs are used)."""
    for seed_fn in (random.seed, np.random.seed, torch.manual_seed):
        seed_fn(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
def batch_to_device(batch, device):
    """Move each batch element (a tensor, or a dict of tensors) to *device*; return a tuple."""
    def _move(item):
        if isinstance(item, dict):
            return {name: tensor.to(device) for name, tensor in item.items()}
        return item.to(device)

    return tuple(_move(item) for item in batch)
def load_and_cache_examples(args, slot_list, split, tokenizer, evaluate=False):
    """Load pre-computed features for *split* and build a TensorListDataset.

    Reads the cached pickle produced by the preprocessing step (no on-the-fly
    featurization happens here — `tokenizer` is currently unused), converts the
    per-example fields to tensors, and collects per-slot label dictionaries.

    Args:
        args: parsed options; uses data_dir, model_type and local_rank.
        slot_list: slot names to build per-slot label tensors for.
        split: dataset split name ('train', 'debug', ...) used in the cache path.
        tokenizer: unused here (kept for interface compatibility).
        evaluate: when True, skips the distributed-training barriers.

    Returns:
        (dataset, features, audio_inputs): the TensorListDataset, the raw
        feature objects, and the list of per-example audio file references.
    """
    # In distributed training only rank 0 proceeds first; others wait at the
    # barrier so the cache is not read/written concurrently.
    if args.local_rank not in [-1, 0] and not evaluate:
        torch.distributed.barrier()
    # Load data features from cache or dataset file
    cached_file = f'{args.data_dir}/{split}_feature_{args.model_type}_nohistory.pkl'
    logger.info("Loading features from cached file %s", cached_file)
    # NOTE(review): file handle from open() is never closed — consider a `with` block.
    features = pickle.load(open(cached_file, 'rb'))
    if args.local_rank == 0 and not evaluate:
        torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache
    # Convert to Tensors and build dataset
    text_inputs = torch.tensor([f.text_inputs for f in features], dtype=torch.long)
    text_masks = torch.tensor([f.text_mask for f in features], dtype=torch.long)
    # Role ids are right-padded with 1 up to 512 — presumably max_token_length; TODO confirm.
    role_token_ids = torch.tensor([f.role_token_ids + [1]*(512-len(f.role_token_ids)) for f in features], dtype=torch.long)
    turn_ids = torch.tensor([f.turn_ids for f in features], dtype=torch.long)
    audio_inputs = [f.audio_inputs for f in features]
    f_start_pos = [f.start_pos for f in features]
    f_end_pos = [f.end_pos for f in features]
    f_inform_slot_ids = [f.inform_slot for f in features]
    f_refer_ids = [f.refer_id for f in features]
    f_diag_state = [f.diag_state for f in features]
    f_class_label_ids = [f.class_label_id for f in features]
    all_example_index = torch.arange(text_inputs.size(0), dtype=torch.long) # (0, 1, ..., b)
    # {slot:(b)}
    all_start_positions = {} # per example, per slot: span start index
    all_end_positions = {} # per example, per slot: span end index
    all_inform_slot_ids = {} # per example, per slot: whether the system informed this slot
    all_refer_ids = {}
    all_diag_state = {} # per example, per slot: class accumulated up to the current turn
    all_class_label_ids = {} # per example, per slot: class updated at the current turn
    for s in slot_list:
        all_start_positions[s] = torch.tensor([f[s] for f in f_start_pos], dtype=torch.long)
        all_end_positions[s] = torch.tensor([f[s] for f in f_end_pos], dtype=torch.long)
        all_inform_slot_ids[s] = torch.tensor([f[s] for f in f_inform_slot_ids], dtype=torch.long)
        all_refer_ids[s] = torch.tensor([f[s] for f in f_refer_ids], dtype=torch.long)
        all_diag_state[s] = torch.tensor([f[s] for f in f_diag_state], dtype=torch.long)
        all_class_label_ids[s] = torch.tensor([f[s] for f in f_class_label_ids], dtype=torch.long)
    dataset = TensorListDataset(text_inputs, text_masks, role_token_ids, turn_ids,
                                all_start_positions, all_end_positions,
                                all_inform_slot_ids, all_refer_ids,
                                all_diag_state, all_class_label_ids, all_example_index)
    return dataset, features, audio_inputs
The provided code snippet includes necessary dependencies for implementing the `train` function. Write a Python function `def train(args, slot_list, model, tokenizer, processor, continue_from_global_step=0)` to solve the following problem:
Train the model
Here is the function:
def train(args, slot_list, model, tokenizer, processor, continue_from_global_step=0):
    """Train the model.

    Standard HF-style loop: gradient accumulation over `args.accum` batches,
    linear warmup/decay LR schedule, optional apex AMP and DistributedDataParallel,
    periodic console logging and state-dict checkpointing.

    Args:
        args: parsed command-line namespace (see fetch_args).
        slot_list: dialog-state slot names, forwarded to data loading.
        model: the DST model to train (wrapped in-place for AMP/DDP).
        tokenizer: forwarded to load_and_cache_examples.
        processor: audio processor used to pad/batch raw waveforms.
        continue_from_global_step: if > 0, fast-forward the LR scheduler to this
            step before doing real work (checkpoint resumption).

    Returns:
        (global_step, average training loss per optimization step).
    """
    if args.debug:
        train_dataset, train_features, train_audio = load_and_cache_examples(args, slot_list, 'debug', tokenizer)
    else:
        train_dataset, train_features, train_audio = load_and_cache_examples(args, slot_list, 'train', tokenizer)
    args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
    train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
    # Total optimizer steps = batches per epoch / accumulation * epochs.
    t_total = len(train_dataloader) // args.accum * args.num_train_epochs
    num_warmup_steps = int(t_total * args.warmup_proportion)
    # Prepare optimizer and schedule (linear warmup and decay)
    no_decay = ['bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [
        {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
         'weight_decay': args.weight_decay},
        {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
    ]
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.lr, eps=args.adam_epsilon)
    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=num_warmup_steps,
                                                num_training_steps=t_total)
    if not args.no_amp:
        # NOTE(review): `amp` (NVIDIA apex) is not among this file's visible
        # imports — confirm it is importable whenever --no_amp is unset.
        model, optimizer = amp.initialize(model, optimizer, opt_level=args.amp_opt_level)
    # multi-gpu training (should be after apex amp initialization)
    model_single_gpu = model
    # Distributed training (should be after apex amp initialization)
    if args.local_rank != -1:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],
                                                          output_device=args.local_rank,
                                                          find_unused_parameters=True)
    # Train!
    logger.info("***** Running training *****")
    logger.info(" Num examples = %d", len(train_dataset))
    logger.info(" Num Epochs = %d", args.num_train_epochs)
    logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
    logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d",
                args.train_batch_size * args.accum * (
                    torch.distributed.get_world_size() if args.local_rank != -1 else 1))
    logger.info(" Gradient Accumulation steps = %d", args.accum)
    logger.info(" Total optimization steps = %d", t_total)
    logger.info(" Warmup steps = %d", num_warmup_steps)
    if continue_from_global_step > 0:
        logger.info("Fast forwarding to global step %d to resume training from latest checkpoint...",
                    continue_from_global_step)
    global_step = 0
    tr_loss, logging_loss = 0.0, 0.0
    model.zero_grad()
    train_iterator = trange(int(args.num_train_epochs), desc="Epoch")
    set_seed(args)  # Added here for reproductibility (even between python 2 and 3)
    for epoch in train_iterator:
        epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])
        model.train()
        # NOTE(review): both counters start at 1 (not 0), which slightly biases
        # the displayed average loss early in each epoch; kept as-is.
        batch_loss = batch_step = 1
        for step, batch in enumerate(epoch_iterator):
            # Fast-forward the scheduler (no forward/backward) until we reach
            # the step the checkpoint was saved at.
            if global_step < continue_from_global_step:
                if (step + 1) % args.accum == 0:
                    scheduler.step()
                    global_step += 1
                continue
            batch = batch_to_device(batch, args.device)
            # batch[-1] holds example indices; audio is loaded lazily from .npy
            # files (one user-side and one system-side clip per example).
            audio = [train_audio[i] for i in batch[-1]]
            audio_a = [np.load(args.data_dir+'/'+i[0]) for i in audio]
            audio_b = [np.load(args.data_dir+'/'+i[1]) for i in audio]
            audio_a = processor(audio_a, sampling_rate=16000, padding=True, return_attention_mask=True,
                                return_tensors="pt")
            audio_b = processor(audio_b, sampling_rate=16000, padding=True, return_attention_mask=True,
                                return_tensors="pt")
            inputs = {'text_input': batch[0],
                      'text_mask': batch[1],
                      'role_token_id': batch[2],
                      'turn_id':batch[3],
                      'audio_input': (audio_a['input_values'].to(args.device), audio_b['input_values'].to(args.device)),
                      'audio_mask':(audio_a['attention_mask'].to(args.device), audio_b['attention_mask'].to(args.device)),
                      'start_pos': batch[4],
                      'end_pos': batch[5],
                      'inform_slot_id': batch[6],
                      'refer_id': batch[7],
                      'diag_state': batch[8],
                      'class_label_id': batch[9]}
            # print(batch[-1])
            # print(audio_a, audio_b)
            outputs = model(**inputs)
            loss = outputs[0]
            if args.n_gpu > 1:
                loss = loss.mean()  # mean() to average on multi-gpu parallel (not distributed) training
            if args.accum > 1:
                loss = loss / args.accum
            if not args.no_amp:
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
                torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
            else:
                loss.backward()
                torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
            tr_loss += loss.item()
            batch_loss += loss.item()
            if (step + 1) % args.accum == 0:
                optimizer.step()
                scheduler.step()  # Update learning rate schedule
                model.zero_grad()
                global_step += 1
                batch_step += 1
                # Log metrics
                if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
                    print(batch_loss / batch_step)
                # Save model checkpoint
                if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:
                    output_dir = f'{args.ckpt_path}/{global_step}.pt'
                    model_to_save = model.module if hasattr(model, 'module') else model  # Take care of distributed/parallel training
                    torch.save(model_to_save.state_dict(), output_dir)
                    logger.info("Saving model checkpoint to %s", output_dir)
            epoch_iterator.set_description("Epoch {:0>3d} - Loss {:.4f} - Step {:}".format(epoch, batch_loss / batch_step, global_step))
        train_iterator.set_description("Epoch {:0>3d} - Loss {:.4f} - Step {:}".format(epoch, batch_loss / batch_step, global_step))
    # NOTE(review): raises ZeroDivisionError if no optimization step ever ran.
    return global_step, tr_loss / global_step
164,594 | import os
import re
import glob
import json
import math
import torch
import pickle
import random
import logging
import argparse
import numpy as np
from model import DSTModel
from tqdm import tqdm, trange
from utils_dst import InputFeatures
from torch.nn.utils.rnn import pad_sequence
from tensorlistdataset import TensorListDataset
from torch.utils.data.distributed import DistributedSampler
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
from transformers import WEIGHTS_NAME, RobertaTokenizerFast, WavLMConfig, RobertaConfig, Wav2Vec2Processor
from transformers import AdamW, get_linear_schedule_with_warmup, BertTokenizer
logger = logging.getLogger(__name__)
def batch_to_device(batch, device):
    """Move each batch element (a tensor, or a dict of tensors) onto *device*."""
    return tuple(
        {name: tensor.to(device) for name, tensor in element.items()}
        if isinstance(element, dict) else element.to(device)
        for element in batch
    )
def predict_and_format(args, model, tokenizer, features, per_slot_class_logits, per_slot_start_logits, per_slot_end_logits,
                       per_slot_refer_logits, ids, input_ids_unmasked, values, inform, prefix, ds):
    """Decode per-slot logits into dialog-state predictions for one batch.

    For every example, picks the argmax class per slot and updates the running
    dialog state accordingly: span copy (decoded from subword pieces), dontcare,
    true/false, system inform, or (in a second pass) referral to another slot.

    Args:
        features: dict of per-slot ground-truth tensors for this batch.
        per_slot_*_logits: dicts slot -> (batch, ...) logits from the model.
        ids: example guids of the form '...-...-turn' (turn index at split("-")[2]).
        input_ids_unmasked: raw token ids used to recover copied span text.
        values: per-example ground-truth slot values.
        inform: per-example values the system informed.
        ds: dialog state carried over from the previous batch.

    Returns:
        (prediction_list, dialog_state): one prediction dict per example, plus
        the dialog state to thread into the next batch.
    """
    prediction_list = []
    dialog_state = ds
    for i in range(len(ids)):
        # A turn index of 0 marks a new dialog: reset the carried state.
        if int(ids[i].split("-")[2]) == 0:
            dialog_state = {slot: 'none' for slot in model.slot_list}
        prediction = {}
        prediction_addendum = {}
        for slot in model.slot_list:
            class_logits = per_slot_class_logits[slot][i]
            start_logits = per_slot_start_logits[slot][i]
            end_logits = per_slot_end_logits[slot][i]
            refer_logits = per_slot_refer_logits[slot][i]
            # input_ids = features['text_input'][i].tolist()
            class_label_id = int(features['class_label_id'][slot][i])
            start_pos = int(features['start_pos'][slot][i])
            end_pos = int(features['end_pos'][slot][i])
            refer_id = int(features['refer_id'][slot][i])
            class_prediction = int(class_logits.argmax())
            start_prediction = int(start_logits.argmax())
            end_prediction = int(end_logits.argmax())
            refer_prediction = int(refer_logits.argmax())
            prediction['guid'] = ids[i].split("-")
            prediction['class_prediction_%s' % slot] = class_prediction
            prediction['class_label_id_%s' % slot] = class_label_id
            prediction['start_prediction_%s' % slot] = start_prediction
            prediction['start_pos_%s' % slot] = start_pos
            prediction['end_prediction_%s' % slot] = end_prediction
            prediction['end_pos_%s' % slot] = end_pos
            prediction['refer_prediction_%s' % slot] = refer_prediction
            prediction['refer_id_%s' % slot] = refer_id
            # prediction['input_ids_%s' % slot] = input_ids
            if class_prediction == model.class_types.index('dontcare'):
                dialog_state[slot] = 'dontcare'
            elif class_prediction == model.class_types.index('copy_value'):
                # Recover the predicted span and merge subword pieces back into words.
                pred = tokenizer.convert_ids_to_tokens(input_ids_unmasked[i])[start_prediction:end_prediction + 1]
                if args.model_type == 'roberta':
                    # RoBERTa BPE marks word-initial pieces with a leading 'Ġ';
                    # pieces without it are glued to the previous token.
                    tokens = []
                    for idx in range(len(pred)):
                        if pred[idx][0] == 'Ġ':
                            tokens.append(pred[idx][1:])
                        else:
                            if tokens:
                                tokens[-1] = tokens[-1]+pred[idx]
                            else:
                                tokens.append(pred[idx])
                else:
                    # WordPiece-style: '##'-prefixed pieces continue the previous token.
                    tokens = []
                    for idx in range(len(pred)):
                        if pred[idx][0] == '#':
                            if tokens:
                                tokens[-1] = tokens[-1]+pred[idx][2:]
                            else:
                                tokens.append(pred[idx][2:])
                        else:
                            tokens.append(pred[idx])
                # print(tokens)
                # tokens = pred
                dialog_state[slot] = ' '.join(tokens)
                dialog_state[slot] = re.sub("(^| )##", "", dialog_state[slot])
            elif 'true' in model.class_types and class_prediction == model.class_types.index('true'):
                dialog_state[slot] = 'true'
            elif 'false' in model.class_types and class_prediction == model.class_types.index('false'):
                dialog_state[slot] = 'false'
            elif class_prediction == model.class_types.index('inform'):
                dialog_state[slot] = inform[i][slot]
            # Referral case is handled below
            prediction_addendum['slot_prediction_%s' % slot] = dialog_state[slot]
            prediction_addendum['slot_groundtruth_%s' % slot] = values[i][slot]
        # Referral case. All other slot values need to be seen first in order
        # to be able to do this correctly.
        for slot in model.slot_list:
            class_logits = per_slot_class_logits[slot][i]
            refer_logits = per_slot_refer_logits[slot][i]
            class_prediction = int(class_logits.argmax())
            refer_prediction = int(refer_logits.argmax())
            if 'refer' in model.class_types and class_prediction == model.class_types.index('refer'):
                # Only slots that have been mentioned before can be referred to.
                # One can think of a situation where one slot is referred to in the same utterance.
                # This phenomenon is however currently not properly covered in the training data
                # label generation process.
                dialog_state[slot] = dialog_state[model.slot_list[refer_prediction - 1]]
                prediction_addendum['slot_prediction_%s' % slot] = dialog_state[slot]  # Value update
        prediction.update(prediction_addendum)
        prediction_list.append(prediction)
    return prediction_list, dialog_state
def evaluate(args, dataset, features, audio, processor, model, tokenizer, prefix=""):
    """Run sequential-order evaluation and dump per-example predictions to json.

    Iterates the dataset in order (required, since the dialog state is threaded
    across batches), runs the model on text + paired audio clips, decodes slot
    predictions via predict_and_format, and writes the flattened prediction
    list to {args.pred_path}/{prefix}.json.

    Args:
        dataset/features/audio: outputs of load_and_cache_examples.
        processor: audio processor for padding/batching waveforms.
        prefix: tag used for the log banner and the output file name.
    """
    args.eval_batch_size = args.per_gpu_eval_batch_size
    eval_sampler = SequentialSampler(dataset) # Note that DistributedSampler samples randomly
    eval_dataloader = DataLoader(dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
    # Eval!
    logger.info("***** Running evaluation {} *****".format(prefix))
    logger.info(" Num examples = %d", len(dataset))
    logger.info(" Batch size = %d", args.eval_batch_size)
    all_results = []
    all_preds = []
    ds = {slot: 'none' for slot in model.slot_list}
    with torch.no_grad():
        # Per-slot running class state, one entry per position in the batch.
        diag_state = {slot: torch.tensor([0 for _ in range(args.eval_batch_size)]).to(args.device) for slot in
                      model.slot_list}
        for batch in tqdm(eval_dataloader, desc="Evaluating"):
            model.eval()
            batch = batch_to_device(batch, args.device)
            # Reset dialog state if turn is first in the dialog.
            turn_itrs = [features[i.item()].guid.split('-')[2] for i in batch[-1]]
            reset_diag_state = np.where(np.array(turn_itrs) == '0')[0]
            for slot in model.slot_list:
                for i in reset_diag_state:
                    diag_state[slot][i] = 0
            with torch.no_grad():
                # batch[-1] holds example indices; load the paired audio clips.
                all_audio = [audio[i] for i in batch[-1]]
                audio_a = [np.load(args.data_dir+'/'+i[0]) for i in all_audio]
                audio_b = [np.load(args.data_dir+'/'+i[1]) for i in all_audio]
                audio_a = processor(audio_a, sampling_rate=16000, padding=True, return_attention_mask=True,
                                    return_tensors="pt")
                audio_b = processor(audio_b, sampling_rate=16000, padding=True, return_attention_mask=True,
                                    return_tensors="pt")
                inputs = {'text_input': batch[0],
                          'text_mask': batch[1],
                          'role_token_id': batch[2],
                          'turn_id':batch[3],
                          'audio_input': (audio_a['input_values'].to(args.device), audio_b['input_values'].to(args.device)),
                          'audio_mask':(audio_a['attention_mask'].to(args.device), audio_b['attention_mask'].to(args.device)),
                          'start_pos': batch[4],
                          'end_pos': batch[5],
                          'inform_slot_id': batch[6],
                          'refer_id': batch[7],
                          'diag_state': batch[8],
                          'class_label_id': batch[9]}
                unique_ids = [features[i.item()].guid for i in batch[-1]]
                values = [features[i.item()].values for i in batch[-1]]
                input_ids_unmasked = [features[i.item()].text_inputs for i in batch[-1]]
                inform = [features[i.item()].inform for i in batch[-1]]
                outputs = model(**inputs)
                # Update dialog state for next turn.
                for slot in model.slot_list:
                    updates = outputs[2][slot].max(1)[1]
                    for i, u in enumerate(updates):
                        if u != 0:
                            diag_state[slot][i] = u
                # results = eval_metric(model, inputs, outputs[0], outputs[1], outputs[2], outputs[3], outputs[4], outputs[5])
                preds, ds = predict_and_format(args, model, tokenizer, inputs, outputs[2], outputs[3], outputs[4], outputs[5],
                                               unique_ids, input_ids_unmasked, values, inform, prefix, ds)
                all_preds.append(preds)
    all_preds = [item for sublist in all_preds for item in sublist] # Flatten list
    # Generate final results
    # final_results = {}
    # for k in all_results[0].keys():
    #     final_results[k] = torch.stack([r[k] for r in all_results]).mean()
    # Write final predictions (for evaluation with external tool)
    output_prediction_file = f"{args.pred_path}/{prefix}.json"
    with open(output_prediction_file, "w") as f:
        json.dump(all_preds, f, indent=2)
    # return final_results
164,595 | import os
import re
import glob
import json
import math
import torch
import pickle
import random
import logging
import argparse
import numpy as np
from model import DSTModel
from tqdm import tqdm, trange
from utils_dst import InputFeatures
from torch.nn.utils.rnn import pad_sequence
from tensorlistdataset import TensorListDataset
from torch.utils.data.distributed import DistributedSampler
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
from transformers import WEIGHTS_NAME, RobertaTokenizerFast, WavLMConfig, RobertaConfig, Wav2Vec2Processor
from transformers import AdamW, get_linear_schedule_with_warmup, BertTokenizer
def eval_metric(model, features, total_loss, per_slot_per_example_loss, per_slot_class_logits, per_slot_start_logits,
                per_slot_end_logits, per_slot_refer_logits):
    """Compute per-slot and joint-goal accuracies for one batch.

    Args:
        model: provides slot_list and class_types.
        features: dict of per-slot ground-truth tensors (class/start/end/refer).
        total_loss: batch loss to report under key 'loss'.
        per_slot_*: dicts slot -> per-example losses / logits.

    Returns:
        dict of scalar tensors: per-slot class/token/refer/total accuracies,
        per-slot mean loss, joint-goal accuracy ('eval_accuracy_goal'), 'loss'.
    """
    metric_dict = {}
    per_slot_correctness = {}
    for slot in model.slot_list:
        per_example_loss = per_slot_per_example_loss[slot]
        class_logits = per_slot_class_logits[slot]
        start_logits = per_slot_start_logits[slot]
        end_logits = per_slot_end_logits[slot]
        refer_logits = per_slot_refer_logits[slot]
        class_label_id = features['class_label_id'][slot]
        start_pos = features['start_pos'][slot]
        end_pos = features['end_pos'][slot]
        refer_id = features['refer_id'][slot]
        _, class_prediction = class_logits.max(1)
        class_correctness = torch.eq(class_prediction, class_label_id).float()
        class_accuracy = class_correctness.mean()
        # "is pointable" means whether class label is "copy_value",
        # i.e., that there is a span to be detected.
        token_is_pointable = torch.eq(class_label_id, model.class_types.index('copy_value')).float()
        _, start_prediction = start_logits.max(1)
        start_correctness = torch.eq(start_prediction, start_pos).float()
        _, end_prediction = end_logits.max(1)
        end_correctness = torch.eq(end_prediction, end_pos).float()
        # Both span boundaries must be right for the span to count.
        token_correctness = start_correctness * end_correctness
        token_accuracy = (token_correctness * token_is_pointable).sum() / token_is_pointable.sum()
        # NaNs mean that none of the examples in this batch contain spans. -> division by 0
        # The accuracy therefore is 1 by default. -> replace NaNs
        if math.isnan(token_accuracy):
            token_accuracy = torch.tensor(1.0, device=token_accuracy.device)
        # Index -1 is used as a never-matching sentinel when 'refer' is absent.
        token_is_referrable = torch.eq(class_label_id, model.class_types.index('refer') if 'refer' in model.class_types else -1).float()
        _, refer_prediction = refer_logits.max(1)
        refer_correctness = torch.eq(refer_prediction, refer_id).float()
        refer_accuracy = refer_correctness.sum() / token_is_referrable.sum()
        # NaNs mean that none of the examples in this batch contain referrals. -> division by 0
        # The accuracy therefore is 1 by default. -> replace NaNs
        if math.isnan(refer_accuracy) or math.isinf(refer_accuracy):
            refer_accuracy = torch.tensor(1.0, device=refer_accuracy.device)
        # A slot is fully correct when its class is right AND, where applicable,
        # its span and referral predictions are right too.
        total_correctness = class_correctness * (token_is_pointable * token_correctness + (1 - token_is_pointable))\
                            * (token_is_referrable * refer_correctness + (1 - token_is_referrable))
        total_accuracy = total_correctness.mean()
        loss = per_example_loss.mean()
        metric_dict['eval_accuracy_class_%s' % slot] = class_accuracy
        metric_dict['eval_accuracy_token_%s' % slot] = token_accuracy
        metric_dict['eval_accuracy_refer_%s' % slot] = refer_accuracy
        metric_dict['eval_accuracy_%s' % slot] = total_accuracy
        metric_dict['eval_loss_%s' % slot] = loss
        per_slot_correctness[slot] = total_correctness
    # Joint goal: every slot must be fully correct for an example to count.
    goal_correctness = torch.stack([c for c in per_slot_correctness.values()], 1).prod(1)
    goal_accuracy = goal_correctness.mean()
    metric_dict['eval_accuracy_goal'] = goal_accuracy
    metric_dict['loss'] = total_loss
    return metric_dict
164,597 | import re
import os
import json
import pickle
import librosa
import argparse
import numpy as np
from tqdm import tqdm
from joblib import Parallel, delayed
from utils_dst import (DSTExample, convert_to_unicode)
def load_acts(input_file, data_indexs, slot_list):
    """Collect system-act slot values (inform/recommend/select/book) per turn.

    Args:
        input_file: mapping of dialog id -> dialog dict with a 'log' turn list.
        data_indexs: iterable of dialog ids to process.
        slot_list: allowed 'domain-slot' names ('booking' acts bypass this filter).

    Returns:
        dict keyed by (dialog_id, turn_index, slot) -> [value]; each informed
        value is registered for the turn it occurred in and the following turn.
    """
    s_dict = {}
    for d in data_indexs:
        try:
            utterences = input_file[d]['log']
        except Exception as e:
            # Bug fix: skip missing/malformed dialogs. The original fell
            # through here, reusing the previous dialog's turns (or raising
            # NameError when the very first dialog was missing).
            print(d, e)
            continue
        # System turns sit at odd indices (usr/sys alternate, usr first).
        for utt_id in range(1, len(utterences), 2):
            acts_list = utterences[utt_id]['dialog_act']
            for a in acts_list:
                aa = a.lower().split('-')  # 'domain-act'
                if len(aa) < 2 or aa[1] not in ['inform', 'recommend', 'select', 'book']:
                    continue
                for i in acts_list[a]:
                    s = i[0].lower()          # slot name
                    v = i[1].lower().strip()  # slot value
                    if s == 'none' or v == '?' or v == 'none':
                        continue
                    slot = aa[0] + '-' + s
                    if slot in ACTS_DICT:
                        slot = ACTS_DICT[slot]
                    if slot not in slot_list and aa[0] != 'booking':
                        continue
                    t_key = (utt_id - 1) // 2  # user-turn index this system act follows
                    s_dict[(d, t_key, slot)] = [v]
                    # Also expose the informed value to the next turn.
                    s_dict[(d, t_key + 1, slot)] = [v]
    return s_dict
def normalize_label(slot, value_label):
    """Canonicalize a raw slot value for dialog-state comparison.

    Lower-cases string labels, collapses list labels to their first variant,
    maps empty/'not mentioned' to 'none', routes time slots through
    normalize_time, cleans name-like slots, and maps yes/no (and hotel-type)
    onto the boolean classes 'true'/'false'.
    """
    # Normalization of capitalization / list-valued labels.
    if isinstance(value_label, str):
        value_label = value_label.lower().strip()
    elif isinstance(value_label, list):
        # TODO: Workaround. Multiwoz 2.2 supports variants directly in the
        # labels; take the first one (note: list elements are not lower-cased).
        value_label = value_label[0] if value_label else ""
    # Empty slots become the explicit 'none' class.
    if value_label in ('', "not mentioned"):
        return "none"
    # 'dontcare' class.
    if value_label == 'dont care':
        return "dontcare"
    # Time-valued slots get their own normalizer.
    if "leaveAt" in slot or "arriveBy" in slot or slot == 'restaurant-book time':
        return normalize_time(value_label)
    # Name-like slots: drop possessive "'s" and split "guesthouse".
    if any(tok in slot for tok in ("type", "name", "destination", "departure")):
        value_label = re.sub(" ?'s", "s", value_label)
        value_label = re.sub("guesthouse", "guest house", value_label)
    # Map to boolean slots.
    if slot in ('hotel-parking', 'hotel-internet'):
        if value_label == 'yes':
            return "true"
        if value_label == "no":
            return "false"
    if slot == 'hotel-type':
        if value_label == "hotel":
            return "true"
        if value_label == "guest house":
            return "false"
    return value_label
def get_turn_label(value_label, usr_utt_tok, slot, seen_slots, slot_last_occurrence):
    """Derive the class type, token labels and referred slot for one turn.

    Returns (referred_slot, usr_utt_tok_label, class_type): 'copy_value' with
    1-marked span tokens when the value appears in the user utterance, 'refer'
    when it matches a previously seen slot, the literal class for
    none/dontcare/true/false, and 'unpointable' otherwise.
    """
    token_labels = [0] * len(usr_utt_tok)
    referred_slot = 'none'
    # Direct class labels need no span or referral search.
    if value_label in ('none', 'dontcare', 'true', 'false'):
        return referred_slot, token_labels, value_label
    in_usr, usr_pos = check_label_existence(value_label, usr_utt_tok)
    if in_usr:
        # Mark either only the last occurrence or every occurrence.
        spans = usr_pos[-1:] if slot_last_occurrence else usr_pos
        for (start, end) in spans:
            for idx in range(start, end):
                token_labels[idx] = 1
        return referred_slot, token_labels, 'copy_value'
    referred_slot = check_slot_referral(value_label, slot, seen_slots)
    class_type = 'refer' if referred_slot != 'none' else 'unpointable'
    return referred_slot, token_labels, class_type
def tokenize(utt):
    """Lower-case and normalize an utterance, then split it into tokens."""
    normalized = normalize_text(convert_to_unicode(utt).lower())
    return utt_to_token(normalized)
class DSTExample(object):
    """
    A single training/test example for the DST dataset.

    Every constructor argument is stored verbatim as a same-named attribute;
    the repr dumps each attribute on its own 'name : value' line.
    """

    def __init__(self, guid, text_a, text_b,
                 audio_a, audio_b, history, text_a_label, text_b_label,
                 history_label=None,
                 values=None,
                 inform_label=None,
                 inform_slot_label=None,
                 refer_label=None,
                 diag_state=None,
                 class_label=None):
        self.guid = guid
        self.text_a = text_a
        self.text_b = text_b
        self.audio_a = audio_a
        self.audio_b = audio_b
        self.history = history
        self.text_a_label = text_a_label
        self.text_b_label = text_b_label
        self.history_label = history_label
        self.values = values
        self.inform_label = inform_label
        self.inform_slot_label = inform_slot_label
        self.refer_label = refer_label
        self.diag_state = diag_state
        self.class_label = class_label

    def __repr__(self):
        # One "name : value" line per attribute, in insertion order.
        return ''.join(f'{key} : {val} \n' for key, val in self.__dict__.items())

    def __str__(self):
        return repr(self)
def create_examples(args, input_data, data_indexs, slot_list, label_maps, short=False, save_audio=False):
    """Convert MultiWOZ-style dialogs (with aligned audio) into DSTExample objects.

    For each dialog in `data_indexs`: tokenize every utterance, optionally cut
    the per-turn audio out of the session recording and save it as .npy, derive
    per-slot span/class/inform/referral labels for every (user, system) turn
    pair, and append one DSTExample per turn.  The examples are pickled to
    `{args.output_path}/{split}_example.pkl` and the average utterance length
    in seconds is returned.

    NOTE(review): `split` and `label_value_repetitions` are module-level names
    not defined in this function — confirm they exist where this runs.
    NOTE(review): `cur_aud = audio[...]` below runs even when save_audio is
    False, in which case `audio` is never assigned (NameError) — confirm this
    path is only used with save_audio=True.
    """
    sys_inform_dict = load_acts(input_data, data_indexs, slot_list)
    LABEL_MAPS, examples, samples, avg_len, utts = label_maps, [], 0, 0, 0
    audios = os.listdir(args.audio_path)  # NOTE(review): unused
    for dialog_id in tqdm(data_indexs):
        try:
            entry = input_data[dialog_id]
        except Exception as e:
            print(e, dialog_id)
            continue
        utterances = entry['log']
        # Running per-slot value state over the whole dialog.
        cumulative_labels = {slot: 'none' for slot in slot_list}
        utt_tok_list = []
        utt_audio_list = []
        mod_slots_list = []
        if save_audio:
            # audio, _ = librosa.load(f'{args.audio_path}/{dialog_id}/speech.wav', sr=16000)
            audio, _ = librosa.load(f'{args.audio_path}/{dialog_id}.wav', sr=16000)
        # Dialogs must strictly alternate user -> system; start expecting user.
        usr_sys_switch = True
        turn_itr = 0
        for utt in utterances:
            # System turns are the ones carrying belief-state metadata.
            is_sys_utt = utt['metadata'] != {}
            if usr_sys_switch == is_sys_utt:
                print("WARN: Wrong order of system and user utterances. Skipping rest of dialog %s" % (dialog_id))
                break
            usr_sys_switch = is_sys_utt
            if is_sys_utt:
                turn_itr += 1
            # Word timestamps are in ms; *16 converts to 16kHz sample indices.
            start = utt['words'][0]['BeginTime'] * 16
            speaker = 'sys' if is_sys_utt else 'usr'
            cur_aud = audio[start:utt['words'][-1]['EndTime'] * 16]
            # save = f'audio/{dialog_id}{turn_itr}-{speaker}.npy'
            npy_dir = args.audio_path + '_npy'
            # save = f'{args.audio_path}_npy/{dialog_id}{turn_itr}-{speaker}.npy'
            save = f'{npy_dir}/{dialog_id}{turn_itr}-{speaker}.npy'
            # print(save)
            # save_path = f'{args.root}/{save}'
            if save_audio:
                # save_path = f'{args.root}/{save}'
                save_path = save
                np.save(save_path, cur_aud)
            utt_tok_list.append(tokenize(utt['text'])) # normalize utterances
            utt_audio_list.append(save)
            utts += 1
            # Utterance duration in seconds, from the word timestamps.
            avg_len += (utt['words'][-1]['EndTime'] * 16 - utt['words'][0]['BeginTime'] * 16) / 16000
            modified_slots = {}
            # If sys utt, extract metadata (identify and collect modified slots)
            if is_sys_utt:
                for d in utt['metadata']:
                    booked = utt['metadata'][d]['book']['booked']
                    booked_slots = {}
                    if booked != []:
                        for s in booked[0]:
                            booked_slots[s] = normalize_label('%s-%s' % (d, s), booked[0][s]) # normalize labels
                    # Check the semi and the inform slots
                    for category in ['book', 'semi']:
                        for s in utt['metadata'][d][category]:
                            cs = '%s-book %s' % (d, s) if category == 'book' else '%s-%s' % (d, s)
                            value_label = normalize_label(cs, utt['metadata'][d][category][s]) # normalize labels
                            # Booked values take precedence over semi/book annotations.
                            if s in booked_slots:
                                value_label = booked_slots[s]
                            # Record only slots whose value actually changed this turn.
                            if cs in slot_list and cumulative_labels[cs] != value_label:
                                modified_slots[cs] = value_label
                                cumulative_labels[cs] = value_label
                mod_slots_list.append(modified_slots.copy())
        # Second pass: build one DSTExample per (user, system) turn pair.
        turn_itr = 0
        diag_seen_slots_dict = {}
        diag_seen_slots_value_dict = {slot: 'none' for slot in slot_list}
        diag_state = {slot: 'none' for slot in slot_list} # state accumulated over the whole dialog
        sys_utt_tok = []
        sys_utt_aud = []
        usr_utt_tok = []
        usr_utt_aud = []
        hst_utt_tok = []
        hst_utt_aud = []
        hst_utt_tok_label_dict = {slot: [] for slot in slot_list}
        for i in range(1, len(utt_tok_list), 2):
            sys_utt_tok_label_dict = {}
            usr_utt_tok_label_dict = {}
            value_dict = {}
            inform_dict = {}
            inform_slot_dict = {}
            referral_dict = {slot: 'none' for slot in slot_list}
            class_type_dict = {} # state updated in the current turn
            usr_utt_tok = utt_tok_list[i - 1]
            sys_utt_tok = utt_tok_list[i]
            turn_slots = mod_slots_list[turn_itr]
            usr_utt_aud = utt_audio_list[i - 1]
            sys_utt_aud = utt_audio_list[i]
            guid = '%s-%s-%s' % ('train', str(dialog_id), str(turn_itr))
            new_hst_utt_tok = hst_utt_tok.copy()
            new_hst_utt_tok_label_dict = hst_utt_tok_label_dict.copy()
            new_hst_utt_tok += usr_utt_tok + sys_utt_tok
            new_diag_state = diag_state.copy()
            for slot in slot_list:
                value_label = 'none'
                if slot in turn_slots:
                    value_label = turn_slots[slot]
                    value_dict[slot] = value_label
                elif label_value_repetitions and slot in diag_seen_slots_dict:
                    # print('label_value_repetitions')
                    # print(slot, diag_seen_slots_value_dict[slot], dialog_id)
                    value_label = diag_seen_slots_value_dict[slot]
                # Get dialog act annotations
                informed_value = 'none'
                inform_slot_dict[slot] = 0
                if (str(dialog_id), turn_itr, slot) in sys_inform_dict and slot in turn_slots:
                    inform_slot_dict[slot] = 1
                    informed_value = normalize_label(slot, sys_inform_dict[(str(dialog_id), turn_itr, slot)])
                (referred_slot, usr_utt_tok_label, class_type) = get_turn_label(value_label, usr_utt_tok, slot,
                                                                               diag_seen_slots_value_dict,
                                                                               slot_last_occurrence=True)
                inform_dict[slot] = informed_value
                # System tokens are never span-labeled in this setup.
                sys_utt_tok_label = [0 for _ in sys_utt_tok]
                if label_value_repetitions and slot in diag_seen_slots_dict:
                    # Ambiguous repeated value: cannot attribute the span to one slot.
                    if class_type == 'copy_value' and list(diag_seen_slots_value_dict.values()).count(value_label) > 1:
                        class_type = 'none'
                        usr_utt_tok_label = [0 for _ in usr_utt_tok_label]
                sys_utt_tok_label_dict[slot] = sys_utt_tok_label
                usr_utt_tok_label_dict[slot] = usr_utt_tok_label
                # History labels grow newest-first.
                new_hst_utt_tok_label_dict[slot] = usr_utt_tok_label + sys_utt_tok_label + new_hst_utt_tok_label_dict[slot]
                if inform_slot_dict[slot]:
                    class_type_dict[slot] = 'inform'
                    class_type = 'inform'
                    referral_dict[slot] = 'none'
                elif class_type == 'unpointable':
                    class_type_dict[slot] = 'none'
                    referral_dict[slot] = 'none'
                elif slot in diag_seen_slots_dict and class_type == diag_seen_slots_dict[
                    slot] and class_type != 'copy_value' and class_type != 'inform':
                    # Unchanged non-span class: nothing new to predict this turn.
                    class_type_dict[slot] = 'none'
                    referral_dict[slot] = 'none'
                else:
                    class_type_dict[slot] = class_type
                    referral_dict[slot] = referred_slot
                if class_type != 'none':
                    diag_seen_slots_dict[slot] = class_type
                    diag_seen_slots_value_dict[slot] = value_label
                    new_diag_state[slot] = class_type
                    if class_type == 'unpointable':
                        new_diag_state[slot] = 'copy_value'
            txt_a = usr_utt_tok
            txt_b = sys_utt_tok
            aud_a = usr_utt_aud
            aud_b = sys_utt_aud
            txt_a_lbl = usr_utt_tok_label_dict
            txt_b_lbl = sys_utt_tok_label_dict
            examples.append(DSTExample(
                guid=guid,
                text_a=txt_a,
                text_b=txt_b,
                audio_a=aud_a,
                audio_b=aud_b,
                history=hst_utt_tok,
                text_a_label=txt_a_lbl,
                text_b_label=txt_b_lbl,
                history_label=hst_utt_tok_label_dict,
                values=diag_seen_slots_value_dict.copy(),
                inform_label=inform_dict,
                inform_slot_label=inform_slot_dict,
                refer_label=referral_dict,
                diag_state=diag_state,
                class_label=class_type_dict))
            hst_utt_tok_label_dict = new_hst_utt_tok_label_dict.copy()
            hst_utt_tok = new_hst_utt_tok.copy()
            diag_state = new_diag_state.copy()
            turn_itr += 1
        samples += 1
        if short and samples == 100: break
    pickle.dump(examples, open(f'{args.output_path}/{split}_example.pkl', 'wb'))
    return avg_len / utts
import six
import json
import torch
import pickle
import logging
import argparse
import numpy as np
from tqdm import tqdm
from collections import defaultdict
from joblib import Parallel, delayed
from transformers import Wav2Vec2Processor, RobertaTokenizerFast, BertTokenizer
class InputFeatures(object):
    """A single set of features of data.

    Plain container for one model input: token ids/masks, audio file refs,
    per-slot span positions, and the various per-slot supervision labels.
    """
    def __init__(self, text_inputs, text_mask, role_token_ids, turn_ids,
                 audio_inputs, start_pos, end_pos, values=None, inform=None,
                 inform_slot=None,
                 refer_id=None,
                 diag_state=None,
                 class_label_id=None,
                 guid="NONE"):
        # Assign attributes in a fixed order so repr output stays stable.
        field_order = ('guid', 'text_inputs', 'text_mask', 'audio_inputs',
                       'role_token_ids', 'turn_ids', 'start_pos', 'end_pos',
                       'values', 'inform', 'inform_slot', 'refer_id',
                       'diag_state', 'class_label_id')
        supplied = dict(locals())
        for field in field_order:
            setattr(self, field, supplied[field])

    def __repr__(self):
        return ''.join(f'{k} : {v} \n' for k, v in self.__dict__.items())
def get_start_end_pos(class_type, token_label_ids, max_seq_length):
    """Locate the first contiguous run of 1s in `token_label_ids`.

    Returns (class_type, start, end) inclusive; downgrades 'copy_value' to
    'none' when no labeled token exists.  `max_seq_length` is unused but kept
    for API compatibility.
    """
    has_span = 1 in token_label_ids
    if class_type == 'copy_value' and not has_span:
        print("copy_value label, but token_label not detected. Setting label to 'none'.")
        class_type = 'none'
    start_pos, end_pos = 0, 0
    if has_span:
        start_pos = token_label_ids.index(1)
        tail = token_label_ids[start_pos:]
        if 0 in tail:
            end_pos = tail.index(0) + start_pos - 1
        else:
            end_pos = len(tail) + start_pos - 1
        # Sanity check: the detected span must be all ones.
        for idx in range(start_pos, end_pos + 1):
            assert token_label_ids[idx] == 1
    return class_type, start_pos, end_pos
def _tokenize_text_and_label(text, text_label_dict, slot, tokenizer, model_specs, slot_value_dropout):
    """Sub-tokenize `text`, replicating each word-level label onto its pieces.

    `slot_value_dropout` is currently unused — NOTE(review): confirm whether
    dropout of slot values was intended here.
    """
    word_labels = text_label_dict[slot]
    tokens = []
    token_labels = []
    for word, label in zip(text, word_labels):
        word = convert_to_unicode(word)
        if model_specs['MODEL_TYPE'] == 'roberta':
            # RoBERTa's BPE is whitespace-sensitive; a leading space marks a word start.
            word = ' ' + word
        pieces = tokenizer.tokenize(word)  # Most time intensive step
        tokens.extend(pieces)
        token_labels.extend([label] * len(pieces))
    assert len(tokens) == len(token_labels)
    return tokens, token_labels
def _get_token_label_ids(token_labels_a, token_labels_b, token_labels_history, max_seq_length, model_specs):
token_label_ids = []
token_label_ids.append(0) # [CLS]/<s>
for token_label in token_labels_a:
token_label_ids.append(token_label)
token_label_ids.append(0) # [SEP]/</s></s>
if model_specs['MODEL_TYPE'] == 'roberta':
token_label_ids.append(0)
for token_label in token_labels_b:
token_label_ids.append(token_label)
token_label_ids.append(0) # [SEP]/</s></s>
# if model_specs['MODEL_TYPE'] == 'roberta':
# token_label_ids.append(0)
# for token_label in token_labels_history:
# token_label_ids.append(token_label)
# token_label_ids.append(0) # [SEP]/</s>
while len(token_label_ids) < max_seq_length:
token_label_ids.append(0) # padding
assert len(token_label_ids) == max_seq_length
return token_label_ids
def get_transformer_input(args, tokens_a, tokens_b, history, max_seq_length, tokenizer, model_specs):
    """Pack two turns (and optional history) into padded id/mask/segment lists.

    Returns (tokens, input_mask, role_token_ids, turn_ids); tokens/mask/turn
    ids all have length `max_seq_length`, role ids cover only the two turns.
    """
    model_type = model_specs['MODEL_TYPE']
    if model_type == 'roberta':
        # <s> a </s></s> b </s></s>
        tokens_a = [0] + tokenizer.convert_tokens_to_ids(tokens_a) + [2, 2]
        tokens_b = tokenizer.convert_tokens_to_ids(tokens_b) + [2, 2]
    elif model_type == 'bert':
        # [CLS] a [SEP] b [SEP]
        tokens_a = [101] + tokenizer.convert_tokens_to_ids(tokens_a) + [102]
        tokens_b = tokenizer.convert_tokens_to_ids(tokens_b) + [102]
    if args.his:
        history_ids = tokenizer.convert_tokens_to_ids(history)
        tokens = tokens_a + tokens_b + history_ids
        turn_ids = [0] * (len(tokens_a) + len(tokens_b)) + [1] * len(history_ids)
    else:
        tokens = tokens_a + tokens_b
        turn_ids = [0] * (len(tokens_a) + len(tokens_b))
    # Truncate to 511 ids and always terminate with a separator token.
    tokens = tokens[:511] + [model_specs['SEP_TOKEN']]
    turn_ids = turn_ids[:511] + [1]
    role_token_ids = [0] * len(tokens_a) + [1] * len(tokens_b)
    input_mask = [1] * len(tokens)
    pad_count = max_seq_length - len(tokens)
    tokens = tokens + [model_specs['PAD_TOKEN']] * pad_count
    input_mask = input_mask + [0] * pad_count
    turn_ids = turn_ids + [1] * pad_count
    assert len(tokens) == len(input_mask) == len(turn_ids) == max_seq_length
    return tokens, input_mask, role_token_ids, turn_ids
The provided code snippet includes necessary dependencies for implementing the `convert_examples_to_features` function. Write a Python function `def convert_examples_to_features(args, examples, slot_list, class_types, model_type, tokenizer, max_seq_length, slot_value_dropout=0.0)` to solve the following problem:
Loads a data file into a list of `InputBatch`s.
Here is the function:
def convert_examples_to_features(args, examples, slot_list, class_types, model_type, tokenizer, max_seq_length, slot_value_dropout=0.0):
    """Loads a data file into a list of `InputBatch`s.

    Tokenizes each DSTExample per slot, converts span labels to start/end
    positions, maps class/refer/state labels to indices, and packs everything
    into InputFeatures.  Only 'roberta' and 'bert' model types are supported;
    any other value leaves `model_specs` unbound (NameError).
    """
    if model_type == 'roberta':
        model_specs = {'MODEL_TYPE': 'roberta',
                       'CLS_TOKEN': '<s>',
                       'UNK_TOKEN': '<unk>',
                       'SEP_TOKEN': 2,
                       'PAD_TOKEN': 1,
                       'TOKEN_CORRECTION': 6}
    elif model_type == 'bert':
        model_specs = {'MODEL_TYPE': 'bert',
                       'CLS_TOKEN': '[CLS]',
                       'UNK_TOKEN': '[UNK]',
                       'SEP_TOKEN': 102,
                       'PAD_TOKEN': 0,
                       'TOKEN_CORRECTION': 4
                       }
    total_cnt = 0
    too_long_cnt = 0  # NOTE(review): never incremented (truncation check is disabled below)
    # refer_list maps a referred slot name to its index; 'none' is index 0.
    features, refer_list = [], ['none'] + slot_list
    session = ''
    # Convert single example
    for (example_index, example) in enumerate(tqdm(examples)):
        # if session != example.guid.split('-')[1]:
        #     session = example.guid.split('-')[1]
        #     his = defaultdict(list)
        total_cnt += 1
        value_dict = {}
        inform_dict = {}
        inform_slot_dict = {}
        refer_id_dict = {}
        diag_state_dict = {}
        class_label_id_dict = {}
        start_pos_dict = {}
        end_pos_dict = {}
        for slot in slot_list:
            tokens_a, token_labels_a = _tokenize_text_and_label(
                example.text_a, example.text_a_label, slot, tokenizer, model_specs, slot_value_dropout)
            tokens_b, token_labels_b = _tokenize_text_and_label(
                example.text_b, example.text_b_label, slot, tokenizer, model_specs, slot_value_dropout)
            if not args.his:
                tokens_history, token_labels_history = [], []
            else:
                tokens_history, token_labels_history = _tokenize_text_and_label(
                    example.history, example.history_label, slot, tokenizer, model_specs, slot_value_dropout)
            # input_text_too_long = _truncate_length_and_warn(
            #     tokens_a, tokens_b, tokens_history, max_seq_length, model_specs, example.guid)
            # if input_text_too_long:
            #     token_labels_a = token_labels_a[:len(tokens_a)]
            #     token_labels_b = token_labels_b[:len(tokens_b)]
            #     token_labels_history = token_labels_history[:len(tokens_history)]
            assert len(token_labels_a) == len(tokens_a)
            assert len(token_labels_b) == len(tokens_b)
            # assert len(token_labels_history) == len(tokens_history)
            token_label_ids = _get_token_label_ids(token_labels_a, token_labels_b, token_labels_history, max_seq_length, model_specs)
            value_dict[slot] = example.values[slot]
            inform_dict[slot] = example.inform_label[slot]
            # Span start/end; may downgrade 'copy_value' to 'none' when no span exists.
            class_label_mod, start_pos_dict[slot], end_pos_dict[slot] = get_start_end_pos(
                example.class_label[slot], token_label_ids, max_seq_length)
            if class_label_mod != example.class_label[slot]:
                example.class_label[slot] = class_label_mod
            inform_slot_dict[slot] = example.inform_slot_label[slot]
            refer_id_dict[slot] = refer_list.index(example.refer_label[slot]) if slot in example.refer_label else 0
            diag_state_dict[slot] = class_types.index(example.diag_state[slot])
            class_label_id_dict[slot] = class_types.index(example.class_label[slot])
        # Token ids are slot-independent; reuse the last slot's tokenization.
        tokens, input_mask, role_token_ids, turn_ids = get_transformer_input(args, tokens_a, tokens_b,
                                                                             tokens_history, max_seq_length,
                                                                             tokenizer, model_specs)
        # audio_inputs, audio_mask, audio_sep, role_audio_ids audio_a, audio_b, max_audio_length,
        # input_ids_unmasked = tokens
        features.append(
            InputFeatures(guid=example.guid, text_inputs=tokens, text_mask=input_mask, role_token_ids=role_token_ids,
                          turn_ids=turn_ids, audio_inputs=(example.audio_a, example.audio_b), start_pos=start_pos_dict, end_pos=end_pos_dict,
                          values=value_dict, inform=inform_dict, inform_slot=inform_slot_dict, refer_id=refer_id_dict,
                          diag_state=diag_state_dict, class_label_id=class_label_id_dict
                          ))
        # print(features[-1].audio_inputs[0].shape)
        # if example_index == 3:break
        # break
    return features
import gc
import json
import logging
import os
import textwrap
import torch
from torch.nn import functional as F
from torch.utils.data import DataLoader
from tqdm import tqdm
from anchor import logger_root
from common import setup_env, mk_parser, AdvantageLogger
from models import build_model_signature, build_tokenizer, build_model
from models.meta_optimizer import AttnOptimWrapper
from tasks import load_task
from utils.logger import setup_logger, tabular_pretty_print
from utils.tools import ensure_folder
def the_shape(pack):
    """Describe the shape of a (possibly nested) container of tensors.

    Lists/tuples are summarized as "<len> * <shape of first element>" —
    assumes homogeneous containers.  Tensors return their torch.Size.
    Any other type returns None.
    """
    if isinstance(pack, (list, tuple)):
        return f"{len(pack)} * {the_shape(pack[0])}"
    if isinstance(pack, torch.Tensor):
        return pack.size()
import gc
import json
import logging
import os
import textwrap
import torch
from torch.nn import functional as F
from torch.utils.data import DataLoader
from tqdm import tqdm
from anchor import logger_root
from common import setup_env, mk_parser, AdvantageLogger
from models import build_model_signature, build_tokenizer, build_model
from models.meta_optimizer import AttnOptimWrapper
from tasks import load_task
from utils.logger import setup_logger, tabular_pretty_print
from utils.tools import ensure_folder
def do_infer_probs(exemplar_attn_kv, exemplar_attn_mask, batched_choices_input):
    """Score each answer choice by its LM log-probability, reusing cached exemplar KV.

    For every batched choice, the exemplar attention cache is expanded to the
    batch size, a single forward pass is run, and the per-choice sum and
    length-normalized log-probabilities are gathered.

    NOTE(review): relies on module-level `args` and `model` — confirm they are
    defined where this runs.
    """
    batched_choices_logprobs = []
    for batched_one_choice_input in batched_choices_input:
        batch_input_ids, batch_attention_mask, batch_choice_start, batch_choice_end = batched_one_choice_input
        bs = len(batch_input_ids)
        # Mask covers the cached exemplar prefix plus the new choice tokens.
        merged_attn_mask = torch.cat((exemplar_attn_mask.expand(bs, -1), batch_attention_mask), dim=1)
        if args.model_type == "bloom":
            # [B*#Heads, Length, Hidden]
            def _expand(t, target_size):
                _bs, _head, _len, _hidden = 1, *t.size()
                return t.reshape(_bs, _head, _len, _hidden).expand(target_size * _bs, -1, -1, -1).reshape(target_size * _bs * _head, _len, _hidden)
            expand_exemplar_attn_kv = [[_expand(layer_k, bs), _expand(layer_v, bs)] for layer_k, layer_v in exemplar_attn_kv]
        else:
            # [B, #Heads, Length, Hidden]
            expand_exemplar_attn_kv = [[layer_k.expand((bs, -1, -1, -1)), layer_v.expand((bs, -1, -1, -1))] for layer_k, layer_v in exemplar_attn_kv]
        batched_logits = model(
            input_ids=batch_input_ids, # [B, L']
            attention_mask=merged_attn_mask, # [B, L + L']
            past_key_values=expand_exemplar_attn_kv, # num_layers * 2 * [B, num_heads, L, H]
        ).logits
        batched_output = F.log_softmax(batched_logits, dim=-1) # [B, L', Vocab]
        batched_one_choice_logprobs = []
        for input_ids, choice_start, choice_end, lm_logprobs in zip(batch_input_ids, batch_choice_start, batch_choice_end, batched_output):
            choice_tokens = input_ids[choice_start:choice_end].unsqueeze(1) # [L, 1]
            # Logits at position t predict token t+1, hence the -1 shift.
            choice_logprobs = lm_logprobs[choice_start - 1 : choice_end - 1] # [L, Vocab]
            extracted = torch.gather(choice_logprobs, -1, choice_tokens).squeeze(-1)
            choice_length = choice_end - choice_start
            lm_log_p = torch.sum(extracted).item()
            # NOTE(review): .item() here implies choice_length is a tensor — confirm.
            norm_lm_log_p = (lm_log_p / choice_length).item()
            choice_lm_info = {"lm_log_p": lm_log_p, "norm_lm_log_p": norm_lm_log_p}
            batched_one_choice_logprobs.append(choice_lm_info)
        batched_choices_logprobs.append(batched_one_choice_logprobs)
    return batched_choices_logprobs
import argparse
import os
import random
import numpy as np
import torch
from tasks import task_mapper
from utils.logger import tabular_pretty_print, fmt_float
def setup_seed(SEED):
def setup_gpu(gpu_s):
def setup_env(gpu_s, seed):
    """Configure process-wide environment: quiet libraries, GPUs, and RNG seed."""
    # Silence the bitsandbytes banner and the tokenizers fork warning.
    os.environ["BITSANDBYTES_NOWELCOME"] = "1"
    os.environ["TOKENIZERS_PARALLELISM"] = "false"
    setup_gpu(gpu_s)
    setup_seed(seed)
import argparse
import os
import random
import numpy as np
import torch
from tasks import task_mapper
from utils.logger import tabular_pretty_print, fmt_float
def str2bool(v):
    """Parse a CLI flag value into a bool.

    Accepts real bools unchanged and the usual yes/no spellings
    (case-insensitive); raises ArgumentTypeError otherwise.
    """
    if isinstance(v, bool):
        return v
    lowered = v.lower()
    if lowered in ("yes", "true", "t", "y", "1"):
        return True
    if lowered in ("no", "false", "f", "n", "0"):
        return False
    raise argparse.ArgumentTypeError("Boolean value expected.")
# Maps a dataset name (CLI `--dataset` choice) to its multiple-choice
# probability-inference task class.
# NOTE(review): the class names are not defined in this file — presumably
# imported from the `tasks` package; confirm.
task_mapper = {
    "qasc": QASCProbInferenceForMC,
    "obqa": OBQAProbInferenceForMC,
    "sst2": SST2ProbInferenceForMC,
    "sst5": SST5ProbInferenceForMC,
    "mr": MRProbInferenceForMC,
    "agnews": AGNewsProbInferenceForMC,
    "trec": TRECProbInferenceForMC,
    "hellaswag": HellaSwagProbInferenceForMC,
    "copa": COPAProbInferenceForMC,
    "winogrande": WinoGrandeProbInferenceForMC,
}
def mk_parser():
    """Build the shared CLI parser for local-model experiments.

    add_help is disabled so the parser can be composed as a parent parser.
    Returns the configured argparse.ArgumentParser.
    """
    psr = argparse.ArgumentParser(add_help=False)
    psr.add_argument("--seed", type=int, default=42)
    psr.add_argument("--prompt_version", type=str, default="v1")
    psr.add_argument("--dataset", type=str, choices=task_mapper.keys())
    psr.add_argument("--data_file", type=str)
    psr.add_argument("--model_type", type=str, choices=["opt", "gpt2", "e-gpt", "bloom"])
    psr.add_argument("--model_size", type=str)
    psr.add_argument("--gpus", type=str, default="0")
    psr.add_argument("--batch_size", type=int, default=0)  # 0 for auto-detect, -1 for FORCE auto-detect
    psr.add_argument("--in_8bit", type=str2bool, default=False)
    psr.add_argument("--no_console", action="store_true", default=False)
    psr.add_argument("--exemplar_method", type=str, default="random", choices=["random", "written", "stratified"])
    # if `num_base_shot` is set, `num_k_shot * num_base_shot` is the number of exemplars to be sampled
    psr.add_argument("--num_k_shots", type=int, default=1)
    psr.add_argument("--kv_iter", type=int, default=1)
    psr.add_argument("--step_size", type=float, default=0.01)
    psr.add_argument("--momentum", type=float, default=0.9)
    return psr
import argparse
import os
import random
import numpy as np
import torch
from tasks import task_mapper
from utils.logger import tabular_pretty_print, fmt_float
def mk_parser_openai():
    """Build the CLI parser for OpenAI-API experiments.

    add_help is disabled so the parser can be composed as a parent parser.
    Returns the configured argparse.ArgumentParser.
    """
    psr = argparse.ArgumentParser(add_help=False)
    psr.add_argument("--prompt_version", type=str, default="v1")
    psr.add_argument("--dataset", type=str, choices=["numersense", "piqa"])
    psr.add_argument("--data_file", type=str)
    psr.add_argument("--engine", type=str, choices=["text", "codex"])
    psr.add_argument("--batch_size", type=int, default=4)
    # Sampling controls forwarded to the completion API.
    psr.add_argument("--top_p", type=float, default=1.0)
    psr.add_argument("--temperature", type=float, default=1.0)
    return psr
from transformers import AutoTokenizer, PreTrainedTokenizerFast, AutoModelForCausalLM
from anchor import checkpoints_root
def build_model_signature(model_type, model_size):
    """Map a (model_type, model_size) pair to a Hugging Face hub model id.

    Returns None for unknown model types.
    """
    if model_type == "opt":
        # e.g. "125m", "350m", "1.3b", ..., "66b"
        return "facebook/opt-" + model_size
    if model_type == "gpt2":
        # "sm" is the unsuffixed base checkpoint; others are "gpt2-<size>"
        return "gpt2" if model_size == "sm" else "gpt2-" + model_size
    if model_type == "e-gpt":
        # e.g. "neo-125M", "neo-1.3B", "j-6B", "neox-20b"
        return "EleutherAI/gpt-" + model_size
    if model_type == "bloom":
        # e.g. "560m", "1b1", "1b7", "3b", "7b1"
        return "bigscience/bloom-" + model_size
# Local download cache for Hugging Face checkpoints.
# NOTE(review): shadows the `checkpoints_root` imported from anchor above — confirm which is intended.
checkpoints_root = Path("huggingface_cache")
def build_tokenizer(model_type, model_size, padding_side="left", use_fast=False):
    """Load the tokenizer matching build_model_signature(model_type, model_size).

    GPT-style models ship without a pad token, so EOS is reused for padding.
    """
    sign = build_model_signature(model_type, model_size)
    tok_cls = PreTrainedTokenizerFast if use_fast else AutoTokenizer
    tok = tok_cls.from_pretrained(sign, padding_side=padding_side, cache_dir=str(checkpoints_root))
    if model_type in ["gpt2", "e-gpt"]:
        # No dedicated pad token: pad with EOS.
        tok.pad_token_id = tok.eos_token_id
        tok.pad_token = tok.eos_token
    return tok
from transformers import AutoTokenizer, PreTrainedTokenizerFast, AutoModelForCausalLM
from anchor import checkpoints_root
def build_model_signature(model_type, model_size):
    """Resolve the Hugging Face hub id for a model family and size.

    Returns None when the family is unknown.
    """
    if model_type == "gpt2" and model_size == "sm":
        # The base GPT-2 checkpoint has no size suffix.
        return "gpt2"
    templates = {
        "opt": "facebook/opt-{}",        # "125m" ... "66b"
        "gpt2": "gpt2-{}",               # "medium", "large", "xl"
        "e-gpt": "EleutherAI/gpt-{}",    # "neo-125M", "j-6B", "neox-20b"
        "bloom": "bigscience/bloom-{}",  # "560m" ... "7b1"
    }
    if model_type in templates:
        return templates[model_type].format(model_size)
# Local download cache for Hugging Face checkpoints.
# NOTE(review): shadows the `checkpoints_root` imported from anchor above — confirm which is intended.
checkpoints_root = Path("huggingface_cache")
def build_model(model_type, model_size, in_8bit):
    """Load a causal LM from the hub and put it in eval mode.

    device_map="auto" shards the model across available devices; in_8bit
    enables bitsandbytes 8-bit loading.
    """
    sign = build_model_signature(model_type, model_size)
    model = AutoModelForCausalLM.from_pretrained(
        sign,
        cache_dir=str(checkpoints_root),
        device_map="auto",
        load_in_8bit=in_8bit,
    )
    # Inference only: disable dropout etc.
    model.eval()
    return model
import multiprocessing
from pathlib import Path
import json
def yield_chunks(data, size):
    """Yield consecutive chunks of at most `size` items from `data`.

    The iterable is materialized first, so this works for any iterable but
    holds all items in memory.  Raises ValueError if size is 0 (range step).
    """
    data = list(data)
    for i in range(0, len(data), size):
        yield data[i : i + size]
import multiprocessing
from pathlib import Path
import json
def ensure_folder(folder: Path, parents=False):
    """Create `folder` if it does not exist.

    Uses exist_ok=True instead of a separate exists() check so that concurrent
    callers cannot race between the check and the creation.
    """
    folder.mkdir(parents=parents, exist_ok=True)
import multiprocessing
from pathlib import Path
import json
def pick_if_present(d: dict, key_in_dict, key_new=None):
    """Return a one-entry dict for `key_in_dict` if present in `d`, else {}.

    When `key_new` is truthy the entry is emitted under that name instead of
    the original key.
    """
    if key_in_dict in d:
        out_key = key_new if key_new else key_in_dict
        return {out_key: d[key_in_dict]}
    return {}
from __future__ import absolute_import, division, unicode_literals
import logging
from pathlib import Path
import logging
import multiprocessing
import threading
def setup_logger(folder_path, log_file_name="logger.log", console_output=False, logger_name="task"):
    """Attach a timestamped file handler (and optionally a console handler)
    to the named logger at INFO level.

    If the log file already exists, an append notice is written so runs can
    be told apart.  NOTE: repeated calls with the same logger_name keep
    adding handlers, duplicating output.
    """
    dir_root = Path(folder_path)
    full_path = dir_root.joinpath(log_file_name)
    # print("File: ", full_path)
    already_exist = Path(full_path).exists()
    logger = logging.getLogger(logger_name)
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter("%(asctime)s| %(message)s", "%m-%d|%H:%M:%S")
    file_hdl = logging.FileHandler(full_path)
    file_hdl.setFormatter(formatter)
    logger.addHandler(file_hdl)
    if console_output:
        console_hdl = logging.StreamHandler()
        console_hdl.setFormatter(formatter)
        logger.addHandler(console_hdl)
    # Visual separator marking the start of a run.
    logger.info("")
    logger.info("-*" * 30)
    logger.info("Logger ready")
    if already_exist:
        logger.info("")
        logger.info("")
        logger.info(f">>>>> Logger file {full_path} already exist, append to it. <<<<<")
        logger.info("")
        logger.info("")
from __future__ import absolute_import, division, unicode_literals
import logging
from pathlib import Path
import logging
import multiprocessing
import threading
def setup_simple_logger():
    """Attach a timestamped console handler to the root logger at INFO level.

    NOTE: repeated calls keep adding handlers, duplicating output.
    """
    root_logger = logging.getLogger()
    root_logger.setLevel(logging.INFO)
    formatter = logging.Formatter("%(asctime)s| %(message)s", "%m-%d|%H:%M:%S")
    console_hdl = logging.StreamHandler()
    console_hdl.setFormatter(formatter)
    root_logger.addHandler(console_hdl)
from __future__ import absolute_import, division, unicode_literals
import logging
from pathlib import Path
import logging
import multiprocessing
import threading
def tabular_pretty_print(grid):
    """Render a list of string rows as a framed ASCII table.

    Columns are left-aligned to the widest cell; the table is framed with
    '~' borders and '*'/'|' corners.  Returns the list of rendered lines.
    """
    widths = [max(map(len, col)) for col in zip(*grid)]
    row_fmt = " | ".join("{{:{}}}".format(w) for w in widths)
    body = [row_fmt.format(*row) for row in grid]
    border = "~" * len(body[0])
    framed = [border] + body + [border]
    res = []
    last = len(framed) - 1
    for idx, line in enumerate(framed):
        # Border lines get '*' corners, content lines get '|'.
        wrapper = "* {} *" if idx == 0 or idx == last else "| {} |"
        res.append(wrapper.format(line))
    return res
from __future__ import absolute_import, division, unicode_literals
import logging
from pathlib import Path
import logging
import multiprocessing
import threading
def fmt_float(num, d=4):
    """Format `num` with `d` digits after the decimal point."""
    return f"{num:.{d}f}"
from __future__ import absolute_import, division, unicode_literals
import logging
from pathlib import Path
import logging
import multiprocessing
import threading
class MultiProcessingHandler(logging.Handler):
    """Logging handler that makes an arbitrary handler multiprocess-safe.

    emit() pushes pre-formatted records onto a multiprocessing.Queue; a daemon
    thread in the creating process drains the queue and replays each record on
    the wrapped `sub_handler`, so all output flows through a single consumer.
    """
    def __init__(self, name, sub_handler=None):
        super(MultiProcessingHandler, self).__init__()
        if sub_handler is None:
            sub_handler = logging.StreamHandler()
        self.sub_handler = sub_handler
        # Mirror the wrapped handler's level/formatter/filters.
        self.setLevel(self.sub_handler.level)
        self.setFormatter(self.sub_handler.formatter)
        self.filters = self.sub_handler.filters
        self.queue = multiprocessing.Queue(-1)
        self._is_closed = False
        # The thread handles receiving records asynchronously.
        self._receive_thread = threading.Thread(target=self._receive, name=name)
        self._receive_thread.daemon = True
        self._receive_thread.start()
    def setFormatter(self, fmt):
        # Keep the wrapped handler's formatter in sync with ours.
        super(MultiProcessingHandler, self).setFormatter(fmt)
        self.sub_handler.setFormatter(fmt)
    def _receive(self):
        # Consumer loop: drain the queue and replay records on sub_handler.
        while True:
            try:
                if self._is_closed and self.queue.empty():
                    break
                record = self.queue.get(timeout=0.2)
                self.sub_handler.emit(record)
            except (KeyboardInterrupt, SystemExit):
                raise
            except (EOFError, OSError):
                break  # The queue was closed by child?
            # NOTE(review): `Empty` is not imported in this file — presumably
            # queue.Empty; as written this line raises NameError on timeout.
            except Empty:
                pass  # This periodically checks if the logger is closed.
            except:
                from sys import stderr
                from traceback import print_exc
                print_exc(file=stderr)
                raise
        self.queue.close()
        self.queue.join_thread()
    def _send(self, s):
        self.queue.put_nowait(s)
    def _format_record(self, record):
        # ensure that exc_info and args
        # have been stringified. Removes any chance of
        # unpickleable things inside and possibly reduces
        # message size sent over the pipe.
        if record.args:
            record.msg = record.msg % record.args
            record.args = None
        if record.exc_info:
            self.format(record)
            record.exc_info = None
        return record
    def emit(self, record):
        try:
            s = self._format_record(record)
            self._send(s)
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            self.handleError(record)
    def close(self):
        if not self._is_closed:
            self._is_closed = True
            self._receive_thread.join(5.0)  # Waits for receive queue to empty.
            self.sub_handler.close()
            super(MultiProcessingHandler, self).close()
The provided code snippet includes necessary dependencies for implementing the `install_mp_handler` function. Write a Python function `def install_mp_handler(logger=None)` to solve the following problem:
Wraps the handlers in the given Logger with an MultiProcessingHandler. :param logger: whose handlers to wrap. By default, the root logger.
Here is the function:
def install_mp_handler(logger=None):
    """Wraps the handlers in the given Logger with an MultiProcessingHandler.

    :param logger: whose handlers to wrap. By default, the root logger.
    """
    if logger is None:
        logger = logging.getLogger()
    # Iterate over a snapshot since we mutate logger.handlers in the loop.
    for i, orig_handler in enumerate(list(logger.handlers)):
        handler = MultiProcessingHandler(f"mp-handler-{i}", sub_handler=orig_handler)
        logger.removeHandler(orig_handler)
        logger.addHandler(handler)
from __future__ import absolute_import, division, unicode_literals
import logging
from pathlib import Path
import logging
import multiprocessing
import threading
class MultiProcessingHandler(logging.Handler):
    """Logging handler that makes an arbitrary handler multiprocess-safe.

    emit() pushes pre-formatted records onto a multiprocessing.Queue; a daemon
    thread in the creating process drains the queue and replays each record on
    the wrapped `sub_handler`, so all output flows through a single consumer.
    """
    def __init__(self, name, sub_handler=None):
        super(MultiProcessingHandler, self).__init__()
        if sub_handler is None:
            sub_handler = logging.StreamHandler()
        self.sub_handler = sub_handler
        # Mirror the wrapped handler's level/formatter/filters.
        self.setLevel(self.sub_handler.level)
        self.setFormatter(self.sub_handler.formatter)
        self.filters = self.sub_handler.filters
        self.queue = multiprocessing.Queue(-1)
        self._is_closed = False
        # The thread handles receiving records asynchronously.
        self._receive_thread = threading.Thread(target=self._receive, name=name)
        self._receive_thread.daemon = True
        self._receive_thread.start()
    def setFormatter(self, fmt):
        # Keep the wrapped handler's formatter in sync with ours.
        super(MultiProcessingHandler, self).setFormatter(fmt)
        self.sub_handler.setFormatter(fmt)
    def _receive(self):
        # Consumer loop: drain the queue and replay records on sub_handler.
        while True:
            try:
                if self._is_closed and self.queue.empty():
                    break
                record = self.queue.get(timeout=0.2)
                self.sub_handler.emit(record)
            except (KeyboardInterrupt, SystemExit):
                raise
            except (EOFError, OSError):
                break  # The queue was closed by child?
            # NOTE(review): `Empty` is not imported in this file — presumably
            # queue.Empty; as written this line raises NameError on timeout.
            except Empty:
                pass  # This periodically checks if the logger is closed.
            except:
                from sys import stderr
                from traceback import print_exc
                print_exc(file=stderr)
                raise
        self.queue.close()
        self.queue.join_thread()
    def _send(self, s):
        self.queue.put_nowait(s)
    def _format_record(self, record):
        # ensure that exc_info and args
        # have been stringified. Removes any chance of
        # unpickleable things inside and possibly reduces
        # message size sent over the pipe.
        if record.args:
            record.msg = record.msg % record.args
            record.args = None
        if record.exc_info:
            self.format(record)
            record.exc_info = None
        return record
    def emit(self, record):
        try:
            s = self._format_record(record)
            self._send(s)
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            self.handleError(record)
    def close(self):
        if not self._is_closed:
            self._is_closed = True
            self._receive_thread.join(5.0)  # Waits for receive queue to empty.
            self.sub_handler.close()
            super(MultiProcessingHandler, self).close()
The provided code snippet includes necessary dependencies for implementing the `uninstall_mp_handler` function. Write a Python function `def uninstall_mp_handler(logger=None)` to solve the following problem:
Unwraps the handlers in the given Logger from a MultiProcessingHandler wrapper :param logger: whose handlers to unwrap. By default, the root logger.
Here is the function:
def uninstall_mp_handler(logger=None):
    """Unwraps the handlers in the given Logger from a MultiProcessingHandler wrapper

    :param logger: whose handlers to unwrap. By default, the root logger.
    """
    if logger is None:
        logger = logging.getLogger()
    # Fix: iterate over a snapshot — the original looped over logger.handlers
    # while removeHandler/addHandler mutated it, which can skip handlers.
    for handler in list(logger.handlers):
        if isinstance(handler, MultiProcessingHandler):
            orig_handler = handler.sub_handler
            logger.removeHandler(handler)
            logger.addHandler(orig_handler)
from transformers import Seq2SeqTrainer, is_torch_tpu_available, EvalPrediction
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union
import nltk
import datasets
import re
import os
import numpy as np
import torch
import random
from pathlib import Path
import nltk
from transformers.trainer_utils import (
PREFIX_CHECKPOINT_DIR,
BestRun,
EvalLoopOutput,
EvalPrediction,
HPSearchBackend,
HubStrategy,
IntervalStrategy,
PredictionOutput,
ShardedDDPOption,
TrainerMemoryTracker,
TrainOutput,
default_compute_objective,
default_hp_space,
denumpify_detensorize,
get_last_checkpoint,
number_of_arguments,
set_seed,
speed_metrics,
)
import warnings
from transformers.trainer_pt_utils import (
DistributedLengthGroupedSampler,
DistributedSamplerWithLoop,
DistributedTensorGatherer,
IterableDatasetShard,
LabelSmoother,
LengthGroupedSampler,
SequentialDistributedSampler,
ShardSampler,
distributed_broadcast_scalars,
distributed_concat,
find_batch_size,
get_parameter_names,
nested_concat,
nested_detach,
nested_numpify,
nested_truncate,
nested_xla_mesh_reduce,
reissue_pt_warnings,
)
from transformers.file_utils import (
CONFIG_NAME,
WEIGHTS_NAME,
get_full_repo_name,
is_apex_available,
is_datasets_available,
is_in_notebook,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_torch_tpu_available,
)
from transformers.trainer_utils import PredictionOutput,EvalLoopOutput
def fix_buggy_characters(str):
    """Replace characters that confuse downstream matching with single spaces.

    Affected characters: ``{``, ``}``, ``^``, ``\\``, `````, ``\u2047``, ``<``.
    NOTE: the parameter name shadows the builtin ``str``; it is kept unchanged
    to preserve the existing public signature.
    """
    buggy_class = "[{}^\\\\`\u2047<]"
    return re.sub(buggy_class, " ", str)
def replace_punctuation(str):
    """Drop straight double and single quotes from the input string.

    NOTE: the parameter name shadows the builtin ``str``; kept unchanged to
    preserve the existing public signature.
    """
    without_double = str.replace('"', "")
    return without_double.replace("'", "")
def score_string_similarity(str1, str2):
    """Score the similarity of two answer strings.

    Returns:
        3.0  -- exact match;
        2.0  -- match after stripping quotes and buggy characters;
        r    -- token-overlap ratio in [0, 1] when either normalized string
                contains a space (|shared tokens| / max token count);
        0.0  -- otherwise (single-token strings that differ).
    """
    if str1 == str2:
        return 3.0  # Better than perfect token match
    str1 = fix_buggy_characters(replace_punctuation(str1))
    str2 = fix_buggy_characters(replace_punctuation(str2))
    if str1 == str2:
        return 2.0
    if " " in str1 or " " in str2:
        str1_split = str1.split(" ")
        str2_split = str2.split(" ")
        overlap = set(str1_split) & set(str2_split)  # len() works on the set directly
        return len(overlap) / max(len(str1_split), len(str2_split))
    # The strings are known unequal here (checked above after normalization),
    # so the original `if str1 == str2: return 1.0` branch was unreachable
    # dead code; only the 0.0 outcome is possible.
    return 0.0
164,618 | import logging
import os
import torch
import copy,random
import sys
import json
from dataclasses import dataclass, field
from typing import Optional
from typing import List, Optional, Tuple
from sklearn.cluster import KMeans
from models.metadeca import T5ForConditionalGeneration as PromptT5
from metrics import compute_metrics
from downstreamdeca.simple_processors import *
from downstreamdeca.l2ptrainer import QuestionAnsweringTrainer
import datasets
import numpy as np
from datasets import load_dataset, load_metric,load_from_disk,concatenate_datasets
import os
from functools import partial
import transformers
from transformers import (
AutoConfig,
AutoModelForSeq2SeqLM,
AutoTokenizer,
DataCollatorForSeq2Seq,
HfArgumentParser,
M2M100Tokenizer,
MBart50Tokenizer,
EvalPrediction,
MBart50TokenizerFast,
MBartTokenizer,
MBartTokenizerFast,
Seq2SeqTrainer,
Seq2SeqTrainingArguments,
default_data_collator,
set_seed,
)
from pathlib import Path
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
def main():
    """Continual-learning driver: parse arguments, build the tokenizer and
    PromptT5 model, then sequentially train on a fixed task sequence while
    maintaining a replay buffer of diverse samples, evaluating on all
    previously-seen tasks after each stage.
    """
    # --- Local helpers; these close over data_args / tokenizer / padding
    # --- and the column names defined further down in this function.
    # Tokenize eval inputs/targets and record each row's example id so
    # predictions can be joined back to the raw examples.
    def preprocess_validation_function(examples):
        preprocess_fn = dataset_name_to_func(data_args.dataset_name)
        inputs, targets = preprocess_fn(examples, question_column, context_column, answer_column)
        model_inputs = tokenizer(inputs, max_length=data_args.max_source_length, padding=padding, truncation=True)
        # Setup the tokenizer for targets
        model_inputs["example_id"] = []
        for i in range(len(model_inputs["input_ids"])):
            # One example can give several spans, this is the index of the example containing this span of text.
            sample_index = i #sample_mapping[i]
            model_inputs["example_id"].append(examples["id"][sample_index])
        with tokenizer.as_target_tokenizer():
            labels = tokenizer(targets, max_length=data_args.max_target_length, padding=padding, truncation=True)
        # If we are padding here, replace all tokenizer.pad_token_id in the labels by -100 when we want to ignore
        # padding in the loss.
        if padding == "max_length" and data_args.ignore_pad_token_for_loss:
            labels["input_ids"] = [
                [(l if l != tokenizer.pad_token_id else -100) for l in label] for label in labels["input_ids"]
            ]
        model_inputs["labels"] = labels["input_ids"]
        return model_inputs
    # Persist the prompt-embedding weight plus the task/format id maps to disk.
    # NOTE(review): not called anywhere in this function — confirm it is used elsewhere.
    def save_prompt_embedding(model,path):
        prompt_embedding = model.state_dict()['encoder.prompt_embeddings.weight']
        save_prompt_info = {'encoder.prompt_embeddings.weight':copy.deepcopy(prompt_embedding),'task2id':task2id,'format2id':format2id}
        prompt_path = os.path.join(path,'prompt_embedding_info')
        torch.save(save_prompt_info,prompt_path)
        logger.info(f'Saving prompt embedding information to {prompt_path}')
    # Same as preprocess_validation_function but without example-id bookkeeping.
    def preprocess_function(examples):
        preprocess_fn = dataset_name_to_func(data_args.dataset_name)
        inputs, targets = preprocess_fn(examples, question_column, context_column, answer_column)
        model_inputs = tokenizer(inputs, max_length=data_args.max_source_length, padding=padding, truncation=True)
        # Setup the tokenizer for targets
        with tokenizer.as_target_tokenizer():
            labels = tokenizer(targets, max_length=data_args.max_target_length, padding=padding, truncation=True)
        # If we are padding here, replace all tokenizer.pad_token_id in the labels by -100 when we want to ignore
        # padding in the loss.
        if padding == "max_length" and data_args.ignore_pad_token_for_loss:
            labels["input_ids"] = [
                [(l if l != tokenizer.pad_token_id else -100) for l in label] for label in labels["input_ids"]
            ]
        model_inputs["labels"] = labels["input_ids"]
        return model_inputs
    # Pick 50 replay samples spread across the encoder's domain keys (at most 3
    # per key among the top-5 matches), and return them with their query vectors.
    def save_load_diverse_sample(model,trainset):
        with torch.no_grad():
            device = 'cuda:0'
            search_upbound = len(trainset)//4
            query_idxs = [None]*30
            keys = model.encoder.domain_keys
            for idx,item in enumerate(trainset.select(range(search_upbound))):
                query = model.encoder.get_query_vector(input_ids=torch.tensor([item['input_ids']]).long().to(device),
                                attention_mask=torch.tensor([item['attention_mask']]).long().to(device),
                                return_dict=True)
                result = torch.matmul(query,keys.t())
                result = torch.topk(result,5).indices[0].cpu().numpy().tolist()
                key_sel = None
                for key_idx in result:
                    if query_idxs[key_idx] is None or len(query_idxs[key_idx])<3:
                        key_sel = key_idx
                        break
                if key_sel is not None:
                    if query_idxs[key_sel] is None:
                        query_idxs[key_sel] = [idx]
                    else:
                        query_idxs[key_sel].append(idx)
            total_idxs = []
            for item in query_idxs:
                try:
                    total_idxs.extend(item[:3])
                except:
                    # item is None (key never hit): fall back to random samples
                    # from the portion of the trainset not searched above.
                    total_idxs.extend(random.sample(list(range(search_upbound,len(trainset))),3))
            total_idxs = list(set(total_idxs))
            total_idxs = random.sample(total_idxs,50)
            sub_set = trainset.select(total_idxs)
            features = []
            for idx,item in enumerate(sub_set):
                query = model.encoder.get_query_vector(input_ids=torch.tensor([item['input_ids']]).long().to(device),
                                attention_mask=torch.tensor([item['attention_mask']]).long().to(device),
                                return_dict=True)
                features.append(query.detach().cpu().numpy())
            return sub_set,features
    # Map a dataset name to its input/target preprocessing function.
    def dataset_name_to_func(dataset_name):
        mapping = {
            'squad': preprocess_sqaud_batch,
            'squad_v2': preprocess_sqaud_batch,
            'boolq': preprocess_boolq_batch,
            'narrativeqa': preprocess_narrativeqa_batch,
            'race': preprocess_race_batch,
            'newsqa': preprocess_newsqa_batch,
            'quoref': preprocess_sqaud_batch,
            'ropes': preprocess_ropes_batch,
            'drop': preprocess_drop_batch,
            'nqopen': preprocess_sqaud_abstractive_batch,
            # 'multirc': preprocess_boolq_batch,
            'boolq_np': preprocess_boolq_batch,
            'openbookqa': preprocess_openbookqa_batch,
            'mctest': preprocess_race_batch,
            'social_iqa': preprocess_social_iqa_batch,
            'dream': preprocess_dream_batch,
        }
        return mapping[dataset_name]
    # --- Argument parsing: either a single JSON config file or CLI flags. ---
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    # NOTE(review): this mapping is not referenced later in this function
    # (evaluation below hardcodes the squad_v1 metric path) — confirm intent.
    dataset_name_to_metric = {
        'squad': 'metric/squad_v1_local/squad_v1_local.py',
        'squad_v2': 'metric/squad_v2_local/squad_v2_local.py',
        'newsqa': 'metric/squad_v2_local/squad_v2_local.py',
        'boolq': 'metric/accuracy.py',
        'narrativeqa': 'metric/rouge_local/rouge_metric.py',
        'race': 'metric/accuracy.py',
        'quoref': 'metric/squad_v1_local/squad_v1_local.py',
        'ropes': 'metric/squad_v1_local/squad_v1_local.py',
        'drop': 'metric/squad_v1_local/squad_v1_local.py',
        'nqopen': 'metric/squad_v1_local/squad_v1_local.py',
        'boolq_np': 'metric/accuracy.py',
        'openbookqa': 'metric/accuracy.py',
        'mctest': 'metric/accuracy.py',
        'social_iqa': 'metric/accuracy.py',
        'dream': 'metric/accuracy.py',
    }
    # --- Logging setup (stdout), propagated to datasets/transformers. ---
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")
    if data_args.source_prefix is None and model_args.model_name_or_path in [
        "t5-small",
        "t5-base",
        "t5-large",
        "t5-3b",
        "t5-11b",
    ]:
        logger.warning(
            "You're running a t5 model but didn't provide a source prefix, which is expected, e.g. with "
            "`--source_prefix 'translate English to German: ' `"
        )
    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )
    # Set seed before initializing model.
    set_seed(training_args.seed)
    # --- Build config, tokenizer, and the prompt-augmented T5 model. ---
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    # tokenizer.add_tokens(['[TASK]', '[ABSTRACTIVE]','[QUESTION]','[CONTEXT]','[BOOL]','[EXTRACTIVE]','[MultiChoice]',
    # '[OPTIONS]'])
    tokens_to_add = ['[ABSTRACTIVE]', '[BOOL]', '[EXTRACTIVE]', '[MultiChoice]']
    special_tokens_dict = {'additional_special_tokens': ['[TASK]', '[QUESTION]', '[CONTEXT]',
                                                         '[OPTIONS]']}
    tokenizer.add_tokens(tokens_to_add)
    num_added_toks = tokenizer.add_special_tokens(special_tokens_dict)
    added_tokens = tokenizer.get_added_vocab()
    logger.info('Added tokens: {}'.format(added_tokens))
    model = PromptT5.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        # task_num = data_args.max_task_num,
        # prompt_num = data_args.prompt_number,
        # format_num = data_args.qa_task_type_num,
        # add_task_prompt = False
    )
    model.resize_token_embeddings(len(tokenizer))
    #reload format specific task-prompt for newly involved task
    #format_prompts###task_promptsf
    # Both reload paths are hard-disabled here, so the two branches below are
    # currently never taken.
    data_args.reload_from_trained_prompt = False#@
    data_args.load_from_format_task_id = False#@
    ### before pretrain come !!!!!!
    if data_args.load_from_format_task_id and (data_args.dataset_name not in seed_datasets) and not data_args.reload_from_trained_prompt:
        task_start_id = data_args.prompt_number * len(format2dataset.keys())
        task_id = task_start_id + task2id[data_args.dataset_name] * data_args.prompt_number
        format_task_id = task_start_id + task2id[dataset2format[data_args.dataset_name]] * data_args.prompt_number
        model.state_dict()['encoder.prompt_embeddings.weight'][task_id:task_id+data_args.prompt_number,:] = model.state_dict()['encoder.prompt_embeddings.weight'][format_task_id:format_task_id+data_args.prompt_number,:]
        logger.info(f'Successfully initialize format {dataset2format[data_args.dataset_name]} task prompt for new task {data_args.dataset_name}, task id {task_id}')
        # print(dataset2format[data_args.dataset_name])
        # print(data_args.dataset_name)
    elif data_args.reload_from_trained_prompt:
        assert data_args.trained_prompt_path,'Must specify the path of stored prompt'
        prompt_info = torch.load(data_args.trained_prompt_path)
        assert prompt_info['task2id'][data_args.dataset_name]==task2id[data_args.dataset_name],f'the task id in trained prompt task id is not matched to the current task id for {data_args.dataset_name}'
        assert prompt_info['format2id'].keys()==format2id.keys(),'the format dont match'
        task_start_id = data_args.prompt_number * len(format2dataset.keys())
        task_id = task_start_id + task2id[data_args.dataset_name] * data_args.prompt_number
        logger.info('task id range {} {}'.format(task_id,task_id+data_args.prompt_number))
        # assert torch.sum(model.state_dict()['encoder.prompt_embeddings.weight'][task_id:task_id+data_args.prompt_number,:] - prompt_info['encoder.prompt_embeddings.weight'][task_id:task_id+data_args.prompt_number,:])==0
        model.state_dict()['encoder.prompt_embeddings.weight'][task_id:task_id+data_args.prompt_number,:] = prompt_info['encoder.prompt_embeddings.weight'][task_id:task_id+data_args.prompt_number,:]
        format_id = format2id[dataset2format[data_args.dataset_name]]
        model.state_dict()['encoder.prompt_embeddings.weight'][format_id*data_args.prompt_number:(format_id+1)*data_args.prompt_number, :] = prompt_info['encoder.prompt_embeddings.weight'][format_id*data_args.prompt_number:(format_id+1)*data_args.prompt_number, :]
        logger.info(
            f'Successfully restore task+format prompt for the task {data_args.dataset_name} from {data_args.trained_prompt_path}')
    # Set decoder_start_token_id
    if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
        if isinstance(tokenizer, MBartTokenizer):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.target_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.target_lang)
    if model.config.decoder_start_token_id is None:
        raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")
    # NOTE(review): `prefix`, `device`, and `n_gpu` are assigned here but not
    # referenced later in this function; the `no_cuda` condition selecting
    # torch.device("cuda") also looks inverted — confirm.
    prefix = data_args.source_prefix if data_args.source_prefix is not None else ""
    if training_args.local_rank == -1 or training_args.no_cuda:
        device = torch.device("cuda")
        n_gpu = torch.cuda.device_count()
    # Temporarily set max_target_length for training.
    max_target_length = data_args.max_target_length
    padding = "max_length" if data_args.pad_to_max_length else False
    if training_args.label_smoothing_factor > 0 and not hasattr(model, "prepare_decoder_input_ids_from_labels"):
        logger.warning(
            "label_smoothing is enabled but the `prepare_decoder_input_ids_from_labels` method is not defined for"
            f"`{model.__class__.__name__}`. This will lead to loss being calculated twice and will take up more memory"
        )
    question_column = data_args.question_column
    context_column = data_args.context_column
    answer_column = data_args.answer_column
    # import random
    if data_args.max_source_length > tokenizer.model_max_length:
        logger.warning(
            f"The max_seq_length passed ({data_args.max_source_length}) is larger than the maximum length for the"
            f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
        )
    data_args.max_source_length = min(data_args.max_source_length, tokenizer.model_max_length)
    # Data collator
    label_pad_token_id = -100 if data_args.ignore_pad_token_for_loss else tokenizer.pad_token_id
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    else:
        data_collator = DataCollatorForSeq2Seq(
            tokenizer,
            model=model,
            label_pad_token_id=label_pad_token_id,
            pad_to_multiple_of=8 if training_args.fp16 else None,
        )
    #start
    # --- Load pre-tokenized train/eval splits from disk (200 rows each). ---
    train_dataloaders = {}
    eval_dataloaders = {}
    replay_dataloaders = {}
    all_replay = None
    for ds_name in ["squad","wikisql","sst","srl","woz.en"]:
        if True:
            cur_dataset = ds_name
            train_dataloaders[ds_name] = load_from_disk("./oursdeca/{}-train.hf".format(cur_dataset)).select(range(200))
            eval_dataloaders[ds_name] = (load_from_disk("./oursdeca/{}-eval.hf".format(cur_dataset)).select(range(200)),load_from_disk("./oursdeca/{}-evalex.hf".format(cur_dataset)).select(range(200)))
    for ds_name in ["cnn_dailymail","multinli.in.out","zre"]:
        eval_dataloaders[ds_name] = (load_from_disk("./oursdeca/{}-eval.hf".format(ds_name)),load_from_disk("./oursdeca/{}-evalex.hf".format(ds_name)))
    pre_tasks = []
    pre_general = []
    pre_test = []
    max_length = (
        training_args.generation_max_length
        if training_args.generation_max_length is not None
        else data_args.val_max_target_length
    )
    num_beams = data_args.num_beams if data_args.num_beams is not None else training_args.generation_num_beams
    task_sequence = ["squad","wikisql","sst","srl","woz.en"]
    # task_sequence = ["woz.en","srl","sst","wikisql","squad"]
    need_to_do_dss = []
    # NOTE(review): this log file handle is never closed in this function.
    fileout = open("diana_log.txt",'w')
    all_replay = None
    all_features = []
    all_ids = []
    cluster_num=0
    # --- Sequential training over the task list; after the last task the
    # --- three held-out datasets are appended to the evaluation set.
    for cur_dataset in task_sequence:
        cluster_num+=5
        pre_tasks.append(cur_dataset)
        if cur_dataset==task_sequence[-1]:
            pre_tasks.extend(["cnn_dailymail","multinli.in.out","zre"])
        data_args.dataset_name = cur_dataset
        logger.info("current_dataset:"+cur_dataset)
        training_args.do_train = True
        training_args.to_eval = False
        metric = load_metric("metric/squad_v1_local/squad_v1_local.py")
        # Train on the current task fused with the accumulated replay buffer.
        if all_replay is not None:
            fused = datasets.concatenate_datasets([all_replay,train_dataloaders[cur_dataset]])
        else:
            fused = train_dataloaders[cur_dataset]
        training_args.num_train_epochs = 5
        model.encoder.reset_train_count()
        trainer = QuestionAnsweringTrainer(
            model=model,
            args=training_args,
            train_dataset=fused,
            eval_dataset=None,
            eval_examples=None,
            answer_column_name=answer_column,
            dataset_name=data_args.dataset_name,
            tokenizer=tokenizer,
            data_collator=data_collator,
            compute_metrics=compute_metrics if training_args.predict_with_generate else None,
        )
        train_result = trainer.train()
        # Rank 0 extends the replay buffer and persists it; other ranks wait
        # at the barrier, then everyone reloads the shared copy from disk.
        if training_args.local_rank<=0:
            save_set,features = save_load_diverse_sample(model,train_dataloaders[cur_dataset])
            if all_replay is None:
                all_replay = save_set
            else:
                all_replay = datasets.concatenate_datasets([all_replay,save_set])
            if all_features==[]:
                all_features=features
            else:
                all_features.extend(features)
            np.save("./all_features.npy",np.array(all_features))
            all_replay.save_to_disk("all_replay@{}.hf".format(cur_dataset))
        if training_args.local_rank!=-1:
            torch.distributed.barrier()
        all_replay = load_from_disk("all_replay@{}.hf".format(cur_dataset))
        all_ids.extend([task2id[cur_dataset]]*50)
        all_features=np.load("./all_features.npy").tolist()
        model.encoder.add_negs(all_ids,all_features)
        # Evaluate on every task seen so far.
        for pre_dataset in pre_tasks:
            data_args.dataset_name = pre_dataset
            metric = load_metric("metric/squad_v1_local/squad_v1_local.py")
            eval_dataset,eval_examples = eval_dataloaders[pre_dataset]
            trainer = QuestionAnsweringTrainer(
                model=model,
                args=training_args,
                train_dataset=None,
                eval_dataset=eval_dataset,
                eval_examples=eval_examples,
                answer_column_name=answer_column,
                dataset_name=data_args.dataset_name,
                tokenizer=tokenizer,
                data_collator=data_collator,
                compute_metrics=compute_metrics if training_args.predict_with_generate else None,
            )
            torch.cuda.empty_cache()
            logger.info("*** Evaluate:{} ***".format(data_args.dataset_name))
            # NOTE(review): this clobbers the generation max_length/num_beams
            # computed before the loop, so evaluate() always runs with None
            # (i.e. model defaults) — confirm this is intentional.
            max_length, num_beams, ignore_keys_for_eval = None, None, None
            metrics = trainer.evaluate(max_length=max_length, num_beams=num_beams, ignore_keys=ignore_keys_for_eval,metric_key_prefix="eval")
            max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
            metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
            trainer.log_metrics("eval", metrics)
            trainer.save_metrics("eval", metrics)
            if training_args.local_rank<=0:
                try:
                    print("after_train_",cur_dataset,"_test_",pre_dataset,file=fileout)
                    print(metrics,file=fileout)
                except:
                    pass
    # NOTE(review): `kwargs` is never defined in this function, so this branch
    # would raise NameError if `languages` were non-empty — confirm / remove.
    languages = [l for l in [data_args.source_lang, data_args.target_lang] if l is not None]
    if len(languages) > 0:
        kwargs["language"] = languages
    return None
def _mp_fn(index):
    """Per-process entry point for xla_spawn (TPU training).

    ``index`` is the process ordinal supplied by the spawner; it is unused
    because ``main()`` reads its configuration from the command line.
    """
    main()
164,619 | import logging
import os
import torch
import copy,random
import sys
import json
from dataclasses import dataclass, field
from typing import Optional
from typing import List, Optional, Tuple
from sklearn.cluster import KMeans
from models.metadecanometa import T5ForConditionalGeneration as PromptT5
from metrics import compute_metrics
from downstreamdeca.simple_processors import *
from downstreamdeca.l2ptrainer import QuestionAnsweringTrainer
import datasets
import numpy as np
from datasets import load_dataset, load_metric,load_from_disk,concatenate_datasets
import os
from functools import partial
import transformers
from transformers import (
AutoConfig,
AutoModelForSeq2SeqLM,
AutoTokenizer,
DataCollatorForSeq2Seq,
HfArgumentParser,
M2M100Tokenizer,
MBart50Tokenizer,
EvalPrediction,
MBart50TokenizerFast,
MBartTokenizer,
MBartTokenizerFast,
Seq2SeqTrainer,
Seq2SeqTrainingArguments,
default_data_collator,
set_seed,
)
from pathlib import Path
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
def main():
def preprocess_validation_function(examples):
preprocess_fn = dataset_name_to_func(data_args.dataset_name)
inputs, targets = preprocess_fn(examples, question_column, context_column, answer_column)
model_inputs = tokenizer(inputs, max_length=data_args.max_source_length, padding=padding, truncation=True)
# Setup the tokenizer for targets
model_inputs["example_id"] = []
for i in range(len(model_inputs["input_ids"])):
# One example can give several spans, this is the index of the example containing this span of text.
sample_index = i #sample_mapping[i]
model_inputs["example_id"].append(examples["id"][sample_index])
with tokenizer.as_target_tokenizer():
labels = tokenizer(targets, max_length=data_args.max_target_length, padding=padding, truncation=True)
# If we are padding here, replace all tokenizer.pad_token_id in the labels by -100 when we want to ignore
# padding in the loss.
if padding == "max_length" and data_args.ignore_pad_token_for_loss:
labels["input_ids"] = [
[(l if l != tokenizer.pad_token_id else -100) for l in label] for label in labels["input_ids"]
]
model_inputs["labels"] = labels["input_ids"]
return model_inputs
def save_prompt_embedding(model,path):
prompt_embedding = model.state_dict()['encoder.prompt_embeddings.weight']
save_prompt_info = {'encoder.prompt_embeddings.weight':copy.deepcopy(prompt_embedding),'task2id':task2id,'format2id':format2id}
prompt_path = os.path.join(path,'prompt_embedding_info')
torch.save(save_prompt_info,prompt_path)
logger.info(f'Saving prompt embedding information to {prompt_path}')
def preprocess_function(examples):
preprocess_fn = dataset_name_to_func(data_args.dataset_name)
inputs, targets = preprocess_fn(examples, question_column, context_column, answer_column)
model_inputs = tokenizer(inputs, max_length=data_args.max_source_length, padding=padding, truncation=True)
# Setup the tokenizer for targets
with tokenizer.as_target_tokenizer():
labels = tokenizer(targets, max_length=data_args.max_target_length, padding=padding, truncation=True)
# If we are padding here, replace all tokenizer.pad_token_id in the labels by -100 when we want to ignore
# padding in the loss.
if padding == "max_length" and data_args.ignore_pad_token_for_loss:
labels["input_ids"] = [
[(l if l != tokenizer.pad_token_id else -100) for l in label] for label in labels["input_ids"]
]
model_inputs["labels"] = labels["input_ids"]
return model_inputs
def save_load_diverse_sample(model,trainset):
with torch.no_grad():
device = 'cuda:0'
sub_set = trainset.select(range(50))
features = []
for idx,item in enumerate(sub_set):
query = model.encoder.get_query_vector(input_ids=torch.tensor([item['input_ids']]).long().to(device),
attention_mask=torch.tensor([item['attention_mask']]).long().to(device),
return_dict=True)
features.append(query.detach().cpu().numpy())
return sub_set,features
def dataset_name_to_func(dataset_name):
mapping = {
'squad': preprocess_sqaud_batch,
'squad_v2': preprocess_sqaud_batch,
'boolq': preprocess_boolq_batch,
'narrativeqa': preprocess_narrativeqa_batch,
'race': preprocess_race_batch,
'newsqa': preprocess_newsqa_batch,
'quoref': preprocess_sqaud_batch,
'ropes': preprocess_ropes_batch,
'drop': preprocess_drop_batch,
'nqopen': preprocess_sqaud_abstractive_batch,
# 'multirc': preprocess_boolq_batch,
'boolq_np': preprocess_boolq_batch,
'openbookqa': preprocess_openbookqa_batch,
'mctest': preprocess_race_batch,
'social_iqa': preprocess_social_iqa_batch,
'dream': preprocess_dream_batch,
}
return mapping[dataset_name]
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
dataset_name_to_metric = {
'squad': 'metric/squad_v1_local/squad_v1_local.py',
'squad_v2': 'metric/squad_v2_local/squad_v2_local.py',
'newsqa': 'metric/squad_v2_local/squad_v2_local.py',
'boolq': 'metric/accuracy.py',
'narrativeqa': 'metric/rouge_local/rouge_metric.py',
'race': 'metric/accuracy.py',
'quoref': 'metric/squad_v1_local/squad_v1_local.py',
'ropes': 'metric/squad_v1_local/squad_v1_local.py',
'drop': 'metric/squad_v1_local/squad_v1_local.py',
'nqopen': 'metric/squad_v1_local/squad_v1_local.py',
'boolq_np': 'metric/accuracy.py',
'openbookqa': 'metric/accuracy.py',
'mctest': 'metric/accuracy.py',
'social_iqa': 'metric/accuracy.py',
'dream': 'metric/accuracy.py',
}
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
)
log_level = training_args.get_process_log_level()
logger.setLevel(log_level)
datasets.utils.logging.set_verbosity(log_level)
transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
)
logger.info(f"Training/evaluation parameters {training_args}")
if data_args.source_prefix is None and model_args.model_name_or_path in [
"t5-small",
"t5-base",
"t5-large",
"t5-3b",
"t5-11b",
]:
logger.warning(
"You're running a t5 model but didn't provide a source prefix, which is expected, e.g. with "
"`--source_prefix 'translate English to German: ' `"
)
# Detecting last checkpoint.
last_checkpoint = None
if os.path.isdir(training_args.output_dir) and not training_args.overwrite_output_dir:
last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome."
)
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
)
# Set seed before initializing model.
set_seed(training_args.seed)
config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
use_fast=model_args.use_fast_tokenizer,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
# tokenizer.add_tokens(['[TASK]', '[ABSTRACTIVE]','[QUESTION]','[CONTEXT]','[BOOL]','[EXTRACTIVE]','[MultiChoice]',
# '[OPTIONS]'])
tokens_to_add = ['[ABSTRACTIVE]', '[BOOL]', '[EXTRACTIVE]', '[MultiChoice]']
special_tokens_dict = {'additional_special_tokens': ['[TASK]', '[QUESTION]', '[CONTEXT]',
'[OPTIONS]']}
tokenizer.add_tokens(tokens_to_add)
num_added_toks = tokenizer.add_special_tokens(special_tokens_dict)
added_tokens = tokenizer.get_added_vocab()
logger.info('Added tokens: {}'.format(added_tokens))
model = PromptT5.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
# task_num = data_args.max_task_num,
# prompt_num = data_args.prompt_number,
# format_num = data_args.qa_task_type_num,
# add_task_prompt = False
)
model.resize_token_embeddings(len(tokenizer))
#reload format specific task-prompt for newly involved task
#format_prompts###task_promptsf
data_args.reload_from_trained_prompt = False#@
data_args.load_from_format_task_id = False#@
### before pretrain come !!!!!!
if data_args.load_from_format_task_id and (data_args.dataset_name not in seed_datasets) and not data_args.reload_from_trained_prompt:
task_start_id = data_args.prompt_number * len(format2dataset.keys())
task_id = task_start_id + task2id[data_args.dataset_name] * data_args.prompt_number
format_task_id = task_start_id + task2id[dataset2format[data_args.dataset_name]] * data_args.prompt_number
model.state_dict()['encoder.prompt_embeddings.weight'][task_id:task_id+data_args.prompt_number,:] = model.state_dict()['encoder.prompt_embeddings.weight'][format_task_id:format_task_id+data_args.prompt_number,:]
logger.info(f'Successfully initialize format {dataset2format[data_args.dataset_name]} task prompt for new task {data_args.dataset_name}, task id {task_id}')
# print(dataset2format[data_args.dataset_name])
# print(data_args.dataset_name)
elif data_args.reload_from_trained_prompt:
assert data_args.trained_prompt_path,'Must specify the path of stored prompt'
prompt_info = torch.load(data_args.trained_prompt_path)
assert prompt_info['task2id'][data_args.dataset_name]==task2id[data_args.dataset_name],f'the task id in trained prompt task id is not matched to the current task id for {data_args.dataset_name}'
assert prompt_info['format2id'].keys()==format2id.keys(),'the format dont match'
task_start_id = data_args.prompt_number * len(format2dataset.keys())
task_id = task_start_id + task2id[data_args.dataset_name] * data_args.prompt_number
logger.info('task id range {} {}'.format(task_id,task_id+data_args.prompt_number))
# assert torch.sum(model.state_dict()['encoder.prompt_embeddings.weight'][task_id:task_id+data_args.prompt_number,:] - prompt_info['encoder.prompt_embeddings.weight'][task_id:task_id+data_args.prompt_number,:])==0
model.state_dict()['encoder.prompt_embeddings.weight'][task_id:task_id+data_args.prompt_number,:] = prompt_info['encoder.prompt_embeddings.weight'][task_id:task_id+data_args.prompt_number,:]
format_id = format2id[dataset2format[data_args.dataset_name]]
model.state_dict()['encoder.prompt_embeddings.weight'][format_id*data_args.prompt_number:(format_id+1)*data_args.prompt_number, :] = prompt_info['encoder.prompt_embeddings.weight'][format_id*data_args.prompt_number:(format_id+1)*data_args.prompt_number, :]
logger.info(
f'Successfully restore task+format prompt for the task {data_args.dataset_name} from {data_args.trained_prompt_path}')
# Set decoder_start_token_id
if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
if isinstance(tokenizer, MBartTokenizer):
model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.target_lang]
else:
model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.target_lang)
if model.config.decoder_start_token_id is None:
raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")
prefix = data_args.source_prefix if data_args.source_prefix is not None else ""
if training_args.local_rank == -1 or training_args.no_cuda:
device = torch.device("cuda")
n_gpu = torch.cuda.device_count()
# Temporarily set max_target_length for training.
max_target_length = data_args.max_target_length
padding = "max_length" if data_args.pad_to_max_length else False
if training_args.label_smoothing_factor > 0 and not hasattr(model, "prepare_decoder_input_ids_from_labels"):
logger.warning(
"label_smoothing is enabled but the `prepare_decoder_input_ids_from_labels` method is not defined for"
f"`{model.__class__.__name__}`. This will lead to loss being calculated twice and will take up more memory"
)
question_column = data_args.question_column
context_column = data_args.context_column
answer_column = data_args.answer_column
# import random
if data_args.max_source_length > tokenizer.model_max_length:
logger.warning(
f"The max_seq_length passed ({data_args.max_source_length}) is larger than the maximum length for the"
f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
)
data_args.max_source_length = min(data_args.max_source_length, tokenizer.model_max_length)
# Data collator
label_pad_token_id = -100 if data_args.ignore_pad_token_for_loss else tokenizer.pad_token_id
if data_args.pad_to_max_length:
data_collator = default_data_collator
else:
data_collator = DataCollatorForSeq2Seq(
tokenizer,
model=model,
label_pad_token_id=label_pad_token_id,
pad_to_multiple_of=8 if training_args.fp16 else None,
)
#start
train_dataloaders = {}
eval_dataloaders = {}
replay_dataloaders = {}
all_replay = None
for ds_name in ["squad","wikisql","sst","srl","woz.en"]:
if True:
cur_dataset = ds_name
train_dataloaders[ds_name] = load_from_disk("./oursdecanometa/{}-train.hf".format(cur_dataset)).select(range(200))
eval_dataloaders[ds_name] = (load_from_disk("./oursdecanometa/{}-eval.hf".format(cur_dataset)).select(range(200)),load_from_disk("./oursdecanometa/{}-evalex.hf".format(cur_dataset)).select(range(200)))
for ds_name in ["cnn_dailymail","multinli.in.out","zre"]:
eval_dataloaders[ds_name] = (load_from_disk("./oursdecanometa/{}-eval.hf".format(ds_name)),load_from_disk("./oursdecanometa/{}-evalex.hf".format(ds_name)))
pre_tasks = []
pre_general = []
pre_test = []
max_length = (
training_args.generation_max_length
if training_args.generation_max_length is not None
else data_args.val_max_target_length
)
num_beams = data_args.num_beams if data_args.num_beams is not None else training_args.generation_num_beams
task_sequence = ["squad","wikisql","sst","srl","woz.en"]
# task_sequence = ["woz.en","srl","sst","wikisql","squad"]
need_to_do_dss = []
fileout = open("diana_log.txt",'w')
all_replay = None
all_features = []
all_ids = []
cluster_num=0
for cur_dataset in task_sequence:
cluster_num+=5
pre_tasks.append(cur_dataset)
if cur_dataset==task_sequence[-1]:
pre_tasks.extend(["cnn_dailymail","multinli.in.out","zre"])
data_args.dataset_name = cur_dataset
logger.info("current_dataset:"+cur_dataset)
training_args.do_train = True
training_args.to_eval = False
metric = load_metric("metric/squad_v1_local/squad_v1_local.py")
if all_replay is not None:
fused = datasets.concatenate_datasets([all_replay,train_dataloaders[cur_dataset]])
else:
fused = train_dataloaders[cur_dataset]
training_args.num_train_epochs = 5
model.encoder.reset_train_count()
trainer = QuestionAnsweringTrainer(
model=model,
args=training_args,
train_dataset=fused,
eval_dataset=None,
eval_examples=None,
answer_column_name=answer_column,
dataset_name=data_args.dataset_name,
tokenizer=tokenizer,
data_collator=data_collator,
compute_metrics=compute_metrics if training_args.predict_with_generate else None,
)
train_result = trainer.train()
if training_args.local_rank<=0:
save_set,features = save_load_diverse_sample(model,train_dataloaders[cur_dataset])
if all_replay is None:
all_replay = save_set
else:
all_replay = datasets.concatenate_datasets([all_replay,save_set])
if all_features==[]:
all_features=features
else:
all_features.extend(features)
np.save("./all_features.npy",np.array(all_features))
all_replay.save_to_disk("all_replay@{}.hf".format(cur_dataset))
if training_args.local_rank!=-1:
torch.distributed.barrier()
all_replay = load_from_disk("all_replay@{}.hf".format(cur_dataset))
all_ids.extend([task2id[cur_dataset]]*50)
all_features=np.load("./all_features.npy").tolist()
model.encoder.add_negs(all_ids,all_features)
for pre_dataset in pre_tasks:
data_args.dataset_name = pre_dataset
metric = load_metric("metric/squad_v1_local/squad_v1_local.py")
eval_dataset,eval_examples = eval_dataloaders[pre_dataset]
trainer = QuestionAnsweringTrainer(
model=model,
args=training_args,
train_dataset=None,
eval_dataset=eval_dataset,
eval_examples=eval_examples,
answer_column_name=answer_column,
dataset_name=data_args.dataset_name,
tokenizer=tokenizer,
data_collator=data_collator,
compute_metrics=compute_metrics if training_args.predict_with_generate else None,
)
torch.cuda.empty_cache()
logger.info("*** Evaluate:{} ***".format(data_args.dataset_name))
max_length, num_beams, ignore_keys_for_eval = None, None, None
metrics = trainer.evaluate(max_length=max_length, num_beams=num_beams, ignore_keys=ignore_keys_for_eval,metric_key_prefix="eval")
max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
trainer.log_metrics("eval", metrics)
trainer.save_metrics("eval", metrics)
if training_args.local_rank<=0:
try:
print("after_train_",cur_dataset,"_test_",pre_dataset,file=fileout)
print(metrics,file=fileout)
except:
pass
languages = [l for l in [data_args.source_lang, data_args.target_lang] if l is not None]
if len(languages) > 0:
kwargs["language"] = languages
return None
def _mp_fn(index):
    """Per-process entry point for `xla_spawn` (TPU training).

    `index` is the process ordinal supplied by the spawner; it is unused
    because `main()` reads its configuration from the environment/args.
    """
    # For xla_spawn (TPUs)
    main()
164,620 | import sys
from typing import List, Optional, Tuple
from QAInput import StructuralQAInput as QAInput
def preprocess_sqaud_batch(
    examples,
    question_column: str,
    context_column: str,
    answer_column: str,
) -> Tuple[List[str], List[str]]:
    """Render SQuAD-style extractive-QA examples into (input, target) text pairs.

    Each input is the question/context pair rendered by the QAInput extractive
    template; each target is the first gold answer text, or "" when the
    example is unanswerable.
    """
    questions = examples[question_column]
    contexts = examples[context_column]
    answers = examples[answer_column]
    inputs = [QAInput.qg_input_extractive_qa(context, question) for question, context in zip(questions, contexts)]
    targets = [answer["text"][0] if len(answer["text"]) > 0 else "" for answer in answers]
    return inputs, targets
164,621 | import sys
from typing import List, Optional, Tuple
from QAInput import StructuralQAInput as QAInput
def preprocess_sqaud_abstractive_batch(
    examples,
    question_column: str,
    context_column: str,
    answer_column: str,
) -> Tuple[List[str], List[str]]:
    """Render SQuAD-style examples with the abstractive-QA template.

    Targets are the first gold answer text, or "" for unanswerable examples.
    """
    questions = examples[question_column]
    contexts = examples[context_column]
    answers = examples[answer_column]
    inputs = [QAInput.qg_input_abstrativeqa(context, question) for question, context in zip(questions, contexts)]
    targets = [answer["text"][0] if len(answer["text"]) > 0 else "" for answer in answers]
    return inputs, targets
164,622 | import sys
from typing import List, Optional, Tuple
from QAInput import StructuralQAInput as QAInput
def preprocess_boolq_batch(
    examples,
    question_column: str,
    context_column: str,
    answer_column: str,
) -> Tuple[List[str], List[str]]:
    """Render BoolQ examples into (input, target) pairs.

    The column arguments are ignored: BoolQ's fixed schema
    ('question'/'passage'/'answer') is used. The boolean answer is
    stringified ("True"/"False") to form the target.
    """
    question_column, context_column, answer_column = 'question', 'passage', 'answer'
    questions = examples[question_column]
    contexts = examples[context_column]
    answers = examples[answer_column]
    inputs = [QAInput.qg_input_boolqa(context, question) for question, context in zip(questions, contexts)]
    targets = [str(ans) for ans in answers]
    return inputs, targets
164,623 | import sys
from typing import List, Optional, Tuple
from QAInput import StructuralQAInput as QAInput
def preprocess_boolq_batch_pretrain(
    examples,
    question_column: str,
    context_column: str,
    answer_column: str,
) -> Tuple[List[str], List[str]]:
    """Render boolean-QA pretraining examples into (input, target) pairs.

    Unlike `preprocess_boolq_batch`, answers here are SQuAD-style dicts; the
    first answer text is capitalized (e.g. "true" -> "True") as the target.
    """
    questions = examples[question_column]
    contexts = examples[context_column]
    answers = examples[answer_column]
    inputs = [QAInput.qg_input_boolqa(context, question) for question, context in zip(questions, contexts)]
    targets = [answer["text"][0].capitalize() if len(answer["text"]) > 0 else "" for answer in answers]
    return inputs, targets
164,624 | import sys
from typing import List, Optional, Tuple
from QAInput import StructuralQAInput as QAInput
def preprocess_narrativeqa_batch(
    examples,
    question_column: str,
    context_column: str,
    answer_column: str,
) -> Tuple[List[str], List[str]]:
    """Render NarrativeQA examples into (input, target) pairs.

    The column arguments are ignored: NarrativeQA's nested schema is used
    (document summary as context, first reference answer as target).
    """
    contexts = [exp['summary']['text'] for exp in examples['document']]
    questions = [exp['text'] for exp in examples['question']]
    answers = [ans[0]['text'] for ans in examples['answers']]
    inputs = [QAInput.qg_input_abstrativeqa(context, question) for question, context in zip(questions, contexts)]
    targets = answers
    return inputs, targets
164,625 | import sys
from typing import List, Optional, Tuple
from QAInput import StructuralQAInput as QAInput
def preprocess_narrativeqa_batch_pretrain(
    examples,
    question_column: str,
    context_column: str,
    answer_column: str,
) -> Tuple[List[str], List[str]]:
    """Render flattened NarrativeQA pretraining examples (SQuAD-style answer
    dicts) into (input, target) pairs with the abstractive template."""
    questions = examples[question_column]
    contexts = examples[context_column]
    answers = examples[answer_column]
    inputs = [QAInput.qg_input_abstrativeqa(context, question) for question, context in zip(questions, contexts)]
    targets = [answer["text"][0] if len(answer["text"]) > 0 else "" for answer in answers]
    return inputs, targets
164,626 | import sys
from typing import List, Optional, Tuple
from QAInput import StructuralQAInput as QAInput
def preprocess_drop_batch(
    examples,
    question_column: str,
    context_column: str,
    answer_column: str,
) -> Tuple[List[str], List[str]]:
    """Render DROP examples into (input, target) pairs.

    The column arguments are ignored: DROP's fixed schema
    ('passage'/'question'/'answers') is used.
    """
    contexts = examples['passage']
    questions = examples['question']
    answers = examples['answers']
    inputs = [QAInput.qg_input_abstrativeqa(context, question) for question, context in zip(questions, contexts)]
    targets = [answer["text"][0] if len(answer["text"]) > 0 else "" for answer in answers]
    return inputs, targets
164,627 | import sys
from typing import List, Optional, Tuple
from QAInput import StructuralQAInput as QAInput
def preprocess_race_batch(
    examples,
    question_column: str,
    context_column: str,
    answer_column: str,
) -> Tuple[List[str], List[str]]:
    """Render RACE multiple-choice examples into (input, target) pairs.

    The four options are formatted as an "options: A. ...; D. ..." string for
    the multi-choice template; the target is the full text of the option named
    by the letter answer.
    """
    contexts = examples['article']
    questions = examples['question']
    all_options = examples['options']
    answers = examples['answer']
    options_texts = [f'options: A. {options[0]}; B. {options[1]}; C. {options[2]}; D. {options[3]}' for options in all_options]
    inputs = [QAInput.qg_input_multirc(context, question, ops) for question, context, ops in zip(questions, contexts, options_texts)]
    # Map the letter label to its index into the options list.
    ans_map = {'A': 0, 'B': 1, 'C': 2, 'D': 3}
    targets = [options[ans_map[answer]] for options, answer in zip(all_options, answers)]
    return inputs, targets
164,628 | import sys
from typing import List, Optional, Tuple
from QAInput import StructuralQAInput as QAInput
def preprocess_newsqa_batch(
    examples,
    question_column: str,
    context_column: str,
    answer_column: str,
) -> Tuple[List[str], List[str]]:
    """Render NewsQA examples into (input, target) pairs using the
    extractive-QA template; targets are the first gold answer text ("" when
    unanswerable)."""
    questions = examples[question_column]
    contexts = examples[context_column]
    answers = examples[answer_column]
    inputs = [QAInput.qg_input_extractive_qa(context, question) for question, context in zip(questions, contexts)]
    targets = [answer["text"][0] if len(answer["text"]) > 0 else "" for answer in answers]
    return inputs, targets
164,629 | import sys
from typing import List, Optional, Tuple
from QAInput import StructuralQAInput as QAInput
def preprocess_ropes_batch(
    examples,
    question_column: str,
    context_column: str,
    answer_column: str,
) -> Tuple[List[str], List[str]]:
    """Render ROPES examples into (input, target) pairs.

    ROPES has no single context column; the 'background' and 'situation'
    fields are concatenated (space-separated) to form the context.
    """
    questions = examples[question_column]
    backgrounds = examples["background"]
    situations = examples["situation"]
    answers = examples[answer_column]
    inputs = [QAInput.qg_input_extractive_qa(" ".join([background, situation]), question) for question, background, situation in zip(questions, backgrounds, situations)]
    targets = [answer["text"][0] if len(answer["text"]) > 0 else "" for answer in answers]
    return inputs, targets
164,630 | import sys
from typing import List, Optional, Tuple
from QAInput import StructuralQAInput as QAInput
def preprocess_openbookqa_batch(
    examples,
    question_column: str,
    context_column: str,
    answer_column: str,
) -> Tuple[List[str], List[str]]:
    """Render OpenBookQA examples into (input, target) pairs.

    OpenBookQA provides no passage, so the multi-choice template is given an
    empty context. The target is the option text selected by `answerKey`.
    """
    questions = examples['question_stem']
    all_options = examples['choices']
    answers = examples['answerKey']
    options_texts = [f"options: A. {options['text'][0]}; B. {options['text'][1]}; C. {options['text'][2]}; D. {options['text'][3]}" for options in all_options]
    inputs = [QAInput.qg_input_multirc("", question, ops) for question, ops in zip(questions, options_texts)]
    # Map the letter label to its index into the option texts.
    ans_map = {'A': 0, 'B': 1, 'C': 2, 'D': 3}
    targets = [options['text'][ans_map[answer]] for options, answer in zip(all_options, answers)]
    return inputs, targets
164,631 | import sys
from typing import List, Optional, Tuple
from QAInput import StructuralQAInput as QAInput
def preprocess_social_iqa_batch(
    examples,
    question_column: str,
    context_column: str,
    answer_column: str,
) -> Tuple[List[str], List[str]]:
    """Render three-option multiple-choice examples into (input, target) pairs.

    NOTE(review): this reads 'article'/'options'/'answer' columns (a RACE-like
    schema rather than raw social_i_qa fields) — presumably the dataset has
    been pre-converted upstream; verify against the loader.
    """
    contexts = examples['article']
    questions = examples['question']
    all_options = examples['options']
    answers = examples['answer']
    options_texts = [f'options: A. {options[0]}; B. {options[1]}; C. {options[2]}' for options in all_options]
    inputs = [QAInput.qg_input_multirc(context, question, ops) for question, context, ops in zip(questions, contexts, options_texts)]
    # Map the letter label to its index into the options list.
    ans_map = {'A': 0, 'B': 1, 'C': 2}
    targets = [options[ans_map[answer]] for options, answer in zip(all_options, answers)]
    return inputs, targets
164,632 | import sys
from typing import List, Optional, Tuple
from QAInput import StructuralQAInput as QAInput
def preprocess_dream_batch(
    examples,
    question_column: str,
    context_column: str,
    answer_column: str,
) -> Tuple[List[str], List[str]]:
    """Render DREAM dialogue multiple-choice examples into (input, target)
    pairs; dialogue turns are joined into one context string and the target is
    the answer text itself."""
    contexts = [" ".join(dialogue) for dialogue in examples['dialogue']]
    questions = examples['question']
    all_options = examples['choice']
    answers = examples['answer']
    # Kept for parity with the other multi-choice preprocessors, though the
    # target below uses the answer text directly rather than these indices.
    answer_idxs = [options.index(answer) for answer, options in zip(answers, all_options)]
    options_texts = [f'options: A. {options[0]}; B. {options[1]}; C. {options[2]}' for options in all_options]
    inputs = [QAInput.qg_input_multirc(context, question, ops) for question, context, ops in zip(questions, contexts, options_texts)]
    targets = answers
    return inputs, targets
164,633 | import itertools
import json
import os
import csv
import errno
import random
from random import shuffle
from typing import List
import codecs
import nltk
import glob
import xml.etree.ElementTree as ET
from datasets import load_dataset
from nltk.corpus import stopwords
from collections import Counter
import json
def preprocess_function(examples):
    """Tokenize a training batch.

    Builds (input, target) text with the module-level `preprocess_all`,
    tokenizes both sides with the module-level `tokenizer`/`padding` settings,
    and stores the target token ids under "labels".
    """
    preprocess_fn = preprocess_all  # dataset_name_to_func(data_args.dataset_name)
    inputs, targets = preprocess_fn(examples, "input", "output")
    model_inputs = tokenizer(inputs, max_length=1024, padding=padding, truncation=True)
    # Setup the tokenizer for targets
    with tokenizer.as_target_tokenizer():
        labels = tokenizer(targets, max_length=128, padding=padding, truncation=True)
    model_inputs["labels"] = labels["input_ids"]
    return model_inputs
164,634 | import itertools
import json
import os
import csv
import errno
import random
from random import shuffle
from typing import List
import codecs
import nltk
import glob
import xml.etree.ElementTree as ET
from datasets import load_dataset
from nltk.corpus import stopwords
from collections import Counter
import json
def preprocess_function_test(examples):
    """Tokenize an evaluation batch.

    Same tokenization as `preprocess_function`, plus per-row "example_id" and
    "id" fields (the row's position, since each example yields exactly one
    span here) used later to align predictions with references.
    """
    preprocess_fn = preprocess_all  # dataset_name_to_func(data_args.dataset_name)
    inputs, targets = preprocess_fn(examples, "input", "output")
    model_inputs = tokenizer(inputs, max_length=1024, padding=padding, truncation=True)
    # Setup the tokenizer for targets
    with tokenizer.as_target_tokenizer():
        labels = tokenizer(targets, max_length=128, padding=padding, truncation=True)
    model_inputs["example_id"] = []
    model_inputs["id"] = []
    for i in range(len(model_inputs["input_ids"])):
        # One example could give several spans; here the mapping is 1:1, so
        # the span index doubles as the example index.
        model_inputs["example_id"].append(i)
        model_inputs["id"].append(i)
    model_inputs["labels"] = labels["input_ids"]
    return model_inputs
164,635 | import itertools
import json
import os
import csv
import errno
import random
from random import shuffle
from typing import List
import codecs
import nltk
import glob
import xml.etree.ElementTree as ET
from datasets import load_dataset
from nltk.corpus import stopwords
from collections import Counter
import json
def add_id(example, index):
    """Attach the row's positional index as an 'id' field.

    Intended for `Dataset.map(add_id, with_indices=True)`. Mutates `example`
    in place and returns it.
    """
    example.update({'id': index})
    return example
164,636 | import itertools
import json
import os
import csv
import errno
import random
from random import shuffle
from typing import List
import codecs
import nltk
import glob
import xml.etree.ElementTree as ET
from datasets import load_dataset
from nltk.corpus import stopwords
from collections import Counter
import json
def prep(raw_ds, fname):
    """Flatten a SQuAD-style dataset dict and write "{fname}-train.jsonl".

    Each QA pair becomes one JSON line with keys "question", "context" and
    "answer" (list of all answer texts). Unanswerable questions (empty answer
    list, asserted `is_impossible`) get a single empty-string answer.
    """
    # Flatten: every article contributes its list of paragraphs.
    ds = []
    for paragraphs in map(lambda x: x["paragraphs"], raw_ds["data"]):
        ds.extend(paragraphs)
    print(len(ds))
    print(len(raw_ds["data"]))
    # `with` guarantees the file is flushed and closed even on error.
    with open("{}-train.jsonl".format(fname), 'w') as fout:
        for d in ds:
            context = d["context"]
            for qa in d["qas"]:
                question = qa["question"]
                raw_answers = qa["answers"]
                if len(raw_answers) == 0:
                    assert qa["is_impossible"]
                    raw_answers.append({"text": ""})
                answers = [raw_answer["text"] for raw_answer in raw_answers]
                jsonline = json.dumps({"question": question, "context": context, "answer": answers})
                print(jsonline, file=fout)
164,637 | import itertools
import json
import os
import csv
import errno
import random
from random import shuffle
from typing import List
import codecs
import nltk
import glob
import xml.etree.ElementTree as ET
from datasets import load_dataset
from nltk.corpus import stopwords
from collections import Counter
import json
def prep_test(raw_ds, use_answers, fname):
    """Flatten a SQuAD-style dataset dict and write "{fname}-eval.jsonl".

    Each QA pair becomes one JSON line with "question", "context", "answer"
    (all answer texts; [""] for unanswerable) and "preid" (the original qa
    id). For wikisql/woz.en/multinli.in.out the lines are sorted by "preid"
    so the evaluation order is deterministic.

    NOTE(review): `use_answers` is accepted for interface compatibility but
    is ignored (the original immediately overwrote it with None).
    """
    # Flatten: every article contributes its list of paragraphs.
    ds = []
    for paragraphs in map(lambda x: x["paragraphs"], raw_ds["data"]):
        ds.extend(paragraphs)
    print(len(ds))
    print(len(raw_ds["data"]))
    all_json_lines = []
    for d in ds:
        context = d["context"]
        for qa in d["qas"]:
            question = qa["question"]
            raw_answers = qa["answers"]
            if len(raw_answers) == 0:
                assert qa["is_impossible"]
                raw_answers.append({"text": ""})
            answers = [raw_answer["text"] for raw_answer in raw_answers]
            all_json_lines.append({"question": question, "context": context, "answer": answers, "preid": qa["id"]})
    if fname in ["wikisql", "woz.en", "multinli.in.out"]:
        all_json_lines.sort(key=lambda x: x["preid"])
    with open("{}-eval.jsonl".format(fname), 'w') as fout:
        for item in all_json_lines:
            print(json.dumps(item), file=fout)
164,641 | import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
def normalize_answer(s):
def compute_exact(a_gold, a_pred):
def compute_f1(a_gold, a_pred):
def get_raw_scores(dataset, preds):
    """Compute per-question exact-match and F1 scores (SQuAD v2 style).

    `dataset` is the SQuAD "data" list; `preds` maps question id -> predicted
    answer string. Questions with no prediction are skipped with a warning.
    Returns two dicts keyed by question id: (exact_scores, f1_scores).
    """
    exact_scores = {}
    f1_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                if qid not in preds:
                    print(f"Missing prediction for {qid}")
                    continue
                a_pred = preds[qid]
                # Gold answers that are non-empty after normalization.
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                    if a_pred != "":
                        exact_scores[qid] = 0
                        f1_scores[qid] = 0
                    else:
                        exact_scores[qid] = 1
                        f1_scores[qid] = 1
                else:
                    # Take max over all gold answers
                    exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                    f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)
    return exact_scores, f1_scores
164,653 | import itertools
import json
import os
import csv
import errno
import random
from random import shuffle
from typing import List
from tqdm import tqdm
import codecs
import glob
import xml.etree.ElementTree as ET
from datasets import load_dataset
from QAInput import StructuralQAInput, SimpleQAInput
from transformers import (
AutoConfig,
AutoModelForSeq2SeqLM,
AutoTokenizer,
DataCollatorForSeq2Seq,
HfArgumentParser,
M2M100Tokenizer,
MBart50Tokenizer,
EvalPrediction,
MBart50TokenizerFast,
MBartTokenizer,
MBartTokenizerFast,
Seq2SeqTrainer,
Seq2SeqTrainingArguments,
default_data_collator,
set_seed,
)
import copy
from collections import Counter
import json
class SimpleQAInput:
    """Minimal prompt builder: renders "question \\n context" with no extra
    task/format markers."""

    def qg_input(cls, context, question, options=None):
        # NOTE(review): the first parameter is named `cls` but the method is
        # not decorated with @classmethod; callers pass every argument
        # positionally, so adding the decorator would shift them — left as-is.
        # `options` is accepted for signature parity and ignored.
        return f'{question} \\n {context}'
def preprocess_plain(
    examples,
    question_column: str,
    context_column: str,
    answer_column: str):
    """Build plain "question \\n context" inputs; the target for each example
    is its first answer string.

    NOTE(review): `qg_input`'s declared order is (cls, context, question) but
    this call passes (question, context) positionally — verify against the
    real QAInput.SimpleQAInput implementation before changing.
    """
    questions = examples[question_column]
    contexts = examples[context_column]
    answers = examples[answer_column]
    inputs = [SimpleQAInput.qg_input(question, context) for question, context in zip(questions, contexts)]
    answers = [_[0] for _ in answers]
    return inputs, answers
164,654 | import itertools
import json
import os
import csv
import errno
import random
from random import shuffle
from typing import List
from tqdm import tqdm
import codecs
import glob
import xml.etree.ElementTree as ET
from datasets import load_dataset
from QAInput import StructuralQAInput, SimpleQAInput
from transformers import (
AutoConfig,
AutoModelForSeq2SeqLM,
AutoTokenizer,
DataCollatorForSeq2Seq,
HfArgumentParser,
M2M100Tokenizer,
MBart50Tokenizer,
EvalPrediction,
MBart50TokenizerFast,
MBartTokenizer,
MBartTokenizerFast,
Seq2SeqTrainer,
Seq2SeqTrainingArguments,
default_data_collator,
set_seed,
)
def preprocess_function(examples):
    """Tokenize a training batch and prepend negative prompt-placeholder ids.

    Negative token ids are sentinels resolved to learned prompt embeddings by
    the model's encoder. Four blocks (10 general + 10 format + 20 task +
    100 domain = 140 ids) are prepended to every sequence, and the attention
    mask is extended with 140 ones to match.

    Relies on module-level `preprocess_proqa`, `tokenizer`, `task2format`,
    `task2id` and `dataset_name`.
    """
    preprocess_fn = preprocess_proqa#dataset_name_to_func(data_args.dataset_name)
    inputs, targets = preprocess_fn(examples, "question","context","answer")
    model_inputs = tokenizer(inputs, max_length=1024, padding=False, truncation=True)
    # Setup the tokenizer for targets
    with tokenizer.as_target_tokenizer():
        labels = tokenizer(targets, max_length=128, padding=False, truncation=True)
    model_inputs["labels"] = labels["input_ids"]
    # 10 general prompt ids: -(1521)..-(1530).
    gen_prompt_ids = [-(i+1) for i in range(1520,1520+10)]
    format_id = task2format[dataset_name]
    format_prompt_id_start = 300
    # 10 format-specific prompt ids for this dataset's answer format.
    format_prompt_ids = [-(i+1) for i in range(format_prompt_id_start + format_id * 10,
                                               format_prompt_id_start + (format_id + 1) * 10)]
    task_id = task2id[dataset_name]
    task_prompt_id_start = 0
    # 20 task-specific prompt ids for this dataset.
    task_prompt_ids = [- (i + 1) for i in range(task_prompt_id_start + task_id * 20,
                                                task_prompt_id_start + (task_id + 1) * 20)]
    domain_prompt_id_start = 20*30
    domain_prompt_number = 20
    # The same 20 domain prompt ids repeated 5 times (100 ids total).
    domain_prompt_ids = [- (i + 1) for i in range(domain_prompt_id_start,
                                                  domain_prompt_id_start + 20)]*5
    input_ids = copy.deepcopy(
        [gen_prompt_ids+format_prompt_ids+task_prompt_ids + domain_prompt_ids+input_ids for input_ids in model_inputs['input_ids']])
    model_inputs['input_ids'] = input_ids # [format_prompt_ids+input_ids for input_ids in model_inputs['input_ids']]
    # 140 = 10 + 10 + 20 + 100 prepended prompt positions, all attended to.
    model_inputs['attention_mask'] = [[1] * 140 + attention_mask for attention_mask in
                                      model_inputs['attention_mask']]
    return model_inputs
import copy
from collections import Counter
import json
def prep(raw_ds, fname):
    """Tokenize `raw_ds` with `preprocess_function` and save the result to
    "./ours/{fname}-train.hf".

    Sets the module-level `dataset_name` (read by `preprocess_function` to
    pick prompt ids) and adds a sequential "id" column.
    """
    column_names = raw_ds.column_names
    global dataset_name
    dataset_name = fname
    train_dataset = raw_ds.map(
        preprocess_function,
        batched=True,
        num_proc=4,
        remove_columns=column_names,
        load_from_cache_file=True,
        desc="Running tokenizer on train dataset",
    )
    train_dataset = train_dataset.add_column("id", range(len(train_dataset)))
    train_dataset.save_to_disk("./ours/{}-train.hf".format(fname))
164,655 | import itertools
import json
import os
import csv
import errno
import random
from random import shuffle
from typing import List
from tqdm import tqdm
import codecs
import glob
import xml.etree.ElementTree as ET
from datasets import load_dataset
from QAInput import StructuralQAInput, SimpleQAInput
from transformers import (
AutoConfig,
AutoModelForSeq2SeqLM,
AutoTokenizer,
DataCollatorForSeq2Seq,
HfArgumentParser,
M2M100Tokenizer,
MBart50Tokenizer,
EvalPrediction,
MBart50TokenizerFast,
MBartTokenizer,
MBartTokenizerFast,
Seq2SeqTrainer,
Seq2SeqTrainingArguments,
default_data_collator,
set_seed,
)
def chuli_example(examples, fname):
    """Attach an "answers" column (text plus dummy zero answer_start offsets)
    to `examples`, sourcing the texts either from a side JSON file or from the
    dataset's own "answer" column."""
    cur_dataset = fname
    if fname in ["none"]:
        # Answer texts live in a companion "<fname>_answers.json" file.
        addition_answers = open(fname + "_answers.json", 'r')
        source_items = json.load(addition_answers)
    else:
        source_items = examples["answer"]
    answers_start = [
        {"text": item, "answer_start": [0] * len(item)}
        for item in source_items
    ]
    print(answers_start[:10])
    return examples.add_column("answers", answers_start)
def preprocess_function_valid(examples):
    """Tokenize a validation batch like `preprocess_function`, additionally
    recording each row's original example id under "example_id" so
    predictions can be aligned with their source examples.

    Prepends the same 140 negative prompt-placeholder ids (10 general +
    10 format + 20 task + 100 domain) and extends the attention mask to
    match. Relies on module-level `preprocess_proqa`, `tokenizer`,
    `task2format`, `task2id` and `dataset_name`.
    """
    preprocess_fn = preprocess_proqa#dataset_name_to_func(data_args.dataset_name)
    inputs, targets = preprocess_fn(examples, "question","context","answer")
    model_inputs = tokenizer(inputs, max_length=1024, padding=False, truncation=True)
    # Setup the tokenizer for targets
    with tokenizer.as_target_tokenizer():
        labels = tokenizer(targets, max_length=128, padding=False, truncation=True)
    model_inputs["example_id"] = []
    for i in range(len(model_inputs["input_ids"])):
        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = i #sample_mapping[i]
        model_inputs["example_id"].append(examples["id"][sample_index])
    # 10 general prompt ids: -(1521)..-(1530).
    gen_prompt_ids = [-(i+1) for i in range(1520,1520+10)]
    format_id = task2format[dataset_name]
    format_prompt_id_start = 300
    # 10 format-specific prompt ids for this dataset's answer format.
    format_prompt_ids = [-(i+1) for i in range(format_prompt_id_start + format_id * 10,
                                               format_prompt_id_start + (format_id + 1) * 10)]
    task_id = task2id[dataset_name]
    task_prompt_id_start = 0
    # 20 task-specific prompt ids for this dataset.
    task_prompt_ids = [- (i + 1) for i in range(task_prompt_id_start + task_id * 20,
                                                task_prompt_id_start + (task_id + 1) * 20)]
    domain_prompt_id_start = 20*30
    domain_prompt_number = 20
    # The same 20 domain prompt ids repeated 5 times (100 ids total).
    domain_prompt_ids = [- (i + 1) for i in range(domain_prompt_id_start,
                                                  domain_prompt_id_start + 20)]*5
    input_ids = copy.deepcopy(
        [gen_prompt_ids+format_prompt_ids+task_prompt_ids + domain_prompt_ids+input_ids for input_ids in model_inputs['input_ids']])
    model_inputs['input_ids'] = input_ids # [format_prompt_ids+input_ids for input_ids in model_inputs['input_ids']]
    # 140 = 10 + 10 + 20 + 100 prepended prompt positions, all attended to.
    model_inputs['attention_mask'] = [[1] * 140 + attention_mask for attention_mask in
                                      model_inputs['attention_mask']]
    model_inputs["labels"] = labels["input_ids"]
    return model_inputs
def add_id(example, index):
    """Store the row's positional `index` under the 'id' key of `example`
    (for `Dataset.map(..., with_indices=True)`) and return it."""
    example['id'] = index
    return example
import copy
from collections import Counter
import json
def prep_valid(raw_ds, fname):
    """Prepare and save the validation split for `fname`.

    Writes two artifacts: the tokenized dataset ("./ours/{fname}-eval.hf",
    via `preprocess_function_valid`) and the raw examples with an "answers"
    column and row ids ("./ours/{fname}-evalex.hf", via `chuli_example`).
    Sets the module-level `dataset_name` read by the preprocessing function.
    """
    global dataset_name
    dataset_name = fname
    eval_examples = copy.deepcopy(raw_ds)
    eval_examples = chuli_example(eval_examples, fname)
    column_names = raw_ds.column_names
    if 'id' not in eval_examples.features.keys():
        eval_examples = eval_examples.map(add_id, with_indices=True)
    eval_dataset = raw_ds.map(add_id, with_indices=True)
    eval_dataset = eval_dataset.map(
        preprocess_function_valid,
        batched=True,
        num_proc=4,
        remove_columns=column_names,
        load_from_cache_file=True,
        desc="Running tokenizer on validation dataset",
    )
    eval_dataset.save_to_disk("./ours/{}-eval.hf".format(fname))
    eval_examples.save_to_disk("./ours/{}-evalex.hf".format(fname))
164,656 | import sys
from typing import List, Optional, Tuple
from QAInput import SimpleQAInput as QAInput
def preprocess_sqaud_batch(
    examples,
    question_column: str,
    context_column: str,
    answer_column: str,
) -> Tuple[List[str], List[str]]:
    """Render SQuAD-style extractive-QA examples into (input, target) text
    pairs; targets are the first gold answer text ("" when unanswerable)."""
    questions = examples[question_column]
    contexts = examples[context_column]
    answers = examples[answer_column]
    inputs = [QAInput.qg_input_extractive_qa(context, question) for question, context in zip(questions, contexts)]
    targets = [answer["text"][0] if len(answer["text"]) > 0 else "" for answer in answers]
    return inputs, targets
164,657 | import sys
from typing import List, Optional, Tuple
from QAInput import SimpleQAInput as QAInput
def preprocess_sqaud_abstractive_batch(
    examples,
    question_column: str,
    context_column: str,
    answer_column: str,
) -> Tuple[List[str], List[str]]:
    """Render SQuAD-style examples with the abstractive-QA template; targets
    are the first gold answer text ("" when unanswerable)."""
    questions = examples[question_column]
    contexts = examples[context_column]
    answers = examples[answer_column]
    inputs = [QAInput.qg_input_abstrativeqa(context, question) for question, context in zip(questions, contexts)]
    targets = [answer["text"][0] if len(answer["text"]) > 0 else "" for answer in answers]
    return inputs, targets
164,658 | import sys
from typing import List, Optional, Tuple
from QAInput import SimpleQAInput as QAInput
def preprocess_boolq_batch(
    examples,
    question_column: str,
    context_column: str,
    answer_column: str,
) -> Tuple[List[str], List[str]]:
    """Format a BoolQ batch as boolean-QA inputs with stringified labels.

    The column-name parameters are deliberately overridden with BoolQ's fixed
    schema ('question', 'passage', 'answer'); the arguments are ignored.
    """
    question_column, context_column, answer_column = 'question', 'passage', 'answer'
    inputs = []
    for q, passage in zip(examples[question_column], examples[context_column]):
        inputs.append(QAInput.qg_input_boolqa(passage, q))
    # Labels are booleans; the model target is their string form ("True"/"False").
    targets = [str(label) for label in examples[answer_column]]
    return inputs, targets
164,659 | import sys
from typing import List, Optional, Tuple
from QAInput import SimpleQAInput as QAInput
def preprocess_boolq_batch_pretrain(
    examples,
    question_column: str,
    context_column: str,
    answer_column: str,
) -> Tuple[List[str], List[str]]:
    """Pretraining variant of BoolQ preprocessing.

    Reads the columns named by the arguments and capitalizes the first answer
    string (e.g. "true" -> "True"); empty answer lists map to "".
    """
    inputs = []
    for q, ctx in zip(examples[question_column], examples[context_column]):
        inputs.append(QAInput.qg_input_boolqa(ctx, q))
    targets = []
    for answer in examples[answer_column]:
        spans = answer["text"]
        targets.append(spans[0].capitalize() if len(spans) > 0 else "")
    return inputs, targets
164,660 | import sys
from typing import List, Optional, Tuple
from QAInput import SimpleQAInput as QAInput
def preprocess_narrativeqa_batch(
    examples,
    question_column: str,
    context_column: str,
    answer_column: str,
) -> Tuple[List[str], List[str]]:
    """Format a NarrativeQA batch as abstractive-QA inputs and targets.

    Uses the document *summary* as context and the first reference answer as
    the target. The column-name arguments are ignored (NarrativeQA has a
    fixed nested schema).
    """
    inputs = []
    targets = []
    for doc, question, answer_list in zip(
            examples['document'], examples['question'], examples['answers']):
        summary = doc['summary']['text']
        inputs.append(QAInput.qg_input_abstrativeqa(summary, question['text']))
        targets.append(answer_list[0]['text'])
    return inputs, targets
164,661 | import sys
from typing import List, Optional, Tuple
from QAInput import SimpleQAInput as QAInput
def preprocess_narrativeqa_batch_pretrain(
    examples,
    question_column: str,
    context_column: str,
    answer_column: str,
) -> Tuple[List[str], List[str]]:
    """Pretraining variant: flat-schema abstractive QA preprocessing.

    Unlike the regular NarrativeQA path, this one honors the column-name
    arguments and expects SQuAD-like ``{"text": [...]}`` answers.
    """
    inputs = []
    for q, ctx in zip(examples[question_column], examples[context_column]):
        inputs.append(QAInput.qg_input_abstrativeqa(ctx, q))
    targets = []
    for answer in examples[answer_column]:
        spans = answer["text"]
        targets.append(spans[0] if len(spans) > 0 else "")
    return inputs, targets
164,662 | import sys
from typing import List, Optional, Tuple
from QAInput import SimpleQAInput as QAInput
def preprocess_drop_batch(
    examples,
    question_column: str,
    context_column: str,
    answer_column: str,
) -> Tuple[List[str], List[str]]:
    """Format a DROP batch as abstractive-QA inputs and targets.

    Column-name arguments are ignored; DROP's fixed 'passage' / 'question' /
    'answers' columns are read directly.
    """
    inputs = []
    for q, passage in zip(examples['question'], examples['passage']):
        inputs.append(QAInput.qg_input_abstrativeqa(passage, q))
    targets = []
    for answer in examples['answers']:
        spans = answer["text"]
        targets.append(spans[0] if len(spans) > 0 else "")
    return inputs, targets
164,663 | import sys
from typing import List, Optional, Tuple
from QAInput import SimpleQAInput as QAInput
def preprocess_race_batch(
    examples,
    question_column: str,
    context_column: str,
    answer_column: str,
) -> Tuple[List[str], List[str]]:
    """Format a RACE batch as 4-way multiple-choice inputs and targets.

    The answer column holds a letter ('A'-'D'); the target emitted is the
    corresponding option *text*. Column-name arguments are ignored.
    """
    letter_to_idx = {'A': 0, 'B': 1, 'C': 2, 'D': 3}
    inputs = []
    targets = []
    for question, article, options, answer in zip(
            examples['question'], examples['article'],
            examples['options'], examples['answer']):
        options_text = f'options: A. {options[0]}; B. {options[1]}; C. {options[2]}; D. {options[3]}'
        inputs.append(QAInput.qg_input_multirc(article, question, options_text))
        targets.append(options[letter_to_idx[answer]])
    return inputs, targets
164,664 | import sys
from typing import List, Optional, Tuple
from QAInput import SimpleQAInput as QAInput
def preprocess_newsqa_batch(
    examples,
    question_column: str,
    context_column: str,
    answer_column: str,
) -> Tuple[List[str], List[str]]:
    """Format a NewsQA batch as extractive-QA inputs and gold-span targets."""
    contexts = examples[context_column]
    questions = examples[question_column]
    inputs = [
        QAInput.qg_input_extractive_qa(ctx, q)
        for q, ctx in zip(questions, contexts)
    ]
    targets = []
    for answer in examples[answer_column]:
        spans = answer["text"]
        targets.append(spans[0] if len(spans) > 0 else "")
    return inputs, targets
164,665 | import sys
from typing import List, Optional, Tuple
from QAInput import SimpleQAInput as QAInput
def preprocess_ropes_batch(
    examples,
    question_column: str,
    context_column: str,
    answer_column: str,
) -> Tuple[List[str], List[str]]:
    """Format a ROPES batch as extractive QA.

    The context is the background paragraph concatenated with the situation
    (space-separated); the context_column argument is not used.
    """
    inputs = []
    for q, background, situation in zip(
            examples[question_column], examples["background"], examples["situation"]):
        merged_context = " ".join([background, situation])
        inputs.append(QAInput.qg_input_extractive_qa(merged_context, q))
    targets = []
    for answer in examples[answer_column]:
        spans = answer["text"]
        targets.append(spans[0] if len(spans) > 0 else "")
    return inputs, targets
164,666 | import sys
from typing import List, Optional, Tuple
from QAInput import SimpleQAInput as QAInput
def preprocess_openbookqa_batch(
    examples,
    question_column: str,
    context_column: str,
    answer_column: str,
) -> Tuple[List[str], List[str]]:
    """Format an OpenBookQA batch as 4-way multiple choice with no context.

    Column-name arguments are ignored; the fixed 'question_stem', 'choices'
    and 'answerKey' columns are read directly.
    """
    letter_to_idx = {'A': 0, 'B': 1, 'C': 2, 'D': 3}
    inputs = []
    targets = []
    for stem, choices, answer in zip(
            examples['question_stem'], examples['choices'], examples['answerKey']):
        texts = choices['text']
        options_text = f"options: A. {texts[0]}; B. {texts[1]}; C. {texts[2]}; D. {texts[3]}"
        # OpenBookQA provides no passage, hence the empty context.
        inputs.append(QAInput.qg_input_multirc("", stem, options_text))
        targets.append(texts[letter_to_idx[answer]])
    return inputs, targets
164,667 | import sys
from typing import List, Optional, Tuple
from QAInput import SimpleQAInput as QAInput
def preprocess_social_iqa_batch(
    examples,
    question_column: str,
    context_column: str,
    answer_column: str,
) -> Tuple[List[str], List[str]]:
    """Format a Social IQA batch as 3-way multiple-choice inputs and targets.

    Column-name arguments are ignored; the fixed 'article', 'question',
    'options' and 'answer' (letter) columns are read directly.
    """
    letter_to_idx = {'A': 0, 'B': 1, 'C': 2, }
    inputs = []
    targets = []
    for question, article, options, answer in zip(
            examples['question'], examples['article'],
            examples['options'], examples['answer']):
        options_text = f'options: A. {options[0]}; B. {options[1]}; C. {options[2]}'
        inputs.append(QAInput.qg_input_multirc(article, question, options_text))
        targets.append(options[letter_to_idx[answer]])
    return inputs, targets
164,668 | import sys
from typing import List, Optional, Tuple
from QAInput import SimpleQAInput as QAInput
def preprocess_dream_batch(
    examples,
    question_column: str,
    context_column: str,
    answer_column: str,
) -> Tuple[List[str], List[str]]:
    """Format a DREAM batch as 3-way multiple choice over joined dialogues.

    The context is the dialogue turns joined with spaces; the answer column
    already contains the gold option *text*, so it is used as the target
    directly. Column-name arguments are ignored (DREAM has a fixed schema).

    Fix: removed an unused ``answer_idxs`` computation — it did no work that
    reached the output and could raise a spurious ValueError whenever an
    answer string was missing from its choices list.
    """
    contexts = [" ".join(dialogue) for dialogue in examples['dialogue']]
    questions = examples['question']
    all_options = examples['choice']
    answers = examples['answer']
    options_texts = [f'options: A. {options[0]}; B. {options[1]}; C. {options[2]}' for options in all_options]
    inputs = [QAInput.qg_input_multirc(context, question, ops) for question, context, ops in zip(questions, contexts, options_texts)]
    targets = answers
    return inputs, targets
164,669 | import logging
import os
import torch
import copy,random
import sys
import json
from dataclasses import dataclass, field
from typing import Optional
from typing import List, Optional, Tuple
from sklearn.cluster import KMeans
from models.metadecanotask import T5ForConditionalGeneration as PromptT5
from metrics import compute_metrics
from downstreamdeca.simple_processors import *
from downstreamdeca.l2ptrainer import QuestionAnsweringTrainer
import datasets
import numpy as np
from datasets import load_dataset, load_metric,load_from_disk,concatenate_datasets
import os
from functools import partial
import transformers
from transformers import (
AutoConfig,
AutoModelForSeq2SeqLM,
AutoTokenizer,
DataCollatorForSeq2Seq,
HfArgumentParser,
M2M100Tokenizer,
MBart50Tokenizer,
EvalPrediction,
MBart50TokenizerFast,
MBartTokenizer,
MBartTokenizerFast,
Seq2SeqTrainer,
Seq2SeqTrainingArguments,
default_data_collator,
set_seed,
)
from pathlib import Path
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
def main():
    """Continual-learning driver for PromptT5 over a sequence of decaNLP-style
    tasks.

    Parses HF arguments, builds the tokenizer/model with extra special tokens,
    then for each task in ``task_sequence``: trains on the task fused with a
    replay buffer, selects 50 diverse replay samples via the encoder's domain
    keys, and evaluates on every task seen so far, appending metrics to
    ``diana_log.txt``. Relies on module-level globals (``logger``, ``task2id``,
    ``format2id``, argument dataclasses) defined elsewhere in this file.
    """
    def preprocess_validation_function(examples):
        # Tokenize eval examples, recording each row's original example id so
        # post-processing can map generated answers back to raw examples.
        preprocess_fn = dataset_name_to_func(data_args.dataset_name)
        inputs, targets = preprocess_fn(examples, question_column, context_column, answer_column)
        model_inputs = tokenizer(inputs, max_length=data_args.max_source_length, padding=padding, truncation=True)
        # Setup the tokenizer for targets
        model_inputs["example_id"] = []
        for i in range(len(model_inputs["input_ids"])):
            # One example can give several spans, this is the index of the example containing this span of text.
            sample_index = i #sample_mapping[i]
            model_inputs["example_id"].append(examples["id"][sample_index])
        with tokenizer.as_target_tokenizer():
            labels = tokenizer(targets, max_length=data_args.max_target_length, padding=padding, truncation=True)
        # If we are padding here, replace all tokenizer.pad_token_id in the labels by -100 when we want to ignore
        # padding in the loss.
        if padding == "max_length" and data_args.ignore_pad_token_for_loss:
            labels["input_ids"] = [
                [(l if l != tokenizer.pad_token_id else -100) for l in label] for label in labels["input_ids"]
            ]
        model_inputs["labels"] = labels["input_ids"]
        return model_inputs
    def save_prompt_embedding(model,path):
        # Persist the learned prompt-embedding weights together with the
        # task/format id maps needed to reload them consistently.
        prompt_embedding = model.state_dict()['encoder.prompt_embeddings.weight']
        save_prompt_info = {'encoder.prompt_embeddings.weight':copy.deepcopy(prompt_embedding),'task2id':task2id,'format2id':format2id}
        prompt_path = os.path.join(path,'prompt_embedding_info')
        torch.save(save_prompt_info,prompt_path)
        logger.info(f'Saving prompt embedding information to {prompt_path}')
    def preprocess_function(examples):
        # Training-time tokenization; same as the validation variant but
        # without the per-row example ids.
        preprocess_fn = dataset_name_to_func(data_args.dataset_name)
        inputs, targets = preprocess_fn(examples, question_column, context_column, answer_column)
        model_inputs = tokenizer(inputs, max_length=data_args.max_source_length, padding=padding, truncation=True)
        # Setup the tokenizer for targets
        with tokenizer.as_target_tokenizer():
            labels = tokenizer(targets, max_length=data_args.max_target_length, padding=padding, truncation=True)
        # If we are padding here, replace all tokenizer.pad_token_id in the labels by -100 when we want to ignore
        # padding in the loss.
        if padding == "max_length" and data_args.ignore_pad_token_for_loss:
            labels["input_ids"] = [
                [(l if l != tokenizer.pad_token_id else -100) for l in label] for label in labels["input_ids"]
            ]
        model_inputs["labels"] = labels["input_ids"]
        return model_inputs
    def save_load_diverse_sample(model,trainset):
        # Select 50 "diverse" replay samples from the first quarter of the
        # training set: greedily bucket examples by their top-matching domain
        # keys (<=3 per key), then randomly sample 50 of the collected indices.
        # Returns the sub-dataset and each sample's query feature vector.
        with torch.no_grad():
            device = 'cuda:0'
            search_upbound = len(trainset)//4
            query_idxs = [None]*30
            keys = model.encoder.domain_keys
            for idx,item in enumerate(trainset.select(range(search_upbound))):
                query = model.encoder.get_query_vector(input_ids=torch.tensor([item['input_ids']]).long().to(device),
                                                       attention_mask=torch.tensor([item['attention_mask']]).long().to(device),
                                                       return_dict=True)
                result = torch.matmul(query,keys.t())
                result = torch.topk(result,5).indices[0].cpu().numpy().tolist()
                key_sel = None
                # First of the top-5 keys that still has room (<3 samples).
                for key_idx in result:
                    if query_idxs[key_idx] is None or len(query_idxs[key_idx])<3:
                        key_sel = key_idx
                        break
                if key_sel is not None:
                    if query_idxs[key_sel] is None:
                        query_idxs[key_sel] = [idx]
                    else:
                        query_idxs[key_sel].append(idx)
            total_idxs = []
            for item in query_idxs:
                try:
                    total_idxs.extend(item[:3])
                except:
                    # Bucket never filled (item is None): pad with random
                    # indices from the unsearched remainder of the train set.
                    total_idxs.extend(random.sample(list(range(search_upbound,len(trainset))),3))
            total_idxs = list(set(total_idxs))
            total_idxs = random.sample(total_idxs,50)
            sub_set = trainset.select(total_idxs)
            features = []
            for idx,item in enumerate(sub_set):
                query = model.encoder.get_query_vector(input_ids=torch.tensor([item['input_ids']]).long().to(device),
                                                       attention_mask=torch.tensor([item['attention_mask']]).long().to(device),
                                                       return_dict=True)
                features.append(query.detach().cpu().numpy())
            return sub_set,features
    def dataset_name_to_func(dataset_name):
        # Map a dataset name to its batch-preprocessing function.
        mapping = {
            'squad': preprocess_sqaud_batch,
            'squad_v2': preprocess_sqaud_batch,
            'boolq': preprocess_boolq_batch,
            'narrativeqa': preprocess_narrativeqa_batch,
            'race': preprocess_race_batch,
            'newsqa': preprocess_newsqa_batch,
            'quoref': preprocess_sqaud_batch,
            'ropes': preprocess_ropes_batch,
            'drop': preprocess_drop_batch,
            'nqopen': preprocess_sqaud_abstractive_batch,
            # 'multirc': preprocess_boolq_batch,
            'boolq_np': preprocess_boolq_batch,
            'openbookqa': preprocess_openbookqa_batch,
            'mctest': preprocess_race_batch,
            'social_iqa': preprocess_social_iqa_batch,
            'dream': preprocess_dream_batch,
        }
        return mapping[dataset_name]
    # ---- Argument parsing: either a single JSON config file or CLI flags. ----
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    # Per-dataset metric script paths (local metric implementations).
    dataset_name_to_metric = {
        'squad': 'metric/squad_v1_local/squad_v1_local.py',
        'squad_v2': 'metric/squad_v2_local/squad_v2_local.py',
        'newsqa': 'metric/squad_v2_local/squad_v2_local.py',
        'boolq': 'metric/accuracy.py',
        'narrativeqa': 'metric/rouge_local/rouge_metric.py',
        'race': 'metric/accuracy.py',
        'quoref': 'metric/squad_v1_local/squad_v1_local.py',
        'ropes': 'metric/squad_v1_local/squad_v1_local.py',
        'drop': 'metric/squad_v1_local/squad_v1_local.py',
        'nqopen': 'metric/squad_v1_local/squad_v1_local.py',
        'boolq_np': 'metric/accuracy.py',
        'openbookqa': 'metric/accuracy.py',
        'mctest': 'metric/accuracy.py',
        'social_iqa': 'metric/accuracy.py',
        'dream': 'metric/accuracy.py',
    }
    # ---- Logging setup (stdout handler; verbosity from training args). ----
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")
    if data_args.source_prefix is None and model_args.model_name_or_path in [
        "t5-small",
        "t5-base",
        "t5-large",
        "t5-3b",
        "t5-11b",
    ]:
        logger.warning(
            "You're running a t5 model but didn't provide a source prefix, which is expected, e.g. with "
            "`--source_prefix 'translate English to German: ' `"
        )
    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )
    # Set seed before initializing model.
    set_seed(training_args.seed)
    # ---- Tokenizer/model construction with extra (special) marker tokens. ----
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    # tokenizer.add_tokens(['[TASK]', '[ABSTRACTIVE]','[QUESTION]','[CONTEXT]','[BOOL]','[EXTRACTIVE]','[MultiChoice]',
    #                       '[OPTIONS]'])
    tokens_to_add = ['[ABSTRACTIVE]', '[BOOL]', '[EXTRACTIVE]', '[MultiChoice]']
    special_tokens_dict = {'additional_special_tokens': ['[TASK]', '[QUESTION]', '[CONTEXT]',
                                                         '[OPTIONS]']}
    tokenizer.add_tokens(tokens_to_add)
    num_added_toks = tokenizer.add_special_tokens(special_tokens_dict)
    added_tokens = tokenizer.get_added_vocab()
    logger.info('Added tokens: {}'.format(added_tokens))
    model = PromptT5.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        # task_num = data_args.max_task_num,
        # prompt_num = data_args.prompt_number,
        # format_num = data_args.qa_task_type_num,
        # add_task_prompt = False
    )
    # Grow the embedding matrix to cover the tokens added above.
    model.resize_token_embeddings(len(tokenizer))
    #reload format specific task-prompt for newly involved task
    #format_prompts###task_promptsf
    # Both reload paths are hard-disabled here (note the trailing `#@` markers).
    data_args.reload_from_trained_prompt = False#@
    data_args.load_from_format_task_id = False#@
    ### before pretrain come !!!!!!
    if data_args.load_from_format_task_id and (data_args.dataset_name not in seed_datasets) and not data_args.reload_from_trained_prompt:
        task_start_id = data_args.prompt_number * len(format2dataset.keys())
        task_id = task_start_id + task2id[data_args.dataset_name] * data_args.prompt_number
        format_task_id = task_start_id + task2id[dataset2format[data_args.dataset_name]] * data_args.prompt_number
        model.state_dict()['encoder.prompt_embeddings.weight'][task_id:task_id+data_args.prompt_number,:] = model.state_dict()['encoder.prompt_embeddings.weight'][format_task_id:format_task_id+data_args.prompt_number,:]
        logger.info(f'Successfully initialize format {dataset2format[data_args.dataset_name]} task prompt for new task {data_args.dataset_name}, task id {task_id}')
        # print(dataset2format[data_args.dataset_name])
        # print(data_args.dataset_name)
    elif data_args.reload_from_trained_prompt:
        assert data_args.trained_prompt_path,'Must specify the path of stored prompt'
        prompt_info = torch.load(data_args.trained_prompt_path)
        assert prompt_info['task2id'][data_args.dataset_name]==task2id[data_args.dataset_name],f'the task id in trained prompt task id is not matched to the current task id for {data_args.dataset_name}'
        assert prompt_info['format2id'].keys()==format2id.keys(),'the format dont match'
        task_start_id = data_args.prompt_number * len(format2dataset.keys())
        task_id = task_start_id + task2id[data_args.dataset_name] * data_args.prompt_number
        logger.info('task id range {} {}'.format(task_id,task_id+data_args.prompt_number))
        # assert torch.sum(model.state_dict()['encoder.prompt_embeddings.weight'][task_id:task_id+data_args.prompt_number,:] - prompt_info['encoder.prompt_embeddings.weight'][task_id:task_id+data_args.prompt_number,:])==0
        model.state_dict()['encoder.prompt_embeddings.weight'][task_id:task_id+data_args.prompt_number,:] = prompt_info['encoder.prompt_embeddings.weight'][task_id:task_id+data_args.prompt_number,:]
        format_id = format2id[dataset2format[data_args.dataset_name]]
        model.state_dict()['encoder.prompt_embeddings.weight'][format_id*data_args.prompt_number:(format_id+1)*data_args.prompt_number, :] = prompt_info['encoder.prompt_embeddings.weight'][format_id*data_args.prompt_number:(format_id+1)*data_args.prompt_number, :]
        logger.info(
            f'Successfully restore task+format prompt for the task {data_args.dataset_name} from {data_args.trained_prompt_path}')
    # Set decoder_start_token_id
    if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
        if isinstance(tokenizer, MBartTokenizer):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.target_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.target_lang)
    if model.config.decoder_start_token_id is None:
        raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")
    prefix = data_args.source_prefix if data_args.source_prefix is not None else ""
    if training_args.local_rank == -1 or training_args.no_cuda:
        device = torch.device("cuda")
        n_gpu = torch.cuda.device_count()
    # Temporarily set max_target_length for training.
    max_target_length = data_args.max_target_length
    padding = "max_length" if data_args.pad_to_max_length else False
    if training_args.label_smoothing_factor > 0 and not hasattr(model, "prepare_decoder_input_ids_from_labels"):
        logger.warning(
            "label_smoothing is enabled but the `prepare_decoder_input_ids_from_labels` method is not defined for"
            f"`{model.__class__.__name__}`. This will lead to loss being calculated twice and will take up more memory"
        )
    question_column = data_args.question_column
    context_column = data_args.context_column
    answer_column = data_args.answer_column
    # import random
    if data_args.max_source_length > tokenizer.model_max_length:
        logger.warning(
            f"The max_seq_length passed ({data_args.max_source_length}) is larger than the maximum length for the"
            f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
        )
    data_args.max_source_length = min(data_args.max_source_length, tokenizer.model_max_length)
    # Data collator
    label_pad_token_id = -100 if data_args.ignore_pad_token_for_loss else tokenizer.pad_token_id
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    else:
        data_collator = DataCollatorForSeq2Seq(
            tokenizer,
            model=model,
            label_pad_token_id=label_pad_token_id,
            pad_to_multiple_of=8 if training_args.fp16 else None,
        )
    #start
    # ---- Load the pre-tokenized train/eval splits from disk (200 rows each
    # for the training tasks; eval-only tasks get their full eval split). ----
    train_dataloaders = {}
    eval_dataloaders = {}
    replay_dataloaders = {}
    all_replay = None
    for ds_name in ["squad","wikisql","sst","srl","woz.en"]:
        if True:
            cur_dataset = ds_name
            train_dataloaders[ds_name] = load_from_disk("./oursdecanotask/{}-train.hf".format(cur_dataset)).select(range(200))
            eval_dataloaders[ds_name] = (load_from_disk("./oursdecanotask/{}-eval.hf".format(cur_dataset)).select(range(200)),load_from_disk("./oursdecanotask/{}-evalex.hf".format(cur_dataset)).select(range(200)))
    for ds_name in ["cnn_dailymail","multinli.in.out","zre"]:
        eval_dataloaders[ds_name] = (load_from_disk("./oursdecanotask/{}-eval.hf".format(ds_name)),load_from_disk("./oursdecanotask/{}-evalex.hf".format(ds_name)))
    pre_tasks = []
    pre_general = []
    pre_test = []
    max_length = (
        training_args.generation_max_length
        if training_args.generation_max_length is not None
        else data_args.val_max_target_length
    )
    num_beams = data_args.num_beams if data_args.num_beams is not None else training_args.generation_num_beams
    task_sequence = ["squad","wikisql","sst","srl","woz.en"]
    # task_sequence = ["woz.en","srl","sst","wikisql","squad"]
    need_to_do_dss = []
    fileout = open("diana_log.txt",'w')
    all_replay = None
    all_features = []
    all_ids = []
    cluster_num=0
    # ---- Continual-learning loop: train on each task in order, then evaluate
    # on every task seen so far (plus the three eval-only tasks at the end). ----
    for cur_dataset in task_sequence:
        cluster_num+=5
        pre_tasks.append(cur_dataset)
        if cur_dataset==task_sequence[-1]:
            pre_tasks.extend(["cnn_dailymail","multinli.in.out","zre"])
        data_args.dataset_name = cur_dataset
        logger.info("current_dataset:"+cur_dataset)
        training_args.do_train = True
        training_args.to_eval = False
        metric = load_metric("metric/squad_v1_local/squad_v1_local.py")
        # Train on the current task fused with the replay buffer (if any).
        if all_replay is not None:
            fused = datasets.concatenate_datasets([all_replay,train_dataloaders[cur_dataset]])
        else:
            fused = train_dataloaders[cur_dataset]
        training_args.num_train_epochs = 5
        model.encoder.reset_train_count()
        trainer = QuestionAnsweringTrainer(
            model=model,
            args=training_args,
            train_dataset=fused,
            eval_dataset=None,
            eval_examples=None,
            answer_column_name=answer_column,
            dataset_name=data_args.dataset_name,
            tokenizer=tokenizer,
            data_collator=data_collator,
            compute_metrics=compute_metrics if training_args.predict_with_generate else None,
        )
        train_result = trainer.train()
        # Rank 0 selects replay samples and writes them (plus features) to
        # disk; all ranks then reload them after the barrier so the replay
        # buffer stays identical across processes.
        if training_args.local_rank<=0:
            save_set,features = save_load_diverse_sample(model,train_dataloaders[cur_dataset])
            if all_replay is None:
                all_replay = save_set
            else:
                all_replay = datasets.concatenate_datasets([all_replay,save_set])
            if all_features==[]:
                all_features=features
            else:
                all_features.extend(features)
            np.save("./all_features.npy",np.array(all_features))
            all_replay.save_to_disk("all_replay@{}.hf".format(cur_dataset))
        if training_args.local_rank!=-1:
            torch.distributed.barrier()
        all_replay = load_from_disk("all_replay@{}.hf".format(cur_dataset))
        all_ids.extend([task2id[cur_dataset]]*50)
        all_features=np.load("./all_features.npy").tolist()
        model.encoder.add_negs(all_ids,all_features)
        # Evaluate on every task seen so far.
        for pre_dataset in pre_tasks:
            data_args.dataset_name = pre_dataset
            metric = load_metric("metric/squad_v1_local/squad_v1_local.py")
            eval_dataset,eval_examples = eval_dataloaders[pre_dataset]
            trainer = QuestionAnsweringTrainer(
                model=model,
                args=training_args,
                train_dataset=None,
                eval_dataset=eval_dataset,
                eval_examples=eval_examples,
                answer_column_name=answer_column,
                dataset_name=data_args.dataset_name,
                tokenizer=tokenizer,
                data_collator=data_collator,
                compute_metrics=compute_metrics if training_args.predict_with_generate else None,
            )
            torch.cuda.empty_cache()
            logger.info("*** Evaluate:{} ***".format(data_args.dataset_name))
            # NOTE(review): this clobbers the max_length/num_beams computed
            # above, so evaluate() always runs with model defaults — confirm
            # that is intended.
            max_length, num_beams, ignore_keys_for_eval = None, None, None
            metrics = trainer.evaluate(max_length=max_length, num_beams=num_beams, ignore_keys=ignore_keys_for_eval,metric_key_prefix="eval")
            max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
            metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
            trainer.log_metrics("eval", metrics)
            trainer.save_metrics("eval", metrics)
            if training_args.local_rank<=0:
                try:
                    print("after_train_",cur_dataset,"_test_",pre_dataset,file=fileout)
                    print(metrics,file=fileout)
                except:
                    pass
    # NOTE(review): ``kwargs`` is never defined in this function, so this
    # branch would raise NameError if source_lang/target_lang were ever set —
    # presumably dead code carried over from a translation script; confirm.
    languages = [l for l in [data_args.source_lang, data_args.target_lang] if l is not None]
    if len(languages) > 0:
        kwargs["language"] = languages
    return None
def _mp_fn(index):
    """Entry point for ``xla_spawn`` (TPU); ``index`` is the worker ordinal (unused)."""
    main()
164,670 | import logging
import os
import torch
import copy,random
import sys
import json
from dataclasses import dataclass, field
from typing import Optional
from typing import List, Optional, Tuple
from models.nopt5 import T5ForConditionalGeneration as PromptT5
from downstream.dataset_processors import *
from downstream.trainer import QuestionAnsweringTrainer
import datasets
import numpy as np
from datasets import load_dataset, load_metric,load_from_disk
import os
from functools import partial
import transformers
from transformers import (
AutoConfig,
AutoModelForSeq2SeqLM,
AutoTokenizer,
DataCollatorForSeq2Seq,
HfArgumentParser,
M2M100Tokenizer,
MBart50Tokenizer,
EvalPrediction,
MBart50TokenizerFast,
MBartTokenizer,
MBartTokenizerFast,
Seq2SeqTrainer,
Seq2SeqTrainingArguments,
default_data_collator,
set_seed,
)
from pathlib import Path
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
def main():
def preprocess_valid_function(examples):
preprocess_fn = preprocess_proqa#dataset_name_to_func(data_args.dataset_name)
inputs, targets = preprocess_fn(examples, "question","context","answer")
model_inputs = tokenizer(inputs, max_length=1024, padding=False, truncation=True)
# Setup the tokenizer for targets
with tokenizer.as_target_tokenizer():
labels = tokenizer(targets, max_length=128, padding=False, truncation=True)
model_inputs["example_id"] = []
for i in range(len(model_inputs["input_ids"])):
# One example can give several spans, this is the index of the example containing this span of text.
sample_index = i #sample_mapping[i]
model_inputs["example_id"].append(examples["id"][sample_index])
gen_prompt_ids = [-(i+1) for i in range(1520,1520+20)]
format_id = task2format[dataset_name]
format_prompt_id_start = 500
format_prompt_ids = [-(i+1) for i in range(format_prompt_id_start + format_id * 40,
format_prompt_id_start + (format_id + 1) * 40)]
task_id = task2id[dataset_name]
task_prompt_id_start = 0
task_prompt_ids = [- (i + 1) for i in range(task_prompt_id_start + task_id * 40,
task_prompt_id_start + (task_id + 1) * 40)]
domain_prompt_id_start = 800
domain_prompt_number = 20
domain_prompt_ids = [- (i + 1) for i in range(domain_prompt_id_start,
domain_prompt_id_start + 20)]*5
input_ids = copy.deepcopy(
[gen_prompt_ids+format_prompt_ids+task_prompt_ids + domain_prompt_ids+input_ids for input_ids in model_inputs['input_ids']])
model_inputs['input_ids'] = input_ids # [format_prompt_ids+input_ids for input_ids in model_inputs['input_ids']]
model_inputs['attention_mask'] = [[1] * 200 + attention_mask for attention_mask in
model_inputs['attention_mask']]
model_inputs["labels"] = labels["input_ids"]
return model_inputs
def compute_metrics(p: EvalPrediction):
return metric.compute(predictions=p.predictions, references=p.label_ids)
def save_prompt_embedding(model,path):
prompt_embedding = model.state_dict()['encoder.prompt_embeddings.weight']
save_prompt_info = {'encoder.prompt_embeddings.weight':copy.deepcopy(prompt_embedding),'task2id':task2id,'format2id':format2id}
prompt_path = os.path.join(path,'prompt_embedding_info')
torch.save(save_prompt_info,prompt_path)
logger.info(f'Saving prompt embedding information to {prompt_path}')
def preprocess_function(examples):
preprocess_fn = preprocess_proqa#dataset_name_to_func(data_args.dataset_name)
inputs, targets = preprocess_fn(examples, "question","context","answer")
model_inputs = tokenizer(inputs, max_length=1024, padding=False, truncation=True)
# Setup the tokenizer for targets
with tokenizer.as_target_tokenizer():
labels = tokenizer(targets, max_length=128, padding=False, truncation=True)
model_inputs["labels"] = labels["input_ids"]
gen_prompt_ids = [-(i+1) for i in range(1520,1520+20)]
format_id = task2format[dataset_name]
format_prompt_id_start = 500
format_prompt_ids = [-(i+1) for i in range(format_prompt_id_start + format_id * 40,
format_prompt_id_start + (format_id + 1) * 40)]
task_id = task2id[dataset_name]
task_prompt_id_start = 0
task_prompt_ids = [- (i + 1) for i in range(task_prompt_id_start + task_id * 40,
task_prompt_id_start + (task_id + 1) * 40)]
domain_prompt_id_start = 800
domain_prompt_number = 20
domain_prompt_ids = [- (i + 1) for i in range(domain_prompt_id_start,
domain_prompt_id_start + 20)]*5
input_ids = copy.deepcopy(
[gen_prompt_ids+format_prompt_ids+task_prompt_ids + domain_prompt_ids+input_ids for input_ids in model_inputs['input_ids']])
model_inputs['input_ids'] = input_ids # [format_prompt_ids+input_ids for input_ids in model_inputs['input_ids']]
model_inputs['attention_mask'] = [[1] * 200 + attention_mask for attention_mask in
model_inputs['attention_mask']]
return model_inputs
def dataset_name_to_func(dataset_name):
    """Return the batch-preprocessing function registered for `dataset_name`.

    Raises KeyError for names that have no registered preprocessor.
    """
    # Datasets sharing a preprocessor are grouped; singletons listed explicitly.
    span_style = {name: preprocess_sqaud_batch for name in ('squad', 'squad_v2', 'quoref')}
    boolean = {name: preprocess_boolq_batch for name in ('boolq', 'boolq_np')}
    choice = {name: preprocess_race_batch for name in ('race', 'mctest')}
    singletons = {
        'narrativeqa': preprocess_narrativeqa_batch,
        'newsqa': preprocess_newsqa_batch,
        'ropes': preprocess_ropes_batch,
        'drop': preprocess_drop_batch,
        'nqopen': preprocess_sqaud_abstractive_batch,
        'openbookqa': preprocess_openbookqa_batch,
        'social_iqa': preprocess_social_iqa_batch,
        'dream': preprocess_dream_batch,
    }
    registry = {**span_style, **boolean, **choice, **singletons}
    return registry[dataset_name]
# Parse configuration: a single *.json argv is treated as a full config file,
# otherwise regular command-line flags fill the three argument dataclasses.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
    model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Task-name -> prompt-slot id. Checkpoints with "same" in their path tie each
# seed dataset to its QA-format alias (squad == extractive == 0, ...); other
# checkpoints give every name a distinct slot.
if 'same' in model_args.model_name_or_path:
    task2id = {'squad': 0, 'extractive': 0, 'narrativeqa': 1, 'abstractive': 1, 'race': 2, 'multichoice': 2,
               'boolq': 3, 'bool': 3, 'newsqa': 8, 'quoref': 9, 'ropes': 10, 'drop': 11, 'nqopen': 12,
               'boolq_np': 13, 'openbookqa': 14, 'mctest': 15, 'social_iqa': 16, 'dream': 17}
else:
    task2id = {'squad': 0, 'extractive': 1, 'narrativeqa': 2, 'abstractive': 3, 'race': 4, 'multichoice': 5,
               'boolq': 6, 'bool': 7, 'newsqa': 8, 'quoref': 9, 'ropes': 10, 'drop': 11, 'nqopen': 12,
               'boolq_np': 13, 'openbookqa': 14, 'mctest': 15, 'social_iqa': 16, 'dream': 17}
# Evaluation metric per dataset: either a HF `datasets` metric id or a local
# metric script path.
dataset_name_to_metric = {
    'squad': 'squad',
    'squad_v2': 'metric/squad_v2_local/squad_v2_local.py',
    'newsqa': 'metric/squad_v2_local/squad_v2_local.py',
    'boolq': 'accuracy',
    'narrativeqa': 'rouge',
    'race': 'accuracy',
    'quoref': 'squad',
    'ropes': 'squad',
    'drop': 'squad',
    'nqopen': 'squad',
    # 'multirc': 'accuracy',
    'boolq_np': 'accuracy',
    'openbookqa': 'accuracy',
    'mctest': 'accuracy',
    'social_iqa': 'accuracy',
    'dream': 'accuracy',
}
# Configure root logging to stdout and propagate the chosen verbosity to the
# datasets/transformers libraries.
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
    datefmt="%m/%d/%Y %H:%M:%S",
    handlers=[logging.StreamHandler(sys.stdout)],
)
log_level = training_args.get_process_log_level()
logger.setLevel(log_level)
datasets.utils.logging.set_verbosity(log_level)
transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.warning(
    f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
    + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
)
logger.info(f"Training/evaluation parameters {training_args}")
# Vanilla T5 checkpoints were pretrained with task prefixes; warn if none given.
if data_args.source_prefix is None and model_args.model_name_or_path in [
    "t5-small",
    "t5-base",
    "t5-large",
    "t5-3b",
    "t5-11b",
]:
    logger.warning(
        "You're running a t5 model but didn't provide a source prefix, which is expected, e.g. with "
        "`--source_prefix 'translate English to German: ' `"
    )
# Detecting last checkpoint.
last_checkpoint = None
if os.path.isdir(training_args.output_dir) and not training_args.overwrite_output_dir:
    last_checkpoint = get_last_checkpoint(training_args.output_dir)
    if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. "
            "Use --overwrite_output_dir to overcome."
        )
    elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
        logger.info(
            f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
            "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
        )
# Set seed before initializing model.
set_seed(training_args.seed)
config = AutoConfig.from_pretrained(
    model_args.config_name if model_args.config_name else model_args.model_name_or_path,
    cache_dir=model_args.cache_dir,
    revision=model_args.model_revision,
    use_auth_token=True if model_args.use_auth_token else None,
)
tokenizer = AutoTokenizer.from_pretrained(
    model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
    cache_dir=model_args.cache_dir,
    use_fast=model_args.use_fast_tokenizer,
    revision=model_args.model_revision,
    use_auth_token=True if model_args.use_auth_token else None,
)
# Markers of the unified QA input format: format tags go in as regular tokens,
# structural markers as special tokens.
tokens_to_add = ['[ABSTRACTIVE]', '[BOOL]', '[EXTRACTIVE]', '[MultiChoice]']
special_tokens_dict = {'additional_special_tokens': ['[TASK]', '[QUESTION]', '[CONTEXT]',
                                                    '[OPTIONS]']}
tokenizer.add_tokens(tokens_to_add)
num_added_toks = tokenizer.add_special_tokens(special_tokens_dict)
added_tokens = tokenizer.get_added_vocab()
logger.info('Added tokens: {}'.format(added_tokens))
model = PromptT5.from_pretrained(
    model_args.model_name_or_path,
    from_tf=bool(".ckpt" in model_args.model_name_or_path),
    config=config,
    cache_dir=model_args.cache_dir,
    revision=model_args.model_revision,
    use_auth_token=True if model_args.use_auth_token else None,
)
# Embedding matrix must grow to cover the tokens added above.
model.resize_token_embeddings(len(tokenizer))
# Reload format-specific task prompts for a newly involved task.
# NOTE(review): both reload flags are force-overwritten to False right here, so
# the two branches below are dead code in this configuration.
data_args.reload_from_trained_prompt = False#@
data_args.load_from_format_task_id = False#@
### before pretrain come !!!!!!
if data_args.load_from_format_task_id and (data_args.dataset_name not in seed_datasets) and not data_args.reload_from_trained_prompt:
    # Initialize the new task's prompt rows from its format's prompt rows.
    task_start_id = data_args.prompt_number * len(format2dataset.keys())
    task_id = task_start_id + task2id[data_args.dataset_name] * data_args.prompt_number
    format_task_id = task_start_id + task2id[dataset2format[data_args.dataset_name]] * data_args.prompt_number
    model.state_dict()['encoder.prompt_embeddings.weight'][task_id:task_id+data_args.prompt_number,:] = model.state_dict()['encoder.prompt_embeddings.weight'][format_task_id:format_task_id+data_args.prompt_number,:]
    logger.info(f'Successfully initialize format {dataset2format[data_args.dataset_name]} task prompt for new task {data_args.dataset_name}, task id {task_id}')
elif data_args.reload_from_trained_prompt:
    # Restore both the task rows and the format rows from a saved prompt file.
    assert data_args.trained_prompt_path,'Must specify the path of stored prompt'
    prompt_info = torch.load(data_args.trained_prompt_path)
    assert prompt_info['task2id'][data_args.dataset_name]==task2id[data_args.dataset_name],f'the task id in trained prompt task id is not matched to the current task id for {data_args.dataset_name}'
    assert prompt_info['format2id'].keys()==format2id.keys(),'the format dont match'
    task_start_id = data_args.prompt_number * len(format2dataset.keys())
    task_id = task_start_id + task2id[data_args.dataset_name] * data_args.prompt_number
    logger.info('task id range {} {}'.format(task_id,task_id+data_args.prompt_number))
    model.state_dict()['encoder.prompt_embeddings.weight'][task_id:task_id+data_args.prompt_number,:] = prompt_info['encoder.prompt_embeddings.weight'][task_id:task_id+data_args.prompt_number,:]
    format_id = format2id[dataset2format[data_args.dataset_name]]
    model.state_dict()['encoder.prompt_embeddings.weight'][format_id*data_args.prompt_number:(format_id+1)*data_args.prompt_number, :] = prompt_info['encoder.prompt_embeddings.weight'][format_id*data_args.prompt_number:(format_id+1)*data_args.prompt_number, :]
    logger.info(
        f'Successfully restore task+format prompt for the task {data_args.dataset_name} from {data_args.trained_prompt_path}')
# Set decoder_start_token_id (mBART needs the language code as start token).
if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
    if isinstance(tokenizer, MBartTokenizer):
        model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.target_lang]
    else:
        model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.target_lang)
if model.config.decoder_start_token_id is None:
    raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")
prefix = data_args.source_prefix if data_args.source_prefix is not None else ""
# NOTE(review): the condition picks CUDA even when --no_cuda is set — this
# looks inverted; confirm intent before relying on CPU runs.
if training_args.local_rank == -1 or training_args.no_cuda:
    device = torch.device("cuda")
    n_gpu = torch.cuda.device_count()
# Temporarily set max_target_length for training.
max_target_length = data_args.max_target_length
padding = "max_length" if data_args.pad_to_max_length else False
if training_args.label_smoothing_factor > 0 and not hasattr(model, "prepare_decoder_input_ids_from_labels"):
    logger.warning(
        "label_smoothing is enabled but the `prepare_decoder_input_ids_from_labels` method is not defined for"
        f"`{model.__class__.__name__}`. This will lead to loss being calculated twice and will take up more memory"
    )
# Column names used by the preprocessing closures above.
question_column = data_args.question_column
context_column = data_args.context_column
answer_column = data_args.answer_column
# Clamp the requested source length to what the tokenizer/model supports.
if data_args.max_source_length > tokenizer.model_max_length:
    logger.warning(
        f"The max_seq_length passed ({data_args.max_source_length}) is larger than the maximum length for the"
        f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
    )
data_args.max_source_length = min(data_args.max_source_length, tokenizer.model_max_length)
# Data collator: dynamic padding unless everything was padded to max length.
label_pad_token_id = -100 if data_args.ignore_pad_token_for_loss else tokenizer.pad_token_id
if data_args.pad_to_max_length:
    data_collator = default_data_collator
else:
    data_collator = DataCollatorForSeq2Seq(
        tokenizer,
        model=model,
        label_pad_token_id=label_pad_token_id,
        pad_to_multiple_of=8 if training_args.fp16 else None,
    )
#start
# Preprocess every training task once and cache the tokenized splits (plus a
# 100-example replay subset per task) under ./ours/ on disk.
train_dataloaders = {}
eval_dataloaders = {}
replay_dataloaders = {}
for ds_name in task2id.keys():
    # Format aliases and a few held-out datasets are skipped here.
    if (not ds_name in ["extractive","abstractive","multichoice","bool","boolq","boolq_np","ropes"]):
        cur_dataset = ds_name
        data_args.dataset_name = cur_dataset
        if True:
            # Downloading and loading a dataset from the hub (local JSON files
            # are handled in the else branch below).
            if not data_args.dataset_name in ['newsqa', 'nqopen', 'mctest', 'social_iqa']:
                if data_args.dataset_name == "race":
                    data_args.dataset_config_name = "all"
                elif data_args.dataset_name == "openbookqa":
                    data_args.dataset_config_name = "main"
                elif data_args.dataset_name == "dream":
                    data_args.dataset_config_name = "plain_text"
                else:
                    data_args.dataset_config_name = "plain_text"
                raw_datasets = load_dataset(
                    data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir
                )
                if data_args.dataset_name in ['ropes']:
                    # add answer_start (not used for squad evaluation but required)
                    def add_answer_start(example):
                        example['answers'].update({"answer_start": [0]})
                        return example
                    raw_datasets = raw_datasets.map(add_answer_start)
                elif data_args.dataset_name in ['drop']:
                    # add answer_start (not used for squad evaluation but required)
                    # add answers (for squad evaluation)
                    def add_answers(example):
                        answers = []
                        answer_start = []
                        for _a in example['answers_spans']['spans']:
                            answers.append(_a)
                            answer_start.append(-1)
                        example['answers'] = {"text": answers, "answer_start": answer_start}
                        return example
                    raw_datasets = raw_datasets.map(add_answers)
                column_names = raw_datasets["validation"].column_names
            else:
                # Locally stored datasets are read from JSON train/dev files.
                data_files = {}
                basic_file = "../data_process/data/"+data_args.dataset_name+"/"
                data_files["train"] = basic_file+"train.json"
                data_files["validation"] = basic_file+"dev.json"
                if data_args.dataset_name in ['newsqa', 'nqopen', 'multirc', 'boolq_np', 'mctest', 'social_iqa']:
                    raw_datasets = load_dataset("json", data_files=data_files, cache_dir=model_args.cache_dir)
                else:
                    print(f"Unknown dataset {data_args.dataset_name}")
                    raise NotImplementedError
                column_names = raw_datasets["validation"].column_names
            metric = load_metric(dataset_name_to_metric[data_args.dataset_name])
            if "train" not in raw_datasets:
                raise ValueError("--do_train requires a train dataset")
            train_dataset = raw_datasets["train"]
            if True:
                # Sample 100 random training examples as this task's replay buffer.
                all_num = list(range(0, len(train_dataset)))
                random.shuffle(all_num)
                selected_indices = all_num[:100]
                replay_dataset = train_dataset.select(selected_indices)
                with training_args.main_process_first(desc="train dataset map pre-processing"):
                    replay_dataset = replay_dataset.map(
                        preprocess_function,
                        batched=True,
                        num_proc=data_args.preprocessing_num_workers,
                        remove_columns=column_names,
                        load_from_cache_file=True,
                        desc="Running tokenizer on replay dataset",
                    )
                replay_dataloaders[ds_name] = replay_dataset
            # Tokenize the full training split and cache it to disk.
            with training_args.main_process_first(desc="train dataset map pre-processing"):
                train_dataset = train_dataset.map(
                    preprocess_function,
                    batched=True,
                    num_proc=data_args.preprocessing_num_workers,
                    remove_columns=column_names,
                    load_from_cache_file=True,
                    desc="Running tokenizer on train dataset",
                )
            train_dataloaders[ds_name] = train_dataset
            train_dataset.save_to_disk("./ours/{}-train.hf".format(ds_name))
            max_target_length = data_args.val_max_target_length
            if "validation" not in raw_datasets:
                raise ValueError("--do_eval requires a validation dataset")
            eval_examples = raw_datasets["validation"]
            # Ensure every eval example carries a stable integer id (needed to
            # map post-processed predictions back to examples).
            def add_id(example,index):
                example.update({'id':index})
                return example
            if 'id' not in eval_examples.features.keys():
                eval_examples = eval_examples.map(add_id,with_indices=True)
            if data_args.max_eval_samples is not None:
                eval_examples = eval_examples.select(range(data_args.max_eval_samples))
            with training_args.main_process_first(desc="validation dataset map pre-processing"):
                eval_dataset = eval_examples.map(
                    preprocess_validation_function,
                    batched=True,
                    num_proc=data_args.preprocessing_num_workers,
                    remove_columns=column_names,
                    load_from_cache_file=True,
                    desc="Running tokenizer on validation dataset",
                )
            eval_dataloaders[ds_name] = (eval_dataset,eval_examples)
            eval_dataset.save_to_disk("./ours/{}-eval.hf".format(ds_name))
            eval_examples.save_to_disk("./ours/{}-evalex.hf".format(ds_name))
languages = [l for l in [data_args.source_lang, data_args.target_lang] if l is not None]
if len(languages) > 0:
    # NOTE(review): `kwargs` is not defined anywhere in the visible code —
    # looks like a leftover from a push_to_hub template; confirm before use.
    kwargs["language"] = languages
return None
def _mp_fn(index):
    """Per-process entry point used by xla_spawn for TPU training."""
    # The spawn index is accepted but unused; all work happens in main().
    main()
164,672 | import logging
import os
import torch
import copy,random
import sys
import json
from dataclasses import dataclass, field
from typing import Optional
from typing import List, Optional, Tuple
from sklearn.cluster import KMeans
from models.metat5 import T5ForConditionalGeneration as PromptT5
from downstream.dataset_processors import *
from downstream.l2ptrainer import QuestionAnsweringTrainer
import datasets
import numpy as np
from datasets import load_dataset, load_metric,load_from_disk,concatenate_datasets
import os
from functools import partial
import transformers
from transformers import (
AutoConfig,
AutoModelForSeq2SeqLM,
AutoTokenizer,
DataCollatorForSeq2Seq,
HfArgumentParser,
M2M100Tokenizer,
MBart50Tokenizer,
EvalPrediction,
MBart50TokenizerFast,
MBartTokenizer,
MBartTokenizerFast,
Seq2SeqTrainer,
Seq2SeqTrainingArguments,
default_data_collator,
set_seed,
)
from pathlib import Path
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
def main():
def preprocess_validation_function(examples):
    """Tokenize validation examples and keep a back-pointer to each source example.

    Builds model inputs with an ``example_id`` per feature (each example yields
    exactly one feature here, so the mapping is the identity) and ``labels``
    from the gold answers. When padding to a fixed length, pad tokens in the
    labels are replaced by -100 so the loss ignores them.

    Removed: a dead ``if True: pass`` block and a large stretch of stale
    commented-out prompt-id code — behavior is unchanged.
    """
    preprocess_fn = dataset_name_to_func(data_args.dataset_name)
    inputs, targets = preprocess_fn(examples, question_column, context_column, answer_column)
    model_inputs = tokenizer(inputs, max_length=data_args.max_source_length, padding=padding, truncation=True)
    # One example gives exactly one span, so the span->example map is identity.
    model_inputs["example_id"] = []
    for i in range(len(model_inputs["input_ids"])):
        sample_index = i
        model_inputs["example_id"].append(examples["id"][sample_index])
    # Setup the tokenizer for targets.
    with tokenizer.as_target_tokenizer():
        labels = tokenizer(targets, max_length=data_args.max_target_length, padding=padding, truncation=True)
    # If we are padding here, replace all tokenizer.pad_token_id in the labels
    # by -100 so padding is ignored by the loss.
    if padding == "max_length" and data_args.ignore_pad_token_for_loss:
        labels["input_ids"] = [
            [(l if l != tokenizer.pad_token_id else -100) for l in label] for label in labels["input_ids"]
        ]
    model_inputs["labels"] = labels["input_ids"]
    return model_inputs
def compute_metrics(p: EvalPrediction):
    """Delegate scoring of (predictions, references) to the currently loaded metric."""
    preds = p.predictions
    refs = p.label_ids
    return metric.compute(predictions=preds, references=refs)
def save_prompt_embedding(model, path):
    """Write a deep copy of the prompt-embedding weights and id maps under `path`."""
    state = model.state_dict()
    save_prompt_info = {
        'encoder.prompt_embeddings.weight': copy.deepcopy(state['encoder.prompt_embeddings.weight']),
        'task2id': task2id,
        'format2id': format2id,
    }
    prompt_path = os.path.join(path, 'prompt_embedding_info')
    torch.save(save_prompt_info, prompt_path)
    logger.info(f'Saving prompt embedding information to {prompt_path}')
def preprocess_function(examples):
    """Tokenize a training batch into model inputs plus label ids.

    When padding to a fixed length, pad tokens in the labels are replaced by
    -100 so they are ignored by the loss.
    """
    build_batch = dataset_name_to_func(data_args.dataset_name)
    sources, answers = build_batch(examples, question_column, context_column, answer_column)
    batch = tokenizer(sources, max_length=data_args.max_source_length, padding=padding, truncation=True)
    # Targets must be tokenized in target mode.
    with tokenizer.as_target_tokenizer():
        encoded_targets = tokenizer(answers, max_length=data_args.max_target_length, padding=padding, truncation=True)
    if padding == "max_length" and data_args.ignore_pad_token_for_loss:
        pad_id = tokenizer.pad_token_id
        encoded_targets["input_ids"] = [
            [-100 if token == pad_id else token for token in label]
            for label in encoded_targets["input_ids"]
        ]
    batch["labels"] = encoded_targets["input_ids"]
    return batch
def save_load_diverse_sample(model,trainset):
    """Pick ~50 training examples that are diverse w.r.t. the encoder's domain keys.

    Greedily assigns examples from the first quarter of `trainset` to their
    top-5 matching domain keys (at most 3 per key), randomly keeps 50 of the
    collected indices, and returns (subset, per-example query features).
    """
    with torch.no_grad():
        # NOTE(review): device is hard-coded; breaks on CPU-only or multi-GPU setups.
        device = 'cuda:0'
        # Only the first quarter of the training set is searched.
        search_upbound = len(trainset)//4
        query_idxs = [None]*30
        keys = model.encoder.domain_keys
        for idx,item in enumerate(trainset.select(range(search_upbound))):
            query = model.encoder.get_query_vector(input_ids=torch.tensor([item['input_ids']]).long().to(device),
                                                   attention_mask=torch.tensor([item['attention_mask']]).long().to(device),
                                                   return_dict=True)
            result = torch.matmul(query,keys.t())
            # Indices of the 5 best-matching domain keys for this example.
            result = torch.topk(result,5).indices[0].cpu().numpy().tolist()
            key_sel = None
            for key_idx in result:
                # First key with fewer than 3 assigned examples wins.
                if query_idxs[key_idx] is None or len(query_idxs[key_idx])<3:
                    key_sel = key_idx
                    break
            if key_sel is not None:
                if query_idxs[key_sel] is None:
                    query_idxs[key_sel] = [idx]
                else:
                    query_idxs[key_sel].append(idx)
        total_idxs = []
        for item in query_idxs:
            try:
                total_idxs.extend(item[:3])
            except:
                # NOTE(review): bare except papers over `item is None` (TypeError)
                # by drawing 3 random indices from the unsearched tail — it would
                # also silently swallow any other error; consider `except TypeError`.
                total_idxs.extend(random.sample(list(range(search_upbound,len(trainset))),3))
        total_idxs = list(set(total_idxs))
        # NOTE(review): raises ValueError if fewer than 50 unique indices were
        # collected — confirm trainset is always large enough.
        total_idxs = random.sample(total_idxs,50)
        sub_set = trainset.select(total_idxs)
        # Recompute and return the query features for the selected subset.
        features = []
        for idx,item in enumerate(sub_set):
            query = model.encoder.get_query_vector(input_ids=torch.tensor([item['input_ids']]).long().to(device),
                                                   attention_mask=torch.tensor([item['attention_mask']]).long().to(device),
                                                   return_dict=True)
            features.append(query.detach().cpu().numpy())
        return sub_set,features
def dataset_name_to_func(dataset_name):
    """Look up the batch-preprocessing routine for `dataset_name` (KeyError if absent)."""
    # Shared preprocessors first, then one-off datasets.
    registry = dict.fromkeys(('squad', 'squad_v2', 'quoref'), preprocess_sqaud_batch)
    registry.update(dict.fromkeys(('boolq', 'boolq_np'), preprocess_boolq_batch))
    registry.update(dict.fromkeys(('race', 'mctest'), preprocess_race_batch))
    registry.update({
        'narrativeqa': preprocess_narrativeqa_batch,
        'newsqa': preprocess_newsqa_batch,
        'ropes': preprocess_ropes_batch,
        'drop': preprocess_drop_batch,
        'nqopen': preprocess_sqaud_abstractive_batch,
        'openbookqa': preprocess_openbookqa_batch,
        'social_iqa': preprocess_social_iqa_batch,
        'dream': preprocess_dream_batch,
    })
    return registry[dataset_name]
# Parse configuration either from a single *.json argv (full config file) or
# from regular command-line flags.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
    model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Local metric script per dataset (paths resolved relative to the working dir).
dataset_name_to_metric = {
    'squad': 'metric/squad_v1_local/squad_v1_local.py',
    'squad_v2': 'metric/squad_v2_local/squad_v2_local.py',
    'newsqa': 'metric/squad_v2_local/squad_v2_local.py',
    'boolq': 'metric/accuracy.py',
    'narrativeqa': 'metric/rouge_local/rouge_metric.py',
    'race': 'metric/accuracy.py',
    'quoref': 'metric/squad_v1_local/squad_v1_local.py',
    'ropes': 'metric/squad_v1_local/squad_v1_local.py',
    'drop': 'metric/squad_v1_local/squad_v1_local.py',
    'nqopen': 'metric/squad_v1_local/squad_v1_local.py',
    'boolq_np': 'metric/accuracy.py',
    'openbookqa': 'metric/accuracy.py',
    'mctest': 'metric/accuracy.py',
    'social_iqa': 'metric/accuracy.py',
    'dream': 'metric/accuracy.py',
}
# Configure root logging to stdout and propagate the chosen verbosity to the
# datasets/transformers libraries.
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
    datefmt="%m/%d/%Y %H:%M:%S",
    handlers=[logging.StreamHandler(sys.stdout)],
)
log_level = training_args.get_process_log_level()
logger.setLevel(log_level)
datasets.utils.logging.set_verbosity(log_level)
transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.warning(
    f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
    + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
)
logger.info(f"Training/evaluation parameters {training_args}")
# Vanilla T5 checkpoints were pretrained with task prefixes; warn if none given.
if data_args.source_prefix is None and model_args.model_name_or_path in [
    "t5-small",
    "t5-base",
    "t5-large",
    "t5-3b",
    "t5-11b",
]:
    logger.warning(
        "You're running a t5 model but didn't provide a source prefix, which is expected, e.g. with "
        "`--source_prefix 'translate English to German: ' `"
    )
# Detecting last checkpoint.
last_checkpoint = None
if os.path.isdir(training_args.output_dir) and not training_args.overwrite_output_dir:
    last_checkpoint = get_last_checkpoint(training_args.output_dir)
    if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. "
            "Use --overwrite_output_dir to overcome."
        )
    elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
        logger.info(
            f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
            "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
        )
# Set seed before initializing model.
set_seed(training_args.seed)
config = AutoConfig.from_pretrained(
    model_args.config_name if model_args.config_name else model_args.model_name_or_path,
    cache_dir=model_args.cache_dir,
    revision=model_args.model_revision,
    use_auth_token=True if model_args.use_auth_token else None,
)
tokenizer = AutoTokenizer.from_pretrained(
    model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
    cache_dir=model_args.cache_dir,
    use_fast=model_args.use_fast_tokenizer,
    revision=model_args.model_revision,
    use_auth_token=True if model_args.use_auth_token else None,
)
# Markers of the unified QA input format: format tags go in as regular tokens,
# structural markers as special tokens.
tokens_to_add = ['[ABSTRACTIVE]', '[BOOL]', '[EXTRACTIVE]', '[MultiChoice]']
special_tokens_dict = {'additional_special_tokens': ['[TASK]', '[QUESTION]', '[CONTEXT]',
                                                    '[OPTIONS]']}
tokenizer.add_tokens(tokens_to_add)
num_added_toks = tokenizer.add_special_tokens(special_tokens_dict)
added_tokens = tokenizer.get_added_vocab()
logger.info('Added tokens: {}'.format(added_tokens))
model = PromptT5.from_pretrained(
    model_args.model_name_or_path,
    from_tf=bool(".ckpt" in model_args.model_name_or_path),
    config=config,
    cache_dir=model_args.cache_dir,
    revision=model_args.model_revision,
    use_auth_token=True if model_args.use_auth_token else None,
)
# Embedding matrix must grow to cover the tokens added above.
model.resize_token_embeddings(len(tokenizer))
# Reload format-specific task prompts for a newly involved task.
# NOTE(review): both reload flags are force-overwritten to False right here, so
# the two branches below are dead code in this configuration; they also
# reference names (task2id, seed_datasets, format2dataset, dataset2format,
# format2id) that are not defined in the visible part of this function —
# presumably module-level globals; confirm before re-enabling.
data_args.reload_from_trained_prompt = False#@
data_args.load_from_format_task_id = False#@
### before pretrain come !!!!!!
if data_args.load_from_format_task_id and (data_args.dataset_name not in seed_datasets) and not data_args.reload_from_trained_prompt:
    task_start_id = data_args.prompt_number * len(format2dataset.keys())
    task_id = task_start_id + task2id[data_args.dataset_name] * data_args.prompt_number
    format_task_id = task_start_id + task2id[dataset2format[data_args.dataset_name]] * data_args.prompt_number
    model.state_dict()['encoder.prompt_embeddings.weight'][task_id:task_id+data_args.prompt_number,:] = model.state_dict()['encoder.prompt_embeddings.weight'][format_task_id:format_task_id+data_args.prompt_number,:]
    logger.info(f'Successfully initialize format {dataset2format[data_args.dataset_name]} task prompt for new task {data_args.dataset_name}, task id {task_id}')
elif data_args.reload_from_trained_prompt:
    assert data_args.trained_prompt_path,'Must specify the path of stored prompt'
    prompt_info = torch.load(data_args.trained_prompt_path)
    assert prompt_info['task2id'][data_args.dataset_name]==task2id[data_args.dataset_name],f'the task id in trained prompt task id is not matched to the current task id for {data_args.dataset_name}'
    assert prompt_info['format2id'].keys()==format2id.keys(),'the format dont match'
    task_start_id = data_args.prompt_number * len(format2dataset.keys())
    task_id = task_start_id + task2id[data_args.dataset_name] * data_args.prompt_number
    logger.info('task id range {} {}'.format(task_id,task_id+data_args.prompt_number))
    model.state_dict()['encoder.prompt_embeddings.weight'][task_id:task_id+data_args.prompt_number,:] = prompt_info['encoder.prompt_embeddings.weight'][task_id:task_id+data_args.prompt_number,:]
    format_id = format2id[dataset2format[data_args.dataset_name]]
    model.state_dict()['encoder.prompt_embeddings.weight'][format_id*data_args.prompt_number:(format_id+1)*data_args.prompt_number, :] = prompt_info['encoder.prompt_embeddings.weight'][format_id*data_args.prompt_number:(format_id+1)*data_args.prompt_number, :]
    logger.info(
        f'Successfully restore task+format prompt for the task {data_args.dataset_name} from {data_args.trained_prompt_path}')
# Set decoder_start_token_id (mBART needs the language code as start token).
if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
    if isinstance(tokenizer, MBartTokenizer):
        model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.target_lang]
    else:
        model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.target_lang)
if model.config.decoder_start_token_id is None:
    raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")
prefix = data_args.source_prefix if data_args.source_prefix is not None else ""
# NOTE(review): the condition picks CUDA even when --no_cuda is set — this
# looks inverted; confirm intent before relying on CPU runs.
if training_args.local_rank == -1 or training_args.no_cuda:
    device = torch.device("cuda")
    n_gpu = torch.cuda.device_count()
# Temporarily set max_target_length for training.
max_target_length = data_args.max_target_length
padding = "max_length" if data_args.pad_to_max_length else False
if training_args.label_smoothing_factor > 0 and not hasattr(model, "prepare_decoder_input_ids_from_labels"):
    logger.warning(
        "label_smoothing is enabled but the `prepare_decoder_input_ids_from_labels` method is not defined for"
        f"`{model.__class__.__name__}`. This will lead to loss being calculated twice and will take up more memory"
    )
# Column names used by the preprocessing closures above.
question_column = data_args.question_column
context_column = data_args.context_column
answer_column = data_args.answer_column
# Clamp the requested source length to what the tokenizer/model supports.
if data_args.max_source_length > tokenizer.model_max_length:
    logger.warning(
        f"The max_seq_length passed ({data_args.max_source_length}) is larger than the maximum length for the"
        f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
    )
data_args.max_source_length = min(data_args.max_source_length, tokenizer.model_max_length)
# Data collator: dynamic padding unless everything was padded to max length.
label_pad_token_id = -100 if data_args.ignore_pad_token_for_loss else tokenizer.pad_token_id
if data_args.pad_to_max_length:
    data_collator = default_data_collator
else:
    data_collator = DataCollatorForSeq2Seq(
        tokenizer,
        model=model,
        label_pad_token_id=label_pad_token_id,
        pad_to_multiple_of=8 if training_args.fp16 else None,
    )
#start
train_dataloaders = {}
eval_dataloaders = {}
replay_dataloaders = {}
all_replay = None
for ds_name in ['squad','narrativeqa','race','newsqa','quoref','drop','nqopen','openbookqa','mctest','social_iqa','dream']:
eval_dataloaders[ds_name] = (load_from_disk("./oursfinallong/{}-eval.hf".format(ds_name)),load_from_disk("./oursfinallong/{}-evalex.hf".format(ds_name)))
pre_tasks = []
pre_general = []
pre_test = []
max_length = (
training_args.generation_max_length
if training_args.generation_max_length is not None
else data_args.val_max_target_length
)
num_beams = data_args.num_beams if data_args.num_beams is not None else training_args.generation_num_beams
task_sequence = ['squad','newsqa','narrativeqa','nqopen','race','openbookqa','mctest','social_iqa']
# task_sequence = ["woz.en","srl","sst","wikisql","squad"]
fileout = open("diana_log.txt",'w')
all_replay = None
all_features = []
all_ids = []
cluster_num=0
for cur_dataset in task_sequence:
p=200
if p>len(load_from_disk("./oursfinallong/{}-train.hf".format(cur_dataset))):
p = len(load_from_disk("./oursfinallong/{}-train.hf".format(cur_dataset)))
trainds = load_from_disk("./oursfinallong/{}-train.hf".format(cur_dataset))
cluster_num+=5
pre_tasks.append(cur_dataset)
if cur_dataset==task_sequence[-1]:
pre_tasks.extend(["drop","quoref","dream"])
data_args.dataset_name = cur_dataset
logger.info("current_dataset:"+cur_dataset)
training_args.do_train = True
training_args.to_eval = False
metric = load_metric(dataset_name_to_metric[cur_dataset])
if all_replay is not None:
fused = datasets.concatenate_datasets([all_replay,trainds])
else:
fused = trainds
training_args.num_train_epochs = 5
model.encoder.reset_train_count()
trainer = QuestionAnsweringTrainer(
model=model,
args=training_args,
train_dataset=fused,
eval_dataset=None,
eval_examples=None,
answer_column_name=answer_column,
dataset_name=data_args.dataset_name,
tokenizer=tokenizer,
data_collator=data_collator,
compute_metrics=compute_metrics if training_args.predict_with_generate else None,
)
train_result = trainer.train()
if training_args.local_rank<=0:
save_set,features = save_load_diverse_sample(model,trainds)
if all_replay is None:
all_replay = save_set
else:
all_replay = datasets.concatenate_datasets([all_replay,save_set])
if all_features==[]:
all_features=features
else:
all_features.extend(features)
np.save("./all_features.npy",np.array(all_features))
all_replay.save_to_disk("all_replay@{}.hf".format(cur_dataset))
if training_args.local_rank!=-1:
torch.distributed.barrier()
all_replay = load_from_disk("all_replay@{}.hf".format(cur_dataset))
all_ids.extend([task2id[cur_dataset]]*50)
all_features=np.load("./all_features.npy").tolist()
model.encoder.reset_train_count()
trainer = QuestionAnsweringTrainer(
model=model,
args=training_args,
train_dataset=all_replay,
eval_dataset=None,
eval_examples=None,
answer_column_name=answer_column,
dataset_name=data_args.dataset_name,
tokenizer=tokenizer,
data_collator=data_collator,
compute_metrics=compute_metrics if training_args.predict_with_generate else None,
)
train_result = trainer.train()
model.encoder.add_negs(all_ids,all_features)
for pre_dataset in pre_tasks:
data_args.dataset_name = pre_dataset
metric = load_metric(dataset_name_to_metric[pre_dataset])
eval_dataset,eval_examples = eval_dataloaders[pre_dataset]
trainer = QuestionAnsweringTrainer(
model=model,
args=training_args,
train_dataset=None,
eval_dataset=eval_dataset,
eval_examples=eval_examples,
answer_column_name=answer_column,
dataset_name=data_args.dataset_name,
tokenizer=tokenizer,
data_collator=data_collator,
compute_metrics=compute_metrics if training_args.predict_with_generate else None,
)
torch.cuda.empty_cache()
logger.info("*** Evaluate:{} ***".format(data_args.dataset_name))
max_length, num_beams, ignore_keys_for_eval = None, None, None
metrics = trainer.evaluate(max_length=max_length, num_beams=num_beams, ignore_keys=ignore_keys_for_eval,metric_key_prefix="eval")
max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
trainer.log_metrics("eval", metrics)
trainer.save_metrics("eval", metrics)
if training_args.local_rank<=0:
try:
print("after_train_",cur_dataset,"_test_",pre_dataset,file=fileout)
print(metrics,file=fileout)
except:
pass
languages = [l for l in [data_args.source_lang, data_args.target_lang] if l is not None]
if len(languages) > 0:
kwargs["language"] = languages
return None
def _mp_fn(index):
    """Per-process entry point for `xla_spawn` (TPU training).

    `index` is the worker ordinal supplied by the spawner; it is ignored
    here — configuration comes from the command line parsed inside `main`.
    """
    # For xla_spawn (TPUs)
    main()
164,673 | import logging
import os
import torch
import copy,random
import sys
import json
from dataclasses import dataclass, field
from typing import Optional
from typing import List, Optional, Tuple
from sklearn.cluster import KMeans
from models.metat5nometa import T5ForConditionalGeneration as PromptT5
from downstream.dataset_processors import *
from downstream.l2ptrainer import QuestionAnsweringTrainer
import datasets
import numpy as np
from datasets import load_dataset, load_metric,load_from_disk,concatenate_datasets
import os
from functools import partial
import transformers
from transformers import (
AutoConfig,
AutoModelForSeq2SeqLM,
AutoTokenizer,
DataCollatorForSeq2Seq,
HfArgumentParser,
M2M100Tokenizer,
MBart50Tokenizer,
EvalPrediction,
MBart50TokenizerFast,
MBartTokenizer,
MBartTokenizerFast,
Seq2SeqTrainer,
Seq2SeqTrainingArguments,
default_data_collator,
set_seed,
)
from pathlib import Path
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
def main():
    """Sequential (continual) training of a prompt-based T5 QA model.

    Trains on a fixed task sequence, keeps a 50-example-per-task replay
    buffer (refreshed by rank 0 and shared via disk), re-trains on the full
    replay buffer after each task, then evaluates on every task seen so far
    (plus the held-out sets after the final task).

    Side effects: writes "diana_log.txt", "./all_features.npy" and
    "all_replay@<task>.hf" snapshots into the working directory.
    """

    def preprocess_validation_function(examples):
        """Tokenize validation examples, keeping a per-feature `example_id`
        so predictions can be mapped back to their source example."""
        preprocess_fn = dataset_name_to_func(data_args.dataset_name)
        inputs, targets = preprocess_fn(examples, question_column, context_column, answer_column)
        model_inputs = tokenizer(inputs, max_length=data_args.max_source_length, padding=padding, truncation=True)
        model_inputs["example_id"] = []
        for i in range(len(model_inputs["input_ids"])):
            # One example could yield several features; here the mapping is 1:1.
            sample_index = i
            model_inputs["example_id"].append(examples["id"][sample_index])
        with tokenizer.as_target_tokenizer():
            labels = tokenizer(targets, max_length=data_args.max_target_length, padding=padding, truncation=True)
        # With fixed-length padding, pad ids become -100 so the loss skips them.
        if padding == "max_length" and data_args.ignore_pad_token_for_loss:
            labels["input_ids"] = [
                [(l if l != tokenizer.pad_token_id else -100) for l in label] for label in labels["input_ids"]
            ]
        model_inputs["labels"] = labels["input_ids"]
        return model_inputs

    def compute_metrics(p: EvalPrediction):
        """Score predictions with the metric loaded for the current dataset."""
        return metric.compute(predictions=p.predictions, references=p.label_ids)

    def save_prompt_embedding(model, path):
        """Save encoder prompt-embedding weights plus task/format id maps."""
        prompt_embedding = model.state_dict()['encoder.prompt_embeddings.weight']
        save_prompt_info = {'encoder.prompt_embeddings.weight': copy.deepcopy(prompt_embedding), 'task2id': task2id, 'format2id': format2id}
        prompt_path = os.path.join(path, 'prompt_embedding_info')
        torch.save(save_prompt_info, prompt_path)
        logger.info(f'Saving prompt embedding information to {prompt_path}')

    def preprocess_function(examples):
        """Tokenize training examples into inputs and -100-masked labels."""
        preprocess_fn = dataset_name_to_func(data_args.dataset_name)
        inputs, targets = preprocess_fn(examples, question_column, context_column, answer_column)
        model_inputs = tokenizer(inputs, max_length=data_args.max_source_length, padding=padding, truncation=True)
        with tokenizer.as_target_tokenizer():
            labels = tokenizer(targets, max_length=data_args.max_target_length, padding=padding, truncation=True)
        if padding == "max_length" and data_args.ignore_pad_token_for_loss:
            labels["input_ids"] = [
                [(l if l != tokenizer.pad_token_id else -100) for l in label] for label in labels["input_ids"]
            ]
        model_inputs["labels"] = labels["input_ids"]
        return model_inputs

    def save_load_diverse_sample(model, trainset):
        """Take the first 50 training examples as the replay subset and return
        them together with their encoder query vectors."""
        features = []
        device = 'cuda:0'  # NOTE(review): hard-coded device; assumes GPU 0 exists
        with torch.no_grad():
            sub_set = trainset.select(range(50))
            for idx, item in enumerate(sub_set):
                query = model.encoder.get_query_vector(input_ids=torch.tensor([item['input_ids']]).long().to(device),
                                                       attention_mask=torch.tensor([item['attention_mask']]).long().to(device),
                                                       return_dict=True)
                features.append(query.detach().cpu().numpy())
        return sub_set, features

    def dataset_name_to_func(dataset_name):
        """Map a dataset name to its batch preprocessor (KeyError if unknown)."""
        mapping = {
            'squad': preprocess_sqaud_batch,
            'squad_v2': preprocess_sqaud_batch,
            'boolq': preprocess_boolq_batch,
            'narrativeqa': preprocess_narrativeqa_batch,
            'race': preprocess_race_batch,
            'newsqa': preprocess_newsqa_batch,
            'quoref': preprocess_sqaud_batch,
            'ropes': preprocess_ropes_batch,
            'drop': preprocess_drop_batch,
            'nqopen': preprocess_sqaud_abstractive_batch,
            'boolq_np': preprocess_boolq_batch,
            'openbookqa': preprocess_openbookqa_batch,
            'mctest': preprocess_race_batch,
            'social_iqa': preprocess_social_iqa_batch,
            'dream': preprocess_dream_batch,
        }
        return mapping[dataset_name]

    # ---- argument parsing -------------------------------------------------
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # A single JSON file may carry all arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Per-dataset metric loading scripts.
    dataset_name_to_metric = {
        'squad': 'metric/squad_v1_local/squad_v1_local.py',
        'squad_v2': 'metric/squad_v2_local/squad_v2_local.py',
        'newsqa': 'metric/squad_v2_local/squad_v2_local.py',
        'boolq': 'metric/accuracy.py',
        'narrativeqa': 'metric/rouge_local/rouge_metric.py',
        'race': 'metric/accuracy.py',
        'quoref': 'metric/squad_v1_local/squad_v1_local.py',
        'ropes': 'metric/squad_v1_local/squad_v1_local.py',
        'drop': 'metric/squad_v1_local/squad_v1_local.py',
        'nqopen': 'metric/squad_v1_local/squad_v1_local.py',
        'boolq_np': 'metric/accuracy.py',
        'openbookqa': 'metric/accuracy.py',
        'mctest': 'metric/accuracy.py',
        'social_iqa': 'metric/accuracy.py',
        'dream': 'metric/accuracy.py',
    }

    # ---- logging setup ----------------------------------------------------
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    if data_args.source_prefix is None and model_args.model_name_or_path in [
        "t5-small",
        "t5-base",
        "t5-large",
        "t5-3b",
        "t5-11b",
    ]:
        logger.warning(
            "You're running a t5 model but didn't provide a source prefix, which is expected, e.g. with "
            "`--source_prefix 'translate English to German: ' `"
        )

    # ---- checkpoint detection ---------------------------------------------
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # ---- model / tokenizer -----------------------------------------------
    set_seed(training_args.seed)
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    # Format markers become regular tokens; structural markers become special
    # tokens (so they are dropped when decoding with skip_special_tokens).
    tokens_to_add = ['[ABSTRACTIVE]', '[BOOL]', '[EXTRACTIVE]', '[MultiChoice]']
    special_tokens_dict = {'additional_special_tokens': ['[TASK]', '[QUESTION]', '[CONTEXT]',
                                                         '[OPTIONS]']}
    tokenizer.add_tokens(tokens_to_add)
    tokenizer.add_special_tokens(special_tokens_dict)
    added_tokens = tokenizer.get_added_vocab()
    logger.info('Added tokens: {}'.format(added_tokens))

    model = PromptT5.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model.resize_token_embeddings(len(tokenizer))

    # Optional reload of format-specific task prompts for a new task.
    # NOTE(review): both flags are forced False here, so the two branches
    # below are currently dead code kept for experimentation.
    data_args.reload_from_trained_prompt = False
    data_args.load_from_format_task_id = False
    if data_args.load_from_format_task_id and (data_args.dataset_name not in seed_datasets) and not data_args.reload_from_trained_prompt:
        task_start_id = data_args.prompt_number * len(format2dataset.keys())
        task_id = task_start_id + task2id[data_args.dataset_name] * data_args.prompt_number
        format_task_id = task_start_id + task2id[dataset2format[data_args.dataset_name]] * data_args.prompt_number
        model.state_dict()['encoder.prompt_embeddings.weight'][task_id:task_id + data_args.prompt_number, :] = model.state_dict()['encoder.prompt_embeddings.weight'][format_task_id:format_task_id + data_args.prompt_number, :]
        logger.info(f'Successfully initialize format {dataset2format[data_args.dataset_name]} task prompt for new task {data_args.dataset_name}, task id {task_id}')
    elif data_args.reload_from_trained_prompt:
        assert data_args.trained_prompt_path, 'Must specify the path of stored prompt'
        prompt_info = torch.load(data_args.trained_prompt_path)
        assert prompt_info['task2id'][data_args.dataset_name] == task2id[data_args.dataset_name], f'the task id in trained prompt task id is not matched to the current task id for {data_args.dataset_name}'
        assert prompt_info['format2id'].keys() == format2id.keys(), 'the format dont match'
        task_start_id = data_args.prompt_number * len(format2dataset.keys())
        task_id = task_start_id + task2id[data_args.dataset_name] * data_args.prompt_number
        logger.info('task id range {} {}'.format(task_id, task_id + data_args.prompt_number))
        model.state_dict()['encoder.prompt_embeddings.weight'][task_id:task_id + data_args.prompt_number, :] = prompt_info['encoder.prompt_embeddings.weight'][task_id:task_id + data_args.prompt_number, :]
        format_id = format2id[dataset2format[data_args.dataset_name]]
        model.state_dict()['encoder.prompt_embeddings.weight'][format_id * data_args.prompt_number:(format_id + 1) * data_args.prompt_number, :] = prompt_info['encoder.prompt_embeddings.weight'][format_id * data_args.prompt_number:(format_id + 1) * data_args.prompt_number, :]
        logger.info(
            f'Successfully restore task+format prompt for the task {data_args.dataset_name} from {data_args.trained_prompt_path}')

    # Set decoder_start_token_id for multilingual BART-style tokenizers.
    if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
        if isinstance(tokenizer, MBartTokenizer):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.target_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.target_lang)
    if model.config.decoder_start_token_id is None:
        raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")

    prefix = data_args.source_prefix if data_args.source_prefix is not None else ""
    # (Removed an unused `device`/`n_gpu` probe that was computed here but
    # never read; the sampling helper hard-codes its own device.)

    max_target_length = data_args.max_target_length  # temporary value for training
    padding = "max_length" if data_args.pad_to_max_length else False
    if training_args.label_smoothing_factor > 0 and not hasattr(model, "prepare_decoder_input_ids_from_labels"):
        logger.warning(
            "label_smoothing is enabled but the `prepare_decoder_input_ids_from_labels` method is not defined for"
            f"`{model.__class__.__name__}`. This will lead to loss being calculated twice and will take up more memory"
        )

    question_column = data_args.question_column
    context_column = data_args.context_column
    answer_column = data_args.answer_column
    if data_args.max_source_length > tokenizer.model_max_length:
        logger.warning(
            f"The max_seq_length passed ({data_args.max_source_length}) is larger than the maximum length for the"
            f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
        )
    data_args.max_source_length = min(data_args.max_source_length, tokenizer.model_max_length)

    # ---- data collator ----------------------------------------------------
    label_pad_token_id = -100 if data_args.ignore_pad_token_for_loss else tokenizer.pad_token_id
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    else:
        data_collator = DataCollatorForSeq2Seq(
            tokenizer,
            model=model,
            label_pad_token_id=label_pad_token_id,
            pad_to_multiple_of=8 if training_args.fp16 else None,
        )

    # ---- continual-learning loop ------------------------------------------
    train_dataloaders = {}
    eval_dataloaders = {}
    replay_dataloaders = {}
    all_replay = None
    for ds_name in ['squad', 'narrativeqa', 'race', 'newsqa', 'quoref', 'drop', 'nqopen', 'openbookqa', 'mctest', 'social_iqa', 'dream']:
        eval_dataloaders[ds_name] = (load_from_disk("./oursnometa/{}-eval.hf".format(ds_name)), load_from_disk("./oursnometa/{}-evalex.hf".format(ds_name)))
    pre_tasks = []
    pre_general = []
    pre_test = []
    max_length = (
        training_args.generation_max_length
        if training_args.generation_max_length is not None
        else data_args.val_max_target_length
    )
    num_beams = data_args.num_beams if data_args.num_beams is not None else training_args.generation_num_beams
    task_sequence = ['squad', 'newsqa', 'narrativeqa', 'nqopen', 'race', 'openbookqa', 'mctest', 'social_iqa']
    fileout = open("diana_log.txt", 'w')
    all_replay = None
    all_features = []
    all_ids = []
    cluster_num = 0
    for cur_dataset in task_sequence:
        trainds = load_from_disk("./oursnometa/{}-train.hf".format(cur_dataset)).select(range(1000))
        cluster_num += 5
        pre_tasks.append(cur_dataset)
        if cur_dataset == task_sequence[-1]:
            # After the final task, also evaluate the held-out datasets.
            pre_tasks.extend(["drop", "quoref", "dream"])
        data_args.dataset_name = cur_dataset
        logger.info("current_dataset:" + cur_dataset)
        training_args.do_train = True
        training_args.to_eval = False
        metric = load_metric(dataset_name_to_metric[cur_dataset])
        # Train on the new task mixed with everything in the replay buffer.
        if all_replay is not None:
            fused = datasets.concatenate_datasets([all_replay, trainds])
        else:
            fused = trainds
        training_args.num_train_epochs = 5
        model.encoder.reset_train_count()
        trainer = QuestionAnsweringTrainer(
            model=model,
            args=training_args,
            train_dataset=fused,
            eval_dataset=None,
            eval_examples=None,
            answer_column_name=answer_column,
            dataset_name=data_args.dataset_name,
            tokenizer=tokenizer,
            data_collator=data_collator,
            compute_metrics=compute_metrics if training_args.predict_with_generate else None,
        )
        train_result = trainer.train()
        if training_args.local_rank <= 0:
            # Rank 0 refreshes the on-disk replay buffer. Best-effort as
            # before, but failures are now logged instead of silently
            # swallowed by a bare `except: pass`.
            try:
                save_set, features = save_load_diverse_sample(model, trainds)
                if all_replay is None:
                    all_replay = save_set
                else:
                    all_replay = datasets.concatenate_datasets([all_replay, save_set])
                if all_features == []:
                    all_features = features
                else:
                    all_features.extend(features)
                np.save("./all_features.npy", np.array(all_features))
                all_replay.save_to_disk("all_replay@{}.hf".format(cur_dataset))
            except Exception:
                logger.warning("Failed to refresh replay buffer for %s", cur_dataset, exc_info=True)
        if training_args.local_rank != -1:
            # Other ranks wait for rank 0 to finish writing the buffer.
            torch.distributed.barrier()
        all_replay = load_from_disk("all_replay@{}.hf".format(cur_dataset))
        all_ids.extend([task2id[cur_dataset]] * 50)
        all_features = np.load("./all_features.npy").tolist()
        # Second pass: re-train on the whole replay buffer only.
        model.encoder.reset_train_count()
        trainer = QuestionAnsweringTrainer(
            model=model,
            args=training_args,
            train_dataset=all_replay,
            eval_dataset=None,
            eval_examples=None,
            answer_column_name=answer_column,
            dataset_name=data_args.dataset_name,
            tokenizer=tokenizer,
            data_collator=data_collator,
            compute_metrics=compute_metrics if training_args.predict_with_generate else None,
        )
        train_result = trainer.train()
        model.encoder.add_negs(all_ids, all_features)
        # Evaluate on every task seen so far.
        for pre_dataset in pre_tasks:
            data_args.dataset_name = pre_dataset
            metric = load_metric(dataset_name_to_metric[pre_dataset])
            eval_dataset, eval_examples = eval_dataloaders[pre_dataset]
            trainer = QuestionAnsweringTrainer(
                model=model,
                args=training_args,
                train_dataset=None,
                eval_dataset=eval_dataset,
                eval_examples=eval_examples,
                answer_column_name=answer_column,
                dataset_name=data_args.dataset_name,
                tokenizer=tokenizer,
                data_collator=data_collator,
                compute_metrics=compute_metrics if training_args.predict_with_generate else None,
            )
            torch.cuda.empty_cache()
            logger.info("*** Evaluate:{} ***".format(data_args.dataset_name))
            # NOTE(review): this deliberately overrides the generation settings
            # computed above, falling back to the trainer defaults.
            max_length, num_beams, ignore_keys_for_eval = None, None, None
            metrics = trainer.evaluate(max_length=max_length, num_beams=num_beams, ignore_keys=ignore_keys_for_eval, metric_key_prefix="eval")
            max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
            metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
            trainer.log_metrics("eval", metrics)
            trainer.save_metrics("eval", metrics)
            if training_args.local_rank <= 0:
                try:
                    print("after_train_", cur_dataset, "_test_", pre_dataset, file=fileout)
                    print(metrics, file=fileout)
                except Exception:
                    pass

    # Model-card metadata about the languages involved.
    # BUG FIX: `kwargs` was never initialized in this function, so setting a
    # source/target language previously raised NameError on the line below.
    kwargs = {}
    languages = [l for l in [data_args.source_lang, data_args.target_lang] if l is not None]
    if len(languages) > 0:
        kwargs["language"] = languages
    fileout.close()  # BUG FIX: the log file handle was previously leaked
    return None
def _mp_fn(index):
    """Per-process entry point for `xla_spawn` (TPU training).

    `index` is the worker ordinal supplied by the spawner; it is ignored
    here — configuration comes from the command line parsed inside `main`.
    """
    # For xla_spawn (TPUs)
    main()
164,715 | import logging
import os
import torch
import copy,random
import sys
import json
from dataclasses import dataclass, field
from typing import Optional
from typing import List, Optional, Tuple
from sklearn.cluster import KMeans
from models.metat5notask import T5ForConditionalGeneration as PromptT5
from downstream.dataset_processors import *
from downstream.l2ptrainer import QuestionAnsweringTrainer
import datasets
import numpy as np
from datasets import load_dataset, load_metric,load_from_disk,concatenate_datasets
import os
from functools import partial
import transformers
from transformers import (
AutoConfig,
AutoModelForSeq2SeqLM,
AutoTokenizer,
DataCollatorForSeq2Seq,
HfArgumentParser,
M2M100Tokenizer,
MBart50Tokenizer,
EvalPrediction,
MBart50TokenizerFast,
MBartTokenizer,
MBartTokenizerFast,
Seq2SeqTrainer,
Seq2SeqTrainingArguments,
default_data_collator,
set_seed,
)
from pathlib import Path
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
def main():
def preprocess_validation_function(examples):
preprocess_fn = dataset_name_to_func(data_args.dataset_name)
inputs, targets = preprocess_fn(examples, question_column, context_column, answer_column)
model_inputs = tokenizer(inputs, max_length=data_args.max_source_length, padding=padding, truncation=True)
# Setup the tokenizer for targets
model_inputs["example_id"] = []
for i in range(len(model_inputs["input_ids"])):
# One example can give several spans, this is the index of the example containing this span of text.
sample_index = i #sample_mapping[i]
model_inputs["example_id"].append(examples["id"][sample_index])
with tokenizer.as_target_tokenizer():
labels = tokenizer(targets, max_length=data_args.max_target_length, padding=padding, truncation=True)
# If we are padding here, replace all tokenizer.pad_token_id in the labels by -100 when we want to ignore
# padding in the loss.
if padding == "max_length" and data_args.ignore_pad_token_for_loss:
labels["input_ids"] = [
[(l if l != tokenizer.pad_token_id else -100) for l in label] for label in labels["input_ids"]
]
model_inputs["labels"] = labels["input_ids"]
if True:
pass
# logger.info(f'Loading task {data_args.dataset_name} prompt')
# format_id = format2id[dataset2format[data_args.dataset_name]]
# task_id = task2id[data_args.dataset_name]
# format_prompt_ids = [- (i + 1) for i in range(format_id * data_args.prompt_number, (
# format_id + 1) * data_args.prompt_number)] # list(range(-(format_id * data_args.prompt_number+1), -((format_id + 1) * data_args.prompt_number+1)))
# task_prompt_id_start = len(format2id.keys()) * data_args.prompt_number
# logger.info('Prompt ids {}: {}'.format(task_prompt_id_start + task_id * data_args.prompt_number,
# task_prompt_id_start + (task_id + 1) * data_args.prompt_number))
# task_prompt_ids = [- (i + 1) for i in range(task_prompt_id_start + task_id * data_args.prompt_number,
# task_prompt_id_start + (task_id + 1) * data_args.prompt_number)]
# input_ids = copy.deepcopy(
# [format_prompt_ids + task_prompt_ids + input_ids for input_ids in model_inputs['input_ids']])
#input_ids = copy.deepcopy([format_prompt_ids + input_ids for input_ids in model_inputs['input_ids']])
# model_inputs['input_ids'] = input_ids
# model_inputs['attention_mask'] = [[1] * data_args.prompt_number * 2 + attention_mask for attention_mask in
# model_inputs['attention_mask']]
# input_ids = copy.deepcopy([input])
return model_inputs
    def compute_metrics(p: EvalPrediction):
        """Score decoded predictions against references using the closure
        variable `metric` (reloaded per dataset in the training loop)."""
        return metric.compute(predictions=p.predictions, references=p.label_ids)
def save_prompt_embedding(model,path):
prompt_embedding = model.state_dict()['encoder.prompt_embeddings.weight']
save_prompt_info = {'encoder.prompt_embeddings.weight':copy.deepcopy(prompt_embedding),'task2id':task2id,'format2id':format2id}
prompt_path = os.path.join(path,'prompt_embedding_info')
torch.save(save_prompt_info,prompt_path)
logger.info(f'Saving prompt embedding information to {prompt_path}')
def preprocess_function(examples):
preprocess_fn = dataset_name_to_func(data_args.dataset_name)
inputs, targets = preprocess_fn(examples, question_column, context_column, answer_column)
model_inputs = tokenizer(inputs, max_length=data_args.max_source_length, padding=padding, truncation=True)
# Setup the tokenizer for targets
with tokenizer.as_target_tokenizer():
labels = tokenizer(targets, max_length=data_args.max_target_length, padding=padding, truncation=True)
# If we are padding here, replace all tokenizer.pad_token_id in the labels by -100 when we want to ignore
# padding in the loss.
if padding == "max_length" and data_args.ignore_pad_token_for_loss:
labels["input_ids"] = [
[(l if l != tokenizer.pad_token_id else -100) for l in label] for label in labels["input_ids"]
]
model_inputs["labels"] = labels["input_ids"]
return model_inputs
def save_load_diverse_sample(model,trainset):
with torch.no_grad():
device = 'cuda:0'
search_upbound = len(trainset)//4
query_idxs = [None]*30
keys = model.encoder.domain_keys
for idx,item in enumerate(trainset.select(range(search_upbound))):
query = model.encoder.get_query_vector(input_ids=torch.tensor([item['input_ids']]).long().to(device),
attention_mask=torch.tensor([item['attention_mask']]).long().to(device),
return_dict=True)
result = torch.matmul(query,keys.t())
result = torch.topk(result,5).indices[0].cpu().numpy().tolist()
key_sel = None
for key_idx in result:
if query_idxs[key_idx] is None or len(query_idxs[key_idx])<3:
key_sel = key_idx
break
if key_sel is not None:
if query_idxs[key_sel] is None:
query_idxs[key_sel] = [idx]
else:
query_idxs[key_sel].append(idx)
total_idxs = []
for item in query_idxs:
try:
total_idxs.extend(item[:3])
except:
total_idxs.extend(random.sample(list(range(search_upbound,len(trainset))),3))
total_idxs = list(set(total_idxs))
total_idxs = random.sample(total_idxs,50)
sub_set = trainset.select(total_idxs)
features = []
for idx,item in enumerate(sub_set):
query = model.encoder.get_query_vector(input_ids=torch.tensor([item['input_ids']]).long().to(device),
attention_mask=torch.tensor([item['attention_mask']]).long().to(device),
return_dict=True)
features.append(query.detach().cpu().numpy())
return sub_set,features
def dataset_name_to_func(dataset_name):
    """Return the batch-preprocessing function registered for `dataset_name`.

    Raises KeyError for datasets that have no registered preprocessor.
    """
    # Several datasets share the same preprocessing shape; bind the shared
    # handlers once so the table below reads as a registry.
    extractive = preprocess_sqaud_batch
    yes_no = preprocess_boolq_batch
    multi_choice = preprocess_race_batch
    registry = {
        'squad': extractive,
        'squad_v2': extractive,
        'boolq': yes_no,
        'narrativeqa': preprocess_narrativeqa_batch,
        'race': multi_choice,
        'newsqa': preprocess_newsqa_batch,
        'quoref': extractive,
        'ropes': preprocess_ropes_batch,
        'drop': preprocess_drop_batch,
        'nqopen': preprocess_sqaud_abstractive_batch,
        # 'multirc': preprocess_boolq_batch,
        'boolq_np': yes_no,
        'openbookqa': preprocess_openbookqa_batch,
        'mctest': multi_choice,
        'social_iqa': preprocess_social_iqa_batch,
        'dream': preprocess_dream_batch,
    }
    return registry[dataset_name]
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
dataset_name_to_metric = {
'squad': 'metric/squad_v1_local/squad_v1_local.py',
'squad_v2': 'metric/squad_v2_local/squad_v2_local.py',
'newsqa': 'metric/squad_v2_local/squad_v2_local.py',
'boolq': 'metric/accuracy.py',
'narrativeqa': 'metric/rouge_local/rouge_metric.py',
'race': 'metric/accuracy.py',
'quoref': 'metric/squad_v1_local/squad_v1_local.py',
'ropes': 'metric/squad_v1_local/squad_v1_local.py',
'drop': 'metric/squad_v1_local/squad_v1_local.py',
'nqopen': 'metric/squad_v1_local/squad_v1_local.py',
'boolq_np': 'metric/accuracy.py',
'openbookqa': 'metric/accuracy.py',
'mctest': 'metric/accuracy.py',
'social_iqa': 'metric/accuracy.py',
'dream': 'metric/accuracy.py',
}
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
)
log_level = training_args.get_process_log_level()
logger.setLevel(log_level)
datasets.utils.logging.set_verbosity(log_level)
transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
)
logger.info(f"Training/evaluation parameters {training_args}")
if data_args.source_prefix is None and model_args.model_name_or_path in [
"t5-small",
"t5-base",
"t5-large",
"t5-3b",
"t5-11b",
]:
logger.warning(
"You're running a t5 model but didn't provide a source prefix, which is expected, e.g. with "
"`--source_prefix 'translate English to German: ' `"
)
# Detecting last checkpoint.
last_checkpoint = None
if os.path.isdir(training_args.output_dir) and not training_args.overwrite_output_dir:
last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome."
)
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
)
# Set seed before initializing model.
set_seed(training_args.seed)
config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
use_fast=model_args.use_fast_tokenizer,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
# tokenizer.add_tokens(['[TASK]', '[ABSTRACTIVE]','[QUESTION]','[CONTEXT]','[BOOL]','[EXTRACTIVE]','[MultiChoice]',
# '[OPTIONS]'])
tokens_to_add = ['[ABSTRACTIVE]', '[BOOL]', '[EXTRACTIVE]', '[MultiChoice]']
special_tokens_dict = {'additional_special_tokens': ['[TASK]', '[QUESTION]', '[CONTEXT]',
'[OPTIONS]']}
tokenizer.add_tokens(tokens_to_add)
num_added_toks = tokenizer.add_special_tokens(special_tokens_dict)
added_tokens = tokenizer.get_added_vocab()
logger.info('Added tokens: {}'.format(added_tokens))
model = PromptT5.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
# task_num = data_args.max_task_num,
# prompt_num = data_args.prompt_number,
# format_num = data_args.qa_task_type_num,
# add_task_prompt = False
)
model.resize_token_embeddings(len(tokenizer))
#reload format specific task-prompt for newly involved task
#format_prompts###task_promptsf
data_args.reload_from_trained_prompt = False#@
data_args.load_from_format_task_id = False#@
### before pretrain come !!!!!!
if data_args.load_from_format_task_id and (data_args.dataset_name not in seed_datasets) and not data_args.reload_from_trained_prompt:
task_start_id = data_args.prompt_number * len(format2dataset.keys())
task_id = task_start_id + task2id[data_args.dataset_name] * data_args.prompt_number
format_task_id = task_start_id + task2id[dataset2format[data_args.dataset_name]] * data_args.prompt_number
model.state_dict()['encoder.prompt_embeddings.weight'][task_id:task_id+data_args.prompt_number,:] = model.state_dict()['encoder.prompt_embeddings.weight'][format_task_id:format_task_id+data_args.prompt_number,:]
logger.info(f'Successfully initialize format {dataset2format[data_args.dataset_name]} task prompt for new task {data_args.dataset_name}, task id {task_id}')
# print(dataset2format[data_args.dataset_name])
# print(data_args.dataset_name)
elif data_args.reload_from_trained_prompt:
assert data_args.trained_prompt_path,'Must specify the path of stored prompt'
prompt_info = torch.load(data_args.trained_prompt_path)
assert prompt_info['task2id'][data_args.dataset_name]==task2id[data_args.dataset_name],f'the task id in trained prompt task id is not matched to the current task id for {data_args.dataset_name}'
assert prompt_info['format2id'].keys()==format2id.keys(),'the format dont match'
task_start_id = data_args.prompt_number * len(format2dataset.keys())
task_id = task_start_id + task2id[data_args.dataset_name] * data_args.prompt_number
logger.info('task id range {} {}'.format(task_id,task_id+data_args.prompt_number))
# assert torch.sum(model.state_dict()['encoder.prompt_embeddings.weight'][task_id:task_id+data_args.prompt_number,:] - prompt_info['encoder.prompt_embeddings.weight'][task_id:task_id+data_args.prompt_number,:])==0
model.state_dict()['encoder.prompt_embeddings.weight'][task_id:task_id+data_args.prompt_number,:] = prompt_info['encoder.prompt_embeddings.weight'][task_id:task_id+data_args.prompt_number,:]
format_id = format2id[dataset2format[data_args.dataset_name]]
model.state_dict()['encoder.prompt_embeddings.weight'][format_id*data_args.prompt_number:(format_id+1)*data_args.prompt_number, :] = prompt_info['encoder.prompt_embeddings.weight'][format_id*data_args.prompt_number:(format_id+1)*data_args.prompt_number, :]
logger.info(
f'Successfully restore task+format prompt for the task {data_args.dataset_name} from {data_args.trained_prompt_path}')
# Set decoder_start_token_id
if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
if isinstance(tokenizer, MBartTokenizer):
model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.target_lang]
else:
model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.target_lang)
if model.config.decoder_start_token_id is None:
raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")
prefix = data_args.source_prefix if data_args.source_prefix is not None else ""
if training_args.local_rank == -1 or training_args.no_cuda:
device = torch.device("cuda")
n_gpu = torch.cuda.device_count()
# Temporarily set max_target_length for training.
max_target_length = data_args.max_target_length
padding = "max_length" if data_args.pad_to_max_length else False
if training_args.label_smoothing_factor > 0 and not hasattr(model, "prepare_decoder_input_ids_from_labels"):
logger.warning(
"label_smoothing is enabled but the `prepare_decoder_input_ids_from_labels` method is not defined for"
f"`{model.__class__.__name__}`. This will lead to loss being calculated twice and will take up more memory"
)
question_column = data_args.question_column
context_column = data_args.context_column
answer_column = data_args.answer_column
# import random
if data_args.max_source_length > tokenizer.model_max_length:
logger.warning(
f"The max_seq_length passed ({data_args.max_source_length}) is larger than the maximum length for the"
f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
)
data_args.max_source_length = min(data_args.max_source_length, tokenizer.model_max_length)
# Data collator
label_pad_token_id = -100 if data_args.ignore_pad_token_for_loss else tokenizer.pad_token_id
if data_args.pad_to_max_length:
data_collator = default_data_collator
else:
data_collator = DataCollatorForSeq2Seq(
tokenizer,
model=model,
label_pad_token_id=label_pad_token_id,
pad_to_multiple_of=8 if training_args.fp16 else None,
)
#start
train_dataloaders = {}
eval_dataloaders = {}
replay_dataloaders = {}
all_replay = None
for ds_name in ['squad','narrativeqa','race','newsqa','quoref','drop','nqopen','openbookqa','mctest','social_iqa','dream']:
eval_dataloaders[ds_name] = (load_from_disk("./oursnotask/{}-eval.hf".format(ds_name)),load_from_disk("./oursnotask/{}-evalex.hf".format(ds_name)))
pre_tasks = []
pre_general = []
pre_test = []
max_length = (
training_args.generation_max_length
if training_args.generation_max_length is not None
else data_args.val_max_target_length
)
num_beams = data_args.num_beams if data_args.num_beams is not None else training_args.generation_num_beams
task_sequence = ['squad','newsqa','narrativeqa','nqopen','race','openbookqa','mctest','social_iqa']
# task_sequence = ["woz.en","srl","sst","wikisql","squad"]
fileout = open("diana_log.txt",'w')
all_replay = None
all_features = []
all_ids = []
cluster_num=0
for cur_dataset in task_sequence:
p=200
if p>len(load_from_disk("./oursnotask/{}-train.hf".format(cur_dataset))):
p = len(load_from_disk("./oursnotask/{}-train.hf".format(cur_dataset)))
trainds = load_from_disk("./oursnotask/{}-train.hf".format(cur_dataset))
cluster_num+=5
pre_tasks.append(cur_dataset)
if cur_dataset==task_sequence[-1]:
pre_tasks.extend(["drop","quoref","dream"])
data_args.dataset_name = cur_dataset
logger.info("current_dataset:"+cur_dataset)
training_args.do_train = True
training_args.to_eval = False
metric = load_metric(dataset_name_to_metric[cur_dataset])
if all_replay is not None:
fused = datasets.concatenate_datasets([all_replay,trainds])
else:
fused = trainds
training_args.num_train_epochs = 5
model.encoder.reset_train_count()
trainer = QuestionAnsweringTrainer(
model=model,
args=training_args,
train_dataset=fused,
eval_dataset=None,
eval_examples=None,
answer_column_name=answer_column,
dataset_name=data_args.dataset_name,
tokenizer=tokenizer,
data_collator=data_collator,
compute_metrics=compute_metrics if training_args.predict_with_generate else None,
)
train_result = trainer.train()
if training_args.local_rank<=0:
save_set,features = save_load_diverse_sample(model,trainds)
if all_replay is None:
all_replay = save_set
else:
all_replay = datasets.concatenate_datasets([all_replay,save_set])
if all_features==[]:
all_features=features
else:
all_features.extend(features)
np.save("./all_features.npy",np.array(all_features))
all_replay.save_to_disk("all_replay@{}.hf".format(cur_dataset))
if training_args.local_rank!=-1:
torch.distributed.barrier()
all_replay = load_from_disk("all_replay@{}.hf".format(cur_dataset))
all_ids.extend([task2id[cur_dataset]]*50)
all_features=np.load("./all_features.npy").tolist()
model.encoder.reset_train_count()
trainer = QuestionAnsweringTrainer(
model=model,
args=training_args,
train_dataset=all_replay,
eval_dataset=None,
eval_examples=None,
answer_column_name=answer_column,
dataset_name=data_args.dataset_name,
tokenizer=tokenizer,
data_collator=data_collator,
compute_metrics=compute_metrics if training_args.predict_with_generate else None,
)
train_result = trainer.train()
model.encoder.add_negs(all_ids,all_features)
for pre_dataset in pre_tasks:
data_args.dataset_name = pre_dataset
metric = load_metric(dataset_name_to_metric[pre_dataset])
eval_dataset,eval_examples = eval_dataloaders[pre_dataset]
trainer = QuestionAnsweringTrainer(
model=model,
args=training_args,
train_dataset=None,
eval_dataset=eval_dataset,
eval_examples=eval_examples,
answer_column_name=answer_column,
dataset_name=data_args.dataset_name,
tokenizer=tokenizer,
data_collator=data_collator,
compute_metrics=compute_metrics if training_args.predict_with_generate else None,
)
torch.cuda.empty_cache()
logger.info("*** Evaluate:{} ***".format(data_args.dataset_name))
max_length, num_beams, ignore_keys_for_eval = None, None, None
metrics = trainer.evaluate(max_length=max_length, num_beams=num_beams, ignore_keys=ignore_keys_for_eval,metric_key_prefix="eval")
max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
trainer.log_metrics("eval", metrics)
trainer.save_metrics("eval", metrics)
if training_args.local_rank<=0:
try:
print("after_train_",cur_dataset,"_test_",pre_dataset,file=fileout)
print(metrics,file=fileout)
except:
pass
languages = [l for l in [data_args.source_lang, data_args.target_lang] if l is not None]
if len(languages) > 0:
kwargs["language"] = languages
return None
def _mp_fn(index):
    # For xla_spawn (TPUs)
    # Entry point invoked by torch_xla's xla_spawn launcher; `index` is the
    # process ordinal supplied by the spawner and is intentionally unused.
    main()
164,718 | import copy
import math
import os
import warnings
import numpy as np
from random import random
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from torch.utils.checkpoint import checkpoint
from .utils import *
from transformers.activations import ACT2FN
from transformers.file_utils import (
DUMMY_INPUTS,
DUMMY_MASK,
add_start_docstrings,
add_start_docstrings_to_model_forward,
is_torch_fx_proxy,
replace_return_docstrings,
)
from transformers.modeling_outputs import (
BaseModelOutput,
BaseModelOutputWithPastAndCrossAttentions,
Seq2SeqLMOutput,
Seq2SeqModelOutput,
)
from transformers.modeling_utils import PreTrainedModel, find_pruneable_heads_and_indices, prune_linear_layer
from transformers.utils import logging
from transformers.utils.model_parallel_utils import assert_device_map, get_device_map
from transformers.models.t5.configuration_t5 import T5Config
logger = logging.get_logger(__name__)
import torch.nn.functional as F
from torch import nn
import torch
The provided code snippet includes necessary dependencies for implementing the `load_tf_weights_in_t5` function. Write a Python function `def load_tf_weights_in_t5(model, config, tf_checkpoint_path)` to solve the following problem:
Load tf checkpoints in a pytorch model.
Here is the function:
def load_tf_weights_in_t5(model, config, tf_checkpoint_path):
    """Load tf checkpoints in a pytorch model.

    Walks every variable in the TensorFlow checkpoint, maps its slash-separated
    scope path onto the corresponding PyTorch submodule/parameter, and copies
    the weight in (transposing 2-D kernels, since TF stores them transposed
    relative to PyTorch Linear layers). Optimizer slot variables are skipped.
    Returns the model with weights assigned in place.
    """
    try:
        import re
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise
    tf_path = os.path.abspath(tf_checkpoint_path)
    logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_path, name)
        names.append(name)
        tf_weights[name] = array
    for txt_name in names:
        name = txt_name.split("/")
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v,
        # which are not required for using the pretrained model
        if any(
            n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"]
            for n in name
        ):
            logger.info(f"Skipping {'/'.join(name)}")
            tf_weights.pop(txt_name, None)
            continue
        if "_slot_" in name[-1]:
            # Optimizer slot variables (momentum etc.) — not model weights.
            logger.info(f"Skipping {'/'.join(name)}")
            tf_weights.pop(txt_name, None)
            continue
        # Descend from the model root, one scope component at a time.
        pointer = model
        array = tf_weights[txt_name]
        for m_name in name:
            # "layer_3" style components are split into ("layer", "3") so the
            # numeric suffix can index into a ModuleList below.
            if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
                scope_names = re.split(r"_(\d+)", m_name)
            else:
                scope_names = [m_name]
            # Map TF scope names onto the PyTorch attribute layout.
            if scope_names[0] in ["kernel", "scale", "embedding"]:
                pointer = getattr(pointer, "weight")
            elif scope_names[0] == "self_attention":
                pointer = getattr(pointer, "layer")
                pointer = pointer[0]
            elif scope_names[0] == "enc_dec_attention":
                pointer = getattr(pointer, "layer")
                pointer = pointer[1]
            elif scope_names[0] == "dense_relu_dense":
                pointer = getattr(pointer, "layer")
                pointer = pointer[2]
            elif scope_names[0] == "rms_norm":
                if hasattr(pointer, "layer_norm"):
                    pointer = getattr(pointer, "layer_norm")
                elif hasattr(pointer, "final_layer_norm"):
                    pointer = getattr(pointer, "final_layer_norm")
            elif scope_names[0] == "scale":
                # NOTE(review): unreachable — "scale" is already handled by the
                # first branch above; kept verbatim (matches upstream code).
                pointer = getattr(pointer, "weight")
            elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
                pointer = getattr(pointer, "bias")
            elif scope_names[0] == "squad":
                pointer = getattr(pointer, "classifier")
            elif scope_names[0] == "decoder" and name[1] == "logits":
                continue
            elif scope_names[0] == "logits":
                pointer = getattr(pointer, "lm_head")
            elif scope_names[0] == "wi" and len(scope_names) > 1 and scope_names[1].isdigit():
                pointer = getattr(pointer, f"wi_{scope_names[1]}")
                continue
            else:
                try:
                    pointer = getattr(pointer, scope_names[0])
                except AttributeError:
                    logger.info(f"Skipping {'/'.join(name)}")
                    continue
            if len(scope_names) >= 2:
                # Numeric suffix indexes into the module list reached above.
                num = int(scope_names[1])
                pointer = pointer[num]
        if scope_names[0] not in ["kernel", "scale", "embedding"]:
            pointer = getattr(pointer, "weight")
        if scope_names[0] != "embedding":
            # TF kernels are stored transposed relative to torch.nn.Linear.
            logger.info(f"Transposing numpy weight of shape {array.shape} for {name}")
            array = np.transpose(array)
        try:
            assert (
                pointer.shape == array.shape
            ), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched"
        except AssertionError as e:
            e.args += (pointer.shape, array.shape)
            raise
        logger.info(f"Initialize PyTorch weight {name}")
        pointer.data = torch.from_numpy(array.astype(np.float32))
        tf_weights.pop(txt_name, None)
    logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}.")
    return model
164,719 | import torch.nn.functional as F
from torch import nn
import torch
import copy
def euclidean_metric(a, b):
    """Pairwise negative squared Euclidean distance between rows of a and b.

    Returns a tensor of shape (a.shape[0], b.shape[0]) where entry (i, j) is
    -||a[i] - b[j]||^2, so larger values mean closer vectors.
    """
    # Broadcast (n, 1, d) against (1, m, d) instead of materializing expands.
    diff = a[:, None, :] - b[None, :, :]
    return -(diff * diff).sum(dim=2)
164,720 | import torch.nn.functional as F
from torch import nn
import torch
import copy
def cosine_metric(a, b):
    """Pairwise dot products between rows of a and rows of b.

    Returns a tensor of shape (a.shape[0], b.shape[0]) with entry (i, j)
    equal to <a[i], b[j]>. (Despite the name, inputs are not normalized here;
    this is cosine similarity only if callers pass unit vectors.)
    """
    # Broadcasted elementwise product + reduction, identical arithmetic to the
    # original unsqueeze/expand formulation.
    return (a[:, None, :] * b[None, :, :]).sum(dim=2)
164,726 | import collections
import string
import re
import numpy as np
import json
from datasets import load_metric
def computeROUGE(greedy, answer):
    """Average per-example ROUGE score dicts over the batch, scaled to 0-100.

    Returns None when `compute_rouge_scores` produced no scores.
    """
    per_example = compute_rouge_scores(greedy, answer)
    if not per_example:
        return None
    averaged = {}
    # Key set is taken from the first example's score dict; missing keys in
    # later examples contribute 0.0.
    for key in per_example[0].keys():
        total = sum(scores.get(key, 0.0) for scores in per_example)
        averaged[key] = total / len(per_example) * 100
    return averaged
def normalize_text(s):
    """Lower text and remove punctuation, articles and extra whitespace."""
    # Pipeline (order matters): lower -> strip punctuation -> drop articles
    # -> collapse runs of whitespace.
    lowered = s.lower()
    punct = set(string.punctuation)
    no_punct = ''.join(ch for ch in lowered if ch not in punct)
    no_articles = re.sub(r'\b(a|an|the)\b', ' ', no_punct)
    return ' '.join(no_articles.split())
def computeLFEM(greedy, answer):
    """Logical-form exact match for WikiSQL-style examples.

    Each `answer` entry is a dict with 'answer', 'sql' and 'table' fields.
    Predictions are parsed with `to_lf` and compared against the gold SQL
    (condition values lower-cased and space-stripped first). Examples that
    fail to parse simply count as incorrect.

    Returns:
        (accuracy_percentage, text_answers) where text_answers is a list of
        single-element lists with each gold answer string lower-cased.
    """
    count = 0
    correct = 0
    text_answers = []
    for idx, (g, ex) in enumerate(zip(greedy, answer)):
        count += 1
        text_answers.append([ex['answer'].lower()])
        try:
            gt = ex['sql']
            conds = gt['conds']
            lower_conds = []
            # NOTE: `lc = c` aliases the original condition list, so the
            # lower-casing below mutates ex['sql'] in place as well.
            for c in conds:
                lc = c
                lc[2] = str(lc[2]).lower().replace(' ', '')
                lower_conds.append(lc)
            gt['conds'] = lower_conds
            # print("gt_answer:",ex['answer'].lower())
            lf = to_lf(g, ex['table'])
            # print(lf,"lf")
            # print(gt,"gt")
            correct += lf == gt
        except Exception as e:
            # Unparsable prediction (or malformed gold) scores as incorrect.
            continue
    return (correct / count) * 100, text_answers
def computeF1(outputs, targets):
    """Mean best token-F1 (max over gold answers) across the batch, 0-100."""
    per_example = [
        metric_max_over_ground_truths(f1_score, prediction, golds)
        for prediction, golds in zip(outputs, targets)
    ]
    return sum(per_example) / len(outputs) * 100
def computeEM(outputs, targets):
    """Mean best exact-match (max over gold answers) across the batch, 0-100."""
    total = 0
    for prediction, golds in zip(outputs, targets):
        total += metric_max_over_ground_truths(exact_match, prediction, golds)
    return total / len(outputs) * 100
def computeEMtri(outputs, targets):
    """Exact match for three-way NLI predictions, scaled to 0-100.

    Each free-form prediction is first snapped to whichever of the three
    labels (entailment / neutral / contradiction) it is most string-similar
    to, then scored with exact match against the gold labels.
    """
    options = ["entailment","neutral","contradiction"]
    preds = []
    for o in outputs:
        scores = [score_string_similarity(opt, o) for opt in options]
        max_idx = np.argmax(scores)
        preds.append(options[max_idx])
    # (Removed stray debug prints of `preds` and `targets` that polluted stdout.)
    outs = [metric_max_over_ground_truths(exact_match, o, t) for o, t in zip(preds, targets)]
    return sum(outs) / len(outputs) * 100
def computeCF1(greedy, answer):
    """Corpus-level F1 from summed per-example confusion counts.

    `score(g, a)` is expected to return four counts which are accumulated as
    (tp, tn, sys_pos, real_pos) over the whole batch.

    Returns:
        (f1, precision, recall), each scaled to 0-100. All three are 0 when
        there are no true positives.
    """
    scores = np.zeros(4)
    for g, a in zip(greedy, answer):
        scores += score(g, a)
    tp, tn, sys_pos, real_pos = scores.tolist()
    # (Removed unused local `total = len(answer)`.)
    if tp == 0:
        p = r = f = 0.0
    else:
        p = tp / float(sys_pos)
        r = tp / float(real_pos)
        f = 2 * p * r / (p + r)
    return f * 100, p * 100, r * 100
def computeDialogue(greedy, answer):
    """Dialogue-state tracking metrics (WOZ-style), all scaled to 0-100.

    Each `answer` entry is (turn_id, gold_state_string); `greedy` holds the
    predicted state strings. Belief state is accumulated turn by turn and
    reset whenever the turn id indicates a new dialogue.

    Returns:
        (joint_goal_em, turn_request_em, turn_goal_em, answer) where the last
        element is the gold strings re-wrapped as single-element lists in
        example order.
    """
    examples = []
    for idx, (g, a) in enumerate(zip(greedy, answer)):
        examples.append((a[0], g, a[1], idx))
    #examples.sort()
    turn_request_positives = 0
    turn_goal_positives = 0
    joint_goal_positives = 0
    ldt = None
    for ex in examples:
        # Turn ids share a prefix within one dialogue; a prefix change means a
        # new dialogue, so the accumulated belief states are reset.
        if ldt is None or ldt.split('_')[:-1] != ex[0].split('_')[:-1]:
            state, answer_state = {}, {}
            ldt = ex[0]
        delta_state = to_delta_state(ex[1])
        answer_delta_state = to_delta_state(ex[2])
        state = update_state(state, delta_state['inform'])
        answer_state = update_state(answer_state, answer_delta_state['inform'])
        if dict_cmp(state, answer_state):
            joint_goal_positives += 1
        if delta_state['request'] == answer_delta_state['request']:
            turn_request_positives += 1
        if dict_cmp(delta_state['inform'], answer_delta_state['inform']):
            turn_goal_positives += 1
    joint_goal_em = joint_goal_positives / len(examples) * 100
    turn_request_em = turn_request_positives / len(examples) * 100
    turn_goal_em = turn_goal_positives / len(examples) * 100
    answer = [(x[-1], x[-2]) for x in examples]
    #answer.sort()
    answer = [[x[1]] for x in answer]
    return joint_goal_em, turn_request_em, turn_goal_em, answer
def compute_metrics(data, rouge=False, bleu=False, corpus_f1=False, logical_form=False, dialogue=False, tri=False):
    """Compute task-appropriate metrics for a batch of predictions.

    Args:
        data: object with `predictions` (list of {"prediction_text": ...})
            and `label_ids` (list of {"answers": {"text": [...]}}).
        rouge: when True, score with the local ROUGE metric and return early.
        bleu: accepted for API compatibility; unused in this implementation.
        corpus_f1: additionally report corpus-level F1/precision/recall.
        logical_form: WikiSQL logical-form EM; gold answers are read from disk.
        dialogue: WOZ dialogue-state metrics; gold answers are read from disk.
        tri: score EM as three-way NLI classification.

    Returns:
        collections.OrderedDict mapping metric names to values (0-100 scale).
    """
    if rouge:
        metric_func = load_metric("metric/rouge_local/rouge_metric.py")
        metrics = metric_func.compute(predictions=data.predictions, references=data.label_ids)
        metric_keys = ["rougeL"]
        metric_values = metrics["rougeL"]
        metric_dict = collections.OrderedDict(list(zip(metric_keys, [metric_values])))
        return metric_dict
    greedy = data.predictions
    answer = data.label_ids
    greedy = [_["prediction_text"] for _ in greedy]
    answer = [_["answers"]["text"] for _ in answer]
    if dialogue:
        # `with` ensures the gold-answer file handle is closed (was leaked).
        with open("./oursdeca/" + "woz.en_answers.json", 'r') as addition_answers:
            answer = json.load(addition_answers)
    if logical_form:
        with open("./oursdeca/" + "wikisql_answers.json", 'r') as addition_answers:
            answer = json.load(addition_answers)
    metric_keys = []
    metric_values = []
    if logical_form:
        lfem, answer = computeLFEM(greedy, answer)
        metric_keys += ['lfem']
        metric_values += [lfem]
    if tri:
        em = computeEMtri(greedy, answer)
    else:
        em = computeEM(greedy, answer)
    # (Removed stray debug prints of greedy[:20] / answer[:20].)
    metric_keys.append('em')
    metric_values.append(em)
    norm_greedy = [normalize_text(g) for g in greedy]
    norm_answer = [[normalize_text(a) for a in ans] for ans in answer]
    nf1 = computeF1(norm_greedy, norm_answer)
    nem = computeEM(norm_greedy, norm_answer)
    metric_keys.extend(['nf1', 'nem'])
    metric_values.extend([nf1, nem])
    # NOTE(review): this branch is unreachable — `rouge=True` already returned
    # at the top of the function. Kept verbatim pending a decision to delete.
    if rouge:
        rouge = computeROUGE(greedy, answer)
        metric_keys += ['rouge1', 'rouge2', 'rougeL', 'avg_rouge']
        avg_rouge = (rouge['rouge_1_f_score'] + rouge['rouge_2_f_score'] + rouge['rouge_l_f_score']) / 3
        metric_values += [rouge['rouge_1_f_score'], rouge['rouge_2_f_score'], rouge['rouge_l_f_score'], avg_rouge]
    if corpus_f1:
        corpus_f1, precision, recall = computeCF1(norm_greedy, norm_answer)
        metric_keys += ['corpus_f1', 'precision', 'recall']
        metric_values += [corpus_f1, precision, recall]
    if dialogue:
        joint_goal_em, request_em, turn_goal_em, answer = computeDialogue(greedy, answer)
        avg_dialogue = (joint_goal_em + request_em) / 2
        metric_keys += ['joint_goal_em', 'turn_request_em', 'turn_goal_em', 'avg_dialogue']
        metric_values += [joint_goal_em, request_em, turn_goal_em, avg_dialogue]
    metric_dict = collections.OrderedDict(list(zip(metric_keys, metric_values)))
    return metric_dict
164,731 | from asdl.hypothesis import Hypothesis
from asdl.transition_system import ApplyRuleAction, GenTokenAction
from asdl.sql.sql_transition_system import SelectColumnAction, SelectTableAction
class ActionInfo(object):
def __init__(self, action=None):
def __repr__(self, verbose=False):
class Hypothesis(object):
def __init__(self):
def apply_action(self, action):
def update_frontier_info(self):
def _find_frontier_node_and_field(tree_node):
def clone_and_apply_action(self, action):
def copy(self):
def completed(self):
class GenTokenAction(Action):
def __init__(self, token):
def is_stop_signal(self):
def __repr__(self):
class SelectColumnAction(GenTokenAction):
def __init__(self, column_id):
def column_id(self):
def __repr__(self):
class SelectTableAction(GenTokenAction):
def __init__(self, table_id):
def table_id(self):
def __repr__(self):
def get_action_infos(src_query: list = None, tgt_actions: list = None, force_copy=False):
    """Wrap each target action with bookkeeping (timestep, parent, copy info).

    Args:
        src_query: source tokens; used to record the copy position of GenToken
            actions by string-matching the token against the source.
        tgt_actions: sequence of target actions to annotate; defaults to an
            empty sequence.
        force_copy: when True, raise ValueError if a primitive token cannot be
            copied from the source.

    Returns:
        List of ActionInfo objects, one per target action, in order.
    """
    # Fix: the former signature used a mutable default (`tgt_actions: list = []`),
    # which is shared across calls; use None + local initialization instead.
    if tgt_actions is None:
        tgt_actions = []
    action_infos = []
    hyp = Hypothesis()
    for t, action in enumerate(tgt_actions):
        action_info = ActionInfo(action)
        action_info.t = t
        if hyp.frontier_node:
            action_info.parent_t = hyp.frontier_node.created_time
            action_info.frontier_prod = hyp.frontier_node.production
            action_info.frontier_field = hyp.frontier_field.field
        if isinstance(action, SelectColumnAction) or isinstance(action, SelectTableAction):
            # Schema-linking actions carry no copy information.
            pass
        elif isinstance(action, GenTokenAction):  # GenToken
            try:
                tok_src_idx = src_query.index(str(action.token))
                action_info.copy_from_src = True
                action_info.src_token_position = tok_src_idx
            except ValueError:
                if force_copy:
                    raise ValueError('cannot copy primitive token %s from source' % action.token)
        hyp.apply_action(action)
        action_infos.append(action_info)
    return action_infos
164,756 | import os, json, pickle, argparse, sys, time
from asdl.asdl import ASDLGrammar
from asdl.transition_system import TransitionSystem
from asdl.action_info import get_action_infos
from preprocess.common_utils import Preprocessor
def process_example(processor, entry, db, trans, verbose=False):
class ASDLGrammar(object):
def __init__(self, productions, file_path):
def __len__(self):
def productions(self):
def __getitem__(self, datum):
def get_prod_by_ctr_name(self, name):
def types(self):
def fields(self):
def primitive_types(self):
def composite_types(self):
def is_composite_type(self, asdl_type):
def is_primitive_type(self, asdl_type):
def from_filepath(file_path):
def _parse_field_from_text(_text):
def _parse_constructor_from_text(_text):
class TransitionSystem(object):
def __init__(self, grammar):
def get_actions(self, asdl_ast):
def tokenize_code(self, code, mode):
def compare_ast(self, hyp_ast, ref_ast):
def ast_to_surface_code(self, asdl_ast):
def surface_code_to_ast(self, code):
def get_primitive_field_actions(self, realized_field):
def get_valid_continuation_types(self, hyp):
def get_valid_continuating_productions(self, hyp):
def get_class_by_lang(lang):
GRAMMAR_FILEPATH = 'asdl/sql/grammar/sql_asdl_v2.txt'
def process_dataset(processor, dataset, tables, output_path=None, skip_large=False, verbose=False):
    """Preprocess every example in `dataset` against its database schema.

    Args:
        processor: Preprocessor instance forwarded to process_example.
        dataset: iterable of raw examples, each carrying a 'db_id' field.
        tables: mapping db_id -> database schema dict.
        output_path: when given, pickle the processed list to this path.
        skip_large: skip examples whose database has more than 100 columns.
        verbose: print per-sample progress.

    Returns:
        List of processed examples (shorter than `dataset` when skip_large
        filtered some out).
    """
    from utils.constants import GRAMMAR_FILEPATH
    grammar = ASDLGrammar.from_filepath(GRAMMAR_FILEPATH)
    trans = TransitionSystem.get_class_by_lang('sql')(grammar)
    processed_dataset = []
    for idx, entry in enumerate(dataset):
        if skip_large and len(tables[entry['db_id']]['column_names']) > 100:
            continue
        if verbose:
            print('*************** Processing %d-th sample **************' % (idx))
        entry = process_example(processor, entry, tables[entry['db_id']], trans, verbose=verbose)
        processed_dataset.append(entry)
    print('In total, process %d samples , skip %d extremely large databases.' % (len(processed_dataset), len(dataset) - len(processed_dataset)))
    if output_path is not None:
        # serialize preprocessed dataset; `with` closes and flushes the file
        # deterministically (the handle was previously never closed).
        with open(output_path, 'wb') as fout:
            pickle.dump(processed_dataset, fout)
    return processed_dataset
164,757 | import os, sqlite3
import numpy as np
import stanza, torch
import stanfordnlp
from stanfordnlp.server import CoreNLPClient
from nltk.corpus import stopwords
from itertools import product, combinations
from utils.constants import MAX_RELATIVE_DIST
def is_number(s):
    """Return True if `s` can be parsed as a float, else False.

    Also returns False (instead of letting a TypeError propagate) for inputs
    float() cannot accept at all, e.g. None or a list.
    """
    try:
        float(s)
        return True
    except (ValueError, TypeError):
        # ValueError: unparsable string; TypeError: non-numeric, non-string input.
        return False
164,758 | import os, sqlite3
import numpy as np
import stanza, torch
import stanfordnlp
from stanfordnlp.server import CoreNLPClient
from nltk.corpus import stopwords
from itertools import product, combinations
from utils.constants import MAX_RELATIVE_DIST
The provided code snippet includes necessary dependencies for implementing the `quote_normalization` function. Write a Python function `def quote_normalization(question)` to solve the following problem:
Normalize all usage of quotation marks into a separate \"
Here is the function:
def quote_normalization(question):
    """ Normalize all usage of quotation marks into a separate \" """
    marks = ["'", '"', '`', '‘', '’', '“', '”', '``', "''", "‘‘", "’’"]
    normalized = []
    for position, token in enumerate(question):
        opens = len(token) > 0 and token[0] in marks
        closes = len(token) > 0 and token[-1] in marks
        if len(token) > 2 and opens and closes:
            # fully quoted value in a single token: strip and emit both quotes
            normalized.extend(["\"", token[1:-1], "\""])
        elif len(token) > 2 and opens:
            normalized.extend(["\"", token[1:]])
        elif len(token) > 2 and closes:
            normalized.extend([token[:-1], "\""])
        elif token in marks:
            normalized.append("\"")
        elif len(token) == 2 and opens:
            # special case: the length of entity value is 1
            if position + 1 < len(question) and question[position + 1] in marks:
                normalized.extend(["\"", token[1]])
            else:
                normalized.append(token)
        else:
            normalized.append(token)
    return normalized
164,766 | import sys, os, time, json, gc
from argparse import Namespace
from utils.args import init_args
from utils.hyperparams import hyperparam_path
from utils.initialization import *
from utils.example import Example
from utils.batch import Batch
from utils.optimization import set_optimizer
from model.model_utils import Registrable
from model.model_constructor import *
# ---- experiment bootstrap: arguments, device, data, model ----
# NOTE(review): set_torch_device / logger / exp_path / sql_trans are not
# defined in this excerpt; presumably supplied by the star-imports above
# (utils.initialization, model.model_constructor) -- confirm.
args = init_args(sys.argv[1:])
device = set_torch_device(args.device)
if args.read_model_path:
    # resuming/evaluating: restore the hyperparameters the model was trained with
    params = json.load(open(os.path.join(args.read_model_path, 'params.json')), object_hook=lambda d: Namespace(**d))
    params.lazy_load = True
else:
    params = args
# load splits and record vocabulary/relation sizes on args for model construction
train_dataset, dev_dataset = Example.load_dataset('train'), Example.load_dataset('dev')
args.word_vocab, args.relation_num = len(Example.word_vocab), len(Example.relation_vocab)
# instantiate the registered text2sql model and move it to the target device
model = Registrable.by_name('text2sql')(params, sql_trans).to(device)
if args.read_model_path:
    # warm start from a saved checkpoint
    check_point = torch.load(open(os.path.join(args.read_model_path, 'model.bin'), 'rb'), map_location=device)
    model.load_state_dict(check_point['model'])
    logger.info("Load saved model from path: %s" % (args.read_model_path))
else:
    # fresh run: persist the hyperparameters next to future checkpoints
    json.dump(vars(params), open(os.path.join(exp_path, 'params.json'), 'w'), indent=4)
    if params.plm is None:
        # no pretrained LM: initialise the word embedding layer from pretrained word vectors
        ratio = Example.word2vec.load_embeddings(model.encoder.input_layer.word_embed, Example.word_vocab, device=device)
        logger.info("Init model and word embedding layer with a coverage %.2f" % (ratio))
# ---- main driver: train (with periodic dev evaluation) or evaluate only ----
if not args.testing:
    # total optimizer updates over the run: ceil(len/batch_size) per epoch
    num_training_steps = ((len(train_dataset) + args.batch_size - 1) // args.batch_size) * args.max_epoch
    num_warmup_steps = int(num_training_steps * args.warmup_ratio)
    logger.info('Total training steps: %d;\t Warmup steps: %d' % (num_training_steps, num_warmup_steps))
    optimizer, scheduler = set_optimizer(model, args, num_warmup_steps, num_training_steps)
    start_epoch, nsamples, best_result = 0, len(train_dataset), {'dev_acc': 0.}
    # step_size is the micro-batch size; grad_accumulate micro-batches form one update
    train_index, step_size = np.arange(nsamples), args.batch_size // args.grad_accumulate
    if args.read_model_path and args.load_optimizer:
        # resume optimizer/scheduler state from the checkpoint loaded above
        optimizer.load_state_dict(check_point['optim'])
        scheduler.load_state_dict(check_point['scheduler'])
        start_epoch = check_point['epoch'] + 1
    logger.info('Start training ......')
    for i in range(start_epoch, args.max_epoch):
        start_time = time.time()
        epoch_loss, epoch_gp_loss, count = 0, 0, 0
        np.random.shuffle(train_index)  # new sample order each epoch
        model.train()
        for j in range(0, nsamples, step_size):
            count += 1
            cur_dataset = [train_dataset[k] for k in train_index[j: j + step_size]]
            current_batch = Batch.from_example_list(cur_dataset, device, train=True, smoothing=args.smoothing)
            # loss, gp_loss = model(current_batch) # see utils/batch.py for batch elements
            # model returns decoding loss, graph-pruning loss and the combined objective
            loss, gp_loss, final_loss = model(current_batch)
            epoch_loss += loss.item()
            epoch_gp_loss += gp_loss.item()
            # print("Minibatch loss: %.4f" % (loss.item()))
            # loss += gp_loss
            # loss += rel_loss
            # if count == args.grad_accumulate or j + step_size >= nsamples:
            # loss += rel_loss
            final_loss.backward()
            # fgm.attack()
            # adv_loss, adv_gp_loss = model(current_batch)
            # adv_loss += adv_gp_loss
            # adv_loss.backward()
            # fgm.restore()
            if count == args.grad_accumulate or j + step_size >= nsamples:
                # one optimizer update per grad_accumulate micro-batches (or at epoch end)
                count = 0
                model.pad_embedding_grad_zero()
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
        logger.info('Training: \tEpoch: %d\tTime: %.4f\tTraining loss: %.4f/%.4f' % (i, time.time() - start_time, epoch_loss, epoch_gp_loss))
        torch.cuda.empty_cache()
        gc.collect()
        if i < args.eval_after_epoch: # avoid unnecessary evaluation
            continue
        start_time = time.time()
        dev_acc = decode('dev', os.path.join(exp_path, 'dev.iter' + str(i)), acc_type='sql')
        logger.info('Evaluation: \tEpoch: %d\tTime: %.4f\tDev acc: %.4f' % (i, time.time() - start_time, dev_acc))
        if dev_acc > best_result['dev_acc']:
            # new best dev accuracy: checkpoint model + optimizer + scheduler state
            best_result['dev_acc'], best_result['iter'] = dev_acc, i
            torch.save({
                'epoch': i, 'model': model.state_dict(),
                'optim': optimizer.state_dict(),
                'scheduler': scheduler.state_dict()
            }, open(os.path.join(exp_path, 'model.bin'), 'wb'))
            logger.info('NEW BEST MODEL: \tEpoch: %d\tDev acc: %.4f' % (i, dev_acc))
    logger.info('FINAL BEST RESULT: \tEpoch: %d\tDev acc: %.4f' % (best_result['iter'], best_result['dev_acc']))
    # check_point = torch.load(open(os.path.join(exp_path, 'model.bin'), 'rb'))
    # model.load_state_dict(check_point['model'])
    # dev_acc_beam = decode('dev', output_path=os.path.join(exp_path, 'dev.iter' + str(best_result['iter']) + '.beam' + str(args.beam_size)), acc_type='beam')
    # logger.info('FINAL BEST RESULT: \tEpoch: %d\tDev acc/Beam acc: %.4f/%.4f' % (best_result['iter'], best_result['dev_acc'], dev_acc_beam))
else:
    # evaluation-only mode: score the dev split with and without the value checker
    # start_time = time.time()
    # train_acc = decode('train', output_path=os.path.join(args.read_model_path, 'train.eval'), acc_type='sql')
    # logger.info("Evaluation costs %.2fs ; Train dataset exact match acc is %.4f ." % (time.time() - start_time, train_acc))
    start_time = time.time()
    dev_acc = decode('dev', output_path=os.path.join(args.read_model_path, 'dev.eval'), acc_type='sql')
    dev_acc_checker = decode('dev', output_path=os.path.join(args.read_model_path, 'dev.eval.checker'), acc_type='sql', use_checker=True)
    #dev_acc_beam = decode('dev', output_path=os.path.join(args.read_model_path, 'dev.eval.beam' + str(args.beam_size)), acc_type='beam')
    logger.info("Evaluation costs %.2fs ; Dev dataset exact match/checker is %.4f/%.4f ." % (time.time() - start_time, dev_acc, dev_acc_checker))
class Batch():
    """A mini-batch of examples plus tensors derived from them.

    NOTE(review): decorators appear to have been stripped in this copy.
    `from_example_list` takes `cls` (originally a @classmethod factory, it is
    called as `Batch.from_example_list(...)` in the training loop above), and
    the zero-argument accessors below read attributes such as
    `self.question_lens` that are never assigned in `__init__` -- they were
    presumably @property / @cached_property members, with the tensors being
    attached by `from_example_list_text2sql`. Confirm against the upstream
    project before relying on this copy.
    """
    def __init__(self, examples, device='cpu'):
        # Store the raw examples and the torch device used for derived tensors.
        super(Batch, self).__init__()
        self.examples = examples
        self.device = device
    def from_example_list(cls, ex_list, device='cpu', train=True, method='text2sql', **kwargs):
        # Factory: dispatch to the task-specific batching routine.
        method_dict = {
            "text2sql": from_example_list_text2sql,
        }
        return method_dict[method](ex_list, device, train=train, **kwargs)
    def __len__(self):
        # Number of examples in this batch.
        return len(self.examples)
    def __getitem__(self, idx):
        return self.examples[idx]
    def max_question_len(self):
        # Max over per-example question lengths (self.question_lens).
        return torch.max(self.question_lens).item()
    def max_table_len(self):
        # Max over per-example table counts.
        return torch.max(self.table_lens).item()
    def max_column_len(self):
        # Max over per-example column counts.
        return torch.max(self.column_lens).item()
    def max_table_word_len(self):
        # Max over table-name lengths in words.
        return torch.max(self.table_word_lens).item()
    def max_column_word_len(self):
        # Max over column-name lengths in words.
        return torch.max(self.column_word_lens).item()
    def max_question_subword_len(self):
        # Max over question lengths in subword units.
        return torch.max(self.question_subword_lens).item()
    def max_table_subword_len(self):
        # Max over table-name lengths in subword units.
        return torch.max(self.table_subword_lens).item()
    def max_column_subword_len(self):
        # Max over column-name lengths in subword units.
        return torch.max(self.column_subword_lens).item()
    """ Different types of nodes are separated instead of concatenated together """
    def mask(self):
        # Joint mask over the question + table + column node sequence.
        return torch.cat([self.question_mask, self.table_mask, self.column_mask], dim=1)
    def question_mask(self):
        return lens2mask(self.question_lens)
    def table_mask(self):
        return lens2mask(self.table_lens)
    def column_mask(self):
        return lens2mask(self.column_lens)
    def table_word_mask(self):
        return lens2mask(self.table_word_lens)
    def column_word_mask(self):
        return lens2mask(self.column_word_lens)
    def question_subword_mask(self):
        return lens2mask(self.question_subword_lens)
    def table_subword_mask(self):
        return lens2mask(self.table_subword_lens)
    def column_subword_mask(self):
        return lens2mask(self.column_subword_lens)
    def get_frontier_field_idx(self, t):
        # Id of each example's frontier field at decoding step t; 0 once the
        # example's target action sequence is exhausted (padding value).
        ids = []
        for e in self.examples:
            if t < len(e.tgt_action):
                ids.append(Example.grammar.field2id[e.tgt_action[t].frontier_field])
                # assert self.grammar.id2field[ids[-1]] == e.tgt_action[t].frontier_field
            else:
                ids.append(0)
        return torch.tensor(ids, dtype=torch.long, device=self.device)
    def get_frontier_prod_idx(self, t):
        # Id of each example's frontier production at decoding step t (0 = padding).
        ids = []
        for e in self.examples:
            if t < len(e.tgt_action):
                ids.append(Example.grammar.prod2id[e.tgt_action[t].frontier_prod])
                # assert self.grammar.id2prod[ids[-1]] == e.tgt_action[t].frontier_prod
            else:
                ids.append(0)
        return torch.tensor(ids, dtype=torch.long, device=self.device)
    def get_frontier_field_type_idx(self, t):
        # Id of the ASDL type of each example's frontier field at step t (0 = padding).
        ids = []
        for e in self.examples:
            if t < len(e.tgt_action):
                ids.append(Example.grammar.type2id[e.tgt_action[t].frontier_field.type])
                # assert self.grammar.id2type[ids[-1]] == e.tgt_action[t].frontier_field.type
            else:
                ids.append(0)
        return torch.tensor(ids, dtype=torch.long, device=self.device)
def decode(choice, output_path, acc_type='sql', use_checker=False):
    """Decode one data split with beam search and return its accuracy.

    Relies on module-level globals: model, args, device, train_dataset,
    dev_dataset, Batch and evaluator.
    """
    assert acc_type in ['beam', 'ast', 'sql'] and choice in ['train', 'dev']
    model.eval()
    split = train_dataset if choice == 'train' else dev_dataset
    hypotheses = []
    with torch.no_grad():
        for start in range(0, len(split), args.batch_size):
            eval_batch = Batch.from_example_list(split[start: start + args.batch_size], device, train=False)
            hypotheses.extend(model.parse(eval_batch, args.beam_size))
    score = evaluator.acc(hypotheses, split, output_path, acc_type=acc_type, etype='match', use_checker=use_checker)
    # release cached GPU memory and collect garbage between evaluations
    torch.cuda.empty_cache()
    gc.collect()
    return score
164,788 | import argparse
import sys
def add_argument_base(arg_parser):
    """Register general run-level and optimization arguments; returns the parser."""
    add = arg_parser.add_argument
    #### General configuration ####
    add('--task', default='text2sql', help='task name')
    add('--seed', default=999, type=int, help='Random seed')
    add('--device', type=int, default=1, help='Use which device: -1 -> cpu ; the index of gpu o.w.')
    add('--testing', action='store_true', help='training or evaluation mode')
    add('--read_model_path', type=str, help='read pretrained model path')
    #### Training Hyperparams ####
    add('--batch_size', default=20, type=int, help='Batch size')
    add('--grad_accumulate', default=1, type=int, help='accumulate grad and update once every x steps')
    add('--lr', type=float, default=5e-4, help='learning rate')
    add('--layerwise_decay', type=float, default=1.0, help='layerwise decay rate for lr, used for PLM')
    add('--l2', type=float, default=1e-4, help='weight decay coefficient')
    add('--warmup_ratio', type=float, default=0.1, help='warmup steps proportion')
    add('--lr_schedule', default='linear', choices=['constant', 'linear', 'ratsql', 'cosine'], help='lr scheduler')
    add('--eval_after_epoch', default=40, type=int, help='Start to evaluate after x epoch')
    add('--load_optimizer', action='store_true', default=False, help='Whether to load optimizer state')
    add('--max_epoch', type=int, default=100, help='terminate after maximum epochs')
    add('--max_norm', default=5., type=float, help='clip gradients')
    return arg_parser
def add_argument_encoder(arg_parser):
    """Register encoder (graph network / PLM) hyperparameters; returns the parser."""
    add = arg_parser.add_argument
    add('--model', choices=['rgatsql', 'lgesql'], default='lgesql', help='which text2sql model to use')
    add('--local_and_nonlocal', choices=['mmc', 'msde', 'local', 'global'], default='mmc',
        help='how to integrate local and non-local relations: mmc -> multi-head multi-view concatenation ; msde -> mixed static and dynamic embeddings')
    add('--output_model', choices=['without_pruning', 'with_pruning'], default='without_pruning', help='whether add graph pruning')
    add('--plm', type=str, choices=['bert-base-uncased', 'bert-large-uncased', 'bert-large-uncased-whole-word-masking',
        'roberta-base', 'roberta-large', 'grappa_large_jnt', 'electra-base-discriminator', 'electra-large-discriminator'], help='pretrained model name')
    add('--subword_aggregation', choices=['mean-pooling', 'max-pooling', 'attentive-pooling'], default='attentive-pooling', help='aggregate subword feats from PLM')
    add('--schema_aggregation', choices=['mean-pooling', 'max-pooling', 'attentive-pooling', 'head+tail'], default='head+tail', help='aggregate schema words feats')
    add('--dropout', type=float, default=0.2, help='feature dropout rate')
    add('--attn_drop', type=float, default=0., help='dropout rate of attention weights')
    add('--embed_size', default=300, type=int, help='size of word embeddings, only used in glove.42B.300d')
    add('--gnn_num_layers', default=8, type=int, help='num of GNN layers in encoder')
    add('--gnn_hidden_size', default=256, type=int, help='size of GNN layers hidden states')
    add('--num_heads', default=8, type=int, help='num of heads in multihead attn')
    add('--relation_share_layers', action='store_true')
    add('--relation_share_heads', action='store_true')
    add('--score_function', choices=['affine', 'bilinear', 'biaffine', 'dot'], default='affine', help='graph pruning score function')
    add('--smoothing', type=float, default=0.15, help='label smoothing factor for graph pruning')
    return arg_parser
def add_argument_decoder(arg_parser):
    """Register grammar-based decoder (LSTM / ASDL) hyperparameters; returns the parser."""
    add = arg_parser.add_argument
    add('--lstm', choices=['lstm', 'onlstm'], default='onlstm', help='Type of LSTM used, ONLSTM or traditional LSTM')
    add('--chunk_size', default=8, type=int, help='parameter of ONLSTM')
    add('--att_vec_size', default=512, type=int, help='size of attentional vector')
    add('--sep_cxt', action='store_true', help='when calculating context vectors, use seperate cxt for question and schema')
    add('--drop_connect', type=float, default=0.2, help='recurrent connection dropout rate in decoder lstm')
    add('--lstm_num_layers', type=int, default=1, help='num_layers of decoder')
    add('--lstm_hidden_size', default=512, type=int, help='Size of LSTM hidden states')
    add('--action_embed_size', default=128, type=int, help='Size of ApplyRule/GenToken action embeddings')
    add('--field_embed_size', default=64, type=int, help='Embedding size of ASDL fields')
    add('--type_embed_size', default=64, type=int, help='Embeddings ASDL types')
    add('--no_context_feeding', action='store_true', default=False,
        help='Do not use embedding of context vectors')
    add('--no_parent_production_embed', default=False, action='store_true',
        help='Do not use embedding of parent ASDL production to update decoder LSTM state')
    add('--no_parent_field_embed', default=False, action='store_true',
        help='Do not use embedding of parent field to update decoder LSTM state')
    add('--no_parent_field_type_embed', default=False, action='store_true',
        help='Do not use embedding of the ASDL type of parent field to update decoder LSTM state')
    add('--no_parent_state', default=False, action='store_true',
        help='Do not use the parent hidden state to update decoder LSTM state')
    add('--beam_size', default=5, type=int, help='Beam size for beam search')
    add('--decode_max_step', default=100, type=int, help='Maximum number of time steps used in decoding')
    return arg_parser
def init_args(params=sys.argv[1:]):
    """Build the full argument parser, parse `params` and return the namespace.

    After parsing, the graph-view option is reconciled with the chosen encoder
    architecture (rgatsql cannot use 'msde'; lgesql expresses 'global' as 'msde').
    """
    parser = argparse.ArgumentParser()
    for register in (add_argument_base, add_argument_encoder, add_argument_decoder):
        parser = register(parser)
    opt = parser.parse_args(params)
    if opt.model == 'rgatsql' and opt.local_and_nonlocal == 'msde':
        opt.local_and_nonlocal = 'global'
    if opt.model == 'lgesql' and opt.local_and_nonlocal == 'global':
        opt.local_and_nonlocal = 'msde'
    return opt
164,792 | import json
import os
import random
from tqdm import tqdm
from copy import deepcopy
import numpy as np
import pdb
NOISE_NUM = 4  # number of noised (corrupted) records generated per instance
def noise_entity_type(entity_list):
    """With probability 1 - THRESHOLD, replace each entity's type with a random
    type observed among the given entities. Inputs are not mutated; relies on
    the module-level constant THRESHOLD."""
    observed_types = list({entity["type"] for entity in entity_list})
    perturbed = []
    for entity in entity_list:
        copied = deepcopy(entity)
        if np.random.rand() > THRESHOLD:
            copied["type"] = random.choice(observed_types)
        perturbed.append(copied)
    return perturbed
def noise_entity_offset(entity_list, tokens):
    """Randomly widen/shift each entity span (clamped to the sentence) and refresh
    its surface text. Shift sizes come from NOISE_OFFSET_RANGE with probabilities
    NOISE_OFFSET_WEIGHT (module-level constants). Inputs are not mutated."""
    perturbed = []
    for entity in entity_list:
        copied = deepcopy(entity)
        first, last = copied["offset"][0], copied["offset"][-1]
        left_shift = np.random.choice(NOISE_OFFSET_RANGE, p=NOISE_OFFSET_WEIGHT)
        right_shift = np.random.choice(NOISE_OFFSET_RANGE, p=NOISE_OFFSET_WEIGHT)
        begin = max(first - left_shift, 0)
        end = min(last + right_shift, len(tokens) - 1)
        copied["offset"] = list(range(begin, end + 1))
        copied["text"] = " ".join(tokens[begin:end + 1])
        perturbed.append(copied)
    return perturbed
def noise_entity_with_other_entity(entity_list):
    """With probability 1 - THRESHOLD, replace each entity's mention (text+offset)
    with that of another entity of the same type. Inputs are not mutated."""
    by_type = {}
    for entity in entity_list:
        by_type.setdefault(entity["type"], []).append(entity)
    perturbed = []
    for entity in entity_list:
        copied = deepcopy(entity)
        if np.random.rand() > THRESHOLD:
            donor = random.choice(by_type[copied["type"]])
            copied["text"] = donor["text"]
            copied["offset"] = donor["offset"]
        perturbed.append(copied)
    return perturbed
def noise_relation_type(triple_list):
    """With probability 1 - THRESHOLD, replace each triple's relation type with a
    random type observed among the given triples. Inputs are not mutated."""
    observed_types = list({triple["type"] for triple in triple_list})
    perturbed = []
    for triple in triple_list:
        copied = deepcopy(triple)
        if np.random.rand() > THRESHOLD:
            copied["type"] = random.choice(observed_types)
        perturbed.append(copied)
    return perturbed
def noise_triple_num(triple_list, entity_list):
    """Perturb the number of triples: per triple, keep it (p < TRIPLE_THRESHOLD[0]),
    keep it plus a corrupted copy with a random tail entity (p < TRIPLE_THRESHOLD[1]),
    or drop it entirely."""
    perturbed = []
    for triple in triple_list:
        draw = np.random.rand()
        if draw < TRIPLE_THRESHOLD[0]:
            # keep the triple untouched
            perturbed.append(triple)
        elif draw < TRIPLE_THRESHOLD[1]:
            # keep it and append a corrupted duplicate with a random tail
            perturbed.append(triple)
            duplicate = deepcopy(triple)
            duplicate["args"][1] = random.choice(entity_list)
            perturbed.append(duplicate)
        # otherwise: drop the triple
    return perturbed
def build_trigger_list(event_list):
    """Project each event onto its trigger record {type, offset, text}."""
    return [
        {"type": event["type"], "offset": event["offset"], "text": event["text"]}
        for event in event_list
    ]
def build_argument_list(event_list):
    """Flatten the per-event "args" lists into a single list of arguments."""
    return [argument for event in event_list for argument in event["args"]]
def noise_event_num(event_list, all_trigger_list):
    """Perturb the number of events: per event, keep it (p < EVENT_THRESHOLD[0]),
    keep it plus a duplicate whose trigger fields are overwritten by a random
    trigger (p < EVENT_THRESHOLD[1]), or drop it entirely."""
    perturbed = []
    for event in event_list:
        draw = np.random.rand()
        if draw < EVENT_THRESHOLD[0]:
            perturbed.append(event)
        elif draw < EVENT_THRESHOLD[1]:
            perturbed.append(event)
            duplicate = deepcopy(event)
            duplicate.update(random.choice(all_trigger_list))
            perturbed.append(duplicate)
        # otherwise: drop the event
    return perturbed
def noise_trigger_type(event_list, all_trigger_list):
    """With probability 1 - THRESHOLD, replace each event's type with a random
    type drawn from all observed triggers. Inputs are not mutated."""
    type_pool = list({trigger["type"] for trigger in all_trigger_list})
    perturbed = []
    for event in event_list:
        copied = deepcopy(event)
        if np.random.rand() > THRESHOLD:
            copied["type"] = random.choice(type_pool)
        perturbed.append(copied)
    return perturbed
def noise_trigger_with_other_trigger(event_list, all_trigger_list):
    """With probability 1 - THRESHOLD, replace each event's trigger mention
    (text+offset) with that of a random observed trigger. Inputs not mutated."""
    mention_pool = [(trigger["text"], trigger["offset"]) for trigger in all_trigger_list]
    perturbed = []
    for event in event_list:
        copied = deepcopy(event)
        if np.random.rand() > THRESHOLD:
            text, offset = random.choice(mention_pool)
            copied["text"] = text
            copied["offset"] = offset
        perturbed.append(copied)
    return perturbed
def noise_trigger_offset(event_list, tokens):
    """Randomly widen/shift each event trigger span (clamped to the sentence)
    and refresh its surface text; shift sizes drawn from NOISE_OFFSET_RANGE
    with probabilities NOISE_OFFSET_WEIGHT. Inputs are not mutated."""
    perturbed = []
    for event in event_list:
        copied = deepcopy(event)
        first, last = copied["offset"][0], copied["offset"][-1]
        left_shift = np.random.choice(NOISE_OFFSET_RANGE, p=NOISE_OFFSET_WEIGHT)
        right_shift = np.random.choice(NOISE_OFFSET_RANGE, p=NOISE_OFFSET_WEIGHT)
        begin = max(first - left_shift, 0)
        end = min(last + right_shift, len(tokens) - 1)
        copied["offset"] = list(range(begin, end + 1))
        copied["text"] = " ".join(tokens[begin:end + 1])
        perturbed.append(copied)
    return perturbed
def noise_argument_num(event_list, all_argument_list):
    """Perturb the number of arguments inside each event: per argument, keep it
    (p < EVENT_THRESHOLD[0]), keep it plus a duplicate overwritten by a random
    argument (p < EVENT_THRESHOLD[1]), or drop it. Input events not mutated."""
    perturbed_events = []
    for event in event_list:
        copied = deepcopy(event)
        kept_arguments = []
        for argument in copied["args"]:
            draw = np.random.rand()
            if draw < EVENT_THRESHOLD[0]:
                kept_arguments.append(argument)
            elif draw < EVENT_THRESHOLD[1]:
                kept_arguments.append(argument)
                duplicate = deepcopy(argument)
                duplicate.update(random.choice(all_argument_list))
                kept_arguments.append(duplicate)
            # otherwise: drop this argument
        copied["args"] = kept_arguments
        perturbed_events.append(copied)
    return perturbed_events
def noise_argument_type(event_list, all_argument_list):
    """With probability 1 - THRESHOLD, replace each argument's role type with a
    random role type observed among all arguments. Input events not mutated.

    BUG FIX: the original assigned the sampled role to `noised_event["type"]`
    (the event type) instead of the argument being iterated, so argument roles
    were never noised and event types were silently corrupted. The sibling
    `noise_argument_with_other_argument` shows the intended per-argument update.
    """
    argument_type_list = list(set([argument["type"] for argument in all_argument_list]))
    noised_event_list = []
    for event in event_list:
        noised_event = deepcopy(event)
        for argument in noised_event["args"]:
            if np.random.rand() > THRESHOLD:
                # perturb the argument's role type, not the enclosing event type
                argument["type"] = random.choice(argument_type_list)
        noised_event_list.append(noised_event)
    return noised_event_list
def noise_argument_with_other_argument(event_list, all_argument_list):
    """With probability 1 - THRESHOLD, replace each argument's mention
    (text+offset) with that of a random observed argument. Inputs not mutated."""
    mention_pool = [(argument["text"], argument["offset"]) for argument in all_argument_list]
    perturbed = []
    for event in event_list:
        copied = deepcopy(event)
        for argument in copied["args"]:
            if np.random.rand() > THRESHOLD:
                text, offset = random.choice(mention_pool)
                argument["text"] = text
                argument["offset"] = offset
        perturbed.append(copied)
    return perturbed
def noise_argument_offset(event_list, tokens):
    """Randomly widen/shift every argument span inside each event (clamped to
    the sentence) and refresh its surface text; shift sizes drawn from
    NOISE_OFFSET_RANGE with probabilities NOISE_OFFSET_WEIGHT."""
    perturbed = []
    for event in event_list:
        copied = deepcopy(event)
        for argument in copied["args"]:
            first, last = argument["offset"][0], argument["offset"][-1]
            left_shift = np.random.choice(NOISE_OFFSET_RANGE, p=NOISE_OFFSET_WEIGHT)
            right_shift = np.random.choice(NOISE_OFFSET_RANGE, p=NOISE_OFFSET_WEIGHT)
            begin = max(first - left_shift, 0)
            end = min(last + right_shift, len(tokens) - 1)
            argument["offset"] = list(range(begin, end + 1))
            argument["text"] = " ".join(tokens[begin:end + 1])
        perturbed.append(copied)
    return perturbed
def create_entity_uri(entity_list):
    """Give every entity a string "uri" (its list index) if it lacks one.

    Mutates entities in place and returns a mapping from each entity's JSON
    serialization (taken *before* a uri is attached) to its uri.
    """
    mapping = {}
    for index, entity in enumerate(entity_list):
        if "uri" in entity:
            mapping[json.dumps(entity)] = entity["uri"]
        else:
            mapping[json.dumps(entity)] = str(index)
            entity["uri"] = str(index)
    return mapping
def update_entity_uri_in_triple(triple_list, entity_uri_mapping):
    """Ensure both endpoints of every triple carry a "uri".

    Endpoints missing a uri are looked up (by JSON serialization) in
    `entity_uri_mapping`; unseen endpoints get a fresh uri and are added to
    the mapping. Mutates triples and the mapping in place.
    """
    for triple in triple_list:
        head, tail = triple["args"]
        for endpoint in (head, tail):
            if "uri" in endpoint:
                continue
            serialized = json.dumps(endpoint)
            if serialized not in entity_uri_mapping:  # endpoint never seen before
                entity_uri_mapping[serialized] = str(len(entity_uri_mapping))
            endpoint["uri"] = entity_uri_mapping[serialized]
    return triple_list
def build_entity_dict(entity_list):
    """Index entities by their "uri" field (later duplicates overwrite earlier)."""
    return {entity["uri"]: entity for entity in entity_list}
def update_relation_triple_by_noised_entity(triple_list, noised_entity_dict):
    """Rebuild each triple with its head/tail swapped for the noised entity of
    the same uri (kept as-is when the uri is absent from the dict). Input
    triples are deep-copied, not mutated."""
    refreshed = []
    for triple in triple_list:
        copied = deepcopy(triple)
        head, tail = copied["args"]
        copied["args"] = [
            noised_entity_dict.get(head["uri"], head),
            noised_entity_dict.get(tail["uri"], tail),
        ]
        refreshed.append(copied)
    return refreshed
def create_spot_asoc_field(instance_entity_list, instance_triple_list, instance_event_list):
    """Build the UIE-style spot/asoc structure for one instance.

    Each entity becomes a spot whose asoc entries are the [relation, tail-text]
    pairs of triples headed by it (matched via "uri"); each event becomes a
    spot whose asoc entries are its [role, argument-text] pairs.
    """
    spot_asoc_list = []
    for entity in instance_entity_list:
        relations = [
            [triple["type"], triple["args"][1]["text"]]
            for triple in instance_triple_list
            if triple["args"][0]["uri"] == entity["uri"]
        ]
        spot_asoc_list.append({"span": entity["text"], "label": entity["type"], "asoc": relations})
    for event in instance_event_list:
        roles = [[argument["type"], argument["text"]] for argument in event["args"]]
        spot_asoc_list.append({"span": event["text"], "label": event["type"], "asoc": roles})
    return spot_asoc_list
def create_record_field(instance_spot_asoc_list):
    """Linearize a spot/asoc list into the UIE record string.

    Layout: <extra_id_0> opens a group, <extra_id_5> separates label from span,
    <extra_id_1> closes a group; the whole record is wrapped in one outer pair.
    Assembled with a parts list + join instead of repeated concatenation.
    """
    pieces = ["<extra_id_0> "]
    for spot_asoc in instance_spot_asoc_list:
        pieces.append("<extra_id_0> ")
        pieces.append(spot_asoc["label"] + " ")
        pieces.append("<extra_id_5> ")
        pieces.append(spot_asoc["span"] + " ")
        for asoc in spot_asoc["asoc"]:
            pieces.append("<extra_id_0> ")
            pieces.append(asoc[0] + " ")
            pieces.append("<extra_id_5> ")
            pieces.append(asoc[1] + " ")
            pieces.append("<extra_id_1> ")
        pieces.append("<extra_id_1> ")
    pieces.append("<extra_id_1>")
    return "".join(pieces)
def create_noised_record(tokens, entity_list, triple_list, event_list):
    """Produce NOISE_NUM corrupted linearized records for one instance.

    Temporarily attaches "uri" bookkeeping fields to entities and triple
    endpoints, runs the full entity/triple/event noising pipeline NOISE_NUM
    times, linearizes each result, then strips the uri fields again.
    """
    uri_mapping = create_entity_uri(entity_list)
    triple_list = update_entity_uri_in_triple(triple_list, uri_mapping)
    trigger_pool = build_trigger_list(event_list)
    argument_pool = build_argument_list(event_list)
    noised_records = []
    for _ in range(NOISE_NUM):
        # corrupt entities
        entities = noise_entity_offset(entity_list, tokens)
        entities = noise_entity_with_other_entity(entities)
        entities = noise_entity_type(entities)
        entity_dict = build_entity_dict(entities)
        # corrupt relation triples (endpoints follow the corrupted entities)
        triples = update_relation_triple_by_noised_entity(triple_list, entity_dict)
        triples = noise_relation_type(triples)
        triples = noise_triple_num(triples, entities)
        # corrupt events, triggers and arguments
        events = noise_event_num(event_list, trigger_pool)
        events = noise_trigger_type(events, trigger_pool)
        events = noise_trigger_with_other_trigger(events, trigger_pool)
        events = noise_trigger_offset(events, tokens)
        events = noise_argument_num(events, argument_pool)
        events = noise_argument_type(events, argument_pool)
        events = noise_argument_with_other_argument(events, argument_pool)
        events = noise_argument_offset(events, tokens)
        # linearize the corrupted structures into a record string
        spot_asoc = create_spot_asoc_field(entities, triples, events)
        noised_records.append(create_record_field(spot_asoc))
    # remove the temporary uri fields
    for entity in entity_list:
        del entity["uri"]
    for triple in triple_list:
        head, tail = triple["args"]
        del head["uri"]
        del tail["uri"]
    return noised_records
164,793 | from collections import defaultdict
import os
from typing import List
def find_bracket_position(generated_text, _type_start, _type_end):
    """Collect the indices of the start/end bracket tokens in the sequence,
    returned as {start_token: [indices], end_token: [indices]}."""
    positions = {_type_start: [], _type_end: []}
    for index, character in enumerate(generated_text):
        if character in positions:
            positions[character].append(index)
    return positions
164,794 | from collections import defaultdict
import os
from typing import List
def build_sentence_tree(sentence):
    """Map every token to the set of tokens that directly follow it."""
    tree = defaultdict(set)
    for index in range(1, len(sentence)):
        tree[sentence[index - 1]].add(sentence[index])
    return tree
164,795 | from collections import defaultdict
import os
from typing import List
def generated_search_prefix_tree(generated, prefix_tree, tokenizer):
    """Walk the prefix tree along the generated tokens and return the valid
    next tokens; once the walk falls off the tree, only EOS is allowed."""
    node = prefix_tree
    for token in generated:
        if token not in node:
            return [tokenizer.eos_token]
        node = node[token]
    return list(node)
164,796 | from collections import defaultdict
import os
from typing import List
def match_sublist(the_list, to_match):
    """Return (start, end) inclusive index pairs of every occurrence of
    `to_match` as a contiguous sublist of `the_list`.

    >>> match_sublist([1, 2, 3, 4, 5, 6, 1, 2, 4, 5], [1, 2])
    [(0, 1), (6, 7)]
    """
    window = len(to_match)
    hits = []
    for begin in range(len(the_list) - window + 1):
        if the_list[begin:begin + window] == to_match:
            hits.append((begin, begin + window - 1))
    return hits
def generated_search_src_sequence(generated, src_sequence, end_sequence_search_tokens=None):
    """Return the source tokens that may legally follow the generated suffix.

    Finds every occurrence of `generated` inside `src_sequence` (via
    match_sublist) and collects the token right after each match; optional
    end-of-sequence tokens are always appended as valid continuations.
    """
    if len(generated) == 0:
        # before anything is generated, every source token is a valid start
        return src_sequence
    candidates = []
    for _, end in match_sublist(the_list=src_sequence, to_match=generated):
        follow = end + 1
        if follow < len(src_sequence):
            candidates.append(src_sequence[follow])
    if end_sequence_search_tokens:
        candidates.extend(end_sequence_search_tokens)
    return candidates
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.