id
int64
0
190k
prompt
stringlengths
21
13.4M
docstring
stringlengths
1
12k
164,005
from zss import simple_distance, Node from space.utils.decorators import ignore_nodes def str_dist(a, b): if a == b: return 0 else: return 1
null
164,006
from zss import simple_distance, Node from space.utils.decorators import ignore_nodes The provided code snippet includes necessary dependencies for implementing the `reorder` function. Write a Python function `def reorder(root)` to solve the following problem: reorder Here is the function: def reorder(root): """ reorder """ def _reorder(node, new_node): children = Node.get_children(node) if children: children = sorted(children, key=lambda child: Node.get_label(child)) for child in children: new_node.addkid(Node(Node.get_label(child))) new_children = Node.get_children(new_node) for child, new_child in zip(children, new_children): _reorder(node=child, new_node=new_child) new_root = Node("root") _reorder(node=root, new_node=new_root) return new_root
reorder
164,007
from zss import simple_distance, Node from space.utils.decorators import ignore_nodes The provided code snippet includes necessary dependencies for implementing the `reorder_` function. Write a Python function `def reorder_(root)` to solve the following problem: reorder in place 但__str__函数无法修正 Here is the function: def reorder_(root): """ reorder in place 但__str__函数无法修正 """ def _reorder(node): children = Node.get_children(node) if children: children = sorted(children, key=lambda child: Node.get_label(child)) for child in children: _reorder(node=child) node.children = children _reorder(node=root)
reorder in place 但__str__函数无法修正
164,008
from zss import simple_distance, Node from space.utils.decorators import ignore_nodes def jaccard_dis_sim(x, y): """ Jaccard Distance Similarity """ res = len(set.intersection(*[set(x), set(y)])) union_cardinality = len(set.union(*[set(x), set(y)])) if union_cardinality: return res / float(union_cardinality), 1 else: return 0., 0 def clean_frame(frame): cleaned_frame = {} for domain, domain_frame in frame.items(): cleaned_frame[domain.strip().lower()] = clean_domain_frame(frame=domain_frame) return cleaned_frame def construct_frame_graph(frame): assert frame domain_nodes, act_nodes, slot_nodes, value_nodes = [], [], [], [] # 1-gram list (nodes) domain_act_edges, act_slot_edges, slot_value_edges = [], [], [] # 2-gram list (edges) domain_act_slot_paths, act_slot_value_paths = [], [] # 3-gram list (paths) domain_act_slot_value_paths = [] # 4-gram list (paths) domain_nodes.extend(list(frame.keys())) for domain, domain_frame in frame.items(): single_act_nodes, single_slot_nodes, single_value_nodes, single_act_slot_edges, single_slot_value_edges, \ single_act_slot_value_paths = construct_domain_frame_graph(frame=domain_frame) act_nodes.extend(single_act_nodes) slot_nodes.extend(single_slot_nodes) value_nodes.extend(single_value_nodes) act_slot_edges.extend(single_act_slot_edges) slot_value_edges.extend(single_slot_value_edges) act_slot_value_paths.extend(single_act_slot_value_paths) domain_act_edges.extend([f'{domain}-{act}' for act in single_act_nodes]) domain_act_slot_paths.extend([f'{domain}-{act_slot}' for act_slot in single_act_slot_edges]) domain_act_slot_value_paths.extend([f'{domain}-{act_slot_value}' for act_slot_value in single_act_slot_value_paths]) return domain_nodes, act_nodes, slot_nodes, value_nodes, domain_act_edges, act_slot_edges, slot_value_edges, \ domain_act_slot_paths, act_slot_value_paths, domain_act_slot_value_paths def hierarchical_set_score(frame1, frame2, return_all=False): # deal with empty frame if not (frame1 and frame2): return (0.,) * 10 
if return_all else 0. # clean frame frame1 = clean_frame(frame=frame1) frame2 = clean_frame(frame=frame2) if frame1 == frame2 and not return_all: return 1. # construct frame graph domain_nodes1, act_nodes1, slot_nodes1, value_nodes1, domain_act_edges1, act_slot_edges1, slot_value_edges1, \ domain_act_slot_paths1, act_slot_value_paths1, domain_act_slot_value_paths1 = \ construct_frame_graph(frame=frame1) domain_nodes2, act_nodes2, slot_nodes2, value_nodes2, domain_act_edges2, act_slot_edges2, slot_value_edges2, \ domain_act_slot_paths2, act_slot_value_paths2, domain_act_slot_value_paths2 = \ construct_frame_graph(frame=frame2) # compute individual score domain_score = jaccard_dis_sim(domain_nodes1, domain_nodes2) act_score = jaccard_dis_sim(act_nodes1, act_nodes2) slot_score = jaccard_dis_sim(slot_nodes1, slot_nodes2) value_score = jaccard_dis_sim(value_nodes1, value_nodes2) domain_act_score = jaccard_dis_sim(domain_act_edges1, domain_act_edges2) act_slot_score = jaccard_dis_sim(act_slot_edges1, act_slot_edges2) slot_value_score = jaccard_dis_sim(slot_value_edges1, slot_value_edges2) domain_act_slot_score = jaccard_dis_sim(domain_act_slot_paths1, domain_act_slot_paths2) act_slot_value_score = jaccard_dis_sim(act_slot_value_paths1, act_slot_value_paths2) domain_act_slot_value_score = jaccard_dis_sim(domain_act_slot_value_paths1, domain_act_slot_value_paths2) if return_all: return domain_score[0], act_score[0], slot_score[0], value_score[0], domain_act_score[0], \ act_slot_score[0], slot_value_score[0], domain_act_slot_score[0], act_slot_value_score[0], \ domain_act_slot_value_score[0] else: # compute combined score score, num_score = 0., 0 for single_score in (domain_score, act_score, slot_score, value_score, domain_act_score, act_slot_score, slot_value_score, domain_act_slot_score, act_slot_value_score, domain_act_slot_value_score): score += single_score[0] num_score += single_score[1] score = score / num_score return score
null
164,009
import math, logging, copy, json from collections import Counter, OrderedDict import numpy as np from nltk.util import ngrams from sklearn.metrics import f1_score def DAEvaluate(preds, labels): preds = np.array(preds) labels = np.array(labels) results = {} # for avg_name in ['micro', 'macro', 'weighted', 'samples']: for avg_name in ['micro']: my_f1_score = f1_score(y_true=labels, y_pred=preds, average=avg_name) results["f1_{}".format(avg_name)] = my_f1_score return results
null
164,010
import json import re from tqdm import tqdm from utils_dst import (DSTExample, convert_to_unicode) LABEL_MAPS = {} def load_acts(input_file): with open(input_file) as f: acts = json.load(f) s_dict = {} for d in acts: for t in acts[d]: if int(t) % 2 == 0: continue # Only process, if turn has annotation if isinstance(acts[d][t]['dialog_act'], dict): for a in acts[d][t]['dialog_act']: aa = a.lower().split('-') if aa[1] == 'inform' or aa[1] == 'recommend' or aa[1] == 'select' or aa[1] == 'book': for i in acts[d][t]['dialog_act'][a]: s = i[0].lower() v = i[1].lower().strip() if s == 'none' or v == '?' or v == 'none': continue slot = aa[0] + '-' + s if slot in ACTS_DICT: slot = ACTS_DICT[slot] key = d, str(int(t) // 2 + 1), slot # In case of multiple mentioned values... # ... Option 1: Keep first informed value if key not in s_dict: s_dict[key] = list([v]) # ... Option 2: Keep last informed value #s_dict[key] = list([v]) return s_dict def normalize_label(slot, value_label): # Normalization of empty slots if value_label == '' or value_label == "not mentioned": return "none" # Normalization of time slots if "leaveAt" in slot or "arriveBy" in slot or slot == 'restaurant-book_time': return normalize_time(value_label) # Normalization if "type" in slot or "name" in slot or "destination" in slot or "departure" in slot: value_label = re.sub("guesthouse", "guest house", value_label) # Map to boolean slots if slot == 'hotel-parking' or slot == 'hotel-internet': if value_label == 'yes' or value_label == 'free': return "true" if value_label == "no": return "false" if slot == 'hotel-type': if value_label == "hotel": return "true" if value_label == "guest house": return "false" return value_label def delex_utt(utt, values, unk_token="[UNK]"): utt_norm = tokenize(utt) for s, vals in values.items(): for v in vals: if v != 'none': v_norm = tokenize(v) v_len = len(v_norm) for i in range(len(utt_norm) + 1 - v_len): if utt_norm[i:i + v_len] == v_norm: utt_norm[i:i + v_len] = [unk_token] * 
v_len return utt_norm def get_turn_label(value_label, inform_label, sys_utt_tok, usr_utt_tok, slot, seen_slots, slot_last_occurrence): usr_utt_tok_label = [0 for _ in usr_utt_tok] informed_value = 'none' referred_slot = 'none' if value_label == 'none' or value_label == 'dontcare' or value_label == 'true' or value_label == 'false': class_type = value_label else: in_usr, usr_pos = check_label_existence(value_label, usr_utt_tok) is_informed, informed_value = check_slot_inform(value_label, inform_label) if in_usr: class_type = 'copy_value' if slot_last_occurrence: (s, e) = usr_pos[-1] for i in range(s, e): usr_utt_tok_label[i] = 1 else: for (s, e) in usr_pos: for i in range(s, e): usr_utt_tok_label[i] = 1 elif is_informed: class_type = 'inform' else: referred_slot = check_slot_referral(value_label, slot, seen_slots) if referred_slot != 'none': class_type = 'refer' else: class_type = 'unpointable' return informed_value, referred_slot, usr_utt_tok_label, class_type def tokenize(utt): utt_lower = convert_to_unicode(utt).lower() utt_lower = normalize_text(utt_lower) utt_tok = [tok for tok in map(str.strip, re.split("(\W+)", utt_lower)) if len(tok) > 0] return utt_tok class DSTExample(object): """ A single training/test example for the DST dataset. 
""" def __init__(self, guid, text_a, text_b, audio_a, audio_b, history,text_a_label, text_b_label, history_label=None, values=None, inform_label=None, inform_slot_label=None, refer_label=None, diag_state=None, class_label=None): self.guid = guid self.text_a = text_a self.text_b = text_b self.audio_a = audio_a self.audio_b = audio_b self.history = history self.text_a_label = text_a_label self.text_b_label = text_b_label self.history_label = history_label self.values = values self.inform_label = inform_label self.inform_slot_label = inform_slot_label self.refer_label = refer_label self.diag_state = diag_state self.class_label = class_label def __str__(self): return self.__repr__() def __repr__(self): s = '' for k, v in self.__dict__.items(): s += f'{k} : {v} \n' return s The provided code snippet includes necessary dependencies for implementing the `create_examples` function. Write a Python function `def create_examples(input_file, acts_file, set_type, slot_list, label_maps={}, append_history=False, use_history_labels=False, swap_utterances=False, label_value_repetitions=False, delexicalize_sys_utts=False, unk_token="[UNK]", analyze=False)` to solve the following problem: Read a DST json file into a list of DSTExample. 
Here is the function: def create_examples(input_file, acts_file, set_type, slot_list, label_maps={}, append_history=False, use_history_labels=False, swap_utterances=False, label_value_repetitions=False, delexicalize_sys_utts=False, unk_token="[UNK]", analyze=False): """Read a DST json file into a list of DSTExample.""" sys_inform_dict = load_acts(acts_file) with open(input_file, "r", encoding='utf-8') as reader: input_data = json.load(reader) global LABEL_MAPS LABEL_MAPS = label_maps examples = [] for dialog_id in tqdm(input_data): entry = input_data[dialog_id] utterances = entry['log'] # Collects all slot changes throughout the dialog cumulative_labels = {slot: 'none' for slot in slot_list} # First system utterance is empty, since multiwoz starts with user input utt_tok_list = [[]] mod_slots_list = [{}] # Collect all utterances and their metadata usr_sys_switch = True turn_itr = 0 for utt in utterances: # Assert that system and user utterances alternate is_sys_utt = utt['metadata'] != {} if usr_sys_switch == is_sys_utt: print("WARN: Wrong order of system and user utterances. 
Skipping rest of dialog %s" % (dialog_id)) break usr_sys_switch = is_sys_utt if is_sys_utt: turn_itr += 1 # Delexicalize sys utterance if delexicalize_sys_utts and is_sys_utt: inform_dict = {slot: 'none' for slot in slot_list} for slot in slot_list: if (str(dialog_id), str(turn_itr), slot) in sys_inform_dict: inform_dict[slot] = sys_inform_dict[(str(dialog_id), str(turn_itr), slot)] utt_tok_list.append(delex_utt(utt['text'], inform_dict, unk_token)) # normalize utterances else: utt_tok_list.append(tokenize(utt['text'])) # normalize utterances modified_slots = {} # If sys utt, extract metadata (identify and collect modified slots) if is_sys_utt: for d in utt['metadata']: booked = utt['metadata'][d]['book']['booked'] booked_slots = {} # Check the booked section if booked != []: for s in booked[0]: booked_slots[s] = normalize_label('%s-%s' % (d, s), booked[0][s]) # normalize labels # Check the semi and the inform slots for category in ['book', 'semi']: for s in utt['metadata'][d][category]: cs = '%s-book_%s' % (d, s) if category == 'book' else '%s-%s' % (d, s) value_label = normalize_label(cs, utt['metadata'][d][category][s]) # normalize labels # Prefer the slot value as stored in the booked section if s in booked_slots: value_label = booked_slots[s] # Remember modified slots and entire dialog state if cs in slot_list and cumulative_labels[cs] != value_label: modified_slots[cs] = value_label cumulative_labels[cs] = value_label mod_slots_list.append(modified_slots.copy()) # Form proper (usr, sys) turns turn_itr = 0 diag_seen_slots_dict = {} diag_seen_slots_value_dict = {slot: 'none' for slot in slot_list} diag_state = {slot: 'none' for slot in slot_list} sys_utt_tok = [] usr_utt_tok = [] hst_utt_tok = [] hst_utt_tok_label_dict = {slot: [] for slot in slot_list} for i in range(1, len(utt_tok_list) - 1, 2): sys_utt_tok_label_dict = {} usr_utt_tok_label_dict = {} value_dict = {} inform_dict = {} inform_slot_dict = {} referral_dict = {} class_type_dict = {} # Collect turn 
data if append_history: if swap_utterances: hst_utt_tok = usr_utt_tok + sys_utt_tok + hst_utt_tok else: hst_utt_tok = sys_utt_tok + usr_utt_tok + hst_utt_tok sys_utt_tok = utt_tok_list[i - 1] usr_utt_tok = utt_tok_list[i] turn_slots = mod_slots_list[i + 1] guid = '%s-%s-%s' % (set_type, str(dialog_id), str(turn_itr)) if analyze: print("%15s %2s %s ||| %s" % (dialog_id, turn_itr, ' '.join(sys_utt_tok), ' '.join(usr_utt_tok))) print("%15s %2s [" % (dialog_id, turn_itr), end='') new_hst_utt_tok_label_dict = hst_utt_tok_label_dict.copy() new_diag_state = diag_state.copy() for slot in slot_list: value_label = 'none' if slot in turn_slots: value_label = turn_slots[slot] # We keep the original labels so as to not # overlook unpointable values, as well as to not # modify any of the original labels for test sets, # since this would make comparison difficult. value_dict[slot] = value_label elif label_value_repetitions and slot in diag_seen_slots_dict: value_label = diag_seen_slots_value_dict[slot] # Get dialog act annotations inform_label = list(['none']) inform_slot_dict[slot] = 0 if (str(dialog_id), str(turn_itr), slot) in sys_inform_dict: inform_label = list([normalize_label(slot, i) for i in sys_inform_dict[(str(dialog_id), str(turn_itr), slot)]]) inform_slot_dict[slot] = 1 elif (str(dialog_id), str(turn_itr), 'booking-' + slot.split('-')[1]) in sys_inform_dict: inform_label = list([normalize_label(slot, i) for i in sys_inform_dict[(str(dialog_id), str(turn_itr), 'booking-' + slot.split('-')[1])]]) inform_slot_dict[slot] = 1 (informed_value, referred_slot, usr_utt_tok_label, class_type) = get_turn_label(value_label, inform_label, sys_utt_tok, usr_utt_tok, slot, diag_seen_slots_value_dict, slot_last_occurrence=True) inform_dict[slot] = informed_value # Generally don't use span prediction on sys utterance (but inform prediction instead). sys_utt_tok_label = [0 for _ in sys_utt_tok] # Determine what to do with value repetitions. 
# If value is unique in seen slots, then tag it, otherwise not, # since correct slot assignment can not be guaranteed anymore. if label_value_repetitions and slot in diag_seen_slots_dict: if class_type == 'copy_value' and list(diag_seen_slots_value_dict.values()).count(value_label) > 1: class_type = 'none' usr_utt_tok_label = [0 for _ in usr_utt_tok_label] sys_utt_tok_label_dict[slot] = sys_utt_tok_label usr_utt_tok_label_dict[slot] = usr_utt_tok_label if append_history: if use_history_labels: if swap_utterances: new_hst_utt_tok_label_dict[slot] = usr_utt_tok_label + sys_utt_tok_label + new_hst_utt_tok_label_dict[slot] else: new_hst_utt_tok_label_dict[slot] = sys_utt_tok_label + usr_utt_tok_label + new_hst_utt_tok_label_dict[slot] else: new_hst_utt_tok_label_dict[slot] = [0 for _ in sys_utt_tok_label + usr_utt_tok_label + new_hst_utt_tok_label_dict[slot]] # For now, we map all occurences of unpointable slot values # to none. However, since the labels will still suggest # a presence of unpointable slot values, the task of the # DST is still to find those values. It is just not # possible to do that via span prediction on the current input. if class_type == 'unpointable': class_type_dict[slot] = 'none' referral_dict[slot] = 'none' if analyze: if slot not in diag_seen_slots_dict or value_label != diag_seen_slots_value_dict[slot]: print("(%s): %s, " % (slot, value_label), end='') elif slot in diag_seen_slots_dict and class_type == diag_seen_slots_dict[slot] and class_type != 'copy_value' and class_type != 'inform': # If slot has seen before and its class type did not change, label this slot a not present, # assuming that the slot has not actually been mentioned in this turn. # Exceptions are copy_value and inform. If a seen slot has been tagged as copy_value or inform, # this must mean there is evidence in the original labels, therefore consider # them as mentioned again. 
class_type_dict[slot] = 'none' referral_dict[slot] = 'none' else: class_type_dict[slot] = class_type referral_dict[slot] = referred_slot # Remember that this slot was mentioned during this dialog already. if class_type != 'none': diag_seen_slots_dict[slot] = class_type diag_seen_slots_value_dict[slot] = value_label new_diag_state[slot] = class_type # Unpointable is not a valid class, therefore replace with # some valid class for now... if class_type == 'unpointable': new_diag_state[slot] = 'copy_value' if analyze: print("]") if swap_utterances: txt_a = usr_utt_tok txt_b = sys_utt_tok txt_a_lbl = usr_utt_tok_label_dict txt_b_lbl = sys_utt_tok_label_dict else: txt_a = sys_utt_tok txt_b = usr_utt_tok txt_a_lbl = sys_utt_tok_label_dict txt_b_lbl = usr_utt_tok_label_dict examples.append(DSTExample( guid=guid, text_a=txt_a, text_b=txt_b, history=hst_utt_tok, text_a_label=txt_a_lbl, text_b_label=txt_b_lbl, history_label=hst_utt_tok_label_dict, values=diag_seen_slots_value_dict.copy(), inform_label=inform_dict, inform_slot_label=inform_slot_dict, refer_label=referral_dict, diag_state=diag_state, class_label=class_type_dict)) # Update some variables. hst_utt_tok_label_dict = new_hst_utt_tok_label_dict.copy() diag_state = new_diag_state.copy() turn_itr += 1 if analyze: print("----------------------------------------------------------------------") return examples
Read a DST json file into a list of DSTExample.
164,011
import json import re from utils_dst import (DSTExample, convert_to_unicode) LABEL_MAPS = {} def load_acts(input_file): with open(input_file) as f: acts = json.load(f) s_dict = {} for d in acts: for t in acts[d]: # Only process, if turn has annotation if isinstance(acts[d][t], dict): for a in acts[d][t]: aa = a.lower().split('-') if aa[1] == 'inform' or aa[1] == 'recommend' or aa[1] == 'select' or aa[1] == 'book': for i in acts[d][t][a]: s = i[0].lower() v = i[1].lower().strip() if s == 'none' or v == '?' or v == 'none': continue slot = aa[0] + '-' + s if slot in ACTS_DICT: slot = ACTS_DICT[slot] key = d + '.json', t, slot # In case of multiple mentioned values... # ... Option 1: Keep first informed value if key not in s_dict: s_dict[key] = list([v]) # ... Option 2: Keep last informed value #s_dict[key] = list([v]) return s_dict def normalize_label(slot, value_label): # Normalization of empty slots if value_label == '' or value_label == "not mentioned": return "none" # Normalization of time slots if "leaveAt" in slot or "arriveBy" in slot or slot == 'restaurant-book_time': return normalize_time(value_label) # Normalization if "type" in slot or "name" in slot or "destination" in slot or "departure" in slot: value_label = re.sub("guesthouse", "guest house", value_label) # Map to boolean slots if slot == 'hotel-parking' or slot == 'hotel-internet': if value_label == 'yes' or value_label == 'free': return "true" if value_label == "no": return "false" if slot == 'hotel-type': if value_label == "hotel": return "true" if value_label == "guest house": return "false" return value_label def delex_utt(utt, values): utt_norm = tokenize(utt) for s, vals in values.items(): for v in vals: if v != 'none': v_norm = tokenize(v) v_len = len(v_norm) for i in range(len(utt_norm) + 1 - v_len): if utt_norm[i:i + v_len] == v_norm: utt_norm[i:i + v_len] = ['[UNK]'] * v_len return utt_norm def get_turn_label(value_label, inform_label, sys_utt_tok, usr_utt_tok, slot, seen_slots, 
slot_last_occurrence): usr_utt_tok_label = [0 for _ in usr_utt_tok] informed_value = 'none' referred_slot = 'none' if value_label == 'none' or value_label == 'dontcare' or value_label == 'true' or value_label == 'false': class_type = value_label else: in_usr, usr_pos = check_label_existence(value_label, usr_utt_tok) if in_usr: class_type = 'copy_value' if slot_last_occurrence: (s, e) = usr_pos[-1] for i in range(s, e): usr_utt_tok_label[i] = 1 else: for (s, e) in usr_pos: for i in range(s, e): usr_utt_tok_label[i] = 1 else: is_informed, informed_value = check_slot_inform(value_label, inform_label) if is_informed: class_type = 'inform' else: referred_slot = check_slot_referral(value_label, slot, seen_slots) if referred_slot != 'none': class_type = 'refer' else: class_type = 'unpointable' return informed_value, referred_slot, usr_utt_tok_label, class_type def tokenize(utt): utt_lower = convert_to_unicode(utt).lower() utt_lower = normalize_text(utt_lower) utt_tok = [tok for tok in map(str.strip, re.split("(\W+)", utt_lower)) if len(tok) > 0] return utt_tok class DSTExample(object): """ A single training/test example for the DST dataset. 
""" def __init__(self, guid, text_a, text_b, audio_a, audio_b, history,text_a_label, text_b_label, history_label=None, values=None, inform_label=None, inform_slot_label=None, refer_label=None, diag_state=None, class_label=None): self.guid = guid self.text_a = text_a self.text_b = text_b self.audio_a = audio_a self.audio_b = audio_b self.history = history self.text_a_label = text_a_label self.text_b_label = text_b_label self.history_label = history_label self.values = values self.inform_label = inform_label self.inform_slot_label = inform_slot_label self.refer_label = refer_label self.diag_state = diag_state self.class_label = class_label def __str__(self): return self.__repr__() def __repr__(self): s = '' for k, v in self.__dict__.items(): s += f'{k} : {v} \n' return s The provided code snippet includes necessary dependencies for implementing the `create_examples` function. Write a Python function `def create_examples(input_file, acts_file, set_type, slot_list, label_maps={}, append_history=False, use_history_labels=False, swap_utterances=False, label_value_repetitions=False, delexicalize_sys_utts=False, analyze=False)` to solve the following problem: Read a DST json file into a list of DSTExample. 
Here is the function: def create_examples(input_file, acts_file, set_type, slot_list, label_maps={}, append_history=False, use_history_labels=False, swap_utterances=False, label_value_repetitions=False, delexicalize_sys_utts=False, analyze=False): """Read a DST json file into a list of DSTExample.""" sys_inform_dict = load_acts(acts_file) with open(input_file, "r", encoding='utf-8') as reader: input_data = json.load(reader) global LABEL_MAPS LABEL_MAPS = label_maps examples = [] for dialog_id in input_data: entry = input_data[dialog_id] utterances = entry['log'] # Collects all slot changes throughout the dialog cumulative_labels = {slot: 'none' for slot in slot_list} # First system utterance is empty, since multiwoz starts with user input utt_tok_list = [[]] mod_slots_list = [{}] # Collect all utterances and their metadata usr_sys_switch = True turn_itr = 0 for utt in utterances: # Assert that system and user utterances alternate is_sys_utt = utt['metadata'] != {} if usr_sys_switch == is_sys_utt: print("WARN: Wrong order of system and user utterances. 
Skipping rest of dialog %s" % (dialog_id)) break usr_sys_switch = is_sys_utt if is_sys_utt: turn_itr += 1 # Delexicalize sys utterance if delexicalize_sys_utts and is_sys_utt: inform_dict = {slot: 'none' for slot in slot_list} for slot in slot_list: if (str(dialog_id), str(turn_itr), slot) in sys_inform_dict: inform_dict[slot] = sys_inform_dict[(str(dialog_id), str(turn_itr), slot)] utt_tok_list.append(delex_utt(utt['text'], inform_dict)) # normalize utterances else: utt_tok_list.append(tokenize(utt['text'])) # normalize utterances modified_slots = {} # If sys utt, extract metadata (identify and collect modified slots) if is_sys_utt: for d in utt['metadata']: booked = utt['metadata'][d]['book']['booked'] booked_slots = {} # Check the booked section if booked != []: for s in booked[0]: booked_slots[s] = normalize_label('%s-%s' % (d, s), booked[0][s]) # normalize labels # Check the semi and the inform slots for category in ['book', 'semi']: for s in utt['metadata'][d][category]: cs = '%s-book_%s' % (d, s) if category == 'book' else '%s-%s' % (d, s) value_label = normalize_label(cs, utt['metadata'][d][category][s]) # normalize labels # Prefer the slot value as stored in the booked section if s in booked_slots: value_label = booked_slots[s] # Remember modified slots and entire dialog state if cs in slot_list and cumulative_labels[cs] != value_label: modified_slots[cs] = value_label cumulative_labels[cs] = value_label mod_slots_list.append(modified_slots.copy()) # Form proper (usr, sys) turns turn_itr = 0 diag_seen_slots_dict = {} diag_seen_slots_value_dict = {slot: 'none' for slot in slot_list} diag_state = {slot: 'none' for slot in slot_list} sys_utt_tok = [] usr_utt_tok = [] hst_utt_tok = [] hst_utt_tok_label_dict = {slot: [] for slot in slot_list} for i in range(1, len(utt_tok_list) - 1, 2): sys_utt_tok_label_dict = {} usr_utt_tok_label_dict = {} value_dict = {} inform_dict = {} inform_slot_dict = {} referral_dict = {} class_type_dict = {} # Collect turn data if 
append_history: if swap_utterances: hst_utt_tok = usr_utt_tok + sys_utt_tok + hst_utt_tok else: hst_utt_tok = sys_utt_tok + usr_utt_tok + hst_utt_tok sys_utt_tok = utt_tok_list[i - 1] usr_utt_tok = utt_tok_list[i] turn_slots = mod_slots_list[i + 1] guid = '%s-%s-%s' % (set_type, str(dialog_id), str(turn_itr)) if analyze: print("%15s %2s %s ||| %s" % (dialog_id, turn_itr, ' '.join(sys_utt_tok), ' '.join(usr_utt_tok))) print("%15s %2s [" % (dialog_id, turn_itr), end='') new_hst_utt_tok_label_dict = hst_utt_tok_label_dict.copy() new_diag_state = diag_state.copy() for slot in slot_list: value_label = 'none' if slot in turn_slots: value_label = turn_slots[slot] # We keep the original labels so as to not # overlook unpointable values, as well as to not # modify any of the original labels for test sets, # since this would make comparison difficult. value_dict[slot] = value_label elif label_value_repetitions and slot in diag_seen_slots_dict: value_label = diag_seen_slots_value_dict[slot] # Get dialog act annotations inform_label = list(['none']) if (str(dialog_id), str(turn_itr), slot) in sys_inform_dict: inform_label = list([normalize_label(slot, i) for i in sys_inform_dict[(str(dialog_id), str(turn_itr), slot)]]) elif (str(dialog_id), str(turn_itr), 'booking-' + slot.split('-')[1]) in sys_inform_dict: inform_label = list([normalize_label(slot, i) for i in sys_inform_dict[(str(dialog_id), str(turn_itr), 'booking-' + slot.split('-')[1])]]) (informed_value, referred_slot, usr_utt_tok_label, class_type) = get_turn_label(value_label, inform_label, sys_utt_tok, usr_utt_tok, slot, diag_seen_slots_value_dict, slot_last_occurrence=True) inform_dict[slot] = informed_value if informed_value != 'none': inform_slot_dict[slot] = 1 else: inform_slot_dict[slot] = 0 # Generally don't use span prediction on sys utterance (but inform prediction instead). sys_utt_tok_label = [0 for _ in sys_utt_tok] # Determine what to do with value repetitions. 
# If value is unique in seen slots, then tag it, otherwise not, # since correct slot assignment can not be guaranteed anymore. if label_value_repetitions and slot in diag_seen_slots_dict: if class_type == 'copy_value' and list(diag_seen_slots_value_dict.values()).count(value_label) > 1: class_type = 'none' usr_utt_tok_label = [0 for _ in usr_utt_tok_label] sys_utt_tok_label_dict[slot] = sys_utt_tok_label usr_utt_tok_label_dict[slot] = usr_utt_tok_label if append_history: if use_history_labels: if swap_utterances: new_hst_utt_tok_label_dict[slot] = usr_utt_tok_label + sys_utt_tok_label + new_hst_utt_tok_label_dict[slot] else: new_hst_utt_tok_label_dict[slot] = sys_utt_tok_label + usr_utt_tok_label + new_hst_utt_tok_label_dict[slot] else: new_hst_utt_tok_label_dict[slot] = [0 for _ in sys_utt_tok_label + usr_utt_tok_label + new_hst_utt_tok_label_dict[slot]] # For now, we map all occurences of unpointable slot values # to none. However, since the labels will still suggest # a presence of unpointable slot values, the task of the # DST is still to find those values. It is just not # possible to do that via span prediction on the current input. if class_type == 'unpointable': class_type_dict[slot] = 'none' referral_dict[slot] = 'none' if analyze: if slot not in diag_seen_slots_dict or value_label != diag_seen_slots_value_dict[slot]: print("(%s): %s, " % (slot, value_label), end='') elif slot in diag_seen_slots_dict and class_type == diag_seen_slots_dict[slot] and class_type != 'copy_value' and class_type != 'inform': # If slot has seen before and its class type did not change, label this slot a not present, # assuming that the slot has not actually been mentioned in this turn. # Exceptions are copy_value and inform. If a seen slot has been tagged as copy_value or inform, # this must mean there is evidence in the original labels, therefore consider # them as mentioned again. 
class_type_dict[slot] = 'none' referral_dict[slot] = 'none' else: class_type_dict[slot] = class_type referral_dict[slot] = referred_slot # Remember that this slot was mentioned during this dialog already. if class_type != 'none': diag_seen_slots_dict[slot] = class_type diag_seen_slots_value_dict[slot] = value_label new_diag_state[slot] = class_type # Unpointable is not a valid class, therefore replace with # some valid class for now... if class_type == 'unpointable': new_diag_state[slot] = 'copy_value' if analyze: print("]") if swap_utterances: txt_a = usr_utt_tok txt_b = sys_utt_tok txt_a_lbl = usr_utt_tok_label_dict txt_b_lbl = sys_utt_tok_label_dict else: txt_a = sys_utt_tok txt_b = usr_utt_tok txt_a_lbl = sys_utt_tok_label_dict txt_b_lbl = usr_utt_tok_label_dict examples.append(DSTExample( guid=guid, text_a=txt_a, text_b=txt_b, history=hst_utt_tok, text_a_label=txt_a_lbl, text_b_label=txt_b_lbl, history_label=hst_utt_tok_label_dict, values=diag_seen_slots_value_dict.copy(), inform_label=inform_dict, inform_slot_label=inform_slot_dict, refer_label=referral_dict, diag_state=diag_state, class_label=class_type_dict)) # Update some variables. hst_utt_tok_label_dict = new_hst_utt_tok_label_dict.copy() diag_state = new_diag_state.copy() turn_itr += 1 if analyze: print("----------------------------------------------------------------------") return examples
Read a DST json file into a list of DSTExample.
164,012
import glob import json import sys import numpy as np import re def load_dataset_config(dataset_config): with open(dataset_config, "r", encoding='utf-8') as f: raw_config = json.load(f) return raw_config['class_types'], raw_config['slots'], raw_config['label_maps']
null
164,013
import glob import json import sys import numpy as np import re def tokenize(text): if "\u0120" in text: text = re.sub(" ", "", text) text = re.sub("\u0120", " ", text) text = text.strip() return ' '.join([tok for tok in map(str.strip, re.split("(\W+)", text)) if len(tok) > 0]) def check_slot_inform(value_label, inform_label, label_maps): value = inform_label if value_label == inform_label: value = value_label elif is_in_list(inform_label, value_label): value = value_label elif is_in_list(value_label, inform_label): value = value_label elif inform_label in label_maps: for inform_label_variant in label_maps[inform_label]: if value_label == inform_label_variant: value = value_label break elif is_in_list(inform_label_variant, value_label): value = value_label break elif is_in_list(value_label, inform_label_variant): value = value_label break elif value_label in label_maps: for value_label_variant in label_maps[value_label]: if value_label_variant == inform_label: value = value_label break elif is_in_list(inform_label, value_label_variant): value = value_label break elif is_in_list(value_label_variant, inform_label): value = value_label break return value def get_joint_slot_correctness(fp, class_types, label_maps, key_class_label_id='class_label_id', key_class_prediction='class_prediction', key_start_pos='start_pos', key_start_prediction='start_prediction', key_end_pos='end_pos', key_end_prediction='end_prediction', key_refer_id='refer_id', key_refer_prediction='refer_prediction', key_slot_groundtruth='slot_groundtruth', key_slot_prediction='slot_prediction'): with open(fp) as f: preds = json.load(f) class_correctness = [[] for cl in range(len(class_types) + 1)] confusion_matrix = [[[] for cl_b in range(len(class_types))] for cl_a in range(len(class_types))] pos_correctness = [] refer_correctness = [] val_correctness = [] total_correctness = [] c_tp = {ct: 0 for ct in range(len(class_types))} c_tn = {ct: 0 for ct in range(len(class_types))} c_fp = {ct: 0 for ct in 
range(len(class_types))} c_fn = {ct: 0 for ct in range(len(class_types))} for pred in preds: guid = pred['guid'] # List: set_type, dialogue_idx, turn_idx turn_gt_class = pred[key_class_label_id] turn_pd_class = pred[key_class_prediction] gt_start_pos = pred[key_start_pos] pd_start_pos = pred[key_start_prediction] gt_end_pos = pred[key_end_pos] pd_end_pos = pred[key_end_prediction] gt_refer = pred[key_refer_id] pd_refer = pred[key_refer_prediction] gt_slot = pred[key_slot_groundtruth] pd_slot = pred[key_slot_prediction] gt_slot = tokenize(gt_slot) pd_slot = tokenize(pd_slot) # Make sure the true turn labels are contained in the prediction json file! joint_gt_slot = gt_slot if guid[-1] == '0': # First turn, reset the slots joint_pd_slot = 'none' # If turn_pd_class or a value to be copied is "none", do not update the dialog state. if turn_pd_class == class_types.index('none'): pass elif turn_pd_class == class_types.index('dontcare'): joint_pd_slot = 'dontcare' elif turn_pd_class == class_types.index('copy_value'): joint_pd_slot = pd_slot elif 'true' in class_types and turn_pd_class == class_types.index('true'): joint_pd_slot = 'true' elif 'false' in class_types and turn_pd_class == class_types.index('false'): joint_pd_slot = 'false' elif 'refer' in class_types and turn_pd_class == class_types.index('refer'): if pd_slot[0:3] == "§§ ": if pd_slot[3:] != 'none': joint_pd_slot = check_slot_inform(joint_gt_slot, pd_slot[3:], label_maps) elif pd_slot[0:2] == "§§": if pd_slot[2:] != 'none': joint_pd_slot = check_slot_inform(joint_gt_slot, pd_slot[2:], label_maps) elif pd_slot != 'none': joint_pd_slot = pd_slot elif 'inform' in class_types and turn_pd_class == class_types.index('inform'): if pd_slot[0:3] == "§§ ": if pd_slot[3:] != 'none': joint_pd_slot = check_slot_inform(joint_gt_slot, pd_slot[3:], label_maps) elif pd_slot[0:2] == "§§": if pd_slot[2:] != 'none': joint_pd_slot = check_slot_inform(joint_gt_slot, pd_slot[2:], label_maps) else: print("ERROR: Unexpected slot 
value format. Aborting.") exit() else: print("ERROR: Unexpected class_type. Aborting.") exit() total_correct = True # Check the per turn correctness of the class_type prediction if turn_gt_class == turn_pd_class: class_correctness[turn_gt_class].append(1.0) class_correctness[-1].append(1.0) c_tp[turn_gt_class] += 1 for cc in range(len(class_types)): if cc != turn_gt_class: c_tn[cc] += 1 # Only where there is a span, we check its per turn correctness if turn_gt_class == class_types.index('copy_value'): if gt_start_pos == pd_start_pos and gt_end_pos == pd_end_pos: pos_correctness.append(1.0) else: pos_correctness.append(0.0) # Only where there is a referral, we check its per turn correctness if 'refer' in class_types and turn_gt_class == class_types.index('refer'): if gt_refer == pd_refer: refer_correctness.append(1.0) print(" [%s] Correct referral: %s | %s" % (guid, gt_refer, pd_refer)) else: refer_correctness.append(0.0) print(" [%s] Incorrect referral: %s | %s" % (guid, gt_refer, pd_refer)) else: if turn_gt_class == class_types.index('copy_value'): pos_correctness.append(0.0) if 'refer' in class_types and turn_gt_class == class_types.index('refer'): refer_correctness.append(0.0) class_correctness[turn_gt_class].append(0.0) class_correctness[-1].append(0.0) confusion_matrix[turn_gt_class][turn_pd_class].append(1.0) c_fn[turn_gt_class] += 1 c_fp[turn_pd_class] += 1 # Check the joint slot correctness. # If the value label is not none, then we need to have a value prediction. # Even if the class_type is 'none', there can still be a value label, # it might just not be pointable in the current turn. It might however # be referrable and thus predicted correctly. 
if joint_gt_slot == joint_pd_slot: val_correctness.append(1.0) elif joint_gt_slot != 'none' and joint_gt_slot != 'dontcare' and joint_gt_slot != 'true' and joint_gt_slot != 'false' and joint_gt_slot in label_maps: no_match = True for variant in label_maps[joint_gt_slot]: if variant == joint_pd_slot: no_match = False break if no_match: val_correctness.append(0.0) total_correct = False print(" [%s] Incorrect value (variant): %s (turn class: %s) | %s (turn class: %s)" % (guid, joint_gt_slot, turn_gt_class, joint_pd_slot, turn_pd_class)) else: val_correctness.append(1.0) else: val_correctness.append(0.0) total_correct = False print(" [%s] Incorrect value: %s (turn class: %s) | %s (turn class: %s)" % (guid, joint_gt_slot, turn_gt_class, joint_pd_slot, turn_pd_class)) total_correctness.append(1.0 if total_correct else 0.0) # Account for empty lists (due to no instances of spans or referrals being seen) if pos_correctness == []: pos_correctness.append(1.0) if refer_correctness == []: refer_correctness.append(1.0) for ct in range(len(class_types)): if c_tp[ct] + c_fp[ct] > 0: precision = c_tp[ct] / (c_tp[ct] + c_fp[ct]) else: precision = 1.0 if c_tp[ct] + c_fn[ct] > 0: recall = c_tp[ct] / (c_tp[ct] + c_fn[ct]) else: recall = 1.0 if precision + recall > 0: f1 = 2 * ((precision * recall) / (precision + recall)) else: f1 = 1.0 if c_tp[ct] + c_tn[ct] + c_fp[ct] + c_fn[ct] > 0: acc = (c_tp[ct] + c_tn[ct]) / (c_tp[ct] + c_tn[ct] + c_fp[ct] + c_fn[ct]) else: acc = 1.0 print("Performance for class '%s' (%s): Recall: %.2f (%d of %d), Precision: %.2f, F1: %.2f, Accuracy: %.2f (TP/TN/FP/FN: %d/%d/%d/%d)" % (class_types[ct], ct, recall, np.sum(class_correctness[ct]), len(class_correctness[ct]), precision, f1, acc, c_tp[ct], c_tn[ct], c_fp[ct], c_fn[ct])) print("Confusion matrix:") for cl in range(len(class_types)): print(" %s" % (cl), end="") print("") for cl_a in range(len(class_types)): print("%s " % (cl_a), end="") for cl_b in range(len(class_types)): if 
len(class_correctness[cl_a]) > 0: print("%.2f " % (np.sum(confusion_matrix[cl_a][cl_b]) / len(class_correctness[cl_a])), end="") else: print("---- ", end="") print("") return np.asarray(total_correctness), np.asarray(val_correctness), np.asarray(class_correctness), np.asarray(pos_correctness), np.asarray(refer_correctness), np.asarray(confusion_matrix), c_tp, c_tn, c_fp, c_fn
null
164,014
import json import re from utils_dst import (DSTExample, convert_to_unicode) LABEL_MAPS = {} LABEL_FIX = {'centre': 'center', 'areas': 'area', 'phone number': 'number', 'price range': 'price_range'} def delex_utt(utt, values): utt_norm = utt.copy() for s, v in values.items(): if v != 'none': v_norm = tokenize(v) v_len = len(v_norm) for i in range(len(utt_norm) + 1 - v_len): if utt_norm[i:i + v_len] == v_norm: utt_norm[i:i + v_len] = ['[UNK]'] * v_len return utt_norm def check_label_existence(label, usr_utt_tok, sys_utt_tok): in_usr, usr_pos = get_token_pos(usr_utt_tok, label) if not in_usr and label in LABEL_MAPS: for tmp_label in LABEL_MAPS[label]: in_usr, usr_pos = get_token_pos(usr_utt_tok, tmp_label) if in_usr: break in_sys, sys_pos = get_token_pos(sys_utt_tok, label) if not in_sys and label in LABEL_MAPS: for tmp_label in LABEL_MAPS[label]: in_sys, sys_pos = get_token_pos(sys_utt_tok, tmp_label) if in_sys: break return in_usr, usr_pos, in_sys, sys_pos def get_turn_label(label, sys_utt_tok, usr_utt_tok, slot_last_occurrence): usr_utt_tok_label = [0 for _ in usr_utt_tok] if label == 'none' or label == 'dontcare': class_type = label else: in_usr, usr_pos, in_sys, _ = check_label_existence(label, usr_utt_tok, sys_utt_tok) if in_usr: class_type = 'copy_value' if slot_last_occurrence: (s, e) = usr_pos[-1] for i in range(s, e): usr_utt_tok_label[i] = 1 else: for (s, e) in usr_pos: for i in range(s, e): usr_utt_tok_label[i] = 1 elif in_sys: class_type = 'inform' else: class_type = 'unpointable' return usr_utt_tok_label, class_type def tokenize(utt): utt_lower = convert_to_unicode(utt).lower() utt_tok = [tok for tok in map(str.strip, re.split("(\W+)", utt_lower)) if len(tok) > 0] return utt_tok class DSTExample(object): """ A single training/test example for the DST dataset. 
""" def __init__(self, guid, text_a, text_b, audio_a, audio_b, history,text_a_label, text_b_label, history_label=None, values=None, inform_label=None, inform_slot_label=None, refer_label=None, diag_state=None, class_label=None): self.guid = guid self.text_a = text_a self.text_b = text_b self.audio_a = audio_a self.audio_b = audio_b self.history = history self.text_a_label = text_a_label self.text_b_label = text_b_label self.history_label = history_label self.values = values self.inform_label = inform_label self.inform_slot_label = inform_slot_label self.refer_label = refer_label self.diag_state = diag_state self.class_label = class_label def __str__(self): return self.__repr__() def __repr__(self): s = '' for k, v in self.__dict__.items(): s += f'{k} : {v} \n' return s The provided code snippet includes necessary dependencies for implementing the `create_examples` function. Write a Python function `def create_examples(input_file, set_type, slot_list, label_maps={}, append_history=False, use_history_labels=False, swap_utterances=False, label_value_repetitions=False, delexicalize_sys_utts=False, analyze=False)` to solve the following problem: Read a DST json file into a list of DSTExample. 
Here is the function: def create_examples(input_file, set_type, slot_list, label_maps={}, append_history=False, use_history_labels=False, swap_utterances=False, label_value_repetitions=False, delexicalize_sys_utts=False, analyze=False): """Read a DST json file into a list of DSTExample.""" with open(input_file, "r", encoding='utf-8') as reader: input_data = json.load(reader) global LABEL_MAPS LABEL_MAPS = label_maps examples = [] for entry in input_data: diag_seen_slots_dict = {} diag_seen_slots_value_dict = {slot: 'none' for slot in slot_list} diag_state = {slot: 'none' for slot in slot_list} sys_utt_tok = [] sys_utt_tok_delex = [] usr_utt_tok = [] hst_utt_tok = [] hst_utt_tok_label_dict = {slot: [] for slot in slot_list} for turn in entry['dialogue']: sys_utt_tok_label_dict = {} usr_utt_tok_label_dict = {} inform_dict = {slot: 'none' for slot in slot_list} inform_slot_dict = {slot: 0 for slot in slot_list} referral_dict = {} class_type_dict = {} # Collect turn data if append_history: if swap_utterances: if delexicalize_sys_utts: hst_utt_tok = usr_utt_tok + sys_utt_tok_delex + hst_utt_tok else: hst_utt_tok = usr_utt_tok + sys_utt_tok + hst_utt_tok else: if delexicalize_sys_utts: hst_utt_tok = sys_utt_tok_delex + usr_utt_tok + hst_utt_tok else: hst_utt_tok = sys_utt_tok + usr_utt_tok + hst_utt_tok sys_utt_tok = tokenize(turn['system_transcript']) usr_utt_tok = tokenize(turn['transcript']) turn_label = {LABEL_FIX.get(s.strip(), s.strip()): LABEL_FIX.get(v.strip(), v.strip()) for s, v in turn['turn_label']} guid = '%s-%s-%s' % (set_type, str(entry['dialogue_idx']), str(turn['turn_idx'])) # Create delexicalized sys utterances. 
if delexicalize_sys_utts: delex_dict = {} for slot in slot_list: delex_dict[slot] = 'none' label = 'none' if slot in turn_label: label = turn_label[slot] elif label_value_repetitions and slot in diag_seen_slots_dict: label = diag_seen_slots_value_dict[slot] if label != 'none' and label != 'dontcare': _, _, in_sys, _ = check_label_existence(label, usr_utt_tok, sys_utt_tok) if in_sys: delex_dict[slot] = label sys_utt_tok_delex = delex_utt(sys_utt_tok, delex_dict) new_hst_utt_tok_label_dict = hst_utt_tok_label_dict.copy() new_diag_state = diag_state.copy() for slot in slot_list: label = 'none' if slot in turn_label: label = turn_label[slot] elif label_value_repetitions and slot in diag_seen_slots_dict: label = diag_seen_slots_value_dict[slot] (usr_utt_tok_label, class_type) = get_turn_label(label, sys_utt_tok, usr_utt_tok, slot_last_occurrence=True) if class_type == 'inform': inform_dict[slot] = label if label != 'none': inform_slot_dict[slot] = 1 referral_dict[slot] = 'none' # Referral is not present in woz2 data # Generally don't use span prediction on sys utterance (but inform prediction instead). if delexicalize_sys_utts: sys_utt_tok_label = [0 for _ in sys_utt_tok_delex] else: sys_utt_tok_label = [0 for _ in sys_utt_tok] # Determine what to do with value repetitions. # If value is unique in seen slots, then tag it, otherwise not, # since correct slot assignment can not be guaranteed anymore. 
if label_value_repetitions and slot in diag_seen_slots_dict: if class_type == 'copy_value' and list(diag_seen_slots_value_dict.values()).count(label) > 1: class_type = 'none' usr_utt_tok_label = [0 for _ in usr_utt_tok_label] sys_utt_tok_label_dict[slot] = sys_utt_tok_label usr_utt_tok_label_dict[slot] = usr_utt_tok_label if append_history: if use_history_labels: if swap_utterances: new_hst_utt_tok_label_dict[slot] = usr_utt_tok_label + sys_utt_tok_label + new_hst_utt_tok_label_dict[slot] else: new_hst_utt_tok_label_dict[slot] = sys_utt_tok_label + usr_utt_tok_label + new_hst_utt_tok_label_dict[slot] else: new_hst_utt_tok_label_dict[slot] = [0 for _ in sys_utt_tok_label + usr_utt_tok_label + new_hst_utt_tok_label_dict[slot]] # For now, we map all occurences of unpointable slot values # to none. However, since the labels will still suggest # a presence of unpointable slot values, the task of the # DST is still to find those values. It is just not # possible to do that via span prediction on the current input. if class_type == 'unpointable': class_type_dict[slot] = 'none' elif slot in diag_seen_slots_dict and class_type == diag_seen_slots_dict[slot] and class_type != 'copy_value' and class_type != 'inform': # If slot has seen before and its class type did not change, label this slot a not present, # assuming that the slot has not actually been mentioned in this turn. # Exceptions are copy_value and inform. If a seen slot has been tagged as copy_value or inform, # this must mean there is evidence in the original labels, therefore consider # them as mentioned again. class_type_dict[slot] = 'none' referral_dict[slot] = 'none' else: class_type_dict[slot] = class_type # Remember that this slot was mentioned during this dialog already. if class_type != 'none': diag_seen_slots_dict[slot] = class_type diag_seen_slots_value_dict[slot] = label new_diag_state[slot] = class_type # Unpointable is not a valid class, therefore replace with # some valid class for now... 
if class_type == 'unpointable': new_diag_state[slot] = 'copy_value' if swap_utterances: txt_a = usr_utt_tok if delexicalize_sys_utts: txt_b = sys_utt_tok_delex else: txt_b = sys_utt_tok txt_a_lbl = usr_utt_tok_label_dict txt_b_lbl = sys_utt_tok_label_dict else: if delexicalize_sys_utts: txt_a = sys_utt_tok_delex else: txt_a = sys_utt_tok txt_b = usr_utt_tok txt_a_lbl = sys_utt_tok_label_dict txt_b_lbl = usr_utt_tok_label_dict examples.append(DSTExample( guid=guid, text_a=txt_a, text_b=txt_b, history=hst_utt_tok, text_a_label=txt_a_lbl, text_b_label=txt_b_lbl, history_label=hst_utt_tok_label_dict, values=diag_seen_slots_value_dict.copy(), inform_label=inform_dict, inform_slot_label=inform_slot_dict, refer_label=referral_dict, diag_state=diag_state, class_label=class_type_dict)) # Update some variables. hst_utt_tok_label_dict = new_hst_utt_tok_label_dict.copy() diag_state = new_diag_state.copy() return examples
Read a DST json file into a list of DSTExample.
164,015
import json from utils_dst import (DSTExample) def get_turn_label(turn, prev_dialogue_state, slot_list, dial_id, turn_id, delexicalize_sys_utts=False, slot_last_occurrence=True): """Make turn_label a dictionary of slot with value positions or being dontcare / none: Turn label contains: (1) the updates from previous to current dialogue state, (2) values in current dialogue state explicitly mentioned in system or user utterance.""" prev_ds_dict = dialogue_state_to_sv_dict(prev_dialogue_state) cur_ds_dict = dialogue_state_to_sv_dict(turn['dialogue_state']) (sys_utt_tok, sys_slot_label, usr_utt_tok, usr_slot_label) = get_token_and_slot_label(turn) sys_utt_tok_label_dict = {} usr_utt_tok_label_dict = {} inform_label_dict = {} inform_slot_label_dict = {} referral_label_dict = {} class_type_dict = {} for slot_type in slot_list: inform_label_dict[slot_type] = 'none' inform_slot_label_dict[slot_type] = 0 referral_label_dict[slot_type] = 'none' # Referral is not present in sim data sys_utt_tok_label, usr_utt_tok_label, class_type = get_tok_label( prev_ds_dict, cur_ds_dict, slot_type, sys_utt_tok, sys_slot_label, usr_utt_tok, usr_slot_label, dial_id, turn_id, slot_last_occurrence=slot_last_occurrence) if sum(sys_utt_tok_label) > 0: inform_label_dict[slot_type] = cur_ds_dict[slot_type] inform_slot_label_dict[slot_type] = 1 sys_utt_tok_label = [0 for _ in sys_utt_tok_label] # Don't use token labels for sys utt sys_utt_tok_label_dict[slot_type] = sys_utt_tok_label usr_utt_tok_label_dict[slot_type] = usr_utt_tok_label class_type_dict[slot_type] = class_type if delexicalize_sys_utts: sys_utt_tok = delex_utt(sys_utt_tok, sys_slot_label) return (sys_utt_tok, sys_utt_tok_label_dict, usr_utt_tok, usr_utt_tok_label_dict, inform_label_dict, inform_slot_label_dict, referral_label_dict, cur_ds_dict, class_type_dict) class DSTExample(object): """ A single training/test example for the DST dataset. 
""" def __init__(self, guid, text_a, text_b, audio_a, audio_b, history,text_a_label, text_b_label, history_label=None, values=None, inform_label=None, inform_slot_label=None, refer_label=None, diag_state=None, class_label=None): self.guid = guid self.text_a = text_a self.text_b = text_b self.audio_a = audio_a self.audio_b = audio_b self.history = history self.text_a_label = text_a_label self.text_b_label = text_b_label self.history_label = history_label self.values = values self.inform_label = inform_label self.inform_slot_label = inform_slot_label self.refer_label = refer_label self.diag_state = diag_state self.class_label = class_label def __str__(self): return self.__repr__() def __repr__(self): s = '' for k, v in self.__dict__.items(): s += f'{k} : {v} \n' return s The provided code snippet includes necessary dependencies for implementing the `create_examples` function. Write a Python function `def create_examples(input_file, set_type, slot_list, label_maps={}, append_history=False, use_history_labels=False, swap_utterances=False, label_value_repetitions=False, delexicalize_sys_utts=False, analyze=False)` to solve the following problem: Read a DST json file into a list of DSTExample. 
Here is the function: def create_examples(input_file, set_type, slot_list, label_maps={}, append_history=False, use_history_labels=False, swap_utterances=False, label_value_repetitions=False, delexicalize_sys_utts=False, analyze=False): """Read a DST json file into a list of DSTExample.""" with open(input_file, "r", encoding='utf-8') as reader: input_data = json.load(reader) examples = [] for entry in input_data: dial_id = entry['dialogue_id'] prev_ds = [] hst = [] prev_hst_lbl_dict = {slot: [] for slot in slot_list} prev_ds_lbl_dict = {slot: 'none' for slot in slot_list} for turn_id, turn in enumerate(entry['turns']): guid = '%s-%s-%s' % (set_type, dial_id, str(turn_id)) ds_lbl_dict = prev_ds_lbl_dict.copy() hst_lbl_dict = prev_hst_lbl_dict.copy() (text_a, text_a_label, text_b, text_b_label, inform_label, inform_slot_label, referral_label, cur_ds_dict, class_label) = get_turn_label(turn, prev_ds, slot_list, dial_id, turn_id, delexicalize_sys_utts=delexicalize_sys_utts, slot_last_occurrence=True) if swap_utterances: txt_a = text_b txt_b = text_a txt_a_lbl = text_b_label txt_b_lbl = text_a_label else: txt_a = text_a txt_b = text_b txt_a_lbl = text_a_label txt_b_lbl = text_b_label value_dict = {} for slot in slot_list: if slot in cur_ds_dict: value_dict[slot] = cur_ds_dict[slot] else: value_dict[slot] = 'none' if class_label[slot] != 'none': ds_lbl_dict[slot] = class_label[slot] if append_history: if use_history_labels: hst_lbl_dict[slot] = txt_a_lbl[slot] + txt_b_lbl[slot] + hst_lbl_dict[slot] else: hst_lbl_dict[slot] = [0 for _ in txt_a_lbl[slot] + txt_b_lbl[slot] + hst_lbl_dict[slot]] examples.append(DSTExample( guid=guid, text_a=txt_a, text_b=txt_b, history=hst, text_a_label=txt_a_lbl, text_b_label=txt_b_lbl, history_label=prev_hst_lbl_dict, values=value_dict, inform_label=inform_label, inform_slot_label=inform_slot_label, refer_label=referral_label, diag_state=prev_ds_lbl_dict, class_label=class_label)) prev_ds = turn['dialogue_state'] prev_ds_lbl_dict = 
ds_lbl_dict.copy() prev_hst_lbl_dict = hst_lbl_dict.copy() if append_history: hst = txt_a + txt_b + hst return examples
Read a DST json file into a list of DSTExample.
164,016
import argparse import logging import os import random import glob import json import math import re import numpy as np import torch from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler) from torch.utils.data.distributed import DistributedSampler from tqdm import tqdm, trange from transformers import (WEIGHTS_NAME, BertConfig, BertTokenizer) from transformers import AdamW from transformers import get_linear_schedule_with_warmup from bert_models import BertPretrain from modeling_bert_dst import (BertForDST) from data_processors import PROCESSORS from utils_dst import (convert_examples_to_features) from tensorlistdataset import (TensorListDataset) def to_list(tensor): return tensor.detach().cpu().tolist()
null
164,017
import argparse import logging import os import random import glob import json import math import re import numpy as np import torch from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler) from torch.utils.data.distributed import DistributedSampler from tqdm import tqdm, trange from transformers import (WEIGHTS_NAME, BertConfig, BertTokenizer) from transformers import AdamW from transformers import get_linear_schedule_with_warmup from bert_models import BertPretrain from modeling_bert_dst import (BertForDST) from data_processors import PROCESSORS from utils_dst import (convert_examples_to_features) from tensorlistdataset import (TensorListDataset) logger = logging.getLogger(__name__) def set_seed(args): random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed) def batch_to_device(batch, device): batch_on_device = [] for element in batch: if isinstance(element, dict): batch_on_device.append({k: v.to(device) for k, v in element.items()}) else: batch_on_device.append(element.to(device)) return tuple(batch_on_device) def mask_tokens(inputs, tokenizer, mlm_probability=0.15): """ Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original. 
""" labels = inputs.clone().detach() # We sample a few tokens in each sequence for masked-LM training (with probability args.mlm_probability defaults to 0.15 in Bert/RoBERTa) probability_matrix = torch.full(labels.shape, mlm_probability) #special_tokens_mask = [tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()] probability_matrix.masked_fill_(torch.tensor(labels.cpu() == 0, dtype=torch.bool), value=0.0) masked_indices = torch.bernoulli(probability_matrix).bool() labels[~masked_indices] = -100 # We only compute loss on masked tokens # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK]) indices_replaced = torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices inputs[indices_replaced] = tokenizer.mask_token_id # 10% of the time, we replace masked input tokens with random word indices_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced random_words = torch.randint(tokenizer.vocab_size, labels.shape, dtype=torch.long) inputs[indices_random] = random_words[indices_random].cuda() # The rest of the time (10% of the time) we keep the masked input tokens unchanged return inputs, labels class BertPretrain(torch.nn.Module): def __init__(self, model_name_or_path: str): super(BertPretrain, self).__init__() self.bert_model = BertForMaskedLM.from_pretrained(model_name_or_path) def forward(self, input_ids: torch.tensor, mlm_labels: torch.tensor): outputs = self.bert_model(input_ids, masked_lm_labels=mlm_labels) return outputs[0] The provided code snippet includes necessary dependencies for implementing the `train` function. 
Write a Python function `def train(args, train_dataset, features, model, tokenizer, processor, continue_from_global_step=0)` to solve the following problem: Train the model Here is the function: def train(args, train_dataset, features, model, tokenizer, processor, continue_from_global_step=0): """ Train the model """ # if args.local_rank in [-1, 0]: # tb_writer = SummaryWriter() args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu) train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset) train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size) if args.max_steps > 0: t_total = args.max_steps args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1 else: t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs if args.save_epochs > 0: args.save_steps = t_total // args.num_train_epochs * args.save_epochs num_warmup_steps = int(t_total * args.warmup_proportion) # Prepare optimizer and schedule (linear warmup and decay) no_decay = ['bias', 'LayerNorm.weight'] optimizer_grouped_parameters = [ {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': args.weight_decay}, {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0} ] optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps, t_total) if args.fp16: try: from apex import amp except ImportError: raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level) # multi-gpu training (should be after apex fp16 initialization) model_single_gpu = model if args.n_gpu > 1: model = 
torch.nn.DataParallel(model_single_gpu) # Distributed training (should be after apex fp16 initialization) if args.local_rank != -1: model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True) # MLM pre-training model instantiation if args.mlm_pre or args.mlm_during: pre_model = BertPretrain(args.model_name_or_path) mlm_optimizer = AdamW(pre_model.parameters(), lr=args.learning_rate, eps=args.adam_epsilon) pre_model.to(args.device) pre_model.bert_model.bert = model.bert # Train! logger.info("***** Running training *****") logger.info(" Num examples = %d", len(train_dataset)) logger.info(" Num Epochs = %d", args.num_train_epochs) logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size) logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d", args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1)) logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps) logger.info(" Total optimization steps = %d", t_total) logger.info(" Warmup steps = %d", num_warmup_steps) if continue_from_global_step > 0: logger.info("Fast forwarding to global step %d to resume training from latest checkpoint...", continue_from_global_step) global_step = 0 tr_loss, logging_loss = 0.0, 0.0 model.zero_grad() train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0]) set_seed(args) # Added here for reproductibility (even between python 2 and 3) if args.mlm_pre: for _ in trange(3, desc="MLM-pre epoch"): epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0]) for step, batch in enumerate(epoch_iterator): pre_model.train() pre_model.zero_grad() # Take the first 50 tokens (i.e., the current/last utterance) input_ids, mlm_labels = mask_tokens(batch[0][:, :50].to(args.device), 
tokenizer) loss = pre_model(input_ids=input_ids, mlm_labels=mlm_labels) loss.backward() torch.nn.utils.clip_grad_norm_(pre_model.parameters(), args.max_grad_norm) mlm_optimizer.step() for epoch in train_iterator: epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0]) if args.mlm_during: for step, batch in enumerate(epoch_iterator): pre_model.train() pre_model.zero_grad() # Take the first 50 tokens (i.e., the current/last utterance) input_ids, mlm_labels = mask_tokens(batch[0][:,:50].to(args.device), tokenizer) loss = pre_model(input_ids=input_ids, mlm_labels=mlm_labels) loss.backward() torch.nn.utils.clip_grad_norm_(pre_model.parameters(), args.max_grad_norm) mlm_optimizer.step() epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0]) for step, batch in enumerate(epoch_iterator): # If training is continued from a checkpoint, fast forward # to the state of that checkpoint. if global_step < continue_from_global_step: if (step + 1) % args.gradient_accumulation_steps == 0: scheduler.step() # Update learning rate schedule global_step += 1 continue model.train() batch = batch_to_device(batch, args.device) # This is what is forwarded to the "forward" def. 
inputs = {'input_ids': batch[0], 'input_mask': batch[1], 'segment_ids': batch[2], 'start_pos': batch[3], 'end_pos': batch[4], 'inform_slot_id': batch[5], 'refer_id': batch[6], 'diag_state': batch[7], 'class_label_id': batch[8]} outputs = model(**inputs) loss = outputs[0] # model outputs are always tuple in pytorch-transformers (see doc) if args.n_gpu > 1: loss = loss.mean() # mean() to average on multi-gpu parallel (not distributed) training if args.gradient_accumulation_steps > 1: loss = loss / args.gradient_accumulation_steps if args.fp16: with amp.scale_loss(loss, optimizer) as scaled_loss: scaled_loss.backward() torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm) else: loss.backward() torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) tr_loss += loss.item() if (step + 1) % args.gradient_accumulation_steps == 0: optimizer.step() scheduler.step() # Update learning rate schedule model.zero_grad() global_step += 1 # Log metrics # if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0: # tb_writer.add_scalar('lr', scheduler.get_lr()[0], global_step) # report_loss = (tr_loss - logging_loss) / args.logging_steps # tb_writer.add_scalar('loss', report_loss, global_step) # epoch_iterator.set_description(desc=f' loss: {report_loss} global_step: {global_step}') # logging_loss = tr_loss # Save model checkpoint if args.local_rank in [-1, 0] and \ args.save_steps > 0 and global_step % args.save_steps == 0 and \ epoch not in list(range(int(args.num_train_epochs)))[:int(args.num_train_epochs * 0.4)]: output_dir = os.path.join(args.output_dir, 'checkpoint-{}'.format(global_step)) if not os.path.exists(output_dir): os.makedirs(output_dir) model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training model_to_save.save_pretrained(output_dir) torch.save(args, os.path.join(output_dir, 'training_args.bin')) logger.info("Saving model 
checkpoint to %s", output_dir) if args.max_steps > 0 and global_step > args.max_steps: epoch_iterator.close() break # Save model checkpoint if args.local_rank in [-1, 0] and \ epoch in list(range(int(args.num_train_epochs)))[-int(args.num_train_epochs * 0.1):]: output_dir = os.path.join(args.output_dir, 'checkpoint-{}'.format(global_step)) if not os.path.exists(output_dir): os.makedirs(output_dir) model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training model_to_save.save_pretrained(output_dir) torch.save(args, os.path.join(output_dir, 'training_args.bin')) logger.info("Saving model checkpoint to %s", output_dir) # if args.local_rank == -1 and args.evaluate_during_training: # Only evaluate when single GPU otherwise metrics may not average well # results = evaluate(args, model_single_gpu, tokenizer, processor, prefix=global_step) # for key, value in results.items(): # tb_writer.add_scalar('eval_{}'.format(key), value, global_step) if args.max_steps > 0 and global_step > args.max_steps: train_iterator.close() break # if args.local_rank in [-1, 0]: # tb_writer.close() return global_step, tr_loss / global_step
Train the model
164,018
import argparse import logging import os import random import glob import json import math import re import numpy as np import torch from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler) from torch.utils.data.distributed import DistributedSampler from tqdm import tqdm, trange from transformers import (WEIGHTS_NAME, BertConfig, BertTokenizer) from transformers import AdamW from transformers import get_linear_schedule_with_warmup from bert_models import BertPretrain from modeling_bert_dst import (BertForDST) from data_processors import PROCESSORS from utils_dst import (convert_examples_to_features) from tensorlistdataset import (TensorListDataset) logger = logging.getLogger(__name__) def batch_to_device(batch, device): batch_on_device = [] for element in batch: if isinstance(element, dict): batch_on_device.append({k: v.to(device) for k, v in element.items()}) else: batch_on_device.append(element.to(device)) return tuple(batch_on_device) def eval_metric(model, features, total_loss, per_slot_per_example_loss, per_slot_class_logits, per_slot_start_logits, per_slot_end_logits, per_slot_refer_logits): metric_dict = {} per_slot_correctness = {} for slot in model.slot_list: per_example_loss = per_slot_per_example_loss[slot] class_logits = per_slot_class_logits[slot] start_logits = per_slot_start_logits[slot] end_logits = per_slot_end_logits[slot] refer_logits = per_slot_refer_logits[slot] class_label_id = features['class_label_id'][slot] start_pos = features['start_pos'][slot] end_pos = features['end_pos'][slot] refer_id = features['refer_id'][slot] _, class_prediction = class_logits.max(1) class_correctness = torch.eq(class_prediction, class_label_id).float() class_accuracy = class_correctness.mean() # "is pointable" means whether class label is "copy_value", # i.e., that there is a span to be detected. 
token_is_pointable = torch.eq(class_label_id, model.class_types.index('copy_value')).float() _, start_prediction = start_logits.max(1) start_correctness = torch.eq(start_prediction, start_pos).float() _, end_prediction = end_logits.max(1) end_correctness = torch.eq(end_prediction, end_pos).float() token_correctness = start_correctness * end_correctness token_accuracy = (token_correctness * token_is_pointable).sum() / token_is_pointable.sum() # NaNs mean that none of the examples in this batch contain spans. -> division by 0 # The accuracy therefore is 1 by default. -> replace NaNs if math.isnan(token_accuracy): token_accuracy = torch.tensor(1.0, device=token_accuracy.device) token_is_referrable = torch.eq(class_label_id, model.class_types.index('refer') if 'refer' in model.class_types else -1).float() _, refer_prediction = refer_logits.max(1) refer_correctness = torch.eq(refer_prediction, refer_id).float() refer_accuracy = refer_correctness.sum() / token_is_referrable.sum() # NaNs mean that none of the examples in this batch contain referrals. -> division by 0 # The accuracy therefore is 1 by default. 
-> replace NaNs if math.isnan(refer_accuracy) or math.isinf(refer_accuracy): refer_accuracy = torch.tensor(1.0, device=refer_accuracy.device) total_correctness = class_correctness * (token_is_pointable * token_correctness + (1 - token_is_pointable)) * (token_is_referrable * refer_correctness + (1 - token_is_referrable)) total_accuracy = total_correctness.mean() loss = per_example_loss.mean() metric_dict['eval_accuracy_class_%s' % slot] = class_accuracy metric_dict['eval_accuracy_token_%s' % slot] = token_accuracy metric_dict['eval_accuracy_refer_%s' % slot] = refer_accuracy metric_dict['eval_accuracy_%s' % slot] = total_accuracy metric_dict['eval_loss_%s' % slot] = loss per_slot_correctness[slot] = total_correctness goal_correctness = torch.stack([c for c in per_slot_correctness.values()], 1).prod(1) goal_accuracy = goal_correctness.mean() metric_dict['eval_accuracy_goal'] = goal_accuracy metric_dict['loss'] = total_loss return metric_dict def predict_and_format(model, tokenizer, features, per_slot_class_logits, per_slot_start_logits, per_slot_end_logits, per_slot_refer_logits, ids, input_ids_unmasked, values, inform, prefix, ds): prediction_list = [] dialog_state = ds for i in range(len(ids)): if int(ids[i].split("-")[2]) == 0: dialog_state = {slot: 'none' for slot in model.slot_list} prediction = {} prediction_addendum = {} for slot in model.slot_list: class_logits = per_slot_class_logits[slot][i] start_logits = per_slot_start_logits[slot][i] end_logits = per_slot_end_logits[slot][i] refer_logits = per_slot_refer_logits[slot][i] input_ids = features['input_ids'][i].tolist() class_label_id = int(features['class_label_id'][slot][i]) start_pos = int(features['start_pos'][slot][i]) end_pos = int(features['end_pos'][slot][i]) refer_id = int(features['refer_id'][slot][i]) class_prediction = int(class_logits.argmax()) start_prediction = int(start_logits.argmax()) end_prediction = int(end_logits.argmax()) refer_prediction = int(refer_logits.argmax()) prediction['guid'] = 
ids[i].split("-") prediction['class_prediction_%s' % slot] = class_prediction prediction['class_label_id_%s' % slot] = class_label_id prediction['start_prediction_%s' % slot] = start_prediction prediction['start_pos_%s' % slot] = start_pos prediction['end_prediction_%s' % slot] = end_prediction prediction['end_pos_%s' % slot] = end_pos prediction['refer_prediction_%s' % slot] = refer_prediction prediction['refer_id_%s' % slot] = refer_id prediction['input_ids_%s' % slot] = input_ids if class_prediction == model.class_types.index('dontcare'): dialog_state[slot] = 'dontcare' elif class_prediction == model.class_types.index('copy_value'): input_tokens = tokenizer.convert_ids_to_tokens(input_ids_unmasked[i]) dialog_state[slot] = ' '.join(input_tokens[start_prediction:end_prediction + 1]) dialog_state[slot] = re.sub("(^| )##", "", dialog_state[slot]) elif 'true' in model.class_types and class_prediction == model.class_types.index('true'): dialog_state[slot] = 'true' elif 'false' in model.class_types and class_prediction == model.class_types.index('false'): dialog_state[slot] = 'false' elif class_prediction == model.class_types.index('inform'): dialog_state[slot] = '§§' + inform[i][slot] # Referral case is handled below prediction_addendum['slot_prediction_%s' % slot] = dialog_state[slot] prediction_addendum['slot_groundtruth_%s' % slot] = values[i][slot] # Referral case. All other slot values need to be seen first in order # to be able to do this correctly. for slot in model.slot_list: class_logits = per_slot_class_logits[slot][i] refer_logits = per_slot_refer_logits[slot][i] class_prediction = int(class_logits.argmax()) refer_prediction = int(refer_logits.argmax()) if 'refer' in model.class_types and class_prediction == model.class_types.index('refer'): # Only slots that have been mentioned before can be referred to. # One can think of a situation where one slot is referred to in the same utterance. 
# This phenomenon is however currently not properly covered in the training data # label generation process. dialog_state[slot] = dialog_state[model.slot_list[refer_prediction - 1]] prediction_addendum['slot_prediction_%s' % slot] = dialog_state[slot] # Value update prediction.update(prediction_addendum) prediction_list.append(prediction) return prediction_list, dialog_state def load_and_cache_examples(args, model, tokenizer, processor, evaluate=False): if args.local_rank not in [-1, 0] and not evaluate: torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache # Load data features from cache or dataset file cached_file = os.path.join(os.path.dirname(args.output_dir), 'cached_{}_{}_features'.format( args.predict_type if evaluate else ('train_few' if 'few' in args.output_dir else 'train'), args.max_seq_length)) if os.path.exists(cached_file) and not args.overwrite_cache: # and not output_examples: logger.info("Loading features from cached file %s", cached_file) features = torch.load(cached_file) else: logger.info("Creating features from dataset file at %s", args.data_dir) processor_args = {'append_history': args.append_history, 'use_history_labels': args.use_history_labels, 'swap_utterances': args.swap_utterances, 'label_value_repetitions': args.label_value_repetitions, 'delexicalize_sys_utts': args.delexicalize_sys_utts} if evaluate and args.predict_type == "dev": examples = processor.get_dev_examples(args.data_dir, processor_args) elif evaluate and args.predict_type == "test": examples = processor.get_test_examples(args.data_dir, processor_args) else: examples = processor.get_train_examples(args.data_dir, processor_args) features = convert_examples_to_features(examples=examples, slot_list=model.slot_list, class_types=model.class_types, model_type=args.model_type, tokenizer=tokenizer, max_seq_length=args.max_seq_length, slot_value_dropout=(0.0 if evaluate else args.svd)) if 
args.local_rank in [-1, 0]: logger.info("Saving features into cached file %s", cached_file) torch.save(features, cached_file) if args.local_rank == 0 and not evaluate: torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache # Convert to Tensors and build dataset all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long) all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long) all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long) all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long) f_start_pos = [f.start_pos for f in features] f_end_pos = [f.end_pos for f in features] f_inform_slot_ids = [f.inform_slot for f in features] f_refer_ids = [f.refer_id for f in features] f_diag_state = [f.diag_state for f in features] f_class_label_ids = [f.class_label_id for f in features] all_start_positions = {} all_end_positions = {} all_inform_slot_ids = {} all_refer_ids = {} all_diag_state = {} all_class_label_ids = {} for s in model.slot_list: all_start_positions[s] = torch.tensor([f[s] for f in f_start_pos], dtype=torch.long) all_end_positions[s] = torch.tensor([f[s] for f in f_end_pos], dtype=torch.long) all_inform_slot_ids[s] = torch.tensor([f[s] for f in f_inform_slot_ids], dtype=torch.long) all_refer_ids[s] = torch.tensor([f[s] for f in f_refer_ids], dtype=torch.long) all_diag_state[s] = torch.tensor([f[s] for f in f_diag_state], dtype=torch.long) all_class_label_ids[s] = torch.tensor([f[s] for f in f_class_label_ids], dtype=torch.long) dataset = TensorListDataset(all_input_ids, all_input_mask, all_segment_ids, all_start_positions, all_end_positions, all_inform_slot_ids, all_refer_ids, all_diag_state, all_class_label_ids, all_example_index) return dataset, features def evaluate(args, model, tokenizer, processor, prefix=""): dataset, features = load_and_cache_examples(args, model, tokenizer, 
processor, evaluate=True) if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]: os.makedirs(args.output_dir) args.eval_batch_size = args.per_gpu_eval_batch_size eval_sampler = SequentialSampler(dataset) # Note that DistributedSampler samples randomly eval_dataloader = DataLoader(dataset, sampler=eval_sampler, batch_size=args.eval_batch_size) # Eval! logger.info("***** Running evaluation {} *****".format(prefix)) logger.info(" Num examples = %d", len(dataset)) logger.info(" Batch size = %d", args.eval_batch_size) all_results = [] all_preds = [] ds = {slot: 'none' for slot in model.slot_list} with torch.no_grad(): diag_state = {slot: torch.tensor([0 for _ in range(args.eval_batch_size)]).to(args.device) for slot in model.slot_list} for batch in tqdm(eval_dataloader, desc="Evaluating"): model.eval() batch = batch_to_device(batch, args.device) # Reset dialog state if turn is first in the dialog. turn_itrs = [features[i.item()].guid.split('-')[2] for i in batch[9]] reset_diag_state = np.where(np.array(turn_itrs) == '0')[0] for slot in model.slot_list: for i in reset_diag_state: diag_state[slot][i] = 0 with torch.no_grad(): inputs = {'input_ids': batch[0], 'input_mask': batch[1], 'segment_ids': batch[2], 'start_pos': batch[3], 'end_pos': batch[4], 'inform_slot_id': batch[5], 'refer_id': batch[6], 'diag_state': diag_state, 'class_label_id': batch[8]} unique_ids = [features[i.item()].guid for i in batch[9]] values = [features[i.item()].values for i in batch[9]] input_ids_unmasked = [features[i.item()].input_ids_unmasked for i in batch[9]] inform = [features[i.item()].inform for i in batch[9]] outputs = model(**inputs) # Update dialog state for next turn. 
for slot in model.slot_list: updates = outputs[2][slot].max(1)[1] for i, u in enumerate(updates): if u != 0: diag_state[slot][i] = u results = eval_metric(model, inputs, outputs[0], outputs[1], outputs[2], outputs[3], outputs[4], outputs[5]) preds, ds = predict_and_format(model, tokenizer, inputs, outputs[2], outputs[3], outputs[4], outputs[5], unique_ids, input_ids_unmasked, values, inform, prefix, ds) all_results.append(results) all_preds.append(preds) all_preds = [item for sublist in all_preds for item in sublist] # Flatten list # Generate final results final_results = {} for k in all_results[0].keys(): final_results[k] = torch.stack([r[k] for r in all_results]).mean() # Write final predictions (for evaluation with external tool) output_prediction_file = os.path.join(args.output_dir, "pred_res.%s.%s.json" % (args.predict_type, prefix)) with open(output_prediction_file, "w") as f: json.dump(all_preds, f, indent=2) return final_results
null
164,019
import logging import six import numpy as np import json logger = logging.getLogger(__name__) class InputFeatures(object): """A single set of features of data.""" def __init__(self, input_ids, input_ids_unmasked, input_mask, segment_ids, start_pos=None, end_pos=None, values=None, inform=None, inform_slot=None, refer_id=None, diag_state=None, class_label_id=None, guid="NONE"): self.guid = guid self.input_ids = input_ids self.input_ids_unmasked = input_ids_unmasked self.input_mask = input_mask self.segment_ids = segment_ids self.start_pos = start_pos self.end_pos = end_pos self.values = values self.inform = inform self.inform_slot = inform_slot self.refer_id = refer_id self.diag_state = diag_state self.class_label_id = class_label_id def convert_to_unicode(text): """Converts `text` to Unicode (if it's not already), assuming utf-8 input.""" if six.PY3: if isinstance(text, str): return text elif isinstance(text, bytes): return text.decode("utf-8", "ignore") else: raise ValueError("Unsupported string type: %s" % (type(text))) elif six.PY2: if isinstance(text, str): return text.decode("utf-8", "ignore") elif isinstance(text, unicode): return text else: raise ValueError("Unsupported string type: %s" % (type(text))) else: raise ValueError("Not running on Python2 or Python 3?") The provided code snippet includes necessary dependencies for implementing the `convert_examples_to_features` function. Write a Python function `def convert_examples_to_features(examples, slot_list, class_types, model_type, tokenizer, max_seq_length, slot_value_dropout=0.0)` to solve the following problem: Loads a data file into a list of `InputBatch`s. 
Here is the function: def convert_examples_to_features(examples, slot_list, class_types, model_type, tokenizer, max_seq_length, slot_value_dropout=0.0): """Loads a data file into a list of `InputBatch`s.""" if model_type == 'bert': model_specs = {'MODEL_TYPE': 'bert', 'CLS_TOKEN': '[CLS]', 'UNK_TOKEN': '[UNK]', 'SEP_TOKEN': '[SEP]', 'TOKEN_CORRECTION': 4} else: logger.error("Unknown model type (%s). Aborting." % (model_type)) exit(1) def _tokenize_text_and_label(text, text_label_dict, slot, tokenizer, model_specs, slot_value_dropout): joint_text_label = [0 for _ in text_label_dict[slot]] # joint all slots' label for slot_text_label in text_label_dict.values(): for idx, label in enumerate(slot_text_label): if label == 1: joint_text_label[idx] = 1 text_label = text_label_dict[slot] tokens = [] tokens_unmasked = [] token_labels = [] for token, token_label, joint_label in zip(text, text_label, joint_text_label): token = convert_to_unicode(token) sub_tokens = tokenizer.tokenize(token) # Most time intensive step tokens_unmasked.extend(sub_tokens) if slot_value_dropout == 0.0 or joint_label == 0: tokens.extend(sub_tokens) else: rn_list = np.random.random_sample((len(sub_tokens),)) for rn, sub_token in zip(rn_list, sub_tokens): if rn > slot_value_dropout: tokens.append(sub_token) else: tokens.append(model_specs['UNK_TOKEN']) token_labels.extend([token_label for _ in sub_tokens]) assert len(tokens) == len(token_labels) assert len(tokens_unmasked) == len(token_labels) return tokens, tokens_unmasked, token_labels def _truncate_seq_pair(tokens_a, tokens_b, history, max_length): """Truncates a sequence pair in place to the maximum length. Copied from bert/run_classifier.py """ # This is a simple heuristic which will always truncate the longer sequence # one token at a time. This makes more sense than truncating an equal percent # of tokens from each, since if one sequence is very short then each token # that's truncated likely contains more information than a longer sequence. 
while True: total_length = len(tokens_a) + len(tokens_b) + len(history) if total_length <= max_length: break if len(history) > 0: history.pop() elif len(tokens_a) > len(tokens_b): tokens_a.pop() else: tokens_b.pop() def _truncate_length_and_warn(tokens_a, tokens_b, history, max_seq_length, model_specs, guid): # Modifies `tokens_a` and `tokens_b` in place so that the total # length is less than the specified length. # Account for [CLS], [SEP], [SEP], [SEP] with "- 4" (BERT) if len(tokens_a) + len(tokens_b) + len(history) > max_seq_length - model_specs['TOKEN_CORRECTION']: logger.info("Truncate Example %s. Total len=%d." % (guid, len(tokens_a) + len(tokens_b) + len(history))) input_text_too_long = True else: input_text_too_long = False _truncate_seq_pair(tokens_a, tokens_b, history, max_seq_length - model_specs['TOKEN_CORRECTION']) return input_text_too_long def _get_token_label_ids(token_labels_a, token_labels_b, token_labels_history, max_seq_length, model_specs): token_label_ids = [] token_label_ids.append(0) # [CLS] for token_label in token_labels_a: token_label_ids.append(token_label) token_label_ids.append(0) # [SEP] for token_label in token_labels_b: token_label_ids.append(token_label) token_label_ids.append(0) # [SEP] for token_label in token_labels_history: token_label_ids.append(token_label) token_label_ids.append(0) # [SEP] while len(token_label_ids) < max_seq_length: token_label_ids.append(0) # padding assert len(token_label_ids) == max_seq_length return token_label_ids def _get_start_end_pos(class_type, token_label_ids, max_seq_length): if class_type == 'copy_value' and 1 not in token_label_ids: #logger.warn("copy_value label, but token_label not detected. 
Setting label to 'none'.") class_type = 'none' start_pos = 0 end_pos = 0 if 1 in token_label_ids: start_pos = token_label_ids.index(1) # Parsing is supposed to find only first location of wanted value if 0 not in token_label_ids[start_pos:]: end_pos = len(token_label_ids[start_pos:]) + start_pos - 1 else: end_pos = token_label_ids[start_pos:].index(0) + start_pos - 1 for i in range(max_seq_length): if i >= start_pos and i <= end_pos: assert token_label_ids[i] == 1 return class_type, start_pos, end_pos def _get_transformer_input(tokens_a, tokens_b, history, max_seq_length, tokenizer, model_specs): # The convention in BERT is: # (a) For sequence pairs: # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP] # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1 # (b) For single sequences: # tokens: [CLS] the dog is hairy . [SEP] # type_ids: 0 0 0 0 0 0 0 # # Where "type_ids" are used to indicate whether this is the first # sequence or the second sequence. The embedding vectors for `type=0` and # `type=1` were learned during pre-training and are added to the wordpiece # embedding vector (and position vector). This is not *strictly* necessary # since the [SEP] token unambiguously separates the sequences, but it makes # it easier for the model to learn the concept of sequences. # # For classification tasks, the first vector (corresponding to [CLS]) is # used as the "sentence vector". Note that this only makes sense because # the entire model is fine-tuned. 
tokens = [] segment_ids = [] tokens.append(model_specs['CLS_TOKEN']) segment_ids.append(0) for token in tokens_a: tokens.append(token) segment_ids.append(0) tokens.append(model_specs['SEP_TOKEN']) segment_ids.append(0) for token in tokens_b: tokens.append(token) segment_ids.append(1) tokens.append(model_specs['SEP_TOKEN']) segment_ids.append(1) for token in history: tokens.append(token) segment_ids.append(1) tokens.append(model_specs['SEP_TOKEN']) segment_ids.append(1) input_ids = tokenizer.convert_tokens_to_ids(tokens) # The mask has 1 for real tokens and 0 for padding tokens. Only real # tokens are attended to. input_mask = [1] * len(input_ids) # Zero-pad up to the sequence length. while len(input_ids) < max_seq_length: input_ids.append(0) input_mask.append(0) segment_ids.append(0) assert len(input_ids) == max_seq_length assert len(input_mask) == max_seq_length assert len(segment_ids) == max_seq_length return tokens, input_ids, input_mask, segment_ids total_cnt = 0 too_long_cnt = 0 refer_list = ['none'] + slot_list features = [] # Convert single example for (example_index, example) in enumerate(examples): if example_index % 1000 == 0: logger.info("Writing example %d of %d" % (example_index, len(examples))) total_cnt += 1 value_dict = {} inform_dict = {} inform_slot_dict = {} refer_id_dict = {} diag_state_dict = {} class_label_id_dict = {} start_pos_dict = {} end_pos_dict = {} for slot in slot_list: tokens_a, tokens_a_unmasked, token_labels_a = _tokenize_text_and_label( example.text_a, example.text_a_label, slot, tokenizer, model_specs, slot_value_dropout) tokens_b, tokens_b_unmasked, token_labels_b = _tokenize_text_and_label( example.text_b, example.text_b_label, slot, tokenizer, model_specs, slot_value_dropout) tokens_history, tokens_history_unmasked, token_labels_history = _tokenize_text_and_label( example.history, example.history_label, slot, tokenizer, model_specs, slot_value_dropout) input_text_too_long = _truncate_length_and_warn( tokens_a, tokens_b, 
tokens_history, max_seq_length, model_specs, example.guid) if input_text_too_long: if example_index < 10: if len(token_labels_a) > len(tokens_a): logger.info(' tokens_a truncated labels: %s' % str(token_labels_a[len(tokens_a):])) if len(token_labels_b) > len(tokens_b): logger.info(' tokens_b truncated labels: %s' % str(token_labels_b[len(tokens_b):])) if len(token_labels_history) > len(tokens_history): logger.info(' tokens_history truncated labels: %s' % str(token_labels_history[len(tokens_history):])) token_labels_a = token_labels_a[:len(tokens_a)] token_labels_b = token_labels_b[:len(tokens_b)] token_labels_history = token_labels_history[:len(tokens_history)] tokens_a_unmasked = tokens_a_unmasked[:len(tokens_a)] tokens_b_unmasked = tokens_b_unmasked[:len(tokens_b)] tokens_history_unmasked = tokens_history_unmasked[:len(tokens_history)] assert len(token_labels_a) == len(tokens_a) assert len(token_labels_b) == len(tokens_b) assert len(token_labels_history) == len(tokens_history) assert len(token_labels_a) == len(tokens_a_unmasked) assert len(token_labels_b) == len(tokens_b_unmasked) assert len(token_labels_history) == len(tokens_history_unmasked) token_label_ids = _get_token_label_ids(token_labels_a, token_labels_b, token_labels_history, max_seq_length, model_specs) value_dict[slot] = example.values[slot] inform_dict[slot] = example.inform_label[slot] class_label_mod, start_pos_dict[slot], end_pos_dict[slot] = _get_start_end_pos( example.class_label[slot], token_label_ids, max_seq_length) if class_label_mod != example.class_label[slot]: example.class_label[slot] = class_label_mod inform_slot_dict[slot] = example.inform_slot_label[slot] refer_id_dict[slot] = refer_list.index(example.refer_label[slot]) diag_state_dict[slot] = class_types.index(example.diag_state[slot]) class_label_id_dict[slot] = class_types.index(example.class_label[slot]) if input_text_too_long: too_long_cnt += 1 tokens, input_ids, input_mask, segment_ids = _get_transformer_input(tokens_a, 
tokens_b, tokens_history, max_seq_length, tokenizer, model_specs) if slot_value_dropout > 0.0: _, input_ids_unmasked, _, _ = _get_transformer_input(tokens_a_unmasked, tokens_b_unmasked, tokens_history_unmasked, max_seq_length, tokenizer, model_specs) else: input_ids_unmasked = input_ids assert(len(input_ids) == len(input_ids_unmasked)) if example_index < 10: logger.info("*** Example ***") logger.info("guid: %s" % (example.guid)) logger.info("tokens: %s" % " ".join(tokens)) logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids])) logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask])) logger.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids])) logger.info("start_pos: %s" % str(start_pos_dict)) logger.info("end_pos: %s" % str(end_pos_dict)) logger.info("values: %s" % str(value_dict)) logger.info("inform: %s" % str(inform_dict)) logger.info("inform_slot: %s" % str(inform_slot_dict)) logger.info("refer_id: %s" % str(refer_id_dict)) logger.info("diag_state: %s" % str(diag_state_dict)) logger.info("class_label_id: %s" % str(class_label_id_dict)) features.append( InputFeatures( guid=example.guid, input_ids=input_ids, input_ids_unmasked=input_ids_unmasked, input_mask=input_mask, segment_ids=segment_ids, start_pos=start_pos_dict, end_pos=end_pos_dict, values=value_dict, inform=inform_dict, inform_slot=inform_slot_dict, refer_id=refer_id_dict, diag_state=diag_state_dict, class_label_id=class_label_id_dict)) logger.info("========== %d out of %d examples have text too long" % (too_long_cnt, total_cnt)) return features
Loads a data file into a list of `InputBatch`s.
164,020
import torch from collections import OrderedDict BERT_VOCAB_SIZE = 30522 def get_match_value(name, state_dict_numpy, prefix): """ Need be overridden towards different models, here for UnifiedTransformer Model """ if name == 'embedder.token_embedding.weight': return state_dict_numpy[f'{prefix}embeddings.word_embeddings.weight'] elif name == 'embedder.pos_embedding.weight': return state_dict_numpy[f'{prefix}embeddings.position_embeddings.weight'] elif name == 'embedder.type_embedding.weight': return state_dict_numpy.get(f'{prefix}embeddings.token_type_embeddings.weight') elif name == 'embedder.turn_embedding.weight': return None elif name == 'embed_layer_norm.weight': if f'{prefix}embeddings.LayerNorm.weight' in state_dict_numpy: return state_dict_numpy[f'{prefix}embeddings.LayerNorm.weight'] else: return state_dict_numpy[f'{prefix}embeddings.LayerNorm.gamma'] elif name == 'embed_layer_norm.bias': if f'{prefix}embeddings.LayerNorm.bias' in state_dict_numpy: return state_dict_numpy[f'{prefix}embeddings.LayerNorm.bias'] else: return state_dict_numpy[f'{prefix}embeddings.LayerNorm.beta'] elif name == 'pooler.0.weight': return state_dict_numpy.get(f'{prefix}pooler.dense.weight') elif name == 'pooler.0.bias': return state_dict_numpy.get(f'{prefix}pooler.dense.bias') elif name == 'mlm_transform.0.weight': return state_dict_numpy.get('cls.predictions.transform.dense.weight') elif name == 'mlm_transform.0.bias': return state_dict_numpy.get('cls.predictions.transform.dense.bias') elif name == 'mlm_transform.2.weight': if 'cls.predictions.transform.LayerNorm.weight' in state_dict_numpy: return state_dict_numpy.get('cls.predictions.transform.LayerNorm.weight') else: return state_dict_numpy.get('cls.predictions.transform.LayerNorm.gamma') elif name == 'mlm_transform.2.bias': if 'cls.predictions.transform.LayerNorm.bias' in state_dict_numpy: return state_dict_numpy.get('cls.predictions.transform.LayerNorm.bias') else: return 
state_dict_numpy.get('cls.predictions.transform.LayerNorm.beta') elif name == 'mlm_bias': return state_dict_numpy.get('cls.predictions.bias') else: num = name.split('.')[1] assert num in [str(i) for i in range(12)] if name == f'layers.{num}.attn.linear_qkv.weight': q = state_dict_numpy[f'{prefix}encoder.layer.{num}.attention.self.query.weight'] k = state_dict_numpy[f'{prefix}encoder.layer.{num}.attention.self.key.weight'] v = state_dict_numpy[f'{prefix}encoder.layer.{num}.attention.self.value.weight'] qkv_weight = torch.cat([q, k, v], dim=0) return qkv_weight elif name == f'layers.{num}.attn.linear_qkv.bias': q = state_dict_numpy[f'{prefix}encoder.layer.{num}.attention.self.query.bias'] k = state_dict_numpy[f'{prefix}encoder.layer.{num}.attention.self.key.bias'] v = state_dict_numpy[f'{prefix}encoder.layer.{num}.attention.self.value.bias'] qkv_bias = torch.cat([q, k, v], dim=0) return qkv_bias elif name == f'layers.{num}.attn.linear_out.weight': return state_dict_numpy[f'{prefix}encoder.layer.{num}.attention.output.dense.weight'] elif name == f'layers.{num}.attn.linear_out.bias': return state_dict_numpy[f'{prefix}encoder.layer.{num}.attention.output.dense.bias'] elif name == f'layers.{num}.attn_norm.weight': if f'{prefix}encoder.layer.{num}.attention.output.LayerNorm.weight' in state_dict_numpy: return state_dict_numpy[f'{prefix}encoder.layer.{num}.attention.output.LayerNorm.weight'] else: return state_dict_numpy[f'{prefix}encoder.layer.{num}.attention.output.LayerNorm.gamma'] elif name == f'layers.{num}.attn_norm.bias': if f'{prefix}encoder.layer.{num}.attention.output.LayerNorm.bias' in state_dict_numpy: return state_dict_numpy[f'{prefix}encoder.layer.{num}.attention.output.LayerNorm.bias'] else: return state_dict_numpy[f'{prefix}encoder.layer.{num}.attention.output.LayerNorm.beta'] elif name == f'layers.{num}.ff.linear_hidden.0.weight': return state_dict_numpy[f'{prefix}encoder.layer.{num}.intermediate.dense.weight'] elif name == 
f'layers.{num}.ff.linear_hidden.0.bias': return state_dict_numpy[f'{prefix}encoder.layer.{num}.intermediate.dense.bias'] elif name == f'layers.{num}.ff.linear_out.weight': return state_dict_numpy[f'{prefix}encoder.layer.{num}.output.dense.weight'] elif name == f'layers.{num}.ff.linear_out.bias': return state_dict_numpy[f'{prefix}encoder.layer.{num}.output.dense.bias'] elif name == f'layers.{num}.ff_norm.weight': if f'{prefix}encoder.layer.{num}.output.LayerNorm.weight' in state_dict_numpy: return state_dict_numpy[f'{prefix}encoder.layer.{num}.output.LayerNorm.weight'] else: return state_dict_numpy[f'{prefix}encoder.layer.{num}.output.LayerNorm.gamma'] elif name == f'layers.{num}.ff_norm.bias': if f'{prefix}encoder.layer.{num}.output.LayerNorm.bias' in state_dict_numpy: return state_dict_numpy[f'{prefix}encoder.layer.{num}.output.LayerNorm.bias'] else: return state_dict_numpy[f'{prefix}encoder.layer.{num}.output.LayerNorm.beta'] else: raise ValueError(f'ERROR: Param "{name}" can not be loaded in Space Model!') def hug2space(input_file, input_template, output_file): state_dict_output = OrderedDict() state_dict_input = torch.load(input_file, map_location=lambda storage, loc: storage) state_dict_template = torch.load(input_template, map_location=lambda storage, loc: storage) prefix = 'bert.' if list(state_dict_input.keys())[0].startswith('bert.') else '' for name, value in state_dict_template.items(): match_value = get_match_value(name, state_dict_input, prefix) if match_value is not None: assert match_value.ndim == value.ndim if match_value.shape != value.shape: assert value.size(0) == BERT_VOCAB_SIZE and match_value.size(0) > BERT_VOCAB_SIZE match_value = match_value[:BERT_VOCAB_SIZE] dtype = value.dtype device = value.device state_dict_output[name] = torch.tensor(match_value, dtype=dtype, device=device) else: print(f'WARNING: Param "{name}" can not be loaded in Space Model.') torch.save(state_dict_output, output_file)
null
164,021
import os import torch from collections import OrderedDict def get_match_value(name, state_dict_numpy): def space2hug(input_template, input_pt, output_pt, restore=True): state_dict_pytorch = OrderedDict() state_dict_init_template = torch.load(input_template, map_location=lambda storage, loc: storage) state_dict_init_pytorch = torch.load(input_pt, map_location=lambda storage, loc: storage) if 'module.' in list(state_dict_init_pytorch.keys())[0]: new_model_state_dict = OrderedDict() for k, v in state_dict_init_pytorch.items(): assert k[:7] == 'module.' new_model_state_dict[k[7:]] = v state_dict_init_pytorch = new_model_state_dict for name, value in state_dict_init_template.items(): match_value = get_match_value(name, state_dict_init_pytorch) if match_value is not None: assert match_value.shape == value.shape assert match_value.dtype == value.dtype state_dict_pytorch[name] = match_value else: print(f'Parm {name} is not existed! Restore: [{restore}]') if restore: state_dict_pytorch[name] = value else: continue torch.save(state_dict_pytorch, output_pt)
null
164,022
import time from statistics import mean, stdev import torch from torch import nn from fastai.text.all import * def Accuracy(axis=-1): return AvgMetric(partial(accuracy, axis=axis))
null
164,023
import random, re, os from functools import partial from fastai.text.all import * from hugdatafast.transform import CombineTransform import json import numpy as np def adam_no_correction_step(p, lr, mom, step, sqr_mom, grad_avg, sqr_avg, eps, **kwargs): p.data.addcdiv_(grad_avg, (sqr_avg).sqrt() + eps, value = -lr) return p The provided code snippet includes necessary dependencies for implementing the `Adam_no_bias_correction` function. Write a Python function `def Adam_no_bias_correction(params, lr, mom=0.9, sqr_mom=0.99, eps=1e-5, wd=0.01, decouple_wd=True)` to solve the following problem: A `Optimizer` for Adam with `lr`, `mom`, `sqr_mom`, `eps` and `params` Here is the function: def Adam_no_bias_correction(params, lr, mom=0.9, sqr_mom=0.99, eps=1e-5, wd=0.01, decouple_wd=True): "A `Optimizer` for Adam with `lr`, `mom`, `sqr_mom`, `eps` and `params`" cbs = [weight_decay] if decouple_wd else [l2_reg] cbs += [partial(average_grad, dampening=True), average_sqr_grad, step_stat, adam_no_correction_step] return Optimizer(params, cbs, lr=lr, mom=mom, sqr_mom=sqr_mom, eps=eps, wd=wd)
A `Optimizer` for Adam with `lr`, `mom`, `sqr_mom`, `eps` and `params`
164,024
import random, re, os from functools import partial from fastai.text.all import * from hugdatafast.transform import CombineTransform import json import numpy as np The provided code snippet includes necessary dependencies for implementing the `linear_warmup_and_decay` function. Write a Python function `def linear_warmup_and_decay(pct, lr_max, total_steps, warmup_steps=None, warmup_pct=None, end_lr=0.0, decay_power=1)` to solve the following problem: pct (float): fastai count it as ith_step/num_epoch*len(dl), so we can't just use pct when our num_epoch is fake.he ith_step is count from 0, Here is the function: def linear_warmup_and_decay(pct, lr_max, total_steps, warmup_steps=None, warmup_pct=None, end_lr=0.0, decay_power=1): """ pct (float): fastai count it as ith_step/num_epoch*len(dl), so we can't just use pct when our num_epoch is fake.he ith_step is count from 0, """ if warmup_pct: warmup_steps = int(warmup_pct * total_steps) step_i = round(pct * total_steps) # According to the original source code, two schedules take effect at the same time, but decaying schedule will be neglible in the early time. decayed_lr = (lr_max-end_lr) * (1 - step_i/total_steps) ** decay_power + end_lr # https://www.tensorflow.org/api_docs/python/tf/compat/v1/train/polynomial_decay warmed_lr = decayed_lr * min(1.0, step_i/warmup_steps) # https://github.com/google-research/electra/blob/81f7e5fc98b0ad8bfd20b641aa8bc9e6ac00c8eb/model/optimization.py#L44 return warmed_lr
pct (float): fastai count it as ith_step/num_epoch*len(dl), so we can't just use pct when our num_epoch is fake.he ith_step is count from 0,
164,025
import random, re, os from functools import partial from fastai.text.all import * from hugdatafast.transform import CombineTransform import json import numpy as np The provided code snippet includes necessary dependencies for implementing the `linear_warmup_and_then_decay` function. Write a Python function `def linear_warmup_and_then_decay(pct, lr_max, total_steps, warmup_steps=None, warmup_pct=None, end_lr=0.0, decay_power=1)` to solve the following problem: pct (float): fastai count it as ith_step/num_epoch*len(dl), so we can't just use pct when our num_epoch is fake.he ith_step is count from 0, Here is the function: def linear_warmup_and_then_decay(pct, lr_max, total_steps, warmup_steps=None, warmup_pct=None, end_lr=0.0, decay_power=1): """ pct (float): fastai count it as ith_step/num_epoch*len(dl), so we can't just use pct when our num_epoch is fake.he ith_step is count from 0, """ if warmup_pct: warmup_steps = int(warmup_pct * total_steps) step_i = round(pct * total_steps) if step_i <= warmup_steps: # warm up return lr_max * min(1.0, step_i/warmup_steps) else: # decay return (lr_max-end_lr) * (1 - (step_i-warmup_steps)/(total_steps-warmup_steps)) ** decay_power + end_lr
pct (float): fastai count it as ith_step/num_epoch*len(dl), so we can't just use pct when our num_epoch is fake.he ith_step is count from 0,
164,026
import random, re, os from functools import partial from fastai.text.all import * from hugdatafast.transform import CombineTransform import json import numpy as np The provided code snippet includes necessary dependencies for implementing the `load_part_model` function. Write a Python function `def load_part_model(file, model, prefix, device=None, strict=True)` to solve the following problem: assume `model` is part of (child attribute at any level) of model whose states save in `file`. Here is the function: def load_part_model(file, model, prefix, device=None, strict=True): "assume `model` is part of (child attribute at any level) of model whose states save in `file`." distrib_barrier() if prefix[-1] != '.': prefix += '.' if isinstance(device, int): device = torch.device('cuda', device) elif device is None: device = 'cpu' state = torch.load(file, map_location=device) hasopt = set(state)=={'model', 'opt'} model_state = state['model'] if hasopt else state # model_state = {k[len(prefix):] : v for k,v in model_state.items() if k.startswith(prefix) and '_sss' not in k} # for k, v in model_state.items(): get_model(model).load_state_dict(model_state, strict=strict)
assume `model` is part of (child attribute at any level) of model whose states save in `file`.
164,027
import random, re, os from functools import partial from fastai.text.all import * from hugdatafast.transform import CombineTransform import json import numpy as np The provided code snippet includes necessary dependencies for implementing the `load_model_` function. Write a Python function `def load_model_(learn, files, device=None, **kwargs)` to solve the following problem: if multiple file passed, then load and create an ensemble. Load normally otherwise Here is the function: def load_model_(learn, files, device=None, **kwargs): "if multiple file passed, then load and create an ensemble. Load normally otherwise" merge_out_fc = kwargs.pop('merge_out_fc', None) if not isinstance(files, list): learn.load(files, device=device, **kwargs) return if device is None: device = learn.dls.device model = learn.model.cpu() models = [model, *(deepcopy(model) for _ in range(len(files)-1)) ] for f,m in zip(files, models): file = join_path_file(f, learn.path/learn.model_dir, ext='.pth') load_model(file, m, learn.opt, device='cpu', **kwargs) learn.model = Ensemble(models, device, merge_out_fc) return learn
if multiple file passed, then load and create an ensemble. Load normally otherwise
164,030
import copy, math import torch import torch.nn as nn import torch.nn.utils.rnn as rnn_utils def lens2mask2(lens,max_len): bsize = lens.numel() masks = torch.arange(0, max_len).type_as(lens).to(lens.device).repeat(bsize, 1).lt(lens.unsqueeze(1)) masks.requires_grad = False return masks
null
164,034
from functools import partial from pathlib import Path import json from tqdm import tqdm from torch.nn.utils.rnn import pad_sequence import datasets from fastai.text.all import * The provided code snippet includes necessary dependencies for implementing the `_show_title` function. Write a Python function `def _show_title(o, ax=None, ctx=None, title=None, color='black', **kwargs)` to solve the following problem: Set title of `ax` to `o`, or print `o` if `ax` is `None` Here is the function: def _show_title(o, ax=None, ctx=None, title=None, color='black', **kwargs): "Set title of `ax` to `o`, or print `o` if `ax` is `None`" ax = ifnone(ax,ctx) if ax is None: print(o) elif hasattr(ax, 'set_title'): t = ax.title.get_text() if len(t) > 0: o = t+'\n'+str(o) ax.set_title(o, color=color) elif isinstance(ax, pd.Series): while title in ax: title += '_' ax = ax.append(pd.Series({title: o})) return ax
Set title of `ax` to `o`, or print `o` if `ax` is `None`
164,035
from functools import partial from pathlib import Path import json from tqdm import tqdm from torch.nn.utils.rnn import pad_sequence import datasets from fastai.text.all import * def show_batch(x:tuple, y, samples, ctxs=None, max_n=9, **kwargs): if ctxs is None: ctxs = get_empty_df(min(len(samples), max_n)) ctxs = show_batch[object](x, y, samples, max_n=max_n, ctxs=ctxs, **kwargs) display_df(pd.DataFrame(ctxs)) return ctxs
null
164,036
from functools import partial from pathlib import Path import json from tqdm import tqdm from torch.nn.utils.rnn import pad_sequence import datasets from fastai.text.all import * def show_results(x: tuple, y, samples, outs, ctxs=None, max_n=10, trunc_at=150, **kwargs): if ctxs is None: ctxs = get_empty_df(min(len(samples), max_n)) ctxs = show_results[object](x, y, samples, outs, ctxs=ctxs, max_n=max_n, **kwargs) display_df(pd.DataFrame(ctxs)) return ctxs
null
164,037
from multiprocessing import context import os, sys, random from pathlib import Path from functools import partial from datetime import datetime, timezone, timedelta import numpy as np import torch from torch import nn import torch.nn.functional as F import datasets from fastai.text.all import * from transformers import ElectraConfig, ElectraTokenizerFast, ElectraForMaskedLM from electra_for_inbatch import ElectraForPreTraining from _utils.hugdatafast import * from _utils.utils_inbatch import * from _utils.would_like_to_pr import * from torch.nn.functional import gelu from transformers.activations import get_activation from sklearn.metrics import accuracy_score from _utils.model_utils import PoolingFunction,lens2mask,lens2mask2 from torch.nn import CrossEntropyLoss hf_tokenizer = ElectraTokenizerFast.from_pretrained(f"google/electra-{c.size}-generator") torch.backends.cudnn.benchmark = True torch.manual_seed(c.seed) The provided code snippet includes necessary dependencies for implementing the `mask_tokens` function. Write a Python function `def mask_tokens(inputs, question_mask_plm, mask_token_index, vocab_size, special_token_indices, mlm_probability=0.15, replace_prob=0.1, orginal_prob=0.1, ignore_index=-100)` to solve the following problem: Prepare masked tokens inputs/labels for masked language modeling: (1-replace_prob-orginal_prob)% MASK, replace_prob% random, orginal_prob% original within mlm_probability% of tokens in the sentence. * ignore_index in nn.CrossEntropy is default to -100, so you don't need to specify ignore_index in loss Here is the function: def mask_tokens(inputs, question_mask_plm, mask_token_index, vocab_size, special_token_indices, mlm_probability=0.15, replace_prob=0.1, orginal_prob=0.1, ignore_index=-100): ## inputs: input_ids """ Prepare masked tokens inputs/labels for masked language modeling: (1-replace_prob-orginal_prob)% MASK, replace_prob% random, orginal_prob% original within mlm_probability% of tokens in the sentence. 
* ignore_index in nn.CrossEntropy is default to -100, so you don't need to specify ignore_index in loss """ device = inputs.device labels = inputs.clone() cls_id = hf_tokenizer.cls_token_id ## Get positions to apply mlm (mask/replace/not changed). (mlm_probability) ## mlm probability matrix probability_matrix = torch.full(labels.shape, mlm_probability, device=device) ## special tokens special_tokens_mask = torch.full(inputs.shape, False, dtype=torch.bool, device=device) for sp_id in special_token_indices: special_tokens_mask = special_tokens_mask | (inputs==sp_id) special_tokens_mask = special_tokens_mask | question_mask_plm probability_matrix.masked_fill_(special_tokens_mask, value=0.0) ## mlm probability matrix (0 or 1) mlm_mask = torch.bernoulli(probability_matrix).bool() labels[~mlm_mask] = ignore_index # We only compute loss on mlm applied tokens (not special tokens) ## mask operation ## mask (mlm_probability * (1-replace_prob-orginal_prob)) mask_prob = 1 - replace_prob - orginal_prob mask_token_mask = torch.bernoulli(torch.full(labels.shape, mask_prob, device=device)).bool() & mlm_mask inputs[mask_token_mask] = mask_token_index ## replace operation ## replace with a random token (mlm_probability * replace_prob) if int(replace_prob)!=0: rep_prob = replace_prob/(replace_prob + orginal_prob) replace_token_mask = torch.bernoulli(torch.full(labels.shape, rep_prob, device=device)).bool() & mlm_mask & ~mask_token_mask random_words = torch.randint(vocab_size, labels.shape, dtype=torch.long, device=device) inputs[replace_token_mask] = random_words[replace_token_mask] ## do nothing (mlm_probability * orginal_prob) pass return inputs, labels, mlm_mask
Prepare masked tokens inputs/labels for masked language modeling: (1-replace_prob-orginal_prob)% MASK, replace_prob% random, orginal_prob% original within mlm_probability% of tokens in the sentence. * ignore_index in nn.CrossEntropy is default to -100, so you don't need to specify ignore_index in loss
164,053
import os, sys import json import sqlite3 import traceback import argparse from process_sql import tokenize, get_schema, get_tables_with_alias, Schema, get_sql def eval_where(pred, label): pred_conds = [unit for unit in pred['where'][::2]] label_conds = [unit for unit in label['where'][::2]] label_wo_agg = [unit[2] for unit in label_conds] pred_total = len(pred_conds) label_total = len(label_conds) cnt = 0 cnt_wo_agg = 0 for unit in pred_conds: if unit in label_conds: cnt += 1 label_conds.remove(unit) if unit[2] in label_wo_agg: cnt_wo_agg += 1 label_wo_agg.remove(unit[2]) return label_total, pred_total, cnt, cnt_wo_agg
null
164,059
import os, sys import json import sqlite3 import traceback import argparse from process_sql import tokenize, get_schema, get_tables_with_alias, Schema, get_sql def get_keywords(sql): def eval_keywords(pred, label): pred_keywords = get_keywords(pred) label_keywords = get_keywords(label) pred_total = len(pred_keywords) label_total = len(label_keywords) cnt = 0 for k in pred_keywords: if k in label_keywords: cnt += 1 return label_total, pred_total, cnt
null
164,061
import os, sys import json import sqlite3 import traceback import argparse from process_sql import tokenize, get_schema, get_tables_with_alias, Schema, get_sql def get_nestedSQL(sql): nested = [] for cond_unit in sql['from']['conds'][::2] + sql['where'][::2] + sql['having'][::2]: if type(cond_unit[3]) is dict: nested.append(cond_unit[3]) if type(cond_unit[4]) is dict: nested.append(cond_unit[4]) if sql['intersect'] is not None: nested.append(sql['intersect']) if sql['except'] is not None: nested.append(sql['except']) if sql['union'] is not None: nested.append(sql['union']) return nested def count_component2(sql): nested = get_nestedSQL(sql) return len(nested)
null
164,064
import os, sys import json import sqlite3 import traceback import argparse from process_sql import tokenize, get_schema, get_tables_with_alias, Schema, get_sql class Evaluator: def __init__(self): def eval_hardness(self, sql): def eval_exact_match(self, pred, label): def eval_partial_match(self, pred, label): def print_scores(scores, etype): def eval_exec_match(db, p_str, g_str, pred, gold): def rebuild_sql_val(sql): def build_valid_col_units(table_units, schema): def rebuild_sql_col(valid_col_units, sql, kmap): class Schema: def __init__(self, schema): def schema(self): def idMap(self): def _map(self, schema): def get_schema(db): def get_sql(schema, query): def evaluate(gold, predict, db_dir, etype, kmaps): with open(gold) as f: glist = [] gseq_one = [] for l in f.readlines(): if len(l.strip()) == 0: glist.append(gseq_one) gseq_one = [] else: lstrip = l.strip().split('\t') gseq_one.append(lstrip) #glist = [l.strip().split('\t') for l in f.readlines() if len(l.strip()) > 0] with open(predict) as f: plist = [] pseq_one = [] for l in f.readlines(): if len(l.strip()) == 0: plist.append(pseq_one) pseq_one = [] else: pseq_one.append(l.strip().split('\t')) #plist = [l.strip().split('\t') for l in f.readlines() if len(l.strip()) > 0] # plist = [[("select product_type_code from products group by product_type_code order by count ( * ) desc limit value", "orchestra")]] # glist = [[("SELECT product_type_code FROM Products GROUP BY product_type_code ORDER BY count(*) DESC LIMIT 1", "customers_and_orders")]] evaluator = Evaluator() turns = ['turn 1', 'turn 2', 'turn 3', 'turn 4', 'turn >4'] levels = ['easy', 'medium', 'hard', 'extra', 'all', 'joint_all'] partial_types = ['select', 'select(no AGG)', 'where', 'where(no OP)', 'group(no Having)', 'group', 'order', 'and/or', 'IUEN', 'keywords'] entries = [] scores = {} for turn in turns: scores[turn] = {'count': 0, 'exact': 0.} scores[turn]['exec'] = 0 for level in levels: scores[level] = {'count': 0, 'partial': {}, 'exact': 0.} 
scores[level]['exec'] = 0 for type_ in partial_types: scores[level]['partial'][type_] = {'acc': 0., 'rec': 0., 'f1': 0.,'acc_count':0,'rec_count':0} eval_err_num = 0 for p, g in zip(plist, glist): scores['joint_all']['count'] += 1 turn_scores = {"exec": [], "exact": []} for idx, pg in enumerate(zip(p, g)): p, g = pg p_str = p[0] p_str = p_str.replace("value", "1") g_str, db = g db_name = db db = os.path.join(db_dir, db, db + ".sqlite") schema = Schema(get_schema(db)) g_sql = get_sql(schema, g_str) hardness = evaluator.eval_hardness(g_sql) if idx > 3: idx = ">4" else: idx += 1 turn_id = "turn " + str(idx) scores[turn_id]['count'] += 1 scores[hardness]['count'] += 1 scores['all']['count'] += 1 try: p_sql = get_sql(schema, p_str) except: # If p_sql is not valid, then we will use an empty sql to evaluate with the correct sql p_sql = { "except": None, "from": { "conds": [], "table_units": [] }, "groupBy": [], "having": [], "intersect": None, "limit": None, "orderBy": [], "select": [ False, [] ], "union": None, "where": [] } eval_err_num += 1 print(("eval_err_num:{}".format(eval_err_num))) # rebuild sql for value evaluation kmap = kmaps[db_name] g_valid_col_units = build_valid_col_units(g_sql['from']['table_units'], schema) g_sql = rebuild_sql_val(g_sql) g_sql = rebuild_sql_col(g_valid_col_units, g_sql, kmap) p_valid_col_units = build_valid_col_units(p_sql['from']['table_units'], schema) p_sql = rebuild_sql_val(p_sql) p_sql = rebuild_sql_col(p_valid_col_units, p_sql, kmap) if etype in ["all", "exec"]: exec_score = eval_exec_match(db, p_str, g_str, p_sql, g_sql) if exec_score: scores[hardness]['exec'] += 1 scores[turn_id]['exec'] += 1 turn_scores['exec'].append(1) else: turn_scores['exec'].append(0) if etype in ["all", "match"]: exact_score = evaluator.eval_exact_match(p_sql, g_sql) partial_scores = evaluator.partial_scores if exact_score == 0: turn_scores['exact'].append(0) print(("{} pred: {}".format(hardness,p_str))) print(("{} gold: {}".format(hardness,g_str))) 
print("") else: turn_scores['exact'].append(1) scores[turn_id]['exact'] += exact_score scores[hardness]['exact'] += exact_score scores['all']['exact'] += exact_score for type_ in partial_types: if partial_scores[type_]['pred_total'] > 0: scores[hardness]['partial'][type_]['acc'] += partial_scores[type_]['acc'] scores[hardness]['partial'][type_]['acc_count'] += 1 if partial_scores[type_]['label_total'] > 0: scores[hardness]['partial'][type_]['rec'] += partial_scores[type_]['rec'] scores[hardness]['partial'][type_]['rec_count'] += 1 scores[hardness]['partial'][type_]['f1'] += partial_scores[type_]['f1'] if partial_scores[type_]['pred_total'] > 0: scores['all']['partial'][type_]['acc'] += partial_scores[type_]['acc'] scores['all']['partial'][type_]['acc_count'] += 1 if partial_scores[type_]['label_total'] > 0: scores['all']['partial'][type_]['rec'] += partial_scores[type_]['rec'] scores['all']['partial'][type_]['rec_count'] += 1 scores['all']['partial'][type_]['f1'] += partial_scores[type_]['f1'] entries.append({ 'predictSQL': p_str, 'goldSQL': g_str, 'hardness': hardness, 'exact': exact_score, 'partial': partial_scores }) if all(v == 1 for v in turn_scores["exec"]): scores['joint_all']['exec'] += 1 if all(v == 1 for v in turn_scores["exact"]): scores['joint_all']['exact'] += 1 for turn in turns: if scores[turn]['count'] == 0: continue if etype in ["all", "exec"]: scores[turn]['exec'] /= scores[turn]['count'] if etype in ["all", "match"]: scores[turn]['exact'] /= scores[turn]['count'] for level in levels: if scores[level]['count'] == 0: continue if etype in ["all", "exec"]: scores[level]['exec'] /= scores[level]['count'] if etype in ["all", "match"]: scores[level]['exact'] /= scores[level]['count'] for type_ in partial_types: if scores[level]['partial'][type_]['acc_count'] == 0: scores[level]['partial'][type_]['acc'] = 0 else: scores[level]['partial'][type_]['acc'] = scores[level]['partial'][type_]['acc'] / \ scores[level]['partial'][type_]['acc_count'] * 1.0 if 
scores[level]['partial'][type_]['rec_count'] == 0: scores[level]['partial'][type_]['rec'] = 0 else: scores[level]['partial'][type_]['rec'] = scores[level]['partial'][type_]['rec'] / \ scores[level]['partial'][type_]['rec_count'] * 1.0 if scores[level]['partial'][type_]['acc'] == 0 and scores[level]['partial'][type_]['rec'] == 0: scores[level]['partial'][type_]['f1'] = 1 else: scores[level]['partial'][type_]['f1'] = \ 2.0 * scores[level]['partial'][type_]['acc'] * scores[level]['partial'][type_]['rec'] / ( scores[level]['partial'][type_]['rec'] + scores[level]['partial'][type_]['acc']) print_scores(scores, etype)
null
164,086
import os, sys import json import sqlite3 import traceback import argparse from process_sql import tokenize, get_schema, get_tables_with_alias, Schema, get_sql class Evaluator: """A simple evaluator""" def __init__(self): self.partial_scores = None def eval_hardness(self, sql): count_comp1_ = count_component1(sql) count_comp2_ = count_component2(sql) count_others_ = count_others(sql) if count_comp1_ <= 1 and count_others_ == 0 and count_comp2_ == 0: return "easy" elif (count_others_ <= 2 and count_comp1_ <= 1 and count_comp2_ == 0) or \ (count_comp1_ <= 2 and count_others_ < 2 and count_comp2_ == 0): return "medium" elif (count_others_ > 2 and count_comp1_ <= 2 and count_comp2_ == 0) or \ (2 < count_comp1_ <= 3 and count_others_ <= 2 and count_comp2_ == 0) or \ (count_comp1_ <= 1 and count_others_ == 0 and count_comp2_ <= 1): return "hard" else: return "extra" def eval_exact_match(self, pred, label): partial_scores = self.eval_partial_match(pred, label) self.partial_scores = partial_scores for _, score in partial_scores.items(): if score['f1'] != 1: return 0 if len(label['from']['table_units']) > 0: if label['from']['table_units'][0][0] == 'sql' and pred['from']['table_units'][0][0] == 'sql': return self.eval_exact_match(pred['from']['table_units'][0][1], label['from']['table_units'][0][1]) # still wrong else: label_tables = sorted(label['from']['table_units']) pred_tables = sorted(pred['from']['table_units']) return label_tables == pred_tables return 1 def eval_partial_match(self, pred, label): res = {} label_total, pred_total, cnt, cnt_wo_agg = eval_sel(pred, label) acc, rec, f1 = get_scores(cnt, pred_total, label_total) res['select'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total} acc, rec, f1 = get_scores(cnt_wo_agg, pred_total, label_total) res['select(no AGG)'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total} label_total, pred_total, cnt, cnt_wo_agg = eval_where(pred, label) acc, 
rec, f1 = get_scores(cnt, pred_total, label_total) res['where'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total} acc, rec, f1 = get_scores(cnt_wo_agg, pred_total, label_total) res['where(no OP)'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total} label_total, pred_total, cnt = eval_group(pred, label) acc, rec, f1 = get_scores(cnt, pred_total, label_total) res['group(no Having)'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total} label_total, pred_total, cnt = eval_having(pred, label) acc, rec, f1 = get_scores(cnt, pred_total, label_total) res['group'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total} label_total, pred_total, cnt = eval_order(pred, label) acc, rec, f1 = get_scores(cnt, pred_total, label_total) res['order'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total} label_total, pred_total, cnt = eval_and_or(pred, label) acc, rec, f1 = get_scores(cnt, pred_total, label_total) res['and/or'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total} label_total, pred_total, cnt = eval_IUEN(pred, label) acc, rec, f1 = get_scores(cnt, pred_total, label_total) res['IUEN'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total} label_total, pred_total, cnt = eval_keywords(pred, label) acc, rec, f1 = get_scores(cnt, pred_total, label_total) res['keywords'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total} return res def print_scores(scores, etype): levels = ['easy', 'medium', 'hard', 'extra', 'all'] partial_types = ['select', 'select(no AGG)', 'where', 'where(no OP)', 'group(no Having)', 'group', 'order', 'and/or', 'IUEN', 'keywords'] print("{:20} {:20} {:20} {:20} {:20} {:20}".format("", *levels)) counts = [scores[level]['count'] for level in levels] print("{:20} {:<20d} {:<20d} {:<20d} 
{:<20d} {:<20d}".format("count", *counts)) if etype in ["all", "exec"]: print('===================== EXECUTION ACCURACY =====================') this_scores = [scores[level]['exec'] for level in levels] print("{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}".format("execution", *this_scores)) if etype in ["all", "match"]: print('\n====================== EXACT MATCHING ACCURACY =====================') exact_scores = [scores[level]['exact'] for level in levels] print("{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}".format("exact match", *exact_scores)) print('\n---------------------PARTIAL MATCHING ACCURACY----------------------') for type_ in partial_types: this_scores = [scores[level]['partial'][type_]['acc'] for level in levels] print("{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}".format(type_, *this_scores)) print('---------------------- PARTIAL MATCHING RECALL ----------------------') for type_ in partial_types: this_scores = [scores[level]['partial'][type_]['rec'] for level in levels] print("{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}".format(type_, *this_scores)) print('---------------------- PARTIAL MATCHING F1 --------------------------') for type_ in partial_types: this_scores = [scores[level]['partial'][type_]['f1'] for level in levels] print("{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}".format(type_, *this_scores)) def eval_exec_match(db, p_str, g_str, pred, gold): """ return 1 if the values between prediction and gold are matching in the corresponding index. Currently not support multiple col_unit(pairs). 
""" conn = sqlite3.connect(db) cursor = conn.cursor() try: cursor.execute(p_str) p_res = cursor.fetchall() except: return False cursor.execute(g_str) q_res = cursor.fetchall() def res_map(res, val_units): rmap = {} for idx, val_unit in enumerate(val_units): key = tuple(val_unit[1]) if not val_unit[2] else (val_unit[0], tuple(val_unit[1]), tuple(val_unit[2])) rmap[key] = [r[idx] for r in res] return rmap p_val_units = [unit[1] for unit in pred['select'][1]] q_val_units = [unit[1] for unit in gold['select'][1]] return res_map(p_res, p_val_units) == res_map(q_res, q_val_units) def rebuild_sql_val(sql): if sql is None or not DISABLE_VALUE: return sql if len(sql['from']['table_units']) > 0 and sql['from']['table_units'][0][0] == 'sql': sql['from']['table_units'][0] = ('sql', rebuild_sql_val(sql['from']['table_units'][0][1])) sql['from']['conds'] = rebuild_condition_val(sql['from']['conds']) sql['having'] = rebuild_condition_val(sql['having']) sql['where'] = rebuild_condition_val(sql['where']) sql['intersect'] = rebuild_sql_val(sql['intersect']) sql['except'] = rebuild_sql_val(sql['except']) sql['union'] = rebuild_sql_val(sql['union']) return sql def build_valid_col_units(table_units, schema): col_ids = [table_unit[1] for table_unit in table_units if table_unit[0] == TABLE_TYPE['table_unit']] prefixs = [col_id[:-2] for col_id in col_ids] valid_col_units= [] for value in schema.idMap.values(): if '.' 
in value and value[:value.index('.')] in prefixs: valid_col_units.append(value) return valid_col_units def rebuild_sql_col(valid_col_units, sql, kmap): if sql is None: return sql sql['select'] = rebuild_select_col(valid_col_units, sql['select'], kmap) sql['from'] = rebuild_from_col(valid_col_units, sql['from'], kmap) sql['where'] = rebuild_condition_col(valid_col_units, sql['where'], kmap) sql['groupBy'] = rebuild_group_by_col(valid_col_units, sql['groupBy'], kmap) sql['orderBy'] = rebuild_order_by_col(valid_col_units, sql['orderBy'], kmap) sql['having'] = rebuild_condition_col(valid_col_units, sql['having'], kmap) sql['intersect'] = rebuild_sql_col(valid_col_units, sql['intersect'], kmap) sql['except'] = rebuild_sql_col(valid_col_units, sql['except'], kmap) sql['union'] = rebuild_sql_col(valid_col_units, sql['union'], kmap) return sql class Schema: """ Simple schema which maps table&column to a unique identifier """ def __init__(self, schema): self._schema = schema self._idMap = self._map(self._schema) def schema(self): return self._schema def idMap(self): return self._idMap def _map(self, schema): idMap = {"*": "__all__"} id = 1 for key, vals in schema.items(): for val in vals: idMap[key.lower() + "." + val.lower()] = ( "__" + key.lower() + "." 
+ val.lower() + "__" ) id += 1 for key in schema: idMap[key.lower()] = "__" + key.lower() + "__" id += 1 return idMap def get_schema(db): """ Get database's schema, which is a dict with table name as key and list of column names as value :param db: database path :return: schema dict """ schema = {} conn = sqlite3.connect(db) cursor = conn.cursor() # fetch table names cursor.execute("SELECT name FROM sqlite_master WHERE type='table';") tables = [str(table[0].lower()) for table in cursor.fetchall()] # fetch table info for table in tables: cursor.execute("PRAGMA table_info({})".format(table)) schema[table] = [str(col[1].lower()) for col in cursor.fetchall()] return schema def get_sql(schema, query): toks = tokenize(query) tables_with_alias = get_tables_with_alias(schema.schema, toks) _, sql = parse_sql(toks, 0, tables_with_alias, schema) return sql def evaluate(gold, predict, db_dir, etype, kmaps): with open(gold) as f: glist = [l.strip().split('\t') for l in f.readlines() if len(l.strip()) > 0] with open(predict) as f: plist = [l.strip().split('\t') for l in f.readlines() if len(l.strip()) > 0] # plist = [("select max(Share),min(Share) from performance where Type != 'terminal'", "orchestra")] # glist = [("SELECT max(SHARE) , min(SHARE) FROM performance WHERE TYPE != 'Live final'", "orchestra")] evaluator = Evaluator() levels = ['easy', 'medium', 'hard', 'extra', 'all'] partial_types = ['select', 'select(no AGG)', 'where', 'where(no OP)', 'group(no Having)', 'group', 'order', 'and/or', 'IUEN', 'keywords'] entries = [] scores = {} for level in levels: scores[level] = {'count': 0, 'partial': {}, 'exact': 0.} scores[level]['exec'] = 0 for type_ in partial_types: scores[level]['partial'][type_] = {'acc': 0., 'rec': 0., 'f1': 0.,'acc_count':0,'rec_count':0} eval_err_num = 0 total_acc = [] for p, g in zip(plist, glist): p_str = p[0] g_str, db = g db_name = db db = os.path.join(db_dir, db, db + ".sqlite") schema = Schema(get_schema(db)) # .schema: map lowercased raw tab name 
to lowercased raw col name list # .idMap: map tab name to __tab__, tab.col to __tab.col__, * to __all__, all lowercased g_sql = get_sql(schema, g_str) hardness = evaluator.eval_hardness(g_sql) scores[hardness]['count'] += 1 scores['all']['count'] += 1 try: p_sql = get_sql(schema, p_str) except: # If p_sql is not valid, then we will use an empty sql to evaluate with the correct sql p_sql = { "except": None, "from": { "conds": [], "table_units": [] }, "groupBy": [], "having": [], "intersect": None, "limit": None, "orderBy": [], "select": [ False, [] ], "union": None, "where": [] } eval_err_num += 1 print("eval_err_num:{}".format(eval_err_num)) # rebuild sql for value evaluation kmap = kmaps[db_name] g_valid_col_units = build_valid_col_units(g_sql['from']['table_units'], schema) # extract all __tab.col__ that has tab in from clause, not include __all__ g_sql = rebuild_sql_val(g_sql) g_sql = rebuild_sql_col(g_valid_col_units, g_sql, kmap) # kmap: map __tab.col__ to pivot __tab.col__ p_valid_col_units = build_valid_col_units(p_sql['from']['table_units'], schema) p_sql = rebuild_sql_val(p_sql) p_sql = rebuild_sql_col(p_valid_col_units, p_sql, kmap) if etype in ["all", "exec"]: exec_score = eval_exec_match(db, p_str, g_str, p_sql, g_sql) if exec_score: scores[hardness]['exec'] += 1 if etype in ["all", "match"]: exact_score = evaluator.eval_exact_match(p_sql, g_sql) total_acc.append(exact_score) partial_scores = evaluator.partial_scores if exact_score == 0: print("{} pred: {}".format(hardness,p_str)) print("{} gold: {}".format(hardness,g_str)) print("") scores[hardness]['exact'] += exact_score scores['all']['exact'] += exact_score for type_ in partial_types: if partial_scores[type_]['pred_total'] > 0: scores[hardness]['partial'][type_]['acc'] += partial_scores[type_]['acc'] scores[hardness]['partial'][type_]['acc_count'] += 1 if partial_scores[type_]['label_total'] > 0: scores[hardness]['partial'][type_]['rec'] += partial_scores[type_]['rec'] 
scores[hardness]['partial'][type_]['rec_count'] += 1 scores[hardness]['partial'][type_]['f1'] += partial_scores[type_]['f1'] if partial_scores[type_]['pred_total'] > 0: scores['all']['partial'][type_]['acc'] += partial_scores[type_]['acc'] scores['all']['partial'][type_]['acc_count'] += 1 if partial_scores[type_]['label_total'] > 0: scores['all']['partial'][type_]['rec'] += partial_scores[type_]['rec'] scores['all']['partial'][type_]['rec_count'] += 1 scores['all']['partial'][type_]['f1'] += partial_scores[type_]['f1'] entries.append({ 'predictSQL': p_str, 'goldSQL': g_str, 'hardness': hardness, 'exact': exact_score, 'partial': partial_scores }) thedev = [3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 2, 3, 3, 2, 3, 3, 3, 3, 2, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 2, 2, 3, 2, 3, 3, 3, 3, 3, 3, 3, 3, 2, 3, 3, 2, 2, 3, 2, 2, 2, 3, 2, 2, 3, 2, 2, 3, 2, 2, 3, 3, 2, 3, 2, 2, 3, 2, 2, 2, 3, 2, 3, 3, 2, 2, 2, 2, 2, 2, 3, 2, 3, 3, 3, 2, 2, 2, 2, 2, 3, 3, 2, 2, 3, 2, 2, 2, 2, 2, 2, 2, 2, 3, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 3, 3, 3, 2, 2, 3, 2, 3, 2, 2, 2, 2, 3, 3, 3, 2, 3, 3, 2, 2, 2, 3, 4, 3, 3, 2, 3, 3, 4, 3, 3, 4, 4, 4, 4, 4, 4, 4, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 4, 4, 3, 3, 3, 4, 3, 4, 4, 3, 3, 3, 3, 3, 3, 3, 4, 3, 3, 2, 3, 2, 3, 2, 3, 2, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 4, 2, 3, 4, 3, 3, 4, 2, 4, 3, 3, 2, 2, 2, 2, 2, 3, 2, 3, 3, 3, 3, 3, 3, 4, 3, 2, 2, 3, 4, 3, 2, 4, 3, 3, 3, 3, 2, 3, 4, 4, 3, 2, 3, 3, 4, 3, 2, 2, 3, 3, 3, 3, 2, 3, 3, 2, 4, 2, 2, 2, 2, 3, 3, 3, 3, 4, 3, 2, 2, 2, 3, 4, 2, 4, 3, 3, 3, 3, 2, 2, 4, 3, 4, 4, 3, 3, 3, 3, 3, 3, 4, 3, 3, 3, 4, 4, 3, 4, 3, 3, 4, 4, 4, 4, 4, 3, 4, 3, 4, 4, 4, 2, 2, 2, 3, 4, 3, 3, 3, 3, 2, 3, 3, 2, 3, 2, 2, 2, 2, 4, 3, 4, 3, 3, 3, 4, 4, 3, 3, 4, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 3, 2, 2, 3, 3, 2, 2, 2, 2, 3, 2, 2, 2, 2, 3, 4, 4, 5, 3, 3, 3, 3, 4, 4, 3, 4, 4, 4, 4, 4, 3, 3, 3, 4, 3, 3, 4, 4, 3, 3] IM = 0 index = 0 for item in thedev: flag = 1 for thescore in 
total_acc[index:index+item]: if thescore == 0: flag = 0 if flag == 1: IM += 1 index += item print('the IM is ',IM/len(thedev)) for level in levels: if scores[level]['count'] == 0: continue if etype in ["all", "exec"]: scores[level]['exec'] /= scores[level]['count'] if etype in ["all", "match"]: scores[level]['exact'] /= scores[level]['count'] for type_ in partial_types: if scores[level]['partial'][type_]['acc_count'] == 0: scores[level]['partial'][type_]['acc'] = 0 else: scores[level]['partial'][type_]['acc'] = scores[level]['partial'][type_]['acc'] / \ scores[level]['partial'][type_]['acc_count'] * 1.0 if scores[level]['partial'][type_]['rec_count'] == 0: scores[level]['partial'][type_]['rec'] = 0 else: scores[level]['partial'][type_]['rec'] = scores[level]['partial'][type_]['rec'] / \ scores[level]['partial'][type_]['rec_count'] * 1.0 if scores[level]['partial'][type_]['acc'] == 0 and scores[level]['partial'][type_]['rec'] == 0: scores[level]['partial'][type_]['f1'] = 1 else: scores[level]['partial'][type_]['f1'] = \ 2.0 * scores[level]['partial'][type_]['acc'] * scores[level]['partial'][type_]['rec'] / ( scores[level]['partial'][type_]['rec'] + scores[level]['partial'][type_]['acc']) print_scores(scores, etype) IM = IM/len(thedev) return scores, IM
null
164,093
import json import tqdm from preprocess.parse_sql.get_label_diff import * def get_label(sql,column_len): def get_language_slot(col_slot): from_dict = {1:'One',2:'Two',3:'Three',4:'Four',5:'Five',6:'Six'} def get_sql_label(sql, column_len): schema_label = [[l for l in get_label(sql, column_len) if l != '']] sql_label = list(get_language_slot(schema_label).values()) # add from label if sql['from']['table_units'][0][0] == 'sql': from_label = '[FromSQL]' else: from_num = len(sql['from']['table_units']) from_label = '[From'+from_dict[from_num]+'Table]' # sql label sql_label = [sql_label[0]] + [from_label] + sql_label[1:] return sql_label
null
164,094
STRUCT_KEYWORDS = ["WHERE", "GROUP_BY", "HAVING", "ORDER_BY", "SELECT"]
NEST_KEYWORDS = ["NONE", "OP_SEL"]
UEI_KEYWORDS = ["NONE", "INTERSECT", "UNION", "EXCEPT"]


def get_label_split(label):
    """Split a flat label token string into per-clause segments.

    A segment normally looks like ``"<NEST> <UEI> <STRUCT> tok tok ..."``.
    A NEST keyword always opens a new segment; a UEI keyword opens one unless
    it directly follows a NEST keyword; a STRUCT keyword opens one unless it
    directly follows a NEST or UEI keyword.

    NOTE: "NONE" appears in both NEST_KEYWORDS and UEI_KEYWORDS; since the
    NEST test comes first, a "NONE" token always takes the NEST branch.

    :param label: whitespace-separated label tokens (e.g. "NONE NONE SELECT")
    :return: list of segment strings; an empty label yields [''].

    Fixes over the previous version: removed the dead write-only variable
    ``Continue`` and collapsed the duplicated append/assign branches.
    """
    label_split = []
    temp = ''
    uei_open = False   # last keyword seen was a UEI keyword (not yet consumed)
    nest_open = False  # last keyword seen was a NEST keyword (not yet consumed)
    for idx, tok in enumerate(label.split()):
        if tok in NEST_KEYWORDS:
            # NEST keyword always starts a new segment (except at position 0).
            if idx != 0:
                label_split.append(temp)
            temp = tok
            nest_open = True
        elif tok in UEI_KEYWORDS:
            if nest_open:
                # UEI right after NEST stays in the same segment.
                temp += ' ' + tok
                nest_open = False
            else:
                if idx != 0:
                    label_split.append(temp)
                temp = tok
                uei_open = True
        elif tok in STRUCT_KEYWORDS:
            if nest_open:
                temp += ' ' + tok
                nest_open = False
            elif uei_open:
                temp += ' ' + tok
                uei_open = False
            else:
                # A bare STRUCT keyword starts a new clause segment.
                if idx != 0:
                    label_split.append(temp)
                temp = tok
        else:
            # Ordinary token: extend the current segment.
            temp += ' ' + tok
    label_split.append(temp)
    return label_split
null
164,095
STRUCT_KEYWORDS = ["WHERE", "GROUP_BY", "HAVING", "ORDER_BY", "SELECT"]
NEST_KEYWORDS = ["NONE", "OP_SEL"]
UEI_KEYWORDS = ["NONE", "INTERSECT", "UNION", "EXCEPT"]
ALL_OPS = ["NONE", "NOT_IN", "IN", "BETWEEN", "=", ">", "<", ">=", "<=", "LIKE", "!="]
AGGS = ["NONE", "COUNT", "MAX", "MIN", "SUM", "AVG"]
DASCS = ["NONE", "ASC", "DESC"]
OTHER_KEYWORDS = ["NONE", "LIMIT"]

UEI_dict = {item: idx for idx, item in enumerate(UEI_KEYWORDS)}
NEST_dict = {item: idx for idx, item in enumerate(NEST_KEYWORDS)}
ALL_OPS_dict = {item: idx for idx, item in enumerate(ALL_OPS)}
AGGS_dict = {item: idx for idx, item in enumerate(AGGS)}
DASCS_dict = {item: idx for idx, item in enumerate(DASCS)}
OTHER_KEYWORDS_dict = {item: idx for idx, item in enumerate(OTHER_KEYWORDS)}


def get_label_struct(label_split):
    """Encode each label segment as a 6-slot id vector keyed by its clause keyword.

    Slots: [NEST, UEI, OP, AGG, DASC, LIMIT-ish]; each slot holds the index of
    the matching keyword in its vocabulary (0, i.e. "NONE", when absent).
    A token that belongs to several vocabularies (e.g. "NONE") fills every
    matching slot. Segments without a STRUCT keyword land under the '' key.

    :param label_split: list of segment strings (see get_label_split)
    :return: dict mapping clause keyword -> list of 6-int feature vectors
    """
    label_all = {}
    # (vocabulary -> id map, slot index) pairs applied to every token.
    slot_maps = (
        (NEST_dict, 0),
        (UEI_dict, 1),
        (ALL_OPS_dict, 2),
        (AGGS_dict, 3),
        (DASCS_dict, 4),
        (OTHER_KEYWORDS_dict, 5),
    )
    for segment in label_split:
        feats = [0, 0, 0, 0, 0, 0]
        clause = ''
        for tok in segment.split():
            if tok in STRUCT_KEYWORDS:
                clause = tok
            for vocab, slot in slot_maps:
                if tok in vocab:
                    feats[slot] = vocab[tok]
        label_all.setdefault(clause, []).append(feats)
    return label_all
null
164,096
STRUCT_KEYWORDS = ["WHERE", "GROUP_BY", "HAVING", "ORDER_BY", "SELECT"]
ALL_OPS = ["NOT_IN", "IN", "BETWEEN", "=", ">", "<", ">=", "<=", "LIKE", "!="]
AGGS = ["COUNT", "MAX", "MIN", "SUM", "AVG"]
DASCS = ["ASC", "DESC"]
NEST_KEYWORDS = ["EXCEPT", "UNION", "INTERSECT"]


def get_labels(sql):
    """Derive a per-column label string from a (space-tokenized) SQL query.

    Tokens containing '.' are treated as qualified column references; for each
    such column the function records which clause it appears in (and, per
    clause, some local context: aggregate, comparison op, sort direction, ...).
    Returns a dict mapping lowercased 'table.column' (and 'table.*') to a
    space-joined label string.

    NOTE(review): `skip` is set on malformed input but never read — warnings
    are printed yet the (possibly partial) labels are still returned.
    """
    # Canonicalize multi-word operators/keywords into single tokens, then split.
    sql_tokens = sql.upper().replace("NOT IN", "NOT_IN").replace("> =", ">=").replace("< =", "<=").replace("DISTINCT ", "").replace("GROUP BY", "GROUP_BY").replace("ORDER BY", "ORDER_BY").split(" ")
    columns = {}
    cur_nest = ""     # current nesting context: "", "OP_SEL", or UEI keyword chain
    cur_struct = ""   # current clause keyword (SELECT / WHERE / ...)
    cur_len = len(sql_tokens)
    select_count = 0
    skip = False
    star_table = None
    for i, tok in enumerate(sql_tokens):
        if tok in NEST_KEYWORDS:
            # Set-operation keyword: replaces an empty/OP_SEL context, otherwise chains.
            if cur_nest == "" or cur_nest == "OP_SEL":
                cur_nest = tok
            else:
                cur_nest = cur_nest + " " + tok
        elif tok in STRUCT_KEYWORDS:
            cur_struct = tok
            if tok == "SELECT":
                select_count += 1
                # A second SELECT without a set-op keyword means a nested subquery.
                if select_count > 1 and cur_nest == "":
                    cur_nest = "OP_SEL"
        elif "." in tok:
            tok = tok.lower()
            if ".*" in tok:
                # if star_table is not None:
                #     assert tok == star_table
                star_table = tok
                tok = "*"
            if tok not in columns.keys():
                columns[tok] = []
            # SELECT {COLUMN0}
            # SELECT {COLUMN0} , {COLUMN1}
            # SELECT {AGG0} ( {COLUMN0} )
            if cur_struct == "SELECT":
                if "," == sql_tokens[i-1] or "SELECT" == sql_tokens[i-1]:
                    columns[tok].append(cur_nest + " " + cur_struct)
                elif "(" == sql_tokens[i-1]:
                    # Preceded by '(' -> record the aggregate two tokens back.
                    columns[tok].append(cur_nest + " " + cur_struct + " " + sql_tokens[i-2])
                else:
                    print("\nWarning: unexcepted SELECT format")
                    print("\n========sql: ", sql)
                    skip = True
            # WHERE {COLUMN} {OP} val OR
            # WHERE {COLUMN2} {OP0}
            # WHERE OR {COLUMN2} {OP0}
            # WHERE {COLUMN2} BETWEEN
            elif cur_struct == "WHERE":
                # NOTE(review): this try is only an IndexError guard for i+1;
                # the membership result itself is discarded (pre-existing quirk).
                try:
                    sql_tokens[i+1] in ALL_OPS
                except:
                    continue
                last_tok = sql_tokens[i-1]
                if "OR" == last_tok or (i+3 < cur_len and "OR" == sql_tokens[i+3]):
                    columns[tok].append(cur_nest + " " + cur_struct + " OR " + sql_tokens[i+1])
                elif "WHERE" == last_tok or "AND" == last_tok:
                    columns[tok].append(cur_nest + " " + cur_struct + " " + sql_tokens[i+1])
                else:
                    print("\nWarning: unexcepted WHERE format")
                    print("\n========sql: ", sql)
                    skip = True
            # GROUP BY {COLUMN0} , {COLUMN0}
            elif cur_struct == "GROUP_BY":
                columns[tok].append(cur_nest + " " + cur_struct)
            # HAVING COUNT ( * ) {OP0}
            # HAVING {AGG0} ( {COLUMN2} ) {OP0}
            # having avg ( boxes.value ) > boxes.value
            elif cur_struct == "HAVING":
                last_tok = sql_tokens[i-1]
                if last_tok != "(" and not (sql_tokens[i-2] in AGGS):
                    print("\nWarning: unexcepted HAVING format")
                    print("\n========sql: ", sql)
                    skip = True
                # Column on the right-hand side of a comparison: no label.
                if sql_tokens[i-1] == ">":
                    continue
                # Record aggregate (i-2) and comparison op (i+2).
                columns[tok].append(cur_nest + " " + cur_struct + " " + sql_tokens[i-2] + " " + sql_tokens[i+2])
            # ORDER BY COUNT ( * ) {DASC} LIMIT
            # ORDER BY COUNT ( * ) {DASC}
            # ORDER BY {COLUMN1} {DASC} LIMIT
            # ORDER BY {COLUMN1} LIMIT
            # ORDER BY {COLUMN1} , {COLUMN1} {DASC} LIMIT
            # ORDER BY {COLUMN1} {DASC}   if no DASC then is ASC
            # order_by max ( station.lat ) desc
            elif cur_struct == "ORDER_BY":
                last_tok = sql_tokens[i-1]
                if last_tok == "(":
                    # Aggregated sort key: AGG at i-2, direction/limit after ')'.
                    limit_tok = ""
                    dasc_tok = "ASC"
                    if i+2 < cur_len and sql_tokens[i+2] in DASCS:
                        dasc_tok = sql_tokens[i+2]
                    elif i+3 < cur_len and sql_tokens[i+3] == "LIMIT":
                        limit_tok = "LIMIT"
                    columns[tok].append(cur_nest + " " + cur_struct + " " + sql_tokens[i-2] + " " + dasc_tok + " " + limit_tok)
                elif last_tok == "ORDER_BY" or last_tok == ",":
                    dasc_tok = "ASC"
                    limit_tok = ""
                    # small dirty pass
                    if i+1 < cur_len and sql_tokens[i+1] in DASCS:
                        dasc_tok = sql_tokens[i+1]
                        if i+2 < cur_len and sql_tokens[i+2] == "LIMIT":
                            limit_tok = "LIMIT"
                    elif i+1 < cur_len and sql_tokens[i+1] == "LIMIT":
                        limit_tok = "LIMIT"
                    columns[tok].append(cur_nest + " " + cur_struct + " " + dasc_tok + " " + limit_tok)
                else:
                    print("\n------------Warning: unexcepted COLUMN label format")
                    print("\n========sql: ", sql)
                    skip = True
    # Flatten the per-column label lists into single strings.
    column_labels = {}
    if star_table is not None:
        column_labels[star_table] = "SELECT"
    for col, labels in columns.items():
        label_str = " ".join([l.strip() for l in labels])
        column_labels[col] = label_str
    return column_labels
null
164,097
import os, json, pickle, argparse, sys, time
from asdl.asdl import ASDLGrammar
from asdl.transition_system import TransitionSystem
from asdl.action_info import get_action_infos
from preprocess.common_utils import Preprocessor


def process_tables(processor, tables_list, output_path=None, verbose=False):
    """Preprocess every database schema in ``tables_list``.

    :param processor: object exposing ``preprocess_database(entry, verbose=...)``
    :param tables_list: iterable of raw schema dicts, each with a ``db_id`` key
    :param output_path: optional path; when given, the result dict is pickled there
    :param verbose: forwarded to the processor; also prints a banner per database
    :return: dict mapping db_id -> preprocessed database
    """
    tables = {}
    for db_entry in tables_list:
        db_id = db_entry['db_id']
        if verbose:
            print('*************** Processing database %s **************' % (db_id))
        tables[db_id] = processor.preprocess_database(db_entry, verbose=verbose)
    print('In total, process %d databases .' % (len(tables)))
    if output_path is not None:
        pickle.dump(tables, open(output_path, 'wb'))
    return tables
null
164,098
import os, json, pickle, argparse, sys, time
# Restored stdlib imports: OrderedDict and chain are used by ASDLGrammar below
# but were dropped by the extraction.
from collections import OrderedDict
from itertools import chain
from asdl.asdl import ASDLGrammar
from asdl.transition_system import TransitionSystem
from asdl.action_info import get_action_infos
from preprocess.common_utils import Preprocessor


def process_example(processor, entry, db, trans, verbose=False):
    """Preprocess one example and attach its AST and action sequence.

    :param processor: pipeline object (question preprocessing / schema linking)
    :param entry: raw example dict containing at least the 'sql' field
    :param db: preprocessed database the example belongs to
    :param trans: TransitionSystem used to derive the action sequence
    :return: the same entry dict, augmented with 'ast' and 'actions'
    """
    # preprocess raw tokens, schema linking and subgraph extraction
    entry = processor.pipeline(entry, db, verbose=verbose)
    # generate target output actions
    ast = trans.surface_code_to_ast(entry['sql'])
    actions = trans.get_actions(ast)
    entry['ast'] = ast
    entry['actions'] = get_action_infos(tgt_actions=actions)
    return entry


class ASDLGrammar(object):
    """
    Collection of types, constructors and productions.

    NOTE: the @property / @staticmethod decorators below were dropped by the
    extraction and are restored here — __init__ reads self.productions /
    self.types / self.fields as attributes, and from_filepath is invoked on
    the class (ASDLGrammar.from_filepath(...)) without an instance.
    """
    def __init__(self, productions, file_path):
        # productions are indexed by their head types
        file_name = os.path.basename(file_path)
        grammar_name = file_name[:file_name.index('.txt')] if '.txt' in file_name else file_name
        self._grammar_name = grammar_name
        self._productions = OrderedDict()
        self._constructor_production_map = dict()
        for prod in productions:
            if prod.type not in self._productions:
                self._productions[prod.type] = list()
            self._productions[prod.type].append(prod)
            self._constructor_production_map[prod.constructor.name] = prod
        self.root_type = productions[0].type
        # number of constructors
        self.size = sum(len(head) for head in self._productions.values())
        # get entities to their ids map
        self.prod2id = {prod: i for i, prod in enumerate(self.productions)}
        self.type2id = {type: i for i, type in enumerate(self.types)}
        self.field2id = {field: i for i, field in enumerate(self.fields)}
        self.id2prod = {i: prod for i, prod in enumerate(self.productions)}
        self.id2type = {i: type for i, type in enumerate(self.types)}
        self.id2field = {i: field for i, field in enumerate(self.fields)}

    def __len__(self):
        return self.size

    @property
    def productions(self):
        # Deterministic ordering: sort by repr so ids are stable across runs.
        return sorted(chain.from_iterable(self._productions.values()), key=lambda x: repr(x))

    def __getitem__(self, datum):
        # Look up productions by type name (str) or ASDLType instance.
        if isinstance(datum, str):
            return self._productions[ASDLType(datum)]
        elif isinstance(datum, ASDLType):
            return self._productions[datum]

    def get_prod_by_ctr_name(self, name):
        return self._constructor_production_map[name]

    @property
    def types(self):
        # Lazily collect every head/field type, cached on first access.
        if not hasattr(self, '_types'):
            all_types = set()
            for prod in self.productions:
                all_types.add(prod.type)
                all_types.update(map(lambda x: x.type, prod.constructor.fields))
            self._types = sorted(all_types, key=lambda x: x.name)
        return self._types

    @property
    def fields(self):
        # Lazily collect every constructor field, cached on first access.
        if not hasattr(self, '_fields'):
            all_fields = set()
            for prod in self.productions:
                all_fields.update(prod.constructor.fields)
            self._fields = sorted(all_fields, key=lambda x: (x.name, x.type.name, x.cardinality))
        return self._fields

    @property
    def primitive_types(self):
        return filter(lambda x: isinstance(x, ASDLPrimitiveType), self.types)

    @property
    def composite_types(self):
        return filter(lambda x: isinstance(x, ASDLCompositeType), self.types)

    def is_composite_type(self, asdl_type):
        return asdl_type in self.composite_types

    def is_primitive_type(self, asdl_type):
        return asdl_type in self.primitive_types

    @staticmethod
    def from_filepath(file_path):
        """Parse an ASDL grammar text file into an ASDLGrammar instance."""
        def _parse_field_from_text(_text):
            # "type name", "type* name" (multiple) or "type? name" (optional)
            d = _text.strip().split(' ')
            name = d[1].strip()
            type_str = d[0].strip()
            cardinality = 'single'
            if type_str[-1] == '*':
                type_str = type_str[:-1]
                cardinality = 'multiple'
            elif type_str[-1] == '?':
                type_str = type_str[:-1]
                cardinality = 'optional'
            if type_str in primitive_type_names:
                return Field(name, ASDLPrimitiveType(type_str), cardinality=cardinality)
            else:
                return Field(name, ASDLCompositeType(type_str), cardinality=cardinality)

        def _parse_constructor_from_text(_text):
            # "Name(field, field, ...)" or bare "Name"; empty name -> anonymous
            _text = _text.strip()
            fields = None
            if '(' in _text:
                name = _text[:_text.find('(')]
                field_blocks = _text[_text.find('(') + 1:_text.find(')')].split(',')
                fields = map(_parse_field_from_text, field_blocks)
            else:
                name = _text
            if name == '':
                name = None
            return ASDLConstructor(name, fields)

        with open(file_path, 'r') as inf:
            text = inf.read()
        lines = remove_comment(text).split('\n')
        lines = list(map(lambda l: l.strip(), lines))
        lines = list(filter(lambda l: l, lines))
        line_no = 0
        # first line is always the primitive types
        primitive_type_names = list(map(lambda x: x.strip(), lines[line_no].split(',')))
        line_no += 1
        all_productions = list()
        while True:
            # "type = Ctor1 | Ctor2 ..." possibly continued on "| Ctor" lines
            type_block = lines[line_no]
            type_name = type_block[:type_block.find('=')].strip()
            constructors_blocks = type_block[type_block.find('=') + 1:].split('|')
            i = line_no + 1
            while i < len(lines) and lines[i].strip().startswith('|'):
                t = lines[i].strip()
                cont_constructors_blocks = t[1:].split('|')
                constructors_blocks.extend(cont_constructors_blocks)
                i += 1
            constructors_blocks = filter(lambda x: x and x.strip(), constructors_blocks)
            # parse type name
            new_type = ASDLPrimitiveType(type_name) if type_name in primitive_type_names else ASDLCompositeType(type_name)
            constructors = map(_parse_constructor_from_text, constructors_blocks)
            productions = list(map(lambda c: ASDLProduction(new_type, c), constructors))
            all_productions.extend(productions)
            line_no = i
            if line_no == len(lines):
                break
        grammar = ASDLGrammar(all_productions, file_path)
        return grammar


class TransitionSystem(object):
    """Maps ASDL syntax trees to/from action sequences; subclassed per language."""

    def __init__(self, grammar):
        self.grammar = grammar

    def get_actions(self, asdl_ast):
        """ generate action sequence given the ASDL Syntax Tree """
        actions = []
        parent_action = ApplyRuleAction(asdl_ast.production)
        actions.append(parent_action)
        for field in asdl_ast.fields:
            # is a composite field
            if self.grammar.is_composite_type(field.type):
                if field.cardinality == 'single':
                    field_actions = self.get_actions(field.value)
                else:
                    field_actions = []
                    if field.value is not None:
                        if field.cardinality == 'multiple':
                            for val in field.value:
                                cur_child_actions = self.get_actions(val)
                                field_actions.extend(cur_child_actions)
                        elif field.cardinality == 'optional':
                            field_actions = self.get_actions(field.value)
                    # if an optional field is filled, then do not need Reduce action
                    if field.cardinality == 'multiple' or field.cardinality == 'optional' and not field_actions:
                        field_actions.append(ReduceAction())
            else:  # is a primitive field
                field_actions = self.get_primitive_field_actions(field)
                # if an optional field is filled, then do not need Reduce action
                if field.cardinality == 'multiple' or field.cardinality == 'optional' and not field_actions:
                    # reduce action
                    field_actions.append(ReduceAction())
            actions.extend(field_actions)
        return actions

    def tokenize_code(self, code, mode):
        raise NotImplementedError

    def compare_ast(self, hyp_ast, ref_ast):
        raise NotImplementedError

    def ast_to_surface_code(self, asdl_ast):
        raise NotImplementedError

    def surface_code_to_ast(self, code):
        raise NotImplementedError

    def get_primitive_field_actions(self, realized_field):
        raise NotImplementedError

    def get_valid_continuation_types(self, hyp):
        """Return the tuple of action classes legal at the hypothesis frontier."""
        if hyp.tree:
            if self.grammar.is_composite_type(hyp.frontier_field.type):
                if hyp.frontier_field.cardinality == 'single':
                    return ApplyRuleAction,
                else:  # optional, multiple
                    return ApplyRuleAction, ReduceAction
            else:
                if hyp.frontier_field.cardinality == 'single':
                    return GenTokenAction,
                elif hyp.frontier_field.cardinality == 'optional':
                    if hyp._value_buffer:
                        return GenTokenAction,
                    else:
                        return GenTokenAction, ReduceAction
                else:
                    return GenTokenAction, ReduceAction
        else:
            return ApplyRuleAction,

    def get_valid_continuating_productions(self, hyp):
        if hyp.tree:
            if self.grammar.is_composite_type(hyp.frontier_field.type):
                return self.grammar[hyp.frontier_field.type]
            else:
                raise ValueError
        else:
            return self.grammar[self.grammar.root_type]

    @staticmethod
    def get_class_by_lang(lang):
        # Called on the class (TransitionSystem.get_class_by_lang('sql')),
        # hence @staticmethod.
        if lang == 'sql':
            from asdl.sql.sql_transition_system import SQLTransitionSystem
        else:
            raise ValueError('unknown language %s' % lang)
        return SQLTransitionSystem


GRAMMAR_FILEPATH = 'asdl/sql/grammar/sql_asdl_v2.txt'


def process_dataset(processor, dataset, tables, output_path=None, skip_large=False, verbose=False):
    """Preprocess a full dataset: schema linking plus AST/action generation.

    :param skip_large: when True, drop examples whose database has >100 columns
    :param output_path: optional path; result list is pickled there when given
    :return: list of processed example dicts
    """
    from utils.constants import GRAMMAR_FILEPATH
    grammar = ASDLGrammar.from_filepath(GRAMMAR_FILEPATH)
    trans = TransitionSystem.get_class_by_lang('sql')(grammar)
    processed_dataset = []
    for idx, entry in enumerate(dataset):
        if skip_large and len(tables[entry['db_id']]['column_names']) > 100:
            continue
        if verbose:
            print('*************** Processing %d-th sample **************' % (idx))
        entry = process_example(processor, entry, tables[entry['db_id']], trans, verbose=verbose)
        processed_dataset.append(entry)
    print('In total, process %d samples , skip %d extremely large databases.' % (len(processed_dataset), len(dataset) - len(processed_dataset)))
    if output_path is not None:
        # serialize preprocessed dataset
        pickle.dump(processed_dataset, open(output_path, 'wb'))
    return processed_dataset
null
164,099
import os, sqlite3
import numpy as np
import stanza, torch
from nltk.corpus import stopwords
from itertools import product, combinations
from utils.constants import MAX_RELATIVE_DIST
from transformers import RobertaTokenizer
import nltk


def is_number(s):
    """Return True iff ``s`` can be parsed by ``float()``.

    Fix: also catch TypeError so that non-string/number inputs (None, lists,
    ...) return False instead of raising, matching the predicate's contract.
    """
    try:
        float(s)
        return True
    except (TypeError, ValueError):
        return False
null
164,100
import os, sqlite3
import numpy as np
import stanza, torch
from nltk.corpus import stopwords
from itertools import product, combinations
from utils.constants import MAX_RELATIVE_DIST
from transformers import RobertaTokenizer
import nltk

The provided code snippet includes necessary dependencies for implementing the `quote_normalization` function. Write a Python function `def quote_normalization(question)` to solve the following problem:
Normalize all usage of quotation marks into a separate \"
Here is the function:


def quote_normalization(question):
    """ Normalize all usage of quotation marks into a separate \" """
    # All quote characters (ASCII, backtick, curly, and doubled variants)
    # are mapped to a standalone '"' token; quoted content becomes its own token.
    new_question, quotation_marks = [], ["'", '"', '`', '‘', '’', '“', '”', '``', "''", "‘‘", "’’"]
    for idx, tok in enumerate(question):
        # Token fully wrapped in quotes, e.g. 'foo' -> ", foo, "
        if len(tok) > 2 and tok[0] in quotation_marks and tok[-1] in quotation_marks:
            new_question += ["\"", tok[1:-1], "\""]
        # Leading quote only, e.g. 'foo -> ", foo
        elif len(tok) > 2 and tok[0] in quotation_marks:
            new_question += ["\"", tok[1:]]
        # Trailing quote only, e.g. foo' -> foo, "
        elif len(tok) > 2 and tok[-1] in quotation_marks:
            new_question += [tok[:-1], "\"" ]
        # Token is itself a quote mark.
        elif tok in quotation_marks:
            new_question.append("\"")
        elif len(tok) == 2 and tok[0] in quotation_marks:
            # special case: the length of entity value is 1
            # Only split when the closing quote follows as the next token.
            if idx + 1 < len(question) and question[idx + 1] in quotation_marks:
                new_question += ["\"", tok[1]]
            else:
                new_question.append(tok)
        else:
            new_question.append(tok)
    return new_question
Normalize all usage of quotation marks into a separate \"
164,103
import argparse
import os
import sys
import pickle
import json
import shutil
import sqlparse


def read_database_schema(database_schema, schema_tokens, column_names, database_schemas_dict):
    """Register one database schema in the three accumulator dicts.

    :param database_schema: raw schema dict with 'db_id', 'column_names_original'
        (list of [table_id, column_name]) and 'table_names_original'
    :param schema_tokens: db_id -> lowercased surface forms ('table.column',
        '*', plus a 'table.*' entry per table); mutated in place
    :param column_names: db_id -> lowercased bare column names; mutated in place
    :param database_schemas_dict: db_id -> raw schema dict; mutated in place
    :return: the three (mutated) dicts, for convenience

    Cleanup over the previous version: removed the unused ``enumerate`` index
    and the dead ``table_names`` local.
    """

    def get_schema_tokens(table_schema):
        # Build the surface-form and bare-name lists for one schema.
        column_names_surface_form = []
        bare_column_names = []
        column_names_original = table_schema['column_names_original']
        table_names_original = table_schema['table_names_original']
        for table_id, column_name in column_names_original:
            if table_id >= 0:
                table_name = table_names_original[table_id]
                column_name_surface_form = '{}.{}'.format(table_name, column_name)
            else:
                # this is just *
                column_name_surface_form = column_name
            column_names_surface_form.append(column_name_surface_form.lower())
            bare_column_names.append(column_name.lower())
        # also add table_name.* for every table
        for table_name in table_names_original:
            column_names_surface_form.append('{}.*'.format(table_name.lower()))
        return column_names_surface_form, bare_column_names

    database_id = database_schema['db_id']
    database_schemas_dict[database_id] = database_schema
    schema_tokens[database_id], column_names[database_id] = get_schema_tokens(database_schema)
    return schema_tokens, column_names, database_schemas_dict
null
164,104
import argparse
import os
import sys
import pickle
import json
import shutil
import sqlparse


def get_candidate_tables(format_sql, schema):
    """Collect the table ids referenced (as 'table.column' tokens) in a SQL string.

    Returns (sorted list of table indices into schema['table_names_original'],
    the original table-name list).
    """
    candidate_tables = []
    tokens = format_sql.split()
    for i, token in enumerate(tokens):
        if '.' in token:
            table_name = token.split('.')[0]
            candidate_tables.append(table_name)
    candidate_tables = list(set(candidate_tables))
    candidate_tables.sort()
    table_names_original = [table_name.lower() for table_name in schema['table_names_original']]
    candidate_tables_id = [table_names_original.index(table_name) for table_name in candidate_tables]
    assert -1 not in candidate_tables_id
    table_names_original = schema['table_names_original']
    return candidate_tables_id, table_names_original


def remove_from_with_join(format_sql_2):
    """Strip FROM...JOIN clauses from a reindented SQL string.

    Replaces 't1'/'t2' aliases by their real table names in the preceding
    lines and records which tables each FROM clause used.
    Returns (list of remaining lines, flat used-table list, per-clause lists).
    """
    used_tables_list = []
    format_sql_3 = []
    table_to_name = {}   # alias -> real table name for the current FROM clause
    table_list = []
    old_table_to_name = {}
    old_table_list = []
    for sub_sql in format_sql_2.split('\n'):
        if 'select ' in sub_sql:
            # only replace alias: t1 -> table_name, t2 -> table_name, etc...
            if len(table_list) > 0:
                for i in range(len(format_sql_3)):
                    for table, name in table_to_name.items():
                        format_sql_3[i] = format_sql_3[i].replace(table, name)
                old_table_list = table_list
                old_table_to_name = table_to_name
                table_to_name = {}
                table_list = []
            format_sql_3.append(sub_sql)
        elif sub_sql.startswith('from'):
            new_sub_sql = None
            sub_sql_tokens = sub_sql.split()
            for t_i, t in enumerate(sub_sql_tokens):
                if t == 'as':
                    # "table as tX": record the alias mapping.
                    table_to_name[sub_sql_tokens[t_i + 1]] = sub_sql_tokens[t_i - 1]
                    table_list.append(sub_sql_tokens[t_i - 1])
                elif t == ')' and new_sub_sql is None:
                    # new_sub_sql keeps some trailing parts after ')'
                    new_sub_sql = ' '.join(sub_sql_tokens[t_i:])
            if len(table_list) > 0:
                # if it's a from clause with join
                if new_sub_sql is not None:
                    format_sql_3.append(new_sub_sql)
                used_tables_list.append(table_list)
            else:
                # if it's a from clause without join: inherit the previous
                # clause's alias mapping (correlated subqueries).
                table_list = old_table_list
                table_to_name = old_table_to_name
                assert 'join' not in sub_sql
                if new_sub_sql is not None:
                    sub_sub_sql = sub_sql[:-len(new_sub_sql)].strip()
                    assert len(sub_sub_sql.split()) == 2
                    used_tables_list.append([sub_sub_sql.split()[1]])
                    format_sql_3.append(sub_sub_sql)
                    format_sql_3.append(new_sub_sql)
                elif 'join' not in sub_sql:
                    assert len(sub_sql.split()) == 2 or len(sub_sql.split()) == 1
                    if len(sub_sql.split()) == 2:
                        used_tables_list.append([sub_sql.split()[1]])
                    format_sql_3.append(sub_sql)
                else:
                    print('bad from clause in remove_from_with_join')
                    exit()
        else:
            format_sql_3.append(sub_sql)
    # Flush alias replacement for the trailing clause.
    if len(table_list) > 0:
        for i in range(len(format_sql_3)):
            for table, name in table_to_name.items():
                format_sql_3[i] = format_sql_3[i].replace(table, name)
    used_tables = []
    for t in used_tables_list:
        for tt in t:
            used_tables.append(tt)
    used_tables = list(set(used_tables))
    return format_sql_3, used_tables, used_tables_list


def remove_from_without_join(format_sql_3, column_names, schema_tokens):
    """Drop single-table FROM lines, qualifying bare column tokens with that table."""
    format_sql_4 = []
    table_name = None
    for sub_sql in format_sql_3.split('\n'):
        if 'select ' in sub_sql:
            # New SELECT: first qualify columns of the lines buffered so far
            # using the table of the most recent FROM clause.
            if table_name:
                for i in range(len(format_sql_4)):
                    tokens = format_sql_4[i].split()
                    for ii, token in enumerate(tokens):
                        if token in column_names and tokens[ii - 1] != '.':
                            if (ii + 1 < len(tokens) and tokens[ii + 1] != '.' and tokens[
                                ii + 1] != '(') or ii + 1 == len(tokens):
                                if '{}.{}'.format(table_name, token) in schema_tokens:
                                    tokens[ii] = '{} . {}'.format(table_name, token)
                    format_sql_4[i] = ' '.join(tokens)
            format_sql_4.append(sub_sql)
        elif sub_sql.startswith('from'):
            # Remember the table name; the FROM line itself is dropped.
            sub_sql_tokens = sub_sql.split()
            if len(sub_sql_tokens) == 1:
                table_name = None
            elif len(sub_sql_tokens) == 2:
                table_name = sub_sql_tokens[1]
            else:
                print('bad from clause in remove_from_without_join')
                print(format_sql_3)
                exit()
        else:
            format_sql_4.append(sub_sql)
    # Qualify columns of the final (unflushed) clause.
    if table_name:
        for i in range(len(format_sql_4)):
            tokens = format_sql_4[i].split()
            for ii, token in enumerate(tokens):
                if token in column_names and tokens[ii - 1] != '.':
                    if (ii + 1 < len(tokens) and tokens[ii + 1] != '.' and tokens[ii + 1] != '(') or ii + 1 == len(
                            tokens):
                        if '{}.{}'.format(table_name, token) in schema_tokens:
                            tokens[ii] = '{} . {}'.format(table_name, token)
            format_sql_4[i] = ' '.join(tokens)
    return format_sql_4


def add_table_name(format_sql_3, used_tables, column_names, schema_tokens):
    """Qualify bare column tokens with their owning table from ``used_tables``."""
    # If just one table used, easy case, replace all column_name -> table_name.column_name
    if len(used_tables) == 1:
        table_name = used_tables[0]
        format_sql_4 = []
        for sub_sql in format_sql_3.split('\n'):
            if sub_sql.startswith('from'):
                format_sql_4.append(sub_sql)
                continue
            tokens = sub_sql.split()
            for ii, token in enumerate(tokens):
                if token in column_names and tokens[ii - 1] != '.':
                    if (ii + 1 < len(tokens) and tokens[ii + 1] != '.' and tokens[ii + 1] != '(') or ii + 1 == len(
                            tokens):
                        if '{}.{}'.format(table_name, token) in schema_tokens:
                            tokens[ii] = '{} . {}'.format(table_name, token)
            format_sql_4.append(' '.join(tokens))
        return format_sql_4

    def get_table_name_for(token):
        # Unique owner -> its name; none -> 'table'; ambiguous -> None (skip).
        table_names = []
        for table_name in used_tables:
            if '{}.{}'.format(table_name, token) in schema_tokens:
                table_names.append(table_name)
        if len(table_names) == 0:
            return 'table'
        if len(table_names) > 1:
            return None
        else:
            return table_names[0]

    format_sql_4 = []
    for sub_sql in format_sql_3.split('\n'):
        if sub_sql.startswith('from'):
            format_sql_4.append(sub_sql)
            continue
        tokens = sub_sql.split()
        for ii, token in enumerate(tokens):
            # skip *
            if token == '*':
                continue
            if token in column_names and tokens[ii - 1] != '.':
                if (ii + 1 < len(tokens) and tokens[ii + 1] != '.' and tokens[ii + 1] != '(') or ii + 1 == len(tokens):
                    table_name = get_table_name_for(token)
                    if table_name:
                        tokens[ii] = '{} . {}'.format(table_name, token)
        format_sql_4.append(' '.join(tokens))
    return format_sql_4


def check_oov(format_sql_final, output_vocab, schema_tokens):
    """Raise if any token is in neither the schema tokens nor the output vocab."""
    for sql_tok in format_sql_final.split():
        if not (sql_tok in schema_tokens or sql_tok in output_vocab):
            print('OOV!', sql_tok)
            raise Exception('OOV')


def normalize_space(format_sql):
    """Normalize whitespace/line-breaks of sqlparse output into one clause per line."""
    # Pad punctuation with spaces and collapse runs of whitespace, per line.
    format_sql_1 = [' '.join(
        sub_sql.strip().replace(',', ' , ').replace('.', ' . ').replace('(', ' ( ').replace(')', ' ) ').split())
        for sub_sql in format_sql.split('\n')]
    format_sql_1 = '\n'.join(format_sql_1)
    # Re-attach joins/conditions to their clause line; break before where/intersect.
    format_sql_2 = format_sql_1.replace('\njoin', ' join').replace(',\n', ', ').replace(' where', '\nwhere').replace(
        ' intersect', '\nintersect').replace('\nand', ' and').replace('order by t2 .\nstart desc',
                                                                      'order by t2 . start desc')
    # Undo spurious breaks sqlparse inserts before identifiers that look like
    # SQL keywords (columns named operator/start/drop/...).
    format_sql_2 = format_sql_2.replace('select\noperator', 'select operator').replace('select\nconstructor',
                                                                                       'select constructor').replace(
        'select\nstart', 'select start').replace('select\ndrop', 'select drop').replace('select\nwork',
                                                                                        'select work').replace(
        'select\ngroup', 'select group').replace('select\nwhere_built', 'select where_built').replace('select\norder',
                                                                                                      'select order').replace(
        'from\noperator', 'from operator').replace('from\nforward', 'from forward').replace('from\nfor',
                                                                                            'from for').replace(
        'from\ndrop', 'from drop').replace('from\norder', 'from order').replace('.\nstart', '. start').replace(
        '.\norder', '. order').replace('.\noperator', '. operator').replace('.\nsets', '. sets').replace(
        '.\nwhere_built', '. where_built').replace('.\nwork', '. work').replace('.\nconstructor',
                                                                                '. constructor').replace('.\ngroup',
                                                                                                         '. group').replace(
        '.\nfor', '. for').replace('.\ndrop', '. drop').replace('.\nwhere', '. where')
    # Canonical single-token keywords.
    format_sql_2 = format_sql_2.replace('group by', 'group_by').replace('order by', 'order_by').replace('! =',
                                                                                                        '!=').replace(
        'limit value', 'limit_value')
    return format_sql_2


def normalize_final_sql(format_sql_5):
    """Flatten the line-split SQL into one canonical single-line string."""
    format_sql_final = format_sql_5.replace('\n', ' ').replace(' . ', '.').replace('group by', 'group_by').replace(
        'order by', 'order_by').replace('! =', '!=').replace('limit value', 'limit_value')
    # normalize two bad sqls
    if 't1' in format_sql_final or 't2' in format_sql_final or 't3' in format_sql_final or 't4' in format_sql_final:
        format_sql_final = format_sql_final.replace('t2.dormid', 'dorm.dormid')
    # This is the failure case of remove_from_without_join()
    format_sql_final = format_sql_final.replace(
        'select city.city_name where city.state_name in ( select state.state_name where state.state_name in ( select river.traverse where river.river_name = value ) and state.area = ( select min ( state.area ) where state.state_name in ( select river.traverse where river.river_name = value ) ) ) order_by population desc limit_value',
        'select city.city_name where city.state_name in ( select state.state_name where state.state_name in ( select river.traverse where river.river_name = value ) and state.area = ( select min ( state.area ) where state.state_name in ( select river.traverse where river.river_name = value ) ) ) order_by city.population desc limit_value')
    return format_sql_final


def parse_sql(sql_string, column_names, output_vocab, schema_tokens, schema):
    """Full pipeline: reindent, strip FROM clauses, qualify columns, normalize.

    Raises (via check_oov) when the result contains a token outside both the
    schema tokens and the output vocabulary; returns the canonical SQL string.
    NOTE(review): num_from / num_select / failure are computed but never used.
    """
    format_sql = sqlparse.format(sql_string, reindent=True)
    format_sql_2 = normalize_space(format_sql)
    num_from = sum([1 for sub_sql in format_sql_2.split('\n') if sub_sql.startswith('from')])
    num_select = format_sql_2.count('select ') + format_sql_2.count('select\n')
    format_sql_3, used_tables, used_tables_list = remove_from_with_join(format_sql_2)
    format_sql_3 = '\n'.join(format_sql_3)
    format_sql_4 = add_table_name(format_sql_3, used_tables, column_names, schema_tokens)
    format_sql_4 = '\n'.join(format_sql_4)
    format_sql_5 = remove_from_without_join(format_sql_4, column_names, schema_tokens)
    format_sql_5 = '\n'.join(format_sql_5)
    format_sql_final = normalize_final_sql(format_sql_5)
    candidate_tables_id, table_names_original = get_candidate_tables(format_sql_final, schema)
    failure = False
    if len(candidate_tables_id) != len(used_tables):
        failure = True
    check_oov(format_sql_final, output_vocab, schema_tokens)
    return format_sql_final
null
164,105
import argparse, os, sys, pickle, json from collections import Counter def construct_vocab_from_dataset(*data_paths, table_path='data/table_s.bin', mwf=4, reference_file=None, output_path=None, sep='\t'): words = [] tables = pickle.load(open(table_path, 'rb')) for fp in data_paths: dataset = pickle.load(open(fp, 'rb')) for ex in dataset: words.extend(ex['processed_question_toks']) db = tables[ex['db_id']] words.extend(['table'] * len(db['table_names'])) words.extend(db['column_types']) for c in db['processed_column_toks']: words.extend(c) for t in db['processed_table_toks']: words.extend(t) cnt = Counter(words) vocab = sorted(list(cnt.items()), key=lambda x: - x[1]) glove_vocab = set() with open(reference_file, 'r', encoding='utf-8') as inf: for line in inf: line = line.strip() if line == '': continue glove_vocab.add(line) oov_words, oov_but_freq_words = set(), [] for w, c in vocab: if w not in glove_vocab: oov_words.add(w) if c >= mwf: oov_but_freq_words.append((w, c)) print('Out of glove vocabulary size: %d\nAmong them, %d words occur equal or more than %d times in training dataset.' % (len(oov_words), len(oov_but_freq_words), mwf)) with open(output_path, 'w') as of: # first serialize oov but frequent words, allowing fine-tune them during training for w, c in oov_but_freq_words: of.write(w + sep + str(c) + '\n') # next serialize words in both train vocab and glove vocab according to decreasing frequency for w, c in vocab: if w not in oov_words: of.write(w + sep + str(c) + '\n') return len(vocab)
null
164,108
import sys, os, time, json, gc, pickle from argparse import Namespace from utils.args import init_args from utils.hyperparams import hyperparam_path from utils.initialization import * from utils.example import Example from utils.batch import Batch from utils.optimization import set_optimizer from model.model_utils import Registrable from model.model_constructor import * from preprocess.parse_sql.schema import * from preprocess.parse_sql.parse import get_label args = init_args(sys.argv[1:]) device = set_torch_device(args.device) if args.read_model_path: params = json.load(open(os.path.join(args.read_model_path, 'params.json')), object_hook=lambda d: Namespace(**d)) params.lazy_load = True else: params = args train_dataset = Example.load_dataset('train_electra', label) dev_dataset = pickle.load(open('data/dev_electra.lgesql.bin', 'rb')) args.word_vocab, args.relation_num = len(Example.word_vocab), len(Example.relation_vocab) model = Registrable.by_name('text2sql')(params, sql_trans).to(device) if args.read_model_path: check_point = torch.load(open(os.path.join(args.read_model_path, 'model_IM.bin'), 'rb'), map_location=device) model.load_state_dict(check_point['model']) logger.info("Load saved model from path: %s" % (args.read_model_path)) else: json.dump(vars(params), open(os.path.join(exp_path, 'params.json'), 'w'), indent=4) if params.plm is None: ratio = Example.word2vec.load_embeddings(model.encoder.input_layer.word_embed, Example.word_vocab, device=device) logger.info("Init model and word embedding layer with a coverage %.2f" % (ratio)) if not args.testing: num_training_steps = ((len(train_dataset) + args.batch_size - 1) // args.batch_size) * args.max_epoch num_warmup_steps = int(num_training_steps * args.warmup_ratio) fgm = FGM(model,epsilon=1,emb_name='word_embeddings.') logger.info('Total training steps: %d;\t Warmup steps: %d' % (num_training_steps, num_warmup_steps)) optimizer, scheduler = set_optimizer(model, args, num_warmup_steps, num_training_steps) 
start_epoch, nsamples, best_result = 0, len(train_dataset), {'dev_acc': 0.,'IM': 0.} train_index, step_size = np.arange(nsamples), args.batch_size // args.grad_accumulate # if args.read_model_path and args.load_optimizer: # optimizer.load_state_dict(check_point['optim']) # scheduler.load_state_dict(check_point['scheduler']) # start_epoch = check_point['epoch'] + 1 logger.info('Start training ......') for i in range(start_epoch, args.max_epoch): start_time = time.time() epoch_loss, epoch_gp_loss, count = 0, 0, 0 np.random.shuffle(train_index) model.train() for j in range(0, nsamples, step_size): count += 1 cur_dataset = [train_dataset[k] for k in train_index[j: j + step_size]] current_batch = Batch.from_example_list(cur_dataset, device, train=True, smoothing=args.smoothing) loss, gp_loss = model(current_batch) # see utils/batch.py for batch elements epoch_loss = epoch_loss + loss.item() epoch_gp_loss = epoch_gp_loss + gp_loss.item() # print("Minibatch loss: %.4f" % (loss.item())) loss = loss + gp_loss loss.backward() # fgm.attack() # loss_adv, gp_loss_adv = model(current_batch) # loss_adv = loss_adv + gp_loss_adv # loss_adv.backward() # fgm.restore() if count == args.grad_accumulate or j + step_size >= nsamples: count = 0 model.pad_embedding_grad_zero() optimizer.step() scheduler.step() optimizer.zero_grad() logger.info('Training: \tEpoch: %d\tTime: %.4f\tTraining loss: %.4f/%.4f' % (i, time.time() - start_time, epoch_loss, epoch_gp_loss)) torch.cuda.empty_cache() gc.collect() start_time = time.time() dev_acc,IM = dev_decode('dev', os.path.join(exp_path, 'dev.iter' + str(i)), acc_type='sql') logger.info('Evaluation: \tEpoch: %d\tTime: %.4f\tDev acc: %.4f\tIM: %.4f' % (i, time.time() - start_time, dev_acc, IM)) if i < args.eval_after_epoch: # avoid unnecessary evaluation continue if dev_acc > best_result['dev_acc']: best_result['dev_acc'], best_result['iter'] = dev_acc, i torch.save({ 'epoch': i, 'model': model.state_dict(), 'optim': optimizer.state_dict(), 
'scheduler': scheduler.state_dict() }, open(os.path.join(exp_path, 'model.bin'), 'wb')) logger.info('NEW BEST MODEL: \tEpoch: %d\tDev acc: %.4f' % (i, dev_acc)) if IM > best_result['IM']: best_result['IM'], best_result['iter_IM'] = IM, i torch.save({ 'epoch': i, 'model': model.state_dict(), 'optim': optimizer.state_dict(), 'scheduler': scheduler.state_dict() }, open(os.path.join(exp_path, 'model_IM.bin'), 'wb')) logger.info('NEW BEST MODEL: \tEpoch: %d\tIM: %.4f' % (i, IM)) logger.info('FINAL BEST RESULT: \tEpoch: %d\tDev acc: %.4f' % (best_result['iter'], best_result['dev_acc'])) logger.info('FINAL BEST RESULT IM: \tEpoch: %d\tIM: %.4f' % (best_result['iter'], best_result['IM'])) # check_point = torch.load(open(os.path.join(exp_path, 'model.bin'), 'rb')) # model.load_state_dict(check_point['model']) # dev_acc_beam = decode('dev', output_path=os.path.join(exp_path, 'dev.iter' + str(best_result['iter']) + '.beam' + str(args.beam_size)), acc_type='beam') # logger.info('FINAL BEST RESULT: \tEpoch: %d\tDev acc/Beam acc: %.4f/%.4f' % (best_result['iter'], best_result['dev_acc'], dev_acc_beam)) else: # start_time = time.time() # train_acc = decode('train', output_path=os.path.join(args.read_model_path, 'train.eval'), acc_type='sql') # logger.info("Evaluation costs %.2fs ; Train dataset exact match acc is %.4f ." % (time.time() - start_time, train_acc)) start_time = time.time() dev_acc,IM = dev_decode('dev', output_path=os.path.join(args.read_model_path, 'dev.eval'), acc_type='sql') # dev_acc_checker = decode('dev', output_path=os.path.join(args.read_model_path, 'dev.eval.checker'), acc_type='sql', use_checker=True) # dev_acc_beam = decode('dev', output_path=os.path.join(args.read_model_path, 'dev.eval.beam' + str(args.beam_size)), acc_type='beam') # logger.info("Evaluation costs %.2fs ; Dev dataset exact match/checker/beam acc is %.4f/%.4f ." 
% (time.time() - start_time, dev_acc, dev_acc_checker, dev_acc_beam)) class Batch(): def __init__(self, examples, device='cpu'): def from_example_list(cls, ex_list, device='cpu', train=True, method='text2sql', **kwargs): def __len__(self): def __getitem__(self, idx): def max_question_len(self): def max_table_len(self): def max_column_len(self): def max_table_word_len(self): def max_column_word_len(self): def max_question_subword_len(self): def max_table_subword_len(self): def max_column_subword_len(self): def mask(self): def question_mask(self): def table_mask(self): def column_mask(self): def table_word_mask(self): def column_word_mask(self): def question_subword_mask(self): def table_subword_mask(self): def column_subword_mask(self): def get_frontier_field_idx(self, t): def get_frontier_prod_idx(self, t): def get_frontier_field_type_idx(self, t): def decode(choice, output_path, acc_type='sql', use_checker=False): assert acc_type in ['beam', 'ast', 'sql'] and choice in ['train', 'dev'] model.eval() dataset = train_dataset if choice == 'train' else dev_dataset all_hyps = [] with torch.no_grad(): for i in range(0, len(dataset), 1): current_batch = Batch.from_example_list(dataset[i: i + args.batch_size], device, train=False) hyps = model.parse(current_batch, args.beam_size) all_hyps.extend(hyps) acc,IM = evaluator.acc(all_hyps, dataset, output_path, acc_type=acc_type, etype='match', use_checker=use_checker) torch.cuda.empty_cache() gc.collect() return acc,IM
null
164,109
import sys, os, time, json, gc, pickle from argparse import Namespace from utils.args import init_args from utils.hyperparams import hyperparam_path from utils.initialization import * from utils.example import Example from utils.batch import Batch from utils.optimization import set_optimizer from model.model_utils import Registrable from model.model_constructor import * from preprocess.parse_sql.schema import * from preprocess.parse_sql.parse import get_label db = pickle.load(open('data/tables_electra.bin','rb')) schemas, db_names, thetables = get_schemas_from_json(table_file) with open('data/label.json','r') as f: label = json.load(f) args = init_args(sys.argv[1:]) device = set_torch_device(args.device) if args.read_model_path: params = json.load(open(os.path.join(args.read_model_path, 'params.json')), object_hook=lambda d: Namespace(**d)) params.lazy_load = True else: params = args Example.configuration(plm=params.plm, method=params.model) train_dataset = Example.load_dataset('train_electra', label) dev_dataset = pickle.load(open('data/dev_electra.lgesql.bin', 'rb')) args.word_vocab, args.relation_num = len(Example.word_vocab), len(Example.relation_vocab) model = Registrable.by_name('text2sql')(params, sql_trans).to(device) if args.read_model_path: check_point = torch.load(open(os.path.join(args.read_model_path, 'model_IM.bin'), 'rb'), map_location=device) model.load_state_dict(check_point['model']) logger.info("Load saved model from path: %s" % (args.read_model_path)) else: json.dump(vars(params), open(os.path.join(exp_path, 'params.json'), 'w'), indent=4) if params.plm is None: ratio = Example.word2vec.load_embeddings(model.encoder.input_layer.word_embed, Example.word_vocab, device=device) logger.info("Init model and word embedding layer with a coverage %.2f" % (ratio)) if not args.testing: num_training_steps = ((len(train_dataset) + args.batch_size - 1) // args.batch_size) * args.max_epoch num_warmup_steps = int(num_training_steps * args.warmup_ratio) fgm = 
FGM(model,epsilon=1,emb_name='word_embeddings.') logger.info('Total training steps: %d;\t Warmup steps: %d' % (num_training_steps, num_warmup_steps)) optimizer, scheduler = set_optimizer(model, args, num_warmup_steps, num_training_steps) start_epoch, nsamples, best_result = 0, len(train_dataset), {'dev_acc': 0.,'IM': 0.} train_index, step_size = np.arange(nsamples), args.batch_size // args.grad_accumulate # if args.read_model_path and args.load_optimizer: # optimizer.load_state_dict(check_point['optim']) # scheduler.load_state_dict(check_point['scheduler']) # start_epoch = check_point['epoch'] + 1 logger.info('Start training ......') for i in range(start_epoch, args.max_epoch): start_time = time.time() epoch_loss, epoch_gp_loss, count = 0, 0, 0 np.random.shuffle(train_index) model.train() for j in range(0, nsamples, step_size): count += 1 cur_dataset = [train_dataset[k] for k in train_index[j: j + step_size]] current_batch = Batch.from_example_list(cur_dataset, device, train=True, smoothing=args.smoothing) loss, gp_loss = model(current_batch) # see utils/batch.py for batch elements epoch_loss = epoch_loss + loss.item() epoch_gp_loss = epoch_gp_loss + gp_loss.item() # print("Minibatch loss: %.4f" % (loss.item())) loss = loss + gp_loss loss.backward() # fgm.attack() # loss_adv, gp_loss_adv = model(current_batch) # loss_adv = loss_adv + gp_loss_adv # loss_adv.backward() # fgm.restore() if count == args.grad_accumulate or j + step_size >= nsamples: count = 0 model.pad_embedding_grad_zero() optimizer.step() scheduler.step() optimizer.zero_grad() logger.info('Training: \tEpoch: %d\tTime: %.4f\tTraining loss: %.4f/%.4f' % (i, time.time() - start_time, epoch_loss, epoch_gp_loss)) torch.cuda.empty_cache() gc.collect() start_time = time.time() dev_acc,IM = dev_decode('dev', os.path.join(exp_path, 'dev.iter' + str(i)), acc_type='sql') logger.info('Evaluation: \tEpoch: %d\tTime: %.4f\tDev acc: %.4f\tIM: %.4f' % (i, time.time() - start_time, dev_acc, IM)) if i < 
args.eval_after_epoch: # avoid unnecessary evaluation continue if dev_acc > best_result['dev_acc']: best_result['dev_acc'], best_result['iter'] = dev_acc, i torch.save({ 'epoch': i, 'model': model.state_dict(), 'optim': optimizer.state_dict(), 'scheduler': scheduler.state_dict() }, open(os.path.join(exp_path, 'model.bin'), 'wb')) logger.info('NEW BEST MODEL: \tEpoch: %d\tDev acc: %.4f' % (i, dev_acc)) if IM > best_result['IM']: best_result['IM'], best_result['iter_IM'] = IM, i torch.save({ 'epoch': i, 'model': model.state_dict(), 'optim': optimizer.state_dict(), 'scheduler': scheduler.state_dict() }, open(os.path.join(exp_path, 'model_IM.bin'), 'wb')) logger.info('NEW BEST MODEL: \tEpoch: %d\tIM: %.4f' % (i, IM)) logger.info('FINAL BEST RESULT: \tEpoch: %d\tDev acc: %.4f' % (best_result['iter'], best_result['dev_acc'])) logger.info('FINAL BEST RESULT IM: \tEpoch: %d\tIM: %.4f' % (best_result['iter'], best_result['IM'])) # check_point = torch.load(open(os.path.join(exp_path, 'model.bin'), 'rb')) # model.load_state_dict(check_point['model']) # dev_acc_beam = decode('dev', output_path=os.path.join(exp_path, 'dev.iter' + str(best_result['iter']) + '.beam' + str(args.beam_size)), acc_type='beam') # logger.info('FINAL BEST RESULT: \tEpoch: %d\tDev acc/Beam acc: %.4f/%.4f' % (best_result['iter'], best_result['dev_acc'], dev_acc_beam)) else: # start_time = time.time() # train_acc = decode('train', output_path=os.path.join(args.read_model_path, 'train.eval'), acc_type='sql') # logger.info("Evaluation costs %.2fs ; Train dataset exact match acc is %.4f ." 
% (time.time() - start_time, train_acc)) start_time = time.time() dev_acc,IM = dev_decode('dev', output_path=os.path.join(args.read_model_path, 'dev.eval'), acc_type='sql') # dev_acc_checker = decode('dev', output_path=os.path.join(args.read_model_path, 'dev.eval.checker'), acc_type='sql', use_checker=True) # dev_acc_beam = decode('dev', output_path=os.path.join(args.read_model_path, 'dev.eval.beam' + str(args.beam_size)), acc_type='beam') # logger.info("Evaluation costs %.2fs ; Dev dataset exact match/checker/beam acc is %.4f/%.4f ." % (time.time() - start_time, dev_acc, dev_acc_checker, dev_acc_beam)) class Example(): def configuration(cls, plm=None, method='lgesql', table_path='data/tables.json', tables='data/tables.bin', db_dir='data/database'): def load_dataset(cls, choice, debug=False): def __init__(self, ex: dict, db: dict): class Batch(): def __init__(self, examples, device='cpu'): def from_example_list(cls, ex_list, device='cpu', train=True, method='text2sql', **kwargs): def __len__(self): def __getitem__(self, idx): def max_question_len(self): def max_table_len(self): def max_column_len(self): def max_table_word_len(self): def max_column_word_len(self): def max_question_subword_len(self): def max_table_subword_len(self): def max_column_subword_len(self): def mask(self): def question_mask(self): def table_mask(self): def column_mask(self): def table_word_mask(self): def column_word_mask(self): def question_subword_mask(self): def table_subword_mask(self): def column_subword_mask(self): def get_frontier_field_idx(self, t): def get_frontier_prod_idx(self, t): def get_frontier_field_type_idx(self, t): def get_label(sql,column_len): def dev_decode(choice, output_path, acc_type='sql', use_checker=False): assert acc_type in ['beam', 'ast', 'sql'] and choice in ['train', 'dev'] model.eval() dataset = train_dataset if choice == 'train' else dev_dataset all_hyps = [] with torch.no_grad(): last_sql = '' sql_label = [] final_data = [] with 
open('predict.txt','w',encoding='utf8') as f: for i in range(0, len(dataset), 1): db_id = dataset[i]['db_id'] tables = db[dataset[i]['db_id']] schema = schemas[db_id] table = thetables[db_id] if '[CLS]' not in dataset[i]['question'] or last_sql == '': sql_label = ['']*len(tables['column_names']) else: schema = Schema(schema, table) try: sql_label = get_sql(schema, last_sql) except: sql_label = ['']*len(tables['column_names']) else: sql_label = get_label(sql_label,len(table['column_names_original'])) if '[CLS]' not in dataset[i]['question'] and i != 0: f.write('\n') dev_ex = Example(dataset[i], tables, sql_label) current_batch = Batch.from_example_list([dev_ex], device, train=False) hyps = model.parse(current_batch, args.beam_size) last_sql = evaluator.obtain_sql(hyps[0], dev_ex.db) printsql = last_sql all_hyps.extend(hyps) final_data.append(dev_ex) f.write(printsql+'\n') f.write('\n') acc,IM = evaluator.acc(all_hyps, final_data, output_path, acc_type=acc_type, etype='match', use_checker=use_checker) torch.cuda.empty_cache() gc.collect() return acc,IM
null
164,121
import os, pickle, json import torch, random import numpy as np from asdl.asdl import ASDLGrammar from asdl.transition_system import TransitionSystem from utils.constants import UNK, GRAMMAR_FILEPATH, SCHEMA_TYPES, RELATIONS from utils.graph_example import GraphFactory from utils.vocab import Vocab from utils.word2vec import Word2vecUtils from transformers import AutoTokenizer from utils.evaluator import Evaluator from itertools import chain import random random.seed(33) def get_position_ids(ex, shuffle=True): # cluster columns with their corresponding table and randomly shuffle tables and columns # [CLS] q1 q2 ... [SEP] * t1 c1 c2 c3 t2 c4 c5 ... [SEP] db, table_word_len, column_word_len = ex.db, ex.table_word_len, ex.column_word_len table_num, column_num = len(db['table_names']), len(db['column_names']) question_position_id = list(range(len(ex.question_id))) start = len(question_position_id) table_position_id, column_position_id = [None] * table_num, [None] * column_num column_position_id[0] = list(range(start, start + column_word_len[0])) start += column_word_len[0] # special symbol * first table_idxs = list(range(table_num)) if shuffle: random.shuffle(table_idxs) for idx in table_idxs: col_idxs = db['table2columns'][idx] table_position_id[idx] = list(range(start, start + table_word_len[idx])) start += table_word_len[idx] if shuffle: random.shuffle(col_idxs) for col_id in col_idxs: column_position_id[col_id] = list(range(start, start + column_word_len[col_id])) start += column_word_len[col_id] position_id = question_position_id + list(chain.from_iterable(table_position_id)) + \ list(chain.from_iterable(column_position_id)) + [start] assert len(position_id) == len(ex.input_id) return position_id
null
164,130
import sys, os def hyperparam_path_text2sql(args): def hyperparam_path(args): if args.read_model_path and args.testing: return args.read_model_path exp_path = hyperparam_path_text2sql(args) if not os.path.exists(exp_path): os.makedirs(exp_path) return exp_path
null
164,131
import argparse import sys def add_argument_base(arg_parser): #### General configuration #### arg_parser.add_argument('--task', default='text2sql', help='task name') arg_parser.add_argument('--seed', default=999, type=int, help='Random seed') arg_parser.add_argument('--device', type=int, default=0, help='Use which device: -1 -> cpu ; the index of gpu o.w.') arg_parser.add_argument('--testing', action='store_true', help='training or evaluation mode') arg_parser.add_argument('--read_model_path', type=str, help='read pretrained model path') #### Training Hyperparams #### arg_parser.add_argument('--batch_size', default=10, type=int, help='Batch size') arg_parser.add_argument('--grad_accumulate', default=5, type=int, help='accumulate grad and update once every x steps') arg_parser.add_argument('--lr', type=float, default=1e-4, help='learning rate') arg_parser.add_argument('--layerwise_decay', type=float, default=0.8, help='layerwise decay rate for lr, used for PLM') arg_parser.add_argument('--l2', type=float, default=0.1, help='weight decay coefficient') arg_parser.add_argument('--warmup_ratio', type=float, default=0.1, help='warmup steps proportion') arg_parser.add_argument('--lr_schedule', default='linear', choices=['constant', 'linear', 'ratsql', 'cosine'], help='lr scheduler') arg_parser.add_argument('--eval_after_epoch', default=50, type=int, help='Start to evaluate after x epoch') arg_parser.add_argument('--load_optimizer', action='store_true', default=False, help='Whether to load optimizer state') arg_parser.add_argument('--max_epoch', type=int, default=200, help='terminate after maximum epochs') arg_parser.add_argument('--max_norm', default=5., type=float, help='clip gradients') return arg_parser def add_argument_encoder(arg_parser): # Encoder Hyperparams arg_parser.add_argument('--model', choices=['rgatsql', 'lgesql'], default='lgesql', help='which text2sql model to use') arg_parser.add_argument('--local_and_nonlocal', choices=['mmc', 'msde', 'local', 
'global'], default='msde', help='how to integrate local and non-local relations: mmc -> multi-head multi-view concatenation ; msde -> mixed static and dynamic embeddings') arg_parser.add_argument('--output_model', choices=['without_pruning', 'with_pruning'], default='with_pruning', help='whether add graph pruning') arg_parser.add_argument('--plm', type=str, choices=['bert-base-uncased', 'bert-large-uncased', 'bert-large-uncased-whole-word-masking', 'roberta-base', 'roberta-large', 'grappa_large_jnt', 'electra-base-discriminator', 'electra-large-discriminator','SCORE','roberta', 'star_40k','sss' ], help='pretrained model name',default='sss') arg_parser.add_argument('--subword_aggregation', choices=['mean-pooling', 'max-pooling', 'attentive-pooling'], default='attentive-pooling', help='aggregate subword feats from PLM') arg_parser.add_argument('--schema_aggregation', choices=['mean-pooling', 'max-pooling', 'attentive-pooling', 'head+tail'], default='head+tail', help='aggregate schema words feats') arg_parser.add_argument('--dropout', type=float, default=0.2, help='feature dropout rate') arg_parser.add_argument('--attn_drop', type=float, default=0., help='dropout rate of attention weights') arg_parser.add_argument('--embed_size', default=300, type=int, help='size of word embeddings, only used in glove.42B.300d') arg_parser.add_argument('--gnn_num_layers', default=8, type=int, help='num of GNN layers in encoder') arg_parser.add_argument('--gnn_hidden_size', default=512, type=int, help='size of GNN layers hidden states') arg_parser.add_argument('--num_heads', default=8, type=int, help='num of heads in multihead attn') arg_parser.add_argument('--relation_share_layers', action='store_true',default='--relation_share_heads') arg_parser.add_argument('--relation_share_heads', action='store_true') arg_parser.add_argument('--score_function', choices=['affine', 'bilinear', 'biaffine', 'dot'], default='affine', help='graph pruning score function') 
arg_parser.add_argument('--smoothing', type=float, default=0.15, help='label smoothing factor for graph pruning') return arg_parser def add_argument_decoder(arg_parser): # Decoder Hyperparams arg_parser.add_argument('--lstm', choices=['lstm', 'onlstm'], default='onlstm', help='Type of LSTM used, ONLSTM or traditional LSTM') arg_parser.add_argument('--chunk_size', default=8, type=int, help='parameter of ONLSTM') arg_parser.add_argument('--att_vec_size', default=512, type=int, help='size of attentional vector') arg_parser.add_argument('--sep_cxt', action='store_true', help='when calculating context vectors, use seperate cxt for question and schema') arg_parser.add_argument('--drop_connect', type=float, default=0.2, help='recurrent connection dropout rate in decoder lstm') arg_parser.add_argument('--lstm_num_layers', type=int, default=1, help='num_layers of decoder') arg_parser.add_argument('--lstm_hidden_size', default=512, type=int, help='Size of LSTM hidden states') arg_parser.add_argument('--action_embed_size', default=128, type=int, help='Size of ApplyRule/GenToken action embeddings') arg_parser.add_argument('--field_embed_size', default=64, type=int, help='Embedding size of ASDL fields') arg_parser.add_argument('--type_embed_size', default=64, type=int, help='Embeddings ASDL types') arg_parser.add_argument('--no_context_feeding', action='store_true', default='--no_context_feeding', help='Do not use embedding of context vectors') arg_parser.add_argument('--no_parent_production_embed', default=False, action='store_true', help='Do not use embedding of parent ASDL production to update decoder LSTM state') arg_parser.add_argument('--no_parent_field_embed', default=False, action='store_true', help='Do not use embedding of parent field to update decoder LSTM state') arg_parser.add_argument('--no_parent_field_type_embed', default=False, action='store_true', help='Do not use embedding of the ASDL type of parent field to update decoder LSTM state') 
arg_parser.add_argument('--no_parent_state', default=False, action='store_true', help='Do not use the parent hidden state to update decoder LSTM state') arg_parser.add_argument('--beam_size', default=5, type=int, help='Beam size for beam search') arg_parser.add_argument('--decode_max_step', default=100, type=int, help='Maximum number of time steps used in decoding') return arg_parser def init_args(params=sys.argv[1:]): arg_parser = argparse.ArgumentParser() arg_parser = add_argument_base(arg_parser) arg_parser = add_argument_encoder(arg_parser) arg_parser = add_argument_decoder(arg_parser) opt = arg_parser.parse_args(params) if opt.model == 'rgatsql' and opt.local_and_nonlocal == 'msde': opt.local_and_nonlocal = 'global' if opt.model == 'lgesql' and opt.local_and_nonlocal == 'global': opt.local_and_nonlocal = 'msde' return opt
null
164,155
import os, sys import json import sqlite3 import traceback import argparse from process_sql import tokenize, get_schema, get_tables_with_alias, Schema, get_sql def eval_nested(pred, label): def eval_IUEN(pred, label): lt1, pt1, cnt1 = eval_nested(pred['intersect'], label['intersect']) lt2, pt2, cnt2 = eval_nested(pred['except'], label['except']) lt3, pt3, cnt3 = eval_nested(pred['union'], label['union']) label_total = lt1 + lt2 + lt3 pred_total = pt1 + pt2 + pt3 cnt = cnt1 + cnt2 + cnt3 return label_total, pred_total, cnt
null
164,161
import os, sys import json import sqlite3 import traceback import argparse from process_sql import tokenize, get_schema, get_tables_with_alias, Schema, get_sql class Evaluator: """A simple evaluator""" def __init__(self): self.partial_scores = None def eval_hardness(self, sql): count_comp1_ = count_component1(sql) count_comp2_ = count_component2(sql) count_others_ = count_others(sql) if count_comp1_ <= 1 and count_others_ == 0 and count_comp2_ == 0: return "easy" elif (count_others_ <= 2 and count_comp1_ <= 1 and count_comp2_ == 0) or \ (count_comp1_ <= 2 and count_others_ < 2 and count_comp2_ == 0): return "medium" elif (count_others_ > 2 and count_comp1_ <= 2 and count_comp2_ == 0) or \ (2 < count_comp1_ <= 3 and count_others_ <= 2 and count_comp2_ == 0) or \ (count_comp1_ <= 1 and count_others_ == 0 and count_comp2_ <= 1): return "hard" else: return "extra" def eval_exact_match(self, pred, label): partial_scores = self.eval_partial_match(pred, label) self.partial_scores = partial_scores for key, score in list(partial_scores.items()): if score['f1'] != 1: return 0 if len(label['from']['table_units']) > 0: label_tables = sorted(label['from']['table_units']) pred_tables = sorted(pred['from']['table_units']) return label_tables == pred_tables return 1 def eval_partial_match(self, pred, label): res = {} label_total, pred_total, cnt, cnt_wo_agg = eval_sel(pred, label) acc, rec, f1 = get_scores(cnt, pred_total, label_total) res['select'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total} acc, rec, f1 = get_scores(cnt_wo_agg, pred_total, label_total) res['select(no AGG)'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total} label_total, pred_total, cnt, cnt_wo_agg = eval_where(pred, label) acc, rec, f1 = get_scores(cnt, pred_total, label_total) res['where'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total} acc, rec, f1 = get_scores(cnt_wo_agg, pred_total, 
label_total) res['where(no OP)'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total} label_total, pred_total, cnt = eval_group(pred, label) acc, rec, f1 = get_scores(cnt, pred_total, label_total) res['group(no Having)'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total} label_total, pred_total, cnt = eval_having(pred, label) acc, rec, f1 = get_scores(cnt, pred_total, label_total) res['group'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total} label_total, pred_total, cnt = eval_order(pred, label) acc, rec, f1 = get_scores(cnt, pred_total, label_total) res['order'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total} label_total, pred_total, cnt = eval_and_or(pred, label) acc, rec, f1 = get_scores(cnt, pred_total, label_total) res['and/or'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total} label_total, pred_total, cnt = eval_IUEN(pred, label) acc, rec, f1 = get_scores(cnt, pred_total, label_total) res['IUEN'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total} label_total, pred_total, cnt = eval_keywords(pred, label) acc, rec, f1 = get_scores(cnt, pred_total, label_total) res['keywords'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total} return res def print_scores(scores, etype): turns = ['turn 1', 'turn 2', 'turn 3', 'turn 4', 'turn >4'] levels = ['easy', 'medium', 'hard', 'extra', 'all', "joint_all"] partial_types = ['select', 'select(no AGG)', 'where', 'where(no OP)', 'group(no Having)', 'group', 'order', 'and/or', 'IUEN', 'keywords'] print("{:20} {:20} {:20} {:20} {:20} {:20} {:20}".format("", *levels)) counts = [scores[level]['count'] for level in levels] print("{:20} {:<20d} {:<20d} {:<20d} {:<20d} {:<20d} {:<20d}".format("count", *counts)) if etype in ["all", "exec"]: print('===================== EXECUTION 
ACCURACY =====================') this_scores = [scores[level]['exec'] for level in levels] print("{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}".format("execution", *this_scores)) if etype in ["all", "match"]: print('\n====================== EXACT MATCHING ACCURACY =====================') exact_scores = [scores[level]['exact'] for level in levels] print("{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}".format("exact match", *exact_scores)) print('\n---------------------PARTIAL MATCHING ACCURACY----------------------') for type_ in partial_types: this_scores = [scores[level]['partial'][type_]['acc'] for level in levels] print("{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}".format(type_, *this_scores)) print('---------------------- PARTIAL MATCHING RECALL ----------------------') for type_ in partial_types: this_scores = [scores[level]['partial'][type_]['rec'] for level in levels] print("{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}".format(type_, *this_scores)) print('---------------------- PARTIAL MATCHING F1 --------------------------') for type_ in partial_types: this_scores = [scores[level]['partial'][type_]['f1'] for level in levels] print("{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}".format(type_, *this_scores)) print("\n\n{:20} {:20} {:20} {:20} {:20} {:20}".format("", *turns)) counts = [scores[turn]['count'] for turn in turns] print("{:20} {:<20d} {:<20d} {:<20d} {:<20d} {:<20d}".format("count", *counts)) if etype in ["all", "exec"]: print('===================== TRUN XECUTION ACCURACY =====================') this_scores = [scores[turn]['exec'] for turn in turns] print("{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}".format("execution", *this_scores)) if etype in ["all", "match"]: print('\n====================== TRUN EXACT MATCHING ACCURACY =====================') exact_scores = [scores[turn]['exact'] for turn in turns] print("{:20} {:<20.3f} {:<20.3f} 
{:<20.3f} {:<20.3f} {:<20.3f}".format("exact match", *exact_scores)) def eval_exec_match(db, p_str, g_str, pred, gold): """ return 1 if the values between prediction and gold are matching in the corresponding index. Currently not support multiple col_unit(pairs). """ conn = sqlite3.connect(db) cursor = conn.cursor() try: cursor.execute(p_str) p_res = cursor.fetchall() except: return False cursor.execute(g_str) q_res = cursor.fetchall() def res_map(res, val_units): rmap = {} for idx, val_unit in enumerate(val_units): key = tuple(val_unit[1]) if not val_unit[2] else (val_unit[0], tuple(val_unit[1]), tuple(val_unit[2])) rmap[key] = [r[idx] for r in res] return rmap p_val_units = [unit[1] for unit in pred['select'][1]] q_val_units = [unit[1] for unit in gold['select'][1]] return res_map(p_res, p_val_units) == res_map(q_res, q_val_units) def rebuild_sql_val(sql): if sql is None or not DISABLE_VALUE: return sql sql['from']['conds'] = rebuild_condition_val(sql['from']['conds']) sql['having'] = rebuild_condition_val(sql['having']) sql['where'] = rebuild_condition_val(sql['where']) sql['intersect'] = rebuild_sql_val(sql['intersect']) sql['except'] = rebuild_sql_val(sql['except']) sql['union'] = rebuild_sql_val(sql['union']) return sql def build_valid_col_units(table_units, schema): col_ids = [table_unit[1] for table_unit in table_units if table_unit[0] == TABLE_TYPE['table_unit']] prefixs = [col_id[:-2] for col_id in col_ids] valid_col_units= [] for value in list(schema.idMap.values()): if '.' 
in value and value[:value.index('.')] in prefixs: valid_col_units.append(value) return valid_col_units def rebuild_sql_col(valid_col_units, sql, kmap): if sql is None: return sql sql['select'] = rebuild_select_col(valid_col_units, sql['select'], kmap) sql['from'] = rebuild_from_col(valid_col_units, sql['from'], kmap) sql['where'] = rebuild_condition_col(valid_col_units, sql['where'], kmap) sql['groupBy'] = rebuild_group_by_col(valid_col_units, sql['groupBy'], kmap) sql['orderBy'] = rebuild_order_by_col(valid_col_units, sql['orderBy'], kmap) sql['having'] = rebuild_condition_col(valid_col_units, sql['having'], kmap) sql['intersect'] = rebuild_sql_col(valid_col_units, sql['intersect'], kmap) sql['except'] = rebuild_sql_col(valid_col_units, sql['except'], kmap) sql['union'] = rebuild_sql_col(valid_col_units, sql['union'], kmap) return sql class Schema: """ Simple schema which maps table&column to a unique identifier """ def __init__(self, schema): self._schema = schema self._idMap = self._map(self._schema) def schema(self): return self._schema def idMap(self): return self._idMap def _map(self, schema): idMap = {"*": "__all__"} id = 1 for key, vals in schema.items(): for val in vals: idMap[key.lower() + "." + val.lower()] = ( "__" + key.lower() + "." 
+ val.lower() + "__" ) id += 1 for key in schema: idMap[key.lower()] = "__" + key.lower() + "__" id += 1 return idMap def get_schema(db): """ Get database's schema, which is a dict with table name as key and list of column names as value :param db: database path :return: schema dict """ schema = {} conn = sqlite3.connect(db) cursor = conn.cursor() # fetch table names cursor.execute("SELECT name FROM sqlite_master WHERE type='table';") tables = [str(table[0].lower()) for table in cursor.fetchall()] # fetch table info for table in tables: cursor.execute("PRAGMA table_info({})".format(table)) schema[table] = [str(col[1].lower()) for col in cursor.fetchall()] return schema def get_sql(schema, query): toks = tokenize(query) tables_with_alias = get_tables_with_alias(schema.schema, toks) _, sql = parse_sql(toks, 0, tables_with_alias, schema) return sql def evaluate(gold, predict, db_dir, etype, kmaps): with open(gold) as f: glist = [] gseq_one = [] for l in f.readlines(): if len(l.strip()) == 0: glist.append(gseq_one) gseq_one = [] else: lstrip = l.strip().split('\t') gseq_one.append(lstrip) #glist = [l.strip().split('\t') for l in f.readlines() if len(l.strip()) > 0] with open(predict) as f: plist = [] pseq_one = [] for l in f.readlines(): if len(l.strip()) == 0: plist.append(pseq_one) pseq_one = [] else: pseq_one.append(l.strip().split('\t')) #plist = [l.strip().split('\t') for l in f.readlines() if len(l.strip()) > 0] # plist = [[("select product_type_code from products group by product_type_code order by count ( * ) desc limit value", "orchestra")]] # glist = [[("SELECT product_type_code FROM Products GROUP BY product_type_code ORDER BY count(*) DESC LIMIT 1", "customers_and_orders")]] evaluator = Evaluator() turns = ['turn 1', 'turn 2', 'turn 3', 'turn 4', 'turn >4'] levels = ['easy', 'medium', 'hard', 'extra', 'all', 'joint_all'] partial_types = ['select', 'select(no AGG)', 'where', 'where(no OP)', 'group(no Having)', 'group', 'order', 'and/or', 'IUEN', 'keywords'] 
entries = [] scores = {} for turn in turns: scores[turn] = {'count': 0, 'exact': 0.} scores[turn]['exec'] = 0 for level in levels: scores[level] = {'count': 0, 'partial': {}, 'exact': 0.} scores[level]['exec'] = 0 for type_ in partial_types: scores[level]['partial'][type_] = {'acc': 0., 'rec': 0., 'f1': 0.,'acc_count':0,'rec_count':0} eval_err_num = 0 for p, g in zip(plist, glist): scores['joint_all']['count'] += 1 turn_scores = {"exec": [], "exact": []} for idx, pg in enumerate(zip(p, g)): p, g = pg p_str = p[0] p_str = p_str.replace("value", "1") g_str, db = g db_name = db db = os.path.join(db_dir, db, db + ".sqlite") schema = Schema(get_schema(db)) g_sql = get_sql(schema, g_str) hardness = evaluator.eval_hardness(g_sql) if idx > 3: idx = ">4" else: idx += 1 turn_id = "turn " + str(idx) scores[turn_id]['count'] += 1 scores[hardness]['count'] += 1 scores['all']['count'] += 1 try: p_sql = get_sql(schema, p_str) except: # If p_sql is not valid, then we will use an empty sql to evaluate with the correct sql p_sql = { "except": None, "from": { "conds": [], "table_units": [] }, "groupBy": [], "having": [], "intersect": None, "limit": None, "orderBy": [], "select": [ False, [] ], "union": None, "where": [] } eval_err_num += 1 print(("eval_err_num:{}".format(eval_err_num))) # rebuild sql for value evaluation kmap = kmaps[db_name] g_valid_col_units = build_valid_col_units(g_sql['from']['table_units'], schema) g_sql = rebuild_sql_val(g_sql) g_sql = rebuild_sql_col(g_valid_col_units, g_sql, kmap) p_valid_col_units = build_valid_col_units(p_sql['from']['table_units'], schema) p_sql = rebuild_sql_val(p_sql) p_sql = rebuild_sql_col(p_valid_col_units, p_sql, kmap) if etype in ["all", "exec"]: exec_score = eval_exec_match(db, p_str, g_str, p_sql, g_sql) if exec_score: scores[hardness]['exec'] += 1 scores[turn_id]['exec'] += 1 turn_scores['exec'].append(1) else: turn_scores['exec'].append(0) if etype in ["all", "match"]: exact_score = evaluator.eval_exact_match(p_sql, g_sql) 
partial_scores = evaluator.partial_scores if exact_score == 0: turn_scores['exact'].append(0) print(("{} pred: {}".format(hardness,p_str))) print(("{} gold: {}".format(hardness,g_str))) print("") else: turn_scores['exact'].append(1) scores[turn_id]['exact'] += exact_score scores[hardness]['exact'] += exact_score scores['all']['exact'] += exact_score for type_ in partial_types: if partial_scores[type_]['pred_total'] > 0: scores[hardness]['partial'][type_]['acc'] += partial_scores[type_]['acc'] scores[hardness]['partial'][type_]['acc_count'] += 1 if partial_scores[type_]['label_total'] > 0: scores[hardness]['partial'][type_]['rec'] += partial_scores[type_]['rec'] scores[hardness]['partial'][type_]['rec_count'] += 1 scores[hardness]['partial'][type_]['f1'] += partial_scores[type_]['f1'] if partial_scores[type_]['pred_total'] > 0: scores['all']['partial'][type_]['acc'] += partial_scores[type_]['acc'] scores['all']['partial'][type_]['acc_count'] += 1 if partial_scores[type_]['label_total'] > 0: scores['all']['partial'][type_]['rec'] += partial_scores[type_]['rec'] scores['all']['partial'][type_]['rec_count'] += 1 scores['all']['partial'][type_]['f1'] += partial_scores[type_]['f1'] entries.append({ 'predictSQL': p_str, 'goldSQL': g_str, 'hardness': hardness, 'exact': exact_score, 'partial': partial_scores }) if all(v == 1 for v in turn_scores["exec"]): scores['joint_all']['exec'] += 1 if all(v == 1 for v in turn_scores["exact"]): scores['joint_all']['exact'] += 1 for turn in turns: if scores[turn]['count'] == 0: continue if etype in ["all", "exec"]: scores[turn]['exec'] /= scores[turn]['count'] if etype in ["all", "match"]: scores[turn]['exact'] /= scores[turn]['count'] for level in levels: if scores[level]['count'] == 0: continue if etype in ["all", "exec"]: scores[level]['exec'] /= scores[level]['count'] if etype in ["all", "match"]: scores[level]['exact'] /= scores[level]['count'] for type_ in partial_types: if scores[level]['partial'][type_]['acc_count'] == 0: 
scores[level]['partial'][type_]['acc'] = 0 else: scores[level]['partial'][type_]['acc'] = scores[level]['partial'][type_]['acc'] / \ scores[level]['partial'][type_]['acc_count'] * 1.0 if scores[level]['partial'][type_]['rec_count'] == 0: scores[level]['partial'][type_]['rec'] = 0 else: scores[level]['partial'][type_]['rec'] = scores[level]['partial'][type_]['rec'] / \ scores[level]['partial'][type_]['rec_count'] * 1.0 if scores[level]['partial'][type_]['acc'] == 0 and scores[level]['partial'][type_]['rec'] == 0: scores[level]['partial'][type_]['f1'] = 1 else: scores[level]['partial'][type_]['f1'] = \ 2.0 * scores[level]['partial'][type_]['acc'] * scores[level]['partial'][type_]['rec'] / ( scores[level]['partial'][type_]['rec'] + scores[level]['partial'][type_]['acc']) print_scores(scores, etype)
null
164,183
import os, sys import json import sqlite3 import traceback import argparse from process_sql import tokenize, get_schema, get_tables_with_alias, Schema, get_sql class Evaluator: """A simple evaluator""" def __init__(self): self.partial_scores = None def eval_hardness(self, sql): count_comp1_ = count_component1(sql) count_comp2_ = count_component2(sql) count_others_ = count_others(sql) if count_comp1_ <= 1 and count_others_ == 0 and count_comp2_ == 0: return "easy" elif (count_others_ <= 2 and count_comp1_ <= 1 and count_comp2_ == 0) or \ (count_comp1_ <= 2 and count_others_ < 2 and count_comp2_ == 0): return "medium" elif (count_others_ > 2 and count_comp1_ <= 2 and count_comp2_ == 0) or \ (2 < count_comp1_ <= 3 and count_others_ <= 2 and count_comp2_ == 0) or \ (count_comp1_ <= 1 and count_others_ == 0 and count_comp2_ <= 1): return "hard" else: return "extra" def eval_exact_match(self, pred, label): partial_scores = self.eval_partial_match(pred, label) self.partial_scores = partial_scores for _, score in partial_scores.items(): if score['f1'] != 1: return 0 if len(label['from']['table_units']) > 0: if label['from']['table_units'][0][0] == 'sql' and pred['from']['table_units'][0][0] == 'sql': return self.eval_exact_match(pred['from']['table_units'][0][1], label['from']['table_units'][0][1]) # still wrong else: label_tables = sorted(label['from']['table_units']) pred_tables = sorted(pred['from']['table_units']) return label_tables == pred_tables return 1 def eval_partial_match(self, pred, label): res = {} label_total, pred_total, cnt, cnt_wo_agg = eval_sel(pred, label) acc, rec, f1 = get_scores(cnt, pred_total, label_total) res['select'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total} acc, rec, f1 = get_scores(cnt_wo_agg, pred_total, label_total) res['select(no AGG)'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total} label_total, pred_total, cnt, cnt_wo_agg = eval_where(pred, label) acc, 
rec, f1 = get_scores(cnt, pred_total, label_total) res['where'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total} acc, rec, f1 = get_scores(cnt_wo_agg, pred_total, label_total) res['where(no OP)'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total} label_total, pred_total, cnt = eval_group(pred, label) acc, rec, f1 = get_scores(cnt, pred_total, label_total) res['group(no Having)'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total} label_total, pred_total, cnt = eval_having(pred, label) acc, rec, f1 = get_scores(cnt, pred_total, label_total) res['group'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total} label_total, pred_total, cnt = eval_order(pred, label) acc, rec, f1 = get_scores(cnt, pred_total, label_total) res['order'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total} label_total, pred_total, cnt = eval_and_or(pred, label) acc, rec, f1 = get_scores(cnt, pred_total, label_total) res['and/or'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total} label_total, pred_total, cnt = eval_IUEN(pred, label) acc, rec, f1 = get_scores(cnt, pred_total, label_total) res['IUEN'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total} label_total, pred_total, cnt = eval_keywords(pred, label) acc, rec, f1 = get_scores(cnt, pred_total, label_total) res['keywords'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total} return res def print_scores(scores, etype): levels = ['easy', 'medium', 'hard', 'extra', 'all'] partial_types = ['select', 'select(no AGG)', 'where', 'where(no OP)', 'group(no Having)', 'group', 'order', 'and/or', 'IUEN', 'keywords'] print("{:20} {:20} {:20} {:20} {:20} {:20}".format("", *levels)) counts = [scores[level]['count'] for level in levels] print("{:20} {:<20d} {:<20d} {:<20d} 
{:<20d} {:<20d}".format("count", *counts)) if etype in ["all", "exec"]: print('===================== EXECUTION ACCURACY =====================') this_scores = [scores[level]['exec'] for level in levels] print("{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}".format("execution", *this_scores)) if etype in ["all", "match"]: print('\n====================== EXACT MATCHING ACCURACY =====================') exact_scores = [scores[level]['exact'] for level in levels] print("{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}".format("exact match", *exact_scores)) print('\n---------------------PARTIAL MATCHING ACCURACY----------------------') for type_ in partial_types: this_scores = [scores[level]['partial'][type_]['acc'] for level in levels] print("{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}".format(type_, *this_scores)) print('---------------------- PARTIAL MATCHING RECALL ----------------------') for type_ in partial_types: this_scores = [scores[level]['partial'][type_]['rec'] for level in levels] print("{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}".format(type_, *this_scores)) print('---------------------- PARTIAL MATCHING F1 --------------------------') for type_ in partial_types: this_scores = [scores[level]['partial'][type_]['f1'] for level in levels] print("{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}".format(type_, *this_scores)) def eval_exec_match(db, p_str, g_str, pred, gold): """ return 1 if the values between prediction and gold are matching in the corresponding index. Currently not support multiple col_unit(pairs). 
""" conn = sqlite3.connect(db) cursor = conn.cursor() try: cursor.execute(p_str) p_res = cursor.fetchall() except: return False cursor.execute(g_str) q_res = cursor.fetchall() def res_map(res, val_units): rmap = {} for idx, val_unit in enumerate(val_units): key = tuple(val_unit[1]) if not val_unit[2] else (val_unit[0], tuple(val_unit[1]), tuple(val_unit[2])) rmap[key] = [r[idx] for r in res] return rmap p_val_units = [unit[1] for unit in pred['select'][1]] q_val_units = [unit[1] for unit in gold['select'][1]] return res_map(p_res, p_val_units) == res_map(q_res, q_val_units) def rebuild_sql_val(sql): if sql is None or not DISABLE_VALUE: return sql if len(sql['from']['table_units']) > 0 and sql['from']['table_units'][0][0] == 'sql': sql['from']['table_units'][0] = ('sql', rebuild_sql_val(sql['from']['table_units'][0][1])) sql['from']['conds'] = rebuild_condition_val(sql['from']['conds']) sql['having'] = rebuild_condition_val(sql['having']) sql['where'] = rebuild_condition_val(sql['where']) sql['intersect'] = rebuild_sql_val(sql['intersect']) sql['except'] = rebuild_sql_val(sql['except']) sql['union'] = rebuild_sql_val(sql['union']) return sql def build_valid_col_units(table_units, schema): col_ids = [table_unit[1] for table_unit in table_units if table_unit[0] == TABLE_TYPE['table_unit']] prefixs = [col_id[:-2] for col_id in col_ids] valid_col_units= [] for value in schema.idMap.values(): if '.' 
in value and value[:value.index('.')] in prefixs: valid_col_units.append(value) return valid_col_units def rebuild_sql_col(valid_col_units, sql, kmap): if sql is None: return sql sql['select'] = rebuild_select_col(valid_col_units, sql['select'], kmap) sql['from'] = rebuild_from_col(valid_col_units, sql['from'], kmap) sql['where'] = rebuild_condition_col(valid_col_units, sql['where'], kmap) sql['groupBy'] = rebuild_group_by_col(valid_col_units, sql['groupBy'], kmap) sql['orderBy'] = rebuild_order_by_col(valid_col_units, sql['orderBy'], kmap) sql['having'] = rebuild_condition_col(valid_col_units, sql['having'], kmap) sql['intersect'] = rebuild_sql_col(valid_col_units, sql['intersect'], kmap) sql['except'] = rebuild_sql_col(valid_col_units, sql['except'], kmap) sql['union'] = rebuild_sql_col(valid_col_units, sql['union'], kmap) return sql class Schema: """ Simple schema which maps table&column to a unique identifier """ def __init__(self, schema): self._schema = schema self._idMap = self._map(self._schema) def schema(self): return self._schema def idMap(self): return self._idMap def _map(self, schema): idMap = {"*": "__all__"} id = 1 for key, vals in schema.items(): for val in vals: idMap[key.lower() + "." + val.lower()] = ( "__" + key.lower() + "." 
+ val.lower() + "__" ) id += 1 for key in schema: idMap[key.lower()] = "__" + key.lower() + "__" id += 1 return idMap def get_schema(db): """ Get database's schema, which is a dict with table name as key and list of column names as value :param db: database path :return: schema dict """ schema = {} conn = sqlite3.connect(db) cursor = conn.cursor() # fetch table names cursor.execute("SELECT name FROM sqlite_master WHERE type='table';") tables = [str(table[0].lower()) for table in cursor.fetchall()] # fetch table info for table in tables: cursor.execute("PRAGMA table_info({})".format(table)) schema[table] = [str(col[1].lower()) for col in cursor.fetchall()] return schema def get_sql(schema, query): toks = tokenize(query) tables_with_alias = get_tables_with_alias(schema.schema, toks) _, sql = parse_sql(toks, 0, tables_with_alias, schema) return sql def evaluate(gold, predict, db_dir, etype, kmaps): with open(gold) as f: glist = [l.strip().split('\t') for l in f.readlines() if len(l.strip()) > 0] with open(predict) as f: plist = [l.strip().split('\t') for l in f.readlines() if len(l.strip()) > 0] # plist = [("select max(Share),min(Share) from performance where Type != 'terminal'", "orchestra")] # glist = [("SELECT max(SHARE) , min(SHARE) FROM performance WHERE TYPE != 'Live final'", "orchestra")] evaluator = Evaluator() levels = ['easy', 'medium', 'hard', 'extra', 'all'] partial_types = ['select', 'select(no AGG)', 'where', 'where(no OP)', 'group(no Having)', 'group', 'order', 'and/or', 'IUEN', 'keywords'] entries = [] scores = {} for level in levels: scores[level] = {'count': 0, 'partial': {}, 'exact': 0.} scores[level]['exec'] = 0 for type_ in partial_types: scores[level]['partial'][type_] = {'acc': 0., 'rec': 0., 'f1': 0.,'acc_count':0,'rec_count':0} eval_err_num = 0 total_acc = [] for p, g in zip(plist, glist): p_str = p[0] g_str, db = g db_name = db db = os.path.join(db_dir, db, db + ".sqlite") schema = Schema(get_schema(db)) # .schema: map lowercased raw tab name 
to lowercased raw col name list # .idMap: map tab name to __tab__, tab.col to __tab.col__, * to __all__, all lowercased g_sql = get_sql(schema, g_str) hardness = evaluator.eval_hardness(g_sql) scores[hardness]['count'] += 1 scores['all']['count'] += 1 try: p_sql = get_sql(schema, p_str) except: # If p_sql is not valid, then we will use an empty sql to evaluate with the correct sql p_sql = { "except": None, "from": { "conds": [], "table_units": [] }, "groupBy": [], "having": [], "intersect": None, "limit": None, "orderBy": [], "select": [ False, [] ], "union": None, "where": [] } eval_err_num += 1 print("eval_err_num:{}".format(eval_err_num)) # rebuild sql for value evaluation kmap = kmaps[db_name] g_valid_col_units = build_valid_col_units(g_sql['from']['table_units'], schema) # extract all __tab.col__ that has tab in from clause, not include __all__ g_sql = rebuild_sql_val(g_sql) g_sql = rebuild_sql_col(g_valid_col_units, g_sql, kmap) # kmap: map __tab.col__ to pivot __tab.col__ p_valid_col_units = build_valid_col_units(p_sql['from']['table_units'], schema) p_sql = rebuild_sql_val(p_sql) p_sql = rebuild_sql_col(p_valid_col_units, p_sql, kmap) if etype in ["all", "exec"]: exec_score = eval_exec_match(db, p_str, g_str, p_sql, g_sql) if exec_score: scores[hardness]['exec'] += 1 if etype in ["all", "match"]: exact_score = evaluator.eval_exact_match(p_sql, g_sql) total_acc.append(exact_score) partial_scores = evaluator.partial_scores if exact_score == 0: print("{} pred: {}".format(hardness,p_str)) print("{} gold: {}".format(hardness,g_str)) print("") scores[hardness]['exact'] += exact_score scores['all']['exact'] += exact_score for type_ in partial_types: if partial_scores[type_]['pred_total'] > 0: scores[hardness]['partial'][type_]['acc'] += partial_scores[type_]['acc'] scores[hardness]['partial'][type_]['acc_count'] += 1 if partial_scores[type_]['label_total'] > 0: scores[hardness]['partial'][type_]['rec'] += partial_scores[type_]['rec'] 
scores[hardness]['partial'][type_]['rec_count'] += 1 scores[hardness]['partial'][type_]['f1'] += partial_scores[type_]['f1'] if partial_scores[type_]['pred_total'] > 0: scores['all']['partial'][type_]['acc'] += partial_scores[type_]['acc'] scores['all']['partial'][type_]['acc_count'] += 1 if partial_scores[type_]['label_total'] > 0: scores['all']['partial'][type_]['rec'] += partial_scores[type_]['rec'] scores['all']['partial'][type_]['rec_count'] += 1 scores['all']['partial'][type_]['f1'] += partial_scores[type_]['f1'] entries.append({ 'predictSQL': p_str, 'goldSQL': g_str, 'hardness': hardness, 'exact': exact_score, 'partial': partial_scores }) thedev = [3, 2, 3, 4, 3, 3, 4, 4, 3, 3, 5, 3, 3, 3, 3, 4, 2, 3, 6, 3, 3, 3, 2, 4, 3, 2, 3, 5, 1, 1, 3, 3, 4, 4, 3, 4, 4, 2, 2, 2, 3, 3, 3, 3, 3, 4, 2, 4, 2, 3, 3, 4, 3, 4, 3, 3, 2, 3, 4, 3, 4, 6, 1, 2, 3, 1, 3, 3, 3, 3, 3, 3, 3, 4, 3, 3, 8, 3, 3, 1, 3, 3, 5, 2, 3, 6, 7, 3, 3, 3, 5, 4, 4, 4, 3, 5, 4, 1, 5, 4, 4, 4, 3, 5, 3, 4, 3, 3, 3, 5, 4, 2, 5, 3, 3, 3, 3, 5, 6, 5, 4, 4, 4, 4, 3, 4, 3, 3, 6, 5, 3, 2, 3, 4, 4, 5, 5, 3, 3, 3, 2, 2, 3, 2, 4, 4, 4, 6, 1, 4, 3, 3, 5, 3, 3, 4, 4, 3, 4, 2, 4, 3, 2, 5, 3, 5, 4, 3, 4, 3, 3, 2, 3, 4, 4, 2, 3, 4, 3, 3, 3, 5, 3, 3, 3, 3, 4, 5, 3, 4, 3, 5, 4, 3, 2, 3, 3, 4, 2, 3, 1, 3, 4, 3, 3, 4, 3, 3, 2, 2, 3, 2, 1, 4, 5, 3, 5, 4, 2, 6, 3, 2, 4, 3, 4, 3, 3, 3, 3, 5, 3, 5, 3, 3, 4, 2, 3, 3, 3, 2, 6, 3, 3, 3, 3, 5, 4, 3, 2, 6, 4, 4, 5, 5, 3, 2, 3, 4, 2, 3, 4, 4, 2, 2, 7, 4, 3, 2, 6, 7, 2, 2, 4, 9, 3, 5, 4, 4, 4, 3, 3, 6, 3, 2, 3, 4, 5, 3, 6, 3, 3, 3, 2] IM = 0 index = 0 for item in thedev: flag = 1 for thescore in total_acc[index:index+item]: if thescore == 0: flag = 0 if flag == 1: IM += 1 index += item print('the IM is ',IM/len(thedev)) for level in levels: if scores[level]['count'] == 0: continue if etype in ["all", "exec"]: scores[level]['exec'] /= scores[level]['count'] if etype in ["all", "match"]: scores[level]['exact'] /= scores[level]['count'] for type_ in partial_types: if 
scores[level]['partial'][type_]['acc_count'] == 0: scores[level]['partial'][type_]['acc'] = 0 else: scores[level]['partial'][type_]['acc'] = scores[level]['partial'][type_]['acc'] / \ scores[level]['partial'][type_]['acc_count'] * 1.0 if scores[level]['partial'][type_]['rec_count'] == 0: scores[level]['partial'][type_]['rec'] = 0 else: scores[level]['partial'][type_]['rec'] = scores[level]['partial'][type_]['rec'] / \ scores[level]['partial'][type_]['rec_count'] * 1.0 if scores[level]['partial'][type_]['acc'] == 0 and scores[level]['partial'][type_]['rec'] == 0: scores[level]['partial'][type_]['f1'] = 1 else: scores[level]['partial'][type_]['f1'] = \ 2.0 * scores[level]['partial'][type_]['acc'] * scores[level]['partial'][type_]['rec'] / ( scores[level]['partial'][type_]['rec'] + scores[level]['partial'][type_]['acc']) print_scores(scores, etype) IM = IM/len(thedev) return scores, IM
null
164,205
import sys, os, time, json, gc, pickle from argparse import Namespace from utils.args import init_args from utils.hyperparams import hyperparam_path from utils.initialization import * from utils.example import Example from utils.batch import Batch from utils.optimization import set_optimizer from model.model_utils import Registrable from model.model_constructor import * from preprocess.parse_sql.schema import * from preprocess.parse_sql.parse import get_label args = init_args(sys.argv[1:]) device = set_torch_device(args.device) if args.read_model_path: params = json.load(open(os.path.join(args.read_model_path, 'params.json')), object_hook=lambda d: Namespace(**d)) params.lazy_load = True else: params = args train_dataset = Example.load_dataset('train_electra', label) dev_dataset = pickle.load(open('data/dev_electra.lgesql.bin', 'rb')) args.word_vocab, args.relation_num = len(Example.word_vocab), len(Example.relation_vocab) model = Registrable.by_name('text2sql')(params, sql_trans).to(device) if args.read_model_path: check_point = torch.load(open(os.path.join(args.read_model_path, 'model_IM.bin'), 'rb'), map_location=device) model.load_state_dict(check_point['model']) logger.info("Load saved model from path: %s" % (args.read_model_path)) else: json.dump(vars(params), open(os.path.join(exp_path, 'params.json'), 'w'), indent=4) if params.plm is None: ratio = Example.word2vec.load_embeddings(model.encoder.input_layer.word_embed, Example.word_vocab, device=device) logger.info("Init model and word embedding layer with a coverage %.2f" % (ratio)) if not args.testing: num_training_steps = ((len(train_dataset) + args.batch_size - 1) // args.batch_size) * args.max_epoch num_warmup_steps = int(num_training_steps * args.warmup_ratio) fgm = FGM(model,epsilon=1,emb_name='word_embeddings.') logger.info('Total training steps: %d;\t Warmup steps: %d' % (num_training_steps, num_warmup_steps)) optimizer, scheduler = set_optimizer(model, args, num_warmup_steps, num_training_steps) 
start_epoch, nsamples, best_result = 0, len(train_dataset), {'dev_acc': 0.,'IM': 0.} train_index, step_size = np.arange(nsamples), args.batch_size // args.grad_accumulate # if args.read_model_path and args.load_optimizer: # optimizer.load_state_dict(check_point['optim']) # scheduler.load_state_dict(check_point['scheduler']) # start_epoch = check_point['epoch'] + 1 logger.info('Start training ......') for i in range(start_epoch, args.max_epoch): start_time = time.time() epoch_loss, epoch_gp_loss, count = 0, 0, 0 np.random.shuffle(train_index) model.train() for j in range(0, nsamples, step_size): count += 1 cur_dataset = [train_dataset[k] for k in train_index[j: j + step_size]] current_batch = Batch.from_example_list(cur_dataset, device, train=True, smoothing=args.smoothing) loss, gp_loss = model(current_batch) # see utils/batch.py for batch elements epoch_loss = epoch_loss + loss.item() epoch_gp_loss = epoch_gp_loss + gp_loss.item() # print("Minibatch loss: %.4f" % (loss.item())) loss = loss + gp_loss loss.backward() # fgm.attack() # loss_adv, gp_loss_adv = model(current_batch) # loss_adv = loss_adv + gp_loss_adv # loss_adv.backward() # fgm.restore() if count == args.grad_accumulate or j + step_size >= nsamples: count = 0 model.pad_embedding_grad_zero() optimizer.step() scheduler.step() optimizer.zero_grad() logger.info('Training: \tEpoch: %d\tTime: %.4f\tTraining loss: %.4f/%.4f' % (i, time.time() - start_time, epoch_loss, epoch_gp_loss)) torch.cuda.empty_cache() gc.collect() start_time = time.time() dev_acc,IM = dev_decode('dev', os.path.join(exp_path, 'dev.iter' + str(i)), acc_type='sql') logger.info('Evaluation: \tEpoch: %d\tTime: %.4f\tDev acc: %.4f\tIM: %.4f' % (i, time.time() - start_time, dev_acc, IM)) if i < args.eval_after_epoch: # avoid unnecessary evaluation continue if dev_acc > best_result['dev_acc']: best_result['dev_acc'], best_result['iter'] = dev_acc, i torch.save({ 'epoch': i, 'model': model.state_dict(), 'optim': optimizer.state_dict(), 
'scheduler': scheduler.state_dict() }, open(os.path.join(exp_path, 'model.bin'), 'wb')) logger.info('NEW BEST MODEL: \tEpoch: %d\tDev acc: %.4f' % (i, dev_acc)) if IM > best_result['IM']: best_result['IM'], best_result['iter_IM'] = IM, i torch.save({ 'epoch': i, 'model': model.state_dict(), 'optim': optimizer.state_dict(), 'scheduler': scheduler.state_dict() }, open(os.path.join(exp_path, 'model_IM.bin'), 'wb')) logger.info('NEW BEST MODEL: \tEpoch: %d\tIM: %.4f' % (i, IM)) logger.info('FINAL BEST RESULT: \tEpoch: %d\tDev acc: %.4f' % (best_result['iter'], best_result['dev_acc'])) logger.info('FINAL BEST RESULT IM: \tEpoch: %d\tIM: %.4f' % (best_result['iter'], best_result['IM'])) # check_point = torch.load(open(os.path.join(exp_path, 'model.bin'), 'rb')) # model.load_state_dict(check_point['model']) # dev_acc_beam = decode('dev', output_path=os.path.join(exp_path, 'dev.iter' + str(best_result['iter']) + '.beam' + str(args.beam_size)), acc_type='beam') # logger.info('FINAL BEST RESULT: \tEpoch: %d\tDev acc/Beam acc: %.4f/%.4f' % (best_result['iter'], best_result['dev_acc'], dev_acc_beam)) else: # start_time = time.time() # train_acc = decode('train', output_path=os.path.join(args.read_model_path, 'train.eval'), acc_type='sql') # logger.info("Evaluation costs %.2fs ; Train dataset exact match acc is %.4f ." % (time.time() - start_time, train_acc)) start_time = time.time() dev_acc,IM = dev_decode('dev', output_path=os.path.join(args.read_model_path, 'dev.eval'), acc_type='sql') # dev_acc_checker = decode('dev', output_path=os.path.join(args.read_model_path, 'dev.eval.checker'), acc_type='sql', use_checker=True) # dev_acc_beam = decode('dev', output_path=os.path.join(args.read_model_path, 'dev.eval.beam' + str(args.beam_size)), acc_type='beam') # logger.info("Evaluation costs %.2fs ; Dev dataset exact match/checker/beam acc is %.4f/%.4f ." 
% (time.time() - start_time, dev_acc, dev_acc_checker, dev_acc_beam)) class Batch(): def __init__(self, examples, device='cpu'): super(Batch, self).__init__() self.examples = examples self.device = device def from_example_list(cls, ex_list, device='cpu', train=True, method='text2sql', **kwargs): method_dict = { "text2sql": from_example_list_text2sql, } return method_dict[method](ex_list, device, train=train, **kwargs) def __len__(self): return len(self.examples) def __getitem__(self, idx): return self.examples[idx] def max_question_len(self): return torch.max(self.question_lens).item() def max_table_len(self): return torch.max(self.table_lens).item() def max_column_len(self): return torch.max(self.column_lens).item() def max_table_word_len(self): return torch.max(self.table_word_lens).item() def max_column_word_len(self): return torch.max(self.column_word_lens).item() def max_question_subword_len(self): return torch.max(self.question_subword_lens).item() def max_table_subword_len(self): return torch.max(self.table_subword_lens).item() def max_column_subword_len(self): return torch.max(self.column_subword_lens).item() """ Different types of nodes are seperated instead of concatenated together """ def mask(self): return torch.cat([self.question_mask, self.table_mask, self.column_mask], dim=1) def question_mask(self): return lens2mask(self.question_lens) def table_mask(self): return lens2mask(self.table_lens) def column_mask(self): return lens2mask(self.column_lens) def table_word_mask(self): return lens2mask(self.table_word_lens) def column_word_mask(self): return lens2mask(self.column_word_lens) def question_subword_mask(self): return lens2mask(self.question_subword_lens) def table_subword_mask(self): return lens2mask(self.table_subword_lens) def column_subword_mask(self): return lens2mask(self.column_subword_lens) def get_frontier_field_idx(self, t): ids = [] for e in self.examples: if t < len(e.tgt_action): 
ids.append(Example.grammar.field2id[e.tgt_action[t].frontier_field]) # assert self.grammar.id2field[ids[-1]] == e.tgt_action[t].frontier_field else: ids.append(0) return torch.tensor(ids, dtype=torch.long, device=self.device) def get_frontier_prod_idx(self, t): ids = [] for e in self.examples: if t < len(e.tgt_action): ids.append(Example.grammar.prod2id[e.tgt_action[t].frontier_prod]) # assert self.grammar.id2prod[ids[-1]] == e.tgt_action[t].frontier_prod else: ids.append(0) return torch.tensor(ids, dtype=torch.long, device=self.device) def get_frontier_field_type_idx(self, t): ids = [] for e in self.examples: if t < len(e.tgt_action): ids.append(Example.grammar.type2id[e.tgt_action[t].frontier_field.type]) # assert self.grammar.id2type[ids[-1]] == e.tgt_action[t].frontier_field.type else: ids.append(0) return torch.tensor(ids, dtype=torch.long, device=self.device) def decode(choice, output_path, acc_type='sql', use_checker=False): assert acc_type in ['beam', 'ast', 'sql'] and choice in ['train', 'dev'] model.eval() dataset = train_dataset if choice == 'train' else dev_dataset all_hyps = [] with torch.no_grad(): for i in range(0, len(dataset), 1): current_batch = Batch.from_example_list(dataset[i: i + args.batch_size], device, train=False) hyps = model.parse(current_batch, args.beam_size) all_hyps.extend(hyps) acc,IM = evaluator.acc(all_hyps, dataset, output_path, acc_type=acc_type, etype='match', use_checker=use_checker) torch.cuda.empty_cache() gc.collect() return acc,IM
null
164,206
import sys, os, time, json, gc, pickle from argparse import Namespace from utils.args import init_args from utils.hyperparams import hyperparam_path from utils.initialization import * from utils.example import Example from utils.batch import Batch from utils.optimization import set_optimizer from model.model_utils import Registrable from model.model_constructor import * from preprocess.parse_sql.schema import * from preprocess.parse_sql.parse import get_label db = pickle.load(open('data/tables_electra.bin','rb')) schemas, db_names, thetables = get_schemas_from_json(table_file) with open('data/label.json','r') as f: label = json.load(f) args = init_args(sys.argv[1:]) device = set_torch_device(args.device) if args.read_model_path: params = json.load(open(os.path.join(args.read_model_path, 'params.json')), object_hook=lambda d: Namespace(**d)) params.lazy_load = True else: params = args Example.configuration(plm=params.plm, method=params.model) train_dataset = Example.load_dataset('train_electra', label) dev_dataset = pickle.load(open('data/dev_electra.lgesql.bin', 'rb')) args.word_vocab, args.relation_num = len(Example.word_vocab), len(Example.relation_vocab) model = Registrable.by_name('text2sql')(params, sql_trans).to(device) if args.read_model_path: check_point = torch.load(open(os.path.join(args.read_model_path, 'model_IM.bin'), 'rb'), map_location=device) model.load_state_dict(check_point['model']) logger.info("Load saved model from path: %s" % (args.read_model_path)) else: json.dump(vars(params), open(os.path.join(exp_path, 'params.json'), 'w'), indent=4) if params.plm is None: ratio = Example.word2vec.load_embeddings(model.encoder.input_layer.word_embed, Example.word_vocab, device=device) logger.info("Init model and word embedding layer with a coverage %.2f" % (ratio)) if not args.testing: num_training_steps = ((len(train_dataset) + args.batch_size - 1) // args.batch_size) * args.max_epoch num_warmup_steps = int(num_training_steps * args.warmup_ratio) fgm = 
FGM(model,epsilon=1,emb_name='word_embeddings.') logger.info('Total training steps: %d;\t Warmup steps: %d' % (num_training_steps, num_warmup_steps)) optimizer, scheduler = set_optimizer(model, args, num_warmup_steps, num_training_steps) start_epoch, nsamples, best_result = 0, len(train_dataset), {'dev_acc': 0.,'IM': 0.} train_index, step_size = np.arange(nsamples), args.batch_size // args.grad_accumulate # if args.read_model_path and args.load_optimizer: # optimizer.load_state_dict(check_point['optim']) # scheduler.load_state_dict(check_point['scheduler']) # start_epoch = check_point['epoch'] + 1 logger.info('Start training ......') for i in range(start_epoch, args.max_epoch): start_time = time.time() epoch_loss, epoch_gp_loss, count = 0, 0, 0 np.random.shuffle(train_index) model.train() for j in range(0, nsamples, step_size): count += 1 cur_dataset = [train_dataset[k] for k in train_index[j: j + step_size]] current_batch = Batch.from_example_list(cur_dataset, device, train=True, smoothing=args.smoothing) loss, gp_loss = model(current_batch) # see utils/batch.py for batch elements epoch_loss = epoch_loss + loss.item() epoch_gp_loss = epoch_gp_loss + gp_loss.item() # print("Minibatch loss: %.4f" % (loss.item())) loss = loss + gp_loss loss.backward() # fgm.attack() # loss_adv, gp_loss_adv = model(current_batch) # loss_adv = loss_adv + gp_loss_adv # loss_adv.backward() # fgm.restore() if count == args.grad_accumulate or j + step_size >= nsamples: count = 0 model.pad_embedding_grad_zero() optimizer.step() scheduler.step() optimizer.zero_grad() logger.info('Training: \tEpoch: %d\tTime: %.4f\tTraining loss: %.4f/%.4f' % (i, time.time() - start_time, epoch_loss, epoch_gp_loss)) torch.cuda.empty_cache() gc.collect() start_time = time.time() dev_acc,IM = dev_decode('dev', os.path.join(exp_path, 'dev.iter' + str(i)), acc_type='sql') logger.info('Evaluation: \tEpoch: %d\tTime: %.4f\tDev acc: %.4f\tIM: %.4f' % (i, time.time() - start_time, dev_acc, IM)) if i < 
args.eval_after_epoch: # avoid unnecessary evaluation continue if dev_acc > best_result['dev_acc']: best_result['dev_acc'], best_result['iter'] = dev_acc, i torch.save({ 'epoch': i, 'model': model.state_dict(), 'optim': optimizer.state_dict(), 'scheduler': scheduler.state_dict() }, open(os.path.join(exp_path, 'model.bin'), 'wb')) logger.info('NEW BEST MODEL: \tEpoch: %d\tDev acc: %.4f' % (i, dev_acc)) if IM > best_result['IM']: best_result['IM'], best_result['iter_IM'] = IM, i torch.save({ 'epoch': i, 'model': model.state_dict(), 'optim': optimizer.state_dict(), 'scheduler': scheduler.state_dict() }, open(os.path.join(exp_path, 'model_IM.bin'), 'wb')) logger.info('NEW BEST MODEL: \tEpoch: %d\tIM: %.4f' % (i, IM)) logger.info('FINAL BEST RESULT: \tEpoch: %d\tDev acc: %.4f' % (best_result['iter'], best_result['dev_acc'])) logger.info('FINAL BEST RESULT IM: \tEpoch: %d\tIM: %.4f' % (best_result['iter'], best_result['IM'])) # check_point = torch.load(open(os.path.join(exp_path, 'model.bin'), 'rb')) # model.load_state_dict(check_point['model']) # dev_acc_beam = decode('dev', output_path=os.path.join(exp_path, 'dev.iter' + str(best_result['iter']) + '.beam' + str(args.beam_size)), acc_type='beam') # logger.info('FINAL BEST RESULT: \tEpoch: %d\tDev acc/Beam acc: %.4f/%.4f' % (best_result['iter'], best_result['dev_acc'], dev_acc_beam)) else: # start_time = time.time() # train_acc = decode('train', output_path=os.path.join(args.read_model_path, 'train.eval'), acc_type='sql') # logger.info("Evaluation costs %.2fs ; Train dataset exact match acc is %.4f ." 
% (time.time() - start_time, train_acc)) start_time = time.time() dev_acc,IM = dev_decode('dev', output_path=os.path.join(args.read_model_path, 'dev.eval'), acc_type='sql') # dev_acc_checker = decode('dev', output_path=os.path.join(args.read_model_path, 'dev.eval.checker'), acc_type='sql', use_checker=True) # dev_acc_beam = decode('dev', output_path=os.path.join(args.read_model_path, 'dev.eval.beam' + str(args.beam_size)), acc_type='beam') # logger.info("Evaluation costs %.2fs ; Dev dataset exact match/checker/beam acc is %.4f/%.4f ." % (time.time() - start_time, dev_acc, dev_acc_checker, dev_acc_beam)) class Example(): def configuration(cls, plm=None, method='lgesql', table_path='data/tables.json', tables='data/tables.bin', db_dir='data/database'): cls.plm, cls.method = plm, method cls.grammar = ASDLGrammar.from_filepath(GRAMMAR_FILEPATH) cls.trans = TransitionSystem.get_class_by_lang('sql')(cls.grammar) cls.tables = pickle.load(open(tables, 'rb')) if type(tables) == str else tables cls.evaluator = Evaluator(cls.trans, table_path, db_dir) if plm is None: cls.word2vec = Word2vecUtils() cls.tokenizer = lambda x: x cls.word_vocab = Vocab(padding=True, unk=True, boundary=True, default=UNK, filepath='./pretrained_models/glove.42b.300d/vocab.txt', specials=SCHEMA_TYPES) # word vocab for glove.42B.300d else: cls.tokenizer = AutoTokenizer.from_pretrained(os.path.join('./pretrained_models', plm)) cls.word_vocab = cls.tokenizer.get_vocab() cls.relation_vocab = Vocab(padding=False, unk=False, boundary=False, iterable=RELATIONS, default=None) cls.graph_factory = GraphFactory(cls.method, cls.relation_vocab) def load_dataset(cls, choice, debug=False): assert choice in ['train', 'dev'] fp = os.path.join('data', choice + '.' 
+ cls.method + '.bin') datasets = pickle.load(open(fp, 'rb')) # question_lens = [len(ex['processed_question_toks']) for ex in datasets] # print('Max/Min/Avg question length in %s dataset is: %d/%d/%.2f' % (choice, max(question_lens), min(question_lens), float(sum(question_lens))/len(question_lens))) # action_lens = [len(ex['actions']) for ex in datasets] # print('Max/Min/Avg action length in %s dataset is: %d/%d/%.2f' % (choice, max(action_lens), min(action_lens), float(sum(action_lens))/len(action_lens))) examples, outliers = [], 0 for ex in datasets: if choice == 'train' and len(cls.tables[ex['db_id']]['column_names']) > 100: outliers += 1 continue examples.append(cls(ex, cls.tables[ex['db_id']])) if debug and len(examples) >= 100: return examples if choice == 'train': print("Skip %d extremely large samples in training dataset ..." % (outliers)) return examples def __init__(self, ex: dict, db: dict): super(Example, self).__init__() self.ex = ex self.db = db """ Mapping word to corresponding index """ if Example.plm is None: self.question = ex['processed_question_toks'] self.question_id = [Example.word_vocab[w] for w in self.question] self.column = [[db['column_types'][idx].lower()] + c for idx, c in enumerate(db['processed_column_toks'])] self.column_id = [[Example.word_vocab[w] for w in c] for c in self.column] self.table = [['table'] + t for t in db['processed_table_toks']] self.table_id = [[Example.word_vocab[w] for w in t] for t in self.table] else: t = Example.tokenizer self.question = [q.lower() for q in ex['raw_question_toks']] self.question_id = [t.cls_token_id] # map token to id self.question_mask_plm = [] # remove SEP token in our case self.question_subword_len = [] # subword len for each word, exclude SEP token for w in self.question: toks = t.convert_tokens_to_ids(t.tokenize(w)) self.question_id.extend(toks) self.question_subword_len.append(len(toks)) self.question_mask_plm = [0] + [1] * (len(self.question_id) - 1) + [0] 
self.question_id.append(t.sep_token_id) self.table = [['table'] + t.lower().split() for t in db['table_names']] self.table_id, self.table_mask_plm, self.table_subword_len = [], [], [] self.table_word_len = [] for s in self.table: l = 0 for w in s: toks = t.convert_tokens_to_ids(t.tokenize(w)) self.table_id.extend(toks) self.table_subword_len.append(len(toks)) l += len(toks) self.table_word_len.append(l) self.table_mask_plm = [1] * len(self.table_id) self.column = [[db['column_types'][idx].lower()] + c.lower().split() for idx, (_, c) in enumerate(db['column_names'])] self.column_id, self.column_mask_plm, self.column_subword_len = [], [], [] self.column_word_len = [] for s in self.column: l = 0 for w in s: toks = t.convert_tokens_to_ids(t.tokenize(w)) self.column_id.extend(toks) self.column_subword_len.append(len(toks)) l += len(toks) self.column_word_len.append(l) self.column_mask_plm = [1] * len(self.column_id) + [0] self.column_id.append(t.sep_token_id) self.input_id = self.question_id + self.table_id + self.column_id self.segment_id = [0] * len(self.question_id) + [1] * (len(self.table_id) + len(self.column_id)) \ if Example.plm != 'grappa_large_jnt' and not Example.plm.startswith('roberta') \ else [0] * (len(self.question_id) + len(self.table_id) + len(self.column_id)) self.question_mask_plm = self.question_mask_plm + [0] * (len(self.table_id) + len(self.column_id)) self.table_mask_plm = [0] * len(self.question_id) + self.table_mask_plm + [0] * len(self.column_id) self.column_mask_plm = [0] * (len(self.question_id) + len(self.table_id)) + self.column_mask_plm self.graph = Example.graph_factory.graph_construction(ex, db) # outputs self.query = ' '.join(ex['query'].split('\t')) self.ast = ex['ast'] self.tgt_action = ex['actions'] self.used_tables, self.used_columns = ex['used_tables'], ex['used_columns'] class Batch(): def __init__(self, examples, device='cpu'): super(Batch, self).__init__() self.examples = examples self.device = device def from_example_list(cls, 
ex_list, device='cpu', train=True, method='text2sql', **kwargs): method_dict = { "text2sql": from_example_list_text2sql, } return method_dict[method](ex_list, device, train=train, **kwargs) def __len__(self): return len(self.examples) def __getitem__(self, idx): return self.examples[idx] def max_question_len(self): return torch.max(self.question_lens).item() def max_table_len(self): return torch.max(self.table_lens).item() def max_column_len(self): return torch.max(self.column_lens).item() def max_table_word_len(self): return torch.max(self.table_word_lens).item() def max_column_word_len(self): return torch.max(self.column_word_lens).item() def max_question_subword_len(self): return torch.max(self.question_subword_lens).item() def max_table_subword_len(self): return torch.max(self.table_subword_lens).item() def max_column_subword_len(self): return torch.max(self.column_subword_lens).item() """ Different types of nodes are seperated instead of concatenated together """ def mask(self): return torch.cat([self.question_mask, self.table_mask, self.column_mask], dim=1) def question_mask(self): return lens2mask(self.question_lens) def table_mask(self): return lens2mask(self.table_lens) def column_mask(self): return lens2mask(self.column_lens) def table_word_mask(self): return lens2mask(self.table_word_lens) def column_word_mask(self): return lens2mask(self.column_word_lens) def question_subword_mask(self): return lens2mask(self.question_subword_lens) def table_subword_mask(self): return lens2mask(self.table_subword_lens) def column_subword_mask(self): return lens2mask(self.column_subword_lens) def get_frontier_field_idx(self, t): ids = [] for e in self.examples: if t < len(e.tgt_action): ids.append(Example.grammar.field2id[e.tgt_action[t].frontier_field]) # assert self.grammar.id2field[ids[-1]] == e.tgt_action[t].frontier_field else: ids.append(0) return torch.tensor(ids, dtype=torch.long, device=self.device) def get_frontier_prod_idx(self, t): ids = [] for e in 
self.examples: if t < len(e.tgt_action): ids.append(Example.grammar.prod2id[e.tgt_action[t].frontier_prod]) # assert self.grammar.id2prod[ids[-1]] == e.tgt_action[t].frontier_prod else: ids.append(0) return torch.tensor(ids, dtype=torch.long, device=self.device) def get_frontier_field_type_idx(self, t): ids = [] for e in self.examples: if t < len(e.tgt_action): ids.append(Example.grammar.type2id[e.tgt_action[t].frontier_field.type]) # assert self.grammar.id2type[ids[-1]] == e.tgt_action[t].frontier_field.type else: ids.append(0) return torch.tensor(ids, dtype=torch.long, device=self.device) def get_label(sql,column_len): thelabel = [] slot = {} for idx in range(column_len): slot[idx] = "" for value in get_labels(sql,slot,'').values(): thelabel.append(value) return thelabel def dev_decode(choice, output_path, acc_type='sql', use_checker=False): assert acc_type in ['beam', 'ast', 'sql'] and choice in ['train', 'dev'] model.eval() dataset = train_dataset if choice == 'train' else dev_dataset all_hyps = [] with torch.no_grad(): last_sql = '' sql_label = [] final_data = [] with open('predict.txt','w',encoding='utf8') as f: for i in range(0, len(dataset), 1): db_id = dataset[i]['db_id'] tables = db[dataset[i]['db_id']] schema = schemas[db_id] table = thetables[db_id] if '[CLS]' not in dataset[i]['question'] or last_sql == '': sql_label = ['']*len(tables['column_names']) else: schema = Schema(schema, table) try: sql_label = get_sql(schema, last_sql) except: sql_label = ['']*len(tables['column_names']) else: sql_label = get_label(sql_label,len(table['column_names_original'])) if '[CLS]' not in dataset[i]['question'] and i != 0: f.write('\n') dev_ex = Example(dataset[i], tables, sql_label) current_batch = Batch.from_example_list([dev_ex], device, train=False) hyps = model.parse(current_batch, args.beam_size) last_sql = evaluator.obtain_sql(hyps[0], dev_ex.db) printsql = last_sql all_hyps.extend(hyps) final_data.append(dev_ex) f.write(printsql+'\n') f.write('\n') acc,IM = 
evaluator.acc(all_hyps, final_data, output_path, acc_type=acc_type, etype='match', use_checker=use_checker) torch.cuda.empty_cache() gc.collect() return acc,IM
null
164,220
import logging import re, math import torch from torch.optim import Optimizer from torch.optim.lr_scheduler import LambdaLR from torch.nn.utils import clip_grad_norm_ from collections import defaultdict schedule_dict = { "constant": get_constant_schedule, "linear": get_linear_schedule_with_warmup, "ratsql": get_ratsql_schedule_with_warmup, "cosine": get_cosine_schedule_with_warmup, } class AdamW(Optimizer): def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-6, weight_decay=0.0, max_grad_norm=-1, correct_bias=True): def step(self, closure=None): def set_optimizer(model, args, num_warmup_steps, num_training_steps, last_epoch=-1): plm = hasattr(model.encoder.input_layer, 'plm_model') if plm and args.layerwise_decay <= 0.: # fix plm params for n, p in model.named_parameters(): if 'plm_model' in n: p.requires_grad = False params = [(n, p) for n, p in model.named_parameters() if p.requires_grad] no_decay = ['bias', 'LayerNorm.weight'] if plm and 0. < args.layerwise_decay <= 0.5: # seperate lr for plm grouped_params = [ {'params': list(set([p for n, p in params if 'plm_model' in n and not any(nd in n for nd in no_decay)])), 'lr': args.layerwise_decay * args.lr, 'weight_decay': args.l2}, {'params': list(set([p for n, p in params if 'plm_model' in n and any(nd in n for nd in no_decay)])), 'lr': args.layerwise_decay * args.lr, 'weight_decay': 0.0}, {'params': list(set([p for n, p in params if 'plm_model' not in n and not any(nd in n for nd in no_decay)])), 'weight_decay': args.l2}, {'params': list(set([p for n, p in params if 'plm_model' not in n and any(nd in n for nd in no_decay)])), 'weight_decay': 0.0}, ] print('Use seperate lr %f for pretrained model ...' % (args.lr * args.layerwise_decay)) elif plm and 0.5 < args.layerwise_decay < 1.: # lr decay layerwise for plm pattern = r'encoder\.layer\.(.*?)\.' 
num_layers = int(model.encoder.input_layer.plm_model.config.num_hidden_layers) groups = {"decay": defaultdict(list), "no_decay": defaultdict(list)} # record grouped params for n, p in params: res = re.search(pattern, n) if 'plm_model' in n else None depth = int(res.group(1)) if res is not None else 0 if 'plm_model' in n else num_layers if any(nd in n for nd in no_decay): groups["no_decay"][int(depth)].append(p) else: groups["decay"][int(depth)].append(p) grouped_params = [] for d in groups["decay"]: lr = args.lr * (args.layerwise_decay ** (num_layers - d)) grouped_params.append({'params': list(set(groups["decay"][d])), 'lr': lr, 'weight_decay': args.l2}) for d in groups["no_decay"]: lr = args.lr * (args.layerwise_decay ** (num_layers - d)) grouped_params.append({'params': list(set(groups["no_decay"][d])), 'lr': lr, 'weight_decay': 0.0}) print('Use layerwise decay (rate %f) lr %f for pretrained model ...' % (args.layerwise_decay, args.lr)) else: # the same lr for plm and other modules grouped_params = [ {'params': list(set([p for n, p in params if not any(nd in n for nd in no_decay)])), 'weight_decay': args.l2}, {'params': list(set([p for n, p in params if any(nd in n for nd in no_decay)])), 'weight_decay': 0.0}, ] print('Use the same lr %f for all parameters ...' % (args.lr)) optimizer = AdamW(grouped_params, lr=args.lr, max_grad_norm=args.max_norm) schedule_func = schedule_dict[args.lr_schedule] scheduler = schedule_func(optimizer, num_warmup_steps, num_training_steps, last_epoch=last_epoch) return optimizer, scheduler
null
164,234
import logging import math import os from dataclasses import dataclass, field from typing import Optional import torch from transformers import ( MODEL_WITH_LM_HEAD_MAPPING, AutoTokenizer, HfArgumentParser, PreTrainedTokenizer, set_seed, ) from generator.models.relogic import RelogicModel from generator.datasets.text_generation.relogic import RelogicDataset, DataCollatorForRelogic from generator.scorers.text_generation import TextGenerationScorer from generator.trainer import Generator_Trainer from generator.training_args import Generator_TrainingArguments from evaluator.models.adversarial_evaluator import AdversarialModel from evaluator.datasets.evaluator.adversarial import AdversarialDataset, DataCollatorForAdversarial from evaluator.scorers.adv_eval import EvalScorer from evaluator.trainer import Evaluator_Trainer from evaluator.training_args import Evaluator_TrainingArguments class RelogicModel(nn.Module): def __init__(self, pretrain_config): def forward(self, *input, **kwargs): def save_pretrained(self, save_directory): class RelogicDataset(Dataset): def __init__(self, tokenizer: PreTrainedTokenizer, file_path, block_size, local_rank=-1,translated_logic=False, snow_ball=False, preprocess_path = None, mutation_data_path = None, aug_sample_num = 5, augmented=False, snowball_iteration = 0, total_snowball_iteration = 1, multi_task = False): def process_data(self, augmented, snowball_iteration, total_snowball_iteration, aug_sample_num, raw_file, tokenizer, add_prefix_space, translated_logic, preprocess_path, snow_ball, mutation_data_path, logic_key, text_key, invalid_idx, datastart): def __len__(self): def __getitem__(self, i): class DataCollatorForRelogic: def __post_init__(self): def collate_batch(self, examples): class TextGenerationScorer: def __init__(self, tokenizer, bos_id, eos_id, output_path): def __call__(self, prediction, epoch = 0, snow_ball = False, mode_name='eval'): def get_sequence(self, seq): class Generator_Trainer: def __init__( self, model: 
PreTrainedModel, args: Generator_TrainingArguments, data_collator: Optional[DataCollator] = None, train_dataset: Optional[Dataset] = None, eval_dataset: Optional[Dataset] = None, compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None, prediction_loss_only=False, tb_writer: Optional["SummaryWriter"] = None, optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = None, model_name = "", output_dump_dir = '', reranker = None ): def get_train_dataloader(self) -> DataLoader: def get_eval_dataloader(self, eval_dataset: Optional[Dataset] = None) -> DataLoader: def get_test_dataloader(self, test_dataset: Dataset) -> DataLoader: def get_optimizers( self, num_training_steps: int ) -> Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]: def _setup_wandb(self): def num_examples(self, dataloader: DataLoader) -> int: def train(self, model_path: Optional[str] = None): def _log(self, logs: Dict[str, float], iterator: Optional[tqdm] = None) -> None: def _training_step( self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]], optimizer: torch.optim.Optimizer ) -> float: def is_local_master(self) -> bool: def is_world_master(self) -> bool: def save_model(self, output_dir: Optional[str] = None): def _save_tpu(self, output_dir: Optional[str] = None): def _save(self, output_dir: Optional[str] = None): def _sorted_checkpoints(self, checkpoint_prefix=PREFIX_CHECKPOINT_DIR, use_mtime=False) -> List[str]: def _rotate_checkpoints(self, use_mtime=False) -> None: def evaluate( self, eval_dataset: Optional[Dataset] = None, prediction_loss_only: Optional[bool] = None, snow_ball: Optional[bool] = False ) -> Dict[str, float]: def predict(self, test_dataset: Dataset, mode_name = 'test') -> PredictionOutput: def _prediction_loop( self, dataloader: DataLoader, description: str, prediction_loss_only: Optional[bool] = None, snow_ball: Optional[bool] = False, mode_name: Optional[str] = 'eval' ) -> PredictionOutput: def distributed_concat(self, 
tensor: torch.Tensor, num_total_examples: int) -> torch.Tensor: def distributed_concat_tensor(self, tensor: torch.Tensor): def distributed_concat_varsize_tensor(self, tensor: torch.Tensor): def distributed_concat_with_size(self, tensor: torch.Tensor, size: torch.Tensor, num_total_examples: int) -> torch.Tensor: def train_generator(config_name, data_path, preprocess_path, tokenizer, data_args,training_args, model_args, output_dump_dir, eval_dataset, test_dataset, outdomain_test_dataset, snow_ball=False, generator = None, reranker = None, augmented=False, snowball_iteration = 0, total_snowball_iteration = 1, multi_task=False): #logger.info("Start trainning generator on dataset: {}".format(data_path)) if not generator or model_args.refresh_model: generator = RelogicModel(config_name) if training_args.gen_wo_gen_rerank: reranker = None train_dataset = RelogicDataset(tokenizer=tokenizer, file_path=data_path, preprocess_path=preprocess_path, block_size=data_args.block_size, translated_logic=data_args.translated_logic, augmented=augmented, snowball_iteration = snowball_iteration, total_snowball_iteration = total_snowball_iteration, multi_task=multi_task) data_collator = DataCollatorForRelogic(tokenizer=tokenizer) label_bos_id = data_collator.label_bos_id label_eos_id = data_collator.label_eos_id scorer = TextGenerationScorer(bos_id=label_bos_id, eos_id=label_eos_id, tokenizer=tokenizer, output_path=output_dump_dir) # Initialize our Trainer trainer = Generator_Trainer( model=generator, args=training_args, data_collator=data_collator, train_dataset=train_dataset, eval_dataset=eval_dataset, compute_metrics=scorer, prediction_loss_only = False, model_name=training_args.gen_model, output_dump_dir = output_dump_dir, reranker = reranker ) # Training model_path = ( model_args.model_name_or_path if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path) else None ) trainer.train(model_path=model_path) trainer.save_model() # For convenience, we 
also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_master(): tokenizer.save_pretrained(output_dump_dir) if training_args.gen_do_test: trainer.predict(test_dataset=test_dataset, mode_name='test') if training_args.gen_do_out_domain_test: trainer.predict(test_dataset=outdomain_test_dataset, mode_name='out_domain_test') torch.cuda.empty_cache() return generator
null
164,235
import logging import math import os from dataclasses import dataclass, field from typing import Optional import torch from transformers import ( MODEL_WITH_LM_HEAD_MAPPING, AutoTokenizer, HfArgumentParser, PreTrainedTokenizer, set_seed, ) from generator.models.relogic import RelogicModel from generator.datasets.text_generation.relogic import RelogicDataset, DataCollatorForRelogic from generator.scorers.text_generation import TextGenerationScorer from generator.trainer import Generator_Trainer from generator.training_args import Generator_TrainingArguments from evaluator.models.adversarial_evaluator import AdversarialModel from evaluator.datasets.evaluator.adversarial import AdversarialDataset, DataCollatorForAdversarial from evaluator.scorers.adv_eval import EvalScorer from evaluator.trainer import Evaluator_Trainer from evaluator.training_args import Evaluator_TrainingArguments class AdversarialModel(nn.Module): """ output: tuple: (loss, ) in training """ def __init__(self, config_name): super().__init__() self.bart = BartForSequenceClassification.from_pretrained(config_name) self.tokenizer = BartTokenizer.from_pretrained(config_name) special_tokens_dict = {'additional_special_tokens': ['<SQL>', '<LOGIC>']} self.tokenizer.add_special_tokens(special_tokens_dict) self.bart.resize_token_embeddings(len(self.tokenizer)) self.prelu = nn.PReLU() self.fc = nn.Linear(3, 1) self.sigmoid = nn.Sigmoid() self.loss = nn.BCELoss() def label_smoothing(self, labels, epsilon=0.1): K = 2 # number of channels return ((1-epsilon) * labels) + (epsilon / K) def forward(self, *input, **kwargs): encoder_inputs = kwargs.pop("encoder_input_ids").contiguous() labels = kwargs.pop('labels').unsqueeze(1) pad_token_id = kwargs.pop("pad_token_id") attention_mask = (encoder_inputs != pad_token_id).long() outputs = self.bart(encoder_inputs, attention_mask=attention_mask) # for i in range(encoder_inputs.shape[0]): # print("Input", self.tokenizer.decode(encoder_inputs[i], skip_special_tokens=True)) 
#3 logits -> 1 score #print("outputs", outputs) # score = self.prelu(outputs[0]) # score = self.fc(score) # score = self.sigmoid(score) score = torch.sum(outputs[0], 1).view(-1, 1) score = self.sigmoid(score) labels = labels.float() # print("score", score.view(1, -1)) # print("labels", labels.view(1, -1)) labels = self.label_smoothing(labels) loss = self.loss(score, labels) if self.training: return (loss, score) else: return (loss.detach(), score) def save_pretrained(self, save_directory): """ Save a model and its configuration file to a directory, so that it can be re-loaded using the `:func:`~transformers.PreTrainedModel.from_pretrained`` class method. Arguments: save_directory: directory to which to save. """ assert os.path.isdir( save_directory ), "Saving path should be a directory where the model and configuration can be saved" # Only save the model itself if we are using distributed training model_to_save = self.module if hasattr(self, "module") else self # Attach architecture to the config # model_to_save.config.architectures = [model_to_save.__class__.__name__] # If we save using the predefined names, we can load using `from_pretrained` output_model_file = os.path.join(save_directory, WEIGHTS_NAME) torch.save(model_to_save.state_dict(), output_model_file) logger.info("Model weights saved in {}".format(output_model_file)) class AdversarialDataset(Dataset): """ Dataset for training task: SQL (+ schema) -> text """ def __init__(self, tokenizer: PreTrainedTokenizer, file_path, block_size, translated_logic=False, local_rank=-1,evaluate = False, multi_task=False): assert os.path.isfile(file_path) logger.info("Creating features from dataset file at {}".format(file_path)) replacee = 'spider' replacer = 'logic2text' replaceetoken = ['<SQL>'] replacertoken = ['<LOGIC>'] if replacee not in file_path: replacee = 'logic2text' replacer = 'spider' replaceetoken = ['<LOGIC>'] replacertoken = ['<SQL>'] if multi_task: print('replacee: {}, replacer: {}'.format(replacee, 
replacer)) raw_file = open(file_path, encoding='utf-8') self.preprocess_data(translated_logic, tokenizer, raw_file, evaluate, replaceetoken) if multi_task and os.path.exists(file_path.replace(replacee, replacer)): raw_file = open(file_path.replace(replacee, replacer), encoding='utf-8') self.preprocess_data(translated_logic, tokenizer, raw_file, evaluate, replacertoken) def preprocess_data(self, translated_logic, tokenizer, raw_file, evaluate, datastart): logic_key = 'sql' text_key = 'question' if translated_logic: logic_key = 'translated_sql' self.examples = [] invali_idx = [] add_prefix_space = isinstance(tokenizer, BartTokenizer) or isinstance(tokenizer, RobertaTokenizer) for idx, line in tqdm(enumerate(raw_file)): example = json.loads(line) if evaluate: logic = example[logic_key] text = example[text_key] remark = example['remark'] label = int(example['label']) sql_question_token = datastart + [tokenizer.cls_token] + tokenizer.tokenize(logic, add_prefix_space=add_prefix_space) + [ tokenizer.eos_token] + tokenizer.tokenize(text, add_prefix_space=add_prefix_space) + [ tokenizer.sep_token] sql_question_token_ids = tokenizer.convert_tokens_to_ids(sql_question_token) self.examples.append({ "sql_question_token_ids": sql_question_token_ids, "label": label, "logic": logic, "text": text, "remark": remark}) else: mutated_logic = example["mutated_logic"] mutated_text = example['mutated_text'] original_logic = example['original_logic'] original_text = example['original_text'] # negative down-sampling if random.random() < 0.3: neg_sql_question_token = datastart + [tokenizer.cls_token] + tokenizer.tokenize(mutated_logic, add_prefix_space=add_prefix_space) + [ tokenizer.eos_token] + tokenizer.tokenize(original_text, add_prefix_space=add_prefix_space) + [ tokenizer.sep_token] neg_sql_question_token_ids = tokenizer.convert_tokens_to_ids(neg_sql_question_token) self.examples.append({ "sql_question_token_ids": neg_sql_question_token_ids, "label": 0, "logic": mutated_logic, "text": 
original_text, "remark": 'negative'}) else: neg_sql_question_token = datastart + [tokenizer.cls_token] + tokenizer.tokenize(original_logic, add_prefix_space=add_prefix_space) + [ tokenizer.eos_token] + tokenizer.tokenize(mutated_text, add_prefix_space=add_prefix_space) + [ tokenizer.sep_token] neg_sql_question_token_ids = tokenizer.convert_tokens_to_ids(neg_sql_question_token) self.examples.append({ "sql_question_token_ids": neg_sql_question_token_ids, "label": 0, "logic": original_logic, "text": mutated_text, "remark": 'negative'}) # postive up-sampling if random.random() < 0.8: pos_sql_question_token = datastart + [tokenizer.cls_token] + tokenizer.tokenize(original_logic, add_prefix_space=add_prefix_space) + [ tokenizer.eos_token] + tokenizer.tokenize(original_text, add_prefix_space=add_prefix_space) + [ tokenizer.sep_token] pos_sql_question_token_ids = tokenizer.convert_tokens_to_ids(pos_sql_question_token) self.examples.append({ "sql_question_token_ids": pos_sql_question_token_ids, "label": 1, "logic": original_logic, "text": original_text, "remark": 'positive'}) else: pos_sql_question_token = datastart + [tokenizer.cls_token] + tokenizer.tokenize(mutated_logic, add_prefix_space=add_prefix_space) + [ tokenizer.eos_token] + tokenizer.tokenize(mutated_text, add_prefix_space=add_prefix_space) + [ tokenizer.sep_token] pos_sql_question_token_ids = tokenizer.convert_tokens_to_ids(pos_sql_question_token) self.examples.append({ "sql_question_token_ids": pos_sql_question_token_ids, "label": 1, "logic": mutated_logic, "text": mutated_text, "remark": 'positive'}) def __len__(self): return len(self.examples) def __getitem__(self, i): example = self.examples[i] return example class DataCollatorForAdversarial: """ """ tokenizer: PreTrainedTokenizer def __post_init__(self): self.label_bos_id = self.tokenizer.cls_token_id self.label_eos_id = self.tokenizer.sep_token_id def collate_batch(self, examples): sql_question_ids_sequences = [example["sql_question_token_ids"] for 
example in examples] logics = [example["logic"] for example in examples] texts = [example["text"] for example in examples] labels = [example["label"] for example in examples] remarks = [example["remark"] for example in examples] padded_sql_question_ids_tensor = pad_and_tensorize_sequence( sql_question_ids_sequences, padding_value=self.tokenizer.pad_token_id) try: label_tensor = pad_and_tensorize_sequence( labels, tensorize = True, padding_value=self.tokenizer.pad_token_id) except: print(labels) return { "encoder_input_ids": padded_sql_question_ids_tensor, "labels": label_tensor, "logics": logics, "texts": texts, "remarks": remarks, "pad_token_id": self.tokenizer.pad_token_id, "label_eos_id": self.label_eos_id, "label_bos_id": self.label_bos_id, "label_padding_id": self.tokenizer.pad_token_id } class EvalScorer: def __init__(self, tokenizer, bos_id, eos_id, output_path): self.output_path = output_path #compute the Precision, Recall, F1 and AUC def compute_score(self, scores, labels,threshold=0.5): tp, fp, tn, fn = 0.0, 0.0, 0.0, 0.0 accuracy, precision, recall, f1, auc_score = 0.0, 0.0, 0.0, 0.0, 0.0 scores = scores.cpu() labels = labels.cpu() predictions = scores > threshold predictions = predictions.long() accuracy = accuracy_score(labels, predictions) precision = precision_score(labels, predictions) recall = recall_score(labels, predictions) f1 = f1_score(labels, predictions) fpr, tpr, thresholds = roc_curve(labels, scores) auc_score = auc(fpr, tpr) return accuracy, precision, recall, f1, auc_score def __call__(self, prediction, epoch = 0, dump_output=True, mode_name='eval'): output_dir = os.path.join(self.output_path, mode_name) if not os.path.exists(output_dir): os.makedirs(output_dir) output_path = output_dir + os.sep + 'epoch_{}'.format(int(epoch)) + '.json' acc, p, r, f1, auc = self.compute_score(prediction["pred_scores"], prediction["pred_labels"]) if dump_output: if is_rank_0(): fout = open(output_path, "w") for idx, (logic, text, remark, label, score) in 
enumerate(zip(prediction["logics"], prediction["texts"], prediction["remarks"], prediction["pred_labels"], prediction["pred_scores"])): if is_rank_0(): fout.write( json.dumps({ "logic":logic, "text":text, "remark":remark, "label":str(label.item()), "score":str(score.item()) }) + '\n' ) return { "eval_accuracy":acc, "eval_precision":p, "eval_recall" :r, "eval_F1" : f1, "eval_AUC":auc } class Evaluator_Trainer: """ Trainer is a simple but feature-complete training and eval loop for PyTorch, optimized for Transformers. """ model: PreTrainedModel args: Evaluator_TrainingArguments data_collator: DataCollator train_dataset: Optional[Dataset] eval_dataset: Optional[Dataset] compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None prediction_loss_only: bool tb_writer: Optional["SummaryWriter"] = None optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = None global_step: Optional[int] = None epoch: Optional[float] = None def __init__( self, model: PreTrainedModel, args: Evaluator_TrainingArguments, data_collator: Optional[DataCollator] = None, train_dataset: Optional[Dataset] = None, eval_dataset: Optional[Dataset] = None, compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None, prediction_loss_only=False, tb_writer: Optional["SummaryWriter"] = None, optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = None, model_name = "", output_dump_dir = '' ): """ Trainer is a simple but feature-complete training and eval loop for PyTorch, optimized for Transformers. 
Args: prediction_loss_only: (Optional) in evaluation and prediction, only return the loss """ self.model = model.to(args.device) self.model_name = model_name self.args = args self.data_collator = data_collator if data_collator is not None else default_data_collator self.train_dataset = train_dataset self.eval_dataset = eval_dataset self.compute_metrics = compute_metrics self.prediction_loss_only = prediction_loss_only self.optimizers = optimizers self.output_dir = output_dump_dir self.auc_score = 0 self.early_stopping = EarlyStopping(patience=7, output_dir=output_dump_dir) if tb_writer is not None: self.tb_writer = tb_writer elif is_tensorboard_available() and self.is_world_master(): self.tb_writer = SummaryWriter(log_dir=self.args.eval_logging_dir) if not is_tensorboard_available(): logger.warning( "You are instantiating a Trainer but Tensorboard is not installed. You should consider installing it." ) if is_wandb_available(): self._setup_wandb() else: logger.info( "You are instantiating a Trainer but W&B is not installed. To use wandb logging, " "run `pip install wandb; wandb login` see https://docs.wandb.com/huggingface." ) set_seed(self.args.eval_seed) # Create output directory if needed if self.is_world_master(): os.makedirs(self.output_dir, exist_ok=True) #set logger LOG_FILE = os.path.join(self.output_dir, 'log.txt') file_handler = logging.FileHandler(LOG_FILE) file_handler.setLevel(level=logging.DEBUG) logger.addHandler(file_handler) if is_torch_tpu_available(): # Set an xla_device flag on the model's config. # We'll find a more elegant and not need to do this in the future. 
self.model.config.xla_device = True if not callable(self.data_collator) and callable(getattr(self.data_collator, "collate_batch", None)): self.data_collator = self.data_collator.collate_batch warnings.warn( ( "The `data_collator` should now be a simple callable (function, class with `__call__`), classes " + "with a `collate_batch` are deprecated and won't be supported in a future version." ), FutureWarning, ) def get_train_dataloader(self) -> DataLoader: if self.train_dataset is None: raise ValueError("Trainer: training requires a train_dataset.") if is_torch_tpu_available(): train_sampler = get_tpu_sampler(self.train_dataset) else: train_sampler = ( RandomSampler(self.train_dataset) if self.args.eval_local_rank == -1 else DistributedSampler(self.train_dataset) ) data_loader = DataLoader( self.train_dataset, batch_size=self.args.train_batch_size, sampler=train_sampler, collate_fn=self.data_collator, drop_last=self.args.eval_dataloader_drop_last, ) return data_loader def get_eval_dataloader(self, eval_dataset: Optional[Dataset] = None) -> DataLoader: if eval_dataset is None and self.eval_dataset is None: raise ValueError("Trainer: evaluation requires an eval_dataset.") eval_dataset = eval_dataset if eval_dataset is not None else self.eval_dataset if is_torch_tpu_available(): sampler = SequentialDistributedSampler( eval_dataset, num_replicas=xm.xrt_world_size(), rank=xm.get_ordinal() ) elif self.args.eval_local_rank != -1: sampler = SequentialDistributedSampler(eval_dataset) else: sampler = SequentialSampler(eval_dataset) data_loader = DataLoader( eval_dataset, sampler=sampler, batch_size=self.args.eval_batch_size, collate_fn=self.data_collator, drop_last=self.args.eval_dataloader_drop_last, ) return data_loader def get_test_dataloader(self, test_dataset: Dataset) -> DataLoader: # We use the same batch_size as for eval. 
if is_torch_tpu_available(): sampler = SequentialDistributedSampler( test_dataset, num_replicas=xm.xrt_world_size(), rank=xm.get_ordinal() ) elif self.args.eval_local_rank != -1: sampler = SequentialDistributedSampler(test_dataset) else: sampler = SequentialSampler(test_dataset) data_loader = DataLoader( test_dataset, sampler=sampler, batch_size=self.args.eval_batch_size, collate_fn=self.data_collator, drop_last=self.args.eval_dataloader_drop_last, ) return data_loader def get_optimizers( self, num_training_steps: int ) -> Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]: """ Setup the optimizer and the learning rate scheduler. We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the Trainer's init, or override this method in a subclass. """ if self.optimizers is not None: return self.optimizers # Prepare optimizer and schedule (linear warmup and decay) no_decay = ["bias", "LayerNorm.weight"] optimizer_grouped_parameters = [ { "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)], "weight_decay": self.args.eval_weight_decay, }, { "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0, }, ] optimizer = AdamW(optimizer_grouped_parameters, lr=self.args.eval_learning_rate, eps=self.args.eval_adam_epsilon) scheduler = get_linear_schedule_with_warmup( optimizer, num_warmup_steps=self.args.eval_warmup_steps, num_training_steps=num_training_steps ) return optimizer, scheduler def _setup_wandb(self): """ Setup the optional Weights & Biases (`wandb`) integration. One can override this method to customize the setup if needed. 
Find more information at https://docs.wandb.com/huggingface You can also override the following environment variables: Environment: WANDB_WATCH: (Optional, ["gradients", "all", "false"]) "gradients" by default, set to "false" to disable gradient logging or "all" to log gradients and parameters WANDB_PROJECT: (Optional): str - "huggingface" by default, set this to a custom string to store results in a different project WANDB_DISABLED: (Optional): boolean - defaults to false, set to "true" to disable wandb entirely """ if self.is_world_master(): logger.info( 'Automatic Weights & Biases logging enabled, to disable set os.environ["WANDB_DISABLED"] = "true"' ) wandb.init(project=os.getenv("WANDB_PROJECT", "huggingface"), config=vars(self.args)) # keep track of model topology and gradients, unsupported on TPU if not is_torch_tpu_available() and os.getenv("WANDB_WATCH") != "false": wandb.watch( self.model, log=os.getenv("WANDB_WATCH", "gradients"), log_freq=max(100, self.args.eval_logging_steps) ) def num_examples(self, dataloader: DataLoader) -> int: """ Helper to get num of examples from a DataLoader, by accessing its Dataset. """ return len(dataloader.dataset) def train(self, model_path: Optional[str] = None): """ Main training entry point. Args: model_path: (Optional) Local path to model if model to train has been instantiated from a local path If present, we will try reloading the optimizer/scheduler states from there. 
""" train_dataloader = self.get_train_dataloader() if self.args.eval_max_steps > 0: t_total = self.args.eval_max_steps num_train_epochs = ( self.args.eval_max_steps // (len(train_dataloader) // self.args.eval_gradient_accumulation_steps) + 1 ) else: t_total = int(len(train_dataloader) // self.args.eval_gradient_accumulation_steps * self.args.eval_num_train_epochs) num_train_epochs = self.args.eval_num_train_epochs optimizer, scheduler = self.get_optimizers(num_training_steps=t_total) # Check if saved optimizer or scheduler states exist if ( model_path is not None and os.path.isfile(os.path.join(model_path, "optimizer.pt")) and os.path.isfile(os.path.join(model_path, "scheduler.pt")) ): # Load in optimizer and scheduler states optimizer.load_state_dict( torch.load(os.path.join(model_path, "optimizer.pt"), map_location=self.args.eval_device) ) scheduler.load_state_dict(torch.load(os.path.join(model_path, "scheduler.pt"))) model = self.model if self.args.eval_fp16: if not is_apex_available(): raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") model, optimizer = amp.initialize(model, optimizer, opt_level=self.args.eval_fp16_opt_level) # multi-gpu training (should be after apex fp16 initialization) if self.args.n_gpu > 1: model = torch.nn.DataParallel(model) # Distributed training (should be after apex fp16 initialization) if self.args.eval_local_rank != -1: model = torch.nn.parallel.DistributedDataParallel( model, device_ids=[self.args.eval_local_rank], output_device=self.args.eval_local_rank, find_unused_parameters=True, ) if self.tb_writer is not None: self.tb_writer.add_text("args", self.args.eval_to_json_string()) self.tb_writer.add_hparams(self.args.eval_to_sanitized_dict(), metric_dict={}) # Train! 
if is_torch_tpu_available(): total_train_batch_size = self.args.eval_train_batch_size * xm.xrt_world_size() else: total_train_batch_size = ( self.args.train_batch_size * self.args.eval_gradient_accumulation_steps * (torch.distributed.get_world_size() if self.args.eval_local_rank != -1 else 1) ) logger.info("***** Running training *****") logger.info(" Num examples = %d", self.num_examples(train_dataloader)) logger.info(" Num Epochs = %d", num_train_epochs) logger.info(" Instantaneous batch size per device = %d", self.args.eval_per_device_train_batch_size) logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d", total_train_batch_size) logger.info(" Gradient Accumulation steps = %d", self.args.eval_gradient_accumulation_steps) logger.info(" Total optimization steps = %d", t_total) self.global_step = 0 self.epoch = 0 epochs_trained = 0 steps_trained_in_current_epoch = 0 # Check if continuing training from a checkpoint if model_path is not None: # set global_step to global_step of last saved checkpoint from model path try: self.global_step = int(model_path.split("-")[-1].split("/")[0]) epochs_trained = self.global_step // (len(train_dataloader) // self.args.eval_gradient_accumulation_steps) steps_trained_in_current_epoch = self.global_step % ( len(train_dataloader) // self.args.eval_gradient_accumulation_steps ) logger.info(" Continuing training from checkpoint, will skip to saved global_step") logger.info(" Continuing training from epoch %d", epochs_trained) logger.info(" Continuing training from global step %d", self.global_step) logger.info(" Will skip the first %d steps in the first epoch", steps_trained_in_current_epoch) except ValueError: self.global_step = 0 logger.info(" Starting fine-tuning.") tr_loss = 0.0 logging_loss = 0.0 model.zero_grad() train_iterator = trange( epochs_trained, int(num_train_epochs), desc="Epoch", disable=not self.is_local_master() or not self.args.eval_logging_tqdm ) for epoch in train_iterator: if 
isinstance(train_dataloader, DataLoader) and isinstance(train_dataloader.sampler, DistributedSampler): train_dataloader.sampler.set_epoch(epoch) if is_torch_tpu_available(): parallel_loader = pl.ParallelLoader(train_dataloader, [self.args.eval_device]).per_device_loader( self.args.eval_device ) epoch_iterator = tqdm(parallel_loader, desc="Iteration", disable=not self.is_local_master() or not self.args.eval_logging_tqdm) else: epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=not self.is_local_master() or not self.args.eval_logging_tqdm) for step, inputs in enumerate(epoch_iterator): # Skip past any already trained steps if resuming training if steps_trained_in_current_epoch > 0: steps_trained_in_current_epoch -= 1 continue tr_loss += self._training_step(model, inputs, optimizer) if (step + 1) % self.args.eval_gradient_accumulation_steps == 0 or ( # last step in epoch but step is always smaller than gradient_accumulation_steps len(epoch_iterator) <= self.args.eval_gradient_accumulation_steps and (step + 1) == len(epoch_iterator) ): if self.args.eval_fp16: torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), self.args.eval_max_grad_norm) else: torch.nn.utils.clip_grad_norm_(model.parameters(), self.args.eval_max_grad_norm) if is_torch_tpu_available(): xm.optimizer_step(optimizer) else: optimizer.step() scheduler.step() model.zero_grad() self.global_step += 1 self.epoch = epoch + (step + 1) / len(epoch_iterator) if (self.args.eval_logging_steps > 0 and self.global_step % self.args.eval_logging_steps == 0) or ( self.global_step == 1 and self.args.eval_logging_first_step ): logs: Dict[str, float] = {} logs["loss"] = (tr_loss - logging_loss) / self.args.eval_logging_steps # backward compatibility for pytorch schedulers logs["learning_rate"] = ( scheduler.get_last_lr()[0] if version.parse(torch.__version__) >= version.parse("1.4") else scheduler.get_lr()[0] ) logging_loss = tr_loss self._log(logs) if (self.args.eval_eval_epochs > 0 and self.epoch % 
self.args.eval_eval_epochs == 0): if self.args.eval_evaluate_during_training: self.evaluate() if self.global_step % 500 == 0: if self.args.eval_evaluate_during_training: self.evaluate(dump_output=True) self.early_stopping(self.auc_score, self.model) # early stopping if self.early_stopping.early_stop: logger.info("Early stopping with the AUC: {}".format(self.auc_score)) break if self.args.eval_save_epochs > 0 and self.epoch % self.args.eval_save_epochs == 0: # In all cases (even distributed/parallel), self.model is always a reference # to the model we want to save. if hasattr(model, "module"): assert model.module is self.model else: assert model is self.model # Save model checkpoint output_dir = os.path.join(self.output_dir, f"{PREFIX_CHECKPOINT_DIR}-epoch-{self.epoch}") self.save_model(output_dir) if self.is_world_master(): self._rotate_checkpoints() if is_torch_tpu_available(): xm.rendezvous("saving_optimizer_states") xm.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt")) xm.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt")) elif self.is_world_master(): torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt")) torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt")) if self.args.eval_max_steps > 0 and self.global_step > self.args.eval_max_steps: epoch_iterator.close() break #load model from early stopping checkpoint self.model.load_state_dict(torch.load(os.path.join(self.output_dir, 'checkpoint.pt'))) if self.args.eval_max_steps > 0 and self.global_step > self.args.eval_max_steps: train_iterator.close() break if self.args.eval_tpu_metrics_debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) xm.master_print(met.metrics_report()) if self.tb_writer: self.tb_writer.close() logger.info("\n\nTraining completed. 
Do not forget to share your model on huggingface.co/models =)\n\n") return TrainOutput(self.global_step, tr_loss / self.global_step) def _log(self, logs: Dict[str, float], iterator: Optional[tqdm] = None) -> None: if self.epoch is not None: logs["epoch"] = self.epoch if self.global_step is None: # when logging evaluation metrics without training self.global_step = 0 if self.tb_writer: for k, v in logs.items(): if isinstance(v, (int, float)): self.tb_writer.add_scalar(k, v, self.global_step) else: logger.warning( "Trainer is attempting to log a value of " '"%s" of type %s for key "%s" as a scalar. ' "This invocation of Tensorboard's writer.add_scalar() " "is incorrect so we dropped this attribute.", v, type(v), k, ) self.tb_writer.flush() if is_wandb_available(): if self.is_world_master(): wandb.log(logs, step=self.global_step) output = {**logs, **{"step": self.global_step}} if iterator is not None: iterator.write(output) else: logger.info(output) def _training_step( self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]], optimizer: torch.optim.Optimizer ) -> float: model.train() for k, v in inputs.items(): if isinstance(v, torch.Tensor): inputs[k] = v.to(self.args.device) outputs = model(**inputs) loss = outputs[0] # model outputs are always tuple in transformers (see doc) #print("Loss", loss) if self.args.n_gpu > 1: loss = loss.mean() # mean() to average on multi-gpu parallel training if self.args.eval_gradient_accumulation_steps > 1: loss = loss / self.args.eval_gradient_accumulation_steps if self.args.eval_fp16: with amp.scale_loss(loss, optimizer) as scaled_loss: scaled_loss.backward() else: loss.backward() return loss.item() def is_local_master(self) -> bool: if is_torch_tpu_available(): return xm.is_master_ordinal(local=True) else: return self.args.eval_local_rank in [-1, 0] def is_world_master(self) -> bool: """ This will be True only in one process, even in distributed mode, even when training on multiple machines. 
""" if is_torch_tpu_available(): return xm.is_master_ordinal(local=False) else: return self.args.eval_local_rank == -1 or torch.distributed.get_rank() == 0 def save_model(self, output_dir: Optional[str] = None): """ Saving best-practices: if you use default names for the model, you can reload it using from_pretrained(). Will only save from the world_master process (unless in TPUs). """ if is_torch_tpu_available(): self._save_tpu(output_dir) elif self.is_world_master(): self._save(output_dir) def _save_tpu(self, output_dir: Optional[str] = None): output_dir = output_dir if output_dir is not None else self.output_dir logger.info("Saving model checkpoint to %s", output_dir) if xm.is_master_ordinal(): os.makedirs(output_dir, exist_ok=True) torch.save(self.args, os.path.join(output_dir, "training_args.bin")) # Save a trained model and configuration using `save_pretrained()`. # They can then be reloaded using `from_pretrained()` if not isinstance(self.model, PreTrainedModel): raise ValueError("Trainer.model appears to not be a PreTrainedModel") xm.rendezvous("saving_checkpoint") self.model.save_pretrained(output_dir) def _save(self, output_dir: Optional[str] = None): output_dir = output_dir if output_dir is not None else self.output_dir os.makedirs(output_dir, exist_ok=True) logger.info("Saving model checkpoint to %s", output_dir) # Save a trained model and configuration using `save_pretrained()`. 
# They can then be reloaded using `from_pretrained()` # if not isinstance(self.model, PreTrainedModel): # raise ValueError("Trainer.model appears to not be a PreTrainedModel") self.model.save_pretrained(output_dir) # Good practice: save your training arguments together with the trained model torch.save(self.args, os.path.join(output_dir, "training_args.bin")) def _sorted_checkpoints(self, checkpoint_prefix=PREFIX_CHECKPOINT_DIR, use_mtime=False) -> List[str]: ordering_and_checkpoint_path = [] glob_checkpoints = [str(x) for x in Path(self.output_dir).glob(f"{checkpoint_prefix}-*")] for path in glob_checkpoints: if use_mtime: ordering_and_checkpoint_path.append((os.path.getmtime(path), path)) else: regex_match = re.match(f".*{checkpoint_prefix}-([0-9]+)", path) if regex_match and regex_match.groups(): ordering_and_checkpoint_path.append((int(regex_match.groups()[0]), path)) checkpoints_sorted = sorted(ordering_and_checkpoint_path) checkpoints_sorted = [checkpoint[1] for checkpoint in checkpoints_sorted] return checkpoints_sorted def _rotate_checkpoints(self, use_mtime=False) -> None: if self.args.eval_save_total_limit is None or self.args.eval_save_total_limit <= 0: return # Check if we should delete older checkpoint(s) checkpoints_sorted = self._sorted_checkpoints(use_mtime=use_mtime) if len(checkpoints_sorted) <= self.args.eval_save_total_limit: return number_of_checkpoints_to_delete = max(0, len(checkpoints_sorted) - self.args.eval_save_total_limit) checkpoints_to_be_deleted = checkpoints_sorted[:number_of_checkpoints_to_delete] for checkpoint in checkpoints_to_be_deleted: logger.info("Deleting older checkpoint [{}] due to args.save_total_limit".format(checkpoint)) shutil.rmtree(checkpoint) def evaluate( self, eval_dataset: Optional[Dataset] = None, prediction_loss_only: Optional[bool] = None, dump_output:bool = True ) -> Dict[str, float]: """ Run evaluation and return metrics. 
The calling script will be responsible for providing a method to compute metrics, as they are task-dependent. Args: eval_dataset: (Optional) Pass a dataset if you wish to override the one on the instance. Returns: A dict containing: - the eval loss - the potential metrics computed from the predictions """ eval_dataloader = self.get_eval_dataloader(eval_dataset) output = self._prediction_loop(eval_dataloader, description="Evaluation", prediction_loss_only = prediction_loss_only, dump_output=dump_output) self._log(output.metrics) if self.args.eval_tpu_metrics_debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) xm.master_print(met.metrics_report()) return output.metrics def predict(self, test_dataset: Dataset, mode_name='test') -> PredictionOutput: """ Run prediction and return predictions and potential metrics. Depending on the dataset and your use case, your test dataset may contain labels. In that case, this method will also return metrics, like in evaluate(). """ test_dataloader = self.get_test_dataloader(test_dataset) output = self._prediction_loop(test_dataloader, description="Prediction", mode_name=mode_name) self._log(output.metrics) return output.metrics def _prediction_loop( self, dataloader: DataLoader, description: str, prediction_loss_only: Optional[bool] = None, dump_output:bool = True, mode_name: str ='pred' ) -> PredictionOutput: """ Prediction/evaluation loop, shared by `evaluate()` and `predict()`. Works both with or without labels. NOTE: One issue is on the size of prediction and labels. For current code, it considers all the prediction and labels in different batch have same length of sequence. This is not true for our application. To make this more evaleral, I will reformat the predictions and labels. 
""" prediction_loss_only = prediction_loss_only if prediction_loss_only is not None else self.prediction_loss_only model = self.model # multi-gpu eval if self.args.n_gpu > 1: model = torch.nn.DataParallel(model) else: model = self.model # Note: in torch.distributed mode, there's no point in wrapping the model # inside a DistributedDataParallel as we'll be under `no_grad` anyways. batch_size = dataloader.batch_size logger.info("***** Running {}: {} *****".format(description, mode_name)) logger.info(" Num examples = %d", self.num_examples(dataloader)) logger.info(" Batch size = %d", batch_size) eval_losses: List[float] = [] logics : List[str] = [] pred_scores: torch.Tensor = None pred_labels: torch.Tensor = None texts : List[str] = [] remarks : List[str] = [] probabilities: List[float] = [] preds: torch.Tensor = None preds_size: torch.Tensor = None label_ids: torch.Tensor = None label_size: torch.Tensor = None model.eval() if is_torch_tpu_available(): dataloader = pl.ParallelLoader(dataloader, [self.args.eval_device]).per_device_loader(self.args.eval_device) for inputs in tqdm(dataloader, desc=description): has_labels = any(inputs.get(k) is not None for k in ["labels", "lm_labels", "masked_lm_labels"]) for k, v in inputs.items(): if isinstance(v, torch.Tensor): inputs[k] = v.to(self.args.device) with torch.no_grad(): outputs = model(**inputs) if has_labels: step_eval_loss, logits = outputs[:2] eval_losses += [step_eval_loss.mean().item()] logics += inputs["logics"] texts += inputs["texts"] remarks += inputs["remarks"] if pred_labels is None: pred_labels= inputs["labels"].detach().view(-1) pred_scores = logits.detach().view(-1) else: pred_labels = torch.cat((pred_labels, inputs["labels"].detach().view(-1)), dim=0) pred_scores = torch.cat((pred_scores, logits.detach().view(-1)), dim=0) if self.args.eval_local_rank != -1: # In distributed mode, concatenate all results from all nodes: if preds is not None: # preds = self.distributed_concat(preds, 
num_total_examples=self.num_examples(dataloader)) preds, preds_size = self.distributed_concat_with_size(preds, preds_size, num_total_examples=self.num_examples(dataloader)) if label_ids is not None: # label_ids = self.distributed_concat(label_ids, num_total_examples=self.num_examples(dataloader)) label_ids, label_size = self.distributed_concat_with_size(label_ids, label_size, num_total_examples=self.num_examples(dataloader)) elif is_torch_tpu_available(): # tpu-comment: Get all predictions and labels from all worker shards of eval dataset # NOTE: We do not modify this for now. if preds is not None: preds = xm.mesh_reduce("eval_preds", preds, torch.cat) if label_ids is not None: label_ids = xm.mesh_reduce("eval_label_ids", label_ids, torch.cat) # Finally, turn the aggregated tensors into numpy arrays. if preds is not None: preds = preds.cpu().numpy() preds_size = preds_size.cpu().numpy() if label_ids is not None: label_ids = label_ids.cpu().numpy() label_size = label_size.cpu().numpy() #print(self.compute_metrics, preds , label_ids) if self.compute_metrics is not None: eval_predictions ={ "pred_scores": pred_scores, "pred_labels": pred_labels, "logics" : logics, "texts" : texts, "remarks" : remarks } metrics = self.compute_metrics(eval_predictions, epoch = self.epoch, dump_output=dump_output, mode_name=mode_name) self.auc_score = float(metrics['eval_AUC']) else: metrics = {} if len(eval_losses) > 0: metrics["eval_loss"] = np.mean(eval_losses) # Prefix all keys with eval_ for key in list(metrics.keys()): if not key.startswith("eval_"): metrics[f"eval_{key}"] = metrics.pop(key) # return PredictionOutput(predictions=preds, label_ids=label_ids, metrics=metrics) return PredictionOutputWithSize(predictions=preds, predictions_size=preds_size, label_ids=label_ids, label_size=label_size, metrics=metrics) def distributed_concat(self, tensor: torch.Tensor, num_total_examples: int) -> torch.Tensor: assert self.args.eval_local_rank != -1 output_tensors = [tensor.clone() for _ in 
range(torch.distributed.get_world_size())] torch.distributed.all_gather(output_tensors, tensor) concat = torch.cat(output_tensors, dim=0) # truncate the dummy elements added by SequentialDistributedSampler output = concat[:num_total_examples] return output def distributed_concat_tensor(self, tensor: torch.Tensor): assert self.args.eval_local_rank != -1 output_tensors = [tensor.clone() for _ in range(torch.distributed.get_world_size())] torch.distributed.all_gather(output_tensors, tensor) concat = torch.cat(output_tensors, dim=0) return concat def distributed_concat_varsize_tensor(self, tensor: torch.Tensor): assert self.args.eval_local_rank != -1 sizes = self.distributed_concat_tensor(tensor.new_full(size=(1,), fill_value=tensor.size(0))) max_size = sizes.max().item() padded = tensor.new_zeros(max_size) padded[:tensor.size(0)] = tensor padded_agg = self.distributed_concat_tensor(padded) slices = [] for i, size in enumerate(sizes): start_idx = i * max_size end_idx = start_idx + size.item() slices.append(padded_agg[start_idx: end_idx]) ret = torch.cat(slices, dim=0) return ret def distributed_concat_with_size(self, tensor: torch.Tensor, size: torch.Tensor, num_total_examples: int) -> torch.Tensor: assert self.args.eval_local_rank != -1 # output_tensors = [tensor.clone() for _ in range(torch.distributed.get_world_size())] # output_sizes = [size.clone() for _ in range(torch.distributed.get_world_size())] # torch.distributed.all_gather(output_tensors, tensor) # torch.distributed.all_gather(output_sizes, size) # concat = torch.cat(output_tensors, dim=0) # concat_sizes = torch.cat(output_sizes, dim=0) concat_sizes = self.distributed_concat_tensor(size) concat = self.distributed_concat_varsize_tensor(tensor) output_sizes = concat_sizes[:num_total_examples] assert output_sizes.sum() == concat.size(0) return concat, output_sizes def train_evaluator(config_name, data_path, tokenizer, data_args,training_args, model_args, output_dump_dir, eval_dataset, test_dataset, 
snow_ball=True, evaluator = None, multi_task=False): #logger.info("Start trainning generator on dataset: {}".format(data_path)) if not evaluator or model_args.refresh_model: evaluator = AdversarialModel(config_name) train_dataset = AdversarialDataset(tokenizer=tokenizer, file_path=data_path, block_size=data_args.block_size, translated_logic=data_args.translated_logic, evaluate = False, multi_task=multi_task) data_collator = DataCollatorForAdversarial(tokenizer=tokenizer) label_bos_id = data_collator.label_bos_id label_eos_id = data_collator.label_eos_id scorer = EvalScorer(bos_id=label_bos_id, eos_id=label_eos_id, tokenizer=tokenizer, output_path=output_dump_dir) # Initialize our Trainer trainer = Evaluator_Trainer( model=evaluator, args=training_args, data_collator=data_collator, train_dataset=train_dataset, eval_dataset=eval_dataset, compute_metrics=scorer, prediction_loss_only = False, model_name=training_args.eval_model, output_dump_dir = output_dump_dir ) # Training model_path = ( model_args.model_name_or_path if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path) else None ) trainer.train(model_path=model_path) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_master(): tokenizer.save_pretrained(output_dump_dir) if training_args.eval_do_test: trainer.predict(test_dataset=test_dataset, mode_name='test') torch.cuda.empty_cache() return evaluator
null
164,236
import logging import math import os from dataclasses import dataclass, field from typing import Optional import torch from transformers import ( MODEL_WITH_LM_HEAD_MAPPING, AutoTokenizer, HfArgumentParser, PreTrainedTokenizer, set_seed, ) from generator.models.relogic import RelogicModel from generator.datasets.text_generation.relogic import RelogicDataset, DataCollatorForRelogic from generator.scorers.text_generation import TextGenerationScorer from generator.trainer import Generator_Trainer from generator.training_args import Generator_TrainingArguments from evaluator.models.adversarial_evaluator import AdversarialModel from evaluator.datasets.evaluator.adversarial import AdversarialDataset, DataCollatorForAdversarial from evaluator.scorers.adv_eval import EvalScorer from evaluator.trainer import Evaluator_Trainer from evaluator.training_args import Evaluator_TrainingArguments class RelogicDataset(Dataset): def __init__(self, tokenizer: PreTrainedTokenizer, file_path, block_size, local_rank=-1,translated_logic=False, snow_ball=False, preprocess_path = None, mutation_data_path = None, aug_sample_num = 5, augmented=False, snowball_iteration = 0, total_snowball_iteration = 1, multi_task = False): def process_data(self, augmented, snowball_iteration, total_snowball_iteration, aug_sample_num, raw_file, tokenizer, add_prefix_space, translated_logic, preprocess_path, snow_ball, mutation_data_path, logic_key, text_key, invalid_idx, datastart): def __len__(self): def __getitem__(self, i): class DataCollatorForRelogic: def __post_init__(self): def collate_batch(self, examples): class TextGenerationScorer: def __init__(self, tokenizer, bos_id, eos_id, output_path): def __call__(self, prediction, epoch = 0, snow_ball = False, mode_name='eval'): def get_sequence(self, seq): class Generator_Trainer: def __init__( self, model: PreTrainedModel, args: Generator_TrainingArguments, data_collator: Optional[DataCollator] = None, train_dataset: Optional[Dataset] = None, eval_dataset: 
Optional[Dataset] = None, compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None, prediction_loss_only=False, tb_writer: Optional["SummaryWriter"] = None, optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = None, model_name = "", output_dump_dir = '', reranker = None ): def get_train_dataloader(self) -> DataLoader: def get_eval_dataloader(self, eval_dataset: Optional[Dataset] = None) -> DataLoader: def get_test_dataloader(self, test_dataset: Dataset) -> DataLoader: def get_optimizers( self, num_training_steps: int ) -> Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]: def _setup_wandb(self): def num_examples(self, dataloader: DataLoader) -> int: def train(self, model_path: Optional[str] = None): def _log(self, logs: Dict[str, float], iterator: Optional[tqdm] = None) -> None: def _training_step( self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]], optimizer: torch.optim.Optimizer ) -> float: def is_local_master(self) -> bool: def is_world_master(self) -> bool: def save_model(self, output_dir: Optional[str] = None): def _save_tpu(self, output_dir: Optional[str] = None): def _save(self, output_dir: Optional[str] = None): def _sorted_checkpoints(self, checkpoint_prefix=PREFIX_CHECKPOINT_DIR, use_mtime=False) -> List[str]: def _rotate_checkpoints(self, use_mtime=False) -> None: def evaluate( self, eval_dataset: Optional[Dataset] = None, prediction_loss_only: Optional[bool] = None, snow_ball: Optional[bool] = False ) -> Dict[str, float]: def predict(self, test_dataset: Dataset, mode_name = 'test') -> PredictionOutput: def _prediction_loop( self, dataloader: DataLoader, description: str, prediction_loss_only: Optional[bool] = None, snow_ball: Optional[bool] = False, mode_name: Optional[str] = 'eval' ) -> PredictionOutput: def distributed_concat(self, tensor: torch.Tensor, num_total_examples: int) -> torch.Tensor: def distributed_concat_tensor(self, tensor: torch.Tensor): def 
distributed_concat_varsize_tensor(self, tensor: torch.Tensor): def distributed_concat_with_size(self, tensor: torch.Tensor, size: torch.Tensor, num_total_examples: int) -> torch.Tensor: def augment_data(data_path, preprocess_path, mutation_data_path, tokenizer, data_args,training_args, model_args, output_dump_dir, snow_ball, generator = None, reranker = None, multi_task=False): if training_args.gen_wo_aug_rerank: reranker = None aug_dataset = RelogicDataset(tokenizer=tokenizer, file_path=data_path, preprocess_path=preprocess_path, mutation_data_path=mutation_data_path, block_size=data_args.block_size, translated_logic=data_args.translated_logic, snow_ball=snow_ball, multi_task=multi_task) data_collator = DataCollatorForRelogic(tokenizer=tokenizer) label_bos_id = data_collator.label_bos_id label_eos_id = data_collator.label_eos_id scorer = TextGenerationScorer(bos_id=label_bos_id, eos_id=label_eos_id, tokenizer=tokenizer, output_path=output_dump_dir) # Initialize our Trainer trainer = Generator_Trainer( model=generator, args=training_args, data_collator=data_collator, eval_dataset=aug_dataset, compute_metrics=scorer, prediction_loss_only = False, model_name=training_args.gen_model, output_dump_dir = output_dump_dir, reranker = reranker ) eval_output = trainer.evaluate(snow_ball=snow_ball) torch.cuda.empty_cache()
null
164,237
def create_save_dir(model_args, snowball_iteration=0):
    """Create (if missing) and return the per-iteration output directories.

    Args:
        model_args: object exposing an ``output_dir`` attribute (run root).
        snowball_iteration: index of the current snowball round; becomes the
            trailing path component.

    Returns:
        Tuple ``(generator_dir, evaluator_dir, aug_dir)`` — each path keeps the
        original ``<root>/<component>/<iteration>/`` string format, including
        the trailing slash, for backward compatibility with callers that
        concatenate file names onto these strings.
    """
    def _ensure(component):
        # exist_ok=True replaces the original check-then-create pattern,
        # which was racy when several processes start at once.
        path = '{}/{}/{}/'.format(model_args.output_dir, component, snowball_iteration)
        os.makedirs(path, exist_ok=True)
        return path

    return _ensure('generator'), _ensure('evaluator'), _ensure('augmentation')
null
164,238
def is_tensorboard_available() -> bool:
    """Return True when a TensorBoard SummaryWriter backend could be imported.

    ``_has_tensorboard`` is a module-level flag set by an import probe that
    tries ``torch.utils.tensorboard`` first and falls back to ``tensorboardX``.
    """
    return _has_tensorboard
null
164,239
def is_wandb_available() -> bool:
    """Report whether Weights & Biases logging should be used.

    Hard-coded to False: wandb integration is disabled in this copy of the
    trainer, so callers always skip wandb setup and logging.
    """
    return False
null
164,240
def set_seed(seed: int):
    """Seed every RNG the trainer touches for reproducible runs.

    Covers Python's ``random``, NumPy, and PyTorch (CPU and all CUDA
    devices). ``torch.cuda.manual_seed_all`` is safe to call even when no
    GPU is present.
    """
    for seed_fn in (random.seed, np.random.seed, torch.manual_seed, torch.cuda.manual_seed_all):
        seed_fn(seed)
null
164,241
@contextmanager
def torch_distributed_zero_first(local_rank: int):
    """Context manager making non-master processes wait for the local master.

    Non-master ranks block on a barrier before the ``with`` body runs; once
    rank 0 finishes the body it hits the barrier and releases them. With
    ``local_rank == -1`` (non-distributed run) no barrier is ever taken.

    Fix: the function body uses ``yield`` and is documented as a
    decorator/context manager, but the ``@contextmanager`` decorator had
    been dropped, so ``with torch_distributed_zero_first(...):`` would fail
    with "generator object does not support the context manager protocol".

    Args:
        local_rank: this process's local rank (-1 when not distributed).
    """
    if local_rank not in [-1, 0]:
        torch.distributed.barrier()
    yield
    if local_rank == 0:
        torch.distributed.barrier()
Decorator to make all processes in distributed training wait for each local_master to do something.
164,242
def get_tpu_sampler(dataset: Dataset):
    """Return a sampler suitable for TPU training.

    Single-process runs (world size <= 1) get a plain ``RandomSampler``;
    otherwise the dataset is sharded across TPU workers with a
    ``DistributedSampler`` keyed on this worker's ordinal.

    NOTE(review): ``xm`` is not imported in this snippet — presumably
    ``torch_xla.core.xla_model`` imported elsewhere; confirm before use.
    """
    if xm.xrt_world_size() <= 1:
        return RandomSampler(dataset)
    return DistributedSampler(dataset, num_replicas=xm.xrt_world_size(), rank=xm.get_ordinal())
null
164,243
def pad_and_tensorize_sequence(sequences, padding_value = None, tensorize = False):
    """Right-pad variable-length sequences and pack them into a long tensor.

    Args:
        sequences: list of lists of ints.
        padding_value: value appended to short sequences (ignored when
            ``tensorize`` is True).
        tensorize: when True, skip padding and convert directly — the
            sequences must then already be rectangular, as required by
            ``torch.tensor``.

    Returns:
        ``torch.LongTensor`` of shape (len(sequences), max_len).
    """
    if tensorize:
        return torch.tensor(sequences, dtype=torch.long)
    width = max(len(seq) for seq in sequences)
    padded = [seq + [padding_value] * (width - len(seq)) for seq in sequences]
    return torch.tensor(padded, dtype=torch.long)
null
164,244
def is_rank_0() -> bool:
    """Return True when this process is the distributed master (rank 0).

    In a non-distributed run (``torch.distributed`` not initialized) every
    process counts as the master, matching the original fallthrough logic.

    Cleanup: the original nested-if with a dangling ``return False`` at the
    bottom expressed this in three branches; the direct returns below are
    behaviorally identical and make the contract obvious.
    """
    if torch.distributed.is_initialized():
        return torch.distributed.get_rank() == 0
    return True
null
164,245
def default_logdir() -> str:
    """Build the default TensorBoard log directory.

    Mirrors PyTorch's SummaryWriter convention:
    ``runs/<Mon>DD_HH-MM-SS_<hostname>``.
    """
    import socket
    from datetime import datetime

    stamp = datetime.now().strftime("%b%d_%H-%M-%S")
    return os.path.join("runs", "{}_{}".format(stamp, socket.gethostname()))
Same default as PyTorch
164,247
def test_suite():
    """Discover and return the package's unit-test suite.

    Scans ``subword_nmt/tests`` for ``test_*.py`` modules; used as the
    ``test_suite`` hook in setup.py.
    """
    loader = unittest.TestLoader()
    return loader.discover('subword_nmt/tests', pattern='test_*.py')
null
164,248
def create_parser(subparsers=None):
    """Build the argument parser for the get-vocab tool.

    When ``subparsers`` is given (truthy), registers a ``get-vocab``
    sub-command on it; otherwise returns a standalone parser. ``--input``
    and ``--output`` default to stdin/stdout.
    """
    description = "Generates vocabulary"
    if subparsers:
        parser = subparsers.add_parser(
            'get-vocab',
            formatter_class=argparse.RawDescriptionHelpFormatter,
            description=description)
    else:
        parser = argparse.ArgumentParser(
            formatter_class=argparse.RawDescriptionHelpFormatter,
            description=description)

    parser.add_argument(
        '--input', '-i', type=argparse.FileType('r'), default=sys.stdin,
        metavar='PATH',
        help="Input file (default: standard input).")
    parser.add_argument(
        '--output', '-o', type=argparse.FileType('w'), default=sys.stdout,
        metavar='PATH',
        help="Output file (default: standard output)")
    return parser
null
164,249
def get_vocab(train_file, vocab_file):
    """Write a frequency-sorted vocabulary from a whitespace-tokenized corpus.

    Reads ``train_file`` line by line, counts space-separated tokens
    (empty tokens from repeated spaces are skipped), and writes one
    ``word count`` line per token to ``vocab_file``, most frequent first.
    Ties keep first-seen order, matching a stable descending sort on counts.
    """
    counts = Counter()
    for line in train_file:
        counts.update(tok for tok in line.strip('\r\n ').split(' ') if tok)
    # most_common() == sorted(items, key=count, reverse=True) — same ordering
    # (including tie stability) as the original explicit sort.
    for word, freq in counts.most_common():
        vocab_file.write('{0} {1}\n'.format(word, freq))
null
164,250
from __future__ import print_function, unicode_literals, division import sys import codecs import io import argparse from collections import defaultdict from io import open argparse.open = open def create_parser(): parser = argparse.ArgumentParser( formatter_class=argparse.RawDescriptionHelpFormatter, description="learn BPE-based word segmentation") parser.add_argument( '--ref', '-r', type=argparse.FileType('r'), required=True, metavar='PATH', help="Reference file") parser.add_argument( '--hyp', type=argparse.FileType('r'), metavar='PATH', default=sys.stdin, help="Hypothesis file (default: stdin).") parser.add_argument( '--beta', '-b', type=float, default=3, metavar='FLOAT', help="beta parameter (default: '%(default)s')") parser.add_argument( '--ngram', '-n', type=int, default=6, metavar='INT', help="ngram order (default: '%(default)s')") parser.add_argument( '--space', '-s', action='store_true', help="take spaces into account (default: '%(default)s')") parser.add_argument( '--precision', action='store_true', help="report precision (default: '%(default)s')") parser.add_argument( '--recall', action='store_true', help="report recall (default: '%(default)s')") return parser
null
164,251
from __future__ import print_function, unicode_literals, division import sys import codecs import io import argparse from collections import defaultdict from io import open def extract_ngrams(words, max_length=4, spaces=False): if not spaces: words = ''.join(words.split()) else: words = words.strip() results = defaultdict(lambda: defaultdict(int)) for length in range(max_length): for start_pos in range(len(words)): end_pos = start_pos + length + 1 if end_pos <= len(words): results[length][tuple(words[start_pos: end_pos])] += 1 return results
null
164,252
from __future__ import print_function, unicode_literals, division import sys import codecs import io import argparse from collections import defaultdict from io import open def get_correct(ngrams_ref, ngrams_test, correct, total): for rank in ngrams_test: for chain in ngrams_test[rank]: total[rank] += ngrams_test[rank][chain] if chain in ngrams_ref[rank]: correct[rank] += min(ngrams_test[rank][chain], ngrams_ref[rank][chain]) return correct, total
null
164,253
from __future__ import print_function, unicode_literals, division import sys import codecs import io import argparse from collections import defaultdict from io import open def f1(correct, total_hyp, total_ref, max_length, beta=3, smooth=0): precision = 0 recall = 0 for i in range(max_length): if total_hyp[i] + smooth and total_ref[i] + smooth: precision += (correct[i] + smooth) / (total_hyp[i] + smooth) recall += (correct[i] + smooth) / (total_ref[i] + smooth) precision /= max_length recall /= max_length return (1 + beta**2) * (precision*recall) / ((beta**2 * precision) + recall), precision, recall
null
164,254
from __future__ import unicode_literals, division import sys import codecs import argparse from io import open argparse.open = open def create_parser(subparsers=None): if subparsers: parser = subparsers.add_parser('segment-char-ngrams', formatter_class=argparse.RawDescriptionHelpFormatter, description="segment rare words into character n-grams") else: parser = argparse.ArgumentParser( formatter_class=argparse.RawDescriptionHelpFormatter, description="segment rare words into character n-grams") parser.add_argument( '--input', '-i', type=argparse.FileType('r'), default=sys.stdin, metavar='PATH', help="Input file (default: standard input).") parser.add_argument( '--vocab', type=argparse.FileType('r'), metavar='PATH', required=True, help="Vocabulary file.") parser.add_argument( '--shortlist', type=int, metavar='INT', default=0, help="do not segment INT most frequent words in vocabulary (default: '%(default)s')).") parser.add_argument( '-n', type=int, metavar='INT', default=2, help="segment rare words into character n-grams of size INT (default: '%(default)s')).") parser.add_argument( '--output', '-o', type=argparse.FileType('w'), default=sys.stdout, metavar='PATH', help="Output file (default: standard output)") parser.add_argument( '--separator', '-s', type=str, default='@@', metavar='STR', help="Separator between non-final subword units (default: '%(default)s'))") return parser
null
164,255
from __future__ import unicode_literals, division import sys import codecs import argparse from io import open def segment_char_ngrams(args): vocab = [line.split()[0] for line in args.vocab if len(line.split()) == 2] vocab = dict((y,x) for (x,y) in enumerate(vocab)) for line in args.input: for word in line.split(): if word not in vocab or vocab[word] > args.shortlist: i = 0 while i*args.n < len(word): args.output.write(word[i*args.n:i*args.n+args.n]) i += 1 if i*args.n < len(word): args.output.write(args.separator) args.output.write(' ') else: args.output.write(word + ' ') args.output.write('\n')
null
164,258
from __future__ import unicode_literals import os import sys import inspect import codecs import re import copy import argparse import warnings import tempfile from multiprocessing import Pool, cpu_count from collections import defaultdict, Counter from io import open argparse.open = open def create_parser(subparsers=None): if subparsers: parser = subparsers.add_parser('learn-bpe', formatter_class=argparse.RawDescriptionHelpFormatter, description="learn BPE-based word segmentation") else: parser = argparse.ArgumentParser( formatter_class=argparse.RawDescriptionHelpFormatter, description="learn BPE-based word segmentation") parser.add_argument( '--input', '-i', type=argparse.FileType('r'), default=sys.stdin, metavar='PATH', help="Input text (default: standard input).") parser.add_argument( '--output', '-o', type=argparse.FileType('w'), default=sys.stdout, metavar='PATH', help="Output file for BPE codes (default: standard output)") parser.add_argument( '--symbols', '-s', type=int, default=10000, help="Create this many new symbols (each representing a character n-gram) (default: %(default)s)") parser.add_argument( '--min-frequency', type=int, default=2, metavar='FREQ', help='Stop if no symbol pair has frequency >= FREQ (default: %(default)s)') parser.add_argument('--dict-input', action="store_true", help="If set, input file is interpreted as a dictionary where each line contains a word-count pair") parser.add_argument( '--total-symbols', '-t', action="store_true", help="subtract number of characters from the symbols to be generated (so that '--symbols' becomes an estimate for the total number of symbols needed to encode text).") parser.add_argument( '--num-workers', type=int, default=1, help="Number of processors to process texts, only supported in Python3. If -1, set `multiprocessing.cpu_count()`. (default: %(default)s)") parser.add_argument( '--verbose', '-v', action="store_true", help="verbose mode.") return parser
null
164,259
from __future__ import unicode_literals, division import sys import os import inspect import codecs import io import argparse import re import warnings import random import tempfile from multiprocessing import Pool, cpu_count from io import open def _process_lines(bpe, filename, outfile, dropout, begin, end): if isinstance(outfile, str): fo = open(outfile, "w", encoding="utf-8") else: fo = outfile with open(filename, encoding="utf-8") as f: f.seek(begin) line = f.readline() while line: pos = f.tell() assert 0 <= pos < 1e20, "Bad new line separator, e.g. '\\r'" if end > 0 and pos > end: break fo.write(bpe.process_line(line, dropout)) line = f.readline() if isinstance(outfile, str): fo.close()
null
164,260
from __future__ import unicode_literals, division import sys import os import inspect import codecs import io import argparse import re import warnings import random import tempfile from multiprocessing import Pool, cpu_count from io import open argparse.open = open def create_parser(subparsers=None): if subparsers: parser = subparsers.add_parser('apply-bpe', formatter_class=argparse.RawDescriptionHelpFormatter, description="learn BPE-based word segmentation") else: parser = argparse.ArgumentParser( formatter_class=argparse.RawDescriptionHelpFormatter, description="learn BPE-based word segmentation") parser.add_argument( '--input', '-i', type=argparse.FileType('r'), default=sys.stdin, metavar='PATH', help="Input file (default: standard input).") parser.add_argument( '--codes', '-c', type=argparse.FileType('r'), metavar='PATH', required=True, help="File with BPE codes (created by learn_bpe.py).") parser.add_argument( '--merges', '-m', type=int, default=-1, metavar='INT', help="Use this many BPE operations (<= number of learned symbols)"+ "default: Apply all the learned merge operations") parser.add_argument( '--output', '-o', type=argparse.FileType('w'), default=sys.stdout, metavar='PATH', help="Output file (default: standard output)") parser.add_argument( '--separator', '-s', type=str, default='@@', metavar='STR', help="Separator between non-final subword units (default: '%(default)s'))") parser.add_argument( '--vocabulary', type=argparse.FileType('r'), default=None, metavar="PATH", help="Vocabulary file (built with get_vocab.py). If provided, this script reverts any merge operations that produce an OOV.") parser.add_argument( '--vocabulary-threshold', type=int, default=None, metavar="INT", help="Vocabulary threshold. If vocabulary is provided, any word with frequency < threshold will be treated as OOV") parser.add_argument( '--dropout', type=float, default=0, metavar="P", help="Dropout BPE merge operations with probability P (Provilkov et al., 2019). 
Use this on training data only.") parser.add_argument( '--glossaries', type=str, nargs='+', default=None, metavar="STR", help="Glossaries. Words matching any of the words/regex provided in glossaries will not be affected "+ "by the BPE (i.e. they will neither be broken into subwords, nor concatenated with other subwords. "+ "Can be provided as a list of words/regex after the --glossaries argument. Enclose each regex in quotes.") parser.add_argument( '--seed', type=int, default=None, metavar="S", help="Random seed for the random number generators (e.g. for BPE dropout with --dropout).") parser.add_argument( '--num-workers', type=int, default=1, help="Number of processors to process texts, only supported in Python3. If -1, set `multiprocessing.cpu_count()`. (default: %(default)s)") return parser
null
164,261
from __future__ import unicode_literals, division import sys import os import inspect import codecs import io import argparse import re import warnings import random import tempfile from multiprocessing import Pool, cpu_count from io import open def check_vocab_and_split(orig, bpe_codes, vocab, separator): """Check for each segment in word if it is in-vocabulary, and segment OOV segments into smaller units by reversing the BPE merge operations""" out = [] for segment in orig[:-1]: if segment + separator in vocab: out.append(segment) else: #sys.stderr.write('OOV: {0}\n'.format(segment)) for item in recursive_split(segment, bpe_codes, vocab, separator, False): out.append(item) segment = orig[-1] if segment in vocab: out.append(segment) else: #sys.stderr.write('OOV: {0}\n'.format(segment)) for item in recursive_split(segment, bpe_codes, vocab, separator, True): out.append(item) return out The provided code snippet includes necessary dependencies for implementing the `encode` function. Write a Python function `def encode(orig, bpe_codes, bpe_codes_reverse, vocab, separator, version, cache, glossaries_regex=None, dropout=0)` to solve the following problem: Encode word based on list of BPE merge operations, which are applied consecutively Here is the function: def encode(orig, bpe_codes, bpe_codes_reverse, vocab, separator, version, cache, glossaries_regex=None, dropout=0): """Encode word based on list of BPE merge operations, which are applied consecutively """ if not dropout and orig in cache: return cache[orig] if glossaries_regex and glossaries_regex.match(orig): cache[orig] = (orig,) return (orig,) if len(orig) == 1: return orig if version == (0, 1): word = list(orig) + ['</w>'] elif version == (0, 2): # more consistent handling of word-final segments word = list(orig[:-1]) + [orig[-1] + '</w>'] else: raise NotImplementedError while len(word) > 1: # get list of symbol pairs; optionally apply dropout pairs = [(bpe_codes[pair],i,pair) for (i,pair) in 
enumerate(zip(word, word[1:])) if (not dropout or random.random() > dropout) and pair in bpe_codes] if not pairs: break #get first merge operation in list of BPE codes bigram = min(pairs)[2] # find start position of all pairs that we want to merge positions = [i for (rank,i,pair) in pairs if pair == bigram] i = 0 new_word = [] bigram = ''.join(bigram) for j in positions: # merges are invalid if they start before current position. This can happen if there are overlapping pairs: (x x x -> xx x) if j < i: continue new_word.extend(word[i:j]) # all symbols before merged pair new_word.append(bigram) # merged pair i = j+2 # continue after merged pair new_word.extend(word[i:]) # add all symbols until end of word word = new_word # don't print end-of-word symbols if word[-1] == '</w>': word = word[:-1] elif word[-1].endswith('</w>'): word[-1] = word[-1][:-4] word = tuple(word) if vocab: word = check_vocab_and_split(word, bpe_codes_reverse, vocab, separator) cache[orig] = word return word
Encode word based on list of BPE merge operations, which are applied consecutively
164,262
from __future__ import unicode_literals, division import sys import os import inspect import codecs import io import argparse import re import warnings import random import tempfile from multiprocessing import Pool, cpu_count from io import open The provided code snippet includes necessary dependencies for implementing the `read_vocabulary` function. Write a Python function `def read_vocabulary(vocab_file, threshold)` to solve the following problem: read vocabulary file produced by get_vocab.py, and filter according to frequency threshold. Here is the function: def read_vocabulary(vocab_file, threshold): """read vocabulary file produced by get_vocab.py, and filter according to frequency threshold. """ vocabulary = set() for line in vocab_file: word, freq = line.strip('\r\n ').split(' ') freq = int(freq) if threshold == None or freq >= threshold: vocabulary.add(word) return vocabulary
read vocabulary file produced by get_vocab.py, and filter according to frequency threshold.
164,263
from __future__ import unicode_literals, division import sys import os import inspect import codecs import io import argparse import re import warnings import random import tempfile from multiprocessing import Pool, cpu_count from io import open The provided code snippet includes necessary dependencies for implementing the `isolate_glossary` function. Write a Python function `def isolate_glossary(word, glossary)` to solve the following problem: Isolate a glossary present inside a word. Returns a list of subwords. In which all 'glossary' glossaries are isolated For example, if 'USA' is the glossary and '1934USABUSA' the word, the return value is: ['1934', 'USA', 'B', 'USA'] Here is the function: def isolate_glossary(word, glossary): """ Isolate a glossary present inside a word. Returns a list of subwords. In which all 'glossary' glossaries are isolated For example, if 'USA' is the glossary and '1934USABUSA' the word, the return value is: ['1934', 'USA', 'B', 'USA'] """ # regex equivalent of (if word == glossary or glossary not in word) if re.match('^'+glossary+'$', word) or not re.search(glossary, word): return [word] else: segments = re.split(r'({})'.format(glossary), word) segments, ending = segments[:-1], segments[-1] segments = list(filter(None, segments)) # Remove empty strings in regex group. return segments + [ending.strip('\r\n ')] if ending != '' else segments
Isolate a glossary present inside a word. Returns a list of subwords. In which all 'glossary' glossaries are isolated For example, if 'USA' is the glossary and '1934USABUSA' the word, the return value is: ['1934', 'USA', 'B', 'USA']
164,264
from __future__ import unicode_literals import sys import os import inspect import codecs import argparse import tempfile import warnings from collections import Counter from multiprocessing import cpu_count from io import open argparse.open = open def create_parser(subparsers=None): if subparsers: parser = subparsers.add_parser('learn-joint-bpe-and-vocab', formatter_class=argparse.RawDescriptionHelpFormatter, description="learn BPE-based word segmentation") else: parser = argparse.ArgumentParser( formatter_class=argparse.RawDescriptionHelpFormatter, description="learn BPE-based word segmentation") parser.add_argument( '--input', '-i', type=argparse.FileType('r'), required=True, nargs = '+', metavar='PATH', help="Input texts (multiple allowed).") parser.add_argument( '--output', '-o', type=argparse.FileType('w'), required=True, metavar='PATH', help="Output file for BPE codes.") parser.add_argument( '--symbols', '-s', type=int, default=10000, help="Create this many new symbols (each representing a character n-gram) (default: %(default)s)") parser.add_argument( '--separator', type=str, default='@@', metavar='STR', help="Separator between non-final subword units (default: '%(default)s')") parser.add_argument( '--write-vocabulary', type=argparse.FileType('w'), required=True, nargs = '+', default=None, metavar='PATH', dest='vocab', help='Write to these vocabulary files after applying BPE. One per input text. Used for filtering in apply_bpe.py') parser.add_argument( '--min-frequency', type=int, default=2, metavar='FREQ', help='Stop if no symbol pair has frequency >= FREQ (default: %(default)s)') parser.add_argument( '--total-symbols', '-t', action="store_true", help="subtract number of characters from the symbols to be generated (so that '--symbols' becomes an estimate for the total number of symbols needed to encode text).") parser.add_argument( '--num-workers', type=int, default=1, help="Number of processors to process texts, only supported in Python3. 
If -1, set `multiprocessing.cpu_count()`. (default: %(default)s)") parser.add_argument( '--verbose', '-v', action="store_true", help="verbose mode.") return parser
null
164,265
from __future__ import unicode_literals import sys import os import inspect import codecs import argparse import tempfile import warnings from collections import Counter from multiprocessing import cpu_count from io import open def learn_bpe(infile, outfile, num_symbols, min_frequency=2, verbose=False, is_dict=False, total_symbols=False, num_workers=1): """Learn num_symbols BPE operations from vocabulary, and write to outfile. """ # version 0.2 changes the handling of the end-of-word token ('</w>'); # version numbering allows bckward compatibility outfile.write('#version: 0.2\n') vocab = get_vocabulary(infile, is_dict, num_workers) vocab = dict([(tuple(x[:-1])+(x[-1]+'</w>',) ,y) for (x,y) in vocab.items()]) sorted_vocab = sorted(vocab.items(), key=lambda x: x[1], reverse=True) stats, indices = get_pair_statistics(sorted_vocab) big_stats = copy.deepcopy(stats) if total_symbols: uniq_char_internal = set() uniq_char_final = set() for word in vocab: for char in word[:-1]: uniq_char_internal.add(char) uniq_char_final.add(word[-1]) sys.stderr.write('Number of word-internal characters: {0}\n'.format(len(uniq_char_internal))) sys.stderr.write('Number of word-final characters: {0}\n'.format(len(uniq_char_final))) sys.stderr.write('Reducing number of merge operations by {0}\n'.format(len(uniq_char_internal) + len(uniq_char_final))) num_symbols -= len(uniq_char_internal) + len(uniq_char_final) # threshold is inspired by Zipfian assumption, but should only affect speed threshold = max(stats.values()) / 10 for i in range(num_symbols): if stats: most_frequent = max(stats, key=lambda x: (stats[x], x)) # we probably missed the best pair because of pruning; go back to full statistics if not stats or (i and stats[most_frequent] < threshold): prune_stats(stats, big_stats, threshold) stats = copy.deepcopy(big_stats) most_frequent = max(stats, key=lambda x: (stats[x], x)) # threshold is inspired by Zipfian assumption, but should only affect speed threshold = stats[most_frequent] * 
i/(i+10000.0) prune_stats(stats, big_stats, threshold) if stats[most_frequent] < min_frequency: sys.stderr.write('no pair has frequency >= {0}. Stopping\n'.format(min_frequency)) break if verbose: sys.stderr.write('pair {0}: {1} {2} -> {1}{2} (frequency {3})\n'.format(i, most_frequent[0], most_frequent[1], stats[most_frequent])) outfile.write('{0} {1}\n'.format(*most_frequent)) changes = replace_pair(most_frequent, sorted_vocab, indices) update_pair_statistics(most_frequent, changes, stats, indices) stats[most_frequent] = 0 if not i % 100: prune_stats(stats, big_stats, threshold) def learn_joint_bpe_and_vocab(args): if args.vocab and len(args.input) != len(args.vocab): sys.stderr.write('Error: number of input files and vocabulary files must match\n') sys.exit(1) # read/write files as UTF-8 args.input = [codecs.open(f.name, encoding='UTF-8') for f in args.input] args.vocab = [codecs.open(f.name, 'w', encoding='UTF-8') for f in args.vocab] # get combined vocabulary of all input texts full_vocab = Counter() for f in args.input: full_vocab += learn_bpe.get_vocabulary(f, num_workers=args.num_workers) f.seek(0) vocab_list = ['{0} {1}'.format(key, freq) for (key, freq) in full_vocab.items()] # learn BPE on combined vocabulary with codecs.open(args.output.name, 'w', encoding='UTF-8') as output: learn_bpe.learn_bpe(vocab_list, output, args.symbols, args.min_frequency, args.verbose, is_dict=True, total_symbols=args.total_symbols) with codecs.open(args.output.name, encoding='UTF-8') as codes: bpe = apply_bpe.BPE(codes, separator=args.separator) # apply BPE to each training corpus and get vocabulary for train_file, vocab_file in zip(args.input, args.vocab): tmp = tempfile.NamedTemporaryFile(delete=False) tmp.close() tmpout = codecs.open(tmp.name, 'w', encoding='UTF-8') train_file.seek(0) bpe.process_lines(train_file.name, tmpout, num_workers=args.num_workers) tmpout.close() tmpin = codecs.open(tmp.name, encoding='UTF-8') vocab = learn_bpe.get_vocabulary(tmpin, 
num_workers=args.num_workers) tmpin.close() os.remove(tmp.name) for key, freq in sorted(vocab.items(), key=lambda x: x[1], reverse=True): vocab_file.write("{0} {1}\n".format(key, freq)) vocab_file.close()
null
164,266
import re import sys import collections for i in range(num_merges): pairs = get_stats(vocab) try: best = max(pairs, key=pairs.get) except ValueError: break if pairs[best] < 2: sys.stderr.write('no pair has frequency > 1. Stopping\n') break vocab = merge_vocab(best, vocab) print(best) def get_stats(vocab): pairs = collections.defaultdict(int) for word, freq in vocab.items(): symbols = word.split() for i in range(len(symbols)-1): pairs[symbols[i],symbols[i+1]] += freq return pairs
null
164,267
import re import sys import collections def merge_vocab(pair, v_in): v_out = {} bigram_pattern = re.escape(' '.join(pair)) p = re.compile(r'(?<!\S)' + bigram_pattern + r'(?!\S)') for word in v_in: w_out = p.sub(''.join(pair), word) v_out[w_out] = v_in[word] return v_out
null
164,276
import logging import math import os import random import re import shutil import warnings from contextlib import contextmanager from pathlib import Path from typing import Any, Callable, Dict, List, NamedTuple, Optional, Tuple, Union import numpy as np import torch from packaging import version from torch import nn from torch.utils.data.dataloader import DataLoader from torch.utils.data.dataset import Dataset from torch.utils.data.distributed import DistributedSampler from torch.utils.data.sampler import RandomSampler, Sampler, SequentialSampler from tqdm.auto import tqdm, trange from transformers.data.data_collator import DataCollator, default_data_collator from transformers.file_utils import is_apex_available, is_torch_tpu_available from transformers.modeling_utils import PreTrainedModel from transformers.optimization import AdamW, get_linear_schedule_with_warmup from transformers.trainer_utils import PREFIX_CHECKPOINT_DIR, EvalPrediction, PredictionOutput, TrainOutpu from generator.training_args import Generator_TrainingArguments from generator.trainer_utils import EvalPredictionWithSize, PredictionOutputWithSize def is_tensorboard_available(): return _has_tensorboard
null
164,277
import logging import math import os import random import re import shutil import warnings from contextlib import contextmanager from pathlib import Path from typing import Any, Callable, Dict, List, NamedTuple, Optional, Tuple, Union import numpy as np import torch from packaging import version from torch import nn from torch.utils.data.dataloader import DataLoader from torch.utils.data.dataset import Dataset from torch.utils.data.distributed import DistributedSampler from torch.utils.data.sampler import RandomSampler, Sampler, SequentialSampler from tqdm.auto import tqdm, trange from transformers.data.data_collator import DataCollator, default_data_collator from transformers.file_utils import is_apex_available, is_torch_tpu_available from transformers.modeling_utils import PreTrainedModel from transformers.optimization import AdamW, get_linear_schedule_with_warmup from transformers.trainer_utils import PREFIX_CHECKPOINT_DIR, EvalPrediction, PredictionOutput, TrainOutpu from generator.training_args import Generator_TrainingArguments from generator.trainer_utils import EvalPredictionWithSize, PredictionOutputWithSize def is_wandb_available(): return False
null
164,278
import logging import math import os import random import re import shutil import warnings from contextlib import contextmanager from pathlib import Path from typing import Any, Callable, Dict, List, NamedTuple, Optional, Tuple, Union import numpy as np import torch from packaging import version from torch import nn from torch.utils.data.dataloader import DataLoader from torch.utils.data.dataset import Dataset from torch.utils.data.distributed import DistributedSampler from torch.utils.data.sampler import RandomSampler, Sampler, SequentialSampler from tqdm.auto import tqdm, trange from transformers.data.data_collator import DataCollator, default_data_collator from transformers.file_utils import is_apex_available, is_torch_tpu_available from transformers.modeling_utils import PreTrainedModel from transformers.optimization import AdamW, get_linear_schedule_with_warmup from transformers.trainer_utils import PREFIX_CHECKPOINT_DIR, EvalPrediction, PredictionOutput, TrainOutpu from generator.training_args import Generator_TrainingArguments from generator.trainer_utils import EvalPredictionWithSize, PredictionOutputWithSize try: from torch.utils.tensorboard import SummaryWriter _has_tensorboard = True except ImportError: try: from tensorboardX import SummaryWriter _has_tensorboard = True except ImportError: _has_tensorboard = False def set_seed(seed: int): random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed_all(seed) # ^^ safe to call this function even if cuda is not available
null
164,279
import logging import math import os import random import re import shutil import warnings from contextlib import contextmanager from pathlib import Path from typing import Any, Callable, Dict, List, NamedTuple, Optional, Tuple, Union import numpy as np import torch from packaging import version from torch import nn from torch.utils.data.dataloader import DataLoader from torch.utils.data.dataset import Dataset from torch.utils.data.distributed import DistributedSampler from torch.utils.data.sampler import RandomSampler, Sampler, SequentialSampler from tqdm.auto import tqdm, trange from transformers.data.data_collator import DataCollator, default_data_collator from transformers.file_utils import is_apex_available, is_torch_tpu_available from transformers.modeling_utils import PreTrainedModel from transformers.optimization import AdamW, get_linear_schedule_with_warmup from transformers.trainer_utils import PREFIX_CHECKPOINT_DIR, EvalPrediction, PredictionOutput, TrainOutpu from generator.training_args import Generator_TrainingArguments from generator.trainer_utils import EvalPredictionWithSize, PredictionOutputWithSize try: from torch.utils.tensorboard import SummaryWriter _has_tensorboard = True except ImportError: try: from tensorboardX import SummaryWriter _has_tensorboard = True except ImportError: _has_tensorboard = False The provided code snippet includes necessary dependencies for implementing the `torch_distributed_zero_first` function. Write a Python function `def torch_distributed_zero_first(local_rank: int)` to solve the following problem: Decorator to make all processes in distributed training wait for each local_master to do something. Here is the function: def torch_distributed_zero_first(local_rank: int): """ Decorator to make all processes in distributed training wait for each local_master to do something. """ if local_rank not in [-1, 0]: torch.distributed.barrier() yield if local_rank == 0: torch.distributed.barrier()
Decorator to make all processes in distributed training wait for each local_master to do something.
164,280
import logging import math import os import random import re import shutil import warnings from contextlib import contextmanager from pathlib import Path from typing import Any, Callable, Dict, List, NamedTuple, Optional, Tuple, Union import numpy as np import torch from packaging import version from torch import nn from torch.utils.data.dataloader import DataLoader from torch.utils.data.dataset import Dataset from torch.utils.data.distributed import DistributedSampler from torch.utils.data.sampler import RandomSampler, Sampler, SequentialSampler from tqdm.auto import tqdm, trange from transformers.data.data_collator import DataCollator, default_data_collator from transformers.file_utils import is_apex_available, is_torch_tpu_available from transformers.modeling_utils import PreTrainedModel from transformers.optimization import AdamW, get_linear_schedule_with_warmup from transformers.trainer_utils import PREFIX_CHECKPOINT_DIR, EvalPrediction, PredictionOutput, TrainOutpu from generator.training_args import Generator_TrainingArguments from generator.trainer_utils import EvalPredictionWithSize, PredictionOutputWithSize def get_tpu_sampler(dataset: Dataset): if xm.xrt_world_size() <= 1: return RandomSampler(dataset) return DistributedSampler(dataset, num_replicas=xm.xrt_world_size(), rank=xm.get_ordinal())
null
164,286
import json import torch from bleu import list_bleu import os def is_rank_0(): if torch.distributed.is_initialized(): if torch.distributed.get_rank() == 0: return True else: return True return False
null
164,288
import torch def pad_and_tensorize_sequence(sequences, padding_value): max_size = max([len(sequence) for sequence in sequences]) padded_sequences = [] for sequence in sequences: padded_sequence = sequence + [padding_value] * (max_size - len(sequence)) padded_sequences.append(padded_sequence) return torch.tensor(padded_sequences, dtype=torch.long)
null
164,289
import torch import torch.nn as nn from transformers.modeling_bart import BartForConditionalGeneration from transformers.tokenization_bart import BartTokenizer from generator.keywords.keywords import SKETCH_KEYWORDS, KEYWORDS import logging import os def pad_and_tensorize_sequence(sequences, padding_value = None, tensorize = False): if tensorize: return torch.tensor(sequences, dtype=torch.long) max_size = max([len(sequence) for sequence in sequences]) padded_sequences = [] for sequence in sequences: padded_sequence = sequence + [padding_value] * (max_size - len(sequence)) padded_sequences.append(padded_sequence) return torch.tensor(padded_sequences, dtype=torch.long)
null
164,290
from moz_sql_parser import parse import json import re from mo_future import string_types, text, first, long, is_text import random from sql_formatter.keywords import RESERVED, reserved_keywords, join_keywords, precedence, binary_ops VALID = re.compile(r'^[a-zA-Z_]\w*$') reserved_keywords = [] The provided code snippet includes necessary dependencies for implementing the `should_quote` function. Write a Python function `def should_quote(identifier)` to solve the following problem: Return true if a given identifier should be quoted. This is usually true when the identifier: - is a reserved word - contain spaces - does not match the regex `[a-zA-Z_]\\w*` Here is the function: def should_quote(identifier): """ Return true if a given identifier should be quoted. This is usually true when the identifier: - is a reserved word - contain spaces - does not match the regex `[a-zA-Z_]\\w*` """ return ( identifier != '*' and ( not VALID.match(identifier) or identifier in reserved_keywords))
Return true if a given identifier should be quoted. This is usually true when the identifier: - is a reserved word - contain spaces - does not match the regex `[a-zA-Z_]\\w*`
164,291
from moz_sql_parser import parse import json import re from mo_future import string_types, text, first, long, is_text import random from sql_formatter.keywords import RESERVED, reserved_keywords, join_keywords, precedence, binary_ops def split_field(field): """ RETURN field AS ARRAY OF DOT-SEPARATED FIELDS """ if field == "." or field==None: return [] elif is_text(field) and "." in field: if field.startswith(".."): remainder = field.lstrip(".") back = len(field) - len(remainder) - 1 return [-1]*back + [k.replace("\a", ".") for k in remainder.replace("\\.", "\a").split(".")] else: return [k.replace("\a", ".") for k in field.replace("\\.", "\a").split(".")] else: return [field] def join_field(path): """ RETURN field SEQUENCE AS STRING """ output = ".".join([f.replace(".", "\\.") for f in path if f != None]) return output if output else "." # potent = [f for f in path if f != "."] # if not potent: # return "." # return ".".join([f.replace(".", "\\.") for f in potent]) The provided code snippet includes necessary dependencies for implementing the `escape` function. Write a Python function `def escape(ident, ansi_quotes, should_quote)` to solve the following problem: Escape identifiers. ANSI uses single quotes, but many databases use back quotes. Here is the function: def escape(ident, ansi_quotes, should_quote): """ Escape identifiers. ANSI uses single quotes, but many databases use back quotes. """ def esc(identifier): if not should_quote(identifier): return identifier quote = '"' if ansi_quotes else '`' identifier = identifier.replace(quote, 2*quote) return '{0}{1}{2}'.format(quote, identifier, quote) return join_field(esc(f) for f in split_field(ident))
Escape identifiers. ANSI uses single quotes, but many databases use back quotes.
164,292
from moz_sql_parser import parse import json import re from mo_future import string_types, text, first, long, is_text import random from sql_formatter.keywords import RESERVED, reserved_keywords, join_keywords, precedence, binary_ops binary_ops = { "||": "concat", "*": "mul", "/": "div", "%": "mod", "+": "add", "-": "sub", "&": "binary_and", "|": "binary_or", "<": "lt", "<=": "lte", ">": "gt", ">=": "gte", "=": "eq", "==": "eq", "!=": "neq", "<>": "neq", "not in": "nin", "is not": "neq", "is": "eq", "not like": "nlike", "not between": "not_between", "or": "or", "and": "and", } precedence = { "concat": 1, "mul": 2, "div": 2, "mod": 2, "add": 3, "sub": 3, "binary_and": 4, "binary_or": 4, "gte": 5, "lte": 5, "lt": 5, "gt": 6, "eq": 7, "neq": 7, "between": 8, "not_between": 8, "in": 8, "nin": 8, "is": 8, "like": 8, "nlike": 8, "and": 10, "or": 11, } def Operator(op): prec = precedence[binary_ops[op]] op = ' {0} '.format(op).upper() def func(self, json): acc = [] for v in json: sql = self.dispatch(v) sql = self.process_value(sql) if isinstance(v, (text, int, float, long)): acc.append(sql) continue p = precedence.get(first(v.keys())) if p is None: acc.append(sql) continue if p>=prec: acc.append("(" + sql + ")") else: acc.append(sql) return op.join(acc) return func
null
164,293
import json import sqlite3 import os import random from tqdm import tqdm import json import time import signal import multiprocessing from multiprocessing import Manager alpha = 0.5 beta = 0.5 gamma = 0.6 theta = 0.15 db_dir = "/ai/conceptflow/data/examples/semantic-parsing/text-to-sql/spider/spider/database" sql_dict = {} swap_dict = {} swap_dict["algr_op_dict"] = ['/', '%', '+', '-'] swap_dict["binary_op_dict"] = [ '>', '<', '=', '>=', '<=', '!=', ''] swap_dict["logic_binary_op_dict"] = ['OR', 'AND'] swap_dict["func_dict_upper"] = ['AVG', 'COUNT', 'MAX', 'MIN', 'SUM', ''] swap_dict["between_dict"] = ['BETWEEN', 'NOT BETWEEN'] swap_dict["no_dict"] = ['NOT', ''] swap_dict["no_op_dict"] = ['!', ''] swap_dict["like_dict"] = ['LIKE', 'NOT LIKE'] swap_dict["dist_dict"] =['DISTINCT',''] swap_dict["order_dict"] =['ASC', 'DESC',''] swap_dict["union_dict"] = ['UNION','INTERSECT','EXCEPT','EXISTS', 'NOT EXISTS','IN','NOT IN'] mutate_iter_num = 500 db_schema = {} def execute_sql(c, mutated_sql, return_dict, executable_SQL): try: cursor = c.execute(mutated_sql) if executable_SQL: if list(cursor): return_dict[mutated_sql] = mutated_sql else: return_dict[mutated_sql] = mutated_sql except: pass def mutate_sql(index, data, f_out, time_out): manager = Manager() return_dict = manager.dict() jobs = [] db_id = data['db_id'] raw_sql = data['query'] sql = data['query_toks'] tables = db_schema[db_id] db_path = os.path.join(db_dir, db_id, db_id +'.sqlite') mutated_sqls = [] if raw_sql not in sql_dict: sql_dict[raw_sql] = [] else: return executable_SQL = True conn = sqlite3.connect(db_path, timeout=10.0) c = conn.cursor() try: cursor = c.execute(raw_sql) if not list(cursor): executable_SQL = False except: executable_SQL = False for i in range(mutate_iter_num): mutated_sql = [] for tok_i, tok in enumerate(sql): #print(tok) upper_tok = tok.upper() new_tok = tok if random.random() > alpha: for k, v in swap_dict.items(): if upper_tok in v: swap_tok = random.choice(v) new_tok = swap_tok if 
swap_tok != tok.upper() else tok if random.random() > beta: for k, v in tables.items(): # if k == 'table_names' and random.random() > omega: # continue if '.' in tok: alias = tok.split('.')[0] col = tok.split('.')[1] if col in v or col.capitalize() in v: col = random.choice(v) new_tok = alias + '.' + col else: if tok in v or tok.capitalize() in v: new_tok = random.choice(v) if random.random() > gamma and new_tok != tok: new_tok = tok + ' , ' + new_tok if tok.isnumeric() and random.random() < theta : tok = max(int(tok) + random.randint(-10,10), 0) new_tok = str(tok) mutated_sql.append(new_tok) mutated_sql = ' '.join(mutated_sql) mutated_sql = mutated_sql.replace(", ~ ", ",").replace(" ~ ,", ",").replace(", ~ ,", ",").replace("~", "").replace('``', '\"').replace("''", '\"') #print('-1', mutated_sql) if mutated_sql == ' '.join(sql): continue p = multiprocessing.Process(target=execute_sql, args=(c, mutated_sql, return_dict, executable_SQL)) jobs.append(p) p.start() # for proc in jobs: # proc.join(time_out) start = time.time() while time.time() - start <= time_out: if not any(p.is_alive() for p in jobs): # All the processes are done, break now. break time.sleep(.1) # Just to avoid hogging the CPU else: # We only enter this if we didn't 'break' above. print("Timeout with processing: {} \n".format(raw_sql)) for p in jobs: p.terminate() p.join() mutated_sqls = return_dict.values() mutated_sqls = list(set(mutated_sqls)) #print(mutated_sqls) sql_dict[raw_sql] = mutated_sqls if len(mutated_sqls) < 5: print("SQL {}: {}".format(index, raw_sql)) print(mutated_sqls) print('Valid Muatation: {}'.format(len(mutated_sqls)), "\n--------------------------------------")
null
164,294
import json import sqlite3 import os import random from tqdm import tqdm import json import time import signal import multiprocessing from multiprocessing import Manager def handler(signum, frame): raise AssertionError
null
164,295
import os import re import json import pickle import random from template_config import * from collections import defaultdict from nltk.stem.porter import PorterStemmer from nltk.stem.wordnet import WordNetLemmatizer import nltk def read_in_all_data(data_path=DATA_PATH): training_data = json.load(open(os.path.join(data_path, "train.json"))) dev_data = json.load(open(os.path.join(data_path, "dev.json"))) tables_org = json.load(open(os.path.join(data_path, "tables.json"))) tables = {tab['db_id']: tab for tab in tables_org} return training_data, dev_data, tables
null