code
stringlengths
17
6.64M
def replace_cur_year(query: str) -> str:
    """Replace every ``YEAR(CURDATE())`` occurrence (any internal spacing,
    case-insensitive) with the literal ``2020``.

    NOTE: the pattern also swallows whitespace *after* the closing paren,
    so the replacement is glued directly onto the following token.
    """
    pattern = 'YEAR\\s*\\(\\s*CURDATE\\s*\\(\\s*\\)\\s*\\)\\s*'
    return re.sub(pattern, '2020', query, flags=re.IGNORECASE)
def exec_db_path_(sqlite_path: str, query: str) -> Tuple[str, Any]:
    """Execute `query` (after CURDATE-year substitution) on the SQLite db at
    `sqlite_path`.

    Returns ('result', rows) on success or ('exception', exc) on failure.

    Fix: the original duplicated the cursor/connection cleanup in both the
    success and exception paths; a ``finally`` block now guarantees both are
    closed even if ``fetchall`` itself raises.
    """
    query = replace_cur_year(query)
    cursor = get_cursor_path(sqlite_path)
    try:
        cursor.execute(query)
        return ('result', cursor.fetchall())
    except Exception as e:
        # Report the exception to the caller rather than raising: callers
        # treat a failing query as a regular outcome, not a crash.
        return ('exception', e)
    finally:
        cursor.close()
        cursor.connection.close()
def is_num(s):
    """Return True when `s` parses as a float (ints and floats alike)."""
    try:
        float(s)
    except ValueError:
        return False
    return True
def is_int(s):
    """Return True when `s` parses as an int."""
    try:
        int(s)
    except ValueError:
        return False
    return True
def contain_is_num(column_elements):
    """Return the set of numeric-parseability verdicts over the elements.

    {True} -> all numeric-looking, {False} -> none, {True, False} -> mixed.
    """
    def _numeric(e):
        # Same check as is_num(), inlined to keep this block self-contained.
        try:
            float(e)
            return True
        except ValueError:
            return False
    return {_numeric(e) for e in column_elements}
def tab_col_ancestor(tab_col, dep):
    """Follow the child->parent mapping `dep` from `tab_col` up to its root.

    Assumes `dep` is acyclic; a cycle would loop forever.
    """
    node = tab_col
    while node in dep:
        node = dep[node]
    return node
def type_values_w_db(orig_path: str, typed_values: List[Tuple[(Tuple[(str, str)], str)]], loose: bool) -> Dict[(Tuple[(str, str)], List[str])]:
    """Assign each (table, column)/value pair to the candidate ancestor columns
    it could belong to in the db at `orig_path`, returning ancestor -> values.

    Candidates are the foreign-key ancestors of every column matching the
    (possibly table-less) left-hand side; they are pruned by numeric/text
    compatibility and, for text values, by actual value membership. With
    `loose`, ancestors sharing elements with a value-holding ancestor are
    kept too.
    """
    (t2cproperties, dep, table_col2column_elements) = get_all_db_info_path(orig_path)
    new_values = defaultdict(list)
    for ((lhs_table, lhs_col), value) in typed_values:
        # Columns matching the LHS; lhs_table None means "any table".
        anchors = [(table, col) for (table, col) in table_col2column_elements.keys() if (((lhs_table is None) or (lhs_table == table)) and (lhs_col == col))]
        all_possible_ancestors = {tab_col_ancestor(tab_col, dep) for tab_col in anchors}
        value_is_num = is_num(value)
        if (len(all_possible_ancestors) > 1):
            # Drop ancestors whose existing elements disagree with the
            # value's numeric-ness (iterate over a copy while removing).
            for tab_col in set(all_possible_ancestors):
                column_elements = table_col2column_elements[tab_col]
                if ((len(column_elements) > 0) and (value_is_num not in contain_is_num(column_elements))):
                    all_possible_ancestors.remove(tab_col)
        if ((len(all_possible_ancestors) > 1) and (not value_is_num)):
            # For still-ambiguous text values, prefer columns that actually
            # contain the value.
            ancestors_w_vals = {a for a in all_possible_ancestors if (value in table_col2column_elements[a])}
            if (len(ancestors_w_vals) == 0):
                pass
            elif loose:
                # Loose mode: also keep ancestors that share at least one
                # element with some value-holding ancestor.
                extended_ancestors = set(ancestors_w_vals)
                for a in all_possible_ancestors:
                    if ((a not in ancestors_w_vals) and any(((set(table_col2column_elements[a]) & set(table_col2column_elements[w])) for w in ancestors_w_vals))):
                        extended_ancestors.add(a)
                all_possible_ancestors = extended_ancestors
            else:
                all_possible_ancestors = ancestors_w_vals
        for t_c in all_possible_ancestors:
            new_values[t_c].append(value)
    return new_values
def insert_row(cursor, table_name: str, column_names: List[str], row: Tuple) -> str:
    """Insert one `row` into `table_name` through `cursor`.

    Returns 'success' or 'fails'; a failing insert is reported on stdout and
    swallowed so bulk loading can continue with the remaining rows.
    """
    assert (len(row) == len(column_names)), 'number of elements per row needs to be the same as number of columns'
    placeholders = ' ,'.join(['?'] * len(column_names))
    q = 'INSERT INTO {table_name} VALUES ({dummy_args})'.format(table_name=table_name, dummy_args=placeholders)
    try:
        cursor.execute(q, row)
    except Exception:
        print('unable to insert the following')
        print(q)
        print(row)
        return 'fails'
    return 'success'
def insert_table(cursor, table_name: str, column2elements: Dict[(str, List)]) -> None:
    """Insert the column-major `column2elements` into `table_name` row by row.

    Rows are rebuilt positionally by zipping the columns; a warning is printed
    when not a single row could be inserted.
    """
    column_names = list(column2elements.keys())
    n_rows = len(column2elements[column_names[0]])
    any_ok = False
    for i in range(n_rows):
        row = tuple(column2elements[name][i] for name in column_names)
        if insert_row(cursor, table_name, column_names, row) == 'success':
            any_ok = True
    if not any_ok:
        print('no successful insertion for table %s' % table_name)
def write_db_path(orig_path: str, new_db_path: str, table2column2elements: Dict[(str, Dict[(str, List)])], overwrite: bool=False) -> None:
    """Create a new SQLite db at `new_db_path` with `orig_path`'s schema and
    the column-major contents of `table2column2elements`.

    Refuses to touch an existing file unless `overwrite`. Each table's column
    names must exactly match (order included) the original schema's columns.
    """
    if (os.path.exists(new_db_path) and (not overwrite)):
        print('new database already exists.')
        return
    # Build an empty schema-only copy, move it into place, then fill it.
    empty_db_path = init_empty_db_from_orig_(orig_path)
    copyfile(empty_db_path, new_db_path)
    os.unlink(empty_db_path)
    cursor = get_cursor_path(new_db_path)
    (table_name2column_properties, _) = extract_table_column_properties_path(orig_path)
    for (table_name, column2elements) in table2column2elements.items():
        columns = list(column2elements.keys())
        orig_columns = list(table_name2column_properties[table_name].keys())
        # Guard: column order must agree with the original schema.
        assert (columns == orig_columns), (columns, orig_columns)
        insert_table(cursor, table_name, column2elements)
    cursor.connection.commit()
    cursor.connection.close()
def init_empty_db_from_orig_(sqlite_path: str, verbose: bool=False) -> str:
    """Copy `sqlite_path` to a random-suffixed sibling path, delete all rows
    from every table, and return the new (schema-only) db path.

    The random suffix avoids collisions between concurrent callers; the
    caller is responsible for removing the file.
    """
    empty_db_path = ((sqlite_path + EMPTY) + str(random.randint(0, 10000000000)))
    assert (empty_db_path != sqlite_path)
    copyfile(sqlite_path, empty_db_path)
    cursor = get_cursor_path(empty_db_path)
    table_names = get_table_names_path(sqlite_path)
    for table_name in table_names:
        # remove_query is a module-level DELETE template (one table per call).
        cursor.execute((remove_query % table_name))
    if verbose:
        cursor.execute(table_name_query)
        result = cursor.fetchall()
        print('Tables created: ')
        print(result)
    cursor.connection.commit()
    cursor.connection.close()
    return empty_db_path
def subsample_db(orig_path: str, target_path: str, delete_fraction: float=0.5, overwrite: bool=False):
    """Copy the SQLite db at `orig_path` to `target_path` and randomly delete
    roughly `delete_fraction` of each table's rows.

    Tables are processed in the order given by get_process_order so dependent
    tables are handled consistently with their parents.

    Fix: the original issued ``DELETE TOP (n) PERCENT FROM t`` — SQL Server
    syntax that always raises a syntax error on SQLite. Rewritten as a
    rowid/RANDOM()/LIMIT subquery, the SQLite equivalent.
    """
    if (os.path.exists(target_path) and (not overwrite)):
        raise Exception('Path %s exists, do not overwrite.' % target_path)
    copyfile(orig_path, target_path)
    cursor = get_cursor_path(target_path)
    (table_column_properties, child2parent, _) = get_all_db_info_path(target_path)
    (_, table_order) = get_process_order(child2parent, table_column_properties)
    for table in table_order:
        # Delete a random delete_fraction of the rows of `table`.
        cursor.execute(
            'DELETE FROM "%s" WHERE rowid IN '
            '(SELECT rowid FROM "%s" ORDER BY RANDOM() '
            'LIMIT (SELECT CAST(COUNT(*) * %f AS INTEGER) FROM "%s"));'
            % (table, table, delete_fraction, table)
        )
    cursor.connection.commit()
    cursor.connection.close()
def delete_entry_from_db(orig_path: str, target_path: str, table_name: str, entry: Dict[(str, Any)]):
    """Copy `orig_path` to `target_path` (when different) and delete the rows
    of `table_name` matching every column/value pair of `entry`.

    Fix: values are now bound as SQL parameters. The original spliced them
    into the query wrapped in double quotes — double quotes denote
    identifiers in SQL, and string-building the statement invited quoting
    and injection bugs. Assumes `entry` is non-empty, as the original did.
    """
    if (orig_path != target_path):
        os.system('cp {orig_path} {target_path}'.format(orig_path=orig_path, target_path=target_path))
    deletion_query = 'delete from "{table_name}" where '.format(table_name=table_name)
    deletion_query += ' AND '.join('"{column_name}" = ?'.format(column_name=c) for c in entry.keys())
    deletion_query += ';'
    cursor = get_cursor_path(target_path)
    cursor.execute(deletion_query, tuple(entry.values()))
    cursor.connection.commit()
    cursor.connection.close()
def load_predictions(f_path: str) -> List[str]:
    """Read `f_path` and return one stripped prediction string per line."""
    with open(f_path, 'r') as in_file:
        return [line.strip() for line in in_file]
def acc(l, idxes=None):
    """Fraction of truthy entries of `l` at positions `idxes` (default: all).

    Raises ZeroDivisionError when `idxes` is empty, like the original.
    """
    if idxes is None:
        idxes = list(range(len(l)))
    hits = sum(1 for idx in idxes if l[idx])
    return float(hits) / len(idxes)
def judge(args: Tuple[(Dict[(str, Any)], str)]) -> bool:
    """Run one (gold_dict, predicted_query) pair over the gold's test-suite
    databases and return True iff the prediction matches on every testcase.

    The prediction's timeout is derived from how long the gold query took
    (additive + multiplicative overhead). A gold query that itself fails on
    a testcase is skipped with a warning rather than counted against the
    prediction. Row order is only enforced when the gold query contains
    'order by'.
    """
    (gold_dict, pred) = args
    testsuite_paths = gold_dict['testsuite']
    gold_query = gold_dict['query']
    order_matters = ('order by' in gold_query.lower())
    pass_all_testcase = True
    for testcase_path in testsuite_paths:
        start = time.time()
        (flg, gold_result) = exec_on_db(testcase_path, gold_query, timeout=GOLD_TIMEOUT)
        duration = (time.time() - start)
        # Allow the prediction proportionally more time than gold needed.
        timeout = (ADDITIVE_OVERHEAD + (MULTIPLICATIVE_OVERHEAD * duration))
        if (flg != 'result'):
            print('Warning: executing gold query results in an exception')
            continue
        (flg, pred_result) = exec_on_db(testcase_path, pred, timeout=int(timeout))
        if (flg != 'result'):
            pass_all_testcase = False
            break
        if (not result_eq(gold_result, pred_result, order_matters)):
            pass_all_testcase = False
            break
    return pass_all_testcase
def main(preds: List[str], verbose: bool=True, num_processes: int=NUM_PROCESSES) -> List[bool]:
    """Judge `preds` against the gold dicts pickled in 'classical_test.pkl'
    (cwd-relative), in parallel, and return one pass/fail bool per prediction.

    With `verbose`, prints the overall accuracy and a per-db_id breakdown.
    """
    gold_dicts = pkl.load(open('classical_test.pkl', 'rb'))
    assert (len(gold_dicts) == len(preds)), 'number of gold and prediction should be equal'
    # Group example indices by database id for the per-group report.
    group_name2idxes = defaultdict(list)
    for (idx, gold_dict) in enumerate(gold_dicts):
        group_name2idxes[gold_dict['db_id']].append(idx)
    with Pool(num_processes) as pool:
        result = list(tqdm.tqdm(pool.imap(judge, zip(gold_dicts, preds)), total=len(gold_dicts)))
    if verbose:
        print('overall accuracy: ', acc(result))
        for (group, idxes) in group_name2idxes.items():
            print('accuracy for ', group, acc(result, idxes))
    return result
def condition_has_or(conds):
    """Condition lists alternate unit/connector; check connectors for 'or'."""
    return any(tok == 'or' for tok in conds[1::2])
def condition_has_like(conds):
    """True if any condition unit's operator id is the LIKE operator."""
    like_id = WHERE_OPS.index('like')
    return any(cond_unit[1] == like_id for cond_unit in conds[::2])
def condition_has_sql(conds):
    """True if any condition unit carries a nested sql dict as either value."""
    for cond_unit in conds[::2]:
        for val in (cond_unit[3], cond_unit[4]):
            if (val is not None) and (type(val) is dict):
                return True
    return False
def val_has_op(val_unit):
    """True when the value unit applies an arithmetic op (i.e. not 'none')."""
    none_id = UNIT_OPS.index('none')
    return val_unit[0] != none_id
def has_agg(unit):
    """True when the unit's aggregator is something other than 'none'."""
    none_id = AGG_OPS.index('none')
    return unit[0] != none_id
def accuracy(count, total):
    """All-or-nothing accuracy: 1 when count == total, else 0."""
    return 1 if count == total else 0
def recall(count, total):
    """All-or-nothing recall: 1 when count == total, else 0."""
    return 1 if count == total else 0
def F1(acc, rec):
    """Harmonic mean of accuracy and recall; 0 when both are 0."""
    total = acc + rec
    if total == 0:
        return 0
    return (2.0 * acc * rec) / total
def get_scores(count, pred_total, label_total):
    """All-or-nothing (acc, rec, f1): (1,1,1) only when the component counts
    fully agree, otherwise (0,0,0)."""
    if pred_total == label_total == count:
        return (1, 1, 1)
    return (0, 0, 0)
def eval_sel(pred, label):
    """Count matching SELECT units, with and without their aggregators.

    Matching removes from the label's own select list so duplicates pair
    one-to-one (mutates `label`). Returns (label_total, pred_total, cnt,
    cnt_wo_agg).
    """
    pred_units = pred['select'][1]
    gold_units = label['select'][1]
    gold_wo_agg = [u[1] for u in gold_units]
    pred_total = len(pred_units)
    label_total = len(gold_units)
    cnt = 0
    cnt_wo_agg = 0
    for u in pred_units:
        if u in gold_units:
            cnt += 1
            gold_units.remove(u)
        if u[1] in gold_wo_agg:
            cnt_wo_agg += 1
            gold_wo_agg.remove(u[1])
    return (label_total, pred_total, cnt, cnt_wo_agg)
def eval_where(pred, label):
    """Count matching WHERE condition units, with and without the operator.

    Matching removes from local copies of the label's condition lists so
    duplicates pair one-to-one. Returns (label_total, pred_total, cnt,
    cnt_wo_agg).
    """
    pred_conds = list(pred['where'][::2])
    label_conds = list(label['where'][::2])
    label_vals = [c[2] for c in label_conds]
    pred_total = len(pred_conds)
    label_total = len(label_conds)
    cnt = 0
    cnt_wo_agg = 0
    for c in pred_conds:
        if c in label_conds:
            cnt += 1
            label_conds.remove(c)
        if c[2] in label_vals:
            cnt_wo_agg += 1
            label_vals.remove(c[2])
    return (label_total, pred_total, cnt, cnt_wo_agg)
def eval_group(pred, label):
    """Count matching GROUP BY columns, ignoring 'table.' prefixes.

    Returns (label_total, pred_total, cnt).
    """
    def _strip(col):
        return col.split('.')[1] if '.' in col else col

    pred_cols = [unit[1] for unit in pred['groupBy']]
    label_cols = [unit[1] for unit in label['groupBy']]
    pred_total = len(pred_cols)
    label_total = len(label_cols)
    pred_cols = [_strip(c) for c in pred_cols]
    label_cols = [_strip(c) for c in label_cols]
    cnt = 0
    for col in pred_cols:
        if col in label_cols:
            cnt += 1
            label_cols.remove(col)
    return (label_total, pred_total, cnt)
def eval_having(pred, label):
    """All-or-nothing HAVING check: counts 1 only when both sides group and
    both the grouping columns and the having clause are identical.

    Returns (label_total, pred_total, cnt).
    """
    pred_total = 1 if len(pred['groupBy']) > 0 else 0
    label_total = 1 if len(label['groupBy']) > 0 else 0
    pred_cols = [unit[1] for unit in pred['groupBy']]
    label_cols = [unit[1] for unit in label['groupBy']]
    cnt = 0
    if (pred_total == label_total == 1) and pred_cols == label_cols and pred['having'] == label['having']:
        cnt = 1
    return (label_total, pred_total, cnt)
def eval_order(pred, label):
    """All-or-nothing ORDER BY check: the clause must be identical and the
    LIMITs must agree on presence (both set or both absent).

    Returns (label_total, pred_total, cnt).
    """
    pred_total = 1 if len(pred['orderBy']) > 0 else 0
    label_total = 1 if len(label['orderBy']) > 0 else 0
    limits_agree = (pred['limit'] is None) == (label['limit'] is None)
    cnt = 0
    if len(label['orderBy']) > 0 and pred['orderBy'] == label['orderBy'] and limits_agree:
        cnt = 1
    return (label_total, pred_total, cnt)
def eval_and_or(pred, label):
    """Compare the sets of AND/OR connectors in the two WHERE clauses.

    Returns (1, 1, 1) on set equality, else (len(pred_set), len(label_set), 0).
    """
    pred_connectors = set(pred['where'][1::2])
    label_connectors = set(label['where'][1::2])
    if pred_connectors == label_connectors:
        return (1, 1, 1)
    return (len(pred_connectors), len(label_connectors), 0)
def get_nestedSQL(sql):
    """Collect nested subquery dicts: condition values from FROM/WHERE/HAVING
    plus any INTERSECT/EXCEPT/UNION arm, in that order."""
    nested = []
    cond_units = sql['from']['conds'][::2] + sql['where'][::2] + sql['having'][::2]
    for cond_unit in cond_units:
        for val in (cond_unit[3], cond_unit[4]):
            if type(val) is dict:
                nested.append(val)
    for set_op in ('intersect', 'except', 'union'):
        if sql[set_op] is not None:
            nested.append(sql[set_op])
    return nested
def eval_nested(pred, label):
    """Score one nested-sql slot: totals mark presence on each side, and cnt
    is the exact-match score when both sides are present.

    Returns (label_total, pred_total, cnt).
    """
    pred_total = 0 if pred is None else 1
    label_total = 0 if label is None else 1
    cnt = 0
    if pred is not None and label is not None:
        cnt += Evaluator().eval_exact_match(pred, label)
    return (label_total, pred_total, cnt)
def eval_IUEN(pred, label):
    """Aggregate nested-sql scores over INTERSECT, EXCEPT and UNION arms.

    Returns (label_total, pred_total, cnt) summed across the three.
    """
    label_total = 0
    pred_total = 0
    cnt = 0
    for set_op in ('intersect', 'except', 'union'):
        (lt, pt, c) = eval_nested(pred[set_op], label[set_op])
        label_total += lt
        pred_total += pt
        cnt += c
    return (label_total, pred_total, cnt)
def get_keywords(sql):
    """Collect the set of SQL keywords used by a parsed sql dict.

    Includes clause keywords (where/group/having/order/limit), the order
    direction token, set operators, connectors ('or'), and operator keywords
    (not/in/like).
    """
    keywords = set()
    if len(sql['where']) > 0:
        keywords.add('where')
    if len(sql['groupBy']) > 0:
        keywords.add('group')
    if len(sql['having']) > 0:
        keywords.add('having')
    if len(sql['orderBy']) > 0:
        keywords.add(sql['orderBy'][0])  # direction token from the parse
        keywords.add('order')
    if sql['limit'] is not None:
        keywords.add('limit')
    for set_op in ('except', 'union', 'intersect'):
        if sql[set_op] is not None:
            keywords.add(set_op)
    connectors = sql['from']['conds'][1::2] + sql['where'][1::2] + sql['having'][1::2]
    if any(token == 'or' for token in connectors):
        keywords.add('or')
    cond_units = sql['from']['conds'][::2] + sql['where'][::2] + sql['having'][::2]
    if any(cu[0] for cu in cond_units):  # negation flag set on any condition
        keywords.add('not')
    in_id = WHERE_OPS.index('in')
    like_id = WHERE_OPS.index('like')
    if any(cu[1] == in_id for cu in cond_units):
        keywords.add('in')
    if any(cu[1] == like_id for cu in cond_units):
        keywords.add('like')
    return keywords
def eval_keywords(pred, label):
    """Count keywords shared between the pred and label keyword sets.

    Returns (label_total, pred_total, cnt).
    """
    pred_keywords = get_keywords(pred)
    label_keywords = get_keywords(label)
    matched = len(pred_keywords & label_keywords)
    return (len(label_keywords), len(pred_keywords), matched)
def count_agg(units):
    """Number of units that carry an aggregation operator."""
    return sum(1 for unit in units if has_agg(unit))
def count_component1(sql):
    """Count 'component 1' hardness features: presence of WHERE / GROUP BY /
    ORDER BY / LIMIT, each extra joined table, each OR connector, and each
    LIKE predicate."""
    count = 0
    if len(sql['where']) > 0:
        count += 1
    if len(sql['groupBy']) > 0:
        count += 1
    if len(sql['orderBy']) > 0:
        count += 1
    if sql['limit'] is not None:
        count += 1
    table_units = sql['from']['table_units']
    if len(table_units) > 0:
        count += len(table_units) - 1  # each additional table implies a join
    connectors = sql['from']['conds'][1::2] + sql['where'][1::2] + sql['having'][1::2]
    count += sum(1 for token in connectors if token == 'or')
    like_id = WHERE_OPS.index('like')
    cond_units = sql['from']['conds'][::2] + sql['where'][::2] + sql['having'][::2]
    count += sum(1 for cu in cond_units if cu[1] == like_id)
    return count
def count_component2(sql):
    """Count nested subqueries — 'component 2' of the hardness metric."""
    return len(get_nestedSQL(sql))
def count_others(sql):
    """Count 'others' hardness features, each contributing at most 1:
    more than one aggregation overall, >1 select column, >1 where condition,
    >1 group-by column."""
    count = 0
    agg_count = count_agg(sql['select'][1])
    agg_count += count_agg(sql['where'][::2])
    agg_count += count_agg(sql['groupBy'])
    if len(sql['orderBy']) > 0:
        order_units = sql['orderBy'][1]
        # Aggregations can appear on either column slot of an order unit.
        agg_count += count_agg([u[1] for u in order_units if u[1]]
                               + [u[2] for u in order_units if u[2]])
    agg_count += count_agg(sql['having'])
    if agg_count > 1:
        count += 1
    if len(sql['select'][1]) > 1:
        count += 1
    if len(sql['where']) > 1:
        count += 1
    if len(sql['groupBy']) > 1:
        count += 1
    return count
class Evaluator():
    """A simple evaluator: Spider-style hardness classification plus exact and
    partial (clause-by-clause) matching of parsed sql dicts."""

    def __init__(self):
        # Filled by eval_exact_match with the latest partial-match breakdown.
        self.partial_scores = None

    def eval_hardness(self, sql):
        """Classify `sql` as 'easy'/'medium'/'hard'/'extra' from its
        component-1, component-2 and 'others' feature counts."""
        count_comp1_ = count_component1(sql)
        count_comp2_ = count_component2(sql)
        count_others_ = count_others(sql)
        if ((count_comp1_ <= 1) and (count_others_ == 0) and (count_comp2_ == 0)):
            return 'easy'
        elif (((count_others_ <= 2) and (count_comp1_ <= 1) and (count_comp2_ == 0)) or ((count_comp1_ <= 2) and (count_others_ < 2) and (count_comp2_ == 0))):
            return 'medium'
        elif (((count_others_ > 2) and (count_comp1_ <= 2) and (count_comp2_ == 0)) or ((2 < count_comp1_ <= 3) and (count_others_ <= 2) and (count_comp2_ == 0)) or ((count_comp1_ <= 1) and (count_others_ == 0) and (count_comp2_ <= 1))):
            return 'hard'
        else:
            return 'extra'

    def eval_exact_match(self, pred, label):
        """Return 1 iff every partial f1 is perfect and (when the label has
        tables) the sorted FROM table units match; stores the partial
        breakdown on self.partial_scores as a side effect."""
        partial_scores = self.eval_partial_match(pred, label)
        self.partial_scores = partial_scores
        for (key, score) in partial_scores.items():
            if (score['f1'] != 1):
                return 0
        if (len(label['from']['table_units']) > 0):
            label_tables = sorted(label['from']['table_units'])
            pred_tables = sorted(pred['from']['table_units'])
            return (label_tables == pred_tables)
        return 1

    def eval_partial_match(self, pred, label):
        """Score each clause family and return a dict keyed by component name
        ('select', 'where', 'group', 'order', 'and/or', 'IUEN', 'keywords',
        plus the no-AGG/no-OP/no-Having variants), each holding acc/rec/f1
        and the pred/label totals."""
        res = {}
        (label_total, pred_total, cnt, cnt_wo_agg) = eval_sel(pred, label)
        (acc, rec, f1) = get_scores(cnt, pred_total, label_total)
        res['select'] = {'acc': acc, 'rec': rec, 'f1': f1, 'label_total': label_total, 'pred_total': pred_total}
        (acc, rec, f1) = get_scores(cnt_wo_agg, pred_total, label_total)
        res['select(no AGG)'] = {'acc': acc, 'rec': rec, 'f1': f1, 'label_total': label_total, 'pred_total': pred_total}
        (label_total, pred_total, cnt, cnt_wo_agg) = eval_where(pred, label)
        (acc, rec, f1) = get_scores(cnt, pred_total, label_total)
        res['where'] = {'acc': acc, 'rec': rec, 'f1': f1, 'label_total': label_total, 'pred_total': pred_total}
        (acc, rec, f1) = get_scores(cnt_wo_agg, pred_total, label_total)
        res['where(no OP)'] = {'acc': acc, 'rec': rec, 'f1': f1, 'label_total': label_total, 'pred_total': pred_total}
        (label_total, pred_total, cnt) = eval_group(pred, label)
        (acc, rec, f1) = get_scores(cnt, pred_total, label_total)
        res['group(no Having)'] = {'acc': acc, 'rec': rec, 'f1': f1, 'label_total': label_total, 'pred_total': pred_total}
        (label_total, pred_total, cnt) = eval_having(pred, label)
        (acc, rec, f1) = get_scores(cnt, pred_total, label_total)
        res['group'] = {'acc': acc, 'rec': rec, 'f1': f1, 'label_total': label_total, 'pred_total': pred_total}
        (label_total, pred_total, cnt) = eval_order(pred, label)
        (acc, rec, f1) = get_scores(cnt, pred_total, label_total)
        res['order'] = {'acc': acc, 'rec': rec, 'f1': f1, 'label_total': label_total, 'pred_total': pred_total}
        (label_total, pred_total, cnt) = eval_and_or(pred, label)
        (acc, rec, f1) = get_scores(cnt, pred_total, label_total)
        res['and/or'] = {'acc': acc, 'rec': rec, 'f1': f1, 'label_total': label_total, 'pred_total': pred_total}
        (label_total, pred_total, cnt) = eval_IUEN(pred, label)
        (acc, rec, f1) = get_scores(cnt, pred_total, label_total)
        res['IUEN'] = {'acc': acc, 'rec': rec, 'f1': f1, 'label_total': label_total, 'pred_total': pred_total}
        (label_total, pred_total, cnt) = eval_keywords(pred, label)
        (acc, rec, f1) = get_scores(cnt, pred_total, label_total)
        res['keywords'] = {'acc': acc, 'rec': rec, 'f1': f1, 'label_total': label_total, 'pred_total': pred_total}
        return res
def isValidSQL(sql, db):
    """Return True iff `sql` executes without error against the SQLite db
    file at `db`.

    Fixes: the original leaked the connection on every call (never closed)
    and used a bare ``except:`` that would also swallow KeyboardInterrupt;
    the connection is now always closed and only Exception is caught.
    """
    conn = sqlite3.connect(db)
    cursor = conn.cursor()
    try:
        cursor.execute(sql)
    except Exception:
        return False
    finally:
        conn.close()
    return True
def print_formated_s(row_name, l, element_format):
    """Print `row_name` left-padded to 20 chars, then every element of `l`
    rendered with `element_format`, space-separated, on one line."""
    template = '{:20} ' + ' '.join([element_format] * len(l))
    print(template.format(row_name, *l))
def print_scores(scores, etype, include_turn_acc=True):
    """Pretty-print the nested `scores` table produced by evaluate().

    `etype` selects which sections appear ('exec', 'match', or 'all');
    `include_turn_acc` additionally prints per-turn tables and enables the
    'joint_all' column.
    """
    turns = ['turn 1', 'turn 2', 'turn 3', 'turn 4', 'turn > 4']
    levels = ['easy', 'medium', 'hard', 'extra', 'all']
    if include_turn_acc:
        levels.append('joint_all')
    partial_types = ['select', 'select(no AGG)', 'where', 'where(no OP)', 'group(no Having)', 'group', 'order', 'and/or', 'IUEN', 'keywords']
    # Header row + example counts per hardness level.
    print_formated_s('', levels, '{:20}')
    counts = [scores[level]['count'] for level in levels]
    print_formated_s('count', counts, '{:<20d}')
    if (etype in ['all', 'exec']):
        print('=====================   EXECUTION ACCURACY     =====================')
        exec_scores = [scores[level]['exec'] for level in levels]
        print_formated_s('execution', exec_scores, '{:<20.3f}')
    if (etype in ['all', 'match']):
        print('\n====================== EXACT MATCHING ACCURACY =====================')
        exact_scores = [scores[level]['exact'] for level in levels]
        print_formated_s('exact match', exact_scores, '{:<20.3f}')
        print('\n---------------------PARTIAL MATCHING ACCURACY----------------------')
        for type_ in partial_types:
            this_scores = [scores[level]['partial'][type_]['acc'] for level in levels]
            print_formated_s(type_, this_scores, '{:<20.3f}')
        print('---------------------- PARTIAL MATCHING RECALL ----------------------')
        for type_ in partial_types:
            this_scores = [scores[level]['partial'][type_]['rec'] for level in levels]
            print_formated_s(type_, this_scores, '{:<20.3f}')
        print('---------------------- PARTIAL MATCHING F1 --------------------------')
        for type_ in partial_types:
            this_scores = [scores[level]['partial'][type_]['f1'] for level in levels]
            print_formated_s(type_, this_scores, '{:<20.3f}')
    if include_turn_acc:
        print()
        print()
        # Same report again, broken down by conversation turn.
        print_formated_s('', turns, '{:20}')
        counts = [scores[turn]['count'] for turn in turns]
        print_formated_s('count', counts, '{:<20d}')
        if (etype in ['all', 'exec']):
            print('=====================   TURN EXECUTION ACCURACY     =====================')
            exec_scores = [scores[turn]['exec'] for turn in turns]
            print_formated_s('execution', exec_scores, '{:<20.3f}')
        if (etype in ['all', 'match']):
            print('\n====================== TURN EXACT MATCHING ACCURACY =====================')
            exact_scores = [scores[turn]['exact'] for turn in turns]
            print_formated_s('exact match', exact_scores, '{:<20.3f}')
def evaluate(gold, predict, db_dir, etype, kmaps, plug_value, keep_distinct, progress_bar_for_each_datapoint):
    """Evaluate predicted SQL against gold SQL and print a score report.

    `gold`/`predict` are tab-separated files where blank lines delimit
    multi-turn sessions; `db_dir` holds one sqlite db per db_id; `etype`
    selects execution and/or exact-match scoring; `kmaps` maps db_id to its
    foreign-key canonicalization map. The remaining flags are forwarded to
    eval_exec_match.
    """
    # Parse the gold file into sessions (lists of [query, db_id] pairs).
    with open(gold) as f:
        glist = []
        gseq_one = []
        for l in f.readlines():
            if (len(l.strip()) == 0):
                glist.append(gseq_one)
                gseq_one = []
            else:
                lstrip = l.strip().split('\t')
                gseq_one.append(lstrip)
        if (len(gseq_one) != 0):
            glist.append(gseq_one)
    # Multiple sessions -> report per-turn and joint accuracies too.
    include_turn_acc = (len(glist) > 1)
    # Parse the prediction file with the same session structure.
    with open(predict) as f:
        plist = []
        pseq_one = []
        for l in f.readlines():
            if (len(l.strip()) == 0):
                plist.append(pseq_one)
                pseq_one = []
            else:
                pseq_one.append(l.strip().split('\t'))
        if (len(pseq_one) != 0):
            plist.append(pseq_one)
    assert (len(plist) == len(glist)), 'number of sessions must equal'
    evaluator = Evaluator()
    turns = ['turn 1', 'turn 2', 'turn 3', 'turn 4', 'turn > 4']
    levels = ['easy', 'medium', 'hard', 'extra', 'all', 'joint_all']
    partial_types = ['select', 'select(no AGG)', 'where', 'where(no OP)', 'group(no Having)', 'group', 'order', 'and/or', 'IUEN', 'keywords']
    entries = []
    # Initialize the score accumulators for every turn/level bucket.
    scores = {}
    for turn in turns:
        scores[turn] = {'count': 0, 'exact': 0.0}
        scores[turn]['exec'] = 0
    for level in levels:
        scores[level] = {'count': 0, 'partial': {}, 'exact': 0.0}
        scores[level]['exec'] = 0
        for type_ in partial_types:
            scores[level]['partial'][type_] = {'acc': 0.0, 'rec': 0.0, 'f1': 0.0, 'acc_count': 0, 'rec_count': 0}
    for (i, (p, g)) in enumerate(zip(plist, glist)):
        if (((i + 1) % 10) == 0):
            print(('Evaluating %dth prediction' % (i + 1)))
        scores['joint_all']['count'] += 1
        turn_scores = {'exec': [], 'exact': []}
        for (idx, pg) in enumerate(zip(p, g)):
            # NOTE: rebinds the session vars p/g to the per-turn pair.
            (p, g) = pg
            p_str = p[0]
            # Placeholder 'value' tokens are plugged with a literal 1.
            p_str = p_str.replace('value', '1')
            (g_str, db) = g
            db_name = db
            db = os.path.join(db_dir, db, (db + '.sqlite'))
            schema = Schema(get_schema(db))
            g_sql = get_sql(schema, g_str)
            hardness = evaluator.eval_hardness(g_sql)
            # Map the 0-based turn index to its report bucket.
            if (idx > 3):
                idx = '> 4'
            else:
                idx += 1
            turn_id = ('turn ' + str(idx))
            scores[turn_id]['count'] += 1
            scores[hardness]['count'] += 1
            scores['all']['count'] += 1
            try:
                p_sql = get_sql(schema, p_str)
            except:
                # Unparsable prediction: substitute an empty parse so
                # scoring can proceed (it will match nothing).
                p_sql = {'except': None, 'from': {'conds': [], 'table_units': []}, 'groupBy': [], 'having': [], 'intersect': None, 'limit': None, 'orderBy': [], 'select': [False, []], 'union': None, 'where': []}
            if (etype in ['all', 'exec']):
                exec_score = eval_exec_match(db=db, p_str=p_str, g_str=g_str, plug_value=plug_value, keep_distinct=keep_distinct, progress_bar_for_each_datapoint=progress_bar_for_each_datapoint)
                if exec_score:
                    scores[hardness]['exec'] += 1
                    scores[turn_id]['exec'] += 1
                    scores['all']['exec'] += 1
                    turn_scores['exec'].append(1)
                else:
                    turn_scores['exec'].append(0)
            if (etype in ['all', 'match']):
                # Canonicalize both parses (strip values, remap foreign-key
                # columns) before structural comparison.
                kmap = kmaps[db_name]
                g_valid_col_units = build_valid_col_units(g_sql['from']['table_units'], schema)
                g_sql = rebuild_sql_val(g_sql)
                g_sql = rebuild_sql_col(g_valid_col_units, g_sql, kmap)
                p_valid_col_units = build_valid_col_units(p_sql['from']['table_units'], schema)
                p_sql = rebuild_sql_val(p_sql)
                p_sql = rebuild_sql_col(p_valid_col_units, p_sql, kmap)
                exact_score = evaluator.eval_exact_match(p_sql, g_sql)
                partial_scores = evaluator.partial_scores
                if (exact_score == 0):
                    turn_scores['exact'].append(0)
                    print('{} pred: {}'.format(hardness, p_str))
                    print('{} gold: {}'.format(hardness, g_str))
                    print('')
                else:
                    turn_scores['exact'].append(1)
                scores[turn_id]['exact'] += exact_score
                scores[hardness]['exact'] += exact_score
                scores['all']['exact'] += exact_score
                # Accumulate partial scores into the hardness bucket and the
                # 'all' bucket; acc/rec only count when the corresponding
                # side actually had content.
                for type_ in partial_types:
                    if (partial_scores[type_]['pred_total'] > 0):
                        scores[hardness]['partial'][type_]['acc'] += partial_scores[type_]['acc']
                        scores[hardness]['partial'][type_]['acc_count'] += 1
                    if (partial_scores[type_]['label_total'] > 0):
                        scores[hardness]['partial'][type_]['rec'] += partial_scores[type_]['rec']
                        scores[hardness]['partial'][type_]['rec_count'] += 1
                    scores[hardness]['partial'][type_]['f1'] += partial_scores[type_]['f1']
                    if (partial_scores[type_]['pred_total'] > 0):
                        scores['all']['partial'][type_]['acc'] += partial_scores[type_]['acc']
                        scores['all']['partial'][type_]['acc_count'] += 1
                    if (partial_scores[type_]['label_total'] > 0):
                        scores['all']['partial'][type_]['rec'] += partial_scores[type_]['rec']
                        scores['all']['partial'][type_]['rec_count'] += 1
                    scores['all']['partial'][type_]['f1'] += partial_scores[type_]['f1']
                entries.append({'predictSQL': p_str, 'goldSQL': g_str, 'hardness': hardness, 'exact': exact_score, 'partial': partial_scores})
        # A session counts for joint scores only if every turn succeeded.
        if all(((v == 1) for v in turn_scores['exec'])):
            scores['joint_all']['exec'] += 1
        if all(((v == 1) for v in turn_scores['exact'])):
            scores['joint_all']['exact'] += 1
    # Normalize accumulated sums into averages per bucket.
    for turn in turns:
        if (scores[turn]['count'] == 0):
            continue
        if (etype in ['all', 'exec']):
            scores[turn]['exec'] /= scores[turn]['count']
        if (etype in ['all', 'match']):
            scores[turn]['exact'] /= scores[turn]['count']
    for level in levels:
        if (scores[level]['count'] == 0):
            continue
        if (etype in ['all', 'exec']):
            scores[level]['exec'] /= scores[level]['count']
        if (etype in ['all', 'match']):
            scores[level]['exact'] /= scores[level]['count']
        for type_ in partial_types:
            if (scores[level]['partial'][type_]['acc_count'] == 0):
                scores[level]['partial'][type_]['acc'] = 0
            else:
                scores[level]['partial'][type_]['acc'] = ((scores[level]['partial'][type_]['acc'] / scores[level]['partial'][type_]['acc_count']) * 1.0)
            if (scores[level]['partial'][type_]['rec_count'] == 0):
                scores[level]['partial'][type_]['rec'] = 0
            else:
                scores[level]['partial'][type_]['rec'] = ((scores[level]['partial'][type_]['rec'] / scores[level]['partial'][type_]['rec_count']) * 1.0)
            if ((scores[level]['partial'][type_]['acc'] == 0) and (scores[level]['partial'][type_]['rec'] == 0)):
                scores[level]['partial'][type_]['f1'] = 1
            else:
                scores[level]['partial'][type_]['f1'] = (((2.0 * scores[level]['partial'][type_]['acc']) * scores[level]['partial'][type_]['rec']) / (scores[level]['partial'][type_]['rec'] + scores[level]['partial'][type_]['acc']))
    print_scores(scores, etype, include_turn_acc=include_turn_acc)
def rebuild_cond_unit_val(cond_unit):
    """Strip literal values from a condition unit, keeping (recursively
    stripped) nested sql dicts. No-op unless DISABLE_VALUE is set."""
    if cond_unit is None or not DISABLE_VALUE:
        return cond_unit
    (not_op, op_id, val_unit, val1, val2) = cond_unit
    val1 = rebuild_sql_val(val1) if type(val1) is dict else None
    val2 = rebuild_sql_val(val2) if type(val2) is dict else None
    return (not_op, op_id, val_unit, val1, val2)
def rebuild_condition_val(condition):
    """Apply rebuild_cond_unit_val to every even-indexed entry (the condition
    units), leaving the and/or connectors untouched. No-op unless
    DISABLE_VALUE is set."""
    if condition is None or not DISABLE_VALUE:
        return condition
    return [rebuild_cond_unit_val(item) if idx % 2 == 0 else item
            for (idx, item) in enumerate(condition)]
def rebuild_sql_val(sql):
    """Recursively strip literal values from a parsed sql dict, in place.
    No-op unless DISABLE_VALUE is set."""
    if sql is None or not DISABLE_VALUE:
        return sql
    sql['from']['conds'] = rebuild_condition_val(sql['from']['conds'])
    sql['having'] = rebuild_condition_val(sql['having'])
    sql['where'] = rebuild_condition_val(sql['where'])
    for set_op in ('intersect', 'except', 'union'):
        sql[set_op] = rebuild_sql_val(sql[set_op])
    return sql
def build_valid_col_units(table_units, schema):
    """Collect schema column ids whose table prefix matches one of the plain
    tables referenced in `table_units`."""
    table_ids = [tu[1] for tu in table_units if tu[0] == TABLE_TYPE['table_unit']]
    prefixes = [tid[:-2] for tid in table_ids]  # drop the trailing '__'
    return [value for value in schema.idMap.values()
            if '.' in value and value[:value.index('.')] in prefixes]
def rebuild_col_unit_col(valid_col_units, col_unit, kmap):
    """Canonicalize a column unit's column id via the foreign-key map `kmap`
    (only for ids in `valid_col_units`); drops DISTINCT when disabled."""
    if col_unit is None:
        return col_unit
    (agg_id, col_id, distinct) = col_unit
    if col_id in kmap and col_id in valid_col_units:
        col_id = kmap[col_id]
    if DISABLE_DISTINCT:
        distinct = None
    return (agg_id, col_id, distinct)
def rebuild_val_unit_col(valid_col_units, val_unit, kmap):
    """Canonicalize both column units inside a value unit."""
    if val_unit is None:
        return val_unit
    (unit_op, col_unit1, col_unit2) = val_unit
    return (unit_op,
            rebuild_col_unit_col(valid_col_units, col_unit1, kmap),
            rebuild_col_unit_col(valid_col_units, col_unit2, kmap))
def rebuild_table_unit_col(valid_col_units, table_unit, kmap):
    """Canonicalize a table unit when its payload is a column unit tuple
    (nested sql payloads are left as-is here)."""
    if table_unit is None:
        return table_unit
    (table_type, payload) = table_unit
    if isinstance(payload, tuple):
        payload = rebuild_col_unit_col(valid_col_units, payload, kmap)
    return (table_type, payload)
def rebuild_cond_unit_col(valid_col_units, cond_unit, kmap):
    """Canonicalize the value unit inside a condition unit (values are left
    untouched at this stage)."""
    if cond_unit is None:
        return cond_unit
    (not_op, op_id, val_unit, val1, val2) = cond_unit
    rebuilt = rebuild_val_unit_col(valid_col_units, val_unit, kmap)
    return (not_op, op_id, rebuilt, val1, val2)
def rebuild_condition_col(valid_col_units, condition, kmap):
    """Canonicalize every even-indexed condition unit in place, skipping the
    and/or connectors; returns the same list."""
    for idx in range(0, len(condition), 2):
        condition[idx] = rebuild_cond_unit_col(valid_col_units, condition[idx], kmap)
    return condition
def rebuild_select_col(valid_col_units, sel, kmap):
    """Canonicalize each (agg, val_unit) pair of the select clause; drops the
    DISTINCT flag when disabled."""
    if sel is None:
        return sel
    (distinct, units) = sel
    rebuilt = [(agg_id, rebuild_val_unit_col(valid_col_units, val_unit, kmap))
               for (agg_id, val_unit) in units]
    if DISABLE_DISTINCT:
        distinct = None
    return (distinct, rebuilt)
def rebuild_from_col(valid_col_units, from_, kmap):
    """Canonicalize the table units and join conditions of a FROM clause
    (mutates and returns `from_`)."""
    if from_ is None:
        return from_
    from_['table_units'] = [rebuild_table_unit_col(valid_col_units, tu, kmap)
                            for tu in from_['table_units']]
    from_['conds'] = rebuild_condition_col(valid_col_units, from_['conds'], kmap)
    return from_
def rebuild_group_by_col(valid_col_units, group_by, kmap):
    """Canonicalize every column unit of a GROUP BY clause."""
    if group_by is None:
        return group_by
    return [rebuild_col_unit_col(valid_col_units, cu, kmap) for cu in group_by]
def rebuild_order_by_col(valid_col_units, order_by, kmap):
    """Canonicalize the value units of an ORDER BY clause, preserving the
    direction token; empty/None clauses pass through unchanged."""
    if order_by is None or len(order_by) == 0:
        return order_by
    (direction, val_units) = order_by
    rebuilt = [rebuild_val_unit_col(valid_col_units, vu, kmap) for vu in val_units]
    return (direction, rebuilt)
def rebuild_sql_col(valid_col_units, sql, kmap):
    """Canonicalize every clause of a parsed sql dict via the foreign-key map,
    recursing into INTERSECT/EXCEPT/UNION arms (mutates and returns `sql`)."""
    if sql is None:
        return sql
    sql['select'] = rebuild_select_col(valid_col_units, sql['select'], kmap)
    sql['from'] = rebuild_from_col(valid_col_units, sql['from'], kmap)
    sql['where'] = rebuild_condition_col(valid_col_units, sql['where'], kmap)
    sql['groupBy'] = rebuild_group_by_col(valid_col_units, sql['groupBy'], kmap)
    sql['orderBy'] = rebuild_order_by_col(valid_col_units, sql['orderBy'], kmap)
    sql['having'] = rebuild_condition_col(valid_col_units, sql['having'], kmap)
    for set_op in ('intersect', 'except', 'union'):
        sql[set_op] = rebuild_sql_col(valid_col_units, sql[set_op], kmap)
    return sql
def build_foreign_key_map(entry):
    """Map each column key to the canonical member of its foreign-key group.

    Columns render as '__table.column__' (lowercase; '__all__' for the *
    column). Foreign-key pairs are merged into groups, and every member maps
    to the column with the smallest id in its group.
    """
    cols_orig = entry['column_names_original']
    tables_orig = entry['table_names_original']
    cols = []
    for (tab_id, col_name) in cols_orig:
        if tab_id >= 0:
            cols.append('__' + tables_orig[tab_id].lower() + '.' + col_name.lower() + '__')
        else:
            cols.append('__all__')

    def _find_group(k1, k2, groups):
        # Reuse an existing group containing either key, else open a new one.
        for g in groups:
            if k1 in g or k2 in g:
                return g
        g = set()
        groups.append(g)
        return g

    groups = []
    for (key1, key2) in entry['foreign_keys']:
        g = _find_group(key1, key2, groups)
        g.add(key1)
        g.add(key2)

    foreign_key_map = {}
    for g in groups:
        members = sorted(g)
        canonical = members[0]
        for idx in members:
            foreign_key_map[cols[idx]] = cols[canonical]
    return foreign_key_map
def build_foreign_key_map_from_json(table):
    """Load a tables.json file and build one foreign-key map per db_id."""
    with open(table) as f:
        entries = json.load(f)
    return {entry['db_id']: build_foreign_key_map(entry) for entry in entries}
def permute_tuple(element: Tuple, perm: Tuple) -> Tuple:
    """Reorder `element` so that position i holds element[perm[i]]."""
    assert len(element) == len(perm)
    return tuple(element[i] for i in perm)
def unorder_row(row: Tuple) -> Tuple:
    """Return a canonical ordering of a row's cells so column order is ignored.

    Cells are keyed by str(value) + str(type), which yields a total order
    even for rows mixing types (e.g. int and str).
    """
    def cell_key(cell):
        return str(cell) + str(type(cell))
    return tuple(sorted(row, key=cell_key))
def quick_rej(result1: List[Tuple], result2: List[Tuple], order_matters: bool) -> bool:
    """Cheap necessary condition for result-set equality.

    Each row is first canonicalized (cell order ignored); then rows are
    compared as a sequence when order matters, otherwise as a set.
    """
    canon1 = [unorder_row(r) for r in result1]
    canon2 = [unorder_row(r) for r in result2]
    if order_matters:
        return canon1 == canon2
    return set(canon1) == set(canon2)
def multiset_eq(l1: List, l2: List) -> bool:
    """Return True iff l1 and l2 are equal as multisets (bags).

    Elements must be hashable, exactly as in the original dict-based
    implementation.

    :param l1: first list of (hashable) elements
    :param l2: second list
    :return: True when both lists contain the same elements with the same
             multiplicities, regardless of order
    """
    # Length check first: cheap early exit for the common mismatch case.
    if len(l1) != len(l2):
        return False
    # Counter equality is exact bag equality — clearer than the original
    # manual increment/decrement bookkeeping and behaviorally identical.
    from collections import Counter
    return Counter(l1) == Counter(l2)
def get_constraint_permutation(tab1_sets_by_columns: List[Set], result2: List[Tuple]):
    """Yield candidate column permutations, pruned by sampled value constraints.

    For narrow results (<= 3 columns) every mapping is enumerated.  For wider
    results, up to 20 randomly sampled rows of `result2` are used to discard
    column mappings whose sampled value never occurs in the corresponding
    column of result1.
    """
    num_cols = len(result2[0])
    candidates = [set(range(num_cols)) for _ in range(num_cols)]
    if num_cols <= 3:
        return product(*candidates)
    # Prune: a mapping col1 <- col2 survives only if every sampled value of
    # result2's col2 appears among result1's col1 values.
    for _ in range(20):
        sample_row = random.choice(result2)
        for col1 in range(num_cols):
            allowed_values = tab1_sets_by_columns[col1]
            for col2 in list(candidates[col1]):
                if sample_row[col2] not in allowed_values:
                    candidates[col1].remove(col2)
    return product(*candidates)
def result_eq(result1: List[Tuple], result2: List[Tuple], order_matters: bool) -> bool:
    """Decide whether two query denotations are equal up to a column permutation.

    Rows must match one-to-one; columns of result2 may be permuted.  When
    `order_matters` (ORDER BY in the gold query) row order must match too.
    """
    if not result1 and not result2:
        return True
    if len(result1) != len(result2):
        return False
    num_cols = len(result1[0])
    if len(result2[0]) != num_cols:
        return False
    # Fast necessary check before trying permutations.
    if not quick_rej(result1, result2, order_matters):
        return False
    cols1 = [{row[i] for row in result1} for i in range(num_cols)]
    for perm in get_constraint_permutation(cols1, result2):
        if len(set(perm)) != len(perm):
            continue  # not a bijection of columns
        if num_cols == 1:
            permuted2 = result2
        else:
            permuted2 = [permute_tuple(row, perm) for row in result2]
        if order_matters:
            if result1 == permuted2:
                return True
        elif set(result1) == set(permuted2) and multiset_eq(result1, permuted2):
            return True
    return False
def clean_tmp_f(f_prefix: str):
    """Delete the temporary .in/.out files for one subprocess run.

    Guarded by the module-wide threadLock so concurrent workers do not race
    on the same prefix.
    """
    with threadLock:
        for extension in ('.in', '.out'):
            candidate = f_prefix + extension
            if os.path.exists(candidate):
                os.remove(candidate)
def exec_on_db(sqlite_path: str, query: str, process_id: str = '', timeout: int = TIMEOUT) -> Tuple[(str, Any)]:
    """Execute `query` on a sqlite database in a subprocess with a hard timeout.

    The (db path, query) pair is pickled to a unique temp file; a helper
    script writes the pickled outcome next to it.  A missing or unreadable
    result file is reported as a timeout.

    :param sqlite_path: path to the .sqlite file
    :param query: SQL to execute
    :param process_id: seed for the unique temp-file prefix
    :param timeout: subprocess timeout in seconds
    :return: ('result', rows) or ('exception', error)
    """
    f_prefix = None
    with threadLock:
        # Reserve a unique temp prefix (time + random suffix until unused).
        while f_prefix is None or os.path.exists(f_prefix + '.in'):
            process_id += str(time.time())
            process_id += str(random.randint(0, 10000000000))
            f_prefix = os.path.join(EXEC_TMP_DIR, process_id)
        # BUGFIX: the original leaked this handle via
        # pkl.dump(..., open(..., 'wb')); use a context manager.
        with open(f_prefix + '.in', 'wb') as fin:
            pkl.dump((sqlite_path, query), fin)
    try:
        # BUGFIX: the stderr log file was opened and never closed.
        with open('runerr.log', 'a') as errlog:
            subprocess.call(['python3', 'exec_subprocess.py', f_prefix], timeout=timeout, stderr=errlog)
    except Exception as e:
        print(e)
        clean_tmp_f(f_prefix)
        return ('exception', e)
    result_path = f_prefix + '.out'
    # Default: treat an absent/corrupt result as a timeout.
    returned_val = ('exception', TimeoutError)
    try:
        if os.path.exists(result_path):
            # BUGFIX: result file handle was leaked; also narrowed the bare
            # `except:` (which swallowed KeyboardInterrupt) to Exception.
            with open(result_path, 'rb') as fout:
                returned_val = pkl.load(fout)
    except Exception:
        pass  # deliberate best-effort: fall back to the timeout sentinel
    clean_tmp_f(f_prefix)
    return returned_val
def postprocess(query: str) -> str:
    """Re-join two-character comparison operators split apart by tokenization."""
    for split_form, joined_form in (('> =', '>='), ('< =', '<='), ('! =', '!=')):
        query = query.replace(split_form, joined_form)
    return query
def eval_exec_match(db: str, p_str: str, g_str: str, plug_value: bool, keep_distinct: bool, progress_bar_for_each_datapoint: bool) -> int:
    """Test-suite execution accuracy for one (prediction, gold) pair.

    Runs both queries on every .sqlite database in the gold db's directory
    and returns 1 iff some candidate prediction matches the gold denotation
    on all of them, else 0.

    :param db: path to the gold database file (its directory is scanned for
        sibling .sqlite databases — the "test suite")
    :param p_str: predicted SQL
    :param g_str: gold SQL
    :param plug_value: if True, also try the prediction with gold values
        plugged into its value slots
    :param keep_distinct: if False, DISTINCT is stripped from both queries
    :param progress_bar_for_each_datapoint: wrap the db loop in tqdm
    :raises AssertionError: if the gold query itself fails on any database
    """
    (p_str, g_str) = (postprocess(p_str), postprocess(g_str))
    if (not keep_distinct):
        p_str = remove_distinct(p_str)
        g_str = remove_distinct(g_str)
    # Row order is only significant when the gold query orders its output.
    order_matters = ('order by' in g_str.lower())
    db_dir = os.path.dirname(db)
    db_paths = [os.path.join(db_dir, basename) for basename in os.listdir(db_dir) if ('.sqlite' in basename)]
    preds = [p_str]
    if plug_value:
        # The raw prediction is tried first, then every value-plugged variant.
        (_, preds) = get_all_preds_for_execution(g_str, p_str)
        preds = chain([p_str], preds)
    for pred in preds:
        pred_passes = 1
        if progress_bar_for_each_datapoint:
            ranger = tqdm.tqdm(db_paths)
        else:
            ranger = db_paths
        for db_path in ranger:
            (g_flag, g_denotation) = exec_on_db(db_path, g_str)
            (p_flag, p_denotation) = exec_on_db(db_path, pred)
            assert (g_flag != 'exception'), ('gold query %s has error on database file %s' % (g_str, db_path))
            if (p_flag == 'exception'):
                pred_passes = 0
            elif (not result_eq(g_denotation, p_denotation, order_matters=order_matters)):
                pred_passes = 0
            # One failing database disqualifies this candidate.
            if (pred_passes == 0):
                break
        # Any candidate passing every database counts as a match.
        if (pred_passes == 1):
            return 1
    return 0
def replace_cur_year(query: str) -> str:
    """Replace YEAR(CURDATE()) — any spacing, any case — with the literal 2020.

    Note the trailing \\s* in the pattern: whitespace immediately after the
    closing parenthesis is consumed by the substitution as well.
    """
    pattern = r'YEAR\s*\(\s*CURDATE\s*\(\s*\)\s*\)\s*'
    return re.sub(pattern, '2020', query, flags=re.IGNORECASE)
def get_cursor_from_path(sqlite_path: str):
    """Connect to `sqlite_path` and return a cursor.

    A missing file is logged (sqlite will create it); connection errors are
    logged with the offending path and re-raised.
    """
    try:
        if not os.path.exists(sqlite_path):
            print('Openning a new connection %s' % sqlite_path)
        connection = sqlite3.connect(sqlite_path)
    except Exception as e:
        print(sqlite_path)
        raise e
    # Decode TEXT columns leniently so malformed bytes don't abort execution.
    connection.text_factory = lambda b: b.decode(errors='ignore')
    return connection.cursor()
def exec_on_db_(sqlite_path: str, query: str) -> Tuple[(str, Any)]:
    """Run `query` (with YEAR(CURDATE()) normalized) on the database.

    Returns ('result', rows) on success or ('exception', error) on failure;
    the cursor and connection are always closed before returning.
    """
    query = replace_cur_year(query)
    cursor = get_cursor_from_path(sqlite_path)
    try:
        cursor.execute(query)
        return ('result', cursor.fetchall())
    except Exception as e:
        return ('exception', e)
    finally:
        cursor.close()
        cursor.connection.close()
def tokenize(query: str) -> List[Token]:
    """Flatten `query` into a list of fresh sqlparse leaf Tokens.

    NOTE: a second, unrelated `tokenize(string)` defined later in this file
    shadows this one at module level.
    """
    leaves = sqlparse.parse(query)[0].flatten()
    return [Token(leaf.ttype, leaf.value) for leaf in leaves]
def join_tokens(tokens: List[Token]) -> str:
    """Concatenate token values and strip the result.

    NOTE(review): the trailing .replace(' ', ' ') is a no-op as written; it
    was presumably meant to collapse doubled spaces — confirm upstream before
    changing it.
    """
    joined = ''.join(tok.value for tok in tokens)
    return joined.strip().replace(' ', ' ')
def round_trip_test(query: str) -> None:
    """Assert that tokenizing then re-joining reproduces the input exactly."""
    reconstructed = ''.join(tok.value for tok in tokenize(query))
    assert query == reconstructed, ('Round trip test fails for string %s' % query)
def postprocess(query: str) -> str:
    """Re-join comparison operators that tokenization split with a space."""
    fixes = {'> =': '>=', '< =': '<=', '! =': '!='}
    for broken, fixed in fixes.items():
        query = query.replace(broken, fixed)
    return query
def strip_query(query: str) -> Tuple[(List[str], List[str])]:
    """Split `query` into lowercased keyword tokens and extracted values.

    String literals, floats, and (non-alias) integers are each replaced by
    VALUE_NUM_SYMBOL in the token stream and collected into `all_values`.
    Alias-qualified columns like T1.col are split into ['t1', '.', 'col'].

    :return: (query_keywords, all_values)
    """
    (query_keywords, all_values) = ([], [])
    # Dead string literal kept from an older regex-based implementation.
    '\n str_1 = re.findall(""[^"]*"", query)\n str_2 = re.findall("\'[^\']*\'", query)\n values = str_1 + str_2\n '
    # 1. String literals: found via sqlparse token types, replaced in place.
    toks = sqlparse.parse(query)[0].flatten()
    values = [t.value for t in toks if ((t.ttype == sqlparse.tokens.Literal.String.Single) or (t.ttype == sqlparse.tokens.Literal.String.Symbol))]
    for val in values:
        all_values.append(val)
        query = query.replace(val.strip(), VALUE_NUM_SYMBOL)
    query_tokenized = query.split()
    # 2. Float literals (sign optional, decimal point required).
    float_nums = re.findall('[-+]?\\d*\\.\\d+', query)
    all_values += [qt for qt in query_tokenized if (qt in float_nums)]
    query_tokenized = [(VALUE_NUM_SYMBOL if (qt in float_nums) else qt) for qt in query_tokenized]
    query = ' '.join(query_tokenized)
    # 3. Integer literals; [^tT] skips table aliases like t1/T2.
    int_nums = [i.strip() for i in re.findall('[^tT]\\d+', query)]
    all_values += [qt for qt in query_tokenized if (qt in int_nums)]
    query_tokenized = [(VALUE_NUM_SYMBOL if (qt in int_nums) else qt) for qt in query_tokenized]
    # 4. Lowercase tokens; split alias-qualified columns around the dot.
    for tok in query_tokenized:
        if ('.' in tok):
            table = re.findall('[Tt]\\d+\\.', tok)
            if (len(table) > 0):
                to = tok.replace('.', ' . ').split()
                to = [t.lower() for t in to if (len(t) > 0)]
                query_keywords.extend(to)
            else:
                query_keywords.append(tok.lower())
        elif (len(tok) > 0):
            query_keywords.append(tok.lower())
    return (query_keywords, all_values)
def reformat_query(query: str) -> str:
    """Normalize whitespace, drop semicolons/tabs, and rewrite 'tN.*' as '*'."""
    cleaned = query.strip().replace(';', '').replace('\t', '')
    cleaned = ' '.join(tok.value for tok in tokenize(cleaned) if tok.ttype != sqlparse.tokens.Whitespace)
    for alias_star in ('t1.*', 't2.*', 't3.*', 'T1.*', 'T2.*', 'T3.*'):
        cleaned = cleaned.replace(alias_star, '*')
    return cleaned
def replace_values(sql: str) -> Tuple[(List[str], Set[str])]:
    """Uppercase keywords, glue alias dots ('T1. x' -> 'T1.x'), strip values."""
    formatted = sqlparse.format(sql, reindent=False, keyword_case='upper')
    formatted = re.sub('(T\\d+\\.)\\s', '\\1', formatted)
    masked_tokens, values = strip_query(formatted)
    return (masked_tokens, set(values))
def extract_query_values(sql: str) -> Tuple[(List[str], Set[str])]:
    """Reformat `sql`, then return (value-masked tokens, set of values)."""
    normalized = reformat_query(query=sql)
    masked_tokens, values = replace_values(normalized)
    return (masked_tokens, values)
def plugin(query_value_replaced: List[str], values_in_order: List[str]) -> str:
    """Fill each VALUE_NUM_SYMBOL slot with the next value, left to right.

    Asserts that the number of slots matches the number of values supplied.
    """
    filled = list(query_value_replaced)
    slots = [i for i, tok in enumerate(filled) if tok == VALUE_NUM_SYMBOL.lower()]
    assert len(slots) == len(values_in_order)
    for slot, value in zip(slots, values_in_order):
        filled[slot] = value
    return ' '.join(filled)
def plugin_all_permutations(query_value_replaced: List[str], values: Set[str]) -> Iterator[str]:
    """Yield the query with every possible assignment of `values` to slots.

    Each of the num_slots positions independently takes any value, so
    len(values) ** num_slots variants are produced.
    """
    num_slots = sum(1 for tok in query_value_replaced if tok == VALUE_NUM_SYMBOL.lower())
    pool = list(values)
    # product(pool, repeat=n) == product(*[list(values)] * n) from the original.
    for combo in itertools.product(pool, repeat=num_slots):
        yield plugin(query_value_replaced, list(combo))
def get_all_preds_for_execution(gold: str, pred: str) -> Tuple[(int, Iterator[str])]:
    """Count and lazily generate all value-plugged variants of `pred`.

    Values are harvested from `gold`; the count is len(gold_values) raised
    to the number of value slots in the masked prediction.
    """
    _, gold_values = extract_query_values(gold)
    pred_masked, _ = extract_query_values(pred)
    num_slots = sum(1 for tok in pred_masked if tok == VALUE_NUM_SYMBOL.lower())
    num_alternatives = len(gold_values) ** num_slots
    return (num_alternatives, plugin_all_permutations(pred_masked, gold_values))
def remove_distinct(s):
    """Drop every DISTINCT token (case-insensitive) from the SQL string."""
    kept = []
    for tok in sqlparse.parse(s)[0].flatten():
        if tok.value.lower() != 'distinct':
            kept.append(tok.value)
    return ''.join(kept)
def extract_all_comparison_from_node(node: Token) -> List[Comparison]:
    """Collect every sqlparse Comparison in the subtree rooted at `node`.

    Uses exact type equality (not isinstance), matching the original.
    """
    found = []
    if hasattr(node, 'tokens'):
        for child in node.tokens:
            found.extend(extract_all_comparison_from_node(child))
    if type(node) == Comparison:
        found.append(node)
    return found
def extract_all_comparison(query: str) -> List[Comparison]:
    """Parse `query` and return all Comparison nodes of its first statement."""
    parsed = sqlparse.parse(query)[0]
    return extract_all_comparison_from_node(parsed)
def extract_toks_from_comparison(comparison_node: Comparison) -> List[Token]:
    """Return the comparison's tokens with whitespace tokens filtered out."""
    meaningful = []
    for tok in comparison_node.tokens:
        if tok.ttype != Whitespace:
            meaningful.append(tok)
    return meaningful
def extract_info_from_comparison(comparison_node: Comparison) -> Dict[(str, Any)]:
    """Decompose a sqlparse Comparison into structured parts.

    Always returns {'left', 'op', 'right'}; when the shape is recognized
    (identifier on the left, plain token/identifier value on the right) it
    also adds 'table_col' = (table_or_None, COLUMN) and 'val' = cleaned value.
    """
    tokens = extract_toks_from_comparison(comparison_node)
    (left, op, right) = tokens
    returned_dict = {'left': left, 'op': op.value, 'right': right}
    # Left side must be a column reference, otherwise give up early.
    if (type(left) != Identifier):
        return returned_dict
    table = None
    # Three sub-tokens means 'qualifier.column'; an alias like t1/T2 is not
    # a real table name, so it is left as None.
    if ((len(left.tokens) == 3) and (re.match('^[tT][0-9]$', left.tokens[0].value) is None)):
        table = left.tokens[0].value.lower()
    col = left.tokens[(- 1)].value
    # Right side: either a single-token Identifier or a bare literal token.
    if (type(right) == Identifier):
        if ((len(right.tokens) == 1) and (type(right.tokens[0]) == sqlparse.sql.Token)):
            right_val = right.tokens[0].value
        else:
            return returned_dict
    elif (type(right) == sqlparse.sql.Token):
        right_val = right.value
    else:
        return returned_dict
    (returned_dict['table_col'], returned_dict['val']) = ((table, col.upper()), process_str_value(right_val))
    return returned_dict
def extract_all_comparison_from_query(query: str) -> List[Dict[(str, Any)]]:
    """Extract structured info for every comparison found in `query`."""
    return [extract_info_from_comparison(node) for node in extract_all_comparison(query)]
def extract_typed_value_in_comparison_from_query(query: str) -> List[Tuple[(Tuple[(Union[(str, None)], str)], str)]]:
    """Return ((table_or_None, COLUMN), value) pairs from `query`.

    Pairs come from ordinary comparisons plus both endpoints of any
    BETWEEN ... AND ... clause (matched case-insensitively).
    """
    comparisons = extract_all_comparison_from_query(query)
    typed_values = [(info['table_col'], info['val']) for info in comparisons if 'table_col' in info]
    between_pattern = '(?:([^\\.\\s]*)\\.)?([^\\.\\s]+) between ([^\\s;]+) and ([^\\s;]+)'
    for table, col, low, high in re.findall(between_pattern, query, re.IGNORECASE):
        table = table.lower() if table else None
        col = col.upper()
        typed_values.append(((table, col), low))
        typed_values.append(((table, col), high))
    return typed_values
def process_str_value(v: str) -> str:
    """Strip one leading/trailing quote char and collapse doubled quotes."""
    if v and v[0] in QUOTE_CHARS:
        v = v[1:]
    if v and v[-1] in QUOTE_CHARS:
        v = v[:-1]
    for quote in QUOTE_CHARS:
        v = v.replace(quote + quote, quote)
    return v
class Schema():
    """Simple schema which maps table&column to a unique identifier."""

    def __init__(self, schema):
        """:param schema: dict mapping table name -> list of column names"""
        self._schema = schema
        self._idMap = self._map(self._schema)

    @property
    def schema(self):
        """The raw table -> columns dict this Schema was built from."""
        return self._schema

    @property
    def idMap(self):
        """Lowercased 'table.col'/'table'/'*' -> '__table.col__'-style ids."""
        return self._idMap

    def _map(self, schema):
        """Build the identifier map.

        CLEANUP: the original kept an unused counter named `id` (shadowing
        the builtin); it never affected the output and has been removed.
        """
        idMap = {'*': '__all__'}
        for key, vals in schema.items():
            for val in vals:
                idMap[key.lower() + '.' + val.lower()] = '__' + key.lower() + '.' + val.lower() + '__'
        for key in schema:
            idMap[key.lower()] = '__' + key.lower() + '__'
        return idMap
def get_schema(db):
    """
    Get database's schema, which is a dict with table name as key
    and list of column names as value
    :param db: database path
    :return: schema dict
    """
    schema = {}
    conn = sqlite3.connect(db)
    try:
        cursor = conn.cursor()
        cursor.execute("SELECT name FROM sqlite_master WHERE type='table';")
        tables = [str(table[0].lower()) for table in cursor.fetchall()]
        for table in tables:
            cursor.execute('PRAGMA table_info({})'.format(table))
            # col[1] is the column name in PRAGMA table_info output.
            schema[table] = [str(col[1].lower()) for col in cursor.fetchall()]
    finally:
        # BUGFIX: the original leaked the sqlite connection (never closed).
        conn.close()
    return schema
def get_schema_from_json(fpath):
    """Load a schema dict (lowercased table -> [lowercased column names])
    from a JSON file of entries with 'table' and 'col_data' fields."""
    with open(fpath) as f:
        data = json.load(f)
    return {
        str(entry['table'].lower()): [str(col['column_name'].lower()) for col in entry['col_data']]
        for entry in data
    }
def tokenize(string):
    """Tokenize a SQL string, protecting quoted literals and gluing '!=' etc.

    Quoted spans are temporarily replaced by placeholder keys so
    word_tokenize cannot split them, then restored verbatim.  NOTE: this
    shadows the sqlparse-based `tokenize(query)` defined earlier in the file.
    """
    string = str(string)
    string = string.replace("'", '"')
    quote_idxs = [idx for (idx, char) in enumerate(string) if (char == '"')]
    assert ((len(quote_idxs) % 2) == 0), 'Unexpected quote'
    vals = {}
    # Walk quote pairs right-to-left so earlier indices stay valid while
    # the string is rewritten in place.
    for i in range((len(quote_idxs) - 1), (- 1), (- 2)):
        qidx1 = quote_idxs[(i - 1)]
        qidx2 = quote_idxs[i]
        val = string[qidx1:(qidx2 + 1)]
        key = '__val_{}_{}__'.format(qidx1, qidx2)
        string = ((string[:qidx1] + key) + string[(qidx2 + 1):])
        vals[key] = val
    toks = [word.lower() for word in word_tokenize(string)]
    # Restore the original quoted literals in place of their placeholders.
    for i in range(len(toks)):
        if (toks[i] in vals):
            toks[i] = vals[toks[i]]
    # Merge ['!'|'>'|'<', '='] into a single two-character operator token;
    # processed right-to-left so earlier indices are unaffected by splices.
    eq_idxs = [idx for (idx, tok) in enumerate(toks) if (tok == '=')]
    eq_idxs.reverse()
    prefix = ('!', '>', '<')
    for eq_idx in eq_idxs:
        pre_tok = toks[(eq_idx - 1)]
        if (pre_tok in prefix):
            toks = ((toks[:(eq_idx - 1)] + [(pre_tok + '=')]) + toks[(eq_idx + 1):])
    return toks
def scan_alias(toks):
    """Map each alias (token after 'as') to its table (token before 'as')."""
    alias = {}
    for idx, tok in enumerate(toks):
        if tok == 'as':
            alias[toks[idx + 1]] = toks[idx - 1]
    return alias
def get_tables_with_alias(schema, toks):
    """Alias map extended so every real table name also maps to itself.

    Raises AssertionError when an alias collides with a table name.
    """
    tables = scan_alias(toks)
    for key in schema:
        assert key not in tables, 'Alias {} has the same name in table'.format(key)
        tables[key] = key
    return tables