code stringlengths 17 6.64M |
|---|
def expand_snippets(sequence, snippets):
    """Replace snippet references in a sequence with the snippets' tokens.

    Inputs:
        sequence (list of str): Query containing snippet references.
        snippets (list of Snippet): List of available snippets.

    Returns:
        list of str representing the expanded sequence.
    """
    # Index the snippets by name, enforcing that names are unique.
    by_name = {}
    for snip in snippets:
        assert snip.name not in by_name
        by_name[snip.name] = snip
    expanded = []
    for tok in sequence:
        if tok in by_name:
            expanded.extend(by_name[tok].sequence)
        else:
            # Anything left unexpanded must not look like a snippet token.
            assert not is_snippet(tok)
            expanded.append(tok)
    return expanded
|
def snippet_index(token):
    """Return the integer index encoded at the end of a snippet token.

    Inputs:
        token (str): The snippet token to decode.

    Returns:
        int, the index of the snippet.
    """
    assert is_snippet(token)
    parts = token.split('_')
    return int(parts[-1])
|
class Snippet():
    """Contains a snippet: a named token subsequence of a SQL query.

    Attributes:
        sequence (list of str): The snippet's tokens.
        startpos (int): Index in sql where the snippet begins.
        endpos (int): Index in sql one past the snippet's last token.
        sql (list of str): The full query the snippet was extracted from.
        age (int): Number of turns the snippet has been alive.
        index (int): Unused counter (kept for backward compatibility).
        name (str): Identifier assigned by assign_id; '' until assigned.
        embedding: Embedding set by set_embedding; None until set.
    """
    def __init__(self, sequence, startpos, sql, age=0):
        self.sequence = sequence
        self.startpos = startpos
        self.sql = sql
        self.age = age
        self.index = 0
        self.name = ''
        self.embedding = None
        self.endpos = (self.startpos + len(self.sequence))
        # The snippet must end strictly before the end of the query, and its
        # tokens must match the query at the claimed positions.
        assert (self.endpos < len(self.sql)), (((('End position of snippet is ' + str(self.endpos)) + ' which is greater than length of SQL (') + str(len(self.sql))) + ')')
        assert (self.sequence == self.sql[self.startpos:self.endpos]), (((('Value of snippet (' + ' '.join(self.sequence)) + ') is not the same as SQL at the same positions (') + ' '.join(self.sql[self.startpos:self.endpos])) + ')')
    def __str__(self):
        return ((((self.name + '\t') + str(self.age)) + '\t') + ' '.join(self.sequence))
    def __len__(self):
        return len(self.sequence)
    def increase_age(self):
        """ Ages a snippet by one. """
        # Bug fix: this previously incremented self.index, which nothing
        # reads, leaving self.age frozen even though age is what callers
        # (e.g. snippet carry-over in get_subtrees) consume.
        self.age += 1
    def assign_id(self, number):
        """ Assigns the name of the snippet to be the prefix + the number. """
        self.name = (SNIPPET_PREFIX + str(number))
    def set_embedding(self, embedding):
        """ Sets the embedding of the snippet.

        Inputs:
            embedding (dy.Expression)
        """
        self.embedding = embedding
|
def strip_whitespace_front(token_list):
    """Drop leading whitespace and punctuation tokens from a sqlparse token list."""
    kept = []
    seen_content = False
    for tok in token_list:
        is_filler = tok.is_whitespace or (tok.ttype == token_types.Punctuation)
        if seen_content or not is_filler:
            seen_content = True
            kept.append(tok)
    return kept
|
def strip_whitespace(token_list):
    """Strip whitespace/punctuation tokens from both ends of a token list."""
    front_stripped = strip_whitespace_front(token_list)
    # Strip the back by reversing, stripping the front, and reversing again.
    back_stripped = strip_whitespace_front(front_stripped[::-1])
    return back_stripped[::-1]
|
def token_list_to_seq(token_list):
    """Flatten a sqlparse token list into a list of non-empty token strings.

    Strips surrounding whitespace/punctuation tokens and removes one pair of
    enclosing parentheses if the whole sequence is parenthesized.
    """
    stripped = strip_whitespace(token_list)
    seq = []
    for i, tok in enumerate(sqlparse.sql.TokenList(stripped).flatten()):
        text = str(tok).strip()
        if text:
            seq.append(text)
    if seq:
        if seq[0] == '(' and seq[-1] == ')':
            seq = seq[1:-1]
    return seq
|
def find_subtrees(sequence, current_subtrees, where_parent=False, keep_conj_subtrees=False):
    """Recursively walk a sqlparse parse tree, collecting snippet-worthy
    subtrees (as token-string lists) into current_subtrees.

    Inputs:
        sequence: A sqlparse token (possibly a group) to traverse.
        current_subtrees (list of list of str): Accumulator, mutated in place;
            duplicates are not added.
        where_parent (bool): Whether the immediate parent was a WHERE clause.
        keep_conj_subtrees (bool): Whether to also record the individual
            conjuncts of AND/OR expressions at or below a WHERE clause.
    """
    # A WHERE clause's body (everything after the WHERE keyword) is a snippet.
    if where_parent:
        seq = token_list_to_seq(sequence.tokens[1:])
        if ((len(seq) > 0) and (seq not in current_subtrees)):
            current_subtrees.append(seq)
    if sequence.is_group:
        if keep_conj_subtrees:
            subtokens = strip_whitespace(sequence.tokens)
            # Only split into conjuncts when an AND/OR is actually present.
            has_and = False
            for (i, token) in enumerate(subtokens):
                if ((token.value == 'OR') or (token.value == 'AND')):
                    has_and = True
                    break
            if has_and:
                and_subtrees = []
                current_subtree = []
                # Split on OR, and on AND unless the AND appears to belong to a
                # BETWEEN ... AND ... (heuristic: BETWEEN four tokens earlier).
                for (i, token) in enumerate(subtokens):
                    if ((token.value == 'OR') or ((token.value == 'AND') and ((i - 4) >= 0) and ((i - 4) < len(subtokens)) and (subtokens[(i - 4)].value != 'BETWEEN'))):
                        and_subtrees.append(current_subtree)
                        current_subtree = []
                    else:
                        current_subtree.append(token)
                and_subtrees.append(current_subtree)
                for subtree in and_subtrees:
                    seq = token_list_to_seq(subtree)
                    # Drop a leading WHERE keyword from the conjunct.
                    if ((len(seq) > 0) and (seq[0] == 'WHERE')):
                        seq = seq[1:]
                    if (seq not in current_subtrees):
                        current_subtrees.append(seq)
        # Record SELECT ... FROM <first table> fragments whose head token is an
        # "interesting" select, then recurse into every child token.
        in_select = False
        select_toks = []
        for (i, token) in enumerate(sequence.tokens):
            is_where = isinstance(token, sql_types.Where)
            if (token.value == 'SELECT'):
                in_select = True
            elif in_select:
                select_toks.append(token)
                if (token.value == 'FROM'):
                    in_select = False
                    seq = []
                    # tokens[i + 2] is presumably the table expression after
                    # FROM (skipping whitespace) — TODO confirm against sqlparse.
                    if (len(sequence.tokens) > (i + 2)):
                        seq = token_list_to_seq((select_toks + [sequence.tokens[(i + 2)]]))
                    if ((seq not in current_subtrees) and (len(seq) > 0) and (seq[0] in interesting_selects)):
                        current_subtrees.append(seq)
                    select_toks = []
            find_subtrees(token, current_subtrees, is_where, (where_parent or keep_conj_subtrees))
|
def get_subtrees(sql, oldsnippets=None):
    """Extract snippet candidates from a tokenized SQL query via its parse tree.

    Inputs:
        sql (list): Tokenized SQL query.
        oldsnippets (list of Snippet, optional): Snippets from a previous turn;
            a recurring subtree inherits (previous age + 1).

    Returns:
        list of Snippet objects anchored at their positions in sql.
    """
    # Bug fix: avoid the shared mutable default argument (was oldsnippets=[]).
    if oldsnippets is None:
        oldsnippets = []
    parsed = sqlparse.parse(' '.join(sql))[0]
    subtrees = []
    find_subtrees(parsed, subtrees)
    final_subtrees = []
    for subtree in subtrees:
        if subtree not in ignored_subtrees:
            final_version = []
            keep = True
            parens_counts = 0
            # Merge '.'-separated qualified names back into single tokens
            # (['t', '.', 'c'] -> ['t.c']) and track parenthesis balance.
            for i, token in enumerate(subtree):
                if token == '.':
                    newtoken = final_version[-1] + '.' + subtree[i + 1]
                    final_version = final_version[:-1] + [newtoken]
                    keep = False
                elif keep:
                    final_version.append(token)
                else:
                    keep = True
                if token == '(':
                    parens_counts -= 1
                elif token == ')':
                    parens_counts += 1
            # Only keep subtrees whose parentheses are balanced.
            if parens_counts == 0:
                final_subtrees.append(final_version)
    snippets = []
    sql = [str(tok) for tok in sql]
    for subtree in final_subtrees:
        # Anchor the snippet at the LAST occurrence of the subtree in the query.
        startpos = -1
        for i in range(len(sql) - len(subtree) + 1):
            if sql[i:i + len(subtree)] == subtree:
                startpos = i
        if startpos >= 0 and startpos + len(subtree) < len(sql):
            age = 0
            for prevsnippet in oldsnippets:
                if prevsnippet.sequence == subtree:
                    age = prevsnippet.age + 1
            snippet = Snippet(subtree, startpos, sql, age=age)
            snippets.append(snippet)
    return snippets
|
def get_subtrees_simple(sql, oldsnippets=None):
    """Extract snippet candidates by splitting sqlparse's reindented output on lines.

    Inputs:
        sql (list): Tokenized SQL query.
        oldsnippets (list of Snippet, optional): Snippets from a previous turn;
            a recurring subtree inherits (previous age + 1).

    Returns:
        list of Snippet objects anchored at their positions in sql.
    """
    # Bug fix: avoid the shared mutable default argument (was oldsnippets=[]).
    if oldsnippets is None:
        oldsnippets = []
    sql_string = ' '.join(sql)
    format_sql = sqlparse.format(sql_string, reindent=True)
    subtrees = []
    for sub_sql in format_sql.split('\n'):
        # Pad parentheses and commas so whitespace splitting yields clean tokens.
        sub_sql = sub_sql.replace('(', ' ( ').replace(')', ' ) ').replace(',', ' , ')
        subtree = sub_sql.strip().split()
        if len(subtree) > 1:
            subtrees.append(subtree)
    final_subtrees = subtrees
    snippets = []
    sql = [str(tok) for tok in sql]
    for subtree in final_subtrees:
        # Anchor the snippet at the LAST occurrence of the subtree in the query.
        startpos = -1
        for i in range(len(sql) - len(subtree) + 1):
            if sql[i:i + len(subtree)] == subtree:
                startpos = i
        if startpos >= 0 and startpos + len(subtree) <= len(sql):
            age = 0
            for prevsnippet in oldsnippets:
                if prevsnippet.sequence == subtree:
                    age = prevsnippet.age + 1
            # Append ';' so a snippet reaching the final token still satisfies
            # Snippet's endpos < len(sql) assertion.
            new_sql = sql + [';']
            snippet = Snippet(subtree, startpos, new_sql, age=age)
            snippets.append(snippet)
    return snippets
|
def get_all_in_parens(sequence):
    """Recursively collect parenthesized subsequences of a token list.

    Returns a list of token lists: for a fully-parenthesized sequence, the
    inner content plus its nested groups; otherwise the nested groups found
    inside each top-level parenthesized span. A trailing ';' is ignored.
    """
    if sequence[-1] == ';':
        sequence = sequence[:-1]
    if '(' not in sequence:
        return []
    if sequence[0] == '(' and sequence[-1] == ')':
        inner = sequence[1:-1]
        return [inner] + get_all_in_parens(inner)
    # Scan for top-level '(' ... ')' spans, then recurse into each of them.
    spans = []
    buffer = []
    depth = 0
    inside = False
    for tok in sequence:
        if inside:
            buffer.append(tok)
            if tok == ')':
                depth -= 1
                if depth == 0:
                    inside = False
                    spans.append(buffer)
                    buffer = []
        elif tok == '(':
            inside = True
            buffer.append(tok)
        if tok == '(':
            depth += 1
    collected = []
    for span in spans:
        collected.extend(get_all_in_parens(span))
    return collected
|
def split_by_conj(sequence):
    """Split a token sequence at the first top-level conjunction.

    Returns a list containing the tokens preceding the first depth-0
    conjunction, or an empty list if no such conjunction is found.

    NOTE(review): the `break` stops after the FIRST conjunction and the tokens
    following it are discarded (current_seq is never appended after the loop).
    This looks deliberate for its debugging caller but should be confirmed
    before reuse.
    """
    num_parens = 0
    current_seq = []
    subsequences = []
    for token in sequence:
        # Only conjunctions outside any parentheses split the sequence.
        if (num_parens == 0):
            if (token in conjunctions):
                subsequences.append(current_seq)
                current_seq = []
                break
        current_seq.append(token)
        if (token == '('):
            num_parens += 1
        elif (token == ')'):
            num_parens -= 1
        # Parentheses must never close more than they open.
        assert (num_parens >= 0)
    return subsequences
|
def get_sql_snippets(sequence):
    """Debugging helper: print every candidate snippet subsequence of a SQL
    token sequence (parenthesized groups and their conjuncts), then terminate.

    NOTE(review): this calls exit() unconditionally, so it is only suitable
    for one-off inspection, never in a pipeline.
    """
    all_in_parens = get_all_in_parens(sequence)
    all_subseq = []
    for seq in all_in_parens:
        subsequences = split_by_conj(seq)
        all_subseq.append(seq)
        all_subseq.extend(subsequences)
    # Print each candidate with its index for manual inspection.
    for (i, seq) in enumerate(all_subseq):
        print(((str(i) + '\t') + ' '.join(seq)))
    exit()
|
def add_snippets_to_query(snippets, ignored_entities, query, prob_align=1.0):
    """Replace occurrences of snippet token sequences in a query with the
    snippets' names.

    Inputs:
        snippets (list of Snippet): Snippets to substitute into the query.
        ignored_entities (list): Entity token sequences; a snippet containing
            any of them is never substituted.
        query (list of str): The query tokens to rewrite.
        prob_align (float): Probability of performing each substitution.

    Returns:
        list of str: A rewritten copy of the query (the input is not mutated).
    """
    query_copy = copy.copy(query)
    # Substitute longest snippets first so shorter ones cannot shadow them.
    sorted_snippets = sorted(snippets, key=(lambda s: len(s.sequence)))[::(- 1)]
    for snippet in sorted_snippets:
        ignore = False
        snippet_seq = snippet.sequence
        # Skip snippets that contain an ignored entity as a subsequence.
        for entity in ignored_entities:
            ignore = (ignore or util.subsequence(entity, snippet_seq))
        if (not ignore):
            snippet_length = len(snippet_seq)
            # The range is fixed before any replacement shrinks query_copy;
            # later out-of-range slices are shorter than snippet_seq and
            # therefore never compare equal.
            for start_idx in range(((len(query_copy) - snippet_length) + 1)):
                if (query_copy[start_idx:(start_idx + snippet_length)] == snippet_seq):
                    align = (random.random() < prob_align)
                    if align:
                        prev_length = len(query_copy)
                        # Overwrite the first matched token with the snippet
                        # name, then splice out the rest of the match.
                        query_copy[start_idx] = snippet.name
                        query_copy = (query_copy[:(start_idx + 1)] + query_copy[(start_idx + snippet_length):])
                        assert (len(query_copy) == (prev_length - (snippet_length - 1)))
    return query_copy
|
def execution_results(query, username, password, timeout=3):
    """Execute a query against the local MySQL `atis3` database with a timeout.

    Inputs:
        query (str): SQL query to execute.
        username (str): MySQL username.
        password (str): MySQL password.
        timeout (int): Seconds allowed before SIGALRM aborts the query.

    Returns:
        (syntactic, semantic, table): syntactic is False on a programming
        error, semantic is False on a programming or internal error, and
        table is the sorted result rows (empty on failure or timeout).

    NOTE(review): relies on SIGALRM, so this only works on Unix main threads.
    """
    connection = pymysql.connect(user=username, password=password)
    class TimeoutException(Exception):
        pass
    def timeout_handler(signum, frame):
        raise TimeoutException
    signal.signal(signal.SIGALRM, timeout_handler)
    syntactic = True
    semantic = True
    table = []
    with connection.cursor() as cursor:
        # Arm the alarm; any handler path below disarms it again.
        signal.alarm(timeout)
        try:
            cursor.execute("SET sql_mode='IGNORE_SPACE';")
            cursor.execute('use atis3;')
            cursor.execute(query)
            table = cursor.fetchall()
            cursor.close()
        except TimeoutException:
            signal.alarm(0)
            cursor.close()
        except pymysql.err.ProgrammingError:
            # Query did not parse: neither syntactically nor semantically valid.
            syntactic = False
            semantic = False
            cursor.close()
        except pymysql.err.InternalError:
            # Query parsed but failed at runtime.
            semantic = False
            cursor.close()
        except Exception as e:
            # Catch-all: silently treat any other failure as an empty result.
            # NOTE(review): `e` is unused and alarm(0) is called twice here.
            signal.alarm(0)
            signal.alarm(0)
            cursor.close()
    # Make sure no alarm outlives this call.
    signal.alarm(0)
    connection.close()
    return (syntactic, semantic, sorted(table))
|
def executable(query, username, password, timeout=2):
    """Return whether the query runs without semantic (runtime) errors."""
    results = execution_results(query, username, password, timeout)
    # execution_results returns (syntactic, semantic, table).
    return results[1]
|
def fix_parentheses(sequence):
    """Append any missing closing parentheses just before the final token."""
    opens = sequence.count('(')
    closes = sequence.count(')')
    if closes < opens:
        padding = [')'] * (opens - closes)
        return sequence[:-1] + padding + [sequence[-1]]
    return sequence
|
def nl_tokenize(string):
    """Tokenizes a natural language string into tokens.

    Inputs:
        string: the string to tokenize.
    Outputs:
        a list of tokens.

    Assumes data is space-separated (this is true of ZC07 data in ATIS2/3).
    """
    return nltk.word_tokenize(string)
|
def sql_tokenize(string):
    """Tokenizes a SQL statement into tokens.

    Inputs:
        string: string to tokenize.

    Outputs:
        a list of tokens, with '.'-qualified names merged into single tokens.
    """
    tokens = []
    statements = sqlparse.parse(string)
    for statement in statements:
        # Flatten the parse tree into leaf tokens, skipping pure whitespace.
        flat_tokens = sqlparse.sql.TokenList(statement.tokens).flatten()
        for token in flat_tokens:
            strip_token = str(token).strip()
            if (len(strip_token) > 0):
                tokens.append(strip_token)
    newtokens = []
    keep = True
    # Merge qualified names split around '.': ['t', '.', 'c'] -> ['t.c'].
    # NOTE(review): a '.' as the last token would raise IndexError on
    # tokens[i + 1] — confirm inputs never end with a bare dot.
    for (i, token) in enumerate(tokens):
        if (token == '.'):
            newtoken = ((newtokens[(- 1)] + '.') + tokens[(i + 1)])
            newtokens = (newtokens[:(- 1)] + [newtoken])
            keep = False
        elif keep:
            newtokens.append(token)
        else:
            # Skip the token already consumed by the merge above.
            keep = True
    return newtokens
|
def lambda_tokenize(string):
    """Tokenizes a lambda-calculus statement into tokens.

    Parentheses become their own tokens even when not space-separated.

    Inputs:
        string: a lambda-calculus string.

    Outputs:
        a list of tokens.
    """
    result = []
    for chunk in string.split(' '):
        pieces = []
        current = ''
        for ch in chunk:
            if ch in ('(', ')'):
                # Flush any accumulated text, then emit the paren itself.
                pieces.append(current)
                pieces.append(ch)
                current = ''
            else:
                current += ch
        pieces.append(current)
        result.extend(piece for piece in pieces if piece)
    return result
|
def subsequence(first_sequence, second_sequence):
    """
    Returns whether the first sequence is a contiguous subsequence of the second.

    Inputs:
        first_sequence (list): A sequence.
        second_sequence (list): Another sequence.

    Returns:
        Boolean indicating whether first_sequence occurs contiguously inside
        second_sequence.
    """
    length = len(first_sequence)
    for start in range(len(second_sequence) - length + 1):
        if second_sequence[start:start + length] == first_sequence:
            return True
    return False
|
class Utterance():
    """One user utterance plus its gold SQL, after optional anonymization and
    snippet substitution.

    Attributes (set during construction):
        original_input_seq (list of str): Tokenized raw input.
        input_seq_to_use (list of str): Input after optional anonymization.
        available_snippets (list of Snippet): Snippets usable at this turn.
        keep (bool): Whether the example has both an input and an output.
        original_gold_query / anonymized_gold_query / gold_query_to_use:
            Gold SQL at the successive processing stages.
    """
    def process_input_seq(self, anonymize, anonymizer, anon_tok_to_ent):
        """Set input_seq_to_use, anonymizing the input when requested."""
        assert ((not anon_tok_to_ent) or anonymize)
        assert ((not anonymize) or anonymizer)
        if anonymize:
            assert anonymizer
            self.input_seq_to_use = anonymizer.anonymize(self.original_input_seq, anon_tok_to_ent, ANON_INPUT_KEY, add_new_anon_toks=True)
        else:
            self.input_seq_to_use = self.original_input_seq
    def process_gold_seq(self, output_sequences, nl_to_sql_dict, available_snippets, anonymize, anonymizer, anon_tok_to_ent):
        """Pick the shortest gold query, anonymize it, and substitute snippets."""
        # Entities present in the input must never be hidden inside a snippet.
        entities_in_input = [[tok] for tok in self.input_seq_to_use if (tok in anon_tok_to_ent)]
        entities_in_input.extend(nl_to_sql_dict.get_sql_entities(self.input_seq_to_use))
        # Use the shortest gold query among the provided alternatives.
        shortest_gold_and_results = min(output_sequences, key=(lambda x: len(x[0])))
        self.original_gold_query = shortest_gold_and_results[0]
        self.gold_sql_results = shortest_gold_and_results[1]
        self.contained_entities = entities_in_input
        self.all_gold_queries = output_sequences
        self.anonymized_gold_query = self.original_gold_query
        if anonymize:
            self.anonymized_gold_query = anonymizer.anonymize(self.original_gold_query, anon_tok_to_ent, OUTPUT_KEY, add_new_anon_toks=False)
        self.gold_query_to_use = sql_util.add_snippets_to_query(available_snippets, entities_in_input, self.anonymized_gold_query)
    def __init__(self, example, available_snippets, nl_to_sql_dict, params, anon_tok_to_ent=None, anonymizer=None):
        # Bug fix: the anonymization map previously defaulted to a shared
        # mutable dict ({}), so tokens added for one utterance leaked into
        # every later utterance relying on the default.
        if anon_tok_to_ent is None:
            anon_tok_to_ent = {}
        output_sequences = example[OUTPUT_KEY]
        self.original_input_seq = tokenizers.nl_tokenize(example[params.input_key])
        self.available_snippets = available_snippets
        self.keep = False
        if ((len(output_sequences) > 0) and (len(self.original_input_seq) > 0)):
            self.keep = True
        if ((len(output_sequences) == 0) or (len(self.original_input_seq) == 0)):
            return
        self.process_input_seq(params.anonymize, anonymizer, anon_tok_to_ent)
        self.process_gold_seq(output_sequences, nl_to_sql_dict, self.available_snippets, params.anonymize, anonymizer, anon_tok_to_ent)
    def __str__(self):
        string = (('Original input: ' + ' '.join(self.original_input_seq)) + '\n')
        string += (('Modified input: ' + ' '.join(self.input_seq_to_use)) + '\n')
        string += (('Original output: ' + ' '.join(self.original_gold_query)) + '\n')
        string += (('Modified output: ' + ' '.join(self.gold_query_to_use)) + '\n')
        string += 'Snippets:\n'
        for snippet in self.available_snippets:
            string += (str(snippet) + '\n')
        return string
    def length_valid(self, input_limit, output_limit):
        """Return whether input and gold output are within the length limits."""
        return ((len(self.input_seq_to_use) < input_limit) and (len(self.gold_query_to_use) < output_limit))
|
class Vocabulary():
    """Vocabulary class: stores information about words in a corpus.

    Members:
        functional_types (list of str): Functional vocabulary words, such as EOS.
        max_size (int): The maximum size of vocabulary to keep.
        min_occur (int): The minimum number of times a word should occur to keep it.
        id_to_token (list of str): Ordered list of word types.
        token_to_id (dict str->int): Maps from each unique word type to its index.
    """
    def get_vocab(self, sequences, ignore_fn):
        """Gets vocabulary from a list of sequences.

        Inputs:
            sequences (list of list of str): Sequences from which to compute
                the vocabulary.
            ignore_fn (lambda str: bool): Function used to tell whether to
                ignore a token during computation of the vocabulary.

        Returns:
            list of str, the kept word types (functional types first, then
            most-frequent-first), truncated to max_size when max_size >= 0.
        """
        type_counts = {}
        for sequence in sequences:
            for token in sequence:
                if not ignore_fn(token):
                    type_counts[token] = type_counts.get(token, 0) + 1
        # Sort alphabetically first so count ties break deterministically,
        # then by count; reverse for most-frequent-first.
        sorted_type_counts = sorted(sorted(type_counts.items()), key=operator.itemgetter(1))[::-1]
        sorted_types = [typecount[0] for typecount in sorted_type_counts if typecount[1] >= self.min_occur]
        # Guard against functional_types=None (the constructor's default),
        # which previously raised TypeError on list concatenation.
        sorted_types = (self.functional_types or []) + sorted_types
        if self.max_size >= 0:
            # Bug fix: this used max(self.max_size, len(sorted_types)), which
            # never truncated; the documented intent is to cap at max_size.
            vocab = sorted_types[:self.max_size]
        else:
            vocab = sorted_types
        return vocab
    def __init__(self, sequences, filename, functional_types=None, max_size=(- 1), min_occur=0, ignore_fn=(lambda x: False)):
        self.functional_types = functional_types
        self.max_size = max_size
        self.min_occur = min_occur
        vocab = self.get_vocab(sequences, ignore_fn)
        self.id_to_token = []
        self.token_to_id = {}
        for i, word_type in enumerate(vocab):
            self.id_to_token.append(word_type)
            self.token_to_id[word_type] = i
        # Cross-check against a previously cached vocabulary, or cache this one.
        if os.path.exists(filename):
            with open(filename, 'rb') as infile:
                loaded_vocab = pickle.load(infile)
            print('Loaded vocabulary from ' + str(filename))
            if (loaded_vocab.id_to_token != self.id_to_token) or (loaded_vocab.token_to_id != self.token_to_id):
                print('Loaded vocabulary is different than generated vocabulary.')
        else:
            print('Writing vocabulary to ' + str(filename))
            with open(filename, 'wb') as outfile:
                pickle.dump(self, outfile)
    def __len__(self):
        return len(self.id_to_token)
|
def condition_has_or(conds):
    """Return True if any conjunction (odd index) in the condition list is 'or'."""
    joiners = conds[1::2]
    return 'or' in joiners
|
def condition_has_like(conds):
    """Return True if any cond_unit (even index) uses the 'like' operator."""
    like_id = WHERE_OPS.index('like')
    op_ids = [cond_unit[1] for cond_unit in conds[::2]]
    return like_id in op_ids
|
def condition_has_sql(conds):
    """Return True if any cond_unit's value is a nested SQL dict."""
    for cond_unit in conds[::2]:
        for val in (cond_unit[3], cond_unit[4]):
            # Exact dict check (not isinstance) mirrors the parser's output.
            if val is not None and type(val) is dict:
                return True
    return False
|
def val_has_op(val_unit):
    """Return True if the val_unit applies a unit operation other than 'none'."""
    none_id = UNIT_OPS.index('none')
    return val_unit[0] != none_id
|
def has_agg(unit):
    """Return True if the unit carries an aggregation operator other than 'none'."""
    none_id = AGG_OPS.index('none')
    return unit[0] != none_id
|
def accuracy(count, total):
    """All-or-nothing accuracy: 1 when count equals total, else 0."""
    return 1 if count == total else 0
|
def recall(count, total):
    """All-or-nothing recall: 1 when count equals total, else 0."""
    return 1 if count == total else 0
|
def F1(acc, rec):
    """Harmonic mean of accuracy and recall; 0 when both are 0."""
    denominator = acc + rec
    if denominator == 0:
        return 0
    return 2.0 * acc * rec / denominator
|
def get_scores(count, pred_total, label_total):
    """All-or-nothing (acc, rec, f1): (1, 1, 1) only when prediction and label
    have the same size and every predicted item matched; (0, 0, 0) otherwise."""
    if pred_total == label_total == count:
        return (1, 1, 1)
    return (0, 0, 0)
|
def eval_sel(pred, label):
    """Score the SELECT clause of a predicted parse against the gold parse.

    Returns:
        (label_total, pred_total, cnt, cnt_wo_agg): clause sizes, matched
        select units counting aggregation, and matched units ignoring
        aggregation.
    """
    pred_sel = pred['select'][1]
    # Bug fix: work on a copy — the original aliased label['select'][1] and
    # the .remove() calls below mutated the caller's gold parse.
    label_sel = list(label['select'][1])
    label_wo_agg = [unit[1] for unit in label_sel]
    pred_total = len(pred_sel)
    label_total = len(label_sel)
    cnt = 0
    cnt_wo_agg = 0
    for unit in pred_sel:
        if unit in label_sel:
            cnt += 1
            # Remove so duplicates in the prediction cannot double-count.
            label_sel.remove(unit)
        if unit[1] in label_wo_agg:
            cnt_wo_agg += 1
            label_wo_agg.remove(unit[1])
    return (label_total, pred_total, cnt, cnt_wo_agg)
|
def eval_where(pred, label):
    """Score the WHERE clause of a predicted parse against the gold parse.

    Returns:
        (label_total, pred_total, cnt, cnt_wo_agg): clause sizes, matched
        cond_units counting the operator, and matches on the val_unit only.
    """
    pred_conds = list(pred['where'][::2])
    label_conds = list(label['where'][::2])
    label_wo_agg = [unit[2] for unit in label_conds]
    pred_total = len(pred_conds)
    label_total = len(label_conds)
    cnt = 0
    cnt_wo_agg = 0
    for unit in pred_conds:
        if unit in label_conds:
            cnt += 1
            # Remove so duplicates in the prediction cannot double-count.
            label_conds.remove(unit)
        if unit[2] in label_wo_agg:
            cnt_wo_agg += 1
            label_wo_agg.remove(unit[2])
    return (label_total, pred_total, cnt, cnt_wo_agg)
|
def eval_group(pred, label):
    """Score GROUP BY columns, comparing names with table prefixes stripped."""
    pred_cols = [unit[1] for unit in pred['groupBy']]
    label_cols = [unit[1] for unit in label['groupBy']]
    pred_total = len(pred_cols)
    label_total = len(label_cols)
    cnt = 0
    # Drop any 'table.' qualifier before comparing column names.
    pred_cols = [col.split('.')[1] if '.' in col else col for col in pred_cols]
    label_cols = [col.split('.')[1] if '.' in col else col for col in label_cols]
    for col in pred_cols:
        if col in label_cols:
            cnt += 1
            label_cols.remove(col)
    return (label_total, pred_total, cnt)
|
def eval_having(pred, label):
    """Score HAVING all-or-nothing: both sides must group by identical columns
    and have identical having conditions."""
    pred_total = int(len(pred['groupBy']) > 0)
    label_total = int(len(label['groupBy']) > 0)
    cnt = 0
    pred_cols = [unit[1] for unit in pred['groupBy']]
    label_cols = [unit[1] for unit in label['groupBy']]
    if pred_total == label_total == 1 and pred_cols == label_cols and pred['having'] == label['having']:
        cnt = 1
    return (label_total, pred_total, cnt)
|
def eval_order(pred, label):
    """Score ORDER BY all-or-nothing; LIMIT presence must agree as well."""
    pred_total = 1 if pred['orderBy'] else 0
    label_total = 1 if label['orderBy'] else 0
    # Both limits None, or both present, counts as agreement.
    limits_agree = (pred['limit'] is None) == (label['limit'] is None)
    cnt = 0
    if label['orderBy'] and pred['orderBy'] == label['orderBy'] and limits_agree:
        cnt = 1
    return (label_total, pred_total, cnt)
|
def eval_and_or(pred, label):
    """Score the AND/OR conjunctions in WHERE by set equality."""
    pred_ao = set(pred['where'][1::2])
    label_ao = set(label['where'][1::2])
    if pred_ao == label_ao:
        return (1, 1, 1)
    return (len(pred_ao), len(label_ao), 0)
|
def get_nestedSQL(sql):
    """Collect all nested sub-SQL dicts: condition values from FROM/WHERE/
    HAVING plus any INTERSECT/EXCEPT/UNION clauses."""
    nested = []
    all_conds = sql['from']['conds'][::2] + sql['where'][::2] + sql['having'][::2]
    for cond_unit in all_conds:
        for val in (cond_unit[3], cond_unit[4]):
            if type(val) is dict:
                nested.append(val)
    for clause in ('intersect', 'except', 'union'):
        if sql[clause] is not None:
            nested.append(sql[clause])
    return nested
|
def eval_nested(pred, label):
    """Score a pair of nested SQL dicts; exact-match only when both exist."""
    label_total = 0 if label is None else 1
    pred_total = 0 if pred is None else 1
    cnt = 0
    if pred is not None and label is not None:
        cnt += Evaluator().eval_exact_match(pred, label)
    return (label_total, pred_total, cnt)
|
def eval_IUEN(pred, label):
    """Aggregate nested-SQL scores over INTERSECT, EXCEPT and UNION clauses."""
    per_clause = [eval_nested(pred[kw], label[kw]) for kw in ('intersect', 'except', 'union')]
    label_total = sum(scores[0] for scores in per_clause)
    pred_total = sum(scores[1] for scores in per_clause)
    cnt = sum(scores[2] for scores in per_clause)
    return (label_total, pred_total, cnt)
|
def get_keywords(sql):
    """Return the set of SQL keywords used by a parsed SQL dict."""
    res = set()
    if sql['where']:
        res.add('where')
    if sql['groupBy']:
        res.add('group')
    if sql['having']:
        res.add('having')
    if sql['orderBy']:
        # Record the sort direction as well as the ORDER keyword itself.
        res.add(sql['orderBy'][0])
        res.add('order')
    if sql['limit'] is not None:
        res.add('limit')
    for clause in ('except', 'union', 'intersect'):
        if sql[clause] is not None:
            res.add(clause)
    # Conjunctions sit at odd indices of every condition list.
    ao = sql['from']['conds'][1::2] + sql['where'][1::2] + sql['having'][1::2]
    if any(token == 'or' for token in ao):
        res.add('or')
    cond_units = sql['from']['conds'][::2] + sql['where'][::2] + sql['having'][::2]
    if any(cond_unit[0] for cond_unit in cond_units):
        res.add('not')
    if any(cond_unit[1] == WHERE_OPS.index('in') for cond_unit in cond_units):
        res.add('in')
    if any(cond_unit[1] == WHERE_OPS.index('like') for cond_unit in cond_units):
        res.add('like')
    return res
|
def eval_keywords(pred, label):
    """Count keyword overlap between predicted and gold SQL."""
    pred_keywords = get_keywords(pred)
    label_keywords = get_keywords(label)
    matched = len(pred_keywords & label_keywords)
    return (len(label_keywords), len(pred_keywords), matched)
|
def count_agg(units):
    """Count the units that carry an aggregation operator."""
    return sum(1 for unit in units if has_agg(unit))
|
def count_component1(sql):
    """Count 'component 1' complexity features of a parsed SQL dict:
    WHERE/GROUP BY/ORDER BY/LIMIT clauses, extra joined tables, OR
    conjunctions, and LIKE conditions."""
    count = 0
    if sql['where']:
        count += 1
    if sql['groupBy']:
        count += 1
    if sql['orderBy']:
        count += 1
    if sql['limit'] is not None:
        count += 1
    table_units = sql['from']['table_units']
    if table_units:
        # Each table beyond the first implies a join.
        count += len(table_units) - 1
    ao = sql['from']['conds'][1::2] + sql['where'][1::2] + sql['having'][1::2]
    count += sum(1 for token in ao if token == 'or')
    cond_units = sql['from']['conds'][::2] + sql['where'][::2] + sql['having'][::2]
    count += sum(1 for cond_unit in cond_units if cond_unit[1] == WHERE_OPS.index('like'))
    return count
|
def count_component2(sql):
    """Count nested subqueries ('component 2' complexity)."""
    return len(get_nestedSQL(sql))
|
def count_others(sql):
    """Count 'other' complexity features: more than one aggregation, multiple
    select columns, multiple where conditions, multiple group-by columns."""
    count = 0
    # Total aggregations across SELECT, WHERE, GROUP BY, ORDER BY and HAVING.
    agg_count = count_agg(sql['select'][1])
    agg_count += count_agg(sql['where'][::2])
    agg_count += count_agg(sql['groupBy'])
    if len(sql['orderBy']) > 0:
        order_units = sql['orderBy'][1]
        agg_count += count_agg(
            [unit[1] for unit in order_units if unit[1]]
            + [unit[2] for unit in order_units if unit[2]])
    agg_count += count_agg(sql['having'])
    if agg_count > 1:
        count += 1
    if len(sql['select'][1]) > 1:
        count += 1
    if len(sql['where']) > 1:
        count += 1
    if len(sql['groupBy']) > 1:
        count += 1
    return count
|
class Evaluator():
    """A simple evaluator for Spider-style SQL parses.

    Caches the partial-match scores of the most recent eval_exact_match call
    in self.partial_scores so callers can inspect them afterwards.
    """
    def __init__(self):
        # Set by eval_exact_match (via eval_partial_match); None until then.
        self.partial_scores = None
    def eval_hardness(self, sql):
        """Classify a parsed SQL query as 'easy', 'medium', 'hard' or 'extra'
        based on its component complexity counts."""
        count_comp1_ = count_component1(sql)
        count_comp2_ = count_component2(sql)
        count_others_ = count_others(sql)
        if ((count_comp1_ <= 1) and (count_others_ == 0) and (count_comp2_ == 0)):
            return 'easy'
        elif (((count_others_ <= 2) and (count_comp1_ <= 1) and (count_comp2_ == 0)) or ((count_comp1_ <= 2) and (count_others_ < 2) and (count_comp2_ == 0))):
            return 'medium'
        elif (((count_others_ > 2) and (count_comp1_ <= 2) and (count_comp2_ == 0)) or ((2 < count_comp1_ <= 3) and (count_others_ <= 2) and (count_comp2_ == 0)) or ((count_comp1_ <= 1) and (count_others_ == 0) and (count_comp2_ <= 1))):
            return 'hard'
        else:
            return 'extra'
    def eval_exact_match(self, pred, label):
        """Return 1 if every partial-match component has F1 == 1 and the FROM
        table sets agree; 0 otherwise. Also caches self.partial_scores."""
        partial_scores = self.eval_partial_match(pred, label)
        self.partial_scores = partial_scores
        for (_, score) in partial_scores.items():
            if (score['f1'] != 1):
                return 0
        # All components matched; additionally require identical table sets.
        if (len(label['from']['table_units']) > 0):
            label_tables = sorted(label['from']['table_units'])
            pred_tables = sorted(pred['from']['table_units'])
            return (label_tables == pred_tables)
        return 1
    def eval_partial_match(self, pred, label):
        """Compute per-component (acc, rec, f1, label_total, pred_total)
        scores; returns a dict keyed by component name."""
        res = {}
        (label_total, pred_total, cnt, cnt_wo_agg) = eval_sel(pred, label)
        (acc, rec, f1) = get_scores(cnt, pred_total, label_total)
        res['select'] = {'acc': acc, 'rec': rec, 'f1': f1, 'label_total': label_total, 'pred_total': pred_total}
        (acc, rec, f1) = get_scores(cnt_wo_agg, pred_total, label_total)
        res['select(no AGG)'] = {'acc': acc, 'rec': rec, 'f1': f1, 'label_total': label_total, 'pred_total': pred_total}
        (label_total, pred_total, cnt, cnt_wo_agg) = eval_where(pred, label)
        (acc, rec, f1) = get_scores(cnt, pred_total, label_total)
        res['where'] = {'acc': acc, 'rec': rec, 'f1': f1, 'label_total': label_total, 'pred_total': pred_total}
        (acc, rec, f1) = get_scores(cnt_wo_agg, pred_total, label_total)
        res['where(no OP)'] = {'acc': acc, 'rec': rec, 'f1': f1, 'label_total': label_total, 'pred_total': pred_total}
        (label_total, pred_total, cnt) = eval_group(pred, label)
        (acc, rec, f1) = get_scores(cnt, pred_total, label_total)
        res['group(no Having)'] = {'acc': acc, 'rec': rec, 'f1': f1, 'label_total': label_total, 'pred_total': pred_total}
        (label_total, pred_total, cnt) = eval_having(pred, label)
        (acc, rec, f1) = get_scores(cnt, pred_total, label_total)
        res['group'] = {'acc': acc, 'rec': rec, 'f1': f1, 'label_total': label_total, 'pred_total': pred_total}
        (label_total, pred_total, cnt) = eval_order(pred, label)
        (acc, rec, f1) = get_scores(cnt, pred_total, label_total)
        res['order'] = {'acc': acc, 'rec': rec, 'f1': f1, 'label_total': label_total, 'pred_total': pred_total}
        (label_total, pred_total, cnt) = eval_and_or(pred, label)
        (acc, rec, f1) = get_scores(cnt, pred_total, label_total)
        res['and/or'] = {'acc': acc, 'rec': rec, 'f1': f1, 'label_total': label_total, 'pred_total': pred_total}
        (label_total, pred_total, cnt) = eval_IUEN(pred, label)
        (acc, rec, f1) = get_scores(cnt, pred_total, label_total)
        res['IUEN'] = {'acc': acc, 'rec': rec, 'f1': f1, 'label_total': label_total, 'pred_total': pred_total}
        (label_total, pred_total, cnt) = eval_keywords(pred, label)
        (acc, rec, f1) = get_scores(cnt, pred_total, label_total)
        res['keywords'] = {'acc': acc, 'rec': rec, 'f1': f1, 'label_total': label_total, 'pred_total': pred_total}
        return res
|
def isValidSQL(sql, db):
    """Return whether `sql` executes without error on the SQLite database.

    Inputs:
        sql (str): SQL statement to check.
        db (str): Path to a SQLite database file.

    Returns:
        bool indicating whether execution succeeded.
    """
    conn = sqlite3.connect(db)
    try:
        cursor = conn.cursor()
        try:
            cursor.execute(sql)
        except Exception:
            # Any execution failure means the SQL is not valid for this DB.
            return False
        return True
    finally:
        # Bug fix: the original returned without ever closing the connection.
        conn.close()
|
def print_scores(scores, etype):
    """Print score tables by hardness level: counts, then execution accuracy
    and/or exact- and partial-matching accuracy depending on etype."""
    levels = ['easy', 'medium', 'hard', 'extra', 'all']
    partial_types = ['select', 'select(no AGG)', 'where', 'where(no OP)', 'group(no Having)', 'group', 'order', 'and/or', 'IUEN', 'keywords']
    # Shared row templates: one label column plus one column per level.
    row_int = '{:20} {:<20d} {:<20d} {:<20d} {:<20d} {:<20d}'
    row_float = '{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}'
    print('{:20} {:20} {:20} {:20} {:20} {:20}'.format('', *levels))
    print(row_int.format('count', *[scores[level]['count'] for level in levels]))
    if etype in ['all', 'exec']:
        print('===================== EXECUTION ACCURACY =====================')
        print(row_float.format('execution', *[scores[level]['exec'] for level in levels]))
    if etype in ['all', 'match']:
        print('\n====================== EXACT MATCHING ACCURACY =====================')
        print(row_float.format('exact match', *[scores[level]['exact'] for level in levels]))
        print('\n---------------------PARTIAL MATCHING ACCURACY----------------------')
        for type_ in partial_types:
            print(row_float.format(type_, *[scores[level]['partial'][type_]['acc'] for level in levels]))
        print('---------------------- PARTIAL MATCHING RECALL ----------------------')
        for type_ in partial_types:
            print(row_float.format(type_, *[scores[level]['partial'][type_]['rec'] for level in levels]))
        print('---------------------- PARTIAL MATCHING F1 --------------------------')
        for type_ in partial_types:
            print(row_float.format(type_, *[scores[level]['partial'][type_]['f1'] for level in levels]))
|
def evaluate(gold, predict, db_dir, etype, kmaps):
    """Run Spider-style evaluation over paired gold/predicted SQL files.

    Inputs:
        gold (str): Path to the gold file; each line is 'SQL<TAB>db_name'.
        predict (str): Path to the predictions file; each line is a SQL query.
        db_dir (str): Directory containing '<db>/<db>.sqlite' databases.
        etype (str): Which metrics to compute: 'exec', 'match', or 'all'.
        kmaps: Foreign-key maps per database, used to canonicalize columns.

    Prints per-hardness score tables via print_scores; returns None.
    """
    with open(gold) as f:
        glist = [l.strip().split('\t') for l in f.readlines() if (len(l.strip()) > 0)]
    with open(predict) as f:
        plist = [l.strip().split('\t') for l in f.readlines() if (len(l.strip()) > 0)]
    evaluator = Evaluator()
    levels = ['easy', 'medium', 'hard', 'extra', 'all']
    partial_types = ['select', 'select(no AGG)', 'where', 'where(no OP)', 'group(no Having)', 'group', 'order', 'and/or', 'IUEN', 'keywords']
    entries = []
    scores = {}
    # Initialize accumulators per hardness level and partial-match type.
    for level in levels:
        scores[level] = {'count': 0, 'partial': {}, 'exact': 0.0}
        scores[level]['exec'] = 0
        for type_ in partial_types:
            scores[level]['partial'][type_] = {'acc': 0.0, 'rec': 0.0, 'f1': 0.0, 'acc_count': 0, 'rec_count': 0}
    eval_err_num = 0
    for (p, g) in zip(plist, glist):
        p_str = p[0]
        (g_str, db) = g
        db_name = db
        db = os.path.join(db_dir, db, (db + '.sqlite'))
        schema = Schema(get_schema(db))
        g_sql = get_sql(schema, g_str)
        # Hardness is determined by the gold query.
        hardness = evaluator.eval_hardness(g_sql)
        scores[hardness]['count'] += 1
        scores['all']['count'] += 1
        try:
            p_sql = get_sql(schema, p_str)
        except:
            # Unparsable prediction: substitute an empty parse so it scores 0.
            p_sql = {'except': None, 'from': {'conds': [], 'table_units': []}, 'groupBy': [], 'having': [], 'intersect': None, 'limit': None, 'orderBy': [], 'select': [False, []], 'union': None, 'where': []}
            eval_err_num += 1
            print('eval_err_num:{}'.format(eval_err_num))
        # Canonicalize values and columns (via the foreign-key map) on both sides.
        kmap = kmaps[db_name]
        g_valid_col_units = build_valid_col_units(g_sql['from']['table_units'], schema)
        g_sql = rebuild_sql_val(g_sql)
        g_sql = rebuild_sql_col(g_valid_col_units, g_sql, kmap)
        p_valid_col_units = build_valid_col_units(p_sql['from']['table_units'], schema)
        p_sql = rebuild_sql_val(p_sql)
        p_sql = rebuild_sql_col(p_valid_col_units, p_sql, kmap)
        if (etype in ['all', 'exec']):
            exec_score = eval_exec_match(db, p_str, g_str, p_sql, g_sql)
            if exec_score:
                scores[hardness]['exec'] += 1
        if (etype in ['all', 'match']):
            exact_score = evaluator.eval_exact_match(p_sql, g_sql)
            partial_scores = evaluator.partial_scores
            if (exact_score == 0):
                # Log mismatches for error analysis.
                print('{} pred: {}'.format(hardness, p_str))
                print('{} gold: {}'.format(hardness, g_str))
                print('')
            scores[hardness]['exact'] += exact_score
            scores['all']['exact'] += exact_score
            # Accumulate partial scores both for this hardness and overall;
            # acc/rec are only averaged over examples where the clause appears.
            for type_ in partial_types:
                if (partial_scores[type_]['pred_total'] > 0):
                    scores[hardness]['partial'][type_]['acc'] += partial_scores[type_]['acc']
                    scores[hardness]['partial'][type_]['acc_count'] += 1
                if (partial_scores[type_]['label_total'] > 0):
                    scores[hardness]['partial'][type_]['rec'] += partial_scores[type_]['rec']
                    scores[hardness]['partial'][type_]['rec_count'] += 1
                scores[hardness]['partial'][type_]['f1'] += partial_scores[type_]['f1']
                if (partial_scores[type_]['pred_total'] > 0):
                    scores['all']['partial'][type_]['acc'] += partial_scores[type_]['acc']
                    scores['all']['partial'][type_]['acc_count'] += 1
                if (partial_scores[type_]['label_total'] > 0):
                    scores['all']['partial'][type_]['rec'] += partial_scores[type_]['rec']
                    scores['all']['partial'][type_]['rec_count'] += 1
                scores['all']['partial'][type_]['f1'] += partial_scores[type_]['f1']
            entries.append({'predictSQL': p_str, 'goldSQL': g_str, 'hardness': hardness, 'exact': exact_score, 'partial': partial_scores})
    # Normalize accumulated sums into averages per level.
    for level in levels:
        if (scores[level]['count'] == 0):
            continue
        if (etype in ['all', 'exec']):
            scores[level]['exec'] /= scores[level]['count']
        if (etype in ['all', 'match']):
            scores[level]['exact'] /= scores[level]['count']
            for type_ in partial_types:
                if (scores[level]['partial'][type_]['acc_count'] == 0):
                    scores[level]['partial'][type_]['acc'] = 0
                else:
                    scores[level]['partial'][type_]['acc'] = ((scores[level]['partial'][type_]['acc'] / scores[level]['partial'][type_]['acc_count']) * 1.0)
                if (scores[level]['partial'][type_]['rec_count'] == 0):
                    scores[level]['partial'][type_]['rec'] = 0
                else:
                    scores[level]['partial'][type_]['rec'] = ((scores[level]['partial'][type_]['rec'] / scores[level]['partial'][type_]['rec_count']) * 1.0)
                if ((scores[level]['partial'][type_]['acc'] == 0) and (scores[level]['partial'][type_]['rec'] == 0)):
                    # NOTE(review): F1 is set to 1 when both acc and rec are 0,
                    # which looks inverted — confirm against the original
                    # Spider evaluation script.
                    scores[level]['partial'][type_]['f1'] = 1
                else:
                    scores[level]['partial'][type_]['f1'] = (((2.0 * scores[level]['partial'][type_]['acc']) * scores[level]['partial'][type_]['rec']) / (scores[level]['partial'][type_]['rec'] + scores[level]['partial'][type_]['acc']))
    print_scores(scores, etype)
|
def eval_exec_match(db, p_str, g_str, pred, gold):
    """Return True if executing the predicted and gold queries against the
    database yields the same result columns.

    Inputs:
        db (str): path to the sqlite database file.
        p_str (str): predicted SQL string.
        g_str (str): gold SQL string.
        pred (dict): parsed predicted SQL (Spider SQL dict).
        gold (dict): parsed gold SQL (Spider SQL dict).

    Returns False when the predicted SQL fails to execute. Currently does
    not support multiple col_unit (pairs).
    """
    conn = sqlite3.connect(db)
    try:
        cursor = conn.cursor()
        try:
            cursor.execute(p_str)
            p_res = cursor.fetchall()
        except Exception:
            # malformed/inexecutable prediction counts as a non-match
            # (narrowed from a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit)
            return False
        cursor.execute(g_str)
        q_res = cursor.fetchall()
    finally:
        # always release the connection (the original leaked it)
        conn.close()

    def res_map(res, val_units):
        # Key each result column by its select val_unit so that column
        # ordering differences between pred and gold do not matter.
        rmap = {}
        for idx, val_unit in enumerate(val_units):
            key = (tuple(val_unit[1]) if not val_unit[2]
                   else (val_unit[0], tuple(val_unit[1]), tuple(val_unit[2])))
            rmap[key] = [r[idx] for r in res]
        return rmap

    p_val_units = [unit[1] for unit in pred['select'][1]]
    q_val_units = [unit[1] for unit in gold['select'][1]]
    return res_map(p_res, p_val_units) == res_map(q_res, q_val_units)
|
def rebuild_cond_unit_val(cond_unit):
    """Strip literal values from a condition unit when DISABLE_VALUE is set,
    recursing into nested SQL operands."""
    if cond_unit is None or not DISABLE_VALUE:
        return cond_unit
    not_op, op_id, val_unit, val1, val2 = cond_unit
    # Nested sub-queries are rebuilt recursively; plain literals are dropped.
    val1 = rebuild_sql_val(val1) if type(val1) is dict else None
    val2 = rebuild_sql_val(val2) if type(val2) is dict else None
    return (not_op, op_id, val_unit, val1, val2)
|
def rebuild_condition_val(condition):
    """Strip values from every cond_unit in a condition list; odd slots are
    the AND/OR connectors and are kept verbatim."""
    if condition is None or not DISABLE_VALUE:
        return condition
    return [rebuild_cond_unit_val(item) if idx % 2 == 0 else item
            for idx, item in enumerate(condition)]
|
def rebuild_sql_val(sql):
    """Recursively strip literal values from every condition of a parsed SQL
    dict when DISABLE_VALUE is set; otherwise a no-op. Mutates in place."""
    if sql is None or not DISABLE_VALUE:
        return sql
    sql['from']['conds'] = rebuild_condition_val(sql['from']['conds'])
    sql['having'] = rebuild_condition_val(sql['having'])
    sql['where'] = rebuild_condition_val(sql['where'])
    for clause in ('intersect', 'except', 'union'):
        sql[clause] = rebuild_sql_val(sql[clause])
    return sql
|
def build_valid_col_units(table_units, schema):
    """Collect the schema column ids that belong to the tables referenced in
    table_units (only 'table_unit' entries, not sub-query table units)."""
    table_ids = [tu[1] for tu in table_units if tu[0] == TABLE_TYPE['table_unit']]
    # Table ids look like '__table__'; dropping the trailing '__' gives the
    # prefix that column ids ('__table.col__') start with.
    prefixes = [tid[:-2] for tid in table_ids]
    return [value for value in schema.idMap.values()
            if '.' in value and value[:value.index('.')] in prefixes]
|
def rebuild_col_unit_col(valid_col_units, col_unit, kmap):
    """Canonicalize a col_unit's column id through the foreign-key map and
    optionally erase the DISTINCT flag (when DISABLE_DISTINCT is set)."""
    if col_unit is None:
        return col_unit
    agg_id, col_id, distinct = col_unit
    if col_id in kmap and col_id in valid_col_units:
        col_id = kmap[col_id]
    return (agg_id, col_id, None if DISABLE_DISTINCT else distinct)
|
def rebuild_val_unit_col(valid_col_units, val_unit, kmap):
    """Apply column canonicalization to both col_units of a val_unit."""
    if val_unit is None:
        return val_unit
    unit_op, cu1, cu2 = val_unit
    return (unit_op,
            rebuild_col_unit_col(valid_col_units, cu1, kmap),
            rebuild_col_unit_col(valid_col_units, cu2, kmap))
|
def rebuild_table_unit_col(valid_col_units, table_unit, kmap):
    """Canonicalize a table_unit; only tuple payloads are col_units that need
    rewriting (other payloads, e.g. sub-query dicts, pass through)."""
    if table_unit is None:
        return table_unit
    table_type, payload = table_unit
    if isinstance(payload, tuple):
        payload = rebuild_col_unit_col(valid_col_units, payload, kmap)
    return (table_type, payload)
|
def rebuild_cond_unit_col(valid_col_units, cond_unit, kmap):
    """Canonicalize the val_unit inside a condition unit; the operand values
    (val1/val2) are left untouched."""
    if cond_unit is None:
        return cond_unit
    not_op, op_id, val_unit, val1, val2 = cond_unit
    new_val_unit = rebuild_val_unit_col(valid_col_units, val_unit, kmap)
    return (not_op, op_id, new_val_unit, val1, val2)
|
def rebuild_condition_col(valid_col_units, condition, kmap):
    """Canonicalize every cond_unit (even slots) of a condition list in place;
    odd slots are AND/OR connectors and are left as-is."""
    for idx, item in enumerate(condition):
        if idx % 2 == 0:
            condition[idx] = rebuild_cond_unit_col(valid_col_units, item, kmap)
    return condition
|
def rebuild_select_col(valid_col_units, sel, kmap):
    """Canonicalize every (agg_id, val_unit) pair of a select clause, and
    erase the DISTINCT flag when DISABLE_DISTINCT is set."""
    if sel is None:
        return sel
    distinct, units = sel
    rebuilt = [(agg_id, rebuild_val_unit_col(valid_col_units, val_unit, kmap))
               for agg_id, val_unit in units]
    if DISABLE_DISTINCT:
        distinct = None
    return (distinct, rebuilt)
|
def rebuild_from_col(valid_col_units, from_, kmap):
    """Canonicalize the table units and join conditions of a FROM clause,
    mutating the dict in place."""
    if from_ is None:
        return from_
    from_['table_units'] = [
        rebuild_table_unit_col(valid_col_units, tu, kmap)
        for tu in from_['table_units']
    ]
    from_['conds'] = rebuild_condition_col(valid_col_units, from_['conds'], kmap)
    return from_
|
def rebuild_group_by_col(valid_col_units, group_by, kmap):
    """Canonicalize each col_unit in a GROUP BY list."""
    if group_by is None:
        return group_by
    rebuilt = []
    for col_unit in group_by:
        rebuilt.append(rebuild_col_unit_col(valid_col_units, col_unit, kmap))
    return rebuilt
|
def rebuild_order_by_col(valid_col_units, order_by, kmap):
    """Canonicalize the val_units of an ORDER BY clause; empty or missing
    clauses pass through unchanged."""
    if not order_by:
        return order_by
    direction, val_units = order_by
    rebuilt = [rebuild_val_unit_col(valid_col_units, vu, kmap) for vu in val_units]
    return (direction, rebuilt)
|
def rebuild_sql_col(valid_col_units, sql, kmap):
    """Recursively canonicalize all column references of a parsed SQL dict
    through the foreign-key map, mutating the dict in place."""
    if sql is None:
        return sql
    sql['select'] = rebuild_select_col(valid_col_units, sql['select'], kmap)
    sql['from'] = rebuild_from_col(valid_col_units, sql['from'], kmap)
    sql['where'] = rebuild_condition_col(valid_col_units, sql['where'], kmap)
    sql['groupBy'] = rebuild_group_by_col(valid_col_units, sql['groupBy'], kmap)
    sql['orderBy'] = rebuild_order_by_col(valid_col_units, sql['orderBy'], kmap)
    sql['having'] = rebuild_condition_col(valid_col_units, sql['having'], kmap)
    # IUE branches are whole sub-queries; recurse into each.
    for clause in ('intersect', 'except', 'union'):
        sql[clause] = rebuild_sql_col(valid_col_units, sql[clause], kmap)
    return sql
|
def build_foreign_key_map(entry):
    """Map every column id participating in a foreign-key group to a
    canonical member of that group (the member with the smallest index).

    `entry` is one schema record from tables.json, with keys
    'column_names_original', 'table_names_original' and 'foreign_keys'.
    """
    tables_orig = entry['table_names_original']
    # Render each column as '__table.column__' ('__all__' for the * column).
    cols = []
    for table_idx, col_name in entry['column_names_original']:
        if table_idx >= 0:
            cols.append('__' + tables_orig[table_idx].lower() + '.' + col_name.lower() + '__')
        else:
            cols.append('__all__')

    def find_or_create_group(k1, k2, groups):
        # Return the first group containing either key, else start a new one.
        for group in groups:
            if k1 in group or k2 in group:
                return group
        fresh = set()
        groups.append(fresh)
        return fresh

    groups = []
    for key1, key2 in entry['foreign_keys']:
        group = find_or_create_group(key1, key2, groups)
        group.add(key1)
        group.add(key2)

    foreign_key_map = {}
    for group in groups:
        members = sorted(group)
        canonical = members[0]
        for idx in members:
            foreign_key_map[cols[idx]] = cols[canonical]
    return foreign_key_map
|
def build_foreign_key_map_from_json(table):
    """Load a tables.json schema file and build one foreign-key map per db_id."""
    with open(table) as f:
        data = json.load(f)
    return {entry['db_id']: build_foreign_key_map(entry) for entry in data}
|
def condition_has_or(conds):
    """True when any connector slot (odd index) of the condition list is 'or'."""
    return any(connector == 'or' for connector in conds[1::2])
|
def condition_has_like(conds):
    """True when any cond_unit (even index) uses the LIKE operator."""
    like_id = WHERE_OPS.index('like')
    return any(cond_unit[1] == like_id for cond_unit in conds[::2])
|
def condition_has_sql(conds):
    """True when either operand (val1/val2) of any cond_unit is a nested SQL
    dict, i.e. the condition contains a sub-query."""
    for cond_unit in conds[::2]:
        if type(cond_unit[3]) is dict or type(cond_unit[4]) is dict:
            return True
    return False
|
def val_has_op(val_unit):
    """True when the val_unit applies a real arithmetic unit op (not 'none')."""
    none_id = UNIT_OPS.index('none')
    return val_unit[0] != none_id
|
def has_agg(unit):
    """True when the unit's leading agg_id is a real aggregation (not 'none')."""
    none_agg = AGG_OPS.index('none')
    return unit[0] != none_agg
|
def accuracy(count, total):
    """All-or-nothing accuracy: 1 when every item matched, else 0."""
    return 1 if count == total else 0
|
def recall(count, total):
    """All-or-nothing recall: 1 when every gold item was matched, else 0."""
    return int(count == total)
|
def F1(acc, rec):
    """Harmonic mean of accuracy and recall; defined as 0 when both are 0."""
    denom = acc + rec
    if denom == 0:
        return 0
    return 2.0 * acc * rec / denom
|
def get_scores(count, pred_total, label_total):
    """Component score triple (acc, rec, f1). Scoring is all-or-nothing:
    all three are 1 only when pred and gold have the same size and every
    item matched; any mismatch yields (0, 0, 0)."""
    if pred_total == label_total == count:
        return (1, 1, 1)
    return (0, 0, 0)
|
def eval_sel(pred, label):
    """Compare SELECT clauses.

    Returns (label_total, pred_total, matched, matched-ignoring-aggregation).
    NOTE: consumes matches from label['select'][1] (mutates the label dict),
    mirroring the original implementation.
    """
    pred_units = pred['select'][1]
    label_units = label['select'][1]
    label_wo_agg = [unit[1] for unit in label_units]
    pred_total = len(pred_units)
    label_total = len(label_units)
    cnt = 0
    cnt_wo_agg = 0
    for unit in pred_units:
        if unit in label_units:
            cnt += 1
            label_units.remove(unit)
            if unit[1] in label_wo_agg:
                cnt_wo_agg += 1
                label_wo_agg.remove(unit[1])
    return (label_total, pred_total, cnt, cnt_wo_agg)
|
def eval_where(pred, label):
    """Compare WHERE condition units.

    Returns (label_total, pred_total, matched, matched-by-val_unit-only).
    Works on fresh local lists, so neither input dict is mutated.
    """
    pred_conds = list(pred['where'][::2])
    label_conds = list(label['where'][::2])
    label_wo_agg = [unit[2] for unit in label_conds]
    pred_total = len(pred_conds)
    label_total = len(label_conds)
    cnt = 0
    cnt_wo_agg = 0
    for unit in pred_conds:
        if unit in label_conds:
            cnt += 1
            label_conds.remove(unit)
            if unit[2] in label_wo_agg:
                cnt_wo_agg += 1
                label_wo_agg.remove(unit[2])
    return (label_total, pred_total, cnt, cnt_wo_agg)
|
def eval_group(pred, label):
    """Compare GROUP BY columns after stripping any 'table.' prefix.

    (Local comprehension variables renamed — the original shadowed the
    `pred`/`label` parameters inside its comprehensions.)
    """
    pred_cols = [unit[1] for unit in pred['groupBy']]
    label_cols = [unit[1] for unit in label['groupBy']]
    pred_total = len(pred_cols)
    label_total = len(label_cols)
    # Compare by bare column name so table qualification does not matter.
    pred_cols = [c.split('.')[1] if '.' in c else c for c in pred_cols]
    label_cols = [c.split('.')[1] if '.' in c else c for c in label_cols]
    cnt = 0
    for col in pred_cols:
        if col in label_cols:
            cnt += 1
            label_cols.remove(col)
    return (label_total, pred_total, cnt)
|
def eval_having(pred, label):
    """All-or-nothing comparison of HAVING; counts a match only when both
    sides group and the GROUP BY columns and HAVING conditions are identical."""
    pred_total = 1 if len(pred['groupBy']) > 0 else 0
    label_total = 1 if len(label['groupBy']) > 0 else 0
    cnt = 0
    pred_cols = [unit[1] for unit in pred['groupBy']]
    label_cols = [unit[1] for unit in label['groupBy']]
    if (pred_total == label_total == 1 and pred_cols == label_cols
            and pred['having'] == label['having']):
        cnt = 1
    return (label_total, pred_total, cnt)
|
def eval_order(pred, label):
    """All-or-nothing comparison of ORDER BY, also requiring that LIMIT is
    present on both sides or absent on both sides."""
    pred_total = 1 if len(pred['orderBy']) > 0 else 0
    label_total = 1 if len(label['orderBy']) > 0 else 0
    cnt = 0
    limits_agree = (pred['limit'] is None) == (label['limit'] is None)
    if len(label['orderBy']) > 0 and pred['orderBy'] == label['orderBy'] and limits_agree:
        cnt = 1
    return (label_total, pred_total, cnt)
|
def eval_and_or(pred, label):
    """Compare the sets of AND/OR connectors used in the WHERE clause."""
    pred_connectors = set(pred['where'][1::2])
    label_connectors = set(label['where'][1::2])
    if pred_connectors == label_connectors:
        return (1, 1, 1)
    return (len(pred_connectors), len(label_connectors), 0)
|
def get_nestedSQL(sql):
    """Collect every nested SQL dict of a parsed query: sub-queries used as
    condition operands plus the INTERSECT/EXCEPT/UNION branches."""
    nested = []
    all_cond_units = (sql['from']['conds'][::2]
                      + sql['where'][::2]
                      + sql['having'][::2])
    for cond_unit in all_cond_units:
        for operand in (cond_unit[3], cond_unit[4]):
            if type(operand) is dict:
                nested.append(operand)
    for branch in ('intersect', 'except', 'union'):
        if sql[branch] is not None:
            nested.append(sql[branch])
    return nested
|
def eval_nested(pred, label):
    """Score one nested-SQL slot: totals count presence on each side, and
    cnt counts an exact match between the two sub-queries."""
    pred_total = 0 if pred is None else 1
    label_total = 0 if label is None else 1
    cnt = 0
    if pred is not None and label is not None:
        cnt += Evaluator().eval_exact_match(pred, label)
    return (label_total, pred_total, cnt)
|
def eval_IUEN(pred, label):
    """Aggregate nested-SQL scores across INTERSECT, EXCEPT and UNION."""
    triples = [eval_nested(pred[key], label[key])
               for key in ('intersect', 'except', 'union')]
    label_total = sum(t[0] for t in triples)
    pred_total = sum(t[1] for t in triples)
    cnt = sum(t[2] for t in triples)
    return (label_total, pred_total, cnt)
|
def get_keywords(sql):
    """Return the set of SQL keywords (plus the order direction token) that
    the parsed query uses."""
    res = set()
    if len(sql['where']) > 0:
        res.add('where')
    if len(sql['groupBy']) > 0:
        res.add('group')
    if len(sql['having']) > 0:
        res.add('having')
    if len(sql['orderBy']) > 0:
        res.add(sql['orderBy'][0])  # the direction token ('asc'/'desc')
        res.add('order')
    if sql['limit'] is not None:
        res.add('limit')
    for branch in ('except', 'union', 'intersect'):
        if sql[branch] is not None:
            res.add(branch)
    # connectors between condition units (odd slots)
    connectors = (sql['from']['conds'][1::2]
                  + sql['where'][1::2]
                  + sql['having'][1::2])
    if any(token == 'or' for token in connectors):
        res.add('or')
    cond_units = (sql['from']['conds'][::2]
                  + sql['where'][::2]
                  + sql['having'][::2])
    if any(cond_unit[0] for cond_unit in cond_units):
        res.add('not')
    if any(cond_unit[1] == WHERE_OPS.index('in') for cond_unit in cond_units):
        res.add('in')
    if any(cond_unit[1] == WHERE_OPS.index('like') for cond_unit in cond_units):
        res.add('like')
    return res
|
def eval_keywords(pred, label):
    """Compare the keyword sets of the predicted and gold queries."""
    pred_keywords = get_keywords(pred)
    label_keywords = get_keywords(label)
    cnt = len(pred_keywords & label_keywords)
    return (len(label_keywords), len(pred_keywords), cnt)
|
def count_agg(units):
    """Count how many units carry a real aggregation function."""
    return sum(1 for unit in units if has_agg(unit))
|
def count_component1(sql):
    """Count 'component 1' hardness contributors: WHERE / GROUP BY /
    ORDER BY / LIMIT presence, extra joined tables, OR connectors, and
    LIKE conditions."""
    count = sum([
        len(sql['where']) > 0,
        len(sql['groupBy']) > 0,
        len(sql['orderBy']) > 0,
        sql['limit'] is not None,
    ])
    # each table beyond the first implies one join
    table_units = sql['from']['table_units']
    if len(table_units) > 0:
        count += len(table_units) - 1
    connectors = (sql['from']['conds'][1::2]
                  + sql['where'][1::2]
                  + sql['having'][1::2])
    count += sum(1 for token in connectors if token == 'or')
    cond_units = (sql['from']['conds'][::2]
                  + sql['where'][::2]
                  + sql['having'][::2])
    count += sum(1 for cu in cond_units if cu[1] == WHERE_OPS.index('like'))
    return count
|
def count_component2(sql):
    """Count nested sub-queries ('component 2' of the hardness measure)."""
    return len(get_nestedSQL(sql))
|
def count_others(sql):
    """Count 'other' hardness contributors: more than one aggregation,
    multiple select columns, multiple where conditions, or multiple
    group-by columns (one point each)."""
    agg_count = count_agg(sql['select'][1])
    agg_count += count_agg(sql['where'][::2])
    agg_count += count_agg(sql['groupBy'])
    if len(sql['orderBy']) > 0:
        order_units = sql['orderBy'][1]
        # both col_units of each order val_unit can carry an aggregation
        agg_count += count_agg([u[1] for u in order_units if u[1]]
                               + [u[2] for u in order_units if u[2]])
    agg_count += count_agg(sql['having'])

    count = 0
    if agg_count > 1:
        count += 1
    if len(sql['select'][1]) > 1:
        count += 1
    if len(sql['where']) > 1:
        count += 1
    if len(sql['groupBy']) > 1:
        count += 1
    return count
|
class Evaluator():
    """Scores predicted SQL parses against gold parses (Spider metrics)."""

    def __init__(self):
        # partial scores from the most recent eval_exact_match call
        self.partial_scores = None

    def eval_hardness(self, sql):
        """Classify a gold query as easy/medium/hard/extra for reporting."""
        comp1 = count_component1(sql)
        comp2 = count_component2(sql)
        others = count_others(sql)
        if comp1 <= 1 and others == 0 and comp2 == 0:
            return 'easy'
        if ((others <= 2 and comp1 <= 1 and comp2 == 0)
                or (comp1 <= 2 and others < 2 and comp2 == 0)):
            return 'medium'
        if ((others > 2 and comp1 <= 2 and comp2 == 0)
                or (2 < comp1 <= 3 and others <= 2 and comp2 == 0)
                or (comp1 <= 1 and others == 0 and comp2 <= 1)):
            return 'hard'
        return 'extra'

    def eval_exact_match(self, pred, label):
        """Return 1 when every partial component matches exactly (and, if the
        gold query uses FROM tables, the table sets agree). Stores the
        partial scores on self as a side effect."""
        partial_scores = self.eval_partial_match(pred, label)
        self.partial_scores = partial_scores
        for score in partial_scores.values():
            if score['f1'] != 1:
                return 0
        if len(label['from']['table_units']) > 0:
            label_tables = sorted(label['from']['table_units'])
            pred_tables = sorted(pred['from']['table_units'])
            return label_tables == pred_tables
        return 1

    def eval_partial_match(self, pred, label):
        """Score every partial component; returns a dict keyed by component
        name, each value holding acc/rec/f1 and the two totals."""
        res = {}

        def record(name, label_total, pred_total, cnt):
            acc, rec, f1 = get_scores(cnt, pred_total, label_total)
            res[name] = {'acc': acc, 'rec': rec, 'f1': f1,
                         'label_total': label_total, 'pred_total': pred_total}

        label_total, pred_total, cnt, cnt_wo_agg = eval_sel(pred, label)
        record('select', label_total, pred_total, cnt)
        record('select(no AGG)', label_total, pred_total, cnt_wo_agg)

        label_total, pred_total, cnt, cnt_wo_agg = eval_where(pred, label)
        record('where', label_total, pred_total, cnt)
        record('where(no OP)', label_total, pred_total, cnt_wo_agg)

        record('group(no Having)', *eval_group(pred, label))
        record('group', *eval_having(pred, label))
        record('order', *eval_order(pred, label))
        record('and/or', *eval_and_or(pred, label))
        record('IUEN', *eval_IUEN(pred, label))
        record('keywords', *eval_keywords(pred, label))
        return res
|
def isValidSQL(sql, db):
    """Return True when `sql` executes without error against the sqlite
    database at path `db`, False otherwise.

    Fixes over the original: the connection is always closed (it was
    leaked), and the bare `except:` (which also swallowed
    KeyboardInterrupt/SystemExit) is narrowed to Exception.
    """
    conn = sqlite3.connect(db)
    try:
        conn.cursor().execute(sql)
    except Exception:
        return False
    finally:
        conn.close()
    return True
|
def print_scores(scores, etype):
    """Pretty-print the aggregated score tables.

    Inputs:
        scores (dict): keyed by hardness level and by turn id, as built by
            evaluate(); each entry holds 'count', 'exec', 'exact' and (for
            levels) per-component 'partial' scores.
        etype (str): 'all', 'exec' or 'match' — selects which sections print.

    Fixes over the original: the per-turn section headers had typos
    ('TRUN XECUTION' / 'TRUN EXACT').
    """
    turns = ['turn 1', 'turn 2', 'turn 3', 'turn 4', 'turn >4']
    levels = ['easy', 'medium', 'hard', 'extra', 'all', 'joint_all']
    partial_types = ['select', 'select(no AGG)', 'where', 'where(no OP)',
                     'group(no Having)', 'group', 'order', 'and/or', 'IUEN', 'keywords']

    # per-hardness-level breakdown
    print('{:20} {:20} {:20} {:20} {:20} {:20} {:20}'.format('', *levels))
    counts = [scores[level]['count'] for level in levels]
    print('{:20} {:<20d} {:<20d} {:<20d} {:<20d} {:<20d} {:<20d}'.format('count', *counts))
    if etype in ['all', 'exec']:
        print('===================== EXECUTION ACCURACY =====================')
        this_scores = [scores[level]['exec'] for level in levels]
        print('{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}'.format('execution', *this_scores))
    if etype in ['all', 'match']:
        print('\n====================== EXACT MATCHING ACCURACY =====================')
        exact_scores = [scores[level]['exact'] for level in levels]
        print('{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}'.format('exact match', *exact_scores))
        print('\n---------------------PARTIAL MATCHING ACCURACY----------------------')
        for type_ in partial_types:
            this_scores = [scores[level]['partial'][type_]['acc'] for level in levels]
            print('{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}'.format(type_, *this_scores))
        print('---------------------- PARTIAL MATCHING RECALL ----------------------')
        for type_ in partial_types:
            this_scores = [scores[level]['partial'][type_]['rec'] for level in levels]
            print('{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}'.format(type_, *this_scores))
        print('---------------------- PARTIAL MATCHING F1 --------------------------')
        for type_ in partial_types:
            this_scores = [scores[level]['partial'][type_]['f1'] for level in levels]
            print('{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}'.format(type_, *this_scores))

    # per-turn breakdown
    print('\n\n{:20} {:20} {:20} {:20} {:20} {:20}'.format('', *turns))
    counts = [scores[turn]['count'] for turn in turns]
    print('{:20} {:<20d} {:<20d} {:<20d} {:<20d} {:<20d}'.format('count', *counts))
    if etype in ['all', 'exec']:
        # header typo fixed: was 'TRUN XECUTION'
        print('===================== TURN EXECUTION ACCURACY =====================')
        this_scores = [scores[turn]['exec'] for turn in turns]
        print('{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}'.format('execution', *this_scores))
    if etype in ['all', 'match']:
        # header typo fixed: was 'TRUN EXACT'
        print('\n====================== TURN EXACT MATCHING ACCURACY =====================')
        exact_scores = [scores[turn]['exact'] for turn in turns]
        print('{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}'.format('exact match', *exact_scores))
|
def evaluate(gold, predict, db_dir, etype, kmaps):
    """Evaluate predicted SQL against gold SQL over an interaction dataset.

    Inputs:
        gold: path to the gold file; each line is 'sql\\tdb_id', and
            interactions are separated by blank lines.
        predict: path to the prediction file (same blank-line grouping);
            each line holds one predicted SQL string.
        db_dir: directory containing <db_id>/<db_id>.sqlite databases.
        etype: 'all', 'exec' or 'match' — which metrics to compute.
        kmaps: db_id -> foreign-key map (see build_foreign_key_map_from_json).

    Prints per-hardness and per-turn score tables via print_scores().
    """
    # Parse the gold file into a list of interactions, each a list of
    # (sql, db_id) pairs; a blank line terminates the current interaction.
    with open(gold) as f:
        glist = []
        gseq_one = []
        for l in f.readlines():
            if (len(l.strip()) == 0):
                glist.append(gseq_one)
                gseq_one = []
            else:
                lstrip = l.strip().split('\t')
                gseq_one.append(lstrip)
    # Parse the prediction file with the same blank-line grouping.
    with open(predict) as f:
        plist = []
        pseq_one = []
        for l in f.readlines():
            if (len(l.strip()) == 0):
                plist.append(pseq_one)
                pseq_one = []
            else:
                pseq_one.append(l.strip().split('\t'))
    evaluator = Evaluator()
    turns = ['turn 1', 'turn 2', 'turn 3', 'turn 4', 'turn >4']
    levels = ['easy', 'medium', 'hard', 'extra', 'all', 'joint_all']
    partial_types = ['select', 'select(no AGG)', 'where', 'where(no OP)', 'group(no Having)', 'group', 'order', 'and/or', 'IUEN', 'keywords']
    entries = []
    scores = {}
    # Initialise the per-turn and per-hardness accumulators.
    for turn in turns:
        scores[turn] = {'count': 0, 'exact': 0.0}
        scores[turn]['exec'] = 0
    for level in levels:
        scores[level] = {'count': 0, 'partial': {}, 'exact': 0.0}
        scores[level]['exec'] = 0
        for type_ in partial_types:
            scores[level]['partial'][type_] = {'acc': 0.0, 'rec': 0.0, 'f1': 0.0, 'acc_count': 0, 'rec_count': 0}
    eval_err_num = 0
    for (p, g) in zip(plist, glist):
        # One (prediction, gold) interaction pair.
        scores['joint_all']['count'] += 1
        turn_scores = {'exec': [], 'exact': []}
        for (idx, pg) in enumerate(zip(p, g)):
            # NOTE(review): rebinds the outer loop's p/g to this turn's
            # entries; harmless because zip() above already captured them.
            (p, g) = pg
            p_str = p[0]
            # Replace the 'value' placeholder so the SQL can execute.
            p_str = p_str.replace('value', '1')
            (g_str, db) = g
            db_name = db
            db = os.path.join(db_dir, db, (db + '.sqlite'))
            schema = Schema(get_schema(db))
            g_sql = get_sql(schema, g_str)
            hardness = evaluator.eval_hardness(g_sql)
            # Turns beyond the fourth are pooled into 'turn >4'.
            if (idx > 3):
                idx = '>4'
            else:
                idx += 1
            turn_id = ('turn ' + str(idx))
            scores[turn_id]['count'] += 1
            scores[hardness]['count'] += 1
            scores['all']['count'] += 1
            try:
                p_sql = get_sql(schema, p_str)
            except:
                # Unparsable prediction: score an empty SQL structure instead.
                p_sql = {'except': None, 'from': {'conds': [], 'table_units': []}, 'groupBy': [], 'having': [], 'intersect': None, 'limit': None, 'orderBy': [], 'select': [False, []], 'union': None, 'where': []}
                eval_err_num += 1
                print('eval_err_num:{}'.format(eval_err_num))
            # Canonicalise values and column ids on both sides before scoring.
            kmap = kmaps[db_name]
            g_valid_col_units = build_valid_col_units(g_sql['from']['table_units'], schema)
            g_sql = rebuild_sql_val(g_sql)
            g_sql = rebuild_sql_col(g_valid_col_units, g_sql, kmap)
            p_valid_col_units = build_valid_col_units(p_sql['from']['table_units'], schema)
            p_sql = rebuild_sql_val(p_sql)
            p_sql = rebuild_sql_col(p_valid_col_units, p_sql, kmap)
            if (etype in ['all', 'exec']):
                exec_score = eval_exec_match(db, p_str, g_str, p_sql, g_sql)
                if exec_score:
                    scores[hardness]['exec'] += 1
                    scores[turn_id]['exec'] += 1
                    turn_scores['exec'].append(1)
                else:
                    turn_scores['exec'].append(0)
            if (etype in ['all', 'match']):
                exact_score = evaluator.eval_exact_match(p_sql, g_sql)
                partial_scores = evaluator.partial_scores
                if (exact_score == 0):
                    turn_scores['exact'].append(0)
                    # Log mismatches for error analysis.
                    print('{} pred: {}'.format(hardness, p_str))
                    print('{} gold: {}'.format(hardness, g_str))
                    print('')
                else:
                    turn_scores['exact'].append(1)
                scores[turn_id]['exact'] += exact_score
                scores[hardness]['exact'] += exact_score
                scores['all']['exact'] += exact_score
                # Accumulate partial-component scores; acc only counts when
                # the prediction used the component, rec only when gold did.
                for type_ in partial_types:
                    if (partial_scores[type_]['pred_total'] > 0):
                        scores[hardness]['partial'][type_]['acc'] += partial_scores[type_]['acc']
                        scores[hardness]['partial'][type_]['acc_count'] += 1
                    if (partial_scores[type_]['label_total'] > 0):
                        scores[hardness]['partial'][type_]['rec'] += partial_scores[type_]['rec']
                        scores[hardness]['partial'][type_]['rec_count'] += 1
                    scores[hardness]['partial'][type_]['f1'] += partial_scores[type_]['f1']
                    if (partial_scores[type_]['pred_total'] > 0):
                        scores['all']['partial'][type_]['acc'] += partial_scores[type_]['acc']
                        scores['all']['partial'][type_]['acc_count'] += 1
                    if (partial_scores[type_]['label_total'] > 0):
                        scores['all']['partial'][type_]['rec'] += partial_scores[type_]['rec']
                        scores['all']['partial'][type_]['rec_count'] += 1
                    scores['all']['partial'][type_]['f1'] += partial_scores[type_]['f1']
                entries.append({'predictSQL': p_str, 'goldSQL': g_str, 'hardness': hardness, 'exact': exact_score, 'partial': partial_scores})
        # Interaction-level (joint) scores: every turn must be correct.
        # NOTE(review): when etype == 'match' the exec list is empty, so
        # all(...) is vacuously True and joint_all exec still increments;
        # harmless because exec results are only printed for exec/all.
        if all(((v == 1) for v in turn_scores['exec'])):
            scores['joint_all']['exec'] += 1
        if all(((v == 1) for v in turn_scores['exact'])):
            scores['joint_all']['exact'] += 1
    # Convert accumulated sums into averages for reporting.
    for turn in turns:
        if (scores[turn]['count'] == 0):
            continue
        if (etype in ['all', 'exec']):
            scores[turn]['exec'] /= scores[turn]['count']
        if (etype in ['all', 'match']):
            scores[turn]['exact'] /= scores[turn]['count']
    for level in levels:
        if (scores[level]['count'] == 0):
            continue
        if (etype in ['all', 'exec']):
            scores[level]['exec'] /= scores[level]['count']
        if (etype in ['all', 'match']):
            scores[level]['exact'] /= scores[level]['count']
        for type_ in partial_types:
            if (scores[level]['partial'][type_]['acc_count'] == 0):
                scores[level]['partial'][type_]['acc'] = 0
            else:
                scores[level]['partial'][type_]['acc'] = ((scores[level]['partial'][type_]['acc'] / scores[level]['partial'][type_]['acc_count']) * 1.0)
            if (scores[level]['partial'][type_]['rec_count'] == 0):
                scores[level]['partial'][type_]['rec'] = 0
            else:
                scores[level]['partial'][type_]['rec'] = ((scores[level]['partial'][type_]['rec'] / scores[level]['partial'][type_]['rec_count']) * 1.0)
            # NOTE(review): f1 is reported as 1 when both averages are 0;
            # this mirrors the original scoring convention.
            if ((scores[level]['partial'][type_]['acc'] == 0) and (scores[level]['partial'][type_]['rec'] == 0)):
                scores[level]['partial'][type_]['f1'] = 1
            else:
                scores[level]['partial'][type_]['f1'] = (((2.0 * scores[level]['partial'][type_]['acc']) * scores[level]['partial'][type_]['rec']) / (scores[level]['partial'][type_]['rec'] + scores[level]['partial'][type_]['acc']))
    print_scores(scores, etype)
|
def eval_exec_match(db, p_str, g_str, pred, gold):
    """Return True if executing the predicted and gold queries against the
    database yields the same result columns.

    Inputs:
        db (str): path to the sqlite database file.
        p_str (str): predicted SQL string.
        g_str (str): gold SQL string.
        pred (dict): parsed predicted SQL (Spider SQL dict).
        gold (dict): parsed gold SQL (Spider SQL dict).

    Returns False when the predicted SQL fails to execute. Currently does
    not support multiple col_unit (pairs).
    """
    conn = sqlite3.connect(db)
    try:
        cursor = conn.cursor()
        try:
            cursor.execute(p_str)
            p_res = cursor.fetchall()
        except Exception:
            # malformed/inexecutable prediction counts as a non-match
            # (narrowed from a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit)
            return False
        cursor.execute(g_str)
        q_res = cursor.fetchall()
    finally:
        # always release the connection (the original leaked it)
        conn.close()

    def res_map(res, val_units):
        # Key each result column by its select val_unit so that column
        # ordering differences between pred and gold do not matter.
        rmap = {}
        for idx, val_unit in enumerate(val_units):
            key = (tuple(val_unit[1]) if not val_unit[2]
                   else (val_unit[0], tuple(val_unit[1]), tuple(val_unit[2])))
            rmap[key] = [r[idx] for r in res]
        return rmap

    p_val_units = [unit[1] for unit in pred['select'][1]]
    q_val_units = [unit[1] for unit in gold['select'][1]]
    return res_map(p_res, p_val_units) == res_map(q_res, q_val_units)
|
def rebuild_cond_unit_val(cond_unit):
    """Strip literal values from a condition unit when DISABLE_VALUE is set,
    recursing into nested SQL operands."""
    if cond_unit is None or not DISABLE_VALUE:
        return cond_unit
    not_op, op_id, val_unit, val1, val2 = cond_unit
    # Nested sub-queries are rebuilt recursively; plain literals are dropped.
    val1 = rebuild_sql_val(val1) if type(val1) is dict else None
    val2 = rebuild_sql_val(val2) if type(val2) is dict else None
    return (not_op, op_id, val_unit, val1, val2)
|
def rebuild_condition_val(condition):
    """Strip values from every cond_unit in a condition list; odd slots are
    the AND/OR connectors and are kept verbatim."""
    if condition is None or not DISABLE_VALUE:
        return condition
    return [rebuild_cond_unit_val(item) if idx % 2 == 0 else item
            for idx, item in enumerate(condition)]
|
def rebuild_sql_val(sql):
    """Recursively strip literal values from every condition of a parsed SQL
    dict when DISABLE_VALUE is set; otherwise a no-op. Mutates in place."""
    if sql is None or not DISABLE_VALUE:
        return sql
    sql['from']['conds'] = rebuild_condition_val(sql['from']['conds'])
    sql['having'] = rebuild_condition_val(sql['having'])
    sql['where'] = rebuild_condition_val(sql['where'])
    for clause in ('intersect', 'except', 'union'):
        sql[clause] = rebuild_sql_val(sql[clause])
    return sql
|
def build_valid_col_units(table_units, schema):
    """Collect the schema column ids that belong to the tables referenced in
    table_units (only 'table_unit' entries, not sub-query table units)."""
    table_ids = [tu[1] for tu in table_units if tu[0] == TABLE_TYPE['table_unit']]
    # Table ids look like '__table__'; dropping the trailing '__' gives the
    # prefix that column ids ('__table.col__') start with.
    prefixes = [tid[:-2] for tid in table_ids]
    return [value for value in schema.idMap.values()
            if '.' in value and value[:value.index('.')] in prefixes]
|
# NOTE(review): the three lines below are webpage-extraction residue
# (dataset-viewer page chrome), not Python code; kept as comments so the
# module parses. Safe to delete.
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.