code
stringlengths
17
6.64M
def _get_string_replacement(tok: Token) -> List[Token]:
    """Generate corrupted variants of a quoted string-literal token.

    For single-quoted or Symbol string tokens, produce values that keep
    or drop each inner span between the quotes, plus variants with '%'
    wildcards added/removed at either end.  Other token types yield [].
    """
    result = []
    if ((tok.ttype == tokens.Token.Literal.String.Symbol) or (tok.ttype == tokens.Token.Literal.String.Single)):
        v = tok.value
        # The empty literal: just the two quote characters.
        result.append((v[0] + v[(- 1)]))
        (start, end) = (1, (len(v) - 1))
        for span_start in range(start, end):
            for span_end in range((span_start + 1), end):
                # Keep only v[span_start:span_end] between the quotes.
                v_new = ((v[0] + v[span_start:span_end]) + v[(- 1)])
                result.append(v_new)
                # Drop v[span_start:span_end] from the literal.
                v_new = (v[:span_start] + v[span_end:])
                result.append(v_new)
        # NOTE(review): v is rebound here, so the `v_new != v` filter
        # below compares against the '%'-stripped value, not the
        # original token value — confirm this is intended.
        v = v.replace('%', '')
        # All four combinations of leading/trailing '%' wildcards.
        for add_percent_last in ('%', ''):
            for add_percent_first in ('%', ''):
                result.append(((((v[0] + add_percent_first) + v[1:(- 1)]) + add_percent_last) + v[(- 1)]))
    # For non-string tokens result is empty, so `v` is never evaluated.
    result = [Token(tok.ttype, v_new) for v_new in set(result) if (v_new != v)]
    return result
def get_possible_replacement(tok: Token) -> List[Token]:
    """All single-token replacements for *tok*: same-family swaps
    (operators, ordering keywords, column names) plus numeric/string
    literal perturbations."""
    possible_replacement = []
    # Swap tok for any sibling of the operator family (or column set) it belongs to.
    for family in [AGG_OP, CMP_OP, ORDER, NUM_OP, LOGICAL_OP, LIKE_OP, IN_OP, column_names]:
        possible_replacement += _other_toks_same_family(tok, family)
    # Literal perturbations; each helper returns [] for non-matching token types.
    for get_family_function in [_get_int_replacement, _get_float_replacement, _get_string_replacement]:
        possible_replacement += get_family_function(tok)
    return possible_replacement
def span_droppable(span: List[Token]) -> bool:
    """Decide whether deleting *span* yields a meaningful query variant.

    Rejects spans that are pure whitespace, or whose removal is a no-op
    or trivially breaks syntax: a lone ASC (default order), '*', 'AS',
    ';', a 'T1 .' alias prefix, or 'as result'.
    """
    span = [tok for tok in span if (tok.ttype != Whitespace)]
    if (len(span) == 0):
        return False
    if (len(span) == 1):
        # ASC is the default sort order — dropping it changes nothing.
        if ((span[0].ttype == Keyword.Order) and (span[0].value.lower() == 'asc')):
            return False
        if (span[0].ttype == Wildcard):
            return False
        if ((span[0].ttype == Keyword) and (span[0].value.lower() == 'as')):
            return False
        if ((span[0].ttype == Punctuation) and (span[0].value == ';')):
            return False
    if (len(span) == 2):
        # 'T1 .' — half of a qualified column reference.
        if ((span[0].ttype == Name) and (span[1].ttype == Punctuation) and (span[1].value == '.')):
            return False
        if ((span[0].value.lower() == 'as') and (span[1].value.lower() == 'result')):
            return False
    return True
def drop_any_span(toks: List[Token]) -> Set[str]:
    """Every query string obtainable by deleting one contiguous droppable span."""
    num_toks = len(toks)
    all_s = set()
    for span_start in range(num_toks):
        # span_end is exclusive; spans cover every contiguous sub-list.
        for span_end in range((span_start + 1), (num_toks + 1)):
            removed_tok = toks[span_start:span_end]
            if span_droppable(removed_tok):
                toks_left = (toks[:span_start] + toks[span_end:])
                q = join_tokens(toks_left)
                all_s.add(q)
    return all_s
def rm_count_in_column(s):
    """Normalize every COUNT(...) call in *s* to COUNT(), case-insensitively."""
    pattern = 'COUNT\\s?\\((.*?)\\)'
    return re.sub(pattern, 'COUNT()', s, flags=re.IGNORECASE)
def equivalent_count(s1, s2):
    """True when the two queries match after stripping outer whitespace
    and erasing COUNT(...) arguments."""
    normalized_1 = rm_count_in_column(s1.strip())
    normalized_2 = rm_count_in_column(s2.strip())
    return (normalized_1 == normalized_2)
def generate_neighbor_queries_path(sqlite_path: str, query: str) -> List[str]:
    """Generate executable 'neighbor' variants of *query* for the db at
    *sqlite_path*: all single-span deletions plus all single-token
    replacements, filtered to those that still execute and are not
    trivially equivalent to the original.
    """
    (table2column2properties, _) = extract_table_column_properties_path(sqlite_path)
    column_names = set([column_name for table_name in table2column2properties for column_name in table2column2properties[table_name]])
    sql_toks = tokenize(query)
    spans_dropped = drop_any_span(sql_toks)
    tok_replaced = set()
    for (idx, tok) in enumerate(sql_toks):
        if (tok.ttype != tokens.Whitespace):
            replacement_toks = get_possible_replacement(tok, column_names)
            for corrupted_tok in replacement_toks:
                # Splice the corrupted token in place of the original.
                perturbed_q = join_tokens(((sql_toks[:idx] + [corrupted_tok]) + sql_toks[(idx + 1):]))
                tok_replaced.add(perturbed_q)
    # Validate candidates against an empty clone of the database so
    # data contents cannot mask schema/syntax errors.
    empty_path = init_empty_db_from_orig_(sqlite_path)
    all_neighbor_queries = (spans_dropped | tok_replaced)
    all_neighbor_queries = [q for q in all_neighbor_queries if can_execute_path(empty_path, q)]
    results = []
    os.unlink(empty_path)
    for neighbor_query in all_neighbor_queries:
        # Discard the original query and COUNT-argument-only variants.
        if ((neighbor_query == query) or equivalent_count(neighbor_query, query)):
            continue
        results.append(neighbor_query)
    return results
def isint(x):
    """Return True when int(x) succeeds, False otherwise.

    Fix: the original used a bare ``except:`` which would also swallow
    KeyboardInterrupt/SystemExit; only conversion failures (ValueError,
    TypeError) mean "not an int" — anything else should propagate.
    """
    try:
        int(x)
        return True
    except (ValueError, TypeError):
        return False
def tofloat(x):
    """Parse *x* as a float; return None when it is not float-like.

    Only ValueError is swallowed (non-numeric strings); a TypeError
    from e.g. None still propagates, exactly as before.
    """
    try:
        parsed = float(x)
    except ValueError:
        return None
    return parsed
def perturb(x):
    """Randomly nudge integer *x* by +1, -1, +2 or -2, or leave it unchanged.

    Each offset is tried in turn with an independent random draw, so the
    effective probabilities are sequential rather than uniform.
    """
    for threshold, delta in ((1.0 / 6, 1), (1.0 / 5, -1), (1.0 / 4, 2), (1.0 / 3, -2)):
        if random.random() < threshold:
            return x + delta
    return x
def perturb_float(x):
    """Randomly nudge float *x* by ±0.01 or ±0.02, or leave it unchanged.

    Mirrors perturb() but with centesimal offsets; the draws are
    sequential, so probabilities compound rather than being uniform.
    """
    for threshold, delta in ((1.0 / 6, 0.01), (1.0 / 5, -0.01), (1.0 / 4, 0.02), (1.0 / 3, -0.02)):
        if random.random() < threshold:
            return x + delta
    return x
class NumberFuzzer(BaseFuzzer):
    """Fuzzer for numeric columns.

    Samples either from the observed elements or uniformly from a range,
    then applies a small random perturbation to the drawn value.
    """

    def __init__(self, elements, p=0.5, max_l0=float('inf'), scale=10, unsigned=False, is_int=False, precision=0):
        super(NumberFuzzer, self).__init__(elements, p, max_l0)
        # Keep only the elements parseable as floats.
        self.elements = [x for x in [tofloat(x) for x in self.elements] if (x is not None)]
        self.scale = scale
        # Global sampling range: (-10**scale, 10**scale), open via epsilon.
        (self.min, self.max) = (((- (10 ** scale)) + epsilon), ((10 ** scale) - epsilon))
        if unsigned:
            self.min = 0
            self.max *= 2
        # Range spanned by the observed elements (fallback: the global range).
        if (len(self.elements) != 0):
            (self.list_min, self.list_max) = (min(self.elements), (max(self.elements) + 1))
        else:
            (self.list_min, self.list_max) = (self.min, self.max)
        self.valid_sample_from_elements = ((self.list_min > (- MAX)) and (self.list_max < MAX))
        # Clamp all bounds into [-MAX, MAX].
        self.min = max(self.min, (- MAX))
        self.max = min(self.max, MAX)
        self.list_min = max(self.list_min, (- MAX))
        # Fix: clamp list_max itself — the original wrote
        # `min(self.max, MAX)`, re-clamping self.max and leaving
        # list_max possibly outside [-MAX, MAX].
        self.list_max = min(self.list_max, MAX)
        self.is_int = is_int
        self.precision = precision

    def one_sample(self):
        """Draw one fuzzed number (int when is_int, else float)."""
        if ((len(self.elements) == 0) or (random.random() < self.p) or (not self.valid_sample_from_elements)):
            # Uniform draw, from the global or the element-derived range.
            (start, end) = ((self.min, self.max) if ((random.random() < self.p) or (len(self.elements) <= 1)) else (self.list_min, self.list_max))
            if (len(self.rand_elements) <= self.max_l0):
                result = ((random.random() * (end - start)) + start)
                self.rand_elements.append(result)
            else:
                result = random.choice(self.rand_elements)
        else:
            result = random.choice(self.elements)
        if self.is_int:
            return perturb(int(result))
        else:
            return perturb_float(result)
class SemesterFuzzer(BaseFuzzer):
    """Fuzzer that ignores the observed values and returns a random season name."""

    def __init__(self, elements, p):
        super(SemesterFuzzer, self).__init__(elements, p)
        self.semesters = ['Fall', 'Winter', 'Spring', 'Summer']

    def one_sample(self):
        # Uniform over the four seasons; `elements` and `p` are unused here.
        return random.choice(self.semesters)
class AdvisingTimeFuzzer(BaseFuzzer):
    """Fuzzer for time-of-day strings: observed value with prob (1 - p),
    otherwise a random time of day."""

    def rand_time_in_day(self):
        # `start` is a module-level datetime not visible in this chunk;
        # a random fraction of one day is added to it.
        # NOTE(review): presumably `start` is a midnight anchor so the
        # result spans a full day — confirm at the definition site.
        randtime = (start + (random.random() * timedelta(days=1)))
        s = randtime.strftime('%H-%M-%S')
        return s

    def __init__(self, elements, p):
        super(AdvisingTimeFuzzer, self).__init__(elements, p)

    def one_sample(self):
        if (random.random() > self.p):
            return random.choice(self.elements)
        else:
            return self.rand_time_in_day()
def represents_int(s):
    """True when *s* can be parsed by int(), i.e. looks like an integer."""
    try:
        int(s)
    except ValueError:
        return False
    return True
def rand_string(length: int) -> str:
    """Build a random string of *length* characters drawn (with
    replacement) from the module-level CHARSET."""
    picks = (random.choice(CHARSET) for _ in range(length))
    return ''.join(picks)
def contaminate(s: str) -> str:
    """Corrupt *s* one of five ways, each with probability 0.2:
    append 5 random chars, prepend 5, surround with 3+3, drop the last
    char, or drop the first char."""
    roll = random.random()
    if roll < 0.2:
        return s + rand_string(5)
    elif roll < 0.4:
        return rand_string(5) + s
    elif roll < 0.6:
        return rand_string(3) + s + rand_string(3)
    elif roll < 0.8:
        return s[:-1]
    else:
        return s[1:]
class StringFuzzer(BaseFuzzer):
    """Fuzzer for string columns.

    Samples from the observed values (augmented with '%'-stripped and
    off-by-one numeric variants) or from random strings, and randomly
    contaminates half of all outputs.
    """

    def __init__(self, elements, p, max_l0, length=20):
        super(StringFuzzer, self).__init__(elements, p, max_l0)
        # 10% of fuzzer instances also permit None (decided once per instance).
        self.allow_none = (random.random() < 0.1)
        self.length = length
        self.elements = [str(e) for e in self.elements]
        # NOTE(review): appends while iterating self.elements — terminates
        # only because the appended variants contain no '%' and so are
        # never re-expanded.
        for e in self.elements:
            if ('%' in e):
                self.elements.append(e.replace('%', ''))
        # Add off-by-one neighbors for integer-like values.
        for s in set(self.elements):
            if represents_int(s):
                i = int(s)
                self.elements.append(str((i + 1)))
                self.elements.append(str((i - 1)))
        all_lengths = [len(e) for e in self.elements]
        if (len(all_lengths) != 0):
            (self.min_length, self.max_length) = (min(all_lengths), (max(all_lengths) + 1))
        else:
            (self.min_length, self.max_length) = (0, 20)

    def one_sample(self):
        if (random.random() < self.p):
            # Random-string branch, capped at max_l0 distinct random values.
            if (len(self.rand_elements) <= self.max_l0):
                length = random.randint(self.min_length, self.max_length)
                r = rand_string(length)
                self.rand_elements.append(r)
                result = r
            else:
                result = random.choice(self.rand_elements)
        else:
            result = random.choice(self.elements)
        # Half of all outputs are further corrupted.
        if (random.random() < 0.5):
            return contaminate(result)
        else:
            return result
def random_time():
    """One random time string: a random pick per field, colon-joined.

    `space` is a module-level list (not visible in this chunk) holding
    the valid string choices for each time field — presumably
    hours/minutes/seconds; verify at its definition.
    """
    return ':'.join([random.choice(l) for l in space])
def perturb(t):
    """Decrement one random field of an 'H:M:S'-style time string.

    NOTE: shadows the numeric perturb() defined earlier in this module.
    NOTE(review): when the chosen field is 0 the decrement yields -1,
    which indexes the LAST entry of that field's `space` list (Python
    negative indexing) — confirm this wrap-around is intended.
    """
    nums = [int(x) for x in t.split(':')]
    change_digit = random.randint(0, 2)
    nums[change_digit] = (nums[change_digit] - 1)
    # `space` is the module-level list of valid strings per field.
    return ':'.join([space[i][nums[i]] for i in range(3)])
class TimeFuzzer(BaseFuzzer):
    """Fuzzer for time strings: an observed value (possibly perturbed)
    or a completely random time."""

    def __init__(self, elements, p=0.5, max_l0=float('inf')):
        super(TimeFuzzer, self).__init__(elements, p, max_l0)
        self.elements = elements

    def one_sample(self):
        if ((random.random() > self.p) and (len(self.elements) != 0)):
            t = random.choice(self.elements)
            # With prob p, nudge one field of the sampled time.
            if (random.random() < self.p):
                return perturb(t)
            else:
                return t
        else:
            return random_time()
def get_values(db_name: str) -> Set[str]:
    """Load the pickled value set for *db_name* from its values.pkl file.

    Fix: use a context manager so the file handle is closed promptly —
    the original passed an anonymous open() to pkl.load and left the
    handle for the garbage collector.
    """
    with open(get_value_path(db_name), 'rb') as f:
        values = pkl.load(f)
    return values
def get_schema_path(sqlite_path: str, table_name: str) -> str:
    """Return the CREATE TABLE DDL text for *table_name* via table_schema_query."""
    (_, schema) = exec_db_path_(sqlite_path, (table_schema_query % table_name))
    # First row, first column of the result set holds the DDL string.
    schema = schema[0][0]
    return schema
def get_unique_keys(schema: str) -> Set[str]:
    """Collect column names flagged UNIQUE in a CREATE TABLE dump.

    Scans line by line; for any line containing 'unique'
    (case-insensitive), the first whitespace-separated token is taken as
    the column name, upper-cased, with quoting characters stripped.
    """
    unique_keys = set()
    for line in schema.split('\n'):
        if 'unique' not in line.lower():
            continue
        name = line.strip().split()[0]
        unique_keys.add(name.upper().replace('"', '').replace('`', ''))
    return unique_keys
def get_checked_keys(schema: str) -> Set[str]:
    """Collect column names that carry a CHECK constraint in a schema dump.

    Fix: CHECK detection is now case-insensitive, matching how
    get_unique_keys handles UNIQUE — the original matched only
    lower-case 'check (' / 'check(' and silently missed upper-case
    CHECK constraints.
    """
    schema_by_list = schema.split('\n')
    checked_keys = set()
    for r in schema_by_list:
        lowered = r.lower()
        if (('check (' in lowered) or ('check(' in lowered)):
            checked_keys.add(r.strip().split()[0].upper().replace('"', '').replace('`', ''))
    return checked_keys
def get_table_names_path(sqlite_path: str) -> List[str]:
    """List all table names in the SQLite database at *sqlite_path*."""
    # exec_db_path_ returns (result_type, rows); each row's first cell is a name.
    table_names = [x[0] for x in exec_db_path_(sqlite_path, table_name_query)[1]]
    return table_names
def extract_table_column_properties_path(sqlite_path: str) -> Tuple[(Dict[(str, Dict[(str, Any)])], Dict[(Tuple[(str, str)], Tuple[(str, str)])])]:
    """Read per-table column properties and foreign-key links from a SQLite db.

    Returns (table_name -> column_name -> property dict,
             (child_table, child_col) -> (parent_table, parent_col)).
    Table names are lower-cased and column names upper-cased throughout.

    Fix: the third assert's message was a string literal broken across
    two source lines (a syntax error); it is restored to one line.
    """
    table_names = get_table_names_path(sqlite_path)
    table_name2column_properties = OrderedDict()
    child2parent = OrderedDict()
    for table_name in table_names:
        schema = get_schema_path(sqlite_path, table_name)
        (unique_keys, checked_keys) = (get_unique_keys(schema), get_checked_keys(schema))
        table_name = table_name.lower()
        column_properties = OrderedDict()
        (result_type, result) = exec_db_path_(sqlite_path, (column_type_query % table_name))
        for (columnID, column_name, columnType, columnNotNull, columnDefault, columnPK) in result:
            column_name = column_name.upper()
            column_properties[column_name] = {'ID': columnID, 'name': column_name, 'type': columnType, 'notnull': columnNotNull, 'default': columnDefault, 'PK': columnPK, 'unique': (column_name in unique_keys), 'checked': (column_name in checked_keys)}
        table_name2column_properties[table_name.lower()] = column_properties
        (result_type, result) = exec_db_path_(sqlite_path, (foreign_key_query % table_name))
        for (keyid, column_seq_id, other_tab_name, this_column_name, other_column_name, on_update, on_delete, match) in result:
            if (other_column_name is None):
                # A NULL referenced column means "same name as the child column".
                other_column_name = this_column_name
            (table_name, other_tab_name) = (table_name.lower(), other_tab_name.lower())
            (this_column_name, other_column_name) = (this_column_name.upper(), other_column_name.upper())
            if ((other_tab_name == 'author') and (other_column_name == 'IDAUTHORA')):
                # Hard-coded workaround for a known typo in one dataset schema.
                other_column_name = 'IDAUTHOR'
            child2parent[(table_name, this_column_name)] = (other_tab_name, other_column_name)
    # Sanity-check naming conventions and FK referential integrity.
    dep_table_columns = (set(child2parent.keys()) | set(child2parent.values()))
    for (table_name, column_name) in dep_table_columns:
        assert (table_name.lower() == table_name), 'table name should be lower case'
        assert (column_name.upper() == column_name), 'column name should be upper case'
        assert (table_name in table_name2column_properties), ('table name %s missing.' % table_name)
        assert (column_name in table_name2column_properties[table_name]), ('column name %s should be present in table %s' % (column_name, table_name))
    return (table_name2column_properties, child2parent)
def collapse_key(d: Dict[(str, Dict[(str, T)])] ) -> Dict[(Tuple[(str, str)], T)]:
    """Flatten a two-level dict into one keyed by (outer_key, inner_key)."""
    flattened = OrderedDict()
    for outer, inner_dict in d.items():
        for inner, value in inner_dict.items():
            flattened[(outer, inner)] = value
    return flattened
def process_order_helper(dep: Dict[(E, Set[E])], all: Set[E]) -> List[Set[E]]:
    """Topologically level-order the universe *all* given child->parents deps.

    Elements that appear in no dependency edge are merged into the first
    level.  NOTE: the parameter name `all` shadows the builtin; kept for
    interface compatibility.
    """
    dep_ks = set(dep.keys())
    for k in dep.values():
        dep_ks |= set(k)
    # Every element mentioned in deps must belong to the universe.
    assert (len((dep_ks - all)) == 0), (dep_ks - all)
    order = list(my_top_sort({k: v for (k, v) in dep.items()}))
    if (len(order) == 0):
        order.append(set())
    # Dependency-free elements join the first level.
    for k in all:
        if (k not in dep_ks):
            order[0].add(k)
    # Sanity check: the levels must partition exactly the universe.
    s = set()
    for o in order:
        s |= set(o)
    assert (len(s) == len(all)), ((s - all), (all - s))
    return order
def my_top_sort(dep: Dict[(E, Set[E])]) -> List[Set[E]]:
    """Kahn-style topological sort into levels.

    Returns a list of sets; every element of a set has all its parents in
    earlier sets.  Does not detect cycles (a cyclic input loops forever,
    matching the original behavior).
    """
    remaining = set()
    for child, parents in dep.items():
        remaining.add(child)
        remaining |= parents
    levels = []
    while remaining:
        ready = {e for e in remaining if (e not in dep.keys()) or all((p not in remaining) for p in dep[e])}
        remaining -= ready
        levels.append(ready)
    return levels
def get_process_order(child2parent: Dict[(Tuple[(str, str)], Tuple[(str, str)])], table_column_properties: Dict[(Tuple[(str, str)], Dict[(str, Any)])]) -> Tuple[(List[Set[Tuple[(str, str)]]], List[Set[str]])]:
    """Compute dependency-respecting processing orders.

    Returns (levels of (table, column) pairs, levels of table names),
    each ordered parents-before-children per the FK map.
    """
    all_table_column = set(table_column_properties.keys())
    # Each child column depends on its single FK parent column.
    dep_child2parent = {c: {p} for (c, p) in child2parent.items()}
    table_column_order = process_order_helper(dep_child2parent, all_table_column)
    # Project column-level deps down to table-level deps.
    all_table = set([k[0] for k in all_table_column])
    table_child2parent = defaultdict(set)
    for (k1, k2) in child2parent.items():
        table_child2parent[k1[0]].add(k2[0])
    table_order = process_order_helper(table_child2parent, all_table)
    return (table_column_order, table_order)
def get_all_db_info_path(sqlite_path: str) -> Tuple[(Dict[(Tuple[(str, str)], Dict[(str, Any)])], Dict[(Tuple[(str, str)], Tuple[(str, str)])], Dict[(Tuple[(str, str)], List)])]:
    """Gather column properties, foreign keys and full contents of a db.

    Returns ((table, column) -> properties, child -> parent FK map,
    (table, column) -> list of cell values in row order).
    """
    (table_name2column_properties, child2parent) = extract_table_column_properties_path(sqlite_path)
    table_name2content = OrderedDict()
    for table_name in table_name2column_properties:
        (result_type, result) = exec_db_path_(sqlite_path, (select_all_query % table_name))
        if (result_type == 'exception'):
            raise result
        table_name2content[table_name] = result
    # Pivot the row-major content into per-column element lists.
    table_name2column_name2elements = OrderedDict()
    for table_name in table_name2column_properties:
        (column_properties, content) = (table_name2column_properties[table_name], table_name2content[table_name])
        table_name2column_name2elements[table_name] = OrderedDict(((column_name, []) for column_name in column_properties))
        if (len(content) > 0):
            # Row width must match the column-property count.
            assert (len(content[0]) == len(column_properties))
        for row in content:
            for (column_name, element) in zip(column_properties, row):
                table_name2column_name2elements[table_name][column_name].append(element)
    return (collapse_key(table_name2column_properties), child2parent, collapse_key(table_name2column_name2elements))
def get_table_size(table_column_elements: Dict[(Tuple[(str, str)], List)]) -> Dict[(str, int)]:
    """Map each table name to its row count.

    The count is the length of the first column list seen for that table
    (all columns of a table share one length).
    """
    sizes = OrderedDict()
    for (table_name, _column), elements in table_column_elements.items():
        if table_name not in sizes:
            sizes[table_name] = len(elements)
    return sizes
def get_primary_keys(table_column_properties: Dict[(Tuple[(str, str)], Dict[(str, Any)])]) -> Dict[(str, List[str])]:
    """List primary-key column names per table, in insertion order.

    Every table gets an entry, even when it has no PK columns.
    """
    pks = OrderedDict()
    for (table_name, column_name), props in table_column_properties.items():
        pks.setdefault(table_name, [])
        # PRAGMA-style PK flag: nonzero means part of the primary key.
        if props['PK'] != 0:
            pks[table_name].append(column_name)
    return pks
def get_indexing_from_db(db_path: str, shuffle=True) -> Dict[(str, List[Dict[(str, Any)]])]:
    """Load every table as a list of {column_name: value} row dicts.

    Rows are shuffled in place per table when *shuffle* is True.
    """
    (table_column_properties, _, _) = get_all_db_info_path(db_path)
    all_tables_names = {t_c[0] for t_c in table_column_properties}
    table_name2indexes = {}
    for table_name in all_tables_names:
        column_names = [t_c[1] for t_c in table_column_properties if (t_c[0] == table_name)]
        # Quote identifiers so reserved words / odd names still select cleanly.
        selection_query = (((('select ' + ', '.join([('"%s"' % c) for c in column_names])) + ' from "') + table_name) + '";')
        retrieved_results = exec_db_path_(db_path, selection_query)[1]
        table_name2indexes[table_name] = [{name: e for (name, e) in zip(column_names, row)} for row in retrieved_results]
        if shuffle:
            random.shuffle(table_name2indexes[table_name])
    return table_name2indexes
def print_table(table_name, column_names, rows):
    """Pretty-print one table: a header of column names, then each row,
    with every cell left-aligned in a 20-character field."""
    print('table:', table_name)
    template = ' '.join(['{:20}'] * len(column_names))
    print(template.format(*column_names))
    for row in rows:
        cells = [str(cell) for cell in row]
        print(template.format(*cells))
def database_pprint(path):
    """Print every table of the database at *path* via print_table."""
    (tc2_, _, _) = get_all_db_info_path(path)
    table_column_names = [tc for tc in tc2_.keys()]
    table_names = {t_c[0] for t_c in table_column_names}
    for table_name in table_names:
        column_names = [c for (t, c) in table_column_names if (t == table_name)]
        # Fetch each column separately, then transpose into rows.
        elements_by_column = []
        for column_name in column_names:
            (_, elements) = exec_db_path_(path, 'select {column_name} from {table_name}'.format(column_name=column_name, table_name=table_name))
            elements_by_column.append([e[0] for e in elements])
        rows = [row for row in zip(*elements_by_column)]
        print_table(table_name, column_names, rows)
def get_total_size_from_indexes(table_name2indexes: Dict[(str, List[Dict[(str, Any)]])]) -> int:
    """Total number of rows across all tables."""
    return sum(len(rows) for rows in table_name2indexes.values())
def get_total_size_from_path(path):
    """Total row count across all tables of the database at *path*."""
    (_, _, table_column2elements) = get_all_db_info_path(path)
    return sum([v for (_, v) in get_table_size(table_column2elements).items()])
def get_db_path(db_name: str, testcase_name: Union[(str, None)]=None) -> str:
    """Path to <DB_DIR>/<db_name>/<stem>.sqlite, where <stem> is the
    testcase name when given, otherwise the db name itself."""
    stem = db_name if testcase_name is None else testcase_name
    return os.path.join(DB_DIR, db_name, stem + '.sqlite')
def get_all_dbnames() -> Set[str]:
    """Names of all database directories directly under DB_DIR."""
    return set(os.listdir(DB_DIR))
def get_value_path(db_name: str) -> str:
    """Path to the pickled values file of *db_name*."""
    parts = (DB_DIR, db_name, 'values.pkl')
    return os.path.join(*parts)
def get_skipped_dbnames() -> Set[str]:
    """Databases deliberately excluded from processing."""
    skipped = ('baseball_1', 'imdb', 'restaurants')
    return set(skipped)
def orig2test(orig_db_path: str, testcase_name: str) -> str:
    """Derive a testcase db path from the original path: drop every
    '.sqlite', append the testcase name, re-append '.sqlite'."""
    stem = orig_db_path.replace('.sqlite', '')
    return stem + testcase_name + '.sqlite'
def permute_tuple(element: Tuple, perm: Tuple) -> Tuple:
    """Reorder *element* so that position i holds element[perm[i]]."""
    assert len(element) == len(perm)
    return tuple(element[j] for j in perm)
def unorder_row(row: Tuple) -> Tuple:
    """Canonicalize a row by sorting its cells on (str(value), str(type)),
    so mixed-type rows get a stable, comparable ordering."""
    def sort_key(cell):
        return str(cell) + str(type(cell))
    return tuple(sorted(row, key=sort_key))
def quick_rej(result1: List[Tuple], result2: List[Tuple], order_matters: bool) -> bool:
    """Cheap necessary condition for result-set equality under column
    permutation: compare rows after cell-level canonicalization."""
    canon1 = [unorder_row(row) for row in result1]
    canon2 = [unorder_row(row) for row in result2]
    if order_matters:
        return canon1 == canon2
    return set(canon1) == set(canon2)
def multiset_eq(l1: List, l2: List) -> bool:
    """True when the two lists hold the same elements with the same
    multiplicities (order-insensitive)."""
    if len(l1) != len(l2):
        return False
    counts = defaultdict(int)
    for item in l1:
        counts[item] += 1
    for item in l2:
        counts[item] -= 1
        # More occurrences in l2 than in l1 — cannot be equal.
        if counts[item] < 0:
            return False
    return True
def get_constraint_permutation(tab1_sets_by_columns: List[Set], result2: List[Tuple]):
    """Candidate column mappings from result1's columns onto result2's.

    For <= 3 columns, every mapping (with repetition) is yielded; for
    wider results, 20 random rows of result2 prune target columns whose
    values never appear in the corresponding result1 column.
    """
    num_cols = len(result2[0])
    perm_constraints = [{i for i in range(num_cols)} for _ in range(num_cols)]
    if (num_cols <= 3):
        return product(*perm_constraints)
    for _ in range(20):
        random_tab2_row = random.choice(result2)
        for tab1_col in range(num_cols):
            # Iterate over a copy: the constraint set is mutated inside.
            for tab2_col in set(perm_constraints[tab1_col]):
                if (random_tab2_row[tab2_col] not in tab1_sets_by_columns[tab1_col]):
                    perm_constraints[tab1_col].remove(tab2_col)
    return product(*perm_constraints)
def result_eq(result1: List[Tuple], result2: List[Tuple], order_matters: bool) -> bool:
    """Test whether two query result sets are equal up to a column permutation.

    Rows are compared as multisets unless *order_matters* is True.
    """
    if ((len(result1) == 0) and (len(result2) == 0)):
        return True
    if (len(result1) != len(result2)):
        return False
    num_cols = len(result1[0])
    if (len(result2[0]) != num_cols):
        return False
    # Cheap necessary condition before trying column permutations.
    if (not quick_rej(result1, result2, order_matters)):
        return False
    tab1_sets_by_columns = [{row[i] for row in result1} for i in range(num_cols)]
    for perm in get_constraint_permutation(tab1_sets_by_columns, result2):
        # Skip mappings that reuse a target column.
        if (len(perm) != len(set(perm))):
            continue
        if (num_cols == 1):
            result2_perm = result2
        else:
            result2_perm = [permute_tuple(element, perm) for element in result2]
        if order_matters:
            if (result1 == result2_perm):
                return True
        # Set equality first (fast), then multiset equality for duplicates.
        elif ((set(result1) == set(result2_perm)) and multiset_eq(result1, result2_perm)):
            return True
    return False
def load_sql_file(f_path: str) -> List[str]:
    """Read a SQL file into a list: one entry per non-blank line, keeping
    only the text before the first tab."""
    with open(f_path, 'r') as in_file:
        raw_lines = list(in_file.readlines())
    queries = []
    for line in raw_lines:
        stripped = line.strip()
        if stripped:
            queries.append(stripped.split('\t')[0])
    return queries
def tokenize(query: str) -> List[Token]:
    """Flatten *query* into a list of detached sqlparse Tokens.

    Copies (ttype, value) into fresh Token objects so the list can be
    spliced/edited without referencing the original parse tree.
    """
    tokens = list([Token(t.ttype, t.value) for t in sqlparse.parse(query)[0].flatten()])
    return tokens
def join_tokens(tokens: List[Token]) -> str:
    """Concatenate token values back into a single query string.

    Fix: collapse the double spaces left behind when a token is deleted
    from between two whitespace tokens — the original called
    ``.replace(' ', ' ')``, a no-op replacing one space with one space.
    """
    return ''.join([x.value for x in tokens]).strip().replace('  ', ' ')
def round_trip_test(query: str) -> None:
    """Assert that tokenize() loses no characters of *query*."""
    tokens = tokenize(query)
    reconstructed = ''.join([token.value for token in tokens])
    assert (query == reconstructed), ('Round trip test fails for string %s' % query)
def postprocess(query: str) -> str:
    """Re-join comparison operators that tokenization split with a space."""
    for broken, fixed in (('> =', '>='), ('< =', '<='), ('! =', '!=')):
        query = query.replace(broken, fixed)
    return query
def strip_query(query: str) -> Tuple[(List[str], List[str])]:
    """Mask literal values in *query* with VALUE_NUM_SYMBOL.

    Returns (lower-cased query tokens with values masked, the extracted
    values in discovery order: strings, then floats, then ints).

    Fix: removed a dead bare-string expression (commented-out code kept
    as a string literal) that sketched an older regex-based extraction.
    """
    (query_keywords, all_values) = ([], [])
    # String literals, found via the SQL tokenizer rather than regex.
    toks = sqlparse.parse(query)[0].flatten()
    values = [t.value for t in toks if ((t.ttype == sqlparse.tokens.Literal.String.Single) or (t.ttype == sqlparse.tokens.Literal.String.Symbol))]
    for val in values:
        all_values.append(val)
        query = query.replace(val.strip(), VALUE_NUM_SYMBOL)
    query_tokenized = query.split()
    # Float literals.
    float_nums = re.findall('[-+]?\\d*\\.\\d+', query)
    all_values += [qt for qt in query_tokenized if (qt in float_nums)]
    query_tokenized = [(VALUE_NUM_SYMBOL if (qt in float_nums) else qt) for qt in query_tokenized]
    query = ' '.join(query_tokenized)
    # Integer literals; '[^tT]\d+' skips T1/T2 alias digits but also
    # captures the char preceding the number, hence the strip().
    int_nums = [i.strip() for i in re.findall('[^tT]\\d+', query)]
    all_values += [qt for qt in query_tokenized if (qt in int_nums)]
    query_tokenized = [(VALUE_NUM_SYMBOL if (qt in int_nums) else qt) for qt in query_tokenized]
    for tok in query_tokenized:
        if ('.' in tok):
            # Split 'T1.col' into 't1 . col'; leave other dotted tokens whole.
            table = re.findall('[Tt]\\d+\\.', tok)
            if (len(table) > 0):
                to = tok.replace('.', ' . ').split()
                to = [t.lower() for t in to if (len(t) > 0)]
                query_keywords.extend(to)
            else:
                query_keywords.append(tok.lower())
        elif (len(tok) > 0):
            query_keywords.append(tok.lower())
    return (query_keywords, all_values)
def reformat_query(query: str) -> str:
    """Canonicalize *query*: strip semicolons/tabs, single-space all
    tokens, and rewrite alias wildcards like 'T1.*' to '*'."""
    query = query.strip().replace(';', '').replace('\t', '')
    query = ' '.join([t.value for t in tokenize(query) if (t.ttype != sqlparse.tokens.Whitespace)])
    # Only aliases T1..T3 are handled here.
    t_stars = ['t1.*', 't2.*', 't3.*', 'T1.*', 'T2.*', 'T3.*']
    for ts in t_stars:
        query = query.replace(ts, '*')
    return query
def replace_values(sql: str) -> Tuple[(List[str], Set[str])]:
    """Upper-case keywords, tighten alias spacing, then mask values via
    strip_query(); returns (masked token list, set of extracted values)."""
    sql = sqlparse.format(sql, reindent=False, keyword_case='upper')
    # Remove the space sqlparse leaves after alias dots: 'T1. col' -> 'T1.col'.
    sql = re.sub('(T\\d+\\.)\\s', '\\1', sql)
    (query_toks_no_value, values) = strip_query(sql)
    return (query_toks_no_value, set(values))
def extract_query_values(sql: str) -> Tuple[(List[str], Set[str])]:
    """Normalize *sql* and split it into value-masked tokens plus the set
    of values that were extracted."""
    normalized = reformat_query(query=sql)
    return replace_values(normalized)
def plugin(query_value_replaced: List[str], values_in_order: List[str]) -> str:
    """Substitute values back into a masked token list and join to a string.

    The number of VALUE_NUM_SYMBOL slots must equal len(values_in_order).
    """
    filled = query_value_replaced[:]
    slots = [i for i, tok in enumerate(filled) if tok == VALUE_NUM_SYMBOL.lower()]
    assert len(slots) == len(values_in_order)
    for slot, value in zip(slots, values_in_order):
        filled[slot] = value
    return ' '.join(filled)
def plugin_all_permutations(query_value_replaced: List[str], values: Set[str]) -> Iterator[str]:
    """Yield the query with every possible assignment of *values* to its
    masked slots (Cartesian product, values may repeat across slots).

    Fix: the loop no longer rebinds the ``values`` parameter — the
    original shadowed it with each product tuple, which only worked
    because the product iterable happened to be built up front.
    """
    num_slots = len([v for v in query_value_replaced if (v == VALUE_NUM_SYMBOL.lower())])
    value_list = list(values)
    for combo in itertools.product(*[value_list for _ in range(num_slots)]):
        yield plugin(query_value_replaced, list(combo))
def get_all_preds_for_execution(gold: str, pred: str) -> Tuple[(int, Iterator[str])]:
    """Pair the number of value-assignment alternatives with a lazy
    iterator over *pred* filled with every combination of the gold
    query's extracted values."""
    (_, gold_values) = extract_query_values(gold)
    (pred_query_value_replaced, _) = extract_query_values(pred)
    num_slots = len([v for v in pred_query_value_replaced if (v == VALUE_NUM_SYMBOL.lower())])
    # Each slot independently takes any gold value.
    num_alternatives = (len(gold_values) ** num_slots)
    return (num_alternatives, plugin_all_permutations(pred_query_value_replaced, gold_values))
def remove_distinct(s):
    """Delete every DISTINCT keyword (any case) from query string *s*."""
    toks = [t.value for t in list(sqlparse.parse(s)[0].flatten())]
    return ''.join([t for t in toks if (t.lower() != 'distinct')])
def extract_all_comparison_from_node(node: Token) -> List[Comparison]:
    """Recursively collect every sqlparse Comparison under *node*,
    including *node* itself when it is one."""
    comparison_list = []
    if hasattr(node, 'tokens'):
        for t in node.tokens:
            comparison_list.extend(extract_all_comparison_from_node(t))
    if (type(node) == Comparison):
        comparison_list.append(node)
    return comparison_list
def extract_all_comparison(query: str) -> List[Comparison]:
    """Parse *query* and return every Comparison node in its tree."""
    tree = sqlparse.parse(query)[0]
    comparison_list = extract_all_comparison_from_node(tree)
    return comparison_list
def extract_toks_from_comparison(comparison_node: Comparison) -> List[Token]:
    """The comparison node's child tokens with whitespace removed."""
    tokens = [t for t in comparison_node.tokens if (t.ttype != Whitespace)]
    return tokens
def extract_info_from_comparison(comparison_node: Comparison) -> Dict[(str, Any)]:
    """Summarize a comparison as {'left', 'op', 'right'}; when the left
    side is a plain (table.)column and the right side a literal, also
    add {'table_col': (table, COLUMN), 'val': cleaned value}."""
    tokens = extract_toks_from_comparison(comparison_node)
    # A comparison has exactly left-hand side, operator, right-hand side.
    (left, op, right) = tokens
    returned_dict = {'left': left, 'op': op.value, 'right': right}
    if (type(left) != Identifier):
        return returned_dict
    table = None
    # 'tab.col' yields 3 child tokens; skip T1/T2-style alias prefixes.
    if ((len(left.tokens) == 3) and (re.match('^[tT][0-9]$', left.tokens[0].value) is None)):
        table = left.tokens[0].value.lower()
    col = left.tokens[(- 1)].value
    if (type(right) == Identifier):
        # Only a single plain token counts as a literal value.
        if ((len(right.tokens) == 1) and (type(right.tokens[0]) == sqlparse.sql.Token)):
            right_val = right.tokens[0].value
        else:
            return returned_dict
    elif (type(right) == sqlparse.sql.Token):
        right_val = right.value
    else:
        return returned_dict
    # rm_placeholder undoes the keyword-protection suffix added upstream.
    (returned_dict['table_col'], returned_dict['val']) = ((rm_placeholder(table), rm_placeholder(col.upper())), rm_placeholder(process_str_value(right_val)))
    return returned_dict
def extract_all_comparison_from_query(query: str) -> List[Dict[(str, Any)]]:
    """Parse *query* and summarize every comparison node it contains."""
    nodes = extract_all_comparison(query)
    return [extract_info_from_comparison(node) for node in nodes]
def rm_placeholder(s: Union[(str, None)]) -> Union[(str, None)]:
    """Strip the 'placeholderrare' marker (any case) from *s*.

    None passes through unchanged.
    """
    if s is None:
        return None
    return re.sub('placeholderrare', '', s, flags=re.IGNORECASE)
def typed_values_in_tuples(query):
    """Extract ((None, COLUMN), value) pairs from 'col IN (v1, v2, ...)'
    clauses, skipping IN (SELECT ...) subqueries.

    Relies on the module-level regex `in_tuple_pattern` (not visible in
    this chunk); group[0] is the column, group[1] the tuple body.
    """
    groups = in_tuple_pattern.findall(query)
    typed_values = []
    for group in groups:
        if ('SELECT' in group[1].upper()):
            continue
        tab_col = (None, rm_placeholder(group[0].upper()))
        vals = [x.strip().replace('"', '') for x in group[1].split(',')]
        for val in vals:
            typed_values.append((tab_col, val))
    return typed_values
def extract_typed_value_in_comparison_from_query(query: str) -> List[Tuple[(Tuple[(Union[(str, None)], str)], str)]]:
    """Extract ((table, COLUMN), value) pairs from comparisons, IN-tuples
    and BETWEEN clauses of *query*.

    Column-like keywords (year/number/length) are temporarily suffixed
    with a rare placeholder so the SQL parser treats them as identifiers.

    Fix: the BETWEEN regex read '(?:not )between', which REQUIRED the
    word NOT and so never matched a plain BETWEEN clause; the group is
    now optional ('(?:not )?between').
    """
    query = re.sub('\\byear\\b', 'yearplaceholderrare', query, flags=re.IGNORECASE)
    query = re.sub('\\bnumber\\b', 'numberplaceholderrare', query, flags=re.IGNORECASE)
    query = re.sub('\\blength\\b', 'lengthplaceholderrare', query, flags=re.IGNORECASE)
    cmps = extract_all_comparison_from_query(query)
    typed_values = [(cmp['table_col'], cmp['val']) for cmp in cmps if ('table_col' in cmp)]
    typed_values.extend(typed_values_in_tuples(query))
    for (table, col, val1, val2) in re.findall('(?:([^\\.\\s]*)\\.)?([^\\.\\s]+) (?:not )?between ([^\\s;]+) and ([^\\s;]+)', query, re.IGNORECASE):
        if (table == ''):
            table = None
        else:
            table = table.lower()
        col = col.upper()
        for v in [val1, val2]:
            typed_values.append(((table, col), v))
    # Drop T1/T2-style table aliases — they are not real table names.
    typed_values = [(((t if ((t is None) or (not re.match('^T\\d+$', t, flags=re.IGNORECASE))) else None), c), v) for ((t, c), v) in typed_values]
    return typed_values
def process_str_value(v: str) -> str:
    """Strip one leading and one trailing quote character and unescape
    doubled quote characters inside the value."""
    if v and v[0] in QUOTE_CHARS:
        v = v[1:]
    if v and v[-1] in QUOTE_CHARS:
        v = v[:-1]
    for quote in QUOTE_CHARS:
        v = v.replace(quote + quote, quote)
    return v
def parse_type_in_schema(schema_type: str):
    """Split an SQL type like 'varchar(20)' into ('varchar', [20]).

    Returns (lower-cased type body, list of integer parameters).

    Fix: the parenthesized branch now lower-cases the type body to match
    the parameterless branch — the original returned e.g. 'VARCHAR' for
    'VARCHAR(20)' but 'varchar' for 'VARCHAR'.
    """
    if '(' in schema_type:
        open_idx = schema_type.index('(')
        close_idx = schema_type.index(')')
        body = schema_type[:open_idx].strip().lower()
        parameter_str = schema_type[(open_idx + 1):close_idx]
        arguments = [int(x) for x in parameter_str.split(',')]
    else:
        body = schema_type.strip().lower()
        arguments = []
    return (body, arguments)
class Schema():
    """Simple schema which maps table & table.column names to unique
    '__name__' style identifiers (all keys lower-cased)."""

    def __init__(self, schema):
        self._schema = schema
        self._idMap = self._map(self._schema)

    @property
    def schema(self):
        return self._schema

    @property
    def idMap(self):
        return self._idMap

    def _map(self, schema):
        """Build the identifier map from {table: [columns]}.

        Fix: removed a dead `id` counter that shadowed the builtin —
        it was incremented twice per entry but never read.
        """
        idMap = {'*': '__all__'}
        for (key, vals) in schema.items():
            for val in vals:
                idMap[((key.lower() + '.') + val.lower())] = (((('__' + key.lower()) + '.') + val.lower()) + '__')
        for key in schema:
            idMap[key.lower()] = (('__' + key.lower()) + '__')
        return idMap
def get_schema(db):
    """Get a database's schema as a dict with table name as key and a
    list of column names as value (all lower-cased).

    :param db: database path
    :return: schema dict

    Fix: the original never closed the sqlite3 connection; it is now
    released in a finally block.
    """
    schema = {}
    conn = sqlite3.connect(db)
    try:
        cursor = conn.cursor()
        cursor.execute("SELECT name FROM sqlite_master WHERE type='table';")
        tables = [str(table[0].lower()) for table in cursor.fetchall()]
        for table in tables:
            # PRAGMA table_info: column name is the second field of each row.
            cursor.execute('PRAGMA table_info({})'.format(table))
            schema[table] = [str(col[1].lower()) for col in cursor.fetchall()]
    finally:
        conn.close()
    return schema
def get_schema_from_json(fpath):
    """Load a {table: [column, ...]} schema (lower-cased) from a JSON file.

    Expects a list of entries with 'table' and 'col_data' keys, each
    col_data item carrying a 'column_name'.
    """
    with open(fpath) as f:
        data = json.load(f)
    schema = {}
    for entry in data:
        table_name = str(entry['table'].lower())
        schema[table_name] = [str(col['column_name'].lower()) for col in entry['col_data']]
    return schema
def tokenize(string):
    """SQL-aware word tokenizer (NLTK-based).

    Protects quoted values behind placeholders, lower-cases everything
    else, then re-merges comparison operators split by the tokenizer
    ('! =' -> '!=').  NOTE: shadows the sqlparse-based tokenize()
    defined earlier in this module.
    """
    string = str(string)
    string = string.replace("'", '"')
    quote_idxs = [idx for (idx, char) in enumerate(string) if (char == '"')]
    assert ((len(quote_idxs) % 2) == 0), 'Unexpected quote'
    vals = {}
    # Replace each quoted span with a unique placeholder key, walking
    # right to left so earlier indices stay valid.
    for i in range((len(quote_idxs) - 1), (- 1), (- 2)):
        qidx1 = quote_idxs[(i - 1)]
        qidx2 = quote_idxs[i]
        val = string[qidx1:(qidx2 + 1)]
        key = '__val_{}_{}__'.format(qidx1, qidx2)
        string = ((string[:qidx1] + key) + string[(qidx2 + 1):])
        vals[key] = val
    toks = [word.lower() for word in word_tokenize(string)]
    # Restore the original quoted values.
    for i in range(len(toks)):
        if (toks[i] in vals):
            toks[i] = vals[toks[i]]
    # Merge '!', '>', '<' with a following '=' into a single token,
    # right to left so merges don't invalidate the remaining indices.
    eq_idxs = [idx for (idx, tok) in enumerate(toks) if (tok == '=')]
    eq_idxs.reverse()
    prefix = ('!', '>', '<')
    for eq_idx in eq_idxs:
        pre_tok = toks[(eq_idx - 1)]
        if (pre_tok in prefix):
            toks = ((toks[:(eq_idx - 1)] + [(pre_tok + '=')]) + toks[(eq_idx + 1):])
    return toks
def scan_alias(toks):
    """Scan for 'as' tokens and map each alias to the table name that
    precedes the 'as'."""
    alias = {}
    for idx, tok in enumerate(toks):
        if tok == 'as':
            alias[toks[(idx + 1)]] = toks[(idx - 1)]
    return alias
def get_tables_with_alias(schema, toks):
    """Alias->table map extended so every real table also maps to itself.

    Asserts that no table name was reused as an alias.
    """
    tables = scan_alias(toks)
    for table_name in schema:
        assert table_name not in tables, 'Alias {} has the same name in table'.format(table_name)
        tables[table_name] = table_name
    return tables
def parse_col(toks, start_idx, tables_with_alias, schema, default_tables=None):
    """Resolve the column token at start_idx to its schema id.

    :returns next idx, column id
    """
    tok = toks[start_idx]
    if (tok == '*'):
        return ((start_idx + 1), schema.idMap[tok])
    if ('.' in tok):
        # Qualified reference: alias.col -> table.col.
        (alias, col) = tok.split('.')
        key = ((tables_with_alias[alias] + '.') + col)
        return ((start_idx + 1), schema.idMap[key])
    assert ((default_tables is not None) and (len(default_tables) > 0)), 'Default tables should not be None or empty'
    # Unqualified column: search the in-scope tables for a match.
    for alias in default_tables:
        table = tables_with_alias[alias]
        if (tok in schema.schema[table]):
            key = ((table + '.') + tok)
            return ((start_idx + 1), schema.idMap[key])
    assert False, 'Error col: {}'.format(tok)
def parse_col_unit(toks, start_idx, tables_with_alias, schema, default_tables=None):
    """Parse '[(] [agg(] [distinct] col [)] [)]'.

    :returns next idx, (agg_op id, col_id, isDistinct)
    """
    idx = start_idx
    len_ = len(toks)
    isBlock = False
    isDistinct = False
    if (toks[idx] == '('):
        isBlock = True
        idx += 1
    if (toks[idx] in AGG_OPS):
        # Aggregation form: agg ( [distinct] col ).
        agg_id = AGG_OPS.index(toks[idx])
        idx += 1
        assert ((idx < len_) and (toks[idx] == '('))
        idx += 1
        if (toks[idx] == 'distinct'):
            idx += 1
            isDistinct = True
        (idx, col_id) = parse_col(toks, idx, tables_with_alias, schema, default_tables)
        assert ((idx < len_) and (toks[idx] == ')'))
        idx += 1
        return (idx, (agg_id, col_id, isDistinct))
    # Plain column (possibly distinct), no aggregation.
    if (toks[idx] == 'distinct'):
        idx += 1
        isDistinct = True
    agg_id = AGG_OPS.index('none')
    (idx, col_id) = parse_col(toks, idx, tables_with_alias, schema, default_tables)
    if isBlock:
        assert (toks[idx] == ')')
        idx += 1
    return (idx, (agg_id, col_id, isDistinct))
def parse_val_unit(toks, start_idx, tables_with_alias, schema, default_tables=None):
    """Parse a value unit: one col_unit, optionally combined with a
    second col_unit by a unit operator (+, -, *, /).

    :returns next idx, (unit_op, col_unit1, col_unit2)
    """
    idx = start_idx
    len_ = len(toks)
    isBlock = False
    if (toks[idx] == '('):
        isBlock = True
        idx += 1
    col_unit1 = None
    col_unit2 = None
    unit_op = UNIT_OPS.index('none')
    (idx, col_unit1) = parse_col_unit(toks, idx, tables_with_alias, schema, default_tables)
    if ((idx < len_) and (toks[idx] in UNIT_OPS)):
        unit_op = UNIT_OPS.index(toks[idx])
        idx += 1
        (idx, col_unit2) = parse_col_unit(toks, idx, tables_with_alias, schema, default_tables)
    if isBlock:
        assert (toks[idx] == ')')
        idx += 1
    return (idx, (unit_op, col_unit1, col_unit2))
def parse_table_unit(toks, start_idx, tables_with_alias, schema):
    """Parse one table reference in a FROM clause.

    An 'as alias' suffix (three tokens total) is consumed if present.

    :returns: (next idx, table id, table name)
    """
    idx = start_idx
    key = tables_with_alias[toks[idx]]
    # Skip 'name as alias' (3 tokens) or just 'name' (1 token).
    has_alias = idx + 1 < len(toks) and toks[idx + 1] == 'as'
    idx += 3 if has_alias else 1
    return idx, schema.idMap[key], key
def parse_value(toks, start_idx, tables_with_alias, schema, default_tables=None):
    """Parse a value: a nested SELECT, a quoted string literal, a number,
    or a column unit.

    :returns: (next idx, value) — the value is a parsed SQL dict, the raw
        string token, a float, or a col_unit tuple respectively.
    """
    idx = start_idx
    len_ = len(toks)
    isBlock = False
    if toks[idx] == '(':
        isBlock = True
        idx += 1

    if toks[idx] == 'select':
        # Nested subquery as the value.
        idx, val = parse_sql(toks, idx, tables_with_alias, schema)
    elif '"' in toks[idx]:
        # Quoted string literal: keep the raw token.
        val = toks[idx]
        idx += 1
    else:
        try:
            val = float(toks[idx])
            idx += 1
        # toks entries are strings, so a non-numeric token raises ValueError;
        # the original bare except also swallowed KeyboardInterrupt/SystemExit.
        except ValueError:
            # Not a number: the value is a column unit. Scan forward to the
            # end of the expression, then parse just that slice.
            end_idx = idx
            while (end_idx < len_ and toks[end_idx] != ',' and toks[end_idx] != ')'
                   and toks[end_idx] != 'and' and toks[end_idx] not in CLAUSE_KEYWORDS
                   and toks[end_idx] not in JOIN_KEYWORDS):
                end_idx += 1
            idx, val = parse_col_unit(toks[start_idx:end_idx], 0, tables_with_alias,
                                      schema, default_tables)
            idx = end_idx

    if isBlock:
        assert toks[idx] == ')'
        idx += 1
    return idx, val
def parse_condition(toks, start_idx, tables_with_alias, schema, default_tables=None):
    """Parse a condition list (the body of WHERE / HAVING / ON).

    :returns: (next idx, conds) where conds alternates condition tuples
        (not_op, op_id, val_unit, val1, val2) with 'and'/'or' connective
        tokens.
    """
    idx = start_idx
    len_ = len(toks)
    conds = []
    while (idx < len_):
        (idx, val_unit) = parse_val_unit(toks, idx, tables_with_alias, schema, default_tables)
        not_op = False
        if (toks[idx] == 'not'):
            not_op = True
            idx += 1
        assert ((idx < len_) and (toks[idx] in WHERE_OPS)), 'Error condition: idx: {}, tok: {}'.format(idx, toks[idx])
        op_id = WHERE_OPS.index(toks[idx])
        idx += 1
        val1 = val2 = None
        if (op_id == WHERE_OPS.index('between')):
            # BETWEEN takes two values joined by 'and'.
            (idx, val1) = parse_value(toks, idx, tables_with_alias, schema, default_tables)
            assert (toks[idx] == 'and')
            idx += 1
            (idx, val2) = parse_value(toks, idx, tables_with_alias, schema, default_tables)
        else:
            # Every other operator takes a single value.
            (idx, val1) = parse_value(toks, idx, tables_with_alias, schema, default_tables)
            val2 = None
        conds.append((not_op, op_id, val_unit, val1, val2))
        # Stop at a clause boundary, closing paren, semicolon, or join keyword.
        if ((idx < len_) and ((toks[idx] in CLAUSE_KEYWORDS) or (toks[idx] in (')', ';')) or (toks[idx] in JOIN_KEYWORDS))):
            break
        # Otherwise record the 'and'/'or' connective and continue.
        if ((idx < len_) and (toks[idx] in COND_OPS)):
            conds.append(toks[idx])
            idx += 1
    return (idx, conds)
def parse_select(toks, start_idx, tables_with_alias, schema, default_tables=None):
    """Parse the SELECT clause.

    :returns: (next idx, (isDistinct, val_units)) where each entry of
        val_units is (agg_id, val_unit).
    """
    idx = start_idx
    total = len(toks)
    assert toks[idx] == 'select', "'select' not found"
    idx += 1

    distinct = False
    if idx < total and toks[idx] == 'distinct':
        distinct = True
        idx += 1

    selections = []
    while idx < total and toks[idx] not in CLAUSE_KEYWORDS:
        # Optional aggregation wrapping the value unit.
        agg_id = AGG_OPS.index('none')
        if toks[idx] in AGG_OPS:
            agg_id = AGG_OPS.index(toks[idx])
            idx += 1
        idx, val_unit = parse_val_unit(toks, idx, tables_with_alias, schema, default_tables)
        selections.append((agg_id, val_unit))
        if idx < total and toks[idx] == ',':
            idx += 1
    return idx, (distinct, selections)
def parse_from(toks, start_idx, tables_with_alias, schema):
    """Parse the FROM clause.

    Assume in the from clause, all table units are combined with join.

    :returns: (next idx, table_units, conds, default_tables)
    """
    assert ('from' in toks[start_idx:]), "'from' not found"
    len_ = len(toks)
    idx = (toks.index('from', start_idx) + 1)
    default_tables = []
    table_units = []
    conds = []
    while (idx < len_):
        isBlock = False
        if (toks[idx] == '('):
            isBlock = True
            idx += 1
        if (toks[idx] == 'select'):
            # Subquery used as a table source.
            (idx, sql) = parse_sql(toks, idx, tables_with_alias, schema)
            table_units.append((TABLE_TYPE['sql'], sql))
        else:
            if ((idx < len_) and (toks[idx] == 'join')):
                idx += 1
            (idx, table_unit, table_name) = parse_table_unit(toks, idx, tables_with_alias, schema)
            table_units.append((TABLE_TYPE['table_unit'], table_unit))
            default_tables.append(table_name)
        if ((idx < len_) and (toks[idx] == 'on')):
            idx += 1
            # ON conditions from successive joins are ANDed together.
            (idx, this_conds) = parse_condition(toks, idx, tables_with_alias, schema, default_tables)
            if (len(conds) > 0):
                conds.append('and')
            conds.extend(this_conds)
        if isBlock:
            assert (toks[idx] == ')')
            idx += 1
        # Stop when the next clause (WHERE/GROUP/...) or a block end begins.
        if ((idx < len_) and ((toks[idx] in CLAUSE_KEYWORDS) or (toks[idx] in (')', ';')))):
            break
    return (idx, table_units, conds, default_tables)
def parse_where(toks, start_idx, tables_with_alias, schema, default_tables):
    """Parse an optional WHERE clause; returns (next idx, conds)."""
    idx = start_idx
    if idx >= len(toks) or toks[idx] != 'where':
        # No WHERE clause present.
        return idx, []
    return parse_condition(toks, idx + 1, tables_with_alias, schema, default_tables)
def parse_group_by(toks, start_idx, tables_with_alias, schema, default_tables):
    """Parse an optional GROUP BY clause; returns (next idx, col_units)."""
    idx = start_idx
    total = len(toks)
    col_units = []
    if idx >= total or toks[idx] != 'group':
        return idx, col_units
    idx += 1
    assert toks[idx] == 'by'
    idx += 1
    # Collect comma-separated column units until a clause boundary.
    while idx < total and toks[idx] not in CLAUSE_KEYWORDS and toks[idx] not in (')', ';'):
        idx, col_unit = parse_col_unit(toks, idx, tables_with_alias, schema, default_tables)
        col_units.append(col_unit)
        if idx < total and toks[idx] == ',':
            idx += 1
        else:
            break
    return idx, col_units
def parse_order_by(toks, start_idx, tables_with_alias, schema, default_tables):
    """Parse an optional ORDER BY clause.

    :returns: (next idx, (order_type, val_units)) when the clause exists;
        (next idx, []) when it does not (note the differing shape, which
        callers rely on).
    """
    idx = start_idx
    total = len(toks)
    val_units = []
    direction = 'asc'  # default sort order

    if idx >= total or toks[idx] != 'order':
        return idx, val_units
    idx += 1
    assert toks[idx] == 'by'
    idx += 1

    # Collect comma-separated value units, each optionally followed by
    # an asc/desc keyword (the last one seen wins).
    while idx < total and toks[idx] not in CLAUSE_KEYWORDS and toks[idx] not in (')', ';'):
        idx, val_unit = parse_val_unit(toks, idx, tables_with_alias, schema, default_tables)
        val_units.append(val_unit)
        if idx < total and toks[idx] in ORDER_OPS:
            direction = toks[idx]
            idx += 1
        if idx < total and toks[idx] == ',':
            idx += 1
        else:
            break
    return idx, (direction, val_units)
def parse_having(toks, start_idx, tables_with_alias, schema, default_tables):
    """Parse an optional HAVING clause; returns (next idx, conds)."""
    idx = start_idx
    if idx >= len(toks) or toks[idx] != 'having':
        # No HAVING clause present.
        return idx, []
    return parse_condition(toks, idx + 1, tables_with_alias, schema, default_tables)
def parse_limit(toks, start_idx):
    """Parse an optional LIMIT clause; returns (next idx, int limit or None)."""
    idx = start_idx
    if idx < len(toks) and toks[idx] == 'limit':
        # Consume 'limit' plus its numeric argument.
        return idx + 2, int(toks[idx + 1])
    return idx, None
def parse_sql(toks, start_idx, tables_with_alias, schema):
    """Parse a full (possibly parenthesized) SELECT statement.

    :returns: (next idx, sql) where sql is the Spider-format dict with
        keys 'from', 'select', 'where', 'groupBy', 'having', 'orderBy',
        'limit', plus one slot per set operation in SQL_OPS.
    """
    isBlock = False
    len_ = len(toks)
    idx = start_idx
    sql = {}
    if (toks[idx] == '('):
        isBlock = True
        idx += 1
    # Parse FROM first so default_tables is known when resolving the
    # (textually earlier) SELECT columns.
    (from_end_idx, table_units, conds, default_tables) = parse_from(toks, start_idx, tables_with_alias, schema)
    sql['from'] = {'table_units': table_units, 'conds': conds}
    (_, select_col_units) = parse_select(toks, idx, tables_with_alias, schema, default_tables)
    idx = from_end_idx  # resume scanning after the FROM clause
    sql['select'] = select_col_units
    (idx, where_conds) = parse_where(toks, idx, tables_with_alias, schema, default_tables)
    sql['where'] = where_conds
    (idx, group_col_units) = parse_group_by(toks, idx, tables_with_alias, schema, default_tables)
    sql['groupBy'] = group_col_units
    (idx, having_conds) = parse_having(toks, idx, tables_with_alias, schema, default_tables)
    sql['having'] = having_conds
    (idx, order_col_units) = parse_order_by(toks, idx, tables_with_alias, schema, default_tables)
    sql['orderBy'] = order_col_units
    (idx, limit_val) = parse_limit(toks, idx)
    sql['limit'] = limit_val
    idx = skip_semicolon(toks, idx)
    if isBlock:
        assert (toks[idx] == ')')
        idx += 1
    idx = skip_semicolon(toks, idx)
    # Default all set-operation slots, then parse a trailing
    # INTERSECT/UNION/EXCEPT statement if one follows.
    for op in SQL_OPS:
        sql[op] = None
    if ((idx < len_) and (toks[idx] in SQL_OPS)):
        sql_op = toks[idx]
        idx += 1
        (idx, IUE_sql) = parse_sql(toks, idx, tables_with_alias, schema)
        sql[sql_op] = IUE_sql
    return (idx, sql)
def load_data(fpath):
    """Read the JSON file at fpath and return the decoded object."""
    with open(fpath) as handle:
        return json.load(handle)
def get_sql(schema, query):
    """Tokenize a SQL query string and parse it into the Spider sql dict."""
    toks = tokenize(query)
    alias_map = get_tables_with_alias(schema.schema, toks)
    _, parsed = parse_sql(toks, 0, alias_map, schema)
    return parsed
def skip_semicolon(toks, start_idx):
    """Advance past any run of ';' tokens; return the first non-';' index."""
    idx = start_idx
    total = len(toks)
    while idx < total and toks[idx] == ';':
        idx += 1
    return idx
def get_cursor_path(sqlite_path: str):
    """Open (or create) the SQLite database at sqlite_path and return a cursor.

    The connection's text factory decodes bytes leniently (ignoring
    undecodable sequences) so malformed rows do not raise during reads.

    :raises: whatever sqlite3.connect raises, after logging the path.
    """
    try:
        if not os.path.exists(sqlite_path):
            # Fixed typo in the log message ("Openning" -> "Opening").
            print('Opening a new connection %s' % sqlite_path)
        connection = sqlite3.connect(sqlite_path)
    except Exception:
        # Log the offending path, then re-raise preserving the original
        # traceback (bare raise instead of `raise e`).
        print(sqlite_path)
        raise
    connection.text_factory = lambda b: b.decode(errors='ignore')
    cursor = connection.cursor()
    return cursor
def can_execute_path(sqlite_path: str, q: str) -> bool:
    """Return True iff query q executes successfully against the database."""
    flag, _ = exec_db_path_(sqlite_path, q)
    return flag == 'result'
def clean_tmp_f(f_prefix: str):
    """Delete the '<prefix>.in' / '<prefix>.out' temp files if they exist.

    Guarded by the module-level threadLock so concurrent workers do not
    race on the existence check + unlink.
    """
    with threadLock:
        for path in (f_prefix + '.in', f_prefix + '.out'):
            if os.path.exists(path):
                os.unlink(path)
def exec_db_path(sqlite_path: str, query: str, process_id: str='', timeout: int=TIMEOUT) -> Tuple[(str, Any)]:
    """Execute a query against a SQLite database in a separate subprocess.

    Communicates via pickle files '<prefix>.in' / '<prefix>.out' under
    EXEC_TMP_DIR; returns ('exception', err) on failure/timeout, otherwise
    whatever the subprocess pickled (presumably ('result', rows) —
    see sql_util/exec_subprocess.py to confirm).
    """
    f_prefix = None
    # Choose a unique temp-file prefix; the lock keeps concurrent threads
    # from picking the same one and clobbering each other's '.in' file.
    with threadLock:
        while ((f_prefix is None) or os.path.exists((f_prefix + '.in'))):
            process_id += str(time.time())
            process_id += str(random.randint(0, 10000000000))
            f_prefix = os.path.join(EXEC_TMP_DIR, process_id)
        # Hand the (db path, query) pair to the subprocess via pickle.
        pkl.dump((sqlite_path, query), open((f_prefix + '.in'), 'wb'))
    try:
        # Run in an isolated interpreter so a hang or crash cannot take down
        # this process; stderr is appended to runerr.log.
        subprocess.call(['python3', 'sql_util/exec_subprocess.py', f_prefix], timeout=timeout, stderr=open('runerr.log', 'a'))
    except Exception as e:
        clean_tmp_f(f_prefix)
        return ('exception', e)
    result_path = (f_prefix + '.out')
    # If the subprocess never produced a result file, report it as a timeout.
    returned_val = ('exception', TimeoutError)
    try:
        if os.path.exists(result_path):
            returned_val = pkl.load(open(result_path, 'rb'))
    except:
        # Deliberate best-effort: a corrupt/partial result file falls back
        # to the timeout sentinel above.
        pass
    clean_tmp_f(f_prefix)
    return returned_val