# dp-bench / evaluation_code / sql_comparison.py
# Author: faisalchow — "Evaluation codes" (commit 116cc62, verified)
# SQL-comparison evaluation: execution-match and structural-diff scoring
# for generated vs. ground-truth SQL queries (BIRD SQLite databases).
from sqlglot.optimizer.qualify import qualify
from sqlglot.optimizer.normalize import normalize
from sqlglot.optimizer import optimize
from sqlglot import exp
from sqlglot import parse_one, diff
from sqlglot.errors import ParseError, OptimizeError
import sqlite3
import threading
from ortools.linear_solver import pywraplp
from sqlglot.diff import Insert, Remove, Update, Move, Keep
from sqlglot.expressions import Alias, TableAlias
from utils import SQLITE_DB_DIR
def get_bird_db(db_name):
    """Resolve the absolute path of the BIRD SQLite file for *db_name*."""
    db_file = SQLITE_DB_DIR / db_name / f"{db_name}.sqlite"
    return db_file.resolve()
class QueryThread(threading.Thread):
    """Run a single SQL query against a SQLite database in a background thread.

    Running in a thread lets the caller enforce a wall-clock timeout via
    ``join(timeout)`` — sqlite3 itself has no per-query timeout.

    Attributes:
        results: list of fetched rows on success, or the sentinel string
            "Failed to run query" on any execution error.
    """

    def __init__(self, query, db_str):
        threading.Thread.__init__(self)
        self.db_str = db_str
        self.query = query
        self.results = 'Failed to run query'
        # Daemon thread so a still-running (timed-out) query does not block
        # interpreter shutdown.
        self.daemon = True

    def run(self):
        conn = sqlite3.connect(self.db_str)
        try:
            cursor = conn.cursor()
            try:
                cursor.execute(self.query)
                self.results = cursor.fetchall()
            except (sqlite3.OperationalError, sqlite3.ProgrammingError):
                self.results = "Failed to run query"
        finally:
            # Always release the connection, even on unexpected exceptions
            # (the original leaked it when an unanticipated error escaped).
            conn.close()
def execute_sql(query_str, db_str):
    """Execute *query_str* against the BIRD database *db_str* with a timeout.

    Returns a set of row tuples (every cell stringified) on success, or the
    string "Failed to run query" on error or timeout.
    """
    TIMEOUT_SECONDS = 45.0
    try:
        db_location = get_bird_db(db_str)
        # A bit of a hack: run the query in a daemon thread so that a
        # wall-clock timeout can be enforced with join().
        thread = QueryThread(query_str, db_location)
        thread.start()
        thread.join(TIMEOUT_SECONDS)
        if thread.is_alive():
            # Timed out; the daemon thread is abandoned.
            return "Failed to run query"
        if isinstance(thread.results, str):
            # The worker recorded its failure sentinel. (Previously this
            # string fell through to the set conversion below, which silently
            # turned it into a set of one-character tuples.)
            return "Failed to run query"
        return {tuple(str(c) for c in r) for r in thread.results}
    except (sqlite3.OperationalError, sqlite3.ProgrammingError):
        return "Failed to run query"
def normalize_commutative_ordering(expression):
    """Impose a canonical operand order on commutative sqlglot nodes.

    The ordering (lexicographic on the rendered SQL of each operand) is
    arbitrary but deterministic, which is all that is needed to make two
    equivalent queries compare equal under diff().

    Mutates *expression* in place and returns it.
    """
    normalize_types = {exp.Add, exp.SafeMultiply, exp.Select}
    for node in expression.walk(bfs=True):
        if type(node) in normalize_types:
            operands = [node.this, node.args.get('expression')]
            try:
                # Sort on rendered SQL; this can fail (e.g. a missing
                # 'expression' operand is None) — such nodes are left
                # in their original order.
                operands.sort(key=lambda x: x.sql())
                node.set(arg_key='this', value=operands[0])
                node.set(arg_key='expression', value=operands[1])
            except Exception:
                # sql() conversion failed or not appropriate for this node;
                # keep the existing ordering.
                pass
    return expression
def remove_table_aliases(sql):
    """Strip table aliases and rewrite alias-qualified column references to
    point at the underlying table directly.

    Mutates *sql* in place and returns it.
    """
    # First pass: remember which alias maps to which table expression.
    alias_to_table = {
        tbl.alias: tbl for tbl in sql.find_all(exp.Table) if tbl.alias
    }
    # Second pass: drop the aliases and retarget alias-qualified columns.
    for node in sql.walk():
        if isinstance(node, exp.Table):
            if node.alias:
                node.set("alias", None)
        elif isinstance(node, exp.Column) and node.table:
            target = alias_to_table.get(node.table)
            if target is not None:
                node.args['table'] = exp.Table(
                    this=target, quoted=node.args['table'].quoted
                )
    return sql
def cleanup_query(query, schema):
    """Parse, qualify, and canonicalize *query* for structural comparison.

    Backticks are rewritten to double quotes so MySQL-style quoting parses
    as standard identifiers. Returns a normalized sqlglot expression, or
    None when the query cannot be parsed or qualified.
    """
    query = query.replace("`", "\"")
    try:
        parsed = parse_one(query)
    except ParseError:
        return None
    try:
        try:
            qualified = qualify(parsed, dialect='sqlite', schema=schema)
        except OptimizeError:
            # Retry without the schema; some queries qualify fine bare.
            qualified = qualify(parsed, dialect='sqlite')
    except OptimizeError:
        return None
    canonical = optimize(normalize(qualified, dnf=True))
    canonical = remove_table_aliases(canonical)
    return normalize_commutative_ordering(canonical)
def get_nonalias_diff(q1, q2, schema):
    """Count structural diff edits between two queries, ignoring alias-only
    changes.

    Returns (non_alias_edit_count, total_edit_count), or (None, None) when
    either query fails to parse/clean up.
    """
    try:
        clean_q1 = cleanup_query(q1, schema)
        clean_q2 = cleanup_query(q2, schema)
    except Exception:
        return None, None
    if clean_q1 is None or clean_q2 is None:
        return None, None

    alias_types = (Alias, TableAlias)
    diffcount = 0
    totalcount = 0
    for edit in diff(clean_q1, clean_q2):
        totalcount += 1
        if isinstance(edit, Keep):
            continue
        if isinstance(edit, (Remove, Insert)):
            if not isinstance(edit.expression, alias_types):
                diffcount += 1
        elif isinstance(edit, (Update, Move)):
            # Edge case: an Update whose source equals its target is a
            # no-op — skip it entirely.
            if edit.source == edit.target:
                continue
            if not isinstance(edit.source, alias_types):
                diffcount += 1
            elif not isinstance(edit.target, alias_types):
                diffcount += 1
    return diffcount, totalcount
def sql_diff_sim(q1, q2, schema):
    """Similarity score in [0, 1] between two queries, based on the fraction
    of non-alias structural diff edits.

    Returns None when either query could not be parsed/cleaned.
    """
    # Local names renamed so they no longer shadow the imported sqlglot.diff.
    diff_count, total_count = get_nonalias_diff(q1, q2, schema)
    if diff_count is None or total_count is None:
        return None
    if total_count == 0:
        # No diff entries at all: treat the queries as identical
        # (the original raised ZeroDivisionError here).
        return 1.0
    return 1 - (diff_count / total_count)
def solve_optimal_mapping(score_matrix, debug_solver: bool = False):
    """Find the max-score one-to-one assignment between GT and PRED items.

    Args:
        score_matrix: rows are ground-truth items, columns are predictions;
            score_matrix[i][j] is the score for matching GT i with PRED j.
        debug_solver: print the solver status when True.

    Returns:
        dict mapping GT index -> assigned PRED index; unmatched GT rows are
        absent from the dict.
    """
    if not score_matrix or not score_matrix[0]:
        # Nothing to assign (the original raised IndexError on an empty matrix).
        return {}
    solver = pywraplp.Solver.CreateSolver("SCIP")
    var_list = []
    objective_terms = []
    # One bucket of decision variables per prediction column, so each
    # prediction can be capped at a single assignment.
    p_list = [[] for _ in range(len(score_matrix[0]))]
    for i, row in enumerate(score_matrix):
        c_list = []
        for j, scoreval in enumerate(row):
            new_var = solver.IntVar(0, 1, "")
            var_list.append((new_var, i, j))
            c_list.append(new_var)
            p_list[j].append(new_var)
            objective_terms.append(new_var * scoreval)
        # Each GT row matches at most one prediction.
        solver.Add(sum(c_list) <= 1)
    for pl in p_list:
        # Each prediction matches at most one GT row.
        solver.Add(sum(pl) <= 1)
    solver.Maximize(sum(objective_terms))
    status = solver.Solve()
    if debug_solver:
        if status == pywraplp.Solver.INFEASIBLE:
            print("INFEASIBLE")
        elif status == pywraplp.Solver.FEASIBLE:
            print("FEASIBLE")
    soln_gt_to_pred = {}
    for var, gt_ind, pred_ind in var_list:
        # MIP solution values are floats; use a tolerance rather than exact
        # equality with 1 (the original's `== 1` is fragile).
        if var.solution_value() > 0.5:
            soln_gt_to_pred[gt_ind] = pred_ind
    return soln_gt_to_pred