Datasets:

Languages:
English
ArXiv:
License:
File size: 6,928 Bytes
116cc62
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
from sqlglot.optimizer.qualify import qualify
from sqlglot.optimizer.normalize import normalize
from sqlglot.optimizer import optimize
from sqlglot import exp
from sqlglot import parse_one, diff
from sqlglot.errors import ParseError, OptimizeError
import sqlite3
import threading
from ortools.linear_solver import pywraplp
from sqlglot.diff import Insert, Remove, Update, Move, Keep
from sqlglot.expressions import Alias, TableAlias
from utils import SQLITE_DB_DIR


def get_bird_db(db_name):
    """Return the absolute path to the BIRD SQLite file for *db_name*."""
    db_file = SQLITE_DB_DIR / db_name / f"{db_name}.sqlite"
    return db_file.resolve()

class QueryThread(threading.Thread):
    """Run one SQL query against a SQLite database on a worker thread.

    Exists so callers can impose a timeout via ``Thread.join(timeout)``;
    sqlite3 has no native per-query timeout. After the thread finishes,
    ``self.results`` holds the fetched rows (list of tuples) on success or
    the string ``'Failed to run query'`` on failure.
    """

    def __init__(self, query, db_str):
        threading.Thread.__init__(self)
        self.db_str = db_str
        self.query = query
        self.results = 'Failed to run query'
        # Daemon thread: a query that outlives its join() timeout must not
        # keep the interpreter alive at shutdown.
        self.daemon = True

    def run(self):
        conn = sqlite3.connect(self.db_str)
        try:
            cursor = conn.cursor()
            try:
                cursor.execute(self.query)
                self.results = cursor.fetchall()
            except (sqlite3.OperationalError, sqlite3.ProgrammingError):
                self.results = "Failed to run query"
        finally:
            # Always release the connection — the original leaked it when an
            # exception outside the caught types escaped run().
            conn.close()

def execute_sql(query_str, db_str):
    """Execute *query_str* against the BIRD database named *db_str*.

    Returns a set of row tuples with every cell stringified on success, or
    the string ``"Failed to run query"`` on error or timeout.
    """
    TIMEOUT_SECONDS = 45.0
    try:
        db_location = get_bird_db(db_str)
        # a bit of a hack - using a thread to run queries so that i can set a timeout.
        thread = QueryThread(query_str, db_location)
        thread.start()
        thread.join(TIMEOUT_SECONDS)
        if thread.is_alive():
            # Timed out; the daemon thread is abandoned.
            return "Failed to run query"
        if isinstance(thread.results, str):
            # BUG FIX: the worker signals failure with a sentinel string.
            # The original iterated over that string here, returning a set of
            # single-character tuples instead of the failure sentinel.
            return "Failed to run query"
        generated_result = set(tuple(str(c) for c in r) for r in thread.results)
    except (sqlite3.OperationalError, sqlite3.ProgrammingError):
        generated_result = "Failed to run query"
    return generated_result


def normalize_commutative_ordering(expression):
    """Give commutative nodes (Add, SafeMultiply, Select) a canonical operand order.

    The sort key is each operand's rendered SQL text — the resulting order is
    arbitrary but deterministic, which is all downstream tree diffs need.
    Mutates *expression* in place and returns it.
    """
    normalize_types = {exp.Add, exp.SafeMultiply, exp.Select}

    for node in expression.walk(bfs=True):
        if type(node) not in normalize_types:
            continue
        left = node.this
        right = node.args.get('expression')
        # BUG FIX: nodes without a second operand (e.g. a Select whose
        # 'expression' arg is None) previously made .sort() raise on every
        # visit and the error was silently swallowed; skip them explicitly.
        if left is None or right is None:
            continue
        # this sorting isn't so proper/meaningful, we just want _some_ order
        # to be present to make comparisons easier.
        try:
            first, second = sorted((left, right), key=lambda op: op.sql())
        except Exception:
            # Best-effort: some subtrees cannot be rendered back to SQL;
            # leave their operand order untouched.
            continue
        node.set(arg_key='this', value=first)
        node.set(arg_key='expression', value=second)
    return expression


def remove_table_aliases(sql):
    """Strip table aliases from a sqlglot tree and re-qualify columns.

    Mutates *sql* in place and returns it: every table's alias is removed,
    and columns that were qualified by an alias are re-pointed at the
    underlying table.
    """
    # remove aliases from tables, assign any columns referring to table aliases with direct references to tables
    tab_aliases = {}
    for table_exp in sql.find_all(exp.Table):
        if table_exp.alias:
            tab_aliases[table_exp.alias] = table_exp
    for node in sql.walk():
        if isinstance(node, exp.Table) and node.alias:
            node.set("alias", None)
        elif isinstance(node, exp.Column) and node.table:
            # NOTE(review): this nests the aliased Table expression inside a
            # fresh exp.Table used as the column qualifier. A Column's
            # 'table' arg is normally an Identifier, not a Table — presumably
            # sqlglot still renders this as the bare table name; confirm
            # against sqlglot's Column/Table expression docs.
            if node.table in tab_aliases:
                node.args['table'] = exp.Table(this=tab_aliases[node.table], quoted=node.args['table'].quoted)

    # re-order any content where the order shouldn't matter

    return sql


def cleanup_query(query, schema):
    """Parse, qualify and normalize *query* into a canonical sqlglot tree.

    Backtick quotes are converted to double quotes first. Returns the
    cleaned expression tree, or None when parsing/optimization fails.
    """
    sanitized = query.replace("`", "\"")
    try:
        parsed = parse_one(sanitized)
    except ParseError:
        return None
    try:
        try:
            qualified = qualify(parsed, dialect='sqlite', schema=schema)
        except OptimizeError:
            # Retry without schema information before giving up entirely.
            qualified = qualify(parsed, dialect='sqlite')
    except OptimizeError:
        return None
    optimized = optimize(normalize(qualified, dnf=True))

    cleaned = remove_table_aliases(optimized)
    return normalize_commutative_ordering(cleaned)


def get_nonalias_diff(q1, q2, schema):
    """Count tree-diff edits between two SQL queries, ignoring alias-only edits.

    Both queries are canonicalized via cleanup_query first. Returns a pair
    (non_alias_edit_count, total_edit_count), or (None, None) when either
    query cannot be cleaned up.
    """
    try:
        clean_q1 = cleanup_query(q1, schema)
        clean_q2 = cleanup_query(q2, schema)
    except Exception:
        return None, None
    if clean_q1 is None or clean_q2 is None:
        return None, None

    def is_alias(node):
        # Alias machinery shouldn't count toward the "real" difference.
        return isinstance(node, (Alias, TableAlias))

    totalcount = 0
    diffcount = 0
    for edit in diff(clean_q1, clean_q2):
        totalcount += 1
        if isinstance(edit, Keep):
            continue
        if isinstance(edit, (Remove, Insert)):
            if not is_alias(edit.expression):
                diffcount += 1
        elif isinstance(edit, (Update, Move)):
            # there is some edge case where we get an update but the source
            # and target are equivalent? if so just ignore.
            if edit.source == edit.target:
                continue
            if not is_alias(edit.source):
                diffcount += 1
            elif not is_alias(edit.target):
                diffcount += 1
    return diffcount, totalcount


def sql_diff_sim(q1, q2, schema):
    """Similarity score in [0, 1] between two SQL queries, ignoring alias edits.

    Returns None when the queries cannot be compared (parse/optimize failure)
    or when the diff is empty, which would make the ratio undefined.
    """
    # Renamed from `diff`/`tot`: `diff` shadowed the imported sqlglot.diff.
    changed, total = get_nonalias_diff(q1, q2, schema)
    # BUG FIX: guard total == 0, which previously raised ZeroDivisionError.
    if changed is None or total is None or total == 0:
        return None
    return 1 - (changed / total)

def solve_optimal_mapping(score_matrix, debug_solver: bool = False):
    """Find a maximum-score one-to-one matching between GT rows and PRED columns.

    *score_matrix* is indexed [gt][pred]. Solved as a 0/1 integer program:
    each GT row is assigned to at most one PRED column and vice versa, and
    the summed assignment scores are maximized.

    Returns a dict {gt_index: pred_index} containing only matched pairs.
    """
    # score matrix should be GT x PRED
    if not score_matrix:
        # Nothing to assign; avoid indexing score_matrix[0] below.
        return {}
    solver = pywraplp.Solver.CreateSolver("SCIP")

    var_list = []
    objective_terms = []
    # One bucket of variables per PRED column, for the per-column constraint.
    p_list = [[] for _ in range(len(score_matrix[0]))]
    for i in range(len(score_matrix)):
        c_list = []
        for j, scoreval in enumerate(score_matrix[i]):
            new_var = solver.IntVar(0, 1, "")
            var_list.append((new_var, i, j))
            c_list.append(new_var)
            p_list[j].append(new_var)
            objective_terms.append(new_var * scoreval)
        # Each GT row matches at most one PRED column.
        solver.Add(sum(c_list) <= 1)
    for pl in p_list:
        # Each PRED column matches at most one GT row.
        solver.Add(sum(pl) <= 1)
    solver.Maximize(sum(objective_terms))

    status = solver.Solve()

    if debug_solver:
        if status == pywraplp.Solver.INFEASIBLE:
            print("INFEASIBLE")
        elif status == pywraplp.Solver.FEASIBLE:
            print("FEASIBLE")

    soln_gt_to_pred = {}
    for var, gt_ind, pred_ind in var_list:
        # Compare against 0.5 rather than == 1: MIP solvers report solution
        # values as floats that may not be exactly 1.0 (per OR-Tools
        # assignment examples).
        if var.solution_value() > 0.5:
            soln_gt_to_pred[gt_ind] = pred_ind
    return soln_gt_to_pred