# Adaptive SQL Trainer - Domain Randomized with OpenAI (Gradio + SQLite)
# - OpenAI randomizes a domain and questions (fallback dataset if unavailable).
# - 3-4 related tables with seed rows installed in SQLite.
# - Students practice SELECT, WHERE, JOINs (INNER/LEFT), aliases, views, CTAS/SELECT INTO.
# - Validator enforces columns only when the prompt asks; otherwise focuses on rows.
# - ERD shows all FK edges in light gray and dynamically HIGHLIGHTS edges implied by JOINs.
import os
import re
import json
import time
import random
import sqlite3
import threading
from datetime import datetime, timezone
from typing import List, Dict, Any, Tuple, Optional, Set
import gradio as gr
import pandas as pd
# -------------------- OpenAI (optional) --------------------
OPENAI_AVAILABLE = True
DEFAULT_MODEL = os.getenv("OPENAI_MODEL") # optional override
try:
from openai import OpenAI
_client = OpenAI() # requires OPENAI_API_KEY
except Exception:
OPENAI_AVAILABLE = False
_client = None
def _candidate_models():
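    """Ordered, de-duplicated list of chat model names to try: env override first, then fallbacks."""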
base = [
(DEFAULT_MODEL or "").strip() or None,
"gpt-4o-mini",
"gpt-4o",
"gpt-4.1-mini",
]
seen = set()
return [m for m in base if m and (m not in seen and not seen.add(m))]
# -------------------- ERD drawing (headless) --------------------
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
from io import BytesIO
from PIL import Image
PLOT_FIGSIZE = (7.6, 3.8)
PLOT_DPI = 120
PLOT_HEIGHT = 300
def _fig_to_pil(fig) -> Image.Image:
buf = BytesIO()
fig.tight_layout()
fig.savefig(buf, format="png", dpi=PLOT_DPI, bbox_inches="tight")
plt.close(fig)
buf.seek(0)
return Image.open(buf)
def draw_dynamic_erd(
schema: Dict[str, Any],
highlight_tables: Optional[Set[str]] = None,
highlight_edges: Optional[Set[Tuple[str, str]]] = None,
) -> Image.Image:
"""
Draw tables + FK edges. If highlight_* provided, overlay those tables/edges in bold.
highlight_edges uses (src_table, dst_table) with dst_table = referenced table.
"""
highlight_tables = set(highlight_tables or [])
def _norm_edge(a, b): return tuple(sorted([a, b]))
H = set(_norm_edge(*e) for e in (highlight_edges or set()))
tables = schema.get("tables", [])
fig, ax = plt.subplots(figsize=PLOT_FIGSIZE); ax.axis("off")
if not tables:
ax.text(0.5, 0.5, "No tables to diagram.", ha="center", va="center")
return _fig_to_pil(fig)
n = len(tables)
margin = 0.03
width = (1 - margin * (n + 1)) / max(n, 1)
height = 0.70
y = 0.20
fk_edges = []
for t in tables:
for fk in t.get("fks", []) or []:
dst = fk.get("ref_table")
if dst:
fk_edges.append((t["name"], dst))
boxes: Dict[str, Tuple[float,float,float,float]] = {}
for i, t in enumerate(tables):
tx = margin + i * (width + margin)
boxes[t["name"]] = (tx, y, width, height)
lw = 2.0 if t["name"] in highlight_tables else 1.2
ax.add_patch(Rectangle((tx, y), width, height, fill=False, lw=lw))
ax.text(tx + 0.01, y + height - 0.04, t["name"], fontsize=10, ha="left", va="top", weight="bold")
yy = y + height - 0.09
pkset = set(t.get("pk", []) or [])
fk_map: Dict[str, List[Tuple[str, str]]] = {}
for fk in t.get("fks", []) or []:
ref_tbl = fk.get("ref_table", "")
for c, rc in zip(fk.get("columns", []) or [], fk.get("ref_columns", []) or []):
fk_map.setdefault(c, []).append((ref_tbl, rc))
for col in t.get("columns", []):
nm = col.get("name", "")
tag = ""
if nm in pkset:
tag = " (PK)"
if nm in fk_map:
ref = fk_map[nm][0]
tag = f" (FK→{ref[0]}.{ref[1]})" if not tag else tag.replace(")", f", FK→{ref[0]}.{ref[1]})")
ax.text(tx + 0.016, yy, f"{nm}{tag}", fontsize=9, ha="left", va="top")
yy -= 0.055
for (src, dst) in fk_edges:
if src not in boxes or dst not in boxes:
continue
(x1, y1, w1, h1) = boxes[src]
(x2, y2, w2, h2) = boxes[dst]
ax.annotate("",
xy=(x2 + w2/2.0, y2 + h2),
xytext=(x1 + w1/2.0, y1),
arrowprops=dict(arrowstyle="->", lw=1.0, color="#cccccc"))
for (src, dst) in fk_edges:
if _norm_edge(src, dst) in H:
(x1, y1, w1, h1) = boxes[src]
(x2, y2, w2, h2) = boxes[dst]
ax.annotate("",
xy=(x2 + w2/2.0, y2 + h2),
xytext=(x1 + w1/2.0, y1),
arrowprops=dict(arrowstyle="->", lw=2.6, color="#2b6cb0"))
ax.text(0.5, 0.06, f"Domain: {schema.get('domain','unknown')}", fontsize=9, ha="center")
return _fig_to_pil(fig)
JOIN_TBL_RE = re.compile(r"\b(?:from|join)\s+([a-z_]\w*)(?:\s+(?:as\s+)?([a-z_]\w*))?", re.IGNORECASE)
EQ_ON_RE = re.compile(r"([a-z_]\w*)\.[a-z_]\w*\s*=\s*([a-z_]\w*)\.[a-z_]\w*", re.IGNORECASE)
USING_RE = re.compile(r"\bjoin\s+([a-z_]\w*)(?:\s+(?:as\s+)?([a-z_]\w*))?\s+using\s*\(", re.IGNORECASE)
def sql_highlights(sql: str, schema: Dict[str, Any]) -> Tuple[Set[str], Set[Tuple[str, str]]]:
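    """Infer the tables and join edges referenced by the SQL text (for ERD highlighting); returns (tables, edges)."""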
if not sql:
return set(), set()
low = " ".join(sql.strip().split())
alias_to_table: Dict[str, str] = {}
join_order: List[str] = []
for m in JOIN_TBL_RE.finditer(low):
table = m.group(1)
alias = m.group(2) or table
alias_to_table[alias] = table
join_order.append(alias)
edges: Set[Tuple[str, str]] = set()
for a1, a2 in EQ_ON_RE.findall(low):
t1 = alias_to_table.get(a1, a1)
t2 = alias_to_table.get(a2, a2)
if t1 != t2:
edges.add((t1, t2))
if USING_RE.search(low) and len(join_order) >= 2:
for i in range(1, len(join_order)):
t_left = alias_to_table.get(join_order[i-1], join_order[i-1])
t_right = alias_to_table.get(join_order[i], join_order[i])
if t_left != t_right:
edges.add((t_left, t_right))
used_tables = {alias_to_table.get(a, a) for a in join_order}
schema_tables = {t["name"] for t in schema.get("tables", [])}
edges = { (a, b) for (a, b) in edges if a in schema_tables and b in schema_tables }
used_tables = { t for t in used_tables if t in schema_tables }
return used_tables, edges
# -------------------- SQLite + locking --------------------
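# Store the database under /data when that directory exists (e.g., persistent storage on a hosted Space); otherwise use the working directory.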
DB_DIR = "/data" if os.path.exists("/data") else "."
DB_PATH = os.path.join(DB_DIR, "sql_trainer_dynamic.db")
EXPORT_DIR = "."
RANDOM_SEED = int(os.getenv("RANDOM_SEED", "7"))
random.seed(RANDOM_SEED)
SYS_RAND = random.SystemRandom()
DB_LOCK = threading.RLock()
def connect_db():
con = sqlite3.connect(DB_PATH, check_same_thread=False)
con.execute("PRAGMA journal_mode=WAL;")
con.execute("PRAGMA synchronous=NORMAL;")
con.execute("PRAGMA foreign_keys = ON;")
return con
CONN = connect_db()
def init_progress_tables(con: sqlite3.Connection):
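    """Create the internal progress tables (users, attempts, session_meta) if they do not already exist."""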
with DB_LOCK:
cur = con.cursor()
cur.execute("""
CREATE TABLE IF NOT EXISTS users (
user_id TEXT PRIMARY KEY,
name TEXT,
created_at TEXT
)
""")
cur.execute("""
CREATE TABLE IF NOT EXISTS attempts (
id INTEGER PRIMARY KEY AUTOINCREMENT,
user_id TEXT,
question_id TEXT,
category TEXT,
correct INTEGER,
sql_text TEXT,
timestamp TEXT,
time_taken REAL,
difficulty INTEGER,
source TEXT,
notes TEXT
)
""")
cur.execute("""
CREATE TABLE IF NOT EXISTS session_meta (
id INTEGER PRIMARY KEY CHECK (id=1),
domain TEXT,
schema_json TEXT
)
""")
con.commit()
init_progress_tables(CONN)
# -------------------- Fallback dataset & questions --------------------
FALLBACK_SCHEMA = {
"domain": "bookstore",
"tables": [
{
"name": "authors",
"pk": ["author_id"],
"columns": [
{"name":"author_id","type":"INTEGER"},
{"name":"name","type":"TEXT"},
{"name":"country","type":"TEXT"},
{"name":"birth_year","type":"INTEGER"},
],
"fks": [],
"rows": [
{"author_id":1,"name":"Isaac Asimov","country":"USA","birth_year":1920},
{"author_id":2,"name":"Ursula K. Le Guin","country":"USA","birth_year":1929},
{"author_id":3,"name":"Haruki Murakami","country":"Japan","birth_year":1949},
{"author_id":4,"name":"Chinua Achebe","country":"Nigeria","birth_year":1930},
{"author_id":5,"name":"Jane Austen","country":"UK","birth_year":1775},
{"author_id":6,"name":"J.K. Rowling","country":"UK","birth_year":1965},
{"author_id":7,"name":"Yuval Noah Harari","country":"Israel","birth_year":1976},
{"author_id":8,"name":"New Author","country":"Nowhere","birth_year":1990},
],
},
{
"name": "bookstores",
"pk": ["store_id"],
"columns": [
{"name":"store_id","type":"INTEGER"},
{"name":"name","type":"TEXT"},
{"name":"city","type":"TEXT"},
{"name":"state","type":"TEXT"},
],
"fks": [],
"rows": [
{"store_id":1,"name":"Downtown Books","city":"Oklahoma City","state":"OK"},
{"store_id":2,"name":"Harbor Books","city":"Seattle","state":"WA"},
{"store_id":3,"name":"Desert Pages","city":"Phoenix","state":"AZ"},
],
},
{
"name": "books",
"pk": ["book_id"],
"columns": [
{"name":"book_id","type":"INTEGER"},
{"name":"title","type":"TEXT"},
{"name":"author_id","type":"INTEGER"},
{"name":"store_id","type":"INTEGER"},
{"name":"category","type":"TEXT"},
{"name":"price","type":"REAL"},
{"name":"published_year","type":"INTEGER"},
],
"fks": [
{"columns":["author_id"],"ref_table":"authors","ref_columns":["author_id"]},
{"columns":["store_id"],"ref_table":"bookstores","ref_columns":["store_id"]},
],
"rows": [
{"book_id":101,"title":"Foundation","author_id":1,"store_id":1,"category":"Sci-Fi","price":14.99,"published_year":1951},
{"book_id":102,"title":"I, Robot","author_id":1,"store_id":1,"category":"Sci-Fi","price":12.50,"published_year":1950},
{"book_id":103,"title":"The Left Hand of Darkness","author_id":2,"store_id":2,"category":"Sci-Fi","price":16.00,"published_year":1969},
{"book_id":104,"title":"A Wizard of Earthsea","author_id":2,"store_id":2,"category":"Fantasy","price":11.50,"published_year":1968},
{"book_id":105,"title":"Norwegian Wood","author_id":3,"store_id":3,"category":"Fiction","price":18.00,"published_year":1987},
{"book_id":106,"title":"Kafka on the Shore","author_id":3,"store_id":1,"category":"Fiction","price":21.00,"published_year":2002},
{"book_id":107,"title":"Things Fall Apart","author_id":4,"store_id":1,"category":"Fiction","price":10.00,"published_year":1958},
{"book_id":108,"title":"Pride and Prejudice","author_id":5,"store_id":2,"category":"Fiction","price":9.00,"published_year":1813},
{"book_id":109,"title":"Harry Potter and the Sorcerer's Stone","author_id":6,"store_id":3,"category":"Children","price":22.00,"published_year":1997},
{"book_id":110,"title":"Harry Potter and the Chamber of Secrets","author_id":6,"store_id":3,"category":"Children","price":23.00,"published_year":1998},
{"book_id":111,"title":"Sapiens","author_id":7,"store_id":1,"category":"History","price":26.00,"published_year":2011},
{"book_id":112,"title":"Homo Deus","author_id":7,"store_id":2,"category":"History","price":28.00,"published_year":2015},
],
},
]
}
FALLBACK_QUESTIONS = [
{"id":"Q01","category":"SELECT *","difficulty":1,
"prompt_md":"Select all rows and columns from `authors`.",
"answer_sql":["SELECT * FROM authors;"],
"requires_aliases":False,"required_aliases":[]},
{"id":"Q02","category":"SELECT columns","difficulty":1,
"prompt_md":"Show `title` and `price` from `books`.",
"answer_sql":["SELECT title, price FROM books;"],
"requires_aliases":False,"required_aliases":[]},
{"id":"Q03","category":"WHERE","difficulty":1,
"prompt_md":"List Sci‑Fi books under $15 (show title, price).",
"answer_sql":["SELECT title, price FROM books WHERE category='Sci-Fi' AND price < 15;"],
"requires_aliases":False,"required_aliases":[]},
{"id":"Q04","category":"Aliases","difficulty":1,
"prompt_md":"Using aliases `b` and `a`, join `books` to `authors` and show `b.title` and `a.name` as `author_name`.",
"answer_sql":["SELECT b.title, a.name AS author_name FROM books b JOIN authors a ON b.author_id=a.author_id;"],
"requires_aliases":True,"required_aliases":["a","b"]},
{"id":"Q05","category":"JOIN (INNER)","difficulty":2,
"prompt_md":"Inner join `books` and `bookstores`. Return `title`, `name` as `store`.",
"answer_sql":["SELECT b.title, s.name AS store FROM books b INNER JOIN bookstores s ON b.store_id=s.store_id;"],
"requires_aliases":False,"required_aliases":[]},
{"id":"Q06","category":"JOIN (LEFT)","difficulty":2,
"prompt_md":"List each author and their number of books (include authors with zero): columns `name`, `book_count`.",
"answer_sql":["SELECT a.name, COUNT(b.book_id) AS book_count FROM authors a LEFT JOIN books b ON a.author_id=b.author_id GROUP BY a.name;"],
"requires_aliases":False,"required_aliases":[]},
{"id":"Q07","category":"VIEW","difficulty":2,
"prompt_md":"Create a view `vw_pricy` with `title`, `price` for books priced > 25.",
"answer_sql":["CREATE VIEW vw_pricy AS SELECT title, price FROM books WHERE price > 25;"],
"requires_aliases":False,"required_aliases":[]},
{"id":"Q08","category":"CTAS / SELECT INTO","difficulty":2,
"prompt_md":"Create a table `cheap_books` containing books priced < 12. Use CTAS or SELECT INTO.",
"answer_sql":[
"CREATE TABLE cheap_books AS SELECT * FROM books WHERE price < 12;",
"SELECT * INTO cheap_books FROM books WHERE price < 12;"
],
"requires_aliases":False,"required_aliases":[]},
]
# --------------- OpenAI prompts + parsing helpers ---------------
DOMAIN_AND_QUESTIONS_SCHEMA = {"required": ["domain", "tables", "questions"]}
def _domain_prompt(prev_domain: Optional[str]) -> str:
extra = f" Avoid using the previous domain '{prev_domain}' if possible." if prev_domain else ""
return f"""
Return ONLY a valid JSON object (no markdown, no prose).
The JSON must have: domain (string), tables (3-4 table objects), and questions (8-12 question objects).{extra}
Rules:
- One domain chosen from: bookstore, retail sales, wholesaler, sales tax, oil and gas wells, marketing.
- Tables: SQLite-friendly. Use snake_case. Each table has: name, pk (list of column names),
  columns (list of {{name,type}}), fks (list of {{columns,ref_table,ref_columns}}), rows (8-15 small seed rows).
- Questions: categories among "SELECT *", "SELECT columns", "WHERE", "Aliases",
  "JOIN (INNER)", "JOIN (LEFT)", "Aggregation", "VIEW", "CTAS / SELECT INTO".
  Include at least one LEFT JOIN, one VIEW creation, one CTAS or SELECT INTO.
  Provide 1-3 'answer_sql' strings per question. Prefer SQLite-compatible SQL. Do NOT use RIGHT/FULL OUTER JOIN.
  For 1-2 questions, set requires_aliases=true and list required_aliases.
Example top-level keys:
{{"domain":"retail sales","tables":[...],"questions":[...]}}
"""
def _loose_json_parse(s: str) -> Optional[dict]:
try:
return json.loads(s)
except Exception:
pass
start = s.find("{"); end = s.rfind("}")
if start != -1 and end != -1 and end > start:
try:
return json.loads(s[start:end+1])
except Exception:
return None
return None
_SQL_FENCE = re.compile(r"```sql(.*?)```", re.IGNORECASE | re.DOTALL)
_CODE_FENCE = re.compile(r"```(.*?)```", re.DOTALL)
def _strip_code_fences(txt: str) -> str:
if txt is None: return ""
m = _SQL_FENCE.findall(txt)
if m: return "\n".join([x.strip() for x in m if x.strip()])
m2 = _CODE_FENCE.findall(txt)
if m2: return "\n".join([x.strip() for x in m2 if x.strip()])
return txt.strip()
def _as_list_of_sql(val) -> List[str]:
if val is None: return []
if isinstance(val, str):
s = _strip_code_fences(val)
parts = [p.strip() for p in s.split("\n") if p.strip()]
return parts or ([s] if s else [])
if isinstance(val, list):
out = []
for v in val:
if isinstance(v, str):
s = _strip_code_fences(v)
if s: out.append(s)
return out
return []
def _canon_question(q: Dict[str, Any]) -> Optional[Dict[str, Any]]:
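    """Normalize a model-generated question dict (keys, category names, answer SQL, alias flags); returns None if unusable."""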
if not isinstance(q, dict): return None
cat = q.get("category") or q.get("type") or q.get("topic")
prompt = q.get("prompt_md") or q.get("prompt") or q.get("question") or q.get("text")
answer_sql = q.get("answer_sql") or q.get("answers") or q.get("solutions") or q.get("sql")
diff = q.get("difficulty") or 1
req_alias = bool(q.get("requires_aliases", False))
req_aliases = q.get("required_aliases") or []
cat = str(cat).strip() if cat is not None else ""
prompt = str(prompt).strip() if prompt is not None else ""
answers = _as_list_of_sql(answer_sql)
if not cat or not prompt or not answers: return None
known = {
"SELECT *","SELECT columns","WHERE","Aliases",
"JOIN (INNER)","JOIN (LEFT)","Aggregation","VIEW","CTAS / SELECT INTO"
}
if cat not in known:
low = cat.lower()
if "select *" in low: cat = "SELECT *"
elif "columns" in low: cat = "SELECT columns"
elif "where" in low or "filter" in low: cat = "WHERE"
elif "alias" in low: cat = "Aliases"
elif "left" in low: cat = "JOIN (LEFT)"
elif "inner" in low or "join" in low: cat = "JOIN (INNER)"
elif "agg" in low or "group" in low: cat = "Aggregation"
elif "view" in low: cat = "VIEW"
elif "into" in low or "ctas" in low: cat = "CTAS / SELECT INTO"
if isinstance(req_aliases, str):
req_aliases = [a.strip() for a in re.split(r"[,\s]+", req_aliases) if a.strip()]
elif not isinstance(req_aliases, list):
req_aliases = []
return {
"id": str(q.get("id") or f"LLM_{int(time.time()*1000)}_{random.randint(100,999)}"),
"category": cat,
"difficulty": int(diff) if str(diff).isdigit() else 1,
"prompt_md": prompt,
"answer_sql": answers,
"requires_aliases": bool(req_alias),
"required_aliases": req_aliases,
}
def _canon_tables(tables: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
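    """Keep only well-formed table definitions (a name plus at least one named column), coercing pk/fks/rows to lists."""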
out = []
for t in (tables or []):
if not isinstance(t, dict): continue
name = str(t.get("name","")).strip()
if not name: continue
cols = t.get("columns") or []
good_cols = []
for c in cols:
if not isinstance(c, dict): continue
cname = str(c.get("name","")).strip()
ctype = str(c.get("type","TEXT")).strip() or "TEXT"
if cname: good_cols.append({"name": cname, "type": ctype})
if not good_cols: continue
pk = t.get("pk") or []
if isinstance(pk, str): pk = [pk]
fks = t.get("fks") or []
rows = t.get("rows") or []
out.append({
"name": name,
"pk": [str(x) for x in pk],
"columns": good_cols,
"fks": fks if isinstance(fks, list) else [],
"rows": rows if isinstance(rows, list) else [],
})
return out
def llm_generate_domain_and_questions(prev_domain: Optional[str]):
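    """Ask OpenAI for a randomized domain, schema, and question bank; returns (payload, error, model_used, stats)."""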
if not OPENAI_AVAILABLE or not os.getenv("OPENAI_API_KEY"):
return None, "OpenAI client not available or OPENAI_API_KEY missing.", None, {"accepted_questions":0,"dropped_questions":0}
errors = []
prompt = _domain_prompt(prev_domain)
for model in _candidate_models():
try:
try:
chat = _client.chat.completions.create(
model=model,
messages=[{"role":"user","content": prompt}],
temperature=0.6,
response_format={"type":"json_object"}
)
data_text = chat.choices[0].message.content
except TypeError:
chat = _client.chat.completions.create(
model=model,
messages=[{"role":"system","content":"Return ONLY a JSON object. No markdown."},
{"role":"user","content": prompt}],
temperature=0.6
)
data_text = chat.choices[0].message.content
obj_raw = _loose_json_parse(data_text or "")
if not obj_raw:
raise RuntimeError("Could not parse JSON from model output.")
for k in DOMAIN_AND_QUESTIONS_SCHEMA["required"]:
if k not in obj_raw:
raise RuntimeError(f"Missing key '{k}'")
tables = _canon_tables(obj_raw.get("tables", []))
if not tables: raise RuntimeError("No usable tables in LLM output.")
obj_raw["tables"] = tables
dropped = 0
clean_qs = []
for q in obj_raw.get("questions", []):
cq = _canon_question(q)
if not cq: dropped += 1; continue
answers = [a for a in cq["answer_sql"] if " right join " not in a.lower() and " full " not in a.lower()]
if not answers: dropped += 1; continue
cq["answer_sql"] = answers
clean_qs.append(cq)
if not clean_qs:
raise RuntimeError("No usable questions after canonicalization.")
stats = {"accepted_questions": len(clean_qs), "dropped_questions": dropped}
obj_raw["questions"] = clean_qs
return obj_raw, None, model, stats
except Exception as e:
errors.append(f"{model}: {e}")
continue
return None, "; ".join(errors) if errors else "Unknown LLM error.", None, {"accepted_questions":0,"dropped_questions":0}
# -------------------- Install schema & prepare questions --------------------
def drop_existing_domain_tables(con: sqlite3.Connection, keep_internal=True):
with DB_LOCK:
cur = con.cursor()
cur.execute("SELECT name, type FROM sqlite_master WHERE type IN ('table','view')")
items = cur.fetchall()
for name, typ in items:
if keep_internal and name in ("users","attempts","session_meta"):
continue
try:
cur.execute(f"DROP {typ.upper()} IF EXISTS {name}")
except Exception:
pass
con.commit()
def install_schema(con: sqlite3.Connection, schema: Dict[str,Any]):
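    """Replace the current domain: drop old objects, create tables, insert seed rows, and record the schema in session_meta."""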
drop_existing_domain_tables(con, keep_internal=True)
with DB_LOCK:
cur = con.cursor()
for t in schema.get("tables", []):
cols_sql = []
pk = t.get("pk", [])
for c in t.get("columns", []):
cols_sql.append(f"{c['name']} {c.get('type','TEXT')}")
if pk: cols_sql.append(f"PRIMARY KEY ({', '.join(pk)})")
create_sql = f"CREATE TABLE {t['name']} ({', '.join(cols_sql)})"
cur.execute(create_sql)
for t in schema.get("tables", []):
if not t.get("rows"): continue
cols = [c["name"] for c in t.get("columns", [])]
qmarks = ",".join(["?"]*len(cols))
insert_sql = f"INSERT INTO {t['name']} ({', '.join(cols)}) VALUES ({qmarks})"
for r in t["rows"]:
if isinstance(r, dict): vals = [r.get(col, None) for col in cols]
elif isinstance(r, (list, tuple)):
vals = list(r) + [None]*(len(cols)-len(r)); vals = vals[:len(cols)]
else: continue
cur.execute(insert_sql, vals)
con.commit()
cur.execute("INSERT OR REPLACE INTO session_meta(id, domain, schema_json) VALUES (1, ?, ?)",
(schema.get("domain","unknown"), json.dumps(schema)))
con.commit()
def bootstrap_domain_with_llm_or_fallback(prev_domain: Optional[str]):
obj, err, model_used, stats = llm_generate_domain_and_questions(prev_domain)
if obj is None:
return FALLBACK_SCHEMA, FALLBACK_QUESTIONS, {"source":"fallback","model":None,"error":err,"accepted":0,"dropped":0}
return obj, obj["questions"], {"source":"openai","model":model_used,"error":None,"accepted":stats["accepted_questions"],"dropped":stats["dropped_questions"]}
def install_schema_and_prepare_questions(prev_domain: Optional[str]):
schema, questions, info = bootstrap_domain_with_llm_or_fallback(prev_domain)
install_schema(CONN, schema)
if not questions:
questions = FALLBACK_QUESTIONS
return schema, questions, info
# -------------------- Session globals --------------------
CURRENT_SCHEMA, CURRENT_QS, CURRENT_INFO = install_schema_and_prepare_questions(prev_domain=None)
# -------------------- Progress + mastery --------------------
def upsert_user(con: sqlite3.Connection, user_id: str, name: str):
with DB_LOCK:
cur = con.cursor()
cur.execute("SELECT user_id FROM users WHERE user_id = ?", (user_id,))
if cur.fetchone() is None:
cur.execute("INSERT INTO users (user_id, name, created_at) VALUES (?, ?, ?)",
(user_id, name, datetime.now(timezone.utc).isoformat()))
else:
cur.execute("UPDATE users SET name=? WHERE user_id=?", (name, user_id))
con.commit()
CATEGORIES_ORDER = [
"SELECT *", "SELECT columns", "WHERE", "Aliases",
"JOIN (INNER)", "JOIN (LEFT)", "Aggregation", "VIEW", "CTAS / SELECT INTO"
]
def topic_stats(df_attempts: pd.DataFrame) -> pd.DataFrame:
rows = []
for cat in CATEGORIES_ORDER:
sub = df_attempts[df_attempts["category"] == cat] if not df_attempts.empty else pd.DataFrame()
att = int(sub.shape[0]) if not sub.empty else 0
cor = int(sub["correct"].sum()) if not sub.empty else 0
acc = float(cor / max(att, 1))
rows.append({"category":cat,"attempts":att,"correct":cor,"accuracy":acc})
return pd.DataFrame(rows)
def fetch_attempts(con: sqlite3.Connection, user_id: str) -> pd.DataFrame:
with DB_LOCK:
return pd.read_sql_query("SELECT * FROM attempts WHERE user_id=? ORDER BY id DESC", con, params=(user_id,))
def pick_next_question(user_id: str) -> Dict[str,Any]:
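    """Choose a question from the student's weakest category (lowest accuracy, then fewest attempts), falling back to the full pool."""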
pool = CURRENT_QS if CURRENT_QS else FALLBACK_QUESTIONS
df = fetch_attempts(CONN, user_id)
stats = topic_stats(df)
stats = stats.sort_values(by=["accuracy","attempts"], ascending=[True, True]) if not stats.empty else stats
weakest = stats.iloc[0]["category"] if not stats.empty else CATEGORIES_ORDER[0]
cands = [q for q in pool if str(q.get("category","")).strip() == weakest] or pool
return dict(random.choice(cands))
# -------------------- SQL execution & grading --------------------
def run_df(con: sqlite3.Connection, sql: str) -> pd.DataFrame:
with DB_LOCK:
return pd.read_sql_query(sql, con)
def rewrite_select_into(sql: str) -> Tuple[str, Optional[str]]:
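    """Rewrite `SELECT ... INTO tbl FROM ...` as `CREATE TABLE tbl AS SELECT ...` for SQLite; returns (sql, created_table_or_None)."""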
s = sql.strip().strip(";")
if re.search(r"\bselect\b.+\binto\b.+\bfrom\b", s, flags=re.IGNORECASE|re.DOTALL):
m = re.match(r"(?is)^\s*select\s+(.*?)\s+into\s+([A-Za-z_][A-Za-z0-9_]*)\s+from\s+(.*)$", s)
if m:
cols, tbl, rest = m.groups()
return f"CREATE TABLE {tbl} AS SELECT {cols} FROM {rest}", tbl
return sql, None
def detect_unsupported_joins(sql: str) -> Optional[str]:
low = sql.lower()
if " right join " in low:
return "SQLite does not support RIGHT JOIN. Use LEFT JOIN in the opposite direction."
if " full join " in low or " full outer join " in low:
return "SQLite does not support FULL OUTER JOIN. Use LEFT JOIN plus UNION."
if " ilike " in low:
return "SQLite has no ILIKE. Use LOWER(col) LIKE LOWER('%pattern%')."
return None
def detect_cartesian(con: sqlite3.Connection, sql: str, df_result: pd.DataFrame) -> Optional[str]:
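    """Heuristically warn about likely cartesian products (CROSS JOIN, comma joins, or JOIN without ON/USING)."""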
low = sql.lower()
if " cross join " in low: return "Query uses CROSS JOIN (cartesian product). Ensure this is intended."
comma_from = re.search(r"\bfrom\b\s+([a-z_]\w*)\s*,\s*([a-z_]\w*)", low)
missing_on = (" join " in low) and (" on " not in low) and (" using " not in low) and (" natural " not in low)
if comma_from or missing_on:
try:
with DB_LOCK:
cur = con.cursor()
if comma_from: t1, t2 = comma_from.groups()
else:
m = re.search(r"\bfrom\b\s+([a-z_]\w*)", low); j = re.search(r"\bjoin\b\s+([a-z_]\w*)", low)
if not m or not j: return "Possible cartesian product: no join condition detected."
t1, t2 = m.group(1), j.group(1)
cur.execute(f"SELECT COUNT(*) FROM {t1}"); n1 = cur.fetchone()[0]
cur.execute(f"SELECT COUNT(*) FROM {t2}"); n2 = cur.fetchone()[0]
prod = n1 * n2
if len(df_result) == prod and prod > 0:
return f"Result row count equals {n1}Γ—{n2}={prod}. Likely cartesian product (missing join)."
except Exception:
return "Possible cartesian product: no join condition detected."
return None
def should_enforce_columns(q: Dict[str, Any]) -> bool:
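    """Return True when grading should compare the selected columns, not just row counts, for this question."""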
cat = (q.get("category") or "").strip()
if cat in ("SELECT columns", "Aggregation", "VIEW", "CTAS / SELECT INTO"):
return True
prompt = (q.get("prompt_md") or "").lower()
if re.search(r"`[^`]+`", q.get("prompt_md") or ""):
return True
if re.search(r"\((?:show|return|display)[^)]+\)", prompt):
return True
if re.search(r"\b(show|return|display|select)\b[^.]{0,100}\b(columns?|fields?|name|title|price)\b", prompt):
return True
return False
def _normalize_columns(df: pd.DataFrame) -> pd.DataFrame:
out = df.copy()
out.columns = [str(c).strip().lower() for c in out.columns]
return out
def results_equal_or_superset(df_student: pd.DataFrame, df_expected: pd.DataFrame) -> Tuple[bool, Optional[str]]:
a = _normalize_columns(df_student); b = _normalize_columns(df_expected)
if set(a.columns) == set(b.columns):
a2 = a[sorted(a.columns)].sort_values(sorted(a.columns)).reset_index(drop=True)
b2 = b[sorted(a.columns)].sort_values(sorted(a.columns)).reset_index(drop=True)
return (a2.equals(b2), None)
if set(b.columns).issubset(set(a.columns)):
a_proj = a[b.columns]
a2 = a_proj.sort_values(list(b.columns)).reset_index(drop=True)
b2 = b.sort_values(list(b.columns)).reset_index(drop=True)
if a2.equals(b2):
return True, "extra_columns"
return False, None
def results_equal_rowcount_only(df_student: pd.DataFrame, df_expected: pd.DataFrame) -> bool:
return df_student.shape[0] == df_expected.shape[0]
def aliases_present(sql: str, required_aliases: List[str]) -> bool:
low = re.sub(r"\s+", " ", (sql or "").lower())
for al in (required_aliases or []):
if f" {al}." not in low and f" as {al} " not in low:
return False
return True
def exec_student_sql(sql_text: str):
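    """Run the student's SQL against the shared SQLite connection; returns (dataframe, error, warning, note)."""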
if not sql_text or not sql_text.strip():
return None, "Enter a SQL statement.", None, None
sql_raw = sql_text.strip().rstrip(";")
sql_rew, created_tbl = rewrite_select_into(sql_raw)
note = "Rewrote `SELECT ... INTO` to `CREATE TABLE ... AS SELECT ...` for SQLite." if sql_rew != sql_raw else None
unsup = detect_unsupported_joins(sql_rew)
if unsup: return None, unsup, None, note
try:
low = sql_rew.lower()
if low.startswith("select"):
df = run_df(CONN, sql_rew)
warn = detect_cartesian(CONN, sql_rew, df)
return df, None, warn, note
else:
with DB_LOCK:
cur = CONN.cursor()
cur.execute(sql_rew); CONN.commit()
if low.startswith("create view"):
m = re.match(r"(?is)^\s*create\s+view\s+(if\s+not\s+exists\s+)?([a-z_]\w*)\s+as\s+(select.*)$", low)
name = m.group(2) if m else None
if name:
try: return pd.read_sql_query(f"SELECT * FROM {name}", CONN), None, None, note
except Exception: return None, "View created but could not be queried.", None, note
if low.startswith("create table"):
tbl = created_tbl
if not tbl:
m = re.match(r"(?is)^\s*create\s+table\s+(if\s+not\s+exists\s+)?([a-z_]\w*)\s+as\s+select.*$", low)
tbl = m.group(2) if m else None
if tbl:
try: return pd.read_sql_query(f"SELECT * FROM {tbl}", CONN), None, None, note
except Exception: return None, "Table created but could not be queried.", None, note
return pd.DataFrame(), None, None, note
except Exception as e:
msg = str(e)
if "no such table" in msg.lower(): return None, f"{msg}. Check table names for this randomized domain.", None, note
if "no such column" in msg.lower(): return None, f"{msg}. Use correct column names or prefixes (alias.column).", None, note
if "ambiguous column name" in msg.lower(): return None, f"{msg}. Qualify the column with a table alias.", None, note
if "misuse of aggregate" in msg.lower() or "aggregate functions are not allowed in" in msg.lower():
return None, f"{msg}. You might need a GROUP BY for non-aggregated columns.", None, note
if "near \"into\"" in msg.lower() and "syntax error" in msg.lower():
return None, "SQLite doesn’t support `SELECT ... INTO`. I can rewrite it automaticallyβ€”try again.", None, note
if "syntax error" in msg.lower():
return None, f"Syntax error. Check commas, keywords, parentheses. Raw error: {msg}", None, note
return None, f"SQL error: {msg}", None, note
def answer_df(answer_sql: List[str]) -> Optional[pd.DataFrame]:
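    """Execute canonical answers until one succeeds and return its result set (creating views/tables as needed)."""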
for sql in answer_sql:
try:
low = sql.strip().lower()
if low.startswith("select"): return run_df(CONN, sql)
if low.startswith("create view"):
m = re.match(r"(?is)^\s*create\s+view\s+(if\s+not\s+exists\s+)?([a-z_]\w*)\s+as\s+select.*$", low)
view_name = m.group(2) if m else "vw_tmp"
with DB_LOCK:
cur = CONN.cursor()
cur.execute(f"DROP VIEW IF EXISTS {view_name}")
cur.execute(sql); CONN.commit()
return run_df(CONN, f"SELECT * FROM {view_name}")
if low.startswith("create table"):
m = re.match(r"(?is)^\s*create\s+table\s+(if\s+not\s+exists\s+)?([a-z_]\w*)\s+as\s+select.*$", low)
tbl = m.group(2) if m else None
with DB_LOCK:
cur = CONN.cursor()
if tbl: cur.execute(f"DROP TABLE IF EXISTS {tbl}")
cur.execute(sql); CONN.commit()
if tbl: return run_df(CONN, f"SELECT * FROM {tbl}")
except Exception:
continue
return None
def validate_answer(q: Dict[str,Any], student_sql: str, df_student: Optional[pd.DataFrame]) -> Tuple[bool, str]:
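    """Grade the student's result against a canonical answer; returns (is_correct, explanation_markdown)."""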
df_expected = answer_df(q["answer_sql"])
    if df_expected is None:
        # No canonical result could be produced; fall back to checking that the student's statement ran.
        ok = df_student is not None
        return ok, ("**Explanation:** Your statement executed successfully for this task."
                    if ok else "**Explanation:** Your statement did not execute successfully for this task.")
    if df_student is None:
        return False, "**Explanation:** Your query returned no result to compare against the expected data."
if should_enforce_columns(q):
ok, note = results_equal_or_superset(df_student, df_expected)
if ok and note == "extra_columns":
return True, "**Note:** You returned extra columns. The rows match; try selecting only the requested columns next time."
if ok:
return True, "**Explanation:** Your result matches a canonical solution."
return False, f"**Explanation:** Compare your result to a canonical solution."
else:
ok = results_equal_rowcount_only(df_student, df_expected)
if ok:
return True, "**Explanation:** Columns weren’t specified for this task; row count matches the canonical answer."
return False, "**Explanation:** For this task we compared row counts (projection not enforced) and they didn’t match."
def log_attempt(user_id: str, qid: str, category: str, correct: bool, sql_text: str,
time_taken: float, difficulty: int, source: str, notes: str):
with DB_LOCK:
cur = CONN.cursor()
cur.execute("""
INSERT INTO attempts (user_id, question_id, category, correct, sql_text, timestamp, time_taken, difficulty, source, notes)
VALUES (?,?,?,?,?,?,?,?,?,?)
""", (user_id, qid, category, int(correct), sql_text, datetime.now(timezone.utc).isoformat(),
time_taken, difficulty, source, notes))
CONN.commit()
# -------------------- UI callbacks --------------------
def start_session(name: str, session: dict):
name = (name or "").strip()
if not name:
return (session,
gr.update(value="Please enter your name to begin.", visible=True),
gr.update(visible=False),
gr.update(visible=False),
draw_dynamic_erd(CURRENT_SCHEMA),
gr.update(visible=False),
pd.DataFrame(),
pd.DataFrame())
slug = "-".join(name.lower().split())
user_id = slug[:64] if slug else f"user-{int(time.time())}"
upsert_user(CONN, user_id, name)
q = pick_next_question(user_id)
session = {"user_id": user_id, "name": name, "qid": q["id"], "start_ts": time.time(), "q": q}
prompt = q["prompt_md"]
stats = topic_stats(fetch_attempts(CONN, user_id))
erd = draw_dynamic_erd(CURRENT_SCHEMA)
return (session,
gr.update(value=f"**Question {q['id']}**\n\n{prompt}", visible=True),
gr.update(visible=True),
gr.update(value="", visible=True),
erd,
gr.update(visible=False),
stats,
pd.DataFrame())
def render_preview(sql_text: str, session: dict):
if not session or "q" not in session:
return gr.update(value="", visible=False), draw_dynamic_erd(CURRENT_SCHEMA)
s = (sql_text or "").strip()
if not s:
return gr.update(value="", visible=False), draw_dynamic_erd(CURRENT_SCHEMA)
hi_tables, hi_edges = sql_highlights(s, CURRENT_SCHEMA)
erd = draw_dynamic_erd(CURRENT_SCHEMA, highlight_tables=hi_tables, highlight_edges=hi_edges)
return gr.update(value=f"**Preview:**\n\n```sql\n{s}\n```", visible=True), erd
def submit_answer(sql_text: str, session: dict):
if not session or "user_id" not in session or "q" not in session:
return gr.update(value="Start a session first.", visible=True), pd.DataFrame(), gr.update(visible=False), pd.DataFrame()
user_id = session["user_id"]
q = session["q"]
elapsed = max(0.0, time.time() - session.get("start_ts", time.time()))
df, err, warn, note = exec_student_sql(sql_text)
details = []
if note: details.append(f"ℹ️ {note}")
if err:
fb = f"❌ **Did not run**\n\n{err}"
if details: fb += "\n\n" + "\n".join(details)
log_attempt(user_id, q.get("id","?"), q.get("category","?"), False, sql_text, elapsed, int(q.get("difficulty",1)), "bank", " | ".join([err] + details))
stats = topic_stats(fetch_attempts(CONN, user_id))
return gr.update(value=fb, visible=True), pd.DataFrame(), gr.update(visible=True), stats
alias_msg = None
if q.get("requires_aliases") and not aliases_present(sql_text, q.get("required_aliases", [])):
alias_msg = f"⚠️ This task asked for aliases {q.get('required_aliases', [])}. I didn’t detect them."
is_correct, explanation = validate_answer(q, sql_text, df)
if warn: details.append(f"⚠️ {warn}")
if alias_msg: details.append(alias_msg)
prefix = "βœ… **Correct!**" if is_correct else "❌ **Not quite.**"
feedback = prefix
if details: feedback += "\n\n" + "\n".join(details)
feedback += "\n\n" + explanation + "\n\n**One acceptable solution:**\n```sql\n" + q["answer_sql"][0].rstrip(";") + ";\n```"
log_attempt(user_id, q["id"], q.get("category","?"), bool(is_correct), sql_text, elapsed, int(q.get("difficulty",1)), "bank", " | ".join(details))
stats = topic_stats(fetch_attempts(CONN, user_id))
return gr.update(value=feedback, visible=True), (df if df is not None else pd.DataFrame()), gr.update(visible=True), stats
def next_question(session: dict):
if not session or "user_id" not in session:
return session, gr.update(value="Start a session first.", visible=True), gr.update(visible=False), draw_dynamic_erd(CURRENT_SCHEMA), gr.update(visible=False)
user_id = session["user_id"]
q = pick_next_question(user_id)
session["qid"] = q["id"]; session["q"] = q; session["start_ts"] = time.time()
return session, gr.update(value=f"**Question {q['id']}**\n\n{q['prompt_md']}", visible=True), gr.update(value="", visible=True), draw_dynamic_erd(CURRENT_SCHEMA), gr.update(visible=False)
def show_hint(session: dict):
if not session or "q" not in session:
return gr.update(value="Start a session first.", visible=True)
cat = session["q"].get("category","?")
hint = {
"SELECT *": "Use `SELECT * FROM table_name`.",
"SELECT columns": "List columns: `SELECT col1, col2 FROM table_name`.",
"WHERE": "Filter with `WHERE` and combine conditions using AND/OR.",
"Aliases": "Use `table_name t` and qualify as `t.col`.",
"JOIN (INNER)": "Join with `... INNER JOIN ... ON left.key = right.key`.",
"JOIN (LEFT)": "LEFT JOIN keeps all rows from the left table.",
"Aggregation": "Use aggregates and `GROUP BY` non-aggregated columns.",
"VIEW": "`CREATE VIEW view_name AS SELECT ...`.",
"CTAS / SELECT INTO": "SQLite uses `CREATE TABLE name AS SELECT ...`."
}.get(cat, "Identify keys from the schema and join on them.")
return gr.update(value=f"**Hint:** {hint}", visible=True)
def _domain_status_md():
    if CURRENT_INFO.get("source") == "openai":
        accepted = CURRENT_INFO.get("accepted",0); dropped = CURRENT_INFO.get("dropped",0)
        return (f"✅ **Domain via OpenAI** `{CURRENT_INFO.get('model','?')}` → **{CURRENT_SCHEMA.get('domain','?')}**. "
                f"Accepted questions: {accepted}, dropped: {dropped}. \n"
                f"Tables: {', '.join(t['name'] for t in CURRENT_SCHEMA.get('tables', []))}.")
    err = CURRENT_INFO.get("error",""); err_short = (err[:160] + "…") if len(err) > 160 else err
    return f"⚠️ **OpenAI randomization unavailable** → using fallback **{CURRENT_SCHEMA.get('domain','?')}**.\n\n> Reason: {err_short}"
def list_tables_for_preview():
df = run_df(CONN, """
SELECT name FROM sqlite_master
WHERE type in ('table','view')
AND name NOT LIKE 'sqlite_%'
AND name NOT IN ('users','attempts','session_meta')
ORDER BY type, name
""")
return df["name"].tolist() if not df.empty else ["(no tables)"]
# >>> FIX: Always reseed a question on randomize (creates a guest session if needed)
def regenerate_domain(session: dict):
global CURRENT_SCHEMA, CURRENT_QS, CURRENT_INFO
prev = CURRENT_SCHEMA.get("domain") if CURRENT_SCHEMA else None
CURRENT_SCHEMA, CURRENT_QS, CURRENT_INFO = install_schema_and_prepare_questions(prev_domain=prev)
erd = draw_dynamic_erd(CURRENT_SCHEMA)
status = _domain_status_md()
# Ensure a session (guest if needed)
if not session or not session.get("user_id"):
user_id = f"guest-{int(time.time())}"
upsert_user(CONN, user_id, "Guest")
session = {"user_id": user_id, "name": "Guest", "qid": None, "start_ts": time.time(), "q": None}
# Seed next question for this session
q = pick_next_question(session["user_id"])
session.update({"qid": q["id"], "q": q, "start_ts": time.time()})
# Fresh mastery and cleared result preview
stats = topic_stats(fetch_attempts(CONN, session["user_id"]))
empty_df = pd.DataFrame()
# Refresh dropdown
dd_update = gr.update(choices=list_tables_for_preview(), value=None)
return (
gr.update(value=status, visible=True), # regen_fb
erd, # er_image
gr.update(value=f"**Question {q['id']}**\n\n{q['prompt_md']}", visible=True), # prompt_md
gr.update(value="", visible=True), # sql_input
dd_update, # tbl_dd
stats, # mastery_df
empty_df, # result_df
session # session_state
)
def preview_table(tbl: str):
try:
if not tbl or tbl=="(no tables)":
return pd.DataFrame()
return run_df(CONN, f"SELECT * FROM {tbl} LIMIT 20")
except Exception as e:
return pd.DataFrame([{"error": str(e)}])
# -------------------- UI --------------------
with gr.Blocks(title="Adaptive SQL Trainer - Randomized Domains") as demo:
    gr.Markdown(
        """
        # 🧪 Adaptive SQL Trainer - Randomized Domains (SQLite)
        - Uses **OpenAI** (if configured) to randomize a domain (bookstore, retail sales, wholesaler,
          sales tax, oil & gas wells, marketing), generate **3-4 tables** and **8-12** questions.
        - Practice `SELECT`, `WHERE`, `JOIN` (INNER/LEFT), **aliases**, **views**, and **CTAS / SELECT INTO**.
        - **ERD highlights your JOINs** as you type; all FK edges remain visible in light gray.
        """
    )
with gr.Row():
with gr.Column(scale=1):
name_box = gr.Textbox(label="Your Name", placeholder="e.g., Jordan Alvarez")
start_btn = gr.Button("Start / Resume Session", variant="primary")
session_state = gr.State({"user_id": None, "name": None, "qid": None, "start_ts": None, "q": None})
gr.Markdown("---")
gr.Markdown("### Dataset Controls")
            regen_btn = gr.Button("🔀 Randomize Dataset (OpenAI)")
regen_fb = gr.Markdown(_domain_status_md(), visible=True)
gr.Markdown("---")
gr.Markdown("### Instructor Tools")
export_name = gr.Textbox(label="Export a student's progress (enter name)")
export_btn = gr.Button("Export CSV")
export_file = gr.File(label="Download progress")
gr.Markdown("---")
gr.Markdown("### Quick Table/View Preview (top 20 rows)")
tbl_dd = gr.Dropdown(choices=list_tables_for_preview(), label="Pick table/view", interactive=True)
tbl_btn = gr.Button("Preview")
preview_df = gr.Dataframe(value=pd.DataFrame(), interactive=False)
with gr.Column(scale=2):
prompt_md = gr.Markdown(visible=False)
sql_input = gr.Textbox(label="Your SQL", placeholder="Type SQL here (end ; optional).", lines=6, visible=False)
preview_md = gr.Markdown(visible=False)
er_image = gr.Image(label="Entity Diagram", value=draw_dynamic_erd(CURRENT_SCHEMA), height=PLOT_HEIGHT)
with gr.Row():
submit_btn = gr.Button("Run & Submit", variant="primary")
hint_btn = gr.Button("Hint")
next_btn = gr.Button("Next Question β–Ά", visible=False)
feedback_md = gr.Markdown("")
gr.Markdown("---")
gr.Markdown("### Your Progress by Category")
mastery_df = gr.Dataframe(headers=["category","attempts","correct","accuracy"],
col_count=(4,"dynamic"), row_count=(0,"dynamic"), interactive=False)
gr.Markdown("---")
gr.Markdown("### Result Preview")
result_df = gr.Dataframe(value=pd.DataFrame(), interactive=False)
start_btn.click(
start_session,
inputs=[name_box, session_state],
outputs=[session_state, prompt_md, sql_input, preview_md, er_image, next_btn, mastery_df, result_df],
)
sql_input.change(
render_preview,
inputs=[sql_input, session_state],
outputs=[preview_md, er_image],
)
submit_btn.click(
submit_answer,
inputs=[sql_input, session_state],
outputs=[feedback_md, result_df, next_btn, mastery_df],
)
next_btn.click(
next_question,
inputs=[session_state],
outputs=[session_state, prompt_md, sql_input, er_image, next_btn],
)
hint_btn.click(
show_hint,
inputs=[session_state],
outputs=[feedback_md],
)
    def export_progress(user: str):
        user_id = "-".join((user or "").lower().split())[:64]
        path = os.path.abspath(os.path.join(EXPORT_DIR, f"{user_id}_progress.csv"))
        fetch_attempts(CONN, user_id).to_csv(path, index=False)  # write the attempts so the File component has a real file to serve
        return path
    export_btn.click(export_progress, inputs=[export_name], outputs=[export_file])
regen_btn.click( # one callback: reseed question, refresh dropdown, clear previews
regenerate_domain,
inputs=[session_state],
outputs=[regen_fb, er_image, prompt_md, sql_input, tbl_dd, mastery_df, result_df, session_state],
)
tbl_btn.click(
preview_table,
inputs=[tbl_dd],
outputs=[preview_df]
)
if __name__ == "__main__":
demo.launch()