# -*- coding: utf-8 -*-
"""genai-raggysequel-final.ipynb
"""
import os
# Commented out IPython magic to ensure Python compatibility.
# %pip -q uninstall -y cudf-cu12 dask-cudf-cu12 ibis-framework
# Embeddings provider: "openai" (recommended) or "local"
EMBEDDINGS_PROVIDER = "openai"
EMBEDDING_MODEL_NAME = "text-embedding-3-large" # if provider == openai
LOCAL_EMBEDDING_MODEL = "sentence-transformers/all-MiniLM-L6-v2" # if provider == local
# LLM provider/model for SQL generation (OpenAI shown here)
LLM_PROVIDER = "openai"
LLM_MODEL_NAME = "gpt-4o-mini"
# Database settings
DB_DIALECT = "sqlite" # "sqlite", "postgresql", "mysql", "mssql"
SQLITE_PATH = "salesdatabig.db"
POSTGRES_URL = "postgresql+psycopg2://user:password@host:5432/dbname"
MYSQL_URL = "mysql+pymysql://user:password@host:3306/dbname"
MSSQL_URL = "mssql+pyodbc://user:password@dsn"
# Safety knobs
MAX_RETURN_ROWS = 200 # hard cap on returned rows
TOP_K = 10 # retrieved schema chunks for context
from typing import List, Tuple
import pandas as pd
from tabulate import tabulate
import sqlalchemy as sa
from sqlalchemy import text, inspect
import sqlglot
from sqlglot import parse_one
from sqlglot.errors import ParseError
import chromadb
from chromadb.utils import embedding_functions
import uuid, re
# Commented out IPython magic to ensure Python compatibility.
import random
import sqlalchemy as sa
from sqlalchemy import text
from collections import defaultdict
from datetime import datetime, timedelta
import faker
CREATE_DEMO_DATA = False # set to True only when you WANT to (re)create data
if CREATE_DEMO_DATA:
# Safety: keep this generator for SQLite demo
if DB_DIALECT != "sqlite":
raise RuntimeError("This generator is intended for SQLite. Set DB_DIALECT='sqlite' first.")
engine = sa.create_engine(f"sqlite:///{SQLITE_PATH}")
# --- Parameters you can tweak ---
NUM_CUSTOMERS = 100 # number of customers
ORDERS_PER_CUSTOMER = (1, 10) # min/max orders per customer (inclusive)
ITEMS_PER_ORDER = (1, 8) # min/max line items per order
PRICE_RANGE = (5.0, 150.0) # unit price range
QTY_RANGE = (1, 5) # quantity per line
WINDOW_DAYS = 248 # place orders within the last WINDOW_DAYS days
US_STATES = [
"AL","AK","AZ","AR","CA","CO","CT","DE","FL","GA",
"HI","ID","IL","IN","IA","KS","KY","LA","ME","MD",
"MA","MI","MN","MS","MO","MT","NE","NV","NH","NJ",
"NM","NY","NC","ND","OH","OK","OR","PA","RI","SC",
"SD","TN","TX","UT","VT","VA","WA","WV","WI","WY"
]
# Reproducibility (optional)
random.seed(1337)
fake = faker.Faker()
fake.seed_instance(1337)
# --- Recreate schema (adds state, age, order_ts) ---
with engine.begin() as conn:
conn.exec_driver_sql("DROP TABLE IF EXISTS order_items")
conn.exec_driver_sql("DROP TABLE IF EXISTS orders")
conn.exec_driver_sql("DROP TABLE IF EXISTS customers")
conn.exec_driver_sql("""
CREATE TABLE customers (
id INTEGER PRIMARY KEY,
name TEXT,
email TEXT,
joined_at TEXT,
state TEXT, -- 2-letter US state
age INTEGER -- randomized age
)
""")
conn.exec_driver_sql("""
CREATE TABLE orders (
id INTEGER PRIMARY KEY,
customer_id INTEGER,
order_ts TEXT, -- ISO8601 timestamp
total_amount REAL, -- computed from order_items
FOREIGN KEY(customer_id) REFERENCES customers(id)
)
""")
conn.exec_driver_sql("""
CREATE TABLE order_items (
id INTEGER PRIMARY KEY,
order_id INTEGER,
sku TEXT,
quantity INTEGER,
price REAL,
FOREIGN KEY(order_id) REFERENCES orders(id)
)
""")
# --- Generate Customers ----
customers = []
for cid in range(1, NUM_CUSTOMERS + 1):
name = fake.name()
email = fake.unique.email()
joined = fake.date_between(start_date="-2y", end_date="today").isoformat()
state = random.choice(US_STATES)
age = random.randint(18, 85)
customers.append((cid, name, email, joined, state, age))
# --- Generate Orders within the last WINDOW_DAYS days (random count per customer) ---
end = datetime.now()
start = end - timedelta(days=WINDOW_DAYS)
def random_ts_in_window():
# Pick a random datetime within [start, end]
delta_seconds = int((end - start).total_seconds())
s = random.randint(0, delta_seconds)
ts = start + timedelta(seconds=s)
return ts.replace(second=0, microsecond=0).isoformat(timespec="minutes") # clean minutes precision
orders = []
order_id = 1
orders_by_customer = defaultdict(list)
for cust_id in range(1, NUM_CUSTOMERS + 1):
n_orders = random.randint(*ORDERS_PER_CUSTOMER)
for _ in range(n_orders):
ts = random_ts_in_window()
total_placeholder = 0.0
orders.append((order_id, cust_id, ts, total_placeholder))
orders_by_customer[cust_id].append(order_id)
order_id += 1
# --- Generate Order Items and compute totals ---
def gen_sku():
return f"SKU-{random.choice(['RED','BLU','GRN','YEL','BLK','WHT'])}-{random.randint(100,999)}"
items = []
item_id = 1
order_totals = defaultdict(float)
for oid, cust_id, ts, _ in orders:
n_items = random.randint(*ITEMS_PER_ORDER)
for _ in range(n_items):
sku = gen_sku()
qty = random.randint(*QTY_RANGE)
price = round(random.uniform(*PRICE_RANGE), 2)
line_total = qty * price
order_totals[oid] += line_total
items.append((item_id, oid, sku, qty, price))
item_id += 1
# Replace order totals with computed values (rounded to 2 decimals)
orders_final = []
for (oid, cust_id, ts, _old) in orders:
total_amount = round(order_totals[oid], 2)
orders_final.append((oid, cust_id, ts, total_amount))
# --- Insert data (looping is fine at this scale) ---
with engine.begin() as conn:
for row in customers:
conn.exec_driver_sql(
"INSERT INTO customers (id, name, email, joined_at, state, age) VALUES (?, ?, ?, ?, ?, ?)", row
)
for row in orders_final:
conn.exec_driver_sql(
"INSERT INTO orders (id, customer_id, order_ts, total_amount) VALUES (?, ?, ?, ?)", row
)
for row in items:
conn.exec_driver_sql(
"INSERT INTO order_items (id, order_id, sku, quantity, price) VALUES (?, ?, ?, ?, ?)", row
)
# --- Sanity checks ---
with engine.connect() as conn:
c_cnt = conn.execute(text("SELECT COUNT(*) FROM customers")).scalar()
o_cnt = conn.execute(text("SELECT COUNT(*) FROM orders")).scalar()
i_cnt = conn.execute(text("SELECT COUNT(*) FROM order_items")).scalar()
ts_min, ts_max = conn.execute(text("SELECT MIN(order_ts), MAX(order_ts) FROM orders")).one()
# Example: top 5 days by order count within the generated window
top_days = conn.execute(text("""
SELECT substr(order_ts, 1, 10) AS day, COUNT(*) AS orders_on_day
FROM orders
GROUP BY day
ORDER BY orders_on_day DESC
LIMIT 5
""")).fetchall()
print(
"✅ Demo DB generated\n"
f" - Customers: {c_cnt}\n"
f" - Orders: {o_cnt}\n"
f" - Items: {i_cnt}\n"
f" - Order window: {ts_min} → {ts_max}\n"
f" - Top days by order count: {top_days}"
)
TARGET = "salesdatabig.db"
# run a one-off autocommit connection
with engine.execution_options(isolation_level="AUTOCOMMIT").connect() as conn:
conn.exec_driver_sql(f"VACUUM INTO '{TARGET}';")
else:
print("Skipping data generation; using existing /content/salesdata.db")
def make_engine():
if DB_DIALECT == "sqlite":
return sa.create_engine(f"sqlite:///{SQLITE_PATH}")
if DB_DIALECT == "postgresql":
return sa.create_engine(POSTGRES_URL)
if DB_DIALECT == "mysql":
return sa.create_engine(MYSQL_URL)
if DB_DIALECT == "mssql":
return sa.create_engine(MSSQL_URL)
raise ValueError("Unsupported DB_DIALECT")
engine = make_engine()
print("β
Engine ready")
from sqlalchemy import inspect, text
import pandas as pd
import json
inspector = inspect(engine)
tables = inspector.get_table_names()
# Load JSON data dictionary
with open("data_dictionary.updated.json", "r") as f:
DATA_DICTIONARY = json.load(f)
def _mk_schema_block(table: str) -> str:
cols = inspector.get_columns(table)
col_lines = [f"- {c['name']} {str(c.get('type'))}" for c in cols]
return f"Table: {table}\nColumns:\n" + "\n".join(col_lines)
def _sample_rows_markdown(table: str, n: int = 3) -> str:
try:
with engine.connect() as conn:
df = pd.read_sql(text(f"SELECT * FROM {table} LIMIT {n}"), conn)
return df.to_markdown(index=False)
except Exception as e:
return f"<no sample rows: {type(e).__name__}>"
def _mk_dictionary_block(table: str) -> str:
dd = DATA_DICTIONARY.get(table, {})
lines = [f"Data Dictionary for {table}:"]
for col, meta in dd.items():
desc = meta.get("description", "")
syns = ", ".join(meta.get("synonyms", []))
lines.append(f"- {col}: {desc} Synonyms: [{syns}]")
return "\n".join(lines) if len(lines) > 1 else f"Data Dictionary for {table}: <none>"
def build_rag_docs(sample_rows_per_table: int = 3):
docs = []
for t in tables:
schema_text = _mk_schema_block(t)
samples_md = _sample_rows_markdown(t, sample_rows_per_table)
dict_text = _mk_dictionary_block(t)
# One "rich" document per table: schema + data dictionary + samples
table_doc_text = (
schema_text
+ "\n\n"
+ dict_text
+ "\n\nSample rows:\n"
+ samples_md
)
docs.append({
"id": f"{t}::table_doc",
"text": table_doc_text,
"metadata": {"table": t, "kind": "table_doc"}
})
# Optional: per-column mini-docs (improves recall for synonyms)
for col in inspector.get_columns(t):
name = col["name"]
dtype = str(col.get("type"))
dd = DATA_DICTIONARY.get(t, {}).get(name, {})
desc = dd.get("description", "")
syns = ", ".join(dd.get("synonyms", []))
col_text = (
f"Table: {t}\nColumn: {name}\nType: {dtype}\n"
f"Description: {desc}\nSynonyms: [{syns}]"
)
docs.append({
"id": f"{t}.{name}::column_doc",
"text": col_text,
"metadata": {"table": t, "column": name, "kind": "column_doc"}
})
print(f"Prepared column doc: {t}.{name}")
return docs
schema_docs = build_rag_docs(sample_rows_per_table=3)
print(f"β
Prepared {len(schema_docs)} RAG docs (tables + columns + dictionary)")
''' GENERATE CHROMA------------------------------'''
client = chromadb.Client()
collection_name = f"schema-index-{uuid.uuid4().hex[:8]}"
if EMBEDDINGS_PROVIDER == "openai":
from openai import OpenAI
openai_client = OpenAI()
if not os.environ.get("OPENAI_API_KEY"):
print("*** Please set the OPENAI_API_KEY environment variable before building embeddings!")
embedding_fn = embedding_functions.OpenAIEmbeddingFunction(
api_key=os.environ.get("OPENAI_API_KEY"),
model_name=EMBEDDING_MODEL_NAME,
)
else:
embedding_fn = embedding_functions.SentenceTransformerEmbeddingFunction(
model_name=LOCAL_EMBEDDING_MODEL
)
col = client.create_collection(name=collection_name, embedding_function=embedding_fn)
col.add(
ids=[d["id"] for d in schema_docs],
documents=[d["text"] for d in schema_docs],
metadatas=[d["metadata"] for d in schema_docs],
)
print(f'✅ Chroma collection "{collection_name}" ready')
'''xxxxxxx--------------- EXPORT CHROMA ------------------'''
'''
import chromadb
# pick a folder in colab's filesystem (or in google drive if you want to keep it)
persist_dir = "/content/chroma_db"
client = chromadb.PersistentClient(path=persist_dir)
collection_name = "schema-index-2613d628"
# if the collection already exists, open it
try:
col = client.get_collection(collection_name)
print("β
Loaded existing collection:", collection_name)
except:
# else create & populate it
col = client.create_collection(name=collection_name, embedding_function=embedding_fn)
col.add(
ids=[d["id"] for d in schema_docs],
documents=[d["text"] for d in schema_docs],
metadatas=[d["metadata"] for d in schema_docs],
)
print("β
Created new collection:", collection_name)
print("Persistence folder:", persist_dir)
'''
'''xxxxxxxx---------------- IMPORT CHROMA--------------'''
'''
client = chromadb.PersistentClient(path="/content/chroma_db")
col = client.get_collection("schema-index-892e45f2")
'''
"""Define schema retriever ( nearest K )"""
'''------- load schema context from chroma into memory ----'''
def retrieve_schema_context(question: str, top_k: int = TOP_K) -> Tuple[str, list]:
res = col.query(query_texts=[question], n_results=top_k)
docs = res.get("documents", [[]])[0]
ids = res.get("ids", [[]])[0]
context = "\n\n".join(docs)
return context, ids
ctx, ids = retrieve_schema_context("total revenue by customer in 2025")
print(ctx[:1000], "...")
SYSTEM_PROMPT = (
"You are a senior data engineer who translates English into SQL for {dialect}. "
"Return ONLY a runnable SQL SELECT (or WITH ... SELECT) statement: "
"no explanations, no Markdown, no backticks, no code fences. "
"Never modify data. Prefer explicit JOINs. Add a LIMIT {limit} unless an aggregate is requested. "
"When users mention relative times (\"this year\", \"this quarter\", \"this month\"), always use dynamic SQLite date functions, e.g.: "
"- this year → date('now','start of year') "
"- this month → date('now','start of month') "
"- this quarter → derive it from date('now','start of year') plus '+3 months' offsets (SQLite has no 'start of quarter' modifier) "
"- last 12 months → date('now','-12 months') "
"Do NOT hardcode literal years like 2023 unless the user explicitly names a year."
)
def clean_sql_output(s: str) -> str:
"""Remove markdown fences/backticks/lang tags and return plain SQL."""
if s is None:
return ""
s = s.strip().replace("\r\n", "\n")
# If there are fenced code blocks, extract their contents (supports ``` or ~~~)
blocks = re.findall(r"```(?:[a-zA-Z0-9_+.-]+)?\s*([\s\S]*?)```", s)
tildes = re.findall(r"~~~(?:[a-zA-Z0-9_+.-]+)?\s*([\s\S]*?)~~~", s)
if blocks or tildes:
parts = [b.strip() for b in (blocks + tildes) if b.strip()]
s = "\n".join(parts) if parts else s
# Remove a leading language tag that may remain at the very top (e.g., 'sql\n')
s = re.sub(r"^(?:sql|sqlite|postgres|postgresql|mysql|mssql)\s*\n", "", s, flags=re.IGNORECASE)
# Nuke any remaining backticks just in case
s = s.replace("```", "")
s = s.replace("`", "")
# Also strip quotation-like Unicode artifacts if they sneak in
s = s.replace("β", "'").replace("β", '"').replace("β", '"')
return s.strip()
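# Quick, illustrative sanity check for clean_sql_output: a fenced, language-tagged
# LLM reply should come back as bare SQL.
_demo_llm_reply = "```sql\nSELECT name FROM customers LIMIT 5;\n```"
assert clean_sql_output(_demo_llm_reply) == "SELECT name FROM customers LIMIT 5;"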
from sqlglot import parse_one
from sqlglot.errors import ParseError
READ_ONLY_WHITELIST = {"SELECT", "WITH"}
def is_safe_select(sql: str) -> bool:
sql = clean_sql_output(sql) # << always clean first
try:
ast = parse_one(sql, read="sqlite" if DB_DIALECT == "sqlite" else DB_DIALECT)
except ParseError:
return False
return ast.key.upper() in READ_ONLY_WHITELIST
def enforce_limit(sql: str, limit: int = MAX_RETURN_ROWS) -> str:
sql = clean_sql_output(sql) # << always clean first
try:
ast = parse_one(sql, read="sqlite" if DB_DIALECT == "sqlite" else DB_DIALECT)
if not re.search(r"\bLIMIT\b", sql, re.IGNORECASE):
sql = sql.rstrip(";") + f" LIMIT {limit};"
except ParseError:
# If parse fails, still append a LIMIT as a fallback
if not re.search(r"\bLIMIT\b", sql, re.IGNORECASE):
sql = sql.rstrip(";") + f" LIMIT {limit};"
return sql
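# Illustrative guardrail checks (no database access needed): mutating statements
# are rejected and bare SELECTs gain a LIMIT cap.
assert not is_safe_select("DELETE FROM orders")
assert is_safe_select("SELECT id FROM orders")
assert enforce_limit("SELECT id FROM orders").rstrip(";").upper().endswith(f"LIMIT {MAX_RETURN_ROWS}")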
# SQL generation prompt + function
USER_PROMPT_TEMPLATE = (
"Question: {question}\n"
"Dialect: {dialect}\n\n"
"Context:\n{context}\n\n"
"Return ONLY the SQL query."
)
from openai import OpenAI
def call_llm_for_sql(question: str, context: str, dialect: str, limit: int) -> str:
messages = [
{"role": "system", "content": SYSTEM_PROMPT.format(dialect=dialect, limit=limit)},
{"role": "user", "content": USER_PROMPT_TEMPLATE.format(question=question, context=context, dialect=dialect)},
]
client = OpenAI()
resp = client.chat.completions.create(
model=LLM_MODEL_NAME,
messages=messages,
temperature=0.0,
)
return resp.choices[0].message.content.strip()
''' DEFINE FINAL SQL-------------------------'''
# TEST QUESTION
question = "Total revenue in new york in last week"
context, hit_ids = retrieve_schema_context(question, top_k=TOP_K)
sql_raw = call_llm_for_sql(question, context, DB_DIALECT, MAX_RETURN_ROWS)
print("\n--- Raw SQL ---\n", sql_raw)
if not is_safe_select(sql_raw):
raise ValueError("Generated SQL failed safety/parse checks. Try rephrasing your question.")
sql_final = enforce_limit(sql_raw, MAX_RETURN_ROWS)
print("\n--- Final SQL ---\n", sql_final)
with engine.connect() as conn:
df = pd.read_sql(text(sql_final), conn)
if df.empty:
print("\n\nSorry, no results")
else:
df # Colab will pretty-print
def explain_sql(sql: str) -> str:
prefix = {
"sqlite": "EXPLAIN QUERY PLAN ",
"postgresql": "EXPLAIN ",
"mysql": "EXPLAIN ",
"mssql": "EXPLAIN ", # may vary by driver/version
}.get(DB_DIALECT, "EXPLAIN ")
with engine.connect() as conn:
try:
plan_df = pd.read_sql(text(prefix + sql), conn)
return plan_df.to_markdown(index=False)
except Exception as e:
return f"<explain failed: {type(e).__name__}: {e}>"
def optimize_with_llm(sql: str, plan_md: str, question: str, context: str) -> str:
if LLM_PROVIDER != "openai":
return sql
from openai import OpenAI
client = OpenAI()
system = "You are a SQL performance expert. Return only improved SQL or the original if optimal."
user = f"Question: {question}\nDialect: {DB_DIALECT}\nPlan:\n{plan_md}\n\nSQL:\n{sql}\n\nIf indexes or hints are required, add comments, but keep the query read-only."
resp = client.chat.completions.create(
model=LLM_MODEL_NAME,
messages=[{"role":"system","content":system},{"role":"user","content":user}],
temperature=0.0,
)
return resp.choices[0].message.content.strip()
def explain_the_sql(sql: str, question: str) -> str:
if LLM_PROVIDER != "openai":
return sql
from openai import OpenAI
client = OpenAI()
system = "You are a SQL Teacher for Business users extraordinaire. Return in very simple english, in the context of the original question, an english paragraph explaining how this SQL statement returns the result we are looking for and why its optimal."
user = f"Question: {question}\nDialect: {DB_DIALECT}\n\nSQL:\n{sql}\n\nAssume the user is not a SQL or database expert but is business savvy"
resp = client.chat.completions.create(
model=LLM_MODEL_NAME,
messages=[{"role":"system","content":system},{"role":"user","content":user}],
temperature=0.0,
)
return resp.choices[0].message.content.strip()
plan = explain_sql(sql_final)
print("\n--- Plan ---\n", plan)
maybe_better = optimize_with_llm(sql_final, plan, question, context)
if maybe_better and maybe_better.strip() != sql_final.strip():
print("\n--- Suggested optimized SQL ---\n", maybe_better)
if is_safe_select(maybe_better):
sql_opt = enforce_limit(maybe_better, MAX_RETURN_ROWS)
with engine.connect() as conn:
df2 = pd.read_sql(text(sql_opt), conn)
print("\n(Executed optimized SQL)")
df2
else:
print("\nSuggestion failed safety checks; keeping original.")
else:
print("\nNo changes suggested or identical SQL.")
def ask(question: str, run_explain: bool = False):
context, _ = retrieve_schema_context(question, top_k=TOP_K)
sql_raw = call_llm_for_sql(question, context, DB_DIALECT, MAX_RETURN_ROWS)
if not is_safe_select(sql_raw):
raise ValueError("Generated SQL failed safety/parse checks.")
sql_final = enforce_limit(sql_raw, MAX_RETURN_ROWS)
with engine.connect() as conn:
df = pd.read_sql(text(sql_final), conn)
print("\nSQL:\n", sql_final)
#display(df)
if run_explain:
plan = explain_sql(sql_final)
print("\nPlan:\n", plan)
# Try it:
# ask("Top 5 customers by total spent")
''' test prompt'''
ask("average revenue per month for this year")
"""
1. assume same monthly average sales then what will be the revenue for this whole year
"""
import json
from graphviz import Digraph
from sqlalchemy import text
def _is_postgres():
# SQLAlchemy engine created earlier
return engine.dialect.name in ("postgresql", "postgres")
def _explain_sqlite(sql: str):
"""
Returns list of rows with columns: id, parent, detail
from: EXPLAIN QUERY PLAN <sql>
"""
import pandas as pd
with engine.connect() as conn:
df = pd.read_sql(text("EXPLAIN QUERY PLAN " + sql), conn)
# modern SQLite returns columns: id, parent, notused, detail
cols = [c.lower() for c in df.columns]
def col(name, fallback):
try:
return df[name]
except KeyError:
# tolerate variations
idx = cols.index(name) if name in cols else None
return df.iloc[:, idx] if idx is not None else fallback
out = []
for i in range(len(df)):
rid = int(col("id", df.iloc[i:i+1,0])[i])
parent = int(col("parent", df.iloc[i:i+1,0])[i]) if "parent" in cols else -1
detail = str(col("detail", df.iloc[i:i+1,-1])[i])
out.append({"id": rid, "parent": parent, "detail": detail})
return out
def _graph_sqlite_plan(rows):
"""
Build a Graphviz Digraph from SQLite plan rows (id/parent/detail).
"""
g = Digraph("sqlite_plan", graph_attr={"rankdir": "TB"}, node_attr={"shape": "box", "fontsize":"10"})
seen = set()
for r in rows:
nid = f"n{r['id']}"
label = f"{r['id']}: {r['detail']}"
if nid not in seen:
g.node(nid, label=label)
seen.add(nid)
for r in rows:
if r["parent"] != -1:
g.edge(f"n{r['parent']}", f"n{r['id']}")
return g
def _explain_postgres(sql: str):
"""
Returns the JSON object from EXPLAIN (FORMAT JSON) <sql>.
"""
with engine.connect() as conn:
res = conn.execute(text("EXPLAIN (FORMAT JSON) " + sql)).fetchone()
# First column contains a JSON array with one element
data = res[0]
if isinstance(data, str):
data = json.loads(data)
return data[0]["Plan"]
def _graph_postgres_plan(plan, g=None, parent_id=None, node_id=[0]):
"""
Recursively build a Graphviz Digraph from a Postgres plan dict.
Shows Node Type, Join Type, Rows and Cost.
"""
if g is None:
g = Digraph("pg_plan", graph_attr={"rankdir": "TB"}, node_attr={"shape":"box", "fontsize":"10"})
node_id[0] += 1
nid = f"n{node_id[0]}"
label_lines = [plan.get("Node Type", "Node")]
if "Join Type" in plan: label_lines.append(plan["Join Type"])
est_rows = plan.get("Plan Rows") or plan.get("Actual Rows")
if est_rows is not None: label_lines.append(f"rows: {est_rows}")
if "Total Cost" in plan or "Startup Cost" in plan:
sc = plan.get("Startup Cost", "")
tc = plan.get("Total Cost", "")
label_lines.append(f"cost: {sc}..{tc}")
if "Relation Name" in plan: label_lines.append(plan["Relation Name"])
g.node(nid, label="\n".join(label_lines))
if parent_id:
g.edge(parent_id, nid)
for child in plan.get("Plans", []) or []:
_graph_postgres_plan(child, g=g, parent_id=nid, node_id=node_id)
return g
def visualize_explain_svg(sql: str) -> str:
"""
Create an SVG string of the explain plan for current engine dialect.
"""
if _is_postgres():
plan = _explain_postgres(sql)
g = _graph_postgres_plan(plan)
else:
# default to SQLite path
rows = _explain_sqlite(sql)
g = _graph_sqlite_plan(rows)
svg_bytes = g.pipe(format="svg")
return svg_bytes.decode("utf-8")
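# Example usage (kept commented because it needs the graphviz system binaries):
# svg_markup = visualize_explain_svg(sql_final)
# print(svg_markup[:200]) # peek at the rendered plan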
# Commented out IPython magic to ensure Python compatibility.
'''------------------------- UI ----------------------- '''
# Step 14: Gradio UI (statusText, answerText, askQuestionInput, btnAskQuestion)
# Install & import
import gradio as gr
import pandas as pd
from sqlalchemy import text
import base64, pathlib # used by (optional) banner helpers below
def _df_to_markdown(df: pd.DataFrame, max_rows: int = 200) -> str:
# keep output reasonable for the textbox
if len(df) > max_rows:
df = df.head(max_rows)
try:
return df.to_markdown(index=False)
except Exception:
return df.to_string(index=False)
# --- Helpers for UI interactions (must be defined before they're used) ---
def _toggle_sql_box(show: bool):
return gr.update(visible=show)
def _toggle_visual(show: bool):
return gr.update(visible=show)
# UPDATED: handle_ask now returns a 5th value (debug log) for the Debugger panel
def handle_ask(question: str, show_text_plan: bool = True, show_visual_plan: bool = False):
"""
Returns:
status_text (str),
answer_table (pd.DataFrame),
sql_text_update (gr.update for the right panel),
plan_html_update (gr.update for the SVG panel),
debug_log (str) # NEW
"""
debug_lines = []
def _log(section, payload):
try:
import json
text_ = payload if isinstance(payload, str) else json.dumps(payload, indent=2, default=str)
except Exception:
text_ = str(payload)
#debug_lines.append(f"[{section}]\n{text_}\n")
if not question or not question.strip():
return ("Please enter a question.", pd.DataFrame(),
gr.update(value="", visible=False),
gr.update(value="", visible=False),
"")
try:
# Retrieval
context, _ = retrieve_schema_context(question, top_k=TOP_K)
_log("Context.top_k", TOP_K)
_log("Context.sample", context[:1000] + ("..." if len(context) > 1000 else ""))
# NL -> SQL
sql_raw = call_llm_for_sql(question, context, DB_DIALECT, MAX_RETURN_ROWS)
_log("SQL.raw", sql_raw)
# Clean/validate
sql_clean = clean_sql_output(sql_raw)
_log("SQL.cleaned", sql_clean)
if not is_safe_select(sql_clean):
dbg = f"--- Raw LLM output ---\n{sql_raw}\n\n--- Cleaned SQL ---\n{sql_clean}"
_log("Guardrail", "unsafe SELECT")
return ("Generated SQL failed safety/parse checks. Try rephrasing your question.",
pd.DataFrame(),
gr.update(value=dbg, visible=True),
gr.update(value="", visible=False),
"\n".join(debug_lines))
sql_final = enforce_limit(sql_clean, MAX_RETURN_ROWS)
_log("SQL.final", sql_final)
# Execute
with engine.connect() as conn:
df = pd.read_sql(text(sql_final), conn)
status = "OK" if not df.empty else "OK (no rows)"
_log("DB.rows", len(df))
# Right box: SQL + optional text plan
sql_parts = ["### SQL Executed", f"```\n{sql_final}\n```"]
if show_text_plan:
plan_md = explain_sql(sql_final) # existing text EXPLAIN
_log("Explain.plan", plan_md[:1200] + ("..." if len(plan_md) > 1200 else ""))
sql_parts += ["### Execution Plan", plan_md]
sql_text = "\n\n".join(sql_parts)
sql_update = gr.update(value=sql_text, visible=True)
sqlExplanation = explain_the_sql(sql_final,question)
debug_lines.append(f"\n{sqlExplanation}\n")
# Visual plan
if show_visual_plan:
try:
svg = visualize_explain_svg(sql_final)
plan_update = gr.update(value=svg, visible=True)
except Exception as viz_err:
plan_update = gr.update(value=f"<pre>Diagram error: {viz_err}</pre>", visible=True)
else:
plan_update = gr.update(value="", visible=False)
return (status, df, sql_update, plan_update, "\n".join(debug_lines))
except Exception as e:
last_sql = locals().get('sql_final', locals().get('sql_clean', locals().get('sql_raw', 'N/A')))
diag = f"Question: {question}\n\nLast SQL:\n{last_sql}"
return (f"Error: {type(e).__name__}: {e}",
pd.DataFrame(),
gr.update(value=diag, visible=True),
gr.update(value="", visible=False),
"\n".join(debug_lines))
'''-------------------------------------------------------------------------------------'''
''' UI CODE '''
'''-------------------------------------------------------------------------------------'''
# Banner helpers: embed the logo as a base64 data URI so Gradio can render it inline
LOGO_PATH = "logo.png" # <- update this if your file is elsewhere
def img_to_data_uri(path: str, mime: str = "image/png") -> str:
data = pathlib.Path(path).read_bytes()
b64 = base64.b64encode(data).decode("utf-8")
return f"data:{mime};base64,{b64}"
banner_uri = img_to_data_uri(LOGO_PATH)
banner_html = f"""
<div class="banner">
<img src="{banner_uri}" alt="Databases & Sales" />
</div>
"""
BANNER_CSS = """
body, .gradio-container {
background-color: #FEFAF2 !important; /* whole app background */
}
.banner {
display:flex;
justify-content:center;
align-items:center;
padding: 8px 0 4px;
background-color: #FEFAF2;
}
.banner img {
max-width: 50%;
width: 50%;
border-radius: 12px;
box-shadow: 0 4px 14px rgba(0,0,0,0.12);
}
#btnAskQuestion > button {
background-color: #F28C28 !important; /* orange */
color: white !important;
font-weight: bold;
border-radius: 8px;
padding: 12px 16px;
transition: background-color 0.2s ease;
}
#btnAskQuestion > button:hover {
background-color: #d97706 !important; /* darker orange on hover */
}
#askQuestionInput textarea {
resize: none !important; /* disable drag resize */
overflow: auto !important; /* keep scroll if text wraps */
height: 40px !important; /* fix height so it stays one line */
}
"""
THEME = gr.themes.Soft(
primary_hue="orange", # makes primary buttons orange
neutral_hue="stone",
)
with gr.Blocks(theme=THEME, css=BANNER_CSS) as demo:
gr.HTML(banner_html)
gr.Markdown("## 30+ Days of sales data, Customers across all states - ask questions about the data, or try out predictive questions which will try to auto-generate the right projection math and tell us the projection!")
# INPUT ROW
with gr.Row():
askQuestionInput = gr.Textbox(
label="Question",
placeholder="e.g., Total revenue by state in the last 7 days",
lines=1,
elem_id="askQuestionInput",
scale=8,
autofocus=True,
value="Total revenue by state in the last 30 days"
)
btnAskQuestion = gr.Button("ask question", elem_id="btnAskQuestion", variant="primary", scale=2)
# NEW: projection / what-if helper Markdown directly below the input row
with gr.Row():
projHelp = gr.Markdown(
"**Try projection/what-if phrasing:** \n"
"- \"projection\", \"what if\", \"assume\", \"continue trend\" \n"
"- Horizons: \"rest of year\", \"next quarter\", \"next 3 months\" \n"
"- Modifiers: \"50% decline in sales volume\", \"+10% price\", \"-5% AOV\" \n"
"- Windows: \"YTD\", \"MTD\", \"last 90 days\" \n"
"\n"
"**Example projection query:** \n"
" -\"what if same average revenue, what will be total revenue for rest of year\""
)
with gr.Row():
chkExplainPlan = gr.Checkbox(value=True, label="Show SQL query") # controls right box visibility
chkVisualPlan = gr.Checkbox(value=True, label="Show visual plan (diagram)") # optional
with gr.Row():
statusText = gr.Textbox(label="Status", interactive=False, elem_id="statusText")
with gr.Row():
# SIDE-BY-SIDE OUTPUTS
with gr.Column(scale=7):
answerTable = gr.Dataframe(label="Results", interactive=False, elem_id="answerTable")
with gr.Column(scale=5):
sqlTextBox = gr.Textbox(label="SQL + Plan", lines=14, elem_id="sqlTextBox", visible=True)
planHTML = gr.HTML(label="Visual Plan (SVG)", elem_id="planHTML", visible=False)
# Live toggles (helpers defined above)
chkExplainPlan.change(fn=_toggle_sql_box, inputs=chkExplainPlan, outputs=sqlTextBox)
chkVisualPlan.change(fn=_toggle_visual, inputs=chkVisualPlan, outputs=planHTML)
# NEW: Debugger panel at bottom, BEFORE event wiring so the component exists
with gr.Row():
gr.Markdown("---\n### How did we answer your question?", elem_id="debuggerHeader")
with gr.Row():
debugText = gr.Textbox(label="Explanation of what the SQL statement does...", interactive=False, lines=14, elem_id="debugText")
# Wire ask button & submit: return 5 outputs (last goes to debugText)
btnAskQuestion.click(
fn=handle_ask,
inputs=[askQuestionInput, chkExplainPlan, chkVisualPlan],
outputs=[statusText, answerTable, sqlTextBox, planHTML, debugText],
api_name="ask"
)
askQuestionInput.submit(
fn=handle_ask,
inputs=[askQuestionInput, chkExplainPlan, chkVisualPlan],
outputs=[statusText, answerTable, sqlTextBox, planHTML, debugText]
)
demo.queue().launch(debug=True) |