# tinysql-demo / tinysql_model_demo.py
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

# Model registry: keys encode base model size (BM1 = 33M, BM2 = 0.5B, BM3 = 1B)
# and dataset complexity split (CS1-CS5); values are Hugging Face Hub repo IDs.
MODELS = {
"BM1_CS1_Syn (33M)": "withmartian/sql_interp_bm1_cs1_experiment_1.10",
"BM1_CS2_Syn (33M)": "withmartian/sql_interp_bm1_cs2_experiment_2.10",
"BM1_CS3_Syn (33M)": "withmartian/sql_interp_bm1_cs3_experiment_3.10",
"BM1_CS4_Syn (33M)": "withmartian/sql_interp_bm1_cs4_dataset_synonyms_experiment_1.1",
"BM1_CS5_Syn (33M)": "withmartian/sql_interp_bm1_cs5_dataset_synonyms_experiment_1.2",
"BM2_CS1_Syn (0.5B)": "withmartian/sql_interp_bm2_cs1_experiment_4.3",
"BM2_CS2_Syn (0.5B)": "withmartian/sql_interp_bm2_cs2_experiment_5.3",
"BM2_CS3_Syn (0.5B)": "withmartian/sql_interp_bm2_cs3_experiment_6.3",
"BM3_CS1_Syn (1B)": "withmartian/sql_interp_bm3_cs1_experiment_7.3",
"BM3_CS2_Syn (1B)": "withmartian/sql_interp_bm3_cs2_experiment_8.3",
"BM3_CS3_Syn (1B)": "withmartian/sql_interp_bm3_cs3_experiment_9.3",
}
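
# Cache of loaded (tokenizer, model) pairs keyed by display name, so each
# checkpoint is downloaded and initialized at most once per session.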
model_cache = {}
def load_model(model_name):
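    # Lazily load and memoize the requested checkpoint. float16 weights with
    # device_map="auto" assume an accelerator is available; on a CPU-only host
    # you would likely want torch.float32 instead.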
    if model_name not in model_cache:
        model_id = MODELS[model_name]
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForCausalLM.from_pretrained(
            model_id,
            torch_dtype=torch.float16,
            device_map="auto",
        )
        model_cache[model_name] = (tokenizer, model)
    return model_cache[model_name]

def generate_sql(model_name, instruction, schema, max_length=256, temperature=0.7):
    if not model_name or not instruction or not schema:
        return "Please fill in all fields and select a model"
    try:
        tokenizer, model = load_model(model_name)
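        # Instruction/Context/Response prompt template; this mirrors the
        # format the TinySQL checkpoints appear to have been fine-tuned on.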
prompt = f"""### Instruction: {instruction}
### Context: {schema}
### Response:"""
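        # NOTE: the continuation lines above are deliberately unindented so
        # the prompt string itself carries no leading whitespace.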
        inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
        # max_new_tokens budgets generated tokens only; max_length would also
        # count the prompt and can silently truncate output on long schemas.
        outputs = model.generate(
            **inputs,
            max_new_tokens=max_length,
            temperature=temperature,
            do_sample=temperature > 0,  # temperature 0 falls back to greedy decoding
            pad_token_id=tokenizer.eos_token_id,
        )
        generated = tokenizer.decode(outputs[0], skip_special_tokens=True)
        # Return only the completion after the final "### Response:" marker.
        if "### Response:" in generated:
            sql = generated.split("### Response:")[-1].strip()
        else:
            sql = generated.strip()
        return sql
    except Exception as e:
        return f"Error: {str(e)}"
examples = [
    [
        "BM1_CS1_Syn (33M)",
        "Show me the name and salary from employees",
        "CREATE TABLE employees (name VARCHAR(100), salary INT, department VARCHAR(100))",
    ],
    [
        "BM2_CS2_Syn (0.5B)",
        "List worker earnings from highest to lowest",
        "CREATE TABLE employees (name VARCHAR(100), salary INT, department VARCHAR(100))",
    ],
    [
        "BM3_CS3_Syn (1B)",
        "Count how many employees in each department",
        "CREATE TABLE employees (name VARCHAR(100), salary INT, department VARCHAR(100))",
    ],
]

def model_demo(shared_instruction, shared_schema):
"""Model demo component that can receive examples from dataset viewer"""
gr.HTML("""
<div style="text-align: center; padding: 3rem 2rem; background: linear-gradient(135deg, #3A3A3A 0%, #4A4A4A 100%); border-radius: 16px; margin-bottom: 2rem; color: white;">
<h1 style="font-size: 2.5rem; font-weight: 700; margin-bottom: 1rem;">TinySQL Interactive Demo</h1>
<p style="font-size: 1.2rem; opacity: 0.9; line-height: 1.6;">
Transform natural language into SQL queries using <span style="color: #FF6B4A; font-weight: 600;">mechanistically interpretable</span> models
</p>
</div>
""")
gr.HTML("""
<div style="background: #3A3A3A; border-radius: 12px; padding: 1.5rem; margin: 1.5rem 0; border-left: 4px solid #FF6B4A; color: #E0E0E0;">
<strong>How it works:</strong> Select a model, describe your query in plain English, and watch the model generate SQL.
</div>
""")
    with gr.Row():
        with gr.Column(scale=1):
            gr.Markdown("### Configuration")
            model_dropdown = gr.Dropdown(
                choices=list(MODELS.keys()),
                value="BM2_CS2_Syn (0.5B)",
                label="Model Selection",
                info="Larger models = better accuracy, slower inference",
            )
            gr.HTML("""
                <div style="background: #3A3A3A; border-radius: 8px; padding: 1rem; margin-top: 1rem; font-size: 0.9rem; color: #D0D0D0;">
                    <strong>BM1 (33M)</strong> - Lightning fast, simple queries<br>
                    <strong>BM2 (0.5B)</strong> - Balanced performance<br>
                    <strong>BM3 (1B)</strong> - Most accurate, complex queries<br><br>
                    <strong>Dataset Complexity:</strong><br>
                    CS1: Basic SELECT-FROM<br>
                    CS2: Adds ORDER BY<br>
                    CS3: Aggregations<br>
                    CS4: Adds WHERE filters<br>
                    CS5: Multi-table JOINs
                </div>
            """)
        with gr.Column(scale=2):
            gr.Markdown("### Your Query")
            instruction = gr.Textbox(
                label="What do you want to know?",
                placeholder="e.g., Find all employees earning more than $50,000 sorted by name",
                lines=2,
                value="",
            )
            schema = gr.Textbox(
                label="Database Schema",
                placeholder="CREATE TABLE employees (name VARCHAR, salary INT, department VARCHAR)",
                lines=3,
                value="CREATE TABLE employees (name VARCHAR(100), salary INT, department VARCHAR(100))",
            )
            with gr.Row():
                max_length = gr.Slider(64, 512, value=256, step=32, label="Max Length")
                temperature = gr.Slider(0.0, 1.0, value=0.1, step=0.1, label="Temperature")
            generate_btn = gr.Button("Generate SQL", variant="primary", size="lg")
            output = gr.Code(label="Generated SQL Query", language="sql", lines=8)
gr.Markdown("### Example Queries")
gr.Examples(examples=examples, inputs=[model_dropdown, instruction, schema])
    # Mirror the shared-state values (set by the dataset viewer) into the
    # local textboxes whenever they change.
    shared_instruction.change(
        fn=lambda x: x,
        inputs=shared_instruction,
        outputs=instruction,
    )
    shared_schema.change(
        fn=lambda x: x,
        inputs=shared_schema,
        outputs=schema,
    )
    generate_btn.click(
        fn=generate_sql,
        inputs=[model_dropdown, instruction, schema, max_length, temperature],
        outputs=output,
    )
    return {
        "instruction": instruction,
        "schema": schema,
        "output": output,
    }
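

if __name__ == "__main__":
    # Minimal standalone harness, assumed rather than taken from the original
    # Space (which wires model_demo into a larger app): create the shared-state
    # textboxes the component expects and launch it on its own.
    with gr.Blocks() as demo:
        shared_instruction = gr.Textbox(visible=False)
        shared_schema = gr.Textbox(visible=False)
        model_demo(shared_instruction, shared_schema)
    demo.launch()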