RealMati committed on
Commit
fe4cc52
·
verified ·
1 Parent(s): f120c35

Upload app.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. app.py +84 -0
app.py ADDED
@@ -0,0 +1,84 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import re
2
+ import gradio as gr
3
+ from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
4
+ import torch
5
+
6
# Hugging Face Hub repo ID of the fine-tuned T5 text-to-SQL checkpoint.
MODEL_ID = "RealMati/t2sql_v6_structured"

print(f"Loading model: {MODEL_ID}")
# Load tokenizer and seq2seq model once at import time so every request
# reuses the same in-memory weights.
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
model = AutoModelForSeq2SeqLM.from_pretrained(MODEL_ID)
# Inference only: disable dropout and other training-mode behavior.
model.eval()
print("Model loaded.")
13
+
14
# SQL keywords that should be rendered uppercase in the final query.
SQL_KEYWORDS = [
    "SELECT", "FROM", "WHERE", "AND", "OR", "NOT", "IN", "LIKE",
    "JOIN", "LEFT", "RIGHT", "INNER", "OUTER", "ON", "AS",
    "GROUP", "BY", "ORDER", "HAVING", "LIMIT", "OFFSET",
    "INSERT", "INTO", "VALUES", "UPDATE", "SET", "DELETE",
    "CREATE", "TABLE", "DROP", "ALTER", "INDEX", "VIEW",
    "DISTINCT", "COUNT", "SUM", "AVG", "MIN", "MAX",
    "BETWEEN", "EXISTS", "UNION", "ALL", "ANY", "CASE",
    "WHEN", "THEN", "ELSE", "END", "IS", "NULL", "ASC", "DESC",
]

# One compiled alternation matching any keyword as a whole word,
# case-insensitively — a single pass instead of one re.sub per keyword.
_KEYWORD_PATTERN = re.compile(
    r"\b(" + "|".join(re.escape(kw) for kw in SQL_KEYWORDS) + r")\b",
    re.IGNORECASE,
)
# A single-quoted SQL string literal; '' is an escaped quote inside it.
_STRING_LITERAL = re.compile(r"'(?:[^']|'')*'")


def postprocess_sql(sql: str) -> str:
    """Clean up raw model output into presentable SQL.

    Strips leftover special tokens, collapses whitespace, and uppercases
    SQL keywords. Keywords inside single-quoted string literals are left
    untouched so literal values (e.g. ``'select me'``) are not corrupted.

    Args:
        sql: Raw decoded text from the seq2seq model.

    Returns:
        The normalized SQL string.
    """
    sql = re.sub(r"<pad>|<unk>|<s>|</s>", "", sql.strip())
    sql = re.sub(r"\s+", " ", sql).strip()

    def _upper(match: re.Match) -> str:
        return match.group(0).upper()

    # Uppercase keywords only outside quoted string literals.
    parts: list[str] = []
    last = 0
    for lit in _STRING_LITERAL.finditer(sql):
        parts.append(_KEYWORD_PATTERN.sub(_upper, sql[last:lit.start()]))
        parts.append(lit.group(0))  # quoted literal: keep verbatim
        last = lit.end()
    parts.append(_KEYWORD_PATTERN.sub(_upper, sql[last:]))
    return "".join(parts).strip()
33
+
34
+
35
def predict(question: str, schema: str, num_beams: int, max_length: int):
    """Translate a natural-language question into a SQL query.

    Args:
        question: The user's question in plain English.
        schema: Optional schema description appended to the prompt.
        num_beams: Beam-search width for generation.
        max_length: Maximum length of the generated sequence.

    Returns:
        The post-processed SQL string, or a short message when the
        question is blank.
    """
    if not question.strip():
        return "Please enter a question."

    # Build the T5-style task prompt; schema is appended only when given.
    prompt = f"translate to SQL: {question}"
    trimmed_schema = schema.strip()
    if trimmed_schema:
        prompt = f"{prompt} | schema: {trimmed_schema}"

    encoded = tokenizer(prompt, return_tensors="pt", max_length=512, truncation=True)

    # Deterministic beam search — no gradients needed at inference time.
    with torch.no_grad():
        generated = model.generate(
            **encoded,
            max_length=int(max_length),
            num_beams=int(num_beams),
            early_stopping=True,
            do_sample=False,
        )

    decoded = tokenizer.decode(generated[0], skip_special_tokens=True)
    return postprocess_sql(decoded)
56
+
57
+
58
# Gradio UI: question + optional schema in, generated SQL out.
demo = gr.Interface(
    fn=predict,
    inputs=[
        gr.Textbox(
            label="Natural Language Question",
            placeholder="e.g. Show all users older than 18",
            lines=2,
        ),
        gr.Textbox(
            label="Database Schema (optional)",
            placeholder="e.g. users: id, name, age, email | orders: id, user_id, total",
            lines=2,
        ),
        gr.Slider(minimum=1, maximum=10, value=5, step=1, label="Beam Size"),
        gr.Slider(minimum=64, maximum=512, value=256, step=64, label="Max Output Length"),
    ],
    outputs=gr.Textbox(label="Generated SQL", lines=3),
    title="Text-to-SQL (T5 Fine-tuned)",
    description="Convert natural language questions to SQL queries. Provide a schema for better results.",
    examples=[
        ["Show all users older than 18", "users: id, name, age, email", 5, 256],
        ["Count orders per customer", "customers: id, name | orders: id, customer_id, total", 5, 256],
        ["Find the most expensive product", "products: id, name, price, category", 5, 256],
    ],
)

# Launch only when executed as a script, so the module can be imported
# (e.g. by tests or a Spaces runtime that serves `demo`) without
# immediately starting a server.
if __name__ == "__main__":
    demo.launch()