Elyad committed on
Commit
2dfd779
·
verified ·
1 Parent(s): 32777bd

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +25 -30
app.py CHANGED
@@ -1,38 +1,33 @@
1
- import gradio
2
  from transformers import AutoModelForCausalLM, AutoTokenizer
 
3
 
4
- # Define the model and tokenizer
5
  model_id = "HridaAI/Hrida-T2SQL-3B-128k-V0.1"
6
  tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
7
  model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.float16, trust_remote_code=True)
8
 
9
- # Define the context and prompt
10
- prompt = """
11
- Answer to the query will be in the form of an SQL query.
12
- ### Context: CREATE TABLE Employees (
13
- EmployeeID INT PRIMARY KEY,
14
- FirstName VARCHAR(50),
15
- LastName VARCHAR(50),
16
- Age INT,
17
- DepartmentID INT,
18
- Salary DECIMAL(10, 2),
19
- DateHired DATE,
20
- Active BOOLEAN,
21
- FOREIGN KEY (DepartmentID) REFERENCES Departments(DepartmentID)
22
- );
23
 
24
- CREATE TABLE Departments (
25
- DepartmentID INT PRIMARY KEY,
26
- DepartmentName VARCHAR(100),
27
- Location VARCHAR(100)
28
- );
29
- ### Input: Write a SQL query to select all the employees who are active.
30
- ### Response:
31
- """
32
- # Prepare the input
33
- messages = [{"role": "user", "content": prompt}]
34
- inputs = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True)
35
 
36
- # Generate the output
37
- outputs = model.generate(inputs, max_length=300)
38
- print(tokenizer.decode(outputs[0]))
 
1
+ import gradio as gr
2
  from transformers import AutoModelForCausalLM, AutoTokenizer
3
+ import torch
4
 
5
+ # Load the pre-trained model and tokenizer from Hugging Face Model Hub
6
  model_id = "HridaAI/Hrida-T2SQL-3B-128k-V0.1"
7
  tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
8
  model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.float16, trust_remote_code=True)
9
 
10
# Convert a natural-language question into a SQL query string.
def generate_sql(query):
    """Generate a SQL query for the given natural-language question.

    Args:
        query: The user's question as plain text.

    Returns:
        The model's generated SQL, decoded to a string with special
        tokens stripped.
    """
    # Tokenize the input question into model-ready tensors.
    inputs = tokenizer(query, return_tensors="pt")

    # Generate up to 256 new tokens of output.
    outputs = model.generate(**inputs, max_new_tokens=256)

    # Bug fix: decode only the newly generated tokens. generate()
    # returns prompt + continuation in outputs[0], so decoding it in
    # full would echo the user's question back ahead of the SQL.
    generated_tokens = outputs[0][inputs["input_ids"].shape[-1]:]
    return tokenizer.decode(generated_tokens, skip_special_tokens=True)
 
 
22
 
23
# Wire the generator into a simple Gradio UI: one multi-line text box
# for the question, plain text out for the generated SQL.
iface = gr.Interface(
    fn=generate_sql,
    title="Text to SQL Converter",
    description="Convert natural language questions into SQL queries using the Hrida-T2SQL-3B model.",
    inputs=gr.Textbox(lines=2, placeholder="Enter your natural language question here..."),
    outputs="text",
)

# Start the Gradio web server.
iface.launch()