import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# Load the pre-trained model and tokenizer from Hugging Face Model Hub
# NOTE(review): trust_remote_code=True executes Python shipped inside the
# model repo — acceptable only because the repo id below is pinned.
model_id = "HridaAI/Hrida-T2SQL-3B-128k-V0.1"
tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
# float16 halves weight memory versus the default float32; no explicit
# device placement here, so the model stays wherever from_pretrained puts it.
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.float16, trust_remote_code=True)
# Define the function to generate SQL query from natural language input
def generate_sql(query):
    """Translate a natural-language question into a SQL query string.

    Args:
        query: Natural-language question to convert.

    Returns:
        The model's decoded output (generated SQL) as a string.
    """
    # Move the tokenized tensors to the model's device so generation works
    # whether the fp16 model ended up on CPU or GPU.
    inputs = tokenizer(query, return_tensors="pt").to(model.device)
    # Inference only: disable autograd tracking to avoid building a graph
    # and holding activations in memory during generation.
    with torch.no_grad():
        outputs = model.generate(**inputs, max_new_tokens=256)
    # Decode the single generated sequence, dropping special tokens.
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
# Assemble the Gradio UI: one free-text question box in, generated SQL out.
question_box = gr.Textbox(
    lines=2,
    placeholder="Enter your natural language question here...",
)
iface = gr.Interface(
    fn=generate_sql,
    inputs=question_box,
    outputs="text",
    title="Text to SQL Converter",
    description="Convert natural language questions into SQL queries using the Hrida-T2SQL-3B model.",
)
# Start the local web server for the demo.
iface.launch()