Create app.py
app.py ADDED
@@ -0,0 +1,31 @@
import gradio as gr
from transformers import pipeline
from datasets import load_dataset

# Load model from Hugging Face Hub
model_name = "Qwen/Qwen1.5-7B-Chat"  # or any other LLM
pipe = pipeline("text-generation", model=model_name)

# Optionally load GTA queries
gta_data = load_dataset("open-compass/GTA", split="test")

def run_model(user_query, use_sample):
    if use_sample:
        question = gta_data[int(user_query)]["question"]
    else:
        question = user_query

    output = pipe(question, max_new_tokens=256, do_sample=True)
    return f"**Input**: {question}\n\n**Output**:\n{output[0]['generated_text']}"

with gr.Blocks() as demo:
    gr.Markdown("## 🧠 GTA Tool Reasoning Demo with Hugging Face Models")
    with gr.Row():
        user_input = gr.Textbox(label="Enter your query or GTA index (if using sample)")
        use_sample = gr.Checkbox(label="Use sample from GTA dataset (enter index)", value=False)
    run_button = gr.Button("Run")
    output = gr.Markdown()

    run_button.click(run_model, inputs=[user_input, use_sample], outputs=output)

demo.launch()
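The app assumes open-compass/GTA loads via load_dataset, exposes a "test" split, and has a "question" column; none of that is confirmed here. A quick local sanity check along these lines (a sketch reusing the same identifiers as above) can catch a mismatch before the Space fails at runtime:

# Sketch: verify the dataset assumptions app.py relies on.
from datasets import load_dataset

gta = load_dataset("open-compass/GTA", split="test")
print(len(gta))            # how many indices the demo can accept
print(gta.column_names)    # confirm a "question" column is present
print(gta[0]["question"])  # the text run_model() would feed to the pipeline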
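One fragile spot: with the checkbox ticked, run_model calls int(user_query) directly, so non-numeric input raises a ValueError and an out-of-range index raises an IndexError, both of which surface as opaque Gradio errors. A minimal hardening sketch (the error messages are placeholders, not part of the original app):

def run_model(user_query, use_sample):
    if use_sample:
        # Validate the index before touching the dataset.
        try:
            idx = int(user_query)
        except ValueError:
            return "Enter a numeric GTA index when the sample box is checked."
        if not 0 <= idx < len(gta_data):
            return f"Index out of range: the test split has {len(gta_data)} samples."
        question = gta_data[idx]["question"]
    else:
        question = user_query

    output = pipe(question, max_new_tokens=256, do_sample=True)
    return f"**Input**: {question}\n\n**Output**:\n{output[0]['generated_text']}"

Separately, a 7B chat model generally needs a GPU-backed Space (or a smaller checkpoint) to load and respond in reasonable time; on free CPU hardware the pipeline call will be very slow or run out of memory.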