# app.py — Hugging Face Space "Data Analysis Agent" (revision 8e7badb)
# NOTE(review): the original paste included Hugging Face web-UI chrome
# ("raw / history / blame / 1.9 kB"); it has been folded into this comment
# header so the file is valid Python.
import gradio as gr
import os
import pandas as pd
from huggingface_hub import InferenceClient
# ===============================
# LLM CLIENT SETUP
# ===============================
# Read the Hugging Face access token from the Space secret named "HF".
# os.getenv returns None when the secret is missing; InferenceClient will
# then make unauthenticated calls, which may be rate-limited or rejected.
HF_TOKEN = os.getenv("HF")
# Serverless Inference client pinned to Qwen 2.5 7B Instruct — the model
# the assessment (see analyze_excel's docstring) asks candidates to use.
client = InferenceClient(model="Qwen/Qwen2.5-7B-Instruct", token=HF_TOKEN)
def analyze_excel(message, history, file):
    """
    Chat handler stub for the assessment; returns a status string.

    Problem Statement:
    1. Add your own HF token in the settings to get the LLM working.
    2. Update requirements.txt, app.py as needed.
    3. Develop a robust "Text-to-Code" analytical workflow.
    Requirements:
    a.Code Generation : Transform natural language user queries into executable, sandboxed Python code (specifically using pandas).
    b.Execution : Securely execute the generated code on the Hugging Face Space server against the uploaded dataset.
    c.Synthesis : Capture the raw output of the code execution and feed it back to the LLM to generate a natural language insight.
    """
    # A dataset is a precondition for any analysis; prompt for one first.
    # Returning a string (never None) keeps Gradio's ChatInterface happy.
    if file is not None:
        return "File received! Candidate: Implement the Planner-Action-Synthesis logic here."
    return "Please upload an Excel file to begin."
# ===============================
# UI CONFIGURATION
# ===============================
# ===============================
# UI CONFIGURATION
# ===============================
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    # Page heading and task description shown above the widgets.
    gr.Markdown("# 📊 Technical Assessment: Data Analysis Agent")
    gr.Markdown("### Objective: Build a Text-to-Code workflow using Qwen 2.5")

    # Step 1: the dataset upload, restricted to Excel workbooks.
    with gr.Row():
        dataset_upload = gr.File(
            file_types=[".xlsx"],
            label="1. Upload Dataset (.xlsx)",
        )

    # Step 2: the chat panel; the uploaded file is forwarded to the
    # handler as an extra argument after (message, history).
    gr.ChatInterface(
        fn=analyze_excel,
        type="messages",
        additional_inputs=[dataset_upload],
        description="2. Ask questions about your data (e.g., 'What is the average profit by region?')",
    )

if __name__ == "__main__":
    demo.launch()