# NOTE: Hugging Face Spaces status banner ("Spaces: Sleeping") captured during
# extraction — not part of the application source; kept here only as a comment.
| # app.py | |
| from __future__ import annotations | |
| import os | |
| import traceback | |
| import regex as re2 | |
| from typing import List, Tuple, Dict, Any | |
| import gradio as gr | |
| import pandas as pd | |
| from datetime import datetime | |
| # --- (All your previous backend imports remain the same) --- | |
| from langchain.agents.agent_types import AgentType | |
| from langchain_cohere import ChatCohere | |
| from langchain_experimental.agents.agent_toolkits import create_pandas_dataframe_agent | |
| from settings import ( | |
| HEALTHCARE_SETTINGS, GENERAL_CONVERSATION_PROMPT, USE_SCENARIO_ENGINE, DEBUG_PLAN, | |
| COHERE_MODEL_PRIMARY, COHERE_TIMEOUT_S, USE_OPEN_FALLBACKS | |
| ) | |
| from audit_log import log_event | |
| from privacy import safety_filter, refusal_reply | |
| # ... (and so on for the rest of your backend functions) | |
# --- Backend logic ---
# The complete backend from the previous version belongs here:
# _sanitize_text, _create_enhanced_prompt, is_healthcare_scenario,
# _append_msg, ping_cohere, and the main handle() function.
# Minimal placeholder versions of some of these follow so the UI can run;
# replace them with the full implementations before deployment.
| def _sanitize_text(s: str) -> str: return s | |
| def _create_enhanced_prompt(s: str) -> str: return s | |
| def _append_msg(h, r, c): return (h or []) + [{"role": r, "content": c}] | |
| # This is your perfected backend engine. It does not need to be changed. | |
def handle(user_msg: str, history_messages: List[Dict[str, str]], files: list) -> Tuple[List[Dict[str, str]], str]:
    """Run one analysis turn (placeholder for the full LangChain agent logic).

    Args:
        user_msg: The user's prompt text.
        history_messages: Existing chat history as {'role', 'content'} dicts;
            may be None or empty for a fresh conversation.
        files: Uploaded file paths for this turn; may be None or empty.

    Returns:
        (updated_history, ""): the new history including the user turn and the
        assistant reply, plus an empty string used to clear the prompt textbox.
    """
    # Fix: tolerate files=None — the original iterated `files` unconditionally
    # and raised TypeError when nothing was uploaded.
    file_names = [os.path.basename(f) for f in (files or [])]
    response_text = (
        f"### Analysis Complete\n**Prompt:** {user_msg}\n"
        f"**Files Used:** {', '.join(file_names)}\n\n"
        "This is where the structured output from the AI agent would appear."
    )
    # Build a fresh list rather than mutating the caller's history in place.
    new_hist = list(history_messages or [])
    new_hist.append({"role": "user", "content": user_msg})
    new_hist.append({"role": "assistant", "content": response_text})
    return new_hist, ""
| # ---------------- THE NEW UI ---------------- | |
# Top-level UI definition: left column holds the inputs, right column holds
# the live chat and a reviewable history of past assessments.
with gr.Blocks(theme="soft", css="style.css") as demo:
    # State to store the history of all assessments in this session
    assessment_history = gr.State([])

    gr.Markdown("# Universal AI Data Analyst", elem_classes="h1")
    with gr.Row(variant="panel"):
        # --- LEFT COLUMN: CONTROLS ---
        with gr.Column(scale=1):
            gr.Markdown("## New Assessment", elem_classes="h2")
            files = gr.Files(
                label="Upload Data Files (CSV recommended)",
                file_count="multiple",
                type="filepath",
                file_types=[".csv"],
            )
            msg = gr.Textbox(
                label="Prompt",
                placeholder="Paste your scenario, tasks, and any specific instructions here.",
                lines=10,
            )
            with gr.Row():
                send_btn = gr.Button("▶️ Run Analysis", variant="primary")
                clear_btn = gr.Button("🗑️ Clear", variant="secondary")

        # --- RIGHT COLUMN: RESULTS & HISTORY ---
        with gr.Column(scale=2):
            with gr.Tabs():
                # --- TAB 1: CURRENT ASSESSMENT ---
                with gr.TabItem("Current Assessment", id=0):
                    # Fix: type="messages" is required — handle() produces
                    # OpenAI-style {'role', 'content'} dicts, which the
                    # default (tuple-pair) Chatbot format cannot render.
                    chat_history = gr.Chatbot(
                        label="Chat History",
                        type="messages",
                        bubble_full_width=True,
                        height=500,
                    )
                    ping_btn = gr.Button("Ping Cohere")
                    ping_out = gr.Markdown()
                # --- TAB 2: ASSESSMENT HISTORY ---
                with gr.TabItem("Assessment History", id=1):
                    gr.Markdown("## Review Past Assessments", elem_classes="h2")
                    history_dropdown = gr.Dropdown(
                        label="Select an assessment to review",
                        choices=[],
                    )
                    history_display = gr.Markdown(
                        label="Selected Assessment Details"
                    )
| # --- UI LOGIC --- | |
| # Function to run when "Run Analysis" is clicked | |
def run_analysis(prompt, files, chat, history_state):
    """Validate inputs, run the backend agent, and archive the result.

    Returns updated values for the chatbot, the session history state,
    and the history dropdown (its choices refreshed with the new entry).
    """
    # Guard clause: both a prompt and at least one data file are required.
    if not (prompt and files):
        gr.Warning("Please provide both a prompt and at least one data file.")
        return chat, history_state, gr.update()

    # Delegate the actual analysis to the backend engine.
    final_chat, _ = handle(prompt, chat, files)

    # Archive this completed assessment in the session state.
    record = {
        "id": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
        "prompt": prompt,
        "files": [os.path.basename(path) for path in files],
        "response": final_chat[-1]['content'],  # the AI's final reply
    }
    updated_history = history_state + [record]

    # Human-readable dropdown labels: "TIMESTAMP - first 40 prompt chars..."
    labels = [f"{entry['id']} - {entry['prompt'][:40]}..." for entry in updated_history]
    return final_chat, updated_history, gr.update(choices=labels)
| # Function to run when a history item is selected from the dropdown | |
def view_history(selection, history_state):
    """Render a previously saved assessment as Markdown.

    Args:
        selection: Dropdown label formatted "TIMESTAMP - PROMPT...".
        history_state: List of assessment dicts with keys
            'id', 'prompt', 'files', 'response'.

    Returns:
        Markdown text for the matched assessment, "" when nothing is
        selected, or an error note when the id cannot be found.
    """
    if not selection or not history_state:
        return ""
    # The label is "TIMESTAMP - PROMPT...": the id is everything before the
    # first " - " (the timestamp format itself contains no such separator).
    selected_id = selection.split(" - ")[0]
    selected_assessment = next((item for item in history_state if item["id"] == selected_id), None)
    if selected_assessment is None:
        return "Could not find the selected assessment."
    # Fix: the original joined file names with '- ', producing one run-on
    # line ("a.csv- b.csv"); a Markdown bullet list needs each entry on its
    # own "- " line.
    files_md = "\n".join(f"- {name}" for name in selected_assessment['files'])
    return f"""
### Assessment from: {selected_assessment['id']}
**Files Used:**
{files_md}
---
**Original Prompt:**
> {selected_assessment['prompt']}
---
**AI Generated Response:**
{selected_assessment['response']}
"""
# Wire up the components
# Run the full analysis pipeline; updates the chat, the session history
# state, and the dropdown of past assessments in one round trip.
send_btn.click(
    run_analysis,
    inputs=[msg, files, chat_history, assessment_history],
    outputs=[chat_history, assessment_history, history_dropdown]
)
# Re-render the detail pane whenever a past assessment is picked.
history_dropdown.change(
    view_history,
    inputs=[history_dropdown, assessment_history],
    outputs=[history_display]
)
# Reset prompt, uploads, and chat; assessment_history is deliberately kept.
clear_btn.click(lambda: (None, None, None), outputs=[msg, files, chat_history])
# NOTE(review): ping_cohere is not defined in this trimmed file — clicking
# "Ping Cohere" will raise NameError until the full backend (which defines
# ping_cohere) is pasted in, as the header comments instruct. Confirm before
# shipping.
ping_btn.click(lambda: ping_cohere(), outputs=[ping_out])
if __name__ == "__main__":
    # Warn (but do not abort) when the Cohere credential is missing — the UI
    # can still launch, backend calls will simply fail.
    if os.getenv("COHERE_API_KEY") is None or os.getenv("COHERE_API_KEY") == "":
        print("🔴 COHERE_API_KEY environment variable not set. Application may not function correctly.")
    # Bind on all interfaces; port defaults to 7860 unless PORT overrides it.
    port = int(os.getenv("PORT", "7860"))
    demo.launch(server_name="0.0.0.0", server_port=port)