# app.py
from __future__ import annotations

import os
import traceback
import regex as re2
from typing import List, Tuple, Dict, Any

import gradio as gr
import pandas as pd

# New additions for data analysis agent
from langchain.agents.agent_types import AgentType
from langchain_community.chat_models import ChatCohere
from langchain_experimental.agents.agent_toolkits import create_pandas_dataframe_agent

# ---- Local modules
from settings import (
    HEALTHCARE_SETTINGS, GENERAL_CONVERSATION_PROMPT, USE_SCENARIO_ENGINE, DEBUG_PLAN,
    COHERE_MODEL_PRIMARY, COHERE_TIMEOUT_S, USE_OPEN_FALLBACKS
)
from audit_log import log_event
from privacy import safety_filter, refusal_reply
from data_registry import DataRegistry
from upload_ingest import extract_text_from_files
from healthcare_analysis import HealthcareAnalyzer
from scenario_planner import parse_to_plan
from scenario_engine import ScenarioEngine
from rag import RAGIndex
from llm_router import generate_narrative, cohere_chat, open_fallback_chat, _co_client, cohere_embed
from narrative_safetynet import build_narrative
# ---------------- Utilities ----------------
def _sanitize_text(s: str) -> str:
    if not isinstance(s, str):
        return s
    # Strip Unicode control characters except newline and tab. The set-difference
    # syntax [\p{C}--[\n\t]] requires the regex module's V1 behaviour.
    return re2.sub(r'[\p{C}--[\n\t]]+', '', s, flags=re2.V1)
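
# NOTE: `_append_msg` is called throughout `handle()` below but its definition does
# not appear in this listing. A minimal sketch, assuming the messages-style history
# used by gr.Chatbot(type="messages"), i.e. a list of {"role": ..., "content": ...} dicts:
def _append_msg(history: List[Dict[str, str]], role: str, content: str) -> List[Dict[str, str]]:
    """Return a new history list with one message appended, without mutating the input."""
    return (history or []) + [{"role": role, "content": content}]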
# --- NEW: The "Intake Analyst" AI ---
def _create_enhanced_prompt(user_scenario: str) -> str:
    """
    Uses an LLM to pre-process the user's messy prompt into a structured brief
    for the data analysis agent.
    """
    # This prompt instructs the first LLM to act as a project manager.
    prompt_for_planner = f"""
You are an expert data analysis project manager. Your task is to read the user's unstructured scenario below and create a clear, structured brief for a data analysis AI.

From the user's text, extract the following:
1. **Primary Objective:** A one-sentence summary of the user's main goal.
2. **Key Tasks:** A numbered list of the specific questions the user wants answered.
3. **Expert Guidelines & Assumptions:** A bulleted list of EVERY specific number, metric, calculation method, or assumption mentioned in the text. This is critical for high-quality analysis.
4. **Required Output Format:** A description of how the user wants the final answer to be structured.

Present this as a clean brief. Then, include the user's original text at the end.

--- USER'S SCENARIO ---
{user_scenario}
"""
    # Use the existing cohere_chat function to get the structured brief.
    structured_brief = cohere_chat(prompt_for_planner)
    # If the LLM call fails, just use the original message.
    if not structured_brief:
        return user_scenario
    return structured_brief
# ---------------- Core handler ----------------
def handle(user_msg: str, history_messages: List[Dict[str, str]], files: list) -> Tuple[List[Dict[str, str]], str]:
    """
    Core logic handler with the new two-step AI process.
    """
    try:
        # Safety filter for user input
        safe_in, blocked_in, reason_in = safety_filter(user_msg, mode="input")
        if blocked_in:
            reply = refusal_reply(reason_in)
            new_hist = _append_msg(history_messages, "user", user_msg)
            new_hist = _append_msg(new_hist, "assistant", reply)
            return new_hist, ""

        file_paths: List[str] = [getattr(f, "name", None) or f for f in (files or [])]

        if file_paths:
            try:
                # Load ALL uploaded CSVs into a list of DataFrames (case-insensitive extension check)
                dataframes = [pd.read_csv(p) for p in file_paths if p.lower().endswith('.csv')]
                if not dataframes:
                    return _append_msg(history_messages, "assistant", "Please upload at least one CSV file."), ""

                # Initialize the Cohere Chat LLM for the agent
                llm = ChatCohere(model=COHERE_MODEL_PRIMARY, temperature=0)

                # STEP 1: The "Intake Analyst" AI creates a structured brief.
                enhanced_prompt = _create_enhanced_prompt(safe_in)

                # This UNIVERSAL prefix contains only behavioral rules.
                AGENT_PREFIX = """
You are a data analysis agent. You have access to one or more pandas dataframes.
You MUST respond in one of two formats.

FORMAT 1: To perform a task. Your response must be a single block of text with ONLY these three sections:
Thought: Your step-by-step reasoning.
Action: python_repl_ast
Action Input: The Python code to run.

FORMAT 2: To give the final answer. Your response must be a single block of text with ONLY these two sections:
Thought: I can now answer the user's query based on the analysis.
Final Answer: The complete answer, structured as the user requested.

CRITICAL RULE: NEVER combine `Action` and `Final Answer` in the same response. Choose one format.
Begin by analyzing the structured brief provided.
"""

                # STEP 2: The "Data Scientist" AI (Agent) executes the clean brief.
                agent = create_pandas_dataframe_agent(
                    llm,
                    dataframes,
                    agent_type=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
                    verbose=True,
                    allow_dangerous_code=True,
                    # Newer langchain_experimental releases reject loose kwargs, so route
                    # handle_parsing_errors through the executor kwargs instead.
                    agent_executor_kwargs={"handle_parsing_errors": True},
                    prefix=AGENT_PREFIX
                )
                reply = agent.run(enhanced_prompt)
                reply = _sanitize_text(reply)
            except Exception as e:
                tb = traceback.format_exc()
                log_event("agent_error", None, {"err": str(e), "tb": tb})
                reply = f"An error occurred while analyzing the data: {e}"
        else:
            # Fall back to general conversation if no files are uploaded
            prompt = f"{GENERAL_CONVERSATION_PROMPT}\n\nUser: {safe_in}\nAssistant:"
            reply = cohere_chat(prompt) or open_fallback_chat(prompt) or "How can I help further?"
            reply = _sanitize_text(reply)

        # Append interaction to chat history
        new_hist = _append_msg(history_messages, "user", user_msg)
        new_hist = _append_msg(new_hist, "assistant", reply)
        return new_hist, ""
    except Exception as e:
        tb = traceback.format_exc()
        log_event("app_error", None, {"err": str(e), "tb": tb})
        new_hist = _append_msg(history_messages, "user", user_msg)
        new_hist = _append_msg(new_hist, "assistant", f"A critical error occurred: {e}\n\n{tb}")
        return new_hist, ""
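
# NOTE: `ping_cohere` is wired to the "Ping Cohere" button below but is not defined in
# this listing. A minimal sketch, assuming `cohere_chat` returns a string (empty or None
# on failure), as it is used elsewhere in this file:
def ping_cohere() -> str:
    """Send a trivial prompt to Cohere and report whether a reply came back."""
    try:
        reply = cohere_chat("Reply with the single word: pong")
        if reply:
            return f"🟢 Cohere reachable (model: {COHERE_MODEL_PRIMARY})."
        return "🟡 Cohere call returned an empty reply."
    except Exception as e:
        return f"🔴 Cohere ping failed: {e}"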
# ---------------- UI ----------------
with gr.Blocks(analytics_enabled=False) as demo:
    gr.Markdown("## Universal AI Data Analyst")
    with gr.Row():
        chat = gr.Chatbot(label="Chat History", type="messages", height=520)
        files = gr.Files(
            label="Upload Data Files (CSV recommended)",
            file_count="multiple",
            type="filepath",
            file_types=[".csv"]
        )
    msg = gr.Textbox(label="Prompt", placeholder="Paste your scenario, tasks, and any specific instructions here.")
    with gr.Row():
        send = gr.Button("Send")
        clear = gr.Button("Clear")
        ping_btn = gr.Button("Ping Cohere")
    ping_out = gr.Markdown()

    def _on_send(m, h, f):
        h2, _ = handle(m, h, f or [])
        return h2, ""

    send.click(_on_send, inputs=[msg, chat, files], outputs=[chat, msg])
    msg.submit(_on_send, inputs=[msg, chat, files], outputs=[chat, msg])
    clear.click(lambda: ([], "", None), outputs=[chat, msg, files])
    ping_btn.click(lambda: ping_cohere(), outputs=[ping_out])
if __name__ == "__main__":
    if not os.getenv("COHERE_API_KEY"):
        print("🔴 COHERE_API_KEY environment variable not set. Application may not function correctly.")
    log_event("startup", None, {
        "cohere_key_present": bool(os.getenv("COHERE_API_KEY")),
        "cohere_model": COHERE_MODEL_PRIMARY,
        "open_fallbacks": USE_OPEN_FALLBACKS,
        "timeout_s": COHERE_TIMEOUT_S
    })
    demo.launch(server_name="0.0.0.0", server_port=int(os.getenv("PORT", "7860")))