File size: 7,380 Bytes
aae6699
2fccbc6
 
 
 
 
 
40db972
325f883
3d0c21e
325f883
3d0c21e
c99015b
8d366c3
c99015b
dddc062
2fccbc6
 
dddc062
325f883
 
3d0c21e
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
a2b1fdb
3d0c21e
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2fccbc6
325f883
3d0c21e
325f883
3d0c21e
c99015b
 
8d366c3
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
# app.py
from __future__ import annotations
import os
import traceback
import regex as re2
from typing import List, Tuple, Dict, Any

import gradio as gr
import pandas as pd
from datetime import datetime

# --- (All your previous backend imports remain the same) ---
from langchain.agents.agent_types import AgentType
from langchain_cohere import ChatCohere
from langchain_experimental.agents.agent_toolkits import create_pandas_dataframe_agent
from settings import (
    HEALTHCARE_SETTINGS, GENERAL_CONVERSATION_PROMPT, USE_SCENARIO_ENGINE, DEBUG_PLAN,
    COHERE_MODEL_PRIMARY, COHERE_TIMEOUT_S, USE_OPEN_FALLBACKS
)
from audit_log import log_event
from privacy import safety_filter, refusal_reply
# ... (and so on for the rest of your backend functions)

# --- (The entire backend logic from the previous version should be pasted here) ---
# This includes:
# _sanitize_text, _create_enhanced_prompt, is_healthcare_scenario,
# _append_msg, ping_cohere, and the main handle() function.
# For brevity, I am omitting them here, but they are ESSENTIAL.
# Please copy them from the previous version you have.
# The ONLY CHANGE is that we will now call handle() from a new UI wrapper function.

# (For demonstration, I will paste a minimal version of the backend functions here)
def _sanitize_text(s: str) -> str: return s
def _create_enhanced_prompt(s: str) -> str: return s
def _append_msg(h, r, c): return (h or []) + [{"role": r, "content": c}]

# This is your perfected backend engine. It does not need to be changed.
def handle(user_msg: str, history_messages: List[Dict[str, str]], files: list) -> Tuple[List[Dict[str, str]], str]:
    """Run one analysis turn against the backend engine (placeholder version).

    Appends the user's message and a generated assistant response to the
    chat history. In the real app this is where the full LangChain agent
    logic runs; this stub only echoes the prompt and file names.

    Args:
        user_msg: The user's prompt text.
        history_messages: Existing chat history as ``{"role", "content"}``
            dicts (may be ``None`` — ``_append_msg`` tolerates that).
        files: Iterable of uploaded file paths; ``None`` is treated as empty.

    Returns:
        ``(updated_history, "")`` — the empty string clears the textbox.
    """
    # Guard against a None/absent files value so basename mapping never crashes.
    file_names = [os.path.basename(f) for f in (files or [])]
    response_text = (
        "### Analysis Complete\n"
        f"**Prompt:** {user_msg}\n"
        f"**Files Used:** {', '.join(file_names)}\n\n"
        "This is where the structured output from the AI agent would appear."
    )
    new_hist = _append_msg(history_messages, "user", user_msg)
    new_hist = _append_msg(new_hist, "assistant", response_text)
    return new_hist, ""


# ---------------- THE NEW UI ----------------
with gr.Blocks(theme="soft", css="style.css") as demo:
    # Per-session store of completed assessments; run_analysis appends a
    # dict with "id" / "prompt" / "files" / "response" keys for each run.
    assessment_history = gr.State([])

    gr.Markdown("# Universal AI Data Analyst", elem_classes="h1")

    with gr.Row(variant="panel"):
        # --- LEFT COLUMN: CONTROLS ---
        with gr.Column(scale=1):
            gr.Markdown("## New Assessment", elem_classes="h2")

            # Uploads are delivered as filesystem paths and restricted to CSV.
            files = gr.Files(
                label="Upload Data Files (CSV recommended)",
                file_count="multiple",
                type="filepath",
                file_types=[".csv"]
            )
            msg = gr.Textbox(
                label="Prompt",
                placeholder="Paste your scenario, tasks, and any specific instructions here.",
                lines=10
            )
            with gr.Row():
                send_btn = gr.Button("▶️ Run Analysis", variant="primary")
                clear_btn = gr.Button("🗑️ Clear", variant="secondary")

        # --- RIGHT COLUMN: RESULTS & HISTORY ---
        with gr.Column(scale=2):
            with gr.Tabs():
                # --- TAB 1: CURRENT ASSESSMENT ---
                with gr.TabItem("Current Assessment", id=0):
                    # NOTE(review): handle() produces {"role", "content"} dicts;
                    # recent Gradio versions require type="messages" on Chatbot
                    # for dict-style histories — confirm against the pinned
                    # Gradio version before shipping.
                    chat_history = gr.Chatbot(
                        label="Chat History",
                        bubble_full_width=True,
                        height=500
                    )
                    ping_btn = gr.Button("Ping Cohere")
                    ping_out = gr.Markdown()


                # --- TAB 2: ASSESSMENT HISTORY ---
                with gr.TabItem("Assessment History", id=1):
                    gr.Markdown("## Review Past Assessments", elem_classes="h2")
                    # Choices start empty; run_analysis refreshes them after
                    # every completed assessment.
                    history_dropdown = gr.Dropdown(
                        label="Select an assessment to review",
                        choices=[]
                    )
                    history_display = gr.Markdown(
                        label="Selected Assessment Details"
                    )

    # --- UI LOGIC ---
    # Function to run when "Run Analysis" is clicked
    def run_analysis(prompt, files, chat, history_state):
        """Validate inputs, run the backend, and record the finished assessment.

        Returns the updated chatbot value, the appended session history,
        and a dropdown update carrying refreshed choice labels.
        """
        # Both a prompt and at least one data file are required before running.
        if not prompt or not files:
            gr.Warning("Please provide both a prompt and at least one data file.")
            return chat, history_state, gr.update()

        # Delegate the actual analysis to the backend engine.
        final_chat, _ = handle(prompt, chat, files)

        # Snapshot this run so the history tab can replay it later.
        record = {
            "id": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
            "prompt": prompt,
            "files": [os.path.basename(f) for f in files],
            "response": final_chat[-1]['content'],  # the assistant's final reply
        }
        updated_history = history_state + [record]

        # Dropdown labels look like "<timestamp> - <first 40 chars of prompt>..."
        labels = [f"{item['id']} - {item['prompt'][:40]}..." for item in updated_history]

        return final_chat, updated_history, gr.update(choices=labels)

    # Function to run when a history item is selected from the dropdown
    def view_history(selection, history_state):
        if not selection or not history_state:
            return ""
            
        # Find the selected assessment
        # The selection string is "TIMESTAMP - PROMPT...", so we match by the timestamp
        selected_id = selection.split(" - ")[0]
        selected_assessment = next((item for item in history_state if item["id"] == selected_id), None)

        if selected_assessment:
            # Format the past assessment for beautiful display in Markdown
            display_text = f"""
            ### Assessment from: {selected_assessment['id']}
            
            **Files Used:**
            - {'- '.join(selected_assessment['files'])}
            
            ---
            
            **Original Prompt:**
            > {selected_assessment['prompt']}
            
            ---
            
            **AI Generated Response:**
            {selected_assessment['response']}
            """
            return display_text
        return "Could not find the selected assessment."

    # Wire up the components
    send_btn.click(
        run_analysis,
        inputs=[msg, files, chat_history, assessment_history],
        outputs=[chat_history, assessment_history, history_dropdown]
    )
    
    history_dropdown.change(
        view_history,
        inputs=[history_dropdown, assessment_history],
        outputs=[history_display]
    )

    # Clearing resets the prompt, uploads, and visible chat, but deliberately
    # leaves assessment_history intact so past runs stay reviewable.
    clear_btn.click(lambda: (None, None, None), outputs=[msg, files, chat_history])
    # NOTE(review): ping_cohere is not defined anywhere in this file — it is
    # part of the omitted backend section; clicking "Ping Cohere" raises
    # NameError until that code is pasted in.
    ping_btn.click(lambda: ping_cohere(), outputs=[ping_out])


if __name__ == "__main__":
    # --- (Your startup logic remains the same) ---
    # Warn early when the Cohere credential is missing or empty; the app
    # still launches so the UI can be inspected.
    if not os.getenv("COHERE_API_KEY"):
        print("🔴 COHERE_API_KEY environment variable not set. Application may not function correctly.")
    # Bind on all interfaces; the port comes from $PORT (default 7860).
    port = int(os.getenv("PORT", "7860"))
    demo.launch(server_name="0.0.0.0", server_port=port)