# app.py
from __future__ import annotations
import os
import traceback
import regex as re2
from typing import List, Tuple, Dict, Any
import gradio as gr
import pandas as pd
# New additions for data analysis agent
from langchain.agents.agent_types import AgentType
from langchain_community.chat_models import ChatCohere
from langchain_experimental.agents.agent_toolkits import create_pandas_dataframe_agent
# ---- Local modules
from settings import (
HEALTHCARE_SETTINGS, GENERAL_CONVERSATION_PROMPT, USE_SCENARIO_ENGINE, DEBUG_PLAN,
COHERE_MODEL_PRIMARY, COHERE_TIMEOUT_S, USE_OPEN_FALLBACKS
)
from audit_log import log_event
from privacy import safety_filter, refusal_reply
from data_registry import DataRegistry
from upload_ingest import extract_text_from_files
from healthcare_analysis import HealthcareAnalyzer
from scenario_planner import parse_to_plan
from scenario_engine import ScenarioEngine
from rag import RAGIndex
from llm_router import generate_narrative, cohere_chat, open_fallback_chat, _co_client, cohere_embed
from narrative_safetynet import build_narrative
# ---------------- Utilities ----------------
def _sanitize_text(s: str) -> str:
if not isinstance(s, str):
return s
    # remove non-printing/control chars except newlines & tabs
    # (character-class subtraction requires the regex module's V1 behaviour)
    return re2.sub(r'[\p{C}--[\n\t]]+', '', s, flags=re2.V1)
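# Illustrative example (hypothetical input, assuming the V1 pattern above):
#   _sanitize_text("ab\u200bc\tdef\n")  ->  "abc\tdef\n"   (zero-width space stripped; tab/newline kept)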
def _dataset_catalog(results: Dict[str, Any]) -> Dict[str, List[str]]:
"""Simple catalog of dataset columns for the planner prompt; dynamic & scenario-agnostic."""
cat: Dict[str, List[str]] = {}
for k, v in results.items():
if isinstance(v, pd.DataFrame):
cat[k] = v.columns.tolist()
return cat
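# Illustrative shape (hypothetical dataset and column names, for documentation only):
#   _dataset_catalog({"wait_times": pd.DataFrame(columns=["facility", "specialty", "median_wait"])})
#   -> {"wait_times": ["facility", "specialty", "median_wait"]}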
def is_healthcare_scenario(text: str, has_files: bool) -> bool:
"""
Dynamic detection: require uploaded files AND either structured scenario sections
or healthcare keywords (configured in settings).
"""
t = (text or "").lower()
kws = HEALTHCARE_SETTINGS["healthcare_keywords"]
structured = any(s in t for s in ["background", "situation", "tasks", "deliverables"])
return has_files and (structured or any(k in t for k in kws))
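# Illustrative calls (hypothetical text; the second result assumes the configured keywords are healthcare terms):
#   is_healthcare_scenario("Background: ED wait times... Tasks: reduce backlog", has_files=True)  -> True
#   is_healthcare_scenario("what's the weather like today?", has_files=True)                      -> False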
def _append_msg(history_messages: List[Dict[str, str]], role: str, content: str) -> List[Dict[str, str]]:
return (history_messages or []) + [{"role": role, "content": content}]
def ping_cohere() -> str:
"""Lightweight health check against Cohere (embeddings call)."""
try:
cli = _co_client()
if not cli:
return "Cohere client not initialized. Is COHERE_API_KEY set?"
vecs = cohere_embed(["hello", "world"])
if vecs and len(vecs) == 2:
return f"Cohere OK ✅ (model={COHERE_MODEL_PRIMARY}, timeout={COHERE_TIMEOUT_S}s)"
return "Cohere reachable, but embeddings returned no vectors."
except Exception as e:
return f"Cohere ping failed: {e}"
# ---------------- Core handler ----------------
def handle(user_msg: str, history_messages: List[Dict[str, str]], files: list) -> Tuple[List[Dict[str, str]], str]:
"""
One entrypoint for both healthcare scenarios and general conversation.
- NEW: If files are uploaded, a data-aware agent is used to perform analysis.
- Scenario mode (no files): planner -> deterministic executor -> LLM narrative (Cohere).
- General mode: direct to Cohere with a light system prompt.
"""
try:
# Safety filter for user input
safe_in, blocked_in, reason_in = safety_filter(user_msg, mode="input")
if blocked_in:
reply = refusal_reply(reason_in)
new_hist = _append_msg(history_messages, "user", user_msg)
new_hist = _append_msg(new_hist, "assistant", reply)
return new_hist, ""
file_paths: List[str] = [getattr(f, "name", None) or f for f in (files or [])]
# --- NEW LOGIC: Activate data agent if files are uploaded ---
if file_paths:
try:
                # For this example, we load only the first uploaded file and assume it is a CSV.
                # This can be extended to handle multiple DataFrames (see the sketch below).
                df = pd.read_csv(file_paths[0])
# Initialize the Cohere Chat LLM for the agent
llm = ChatCohere(model=COHERE_MODEL_PRIMARY, temperature=0)
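                # Sketch for handling several uploads (assumption: create_pandas_dataframe_agent
                # also accepts a list of DataFrames; illustrative only, not wired into the flow below):
                #   dfs = [pd.read_csv(p) for p in file_paths if str(p).lower().endswith(".csv")]
                #   agent = create_pandas_dataframe_agent(llm, dfs, verbose=False)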
# Create the pandas DataFrame agent, powered by Cohere
agent = create_pandas_dataframe_agent(
llm,
df,
                    # OPENAI_FUNCTIONS assumes an OpenAI-style function-calling model;
                    # the ReAct agent type works with any chat model, including ChatCohere.
                    agent_type=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
verbose=True # Set to False in production
)
# Run the agent with the user's scenario text. The agent will
# write and execute code to answer the query based on the dataframe.
reply = agent.run(safe_in)
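                # Note (assumption about the installed LangChain version): AgentExecutor.run()
                # is deprecated in newer releases; the equivalent call there would be
                #   reply = agent.invoke({"input": safe_in})["output"]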
reply = _sanitize_text(reply)
except Exception as e:
tb = traceback.format_exc()
log_event("agent_error", None, {"err": str(e), "tb": tb})
reply = f"An error occurred while analyzing the data: {e}"
        # --- ORIGINAL LOGIC: scenario engine for structured scenarios without data files ---
        elif USE_SCENARIO_ENGINE and is_healthcare_scenario(safe_in, has_files=True):
            # File uploads are handled by the agent branch above, so detection here runs on
            # the text alone; has_files=True bypasses the upload requirement in the helper.
            registry = DataRegistry()  # could be simplified now that uploads always trigger the agent
rag = RAGIndex()
try:
ing = extract_text_from_files(file_paths) # For text extraction from markdown/txt
rag.add(ing.get("chunks", []))
except Exception as e:
log_event("rag_ingest_error", None, {"err": str(e)})
analyzer = HealthcareAnalyzer(registry)
datasets = analyzer.comprehensive_analysis(safe_in)
catalog = _dataset_catalog(datasets)
plan = parse_to_plan(safe_in, catalog)
structured_md = ScenarioEngine.execute_plan(plan, datasets)
rag_hits = [txt for txt, _ in rag.retrieve(safe_in, k=6)]
narrative = generate_narrative(safe_in, structured_md, rag_hits)
if not narrative or "Unable to generate narrative" in narrative:
narrative = build_narrative(
scenario_text=safe_in, datasets=datasets, structured_tables=None,
metric_hints=["surgery_median", "consult_median", "wait", "median", "p90", "90th"],
group_hints=["facility", "specialty", "zone", "hospital", "city", "region"],
min_sample=5
)
debug_note = f"\n\n> **Planner note:** {getattr(plan, 'notes', '')}" if DEBUG_PLAN and getattr(plan, "notes", None) else ""
reply = _sanitize_text(f"{structured_md}\n\n# Narrative & Recommendations\n\n{narrative}{debug_note}")
else:
# General conversation mode (no files, not a structured scenario)
prompt = f"{GENERAL_CONVERSATION_PROMPT}\n\nUser: {safe_in}\nAssistant:"
reply = cohere_chat(prompt) or open_fallback_chat(prompt) or "How can I help further?"
reply = _sanitize_text(reply)
# Append interaction to chat history
new_hist = _append_msg(history_messages, "user", user_msg)
new_hist = _append_msg(new_hist, "assistant", reply)
return new_hist, ""
except Exception as e:
tb = traceback.format_exc()
log_event("app_error", None, {"err": str(e), "tb": tb})
new_hist = _append_msg(history_messages, "user", user_msg)
new_hist = _append_msg(new_hist, "assistant", f"A critical error occurred: {e}\n\n{tb}")
return new_hist, ""
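# Minimal local smoke test (illustrative only; assumes COHERE_API_KEY is set in the environment):
#   history, _ = handle("Summarize current surgical wait times.", [], [])
#   print(history[-1]["content"])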
# ---------------- UI ----------------
with gr.Blocks(analytics_enabled=False) as demo:
gr.Markdown("## Canadian Healthcare AI • Cohere API • Scenario-Agnostic • Deterministic Analytics")
with gr.Row():
chat = gr.Chatbot(label="Chat History", type="messages", height=520)
files = gr.Files(
label="Upload Data Files (CSV recommended)",
file_count="multiple",
type="filepath",
file_types=HEALTHCARE_SETTINGS["supported_file_types"]
)
msg = gr.Textbox(label="Prompt", placeholder="Paste any scenario (Background / Situation / Tasks / Deliverables) or just chat.")
with gr.Row():
send = gr.Button("Send")
clear = gr.Button("Clear")
ping_btn = gr.Button("Ping Cohere")
ping_out = gr.Markdown()
def _on_send(m, h, f):
h2, _ = handle(m, h or [], f or [])
return h2, ""
send.click(_on_send, inputs=[msg, chat, files], outputs=[chat, msg])
msg.submit(_on_send, inputs=[msg, chat, files], outputs=[chat, msg])
clear.click(lambda: ([], "", None), outputs=[chat, msg, files])
ping_btn.click(lambda: ping_cohere(), outputs=[ping_out])
if __name__ == "__main__":
# Ensure you have your COHERE_API_KEY set as an environment variable
if not os.getenv("COHERE_API_KEY"):
print("🔴 COHERE_API_KEY environment variable not set. Application may not function correctly.")
log_event("startup", None, {
"cohere_key_present": bool(os.getenv("COHERE_API_KEY")),
"cohere_model": COHERE_MODEL_PRIMARY,
"open_fallbacks": USE_OPEN_FALLBACKS,
"timeout_s": COHERE_TIMEOUT_S
})
    demo.launch(server_name="0.0.0.0", server_port=int(os.getenv("PORT", "7860")))