# app.py from __future__ import annotations import os import traceback import regex as re2 from typing import List, Tuple, Dict, Any import gradio as gr import pandas as pd # ---- Local modules from settings import ( HEALTHCARE_SETTINGS, GENERAL_CONVERSATION_PROMPT, USE_SCENARIO_ENGINE, DEBUG_PLAN, COHERE_MODEL_PRIMARY, COHERE_TIMEOUT_S, USE_OPEN_FALLBACKS ) from audit_log import log_event from privacy import safety_filter, refusal_reply from data_registry import DataRegistry from upload_ingest import extract_text_from_files from healthcare_analysis import HealthcareAnalyzer from scenario_planner import parse_to_plan from scenario_engine import ScenarioEngine from rag import RAGIndex from llm_router import generate_narrative, cohere_chat, open_fallback_chat, _co_client, cohere_embed from narrative_safetynet import build_narrative # ---------------- Utilities ---------------- def _sanitize_text(s: str) -> str: if not isinstance(s, str): return s # remove non-printing/control chars except newlines & tabs return re2.sub(r'[\p{C}--[\n\t]]+', '', s) def _dataset_catalog(results: Dict[str, Any]) -> Dict[str, List[str]]: """Simple catalog of dataset columns for the planner prompt; dynamic & scenario-agnostic.""" cat: Dict[str, List[str]] = {} for k, v in results.items(): if isinstance(v, pd.DataFrame): cat[k] = v.columns.tolist() return cat def is_healthcare_scenario(text: str, has_files: bool) -> bool: """ Dynamic detection: require uploaded files AND either structured scenario sections or healthcare keywords (configured in settings). """ t = (text or "").lower() kws = HEALTHCARE_SETTINGS["healthcare_keywords"] structured = any(s in t for s in ["background", "situation", "tasks", "deliverables"]) return has_files and (structured or any(k in t for k in kws)) def _append_msg(history_messages: List[Dict[str, str]], role: str, content: str) -> List[Dict[str, str]]: return (history_messages or []) + [{"role": role, "content": content}] def ping_cohere() -> str: """Lightweight health check against Cohere (embeddings call).""" try: cli = _co_client() if not cli: return "Cohere client not initialized. Is COHERE_API_KEY set?" vecs = cohere_embed(["hello", "world"]) if vecs and len(vecs) == 2: return f"Cohere OK ✅ (model={COHERE_MODEL_PRIMARY}, timeout={COHERE_TIMEOUT_S}s)" return "Cohere reachable, but embeddings returned no vectors." except Exception as e: return f"Cohere ping failed: {e}" # ---------------- Core handler ---------------- def handle(user_msg: str, history_messages: List[Dict[str, str]], files: list) -> Tuple[List[Dict[str, str]], str]: """ One entrypoint for both healthcare scenarios and general conversation. - Scenario mode: planner -> deterministic executor -> LLM narrative (Cohere) -> safety-net narrative if needed. - General mode: direct to Cohere/open fallback with a light system prompt. """ try: # Safety safe_in, blocked_in, reason_in = safety_filter(user_msg, mode="input") if blocked_in: reply = refusal_reply(reason_in) new_hist = _append_msg(history_messages, "user", user_msg) new_hist = _append_msg(new_hist, "assistant", reply) return new_hist, "" # Normalize files into paths (Gradio can return temp file objects or paths) file_paths: List[str] = [getattr(f, "name", None) or f for f in (files or [])] # Register CSVs for deterministic analysis registry = DataRegistry() for p in file_paths: try: if p: registry.add_path(p) except Exception as e: log_event("ingest_error", None, {"file": p, "err": str(e)}) # Lightweight RAG: ingest any text/markdown for grounding (embeddings via Cohere) rag = RAGIndex() try: ing = extract_text_from_files(file_paths) rag.add(ing.get("chunks", [])) except Exception as e: log_event("rag_ingest_error", None, {"err": str(e)}) # Decide mode if is_healthcare_scenario(safe_in, bool(file_paths)) and USE_SCENARIO_ENGINE: # 1) Deterministic dataset exposure analyzer = HealthcareAnalyzer(registry) datasets = analyzer.comprehensive_analysis(safe_in) # dict[str, DataFrame] catalog = _dataset_catalog(datasets) # 2) Plan (Cohere-first; auto safety-net if LLM parse fails) plan = parse_to_plan(safe_in, catalog) # 3) Execute plan deterministically structured_md = ScenarioEngine.execute_plan(plan, datasets) # 4) Narrative (Cohere-first), grounded with RAG hits rag_hits = [txt for txt, _ in rag.retrieve(safe_in, k=6)] narrative = generate_narrative(safe_in, structured_md, rag_hits) # 5) Safety-net narrative if LLM narrative absent/failed if not narrative or "Unable to generate narrative" in narrative: # Provide generic hints only (dynamic, not hard-coded to any schema) narrative = build_narrative( scenario_text=safe_in, datasets=datasets, structured_tables=None, metric_hints=["surgery_median", "consult_median", "wait", "median", "p90", "90th"], group_hints=["facility", "specialty", "zone", "hospital", "city", "region"], min_sample=5 ) debug_note = "" if DEBUG_PLAN and getattr(plan, "notes", None): debug_note = f"\n\n> **Planner note:** {getattr(plan, 'notes', '')}" reply = _sanitize_text( f"{structured_md}\n\n# Narrative & Recommendations\n\n{narrative}{debug_note}" ) else: # General conversation mode (Cohere-first; open-weights fallback) prompt = f"{GENERAL_CONVERSATION_PROMPT}\n\nUser: {safe_in}\nAssistant:" reply = cohere_chat(prompt) or open_fallback_chat(prompt) or "How can I help further?" reply = _sanitize_text(reply) # Append to chat history new_hist = _append_msg(history_messages, "user", user_msg) new_hist = _append_msg(new_hist, "assistant", reply) return new_hist, "" except Exception as e: tb = traceback.format_exc() log_event("app_error", None, {"err": str(e), "tb": tb}) new_hist = _append_msg(history_messages, "user", user_msg) new_hist = _append_msg(new_hist, "assistant", f"Error: {e}\n\n{tb}") return new_hist, "" # ---------------- UI ---------------- with gr.Blocks(analytics_enabled=False) as demo: gr.Markdown("## Canadian Healthcare AI • Cohere API • Scenario-Agnostic • Deterministic Analytics") with gr.Row(): # Use messages format to avoid deprecation warnings and enable role-based history chat = gr.Chatbot(type="messages", height=520) files = gr.Files( file_count="multiple", type="filepath", file_types=HEALTHCARE_SETTINGS["supported_file_types"] ) msg = gr.Textbox(placeholder="Paste any scenario (Background / Situation / Tasks / Deliverables) or just chat.") with gr.Row(): send = gr.Button("Send") clear = gr.Button("Clear") ping_btn = gr.Button("Ping Cohere") ping_out = gr.Markdown() def _on_send(m, h, f): h2, _ = handle(m, h or [], f or []) return h2, "" send.click(_on_send, inputs=[msg, chat, files], outputs=[chat, msg]) msg.submit(_on_send, inputs=[msg, chat, files], outputs=[chat, msg]) clear.click(lambda: ([], ""), outputs=[chat, msg]) ping_btn.click(lambda: ping_cohere(), outputs=[ping_out]) if __name__ == "__main__": log_event("startup", None, { "cohere_key_present": bool(os.getenv("COHERE_API_KEY")), "cohere_model": COHERE_MODEL_PRIMARY, "open_fallbacks": USE_OPEN_FALLBACKS, "timeout_s": COHERE_TIMEOUT_S }) demo.launch(server_name="0.0.0.0", server_port=int(os.getenv("PORT", "7860")))