# app.py
"""Gradio front-end: each chat turn passes through the privacy filter, optional file
ingestion, and then either the scenario-planning pipeline or general conversation."""
import os, traceback, regex as re2
import gradio as gr
import pandas as pd
from typing import List, Tuple, Dict, Any

from settings import HEALTHCARE_SETTINGS, GENERAL_CONVERSATION_PROMPT, USE_SCENARIO_ENGINE
from audit_log import log_event
from privacy import safety_filter, refusal_reply
from data_registry import DataRegistry
from upload_ingest import extract_text_from_files
from healthcare_analysis import HealthcareAnalyzer
from scenario_planner import parse_to_plan
from scenario_engine import ScenarioEngine
from rag import RAGIndex
from llm_router import generate_narrative, cohere_chat, open_fallback_chat


def _sanitize_text(s: str) -> str:
    if not isinstance(s, str):
        return s
    # Strip control characters but keep newlines/tabs.
    # Set subtraction (--) in a character class requires the regex module's V1 mode.
    return re2.sub(r'[\p{C}--[\n\t]]+', '', s, flags=re2.V1)


def _dataset_catalog(results: Dict[str, Any]) -> Dict[str, List[str]]:
    """Expose available columns per dataset to the planner."""
    cat: Dict[str, List[str]] = {}
    for k, v in results.items():
        if isinstance(v, pd.DataFrame):
            cat[k] = v.columns.tolist()
    return cat


def is_healthcare_scenario(text: str, has_files: bool) -> bool:
    """Heuristic: scenario mode when the user provided files plus scenario-like text."""
    t = (text or "").lower()
    kws = HEALTHCARE_SETTINGS["healthcare_keywords"]
    structured = any(s in t for s in ["background", "situation", "tasks", "deliverables"])
    return has_files and (structured or any(k in t for k in kws))


def _append_msg(history_messages: List[Dict[str, str]], role: str, content: str) -> List[Dict[str, str]]:
    """Return a new history list with one message appended."""
    return (history_messages or []) + [{"role": role, "content": content}]


def handle(user_msg: str, history_messages: List[Dict[str, str]], files: list) -> Tuple[List[Dict[str, str]], str]:
    try:
        safe_in, blocked_in, reason_in = safety_filter(user_msg, mode="input")
        if blocked_in:
            reply = refusal_reply(reason_in)
            new_hist = _append_msg(history_messages, "user", user_msg)
            new_hist = _append_msg(new_hist, "assistant", reply)
            return new_hist, ""

        # Normalize files -> paths (safe when files is None)
        file_paths = [getattr(f, "name", None) or f for f in (files or [])]

        # Register CSVs into the registry
        registry = DataRegistry()
        for p in file_paths:
            try:
                registry.add_path(p)
            except Exception as e:
                log_event("ingest_error", None, {"file": p, "err": str(e)})

        # RAG ingest (best-effort, text only; safe on empty)
        rag = RAGIndex()
        ing = extract_text_from_files(file_paths)
        rag.add(ing.get("chunks", []))

        # Scenario mode: plan -> deterministic execution -> narrative
        if is_healthcare_scenario(safe_in, bool(file_paths)) and USE_SCENARIO_ENGINE:
            analyzer = HealthcareAnalyzer(registry)
            datasets = analyzer.comprehensive_analysis(safe_in)  # expose dataframes by filename
            catalog = _dataset_catalog(datasets)

            # 1) LLM parses the scenario into a plan (scenario-agnostic, no hardcoding)
            plan = parse_to_plan(safe_in, catalog)

            # 2) Deterministic execution of the plan (pandas-based)
            structured_md = ScenarioEngine.execute_plan(plan, datasets)

            # 3) Canadian grounding + narrative (Cohere primary, open-model fallback)
            rag_hits = [txt for txt, _ in rag.retrieve(safe_in, k=6)]
            narrative = generate_narrative(safe_in, structured_md, rag_hits)

            final = f"{structured_md}\n\n# Narrative & Recommendations\n\n{narrative}"
            reply = _sanitize_text(final)
        else:
            # General conversation mode (no scenario/files required)
            prompt = f"{GENERAL_CONVERSATION_PROMPT}\n\nUser: {safe_in}\nAssistant:"
            reply = cohere_chat(prompt) or open_fallback_chat(prompt) or "How can I help further?"
            reply = _sanitize_text(reply)

        # Append user then assistant messages to history
        new_hist = _append_msg(history_messages, "user", user_msg)
        new_hist = _append_msg(new_hist, "assistant", reply)
        return new_hist, ""
    except Exception as e:
        tb = traceback.format_exc()
        log_event("app_error", None, {"err": str(e), "tb": tb})
        new_hist = _append_msg(history_messages, "user", user_msg)
        new_hist = _append_msg(new_hist, "assistant", f"Error: {e}\n\n{tb}")
        return new_hist, ""


# -------- UI --------
with gr.Blocks(analytics_enabled=False) as demo:
    gr.Markdown("## Canadian Healthcare AI • Scenario-Agnostic (Cohere primary • Deterministic analytics)")

    # Use the new 'messages' format to avoid deprecation
    chat = gr.Chatbot(type="messages", height=520)
    files = gr.Files(
        file_count="multiple",
        type="filepath",
        file_types=HEALTHCARE_SETTINGS["supported_file_types"],
    )
    msg = gr.Textbox(placeholder="Paste any scenario (Background / Situation / Tasks / Deliverables) or just chat.")
    send = gr.Button("Send")
    clear = gr.Button("Clear")

    def _on_send(m, h, f):
        # h is already a list of {'role','content'} dicts with type="messages"
        h2, _ = handle(m, h or [], f or [])
        return h2, ""

    send.click(_on_send, inputs=[msg, chat, files], outputs=[chat, msg])
    msg.submit(_on_send, inputs=[msg, chat, files], outputs=[chat, msg])
    clear.click(lambda: ([], ""), outputs=[chat, msg])


if __name__ == "__main__":
    demo.launch(server_name="0.0.0.0", server_port=int(os.getenv("PORT", "7860")))