Rajan Sharma committed
Commit 325f883 · verified · 1 Parent(s): 5d68d4f

Update app.py

Files changed (1)
  1. app.py +93 -32
app.py CHANGED
@@ -1,38 +1,99 @@
 
  import gradio as gr
  from settings import HEALTHCARE_SETTINGS, GENERAL_CONVERSATION_PROMPT, USE_SCENARIO_ENGINE
  from data_registry import DataRegistry
  from upload_ingest import extract_text_from_files
  from healthcare_analysis import HealthcareAnalyzer
- from rag import RAGIndex
- from scenario_planner import plan_from_llm
  from scenario_engine import ScenarioEngine
- from llm_router import cohere_chat
-
- def is_healthcare_scenario(text, files):
-     return any(k in text.lower() for k in HEALTHCARE_SETTINGS["healthcare_keywords"]) and bool(files)
-
- def handle(msg, history, files):
-     registry=DataRegistry()
-     for f in files or []: registry.add_path(f)
-     rag=RAGIndex(); rag.add(extract_text_from_files(files).get("chunks",[]))
-     if is_healthcare_scenario(msg, files) and USE_SCENARIO_ENGINE:
-         analyzer=HealthcareAnalyzer(registry)
-         results=analyzer.comprehensive_analysis(msg)
-         catalog={n:list(df.columns) for n,df in results.items() if hasattr(df,"columns")}
-         plan=plan_from_llm(msg, catalog)
-         structured=ScenarioEngine.render_plan(plan, results)
-         return history+[(msg, structured)], ""
-     else:
-         out=cohere_chat(f"{GENERAL_CONVERSATION_PROMPT}\n\nUser: {msg}\nAssistant:") or "..."
-         return history+[(msg, out)], ""
-
- with gr.Blocks() as demo:
-     chat=gr.Chatbot()
-     files=gr.Files(type="filepath", file_count="multiple")
-     msg=gr.Textbox()
-     btn=gr.Button("Send")
-     btn.click(handle,[msg,chat,files],[chat,msg])
-     msg.submit(handle,[msg,chat,files],[chat,msg])
-
- if __name__=="__main__":
-     demo.launch()
+ import os, traceback, regex as re2
  import gradio as gr
+ import pandas as pd
+ from typing import Any, Dict, List, Tuple
+
  from settings import HEALTHCARE_SETTINGS, GENERAL_CONVERSATION_PROMPT, USE_SCENARIO_ENGINE
+ from audit_log import log_event
+ from privacy import safety_filter, refusal_reply
  from data_registry import DataRegistry
  from upload_ingest import extract_text_from_files
  from healthcare_analysis import HealthcareAnalyzer
+ from scenario_planner import parse_to_plan
  from scenario_engine import ScenarioEngine
+ from rag import RAGIndex
+ from llm_router import generate_narrative, cohere_chat
+
+ def _sanitize_text(s: str) -> str:
+     if not isinstance(s, str): return s
+     return re2.sub(r'[\p{C}--[\n\t]]+', '', s, flags=re2.V1)  # V1 enables set subtraction
+
+ def _dataset_catalog(results: Dict[str, Any]) -> Dict[str, List[str]]:
+     cat = {}
+     for k, v in results.items():
+         if isinstance(v, pd.DataFrame):
+             cat[k] = v.columns.tolist()
+     return cat
+
+ def is_healthcare_scenario(text: str, has_files: bool) -> bool:
+     t = (text or "").lower()
+     kws = HEALTHCARE_SETTINGS["healthcare_keywords"]
+     structured = any(s in t for s in ["background", "situation", "tasks", "deliverables"])
+     return has_files and (structured or any(k in t for k in kws))
+
+ def handle(user_msg: str, history: list, files: list) -> Tuple[list, str]:
+     try:
+         safe_in, blocked_in, reason_in = safety_filter(user_msg, mode="input")
+         if blocked_in: return history + [(user_msg, refusal_reply(reason_in))], ""
+
+         # Normalize files -> paths
+         file_paths = [getattr(f, "name", None) or f for f in (files or [])]
+
+         # Register CSVs
+         registry = DataRegistry()
+         for p in file_paths:
+             try: registry.add_path(p)
+             except Exception as e: log_event("ingest_error", None, {"file": p, "err": str(e)})
+
+         # RAG ingest (safe on empty)
+         rag = RAGIndex()
+         ing = extract_text_from_files(file_paths)
+         rag.add(ing.get("chunks", []))
+
+         if is_healthcare_scenario(safe_in, bool(file_paths)) and USE_SCENARIO_ENGINE:
+             analyzer = HealthcareAnalyzer(registry)
+             datasets = analyzer.comprehensive_analysis(safe_in)
+             catalog = _dataset_catalog(datasets)
+
+             # LLM → plan (no hardcoding)
+             plan = parse_to_plan(safe_in, catalog)
+
+             # Deterministic execution
+             structured_md = ScenarioEngine.execute_plan(plan, datasets)
+
+             # Narrative with Canadian grounding
+             rag_hits = [txt for txt, _ in rag.retrieve(safe_in, k=6)]
+             narrative = generate_narrative(safe_in, structured_md, rag_hits)
+
+             final = f"{structured_md}\n\n# Narrative & Recommendations\n\n{narrative}"
+             return history + [(user_msg, _sanitize_text(final))], ""
+
+         # General conversation (Cohere primary, open-model fallback inside cohere_chat if needed)
+         prompt = f"{GENERAL_CONVERSATION_PROMPT}\n\nUser: {safe_in}\nAssistant:"
+         ans = cohere_chat(prompt) or "How can I help further?"
+         return history + [(user_msg, _sanitize_text(ans))], ""
+
+     except Exception as e:
+         tb = traceback.format_exc()
+         log_event("app_error", None, {"err": str(e), "tb": tb})
+         return history + [(user_msg, f"Error: {e}\n\n{tb}")], ""
+
+ with gr.Blocks(analytics_enabled=False) as demo:
+     gr.Markdown("## Canadian Healthcare AI • Scenario-Agnostic (Cohere primary • Deterministic analytics)")
+     chat = gr.Chatbot(type="tuples", height=520)  # "tuples" mode matches how we store history
+     files = gr.Files(file_count="multiple", type="filepath", file_types=HEALTHCARE_SETTINGS["supported_file_types"])
+     msg = gr.Textbox(placeholder="Paste any scenario (Background / Situation / Tasks / Deliverables) or just chat.")
+     send = gr.Button("Send")
+     clear = gr.Button("Clear")
+
+     def _on_send(m, h, f):
+         h2, _ = handle(m, h or [], f or [])
+         return h2, ""
+
+     send.click(_on_send, inputs=[msg, chat, files], outputs=[chat, msg])
+     msg.submit(_on_send, inputs=[msg, chat, files], outputs=[chat, msg])
+     clear.click(lambda: ([], ""), outputs=[chat, msg])
+
+ if __name__ == "__main__":
+     demo.launch(server_name="0.0.0.0", server_port=int(os.getenv("PORT", "7860")))
+
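The new handle() unpacks safety_filter(user_msg, mode="input") into a (text, blocked, reason) triple and turns a block into refusal_reply(reason). The repo's privacy module is not part of this commit, so the following is only a minimal sketch of that contract; the blocked-term list and the refusal wording are placeholders, not the project's actual policy.

```python
# Hypothetical stand-in for privacy.safety_filter / refusal_reply.
# Only the (text, blocked, reason) call shape is taken from app.py.
from typing import Optional, Tuple

_BLOCKED_TERMS = ("social insurance number", "credit card")  # assumed examples

def safety_filter(text: str, mode: str = "input") -> Tuple[str, bool, Optional[str]]:
    """Return (possibly cleaned text, blocked?, reason)."""
    t = text or ""
    for term in _BLOCKED_TERMS:
        if term in t.lower():
            return t, True, f"request mentions {term}"
    return t.strip(), False, None

def refusal_reply(reason: Optional[str]) -> str:
    """Build a user-facing refusal message from the filter's reason."""
    return f"I can't help with that request ({reason or 'policy'}). Please rephrase without sensitive details."
```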
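RAGIndex is used through just two calls: add(chunks) after extract_text_from_files, and retrieve(query, k=6) returning (text, score) pairs. A dependency-free token-overlap version of that interface could look like the sketch below; the project's rag.py presumably uses embeddings instead.

```python
# Minimal RAGIndex with the add()/retrieve() interface app.py relies on.
# Scoring is plain token overlap, purely as an interface illustration.
from typing import List, Tuple

class RAGIndex:
    def __init__(self) -> None:
        self._chunks: List[str] = []

    def add(self, chunks: List[str]) -> None:
        # Safe on empty input, matching how handle() calls it.
        self._chunks.extend(c for c in (chunks or []) if c and c.strip())

    def retrieve(self, query: str, k: int = 6) -> List[Tuple[str, float]]:
        q = set((query or "").lower().split())
        scored = []
        for chunk in self._chunks:
            toks = set(chunk.lower().split())
            scored.append((chunk, len(q & toks) / (len(q) or 1)))
        scored.sort(key=lambda pair: pair[1], reverse=True)
        return scored[:k]
```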
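parse_to_plan(safe_in, catalog) receives a dataset-name -> column-list catalog, and its output is then executed deterministically by ScenarioEngine.execute_plan(plan, datasets). Neither module ships in this commit; one plausible shape for that handoff, with an invented step vocabulary, is:

```python
# Hypothetical plan schema for the parse_to_plan -> execute_plan handoff.
# Step names ("describe") and the Markdown layout are illustrative only.
from typing import Dict, List
import pandas as pd

def parse_to_plan(message: str, catalog: Dict[str, List[str]]) -> List[dict]:
    # In app.py the plan comes from the LLM; this fallback just asks the
    # engine to describe every dataset listed in the catalog.
    return [{"op": "describe", "dataset": name} for name in catalog]

class ScenarioEngine:
    @staticmethod
    def execute_plan(plan: List[dict], datasets: Dict[str, pd.DataFrame]) -> str:
        sections = []
        for step in plan:
            df = datasets.get(step.get("dataset"))
            if df is None or step.get("op") != "describe":
                continue
            sections.append(f"## {step['dataset']}\n\n{df.describe(include='all').to_markdown()}")
        return "\n\n".join(sections) or "_No tabular results produced._"
```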
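log_event(event, user, payload) is called on ingest failures and top-level errors. A minimal JSON-lines implementation matching that signature (the log file name is assumed) might be:

```python
# Append-only JSON-lines audit log matching the log_event(event, user, payload)
# call sites in handle(). AUDIT_LOG_PATH is an assumed default.
import json
import os
import time
from typing import Optional

AUDIT_PATH = os.getenv("AUDIT_LOG_PATH", "audit_log.jsonl")

def log_event(event: str, user: Optional[str], payload: Optional[dict] = None) -> None:
    record = {"ts": time.time(), "event": event, "user": user, "payload": payload or {}}
    with open(AUDIT_PATH, "a", encoding="utf-8") as fh:
        fh.write(json.dumps(record, ensure_ascii=False) + "\n")
```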
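llm_router supplies cohere_chat(prompt), which handle() treats as returning a string or a falsy value, plus generate_narrative(question, structured_md, rag_hits). A rough sketch of that routing with the Cohere SDK follows; the prompt wording is invented, and returning None on failure (instead of the open-model fallback the in-code comment mentions) is an assumption.

```python
# Rough sketch of llm_router: Cohere primary, None on failure so callers
# can apply their own fallback text.
import os
from typing import List, Optional

def cohere_chat(prompt: str) -> Optional[str]:
    try:
        import cohere  # pip install cohere
        client = cohere.Client(os.environ["COHERE_API_KEY"])
        resp = client.chat(message=prompt)
        return (resp.text or "").strip() or None
    except Exception:
        # app.py treats a falsy return as "use the hard-coded reply".
        return None

def generate_narrative(question: str, structured_md: str, rag_hits: List[str]) -> str:
    context = "\n\n".join(rag_hits[:6])
    prompt = (
        "Write a concise narrative and recommendations for a Canadian healthcare analyst.\n\n"
        f"Question:\n{question}\n\nDeterministic results:\n{structured_md}\n\n"
        f"Reference excerpts:\n{context}\n"
    )
    return cohere_chat(prompt) or "No narrative available."
```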