Rajan Sharma committed
Update app.py
app.py CHANGED
@@ -35,11 +35,18 @@ def is_healthcare_scenario(text: str, has_files: bool) -> bool:
     structured = any(s in t for s in ["background", "situation", "tasks", "deliverables"])
     return has_files and (structured or any(k in t for k in kws))

-def handle(user_msg: str, history: list, files: list) -> Tuple[list, str]:
+def _append_msg(history_messages: List[Dict[str, str]], role: str, content: str) -> List[Dict[str, str]]:
+    """Return a new history list with one message appended."""
+    return (history_messages or []) + [{"role": role, "content": content}]
+
+def handle(user_msg: str, history_messages: List[Dict[str, str]], files: list) -> Tuple[List[Dict[str, str]], str]:
     try:
         safe_in, blocked_in, reason_in = safety_filter(user_msg, mode="input")
         if blocked_in:
-            …
+            reply = refusal_reply(reason_in)
+            new_hist = _append_msg(history_messages, "user", user_msg)
+            new_hist = _append_msg(new_hist, "assistant", reply)
+            return new_hist, ""

         # Normalize files -> paths (safe when files is None)
         file_paths = [getattr(f, "name", None) or f for f in (files or [])]
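The new _append_msg helper is the heart of this change: history is now a list of {'role', 'content'} dicts, the shape the messages-format Chatbot expects, rather than the deprecated tuple pairs the deleted code presumably used (the removed lines are truncated on this page). A minimal sketch of the helper in isolation, with illustrative sample messages:

from typing import Dict, List

def _append_msg(history_messages: List[Dict[str, str]], role: str, content: str) -> List[Dict[str, str]]:
    """Return a new history list with one message appended."""
    return (history_messages or []) + [{"role": role, "content": content}]

# None is treated as an empty history, so first-turn calls are safe.
hist = _append_msg(None, "user", "Summarize the uploaded intake forms.")
hist = _append_msg(hist, "assistant", "Here is a structured summary ...")

assert hist == [
    {"role": "user", "content": "Summarize the uploaded intake forms."},
    {"role": "assistant", "content": "Here is a structured summary ..."},
]

Because the helper copies instead of mutating, the same incoming history can be reused safely on both the success and error paths below.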
@@ -74,23 +81,30 @@ def handle(user_msg: str, history: list, files: list) -> Tuple[list, str]:
             narrative = generate_narrative(safe_in, structured_md, rag_hits)

             final = f"{structured_md}\n\n# Narrative & Recommendations\n\n{narrative}"
-            …
-            …
-            …
-            …
-            …
-            …
+            reply = _sanitize_text(final)
+        else:
+            # General conversation mode (no scenario/files required)
+            prompt = f"{GENERAL_CONVERSATION_PROMPT}\n\nUser: {safe_in}\nAssistant:"
+            reply = cohere_chat(prompt) or open_fallback_chat(prompt) or "How can I help further?"
+            reply = _sanitize_text(reply)
+
+        # Append user then assistant messages to history
+        new_hist = _append_msg(history_messages, "user", user_msg)
+        new_hist = _append_msg(new_hist, "assistant", reply)
+        return new_hist, ""

     except Exception as e:
         tb = traceback.format_exc()
         log_event("app_error", None, {"err": str(e), "tb": tb})
-        …
+        new_hist = _append_msg(history_messages, "user", user_msg)
+        new_hist = _append_msg(new_hist, "assistant", f"Error: {e}\n\n{tb}")
+        return new_hist, ""

# -------- UI --------
with gr.Blocks(analytics_enabled=False) as demo:
     gr.Markdown("## Canadian Healthcare AI • Scenario-Agnostic (Cohere primary • Deterministic analytics)")
-    # Use the …
-    chat = gr.Chatbot(type="…
+    # Use the new 'messages' format to avoid deprecation
+    chat = gr.Chatbot(type="messages", height=520)
     files = gr.Files(
         file_count="multiple",
         type="filepath",
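Two details in this hunk deserve a note. First, the general-conversation branch chains providers with `or`, so a None or empty-string reply falls through to the next option and the string literal is the guaranteed last resort. Second, the except branch now returns a well-formed messages history instead of letting the exception surface in the UI. A small sketch of the fallback chain, with hypothetical stand-ins for cohere_chat and open_fallback_chat (their real definitions live elsewhere in app.py and are not part of this diff):

from typing import Optional

def cohere_chat(prompt: str) -> Optional[str]:
    return None  # stand-in: simulate the primary provider failing

def open_fallback_chat(prompt: str) -> Optional[str]:
    return ""    # stand-in: simulate the fallback returning an empty reply

prompt = "User: hello\nAssistant:"
# `or` skips falsy results (None, ""), so reply can never end up empty.
reply = cohere_chat(prompt) or open_fallback_chat(prompt) or "How can I help further?"
assert reply == "How can I help further?"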
@@ -101,6 +115,7 @@ with gr.Blocks(analytics_enabled=False) as demo:
     clear = gr.Button("Clear")

     def _on_send(m, h, f):
+        # h is already a list of {'role','content'} dicts with type="messages"
         h2, _ = handle(m, h or [], f or [])
         return h2, ""

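With type="messages", Gradio hands _on_send a history that is already in the {'role', 'content'} shape handle expects, so no tuple conversion is needed. A self-contained sketch of that wiring, assuming a recent Gradio (4.x or later); the echo handler stands in for handle, and the msg and send component names are guesses, since only clear appears in this diff:

import gradio as gr

def _on_send(m, h, f):
    # h arrives as a list of {'role','content'} dicts with type="messages".
    h = (h or []) + [
        {"role": "user", "content": m},
        {"role": "assistant", "content": f"Echo: {m}"},
    ]
    return h, ""

with gr.Blocks(analytics_enabled=False) as demo:
    chat = gr.Chatbot(type="messages", height=520)
    files = gr.Files(file_count="multiple", type="filepath")
    msg = gr.Textbox(label="Message")
    send = gr.Button("Send")
    send.click(_on_send, inputs=[msg, chat, files], outputs=[chat, msg])

if __name__ == "__main__":
    demo.launch()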
@@ -110,4 +125,3 @@ with gr.Blocks(analytics_enabled=False) as demo:

if __name__ == "__main__":
    demo.launch(server_name="0.0.0.0", server_port=int(os.getenv("PORT", "7860")))
-