Update app.py
Committed by Rajan Sharma

app.py CHANGED
@@ -4,25 +4,27 @@ from functools import lru_cache
 import gradio as gr
 import torch
 
-
+from settings import SNAPSHOT_PATH, PERSIST_CONTENT
+from audit_log import log_event, hash_summary
+from privacy import redact_text
+
+# ---------- Env/cache ----------
 os.environ.setdefault("HF_HOME", "/data/.cache/huggingface")
 os.environ.setdefault("HF_HUB_CACHE", "/data/.cache/huggingface/hub")
 os.environ.setdefault("GRADIO_TEMP_DIR", "/data/gradio")
 os.environ.setdefault("GRADIO_CACHE_DIR", "/data/gradio")
-os.environ.pop("TRANSFORMERS_CACHE", None)
+os.environ.pop("TRANSFORMERS_CACHE", None)
 for p in ["/data/.cache/huggingface/hub", "/data/gradio"]:
-    try:
-        os.makedirs(p, exist_ok=True)
-    except Exception:
-        pass
+    try: os.makedirs(p, exist_ok=True)
+    except Exception: pass
 
-#
+# Optional timezone
 try:
     from zoneinfo import ZoneInfo  # noqa: F401
 except Exception:
     ZoneInfo = None  # noqa: N816
 
-#
+# Optional Cohere
 try:
     import cohere
     _HAS_COHERE = True
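A note on the env/cache block: `setdefault` preserves any value the Space operator has already exported, and `huggingface_hub` generally resolves these cache variables when it is first imported, which is why they are set before the `transformers` import further down. A minimal stdlib-only illustration (the temp dir stands in for /data so the snippet runs anywhere):

import os, tempfile

# setdefault never overwrites: an operator-supplied HF_HOME wins over this default.
os.environ.setdefault("HF_HOME", "/data/.cache/huggingface")

# makedirs(..., exist_ok=True) is idempotent, so re-running it on every Space restart is safe.
demo_hub = os.path.join(tempfile.gettempdir(), "clarityops-demo", "hub")
os.makedirs(demo_hub, exist_ok=True)
os.makedirs(demo_hub, exist_ok=True)  # second call is a no-op, not an error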
@@ -32,7 +34,6 @@ except Exception:
 from transformers import AutoTokenizer, AutoModelForCausalLM
 from huggingface_hub import login
 
-# ---------- ClarityOps modules ----------
 from safety import safety_filter, refusal_reply
 from retriever import init_retriever, retrieve_context
 from decision_math import compute_operational_numbers
@@ -41,7 +42,6 @@ from upload_ingest import extract_text_from_files
 from session_rag import SessionRAG
 from mdsi_analysis import capacity_projection, cost_estimate, outcomes_summary
 
-# ---------- Config ----------
 MODEL_ID = os.getenv("MODEL_ID", "CohereLabs/c4ai-command-r7b-12-2024")
 HF_TOKEN = os.getenv("HUGGINGFACE_HUB_TOKEN") or os.getenv("HF_TOKEN")
 COHERE_API_KEY = os.getenv("COHERE_API_KEY")
@@ -49,36 +49,24 @@ USE_HOSTED_COHERE = bool(COHERE_API_KEY and _HAS_COHERE)
 
 # ---------- Helpers ----------
 def pick_dtype_and_map():
-    if torch.cuda.is_available():
-        return torch.float16, "auto"
-    if torch.backends.mps.is_available():
-        return torch.float16, {"": "mps"}
+    if torch.cuda.is_available(): return torch.float16, "auto"
+    if torch.backends.mps.is_available(): return torch.float16, {"": "mps"}
     return torch.float32, "cpu"
 
 def is_identity_query(message, history):
     patterns = [
-        r"\bwho\s+are\s+you\b",
-        r"\bwhat\s+are\s+you\b",
-        r"\bwhat\s+is\s+your\s+name\b",
-        r"\bwho\s+is\s+this\b",
-        r"\bidentify\s+yourself\b",
-        r"\btell\s+me\s+about\s+yourself\b",
-        r"\bdescribe\s+yourself\b",
-        r"\band\s+you\s*\?\b",
-        r"\byour\s+name\b",
-        r"\bwho\s+am\s+i\s+chatting\s+with\b",
+        r"\bwho\s+are\s+you\b", r"\bwhat\s+are\s+you\b", r"\bwhat\s+is\s+your\s+name\b",
+        r"\bwho\s+is\s+this\b", r"\bidentify\s+yourself\b", r"\btell\s+me\s+about\s+yourself\b",
+        r"\bdescribe\s+yourself\b", r"\band\s+you\s*\?\b", r"\byour\s+name\b", r"\bwho\s+am\s+i\s+chatting\s+with\b"
     ]
     def match(t): return any(re.search(p, (t or "").strip().lower()) for p in patterns)
-    if match(message):
-        return True
+    if match(message): return True
     if history:
         last_user = history[-1][0] if isinstance(history[-1], (list, tuple)) else None
-        if match(last_user):
-            return True
+        if match(last_user): return True
     return False
 
 def _iter_user_assistant(history):
-    # history is a list of (user, assistant) tuples (Chatbot default format)
     for item in (history or []):
         if isinstance(item, (list, tuple)):
             u = item[0] if len(item) > 0 else ""
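The consolidated pattern list is behavior-neutral; only the line layout changed. A quick standalone check of the matching logic, using an illustrative subset of the patterns above:

import re

# \b word boundaries keep "you" from matching inside "your".
PATTERNS = [r"\bwho\s+are\s+you\b", r"\byour\s+name\b", r"\band\s+you\s*\?\b"]

def looks_like_identity_query(text):
    t = (text or "").strip().lower()
    return any(re.search(p, t) for p in PATTERNS)

assert looks_like_identity_query("Hi, who ARE  you?")
assert not looks_like_identity_query("Who are your on-call staff tonight?")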
@@ -115,7 +103,7 @@ def cohere_chat(message, history):
     except Exception as e:
         return f"Error calling Cohere API: {e}"
 
-# ---------- Local model ----------
+# ---------- Local model (with accelerate fallback) ----------
 @lru_cache(maxsize=1)
 def load_local_model():
     if not HF_TOKEN:
@@ -125,45 +113,46 @@ def load_local_model():
     tok = AutoTokenizer.from_pretrained(
         MODEL_ID, token=HF_TOKEN, use_fast=True, model_max_length=8192, padding_side="left", trust_remote_code=True,
     )
-    mdl = AutoModelForCausalLM.from_pretrained(
-        MODEL_ID, token=HF_TOKEN, device_map=device_map,
-        low_cpu_mem_usage=True, torch_dtype=dtype, trust_remote_code=True,
-    )
+    # Try device_map path (needs accelerate). Fallback to manual .to(device) if it fails.
+    try:
+        mdl = AutoModelForCausalLM.from_pretrained(
+            MODEL_ID, token=HF_TOKEN, device_map=device_map,
+            low_cpu_mem_usage=True, torch_dtype=dtype, trust_remote_code=True,
+        )
+    except Exception:
+        mdl = AutoModelForCausalLM.from_pretrained(
+            MODEL_ID, token=HF_TOKEN,
+            low_cpu_mem_usage=True, torch_dtype=dtype, trust_remote_code=True,
+        )
+        mdl.to("cuda" if torch.cuda.is_available() else "cpu")
     if mdl.config.eos_token_id is None and tok.eos_token_id is not None:
         mdl.config.eos_token_id = tok.eos_token_id
     return mdl, tok
 
 def build_inputs(tokenizer, message, history):
-    # Convert tuple history to chat template input for HF models
     msgs = []
     for u, a in _iter_user_assistant(history):
         if u: msgs.append({"role": "user", "content": u})
         if a: msgs.append({"role": "assistant", "content": a})
     msgs.append({"role": "user", "content": message})
-    return tokenizer.apply_chat_template(
-        msgs, tokenize=True, add_generation_prompt=True, return_tensors="pt"
-    )
+    return tokenizer.apply_chat_template(msgs, tokenize=True, add_generation_prompt=True, return_tensors="pt")
 
 def local_generate(model, tokenizer, input_ids, max_new_tokens=900):
     input_ids = input_ids.to(model.device)
     with torch.no_grad():
         out = model.generate(
-            input_ids=input_ids, max_new_tokens=max_new_tokens,
-            do_sample=True, temperature=0.3, top_p=0.9,
-            repetition_penalty=1.15,
-            pad_token_id=tokenizer.eos_token_id,
-            eos_token_id=tokenizer.eos_token_id,
+            input_ids=input_ids, max_new_tokens=max_new_tokens, do_sample=True, temperature=0.3, top_p=0.9,
+            repetition_penalty=1.15, pad_token_id=tokenizer.eos_token_id, eos_token_id=tokenizer.eos_token_id,
         )
     gen_only = out[0, input_ids.shape[-1]:]
     return tokenizer.decode(gen_only, skip_special_tokens=True).strip()
 
 # ---------- Snapshot loader ----------
-def _load_snapshot(path="snapshots/current.json"):
+def _load_snapshot(path=SNAPSHOT_PATH):
     try:
         with open(path, "r", encoding="utf-8") as f:
             return json.load(f)
     except Exception:
-        # Safe fallback if no snapshot present
         return {
             "timestamp": None, "beds_total": 400, "staffed_ratio": 1.0, "occupied_pct": 0.97,
             "ed_census": 62, "ed_admits_waiting": 19, "avg_ed_wait_hours": 8,
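The new try/except exists because `device_map=` requires the accelerate package; without it, `from_pretrained` raises and the code falls back to a plain load plus a manual `.to(device)`. The pattern in isolation, as a sketch (token and trust_remote_code omitted; any small causal LM id works for trying it out):

import torch
from transformers import AutoModelForCausalLM

def load_with_fallback(model_id, dtype, device_map):
    try:
        # Fast path: accelerate places (or shards) the weights for us.
        return AutoModelForCausalLM.from_pretrained(
            model_id, device_map=device_map, low_cpu_mem_usage=True, torch_dtype=dtype
        )
    except Exception:
        # Fallback: plain load, then move to the single available device.
        mdl = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=dtype)
        return mdl.to("cuda" if torch.cuda.is_available() else "cpu")

# e.g. load_with_fallback("sshleifer/tiny-gpt2", torch.float32, "auto")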
@@ -175,7 +164,7 @@ def _load_snapshot(path="snapshots/current.json"):
 
 # ---------- Init retrieval engines ----------
 init_retriever()
-_session_rag = SessionRAG()  #
+_session_rag = SessionRAG()  # in-memory only
 
 # ---------- Executive pre-compute (MDSi block) ----------
 def _mdsi_block():
@@ -185,59 +174,55 @@ def _mdsi_block():
     cost_1200 = cost_estimate(1200, 74.0, 75000.0)
     outcomes = outcomes_summary()
     return json.dumps({
-        "capacity_projection": {
-            "conservative": cons_capacity, "base": base_capacity, "optimistic": opt_capacity
-        },
+        "capacity_projection": {"conservative": cons_capacity, "base": base_capacity, "optimistic": opt_capacity},
         "cost_for_1200": cost_1200,
         "outcomes_summary": outcomes
     }, indent=2)
 
 # ---------- Core chat logic ----------
 def clarityops_reply(user_msg, history, tz, uploaded_files_paths):
-    """
-    - user_msg: latest message text
-    - history: list[(user, assistant)]
-    - tz: timezone str (unused but kept for future features)
-    - uploaded_files_paths: list[str] absolute paths of uploaded files
-    """
     try:
-        #
+        # Audit (content-free)
+        log_event("user_message", None, {"sizes": {"chars": len(user_msg or "")}})
+
         safe_in, blocked_in, reason_in = safety_filter(user_msg, mode="input")
         if blocked_in:
-
+            ans = refusal_reply(reason_in)
+            return history + [(user_msg, ans)]
 
-        # Identity short-circuit
         if is_identity_query(safe_in, history):
-
+            ans = "I am ClarityOps, your strategic decision making AI partner."
+            return history + [(user_msg, ans)]
 
-        # Ingest
+        # Ingest uploads (PHI-redacted in upload_ingest)
         if uploaded_files_paths:
            items = extract_text_from_files(uploaded_files_paths)
            if items:
                _session_rag.add_docs(items)
+                # Audit upload names & sizes only
+                log_event("uploads_added", None, {"count": len(items)})
 
-        #
+        # Retrieve from session uploads
         session_snips = "\n---\n".join(_session_rag.retrieve(
-            "diabetes screening Indigenous Métis mobile program cost throughput outcomes logistics bed flow staffing discharge forecast",
-            k=6
+            "diabetes screening Indigenous Métis mobile program cost throughput outcomes logistics bed flow staffing discharge forecast", k=6
         ))
 
-        # Load daily snapshot + policies + computed ops numbers
         snapshot = _load_snapshot()
         policy_context = retrieve_context(
             "mobile diabetes screening Indigenous community outreach logistics referral pathways cultural safety data governance cost effectiveness outcomes bed management discharge acceleration ambulance offload"
         )
         computed = compute_operational_numbers(snapshot)
 
-        # Smart scenario detection: if user message suggests exec MDSi context, include pre-compute block
         user_lower = (safe_in or "").lower()
         mdsi_extra = _mdsi_block() if ("diabetes" in user_lower or "mdsi" in user_lower or "mobile screening" in user_lower) else ""
 
+        # Optionally include long scenario text; redact if persisting later (we don't persist by default)
+        scenario_block = safe_in if len(safe_in) > 400 else ""
         system_preamble = build_system_preamble(
             snapshot=snapshot,
             policy_context=policy_context,
             computed_numbers=computed,
-            scenario_text=
+            scenario_text=scenario_block + (f"\n\nExecutive Pre-Computed Blocks:\n{mdsi_extra}" if mdsi_extra else ""),
             session_snips=session_snips
         )
 
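On the new scenario gating: the 400-character threshold is a heuristic that treats long pastes as scenario descriptions rather than chat turns, and the pre-computed MDSi block rides along whenever the trigger words matched. Restated as a small pure function (a sketch of the same logic):

def scenario_text_for(safe_in, mdsi_extra):
    # Long pastes (> 400 chars) are assumed to be scenario descriptions, not chat.
    block = safe_in if len(safe_in or "") > 400 else ""
    if mdsi_extra:
        block += f"\n\nExecutive Pre-Computed Blocks:\n{mdsi_extra}"
    return block

assert scenario_text_for("status?", "") == ""
assert "Executive Pre-Computed Blocks" in scenario_text_for("status?", '{"capacity_projection": {}}')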
@@ -257,10 +242,15 @@ def clarityops_reply(user_msg, history, tz, uploaded_files_paths):
         if out.startswith(tag):
             out = out[len(tag):].strip()
 
-        # Safety (output)
         safe_out, blocked_out, reason_out = safety_filter(out, mode="output")
         if blocked_out:
-
+            safe_out = refusal_reply(reason_out)
+
+        # Audit (content-free fingerprints)
+        log_event("assistant_reply", None, {
+            **hash_summary("prompt", augmented_user if not PERSIST_CONTENT else ""),
+            **hash_summary("reply", safe_out if not PERSIST_CONTENT else ""),
+        })
 
         return history + [(user_msg, safe_out)]
     except Exception as e:
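audit_log.py is not part of this commit, so the exact shape of `hash_summary` is not visible here. A hypothetical content-free fingerprint helper consistent with how it is called above might look like this (an assumption, not the repo's code):

import hashlib

def hash_summary(label, text):
    # Store a digest and a length, never the text itself.
    return {
        f"{label}_sha256": hashlib.sha256((text or "").encode("utf-8")).hexdigest(),
        f"{label}_chars": len(text or ""),
    }

print(hash_summary("reply", "Recommend opening 12 surge beds."))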
@@ -271,85 +261,34 @@ theme = gr.themes.Soft(primary_hue="teal", neutral_hue="slate", radius_size=gr.t
 custom_css = """
 :root { --brand-bg: #e6f7f8; --brand-accent: #0d9488; --brand-text: #0f172a; --brand-text-light: #ffffff; }
 .gradio-container { background: var(--brand-bg); }
-
-/* Title */
 h1 { color: var(--brand-text); font-weight: 700; font-size: 28px !important; }
-
-
-.chatbot header, .chatbot .label, .chatbot .label-wrap, .chatbot .top, .chatbot .header, .chatbot > .wrap > header {
-    display: none !important;
-}
-
-/* Chat bubbles */
-.message.user, .message.bot {
-    background: var(--brand-accent) !important;
-    color: var(--brand-text-light) !important;
-    border-radius: 12px !important;
-    padding: 8px 12px !important;
-}
-
-/* Inputs softer */
+.chatbot header, .chatbot .label, .chatbot .label-wrap, .chatbot .top, .chatbot .header, .chatbot > .wrap > header { display: none !important; }
+.message.user, .message.bot { background: var(--brand-accent) !important; color: var(--brand-text-light) !important; border-radius: 12px !important; padding: 8px 12px !important; }
 textarea, input, .gr-input { border-radius: 12px !important; }
 """
 
-# ---------- UI
+# ---------- UI ----------
 with gr.Blocks(theme=theme, css=custom_css) as demo:
-    # timezone capture (hidden)
     tz_box = gr.Textbox(visible=False)
-    demo.load(
-        lambda tz: tz,
-        inputs=[tz_box],
-        outputs=[tz_box],
-        js="() => Intl.DateTimeFormat().resolvedOptions().timeZone",
-    )
-
-    # extra DOM cleanup for some gradio builds
-    hide_label_sink = gr.HTML(visible=False)
-    demo.load(
-        fn=lambda: "",
-        inputs=None,
-        outputs=hide_label_sink,
-        js="""
-        () => {
-          const sel = [
-            '.chatbot header','.chatbot .label','.chatbot .label-wrap',
-            '.chatbot .top','.chatbot .header','.chatbot > .wrap > header'
-          ];
-          sel.forEach(s => document.querySelectorAll(s).forEach(el => el.style.display = 'none'));
-          return "";
-        }
-        """,
-    )
-
+    demo.load(lambda tz: tz, inputs=[tz_box], outputs=[tz_box], js="() => Intl.DateTimeFormat().resolvedOptions().timeZone")
     gr.Markdown("# ClarityOps Augmented Decision AI")
 
-    # Main chat area (IMPORTANT: no type="messages" -> uses tuple history)
     chat = gr.Chatbot(label="", show_label=False, height=700)
 
-    # ---- Bottom bar: uploads + message box + send/clear ----
     with gr.Row():
         uploads = gr.Files(
             label="Upload docs/images (PDF, DOCX, CSV, PNG, JPG)",
-            file_types=["file"],
-            file_count="multiple",
-            height=68
+            file_types=["file"], file_count="multiple", height=68
         )
 
     with gr.Row():
-        msg = gr.Textbox(
-            label="",
-            show_label=False,
-            placeholder="Type a message… (paste scenarios here too; ClarityOps will adapt)",
-            scale=10
-        )
+        msg = gr.Textbox(label="", show_label=False, placeholder="Type a message… (paste scenarios here too; ClarityOps will adapt)", scale=10)
         send = gr.Button("Send", scale=1)
         clear = gr.Button("Clear chat", scale=1)
 
-    # States
     state_history = gr.State(value=[])
     state_uploaded = gr.State(value=[])
 
-    # When user selects files, store their paths in state (so they persist across turns)
     def _store_uploads(files, current):
         paths = []
         for f in (files or []):
@@ -358,32 +297,15 @@ with gr.Blocks(theme=theme, css=custom_css) as demo:
 
     uploads.change(fn=_store_uploads, inputs=[uploads, state_uploaded], outputs=state_uploaded)
 
-    # Send message -> compute reply -> update chat & history
     def _on_send(user_msg, history, tz, up_paths):
         if not user_msg or not user_msg.strip():
-            return history, "", history
+            return history, "", history
         new_history = clarityops_reply(user_msg.strip(), history or [], tz, up_paths or [])
         return new_history, "", new_history
 
-    send.click(
-        fn=_on_send,
-        inputs=[msg, state_history, tz_box, state_uploaded],
-        outputs=[chat, msg, state_history],
-        queue=True,
-    )
-
-    # Also allow pressing Enter inside the textbox
-    msg.submit(
-        fn=_on_send,
-        inputs=[msg, state_history, tz_box, state_uploaded],
-        outputs=[chat, msg, state_history],
-        queue=True,
-    )
+    send.click(fn=_on_send, inputs=[msg, state_history, tz_box, state_uploaded], outputs=[chat, msg, state_history], queue=True)
+    msg.submit(fn=_on_send, inputs=[msg, state_history, tz_box, state_uploaded], outputs=[chat, msg, state_history], queue=True)
 
-    # Clear chat (keeps uploads so you can keep referencing docs)
-    def _clear_chat():
-        return [], [], []
-    # Clear only chat + input; keep uploads
     clear.click(lambda: ([], "", []), None, [chat, msg, state_history])
 
 if __name__ == "__main__":
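Collapsing `send.click` and `msg.submit` into one-liners keeps the button and Enter-to-submit on the same handler, which returns (chat history, cleared textbox, state) as a triple. The wiring pattern in isolation (self-contained sketch; an echo handler stands in for clarityops_reply):

import gradio as gr

def on_send(user_msg, history):
    history = (history or []) + [(user_msg, f"echo: {user_msg}")]
    return history, "", history  # chatbot, cleared textbox, state

with gr.Blocks() as demo:
    chat = gr.Chatbot()  # tuple-format history, as in app.py
    box = gr.Textbox()
    send = gr.Button("Send")
    state = gr.State([])
    send.click(on_send, inputs=[box, state], outputs=[chat, box, state])
    box.submit(on_send, inputs=[box, state], outputs=[chat, box, state])

if __name__ == "__main__":
    demo.launch()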