import os, re, json, traceback
from functools import lru_cache
import gradio as gr
import torch
# NEW: robust control-char sanitizer (requires `regex` package)
import regex as re2 # pip install regex
from settings import SNAPSHOT_PATH, PERSIST_CONTENT
from audit_log import log_event, hash_summary
from privacy import redact_text
# ---------- Environment / cache ----------
os.environ.setdefault("HF_HOME", "/data/.cache/huggingface")
os.environ.setdefault("HF_HUB_CACHE", "/data/.cache/huggingface/hub")
os.environ.setdefault("GRADIO_TEMP_DIR", "/data/gradio")
os.environ.setdefault("GRADIO_CACHE_DIR", "/data/gradio")
os.environ.pop("TRANSFORMERS_CACHE", None)
for p in ["/data/.cache/huggingface/hub", "/data/gradio"]:
    try:
        os.makedirs(p, exist_ok=True)
    except Exception:
        pass
# Optional Cohere
try:
    import cohere
    _HAS_COHERE = True
except Exception:
    _HAS_COHERE = False
from transformers import AutoTokenizer, AutoModelForCausalLM
from huggingface_hub import login
from safety import safety_filter, refusal_reply
from retriever import init_retriever, retrieve_context
from decision_math import compute_operational_numbers
from prompt_templates import build_system_preamble
from upload_ingest import extract_text_from_files
from session_rag import SessionRAG
from mdsi_analysis import capacity_projection, cost_estimate, outcomes_summary
# ---------- Config ----------
MODEL_ID = os.getenv("MODEL_ID", "microsoft/Phi-3-mini-4k-instruct") # local fallback
HF_TOKEN = os.getenv("HUGGINGFACE_HUB_TOKEN") or os.getenv("HF_TOKEN")
COHERE_API_KEY = os.getenv("COHERE_API_KEY")
USE_HOSTED_COHERE = bool(COHERE_API_KEY and _HAS_COHERE)
MAX_NEW_TOKENS = int(os.getenv("MAX_NEW_TOKENS", "512"))
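# Routing: clarityops_reply tries the hosted Cohere model first whenever
# COHERE_API_KEY is set, and only loads the local MODEL_ID fallback if that
# call is unavailable or fails.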
# ---------- Helpers ----------
def pick_dtype_and_map():
    # fp16 on CUDA/MPS, fp32 on CPU; device_map "auto" lets accelerate place shards.
    if torch.cuda.is_available():
        return torch.float16, "auto"
    if torch.backends.mps.is_available():
        return torch.float16, {"": "mps"}
    return torch.float32, "cpu"
def is_identity_query(message, history):
    patterns = [
        r"\bwho\s+are\s+you\b",
        r"\bwhat\s+are\s+you\b",
        r"\bwhat\s+is\s+your\s+name\b",
        r"\bwho\s+is\s+this\b",
        r"\bidentify\s+yourself\b",
        r"\btell\s+me\s+about\s+yourself\b",
        r"\bdescribe\s+yourself\b",
        r"\band\s+you\s*\?",  # no trailing \b: "?" at end of a message has no word boundary after it
        r"\byour\s+name\b",
        r"\bwho\s+am\s+i\s+chatting\s+with\b",
    ]
    def match(t):
        return any(re.search(p, (t or "").strip().lower()) for p in patterns)
    if match(message):
        return True
    if history:
        last_user = history[-1][0] if isinstance(history[-1], (list, tuple)) else None
        if match(last_user):
            return True
    return False
def _iter_user_assistant(history):
    for item in (history or []):
        if isinstance(item, (list, tuple)):
            u = item[0] if len(item) > 0 else ""
            a = item[1] if len(item) > 1 else ""
            yield u, a
def _history_to_prompt(message, history):
    parts = []
    for u, a in _iter_user_assistant(history):
        if u: parts.append(f"User: {u}")
        if a: parts.append(f"Assistant: {a}")
    parts.append(f"User: {message}")
    parts.append("Assistant:")
    return "\n".join(parts)
def _sanitize_text(s: str) -> str:
    """
    Strip control characters (except newline/tab) to avoid garbled UI output.
    """
    if not isinstance(s, str):
        return s
    # Set subtraction ([\p{C}--[\n\t]]) only works under the regex module's
    # VERSION1 behaviour, so pass flags=re2.V1 explicitly.
    return re2.sub(r'[\p{C}--[\n\t]]+', '', s, flags=re2.V1)
# ---------- Cohere (default path) ----------
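# Note: this targets the classic cohere.Client.chat(message=...) API; the
# attribute probing below (.text / .reply / .generations) is a hedge because
# response shapes differ across Cohere SDK versions.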
def cohere_chat(message, history):
    if not USE_HOSTED_COHERE:
        return None
    try:
        # Create client on demand to avoid init errors in some environments
        client = cohere.Client(api_key=COHERE_API_KEY)
        prompt = _history_to_prompt(message, history)
        resp = client.chat(
            model="command-r7b-12-2024",
            message=prompt,
            temperature=0.3,
            max_tokens=MAX_NEW_TOKENS,
        )
        if hasattr(resp, "text") and resp.text:
            return resp.text.strip()
        if hasattr(resp, "reply") and resp.reply:
            return resp.reply.strip()
        if hasattr(resp, "generations") and resp.generations:
            return resp.generations[0].text.strip()
        return None
    except Exception:
        return None
# ---------- Local model (accelerate-safe fallback) ----------
@lru_cache(maxsize=1)
def load_local_model():
    if not HF_TOKEN:
        raise RuntimeError("HUGGINGFACE_HUB_TOKEN is not set.")
    login(token=HF_TOKEN, add_to_git_credential=False)
    dtype, device_map = pick_dtype_and_map()
    tok = AutoTokenizer.from_pretrained(
        MODEL_ID, token=HF_TOKEN, use_fast=True, model_max_length=8192,
        padding_side="left", trust_remote_code=True,
    )
    try:
        mdl = AutoModelForCausalLM.from_pretrained(
            MODEL_ID, token=HF_TOKEN, device_map=device_map,
            low_cpu_mem_usage=True, torch_dtype=dtype, trust_remote_code=True,
        )
    except Exception:
        # Retry without device_map (e.g. accelerate unavailable), then place manually.
        mdl = AutoModelForCausalLM.from_pretrained(
            MODEL_ID, token=HF_TOKEN,
            low_cpu_mem_usage=True, torch_dtype=dtype, trust_remote_code=True,
        )
        mdl.to("cuda" if torch.cuda.is_available() else "cpu")
    if mdl.config.eos_token_id is None and tok.eos_token_id is not None:
        mdl.config.eos_token_id = tok.eos_token_id
    return mdl, tok
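# lru_cache(maxsize=1) makes load_local_model a lazy singleton: the first call
# downloads and loads the weights, and every later fallback reuses the same pair.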
def build_inputs(tokenizer, message, history):
    msgs = []
    for u, a in _iter_user_assistant(history):
        if u: msgs.append({"role": "user", "content": u})
        if a: msgs.append({"role": "assistant", "content": a})
    msgs.append({"role": "user", "content": message})
    return tokenizer.apply_chat_template(
        msgs, tokenize=True, add_generation_prompt=True, return_tensors="pt"
    )
def local_generate(model, tokenizer, input_ids, max_new_tokens=MAX_NEW_TOKENS):
    input_ids = input_ids.to(model.device)
    with torch.no_grad():
        out = model.generate(
            input_ids=input_ids, max_new_tokens=max_new_tokens,
            do_sample=True, temperature=0.3, top_p=0.9,
            repetition_penalty=1.15,
            pad_token_id=tokenizer.eos_token_id,
            eos_token_id=tokenizer.eos_token_id,
        )
    # Keep only the newly generated tokens, not the echoed prompt.
    gen_only = out[0, input_ids.shape[-1]:]
    return tokenizer.decode(gen_only, skip_special_tokens=True).strip()
# ---------- Snapshot loader ----------
def _load_snapshot(path=SNAPSHOT_PATH):
    try:
        with open(path, "r", encoding="utf-8") as f:
            return json.load(f)
    except Exception:
        # Demo defaults used when no snapshot file is present.
        return {
            "timestamp": None, "beds_total": 400, "staffed_ratio": 1.0, "occupied_pct": 0.97,
            "ed_census": 62, "ed_admits_waiting": 19, "avg_ed_wait_hours": 8,
            "discharge_ready_today": 11, "discharge_barriers": {"allied_health": 7, "placement": 4},
            "rn_shortfall": {"med_ward_A": 1, "med_ward_B": 1},
            "forecast_admits_next_24h": {"respiratory": 14, "other": 9},
            "isolation_needs_waiting": {"contact": 3, "airborne": 1}, "telemetry_needed_waiting": 5,
        }
# ---------- Init retrieval engines ----------
init_retriever()
_session_rag = SessionRAG() # in-memory; supports artifacts (CSV columns)
# ---------- Executive pre-compute (MDSi block) ----------
def _mdsi_block():
    base_capacity = capacity_projection(18, 48, 6)
    cons_capacity = capacity_projection(12, 48, 6)
    opt_capacity = capacity_projection(24, 48, 6)
    cost_1200 = cost_estimate(1200, 74.0, 75000.0)
    outcomes = outcomes_summary()
    return json.dumps({
        "capacity_projection": {"conservative": cons_capacity, "base": base_capacity, "optimistic": opt_capacity},
        "cost_for_1200": cost_1200,
        "outcomes_summary": outcomes,
    }, indent=2)
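# The JSON above is spliced into the system preamble for diabetes/MDSi queries
# (see clarityops_reply), so the model quotes pre-computed numbers instead of
# improvising its own arithmetic.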
# ---------- Core chat logic (Cohere-first with fallback) ----------
def clarityops_reply(user_msg, history, tz, uploaded_files_paths):
    try:
        log_event("user_message", None, {"sizes": {"chars": len(user_msg or "")}})
        # Safety (input)
        safe_in, blocked_in, reason_in = safety_filter(user_msg, mode="input")
        if blocked_in:
            ans = refusal_reply(reason_in)
            return history + [(user_msg, ans)]
        # Identity short-circuit
        if is_identity_query(safe_in, history):
            ans = "I am ClarityOps, your strategic decision making AI partner."
            return history + [(user_msg, ans)]
        # Debug slash command: /diag
        if (safe_in or "").strip().lower().startswith("/diag"):
            try:
                chunk_count = len(getattr(_session_rag, "texts", []) or [])
                cols = _session_rag.get_latest_csv_columns()
                sample = _session_rag.retrieve("the", k=2)
                msg = [
                    f"Chunks in session: {chunk_count}",
                    f"Latest CSV columns: {', '.join(cols) if cols else '<none>'}",
                    "Sample retrieved snippets:",
                    *(sample or ["<no snippets>"]),
                ]
                return history + [(user_msg, "\n\n".join(msg))]
            except Exception as e:
                return history + [(user_msg, f"Diag error: {e}")]
        # Ingest uploads: returns chunks + artifacts
        if uploaded_files_paths:
            ing = extract_text_from_files(uploaded_files_paths)
            chunks = ing.get("chunks", []) if isinstance(ing, dict) else (ing or [])
            artifacts = ing.get("artifacts", []) if isinstance(ing, dict) else []
            if chunks:
                _session_rag.add_docs(chunks)
            if artifacts:
                _session_rag.register_artifacts(artifacts)
            log_event("uploads_added", None, {"chunks": len(chunks), "artifacts": len(artifacts)})
        # Deterministic CSV "columns/headers" handler
        if re.search(r"\b(columns?|headers?)\b", (safe_in or "").lower()):
            cols = _session_rag.get_latest_csv_columns()
            if cols:
                return history + [(user_msg, "Here are the column names from your most recent CSV upload:\n\n- " + "\n- ".join(cols))]
        # Heuristic: scenario-mode nudge if a long case study was pasted
        plain = (safe_in or "").strip().lower()
        looks_like_case = ("background" in plain and "objective" in plain) or ("case study" in plain)
        if looks_like_case and len(plain) > 600:
            safe_in += (
                "\n\nPlease analyze the scenario above using the Expected Output Format: "
                "produce structured recommendations, estimates and assumptions, include tables and bullet points, "
                "and explicitly state how uploaded files (CSV/docs) influenced your estimates."
            )
        # Retrieve from session uploads (text chunks)
        session_snips = "\n---\n".join(_session_rag.retrieve(
            "diabetes screening Indigenous Métis mobile program cost throughput outcomes logistics bed flow staffing discharge forecast",
            k=6
        ))
        # Load daily snapshot + policies + computed ops numbers
        snapshot = _load_snapshot()
        policy_context = retrieve_context(
            "mobile diabetes screening Indigenous community outreach logistics referral pathways cultural safety data governance cost effectiveness outcomes bed management discharge acceleration ambulance offload"
        )
        computed = compute_operational_numbers(snapshot)
        # Exec scenario detect (MDSi)
        user_lower = (safe_in or "").lower()
        mdsi_extra = _mdsi_block() if ("diabetes" in user_lower or "mdsi" in user_lower or "mobile screening" in user_lower) else ""
        scenario_block = safe_in if len(safe_in) > 400 else ""
        system_preamble = build_system_preamble(
            snapshot=snapshot,
            policy_context=policy_context,
            computed_numbers=computed,
            scenario_text=scenario_block + (f"\n\nExecutive Pre-Computed Blocks:\n{mdsi_extra}" if mdsi_extra else ""),
            session_snips=session_snips
        )
        augmented_user = system_preamble + "\n\nUser question or request:\n" + safe_in
        # Cohere first
        out = cohere_chat(augmented_user, history)
        # Fallback to local HF model if Cohere is not configured or failed
        if not out:
            model, tokenizer = load_local_model()
            inputs = build_inputs(tokenizer, augmented_user, history)
            out = local_generate(model, tokenizer, inputs, max_new_tokens=MAX_NEW_TOKENS)
        # Tidy role-tag echoes and sanitize
        if isinstance(out, str):
            for tag in ("Assistant:", "System:", "User:"):
                if out.startswith(tag):
                    out = out[len(tag):].strip()
            out = _sanitize_text(out)
        # Safety (output)
        safe_out, blocked_out, reason_out = safety_filter(out, mode="output")
        if blocked_out:
            safe_out = refusal_reply(reason_out)
        # Audit (content-free fingerprints)
        log_event("assistant_reply", None, {
            **hash_summary("prompt", augmented_user if not PERSIST_CONTENT else ""),
            **hash_summary("reply", safe_out if not PERSIST_CONTENT else ""),
        })
        return history + [(user_msg, safe_out)]
    except Exception as e:
        err = f"Error: {e}"
        try:
            traceback.print_exc()
        except Exception:
            pass
        return history + [(user_msg, err)]
# ---------- Theme & CSS ----------
theme = gr.themes.Soft(primary_hue="teal", neutral_hue="slate", radius_size=gr.themes.sizes.radius_lg)
custom_css = """
:root { --brand-bg: #e6f7f8; --brand-accent: #0d9488; --brand-text: #0f172a; --brand-text-light: #ffffff; }
.gradio-container { background: var(--brand-bg); }
h1 { color: var(--brand-text); font-weight: 700; font-size: 28px !important; }
.chatbot header, .chatbot .label, .chatbot .label-wrap, .chatbot .top, .chatbot .header, .chatbot > .wrap > header { display: none !important; }
.message.user, .message.bot { background: var(--brand-accent) !important; color: var(--brand-text-light) !important; border-radius: 12px !important; padding: 8px 12px !important; }
textarea, input, .gr-input { border-radius: 12px !important; }
"""
# ---------- UI (single window; uploads at bottom) ----------
with gr.Blocks(theme=theme, css=custom_css, analytics_enabled=False) as demo:
    gr.Markdown("# ClarityOps Augmented Decision AI")
    chat = gr.Chatbot(label="", show_label=False, height=700)
    with gr.Row():
        uploads = gr.Files(
            label="Upload docs/images (PDF, DOCX, CSV, PNG, JPG)",
            file_types=["file"], file_count="multiple", height=68
        )
    with gr.Row():
        msg = gr.Textbox(
            label="",
            show_label=False,
            placeholder="Type a message… (paste scenarios here too; ClarityOps will adapt)",
            scale=10
        )
        send = gr.Button("Send", scale=1)
        clear = gr.Button("Clear chat", scale=1)
    state_history = gr.State(value=[])
    state_uploaded = gr.State(value=[])

    def _store_uploads(files, current):
        # Accumulate plain paths; gr.Files may hand back tempfile objects or strings.
        paths = []
        for f in (files or []):
            paths.append(getattr(f, "name", None) or f)
        return (current or []) + paths

    uploads.change(fn=_store_uploads, inputs=[uploads, state_uploaded], outputs=state_uploaded)

    def _on_send(user_msg, history, up_paths):
        try:
            if not user_msg or not user_msg.strip():
                return history, "", history
            new_history = clarityops_reply(user_msg.strip(), history or [], None, up_paths or [])
            return new_history, "", new_history
        except Exception as e:
            err = f"Error: {e}"
            try:
                traceback.print_exc()
            except Exception:
                pass
            new_hist = (history or []) + [(user_msg or "", err)]
            return new_hist, "", new_hist

    send.click(_on_send, inputs=[msg, state_history, state_uploaded],
               outputs=[chat, msg, state_history],
               concurrency_limit=2, queue=True)
    msg.submit(_on_send, inputs=[msg, state_history, state_uploaded],
               outputs=[chat, msg, state_history],
               concurrency_limit=2, queue=True)
    clear.click(lambda: ([], "", []), None, [chat, msg, state_history])
if __name__ == "__main__":
    port = int(os.environ.get("PORT", "7860"))
    demo.launch(server_name="0.0.0.0", server_port=port, show_api=False, max_threads=8)