Rajan Sharma committed on
Commit
76fca29
·
verified ·
1 Parent(s): 35f7e6e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +9 -31
app.py CHANGED
@@ -128,7 +128,6 @@ def cohere_chat(message, history):
128
  return None
129
  try:
130
  client = cohere.Client(api_key=COHERE_API_KEY)
131
- # Build a simple conversational prompt (history included)
132
  parts = []
133
  for u, a in _iter_user_assistant(history):
134
  if u: parts.append(f"User: {u}")
@@ -269,11 +268,6 @@ def _is_mdsi_diabetes(text: str) -> bool:
269
  return any(k in t for k in ["mdsi", "mobile diabetes", "diabetes", "metabolic", "a1c", "metis"])
270
 
271
  def build_dynamic_clarifications(scenario_text: str, artifacts: List[Dict[str, Any]]) -> str:
272
- """
273
- Build up to 5 grouped clarification questions based on what's MISSING.
274
- Groups: Prioritization, Capacity, Cost, Clinical, Recommendations.
275
- Only ask for domains not covered by uploads/scenario text.
276
- """
277
  flags_from_files = _extract_present_domains(artifacts)
278
  flags_from_text = _domain_from_text(scenario_text)
279
  missing = {
@@ -335,26 +329,18 @@ def build_dynamic_clarifications(scenario_text: str, artifacts: List[Dict[str, A
335
 
336
  # ---------- Core chat logic (auto scenario, dynamic Phase 1) ----------
337
  def clarityops_reply(user_msg, history, tz, uploaded_files_paths, awaiting_answers=False):
338
- """
339
- awaiting_answers:
340
- - False: If scenario triggered -> Phase 1 (dynamic questions). Else normal chat.
341
- - True: If scenario triggered -> Phase 2 (structured analysis). Else normal chat.
342
- """
343
  try:
344
  log_event("user_message", None, {"sizes": {"chars": len(user_msg or "")}})
345
 
346
- # Safety (input)
347
  safe_in, blocked_in, reason_in = safety_filter(user_msg, mode="input")
348
  if blocked_in:
349
  ans = refusal_reply(reason_in)
350
  return history + [(user_msg, ans)], awaiting_answers
351
 
352
- # Identity short-circuit
353
  if is_identity_query(safe_in, history):
354
  ans = "I am ClarityOps, your strategic decision making AI partner."
355
  return history + [(user_msg, ans)], awaiting_answers
356
 
357
- # Ingest uploads FIRST (files alone can trigger scenario mode)
358
  artifacts = []
359
  if uploaded_files_paths:
360
  ing = extract_text_from_files(uploaded_files_paths)
@@ -366,17 +352,14 @@ def clarityops_reply(user_msg, history, tz, uploaded_files_paths, awaiting_answe
366
  _session_rag.register_artifacts(artifacts)
367
  log_event("uploads_added", None, {"chunks": len(chunks), "artifacts": len(artifacts)})
368
 
369
- # CSV columns helper (works in both modes)
370
  if re.search(r"\b(columns?|headers?)\b", (safe_in or "").lower()):
371
  cols = _session_rag.get_latest_csv_columns()
372
  if cols:
373
  return history + [(user_msg, "Here are the column names from your most recent CSV upload:\n\n- " + "\n- ".join(cols))], awaiting_answers
374
 
375
- # Decide mode
376
  scenario_mode = is_scenario_triggered(safe_in, uploaded_files_paths)
377
 
378
  if not scenario_mode:
379
- # ---------- Normal conversational chat ----------
380
  out = cohere_chat(safe_in, history) if USE_HOSTED_COHERE else None
381
  if not out:
382
  model, tokenizer = load_local_model()
@@ -399,9 +382,7 @@ def clarityops_reply(user_msg, history, tz, uploaded_files_paths, awaiting_answe
399
  })
400
  return history + [(user_msg, safe_out)], awaiting_answers
401
 
402
- # ---------- Scenario Mode ----------
403
  if not awaiting_answers:
404
- # PHASE 1: generate dynamic questions here (no assumptions)
405
  phase1 = build_dynamic_clarifications(scenario_text=safe_in, artifacts=artifacts or _session_rag.artifacts)
406
  phase1 = _sanitize_text(phase1)
407
  log_event("assistant_reply", None, {
@@ -412,7 +393,6 @@ def clarityops_reply(user_msg, history, tz, uploaded_files_paths, awaiting_answe
412
  })
413
  return history + [(user_msg, phase1)], True
414
 
415
- # PHASE 2: build rich system preamble + feed to LLM
416
  session_snips = "\n---\n".join(_session_rag.retrieve(
417
  "diabetes screening Indigenous Métis mobile program cost throughput outcomes logistics",
418
  k=6
@@ -427,7 +407,6 @@ def clarityops_reply(user_msg, history, tz, uploaded_files_paths, awaiting_answe
427
  user_lower = (safe_in or "").lower()
428
  mdsi_extra = _mdsi_block() if ("diabetes" in user_lower or "mdsi" in user_lower or "mobile screening" in user_lower) else ""
429
 
430
- # Summarize artifacts for the model (concise, structured)
431
  arts = _session_rag.artifacts or []
432
  if arts:
433
  arts_summ = []
@@ -494,7 +473,7 @@ def clarityops_reply(user_msg, history, tz, uploaded_files_paths, awaiting_answe
494
  # ---------- Theme & CSS ----------
495
  theme = gr.themes.Soft(primary_hue="teal", neutral_hue="slate", radius_size=gr.themes.sizes.radius_lg)
496
  custom_css = """
497
- :root { --brand-bg: #e6f7f8; --brand-accent: #0d9488; --brand-text: #0f172a; --brand-text-light: #ffffff; }
498
  html, body, .gradio-container { height: 100vh; }
499
  .gradio-container { background: var(--brand-bg); display: flex; flex-direction: column; }
500
 
@@ -504,7 +483,6 @@ html, body, .gradio-container { height: 100vh; }
504
  #hero h2 { color: #0f172a; font-weight: 800; font-size: 32px; margin-bottom: 22px; }
505
  #hero .search-row { width: min(860px, 92vw); margin: 0 auto; display: flex; gap: 8px; align-items: stretch; }
506
  #hero .search-row .hero-box { flex: 1 1 auto; }
507
- /* Force equal heights between the single-line textbox and the submit button */
508
  #hero .search-row .hero-box textarea { height: 52px !important; }
509
  #hero-send > button { height: 52px !important; padding: 0 18px !important; border-radius: 12px !important; }
510
  #hero .hint { color: #334155; margin-top: 10px; font-size: 13px; opacity: 0.9; }
@@ -560,7 +538,7 @@ with gr.Blocks(theme=theme, css=custom_css, analytics_enabled=False) as demo:
560
  # ---- State
561
  state_history = gr.State(value=[])
562
  state_uploaded = gr.State(value=[])
563
- state_awaiting = gr.State(value=False) # False -> Phase 1 next; True -> Phase 2 next (awaiting answers)
564
 
565
  # ---- Uploads
566
  def _store_uploads(files, current):
@@ -592,9 +570,9 @@ with gr.Blocks(theme=theme, css=custom_css, analytics_enabled=False) as demo:
592
  chat_o, msg_o, hist_o, await_o = _on_send(user_msg, history, up_paths, awaiting)
593
  return (
594
  chat_o, msg_o, hist_o, await_o,
595
- gr.update(visible=False), # hide hero
596
- gr.update(visible=True), # show app
597
- "" # clear hero box
598
  )
599
 
600
  hero_send.click(
@@ -619,12 +597,11 @@ with gr.Blocks(theme=theme, css=custom_css, analytics_enabled=False) as demo:
619
  concurrency_limit=2, queue=True)
620
 
621
  def _on_clear():
622
- # Reset to fresh hero screen
623
  return (
624
  [], "", [], False,
625
- gr.update(visible=True), # show hero
626
- gr.update(visible=False), # hide app
627
- "" # clear hero input
628
  )
629
 
630
  clear.click(_on_clear, None, [chat, msg, state_history, state_awaiting, hero_wrap, app_wrap, hero_msg])
@@ -633,3 +610,4 @@ if __name__ == "__main__":
633
  port = int(os.environ.get("PORT", "7860"))
634
  demo.launch(server_name="0.0.0.0", server_port=port, show_api=False, max_threads=8)
635
 
 
 
128
  return None
129
  try:
130
  client = cohere.Client(api_key=COHERE_API_KEY)
 
131
  parts = []
132
  for u, a in _iter_user_assistant(history):
133
  if u: parts.append(f"User: {u}")
 
268
  return any(k in t for k in ["mdsi", "mobile diabetes", "diabetes", "metabolic", "a1c", "metis"])
269
 
270
  def build_dynamic_clarifications(scenario_text: str, artifacts: List[Dict[str, Any]]) -> str:
 
 
 
 
 
271
  flags_from_files = _extract_present_domains(artifacts)
272
  flags_from_text = _domain_from_text(scenario_text)
273
  missing = {
 
329
 
330
  # ---------- Core chat logic (auto scenario, dynamic Phase 1) ----------
331
  def clarityops_reply(user_msg, history, tz, uploaded_files_paths, awaiting_answers=False):
 
 
 
 
 
332
  try:
333
  log_event("user_message", None, {"sizes": {"chars": len(user_msg or "")}})
334
 
 
335
  safe_in, blocked_in, reason_in = safety_filter(user_msg, mode="input")
336
  if blocked_in:
337
  ans = refusal_reply(reason_in)
338
  return history + [(user_msg, ans)], awaiting_answers
339
 
 
340
  if is_identity_query(safe_in, history):
341
  ans = "I am ClarityOps, your strategic decision making AI partner."
342
  return history + [(user_msg, ans)], awaiting_answers
343
 
 
344
  artifacts = []
345
  if uploaded_files_paths:
346
  ing = extract_text_from_files(uploaded_files_paths)
 
352
  _session_rag.register_artifacts(artifacts)
353
  log_event("uploads_added", None, {"chunks": len(chunks), "artifacts": len(artifacts)})
354
 
 
355
  if re.search(r"\b(columns?|headers?)\b", (safe_in or "").lower()):
356
  cols = _session_rag.get_latest_csv_columns()
357
  if cols:
358
  return history + [(user_msg, "Here are the column names from your most recent CSV upload:\n\n- " + "\n- ".join(cols))], awaiting_answers
359
 
 
360
  scenario_mode = is_scenario_triggered(safe_in, uploaded_files_paths)
361
 
362
  if not scenario_mode:
 
363
  out = cohere_chat(safe_in, history) if USE_HOSTED_COHERE else None
364
  if not out:
365
  model, tokenizer = load_local_model()
 
382
  })
383
  return history + [(user_msg, safe_out)], awaiting_answers
384
 
 
385
  if not awaiting_answers:
 
386
  phase1 = build_dynamic_clarifications(scenario_text=safe_in, artifacts=artifacts or _session_rag.artifacts)
387
  phase1 = _sanitize_text(phase1)
388
  log_event("assistant_reply", None, {
 
393
  })
394
  return history + [(user_msg, phase1)], True
395
 
 
396
  session_snips = "\n---\n".join(_session_rag.retrieve(
397
  "diabetes screening Indigenous Métis mobile program cost throughput outcomes logistics",
398
  k=6
 
407
  user_lower = (safe_in or "").lower()
408
  mdsi_extra = _mdsi_block() if ("diabetes" in user_lower or "mdsi" in user_lower or "mobile screening" in user_lower) else ""
409
 
 
410
  arts = _session_rag.artifacts or []
411
  if arts:
412
  arts_summ = []
 
473
  # ---------- Theme & CSS ----------
474
  theme = gr.themes.Soft(primary_hue="teal", neutral_hue="slate", radius_size=gr.themes.sizes.radius_lg)
475
  custom_css = """
476
+ :root { --brand-bg: #0f172a; --brand-accent: #0d9488; --brand-text: #0f172a; --brand-text-light: #ffffff; } /* CHANGED bg only */
477
  html, body, .gradio-container { height: 100vh; }
478
  .gradio-container { background: var(--brand-bg); display: flex; flex-direction: column; }
479
 
 
483
  #hero h2 { color: #0f172a; font-weight: 800; font-size: 32px; margin-bottom: 22px; }
484
  #hero .search-row { width: min(860px, 92vw); margin: 0 auto; display: flex; gap: 8px; align-items: stretch; }
485
  #hero .search-row .hero-box { flex: 1 1 auto; }
 
486
  #hero .search-row .hero-box textarea { height: 52px !important; }
487
  #hero-send > button { height: 52px !important; padding: 0 18px !important; border-radius: 12px !important; }
488
  #hero .hint { color: #334155; margin-top: 10px; font-size: 13px; opacity: 0.9; }
 
538
  # ---- State
539
  state_history = gr.State(value=[])
540
  state_uploaded = gr.State(value=[])
541
+ state_awaiting = gr.State(value=False)
542
 
543
  # ---- Uploads
544
  def _store_uploads(files, current):
 
570
  chat_o, msg_o, hist_o, await_o = _on_send(user_msg, history, up_paths, awaiting)
571
  return (
572
  chat_o, msg_o, hist_o, await_o,
573
+ gr.update(visible=False),
574
+ gr.update(visible=True),
575
+ ""
576
  )
577
 
578
  hero_send.click(
 
597
  concurrency_limit=2, queue=True)
598
 
599
  def _on_clear():
 
600
  return (
601
  [], "", [], False,
602
+ gr.update(visible=True),
603
+ gr.update(visible=False),
604
+ ""
605
  )
606
 
607
  clear.click(_on_clear, None, [chat, msg, state_history, state_awaiting, hero_wrap, app_wrap, hero_msg])
 
610
  port = int(os.environ.get("PORT", "7860"))
611
  demo.launch(server_name="0.0.0.0", server_port=port, show_api=False, max_threads=8)
612
 
613
+