Rajan Sharma committed on
Commit
35f7e6e
·
verified ·
1 Parent(s): 9dffee0

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +49 -54
app.py CHANGED
@@ -67,12 +67,10 @@ MAX_NEW_TOKENS = int(os.getenv("MAX_NEW_TOKENS", "2048"))
67
  SYSTEM_MASTER = """
68
  SYSTEM ROLE
69
  You are ClarityOps, a medical analytics system that interacts only via this chat.
70
-
71
  Absolute rules:
72
  - Use ONLY information provided in this conversation (scenario text + uploaded files + user answers).
73
  - Never invent data. If something required is missing after clarifications, write the literal token: INSUFFICIENT_DATA.
74
  - Produce clear calculations (show multipliers and totals), follow medical units, and keep privacy safeguards (aggregate; suppress cohorts <10).
75
-
76
  Formatting hard rules for Phase 2:
77
  - Start with the header: “Structured Analysis”
78
  - Follow this section order:
@@ -235,10 +233,6 @@ def _mdsi_block():
235
 
236
  # ---------- Dynamic Phase 1 question generator ----------
237
  def _extract_present_domains(artifacts: List[Dict[str, Any]]) -> Dict[str, bool]:
238
- """
239
- Inspect artifact names/columns to see which domains are present.
240
- Returns flags for: population, cost, clinical, capacity/logistics.
241
- """
242
  flags = dict(population=False, cost=False, clinical=False, capacity=False)
243
  for a in artifacts or []:
244
  name = (a.get("name") or "").lower()
@@ -290,54 +284,46 @@ def build_dynamic_clarifications(scenario_text: str, artifacts: List[Dict[str, A
290
  qs: List[Tuple[str, str]] = []
291
  is_mdsi = _is_mdsi_diabetes(scenario_text)
292
 
293
- # Prioritization
294
  if missing["population"]:
295
- if is_mdsi:
296
- qs.append(("Prioritization",
297
- "Confirm prioritization inputs: settlement membership living on-settlement (latest), obesity/metabolic syndrome prevalence, and any access-to-care constraints to weigh."))
298
- else:
299
- qs.append(("Prioritization",
300
- "Which population/risk indicators should drive prioritization (size, prevalence, access, equity factors)?"))
301
 
302
- # Capacity
303
  if missing["capacity"]:
304
- if is_mdsi:
305
- qs.append(("Capacity",
306
- "What is the realistic per-team screening rate (clients/day) and operating schedule (days/week, weeks/3-month window)?"))
307
- else:
308
- qs.append(("Capacity",
309
- "What per-team throughput and operating schedule should be used for capacity calculations?"))
310
 
311
- # Cost
312
  if missing["cost"]:
313
- if is_mdsi:
314
- qs.append(("Cost",
315
- "Provide startup cost per client and ongoing cost per client/visit (or total program costs) to price scenarios like 1,200 screens."))
316
- else:
317
- qs.append(("Cost",
318
- "Provide fixed setup costs and variable cost per client to model total program spend."))
319
 
320
- # Clinical
321
  if missing["clinical"]:
322
- if is_mdsi:
323
- qs.append(("Clinical",
324
- "What longitudinal deltas should we expect (e.g., ΔA1c, ΔBP, ΔBMI, lipids) from repeat screenings, and over what interval?"))
325
- else:
326
- qs.append(("Clinical",
327
- "Which clinical indicators and expected effect sizes should be tracked for outcomes?"))
328
-
329
- # Recommendations – always ask one targeted planning question last
330
- if is_mdsi:
331
- qs.append(("Recommendations",
332
- "Are there community constraints (events/seasonality/cultural protocols) that should shape routing and visit cadence?"))
333
- else:
334
- qs.append(("Recommendations",
335
- "Any operational constraints (scheduling, staffing, partnerships) we should incorporate into deployment modeling?"))
336
-
337
- # Cap at 5 groups
338
- qs = qs[:5]
339
 
340
- # Assemble markdown
341
  out = ["**Clarification Questions**"]
342
  current_group = None
343
  for grp, q in qs:
@@ -393,7 +379,6 @@ def clarityops_reply(user_msg, history, tz, uploaded_files_paths, awaiting_answe
393
  # ---------- Normal conversational chat ----------
394
  out = cohere_chat(safe_in, history) if USE_HOSTED_COHERE else None
395
  if not out:
396
- # Small system nudge for normal chat
397
  model, tokenizer = load_local_model()
398
  tiny = [{"role": "system", "content": "You are a helpful assistant."}]
399
  for u, a in _iter_user_assistant(history):
@@ -455,7 +440,6 @@ def clarityops_reply(user_msg, history, tz, uploaded_files_paths, awaiting_answe
455
  else:
456
  artifact_block = "Uploaded Data Files (summarized):\n- <none>"
457
 
458
- # Build system preamble
459
  scenario_block = safe_in if len((safe_in or "")) > 0 else ""
460
  system_preamble = build_system_preamble(
461
  snapshot=snapshot,
@@ -518,8 +502,11 @@ html, body, .gradio-container { height: 100vh; }
518
  #hero-wrap { height: 70vh; display: grid; place-items: center; }
519
  #hero { text-align: center; }
520
  #hero h2 { color: #0f172a; font-weight: 800; font-size: 32px; margin-bottom: 22px; }
521
- #hero .search-row { width: min(860px, 92vw); margin: 0 auto; display: flex; gap: 8px; }
522
  #hero .search-row .hero-box { flex: 1 1 auto; }
 
 
 
523
  #hero .hint { color: #334155; margin-top: 10px; font-size: 13px; opacity: 0.9; }
524
 
525
  /* CHAT */
@@ -527,6 +514,11 @@ html, body, .gradio-container { height: 100vh; }
527
  .chatbot header, .chatbot .label, .chatbot .label-wrap { display: none !important; }
528
  .message.user, .message.bot { background: var(--brand-accent) !important; color: var(--brand-text-light) !important; border-radius: 12px !important; padding: 8px 12px !important; }
529
  textarea, input, .gr-input { border-radius: 12px !important; }
 
 
 
 
 
530
  """
531
 
532
  # ---------- UI ----------
@@ -542,7 +534,7 @@ with gr.Blocks(theme=theme, css=custom_css, analytics_enabled=False) as demo:
542
  lines=1,
543
  elem_classes="hero-box"
544
  )
545
- hero_send = gr.Button("➤", scale=0)
546
  gr.Markdown('<div class="hint">Scenario Mode triggers when you type the word <b>scenario</b> or upload files. Phase&nbsp;1 asks dynamic clarifications; Phase&nbsp;2 returns a structured analysis.</div>')
547
 
548
  # --- MAIN APP (hidden until first message) ---
@@ -553,15 +545,17 @@ with gr.Blocks(theme=theme, css=custom_css, analytics_enabled=False) as demo:
553
  label="Upload docs/images (PDF, DOCX, CSV, PNG, JPG)",
554
  file_types=["file"], file_count="multiple", height=68
555
  )
556
- with gr.Row():
557
  msg = gr.Textbox(
558
  label="",
559
  show_label=False,
560
  placeholder="Continue here. Paste scenario details (include the word 'scenario' to trigger), add files above.",
561
- scale=10
 
 
562
  )
563
- send = gr.Button("Send", scale=1)
564
- clear = gr.Button("Clear chat", scale=1)
565
 
566
  # ---- State
567
  state_history = gr.State(value=[])
@@ -638,3 +632,4 @@ with gr.Blocks(theme=theme, css=custom_css, analytics_enabled=False) as demo:
638
  if __name__ == "__main__":
639
  port = int(os.environ.get("PORT", "7860"))
640
  demo.launch(server_name="0.0.0.0", server_port=port, show_api=False, max_threads=8)
 
 
67
  SYSTEM_MASTER = """
68
  SYSTEM ROLE
69
  You are ClarityOps, a medical analytics system that interacts only via this chat.
 
70
  Absolute rules:
71
  - Use ONLY information provided in this conversation (scenario text + uploaded files + user answers).
72
  - Never invent data. If something required is missing after clarifications, write the literal token: INSUFFICIENT_DATA.
73
  - Produce clear calculations (show multipliers and totals), follow medical units, and keep privacy safeguards (aggregate; suppress cohorts <10).
 
74
  Formatting hard rules for Phase 2:
75
  - Start with the header: “Structured Analysis”
76
  - Follow this section order:
 
233
 
234
  # ---------- Dynamic Phase 1 question generator ----------
235
  def _extract_present_domains(artifacts: List[Dict[str, Any]]) -> Dict[str, bool]:
 
 
 
 
236
  flags = dict(population=False, cost=False, clinical=False, capacity=False)
237
  for a in artifacts or []:
238
  name = (a.get("name") or "").lower()
 
284
  qs: List[Tuple[str, str]] = []
285
  is_mdsi = _is_mdsi_diabetes(scenario_text)
286
 
 
287
  if missing["population"]:
288
+ qs.append((
289
+ "Prioritization",
290
+ "Which population/risk indicators should drive prioritization (size, prevalence, access, equity factors)?"
291
+ if not is_mdsi else
292
+ "Confirm prioritization inputs: settlement membership living on-settlement (latest), obesity/metabolic syndrome prevalence, and any access-to-care constraints to weigh."
293
+ ))
294
 
 
295
  if missing["capacity"]:
296
+ qs.append((
297
+ "Capacity",
298
+ "What per-team throughput and operating schedule should be used for capacity calculations?"
299
+ if not is_mdsi else
300
+ "What is the realistic per-team screening rate (clients/day) and operating schedule (days/week, weeks/3-month window)?"
301
+ ))
302
 
 
303
  if missing["cost"]:
304
+ qs.append((
305
+ "Cost",
306
+ "Provide fixed setup costs and variable cost per client to model total program spend."
307
+ if not is_mdsi else
308
+ "Provide startup cost per client and ongoing cost per client/visit (or total program costs) to price scenarios like 1,200 screens."
309
+ ))
310
 
 
311
  if missing["clinical"]:
312
+ qs.append((
313
+ "Clinical",
314
+ "Which clinical indicators and expected effect sizes should be tracked for outcomes?"
315
+ if not is_mdsi else
316
+ "What longitudinal deltas should we expect (e.g., ΔA1c, ΔBP, ΔBMI, lipids) from repeat screenings, and over what interval?"
317
+ ))
318
+
319
+ qs.append((
320
+ "Recommendations",
321
+ "Any operational constraints (scheduling, staffing, partnerships) we should incorporate into deployment modeling?"
322
+ if not is_mdsi else
323
+ "Are there community constraints (events/seasonality/cultural protocols) that should shape routing and visit cadence?"
324
+ ))
 
 
 
 
325
 
326
+ qs = qs[:5]
327
  out = ["**Clarification Questions**"]
328
  current_group = None
329
  for grp, q in qs:
 
379
  # ---------- Normal conversational chat ----------
380
  out = cohere_chat(safe_in, history) if USE_HOSTED_COHERE else None
381
  if not out:
 
382
  model, tokenizer = load_local_model()
383
  tiny = [{"role": "system", "content": "You are a helpful assistant."}]
384
  for u, a in _iter_user_assistant(history):
 
440
  else:
441
  artifact_block = "Uploaded Data Files (summarized):\n- <none>"
442
 
 
443
  scenario_block = safe_in if len((safe_in or "")) > 0 else ""
444
  system_preamble = build_system_preamble(
445
  snapshot=snapshot,
 
502
  #hero-wrap { height: 70vh; display: grid; place-items: center; }
503
  #hero { text-align: center; }
504
  #hero h2 { color: #0f172a; font-weight: 800; font-size: 32px; margin-bottom: 22px; }
505
+ #hero .search-row { width: min(860px, 92vw); margin: 0 auto; display: flex; gap: 8px; align-items: stretch; }
506
  #hero .search-row .hero-box { flex: 1 1 auto; }
507
+ /* Force equal heights between the single-line textbox and the submit button */
508
+ #hero .search-row .hero-box textarea { height: 52px !important; }
509
+ #hero-send > button { height: 52px !important; padding: 0 18px !important; border-radius: 12px !important; }
510
  #hero .hint { color: #334155; margin-top: 10px; font-size: 13px; opacity: 0.9; }
511
 
512
  /* CHAT */
 
514
  .chatbot header, .chatbot .label, .chatbot .label-wrap { display: none !important; }
515
  .message.user, .message.bot { background: var(--brand-accent) !important; color: var(--brand-text-light) !important; border-radius: 12px !important; padding: 8px 12px !important; }
516
  textarea, input, .gr-input { border-radius: 12px !important; }
517
+
518
+ /* Chat input row equal heights */
519
+ #chat-input-row { align-items: stretch; }
520
+ #chat-msg textarea { height: 52px !important; }
521
+ #chat-send > button, #chat-clear > button { height: 52px !important; padding: 0 18px !important; border-radius: 12px !important; }
522
  """
523
 
524
  # ---------- UI ----------
 
534
  lines=1,
535
  elem_classes="hero-box"
536
  )
537
+ hero_send = gr.Button("➤", scale=0, elem_id="hero-send")
538
  gr.Markdown('<div class="hint">Scenario Mode triggers when you type the word <b>scenario</b> or upload files. Phase&nbsp;1 asks dynamic clarifications; Phase&nbsp;2 returns a structured analysis.</div>')
539
 
540
  # --- MAIN APP (hidden until first message) ---
 
545
  label="Upload docs/images (PDF, DOCX, CSV, PNG, JPG)",
546
  file_types=["file"], file_count="multiple", height=68
547
  )
548
+ with gr.Row(elem_id="chat-input-row"):
549
  msg = gr.Textbox(
550
  label="",
551
  show_label=False,
552
  placeholder="Continue here. Paste scenario details (include the word 'scenario' to trigger), add files above.",
553
+ scale=10,
554
+ elem_id="chat-msg",
555
+ lines=1,
556
  )
557
+ send = gr.Button("Send", scale=1, elem_id="chat-send")
558
+ clear = gr.Button("Clear chat", scale=1, elem_id="chat-clear")
559
 
560
  # ---- State
561
  state_history = gr.State(value=[])
 
632
  if __name__ == "__main__":
633
  port = int(os.environ.get("PORT", "7860"))
634
  demo.launch(server_name="0.0.0.0", server_port=port, show_api=False, max_threads=8)
635
+