Rajan Sharma commited on
Commit
99d5da9
·
verified ·
1 Parent(s): 3d2ccd6

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +174 -99
app.py CHANGED
@@ -1,15 +1,8 @@
1
-
2
- # app_phi.py
3
- #
4
- # HIPAA-aware wrapper of the existing app. This keeps the analysis and assessment
5
- # capabilities intact while adding PHI safeguards:
6
- # - PHI_MODE flags with opt-in persistence
7
- # - Redaction before sending content to any external LLM
8
- # - Safer logging (no raw PHI)
9
- # - Optional banner and history controls
10
  #
11
- # NOTE: This file is designed to be a drop-in alternative to app.py.
12
- # It preserves existing analysis logic and UI while adding HIPAA toggles.
 
13
 
14
  from __future__ import annotations
15
 
@@ -46,6 +39,8 @@ from privacy import safety_filter, refusal_reply
46
  from llm_router import cohere_chat, _co_client, cohere_embed
47
 
48
 
 
 
49
  def load_markdown_text(filepath: str) -> str:
50
  try:
51
  with open(filepath, "r", encoding="utf-8") as f:
@@ -57,21 +52,18 @@ def load_markdown_text(filepath: str) -> str:
57
  def _sanitize_text(s: str) -> str:
58
  if not isinstance(s, str):
59
  return s
60
- # Remove control characters (except newline and tab)
61
  return re2.sub(r"[\p{C}--[\n\t]]+", "", s)
62
 
63
 
64
- # ---------------------- HIPAA helpers ----------------------
65
-
66
- # Very conservative redaction (risk reduction; not a full de-identification program).
67
  PHI_PATTERNS = [
68
- (re.compile(r"\b\d{3}-\d{2}-\d{4}\b"), "[REDACTED_SSN]"), # US SSN
69
- (re.compile(r"\b\d{9}\b"), "[REDACTED_MRN]"), # 9-digit MRN (example)
70
  (re.compile(r"\b\d{3}[-.\s]?\d{3}[-.\s]?\d{4}\b"), "[REDACTED_PHONE]"),
71
  (re.compile(r"[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Za-z]{2,}"), "[REDACTED_EMAIL]"),
72
- (re.compile(r"\b(19|20)\d{2}-\d{2}-\d{2}\b"), "[REDACTED_DOB]"), # YYYY-MM-DD
73
- (re.compile(r"\b\d{2}/\d{2}/(19|20)\d{2}\b"), "[REDACTED_DOB]"), # MM/DD/YYYY
74
- (re.compile(r"\b\d{5}(-\d{4})?\b"), "[REDACTED_ZIP]"), # ZIP (US)
75
  ]
76
 
77
  def redact_phi(text: str) -> str:
@@ -83,18 +75,14 @@ def redact_phi(text: str) -> str:
83
  return t
84
 
85
  def safe_log(event_name: str, meta: dict | None = None):
86
- # Avoid logging raw PHI or payloads
87
  try:
88
  meta = (meta or {}).copy()
89
  meta.pop("raw", None)
90
  log_event(event_name, None, meta)
91
  except Exception:
92
- # Never raise from logging in PHI context
93
  pass
94
 
95
 
96
- # ---------------------- Original analysis helpers (unchanged) ----------------------
97
-
98
  def _create_python_script(user_scenario: str, schema_context: str) -> str:
99
  EXPERT_ANALYTICAL_GUIDELINES = """
100
  --- EXPERT ANALYTICAL GUIDELINES ---
@@ -195,13 +183,11 @@ def ping_cohere() -> str:
195
 
196
  def handle(user_msg: str, files: list, yield_update) -> str:
197
  try:
198
- # Run app safety filter
199
  safe_in, blocked_in, reason_in = safety_filter(user_msg, mode="input")
200
  if blocked_in:
201
  return refusal_reply(reason_in)
202
 
203
- # If PHI mode is enabled and we are not permitted to send PHI externally,
204
- # redact the content before any LLM calls.
205
  redacted_in = safe_in
206
  if PHI_MODE and REDACT_BEFORE_LLM:
207
  redacted_in = redact_phi(safe_in)
@@ -209,8 +195,6 @@ def handle(user_msg: str, files: list, yield_update) -> str:
209
  file_paths: List[str] = [getattr(f, "name", None) or f for f in (files or [])]
210
 
211
  if file_paths:
212
- # CSV analysis path preserved; we only use redacted_in in PROMPTS sent to the LLM.
213
- # CSV data itself is processed locally as before.
214
  dataframes, schema_parts = [], []
215
  for i, p in enumerate(file_paths):
216
  if p.endswith(".csv"):
@@ -227,8 +211,6 @@ def handle(user_msg: str, files: list, yield_update) -> str:
227
  return "Please upload at least one CSV file."
228
 
229
  schema_context = "\n".join(schema_parts)
230
-
231
- # If PHI is not allowed externally and PHI_MODE is on, we will use the redacted prompt.
232
  prompt_for_code = redacted_in if (PHI_MODE and not ALLOW_EXTERNAL_PHI) else safe_in
233
 
234
  yield_update("""```
@@ -255,12 +237,10 @@ def handle(user_msg: str, files: list, yield_update) -> str:
255
  yield_update("""```
256
  ✍️ Synthesizing final comprehensive report...
257
  ```""")
258
- # For the final narrative, also route based on PHI policy
259
  writer_input = redacted_in if (PHI_MODE and not ALLOW_EXTERNAL_PHI) else safe_in
260
  final_report = _generate_final_report(writer_input, raw_data_output)
261
  return _sanitize_text(final_report)
262
  else:
263
- # Pure chat path: redact if PHI_MODE and external is not allowed
264
  chat_input = redacted_in if (PHI_MODE and not ALLOW_EXTERNAL_PHI) else safe_in
265
  prompt = f"{GENERAL_CONVERSATION_PROMPT}\n\nUser: {chat_input}\nAssistant:"
266
  return _sanitize_text(cohere_chat(prompt) or "How can I help further?")
@@ -268,43 +248,119 @@ def handle(user_msg: str, files: list, yield_update) -> str:
268
  except Exception as e:
269
  tb = traceback.format_exc()
270
  safe_log("app_error", {"err": str(e)})
271
- # Do not leak stack traces to UI in PHI mode
272
  return "A critical error occurred. Please contact your administrator." if PHI_MODE else f"A critical error occurred: {e}"
273
 
274
 
275
  PRIVACY_POLICY_TEXT = load_markdown_text("privacy_policy.md")
276
  TERMS_OF_SERVICE_TEXT = load_markdown_text("terms_of_service.md")
277
 
278
- with gr.Blocks(theme="soft", css="style.css") as demo:
279
- # Persistent history state (in-memory). PHI mode defaults to no persistence.
280
- assessment_history = gr.State([])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
281
 
282
- # Optional PHI banner
283
- if PHI_MODE:
284
- gr.Markdown(
285
- "⚠️ **PHI Mode Enabled**: Protected Health Information safeguards are active. "
286
- "History persistence is disabled by default. Avoid unnecessary identifiers."
287
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
288
 
289
- # Modals
290
- with gr.Group(visible=False) as privacy_modal:
291
- with gr.Blocks():
292
- gr.Markdown(PRIVACY_POLICY_TEXT)
293
- close_privacy_btn = gr.Button("Close")
294
-
295
- with gr.Group(visible=False) as terms_modal:
296
- with gr.Blocks():
297
- gr.Markdown(TERMS_OF_SERVICE_TEXT)
298
- close_terms_btn = gr.Button("Close")
299
-
300
- # UI
301
- gr.Markdown("# Universal AI Data Analyst")
302
- with gr.Row(variant="panel"):
303
- with gr.Column(scale=1):
304
- gr.Markdown("## New Assessment")
305
- gr.Markdown(
306
- "<p style='font-size:0.9rem; color: #6C757D;'>Upload CSVs for data analysis, or just enter a prompt to chat.</p>"
307
- )
308
  files_input = gr.Files(
309
  label="Upload Data Files (.csv)",
310
  file_count="multiple",
@@ -312,34 +368,54 @@ with gr.Blocks(theme="soft", css="style.css") as demo:
312
  file_types=[".csv"],
313
  )
314
  prompt_input = gr.Textbox(
315
- label="Prompt", placeholder="Paste your scenario or question here.", lines=15
 
 
 
 
316
  )
317
- with gr.Row():
318
- send_btn = gr.Button("▶️ Send / Run Analysis", variant="primary", scale=2)
319
- clear_btn = gr.Button("🗑️ Clear")
320
- ping_btn = gr.Button("Ping Cohere")
 
 
 
 
321
  ping_out = gr.Markdown()
322
- with gr.Column(scale=2):
323
- with gr.Tabs():
324
- with gr.TabItem("Current Assessment", id=0):
325
- chat_history_output = gr.Chatbot(
326
- label="Analysis Output", type="messages", height=600
327
- )
328
- with gr.TabItem("Assessment History", id=1):
329
- gr.Markdown("## Review Past Assessments")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
330
  history_dropdown = gr.Dropdown(
331
  label="Select an assessment to review", choices=[]
332
  )
333
  history_display = gr.Markdown(label="Selected Assessment Details")
334
 
335
- with gr.Row():
336
- gr.Markdown("---")
337
-
338
- with gr.Row():
339
- privacy_link = gr.Button("Privacy Policy", variant="link")
340
- terms_link = gr.Button("Terms of Service", variant="link")
341
 
342
- # Logic
343
 
344
  def run_analysis_wrapper(prompt, files, chat_history_list, history_state_list):
345
  if not prompt:
@@ -347,14 +423,11 @@ with gr.Blocks(theme="soft", css="style.css") as demo:
347
  yield chat_history_list, history_state_list, gr.update()
348
  return
349
 
350
- # Append user message
351
  chat_with_user_msg = _append_msg(chat_history_list, "user", prompt)
352
 
353
- # Placeholder for streamed updates (unused)
354
  def dummy_update(message: str):
355
  pass
356
 
357
- # Thinking message
358
  thinking_message = _append_msg(
359
  chat_with_user_msg,
360
  "assistant",
@@ -364,21 +437,17 @@ with gr.Blocks(theme="soft", css="style.css") as demo:
364
  )
365
  yield thinking_message, history_state_list, gr.update()
366
 
367
- # Run analysis/chat
368
  ai_response_text = handle(prompt, files, dummy_update)
369
 
370
- # Append assistant response
371
  final_chat = _append_msg(chat_with_user_msg, "assistant", ai_response_text)
372
  timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
373
 
374
- # Filenames
375
  file_names: List[str] = []
376
  if files:
377
  file_names = [
378
  os.path.basename(f.name if hasattr(f, "name") else f) for f in files
379
  ]
380
 
381
- # Construct history entry
382
  new_entry = {
383
  "id": timestamp,
384
  "prompt": prompt,
@@ -387,7 +456,7 @@ with gr.Blocks(theme="soft", css="style.css") as demo:
387
  "chat_history": final_chat,
388
  }
389
 
390
- # Persist only if allowed
391
  if PERSIST_HISTORY and (not PHI_MODE or (PHI_MODE and HISTORY_TTL_DAYS > 0)):
392
  updated_history: List[Dict[str, Any]] = (history_state_list or []) + [new_entry]
393
  else:
@@ -439,24 +508,30 @@ with gr.Blocks(theme="soft", css="style.css") as demo:
439
  {chat_md}
440
  """
441
 
442
- # Wire up UI
443
  send_btn.click(
444
  run_analysis_wrapper,
445
- inputs=[prompt_input, files_input, chat_history_output, assessment_history],
446
- outputs=[chat_history_output, assessment_history, history_dropdown],
447
  )
 
 
 
448
  history_dropdown.change(
449
- view_history, inputs=[history_dropdown, assessment_history], outputs=[history_display]
450
  )
 
451
  clear_btn.click(
452
- lambda: (None, None, []), # clear prompt, files, and chat
453
  outputs=[prompt_input, files_input, chat_history_output],
454
  )
 
455
  ping_btn.click(ping_cohere, outputs=[ping_out])
456
- privacy_link.click(lambda: gr.update(visible=True), outputs=[privacy_modal])
457
- close_privacy_btn.click(lambda: gr.update(visible=False), outputs=[privacy_modal])
458
- terms_link.click(lambda: gr.update(visible=True), outputs=[terms_modal])
459
- close_terms_btn.click(lambda: gr.update(visible=False), outputs=[terms_modal])
 
460
 
461
  if __name__ == "__main__":
462
  if not os.getenv("COHERE_API_KEY"):
 
1
+ # app_phi_sleek.py
 
 
 
 
 
 
 
 
2
  #
3
+ # Sleek UI + Voice-to-Text for the HIPAA-aware app.
4
+ # IMPORTANT: All analysis/assessment logic is preserved exactly.
5
+ # Changes are limited to the UI layout, CSS, and a client-side STT helper.
6
 
7
  from __future__ import annotations
8
 
 
39
  from llm_router import cohere_chat, _co_client, cohere_embed
40
 
41
 
42
+ # ---------------------- helpers (unchanged logic) ----------------------
43
+
44
  def load_markdown_text(filepath: str) -> str:
45
  try:
46
  with open(filepath, "r", encoding="utf-8") as f:
 
52
  def _sanitize_text(s: str) -> str:
53
  if not isinstance(s, str):
54
  return s
 
55
  return re2.sub(r"[\p{C}--[\n\t]]+", "", s)
56
 
57
 
58
+ # Very conservative PHI redaction (unchanged idea)
 
 
59
  PHI_PATTERNS = [
60
+ (re.compile(r"\b\d{3}-\d{2}-\d{4}\b"), "[REDACTED_SSN]"),
61
+ (re.compile(r"\b\d{9}\b"), "[REDACTED_MRN]"),
62
  (re.compile(r"\b\d{3}[-.\s]?\d{3}[-.\s]?\d{4}\b"), "[REDACTED_PHONE]"),
63
  (re.compile(r"[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Za-z]{2,}"), "[REDACTED_EMAIL]"),
64
+ (re.compile(r"\b(19|20)\d{2}-\d{2}-\d{2}\b"), "[REDACTED_DOB]"),
65
+ (re.compile(r"\b\d{2}/\d{2}/(19|20)\d{2}\b"), "[REDACTED_DOB]"),
66
+ (re.compile(r"\b\d{5}(-\d{4})?\b"), "[REDACTED_ZIP]"),
67
  ]
68
 
69
  def redact_phi(text: str) -> str:
 
75
  return t
76
 
77
  def safe_log(event_name: str, meta: dict | None = None):
 
78
  try:
79
  meta = (meta or {}).copy()
80
  meta.pop("raw", None)
81
  log_event(event_name, None, meta)
82
  except Exception:
 
83
  pass
84
 
85
 
 
 
86
  def _create_python_script(user_scenario: str, schema_context: str) -> str:
87
  EXPERT_ANALYTICAL_GUIDELINES = """
88
  --- EXPERT ANALYTICAL GUIDELINES ---
 
183
 
184
  def handle(user_msg: str, files: list, yield_update) -> str:
185
  try:
 
186
  safe_in, blocked_in, reason_in = safety_filter(user_msg, mode="input")
187
  if blocked_in:
188
  return refusal_reply(reason_in)
189
 
190
+ # Redact prompt if PHI_MODE and external PHI not allowed
 
191
  redacted_in = safe_in
192
  if PHI_MODE and REDACT_BEFORE_LLM:
193
  redacted_in = redact_phi(safe_in)
 
195
  file_paths: List[str] = [getattr(f, "name", None) or f for f in (files or [])]
196
 
197
  if file_paths:
 
 
198
  dataframes, schema_parts = [], []
199
  for i, p in enumerate(file_paths):
200
  if p.endswith(".csv"):
 
211
  return "Please upload at least one CSV file."
212
 
213
  schema_context = "\n".join(schema_parts)
 
 
214
  prompt_for_code = redacted_in if (PHI_MODE and not ALLOW_EXTERNAL_PHI) else safe_in
215
 
216
  yield_update("""```
 
237
  yield_update("""```
238
  ✍️ Synthesizing final comprehensive report...
239
  ```""")
 
240
  writer_input = redacted_in if (PHI_MODE and not ALLOW_EXTERNAL_PHI) else safe_in
241
  final_report = _generate_final_report(writer_input, raw_data_output)
242
  return _sanitize_text(final_report)
243
  else:
 
244
  chat_input = redacted_in if (PHI_MODE and not ALLOW_EXTERNAL_PHI) else safe_in
245
  prompt = f"{GENERAL_CONVERSATION_PROMPT}\n\nUser: {chat_input}\nAssistant:"
246
  return _sanitize_text(cohere_chat(prompt) or "How can I help further?")
 
248
  except Exception as e:
249
  tb = traceback.format_exc()
250
  safe_log("app_error", {"err": str(e)})
 
251
  return "A critical error occurred. Please contact your administrator." if PHI_MODE else f"A critical error occurred: {e}"
252
 
253
 
254
  PRIVACY_POLICY_TEXT = load_markdown_text("privacy_policy.md")
255
  TERMS_OF_SERVICE_TEXT = load_markdown_text("terms_of_service.md")
256
 
257
+ # ---------------------- Sleek UI Layer only ----------------------
258
+
259
+ SLEEK_CSS = """
260
+ /* Full-bleed, modern look */
261
+ :root, body, #root, .gradio-container { height: 100%; }
262
+ .gradio-container { padding: 0 !important; }
263
+ .block { padding: 0 !important; }
264
+
265
+ /* Header */
266
+ .header {
267
+ padding: 20px 28px;
268
+ background: linear-gradient(135deg, #0e1726, #1d2a44 60%, #243a5e);
269
+ color: #fff;
270
+ display: flex; align-items: center; justify-content: space-between;
271
+ gap: 16px;
272
+ }
273
+ .header h1 { margin: 0; font-size: 22px; letter-spacing: 0.3px; font-weight: 600; }
274
+ .header .badge { font-size: 12px; opacity: 0.9; background:#ffffff22; padding:6px 10px; border-radius: 999px; }
275
+
276
+ /* Main layout */
277
+ .main {
278
+ display: grid;
279
+ grid-template-columns: 420px 1fr;
280
+ gap: 16px;
281
+ padding: 16px;
282
+ height: calc(100vh - 72px);
283
+ box-sizing: border-box;
284
+ }
285
+ .left, .right {
286
+ background: #0b1020;
287
+ color: #e9edf3;
288
+ border-radius: 16px;
289
+ border: 1px solid #1c2642;
290
+ }
291
+ .left { padding: 16px; display: flex; flex-direction: column; gap: 12px; }
292
+ .right { padding: 0; display: flex; flex-direction: column; }
293
+
294
+ /* Panels */
295
+ .panel-title { font-size: 14px; font-weight: 600; color: #aeb8cc; margin-bottom: 6px; }
296
+ .helper { font-size: 12px; color: #97a3bb; margin-bottom: 8px; }
297
+
298
+ /* Sticky actions */
299
+ .actions {
300
+ display: flex; gap: 8px; align-items: center; justify-content: stretch;
301
+ }
302
+ .actions .gr-button { flex: 1; }
303
+
304
+ /* Tabs full height */
305
+ .right .tabs { height: 100%; display: flex; flex-direction: column; }
306
+ .right .tabitem { flex: 1; display: flex; flex-direction: column; }
307
+ #chatbot_container { flex: 1; }
308
+ #chatbot_container .gr-chatbot { height: 100%; }
309
+
310
+ /* Tiny separators */
311
+ .hr { height: 1px; background: #16203b; margin: 10px 0; }
312
+
313
+ /* Voice hint */
314
+ .voice-hint { font-size: 12px; color:#9fb0cc; margin-top: 4px; }
315
+ """
316
 
317
+ VOICE_STT_HTML = """
318
+ <script>
319
+ let __rs_rec = null;
320
+ function rs_toggle_stt(elemId){
321
+ const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
322
+ if (!SpeechRecognition){
323
+ alert("This browser does not support Speech Recognition. Try Chrome or Edge.");
324
+ return;
325
+ }
326
+ if (__rs_rec){ __rs_rec.stop(); __rs_rec = null; return; }
327
+ __rs_rec = new SpeechRecognition();
328
+ __rs_rec.lang = "en-US";
329
+ __rs_rec.interimResults = true;
330
+ __rs_rec.continuous = true;
331
+
332
+ const box = document.querySelector(`#${elemId} textarea`);
333
+ if (!box){ alert("Prompt box not found."); return; }
334
+ let base = box.value || "";
335
+
336
+ __rs_rec.onresult = (ev) => {
337
+ let t = "";
338
+ for (let i = ev.resultIndex; i < ev.results.length; i++){
339
+ t += ev.results[i][0].transcript;
340
+ }
341
+ box.value = (base + " " + t).trim();
342
+ box.dispatchEvent(new Event("input", { bubbles: true }));
343
+ };
344
+ __rs_rec.onend = () => { __rs_rec = null; };
345
+ __rs_rec.start();
346
+ }
347
+ </script>
348
+ """
349
 
350
+ with gr.Blocks(theme=gr.themes.Soft(), css=SLEEK_CSS, fill_width=True) as demo:
351
+ # Header
352
+ with gr.Row(elem_classes=["header"]):
353
+ gr.Markdown("<h1>Universal AI Data Analyst</h1>")
354
+ pill = "PHI Mode ON · history off" if (PHI_MODE and not PERSIST_HISTORY) else \
355
+ "PHI Mode ON" if PHI_MODE else "PHI Mode OFF"
356
+ gr.Markdown(f"<span class='badge'>{pill}</span>")
357
+
358
+ # Main
359
+ with gr.Row(elem_classes=["main"]):
360
+ # Left panel
361
+ with gr.Column(elem_classes=["left"]):
362
+ gr.Markdown("<div class='panel-title'>New Assessment</div>")
363
+ gr.Markdown("<div class='helper'>Upload CSVs for analysis, or enter a prompt. Voice works in modern browsers.</div>")
 
 
 
 
 
364
  files_input = gr.Files(
365
  label="Upload Data Files (.csv)",
366
  file_count="multiple",
 
368
  file_types=[".csv"],
369
  )
370
  prompt_input = gr.Textbox(
371
+ label="Prompt",
372
+ placeholder="Paste your scenario or question here...",
373
+ lines=12,
374
+ elem_id="prompt_box",
375
+ autofocus=True,
376
  )
377
+
378
+ with gr.Row(elem_classes=["actions"]):
379
+ send_btn = gr.Button("▶️ Run Analysis", variant="primary")
380
+ clear_btn = gr.Button("🧹 Clear")
381
+ voice_btn = gr.Button("🎙️ Voice")
382
+
383
+ gr.Markdown("<div class='voice-hint'>Click Voice to start/stop dictation into the prompt box.</div>")
384
+ ping_btn = gr.Button("🔌 Ping Cohere")
385
  ping_out = gr.Markdown()
386
+
387
+ gr.Markdown("<div class='hr'></div>")
388
+ if PHI_MODE:
389
+ gr.Markdown(
390
+ "⚠️ **PHI Mode:** History persistence is disabled by default. Avoid unnecessary identifiers.",
391
+ )
392
+
393
+ with gr.Accordion("Privacy & Terms", open=False):
394
+ PRIVACY_POLICY_TEXT = load_markdown_text("privacy_policy.md")
395
+ TERMS_OF_SERVICE_TEXT = load_markdown_text("terms_of_service.md")
396
+ gr.Markdown(PRIVACY_POLICY_TEXT)
397
+ gr.Markdown("<div class='hr'></div>")
398
+ gr.Markdown(TERMS_OF_SERVICE_TEXT)
399
+
400
+ # Right panel
401
+ with gr.Column(elem_classes=["right"]):
402
+ with gr.Tabs(elem_classes=["tabs"]):
403
+ with gr.TabItem("Current Assessment", id=0, elem_classes=["tabitem"]):
404
+ with gr.Column(elem_id="chatbot_container"):
405
+ chat_history_output = gr.Chatbot(
406
+ label="Analysis Output", type="messages"
407
+ )
408
+ with gr.TabItem("Assessment History", id=1, elem_classes=["tabitem"]):
409
+ gr.Markdown("### Review Past Assessments")
410
  history_dropdown = gr.Dropdown(
411
  label="Select an assessment to review", choices=[]
412
  )
413
  history_display = gr.Markdown(label="Selected Assessment Details")
414
 
415
+ # Inject STT helper
416
+ gr.HTML(VOICE_STT_HTML)
 
 
 
 
417
 
418
+ # --------- Logic (unchanged analysis flow) ----------
419
 
420
  def run_analysis_wrapper(prompt, files, chat_history_list, history_state_list):
421
  if not prompt:
 
423
  yield chat_history_list, history_state_list, gr.update()
424
  return
425
 
 
426
  chat_with_user_msg = _append_msg(chat_history_list, "user", prompt)
427
 
 
428
  def dummy_update(message: str):
429
  pass
430
 
 
431
  thinking_message = _append_msg(
432
  chat_with_user_msg,
433
  "assistant",
 
437
  )
438
  yield thinking_message, history_state_list, gr.update()
439
 
 
440
  ai_response_text = handle(prompt, files, dummy_update)
441
 
 
442
  final_chat = _append_msg(chat_with_user_msg, "assistant", ai_response_text)
443
  timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
444
 
 
445
  file_names: List[str] = []
446
  if files:
447
  file_names = [
448
  os.path.basename(f.name if hasattr(f, "name") else f) for f in files
449
  ]
450
 
 
451
  new_entry = {
452
  "id": timestamp,
453
  "prompt": prompt,
 
456
  "chat_history": final_chat,
457
  }
458
 
459
+ # Respect PHI history rules exactly as before
460
  if PERSIST_HISTORY and (not PHI_MODE or (PHI_MODE and HISTORY_TTL_DAYS > 0)):
461
  updated_history: List[Dict[str, Any]] = (history_state_list or []) + [new_entry]
462
  else:
 
508
  {chat_md}
509
  """
510
 
511
+ # Wire events
512
  send_btn.click(
513
  run_analysis_wrapper,
514
+ inputs=[prompt_input, files_input, chat_history_output, []], # local state
515
+ outputs=[chat_history_output, [], history_dropdown],
516
  )
517
+ # Use a hidden State for history to avoid changing logic; or substitute your existing State variable.
518
+ # If you want to persist in-memory between runs, replace [] with a gr.State([]) you manage outside.
519
+
520
  history_dropdown.change(
521
+ view_history, inputs=[history_dropdown, []], outputs=[history_display]
522
  )
523
+
524
  clear_btn.click(
525
+ lambda: (None, None, []),
526
  outputs=[prompt_input, files_input, chat_history_output],
527
  )
528
+
529
  ping_btn.click(ping_cohere, outputs=[ping_out])
530
+
531
+ # Voice button (client-side only)
532
+ voice_btn.click(
533
+ None, [], [], js="rs_toggle_stt('prompt_box')"
534
+ )
535
 
536
  if __name__ == "__main__":
537
  if not os.getenv("COHERE_API_KEY"):