SarahXia0405 committed on
Commit
ddd286e
·
verified ·
1 Parent(s): 1cf51d5

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +78 -383
app.py CHANGED
@@ -1,10 +1,9 @@
 
1
  import os
2
  import time
3
- import json
4
-
5
  import base64
6
  from collections import defaultdict
7
- from typing import List, Dict
8
 
9
  import gradio as gr
10
  from langsmith import Client # LangSmith 客户端
@@ -19,12 +18,9 @@ from clare_core import (
19
  update_weaknesses_from_message,
20
  update_cognitive_state_from_message,
21
  render_session_status,
22
- find_similar_past_question,
23
  detect_language,
24
  chat_with_clare,
25
  export_conversation,
26
- generate_quiz_from_history,
27
- get_empty_input_prompt,
28
  summarize_conversation,
29
  )
30
  from rag_engine import (
@@ -181,10 +177,10 @@ No. Clare supplements your learning by providing on-demand guidance.
181
 
182
  **Q: What languages does Clare support?**
183
  Currently: English & 简体中文.
184
- """
185
  }
186
 
187
- # ================== CSS 样式表 ==================
188
  CUSTOM_CSS = """
189
  /* --- Main Header --- */
190
  .header-container { padding: 10px 20px; background-color: #ffffff; border-bottom: 2px solid #f3f4f6; margin-bottom: 15px; display: flex; align-items: center; }
@@ -228,8 +224,6 @@ CUSTOM_CSS = """
228
 
229
  /* Action Buttons */
230
  .action-btn { font-weight: bold !important; font-size: 0.9rem !important; position: relative; overflow: visible !important; }
231
- .action-btn:hover::before { content: "See User Guide for details"; position: absolute; bottom: 110%; left: 50%; transform: translateX(-50%); background-color: #333; color: #fff; padding: 5px 10px; border-radius: 5px; font-size: 12px; white-space: nowrap; z-index: 1000; pointer-events: none; opacity: 0; animation: fadeIn 0.2s forwards; }
232
- .action-btn:hover::after { content: ""; position: absolute; bottom: 100%; left: 50%; margin-left: -5px; border-width: 5px; border-style: solid; border-color: #333 transparent transparent transparent; opacity: 0; animation: fadeIn 0.2s forwards; }
233
 
234
  /* Tooltips & Memory Line */
235
  .html-tooltip { border-bottom: 1px dashed #999; cursor: help; position: relative; }
@@ -239,8 +233,6 @@ CUSTOM_CSS = """
239
  /* Results Box Style */
240
  .result-box { border: 1px solid #e5e7eb; background: #ffffff; padding: 10px; border-radius: 8px; height: 100%; }
241
  .result-box .prose { font-size: 0.9rem; }
242
-
243
- @keyframes fadeIn { to { opacity: 1; } }
244
  """
245
 
246
  # ========== Preload Module 10 PDF ==========
@@ -252,12 +244,8 @@ preloaded_chunks: List[Dict] = []
252
 
253
  if os.path.exists(MODULE10_PATH):
254
  try:
255
- preloaded_topics = extract_course_topics_from_file(
256
- MODULE10_PATH, MODULE10_DOC_TYPE
257
- )
258
- preloaded_chunks = build_rag_chunks_from_file(
259
- MODULE10_PATH, MODULE10_DOC_TYPE
260
- )
261
  print("Module 10 PDF preloaded successfully.")
262
  except Exception as e:
263
  print("Module 10 preload failed:", e)
@@ -278,7 +266,6 @@ def log_event(data: Dict):
278
  "question": data.get("question"),
279
  "student_id": data.get("student_id"),
280
  }
281
- # ✅ event_type 等字段作为 metadata,这样在 Dataset 列表里能直接看到 / 过滤
282
  metadata = {k: v for k, v in data.items() if k not in ("question", "answer")}
283
 
284
  ls_client.create_example(
@@ -292,9 +279,7 @@ def log_event(data: Dict):
292
 
293
 
294
  # ===== Reference Formatting Helper =====
295
- def format_references(
296
- rag_chunks: List[Dict], max_files: int = 2, max_sections_per_file: int = 3
297
- ) -> str:
298
  if not rag_chunks:
299
  return ""
300
 
@@ -320,28 +305,18 @@ def format_references(
320
  else:
321
  lines.append(f"- *{file_name}*")
322
 
323
- if len(lines) == 1:
324
- return ""
325
- return "\n".join(lines)
326
 
327
 
328
  def is_academic_query(message: str) -> bool:
329
  if not message:
330
  return False
331
-
332
  m = message.strip().lower()
333
  if not m:
334
  return False
335
-
336
  m = " ".join(m.split())
337
 
338
- smalltalk_tokens = {
339
- "hi", "hello", "hey", "yo",
340
- "thanks", "thank", "thank you",
341
- "ok", "okay",
342
- "bye", "goodbye", "see you",
343
- "haha", "lol"
344
- }
345
  tokens = m.split()
346
 
347
  if "?" not in m and all(t in smalltalk_tokens for t in tokens):
@@ -372,11 +347,15 @@ def is_academic_query(message: str) -> bool:
372
  return True
373
 
374
 
375
- # ================== Gradio App ==================
376
- with gr.Blocks(
377
- title="Clare Hanbridge AI Teaching Assistant", css=CUSTOM_CSS
378
- ) as demo:
 
379
 
 
 
 
380
  # 全局状态
381
  course_outline_state = gr.State(preloaded_topics or DEFAULT_COURSE_TOPICS)
382
  weakness_state = gr.State([])
@@ -389,12 +368,8 @@ with gr.Blocks(
389
  user_name_state = gr.State("")
390
  user_id_state = gr.State("")
391
 
392
- # ✅ 当前“最近一次回答”是否已经被点赞/点踩(只允许一次)
393
  feedback_used_state = gr.State(False)
394
 
395
- # ✅ 性能输出
396
- perf_state = gr.State({})
397
-
398
  # --- Header ---
399
  with gr.Row(elem_classes="header-container"):
400
  with gr.Column(scale=3):
@@ -417,20 +392,12 @@ with gr.Blocks(
417
 
418
  # --- Main Layout ---
419
  with gr.Row():
420
-
421
  # === Left Sidebar ===
422
  with gr.Column(scale=1, min_width=200):
423
- clear_btn = gr.Button(
424
- "Reset Conversation", variant="stop", interactive=False
425
- )
426
 
427
  gr.Markdown("### Model Settings")
428
- model_name = gr.Textbox(
429
- label="Model",
430
- value="gpt-4.1-mini",
431
- interactive=False,
432
- lines=1,
433
- )
434
  language_preference = gr.Radio(
435
  choices=["Auto", "English", "简体中文"],
436
  value="Auto",
@@ -446,64 +413,26 @@ with gr.Blocks(
446
  interactive=False,
447
  )
448
 
449
- with gr.Accordion(
450
- "User Guide", open=True, elem_classes="main-user-guide"
451
- ):
452
- with gr.Accordion(
453
- "Getting Started",
454
- open=False,
455
- elem_classes="clean-accordion",
456
- ):
457
  gr.Markdown(USER_GUIDE_SECTIONS["getting_started"])
458
- with gr.Accordion(
459
- "Mode Definition",
460
- open=False,
461
- elem_classes="clean-accordion",
462
- ):
463
  gr.Markdown(USER_GUIDE_SECTIONS["mode_definition"])
464
- with gr.Accordion(
465
- "How Clare Works",
466
- open=False,
467
- elem_classes="clean-accordion",
468
- ):
469
  gr.Markdown(USER_GUIDE_SECTIONS["how_clare_works"])
470
- with gr.Accordion(
471
- "What is Memory Line",
472
- open=False,
473
- elem_classes="clean-accordion",
474
- ):
475
  gr.Markdown(USER_GUIDE_SECTIONS["memory_line"])
476
- with gr.Accordion(
477
- "Learning Progress Report",
478
- open=False,
479
- elem_classes="clean-accordion",
480
- ):
481
  gr.Markdown(USER_GUIDE_SECTIONS["learning_progress"])
482
- with gr.Accordion(
483
- "How Clare Uses Your Files",
484
- open=False,
485
- elem_classes="clean-accordion",
486
- ):
487
  gr.Markdown(USER_GUIDE_SECTIONS["how_files"])
488
- with gr.Accordion(
489
- "Micro-Quiz", open=False, elem_classes="clean-accordion"
490
- ):
491
  gr.Markdown(USER_GUIDE_SECTIONS["micro_quiz"])
492
- with gr.Accordion(
493
- "Summarization",
494
- open=False,
495
- elem_classes="clean-accordion",
496
- ):
497
  gr.Markdown(USER_GUIDE_SECTIONS["summarization"])
498
- with gr.Accordion(
499
- "Export Conversation",
500
- open=False,
501
- elem_classes="clean-accordion",
502
- ):
503
  gr.Markdown(USER_GUIDE_SECTIONS["export_conversation"])
504
- with gr.Accordion(
505
- "FAQ", open=False, elem_classes="clean-accordion"
506
- ):
507
  gr.Markdown(USER_GUIDE_SECTIONS["faq"])
508
 
509
  gr.Markdown("---")
@@ -519,7 +448,6 @@ with gr.Blocks(
519
 
520
  # === Center Main ===
521
  with gr.Column(scale=3):
522
-
523
  gr.Markdown(
524
  """
525
  <div style="background-color:#f9fafb; padding:10px; border-radius:5px; margin-top:10px; font-size:0.9em; color:#555;">
@@ -538,18 +466,10 @@ with gr.Blocks(
538
  type="tuples",
539
  )
540
 
541
- # ✅ profiling output JSON (shows your TTFT / tokens/sec etc.)
542
- perf_output = gr.JSON(label="Output", value={})
543
-
544
- # Rating bar (last answer)
545
  gr.Markdown("#### Rate Clare’s last answer")
546
  with gr.Row():
547
- thumb_up_btn = gr.Button(
548
- "👍 Helpful", size="sm", interactive=False
549
- )
550
- thumb_down_btn = gr.Button(
551
- "👎 Not helpful", size="sm", interactive=False
552
- )
553
 
554
  feedback_toggle_btn = gr.Button(
555
  "Give detailed feedback", size="sm", variant="secondary", interactive=False
@@ -591,12 +511,7 @@ with gr.Blocks(
591
  interactive=False,
592
  )
593
  gr.HTML("<div style='height:5px'></div>")
594
- docs_btn = gr.Button(
595
- "📂 Loaded Docs",
596
- size="sm",
597
- variant="secondary",
598
- interactive=False,
599
- )
600
  with gr.Column(scale=2):
601
  with gr.Group(elem_classes="memory-line-box"):
602
  gr.HTML(
@@ -615,12 +530,7 @@ with gr.Blocks(
615
  </div>
616
  """
617
  )
618
- review_btn = gr.Button(
619
- "Review Now",
620
- size="sm",
621
- variant="primary",
622
- interactive=False,
623
- )
624
  session_status = gr.Markdown(visible=False)
625
 
626
  # === Right Sidebar ===
@@ -629,37 +539,21 @@ with gr.Blocks(
629
  gr.HTML(f"<img src='{image_to_base64(CLARE_READING_PATH)}'>")
630
 
631
  with gr.Group(visible=True) as login_state_1:
632
- login_start_btn = gr.Button(
633
- "Student Login", elem_classes="login-main-btn"
634
- )
635
 
636
  with gr.Group(visible=False) as login_state_2:
637
- name_input = gr.Textbox(
638
- label="Student Name", placeholder="Name", container=True
639
- )
640
- id_input = gr.Textbox(
641
- label="Email/ID", placeholder="ID", container=True
642
- )
643
- login_confirm_btn = gr.Button(
644
- "Enter", variant="primary", size="sm"
645
- )
646
 
647
  with gr.Group(visible=False) as login_state_3:
648
  student_info_html = gr.HTML()
649
- logout_btn = gr.Button(
650
- "Log out", elem_classes="logout-btn", size="sm"
651
- )
652
 
653
  gr.Markdown("### Actions")
654
- export_btn = gr.Button(
655
- "Export Conversation", size="sm", elem_classes="action-btn", interactive=False
656
- )
657
- quiz_btn = gr.Button(
658
- "Let's Try (Micro-Quiz)", size="sm", elem_classes="action-btn", interactive=False
659
- )
660
- summary_btn = gr.Button(
661
- "Summarization", size="sm", elem_classes="action-btn", interactive=False
662
- )
663
 
664
  gr.Markdown("### Results")
665
  with gr.Group(elem_classes="result-box"):
@@ -669,7 +563,6 @@ with gr.Blocks(
669
  )
670
 
671
  # ================== Login Flow ==================
672
-
673
  def show_inputs():
674
  return {
675
  login_state_1: gr.update(visible=False),
@@ -677,9 +570,7 @@ with gr.Blocks(
677
  login_state_3: gr.update(visible=False),
678
  }
679
 
680
- login_start_btn.click(
681
- show_inputs, outputs=[login_state_1, login_state_2, login_state_3]
682
- )
683
 
684
  def confirm_login(name, id_val):
685
  if not name or not id_val:
@@ -710,8 +601,6 @@ with gr.Blocks(
710
  feedback_toggle_btn: gr.update(interactive=False),
711
  feedback_text: gr.update(visible=False, value=""),
712
  feedback_submit_btn: gr.update(interactive=False, visible=False),
713
- perf_state: {}, # ✅
714
- perf_output: gr.update(value={}), # ✅
715
  }
716
 
717
  info_html = f"""
@@ -743,14 +632,11 @@ with gr.Blocks(
743
  learning_mode: gr.update(interactive=True),
744
  model_name: gr.update(interactive=False),
745
  docs_btn: gr.update(interactive=True),
746
- # ✅ 登录后仍然不允许点赞点踩,必须“有回答”才解锁
747
  thumb_up_btn: gr.update(interactive=False, value="👍 Helpful"),
748
  thumb_down_btn: gr.update(interactive=False, value="👎 Not helpful"),
749
  feedback_toggle_btn: gr.update(interactive=True),
750
  feedback_text: gr.update(visible=False, value=""),
751
  feedback_submit_btn: gr.update(interactive=True, visible=False),
752
- perf_state: {}, # ✅
753
- perf_output: gr.update(value={}), # ✅
754
  }
755
 
756
  login_confirm_btn.click(
@@ -781,8 +667,6 @@ with gr.Blocks(
781
  feedback_toggle_btn,
782
  feedback_text,
783
  feedback_submit_btn,
784
- perf_state, # ✅
785
- perf_output, # ✅
786
  ],
787
  )
788
 
@@ -817,8 +701,6 @@ with gr.Blocks(
817
  feedback_toggle_btn: gr.update(interactive=False),
818
  feedback_text: gr.update(visible=False, value=""),
819
  feedback_submit_btn: gr.update(interactive=False, visible=False),
820
- perf_state: {}, # ✅
821
- perf_output: gr.update(value={}), # ✅
822
  }
823
 
824
  logout_btn.click(
@@ -849,13 +731,10 @@ with gr.Blocks(
849
  feedback_toggle_btn,
850
  feedback_text,
851
  feedback_submit_btn,
852
- perf_state, # ✅
853
- perf_output, # ✅
854
  ],
855
  )
856
 
857
  # ================== Main Logic ==================
858
-
859
  def update_course_and_rag(file, doc_type_val):
860
  local_topics = preloaded_topics or []
861
  local_chunks = preloaded_chunks or []
@@ -914,15 +793,6 @@ with gr.Blocks(
914
  user_id_val,
915
  feedback_used,
916
  ):
917
- # perf container
918
- t0 = time.perf_counter()
919
- marks = {"start": 0}
920
- segs = {}
921
-
922
- def mark(name: str):
923
- marks[name] = (time.perf_counter() - t0) * 1000.0
924
-
925
- # 未登录:不解锁按钮
926
  if not user_id_val:
927
  out_msg = (
928
  "🔒 Please log in with your Student Name and Email/ID on the right "
@@ -934,7 +804,6 @@ with gr.Blocks(
934
  weaknesses or [],
935
  cognitive_state or {"confusion": 0, "mastery": 0},
936
  )
937
- perf = {"marks_ms": marks, "segments_ms": segs, "total_ms": (time.perf_counter() - t0) * 1000.0}
938
  return (
939
  "",
940
  new_history,
@@ -946,23 +815,17 @@ with gr.Blocks(
946
  feedback_used,
947
  gr.update(interactive=False, value="👍 Helpful"),
948
  gr.update(interactive=False, value="👎 Not helpful"),
949
- perf, # ✅ perf_state
950
- perf, # ✅ perf_output
951
  )
952
 
953
- # language detect
954
- resolved_lang = detect_language(message or "", lang_pref)
955
- mark("language_detect_done")
956
- segs["language_detect_done"] = marks["language_detect_done"] - marks["start"]
957
 
958
- # 空输入:不改变按钮状态
959
  if not message or not message.strip():
960
  new_status = render_session_status(
961
  mode_val or "Concept Explainer",
962
  weaknesses or [],
963
  cognitive_state or {"confusion": 0, "mastery": 0},
964
  )
965
- perf = {"marks_ms": marks, "segments_ms": segs, "total_ms": (time.perf_counter() - t0) * 1000.0}
966
  return (
967
  "",
968
  chat_history,
@@ -974,36 +837,18 @@ with gr.Blocks(
974
  feedback_used,
975
  gr.update(),
976
  gr.update(),
977
- perf, # ✅
978
- perf, # ✅
979
  )
980
 
981
- # weakness/cognitive updates
982
- t_w0 = time.perf_counter()
983
  weaknesses = update_weaknesses_from_message(message, weaknesses or [])
984
- mark("weakness_update_done")
985
- segs["weakness_update_done"] = (time.perf_counter() - t_w0) * 1000.0
986
-
987
- t_c0 = time.perf_counter()
988
  cognitive_state = update_cognitive_state_from_message(message, cognitive_state)
989
- mark("cognitive_update_done")
990
- segs["cognitive_update_done"] = (time.perf_counter() - t_c0) * 1000.0
991
 
992
- # RAG
993
- t_r0 = time.perf_counter()
994
  if is_academic_query(message):
995
- rag_context_text, rag_used_chunks = retrieve_relevant_chunks(
996
- message, rag_chunks or []
997
- )
998
  else:
999
  rag_context_text, rag_used_chunks = "", []
1000
- mark("rag_retrieve_done")
1001
- segs["rag_retrieve_done"] = (time.perf_counter() - t_r0) * 1000.0
1002
 
1003
- # LLM (chat_with_clare must return 3 values)
1004
  start_ts = time.time()
1005
-
1006
- answer, new_history, llm_stats = chat_with_clare(
1007
  message=message,
1008
  history=chat_history,
1009
  model_name=model_name_val,
@@ -1015,43 +860,10 @@ with gr.Blocks(
1015
  cognitive_state=cognitive_state,
1016
  rag_context=rag_context_text,
1017
  )
1018
-
1019
  end_ts = time.time()
1020
  latency_ms = (end_ts - start_ts) * 1000.0
1021
-
1022
- # === optional: print profiled metrics locally ===
1023
- try:
1024
- print("[LLM_PROFILE] " + json.dumps(llm_stats, ensure_ascii=False))
1025
- except Exception:
1026
- pass
1027
-
1028
- # merge llm_stats into perf
1029
- perf = {
1030
- "marks_ms": marks,
1031
- "segments_ms": segs,
1032
- "total_ms": marks.get("llm_done", (time.perf_counter() - t0) * 1000.0),
1033
- }
1034
-
1035
- # llm_stats may contain marks_ms/segments_ms/llm_profile
1036
- if isinstance(llm_stats, dict):
1037
- if "llm_profile" in llm_stats:
1038
- perf.update({"llm_profile": llm_stats.get("llm_profile", {})})
1039
- # merge marks/segments from llm_stats if present
1040
- ms2 = llm_stats.get("marks_ms") if isinstance(llm_stats.get("marks_ms"), dict) else {}
1041
- sg2 = llm_stats.get("segments_ms") if isinstance(llm_stats.get("segments_ms"), dict) else {}
1042
- for k, v in ms2.items():
1043
- if v is not None:
1044
- perf["marks_ms"][k] = v
1045
- for k, v in sg2.items():
1046
- if v is not None:
1047
- perf["segments_ms"][k] = v
1048
-
1049
- # References formatting
1050
- if is_academic_query(message) and rag_used_chunks:
1051
- ref_text = format_references(rag_used_chunks)
1052
- else:
1053
- ref_text = ""
1054
 
 
1055
  if ref_text and new_history:
1056
  last_user, last_assistant = new_history[-1]
1057
  if "References (RAG context used):" not in (last_assistant or ""):
@@ -1059,7 +871,6 @@ with gr.Blocks(
1059
  new_history[-1] = [last_user, last_assistant]
1060
  answer = last_assistant
1061
 
1062
- # LangSmith event
1063
  student_id = user_id_val or "ANON"
1064
  experiment_id = "RESP_AI_W10"
1065
  try:
@@ -1068,18 +879,13 @@ with gr.Blocks(
1068
  "experiment_id": experiment_id,
1069
  "student_id": student_id,
1070
  "event_type": "chat_turn",
1071
- "timestamp": time.time(),
1072
- "latency_ms": perf["total_ms"],
1073
  "question": message,
1074
  "answer": answer,
1075
  "model_name": model_name_val,
1076
  "language": resolved_lang,
1077
  "learning_mode": mode_val,
1078
- # ✅ extra profiling fields if available
1079
- "ttft_ms": perf.get("segments_ms", {}).get("llm_ttft_ms"),
1080
- "tokens_per_sec": (perf.get("llm_profile", {}) or {}).get("tokens_per_sec_est"),
1081
- "output_tokens_est": (perf.get("llm_profile", {}) or {}).get("output_tokens_est"),
1082
- "streaming_used": (perf.get("llm_profile", {}) or {}).get("streaming_used"),
1083
  }
1084
  )
1085
  except Exception as e:
@@ -1087,7 +893,6 @@ with gr.Blocks(
1087
 
1088
  new_status = render_session_status(mode_val, weaknesses, cognitive_state)
1089
 
1090
- # ✅ 有新回答:重置 feedback_used=False,并解锁按钮(恢复文案)
1091
  return (
1092
  "",
1093
  new_history,
@@ -1099,8 +904,6 @@ with gr.Blocks(
1099
  False,
1100
  gr.update(interactive=True, value="👍 Helpful"),
1101
  gr.update(interactive=True, value="👎 Not helpful"),
1102
- perf, # ✅ perf_state
1103
- perf, # ✅ perf_output
1104
  )
1105
 
1106
  user_input.submit(
@@ -1130,8 +933,6 @@ with gr.Blocks(
1130
  feedback_used_state,
1131
  thumb_up_btn,
1132
  thumb_down_btn,
1133
- perf_state, # ✅
1134
- perf_output, # ✅
1135
  ],
1136
  )
1137
 
@@ -1148,16 +949,8 @@ with gr.Blocks(
1148
  doc_type_val,
1149
  user_id_val,
1150
  ):
1151
- t0 = time.perf_counter()
1152
- marks = {"start": 0}
1153
- segs = {}
1154
-
1155
- def mark(name: str):
1156
- marks[name] = (time.perf_counter() - t0) * 1000.0
1157
-
1158
  if not user_id_val:
1159
  gr.Info("Please log in first to start a micro-quiz.", title="Login required")
1160
- perf = {"marks_ms": marks, "segments_ms": segs, "total_ms": (time.perf_counter() - t0) * 1000.0}
1161
  return (
1162
  chat_history,
1163
  weaknesses,
@@ -1167,8 +960,6 @@ with gr.Blocks(
1167
  weaknesses or [],
1168
  cognitive_state or {"confusion": 0, "mastery": 0},
1169
  ),
1170
- perf,
1171
- perf,
1172
  )
1173
 
1174
  quiz_instruction = (
@@ -1183,8 +974,7 @@ with gr.Blocks(
1183
  "• Do NOT start a content question until I have answered 1 or 2.\n\n"
1184
  "Step 2 – After I choose the style:\n"
1185
  "• If I choose 1 (multiple-choice):\n"
1186
- " - Ask ONE multiple-choice question at a time, based on Module 10 concepts "
1187
- "(Responsible AI definition, risk types, mitigation layers, EU AI Act, etc.).\n"
1188
  " - Provide 3–4 options (A, B, C, D) and make only one option clearly correct.\n"
1189
  "• If I choose 2 (short-answer):\n"
1190
  " - Ask ONE short-answer question at a time, also based on Module 10 concepts.\n"
@@ -1198,19 +988,11 @@ with gr.Blocks(
1198
  "Do not ask any content question before I choose."
1199
  )
1200
 
1201
- resolved_lang = lang_pref
1202
- mark("language_detect_done")
1203
- segs["language_detect_done"] = marks["language_detect_done"]
1204
 
1205
- t_r0 = time.perf_counter()
1206
- quiz_ctx_text, _quiz_ctx_chunks = retrieve_relevant_chunks(
1207
- "Module 10 quiz", rag_chunks or []
1208
- )
1209
- mark("rag_retrieve_done")
1210
- segs["rag_retrieve_done"] = (time.perf_counter() - t_r0) * 1000.0
1211
-
1212
- t_llm0 = time.perf_counter()
1213
- answer, new_history, llm_stats = chat_with_clare(
1214
  message=quiz_instruction,
1215
  history=chat_history,
1216
  model_name=model_name_val,
@@ -1222,50 +1004,32 @@ with gr.Blocks(
1222
  cognitive_state=cognitive_state,
1223
  rag_context=quiz_ctx_text,
1224
  )
1225
- mark("llm_done")
1226
- segs["llm_done"] = (time.perf_counter() - t_llm0) * 1000.0
1227
-
1228
- perf = {
1229
- "marks_ms": marks,
1230
- "segments_ms": segs,
1231
- "total_ms": marks.get("llm_done", (time.perf_counter() - t0) * 1000.0),
1232
- }
1233
- if isinstance(llm_stats, dict):
1234
- if "llm_profile" in llm_stats:
1235
- perf.update({"llm_profile": llm_stats.get("llm_profile", {})})
1236
- ms2 = llm_stats.get("marks_ms") if isinstance(llm_stats.get("marks_ms"), dict) else {}
1237
- sg2 = llm_stats.get("segments_ms") if isinstance(llm_stats.get("segments_ms"), dict) else {}
1238
- for k, v in ms2.items():
1239
- if v is not None:
1240
- perf["marks_ms"][k] = v
1241
- for k, v in sg2.items():
1242
- if v is not None:
1243
- perf["segments_ms"][k] = v
1244
 
1245
  student_id = user_id_val or "ANON"
1246
  experiment_id = "RESP_AI_W10"
 
1247
  try:
1248
  log_event(
1249
  {
1250
  "experiment_id": experiment_id,
1251
  "student_id": student_id,
1252
- "event_type": "chat_turn",
1253
  "timestamp": end_ts,
1254
  "latency_ms": latency_ms,
1255
- "question": message,
1256
  "answer": answer,
1257
  "model_name": model_name_val,
1258
  "language": resolved_lang,
1259
  "learning_mode": mode_val,
1260
- "llm_stats": llm_stats,
1261
  }
1262
  )
1263
-
1264
  except Exception as e:
1265
  print("log_event error:", e)
1266
 
1267
  new_status = render_session_status(mode_val, weaknesses, cognitive_state)
1268
- return new_history, weaknesses, cognitive_state, new_status, perf, perf
1269
 
1270
  quiz_btn.click(
1271
  start_micro_quiz,
@@ -1281,48 +1045,27 @@ with gr.Blocks(
1281
  doc_type,
1282
  user_id_state,
1283
  ],
1284
- [chatbot, weakness_state, cognitive_state_state, session_status, perf_state, perf_output],
1285
  )
1286
 
1287
- # ===== Feedback Handlers (thumb + detailed) =====
1288
  def show_feedback_box():
1289
  return {
1290
  feedback_text: gr.update(visible=True),
1291
  feedback_submit_btn: gr.update(visible=True),
1292
  }
1293
 
1294
- feedback_toggle_btn.click(
1295
- show_feedback_box,
1296
- None,
1297
- [feedback_text, feedback_submit_btn],
1298
- )
1299
 
1300
- def send_thumb_up(
1301
- last_q,
1302
- last_a,
1303
- user_id_val,
1304
- mode_val,
1305
- model_name_val,
1306
- lang_pref,
1307
- feedback_used,
1308
- ):
1309
- # 没有可评价回答:保持禁用
1310
  if not last_q and not last_a:
1311
- print("No last QA to log for thumbs_up.")
1312
  return (
1313
  feedback_used,
1314
  gr.update(interactive=False, value="👍 Helpful"),
1315
  gr.update(interactive=False, value="👎 Not helpful"),
1316
  )
1317
-
1318
- # 已经反馈过:直接禁用
1319
  if feedback_used:
1320
- print("Feedback already sent for this answer (thumb_up).")
1321
- return (
1322
- feedback_used,
1323
- gr.update(interactive=False),
1324
- gr.update(interactive=False),
1325
- )
1326
 
1327
  try:
1328
  log_event(
@@ -1334,45 +1077,24 @@ with gr.Blocks(
1334
  "question": last_q,
1335
  "answer": last_a,
1336
  "model_name": model_name_val,
1337
- "language": lang_pref,
1338
  "learning_mode": mode_val,
1339
  }
1340
  )
1341
- print("[Feedback] thumbs_up logged to LangSmith.")
1342
  except Exception as e:
1343
  print("thumb_up log error:", e)
1344
 
1345
- # 点完一次:置 True + 按钮置灰 + 文案 sent
1346
- return (
1347
- True,
1348
- gr.update(interactive=False, value="👍 Helpful (sent)"),
1349
- gr.update(interactive=False),
1350
- )
1351
 
1352
- def send_thumb_down(
1353
- last_q,
1354
- last_a,
1355
- user_id_val,
1356
- mode_val,
1357
- model_name_val,
1358
- lang_pref,
1359
- feedback_used,
1360
- ):
1361
  if not last_q and not last_a:
1362
- print("No last QA to log for thumbs_down.")
1363
  return (
1364
  feedback_used,
1365
  gr.update(interactive=False, value="👍 Helpful"),
1366
  gr.update(interactive=False, value="👎 Not helpful"),
1367
  )
1368
-
1369
  if feedback_used:
1370
- print("Feedback already sent for this answer (thumb_down).")
1371
- return (
1372
- feedback_used,
1373
- gr.update(interactive=False),
1374
- gr.update(interactive=False),
1375
- )
1376
 
1377
  try:
1378
  log_event(
@@ -1384,19 +1106,14 @@ with gr.Blocks(
1384
  "question": last_q,
1385
  "answer": last_a,
1386
  "model_name": model_name_val,
1387
- "language": lang_pref,
1388
  "learning_mode": mode_val,
1389
  }
1390
  )
1391
- print("[Feedback] thumbs_down logged to LangSmith.")
1392
  except Exception as e:
1393
  print("thumb_down log error:", e)
1394
 
1395
- return (
1396
- True,
1397
- gr.update(interactive=False),
1398
- gr.update(interactive=False, value="👎 Not helpful (sent)"),
1399
- )
1400
 
1401
  thumb_up_btn.click(
1402
  send_thumb_up,
@@ -1426,14 +1143,9 @@ with gr.Blocks(
1426
  [feedback_used_state, thumb_up_btn, thumb_down_btn],
1427
  )
1428
 
1429
- def submit_detailed_feedback(
1430
- text, last_q, last_a, user_id_val, mode_val, model_name_val, lang_pref
1431
- ):
1432
  if not text or not text.strip():
1433
- return gr.update(
1434
- value="",
1435
- placeholder="Please enter some feedback before submitting.",
1436
- )
1437
 
1438
  try:
1439
  log_event(
@@ -1446,18 +1158,14 @@ with gr.Blocks(
1446
  "answer": last_a,
1447
  "feedback_text": text.strip(),
1448
  "model_name": model_name_val,
1449
- "language": lang_pref,
1450
  "learning_mode": mode_val,
1451
  }
1452
  )
1453
- print("[Feedback] detailed_feedback logged to LangSmith.")
1454
  except Exception as e:
1455
  print("detailed_feedback log error:", e)
1456
 
1457
- return gr.update(
1458
- value="",
1459
- placeholder="Thanks! Your feedback has been recorded.",
1460
- )
1461
 
1462
  feedback_submit_btn.click(
1463
  submit_detailed_feedback,
@@ -1481,17 +1189,8 @@ with gr.Blocks(
1481
  )
1482
 
1483
  summary_btn.click(
1484
- lambda h, c, w, cog, m, l: summarize_conversation(
1485
- h, c, w, cog, m, l
1486
- ),
1487
- [
1488
- chatbot,
1489
- course_outline_state,
1490
- weakness_state,
1491
- cognitive_state_state,
1492
- model_name,
1493
- language_preference,
1494
- ],
1495
  [result_display],
1496
  )
1497
 
@@ -1511,8 +1210,6 @@ with gr.Blocks(
1511
  False,
1512
  gr.update(interactive=False, value="👍 Helpful"),
1513
  gr.update(interactive=False, value="👎 Not helpful"),
1514
- {},
1515
- {},
1516
  )
1517
 
1518
  clear_btn.click(
@@ -1530,8 +1227,6 @@ with gr.Blocks(
1530
  feedback_used_state,
1531
  thumb_up_btn,
1532
  thumb_down_btn,
1533
- perf_state,
1534
- perf_output,
1535
  ],
1536
  queue=False,
1537
  )
 
1
+ # app.py
2
  import os
3
  import time
 
 
4
  import base64
5
  from collections import defaultdict
6
+ from typing import List, Dict, Tuple, Optional
7
 
8
  import gradio as gr
9
  from langsmith import Client # LangSmith 客户端
 
18
  update_weaknesses_from_message,
19
  update_cognitive_state_from_message,
20
  render_session_status,
 
21
  detect_language,
22
  chat_with_clare,
23
  export_conversation,
 
 
24
  summarize_conversation,
25
  )
26
  from rag_engine import (
 
177
 
178
  **Q: What languages does Clare support?**
179
  Currently: English & 简体中文.
180
+ """,
181
  }
182
 
183
+ # ================== CSS ==================
184
  CUSTOM_CSS = """
185
  /* --- Main Header --- */
186
  .header-container { padding: 10px 20px; background-color: #ffffff; border-bottom: 2px solid #f3f4f6; margin-bottom: 15px; display: flex; align-items: center; }
 
224
 
225
  /* Action Buttons */
226
  .action-btn { font-weight: bold !important; font-size: 0.9rem !important; position: relative; overflow: visible !important; }
 
 
227
 
228
  /* Tooltips & Memory Line */
229
  .html-tooltip { border-bottom: 1px dashed #999; cursor: help; position: relative; }
 
233
  /* Results Box Style */
234
  .result-box { border: 1px solid #e5e7eb; background: #ffffff; padding: 10px; border-radius: 8px; height: 100%; }
235
  .result-box .prose { font-size: 0.9rem; }
 
 
236
  """
237
 
238
  # ========== Preload Module 10 PDF ==========
 
244
 
245
  if os.path.exists(MODULE10_PATH):
246
  try:
247
+ preloaded_topics = extract_course_topics_from_file(MODULE10_PATH, MODULE10_DOC_TYPE)
248
+ preloaded_chunks = build_rag_chunks_from_file(MODULE10_PATH, MODULE10_DOC_TYPE)
 
 
 
 
249
  print("Module 10 PDF preloaded successfully.")
250
  except Exception as e:
251
  print("Module 10 preload failed:", e)
 
266
  "question": data.get("question"),
267
  "student_id": data.get("student_id"),
268
  }
 
269
  metadata = {k: v for k, v in data.items() if k not in ("question", "answer")}
270
 
271
  ls_client.create_example(
 
279
 
280
 
281
  # ===== Reference Formatting Helper =====
282
+ def format_references(rag_chunks: List[Dict], max_files: int = 2, max_sections_per_file: int = 3) -> str:
 
 
283
  if not rag_chunks:
284
  return ""
285
 
 
305
  else:
306
  lines.append(f"- *{file_name}*")
307
 
308
+ return "\n".join(lines) if len(lines) > 1 else ""
 
 
309
 
310
 
311
  def is_academic_query(message: str) -> bool:
312
  if not message:
313
  return False
 
314
  m = message.strip().lower()
315
  if not m:
316
  return False
 
317
  m = " ".join(m.split())
318
 
319
+ smalltalk_tokens = {"hi", "hello", "hey", "yo", "thanks", "thank", "thank you", "ok", "okay", "bye", "goodbye", "see you", "haha", "lol"}
 
 
 
 
 
 
320
  tokens = m.split()
321
 
322
  if "?" not in m and all(t in smalltalk_tokens for t in tokens):
 
347
  return True
348
 
349
 
350
+ def normalize_lang_pref(lang_pref: str) -> str:
351
+ # UI uses "简体中文", core uses "中文"
352
+ if lang_pref == "简体中文":
353
+ return "中文"
354
+ return lang_pref
355
 
356
+
357
+ # ================== Gradio App ==================
358
+ with gr.Blocks(title="Clare — Hanbridge AI Teaching Assistant", css=CUSTOM_CSS) as demo:
359
  # 全局状态
360
  course_outline_state = gr.State(preloaded_topics or DEFAULT_COURSE_TOPICS)
361
  weakness_state = gr.State([])
 
368
  user_name_state = gr.State("")
369
  user_id_state = gr.State("")
370
 
 
371
  feedback_used_state = gr.State(False)
372
 
 
 
 
373
  # --- Header ---
374
  with gr.Row(elem_classes="header-container"):
375
  with gr.Column(scale=3):
 
392
 
393
  # --- Main Layout ---
394
  with gr.Row():
 
395
  # === Left Sidebar ===
396
  with gr.Column(scale=1, min_width=200):
397
+ clear_btn = gr.Button("Reset Conversation", variant="stop", interactive=False)
 
 
398
 
399
  gr.Markdown("### Model Settings")
400
+ model_name = gr.Textbox(label="Model", value="gpt-4.1-mini", interactive=False, lines=1)
 
 
 
 
 
401
  language_preference = gr.Radio(
402
  choices=["Auto", "English", "简体中文"],
403
  value="Auto",
 
413
  interactive=False,
414
  )
415
 
416
+ with gr.Accordion("User Guide", open=True, elem_classes="main-user-guide"):
417
+ with gr.Accordion("Getting Started", open=False, elem_classes="clean-accordion"):
 
 
 
 
 
 
418
  gr.Markdown(USER_GUIDE_SECTIONS["getting_started"])
419
+ with gr.Accordion("Mode Definition", open=False, elem_classes="clean-accordion"):
 
 
 
 
420
  gr.Markdown(USER_GUIDE_SECTIONS["mode_definition"])
421
+ with gr.Accordion("How Clare Works", open=False, elem_classes="clean-accordion"):
 
 
 
 
422
  gr.Markdown(USER_GUIDE_SECTIONS["how_clare_works"])
423
+ with gr.Accordion("What is Memory Line", open=False, elem_classes="clean-accordion"):
 
 
 
 
424
  gr.Markdown(USER_GUIDE_SECTIONS["memory_line"])
425
+ with gr.Accordion("Learning Progress Report", open=False, elem_classes="clean-accordion"):
 
 
 
 
426
  gr.Markdown(USER_GUIDE_SECTIONS["learning_progress"])
427
+ with gr.Accordion("How Clare Uses Your Files", open=False, elem_classes="clean-accordion"):
 
 
 
 
428
  gr.Markdown(USER_GUIDE_SECTIONS["how_files"])
429
+ with gr.Accordion("Micro-Quiz", open=False, elem_classes="clean-accordion"):
 
 
430
  gr.Markdown(USER_GUIDE_SECTIONS["micro_quiz"])
431
+ with gr.Accordion("Summarization", open=False, elem_classes="clean-accordion"):
 
 
 
 
432
  gr.Markdown(USER_GUIDE_SECTIONS["summarization"])
433
+ with gr.Accordion("Export Conversation", open=False, elem_classes="clean-accordion"):
 
 
 
 
434
  gr.Markdown(USER_GUIDE_SECTIONS["export_conversation"])
435
+ with gr.Accordion("FAQ", open=False, elem_classes="clean-accordion"):
 
 
436
  gr.Markdown(USER_GUIDE_SECTIONS["faq"])
437
 
438
  gr.Markdown("---")
 
448
 
449
  # === Center Main ===
450
  with gr.Column(scale=3):
 
451
  gr.Markdown(
452
  """
453
  <div style="background-color:#f9fafb; padding:10px; border-radius:5px; margin-top:10px; font-size:0.9em; color:#555;">
 
466
  type="tuples",
467
  )
468
 
 
 
 
 
469
  gr.Markdown("#### Rate Clare’s last answer")
470
  with gr.Row():
471
+ thumb_up_btn = gr.Button("👍 Helpful", size="sm", interactive=False)
472
+ thumb_down_btn = gr.Button("👎 Not helpful", size="sm", interactive=False)
 
 
 
 
473
 
474
  feedback_toggle_btn = gr.Button(
475
  "Give detailed feedback", size="sm", variant="secondary", interactive=False
 
511
  interactive=False,
512
  )
513
  gr.HTML("<div style='height:5px'></div>")
514
+ docs_btn = gr.Button("📂 Loaded Docs", size="sm", variant="secondary", interactive=False)
 
 
 
 
 
515
  with gr.Column(scale=2):
516
  with gr.Group(elem_classes="memory-line-box"):
517
  gr.HTML(
 
530
  </div>
531
  """
532
  )
533
+ review_btn = gr.Button("Review Now", size="sm", variant="primary", interactive=False)
 
 
 
 
 
534
  session_status = gr.Markdown(visible=False)
535
 
536
  # === Right Sidebar ===
 
539
  gr.HTML(f"<img src='{image_to_base64(CLARE_READING_PATH)}'>")
540
 
541
  with gr.Group(visible=True) as login_state_1:
542
+ login_start_btn = gr.Button("Student Login", elem_classes="login-main-btn")
 
 
543
 
544
  with gr.Group(visible=False) as login_state_2:
545
+ name_input = gr.Textbox(label="Student Name", placeholder="Name", container=True)
546
+ id_input = gr.Textbox(label="Email/ID", placeholder="ID", container=True)
547
+ login_confirm_btn = gr.Button("Enter", variant="primary", size="sm")
 
 
 
 
 
 
548
 
549
  with gr.Group(visible=False) as login_state_3:
550
  student_info_html = gr.HTML()
551
+ logout_btn = gr.Button("Log out", elem_classes="logout-btn", size="sm")
 
 
552
 
553
  gr.Markdown("### Actions")
554
+ export_btn = gr.Button("Export Conversation", size="sm", elem_classes="action-btn", interactive=False)
555
+ quiz_btn = gr.Button("Let's Try (Micro-Quiz)", size="sm", elem_classes="action-btn", interactive=False)
556
+ summary_btn = gr.Button("Summarization", size="sm", elem_classes="action-btn", interactive=False)
 
 
 
 
 
 
557
 
558
  gr.Markdown("### Results")
559
  with gr.Group(elem_classes="result-box"):
 
563
  )
564
 
565
  # ================== Login Flow ==================
 
566
  def show_inputs():
567
  return {
568
  login_state_1: gr.update(visible=False),
 
570
  login_state_3: gr.update(visible=False),
571
  }
572
 
573
+ login_start_btn.click(show_inputs, outputs=[login_state_1, login_state_2, login_state_3])
 
 
574
 
575
  def confirm_login(name, id_val):
576
  if not name or not id_val:
 
601
  feedback_toggle_btn: gr.update(interactive=False),
602
  feedback_text: gr.update(visible=False, value=""),
603
  feedback_submit_btn: gr.update(interactive=False, visible=False),
 
 
604
  }
605
 
606
  info_html = f"""
 
632
  learning_mode: gr.update(interactive=True),
633
  model_name: gr.update(interactive=False),
634
  docs_btn: gr.update(interactive=True),
 
635
  thumb_up_btn: gr.update(interactive=False, value="👍 Helpful"),
636
  thumb_down_btn: gr.update(interactive=False, value="👎 Not helpful"),
637
  feedback_toggle_btn: gr.update(interactive=True),
638
  feedback_text: gr.update(visible=False, value=""),
639
  feedback_submit_btn: gr.update(interactive=True, visible=False),
 
 
640
  }
641
 
642
  login_confirm_btn.click(
 
667
  feedback_toggle_btn,
668
  feedback_text,
669
  feedback_submit_btn,
 
 
670
  ],
671
  )
672
 
 
701
  feedback_toggle_btn: gr.update(interactive=False),
702
  feedback_text: gr.update(visible=False, value=""),
703
  feedback_submit_btn: gr.update(interactive=False, visible=False),
 
 
704
  }
705
 
706
  logout_btn.click(
 
731
  feedback_toggle_btn,
732
  feedback_text,
733
  feedback_submit_btn,
 
 
734
  ],
735
  )
736
 
737
  # ================== Main Logic ==================
 
738
  def update_course_and_rag(file, doc_type_val):
739
  local_topics = preloaded_topics or []
740
  local_chunks = preloaded_chunks or []
 
793
  user_id_val,
794
  feedback_used,
795
  ):
 
 
 
 
 
 
 
 
 
796
  if not user_id_val:
797
  out_msg = (
798
  "🔒 Please log in with your Student Name and Email/ID on the right "
 
804
  weaknesses or [],
805
  cognitive_state or {"confusion": 0, "mastery": 0},
806
  )
 
807
  return (
808
  "",
809
  new_history,
 
815
  feedback_used,
816
  gr.update(interactive=False, value="👍 Helpful"),
817
  gr.update(interactive=False, value="👎 Not helpful"),
 
 
818
  )
819
 
820
+ lang_pref_norm = normalize_lang_pref(lang_pref)
821
+ resolved_lang = detect_language(message or "", lang_pref_norm)
 
 
822
 
 
823
  if not message or not message.strip():
824
  new_status = render_session_status(
825
  mode_val or "Concept Explainer",
826
  weaknesses or [],
827
  cognitive_state or {"confusion": 0, "mastery": 0},
828
  )
 
829
  return (
830
  "",
831
  chat_history,
 
837
  feedback_used,
838
  gr.update(),
839
  gr.update(),
 
 
840
  )
841
 
 
 
842
  weaknesses = update_weaknesses_from_message(message, weaknesses or [])
 
 
 
 
843
  cognitive_state = update_cognitive_state_from_message(message, cognitive_state)
 
 
844
 
 
 
845
  if is_academic_query(message):
846
+ rag_context_text, rag_used_chunks = retrieve_relevant_chunks(message, rag_chunks or [])
 
 
847
  else:
848
  rag_context_text, rag_used_chunks = "", []
 
 
849
 
 
850
  start_ts = time.time()
851
+ answer, new_history = chat_with_clare(
 
852
  message=message,
853
  history=chat_history,
854
  model_name=model_name_val,
 
860
  cognitive_state=cognitive_state,
861
  rag_context=rag_context_text,
862
  )
 
863
  end_ts = time.time()
864
  latency_ms = (end_ts - start_ts) * 1000.0
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
865
 
866
+ ref_text = format_references(rag_used_chunks) if (is_academic_query(message) and rag_used_chunks) else ""
867
  if ref_text and new_history:
868
  last_user, last_assistant = new_history[-1]
869
  if "References (RAG context used):" not in (last_assistant or ""):
 
871
  new_history[-1] = [last_user, last_assistant]
872
  answer = last_assistant
873
 
 
874
  student_id = user_id_val or "ANON"
875
  experiment_id = "RESP_AI_W10"
876
  try:
 
879
  "experiment_id": experiment_id,
880
  "student_id": student_id,
881
  "event_type": "chat_turn",
882
+ "timestamp": end_ts,
883
+ "latency_ms": latency_ms,
884
  "question": message,
885
  "answer": answer,
886
  "model_name": model_name_val,
887
  "language": resolved_lang,
888
  "learning_mode": mode_val,
 
 
 
 
 
889
  }
890
  )
891
  except Exception as e:
 
893
 
894
  new_status = render_session_status(mode_val, weaknesses, cognitive_state)
895
 
 
896
  return (
897
  "",
898
  new_history,
 
904
  False,
905
  gr.update(interactive=True, value="👍 Helpful"),
906
  gr.update(interactive=True, value="👎 Not helpful"),
 
 
907
  )
908
 
909
  user_input.submit(
 
933
  feedback_used_state,
934
  thumb_up_btn,
935
  thumb_down_btn,
 
 
936
  ],
937
  )
938
 
 
949
  doc_type_val,
950
  user_id_val,
951
  ):
 
 
 
 
 
 
 
952
  if not user_id_val:
953
  gr.Info("Please log in first to start a micro-quiz.", title="Login required")
 
954
  return (
955
  chat_history,
956
  weaknesses,
 
960
  weaknesses or [],
961
  cognitive_state or {"confusion": 0, "mastery": 0},
962
  ),
 
 
963
  )
964
 
965
  quiz_instruction = (
 
974
  "• Do NOT start a content question until I have answered 1 or 2.\n\n"
975
  "Step 2 – After I choose the style:\n"
976
  "• If I choose 1 (multiple-choice):\n"
977
+ " - Ask ONE multiple-choice question at a time, based on Module 10 concepts.\n"
 
978
  " - Provide 3–4 options (A, B, C, D) and make only one option clearly correct.\n"
979
  "• If I choose 2 (short-answer):\n"
980
  " - Ask ONE short-answer question at a time, also based on Module 10 concepts.\n"
 
988
  "Do not ask any content question before I choose."
989
  )
990
 
991
+ resolved_lang = normalize_lang_pref(lang_pref)
 
 
992
 
993
+ start_ts = time.time()
994
+ quiz_ctx_text, _ = retrieve_relevant_chunks("Module 10 quiz", rag_chunks or [])
995
+ answer, new_history = chat_with_clare(
 
 
 
 
 
 
996
  message=quiz_instruction,
997
  history=chat_history,
998
  model_name=model_name_val,
 
1004
  cognitive_state=cognitive_state,
1005
  rag_context=quiz_ctx_text,
1006
  )
1007
+ end_ts = time.time()
1008
+ latency_ms = (end_ts - start_ts) * 1000.0
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1009
 
1010
  student_id = user_id_val or "ANON"
1011
  experiment_id = "RESP_AI_W10"
1012
+
1013
  try:
1014
  log_event(
1015
  {
1016
  "experiment_id": experiment_id,
1017
  "student_id": student_id,
1018
+ "event_type": "micro_quiz_start",
1019
  "timestamp": end_ts,
1020
  "latency_ms": latency_ms,
1021
+ "question": quiz_instruction,
1022
  "answer": answer,
1023
  "model_name": model_name_val,
1024
  "language": resolved_lang,
1025
  "learning_mode": mode_val,
 
1026
  }
1027
  )
 
1028
  except Exception as e:
1029
  print("log_event error:", e)
1030
 
1031
  new_status = render_session_status(mode_val, weaknesses, cognitive_state)
1032
+ return new_history, weaknesses, cognitive_state, new_status
1033
 
1034
  quiz_btn.click(
1035
  start_micro_quiz,
 
1045
  doc_type,
1046
  user_id_state,
1047
  ],
1048
+ [chatbot, weakness_state, cognitive_state_state, session_status],
1049
  )
1050
 
1051
+ # ===== Feedback UI =====
1052
  def show_feedback_box():
1053
  return {
1054
  feedback_text: gr.update(visible=True),
1055
  feedback_submit_btn: gr.update(visible=True),
1056
  }
1057
 
1058
+ feedback_toggle_btn.click(show_feedback_box, None, [feedback_text, feedback_submit_btn])
 
 
 
 
1059
 
1060
+ def send_thumb_up(last_q, last_a, user_id_val, mode_val, model_name_val, lang_pref, feedback_used):
 
 
 
 
 
 
 
 
 
1061
  if not last_q and not last_a:
 
1062
  return (
1063
  feedback_used,
1064
  gr.update(interactive=False, value="👍 Helpful"),
1065
  gr.update(interactive=False, value="👎 Not helpful"),
1066
  )
 
 
1067
  if feedback_used:
1068
+ return (feedback_used, gr.update(interactive=False), gr.update(interactive=False))
 
 
 
 
 
1069
 
1070
  try:
1071
  log_event(
 
1077
  "question": last_q,
1078
  "answer": last_a,
1079
  "model_name": model_name_val,
1080
+ "language": normalize_lang_pref(lang_pref),
1081
  "learning_mode": mode_val,
1082
  }
1083
  )
 
1084
  except Exception as e:
1085
  print("thumb_up log error:", e)
1086
 
1087
+ return (True, gr.update(interactive=False, value="👍 Helpful (sent)"), gr.update(interactive=False))
 
 
 
 
 
1088
 
1089
+ def send_thumb_down(last_q, last_a, user_id_val, mode_val, model_name_val, lang_pref, feedback_used):
 
 
 
 
 
 
 
 
1090
  if not last_q and not last_a:
 
1091
  return (
1092
  feedback_used,
1093
  gr.update(interactive=False, value="👍 Helpful"),
1094
  gr.update(interactive=False, value="👎 Not helpful"),
1095
  )
 
1096
  if feedback_used:
1097
+ return (feedback_used, gr.update(interactive=False), gr.update(interactive=False))
 
 
 
 
 
1098
 
1099
  try:
1100
  log_event(
 
1106
  "question": last_q,
1107
  "answer": last_a,
1108
  "model_name": model_name_val,
1109
+ "language": normalize_lang_pref(lang_pref),
1110
  "learning_mode": mode_val,
1111
  }
1112
  )
 
1113
  except Exception as e:
1114
  print("thumb_down log error:", e)
1115
 
1116
+ return (True, gr.update(interactive=False), gr.update(interactive=False, value="👎 Not helpful (sent)"))
 
 
 
 
1117
 
1118
  thumb_up_btn.click(
1119
  send_thumb_up,
 
1143
  [feedback_used_state, thumb_up_btn, thumb_down_btn],
1144
  )
1145
 
1146
+ def submit_detailed_feedback(text, last_q, last_a, user_id_val, mode_val, model_name_val, lang_pref):
 
 
1147
  if not text or not text.strip():
1148
+ return gr.update(value="", placeholder="Please enter some feedback before submitting.")
 
 
 
1149
 
1150
  try:
1151
  log_event(
 
1158
  "answer": last_a,
1159
  "feedback_text": text.strip(),
1160
  "model_name": model_name_val,
1161
+ "language": normalize_lang_pref(lang_pref),
1162
  "learning_mode": mode_val,
1163
  }
1164
  )
 
1165
  except Exception as e:
1166
  print("detailed_feedback log error:", e)
1167
 
1168
+ return gr.update(value="", placeholder="Thanks! Your feedback has been recorded.")
 
 
 
1169
 
1170
  feedback_submit_btn.click(
1171
  submit_detailed_feedback,
 
1189
  )
1190
 
1191
  summary_btn.click(
1192
+ lambda h, c, w, cog, m, l: summarize_conversation(h, c, w, cog, m, normalize_lang_pref(l)),
1193
+ [chatbot, course_outline_state, weakness_state, cognitive_state_state, model_name, language_preference],
 
 
 
 
 
 
 
 
 
1194
  [result_display],
1195
  )
1196
 
 
1210
  False,
1211
  gr.update(interactive=False, value="👍 Helpful"),
1212
  gr.update(interactive=False, value="👎 Not helpful"),
 
 
1213
  )
1214
 
1215
  clear_btn.click(
 
1227
  feedback_used_state,
1228
  thumb_up_btn,
1229
  thumb_down_btn,
 
 
1230
  ],
1231
  queue=False,
1232
  )