Jiaxuan Yang commited on
Commit
56aacde
·
1 Parent(s): 2693f1d

Updated UI and prompt

Browse files
app.py CHANGED
@@ -1,7 +1,9 @@
1
  import base64
 
2
  import json
3
  import math
4
  import os
 
5
  import re
6
  import uuid
7
  import wave
@@ -134,9 +136,14 @@ def _read_text_if_exists(path: Path, fallback: str) -> str:
134
  return fallback
135
 
136
 
137
- def render_prompt_template(template: str, document: str) -> str:
138
  # Avoid `str.format(...)` because character prompt files may contain JSON braces.
139
- return str(template).replace("{document}", document)
 
 
 
 
 
140
 
141
 
142
  def load_character_configs() -> Dict[str, Dict[str, Any]]:
@@ -234,12 +241,14 @@ def new_session_state() -> Dict[str, Any]:
234
  "pdf_excerpt": "",
235
  "character_id": DEFAULT_CHARACTER_ID,
236
  "exam_character_id": None,
 
237
  "current_page": "explain",
238
  "mcqs": [],
239
  "current_index": 0,
240
  "score": 0,
241
  "awaiting_next_after_wrong": False,
242
  "completed": False,
 
243
  "status": "Idle",
244
  }
245
 
@@ -297,6 +306,18 @@ def write_tone_wav(text: str, out_path: str, seconds: float = 2.0, sample_rate:
297
  return out_path
298
 
299
 
 
 
 
 
 
 
 
 
 
 
 
 
300
  def render_pdf_pages_for_vl(pdf_path: str, max_pages: int, scale: float) -> List[str]:
301
  if pdfium is None:
302
  raise RuntimeError("pypdfium2 is required to render PDF pages for Qwen3-VL.")
@@ -526,7 +547,14 @@ class QwenPipelineEngine:
526
  self._pdf_page_cache[cache_key] = page_paths
527
  return page_paths
528
 
529
- def _chat_completions(self, messages: List[Dict[str, Any]], max_tokens: int) -> str:
 
 
 
 
 
 
 
530
  url = f"{_require_api_url()}/chat/completions"
531
  payload: Dict[str, Any] = {
532
  "model": CHAT_MODEL_ID,
@@ -534,6 +562,10 @@ class QwenPipelineEngine:
534
  "max_tokens": max_tokens,
535
  "stream": False,
536
  }
 
 
 
 
537
  resp = requests.post(url, headers=_api_headers(), json=payload, timeout=API_TIMEOUT_SEC)
538
  if resp.status_code >= 400:
539
  raise RuntimeError(f"VL API error {resp.status_code}: {resp.text[:1000]}")
@@ -552,14 +584,27 @@ class QwenPipelineEngine:
552
  return "\n".join([p for p in parts if p]).strip()
553
  return str(content).strip()
554
 
555
- def _real_generate_text_from_pdf(self, pdf_path: str, prompt: str, max_tokens: Optional[int] = None) -> str:
 
 
 
 
 
 
 
 
556
  page_image_paths = self._get_pdf_page_images(pdf_path)
557
  content: List[Dict[str, Any]] = []
558
  for p in page_image_paths:
559
  content.append({"type": "image_url", "image_url": {"url": image_file_to_data_url(p)}})
560
  content.append({"type": "text", "text": prompt})
561
  messages = [{"role": "user", "content": content}]
562
- return self._chat_completions(messages, max_tokens=max_tokens or QWEN_VL_MAX_NEW_TOKENS)
 
 
 
 
 
563
 
564
  def _real_tts_single(self, text: str, out_path: str) -> str:
565
  if not text.strip():
@@ -638,10 +683,26 @@ class QwenPipelineEngine:
638
  lecture_text = self._mock_generate_lecture(pdf_excerpt)
639
  mcqs = self._mock_generate_mcqs(lecture_text)
640
  else:
641
- lecture_prompt = render_prompt_template(str(lecture_template), pdf_excerpt)
642
- lecture_text = self._real_generate_text_from_pdf(pdf_path, lecture_prompt, max_tokens=QWEN_VL_MAX_NEW_TOKENS)
 
 
 
 
 
 
 
 
 
 
643
  quiz_prompt = render_prompt_template(str(mcq_template), pdf_excerpt)
644
- raw_mcq_json = self._real_generate_text_from_pdf(pdf_path, quiz_prompt, max_tokens=QWEN_VL_MCQ_MAX_NEW_TOKENS)
 
 
 
 
 
 
645
  try:
646
  mcqs = parse_mcq_json(raw_mcq_json)
647
  except json.JSONDecodeError:
@@ -650,6 +711,8 @@ class QwenPipelineEngine:
650
  pdf_path,
651
  retry_prompt,
652
  max_tokens=QWEN_VL_MCQ_MAX_NEW_TOKENS,
 
 
653
  )
654
  mcqs = parse_mcq_json(retry_raw)
655
 
@@ -669,8 +732,18 @@ class QwenPipelineEngine:
669
  if self.mock_mode:
670
  lecture_text = self._mock_generate_lecture(pdf_excerpt)
671
  else:
672
- lecture_prompt = render_prompt_template(str(lecture_template), pdf_excerpt)
673
- lecture_text = self._real_generate_text_from_pdf(pdf_path, lecture_prompt, max_tokens=QWEN_VL_MAX_NEW_TOKENS)
 
 
 
 
 
 
 
 
 
 
674
 
675
  return {
676
  "lecture_text": lecture_text,
@@ -686,10 +759,16 @@ class QwenPipelineEngine:
686
 
687
  if self.mock_mode:
688
  mcqs = self._mock_generate_mcqs(pdf_excerpt)
689
- return [asdict(q) for q in mcqs]
690
 
691
  quiz_prompt = render_prompt_template(str(mcq_template), pdf_excerpt)
692
- raw_mcq_json = self._real_generate_text_from_pdf(pdf_path, quiz_prompt, max_tokens=QWEN_VL_MCQ_MAX_NEW_TOKENS)
 
 
 
 
 
 
693
  try:
694
  mcqs = parse_mcq_json(raw_mcq_json)
695
  except json.JSONDecodeError:
@@ -698,9 +777,11 @@ class QwenPipelineEngine:
698
  pdf_path,
699
  retry_prompt,
700
  max_tokens=QWEN_VL_MCQ_MAX_NEW_TOKENS,
 
 
701
  )
702
  mcqs = parse_mcq_json(retry_raw)
703
- return [asdict(q) for q in mcqs]
704
 
705
  @spaces.GPU
706
  def synthesize_tts(self, text: str, name_prefix: str = "audio") -> str:
@@ -726,7 +807,7 @@ def parse_mcq_json(raw: str) -> List[MCQItem]:
726
  parsed: List[MCQItem] = []
727
  for item in questions[:5]:
728
  q = str(item.get("question", "")).strip()
729
- options = [str(x).strip() for x in item.get("options", [])][:4]
730
  answer = str(item.get("answer", "")).strip().upper()
731
  explanation = str(item.get("explanation", "")).strip()
732
  if len(options) != 4:
@@ -741,6 +822,40 @@ def parse_mcq_json(raw: str) -> List[MCQItem]:
741
  return parsed
742
 
743
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
744
  engine = QwenPipelineEngine()
745
 
746
 
@@ -769,7 +884,7 @@ def current_choices(state: Dict[str, Any]) -> List[str]:
769
  if mcq is None:
770
  return []
771
  labels = ["A", "B", "C", "D"]
772
- return [f"{labels[i]}. {opt}" for i, opt in enumerate(mcq["options"])]
773
 
774
 
775
  def score_text(state: Dict[str, Any]) -> str:
@@ -777,44 +892,396 @@ def score_text(state: Dict[str, Any]) -> str:
777
  return f"Score: {state.get('score', 0)} / {total}"
778
 
779
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
780
  def reset_ui_from_state(
781
  state: Dict[str, Any],
782
  feedback: str = "",
783
  *,
784
  results_visible: bool = True,
785
  loading_visible: bool = False,
786
- loading_text: str = "正在生成中,请稍候...",
 
787
  ):
788
  quiz_ready = bool(state.get("mcqs"))
789
  current_page = state.get("current_page", "explain")
 
 
 
 
790
  show_explain_page = results_visible and current_page != "exam"
791
  show_exam_page = results_visible and current_page == "exam"
792
- next_visible = bool(state.get("awaiting_next_after_wrong"))
793
  submit_interactive = quiz_ready and not state.get("completed", False)
794
- radio_interactive = submit_interactive and not state.get("awaiting_next_after_wrong", False)
795
  lecture_tts_ready = bool(state.get("lecture_text"))
796
- explanation_tts_ready = bool(state.get("last_explanation_tts_text"))
797
  if state.get("completed"):
798
- next_visible = False
799
  radio_interactive = False
800
  return (
801
  state,
802
- gr.update(value=loading_text, visible=loading_visible),
 
 
 
 
803
  gr.update(visible=show_explain_page),
804
  gr.update(visible=show_exam_page),
805
- gr.update(),
806
  state.get("status", "Idle"),
807
  state.get("lecture_text", ""),
808
  state.get("lecture_audio_path", None),
809
  gr.update(interactive=lecture_tts_ready),
810
  gr.update(visible=lecture_tts_ready, interactive=lecture_tts_ready),
811
- gr.update(value=format_question_block(state), visible=quiz_ready),
 
812
  gr.update(choices=current_choices(state), value=None, interactive=radio_interactive),
813
  score_text(state),
814
  feedback,
815
- state.get("explanation_audio_path", None),
816
- gr.update(visible=explanation_tts_ready, interactive=explanation_tts_ready),
817
- gr.update(visible=next_visible),
818
  gr.update(interactive=submit_interactive),
819
  gr.update(interactive=quiz_ready),
820
  )
@@ -834,7 +1301,7 @@ def process_pdf(pdf_file: Optional[str], character_id: str, state: Dict[str, Any
834
  feedback="正在读取论文并生成讲解与题目,请稍候...",
835
  results_visible=False,
836
  loading_visible=True,
837
- loading_text="正在生成中,请稍候...",
838
  )
839
  try:
840
  result = engine.build_lecture(pdf_file, get_character_config(state["character_id"]))
@@ -856,7 +1323,7 @@ def process_pdf(pdf_file: Optional[str], character_id: str, state: Dict[str, Any
856
  state["status"] = "Lecture generated."
857
  yield reset_ui_from_state(
858
  state,
859
- feedback="Lecture is ready. Click 'Play Lecture Audio' if needed, then press 'Exam' to generate MCQs.",
860
  results_visible=True,
861
  loading_visible=False,
862
  )
@@ -878,8 +1345,6 @@ def submit_answer(choice: Optional[str], state: Dict[str, Any]):
878
  return reset_ui_from_state(state, feedback="Load a PDF first.")
879
  if state.get("completed"):
880
  return reset_ui_from_state(state, feedback="Quiz already completed.")
881
- if state.get("awaiting_next_after_wrong"):
882
- return reset_ui_from_state(state, feedback="Click Next Question to continue.")
883
  if not choice:
884
  return reset_ui_from_state(state, feedback="Please select an option.")
885
 
@@ -888,64 +1353,69 @@ def submit_answer(choice: Optional[str], state: Dict[str, Any]):
888
  state["status"] = "No current question."
889
  return reset_ui_from_state(state, feedback="No current question.")
890
 
 
 
 
891
  selected_label = choice.split(".", 1)[0].strip().upper()
892
  correct_label = str(mcq["answer"]).upper()
 
 
893
 
894
  if selected_label == correct_label:
895
  state["score"] += 1
896
  state["last_explanation_tts_text"] = ""
897
  state["explanation_audio_path"] = None
898
- state["status"] = "Correct answer."
 
 
899
  if state["current_index"] >= len(state["mcqs"]) - 1:
900
  state["completed"] = True
901
  state["status"] = "Quiz completed."
 
 
 
 
 
 
 
902
  return reset_ui_from_state(
903
  state,
904
- feedback=f"Correct. Quiz finished. Final score: {state['score']} / {len(state['mcqs'])}.",
905
  )
906
 
 
907
  state["current_index"] += 1
908
- return reset_ui_from_state(state, feedback="Correct. Moving to the next question.")
 
909
 
910
  correct_idx = ["A", "B", "C", "D"].index(correct_label)
911
  correct_choice_display = f"{correct_label}. {mcq['options'][correct_idx]}"
912
- explanation = mcq["explanation"]
913
- state["last_explanation_tts_text"] = explanation
914
  state["explanation_audio_path"] = None
915
- state["awaiting_next_after_wrong"] = True
916
- state["status"] = "Incorrect answer. Review explanation, then continue."
917
- feedback = (
918
- f"Incorrect.\n\nCorrect answer: {correct_choice_display}\n\nExplanation: {explanation}\n\n"
919
- "Click 'Play Explanation Audio' to generate speech for the explanation."
 
920
  )
921
- return reset_ui_from_state(state, feedback=feedback)
922
-
923
-
924
- def next_question(state: Dict[str, Any]):
925
- if not state.get("mcqs"):
926
- return reset_ui_from_state(state, feedback="Load a PDF first.")
927
- if state.get("completed"):
928
- return reset_ui_from_state(state, feedback="Quiz already completed.")
929
- if not state.get("awaiting_next_after_wrong"):
930
- return reset_ui_from_state(state, feedback="Use Submit Answer for the current question.")
931
-
932
  if state["current_index"] >= len(state["mcqs"]) - 1:
933
  state["completed"] = True
934
- state["awaiting_next_after_wrong"] = False
935
- state["last_explanation_tts_text"] = ""
936
- state["explanation_audio_path"] = None
937
  state["status"] = "Quiz completed."
938
- return reset_ui_from_state(
 
 
939
  state,
940
- feedback=f"Quiz finished. Final score: {state['score']} / {len(state['mcqs'])}.",
 
941
  )
942
-
943
  state["current_index"] += 1
944
- state["awaiting_next_after_wrong"] = False
945
- state["last_explanation_tts_text"] = ""
946
- state["explanation_audio_path"] = None
947
- state["status"] = "Next question loaded."
948
- return reset_ui_from_state(state, feedback="Moved to the next question.")
949
 
950
 
951
  def restart_quiz(state: Dict[str, Any]):
@@ -957,32 +1427,40 @@ def restart_quiz(state: Dict[str, Any]):
957
  state["completed"] = False
958
  state["last_explanation_tts_text"] = ""
959
  state["explanation_audio_path"] = None
 
 
960
  state["status"] = "Quiz restarted."
961
  return reset_ui_from_state(state, feedback="Quiz restarted.")
962
 
963
 
964
- def go_to_exam_page(state: Dict[str, Any]):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
965
  if not state.get("lecture_text"):
966
  state["status"] = "No lecture loaded."
967
  yield reset_ui_from_state(state, feedback="Generate lecture first.", results_visible=False, loading_visible=False)
968
  return
969
-
970
- state["current_page"] = "exam"
971
- state["exam_character_id"] = None
972
- state["mcqs"] = []
973
- state["current_index"] = 0
974
- state["score"] = 0
975
- state["awaiting_next_after_wrong"] = False
976
- state["completed"] = False
977
- state["last_explanation_tts_text"] = ""
978
- state["explanation_audio_path"] = None
979
- state["status"] = "请选择角色以生成 MCQ"
980
- yield reset_ui_from_state(
981
- state,
982
- feedback="",
983
- results_visible=True,
984
- loading_visible=False,
985
- )
986
 
987
 
988
  def generate_exam_mcq(selected_character_id: Optional[str], state: Dict[str, Any]):
@@ -991,22 +1469,26 @@ def generate_exam_mcq(selected_character_id: Optional[str], state: Dict[str, Any
991
  yield reset_ui_from_state(state, feedback="Generate lecture first.", results_visible=False, loading_visible=False)
992
  return
993
  if not selected_character_id:
994
- state["status"] = "请选择角色以生成 MCQ"
995
  yield reset_ui_from_state(state, feedback="", results_visible=True, loading_visible=False)
996
  return
997
 
998
  state["current_page"] = "exam"
999
  state["exam_character_id"] = selected_character_id
1000
- state["status"] = "正在生成 MCQ..."
 
 
 
1001
  state["last_explanation_tts_text"] = ""
1002
  state["explanation_audio_path"] = None
1003
  state["mcqs"] = []
 
1004
  yield reset_ui_from_state(
1005
  state,
1006
  feedback="",
1007
- results_visible=False,
1008
  loading_visible=True,
1009
- loading_text="正在生成 MCQ,请稍候...",
1010
  )
1011
 
1012
  try:
@@ -1021,7 +1503,9 @@ def generate_exam_mcq(selected_character_id: Optional[str], state: Dict[str, Any
1021
  state["awaiting_next_after_wrong"] = False
1022
  state["completed"] = False
1023
  state["current_page"] = "exam"
1024
- state["status"] = "MCQ generated."
 
 
1025
  yield reset_ui_from_state(
1026
  state,
1027
  feedback="",
@@ -1030,20 +1514,22 @@ def generate_exam_mcq(selected_character_id: Optional[str], state: Dict[str, Any
1030
  )
1031
  except Exception as exc:
1032
  state["current_page"] = "exam"
 
1033
  state["status"] = "Failed during MCQ generation."
 
 
 
 
 
1034
  yield reset_ui_from_state(
1035
  state,
1036
- feedback=f"Error: {type(exc).__name__}: {exc}",
1037
  results_visible=True,
1038
  loading_visible=False,
1039
  )
1040
 
1041
 
1042
- def on_generate_click(pdf_file: Optional[str], explain_character_id: str, exam_character_id: Optional[str], state: Dict[str, Any]):
1043
- current_page = state.get("current_page", "explain")
1044
- if current_page == "exam":
1045
- yield from generate_exam_mcq(exam_character_id, state)
1046
- return
1047
  yield from process_pdf(pdf_file, explain_character_id, state)
1048
 
1049
 
@@ -1054,6 +1540,20 @@ def go_to_explain_page(state: Dict[str, Any]):
1054
 
1055
  def on_character_change(character_id: str, state: Dict[str, Any]):
1056
  cfg = get_character_config(character_id)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1057
  state["character_id"] = cfg["id"]
1058
  state["current_page"] = "explain"
1059
  state["lecture_audio_path"] = None
@@ -1067,17 +1567,11 @@ def on_character_change(character_id: str, state: Dict[str, Any]):
1067
  build_chat_meta_html(cfg["id"]),
1068
  gr.update(visible=False),
1069
  gr.update(visible=False),
1070
- gr.update(visible=False),
1071
  "Character switched. Upload PDF and click Generate.",
1072
  )
1073
 
1074
 
1075
- def on_exam_character_select(character_id: Optional[str], state: Dict[str, Any]):
1076
- state["exam_character_id"] = character_id
1077
- state["status"] = "Exam character selected. Click Generate to create MCQs."
1078
- return state, state["status"]
1079
-
1080
-
1081
  def play_lecture_audio(state: Dict[str, Any]):
1082
  if not state.get("lecture_text"):
1083
  state["status"] = "No lecture text available."
@@ -1287,17 +1781,30 @@ body {{
1287
  gap: 0.35rem !important;
1288
  flex-wrap: wrap !important;
1289
  justify-content: center !important;
 
1290
  }}
1291
  #character-select-wrap label {{
1292
  background: transparent !important;
1293
  border: 1px solid rgba(255,255,255,0.14) !important;
1294
  border-radius: 999px !important;
1295
- padding: 0.18rem 0.65rem !important;
1296
- min-height: 0 !important;
 
 
 
 
 
1297
  }}
1298
  #character-select-wrap label span {{
1299
  color: rgba(240,243,250,0.78) !important;
1300
  font-size: 0.88rem !important;
 
 
 
 
 
 
 
1301
  }}
1302
  #character-select-wrap input[type="radio"] {{
1303
  display: none !important;
@@ -1319,6 +1826,31 @@ body {{
1319
  border-radius: 12px;
1320
  backdrop-filter: blur(3px);
1321
  }}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1322
  #results-panel {{
1323
  background: transparent !important;
1324
  border: none !important;
@@ -1366,7 +1898,8 @@ body {{
1366
  border-radius: 20px !important;
1367
  padding: 0.35rem 0.45rem !important;
1368
  }}
1369
- #lecture-wrap textarea {{
 
1370
  font-style: italic;
1371
  line-height: 1.45 !important;
1372
  color: rgba(244,246,251,0.95) !important;
@@ -1389,13 +1922,15 @@ body {{
1389
  transform: translateX(-50%);
1390
  bottom: 18px;
1391
  width: min(860px, calc(100vw - 28px));
1392
- z-index: 50;
1393
  background: rgba(24, 26, 34, 0.88);
1394
  border: 1px solid rgba(255,255,255,0.08);
1395
  border-radius: 999px;
1396
  box-shadow: 0 16px 40px rgba(0,0,0,0.22);
1397
  backdrop-filter: blur(10px);
1398
  padding: 8px 10px;
 
 
1399
  }}
1400
  #bottom-composer .wrap {{
1401
  border: none !important;
@@ -1409,12 +1944,20 @@ body {{
1409
  border-radius: 999px !important;
1410
  }}
1411
  #generate-btn button {{
1412
- min-height: 38px !important;
1413
- height: 38px !important;
1414
  padding: 0 18px !important;
1415
  font-size: 0.9rem !important;
1416
- line-height: 1 !important;
1417
  min-width: 132px !important;
 
 
 
 
 
 
 
 
1418
  }}
1419
  #pdf-uploader {{
1420
  min-height: 42px;
@@ -1451,6 +1994,373 @@ body {{
1451
  justify-content: space-between;
1452
  align-items: center;
1453
  }}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1454
  .container {{max-width: 980px; margin: 0 auto;}}
1455
  .mono {{font-family: ui-monospace, Menlo, Consolas, monospace;}}
1456
  {bg_css}
@@ -1496,6 +2406,17 @@ def build_chat_meta_html(character_id: Optional[str] = None) -> str:
1496
  """
1497
 
1498
 
 
 
 
 
 
 
 
 
 
 
 
1499
  with gr.Blocks(css=CSS) as demo:
1500
  with gr.Column(elem_id="page-shell"):
1501
  character_header_html = gr.HTML(build_character_header_html(DEFAULT_CHARACTER_ID), elem_id="character-card")
@@ -1511,7 +2432,7 @@ with gr.Blocks(css=CSS) as demo:
1511
 
1512
  state = gr.State(new_session_state())
1513
 
1514
- loading_md = gr.Markdown("正在生成中,请稍候...", elem_id="gen-loading", visible=False)
1515
 
1516
  with gr.Column(visible=False, elem_id="results-panel") as explain_page:
1517
  with gr.Row(elem_id="chat-row"):
@@ -1520,47 +2441,102 @@ with gr.Blocks(css=CSS) as demo:
1520
  with gr.Column(elem_id="chat-main"):
1521
  chat_meta_html = gr.HTML(build_chat_meta_html(DEFAULT_CHARACTER_ID))
1522
  with gr.Column(elem_id="lecture-wrap"):
1523
- lecture_box = gr.Textbox(
1524
- label="",
1525
- show_label=False,
1526
- lines=10,
1527
- interactive=False,
1528
- placeholder="Generated lecture explanation will appear here...",
 
 
1529
  )
1530
  with gr.Row(elem_id="lecture-actions"):
1531
  play_lecture_btn = gr.Button("Play Lecture Audio", interactive=False, scale=0)
1532
  with gr.Row(elem_id="exam-entry-wrap"):
1533
- exam_btn = gr.Button("Exam", interactive=False, variant="secondary", scale=0)
1534
 
1535
  with gr.Column(elem_id="tts-wrap"):
1536
  lecture_audio = gr.Audio(label="Lecture TTS", type="filepath")
1537
 
 
 
 
 
 
 
 
 
 
 
 
 
1538
  with gr.Column(visible=False, elem_id="exam-page") as exam_page:
1539
  with gr.Row(elem_id="exam-nav"):
1540
  back_btn = gr.Button("Back", variant="secondary", scale=0)
1541
- exam_character_radio = gr.Radio(
1542
- choices=[(cfg["display_name"], cid) for cid, cfg in CHARACTER_CONFIGS.items()],
1543
- value=None,
1544
- label="Choose character for MCQ",
1545
- interactive=True,
1546
- elem_id="character-select-wrap",
1547
- container=False,
1548
- )
1549
- with gr.Column(elem_id="status-wrap"):
1550
- status_box = gr.Textbox(label="Status", value="Idle", interactive=False)
1551
  with gr.Column(elem_id="quiz-wrap"):
1552
- quiz_header = gr.Markdown("### No question loaded", visible=False)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1553
  choice_radio = gr.Radio(choices=[], label="Select one answer", interactive=False)
1554
  with gr.Row():
1555
  submit_btn = gr.Button("Submit Answer", interactive=False)
1556
- next_btn = gr.Button("Next Question", visible=False)
1557
  restart_btn = gr.Button("Restart Quiz", interactive=False)
1558
- score_box = gr.Textbox(label="Score", value="Score: 0 / 0", interactive=False)
1559
- feedback_box = gr.Textbox(label="Feedback / Explanation", lines=8, interactive=False)
1560
-
1561
- with gr.Column(elem_id="explain-wrap"):
1562
- explanation_audio = gr.Audio(label="Explanation TTS (shown after wrong answer)", type="filepath")
1563
- play_expl_btn = gr.Button("Play Explanation Audio", visible=False, interactive=False)
1564
 
1565
  with gr.Row(elem_id="bottom-composer"):
1566
  pdf_input = gr.File(
@@ -1576,50 +2552,49 @@ with gr.Blocks(css=CSS) as demo:
1576
 
1577
  outputs = [
1578
  state,
 
 
 
 
1579
  loading_md,
1580
  explain_page,
1581
  exam_page,
1582
- exam_character_radio,
1583
  status_box,
1584
  lecture_box,
1585
  lecture_audio,
1586
  play_lecture_btn,
1587
  exam_btn,
1588
- quiz_header,
 
1589
  choice_radio,
1590
  score_box,
1591
  feedback_box,
1592
- explanation_audio,
1593
- play_expl_btn,
1594
- next_btn,
1595
  submit_btn,
1596
  restart_btn,
1597
  ]
1598
 
1599
- run_btn.click(fn=on_generate_click, inputs=[pdf_input, character_dropdown, exam_character_radio, state], outputs=outputs)
1600
  character_dropdown.change(
1601
  fn=on_character_change,
1602
  inputs=[character_dropdown, state],
1603
  outputs=[state, character_header_html, chat_avatar_html, chat_meta_html, explain_page, exam_page, loading_md, status_box],
1604
  )
1605
- exam_btn.click(fn=go_to_exam_page, inputs=[state], outputs=outputs)
1606
- exam_character_radio.change(fn=on_exam_character_select, inputs=[exam_character_radio, state], outputs=[state, status_box])
1607
- back_btn.click(fn=go_to_explain_page, inputs=[state], outputs=outputs)
1608
- submit_btn.click(fn=submit_answer, inputs=[choice_radio, state], outputs=outputs)
1609
- next_btn.click(fn=next_question, inputs=[state], outputs=outputs)
1610
- restart_btn.click(fn=restart_quiz, inputs=[state], outputs=outputs)
 
1611
  play_lecture_btn.click(
1612
  fn=play_lecture_audio,
1613
  inputs=[state],
1614
  outputs=[state, status_box, lecture_audio, feedback_box],
 
1615
  )
1616
- play_expl_btn.click(
1617
- fn=play_explanation_audio,
1618
- inputs=[state],
1619
- outputs=[state, status_box, explanation_audio, feedback_box],
1620
- )
1621
 
1622
 
 
 
1623
  if __name__ == "__main__":
1624
- demo.queue()
1625
  demo.launch()
 
1
  import base64
2
+ import html
3
  import json
4
  import math
5
  import os
6
+ import random
7
  import re
8
  import uuid
9
  import wave
 
136
  return fallback
137
 
138
 
139
+ def render_prompt_template(template: str, document: str, replacements: Optional[Dict[str, str]] = None) -> str:
140
  # Avoid `str.format(...)` because character prompt files may contain JSON braces.
141
+ s = str(template)
142
+ s = s.replace("{document}", document).replace("{paper_text}", document)
143
+ if replacements:
144
+ for k, v in replacements.items():
145
+ s = s.replace("{" + str(k) + "}", str(v))
146
+ return s
147
 
148
 
149
  def load_character_configs() -> Dict[str, Dict[str, Any]]:
 
241
  "pdf_excerpt": "",
242
  "character_id": DEFAULT_CHARACTER_ID,
243
  "exam_character_id": None,
244
+ "mcq_generating": False,
245
  "current_page": "explain",
246
  "mcqs": [],
247
  "current_index": 0,
248
  "score": 0,
249
  "awaiting_next_after_wrong": False,
250
  "completed": False,
251
+ "exam_chat": [],
252
  "status": "Idle",
253
  }
254
 
 
306
  return out_path
307
 
308
 
309
+ def normalize_option_text(text: Any) -> str:
310
+ s = str(text or "").strip()
311
+ s = re.sub(r"^\s*(?:[A-Da-d]\s*[\.\)\:\-]\s*)+", "", s).strip()
312
+ return s
313
+
314
+
315
+ def normalize_explanation_text(text: Any) -> str:
316
+ s = str(text or "").strip()
317
+ s = re.sub(r"^\s*(?:Explanation|Reason)\s*:\s*", "", s, flags=re.IGNORECASE).strip()
318
+ return s
319
+
320
+
321
  def render_pdf_pages_for_vl(pdf_path: str, max_pages: int, scale: float) -> List[str]:
322
  if pdfium is None:
323
  raise RuntimeError("pypdfium2 is required to render PDF pages for Qwen3-VL.")
 
547
  self._pdf_page_cache[cache_key] = page_paths
548
  return page_paths
549
 
550
+ def _chat_completions(
551
+ self,
552
+ messages: List[Dict[str, Any]],
553
+ max_tokens: int,
554
+ *,
555
+ temperature: Optional[float] = None,
556
+ top_p: Optional[float] = None,
557
+ ) -> str:
558
  url = f"{_require_api_url()}/chat/completions"
559
  payload: Dict[str, Any] = {
560
  "model": CHAT_MODEL_ID,
 
562
  "max_tokens": max_tokens,
563
  "stream": False,
564
  }
565
+ if temperature is not None:
566
+ payload["temperature"] = float(temperature)
567
+ if top_p is not None:
568
+ payload["top_p"] = float(top_p)
569
  resp = requests.post(url, headers=_api_headers(), json=payload, timeout=API_TIMEOUT_SEC)
570
  if resp.status_code >= 400:
571
  raise RuntimeError(f"VL API error {resp.status_code}: {resp.text[:1000]}")
 
584
  return "\n".join([p for p in parts if p]).strip()
585
  return str(content).strip()
586
 
587
+ def _real_generate_text_from_pdf(
588
+ self,
589
+ pdf_path: str,
590
+ prompt: str,
591
+ max_tokens: Optional[int] = None,
592
+ *,
593
+ temperature: Optional[float] = None,
594
+ top_p: Optional[float] = None,
595
+ ) -> str:
596
  page_image_paths = self._get_pdf_page_images(pdf_path)
597
  content: List[Dict[str, Any]] = []
598
  for p in page_image_paths:
599
  content.append({"type": "image_url", "image_url": {"url": image_file_to_data_url(p)}})
600
  content.append({"type": "text", "text": prompt})
601
  messages = [{"role": "user", "content": content}]
602
+ return self._chat_completions(
603
+ messages,
604
+ max_tokens=max_tokens or QWEN_VL_MAX_NEW_TOKENS,
605
+ temperature=temperature,
606
+ top_p=top_p,
607
+ )
608
 
609
  def _real_tts_single(self, text: str, out_path: str) -> str:
610
  if not text.strip():
 
683
  lecture_text = self._mock_generate_lecture(pdf_excerpt)
684
  mcqs = self._mock_generate_mcqs(lecture_text)
685
  else:
686
+ lecture_prompt = render_prompt_template(
687
+ str(lecture_template),
688
+ pdf_excerpt,
689
+ replacements={"style_seed": uuid.uuid4().hex},
690
+ )
691
+ lecture_text = self._real_generate_text_from_pdf(
692
+ pdf_path,
693
+ lecture_prompt,
694
+ max_tokens=QWEN_VL_MAX_NEW_TOKENS,
695
+ temperature=0.9,
696
+ top_p=0.95,
697
+ )
698
  quiz_prompt = render_prompt_template(str(mcq_template), pdf_excerpt)
699
+ raw_mcq_json = self._real_generate_text_from_pdf(
700
+ pdf_path,
701
+ quiz_prompt,
702
+ max_tokens=QWEN_VL_MCQ_MAX_NEW_TOKENS,
703
+ temperature=0.2,
704
+ top_p=0.9,
705
+ )
706
  try:
707
  mcqs = parse_mcq_json(raw_mcq_json)
708
  except json.JSONDecodeError:
 
711
  pdf_path,
712
  retry_prompt,
713
  max_tokens=QWEN_VL_MCQ_MAX_NEW_TOKENS,
714
+ temperature=0.2,
715
+ top_p=0.9,
716
  )
717
  mcqs = parse_mcq_json(retry_raw)
718
 
 
732
  if self.mock_mode:
733
  lecture_text = self._mock_generate_lecture(pdf_excerpt)
734
  else:
735
+ lecture_prompt = render_prompt_template(
736
+ str(lecture_template),
737
+ pdf_excerpt,
738
+ replacements={"style_seed": uuid.uuid4().hex},
739
+ )
740
+ lecture_text = self._real_generate_text_from_pdf(
741
+ pdf_path,
742
+ lecture_prompt,
743
+ max_tokens=QWEN_VL_MAX_NEW_TOKENS,
744
+ temperature=0.9,
745
+ top_p=0.95,
746
+ )
747
 
748
  return {
749
  "lecture_text": lecture_text,
 
759
 
760
  if self.mock_mode:
761
  mcqs = self._mock_generate_mcqs(pdf_excerpt)
762
+ return rebalance_mcq_answers([asdict(q) for q in mcqs])
763
 
764
  quiz_prompt = render_prompt_template(str(mcq_template), pdf_excerpt)
765
+ raw_mcq_json = self._real_generate_text_from_pdf(
766
+ pdf_path,
767
+ quiz_prompt,
768
+ max_tokens=QWEN_VL_MCQ_MAX_NEW_TOKENS,
769
+ temperature=0.2,
770
+ top_p=0.9,
771
+ )
772
  try:
773
  mcqs = parse_mcq_json(raw_mcq_json)
774
  except json.JSONDecodeError:
 
777
  pdf_path,
778
  retry_prompt,
779
  max_tokens=QWEN_VL_MCQ_MAX_NEW_TOKENS,
780
+ temperature=0.2,
781
+ top_p=0.9,
782
  )
783
  mcqs = parse_mcq_json(retry_raw)
784
+ return rebalance_mcq_answers([asdict(q) for q in mcqs])
785
 
786
  @spaces.GPU
787
  def synthesize_tts(self, text: str, name_prefix: str = "audio") -> str:
 
807
  parsed: List[MCQItem] = []
808
  for item in questions[:5]:
809
  q = str(item.get("question", "")).strip()
810
+ options = [normalize_option_text(x) for x in item.get("options", [])][:4]
811
  answer = str(item.get("answer", "")).strip().upper()
812
  explanation = str(item.get("explanation", "")).strip()
813
  if len(options) != 4:
 
822
  return parsed
823
 
824
 
825
def rebalance_mcq_answers(mcqs: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
    """Spread correct answers across the A-D positions.

    Each well-formed question (exactly 4 options, valid answer letter) gets
    its correct option moved to a randomly assigned letter so the answer key
    does not cluster on a single label. Malformed questions pass through
    untouched. At most 5 questions are kept.
    """
    labels = ["A", "B", "C", "D"]
    count = min(5, len(mcqs))
    # Fresh, unpredictable seed per call so every quiz gets a new layout.
    rng = random.Random(uuid.uuid4().int)
    # First four questions draw from a shuffled A-D (one of each); any extra
    # question gets an independent random letter.
    assigned = list(labels)
    rng.shuffle(assigned)
    while len(assigned) < count:
        assigned.append(rng.choice(labels))
    rebalanced: List[Dict[str, Any]] = []
    for idx in range(count):
        item = mcqs[idx]
        options = list(item.get("options", []) or [])
        answer = str(item.get("answer", "")).strip().upper()
        if len(options) != 4 or answer not in set(labels):
            # Malformed entry: keep as-is rather than guess.
            rebalanced.append(item)
            continue
        old_pos = labels.index(answer)
        keep = options[old_pos]
        # Distractors retain their original relative order.
        others = options[:old_pos] + options[old_pos + 1:]
        new_pos = labels.index(assigned[idx])
        shuffled_options = list(others)
        shuffled_options.insert(new_pos, keep)
        updated = dict(item)
        updated["options"] = shuffled_options
        updated["answer"] = labels[new_pos]
        rebalanced.append(updated)
    return rebalanced
857
+
858
+
859
  engine = QwenPipelineEngine()
860
 
861
 
 
884
  if mcq is None:
885
  return []
886
  labels = ["A", "B", "C", "D"]
887
+ return [f"{labels[i]}. {normalize_option_text(opt)}" for i, opt in enumerate(mcq["options"])]
888
 
889
 
890
  def score_text(state: Dict[str, Any]) -> str:
 
892
  return f"Score: {state.get('score', 0)} / {total}"
893
 
894
 
895
def _exam_chat_text_for_question(state: Dict[str, Any], mcq: Dict[str, Any]) -> str:
    """Format the current MCQ as a single chat message.

    Layout: "Question i/total" header, the question text, a blank line,
    then up to four lettered options (normalized display text).
    """
    number = state.get("current_index", 0) + 1
    total = len(state.get("mcqs", []))
    option_labels = ["A", "B", "C", "D"]
    opts = mcq.get("options", [])
    parts = [f"Question {number}/{total}", str(mcq.get("question", "")).strip(), ""]
    parts.extend(
        f"{option_labels[i]}. {normalize_option_text(opts[i])}"
        for i in range(min(4, len(opts)))
    )
    return "\n".join(parts).strip()
904
+
905
+
906
def _ensure_current_question_in_exam_chat(state: Dict[str, Any]) -> None:
    """Append the current question to the exam chat if not already shown.

    No-op when there is no quiz or it is completed. Only the most recent
    MCQ message is inspected: earlier ones belong to past questions.
    """
    if not state.get("mcqs") or state.get("completed"):
        return
    chat: List[Dict[str, Any]] = state.setdefault("exam_chat", [])
    q_index = int(state.get("current_index", 0))
    last_mcq = next((m for m in reversed(chat) if m.get("kind") == "mcq"), None)
    if last_mcq is not None and int(last_mcq.get("q_index", -1)) == q_index:
        return
    mcq = get_current_mcq(state)
    if mcq is None:
        return
    chat.append(
        {
            "role": "assistant",
            "kind": "mcq",
            "q_index": q_index,
            "text": _exam_chat_text_for_question(state, mcq),
        }
    )
920
+
921
+
922
+ def _append_exam_user_answer(state: Dict[str, Any], choice: str) -> None:
923
+ chat: List[Dict[str, Any]] = state.setdefault("exam_chat", [])
924
+ q_index = int(state.get("current_index", 0))
925
+ display = choice
926
+ if "." in choice:
927
+ _, rest = choice.split(".", 1)
928
+ if rest.strip():
929
+ display = rest.strip()
930
+ chat.append({"role": "user", "kind": "answer", "q_index": q_index, "text": display})
931
+
932
+
933
+ def _append_exam_assistant_text(state: Dict[str, Any], text: str, *, kind: str = "note") -> None:
934
+ chat: List[Dict[str, Any]] = state.setdefault("exam_chat", [])
935
+ q_index = int(state.get("current_index", 0))
936
+ chat.append({"role": "assistant", "kind": kind, "q_index": q_index, "text": text})
937
+
938
+
939
+ def _score_band(score: int, total: int) -> str:
940
+ if total <= 0:
941
+ return "none"
942
+ ratio = score / total
943
+ if ratio >= 0.9:
944
+ return "excellent"
945
+ if ratio >= 0.7:
946
+ return "good"
947
+ if ratio >= 0.5:
948
+ return "fair"
949
+ return "poor"
950
+
951
+
952
+ def _pick_variant(items: List[str], seed: int) -> str:
953
+ if not items:
954
+ return ""
955
+ return items[seed % len(items)]
956
+
957
+
958
def _examiner_style_prompt(character_id: str) -> str:
    """Build the system prompt that makes the LLM speak as the chosen examiner.

    Matches the id case-insensitively against "snape" and
    "mcgonagall"/"mcg"; any other id falls back to a generic strict
    examiner. Every variant embeds the same guardrails string.
    """
    cid = (character_id or "").lower()
    # Shared constraints appended to every persona prompt: no invented lore,
    # no named characters, stay grounded in the exam at hand.
    guardrails = (
        "Do NOT invent magical facts. "
        "You may use light Hogwarts classroom metaphors (e.g., 'like your Potions class' / marks / exam / detention), "
        "but do NOT mention spells, incantations, wands, named artifacts, named potions, or made-up magical theory. "
        "Do NOT mention Harry Potter or any specific character names. "
        "Keep remarks grounded in the student's performance on this paper/exam."
    )
    if "snape" in cid:
        return (
            "You are Professor Severus Snape. Cold, cutting, impatient with sloppy thinking; no emojis; no stage directions. "
            "Sound like Snape: sharp disdain, controlled cruelty, short jabs, second-person address ('you'). "
            "Avoid generic academic HR language like 'fundamental lack of precision' or 'theoretical principles'. "
            f"{guardrails} Be brief and exam-focused."
        )
    if "mcgonagall" in cid or "mcg" in cid:
        return (
            "You are Professor Minerva McGonagall. Crisp, strict, decisive; no emojis; no stage directions. "
            "Sound like McGonagall: brisk, no-nonsense, disciplined; firm standards with a controlled, teacherly tone; second-person address ('you'). "
            "Be less harsh than Snape: no contempt, no insults; correct firmly and encourage disciplined improvement. "
            "Avoid academic report phrasing (e.g., 'demonstrates', 'fundamental', 'theoretical', 'principles', 'application'). "
            "Prefer plain classroom language: 'That will not do', 'Pay attention', 'Be precise', 'Again', 'Good—carry on'. "
            "Be pointed, practical, and supportive when appropriate. "
            f"{guardrails} Be brief, firm, and exam-focused."
        )
    return (
        "You are a strict examiner. Be brief, precise, and exam-focused. "
        f"{guardrails} No emojis."
    )
988
+
989
+
990
def _llm_exam_feedback(messages: List[Dict[str, Any]], *, max_tokens: int = 120) -> str:
    """Run an examiner-feedback chat completion and return the reply text.

    Ensures the VL backend is loaded first, then delegates to the engine's
    chat-completions call with high-variety sampling (temperature 0.9,
    top_p 0.95) so repeated remarks differ between runs.
    """
    engine.ensure_vl_loaded()
    return engine._chat_completions(messages, max_tokens=max_tokens, temperature=0.9, top_p=0.95)
993
+
994
+
995
def _llm_short_exam_remark(character_id: str, *, kind: str, context: str = "") -> str:
    """Ask the LLM for a short, in-character examiner remark.

    *kind* is "correct", "incorrect", or anything else (treated as a final
    closing remark). Returns "" immediately in mock mode; the model reply
    has its whitespace collapsed to single spaces before returning.
    """
    if engine.mock_mode:
        return ""
    ctx = " ".join(str(context or "").strip().split())
    if ctx:
        ctx = f"Context: {ctx}\n"
    # A throwaway random hex string included in the prompt to push the model
    # toward varied phrasing between otherwise-identical calls.
    style_seed = uuid.uuid4().hex
    cid = (character_id or "").lower()
    if kind == "correct":
        # Per-character opener pools; the model must begin with one of them.
        if "snape" in cid:
            openers = ["Correct.", "Precisely.", "Good.", "Exactly.", "That's right.", "Adequate.", "Very well."]
        elif "mcgonagall" in cid or "mcg" in cid:
            openers = ["Good.", "Correct.", "Quite right.", "That's right.", "That's better.", "Well done.", "Exactly."]
        else:
            openers = ["That's right.", "That's correct.", "Correct.", "Exactly.", "Good.", "Well done."]
        instruction = (
            f"{ctx}"
            f"Style seed (do not repeat it): {style_seed}\n"
            "Write ONE short, in-character sentence reacting to a correct answer. "
            "Start the sentence with ONE of these openers exactly: "
            + ", ".join([f"'{o}'" for o in openers])
            + ". "
            "Choose the opener to maximize variety across runs. "
            "Max 16 words. No markdown. No emojis. "
            "Make it sound like the character, not a generic professor."
        )
    elif kind == "incorrect":
        if "snape" in cid:
            openers = ["Wrong!", "Wrong.", "No.", "Obviously not.", "Incorrect.", "Not even close."]
        elif "mcgonagall" in cid or "mcg" in cid:
            openers = ["No.", "Not quite.", "That will not do.", "Incorrect.", "Careful.", "Stop guessing."]
        else:
            openers = ["Wrong!", "Wrong.", "Not quite.", "No.", "Incorrect.", "That's wrong."]
        instruction = (
            f"{ctx}"
            f"Style seed (do not repeat it): {style_seed}\n"
            "Write ONE short, in-character sentence reacting to an incorrect answer. "
            "Start the sentence with ONE of these openers exactly: "
            + ", ".join([f"'{o}'" for o in openers])
            + ". "
            "Choose the opener to maximize variety across runs. "
            # The caller appends the correct answer itself; the remark must not.
            "Do NOT mention the correct option letter. "
            "Do NOT include the phrase 'The correct answer is'. "
            "Max 20 words. No markdown. No emojis. "
            "Make it sound like the character, not a generic professor."
        )
    else:
        # Final-remark variant: slightly longer, must include one revision tip.
        instruction = (
            f"{ctx}"
            f"Style seed (do not repeat it): {style_seed}\n"
            "Write 1–2 short, in-character sentences as a final examiner remark, with ONE concrete revision instruction. "
            "Max 28 words total. No markdown. No emojis. "
            "Do not sound like a generic academic report."
        )
    text = _llm_exam_feedback(
        [
            {"role": "system", "content": _examiner_style_prompt(character_id)},
            {"role": "user", "content": instruction},
        ],
        max_tokens=80 if kind in {"correct", "incorrect"} else 120,
    )
    return " ".join(str(text or "").strip().split())
1057
+
1058
+
1059
def exam_feedback_correct(character_id: str, *, q_index: int) -> str:
    """In-character reaction to a correct answer.

    Mock mode cycles through canned per-character lines (selected by
    q_index); otherwise asks the LLM for a remark and falls back to
    "That's right." if the call fails or returns nothing.
    """
    if engine.mock_mode:
        cid = (character_id or "").lower()
        if "snape" in cid:
            return _pick_variant(
                [
                    "That's right. Try not to look so astonished; it is unbecoming.",
                    "Correct. At least you read something other than the title.",
                    "Precisely. Keep up—this is not a guessing game.",
                    "Good. Acceptable. Proceed before you ruin it.",
                ],
                q_index,
            )
        if "mcgonagall" in cid or "mcg" in cid:
            return _pick_variant(
                [
                    "That's correct. Good—do not get complacent.",
                    "Good. Sensible. Keep your notes straight and move on.",
                    "Well done. Stay focused; the next will not be kinder.",
                    "Precisely. Continue, and keep the standard.",
                ],
                q_index,
            )
        return "That's right."
    try:
        remark = _llm_short_exam_remark(
            character_id,
            kind="correct",
            context=f"Question {q_index + 1} answered correctly.",
        )
        if remark:
            return remark
    except Exception:
        # Best-effort: any LLM failure degrades to the neutral fallback.
        pass
    return "That's right."
1094
+
1095
+
1096
def exam_feedback_incorrect(
    character_id: str,
    *,
    q_index: int,
    correct_choice_display: str,
    explanation: str,
) -> str:
    """In-character reaction to a wrong answer, plus the correct choice and
    its explanation.

    Mock mode picks a canned opener per character (by q_index); otherwise
    asks the LLM for the opener and falls back to "Incorrect." on failure.
    The returned text always ends with "The correct answer is ..." followed
    by the (normalized) explanation.
    """
    explanation = normalize_explanation_text(explanation)
    if engine.mock_mode:
        cid = (character_id or "").lower()
        if "snape" in cid:
            opener = _pick_variant(
                [
                    "Wrong! Listen carefully—if you are capable of it.",
                    "Incorrect. Pay attention; your confidence is not evidence.",
                    "Wrong. This is what careless reading looks like in public.",
                    "Incorrect. I expected better discipline. Clearly, that was optimistic.",
                ],
                q_index,
            )
            return f"{opener}\nThe correct answer is {correct_choice_display}\n\n{explanation}"
        if "mcgonagall" in cid or "mcg" in cid:
            opener = _pick_variant(
                [
                    "Incorrect. Think it through properly—do not guess.",
                    "Not quite. Slow down and read precisely; words matter.",
                    "Incorrect. You are guessing—stop it at once.",
                    "Not correct. Focus on the method, not the surface wording.",
                ],
                q_index,
            )
            return f"{opener}\nThe correct answer is {correct_choice_display}\n\n{explanation}"
        return f"Incorrect.\nThe correct answer is {correct_choice_display}\n\n{explanation}"
    try:
        remark = _llm_short_exam_remark(
            character_id,
            kind="incorrect",
            context=f"Question {q_index + 1} answered incorrectly.",
        )
        if remark:
            return f"{remark}\nThe correct answer is {correct_choice_display}\n\n{explanation}"
    except Exception:
        # Best-effort: any LLM failure degrades to the neutral fallback below.
        pass
    return f"Incorrect.\nThe correct answer is {correct_choice_display}\n\n{explanation}"
1140
+
1141
+
1142
def exam_feedback_final(character_id: str, *, score: int, total: int) -> str:
    """Closing examiner remark for the finished quiz.

    Mock mode maps the score band (see _score_band) to a canned per-character
    line; otherwise asks the LLM and falls back to a plain
    "Final score: s / t." on failure or empty reply.
    """
    if engine.mock_mode:
        cid = (character_id or "").lower()
        band = _score_band(score, total)
        if "snape" in cid:
            mapping = {
                "excellent": "Excellent. For once, you have not wasted my time.",
                "good": "Adequate. Do not mistake adequacy for insight.",
                "fair": "Mediocre. You have work to do—start now, not later.",
                "poor": "Disappointing. Guesswork is not scholarship; it is laziness.",
                "none": "No score to judge—how convenient for you.",
            }
            return mapping.get(band, "Enough.")
        if "mcgonagall" in cid or "mcg" in cid:
            mapping = {
                "excellent": "Excellent work. That is the standard I expect—keep it there.",
                "good": "Good. Solid understanding—polish the details and stop rushing.",
                "fair": "Passable, but uneven. Review the method carefully and be exact.",
                "poor": "Not acceptable. Go back and study properly, then try again.",
                "none": "No score to judge—start when you are ready to work seriously.",
            }
            return mapping.get(band, "Well.")
        return f"Final score: {score} / {total}."
    try:
        remark = _llm_short_exam_remark(
            character_id,
            kind="final",
            context=f"Final score: {score} / {total}.",
        )
        if remark:
            return remark
    except Exception:
        # Best-effort: any LLM failure degrades to the plain score line.
        pass
    return f"Final score: {score} / {total}."
1176
+
1177
+
1178
def _roleplay_explain_feedback(character_id: str) -> str:
    """In-character status line shown after the lecture has been generated,
    directing the user toward audio playback and the exam."""
    cid = (character_id or "").lower()
    if "snape" in cid:
        return "Lecture is ready. If you insist, press ‘Play Lecture Audio’; then go to the exam and try not to disgrace yourself."
    if "mcgonagall" in cid or "mcg" in cid:
        return "Lecture is ready. Review it properly, then go to the exam when you are prepared to be examined."
    return "Lecture is ready. Review it, then go to the exam when you are ready."
1185
+
1186
+
1187
def _roleplay_loading_text(character_id: str, *, phase: str) -> str:
    """In-character loading-overlay message.

    *phase* is "lecture" for the lecture-generation overlay; any other value
    is treated as exam preparation. The character's display name comes from
    its config, defaulting to "Professor".
    """
    cfg = get_character_config(character_id)
    name = str(cfg.get("display_name", "Professor"))
    cid = (character_id or "").lower()
    if phase == "lecture":
        if "snape" in cid:
            return f"Professor {name} is scrutinizing your paper…"
        if "mcgonagall" in cid or "mcg" in cid:
            return f"Professor {name} is reviewing your paper with unforgiving precision…"
        return f"Professor {name} is reviewing your paper…"
    if "snape" in cid:
        return f"Professor {name} is preparing something unpleasantly rigorous…"
    if "mcgonagall" in cid or "mcg" in cid:
        return f"Professor {name} is preparing a properly challenging set of questions…"
    return f"Professor {name} is preparing your materials…"
1202
+
1203
+
1204
def build_loading_html(text: str) -> str:
    """Render the loading-overlay markup for the given status text.

    The text is HTML-escaped (quote=False leaves quotes intact). Empty or
    falsy text yields "" so the overlay renders nothing.
    """
    safe = html.escape(str(text or ""), quote=False)
    if not safe:
        return ""
    markup = f"""
<div class="gen-loading-inner">
<div class="loader"></div>
<div class="gen-loading-text">{safe}</div>
</div>
"""
    return markup.strip()
1214
+
1215
+
1216
def _build_exam_chat_avatar_html(character_id: Optional[str]) -> str:
    """Return the <img> tag for the examiner's chat avatar, or "" when the
    character config has no avatar path or the image cannot be inlined."""
    cfg = get_character_config(character_id)
    # Avatar is embedded as a data URL so the HTML is self-contained.
    avatar_url = _image_data_url(Path(cfg.get("avatar_path", ""))) if cfg.get("avatar_path") else ""
    return f'<img class="exam-chat-avatar" src="{avatar_url}" alt="avatar" />' if avatar_url else ""
1220
+
1221
+
1222
def build_exam_chat_html(state: Dict[str, Any]) -> str:
    """Render the exam conversation as chat-bubble HTML.

    If the chat log is empty but a quiz is active and not completed, a
    single synthetic message with the current question is rendered so the
    panel is never blank. Message text is HTML-escaped and newlines become
    <br>; assistant bubbles get the examiner's avatar.
    """
    chat: List[Dict[str, Any]] = state.get("exam_chat", []) or []
    if not chat and state.get("mcqs") and not state.get("completed"):
        mcq = get_current_mcq(state)
        if mcq is not None:
            # Synthesized view only; the real log is appended elsewhere.
            chat = [{"role": "assistant", "kind": "mcq", "q_index": int(state.get("current_index", 0)), "text": _exam_chat_text_for_question(state, mcq)}]

    character_id = state.get("exam_character_id") or DEFAULT_CHARACTER_ID
    avatar_html = _build_exam_chat_avatar_html(character_id)

    parts: List[str] = ['<div class="exam-chat-wrap">']
    for msg in chat:
        role = msg.get("role", "assistant")
        safe = html.escape(str(msg.get("text", "")), quote=False).replace("\n", "<br>")
        if role == "user":
            parts.append(f'<div class="exam-msg user"><div class="bubble user">{safe}</div></div>')
        else:
            parts.append(f'<div class="exam-msg assistant">{avatar_html}<div class="bubble assistant">{safe}</div></div>')
    parts.append("</div>")
    return "".join(parts)
1242
+
1243
+
1244
  def reset_ui_from_state(
1245
  state: Dict[str, Any],
1246
  feedback: str = "",
1247
  *,
1248
  results_visible: bool = True,
1249
  loading_visible: bool = False,
1250
+ loading_text: str = "",
1251
+ exam_picker_visible: bool = False,
1252
  ):
1253
  quiz_ready = bool(state.get("mcqs"))
1254
  current_page = state.get("current_page", "explain")
1255
+ explain_character_id = state.get("character_id") or DEFAULT_CHARACTER_ID
1256
+ exam_character_id = state.get("exam_character_id") or explain_character_id
1257
+ top_character_id = exam_character_id if current_page == "exam" else explain_character_id
1258
+ top_picker_value = top_character_id
1259
  show_explain_page = results_visible and current_page != "exam"
1260
  show_exam_page = results_visible and current_page == "exam"
 
1261
  submit_interactive = quiz_ready and not state.get("completed", False)
1262
+ radio_interactive = submit_interactive
1263
  lecture_tts_ready = bool(state.get("lecture_text"))
 
1264
  if state.get("completed"):
 
1265
  radio_interactive = False
1266
  return (
1267
  state,
1268
+ build_character_header_html(top_character_id),
1269
+ gr.update(value=top_picker_value),
1270
+ build_chat_avatar_html(top_character_id),
1271
+ build_chat_meta_html(top_character_id),
1272
+ gr.update(value=build_loading_html(loading_text), visible=loading_visible),
1273
  gr.update(visible=show_explain_page),
1274
  gr.update(visible=show_exam_page),
 
1275
  state.get("status", "Idle"),
1276
  state.get("lecture_text", ""),
1277
  state.get("lecture_audio_path", None),
1278
  gr.update(interactive=lecture_tts_ready),
1279
  gr.update(visible=lecture_tts_ready, interactive=lecture_tts_ready),
1280
+ gr.update(visible=exam_picker_visible),
1281
+ gr.update(value=build_exam_chat_html(state), visible=show_exam_page and (quiz_ready or bool(state.get("exam_chat")))),
1282
  gr.update(choices=current_choices(state), value=None, interactive=radio_interactive),
1283
  score_text(state),
1284
  feedback,
 
 
 
1285
  gr.update(interactive=submit_interactive),
1286
  gr.update(interactive=quiz_ready),
1287
  )
 
1301
  feedback="正在读取论文并生成讲解与题目,请稍候...",
1302
  results_visible=False,
1303
  loading_visible=True,
1304
+ loading_text=_roleplay_loading_text(state.get("character_id") or DEFAULT_CHARACTER_ID, phase="lecture"),
1305
  )
1306
  try:
1307
  result = engine.build_lecture(pdf_file, get_character_config(state["character_id"]))
 
1323
  state["status"] = "Lecture generated."
1324
  yield reset_ui_from_state(
1325
  state,
1326
+ feedback=_roleplay_explain_feedback(state.get("character_id") or DEFAULT_CHARACTER_ID),
1327
  results_visible=True,
1328
  loading_visible=False,
1329
  )
 
1345
  return reset_ui_from_state(state, feedback="Load a PDF first.")
1346
  if state.get("completed"):
1347
  return reset_ui_from_state(state, feedback="Quiz already completed.")
 
 
1348
  if not choice:
1349
  return reset_ui_from_state(state, feedback="Please select an option.")
1350
 
 
1353
  state["status"] = "No current question."
1354
  return reset_ui_from_state(state, feedback="No current question.")
1355
 
1356
+ _ensure_current_question_in_exam_chat(state)
1357
+ _append_exam_user_answer(state, choice)
1358
+
1359
  selected_label = choice.split(".", 1)[0].strip().upper()
1360
  correct_label = str(mcq["answer"]).upper()
1361
+ exam_character_id = state.get("exam_character_id") or state.get("character_id") or DEFAULT_CHARACTER_ID
1362
+ q_index = int(state.get("current_index", 0))
1363
 
1364
  if selected_label == correct_label:
1365
  state["score"] += 1
1366
  state["last_explanation_tts_text"] = ""
1367
  state["explanation_audio_path"] = None
1368
+ state["awaiting_next_after_wrong"] = False
1369
+ correct_text = exam_feedback_correct(str(exam_character_id), q_index=q_index)
1370
+ state["status"] = correct_text
1371
  if state["current_index"] >= len(state["mcqs"]) - 1:
1372
  state["completed"] = True
1373
  state["status"] = "Quiz completed."
1374
+ total = len(state.get("mcqs") or [])
1375
+ final_comment = exam_feedback_final(str(exam_character_id), score=int(state.get("score", 0)), total=total)
1376
+ _append_exam_assistant_text(
1377
+ state,
1378
+ f"Quiz finished.\nFinal score: {state['score']} / {len(state['mcqs'])}.\n{final_comment}",
1379
+ kind="summary",
1380
+ )
1381
  return reset_ui_from_state(
1382
  state,
1383
+ feedback="",
1384
  )
1385
 
1386
+ _append_exam_assistant_text(state, correct_text, kind="result")
1387
  state["current_index"] += 1
1388
+ _ensure_current_question_in_exam_chat(state)
1389
+ return reset_ui_from_state(state, feedback="")
1390
 
1391
  correct_idx = ["A", "B", "C", "D"].index(correct_label)
1392
  correct_choice_display = f"{correct_label}. {mcq['options'][correct_idx]}"
1393
+ explanation = normalize_explanation_text(mcq.get("explanation", ""))
1394
+ state["last_explanation_tts_text"] = ""
1395
  state["explanation_audio_path"] = None
1396
+ state["awaiting_next_after_wrong"] = False
1397
+ incorrect_text = exam_feedback_incorrect(
1398
+ str(exam_character_id),
1399
+ q_index=q_index,
1400
+ correct_choice_display=str(correct_choice_display),
1401
+ explanation=str(explanation or "").strip(),
1402
  )
1403
+ state["status"] = incorrect_text.splitlines()[0] if incorrect_text else "Incorrect."
1404
+ _append_exam_assistant_text(state, incorrect_text or "Incorrect.", kind="explanation" if explanation else "result")
 
 
 
 
 
 
 
 
 
1405
  if state["current_index"] >= len(state["mcqs"]) - 1:
1406
  state["completed"] = True
 
 
 
1407
  state["status"] = "Quiz completed."
1408
+ total = len(state.get("mcqs") or [])
1409
+ final_comment = exam_feedback_final(str(exam_character_id), score=int(state.get("score", 0)), total=total)
1410
+ _append_exam_assistant_text(
1411
  state,
1412
+ f"Quiz finished.\nFinal score: {state['score']} / {len(state['mcqs'])}.\n{final_comment}",
1413
+ kind="summary",
1414
  )
1415
+ return reset_ui_from_state(state, feedback="")
1416
  state["current_index"] += 1
1417
+ _ensure_current_question_in_exam_chat(state)
1418
+ return reset_ui_from_state(state, feedback="")
 
 
 
1419
 
1420
 
1421
  def restart_quiz(state: Dict[str, Any]):
 
1427
  state["completed"] = False
1428
  state["last_explanation_tts_text"] = ""
1429
  state["explanation_audio_path"] = None
1430
+ state["exam_chat"] = []
1431
+ _ensure_current_question_in_exam_chat(state)
1432
  state["status"] = "Quiz restarted."
1433
  return reset_ui_from_state(state, feedback="Quiz restarted.")
1434
 
1435
 
1436
def open_exam_picker(state: Dict[str, Any]):
    """Show the examiner-selection overlay.

    Requires a generated lecture; otherwise re-renders with an error
    feedback message and no results panel.
    """
    if not state.get("lecture_text"):
        state["status"] = "No lecture loaded."
        return reset_ui_from_state(state, feedback="Generate lecture first.", results_visible=False, loading_visible=False)
    state["status"] = "Choose an examiner."
    # Stay on the explain page while the picker overlay is open.
    state["current_page"] = "explain"
    return reset_ui_from_state(state, feedback="", results_visible=True, loading_visible=False, exam_picker_visible=True)
1443
+
1444
+
1445
def close_exam_picker(state: Dict[str, Any]):
    """Dismiss the examiner picker by re-rendering with default visibility
    flags (picker hidden)."""
    return reset_ui_from_state(state, feedback="")
1447
+
1448
+
1449
def start_exam_mcgonagall(state: Dict[str, Any]):
    """Start the exam flow with McGonagall as examiner.

    NOTE(review): the id is passed as "Mcgonagall" while the Snape variant
    uses lowercase "snape"; downstream matching lowercases ids, but confirm
    get_character_config handles this casing consistently.
    """
    yield from generate_exam_mcq("Mcgonagall", state)
1451
+
1452
+
1453
def start_exam_snape(state: Dict[str, Any]):
    """Start the exam flow with Snape as examiner."""
    yield from generate_exam_mcq("snape", state)
1455
+
1456
+
1457
def start_exam(state: Dict[str, Any]):
    """Start the exam using the currently selected lecture character as the
    examiner; requires a generated lecture."""
    if not state.get("lecture_text"):
        state["status"] = "No lecture loaded."
        yield reset_ui_from_state(state, feedback="Generate lecture first.", results_visible=False, loading_visible=False)
        return
    character_id = state.get("character_id") or DEFAULT_CHARACTER_ID
    yield from generate_exam_mcq(character_id, state)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1464
 
1465
 
1466
  def generate_exam_mcq(selected_character_id: Optional[str], state: Dict[str, Any]):
 
1469
  yield reset_ui_from_state(state, feedback="Generate lecture first.", results_visible=False, loading_visible=False)
1470
  return
1471
  if not selected_character_id:
1472
+ state["status"] = "Please select a character to generate MCQs."
1473
  yield reset_ui_from_state(state, feedback="", results_visible=True, loading_visible=False)
1474
  return
1475
 
1476
  state["current_page"] = "exam"
1477
  state["exam_character_id"] = selected_character_id
1478
+ cfg = get_character_config(selected_character_id)
1479
+ display_name = str(cfg.get("display_name", "Professor"))
1480
+ state["status"] = f"{display_name} is preparing your exam..."
1481
+ state["mcq_generating"] = True
1482
  state["last_explanation_tts_text"] = ""
1483
  state["explanation_audio_path"] = None
1484
  state["mcqs"] = []
1485
+ state["exam_chat"] = []
1486
  yield reset_ui_from_state(
1487
  state,
1488
  feedback="",
1489
+ results_visible=True,
1490
  loading_visible=True,
1491
+ loading_text=_roleplay_loading_text(selected_character_id, phase="exam"),
1492
  )
1493
 
1494
  try:
 
1503
  state["awaiting_next_after_wrong"] = False
1504
  state["completed"] = False
1505
  state["current_page"] = "exam"
1506
+ state["mcq_generating"] = False
1507
+ _ensure_current_question_in_exam_chat(state)
1508
+ state["status"] = "Your exam is prepared."
1509
  yield reset_ui_from_state(
1510
  state,
1511
  feedback="",
 
1514
  )
1515
  except Exception as exc:
1516
  state["current_page"] = "exam"
1517
+ state["mcq_generating"] = False
1518
  state["status"] = "Failed during MCQ generation."
1519
+ _append_exam_assistant_text(
1520
+ state,
1521
+ f"Failed to generate the exam.\nError: {type(exc).__name__}: {exc}",
1522
+ kind="note",
1523
+ )
1524
  yield reset_ui_from_state(
1525
  state,
1526
+ feedback="",
1527
  results_visible=True,
1528
  loading_visible=False,
1529
  )
1530
 
1531
 
1532
+ def on_generate_click(pdf_file: Optional[str], explain_character_id: str, state: Dict[str, Any]):
 
 
 
 
1533
  yield from process_pdf(pdf_file, explain_character_id, state)
1534
 
1535
 
 
1540
 
1541
  def on_character_change(character_id: str, state: Dict[str, Any]):
1542
  cfg = get_character_config(character_id)
1543
+ if state.get("current_page") == "exam":
1544
+ state["exam_character_id"] = cfg["id"]
1545
+ loading_on = bool(state.get("mcq_generating"))
1546
+ loading_text = _roleplay_loading_text(cfg["id"], phase="exam") if loading_on else ""
1547
+ return (
1548
+ state,
1549
+ build_character_header_html(cfg["id"]),
1550
+ build_chat_avatar_html(cfg["id"]),
1551
+ build_chat_meta_html(cfg["id"]),
1552
+ gr.update(visible=False),
1553
+ gr.update(visible=True),
1554
+ gr.update(value=build_loading_html(loading_text), visible=loading_on),
1555
+ state.get("status", "Exam"),
1556
+ )
1557
  state["character_id"] = cfg["id"]
1558
  state["current_page"] = "explain"
1559
  state["lecture_audio_path"] = None
 
1567
  build_chat_meta_html(cfg["id"]),
1568
  gr.update(visible=False),
1569
  gr.update(visible=False),
1570
+ gr.update(value="", visible=False),
1571
  "Character switched. Upload PDF and click Generate.",
1572
  )
1573
 
1574
 
 
 
 
 
 
 
1575
  def play_lecture_audio(state: Dict[str, Any]):
1576
  if not state.get("lecture_text"):
1577
  state["status"] = "No lecture text available."
 
1781
  gap: 0.35rem !important;
1782
  flex-wrap: wrap !important;
1783
  justify-content: center !important;
1784
+ align-items: center !important;
1785
  }}
1786
  #character-select-wrap label {{
1787
  background: transparent !important;
1788
  border: 1px solid rgba(255,255,255,0.14) !important;
1789
  border-radius: 999px !important;
1790
+ padding: 0 !important;
1791
+ min-height: 42px !important;
1792
+ height: 42px !important;
1793
+ display: inline-flex !important;
1794
+ align-items: center !important;
1795
+ justify-content: center !important;
1796
+ line-height: 1 !important;
1797
  }}
1798
  #character-select-wrap label span {{
1799
  color: rgba(240,243,250,0.78) !important;
1800
  font-size: 0.88rem !important;
1801
+ display: inline-flex !important;
1802
+ align-items: center !important;
1803
+ justify-content: center !important;
1804
+ height: 100% !important;
1805
+ padding: 0 0.8rem !important;
1806
+ line-height: 1 !important;
1807
+ text-align: center !important;
1808
  }}
1809
  #character-select-wrap input[type="radio"] {{
1810
  display: none !important;
 
1826
  border-radius: 12px;
1827
  backdrop-filter: blur(3px);
1828
  }}
1829
+ .gen-loading-inner {{
1830
+ display: flex;
1831
+ flex-direction: column;
1832
+ align-items: center;
1833
+ gap: 10px;
1834
+ }}
1835
+ .loader {{
1836
+ width: 120px;
1837
+ height: 20px;
1838
+ border-radius: 20px;
1839
+ background: linear-gradient(#f97316 0 0) 0/0% no-repeat #93c5fd;
1840
+ animation: l2 2s infinite steps(10);
1841
+ }}
1842
+ @keyframes l2 {{
1843
+ 100% {{ background-size: 110%; }}
1844
+ }}
1845
+ .gradio-container [data-testid="progress-bar"],
1846
+ .gradio-container [data-testid="progress-bar"] *,
1847
+ .gradio-container .progress-bar,
1848
+ .gradio-container .progress-bar-container,
1849
+ .gradio-container .progress-bar-wrap,
1850
+ .gradio-container .top-progress,
1851
+ .gradio-container .progress {{
1852
+ display: none !important;
1853
+ }}
1854
  #results-panel {{
1855
  background: transparent !important;
1856
  border: none !important;
 
1898
  border-radius: 20px !important;
1899
  padding: 0.35rem 0.45rem !important;
1900
  }}
1901
+ #lecture-wrap textarea,
1902
+ #lecture-wrap .prose {{
1903
  font-style: italic;
1904
  line-height: 1.45 !important;
1905
  color: rgba(244,246,251,0.95) !important;
 
1922
  transform: translateX(-50%);
1923
  bottom: 18px;
1924
  width: min(860px, calc(100vw - 28px));
1925
+ z-index: 40;
1926
  background: rgba(24, 26, 34, 0.88);
1927
  border: 1px solid rgba(255,255,255,0.08);
1928
  border-radius: 999px;
1929
  box-shadow: 0 16px 40px rgba(0,0,0,0.22);
1930
  backdrop-filter: blur(10px);
1931
  padding: 8px 10px;
1932
+ align-items: center !important;
1933
+ gap: 10px !important;
1934
  }}
1935
  #bottom-composer .wrap {{
1936
  border: none !important;
 
1944
  border-radius: 999px !important;
1945
  }}
1946
  #generate-btn button {{
1947
+ min-height: 42px !important;
1948
+ height: 42px !important;
1949
  padding: 0 18px !important;
1950
  font-size: 0.9rem !important;
1951
+ line-height: 42px !important;
1952
  min-width: 132px !important;
1953
+ display: inline-flex !important;
1954
+ align-items: center !important;
1955
+ justify-content: center !important;
1956
+ }}
1957
+ #generate-btn .wrap {{
1958
+ min-height: 42px !important;
1959
+ display: flex !important;
1960
+ align-items: center !important;
1961
  }}
1962
  #pdf-uploader {{
1963
  min-height: 42px;
 
1994
  justify-content: space-between;
1995
  align-items: center;
1996
  }}
1997
+ #exam-chat .exam-chat-wrap {{
1998
+ width: 100%;
1999
+ display: flex;
2000
+ flex-direction: column;
2001
+ gap: 10px;
2002
+ padding: 0;
2003
+ border-radius: 0;
2004
+ background: transparent;
2005
+ border: none;
2006
+ max-height: 420px;
2007
+ overflow-y: auto;
2008
+ }}
2009
+ #exam-chat .exam-msg {{
2010
+ display: flex;
2011
+ gap: 10px;
2012
+ align-items: flex-end;
2013
+ }}
2014
+ #exam-chat .exam-msg.user {{
2015
+ justify-content: flex-end;
2016
+ }}
2017
+ #exam-chat .exam-msg.assistant {{
2018
+ justify-content: flex-start;
2019
+ }}
2020
+ #exam-chat .exam-chat-avatar {{
2021
+ width: 34px;
2022
+ height: 34px;
2023
+ border-radius: 999px;
2024
+ object-fit: cover;
2025
+ }}
2026
+ #exam-chat .bubble {{
2027
+ max-width: 82%;
2028
+ padding: 10px 12px;
2029
+ border-radius: 14px;
2030
+ font-size: 0.95rem;
2031
+ line-height: 1.35;
2032
+ white-space: normal;
2033
+ }}
2034
+ #exam-chat .bubble.assistant {{
2035
+ background: rgba(255, 255, 255, 0.10);
2036
+ border: 1px solid rgba(255, 255, 255, 0.14);
2037
+ color: rgba(255, 255, 255, 0.95);
2038
+ }}
2039
+ #exam-chat .bubble.user {{
2040
+ background: rgba(59, 130, 246, 0.22);
2041
+ border: 1px solid rgba(59, 130, 246, 0.28);
2042
+ color: rgba(255, 255, 255, 0.95);
2043
+ }}
2044
+ @media (prefers-color-scheme: light) {{
2045
+ body {{
2046
+ background: linear-gradient(180deg, #f5f7fb 0%, #eef2f8 100%) !important;
2047
+ }}
2048
+ .gradio-container .block,
2049
+ .gradio-container .panel,
2050
+ .gradio-container .gr-box,
2051
+ .gradio-container .gr-form,
2052
+ .gradio-container .gr-group {{
2053
+ background: rgba(255, 255, 255, 0.96) !important;
2054
+ border-color: rgba(15, 23, 42, 0.10) !important;
2055
+ }}
2056
+ .gradio-container textarea,
2057
+ .gradio-container input,
2058
+ .gradio-container label,
2059
+ .gradio-container .prose,
2060
+ .gradio-container .prose p,
2061
+ .gradio-container .prose code,
2062
+ .gradio-container .prose strong {{
2063
+ color: #0f172a !important;
2064
+ }}
2065
+ .gradio-container .prose span,
2066
+ .gradio-container .prose em,
2067
+ .gradio-container .prose li,
2068
+ .gradio-container .prose a,
2069
+ .gradio-container .prose blockquote,
2070
+ .gradio-container .prose h1,
2071
+ .gradio-container .prose h2,
2072
+ .gradio-container .prose h3,
2073
+ .gradio-container .prose h4,
2074
+ .gradio-container .prose h5,
2075
+ .gradio-container .prose h6 {{
2076
+ color: #0f172a !important;
2077
+ opacity: 1 !important;
2078
+ }}
2079
+ #lecture-wrap .prose,
2080
+ #lecture-wrap .prose * {{
2081
+ color: #0f172a !important;
2082
+ opacity: 1 !important;
2083
+ }}
2084
+ #lecture-wrap .prose code,
2085
+ #lecture-wrap .prose pre {{
2086
+ color: #0f172a !important;
2087
+ opacity: 1 !important;
2088
+ }}
2089
+ .char-name {{
2090
+ color: #0f172a !important;
2091
+ }}
2092
+ .char-tag {{
2093
+ color: rgba(15, 23, 42, 0.78) !important;
2094
+ }}
2095
+ .char-byline {{
2096
+ color: rgba(15, 23, 42, 0.58) !important;
2097
+ }}
2098
+ #character-select-wrap label {{
2099
+ border-color: rgba(15, 23, 42, 0.22) !important;
2100
+ background: rgba(255, 255, 255, 0.85) !important;
2101
+ min-height: 42px !important;
2102
+ height: 42px !important;
2103
+ display: inline-flex !important;
2104
+ align-items: center !important;
2105
+ justify-content: center !important;
2106
+ }}
2107
+ #character-select-wrap label span {{
2108
+ color: rgba(15, 23, 42, 0.82) !important;
2109
+ height: 100% !important;
2110
+ display: inline-flex !important;
2111
+ align-items: center !important;
2112
+ justify-content: center !important;
2113
+ text-align: center !important;
2114
+ }}
2115
+ #character-select-wrap label:has(input[type="radio"]:checked) {{
2116
+ background: rgba(15, 23, 42, 0.10) !important;
2117
+ border-color: rgba(15, 23, 42, 0.32) !important;
2118
+ }}
2119
+ #character-select-wrap label:has(input[type="radio"]:checked) span {{
2120
+ color: #0f172a !important;
2121
+ }}
2122
+ #character-select-wrap svg,
2123
+ #character-select-wrap [data-icon] {{
2124
+ color: rgba(15, 23, 42, 0.70) !important;
2125
+ }}
2126
+ #chat-meta {{
2127
+ color: #0f172a !important;
2128
+ background: rgba(255, 255, 255, 0.92) !important;
2129
+ border: 1px solid rgba(15, 23, 42, 0.10) !important;
2130
+ border-radius: 12px !important;
2131
+ padding: 0.45rem 0.7rem !important;
2132
+ }}
2133
+ #chat-meta .pill {{
2134
+ background: rgba(15, 23, 42, 0.10) !important;
2135
+ color: rgba(15, 23, 42, 0.75) !important;
2136
+ }}
2137
+ #lecture-wrap {{
2138
+ background: rgba(255, 255, 255, 0.95) !important;
2139
+ border-color: rgba(15, 23, 42, 0.10) !important;
2140
+ }}
2141
+ #lecture-wrap .wrap,
2142
+ #lecture-wrap .block,
2143
+ #lecture-wrap [data-testid="textbox"] {{
2144
+ background: transparent !important;
2145
+ border: none !important;
2146
+ box-shadow: none !important;
2147
+ }}
2148
+ #lecture-wrap textarea {{
2149
+ background: #ffffff !important;
2150
+ color: #0f172a !important;
2151
+ border: 1px solid rgba(15, 23, 42, 0.16) !important;
2152
+ border-radius: 10px !important;
2153
+ }}
2154
+ #gen-loading {{
2155
+ color: #0f172a !important;
2156
+ background: rgba(255, 255, 255, 0.90) !important;
2157
+ border-color: rgba(15, 23, 42, 0.14) !important;
2158
+ }}
2159
+ #gen-loading,
2160
+ #gen-loading *,
2161
+ #gen-loading p,
2162
+ #gen-loading span {{
2163
+ color: #111827 !important;
2164
+ opacity: 1 !important;
2165
+ }}
2166
+ #bottom-composer {{
2167
+ background: rgba(255, 255, 255, 0.94) !important;
2168
+ border-color: rgba(15, 23, 42, 0.14) !important;
2169
+ box-shadow: 0 16px 40px rgba(15, 23, 42, 0.16) !important;
2170
+ }}
2171
+ #pdf-uploader [data-testid="file-upload-dropzone"] {{
2172
+ border-color: rgba(15, 23, 42, 0.20) !important;
2173
+ }}
2174
+ #pdf-uploader [data-testid="file-upload-dropzone"] * {{
2175
+ color: #0f172a !important;
2176
+ }}
2177
+ #status-wrap, #quiz-wrap, #tts-wrap, #explain-wrap {{
2178
+ background: #ffffff !important;
2179
+ border: 1px solid rgba(15, 23, 42, 0.10) !important;
2180
+ box-shadow: 0 6px 18px rgba(15, 23, 42, 0.06) !important;
2181
+ }}
2182
+ #status-wrap .block,
2183
+ #quiz-wrap .block,
2184
+ #tts-wrap .block,
2185
+ #explain-wrap .block,
2186
+ #status-wrap .wrap,
2187
+ #quiz-wrap .wrap,
2188
+ #tts-wrap .wrap,
2189
+ #explain-wrap .wrap {{
2190
+ background: #ffffff !important;
2191
+ border-color: rgba(15, 23, 42, 0.10) !important;
2192
+ box-shadow: none !important;
2193
+ }}
2194
+ #status-wrap textarea,
2195
+ #quiz-wrap textarea,
2196
+ #explain-wrap textarea,
2197
+ #quiz-wrap input,
2198
+ #status-wrap input,
2199
+ #explain-wrap input {{
2200
+ background: #ffffff !important;
2201
+ color: #0f172a !important;
2202
+ border: 1px solid rgba(15, 23, 42, 0.16) !important;
2203
+ }}
2204
+ #quiz-wrap input[type="radio"] {{
2205
+ appearance: auto !important;
2206
+ accent-color: #f97316 !important;
2207
+ }}
2208
+ #quiz-wrap input[type="radio"]:checked {{
2209
+ background-color: #f97316 !important;
2210
+ border-color: #f97316 !important;
2211
+ }}
2212
+ #quiz-wrap label,
2213
+ #quiz-wrap legend,
2214
+ #status-wrap label,
2215
+ #explain-wrap label {{
2216
+ color: #0f172a !important;
2217
+ }}
2218
+ #quiz-wrap label span,
2219
+ #quiz-wrap [role="radiogroup"] label span {{
2220
+ color: #0f172a !important;
2221
+ }}
2222
+ #quiz-wrap .prose,
2223
+ #quiz-wrap .prose p,
2224
+ #quiz-wrap .prose span,
2225
+ #quiz-wrap .prose strong,
2226
+ #quiz-wrap .prose em,
2227
+ #quiz-wrap .prose li {{
2228
+ color: #0f172a !important;
2229
+ opacity: 1 !important;
2230
+ }}
2231
+ #quiz-wrap .prose p {{
2232
+ color: #1f2937 !important;
2233
+ font-weight: 500 !important;
2234
+ }}
2235
+ #quiz-wrap [role="radiogroup"] label {{
2236
+ background: #f8fafc !important;
2237
+ border: 1px solid rgba(15, 23, 42, 0.14) !important;
2238
+ }}
2239
+ #exam-chat .exam-chat-wrap {{
2240
+ background: transparent !important;
2241
+ border: none !important;
2242
+ }}
2243
+ #exam-chat .bubble.assistant {{
2244
+ background: #f8fafc !important;
2245
+ border: 1px solid rgba(15, 23, 42, 0.12) !important;
2246
+ color: #0f172a !important;
2247
+ }}
2248
+ #exam-chat .bubble.user {{
2249
+ background: rgba(59, 130, 246, 0.12) !important;
2250
+ border: 1px solid rgba(59, 130, 246, 0.22) !important;
2251
+ color: #0f172a !important;
2252
+ }}
2253
+ #results-panel,
2254
+ #chat-row,
2255
+ #chat-main,
2256
+ #chat-avatar-col {{
2257
+ background: transparent !important;
2258
+ border: none !important;
2259
+ box-shadow: none !important;
2260
+ }}
2261
+ #chat-row > div,
2262
+ #chat-row .block,
2263
+ #chat-row .wrap,
2264
+ #chat-main .block,
2265
+ #chat-main .wrap,
2266
+ #chat-avatar-col .block,
2267
+ #chat-avatar-col .wrap {{
2268
+ background: transparent !important;
2269
+ border: none !important;
2270
+ box-shadow: none !important;
2271
+ }}
2272
+ #chat-avatar-col .html-container,
2273
+ #chat-avatar-col .prose {{
2274
+ background: transparent !important;
2275
+ border: none !important;
2276
+ box-shadow: none !important;
2277
+ }}
2278
+ #exam-nav button {{
2279
+ border-color: rgba(15, 23, 42, 0.16) !important;
2280
+ }}
2281
+ #exam-picker-overlay {{
2282
+ position: fixed;
2283
+ inset: 0;
2284
+ z-index: 200;
2285
+ display: none;
2286
+ align-items: center;
2287
+ justify-content: center;
2288
+ background: rgba(2, 6, 23, 0.55);
2289
+ backdrop-filter: blur(6px);
2290
+ padding: 16px;
2291
+ }}
2292
+ #exam-picker-overlay:not(.hide) {{
2293
+ display: flex;
2294
+ }}
2295
+ #exam-picker-overlay.hide {{
2296
+ display: none !important;
2297
+ pointer-events: none !important;
2298
+ }}
2299
+ #exam-picker-modal {{
2300
+ width: min(720px, 94vw);
2301
+ border-radius: 16px;
2302
+ background: #ffffff;
2303
+ border: 1px solid rgba(15, 23, 42, 0.12);
2304
+ box-shadow: 0 18px 50px rgba(15, 23, 42, 0.35);
2305
+ padding: 16px;
2306
+ height: auto !important;
2307
+ max-height: 320px;
2308
+ overflow: hidden;
2309
+ }}
2310
+ #exam-picker-modal .block,
2311
+ #exam-picker-modal .wrap,
2312
+ #exam-picker-modal .panel {{
2313
+ background: transparent !important;
2314
+ border: none !important;
2315
+ box-shadow: none !important;
2316
+ }}
2317
+ #exam-picker-title {{
2318
+ font-weight: 700;
2319
+ color: #0f172a;
2320
+ margin-bottom: 10px;
2321
+ }}
2322
+
2323
+ .exam-picker-grid {{
2324
+ display: flex !important;
2325
+ flex-wrap: nowrap;
2326
+ gap: 12px;
2327
+ }}
2328
+ .exam-picker-card {{
2329
+ flex: 1 1 0;
2330
+ min-width: 0 !important;
2331
+ border-radius: 14px;
2332
+ border: 1px solid rgba(15, 23, 42, 0.12);
2333
+ background: #f8fafc;
2334
+ padding: 12px;
2335
+ overflow: hidden;
2336
+ transition: transform 120ms ease, border-color 120ms ease, box-shadow 120ms ease;
2337
+ }}
2338
+ .exam-picker-card:hover {{
2339
+ transform: translateY(-2px);
2340
+ border-color: rgba(59, 130, 246, 0.35);
2341
+ box-shadow: 0 10px 24px rgba(15, 23, 42, 0.18);
2342
+ }}
2343
+ .exam-picker-avatar {{
2344
+ width: 56px;
2345
+ height: 56px;
2346
+ border-radius: 999px;
2347
+ object-fit: cover;
2348
+ display: block;
2349
+ margin: 0 auto 10px auto;
2350
+ }}
2351
+ .exam-picker-card button {{
2352
+ width: 100%;
2353
+ }}
2354
+ [data-testid="dropdown-menu"],
2355
+ #character-select-wrap [role="listbox"] {{
2356
+ background: rgba(255, 255, 255, 0.98) !important;
2357
+ border-color: rgba(15, 23, 42, 0.14) !important;
2358
+ box-shadow: 0 12px 30px rgba(15, 23, 42, 0.18) !important;
2359
+ }}
2360
+ [data-testid="dropdown-menu"] * {{
2361
+ color: #0f172a !important;
2362
+ }}
2363
+ }}
2364
  .container {{max-width: 980px; margin: 0 auto;}}
2365
  .mono {{font-family: ui-monospace, Menlo, Consolas, monospace;}}
2366
  {bg_css}
 
2406
  """
2407
 
2408
 
2409
def build_exam_picker_avatar_html(character_id: str) -> str:
    """Build the small HTML card body shown for one examiner in the exam-picker modal.

    Looks up the character's config and, when an avatar image path is present,
    inlines the image as a data URL so no static-file route is needed.
    Returns a wrapper <div>; the <img> is omitted when no avatar is available.
    """
    cfg = get_character_config(character_id)
    avatar_path = cfg.get("avatar_path")
    # _image_data_url may return "" (e.g. missing file); treat that the same
    # as "no avatar configured" and render an empty card.
    data_url = _image_data_url(Path(avatar_path)) if avatar_path else ""
    if data_url:
        img_tag = f'<img class="exam-picker-avatar" src="{data_url}" alt="avatar" />'
    else:
        img_tag = ""
    return f"""
    <div class="exam-picker-card-inner">
      {img_tag}
    </div>
    """
2418
+
2419
+
2420
  with gr.Blocks(css=CSS) as demo:
2421
  with gr.Column(elem_id="page-shell"):
2422
  character_header_html = gr.HTML(build_character_header_html(DEFAULT_CHARACTER_ID), elem_id="character-card")
 
2432
 
2433
  state = gr.State(new_session_state())
2434
 
2435
+ loading_md = gr.HTML("", elem_id="gen-loading", visible=False)
2436
 
2437
  with gr.Column(visible=False, elem_id="results-panel") as explain_page:
2438
  with gr.Row(elem_id="chat-row"):
 
2441
  with gr.Column(elem_id="chat-main"):
2442
  chat_meta_html = gr.HTML(build_chat_meta_html(DEFAULT_CHARACTER_ID))
2443
  with gr.Column(elem_id="lecture-wrap"):
2444
+ lecture_box = gr.Markdown(
2445
+ "Generated lecture explanation will appear here...",
2446
+ latex_delimiters=[
2447
+ {"left": "$$", "right": "$$", "display": True},
2448
+ {"left": "$", "right": "$", "display": False},
2449
+ {"left": "\\[", "right": "\\]", "display": True},
2450
+ {"left": "\\(", "right": "\\)", "display": False},
2451
+ ],
2452
  )
2453
  with gr.Row(elem_id="lecture-actions"):
2454
  play_lecture_btn = gr.Button("Play Lecture Audio", interactive=False, scale=0)
2455
  with gr.Row(elem_id="exam-entry-wrap"):
2456
+ exam_btn = gr.Button("Go to Exam", interactive=False, variant="secondary", scale=0)
2457
 
2458
  with gr.Column(elem_id="tts-wrap"):
2459
  lecture_audio = gr.Audio(label="Lecture TTS", type="filepath")
2460
 
2461
+ with gr.Column(visible=False, elem_id="exam-picker-overlay") as exam_picker_overlay:
2462
+ with gr.Column(elem_id="exam-picker-modal"):
2463
+ gr.HTML('<div id="exam-picker-title">Choose your examiner</div>')
2464
+ with gr.Row(elem_classes="exam-picker-grid"):
2465
+ with gr.Column(elem_classes="exam-picker-card"):
2466
+ gr.HTML(build_exam_picker_avatar_html("Mcgonagall"))
2467
+ pick_mcg_btn = gr.Button("Mcgonagall", variant="primary")
2468
+ with gr.Column(elem_classes="exam-picker-card"):
2469
+ gr.HTML(build_exam_picker_avatar_html("snape"))
2470
+ pick_snape_btn = gr.Button("Snape", variant="primary")
2471
+ cancel_exam_picker_btn = gr.Button("Cancel", variant="secondary")
2472
+
2473
  with gr.Column(visible=False, elem_id="exam-page") as exam_page:
2474
  with gr.Row(elem_id="exam-nav"):
2475
  back_btn = gr.Button("Back", variant="secondary", scale=0)
2476
+ with gr.Column(elem_id="status-wrap", visible=False):
2477
+ status_box = gr.Textbox(label="Status", value="Idle", interactive=False, visible=False)
 
 
 
 
 
 
 
 
2478
  with gr.Column(elem_id="quiz-wrap"):
2479
+ exam_chat = gr.HTML(
2480
+ "",
2481
+ visible=False,
2482
+ elem_id="exam-chat",
2483
+ autoscroll=True,
2484
+ js_on_load="""
2485
+ () => {
2486
+ const state = window.__examChatAutoScroll || (window.__examChatAutoScroll = {});
2487
+ const scrollToBottom = (wrap) => {
2488
+ if (!wrap) return;
2489
+ const doScroll = () => { wrap.scrollTop = wrap.scrollHeight; };
2490
+ doScroll();
2491
+ requestAnimationFrame(doScroll);
2492
+ setTimeout(doScroll, 50);
2493
+ };
2494
+ const disableGlobalBlockers = () => {
2495
+ const nodes = document.querySelectorAll('.wrap.default, .wrap.center');
2496
+ nodes.forEach((n) => {
2497
+ const rect = n.getBoundingClientRect();
2498
+ const nearFullScreen =
2499
+ rect.width >= window.innerWidth - 4 &&
2500
+ rect.height >= window.innerHeight - 4 &&
2501
+ rect.left <= 2 &&
2502
+ rect.top <= 2;
2503
+ if (!nearFullScreen) return;
2504
+ const cs = window.getComputedStyle(n);
2505
+ if (cs.position !== 'fixed') return;
2506
+ n.style.setProperty('display', 'none', 'important');
2507
+ n.style.setProperty('pointer-events', 'none', 'important');
2508
+ n.style.setProperty('background', 'transparent', 'important');
2509
+ });
2510
+ };
2511
+ const ensure = () => {
2512
+ disableGlobalBlockers();
2513
+ const root = document.querySelector('#exam-chat');
2514
+ const wrap = root ? root.querySelector('.exam-chat-wrap') : null;
2515
+ if (!root || !wrap) return;
2516
+ if (state.wrap === wrap) return;
2517
+ state.wrap = wrap;
2518
+ if (state.wrapObserver) state.wrapObserver.disconnect();
2519
+ state.wrapObserver = new MutationObserver(() => scrollToBottom(wrap));
2520
+ state.wrapObserver.observe(wrap, { childList: true, subtree: true, characterData: true });
2521
+ if (state.rootObserver) state.rootObserver.disconnect();
2522
+ state.rootObserver = new MutationObserver(() => scrollToBottom(wrap));
2523
+ state.rootObserver.observe(root, { childList: true, subtree: true, attributes: true });
2524
+ scrollToBottom(wrap);
2525
+ };
2526
+ ensure();
2527
+ if (!state.bodyObserver) {
2528
+ state.bodyObserver = new MutationObserver(() => ensure());
2529
+ state.bodyObserver.observe(document.body, { childList: true, subtree: true });
2530
+ }
2531
+ }
2532
+ """,
2533
+ )
2534
  choice_radio = gr.Radio(choices=[], label="Select one answer", interactive=False)
2535
  with gr.Row():
2536
  submit_btn = gr.Button("Submit Answer", interactive=False)
 
2537
  restart_btn = gr.Button("Restart Quiz", interactive=False)
2538
+ score_box = gr.Textbox(label="Score", value="Score: 0 / 0", interactive=False, visible=False)
2539
+ feedback_box = gr.Textbox(label="Feedback / Explanation", lines=8, interactive=False, visible=False)
 
 
 
 
2540
 
2541
  with gr.Row(elem_id="bottom-composer"):
2542
  pdf_input = gr.File(
 
2552
 
2553
  outputs = [
2554
  state,
2555
+ character_header_html,
2556
+ character_dropdown,
2557
+ chat_avatar_html,
2558
+ chat_meta_html,
2559
  loading_md,
2560
  explain_page,
2561
  exam_page,
 
2562
  status_box,
2563
  lecture_box,
2564
  lecture_audio,
2565
  play_lecture_btn,
2566
  exam_btn,
2567
+ exam_picker_overlay,
2568
+ exam_chat,
2569
  choice_radio,
2570
  score_box,
2571
  feedback_box,
 
 
 
2572
  submit_btn,
2573
  restart_btn,
2574
  ]
2575
 
2576
+ run_btn.click(fn=on_generate_click, inputs=[pdf_input, character_dropdown, state], outputs=outputs, show_progress="hidden")
2577
  character_dropdown.change(
2578
  fn=on_character_change,
2579
  inputs=[character_dropdown, state],
2580
  outputs=[state, character_header_html, chat_avatar_html, chat_meta_html, explain_page, exam_page, loading_md, status_box],
2581
  )
2582
+ exam_btn.click(fn=open_exam_picker, inputs=[state], outputs=outputs, show_progress="hidden")
2583
+ pick_mcg_btn.click(fn=start_exam_mcgonagall, inputs=[state], outputs=outputs, show_progress="hidden")
2584
+ pick_snape_btn.click(fn=start_exam_snape, inputs=[state], outputs=outputs, show_progress="hidden")
2585
+ cancel_exam_picker_btn.click(fn=close_exam_picker, inputs=[state], outputs=outputs, show_progress="hidden")
2586
+ back_btn.click(fn=go_to_explain_page, inputs=[state], outputs=outputs, show_progress="hidden")
2587
+ submit_btn.click(fn=submit_answer, inputs=[choice_radio, state], outputs=outputs, show_progress="hidden")
2588
+ restart_btn.click(fn=restart_quiz, inputs=[state], outputs=outputs, show_progress="hidden")
2589
  play_lecture_btn.click(
2590
  fn=play_lecture_audio,
2591
  inputs=[state],
2592
  outputs=[state, status_box, lecture_audio, feedback_box],
2593
+ show_progress="hidden",
2594
  )
 
 
 
 
 
2595
 
2596
 
2597
# Enable Gradio's request queue for event handlers (required for orderly
# handling of the long-running generation callbacks defined above).
demo.queue()

if __name__ == "__main__":
    # Launch the UI only when run as a script, not when imported.
    demo.launch()
characters/Mcgonagall/lecture_prompt.txt CHANGED
@@ -1,10 +1,47 @@
1
- 你是一名风格冷静、严谨但清晰的课程助教(Snape 风格),请阅读用户上传的论文内容,并输出一段中文讲解,要求:
2
- 1. 先说明论文要解决的问题和背景;
3
- 2. 再解释核心方法(按步骤/模块);
4
- 3. 再总结实验结果或亮点;
5
- 4. 最后给出局限性与适用场景;
6
- 5. 语言精炼、逻辑清楚,适合课堂讲解(约 400-700 字);
7
- 6. 不要虚构论文中不存在的实验细节。
8
-
9
- 论文内容(可能是节选):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
10
  {document}
 
1
+ You are Professor Minerva McGonagall.
2
+
3
+ Personality and tone (push it closer to character):
4
+ - Crisp, authoritative, and unyielding on standards
5
+ - Strict, no-nonsense, and occasionally dry
6
+ - Supportive only when the student is genuinely trying
7
+ - You value precision and disciplined thinking over “vibes”
8
+
9
+ Context:
10
+ The student has provided a full research paper and wants a guided explanation to prepare for an exam.
11
+
12
+ Hard rules:
13
+ - Ignore References, Bibliography, Appendix, and supplementary material.
14
+ - Do not invent details that are not in the paper.
15
+ - Do not quote long passages; paraphrase.
16
+ - Do not use rigid headings or report-style sections.
17
+ - Do not use emojis.
18
+ - No action or stage directions. Do not write things like “clears throat”, “pauses”, “smiles”, or any physical descriptions. Only deliver the lecture content itself.
19
+
20
+ Delivery style:
21
+ - Write as continuous spoken teaching, like a stern revision tutorial.
22
+ - Use natural transitions (e.g., “The paper begins by…”, “Now the key point is…”, “Here is where students slip…”).
23
+ - Insert McGonagall-style remarks throughout (short, pointed, disciplined). Be more decisive than polite.
24
+ - Frequently call out what examiners would ask, and what careless students misunderstand.
25
+ - Make the roleplay prominent: frequently address the student directly (“you”, “your”), and allow sharper, more dramatic McGonagall-style admonitions—as long as they remain academically useful.
26
+
27
+ Mandatory opening (2–4 sentences, in character):
28
+ Start with a McGonagall-style greeting that is a little longer than a simple “hello” (greeting + expectations in one sentence).
29
+ This greeting must feel fresh each time you are asked to explain a new paper. Do not reuse a fixed line across runs.
30
+ Use this hidden style seed to vary the phrasing and rhythm (do not mention it explicitly): {style_seed}
31
+ Then continue with a concise statement of what the paper is about and why it matters.
32
+
33
+ What to cover, naturally in flow:
34
+ - Problem and motivation
35
+ - Core method idea and intuition
36
+ - How the approach works (step-by-step, but spoken, not bullet-heavy)
37
+ - Experiments and what the results actually mean
38
+ - Limitations, assumptions, and failure modes
39
+ - What the student must remember for an exam
40
+
41
+ Mandatory closing (2–3 sentences, in character):
42
+ End with a firm McGonagall-style verdict on whether the student’s understanding would “pass”, and one direct instruction on what to revise next.
43
+ Close by asking, in character, whether the student is ready for the exam (one short question). Do NOT use the exact wording “Are you ready for the exam?” Vary the phrasing each time.
44
+ Use this hidden style seed to vary the closing (do not mention it explicitly): {style_seed}
45
+
46
+ Paper text:
47
  {document}
characters/Mcgonagall/mcq_prompt.txt CHANGED
@@ -1,23 +1,47 @@
1
- 请基于下面论文内容,生成 5 道中文单选题(MCQ),用于课堂测验。
2
- 严格输出 JSON(不要 markdown 代码块),格式如下:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3
  {
4
  "questions": [
5
  {
6
- "question": "...",
7
- "options": ["A选项", "B选项", "C选项", "D选项"],
8
  "answer": "A",
9
- "explanation": "..."
10
  }
11
  ]
12
  }
13
 
14
- 要求:
15
- 1. 5 题;
16
- 2. 每题 4 个选项;
17
- 3. answer 必须是 A/B/C/D;
18
- 4. 解析要说明为什么正确,以及常见误区;
19
- 5. 题目覆盖背景、方法、结果、局限性;
20
- 6. 题目难度适中,适合课程课堂测验。
21
 
22
- 论文内容(可能是节选):
23
  {document}
 
1
+ You are Professor Minerva McGonagall.
2
+
3
+ Tone:
4
+ Strict, fair, and academically rigorous. No dithering.
5
+
6
+ Context:
7
+ The student has studied the paper and is now taking a short examination.
8
+
9
+ Hard rules:
10
+ - Ignore References, Bibliography, Appendix, and supplementary sections.
11
+ - Every question must be answerable from the paper’s main content only.
12
+ - Do not invent details.
13
+ - Output must be valid JSON only. No Markdown, no code fences, no extra text.
14
+
15
+ Task:
16
+ Create exactly 5 multiple-choice questions (A–D) based on the paper.
17
+
18
+ Coverage:
19
+ - Core idea / contribution (1)
20
+ - Method understanding (2)
21
+ - Experiments / results interpretation (1)
22
+ - Limitations / assumptions (1)
23
+
24
+ Options requirement (critical):
25
+ - Provide 4 options as plain strings WITHOUT leading labels.
26
+ Good: "Uses cosine similarity between embeddings."
27
+ Bad: "A. Uses cosine similarity between embeddings."
28
+
29
+ JSON schema (exact):
30
  {
31
  "questions": [
32
  {
33
+ "question": "",
34
+ "options": ["", "", "", ""],
35
  "answer": "A",
36
+ "explanation": ""
37
  }
38
  ]
39
  }
40
 
41
+ Explanation requirement:
42
+ - 2–4 sentences.
43
+ - Explain why the correct option is correct, and briefly why one tempting distractor is wrong.
44
+ - Keep the explanation in character (stern, academic, pointed), occasionally addressing the student directly, without stage directions.
 
 
 
45
 
46
+ Paper text:
47
  {document}
characters/Mcgonagall/mcq_retry_prompt.txt CHANGED
@@ -1,14 +1,26 @@
1
- 基于以下论文内容生成 5 道中文单选题。只输出合法 JSON,不要任何解释,不要 markdown。
2
 
3
- 限制:
4
- 1. 必须是紧凑 JSON(单行也可以);
5
- 2. 共 5 题;
6
- 3. 每题字段:question、options(4项)、answer(A/B/C/D)、explanation;
7
- 4. explanation 保持简短(1-2句);
8
- 5. 不要输出任何 JSON 以外内容。
9
 
10
- 输出格式:
11
- {"questions":[{"question":"...","options":["...","...","...","..."],"answer":"A","explanation":"..."}]}
 
 
 
 
 
12
 
13
- 论文内容:
 
 
 
 
 
 
 
 
 
 
 
 
14
  {document}
 
1
+ You are Professor Minerva McGonagall.
2
 
3
+ Your previous output failed to parse. Correct it by producing STRICTLY valid JSON only.
 
 
 
 
 
4
 
5
+ Hard rules:
6
+ - Ignore References, Bibliography, Appendix, and supplementary sections.
7
+ - Create exactly 5 MCQs (A–D).
8
+ - Do not invent details.
9
+ - Options must be plain strings WITHOUT leading labels like "A.", "B)", etc.
10
+ - Output JSON only. No extra text, no markdown, no code fences.
11
+ - Keep explanations in character (stern, academic, pointed), without stage directions.
12
 
13
+ JSON schema (exact):
14
+ {
15
+ "questions": [
16
+ {
17
+ "question": "…",
18
+ "options": ["…", "…", "…", "…"],
19
+ "answer": "A",
20
+ "explanation": "…"
21
+ }
22
+ ]
23
+ }
24
+
25
+ Paper text:
26
  {document}
characters/snape/lecture_prompt.txt CHANGED
@@ -1,10 +1,47 @@
1
- 你是一名风格冷静、严谨但清晰的课程助教(Snape 风格),请阅读用户上传的论文内容,并输出一段中文讲解,要求:
2
- 1. 先说明论文要解决的问题和背景;
3
- 2. 再解释核心方法(按步骤/模块);
4
- 3. 再总结实验结果或亮点;
5
- 4. 最后给出局限性与适用场景;
6
- 5. 语言精炼、逻辑清楚,适合课堂讲解(约 400-700 字);
7
- 6. 不要虚构论文中不存在的实验细节。
8
-
9
- 论文内容(可能是节选):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
10
  {document}
 
1
+ You are Professor Severus Snape.
2
+
3
+ Personality and tone (push it closer to character):
4
+ - Cold, sharp, and intellectually intimidating
5
+ - Impatient with shallow reading, loose definitions, and hand-waving
6
+ - Dry, cutting academic remarks—short, controlled, never screaming
7
+ - Demanding and precise; the student must earn approval
8
+
9
+ Context:
10
+ The student has provided a full research paper and wants a guided explanation to prepare for an exam.
11
+
12
+ Hard rules:
13
+ - Ignore References, Bibliography, Appendix, and supplementary material.
14
+ - Do not invent details that are not in the paper.
15
+ - Do not quote long passages; paraphrase.
16
+ - Do not use rigid headings or report-style sections.
17
+ - Do not use emojis.
18
+ - No action or stage directions. Do not write things like “clears throat”, “pauses”, “smirks”, or any physical descriptions. Only deliver the lecture content itself.
19
+
20
+ Delivery style:
21
+ - Continuous spoken teaching, like a strict tutorial.
22
+ - Use natural transitions (e.g., “The paper begins by…”, “What they are actually doing here is…”, “This matters because…”).
23
+ - Inject Snape-like remarks frequently (short, pointed, slightly contemptuous of sloppy thinking).
24
+ - Highlight common misconceptions and exactly what would be marked wrong in an exam.
25
+ - Make the roleplay prominent: address the student directly, and allow sharper, more theatrical disdain—as long as you remain precise and academically grounded.
26
+
27
+ Mandatory opening (2–4 sentences, in character):
28
+ Start with a Snape-style greeting that is a little longer than a simple “hello” (greeting + disdainful expectations in one sentence).
29
+ This greeting must feel fresh each time you are asked to explain a new paper. Do not reuse a fixed line across runs.
30
+ Use this hidden style seed to vary the phrasing and rhythm (do not mention it explicitly): {style_seed}
31
+ Then state the paper’s aim and why the method matters—succinctly.
32
+
33
+ What to cover, naturally in flow:
34
+ - The problem and motivation (why anyone should care)
35
+ - The core idea and the real contribution (not marketing)
36
+ - How the method works (intuitive flow with precise terms)
37
+ - Experiments and what they actually demonstrate (and what they do not)
38
+ - Limitations, assumptions, and likely failure cases
39
+ - What the student must know to avoid embarrassment in an exam
40
+
41
+ Mandatory closing (2–3 sentences, in character):
42
+ End with a restrained Snape-style verdict on the student’s performance and one pointed instruction for revision, implying that careless reading is the root cause.
43
+ Close by asking, in character, whether the student is ready for the exam (one short question). Do NOT use the exact wording “Are you ready for the exam?” Vary the phrasing each time.
44
+ Use this hidden style seed to vary the closing (do not mention it explicitly): {style_seed}
45
+
46
+ Paper text:
47
  {document}
characters/snape/mcq_prompt.txt CHANGED
@@ -1,23 +1,48 @@
1
- 请基于下面论文内容,生成 5 道中文单选题(MCQ),用于课堂测验。
2
- 严格输出 JSON(不要 markdown 代码块),格式如下:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3
  {
4
  "questions": [
5
  {
6
  "question": "...",
7
- "options": ["A选项", "B选项", "C选项", "D选项"],
8
  "answer": "A",
9
  "explanation": "..."
10
  }
11
  ]
12
  }
13
 
14
- 要求:
15
- 1. 5 题;
16
- 2. 每题 4 个选项;
17
- 3. answer 必须是 A/B/C/D;
18
- 4. 解析要说明为什么正确,以及常见误区;
19
- 5. 题目覆盖背景、方法、结果、局限性;
20
- 6. 题目难度适中,适合课程课堂测验。
21
 
22
- 论文内容(可能是节选):
23
  {document}
 
1
+ You are Professor Severus Snape.
2
+
3
+ Tone:
4
+ Strict, formal, and academically demanding.
5
+
6
+ Context:
7
+ The student has studied the full paper and is now taking a short examination.
8
+
9
+ Important:
10
+ - Ignore References, Bibliography, Appendix, and supplementary sections.
11
+ - Questions must be answerable only from the main paper content.
12
+ - Do not invent details.
13
+ - Output valid JSON only. No extra text, no Markdown, no code fences.
14
+
15
+ Task:
16
+ Create exactly 5 multiple-choice questions (A–D) based on the paper.
17
+
18
+ Coverage:
19
+ - Abstract/overall contribution (1)
20
+ - Method understanding (2)
21
+ - Experimental evidence / results interpretation (1)
22
+ - Limitations / assumptions (1)
23
+
24
+ Difficulty:
25
+ Mixed (easy to hard), testing real understanding.
26
+
27
+
28
+ Format exactly:
29
+
30
  {
31
  "questions": [
32
  {
33
  "question": "...",
34
+ "options": ["...", "...", "...", "..."],
35
  "answer": "A",
36
  "explanation": "..."
37
  }
38
  ]
39
  }
40
 
41
+ Requirements:
42
+ - Explanation should state why the answer is correct and also address common misconceptions.
43
+ - Keep the explanation in character (cold, sharp, slightly theatrical), occasionally addressing the student directly, without stage directions.
44
+ - Output valid JSON only. No extra text, no Markdown.
45
+ - Options must NOT include leading labels like "A.", "B)", etc. The UI will add A/B/C/D.
 
 
46
 
47
+ Paper text:
48
  {document}
characters/snape/mcq_retry_prompt.txt CHANGED
@@ -1,14 +1,26 @@
1
- 基于以下论文内容生成 5 道中文单选题。只输出合法 JSON,不要任何解释,不要 markdown。
2
 
3
- 限制:
4
- 1. 必须是紧凑 JSON(单行也可以);
5
- 2. 共 5 题;
6
- 3. 每题字段:question、options(4项)、answer(A/B/C/D)、explanation;
7
- 4. explanation 保持简短(1-2句);
8
- 5. 不要输出任何 JSON 以外内容。
9
 
10
- 输出格式:
11
- {"questions":[{"question":"...","options":["...","...","...","..."],"answer":"A","explanation":"..."}]}
 
 
 
 
 
12
 
13
- 论文内容:
 
 
 
 
 
 
 
 
 
 
 
 
14
  {document}
 
1
+ You are Professor Severus Snape.
2
 
3
+ Your previous output failed to parse. Fix it. Produce STRICTLY valid JSON only.
 
 
 
 
 
4
 
5
+ Hard rules:
6
+ - Ignore References, Bibliography, Appendix, and supplementary sections.
7
+ - Create exactly 5 MCQs (A–D).
8
+ - Do not invent details.
9
+ - Options must be plain strings WITHOUT leading labels like "A.", "B)", etc.
10
+ - Output JSON only. No extra text, no markdown, no code fences.
11
+ - Keep explanations in character (cold, sharp, slightly theatrical), without stage directions.
12
 
13
+ JSON schema (exact):
14
+ {
15
+ "questions": [
16
+ {
17
+ "question": "…",
18
+ "options": ["…", "…", "…", "…"],
19
+ "answer": "A",
20
+ "explanation": "…"
21
+ }
22
+ ]
23
+ }
24
+
25
+ Paper text:
26
  {document}