jpruzcuen commited on
Commit ·
fa2ae91
1
Parent(s): 14e10ce
quiz ui to navigate questions
Browse files- app.py +92 -71
- chatbot.py +44 -0
- quiz.py +159 -0
app.py
CHANGED
|
@@ -1,71 +1,12 @@
|
|
|
|
|
| 1 |
import gradio as gr
|
| 2 |
-
from
|
| 3 |
-
from
|
| 4 |
-
|
| 5 |
-
print("Downloading model...")
|
| 6 |
-
model_path = hf_hub_download(
|
| 7 |
-
repo_id="ebbalg/llama-finetome",
|
| 8 |
-
filename="llama-3.2-1b-instruct.Q4_K_M.gguf"
|
| 9 |
-
)
|
| 10 |
-
|
| 11 |
-
print("Loading model...")
|
| 12 |
-
llm = Llama(
|
| 13 |
-
model_path=model_path,
|
| 14 |
-
n_ctx=2048,
|
| 15 |
-
n_threads=2,
|
| 16 |
-
verbose=False,
|
| 17 |
-
chat_format="llama-3"
|
| 18 |
-
)
|
| 19 |
-
|
| 20 |
-
def chat(message, history):
|
| 21 |
-
"""Simple chat function"""
|
| 22 |
-
messages = []
|
| 23 |
-
|
| 24 |
-
# Handle history - it might be a list of tuples or list of dicts
|
| 25 |
-
if history:
|
| 26 |
-
for h in history:
|
| 27 |
-
if isinstance(h, (list, tuple)):
|
| 28 |
-
# Format: [user_msg, assistant_msg]
|
| 29 |
-
messages.append({"role": "user", "content": str(h[0])})
|
| 30 |
-
messages.append({"role": "assistant", "content": str(h[1])})
|
| 31 |
-
elif isinstance(h, dict):
|
| 32 |
-
# Format: {"role": "...", "content": "..."}
|
| 33 |
-
messages.append(h)
|
| 34 |
-
|
| 35 |
-
# Add current message
|
| 36 |
-
messages.append({"role": "user", "content": message})
|
| 37 |
-
|
| 38 |
-
# Generate response
|
| 39 |
-
response = llm.create_chat_completion(
|
| 40 |
-
messages=messages,
|
| 41 |
-
max_tokens=256,
|
| 42 |
-
temperature=0.7,
|
| 43 |
-
)
|
| 44 |
-
|
| 45 |
-
return response["choices"][0]["message"]["content"]
|
| 46 |
-
|
| 47 |
-
def quiz():
|
| 48 |
-
'''Generate a multiple choice quiz with 10 questions (created by the llm)'''
|
| 49 |
-
system_prompt = {
|
| 50 |
-
"role": "system",
|
| 51 |
-
"content": (
|
| 52 |
-
"Generate a set of 10 multiple-choice questions about machine learning for a student."
|
| 53 |
-
"Each question should have 4 answer options (A–D) with a single correct answer.\n\n"
|
| 54 |
-
"Format exactly like this:\n\n"
|
| 55 |
-
"1. Question text...\n"
|
| 56 |
-
"A) ...\nB) ...\nC) ...\nD) ...\n**Correct Answer: X**\n\n"
|
| 57 |
-
)
|
| 58 |
-
}
|
| 59 |
-
|
| 60 |
-
response = llm.create_chat_completion(
|
| 61 |
-
messages=[system_prompt],
|
| 62 |
-
max_tokens=512,
|
| 63 |
-
temperature=0.7
|
| 64 |
-
)
|
| 65 |
-
|
| 66 |
-
return response["choices"][0]["message"]["content"]
|
| 67 |
|
| 68 |
|
|
|
|
|
|
|
|
|
|
| 69 |
|
| 70 |
|
| 71 |
with gr.Blocks(title="TAI: AI Teacher Assistant") as demo:
|
|
@@ -76,10 +17,15 @@ with gr.Blocks(title="TAI: AI Teacher Assistant") as demo:
|
|
| 76 |
|
| 77 |
This Llama 3.2 1B model was fine-tuned on the FineTome-100k instruction dataset.
|
| 78 |
""")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 79 |
with gr.Row():
|
| 80 |
|
| 81 |
# Left column: chat
|
| 82 |
-
with gr.Column(scale=
|
| 83 |
chatbot = gr.ChatInterface(
|
| 84 |
chat,
|
| 85 |
examples=[
|
|
@@ -91,12 +37,87 @@ with gr.Blocks(title="TAI: AI Teacher Assistant") as demo:
|
|
| 91 |
)
|
| 92 |
|
| 93 |
# Right column: quiz
|
| 94 |
-
with gr.Column(scale=
|
| 95 |
-
gr.Markdown("##
|
| 96 |
-
|
| 97 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 98 |
|
| 99 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 100 |
|
| 101 |
|
| 102 |
demo.launch()
|
|
|
|
| 1 |
+
import re
|
| 2 |
import gradio as gr
|
| 3 |
+
from quiz import create_quiz, parse_quiz, start_quiz, submit_and_next, restart_quiz
|
| 4 |
+
from chatbot import chat
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 5 |
|
| 6 |
|
| 7 |
+
print("Creating quiz...")
|
| 8 |
+
raw_quiz = create_quiz()
|
| 9 |
+
parsed_quiz = parse_quiz(raw_quiz)
|
| 10 |
|
| 11 |
|
| 12 |
with gr.Blocks(title="TAI: AI Teacher Assistant") as demo:
|
|
|
|
| 17 |
|
| 18 |
This Llama 3.2 1B model was fine-tuned on the FineTome-100k instruction dataset.
|
| 19 |
""")
|
| 20 |
+
|
| 21 |
+
initial_quiz = create_quiz()
|
| 22 |
+
quiz_state = gr.State(initial_quiz)
|
| 23 |
+
|
| 24 |
+
|
| 25 |
with gr.Row():
|
| 26 |
|
| 27 |
# Left column: chat
|
| 28 |
+
with gr.Column(scale=1):
|
| 29 |
chatbot = gr.ChatInterface(
|
| 30 |
chat,
|
| 31 |
examples=[
|
|
|
|
| 37 |
)
|
| 38 |
|
| 39 |
# Right column: quiz
|
| 40 |
+
with gr.Column(scale=2):
|
| 41 |
+
gr.Markdown("## Test Yourself")
|
| 42 |
+
# Visible elements
|
| 43 |
+
progress = gr.Markdown(f"**Progress:** 0/0", elem_id="quiz-progress")
|
| 44 |
+
question_md = gr.Markdown("", elem_id="quiz-question")
|
| 45 |
+
options = gr.Radio(choices=[], label="Choose an answer", type="value")
|
| 46 |
+
next_btn = gr.Button("Next")
|
| 47 |
+
start_btn = gr.Button("Start Quiz", variant="primary")
|
| 48 |
+
restart_btn = gr.Button("Restart Quiz")
|
| 49 |
+
result_md = gr.Markdown("", visible=False)
|
| 50 |
+
|
| 51 |
+
# State holders
|
| 52 |
+
quiz_raw_state = gr.State(raw_quiz) # raw quiz text
|
| 53 |
+
quiz_parsed_state = gr.State(parsed_quiz) # parsed list of questions
|
| 54 |
+
idx_state = gr.State(0) # current index
|
| 55 |
+
score_state = gr.State(0) # current score
|
| 56 |
+
finished_state = gr.State(False) # finished flag
|
| 57 |
+
|
| 58 |
+
# NOTE(review): removed a duplicate start_btn.click registration that used to
# live here. Its lambda returned start_quiz()'s *payload dict* straight into
# question_md (rendering the dict repr as markdown), and it raced the
# start_full handler registered below, which already wires question text,
# options, index, score, progress and result visibility correctly.
# The unused on_start helper (7 return values mapped to 1 output) went with it.
|
| 70 |
|
| 71 |
+
# After start, we also need to set options, progress, idx, score and ensure result is hidden.
|
| 72 |
+
def start_full(quiz_raw, quiz_parsed):
|
| 73 |
+
payload, idx, score = start_quiz(quiz_raw, quiz_parsed)
|
| 74 |
+
return payload["question_md"], payload["options"], idx, score, payload["progress"], "", False
|
| 75 |
+
|
| 76 |
+
start_btn.click(
|
| 77 |
+
fn=start_full,
|
| 78 |
+
inputs=[quiz_raw_state, quiz_parsed_state],
|
| 79 |
+
outputs=[question_md, options, idx_state, score_state, progress, result_md, finished_state]
|
| 80 |
+
)
|
| 81 |
+
|
| 82 |
+
# Next button: check selected and move forward
|
| 83 |
+
def on_next(selected, quiz_parsed, idx, score):
|
| 84 |
+
md_or_q, opts, new_idx, new_score, prog, finished = submit_and_next(selected, quiz_parsed, idx, score)
|
| 85 |
+
if finished:
|
| 86 |
+
# show final result
|
| 87 |
+
return md_or_q, [], new_idx, new_score, prog, True
|
| 88 |
+
else:
|
| 89 |
+
return md_or_q, opts, new_idx, new_score, prog, False
|
| 90 |
+
|
| 91 |
+
next_btn.click(
|
| 92 |
+
fn=on_next,
|
| 93 |
+
inputs=[options, quiz_parsed_state, idx_state, score_state],
|
| 94 |
+
outputs=[question_md, options, idx_state, score_state, progress, finished_state]
|
| 95 |
+
)
|
| 96 |
+
|
| 97 |
+
# When finished_state flips to True, reveal the result markdown (question_md
# already holds the final results text produced by submit_and_next) and hide
# the answer options; on restart do the reverse.
def show_result_if_finished(finished_flag, current_question_md):
    """Map the finished flag to (result_md update, options update)."""
    if finished_flag:
        # One gr.update per component: set value AND visibility together.
        return gr.update(value=current_question_md, visible=True), gr.update(visible=False)
    return gr.update(value="", visible=False), gr.update(visible=True)

finished_state.change(
    fn=show_result_if_finished,
    inputs=[finished_state, question_md],
    # BUG FIX: the outputs list previously named result_md twice
    # ([result_md, result_md, options]); Gradio maps one return value per
    # listed component, so value and visibility must be a single update.
    outputs=[result_md, options]
)
|
| 110 |
+
|
| 111 |
+
# Restart quiz (without regenerating)
|
| 112 |
+
def on_restart():
|
| 113 |
+
q_md, q_opts, idx, score, prog = restart_quiz(raw_quiz, parsed_quiz)
|
| 114 |
+
return q_md, q_opts, idx, score, prog, "", False
|
| 115 |
+
|
| 116 |
+
restart_btn.click(
|
| 117 |
+
fn=on_restart,
|
| 118 |
+
inputs=[],
|
| 119 |
+
outputs=[question_md, options, idx_state, score_state, progress, result_md, finished_state]
|
| 120 |
+
)
|
| 121 |
|
| 122 |
|
| 123 |
demo.launch()
|
chatbot.py
ADDED
|
@@ -0,0 +1,44 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from llama_cpp import Llama
from huggingface_hub import hf_hub_download

# Fetch the fine-tuned GGUF checkpoint from the Hugging Face Hub
# (hf_hub_download caches the file locally after the first run).
print("Downloading model...")
model_path = hf_hub_download(
    repo_id="ebbalg/llama-finetome",
    filename="llama-3.2-1b-instruct.Q4_K_M.gguf"
)

# Load the model once at import time; `llm` is shared with quiz.py.
print("Loading model...")
llm = Llama(
    model_path=model_path,
    n_ctx=2048,            # context window, in tokens
    n_threads=2,           # NOTE(review): assumes a small CPU host — confirm
    verbose=False,
    chat_format="llama-3"
)
|
| 18 |
+
|
| 19 |
+
def chat(message, history):
    """Chat handler for gr.ChatInterface: replay history, append the new
    user message, and return the model's reply text."""
    messages = []

    # History may arrive either as [user, assistant] pairs or as
    # {"role": ..., "content": ...} dicts depending on the Gradio version.
    for entry in history or []:
        if isinstance(entry, dict):
            messages.append(entry)
        elif isinstance(entry, (list, tuple)):
            user_msg, assistant_msg = entry[0], entry[1]
            messages.append({"role": "user", "content": str(user_msg)})
            messages.append({"role": "assistant", "content": str(assistant_msg)})

    # Current turn goes last.
    messages.append({"role": "user", "content": message})

    result = llm.create_chat_completion(
        messages=messages,
        max_tokens=256,
        temperature=0.7,
    )

    return result["choices"][0]["message"]["content"]
|
quiz.py
ADDED
|
@@ -0,0 +1,159 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import re
|
| 2 |
+
from chatbot import llm
|
| 3 |
+
|
| 4 |
+
### CREATE AND PARSE QUIZ
|
| 5 |
+
|
| 6 |
+
def create_quiz(llm=None):
    '''Generate a multiple choice quiz with 10 questions (created by the llm).

    llm: optional object exposing create_chat_completion(). When omitted,
    falls back to the shared model loaded in chatbot.py — app.py calls
    create_quiz() with no arguments, which previously raised a TypeError
    because the parameter was required.

    Returns the raw quiz text produced by the model.
    '''
    if llm is None:
        # Imported lazily so passing an explicit llm never touches chatbot.py.
        from chatbot import llm as _shared_llm
        llm = _shared_llm

    system_prompt = {
        "role": "system",
        "content": (
            # Trailing space added: the two adjacent literals previously
            # concatenated as "...student.Each question...".
            "Generate a set of 10 multiple-choice questions about machine learning for a student. "
            "Each question should have 4 answer options (A–D) with a single correct answer.\n\n"
            "Format exactly like this:\n\n"
            "1. Question text...\n"
            "A) ...\nB) ...\nC) ...\nD) ...\n**Correct Answer: X**\n\n"
        )
    }

    response = llm.create_chat_completion(
        messages=[system_prompt],
        max_tokens=512,
        temperature=0.7
    )

    return response["choices"][0]["message"]["content"]
|
| 26 |
+
|
| 27 |
+
def parse_quiz(text):
    """Parse raw model quiz text into a list of question dicts.

    Each entry is {"q": question text, "options": [opt_a, ..., opt_d],
    "answer": "A".."D" or None}. Tolerates "A)" or "A." option markers and
    several "Correct Answer" phrasings; missing pieces degrade gracefully
    (empty options, answer None) rather than raising.
    """
    # Split into numbered blocks: "1. ..." up to the next "N." or end of text.
    question_blocks = re.findall(r'(\d+)\.\s*(.*?)(?=\n\d+\.|\Z)', text, flags=re.S)
    parsed = []
    for _num, block_text in question_blocks:
        block = block_text.strip()

        # The question text is everything before the first "A)" / "A." line.
        m_a = re.search(r'\nA[\)\.]', block)
        if m_a:
            question_text = block[:m_a.start()].strip()
            rest = block[m_a.start():].strip()
        else:
            # No options found — keep the whole block as the question.
            question_text = block
            rest = ""

        # Extract options A-D; each runs until the next option, the
        # "**Correct" marker, the next numbered question, or end of block.
        opts = re.findall(
            r'([A-D])[\)\.]\s*(.+?)(?=\n[A-D][\)\.]|\n\*\*Correct|\n\d+\.|\Z)',
            rest, flags=re.S)
        opts_sorted = sorted(opts, key=lambda pair: pair[0])  # keep A..D order
        options = [opt_text.strip().replace("\n", " ") for _letter, opt_text in opts_sorted]

        # Correct answer: try the strictest pattern first, then looser ones.
        # (The original version juggled group(1)/group(2) across patterns
        # with different group counts, which was easy to get wrong; each
        # pattern here has exactly one capture group.)
        ans = None
        for pattern in (
            r'\*\*\s*Correct Answer\s*:\s*([A-D])\s*\*\*',
            r'Correct Answer\s*:\s*([A-D])',
            r'(?:Correct|Answer)\s*[:\-]\s*([A-D])',
        ):
            m_corr = re.search(pattern, block, flags=re.I)
            if m_corr:
                ans = m_corr.group(1).upper()
                break

        parsed.append({
            "q": question_text,
            "options": options,
            "answer": ans
        })

    return parsed
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
def start_quiz(quiz_raw, quiz_parsed):
    """Return the first question payload and the initial (index, score).

    The payload dict carries "question_md", "options" (labelled "A) ..."
    strings, empty if none were parsed) and a "progress" string.
    """
    # Parse failure / zero questions: surface the raw model output instead.
    if not quiz_parsed:
        payload = {
            "question_md": "⚠️ Could not parse generated quiz. Showing raw output below:\n\n" + quiz_raw,
            "options": [],
            "progress": "0/0"
        }
        return payload, 0, 0

    first = quiz_parsed[0]
    # zip() against "ABCD" yields [] for questions with no parsed options,
    # so the empty-options case needs no separate branch.
    labelled = [f"{letter}) {opt}" for letter, opt in zip("ABCD", first["options"])]
    payload = {
        "question_md": f"**Q 1.** {first['q']}",
        "options": labelled,
        "progress": f"1/{len(quiz_parsed)}"
    }
    return payload, 0, 0
|
| 100 |
+
|
| 101 |
+
def show_question(quiz_parsed, idx):
    """Return (question markdown, labelled options, progress string) for *idx*.

    Falls back to ("No question", [], "0/0") when idx is out of range or
    the quiz is empty.
    """
    if not quiz_parsed or not (0 <= idx < len(quiz_parsed)):
        return "No question", [], "0/0"

    entry = quiz_parsed[idx]
    md = f"**Q {idx + 1}.** {entry['q']}"
    progress = f"{idx + 1}/{len(quiz_parsed)}"
    # zip() naturally yields [] when the question has no parsed options.
    labelled = [f"{letter}) {opt}" for letter, opt in zip("ABCD", entry["options"])]
    return md, labelled, progress
|
| 111 |
+
|
| 112 |
+
def submit_and_next(selected, quiz_parsed, idx, score):
    """
    Grade the option selected for question *idx* and advance the quiz.

    Returns a 6-tuple:
      (next question md OR final results md, options for the next question,
       updated index, updated score, progress string, finished flag)
    """
    if not quiz_parsed:
        return "No quiz parsed.", [], idx, score, "0/0", False

    total = len(quiz_parsed)

    # Score the current question (only when idx is a valid position).
    if 0 <= idx < total:
        expected = quiz_parsed[idx].get("answer")
        # Options are rendered as "A) text" — pull the leading letter back out.
        picked = None
        if selected:
            m = re.match(r'\s*([A-D])[\)\.]', selected)
            if m:
                picked = m.group(1).upper()
        # Only compare when both sides are known; unparsed answers score 0.
        if expected and picked and picked == expected:
            score += 1

    idx += 1
    if idx >= total:
        # Quiz finished: build the score line plus an answer-key summary.
        key_lines = [f"{i + 1}. {q.get('answer') or '?'}" for i, q in enumerate(quiz_parsed)]
        result_md = f"### 🧾 Quiz complete!\n\nYour score: **{score} / {total}**"
        result_md += "\n\n**Correct answers:**\n\n" + "\n".join(key_lines)
        return result_md, [], idx, score, f"{total}/{total}", True

    # Not finished: hand back the next question.
    md, opts, progress = show_question(quiz_parsed, idx)
    return md, opts, idx, score, progress, False
|
| 155 |
+
|
| 156 |
+
def restart_quiz(raw_quiz, parsed_quiz):
    """Jump back to question 1 of the already-generated quiz (no model call)."""
    payload, idx, score = start_quiz(raw_quiz, parsed_quiz)
    return (
        payload["question_md"],
        payload["options"],
        idx,
        score,
        payload["progress"],
    )
|