Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
@@ -1,107 +1,99 @@
|
|
| 1 |
# --------------------------------------------------------------
|
| 2 |
# IGCSE Science Platform – Question Practice & Deep Marking
|
| 3 |
-
#
|
|
|
|
| 4 |
# --------------------------------------------------------------
|
| 5 |
|
| 6 |
-
|
| 7 |
import os
|
| 8 |
import json
|
| 9 |
-
import re
|
| 10 |
import time
|
| 11 |
-
|
| 12 |
import gradio as gr
|
| 13 |
|
| 14 |
-
# ---------- 1.
|
| 15 |
-
|
| 16 |
-
|
| 17 |
-
|
| 18 |
-
|
| 19 |
-
|
| 20 |
-
|
| 21 |
-
|
| 22 |
-
|
| 23 |
-
|
| 24 |
-
|
| 25 |
-
|
| 26 |
-
|
| 27 |
-
|
| 28 |
-
|
| 29 |
-
|
| 30 |
-
|
| 31 |
-
|
| 32 |
-
|
| 33 |
-
|
| 34 |
-
|
| 35 |
-
|
| 36 |
-
except Exception as e:
|
| 37 |
-
|
| 38 |
-
|
| 39 |
-
|
| 40 |
-
|
| 41 |
-
|
| 42 |
-
|
| 43 |
-
|
| 44 |
-
|
| 45 |
-
|
| 46 |
-
|
| 47 |
-
|
| 48 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 49 |
def ask_ai(prompt, temperature=0.7, max_retries=2):
|
|
|
|
| 50 |
last_error = None
|
| 51 |
|
| 52 |
-
|
|
|
|
| 53 |
for attempt in range(max_retries):
|
| 54 |
try:
|
| 55 |
-
|
|
|
|
| 56 |
prompt,
|
| 57 |
generation_config=genai.types.GenerationConfig(temperature=temperature)
|
| 58 |
)
|
| 59 |
return resp.text, "gemini"
|
| 60 |
except Exception as e:
|
| 61 |
last_error = e
|
|
|
|
| 62 |
if attempt < max_retries - 1:
|
| 63 |
time.sleep(1)
|
| 64 |
|
| 65 |
-
|
| 66 |
-
|
| 67 |
-
try:
|
| 68 |
-
resp = cohere_client.chat(model="command-r-plus-08-2024", message=prompt, temperature=temperature)
|
| 69 |
-
return resp.text, "cohere"
|
| 70 |
-
except Exception as e:
|
| 71 |
-
last_error = e
|
| 72 |
-
if attempt < max_retries - 1:
|
| 73 |
-
time.sleep(1)
|
| 74 |
-
|
| 75 |
-
if zai_client:
|
| 76 |
for attempt in range(max_retries):
|
| 77 |
try:
|
| 78 |
-
|
| 79 |
-
model="
|
| 80 |
-
|
| 81 |
temperature=temperature
|
| 82 |
)
|
| 83 |
-
return
|
| 84 |
except Exception as e:
|
| 85 |
last_error = e
|
|
|
|
| 86 |
if attempt < max_retries - 1:
|
| 87 |
time.sleep(1)
|
| 88 |
|
| 89 |
-
|
| 90 |
-
|
| 91 |
-
|
| 92 |
-
|
| 93 |
-
messages=[{"role": "user", "content": prompt}],
|
| 94 |
-
temperature=temperature
|
| 95 |
-
)
|
| 96 |
-
return comp.choices[0].message.content, "minimax"
|
| 97 |
-
except Exception as e:
|
| 98 |
-
last_error = e
|
| 99 |
-
|
| 100 |
-
return f"❌ All AI services failed. Last error: {str(last_error)}", "error"
|
| 101 |
|
| 102 |
|
| 103 |
# ---------- 3. Topic Lists ----------
|
| 104 |
-
|
| 105 |
"States of Matter", "Atoms, Elements & Compounds", "Mixtures & Separation Techniques",
|
| 106 |
"Atomic Structure", "Electronic Configuration", "Periodic Table",
|
| 107 |
"Chemical Bonding: Ionic", "Chemical Bonding: Covalent", "Chemical Bonding: Metallic",
|
|
@@ -124,7 +116,7 @@ chemistry_topics = [
|
|
| 124 |
"Laboratory Safety", "Experimental Techniques", "Analysis & Evaluation"
|
| 125 |
]
|
| 126 |
|
| 127 |
-
|
| 128 |
"Cell Structure & Function", "Specialised Cells", "Microscopy",
|
| 129 |
"Cell Division: Mitosis", "Cell Division: Meiosis", "Stem Cells",
|
| 130 |
"Diffusion", "Osmosis", "Active Transport",
|
|
@@ -154,17 +146,16 @@ biology_topics = [
|
|
| 154 |
"Biological Techniques", "Field Studies"
|
| 155 |
]
|
| 156 |
|
| 157 |
-
# ---------- 4. Question Types per Subject ----------
|
| 158 |
QUESTION_TYPES = {
|
| 159 |
"Chemistry": [
|
| 160 |
-
"Multiple Choice", "Short Answer
|
| 161 |
-
"Calculation
|
| 162 |
-
"Calculation
|
| 163 |
"Data Interpretation", "Practical / Experiment", "Equation Writing",
|
| 164 |
"Evaluation / Suggest Improvements"
|
| 165 |
],
|
| 166 |
"Biology": [
|
| 167 |
-
"Multiple Choice", "Short Answer
|
| 168 |
"Extended Response", "Data Interpretation", "Graph / Table Analysis",
|
| 169 |
"Practical / Experiment", "Evaluation / Suggest Improvements",
|
| 170 |
"Applying Knowledge to Novel Scenarios", "Genetic Diagrams",
|
|
@@ -172,15 +163,16 @@ QUESTION_TYPES = {
|
|
| 172 |
]
|
| 173 |
}
|
| 174 |
|
| 175 |
-
|
|
|
|
| 176 |
def generate_question(subject, topic, q_type, difficulty, num_marks):
|
| 177 |
if not topic:
|
| 178 |
-
return "
|
| 179 |
|
| 180 |
difficulty_desc = {
|
| 181 |
-
"Foundation": "Test basic recall and simple application. Clear, direct language.
|
| 182 |
-
"Core": "Test understanding and application. Require structured explanations.
|
| 183 |
-
"Extended": "Test analysis, evaluation, and synthesis. Multi-step reasoning, novel contexts.
|
| 184 |
}
|
| 185 |
|
| 186 |
prompt = f"""You are an expert IGCSE {subject} examiner for Cambridge International Education.
|
|
@@ -188,33 +180,32 @@ def generate_question(subject, topic, q_type, difficulty, num_marks):
|
|
| 188 |
Create ONE authentic IGCSE {subject} exam question with these specifications:
|
| 189 |
- Topic: {topic}
|
| 190 |
- Question Type: {q_type}
|
| 191 |
-
- Difficulty: {difficulty}
|
| 192 |
- Total Marks: {num_marks}
|
| 193 |
|
| 194 |
REQUIREMENTS:
|
| 195 |
- Use authentic IGCSE command words (describe, explain, suggest, calculate, evaluate, state, outline, compare, deduce, predict)
|
| 196 |
-
-
|
| 197 |
- Include realistic context, data, or scenarios where appropriate
|
| 198 |
-
- For
|
| 199 |
-
- For data/graph questions
|
| 200 |
-
-
|
| 201 |
|
| 202 |
-
Return ONLY a valid JSON object
|
| 203 |
{{
|
| 204 |
-
"question_text": "
|
| 205 |
"marks": {num_marks},
|
| 206 |
-
"command_word": "
|
| 207 |
"question_type": "{q_type}",
|
| 208 |
"topic": "{topic}",
|
| 209 |
"difficulty": "{difficulty}",
|
| 210 |
"mark_scheme": [
|
| 211 |
"Point 1 (1 mark): exact wording of acceptable answer",
|
| 212 |
-
"Point 2 (1 mark): ..."
|
| 213 |
-
"... one string per mark point"
|
| 214 |
],
|
| 215 |
-
"model_answer": "A complete
|
| 216 |
-
"examiner_notes": "What the examiner
|
| 217 |
-
"key_concepts_tested": ["concept1", "concept2"
|
| 218 |
}}"""
|
| 219 |
|
| 220 |
resp, source = ask_ai(prompt, temperature=0.4)
|
|
@@ -223,49 +214,41 @@ Return ONLY a valid JSON object (no markdown, no code fences):
|
|
| 223 |
clean = resp.replace("```json", "").replace("```", "").strip()
|
| 224 |
data = json.loads(clean)
|
| 225 |
|
| 226 |
-
q_display =
|
|
|
|
|
|
|
|
|
|
|
|
|
| 227 |
|
| 228 |
-
-
|
| 229 |
-
|
| 230 |
-
{data['question_text']}"""
|
| 231 |
-
|
| 232 |
-
mark_scheme_text = "\n".join([f"• {pt}" for pt in data.get("mark_scheme", [])])
|
| 233 |
-
examiner_notes = data.get("examiner_notes", "")
|
| 234 |
key_concepts = ", ".join(data.get("key_concepts_tested", []))
|
| 235 |
-
|
| 236 |
-
|
| 237 |
-
|
| 238 |
-
{
|
| 239 |
-
|
| 240 |
-
---
|
| 241 |
-
|
| 242 |
-
**Examiner Notes:**
|
| 243 |
-
{examiner_notes}
|
| 244 |
-
|
| 245 |
-
**Key Concepts Tested:** {key_concepts}"""
|
| 246 |
|
| 247 |
model_ans = data.get("model_answer", "")
|
| 248 |
-
|
| 249 |
-
source_tag = f"\n\n_AI Source: {source.title()}_" if source != "gemini" else ""
|
| 250 |
|
| 251 |
return q_display, scheme_display, model_ans, source_tag
|
| 252 |
|
| 253 |
-
except Exception
|
| 254 |
-
return f"
|
| 255 |
|
| 256 |
|
| 257 |
-
# ----------
|
| 258 |
-
def mark_answer(question_display, scheme_display, student_answer, subject,
|
| 259 |
if not student_answer.strip():
|
| 260 |
-
return "
|
| 261 |
if not question_display or "select a topic" in question_display.lower():
|
| 262 |
-
return "
|
| 263 |
|
| 264 |
-
model_section = f"\n\nModel answer for reference:\n{
|
| 265 |
|
| 266 |
prompt = f"""You are a highly experienced IGCSE {subject} examiner providing DETAILED FORMATIVE FEEDBACK.
|
| 267 |
|
| 268 |
-
QUESTION
|
| 269 |
{question_display}
|
| 270 |
|
| 271 |
{scheme_display}
|
|
@@ -276,53 +259,46 @@ STUDENT'S ANSWER:
|
|
| 276 |
|
| 277 |
---
|
| 278 |
|
| 279 |
-
|
| 280 |
-
|
| 281 |
1. Award marks explicitly against the mark scheme
|
| 282 |
-
2. Identify EVERY error
|
| 283 |
3. Explain WHY each error is wrong scientifically
|
| 284 |
4. Identify gaps where expected points are missing
|
| 285 |
-
5. Recognise
|
| 286 |
6. Give specific, actionable improvement advice
|
| 287 |
|
| 288 |
-
Return ONLY a valid JSON object
|
| 289 |
{{
|
| 290 |
-
"marks_awarded":
|
| 291 |
-
"marks_total":
|
| 292 |
-
"percentage":
|
| 293 |
-
"grade_band": "Developing
|
| 294 |
"overall_verdict": "1-2 sentence summary of performance",
|
| 295 |
"mark_by_mark_breakdown": [
|
| 296 |
{{
|
| 297 |
"mark_point": "what the mark scheme required",
|
| 298 |
"awarded": true,
|
| 299 |
-
"student_wrote": "what the student actually wrote
|
| 300 |
"verdict": "Correct / Partially correct / Incorrect / Missing",
|
| 301 |
-
"explanation": "Why this earned/lost the mark
|
| 302 |
}}
|
| 303 |
],
|
| 304 |
"errors_in_detail": [
|
| 305 |
{{
|
| 306 |
-
"error": "
|
| 307 |
"error_type": "Factual error / Misconception / Vague language / Missing detail / Wrong terminology / Incomplete explanation",
|
| 308 |
-
"why_wrong": "Scientific explanation of why this is wrong
|
| 309 |
-
"correct_version": "How it should have been written
|
| 310 |
-
"how_to_fix": "Specific advice to avoid this
|
| 311 |
}}
|
| 312 |
],
|
| 313 |
-
"missing_points": [
|
| 314 |
-
|
| 315 |
-
],
|
| 316 |
-
"
|
| 317 |
-
|
| 318 |
-
|
| 319 |
-
"
|
| 320 |
-
"Specific, numbered action item to improve on this topic"
|
| 321 |
-
],
|
| 322 |
-
"conceptual_gaps": "Description of any underlying conceptual misunderstanding revealed by the answer",
|
| 323 |
-
"terminology_issues": "Any scientific terminology used incorrectly or imprecisely",
|
| 324 |
-
"exam_technique_feedback": "Advice on structure, command word response, use of scientific language, answer length",
|
| 325 |
-
"recommended_focus": "The single most important thing this student should study/practise next"
|
| 326 |
}}"""
|
| 327 |
|
| 328 |
resp, source = ask_ai(prompt, temperature=0.2)
|
|
@@ -334,134 +310,167 @@ Return ONLY a valid JSON object (no markdown, no code fences):
|
|
| 334 |
marks_awarded = fb.get("marks_awarded", 0)
|
| 335 |
marks_total = fb.get("marks_total", 1)
|
| 336 |
pct = fb.get("percentage", round(marks_awarded / marks_total * 100))
|
| 337 |
-
band = fb.get("grade_band", "
|
| 338 |
|
| 339 |
-
# Score bar
|
| 340 |
filled = int(pct / 10)
|
| 341 |
bar = "█" * filled + "░" * (10 - filled)
|
| 342 |
score_color = "🔴" if pct < 40 else "🟡" if pct < 70 else "🟢"
|
| 343 |
|
| 344 |
-
|
| 345 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 346 |
|
| 347 |
-
_{fb.get('overall_verdict', '')}_
|
| 348 |
-
|
| 349 |
-
---
|
| 350 |
-
|
| 351 |
-
## 📋 Mark-by-Mark Breakdown
|
| 352 |
-
|
| 353 |
-
"""
|
| 354 |
for i, mp in enumerate(fb.get("mark_by_mark_breakdown", []), 1):
|
| 355 |
icon = "✅" if mp.get("awarded") else "❌"
|
| 356 |
-
|
| 357 |
-
|
| 358 |
-
- **Required:** {mp.get('mark_point', '')}
|
| 359 |
-
- **Student wrote:** _{mp.get('student_wrote', '
|
| 360 |
-
- **Examiner:** {mp.get('explanation', '')}
|
| 361 |
-
|
| 362 |
-
"""
|
| 363 |
|
| 364 |
if fb.get("errors_in_detail"):
|
| 365 |
-
|
| 366 |
for err in fb["errors_in_detail"]:
|
| 367 |
-
|
| 368 |
-
|
| 369 |
-
|
| 370 |
-
**Why it's wrong:** {err.get('why_wrong', '')}
|
| 371 |
-
|
| 372 |
-
**
|
| 373 |
-
|
| 374 |
-
**How to fix it:** {err.get('how_to_fix', '')}
|
| 375 |
-
|
| 376 |
-
"""
|
| 377 |
|
| 378 |
if fb.get("missing_points"):
|
| 379 |
-
|
| 380 |
for mp in fb["missing_points"]:
|
| 381 |
-
|
| 382 |
|
| 383 |
if fb.get("strengths"):
|
| 384 |
-
|
| 385 |
for s in fb["strengths"]:
|
| 386 |
-
|
| 387 |
|
| 388 |
-
|
| 389 |
for i, step in enumerate(fb.get("improvement_plan", []), 1):
|
| 390 |
-
|
| 391 |
|
| 392 |
if fb.get("conceptual_gaps"):
|
| 393 |
-
|
| 394 |
|
| 395 |
if fb.get("terminology_issues"):
|
| 396 |
-
|
| 397 |
|
| 398 |
-
|
| 399 |
-
|
| 400 |
|
| 401 |
if source != "gemini":
|
| 402 |
-
|
| 403 |
|
| 404 |
-
return
|
| 405 |
|
| 406 |
-
except Exception
|
| 407 |
-
return f"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 408 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 409 |
|
| 410 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 411 |
quiz_state = {}
|
| 412 |
|
|
|
|
| 413 |
def start_quiz(subject, topic, difficulty):
|
| 414 |
if not topic:
|
| 415 |
-
return "
|
| 416 |
|
| 417 |
quiz_state.clear()
|
| 418 |
-
quiz_state
|
| 419 |
-
|
| 420 |
-
|
| 421 |
-
|
| 422 |
-
|
| 423 |
-
|
| 424 |
-
|
| 425 |
-
|
| 426 |
-
|
| 427 |
-
|
| 428 |
-
selected_types = [q_types[i % len(q_types)] for i in range(5)]
|
| 429 |
-
quiz_state["q_types"] = selected_types
|
| 430 |
-
|
| 431 |
-
# Generate first question
|
| 432 |
-
q, scheme, model_ans, _ = generate_question(subject, topic, selected_types[0], difficulty, 4)
|
| 433 |
quiz_state["questions"].append(q)
|
| 434 |
quiz_state["schemes"].append(scheme)
|
| 435 |
-
quiz_state["model_answers"]
|
| 436 |
|
| 437 |
return (
|
| 438 |
-
f"**Quiz started! Question 1 of 5**
|
| 439 |
gr.update(visible=True),
|
| 440 |
gr.update(visible=False),
|
| 441 |
-
q,
|
| 442 |
-
"1 / 5",
|
| 443 |
-
""
|
| 444 |
)
|
| 445 |
|
|
|
|
| 446 |
def quiz_next(student_answer):
|
| 447 |
idx = quiz_state.get("current", 0)
|
| 448 |
if not student_answer.strip():
|
| 449 |
-
return "
|
| 450 |
|
| 451 |
quiz_state["answers"].append(student_answer)
|
| 452 |
quiz_state["current"] = idx + 1
|
| 453 |
|
| 454 |
if quiz_state["current"] >= quiz_state["total"]:
|
| 455 |
-
|
| 456 |
-
|
| 457 |
-
|
| 458 |
-
|
| 459 |
-
|
| 460 |
-
|
| 461 |
-
|
| 462 |
-
|
| 463 |
-
|
| 464 |
-
q, scheme, model_ans, _ = generate_question(subject, topic, q_type, difficulty, 4)
|
| 465 |
quiz_state["questions"].append(q)
|
| 466 |
quiz_state["schemes"].append(scheme)
|
| 467 |
quiz_state["model_answers"].append(model_ans)
|
|
@@ -471,87 +480,32 @@ def quiz_next(student_answer):
|
|
| 471 |
|
| 472 |
def get_quiz_results():
|
| 473 |
if not quiz_state or not quiz_state.get("answers"):
|
| 474 |
-
return "
|
| 475 |
|
| 476 |
subject = quiz_state["subject"]
|
| 477 |
-
|
| 478 |
-
|
| 479 |
-
|
| 480 |
-
|
| 481 |
-
for i, (q, scheme, ans) in enumerate(zip(
|
| 482 |
-
quiz_state["questions"], quiz_state["schemes"], quiz_state["answers"]
|
| 483 |
), 1):
|
| 484 |
-
feedback = mark_answer(q, scheme, ans, subject,
|
| 485 |
result_text += f"## Question {i}\n\n{q}\n\n**Your Answer:** {ans}\n\n{feedback}\n\n---\n\n"
|
| 486 |
-
|
| 487 |
return result_text
|
| 488 |
|
| 489 |
|
| 490 |
-
# ---------- 8.
|
| 491 |
-
def generate_drill(subject, topic):
|
| 492 |
-
if not topic:
|
| 493 |
-
return "⚠ Select a topic!"
|
| 494 |
-
|
| 495 |
-
prompt = f"""Generate 10 rapid-fire IGCSE {subject} questions on the topic: "{topic}"
|
| 496 |
-
|
| 497 |
-
These should be a MIX of:
|
| 498 |
-
- 3 × simple recall (1 mark each)
|
| 499 |
-
- 4 × application/explanation (2 marks each)
|
| 500 |
-
- 3 × analysis/evaluation (3 marks each)
|
| 501 |
-
|
| 502 |
-
Total: 17 marks
|
| 503 |
-
|
| 504 |
-
Return ONLY a valid JSON array (no markdown):
|
| 505 |
-
[
|
| 506 |
-
{{
|
| 507 |
-
"q_num": 1,
|
| 508 |
-
"question": "question text",
|
| 509 |
-
"marks": 1,
|
| 510 |
-
"type": "Recall/Application/Analysis",
|
| 511 |
-
"answer": "concise model answer",
|
| 512 |
-
"key_point": "the single most important thing to include"
|
| 513 |
-
}},
|
| 514 |
-
...10 items total
|
| 515 |
-
]"""
|
| 516 |
-
|
| 517 |
-
resp, source = ask_ai(prompt, temperature=0.4)
|
| 518 |
-
|
| 519 |
-
try:
|
| 520 |
-
clean = resp.replace("```json", "").replace("```", "").strip()
|
| 521 |
-
qs = json.loads(clean)
|
| 522 |
-
|
| 523 |
-
output = f"## 🔥 10-Question Drill: {topic}\n\n"
|
| 524 |
-
output += f"**Total: 17 marks | Subject: {subject}**\n\n---\n\n"
|
| 525 |
-
|
| 526 |
-
for q in qs:
|
| 527 |
-
mark_label = f"[{q['marks']} mark{'s' if q['marks'] > 1 else ''}]"
|
| 528 |
-
output += f"**Q{q['q_num']}** {mark_label} _{q['type']}_\n{q['question']}\n\n"
|
| 529 |
-
|
| 530 |
-
output += "---\n\n### ✅ Model Answers\n\n"
|
| 531 |
-
for q in qs:
|
| 532 |
-
output += f"**Q{q['q_num']}** ({q['marks']} marks): {q['answer']}\n_Key point: {q['key_point']}_\n\n"
|
| 533 |
-
|
| 534 |
-
if source != "gemini":
|
| 535 |
-
output += f"\n_Generated by {source.title()}_"
|
| 536 |
-
|
| 537 |
-
return output
|
| 538 |
-
|
| 539 |
-
except:
|
| 540 |
-
return resp
|
| 541 |
-
|
| 542 |
-
|
| 543 |
-
# ---------- 9. Update Topics Dropdown ----------
|
| 544 |
def update_topics(subject):
|
| 545 |
-
topics = {"Chemistry":
|
| 546 |
return gr.Dropdown(choices=topics[subject], value=None)
|
| 547 |
|
|
|
|
| 548 |
def update_q_types(subject):
|
| 549 |
return gr.Dropdown(choices=QUESTION_TYPES[subject], value=QUESTION_TYPES[subject][0])
|
| 550 |
|
| 551 |
|
| 552 |
-
# ----------
|
| 553 |
CUSTOM_CSS = """
|
| 554 |
-
@import url('https://fonts.googleapis.com/css2?family=Syne:wght@400;600;700;800&family=DM+Mono:
|
| 555 |
|
| 556 |
:root {
|
| 557 |
--bg: #0a0e1a;
|
|
@@ -560,158 +514,104 @@ CUSTOM_CSS = """
|
|
| 560 |
--border: #1e3a5f;
|
| 561 |
--accent: #00d4aa;
|
| 562 |
--accent2: #3b82f6;
|
| 563 |
-
--accent3: #f59e0b;
|
| 564 |
-
--chem: #10b981;
|
| 565 |
--bio: #8b5cf6;
|
| 566 |
--text: #e2e8f0;
|
| 567 |
--muted: #64748b;
|
| 568 |
-
--danger: #ef4444;
|
| 569 |
}
|
| 570 |
|
| 571 |
-
* { box-sizing: border-box; }
|
| 572 |
-
|
| 573 |
body, .gradio-container {
|
| 574 |
background: var(--bg) !important;
|
| 575 |
-
font-family: 'DM Sans', sans-serif !important;
|
| 576 |
color: var(--text) !important;
|
| 577 |
}
|
| 578 |
|
| 579 |
-
/* Header */
|
| 580 |
-
.gr-markdown h1 {
|
| 581 |
-
font-family: 'Syne', sans-serif !important;
|
| 582 |
-
font-size: 2.4rem !important;
|
| 583 |
-
font-weight: 800 !important;
|
| 584 |
-
background: linear-gradient(135deg, var(--accent), var(--accent2), var(--bio)) !important;
|
| 585 |
-
-webkit-background-clip: text !important;
|
| 586 |
-
-webkit-text-fill-color: transparent !important;
|
| 587 |
-
background-clip: text !important;
|
| 588 |
-
letter-spacing: -0.02em !important;
|
| 589 |
-
}
|
| 590 |
-
|
| 591 |
-
/* Tabs */
|
| 592 |
.tab-nav button {
|
| 593 |
font-family: 'Syne', sans-serif !important;
|
| 594 |
font-weight: 600 !important;
|
| 595 |
-
font-size: 0.
|
| 596 |
-
letter-spacing: 0.
|
| 597 |
text-transform: uppercase !important;
|
| 598 |
color: var(--muted) !important;
|
| 599 |
background: transparent !important;
|
| 600 |
border: none !important;
|
| 601 |
border-bottom: 2px solid transparent !important;
|
| 602 |
-
padding: 10px
|
| 603 |
transition: all 0.2s !important;
|
| 604 |
}
|
| 605 |
-
|
| 606 |
.tab-nav button.selected {
|
| 607 |
color: var(--accent) !important;
|
| 608 |
border-bottom-color: var(--accent) !important;
|
| 609 |
}
|
| 610 |
|
| 611 |
-
|
| 612 |
-
input, textarea, select, .gr-input, .gr-textarea {
|
| 613 |
background: var(--surface) !important;
|
| 614 |
border: 1px solid var(--border) !important;
|
| 615 |
color: var(--text) !important;
|
| 616 |
border-radius: 8px !important;
|
| 617 |
-
font-family: 'DM Sans', sans-serif !important;
|
| 618 |
}
|
| 619 |
-
|
| 620 |
textarea:focus, input:focus {
|
| 621 |
border-color: var(--accent) !important;
|
|
|
|
| 622 |
outline: none !important;
|
| 623 |
-
box-shadow: 0 0 0 3px rgba(0, 212, 170, 0.1) !important;
|
| 624 |
}
|
| 625 |
|
| 626 |
-
/* Buttons */
|
| 627 |
.gr-button {
|
| 628 |
font-family: 'Syne', sans-serif !important;
|
| 629 |
font-weight: 600 !important;
|
| 630 |
-
letter-spacing: 0.
|
| 631 |
border-radius: 8px !important;
|
| 632 |
transition: all 0.2s !important;
|
| 633 |
}
|
| 634 |
-
|
| 635 |
.gr-button-primary {
|
| 636 |
background: linear-gradient(135deg, var(--accent), var(--accent2)) !important;
|
| 637 |
border: none !important;
|
| 638 |
color: #fff !important;
|
| 639 |
}
|
| 640 |
-
|
| 641 |
.gr-button-primary:hover {
|
| 642 |
transform: translateY(-1px) !important;
|
| 643 |
-
box-shadow: 0 4px 20px rgba(0,
|
| 644 |
}
|
| 645 |
-
|
| 646 |
.gr-button-secondary {
|
| 647 |
background: var(--surface2) !important;
|
| 648 |
border: 1px solid var(--border) !important;
|
| 649 |
color: var(--text) !important;
|
| 650 |
}
|
| 651 |
|
| 652 |
-
/* Markdown output */
|
| 653 |
.gr-markdown {
|
| 654 |
background: var(--surface) !important;
|
| 655 |
border: 1px solid var(--border) !important;
|
| 656 |
border-radius: 10px !important;
|
| 657 |
padding: 20px !important;
|
| 658 |
-
font-family: 'DM Sans', sans-serif !important;
|
| 659 |
line-height: 1.7 !important;
|
| 660 |
}
|
| 661 |
-
|
| 662 |
.gr-markdown code {
|
| 663 |
font-family: 'DM Mono', monospace !important;
|
| 664 |
-
background: rgba(0,212,170,0.
|
| 665 |
color: var(--accent) !important;
|
| 666 |
padding: 2px 6px !important;
|
| 667 |
border-radius: 4px !important;
|
| 668 |
}
|
| 669 |
-
|
| 670 |
.gr-markdown blockquote {
|
| 671 |
border-left: 3px solid var(--accent2) !important;
|
| 672 |
padding-left: 12px !important;
|
| 673 |
color: var(--muted) !important;
|
| 674 |
font-style: italic !important;
|
| 675 |
}
|
| 676 |
-
|
| 677 |
.gr-markdown h2 {
|
| 678 |
font-family: 'Syne', sans-serif !important;
|
| 679 |
font-weight: 700 !important;
|
| 680 |
color: var(--accent) !important;
|
| 681 |
-
|
| 682 |
-
|
| 683 |
-
letter-spacing: 0.02em !important;
|
| 684 |
}
|
| 685 |
-
|
| 686 |
.gr-markdown h3 {
|
| 687 |
font-family: 'Syne', sans-serif !important;
|
| 688 |
font-weight: 600 !important;
|
| 689 |
color: var(--accent2) !important;
|
| 690 |
}
|
| 691 |
-
|
| 692 |
-
/* Radio & Dropdown */
|
| 693 |
-
.gr-radio, .gr-dropdown {
|
| 694 |
-
background: var(--surface) !important;
|
| 695 |
-
border-radius: 8px !important;
|
| 696 |
-
}
|
| 697 |
-
|
| 698 |
-
.gr-radio label {
|
| 699 |
-
font-family: 'DM Sans', sans-serif !important;
|
| 700 |
-
color: var(--text) !important;
|
| 701 |
-
}
|
| 702 |
-
|
| 703 |
-
/* Status bar */
|
| 704 |
-
.status-bar {
|
| 705 |
-
display: flex;
|
| 706 |
-
gap: 12px;
|
| 707 |
-
padding: 8px 0;
|
| 708 |
-
font-family: 'DM Mono', monospace;
|
| 709 |
-
font-size: 0.75rem;
|
| 710 |
-
}
|
| 711 |
"""
|
| 712 |
|
| 713 |
|
| 714 |
-
# ----------
|
| 715 |
with gr.Blocks(
|
| 716 |
theme=gr.themes.Base(
|
| 717 |
primary_hue="teal",
|
|
@@ -720,215 +620,194 @@ with gr.Blocks(
|
|
| 720 |
font=gr.themes.GoogleFont("DM Sans"),
|
| 721 |
),
|
| 722 |
css=CUSTOM_CSS,
|
| 723 |
-
title="IGCSE Science
|
| 724 |
) as app:
|
| 725 |
|
| 726 |
gr.Markdown("""
|
| 727 |
-
#
|
| 728 |
### Chemistry · Biology · AI-Powered Diagnostic Feedback
|
| 729 |
""")
|
| 730 |
|
| 731 |
with gr.Tabs():
|
| 732 |
|
| 733 |
-
#
|
| 734 |
-
|
| 735 |
-
|
| 736 |
-
with gr.Tab("🎯 Practice Questions"):
|
| 737 |
-
gr.Markdown("""### Generate exam-style questions on any topic, then get detailed mark-by-mark feedback""")
|
| 738 |
|
| 739 |
with gr.Row():
|
| 740 |
with gr.Column(scale=1):
|
| 741 |
pg_subject = gr.Radio(["Chemistry", "Biology"], label="Subject", value="Chemistry")
|
| 742 |
-
pg_topic
|
| 743 |
-
pg_qtype
|
| 744 |
-
|
| 745 |
-
|
| 746 |
-
|
| 747 |
-
|
| 748 |
-
|
| 749 |
-
|
| 750 |
|
| 751 |
with gr.Column(scale=2):
|
| 752 |
-
pg_question = gr.Markdown(
|
| 753 |
-
pg_progress = gr.Textbox(label="", value="", visible=False)
|
| 754 |
|
| 755 |
-
pg_subject.change(update_topics,
|
| 756 |
pg_subject.change(update_q_types, pg_subject, pg_qtype)
|
| 757 |
|
| 758 |
-
gr.Markdown("---")
|
| 759 |
-
gr.Markdown("### ✍️ Write Your Answer")
|
| 760 |
pg_answer = gr.Textbox(
|
| 761 |
lines=10,
|
| 762 |
label="Your Answer",
|
| 763 |
-
placeholder=
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 764 |
)
|
| 765 |
|
| 766 |
with gr.Row():
|
| 767 |
-
|
| 768 |
-
mark_btn = gr.Button("
|
| 769 |
|
| 770 |
-
pg_feedback = gr.Markdown(
|
| 771 |
|
| 772 |
-
|
| 773 |
-
|
| 774 |
-
pg_model_answer = gr.State("")
|
| 775 |
-
pg_source_tag = gr.State("")
|
| 776 |
|
| 777 |
def on_generate(subject, topic, q_type, difficulty, marks):
|
| 778 |
-
q, scheme, model_ans,
|
| 779 |
-
return q, scheme, model_ans,
|
| 780 |
|
| 781 |
-
|
| 782 |
on_generate,
|
| 783 |
-
[pg_subject, pg_topic, pg_qtype,
|
| 784 |
-
[pg_question, pg_scheme,
|
| 785 |
)
|
| 786 |
|
| 787 |
-
def on_mark(question, scheme, model_ans, student_ans, subject,
|
| 788 |
-
|
| 789 |
-
return mark_answer(question, scheme, student_ans, subject, model_to_show)
|
| 790 |
|
| 791 |
mark_btn.click(
|
| 792 |
on_mark,
|
| 793 |
-
[pg_question, pg_scheme,
|
| 794 |
pg_feedback
|
| 795 |
)
|
| 796 |
|
| 797 |
-
|
| 798 |
-
|
| 799 |
-
pg_scheme_display = gr.Markdown(value="_Generate a question first_")
|
| 800 |
|
| 801 |
-
|
| 802 |
-
|
| 803 |
-
|
| 804 |
-
|
| 805 |
|
| 806 |
|
| 807 |
-
#
|
| 808 |
-
|
| 809 |
-
|
| 810 |
-
with gr.Tab("🔥 Topic Drill"):
|
| 811 |
-
gr.Markdown("""### 10 rapid-fire questions on one topic — with model answers
|
| 812 |
-
*Great for revision and checking what you know*""")
|
| 813 |
|
| 814 |
with gr.Row():
|
| 815 |
drill_subject = gr.Radio(["Chemistry", "Biology"], label="Subject", value="Chemistry")
|
| 816 |
-
drill_topic
|
| 817 |
|
| 818 |
drill_subject.change(update_topics, drill_subject, drill_topic)
|
| 819 |
|
| 820 |
-
drill_btn
|
| 821 |
-
drill_output = gr.Markdown(
|
| 822 |
-
|
| 823 |
drill_btn.click(generate_drill, [drill_subject, drill_topic], drill_output)
|
| 824 |
|
| 825 |
|
| 826 |
-
#
|
| 827 |
-
|
| 828 |
-
|
| 829 |
-
with gr.Tab("⏱️ Timed Mock"):
|
| 830 |
-
gr.Markdown("""### Answer 5 questions in sequence, then get full results
|
| 831 |
-
*Simulate real exam conditions*""")
|
| 832 |
|
| 833 |
with gr.Row():
|
| 834 |
mock_subject = gr.Radio(["Chemistry", "Biology"], label="Subject", value="Chemistry")
|
| 835 |
-
mock_topic
|
| 836 |
-
mock_diff
|
| 837 |
|
| 838 |
mock_subject.change(update_topics, mock_subject, mock_topic)
|
| 839 |
|
| 840 |
-
mock_status
|
| 841 |
-
mock_start_btn = gr.Button("
|
| 842 |
-
|
| 843 |
-
with gr.Column(visible=False) as mock_question_section:
|
| 844 |
-
mock_q_display = gr.Markdown(label="Current Question")
|
| 845 |
-
mock_progress = gr.Textbox(label="Progress", value="1 / 5", interactive=False)
|
| 846 |
-
mock_answer = gr.Textbox(lines=8, label="Your Answer",
|
| 847 |
-
placeholder="Write your answer here...")
|
| 848 |
-
mock_next_btn = gr.Button("Next Question →", variant="primary")
|
| 849 |
-
mock_finish_btn = gr.Button("🏁 Finish & Get Results", variant="secondary", visible=False)
|
| 850 |
|
| 851 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 852 |
|
| 853 |
-
|
| 854 |
-
mock_current_scheme = gr.State("")
|
| 855 |
|
| 856 |
def on_start_mock(subject, topic, difficulty):
|
| 857 |
if not topic:
|
| 858 |
-
return
|
| 859 |
-
|
| 860 |
-
gr.update(visible=False),
|
| 861 |
-
"_", "1 / 5", "")
|
| 862 |
-
status, q_section, finish, q, prog, _ = start_quiz(subject, topic, difficulty)
|
| 863 |
return status, gr.update(visible=True), gr.update(visible=False), q, prog, ""
|
| 864 |
|
| 865 |
mock_start_btn.click(
|
| 866 |
on_start_mock,
|
| 867 |
[mock_subject, mock_topic, mock_diff],
|
| 868 |
-
[mock_status,
|
| 869 |
)
|
| 870 |
|
| 871 |
def on_mock_next(answer):
|
| 872 |
q, scheme, prog, done = quiz_next(answer)
|
| 873 |
-
|
| 874 |
-
return
|
| 875 |
-
gr.update(visible=not finish_visible),
|
| 876 |
-
gr.update(visible=finish_visible),
|
| 877 |
-
"")
|
| 878 |
|
| 879 |
mock_next_btn.click(
|
| 880 |
on_mock_next,
|
| 881 |
-
[
|
| 882 |
-
[mock_q_display, mock_progress, mock_next_btn, mock_finish_btn,
|
| 883 |
)
|
| 884 |
-
|
| 885 |
mock_finish_btn.click(get_quiz_results, [], mock_results)
|
| 886 |
|
| 887 |
|
| 888 |
-
# ══════════════════════════════════════════════════════
|
| 889 |
# TAB 4: TOPIC EXPLORER
|
| 890 |
-
|
| 891 |
-
|
| 892 |
-
gr.Markdown("""### Explore all examinable topics
|
| 893 |
-
*Click any topic to generate a focused question set*""")
|
| 894 |
|
| 895 |
with gr.Row():
|
| 896 |
-
|
| 897 |
-
|
| 898 |
-
|
| 899 |
-
|
| 900 |
-
|
| 901 |
-
|
| 902 |
-
|
|
|
|
| 903 |
|
| 904 |
-
gr.Markdown("---")
|
| 905 |
-
gr.Markdown("#### Quick Question — Type any topic:")
|
| 906 |
with gr.Row():
|
| 907 |
-
quick_topic
|
| 908 |
quick_subject = gr.Radio(["Chemistry", "Biology"], label="Subject", value="Chemistry")
|
| 909 |
-
quick_diff
|
| 910 |
|
| 911 |
-
quick_btn
|
| 912 |
-
quick_output = gr.Markdown(
|
| 913 |
quick_scheme = gr.State("")
|
| 914 |
-
quick_model
|
| 915 |
|
| 916 |
-
def
|
| 917 |
-
|
| 918 |
-
|
| 919 |
-
q, scheme, model, tag = generate_question(subject, topic, random.choice(qtypes), diff, 5)
|
| 920 |
return q, scheme, model
|
| 921 |
|
| 922 |
-
quick_btn.click(
|
| 923 |
-
|
| 924 |
-
|
|
|
|
|
|
|
| 925 |
|
| 926 |
-
# Footer
|
| 927 |
gr.Markdown("""
|
| 928 |
---
|
| 929 |
-
**AI System:**
|
| 930 |
|
| 931 |
-
|
| 932 |
""")
|
| 933 |
|
| 934 |
|
|
|
|
| 1 |
# --------------------------------------------------------------
|
| 2 |
# IGCSE Science Platform – Question Practice & Deep Marking
|
| 3 |
+
# AI: Gemini 2.5 Pro (Primary) → Cohere Command-R+ (Fallback)
|
| 4 |
+
# Hugging Face Spaces compatible
|
| 5 |
# --------------------------------------------------------------
|
| 6 |
|
|
|
|
| 7 |
import os
|
| 8 |
import json
|
|
|
|
| 9 |
import time
|
| 10 |
+
import random
|
| 11 |
import gradio as gr
|
| 12 |
|
| 13 |
+
# ---------- 1. LAZY AI Initialization ----------
|
| 14 |
+
# Deferred to first use — prevents OOM crash on HF Spaces startup
|
| 15 |
+
|
| 16 |
+
_gemini_model = None
|
| 17 |
+
_cohere_client = None
|
| 18 |
+
_ai_initialized = False
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
def _init_ai():
|
| 22 |
+
global _gemini_model, _cohere_client, _ai_initialized
|
| 23 |
+
if _ai_initialized:
|
| 24 |
+
return
|
| 25 |
+
|
| 26 |
+
try:
|
| 27 |
+
import google.generativeai as genai
|
| 28 |
+
key = os.getenv("GEMINI_API_KEY", "")
|
| 29 |
+
if key:
|
| 30 |
+
genai.configure(api_key=key)
|
| 31 |
+
_gemini_model = genai.GenerativeModel("gemini-2.5-pro")
|
| 32 |
+
print("✅ Gemini 2.5 Pro ready (PRIMARY)")
|
| 33 |
+
else:
|
| 34 |
+
print("⚠ GEMINI_API_KEY not set — skipping Gemini")
|
| 35 |
+
except Exception as e:
|
| 36 |
+
print(f"❌ Gemini init failed: {e}")
|
| 37 |
+
|
| 38 |
+
try:
|
| 39 |
+
import cohere
|
| 40 |
+
key = os.getenv("COHERE_API_KEY", "")
|
| 41 |
+
if key:
|
| 42 |
+
_cohere_client = cohere.Client(key)
|
| 43 |
+
print("✅ Cohere Command-R+ ready (FALLBACK)")
|
| 44 |
+
else:
|
| 45 |
+
print("⚠ COHERE_API_KEY not set — skipping Cohere")
|
| 46 |
+
except Exception as e:
|
| 47 |
+
print(f"❌ Cohere init failed: {e}")
|
| 48 |
+
|
| 49 |
+
_ai_initialized = True
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
# ---------- 2. Unified AI Call ----------
|
| 53 |
def ask_ai(prompt, temperature=0.7, max_retries=2):
|
| 54 |
+
_init_ai()
|
| 55 |
last_error = None
|
| 56 |
|
| 57 |
+
# Primary: Gemini 2.5 Pro
|
| 58 |
+
if _gemini_model:
|
| 59 |
for attempt in range(max_retries):
|
| 60 |
try:
|
| 61 |
+
import google.generativeai as genai
|
| 62 |
+
resp = _gemini_model.generate_content(
|
| 63 |
prompt,
|
| 64 |
generation_config=genai.types.GenerationConfig(temperature=temperature)
|
| 65 |
)
|
| 66 |
return resp.text, "gemini"
|
| 67 |
except Exception as e:
|
| 68 |
last_error = e
|
| 69 |
+
print(f"⚠ Gemini attempt {attempt + 1} failed: {e}")
|
| 70 |
if attempt < max_retries - 1:
|
| 71 |
time.sleep(1)
|
| 72 |
|
| 73 |
+
# Fallback: Cohere Command-R+
|
| 74 |
+
if _cohere_client:
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 75 |
for attempt in range(max_retries):
|
| 76 |
try:
|
| 77 |
+
resp = _cohere_client.chat(
|
| 78 |
+
model="command-r-plus-08-2024",
|
| 79 |
+
message=prompt,
|
| 80 |
temperature=temperature
|
| 81 |
)
|
| 82 |
+
return resp.text, "cohere"
|
| 83 |
except Exception as e:
|
| 84 |
last_error = e
|
| 85 |
+
print(f"⚠ Cohere attempt {attempt + 1} failed: {e}")
|
| 86 |
if attempt < max_retries - 1:
|
| 87 |
time.sleep(1)
|
| 88 |
|
| 89 |
+
return (
|
| 90 |
+
"❌ No AI service available. Add GEMINI_API_KEY or COHERE_API_KEY in Space Secrets.",
|
| 91 |
+
"error"
|
| 92 |
+
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 93 |
|
| 94 |
|
| 95 |
# ---------- 3. Topic Lists ----------
|
| 96 |
+
CHEMISTRY_TOPICS = [
|
| 97 |
"States of Matter", "Atoms, Elements & Compounds", "Mixtures & Separation Techniques",
|
| 98 |
"Atomic Structure", "Electronic Configuration", "Periodic Table",
|
| 99 |
"Chemical Bonding: Ionic", "Chemical Bonding: Covalent", "Chemical Bonding: Metallic",
|
|
|
|
| 116 |
"Laboratory Safety", "Experimental Techniques", "Analysis & Evaluation"
|
| 117 |
]
|
| 118 |
|
| 119 |
+
BIOLOGY_TOPICS = [
|
| 120 |
"Cell Structure & Function", "Specialised Cells", "Microscopy",
|
| 121 |
"Cell Division: Mitosis", "Cell Division: Meiosis", "Stem Cells",
|
| 122 |
"Diffusion", "Osmosis", "Active Transport",
|
|
|
|
| 146 |
"Biological Techniques", "Field Studies"
|
| 147 |
]
|
| 148 |
|
|
|
|
| 149 |
QUESTION_TYPES = {
|
| 150 |
"Chemistry": [
|
| 151 |
+
"Multiple Choice", "Short Answer - Describe", "Short Answer - Explain",
|
| 152 |
+
"Calculation - Moles / Mass", "Calculation - Concentration",
|
| 153 |
+
"Calculation - Percentage Yield", "Extended Response",
|
| 154 |
"Data Interpretation", "Practical / Experiment", "Equation Writing",
|
| 155 |
"Evaluation / Suggest Improvements"
|
| 156 |
],
|
| 157 |
"Biology": [
|
| 158 |
+
"Multiple Choice", "Short Answer - Describe", "Short Answer - Explain",
|
| 159 |
"Extended Response", "Data Interpretation", "Graph / Table Analysis",
|
| 160 |
"Practical / Experiment", "Evaluation / Suggest Improvements",
|
| 161 |
"Applying Knowledge to Novel Scenarios", "Genetic Diagrams",
|
|
|
|
| 163 |
]
|
| 164 |
}
|
| 165 |
|
| 166 |
+
|
| 167 |
+
# ---------- 4. Question Generation ----------
|
| 168 |
def generate_question(subject, topic, q_type, difficulty, num_marks):
|
| 169 |
if not topic:
|
| 170 |
+
return "Please select a topic first.", "", "", ""
|
| 171 |
|
| 172 |
difficulty_desc = {
|
| 173 |
+
"Foundation": "Test basic recall and simple application. Clear, direct language.",
|
| 174 |
+
"Core": "Test understanding and application. Require structured explanations.",
|
| 175 |
+
"Extended": "Test analysis, evaluation, and synthesis. Multi-step reasoning, novel contexts."
|
| 176 |
}
|
| 177 |
|
| 178 |
prompt = f"""You are an expert IGCSE {subject} examiner for Cambridge International Education.
|
|
|
|
| 180 |
Create ONE authentic IGCSE {subject} exam question with these specifications:
|
| 181 |
- Topic: {topic}
|
| 182 |
- Question Type: {q_type}
|
| 183 |
+
- Difficulty: {difficulty} - {difficulty_desc[difficulty]}
|
| 184 |
- Total Marks: {num_marks}
|
| 185 |
|
| 186 |
REQUIREMENTS:
|
| 187 |
- Use authentic IGCSE command words (describe, explain, suggest, calculate, evaluate, state, outline, compare, deduce, predict)
|
| 188 |
+
- Test genuine UNDERSTANDING, not just memorisation
|
| 189 |
- Include realistic context, data, or scenarios where appropriate
|
| 190 |
+
- For calculations: provide all necessary data (molar masses, formulae, values)
|
| 191 |
+
- For data/graph questions: describe a realistic dataset in text form
|
| 192 |
+
- Every mark should test something specific
|
| 193 |
|
| 194 |
+
Return ONLY a valid JSON object, no markdown, no code fences:
|
| 195 |
{{
|
| 196 |
+
"question_text": "Full question with context. Use \\n for new lines.",
|
| 197 |
"marks": {num_marks},
|
| 198 |
+
"command_word": "primary command word used",
|
| 199 |
"question_type": "{q_type}",
|
| 200 |
"topic": "{topic}",
|
| 201 |
"difficulty": "{difficulty}",
|
| 202 |
"mark_scheme": [
|
| 203 |
"Point 1 (1 mark): exact wording of acceptable answer",
|
| 204 |
+
"Point 2 (1 mark): ..."
|
|
|
|
| 205 |
],
|
| 206 |
+
"model_answer": "A complete full-mark answer written as a student would write it",
|
| 207 |
+
"examiner_notes": "What the examiner looks for; common pitfalls; what separates strong from weak answers",
|
| 208 |
+
"key_concepts_tested": ["concept1", "concept2"]
|
| 209 |
}}"""
|
| 210 |
|
| 211 |
resp, source = ask_ai(prompt, temperature=0.4)
|
|
|
|
| 214 |
clean = resp.replace("```json", "").replace("```", "").strip()
|
| 215 |
data = json.loads(clean)
|
| 216 |
|
| 217 |
+
q_display = (
|
| 218 |
+
f"**[{data['marks']} marks] | {data['question_type']} | "
|
| 219 |
+
f"{data['difficulty']} | Command word: {data['command_word'].upper()}**\n\n---\n\n"
|
| 220 |
+
f"{data['question_text']}"
|
| 221 |
+
)
|
| 222 |
|
| 223 |
+
mark_scheme_text = "\n".join([f"- {pt}" for pt in data.get("mark_scheme", [])])
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 224 |
key_concepts = ", ".join(data.get("key_concepts_tested", []))
|
| 225 |
+
scheme_display = (
|
| 226 |
+
f"**Mark Scheme - {data['marks']} marks**\n\n{mark_scheme_text}\n\n---\n\n"
|
| 227 |
+
f"**Examiner Notes:**\n{data.get('examiner_notes', '')}\n\n"
|
| 228 |
+
f"**Key Concepts Tested:** {key_concepts}"
|
| 229 |
+
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 230 |
|
| 231 |
model_ans = data.get("model_answer", "")
|
| 232 |
+
source_tag = f"\n\n_Generated by {source.title()}_" if source != "gemini" else ""
|
|
|
|
| 233 |
|
| 234 |
return q_display, scheme_display, model_ans, source_tag
|
| 235 |
|
| 236 |
+
except Exception:
|
| 237 |
+
return f"Could not parse question. Raw response:\n\n{resp}", "", "", ""
|
| 238 |
|
| 239 |
|
| 240 |
+
# ---------- 5. Deep Marking ----------
|
| 241 |
+
def mark_answer(question_display, scheme_display, student_answer, subject, model_ans):
|
| 242 |
if not student_answer.strip():
|
| 243 |
+
return "Please write your answer before submitting for marking."
|
| 244 |
if not question_display or "select a topic" in question_display.lower():
|
| 245 |
+
return "Please generate a question first."
|
| 246 |
|
| 247 |
+
model_section = f"\n\nModel answer for reference:\n{model_ans}" if model_ans else ""
|
| 248 |
|
| 249 |
prompt = f"""You are a highly experienced IGCSE {subject} examiner providing DETAILED FORMATIVE FEEDBACK.
|
| 250 |
|
| 251 |
+
QUESTION AND MARK SCHEME:
|
| 252 |
{question_display}
|
| 253 |
|
| 254 |
{scheme_display}
|
|
|
|
| 259 |
|
| 260 |
---
|
| 261 |
|
| 262 |
+
Mark this answer with MAXIMUM DIAGNOSTIC DETAIL. Go sentence by sentence and:
|
|
|
|
| 263 |
1. Award marks explicitly against the mark scheme
|
| 264 |
+
2. Identify EVERY error - factual, conceptual, terminological, or structural
|
| 265 |
3. Explain WHY each error is wrong scientifically
|
| 266 |
4. Identify gaps where expected points are missing
|
| 267 |
+
5. Recognise genuine understanding and credit it
|
| 268 |
6. Give specific, actionable improvement advice
|
| 269 |
|
| 270 |
+
Return ONLY a valid JSON object, no markdown, no code fences:
|
| 271 |
{{
|
| 272 |
+
"marks_awarded": 0,
|
| 273 |
+
"marks_total": 5,
|
| 274 |
+
"percentage": 0,
|
| 275 |
+
"grade_band": "Developing",
|
| 276 |
"overall_verdict": "1-2 sentence summary of performance",
|
| 277 |
"mark_by_mark_breakdown": [
|
| 278 |
{{
|
| 279 |
"mark_point": "what the mark scheme required",
|
| 280 |
"awarded": true,
|
| 281 |
+
"student_wrote": "what the student actually wrote",
|
| 282 |
"verdict": "Correct / Partially correct / Incorrect / Missing",
|
| 283 |
+
"explanation": "Why this earned/lost the mark"
|
| 284 |
}}
|
| 285 |
],
|
| 286 |
"errors_in_detail": [
|
| 287 |
{{
|
| 288 |
+
"error": "Quote or paraphrase of the student error",
|
| 289 |
"error_type": "Factual error / Misconception / Vague language / Missing detail / Wrong terminology / Incomplete explanation",
|
| 290 |
+
"why_wrong": "Scientific explanation of why this is wrong",
|
| 291 |
+
"correct_version": "How it should have been written",
|
| 292 |
+
"how_to_fix": "Specific advice to avoid this in future"
|
| 293 |
}}
|
| 294 |
],
|
| 295 |
+
"missing_points": ["Key point the student should have included"],
|
| 296 |
+
"strengths": ["Specific things the student did well"],
|
| 297 |
+
"improvement_plan": ["Action item 1", "Action item 2"],
|
| 298 |
+
"conceptual_gaps": "Any underlying conceptual misunderstanding revealed",
|
| 299 |
+
"terminology_issues": "Scientific terminology used incorrectly or imprecisely",
|
| 300 |
+
"exam_technique_feedback": "Advice on structure, command word response, scientific language, answer length",
|
| 301 |
+
"recommended_focus": "The single most important thing to study next"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 302 |
}}"""
|
| 303 |
|
| 304 |
resp, source = ask_ai(prompt, temperature=0.2)
|
|
|
|
| 310 |
marks_awarded = fb.get("marks_awarded", 0)
|
| 311 |
marks_total = fb.get("marks_total", 1)
|
| 312 |
pct = fb.get("percentage", round(marks_awarded / marks_total * 100))
|
| 313 |
+
band = fb.get("grade_band", "")
|
| 314 |
|
|
|
|
| 315 |
filled = int(pct / 10)
|
| 316 |
bar = "█" * filled + "░" * (10 - filled)
|
| 317 |
score_color = "🔴" if pct < 40 else "🟡" if pct < 70 else "🟢"
|
| 318 |
|
| 319 |
+
out = (
|
| 320 |
+
f"{score_color} **{marks_awarded}/{marks_total} marks ({pct}%) - {band}**\n"
|
| 321 |
+
f"`{bar}`\n\n"
|
| 322 |
+
f"_{fb.get('overall_verdict', '')}_\n\n---\n\n"
|
| 323 |
+
f"## Mark-by-Mark Breakdown\n\n"
|
| 324 |
+
)
|
| 325 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 326 |
for i, mp in enumerate(fb.get("mark_by_mark_breakdown", []), 1):
|
| 327 |
icon = "✅" if mp.get("awarded") else "❌"
|
| 328 |
+
out += (
|
| 329 |
+
f"**Mark {i}** {icon} _{mp.get('verdict', '')}_\n"
|
| 330 |
+
f"- **Required:** {mp.get('mark_point', '')}\n"
|
| 331 |
+
f"- **Student wrote:** _{mp.get('student_wrote', '')}_\n"
|
| 332 |
+
f"- **Examiner:** {mp.get('explanation', '')}\n\n"
|
| 333 |
+
)
|
|
|
|
| 334 |
|
| 335 |
if fb.get("errors_in_detail"):
|
| 336 |
+
out += "---\n\n## Errors In Detail\n\n"
|
| 337 |
for err in fb["errors_in_detail"]:
|
| 338 |
+
out += (
|
| 339 |
+
f"**Error type:** `{err.get('error_type', '')}`\n"
|
| 340 |
+
f"> _{err.get('error', '')}_\n\n"
|
| 341 |
+
f"**Why it's wrong:** {err.get('why_wrong', '')}\n\n"
|
| 342 |
+
f"**Correct version:** {err.get('correct_version', '')}\n\n"
|
| 343 |
+
f"**How to fix it:** {err.get('how_to_fix', '')}\n\n"
|
| 344 |
+
)
|
|
|
|
|
|
|
|
|
|
| 345 |
|
| 346 |
if fb.get("missing_points"):
|
| 347 |
+
out += "---\n\n## Missing Points\n\n"
|
| 348 |
for mp in fb["missing_points"]:
|
| 349 |
+
out += f"- {mp}\n"
|
| 350 |
|
| 351 |
if fb.get("strengths"):
|
| 352 |
+
out += "\n---\n\n## What You Did Well\n\n"
|
| 353 |
for s in fb["strengths"]:
|
| 354 |
+
out += f"- {s}\n"
|
| 355 |
|
| 356 |
+
out += "\n---\n\n## Improvement Plan\n\n"
|
| 357 |
for i, step in enumerate(fb.get("improvement_plan", []), 1):
|
| 358 |
+
out += f"{i}. {step}\n"
|
| 359 |
|
| 360 |
if fb.get("conceptual_gaps"):
|
| 361 |
+
out += f"\n---\n\n## Conceptual Gap Identified\n\n{fb['conceptual_gaps']}\n"
|
| 362 |
|
| 363 |
if fb.get("terminology_issues"):
|
| 364 |
+
out += f"\n---\n\n## Terminology Issues\n\n{fb['terminology_issues']}\n"
|
| 365 |
|
| 366 |
+
out += f"\n---\n\n## Exam Technique\n\n{fb.get('exam_technique_feedback', '')}\n"
|
| 367 |
+
out += f"\n---\n\n## Most Important Next Step\n\n**{fb.get('recommended_focus', '')}**\n"
|
| 368 |
|
| 369 |
if source != "gemini":
|
| 370 |
+
out += f"\n\n_Marked by {source.title()}_"
|
| 371 |
|
| 372 |
+
return out
|
| 373 |
|
| 374 |
+
except Exception:
|
| 375 |
+
return f"Could not parse marking feedback. Raw response:\n\n{resp}"
|
| 376 |
+
|
| 377 |
+
|
| 378 |
+
# ---------- 6. Topic Drill ----------
|
| 379 |
+
def generate_drill(subject, topic):
    """Generate a 10-question rapid-fire drill on one topic, with model answers.

    Asks the AI for 10 questions (3 recall x 1 mark, 4 application x 2 marks,
    3 analysis x 3 marks) as a JSON array, then renders one Markdown string:
    the questions first, followed by a model-answer section. If the AI
    response cannot be parsed as JSON, the raw response is returned unchanged
    so the user still sees something.
    """
    if not topic:
        return "Select a topic first!"

    prompt = f"""Generate 10 rapid-fire IGCSE {subject} questions on: "{topic}"

Mix of:
- 3 x simple recall (1 mark each)
- 4 x application/explanation (2 marks each)
- 3 x analysis/evaluation (3 marks each)

Return ONLY a valid JSON array, no markdown:
[
  {{
    "q_num": 1,
    "question": "question text",
    "marks": 1,
    "type": "Recall",
    "answer": "concise model answer",
    "key_point": "the single most important thing to include"
  }}
]"""

    resp, source = ask_ai(prompt, temperature=0.4)

    try:
        # Models sometimes wrap JSON in code fences despite instructions.
        clean = resp.replace("```json", "").replace("```", "").strip()
        qs = json.loads(clean)

        # BUG FIX: the header previously hard-coded "Total: 17 marks", but the
        # requested mix (3x1 + 4x2 + 3x3) sums to 20 — and the model may not
        # follow the mix exactly anyway. Sum the actual marks returned.
        total_marks = sum(q.get("marks", 0) for q in qs)
        out = f"## 10-Question Drill: {topic}\n\n**Total: {total_marks} marks | {subject}**\n\n---\n\n"
        for q in qs:
            label = f"[{q['marks']} mark{'s' if q['marks'] > 1 else ''}]"
            out += f"**Q{q['q_num']}** {label} _{q['type']}_\n{q['question']}\n\n"

        out += "---\n\n### Model Answers\n\n"
        for q in qs:
            out += f"**Q{q['q_num']}** ({q['marks']} marks): {q['answer']}\n_Key point: {q['key_point']}_\n\n"

        if source != "gemini":
            out += f"\n_Generated by {source.title()}_"
        return out

    except Exception:
        # Parse failure: surface the raw AI output rather than an error page.
        return resp
|
| 423 |
+
|
| 424 |
+
|
| 425 |
+
# ---------- 7. Quiz State ----------
|
| 426 |
# Module-level mutable state for the 5-question "Timed Mock" quiz.
# Populated by start_quiz(), advanced by quiz_next(), read by get_quiz_results().
# NOTE(review): being module-global, this is shared across all concurrent
# users of the Space — two simultaneous mocks would clobber each other.
quiz_state = {}
|
| 427 |
|
| 428 |
+
|
| 429 |
def start_quiz(subject, topic, difficulty):
    """Reset the shared quiz state and load the first of five questions.

    Returns (status markdown, question-section visibility update,
    finish-button visibility update, question text, progress label,
    cleared answer box).
    """
    if not topic:
        return "Select a topic first.", gr.update(visible=False), gr.update(visible=False), "", "1 / 5", ""

    # Cycle through the subject's question types so the five questions vary.
    type_pool = QUESTION_TYPES[subject]
    cycled_types = [type_pool[n % len(type_pool)] for n in range(5)]

    quiz_state.clear()
    quiz_state.update({
        "subject": subject,
        "topic": topic,
        "difficulty": difficulty,
        "questions": [],
        "schemes": [],
        "model_answers": [],
        "answers": [],
        "current": 0,
        "total": 5,
        "q_types": cycled_types,
    })

    first_q, first_scheme, first_model, _ = generate_question(
        subject, topic, cycled_types[0], difficulty, 4
    )
    quiz_state["questions"].append(first_q)
    quiz_state["schemes"].append(first_scheme)
    quiz_state["model_answers"].append(first_model)

    return (
        f"**Quiz started! Question 1 of 5** - {topic} | {difficulty}",
        gr.update(visible=True),
        gr.update(visible=False),
        first_q,
        "1 / 5",
        "",
    )
|
| 454 |
|
| 455 |
+
|
| 456 |
def quiz_next(student_answer):
|
| 457 |
idx = quiz_state.get("current", 0)
|
| 458 |
if not student_answer.strip():
|
| 459 |
+
return quiz_state["questions"][idx], quiz_state["schemes"][idx], f"{idx + 1} / 5", ""
|
| 460 |
|
| 461 |
quiz_state["answers"].append(student_answer)
|
| 462 |
quiz_state["current"] = idx + 1
|
| 463 |
|
| 464 |
if quiz_state["current"] >= quiz_state["total"]:
|
| 465 |
+
return (
|
| 466 |
+
quiz_state["questions"][idx], quiz_state["schemes"][idx],
|
| 467 |
+
f"{quiz_state['total']} / {quiz_state['total']}", "done"
|
| 468 |
+
)
|
| 469 |
+
|
| 470 |
+
q, scheme, model_ans, _ = generate_question(
|
| 471 |
+
quiz_state["subject"], quiz_state["topic"],
|
| 472 |
+
quiz_state["q_types"][quiz_state["current"]], quiz_state["difficulty"], 4
|
| 473 |
+
)
|
|
|
|
| 474 |
quiz_state["questions"].append(q)
|
| 475 |
quiz_state["schemes"].append(scheme)
|
| 476 |
quiz_state["model_answers"].append(model_ans)
|
|
|
|
| 480 |
|
| 481 |
def get_quiz_results():
    """Mark every answered mock question and assemble one combined Markdown report."""
    if not quiz_state or not quiz_state.get("answers"):
        return "No quiz in progress."

    subject = quiz_state["subject"]
    rounds = zip(
        quiz_state["questions"],
        quiz_state["schemes"],
        quiz_state["answers"],
        quiz_state["model_answers"],
    )

    # Build the report in pieces and join once, rather than repeated +=.
    sections = ["# Mock Results\n\n"]
    for number, (question, scheme, answer, model) in enumerate(rounds, 1):
        verdict = mark_answer(question, scheme, answer, subject, model)
        sections.append(
            f"## Question {number}\n\n{question}\n\n**Your Answer:** {answer}\n\n{verdict}\n\n---\n\n"
        )
    return "".join(sections)
|
| 494 |
|
| 495 |
|
| 496 |
+
# ---------- 8. UI Helpers ----------
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 497 |
def update_topics(subject):
    """Return a topic dropdown repopulated for the chosen subject, with no selection."""
    subject_topics = {"Chemistry": CHEMISTRY_TOPICS, "Biology": BIOLOGY_TOPICS}[subject]
    return gr.Dropdown(choices=subject_topics, value=None)
|
| 500 |
|
| 501 |
+
|
| 502 |
def update_q_types(subject):
    """Return a question-type dropdown for the chosen subject, defaulting to its first entry."""
    available = QUESTION_TYPES[subject]
    return gr.Dropdown(choices=available, value=available[0])
|
| 504 |
|
| 505 |
|
| 506 |
+
# ---------- 9. CSS ----------
|
| 507 |
CUSTOM_CSS = """
|
| 508 |
+
@import url('https://fonts.googleapis.com/css2?family=Syne:wght@400;600;700;800&family=DM+Mono:wght@400;500&family=DM+Sans:ital,opsz,wght@0,9..40,300;0,9..40,400;0,9..40,500;1,9..40,300&display=swap');
|
| 509 |
|
| 510 |
:root {
|
| 511 |
--bg: #0a0e1a;
|
|
|
|
| 514 |
--border: #1e3a5f;
|
| 515 |
--accent: #00d4aa;
|
| 516 |
--accent2: #3b82f6;
|
|
|
|
|
|
|
| 517 |
--bio: #8b5cf6;
|
| 518 |
--text: #e2e8f0;
|
| 519 |
--muted: #64748b;
|
|
|
|
| 520 |
}
|
| 521 |
|
|
|
|
|
|
|
| 522 |
body, .gradio-container {
|
| 523 |
background: var(--bg) !important;
|
|
|
|
| 524 |
color: var(--text) !important;
|
| 525 |
}
|
| 526 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 527 |
.tab-nav button {
|
| 528 |
font-family: 'Syne', sans-serif !important;
|
| 529 |
font-weight: 600 !important;
|
| 530 |
+
font-size: 0.82rem !important;
|
| 531 |
+
letter-spacing: 0.06em !important;
|
| 532 |
text-transform: uppercase !important;
|
| 533 |
color: var(--muted) !important;
|
| 534 |
background: transparent !important;
|
| 535 |
border: none !important;
|
| 536 |
border-bottom: 2px solid transparent !important;
|
| 537 |
+
padding: 10px 18px !important;
|
| 538 |
transition: all 0.2s !important;
|
| 539 |
}
|
|
|
|
| 540 |
.tab-nav button.selected {
|
| 541 |
color: var(--accent) !important;
|
| 542 |
border-bottom-color: var(--accent) !important;
|
| 543 |
}
|
| 544 |
|
| 545 |
+
input, textarea, select {
|
|
|
|
| 546 |
background: var(--surface) !important;
|
| 547 |
border: 1px solid var(--border) !important;
|
| 548 |
color: var(--text) !important;
|
| 549 |
border-radius: 8px !important;
|
|
|
|
| 550 |
}
|
|
|
|
| 551 |
textarea:focus, input:focus {
|
| 552 |
border-color: var(--accent) !important;
|
| 553 |
+
box-shadow: 0 0 0 3px rgba(0,212,170,0.1) !important;
|
| 554 |
outline: none !important;
|
|
|
|
| 555 |
}
|
| 556 |
|
|
|
|
| 557 |
.gr-button {
|
| 558 |
font-family: 'Syne', sans-serif !important;
|
| 559 |
font-weight: 600 !important;
|
| 560 |
+
letter-spacing: 0.04em !important;
|
| 561 |
border-radius: 8px !important;
|
| 562 |
transition: all 0.2s !important;
|
| 563 |
}
|
|
|
|
| 564 |
.gr-button-primary {
|
| 565 |
background: linear-gradient(135deg, var(--accent), var(--accent2)) !important;
|
| 566 |
border: none !important;
|
| 567 |
color: #fff !important;
|
| 568 |
}
|
|
|
|
| 569 |
.gr-button-primary:hover {
|
| 570 |
transform: translateY(-1px) !important;
|
| 571 |
+
box-shadow: 0 4px 20px rgba(0,212,170,0.3) !important;
|
| 572 |
}
|
|
|
|
| 573 |
.gr-button-secondary {
|
| 574 |
background: var(--surface2) !important;
|
| 575 |
border: 1px solid var(--border) !important;
|
| 576 |
color: var(--text) !important;
|
| 577 |
}
|
| 578 |
|
|
|
|
| 579 |
.gr-markdown {
|
| 580 |
background: var(--surface) !important;
|
| 581 |
border: 1px solid var(--border) !important;
|
| 582 |
border-radius: 10px !important;
|
| 583 |
padding: 20px !important;
|
|
|
|
| 584 |
line-height: 1.7 !important;
|
| 585 |
}
|
|
|
|
| 586 |
.gr-markdown code {
|
| 587 |
font-family: 'DM Mono', monospace !important;
|
| 588 |
+
background: rgba(0,212,170,0.12) !important;
|
| 589 |
color: var(--accent) !important;
|
| 590 |
padding: 2px 6px !important;
|
| 591 |
border-radius: 4px !important;
|
| 592 |
}
|
|
|
|
| 593 |
.gr-markdown blockquote {
|
| 594 |
border-left: 3px solid var(--accent2) !important;
|
| 595 |
padding-left: 12px !important;
|
| 596 |
color: var(--muted) !important;
|
| 597 |
font-style: italic !important;
|
| 598 |
}
|
|
|
|
| 599 |
.gr-markdown h2 {
|
| 600 |
font-family: 'Syne', sans-serif !important;
|
| 601 |
font-weight: 700 !important;
|
| 602 |
color: var(--accent) !important;
|
| 603 |
+
font-size: 1.05rem !important;
|
| 604 |
+
margin-top: 1.4rem !important;
|
|
|
|
| 605 |
}
|
|
|
|
| 606 |
.gr-markdown h3 {
|
| 607 |
font-family: 'Syne', sans-serif !important;
|
| 608 |
font-weight: 600 !important;
|
| 609 |
color: var(--accent2) !important;
|
| 610 |
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 611 |
"""
|
| 612 |
|
| 613 |
|
| 614 |
+
# ---------- 10. Gradio UI ----------
|
| 615 |
with gr.Blocks(
|
| 616 |
theme=gr.themes.Base(
|
| 617 |
primary_hue="teal",
|
|
|
|
| 620 |
font=gr.themes.GoogleFont("DM Sans"),
|
| 621 |
),
|
| 622 |
css=CUSTOM_CSS,
|
| 623 |
+
title="IGCSE Science - Question & Marking Platform"
|
| 624 |
) as app:
|
| 625 |
|
| 626 |
gr.Markdown("""
|
| 627 |
+
# IGCSE Science - Question Practice & Deep Marking
|
| 628 |
### Chemistry · Biology · AI-Powered Diagnostic Feedback
|
| 629 |
""")
|
| 630 |
|
| 631 |
with gr.Tabs():
|
| 632 |
|
| 633 |
+
# TAB 1: PRACTICE QUESTIONS
|
| 634 |
+
with gr.Tab("Practice Questions"):
|
| 635 |
+
gr.Markdown("### Generate an exam-style question, write your answer, get full mark-by-mark feedback")
|
|
|
|
|
|
|
| 636 |
|
| 637 |
with gr.Row():
|
| 638 |
with gr.Column(scale=1):
|
| 639 |
pg_subject = gr.Radio(["Chemistry", "Biology"], label="Subject", value="Chemistry")
|
| 640 |
+
pg_topic = gr.Dropdown(CHEMISTRY_TOPICS, label="Topic", allow_custom_value=True)
|
| 641 |
+
pg_qtype = gr.Dropdown(
|
| 642 |
+
QUESTION_TYPES["Chemistry"], label="Question Type",
|
| 643 |
+
value=QUESTION_TYPES["Chemistry"][0]
|
| 644 |
+
)
|
| 645 |
+
pg_diff = gr.Radio(["Foundation", "Core", "Extended"], label="Difficulty", value="Core")
|
| 646 |
+
pg_marks = gr.Slider(1, 9, value=5, step=1, label="Marks")
|
| 647 |
+
gen_btn = gr.Button("Generate Question", variant="primary", size="lg")
|
| 648 |
|
| 649 |
with gr.Column(scale=2):
|
| 650 |
+
pg_question = gr.Markdown(value="_Your question will appear here after clicking Generate_")
|
|
|
|
| 651 |
|
| 652 |
+
pg_subject.change(update_topics, pg_subject, pg_topic)
|
| 653 |
pg_subject.change(update_q_types, pg_subject, pg_qtype)
|
| 654 |
|
| 655 |
+
gr.Markdown("---\n### Write Your Answer")
|
|
|
|
| 656 |
pg_answer = gr.Textbox(
|
| 657 |
lines=10,
|
| 658 |
label="Your Answer",
|
| 659 |
+
placeholder=(
|
| 660 |
+
"Write your full answer here.\n\n"
|
| 661 |
+
"For explanations: give cause AND effect\n"
|
| 662 |
+
"For calculations: show all working with units\n"
|
| 663 |
+
"For descriptions: be specific, not vague\n"
|
| 664 |
+
"Use correct scientific terminology"
|
| 665 |
+
)
|
| 666 |
)
|
| 667 |
|
| 668 |
with gr.Row():
|
| 669 |
+
show_model_cb = gr.Checkbox(label="Include model answer in feedback", value=True)
|
| 670 |
+
mark_btn = gr.Button("Submit for Marking", variant="primary", size="lg")
|
| 671 |
|
| 672 |
+
pg_feedback = gr.Markdown(value="_Submit your answer to receive detailed feedback_")
|
| 673 |
|
| 674 |
+
pg_scheme = gr.State("")
|
| 675 |
+
pg_model_store = gr.State("")
|
|
|
|
|
|
|
| 676 |
|
| 677 |
def on_generate(subject, topic, q_type, difficulty, marks):
    """Generate a practice question and reset the feedback panel to its prompt text."""
    question, scheme, model_answer, _ = generate_question(
        subject, topic, q_type, difficulty, marks
    )
    feedback_placeholder = "_Submit your answer to receive detailed feedback_"
    return question, scheme, model_answer, feedback_placeholder
|
| 680 |
|
| 681 |
+
gen_btn.click(
|
| 682 |
on_generate,
|
| 683 |
+
[pg_subject, pg_topic, pg_qtype, pg_diff, pg_marks],
|
| 684 |
+
[pg_question, pg_scheme, pg_model_store, pg_feedback]
|
| 685 |
)
|
| 686 |
|
| 687 |
+
def on_mark(question, scheme, model_ans, student_ans, subject, use_model):
    """Mark the student's answer, optionally passing the model answer as marker reference."""
    reference = model_ans if use_model else ""
    return mark_answer(question, scheme, student_ans, subject, reference)
|
|
|
|
| 689 |
|
| 690 |
mark_btn.click(
|
| 691 |
on_mark,
|
| 692 |
+
[pg_question, pg_scheme, pg_model_store, pg_answer, pg_subject, show_model_cb],
|
| 693 |
pg_feedback
|
| 694 |
)
|
| 695 |
|
| 696 |
+
with gr.Accordion("View Mark Scheme (spoiler!)", open=False):
|
| 697 |
+
pg_scheme_view = gr.Markdown(value="_Generate a question first_")
|
|
|
|
| 698 |
|
| 699 |
+
gen_btn.click(
|
| 700 |
+
lambda s: s if s else "_Generate a question first_",
|
| 701 |
+
pg_scheme, pg_scheme_view
|
| 702 |
+
)
|
| 703 |
|
| 704 |
|
| 705 |
+
# TAB 2: TOPIC DRILL
|
| 706 |
+
with gr.Tab("Topic Drill"):
|
| 707 |
+
gr.Markdown("### 10 rapid-fire questions on one topic — answers revealed below\n*Great for revision sweeps*")
|
|
|
|
|
|
|
|
|
|
| 708 |
|
| 709 |
with gr.Row():
|
| 710 |
drill_subject = gr.Radio(["Chemistry", "Biology"], label="Subject", value="Chemistry")
|
| 711 |
+
drill_topic = gr.Dropdown(CHEMISTRY_TOPICS, label="Topic", allow_custom_value=True)
|
| 712 |
|
| 713 |
drill_subject.change(update_topics, drill_subject, drill_topic)
|
| 714 |
|
| 715 |
+
drill_btn = gr.Button("Generate 10-Question Drill", variant="primary", size="lg")
|
| 716 |
+
drill_output = gr.Markdown(value="_Select a topic and click Generate_")
|
|
|
|
| 717 |
drill_btn.click(generate_drill, [drill_subject, drill_topic], drill_output)
|
| 718 |
|
| 719 |
|
| 720 |
+
# TAB 3: TIMED MOCK
|
| 721 |
+
with gr.Tab("Timed Mock"):
|
| 722 |
+
gr.Markdown("### Answer 5 questions in sequence — full batch marking at the end\n*Simulate real exam conditions*")
|
|
|
|
|
|
|
|
|
|
| 723 |
|
| 724 |
with gr.Row():
|
| 725 |
mock_subject = gr.Radio(["Chemistry", "Biology"], label="Subject", value="Chemistry")
|
| 726 |
+
mock_topic = gr.Dropdown(CHEMISTRY_TOPICS, label="Topic", allow_custom_value=True)
|
| 727 |
+
mock_diff = gr.Radio(["Foundation", "Core", "Extended"], label="Difficulty", value="Core")
|
| 728 |
|
| 729 |
mock_subject.change(update_topics, mock_subject, mock_topic)
|
| 730 |
|
| 731 |
+
mock_status = gr.Markdown("_Click Start to begin your mock_")
|
| 732 |
+
mock_start_btn = gr.Button("Start 5-Question Mock", variant="primary", size="lg")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 733 |
|
| 734 |
+
with gr.Column(visible=False) as mock_q_section:
|
| 735 |
+
mock_q_display = gr.Markdown()
|
| 736 |
+
mock_progress = gr.Textbox(label="Progress", value="1 / 5", interactive=False)
|
| 737 |
+
mock_answer_box = gr.Textbox(
|
| 738 |
+
lines=8, label="Your Answer", placeholder="Write your answer here..."
|
| 739 |
+
)
|
| 740 |
+
mock_next_btn = gr.Button("Next Question", variant="primary")
|
| 741 |
+
mock_finish_btn = gr.Button("Finish and Get Full Results", variant="secondary", visible=False)
|
| 742 |
|
| 743 |
+
mock_results = gr.Markdown(value="")
|
|
|
|
| 744 |
|
| 745 |
def on_start_mock(subject, topic, difficulty):
    """Begin the 5-question mock: reveal the question panel, hide the finish button."""
    if not topic:
        return (
            "Select a topic first.",
            gr.update(visible=False),
            gr.update(visible=False),
            "",
            "1 / 5",
            "",
        )
    status, _, __, q, prog, ___ = start_quiz(subject, topic, difficulty)
    return status, gr.update(visible=True), gr.update(visible=False), q, prog, ""
|
| 750 |
|
| 751 |
mock_start_btn.click(
|
| 752 |
on_start_mock,
|
| 753 |
[mock_subject, mock_topic, mock_diff],
|
| 754 |
+
[mock_status, mock_q_section, mock_finish_btn, mock_q_display, mock_progress, mock_results]
|
| 755 |
)
|
| 756 |
|
| 757 |
def on_mock_next(answer):
    """Advance the mock one question; swap Next for Finish when all five are done."""
    question, _scheme, progress, done_flag = quiz_next(answer)
    finished = done_flag == "done"
    return (
        question,
        progress,
        gr.update(visible=not finished),   # Next button
        gr.update(visible=finished),       # Finish button
        "",                                # clear the answer box
    )
|
|
|
|
|
|
|
|
|
|
| 761 |
|
| 762 |
mock_next_btn.click(
|
| 763 |
on_mock_next,
|
| 764 |
+
[mock_answer_box],
|
| 765 |
+
[mock_q_display, mock_progress, mock_next_btn, mock_finish_btn, mock_answer_box]
|
| 766 |
)
|
|
|
|
| 767 |
mock_finish_btn.click(get_quiz_results, [], mock_results)
|
| 768 |
|
| 769 |
|
|
|
|
| 770 |
# TAB 4: TOPIC EXPLORER
|
| 771 |
+
with gr.Tab("Topic Explorer"):
|
| 772 |
+
gr.Markdown("### Browse all examinable topics or type any topic for a quick question")
|
|
|
|
|
|
|
| 773 |
|
| 774 |
with gr.Row():
|
| 775 |
+
gr.Markdown(
|
| 776 |
+
"#### Chemistry Topics\n\n" +
|
| 777 |
+
"\n".join([f"- {t}" for t in CHEMISTRY_TOPICS])
|
| 778 |
+
)
|
| 779 |
+
gr.Markdown(
|
| 780 |
+
"#### Biology Topics\n\n" +
|
| 781 |
+
"\n".join([f"- {t}" for t in BIOLOGY_TOPICS])
|
| 782 |
+
)
|
| 783 |
|
| 784 |
+
gr.Markdown("---\n#### Quick Question")
|
|
|
|
| 785 |
with gr.Row():
|
| 786 |
+
quick_topic = gr.Textbox(label="Topic", placeholder="e.g. Osmosis, Electrolysis, Enzymes...")
|
| 787 |
quick_subject = gr.Radio(["Chemistry", "Biology"], label="Subject", value="Chemistry")
|
| 788 |
+
quick_diff = gr.Radio(["Foundation", "Core", "Extended"], label="Difficulty", value="Core")
|
| 789 |
|
| 790 |
+
quick_btn = gr.Button("Generate Quick Question", variant="primary")
|
| 791 |
+
quick_output = gr.Markdown(value="_Enter a topic above and click Generate_")
|
| 792 |
quick_scheme = gr.State("")
|
| 793 |
+
quick_model = gr.State("")
|
| 794 |
|
| 795 |
+
def quick_q(topic, subject, diff):
    """Generate a single 5-mark question of a random type for the Topic Explorer tab.

    Returns (question markdown, mark scheme, model answer) for the quick
    question outputs/state components.
    """
    # BUG FIX: `random` is not in the module's import block (os, json, time,
    # gradio), so `random.choice` raised NameError at runtime. Import locally
    # so this is safe whether or not a top-level import is ever added.
    import random

    q_type = random.choice(QUESTION_TYPES[subject])
    q, scheme, model, _ = generate_question(subject, topic, q_type, diff, 5)
    return q, scheme, model
|
| 799 |
|
| 800 |
+
quick_btn.click(
|
| 801 |
+
quick_q,
|
| 802 |
+
[quick_topic, quick_subject, quick_diff],
|
| 803 |
+
[quick_output, quick_scheme, quick_model]
|
| 804 |
+
)
|
| 805 |
|
|
|
|
| 806 |
gr.Markdown("""
|
| 807 |
---
|
| 808 |
+
**AI System:** Gemini 2.5 Pro (Primary) → Cohere Command-R+ (Fallback)
|
| 809 |
|
| 810 |
+
Add `GEMINI_API_KEY` and/or `COHERE_API_KEY` in Space Secrets to activate.
|
| 811 |
""")
|
| 812 |
|
| 813 |
|