# (Hugging Face Spaces page-scrape residue removed: "Spaces: Sleeping Sleeping")
# --------------------------------------------------------------
# IGCSE Science Platform — Question Practice & Deep Marking
# AI: Gemini 2.5 Pro (Primary) → Cohere Command-R+ (Fallback)
# Hugging Face Spaces compatible
# --------------------------------------------------------------
# Standard library
import json
import os
import random
import time

# Third-party
import gradio as gr
# ---------- 1. LAZY AI Initialization ----------
# Clients are created on first use rather than at import time, which keeps
# Hugging Face Spaces startup light and avoids the OOM crash on boot.
_gemini_model = None      # set to a GenerativeModel once Gemini is configured
_cohere_client = None     # set to a cohere.Client once Cohere is configured
_ai_initialized = False   # guard so the setup below runs at most once


def _init_ai():
    """Lazily initialise Gemini (primary) and Cohere (fallback) clients.

    Safe to call repeatedly; only the first call does any work. A missing
    API key or a failed import is logged and tolerated — ask_ai() simply
    uses whichever client ended up available.
    """
    global _gemini_model, _cohere_client, _ai_initialized
    if _ai_initialized:
        return

    # Primary provider: Gemini 2.5 Pro.
    try:
        import google.generativeai as genai
        gemini_key = os.getenv("GEMINI_API_KEY", "")
        if not gemini_key:
            print("β GEMINI_API_KEY not set β skipping Gemini")
        else:
            genai.configure(api_key=gemini_key)
            _gemini_model = genai.GenerativeModel("gemini-2.5-pro")
            print("β Gemini 2.5 Pro ready (PRIMARY)")
    except Exception as e:
        print(f"β Gemini init failed: {e}")

    # Fallback provider: Cohere Command-R+.
    try:
        import cohere
        cohere_key = os.getenv("COHERE_API_KEY", "")
        if not cohere_key:
            print("β COHERE_API_KEY not set β skipping Cohere")
        else:
            _cohere_client = cohere.Client(cohere_key)
            print("β Cohere Command-R+ ready (FALLBACK)")
    except Exception as e:
        print(f"β Cohere init failed: {e}")

    _ai_initialized = True
# ---------- 2. Unified AI Call ----------
def ask_ai(prompt, temperature=0.7, max_retries=2):
    """Route *prompt* to the first available AI provider.

    Tries Gemini 2.5 Pro first, then Cohere Command-R+, retrying each up to
    *max_retries* times with a one-second pause between attempts. Returns a
    ``(text, source)`` tuple where source is ``"gemini"``, ``"cohere"``, or
    ``"error"`` when neither provider could produce an answer.
    """
    _init_ai()
    last_error = None  # retained for debugging parity; not part of the return

    # Primary: Gemini 2.5 Pro.
    if _gemini_model:
        for attempt in range(1, max_retries + 1):
            try:
                # Import inside the try so a broken install is retried/caught
                # exactly like an API failure.
                import google.generativeai as genai
                cfg = genai.types.GenerationConfig(temperature=temperature)
                reply = _gemini_model.generate_content(prompt, generation_config=cfg)
                return reply.text, "gemini"
            except Exception as e:
                last_error = e
                print(f"β Gemini attempt {attempt} failed: {e}")
                if attempt < max_retries:
                    time.sleep(1)

    # Fallback: Cohere Command-R+.
    if _cohere_client:
        for attempt in range(1, max_retries + 1):
            try:
                reply = _cohere_client.chat(
                    model="command-r-plus-08-2024",
                    message=prompt,
                    temperature=temperature,
                )
                return reply.text, "cohere"
            except Exception as e:
                last_error = e
                print(f"β Cohere attempt {attempt} failed: {e}")
                if attempt < max_retries:
                    time.sleep(1)

    # Neither provider is configured, or every attempt failed.
    return (
        "β No AI service available. Add GEMINI_API_KEY or COHERE_API_KEY in Space Secrets.",
        "error"
    )
# ---------- 3. Topic Lists ----------
# IGCSE Chemistry syllabus areas, grouped by theme (order preserved —
# callers index into this list and render it verbatim).
CHEMISTRY_TOPICS = [
    # Particles, atoms and the periodic table
    "States of Matter",
    "Atoms, Elements & Compounds",
    "Mixtures & Separation Techniques",
    "Atomic Structure",
    "Electronic Configuration",
    "Periodic Table",
    # Bonding and materials
    "Chemical Bonding: Ionic",
    "Chemical Bonding: Covalent",
    "Chemical Bonding: Metallic",
    "Structure & Properties of Materials",
    "Nanoparticles",
    # Groups and metals
    "Group 1: Alkali Metals",
    "Group 7: Halogens",
    "Group 0: Noble Gases",
    "Transition Metals",
    "Reactivity Series",
    "Extraction of Metals",
    "Corrosion & Rusting",
    "Alloys",
    # Reactions and energetics
    "Chemical Reactions",
    "Exothermic & Endothermic Reactions",
    "Energy Changes",
    "Rates of Reaction",
    "Catalysts",
    "Reversible Reactions",
    "Equilibrium",
    # Redox and electrochemistry
    "Redox Reactions",
    "Electrolysis",
    "Electrochemistry",
    # Acids, bases and salts
    "Acids & Alkalis",
    "pH Scale",
    "Neutralization",
    "Making Salts",
    "Titrations",
    "Strong & Weak Acids",
    # Organic chemistry
    "Hydrocarbons: Alkanes",
    "Hydrocarbons: Alkenes",
    "Crude Oil & Fractional Distillation",
    "Polymers",
    "Alcohols",
    "Carboxylic Acids",
    "Organic Synthesis",
    # Chemistry of the environment
    "Air Composition",
    "Air Pollution",
    "Greenhouse Effect & Climate Change",
    "Water Treatment",
    "Sustainable Chemistry",
    # Quantitative chemistry
    "Relative Formula Mass",
    "Moles & Molar Mass",
    "Empirical & Molecular Formulae",
    "Reacting Masses",
    "Limiting Reactants",
    "Percentage Yield",
    "Gas Volumes",
    "Concentration Calculations",
    # Practical skills
    "Laboratory Safety",
    "Experimental Techniques",
    "Analysis & Evaluation",
]
# IGCSE Biology syllabus areas, grouped by theme (order preserved —
# callers index into this list and render it verbatim).
BIOLOGY_TOPICS = [
    # Cells and transport
    "Cell Structure & Function",
    "Specialised Cells",
    "Microscopy",
    "Cell Division: Mitosis",
    "Cell Division: Meiosis",
    "Stem Cells",
    "Diffusion",
    "Osmosis",
    "Active Transport",
    # Organisation and body systems
    "Organisation of Organisms",
    "Enzymes",
    "Digestive System",
    "Circulatory System: Heart",
    "Circulatory System: Blood Vessels",
    "Blood Components",
    "Respiratory System",
    "Gas Exchange",
    "Breathing Mechanism",
    # Disease and immunity
    "Communicable Diseases",
    "Pathogens: Bacteria & Viruses",
    "Disease Prevention",
    "Immune System",
    "Vaccination",
    "Antibiotics & Painkillers",
    "Developing New Medicines",
    "Monoclonal Antibodies",
    # Bioenergetics
    "Photosynthesis",
    "Factors Affecting Photosynthesis",
    "Uses of Glucose",
    "Respiration: Aerobic",
    "Respiration: Anaerobic",
    "Metabolism",
    # Homeostasis and response
    "Homeostasis Principles",
    "Nervous System",
    "Reflex Actions",
    "Brain Structure",
    "Eye Structure & Function",
    "Body Temperature Control",
    "Endocrine System",
    "Hormones",
    "Blood Glucose Regulation",
    "Diabetes",
    "Water & Nitrogen Balance",
    "Kidneys & Dialysis",
    # Genetics and evolution
    "DNA Structure",
    "Protein Synthesis",
    "Genetic Inheritance",
    "Inherited Disorders",
    "Sex Determination",
    "Genetic Diagrams",
    "Variation",
    "Evolution",
    "Natural Selection",
    "Selective Breeding",
    "Genetic Engineering",
    "Cloning",
    "Classification",
    # Ecology
    "Ecosystems",
    "Food Chains & Webs",
    "Energy Transfer",
    "Nutrient Cycles: Carbon",
    "Nutrient Cycles: Water",
    "Nutrient Cycles: Nitrogen",
    "Biodiversity",
    "Habitat Loss",
    "Conservation",
    "Population Dynamics",
    "Competition",
    "Adaptations",
    # Humans and the environment
    "Waste Management",
    "Pollution",
    "Global Warming Impact",
    "Deforestation",
    "Sustainable Development",
    # Practical skills
    "Scientific Method",
    "Variables & Controls",
    "Data Analysis",
    "Biological Techniques",
    "Field Studies",
]
# Exam question styles offered per subject. Order matters: index 0 is the
# default dropdown value and the timed mock cycles through the list.
QUESTION_TYPES = {
    "Chemistry": [
        "Multiple Choice",
        "Short Answer - Describe",
        "Short Answer - Explain",
        "Calculation - Moles / Mass",
        "Calculation - Concentration",
        "Calculation - Percentage Yield",
        "Extended Response",
        "Data Interpretation",
        "Practical / Experiment",
        "Equation Writing",
        "Evaluation / Suggest Improvements",
    ],
    "Biology": [
        "Multiple Choice",
        "Short Answer - Describe",
        "Short Answer - Explain",
        "Extended Response",
        "Data Interpretation",
        "Graph / Table Analysis",
        "Practical / Experiment",
        "Evaluation / Suggest Improvements",
        "Applying Knowledge to Novel Scenarios",
        "Genetic Diagrams",
        "Label / Describe a Diagram",
    ],
}
# ---------- 4. Question Generation ----------
def generate_question(subject, topic, q_type, difficulty, num_marks):
    """Generate one IGCSE exam question via the AI backend.

    Returns a 4-tuple ``(question_md, scheme_md, model_answer, source_tag)``.
    When no topic is selected, or the AI response cannot be parsed as JSON,
    a human-readable message is placed in the first slot and the remaining
    slots are empty strings.
    """
    if not topic:
        return "Please select a topic first.", "", "", ""

    difficulty_desc = {
        "Foundation": "Test basic recall and simple application. Clear, direct language.",
        "Core": "Test understanding and application. Require structured explanations.",
        "Extended": "Test analysis, evaluation, and synthesis. Multi-step reasoning, novel contexts.",
    }

    prompt = f"""You are an expert IGCSE {subject} examiner for Cambridge International Education.
Create ONE authentic IGCSE {subject} exam question with these specifications:
- Topic: {topic}
- Question Type: {q_type}
- Difficulty: {difficulty} - {difficulty_desc[difficulty]}
- Total Marks: {num_marks}
REQUIREMENTS:
- Use authentic IGCSE command words (describe, explain, suggest, calculate, evaluate, state, outline, compare, deduce, predict)
- Test genuine UNDERSTANDING, not just memorisation
- Include realistic context, data, or scenarios where appropriate
- For calculations: provide all necessary data (molar masses, formulae, values)
- For data/graph questions: describe a realistic dataset in text form
- Every mark should test something specific
Return ONLY a valid JSON object, no markdown, no code fences:
{{
"question_text": "Full question with context. Use \\n for new lines.",
"marks": {num_marks},
"command_word": "primary command word used",
"question_type": "{q_type}",
"topic": "{topic}",
"difficulty": "{difficulty}",
"mark_scheme": [
"Point 1 (1 mark): exact wording of acceptable answer",
"Point 2 (1 mark): ..."
],
"model_answer": "A complete full-mark answer written as a student would write it",
"examiner_notes": "What the examiner looks for; common pitfalls; what separates strong from weak answers",
"key_concepts_tested": ["concept1", "concept2"]
}}"""

    resp, source = ask_ai(prompt, temperature=0.4)
    # Broad except on purpose: any malformed/partial JSON (or missing keys)
    # falls through to the raw-response fallback below.
    try:
        payload = resp.replace("```json", "").replace("```", "").strip()
        data = json.loads(payload)

        header = (
            f"**[{data['marks']} marks] | {data['question_type']} | "
            f"{data['difficulty']} | Command word: {data['command_word'].upper()}**"
        )
        q_display = header + "\n\n---\n\n" + data["question_text"]

        scheme_lines = "\n".join(f"- {point}" for point in data.get("mark_scheme", []))
        concepts = ", ".join(data.get("key_concepts_tested", []))
        scheme_display = (
            f"**Mark Scheme - {data['marks']} marks**\n\n{scheme_lines}\n\n---\n\n"
            f"**Examiner Notes:**\n{data.get('examiner_notes', '')}\n\n"
            f"**Key Concepts Tested:** {concepts}"
        )

        model_ans = data.get("model_answer", "")
        # Only flag the provider when the fallback answered.
        source_tag = "" if source == "gemini" else f"\n\n_Generated by {source.title()}_"
        return q_display, scheme_display, model_ans, source_tag
    except Exception:
        return f"Could not parse question. Raw response:\n\n{resp}", "", "", ""
# ---------- 5. Deep Marking ----------
def mark_answer(question_display, scheme_display, student_answer, subject, model_ans):
    """Mark *student_answer* against the generated question and mark scheme.

    Parameters mirror the Gradio components: the rendered question markdown,
    the mark-scheme markdown, the student's free-text answer, the subject
    name, and (optionally) the model answer for the AI marker to reference.
    Returns one markdown string of diagnostic feedback; on a JSON parse
    failure the raw AI response is returned instead.
    """
    # Guard clauses: nothing to mark yet.
    if not student_answer.strip():
        return "Please write your answer before submitting for marking."
    if not question_display or "select a topic" in question_display.lower():
        return "Please generate a question first."

    model_section = f"\n\nModel answer for reference:\n{model_ans}" if model_ans else ""
    prompt = f"""You are a highly experienced IGCSE {subject} examiner providing DETAILED FORMATIVE FEEDBACK.
QUESTION AND MARK SCHEME:
{question_display}
{scheme_display}
{model_section}
STUDENT'S ANSWER:
{student_answer}
---
Mark this answer with MAXIMUM DIAGNOSTIC DETAIL. Go sentence by sentence and:
1. Award marks explicitly against the mark scheme
2. Identify EVERY error - factual, conceptual, terminological, or structural
3. Explain WHY each error is wrong scientifically
4. Identify gaps where expected points are missing
5. Recognise genuine understanding and credit it
6. Give specific, actionable improvement advice
Return ONLY a valid JSON object, no markdown, no code fences:
{{
"marks_awarded": 0,
"marks_total": 5,
"percentage": 0,
"grade_band": "Developing",
"overall_verdict": "1-2 sentence summary of performance",
"mark_by_mark_breakdown": [
{{
"mark_point": "what the mark scheme required",
"awarded": true,
"student_wrote": "what the student actually wrote",
"verdict": "Correct / Partially correct / Incorrect / Missing",
"explanation": "Why this earned/lost the mark"
}}
],
"errors_in_detail": [
{{
"error": "Quote or paraphrase of the student error",
"error_type": "Factual error / Misconception / Vague language / Missing detail / Wrong terminology / Incomplete explanation",
"why_wrong": "Scientific explanation of why this is wrong",
"correct_version": "How it should have been written",
"how_to_fix": "Specific advice to avoid this in future"
}}
],
"missing_points": ["Key point the student should have included"],
"strengths": ["Specific things the student did well"],
"improvement_plan": ["Action item 1", "Action item 2"],
"conceptual_gaps": "Any underlying conceptual misunderstanding revealed",
"terminology_issues": "Scientific terminology used incorrectly or imprecisely",
"exam_technique_feedback": "Advice on structure, command word response, scientific language, answer length",
"recommended_focus": "The single most important thing to study next"
}}"""
    resp, source = ask_ai(prompt, temperature=0.2)
    try:
        clean = resp.replace("```json", "").replace("```", "").strip()
        fb = json.loads(clean)
        marks_awarded = fb.get("marks_awarded", 0)
        # Guard against a zero/absent total from the model before dividing.
        marks_total = fb.get("marks_total", 1) or 1
        # BUG FIX: the previous code computed the fallback percentage eagerly
        # inside fb.get(), so a marks_total of 0 crashed with ZeroDivisionError
        # even when the model had supplied "percentage". Compute lazily.
        pct = fb.get("percentage")
        if pct is None:
            pct = round(marks_awarded / marks_total * 100)
        band = fb.get("grade_band", "")
        # Clamp so an out-of-range percentage cannot break the 10-segment bar.
        filled = max(0, min(10, int(pct / 10)))
        bar = "β" * filled + "β" * (10 - filled)
        score_color = "π΄" if pct < 40 else "π‘" if pct < 70 else "π’"
        out = (
            f"{score_color} **{marks_awarded}/{marks_total} marks ({pct}%) - {band}**\n"
            f"`{bar}`\n\n"
            f"_{fb.get('overall_verdict', '')}_\n\n---\n\n"
            f"## Mark-by-Mark Breakdown\n\n"
        )
        # One bullet group per mark-scheme point.
        for i, mp in enumerate(fb.get("mark_by_mark_breakdown", []), 1):
            icon = "β " if mp.get("awarded") else "β"
            out += (
                f"**Mark {i}** {icon} _{mp.get('verdict', '')}_\n"
                f"- **Required:** {mp.get('mark_point', '')}\n"
                f"- **Student wrote:** _{mp.get('student_wrote', '')}_\n"
                f"- **Examiner:** {mp.get('explanation', '')}\n\n"
            )
        if fb.get("errors_in_detail"):
            out += "---\n\n## Errors In Detail\n\n"
            for err in fb["errors_in_detail"]:
                out += (
                    f"**Error type:** `{err.get('error_type', '')}`\n"
                    f"> _{err.get('error', '')}_\n\n"
                    f"**Why it's wrong:** {err.get('why_wrong', '')}\n\n"
                    f"**Correct version:** {err.get('correct_version', '')}\n\n"
                    f"**How to fix it:** {err.get('how_to_fix', '')}\n\n"
                )
        if fb.get("missing_points"):
            out += "---\n\n## Missing Points\n\n"
            for mp in fb["missing_points"]:
                out += f"- {mp}\n"
        if fb.get("strengths"):
            out += "\n---\n\n## What You Did Well\n\n"
            for s in fb["strengths"]:
                out += f"- {s}\n"
        out += "\n---\n\n## Improvement Plan\n\n"
        for n, step in enumerate(fb.get("improvement_plan", []), 1):
            out += f"{n}. {step}\n"
        if fb.get("conceptual_gaps"):
            out += f"\n---\n\n## Conceptual Gap Identified\n\n{fb['conceptual_gaps']}\n"
        if fb.get("terminology_issues"):
            out += f"\n---\n\n## Terminology Issues\n\n{fb['terminology_issues']}\n"
        out += f"\n---\n\n## Exam Technique\n\n{fb.get('exam_technique_feedback', '')}\n"
        out += f"\n---\n\n## Most Important Next Step\n\n**{fb.get('recommended_focus', '')}**\n"
        if source != "gemini":
            out += f"\n\n_Marked by {source.title()}_"
        return out
    except Exception:
        return f"Could not parse marking feedback. Raw response:\n\n{resp}"
# ---------- 6. Topic Drill ----------
def generate_drill(subject, topic):
    """Generate a 10-question rapid-fire drill on *topic*.

    Returns one markdown page: the numbered questions followed by a model
    answer section. On a JSON parse failure the raw AI response is returned.
    """
    if not topic:
        return "Select a topic first!"
    prompt = f"""Generate 10 rapid-fire IGCSE {subject} questions on: "{topic}"
Mix of:
- 3 x simple recall (1 mark each)
- 4 x application/explanation (2 marks each)
- 3 x analysis/evaluation (3 marks each)
Return ONLY a valid JSON array, no markdown:
[
{{
"q_num": 1,
"question": "question text",
"marks": 1,
"type": "Recall",
"answer": "concise model answer",
"key_point": "the single most important thing to include"
}}
]"""
    resp, source = ask_ai(prompt, temperature=0.4)
    try:
        clean = resp.replace("```json", "").replace("```", "").strip()
        qs = json.loads(clean)
        # BUG FIX: the header previously hard-coded "Total: 17 marks", but the
        # requested mix (3x1 + 4x2 + 3x3) totals 20 — and the model may vary
        # anyway. Sum the marks actually returned.
        total_marks = sum(q.get("marks", 0) for q in qs)
        out = f"## 10-Question Drill: {topic}\n\n**Total: {total_marks} marks | {subject}**\n\n---\n\n"
        for q in qs:
            label = f"[{q['marks']} mark{'s' if q['marks'] > 1 else ''}]"
            out += f"**Q{q['q_num']}** {label} _{q['type']}_\n{q['question']}\n\n"
        out += "---\n\n### Model Answers\n\n"
        for q in qs:
            out += f"**Q{q['q_num']}** ({q['marks']} marks): {q['answer']}\n_Key point: {q['key_point']}_\n\n"
        if source != "gemini":
            out += f"\n_Generated by {source.title()}_"
        return out
    except Exception:
        return resp
# ---------- 7. Quiz State ----------
# Module-level mutable state for the (single-user) timed mock. Keys:
# subject/topic/difficulty, parallel lists questions/schemes/model_answers/
# answers, current (0-based index of the question being answered), total,
# and q_types (the per-question type cycle).
quiz_state = {}


def start_quiz(subject, topic, difficulty):
    """Reset the mock state and generate question 1 of 5.

    Returns a 6-tuple matching the mock tab's outputs: status markdown,
    question-section visibility update, finish-button visibility update,
    question markdown, progress text, and a results placeholder.
    """
    if not topic:
        return "Select a topic first.", gr.update(visible=False), gr.update(visible=False), "", "1 / 5", ""

    # Cycle through the subject's question types so the five questions vary.
    subject_types = QUESTION_TYPES[subject]
    type_cycle = [subject_types[i % len(subject_types)] for i in range(5)]

    quiz_state.clear()
    quiz_state.update({
        "subject": subject,
        "topic": topic,
        "difficulty": difficulty,
        "questions": [],
        "schemes": [],
        "model_answers": [],
        "answers": [],
        "current": 0,
        "total": 5,
        "q_types": type_cycle,
    })

    first_q, first_scheme, first_model, _ = generate_question(
        subject, topic, type_cycle[0], difficulty, 4
    )
    quiz_state["questions"].append(first_q)
    quiz_state["schemes"].append(first_scheme)
    quiz_state["model_answers"].append(first_model)

    status = f"**Quiz started! Question 1 of 5** - {topic} | {difficulty}"
    return status, gr.update(visible=True), gr.update(visible=False), first_q, "1 / 5", ""
def quiz_next(student_answer):
    """Record the current mock answer and advance to the next question.

    Returns ``(question_md, scheme_md, progress_text, done_flag)`` where
    done_flag is ``"done"`` once all questions have been answered, else "".
    A blank answer keeps the user on the same question.
    """
    idx = quiz_state.get("current", 0)
    if not student_answer.strip():
        # Nothing submitted: re-show the current question unchanged.
        return quiz_state["questions"][idx], quiz_state["schemes"][idx], f"{idx + 1} / 5", ""

    quiz_state["answers"].append(student_answer)
    idx += 1
    quiz_state["current"] = idx

    if idx >= quiz_state["total"]:
        # That was the last question — signal completion, keep it on screen.
        last = idx - 1
        total = quiz_state["total"]
        return quiz_state["questions"][last], quiz_state["schemes"][last], f"{total} / {total}", "done"

    next_q, next_scheme, next_model, _ = generate_question(
        quiz_state["subject"],
        quiz_state["topic"],
        quiz_state["q_types"][idx],
        quiz_state["difficulty"],
        4,
    )
    quiz_state["questions"].append(next_q)
    quiz_state["schemes"].append(next_scheme)
    quiz_state["model_answers"].append(next_model)
    return next_q, next_scheme, f"{idx + 1} / {quiz_state['total']}", ""
def get_quiz_results():
    """Batch-mark every recorded mock answer and return one results page."""
    if not quiz_state or not quiz_state.get("answers"):
        return "No quiz in progress."
    subject = quiz_state["subject"]
    sections = ["# Mock Results\n\n"]
    rows = zip(
        quiz_state["questions"], quiz_state["schemes"],
        quiz_state["answers"], quiz_state["model_answers"]
    )
    for i, (q, scheme, ans, model) in enumerate(rows, 1):
        feedback = mark_answer(q, scheme, ans, subject, model)
        sections.append(
            f"## Question {i}\n\n{q}\n\n**Your Answer:** {ans}\n\n{feedback}\n\n---\n\n"
        )
    return "".join(sections)
# ---------- 8. UI Helpers ----------
def update_topics(subject):
    """Swap the topic dropdown's choices when the subject radio changes."""
    topic_map = {"Chemistry": CHEMISTRY_TOPICS, "Biology": BIOLOGY_TOPICS}
    return gr.Dropdown(choices=topic_map[subject], value=None)
def update_q_types(subject):
    """Swap the question-type dropdown to the selected subject's list."""
    choices = QUESTION_TYPES[subject]
    return gr.Dropdown(choices=choices, value=choices[0])
# ---------- 9. CSS ----------
# Dark theme overrides: Syne for headings/tabs/buttons, DM Mono for inline
# code, teal/blue accent palette. Injected via gr.Blocks(css=...).
CUSTOM_CSS = """
@import url('https://fonts.googleapis.com/css2?family=Syne:wght@400;600;700;800&family=DM+Mono:wght@400;500&family=DM+Sans:ital,opsz,wght@0,9..40,300;0,9..40,400;0,9..40,500;1,9..40,300&display=swap');
:root {
--bg: #0a0e1a;
--surface: #111827;
--surface2: #1a2235;
--border: #1e3a5f;
--accent: #00d4aa;
--accent2: #3b82f6;
--bio: #8b5cf6;
--text: #e2e8f0;
--muted: #64748b;
}
body, .gradio-container {
background: var(--bg) !important;
color: var(--text) !important;
}
.tab-nav button {
font-family: 'Syne', sans-serif !important;
font-weight: 600 !important;
font-size: 0.82rem !important;
letter-spacing: 0.06em !important;
text-transform: uppercase !important;
color: var(--muted) !important;
background: transparent !important;
border: none !important;
border-bottom: 2px solid transparent !important;
padding: 10px 18px !important;
transition: all 0.2s !important;
}
.tab-nav button.selected {
color: var(--accent) !important;
border-bottom-color: var(--accent) !important;
}
input, textarea, select {
background: var(--surface) !important;
border: 1px solid var(--border) !important;
color: var(--text) !important;
border-radius: 8px !important;
}
textarea:focus, input:focus {
border-color: var(--accent) !important;
box-shadow: 0 0 0 3px rgba(0,212,170,0.1) !important;
outline: none !important;
}
.gr-button {
font-family: 'Syne', sans-serif !important;
font-weight: 600 !important;
letter-spacing: 0.04em !important;
border-radius: 8px !important;
transition: all 0.2s !important;
}
.gr-button-primary {
background: linear-gradient(135deg, var(--accent), var(--accent2)) !important;
border: none !important;
color: #fff !important;
}
.gr-button-primary:hover {
transform: translateY(-1px) !important;
box-shadow: 0 4px 20px rgba(0,212,170,0.3) !important;
}
.gr-button-secondary {
background: var(--surface2) !important;
border: 1px solid var(--border) !important;
color: var(--text) !important;
}
.gr-markdown {
background: var(--surface) !important;
border: 1px solid var(--border) !important;
border-radius: 10px !important;
padding: 20px !important;
line-height: 1.7 !important;
}
.gr-markdown code {
font-family: 'DM Mono', monospace !important;
background: rgba(0,212,170,0.12) !important;
color: var(--accent) !important;
padding: 2px 6px !important;
border-radius: 4px !important;
}
.gr-markdown blockquote {
border-left: 3px solid var(--accent2) !important;
padding-left: 12px !important;
color: var(--muted) !important;
font-style: italic !important;
}
.gr-markdown h2 {
font-family: 'Syne', sans-serif !important;
font-weight: 700 !important;
color: var(--accent) !important;
font-size: 1.05rem !important;
margin-top: 1.4rem !important;
}
.gr-markdown h3 {
font-family: 'Syne', sans-serif !important;
font-weight: 600 !important;
color: var(--accent2) !important;
}
"""
# ---------- 10. Gradio UI ----------
with gr.Blocks(
    theme=gr.themes.Base(
        primary_hue="teal",
        secondary_hue="blue",
        neutral_hue="slate",
        font=gr.themes.GoogleFont("DM Sans"),
    ),
    css=CUSTOM_CSS,
    title="IGCSE Science - Question & Marking Platform"
) as app:
    gr.Markdown("""
# IGCSE Science - Question Practice & Deep Marking
### Chemistry Β· Biology Β· AI-Powered Diagnostic Feedback
""")
    with gr.Tabs():
        # TAB 1: PRACTICE QUESTIONS — generate one question, mark one answer.
        with gr.Tab("Practice Questions"):
            gr.Markdown("### Generate an exam-style question, write your answer, get full mark-by-mark feedback")
            with gr.Row():
                with gr.Column(scale=1):
                    pg_subject = gr.Radio(["Chemistry", "Biology"], label="Subject", value="Chemistry")
                    pg_topic = gr.Dropdown(CHEMISTRY_TOPICS, label="Topic", allow_custom_value=True)
                    pg_qtype = gr.Dropdown(
                        QUESTION_TYPES["Chemistry"], label="Question Type",
                        value=QUESTION_TYPES["Chemistry"][0]
                    )
                    pg_diff = gr.Radio(["Foundation", "Core", "Extended"], label="Difficulty", value="Core")
                    pg_marks = gr.Slider(1, 9, value=5, step=1, label="Marks")
                    gen_btn = gr.Button("Generate Question", variant="primary", size="lg")
                with gr.Column(scale=2):
                    pg_question = gr.Markdown(value="_Your question will appear here after clicking Generate_")
            # Keep topic and question-type choices in sync with the subject.
            pg_subject.change(update_topics, pg_subject, pg_topic)
            pg_subject.change(update_q_types, pg_subject, pg_qtype)
            gr.Markdown("---\n### Write Your Answer")
            pg_answer = gr.Textbox(
                lines=10,
                label="Your Answer",
                placeholder=(
                    "Write your full answer here.\n\n"
                    "For explanations: give cause AND effect\n"
                    "For calculations: show all working with units\n"
                    "For descriptions: be specific, not vague\n"
                    "Use correct scientific terminology"
                )
            )
            with gr.Row():
                show_model_cb = gr.Checkbox(label="Include model answer in feedback", value=True)
                mark_btn = gr.Button("Submit for Marking", variant="primary", size="lg")
            pg_feedback = gr.Markdown(value="_Submit your answer to receive detailed feedback_")
            # Hidden state: the mark scheme and model answer for the current question.
            pg_scheme = gr.State("")
            pg_model_store = gr.State("")

            def on_generate(subject, topic, q_type, difficulty, marks):
                # Fresh question; also reset the feedback panel placeholder.
                q, scheme, model_ans, _ = generate_question(subject, topic, q_type, difficulty, marks)
                return q, scheme, model_ans, "_Submit your answer to receive detailed feedback_"

            gen_btn.click(
                on_generate,
                [pg_subject, pg_topic, pg_qtype, pg_diff, pg_marks],
                [pg_question, pg_scheme, pg_model_store, pg_feedback]
            )

            def on_mark(question, scheme, model_ans, student_ans, subject, use_model):
                # Pass the model answer through only when the checkbox is ticked.
                return mark_answer(question, scheme, student_ans, subject, model_ans if use_model else "")

            mark_btn.click(
                on_mark,
                [pg_question, pg_scheme, pg_model_store, pg_answer, pg_subject, show_model_cb],
                pg_feedback
            )
            with gr.Accordion("View Mark Scheme (spoiler!)", open=False):
                pg_scheme_view = gr.Markdown(value="_Generate a question first_")
            gen_btn.click(
                lambda s: s if s else "_Generate a question first_",
                pg_scheme, pg_scheme_view
            )
        # TAB 2: TOPIC DRILL — ten rapid-fire questions with answers below.
        with gr.Tab("Topic Drill"):
            gr.Markdown("### 10 rapid-fire questions on one topic β answers revealed below\n*Great for revision sweeps*")
            with gr.Row():
                drill_subject = gr.Radio(["Chemistry", "Biology"], label="Subject", value="Chemistry")
                drill_topic = gr.Dropdown(CHEMISTRY_TOPICS, label="Topic", allow_custom_value=True)
            drill_subject.change(update_topics, drill_subject, drill_topic)
            drill_btn = gr.Button("Generate 10-Question Drill", variant="primary", size="lg")
            drill_output = gr.Markdown(value="_Select a topic and click Generate_")
            drill_btn.click(generate_drill, [drill_subject, drill_topic], drill_output)
        # TAB 3: TIMED MOCK — five questions in sequence, batch-marked at the end.
        with gr.Tab("Timed Mock"):
            gr.Markdown("### Answer 5 questions in sequence β full batch marking at the end\n*Simulate real exam conditions*")
            with gr.Row():
                mock_subject = gr.Radio(["Chemistry", "Biology"], label="Subject", value="Chemistry")
                mock_topic = gr.Dropdown(CHEMISTRY_TOPICS, label="Topic", allow_custom_value=True)
                mock_diff = gr.Radio(["Foundation", "Core", "Extended"], label="Difficulty", value="Core")
            mock_subject.change(update_topics, mock_subject, mock_topic)
            mock_status = gr.Markdown("_Click Start to begin your mock_")
            mock_start_btn = gr.Button("Start 5-Question Mock", variant="primary", size="lg")
            # Hidden until the mock starts.
            with gr.Column(visible=False) as mock_q_section:
                mock_q_display = gr.Markdown()
                mock_progress = gr.Textbox(label="Progress", value="1 / 5", interactive=False)
                mock_answer_box = gr.Textbox(
                    lines=8, label="Your Answer", placeholder="Write your answer here..."
                )
                mock_next_btn = gr.Button("Next Question", variant="primary")
                mock_finish_btn = gr.Button("Finish and Get Full Results", variant="secondary", visible=False)
            mock_results = gr.Markdown(value="")

            def on_start_mock(subject, topic, difficulty):
                # Guard: nothing to start without a topic.
                if not topic:
                    return "Select a topic first.", gr.update(visible=False), gr.update(visible=False), "", "1 / 5", ""
                result = start_quiz(subject, topic, difficulty)
                status, q, prog = result[0], result[3], result[4]
                return status, gr.update(visible=True), gr.update(visible=False), q, prog, ""

            mock_start_btn.click(
                on_start_mock,
                [mock_subject, mock_topic, mock_diff],
                [mock_status, mock_q_section, mock_finish_btn, mock_q_display, mock_progress, mock_results]
            )

            def on_mock_next(answer):
                # Swap the Next button for Finish once the last question is answered.
                q, _scheme, prog, done = quiz_next(answer)
                finished = done == "done"
                return q, prog, gr.update(visible=not finished), gr.update(visible=finished), ""

            mock_next_btn.click(
                on_mock_next,
                [mock_answer_box],
                [mock_q_display, mock_progress, mock_next_btn, mock_finish_btn, mock_answer_box]
            )
            mock_finish_btn.click(get_quiz_results, [], mock_results)
        # TAB 4: TOPIC EXPLORER — browse the syllabus or free-type a topic.
        with gr.Tab("Topic Explorer"):
            gr.Markdown("### Browse all examinable topics or type any topic for a quick question")
            with gr.Row():
                gr.Markdown(
                    "#### Chemistry Topics\n\n" +
                    "\n".join([f"- {t}" for t in CHEMISTRY_TOPICS])
                )
                gr.Markdown(
                    "#### Biology Topics\n\n" +
                    "\n".join([f"- {t}" for t in BIOLOGY_TOPICS])
                )
            gr.Markdown("---\n#### Quick Question")
            with gr.Row():
                quick_topic = gr.Textbox(label="Topic", placeholder="e.g. Osmosis, Electrolysis, Enzymes...")
                quick_subject = gr.Radio(["Chemistry", "Biology"], label="Subject", value="Chemistry")
                quick_diff = gr.Radio(["Foundation", "Core", "Extended"], label="Difficulty", value="Core")
            quick_btn = gr.Button("Generate Quick Question", variant="primary")
            quick_output = gr.Markdown(value="_Enter a topic above and click Generate_")
            quick_scheme = gr.State("")
            quick_model = gr.State("")

            def quick_q(topic, subject, diff):
                # A random question type keeps repeat clicks varied.
                q_type = random.choice(QUESTION_TYPES[subject])
                q, scheme, model, _ = generate_question(subject, topic, q_type, diff, 5)
                return q, scheme, model

            quick_btn.click(
                quick_q,
                [quick_topic, quick_subject, quick_diff],
                [quick_output, quick_scheme, quick_model]
            )
    gr.Markdown("""
---
**AI System:** Gemini 2.5 Pro (Primary) β Cohere Command-R+ (Fallback)
Add `GEMINI_API_KEY` and/or `COHERE_API_KEY` in Space Secrets to activate.
""")

app.launch()