AptlyDigital committed on
Commit
8828ecf
·
verified ·
1 Parent(s): 7ab1e8c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +689 -214
app.py CHANGED
@@ -1,10 +1,16 @@
 
 
 
1
  import os
2
  import json
3
  import re
 
4
  from typing import List, Dict, Any, Tuple
 
5
 
6
  import gradio as gr
7
  from groq import Groq
 
8
 
9
  # -----------------------------
10
  # Configuration
@@ -12,122 +18,459 @@ from groq import Groq
12
  GROQ_API_KEY = os.environ.get("GROQ_API_KEY", "").strip()
13
  client = Groq(api_key=GROQ_API_KEY)
14
 
15
- LANG_OPTIONS = [
16
- "English",
17
- "Urdu",
18
- "Mandarin Chinease",
19
- "Hindi",
20
- "Spanish",
21
- "Standard Arabic",
22
- "French",
23
- "Bengali",
24
- "Protaguese",
25
- "Russian",
26
- "Indonasion",
 
 
 
 
 
 
 
 
 
 
27
  ]
28
 
 
29
  LEVEL_OPTIONS = ["Beginner", "Intermediate", "Advanced"]
30
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
31
 
32
  # -----------------------------
33
- # Helpers
34
  # -----------------------------
35
  def generate_with_groq(prompt: str) -> str:
36
- """
37
- Call Groq chat completions with the specified model and return text content.
38
- Includes basic error handling and a concise error message for the UI.
39
- """
40
  if not GROQ_API_KEY:
41
  return "❌ Missing GROQ_API_KEY. Please set it as a secret/environment variable."
42
-
43
  try:
44
  response = client.chat.completions.create(
45
  model="llama-3.1-8b-instant",
46
  messages=[{"role": "user", "content": prompt}],
47
  temperature=0.7,
48
- max_tokens=500,
49
  )
50
  return response.choices[0].message.content
51
  except Exception as e:
52
  return f"❌ API error: {e}"
53
 
54
-
55
  def build_system_context(subject: str, topic: str, language: str, level: str) -> str:
56
  return (
57
  f"Subject: {subject}\n"
58
  f"Topic: {topic}\n"
59
  f"Language: {language}\n"
60
  f"Student Level: {level}\n"
 
61
  )
62
 
63
-
64
  def prompt_explanation(subject: str, topic: str, language: str, level: str) -> str:
65
  ctx = build_system_context(subject, topic, language, level)
66
  return (
67
  f"{ctx}\n"
68
- "Task: Write a clear, friendly, step-by-step explanation of the topic."
69
- " Use short paragraphs, numbered steps where helpful, and examples."
70
- " Keep it concise but thorough. Reply in the specified language only."
 
71
  )
72
 
73
-
74
  def prompt_resources(subject: str, topic: str, language: str, level: str) -> str:
75
  ctx = build_system_context(subject, topic, language, level)
76
  return (
77
  f"{ctx}\n"
78
- "Task: Recommend at least 3 quality learning resources (mix of articles, videos, documentation). "
79
- "Return as a markdown bulleted list. Each item must include a title, the type (Article/Video/Docs), "
80
- "a one-line why it's useful, and a URL. Reply in the specified language only."
 
81
  )
82
 
83
-
84
  def prompt_roadmap(subject: str, topic: str, language: str, level: str) -> str:
85
  ctx = build_system_context(subject, topic, language, level)
86
  return (
87
  f"{ctx}\n"
88
- "Task: Produce a structured learning roadmap for this topic and level. "
89
- "Organize into stages with bullet points, estimated effort, and key outcomes. "
90
- "Add a short list of common mistakes to avoid. Reply in the specified language only."
 
91
  )
92
 
93
-
94
  def prompt_quiz(subject: str, topic: str, language: str, level: str) -> str:
95
  ctx = build_system_context(subject, topic, language, level)
96
  return (
97
  f"{ctx}\n"
98
- "Task: Create a short multiple-choice quiz with 3 to 5 questions. "
99
  "Return STRICT JSON only with this schema:\n"
100
  "{\n"
101
  ' "questions": [\n'
102
  ' {\n'
103
  ' "question": "string",\n'
104
  ' "options": ["A", "B", "C", "D"],\n'
105
- ' "answer_index": 0\n'
 
106
  " }\n"
107
  " ]\n"
108
  "}\n"
109
  "Requirements:\n"
110
- "- options length 3-5\n"
111
- "- answer_index is an integer index into the options array\n"
112
- "- No additional commentary or code fences\n"
113
- f"- Write the question text and options in {language}."
 
114
  )
115
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
116
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
117
  def parse_quiz_json(text: str) -> Dict[str, Any]:
118
- """
119
- Extract and parse the JSON quiz from model output.
120
- Tries to locate the first JSON-looking block if the response isn't pure JSON.
121
- """
122
- # Try direct JSON first
123
  try:
124
  parsed = json.loads(text)
125
  if "questions" in parsed:
126
  return parsed
127
  except Exception:
128
  pass
129
-
130
- # Fallback: regex to find JSON block
131
  match = re.search(r"\{(?:[^{}]|(?R))*\}", text, re.DOTALL)
132
  if match:
133
  try:
@@ -136,261 +479,393 @@ def parse_quiz_json(text: str) -> Dict[str, Any]:
136
  return parsed
137
  except Exception:
138
  pass
139
-
140
- # Final fallback
141
  return {"questions": []}
142
 
143
-
144
  def normalize_quiz(quiz: Dict[str, Any]) -> List[Dict[str, Any]]:
145
- """
146
- Ensure each question has required fields. Drop invalid ones.
147
- """
148
  cleaned = []
149
  for q in quiz.get("questions", []):
150
  question = q.get("question")
151
  options = q.get("options", [])
152
  answer_index = q.get("answer_index")
 
153
  if (
154
  isinstance(question, str)
155
  and isinstance(options, list)
156
- and 3 <= len(options) <= 5
157
  and isinstance(answer_index, int)
158
  and 0 <= answer_index < len(options)
159
  ):
160
- cleaned.append(
161
- {
162
- "question": question.strip(),
163
- "options": [str(o).strip() for o in options],
164
- "answer_index": answer_index,
165
- }
166
- )
167
- return cleaned[:5] # at most 5
168
-
169
 
170
  def evaluate_answers(
171
  user_choices: List[int], quiz_data: List[Dict[str, Any]]
172
  ) -> Tuple[str, str]:
173
- """
174
- Compute score and short feedback summary.
175
- """
 
176
  correct = 0
177
  details = []
 
178
  for i, q in enumerate(quiz_data):
179
  user_idx = user_choices[i] if i < len(user_choices) else None
180
  ans_idx = q["answer_index"]
181
  is_correct = (user_idx == ans_idx)
 
182
  if is_correct:
183
  correct += 1
184
- # Build per-question line
185
  chosen = (
186
  f"{q['options'][user_idx]}"
187
  if isinstance(user_idx, int) and 0 <= user_idx < len(q["options"])
188
  else "No answer"
189
  )
 
190
  details.append(
191
- f"Q{i+1}: {'✅ Correct' if is_correct else '❌ Incorrect'} | "
192
- f"Your answer: {chosen} | Correct: {q['options'][ans_idx]}"
 
 
193
  )
194
-
195
  total = len(quiz_data)
196
- score_text = f"Score: {correct} / {total}"
197
- if total == 0:
198
- return "No quiz generated yet.", ""
199
- # Brief feedback
200
- if correct == total and total > 0:
201
- feedback = "Great job. You’ve mastered this set."
202
- elif correct >= (total * 0.6):
203
- feedback = "Good work. Review the missed questions and try again."
204
  else:
205
- feedback = "Keep practicing. Revisit the explanation and roadmap."
206
-
207
- return score_text, feedback + "\n\n" + "\n".join(details)
208
-
209
-
210
- # -----------------------------
211
- # Gradio Callbacks
212
- # -----------------------------
213
- def on_generate_explanation(subject, topic, language, level):
214
- prompt = prompt_explanation(subject, topic, language, level)
215
- return generate_with_groq(prompt)
216
-
217
-
218
- def on_generate_resources(subject, topic, language, level):
219
- prompt = prompt_resources(subject, topic, language, level)
220
- return generate_with_groq(prompt)
221
-
222
-
223
- def on_generate_roadmap(subject, topic, language, level):
224
- prompt = prompt_roadmap(subject, topic, language, level)
225
- return generate_with_groq(prompt)
226
-
227
-
228
- def on_generate_quiz(subject, topic, language, level):
229
- raw = generate_with_groq(prompt_quiz(subject, topic, language, level))
230
- quiz = normalize_quiz(parse_quiz_json(raw))
231
-
232
- # Build updates for up to 5 radios and their labels
233
- vis = [False] * 5
234
- labels = [("Question", ["Option 1", "Option 2", "Option 3"])] * 5
235
-
236
- for i, q in enumerate(quiz):
237
- vis[i] = True
238
- labels[i] = (f"Q{i+1}. {q['question']}", q["options"])
239
-
240
- return (
241
- quiz, # gr.State
242
- gr.update(visible=vis[0], label=labels[0][0], choices=labels[0][1], value=None),
243
- gr.update(visible=vis[1], label=labels[1][0], choices=labels[1][1], value=None),
244
- gr.update(visible=vis[2], label=labels[2][0], choices=labels[2][1], value=None),
245
- gr.update(visible=vis[3], label=labels[3][0], choices=labels[3][1], value=None),
246
- gr.update(visible=vis[4], label=labels[4][0], choices=labels[4][1], value=None),
247
- raw if not quiz else "Quiz generated. Select your answers below."
248
- )
249
-
250
 
251
- def on_display_results(
252
- quiz_state,
253
- a1, a2, a3, a4, a5
254
- ):
255
  quiz = quiz_state or []
256
- # Map selected option text back to index
 
257
  selections = []
 
 
258
  for i, q in enumerate(quiz):
259
- # chosen label may be None
260
- chosen = [a1, a2, a3, a4, a5][i]
261
  if chosen is None:
262
  selections.append(None)
263
  continue
 
264
  try:
265
  idx = q["options"].index(chosen)
 
266
  except ValueError:
267
- idx = None
268
- selections.append(idx)
269
-
270
- score_text, feedback = evaluate_answers(selections, quiz)
271
- return score_text, feedback
272
-
273
 
274
  # -----------------------------
275
- # UI
276
  # -----------------------------
277
- # Custom blue theme + card-like styling
278
  CSS = """
279
  :root {
280
- --brand-blue: #1e40af; /* indigo-800 */
281
- --brand-blue-600: #2563eb;
282
- --card-bg: #f8fafc;
283
- --border: #cbd5e1;
284
  }
285
-
286
- .gradio-container {max-width: 1200px !important}
287
- #title h1 {color: var(--brand-blue); margin-bottom: 6px}
288
- #subtitle {color:#334155; margin-top:0}
289
-
290
  .card {
291
  background: var(--card-bg);
292
  border: 1px solid var(--border);
293
- border-radius: 14px;
294
- padding: 14px;
295
- box-shadow: 0 2px 8px rgb(2 6 23 / 6%);
 
296
  }
297
-
298
  .btn-primary button {
299
- background: var(--brand-blue-600) !important;
300
- border-color: var(--brand-blue-600) !important;
301
  color: white !important;
 
 
 
 
 
 
 
 
302
  }
303
-
304
  .section-title {
305
- font-weight: 700; color: var(--brand-blue);
306
- margin-bottom: 6px; font-size: 16px;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
307
  }
308
  """
309
 
310
  with gr.Blocks(css=CSS, theme=gr.themes.Soft(primary_hue="blue")) as demo:
311
- gr.Markdown("<div id='title'><h1>AI Study Tutor</h1><p id='subtitle'>Powered by Groq + Gradio</p></div>")
312
-
 
 
 
 
 
 
 
313
  with gr.Row():
314
  with gr.Column(scale=1):
315
  with gr.Group(elem_classes="card"):
316
- gr.Markdown("### Inputs")
317
- subject = gr.Textbox(label="Subject", placeholder="e.g., Mathematics")
318
- topic = gr.Textbox(label="Topic", placeholder="e.g., Derivatives of Trigonometric Functions")
319
- language = gr.Dropdown(LANG_OPTIONS, value="English", label="Language")
320
- level = gr.Radio(LEVEL_OPTIONS, value="Beginner", label="Level")
321
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
322
  with gr.Column(scale=2):
323
- # Explanation
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
324
  with gr.Group(elem_classes="card"):
325
- gr.Markdown("<div class='section-title'>Generate Explanation</div>")
326
- btn_explain = gr.Button("Generate Explanation", elem_classes="btn-primary")
327
- explanation = gr.Markdown(label="Explanation", value="")
328
-
329
- # Resources
330
- with gr.Group(elem_classes="card"):
331
- gr.Markdown("<div class='section-title'>Generate Resources</div>")
332
- btn_resources = gr.Button("Generate Resources", elem_classes="btn-primary")
333
- resources = gr.Markdown(label="Resources", value="")
334
-
335
- with gr.Row():
336
- with gr.Column():
337
- with gr.Group(elem_classes="card"):
338
- gr.Markdown("<div class='section-title'>Generate Roadmap</div>")
339
- btn_roadmap = gr.Button("Generate Roadmap", elem_classes="btn-primary")
340
- roadmap = gr.Markdown(label="Roadmap", value="")
341
-
342
- with gr.Row():
343
- with gr.Column():
344
- with gr.Group(elem_classes="card"):
345
- gr.Markdown("<div class='section-title'>Generate Quiz</div>")
346
- btn_quiz = gr.Button("Generate Quiz", elem_classes="btn-primary")
347
- quiz_info = gr.Markdown("Click the button to create a quiz.")
348
- # Quiz state and up to 5 radios
349
- quiz_state = gr.State([])
350
- q1 = gr.Radio(label="Question 1", choices=[], visible=False, interactive=True)
351
- q2 = gr.Radio(label="Question 2", choices=[], visible=False, interactive=True)
352
- q3 = gr.Radio(label="Question 3", choices=[], visible=False, interactive=True)
353
- q4 = gr.Radio(label="Question 4", choices=[], visible=False, interactive=True)
354
- q5 = gr.Radio(label="Question 5", choices=[], visible=False, interactive=True)
355
-
356
- with gr.Row():
357
- with gr.Column():
358
- with gr.Group(elem_classes="card"):
359
- gr.Markdown("<div class='section-title'>Display Results</div>")
360
- btn_results = gr.Button("Evaluate Answers", elem_classes="btn-primary")
361
- score = gr.Markdown("Score will appear here.")
362
- feedback = gr.Markdown("Feedback will appear here.")
363
-
364
- # Events
365
  btn_explain.click(
366
  fn=on_generate_explanation,
367
  inputs=[subject, topic, language, level],
368
- outputs=[explanation],
369
  )
370
-
371
  btn_resources.click(
372
  fn=on_generate_resources,
373
  inputs=[subject, topic, language, level],
374
- outputs=[resources],
375
  )
376
-
377
  btn_roadmap.click(
378
  fn=on_generate_roadmap,
379
  inputs=[subject, topic, language, level],
380
- outputs=[roadmap],
381
  )
382
-
383
  btn_quiz.click(
384
  fn=on_generate_quiz,
385
  inputs=[subject, topic, language, level],
386
- outputs=[quiz_state, q1, q2, q3, q4, q5, quiz_info],
 
 
 
 
387
  )
388
-
 
 
 
 
 
 
 
 
 
 
389
  btn_results.click(
390
  fn=on_display_results,
391
  inputs=[quiz_state, q1, q2, q3, q4, q5],
392
- outputs=[score, feedback],
393
  )
394
 
 
 
 
395
  if __name__ == "__main__":
396
- demo.launch()
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # app.py - AI Study Tutor for SEA Exam Preparation
2
+ # Enhanced with PDF upload and RAG capabilities
3
+
4
  import os
5
  import json
6
  import re
7
+ import tempfile
8
  from typing import List, Dict, Any, Tuple
9
+ from pathlib import Path
10
 
11
  import gradio as gr
12
  from groq import Groq
13
+ import PyPDF2 # For PDF text extraction
14
 
15
  # -----------------------------
16
  # Configuration
 
18
  GROQ_API_KEY = os.environ.get("GROQ_API_KEY", "").strip()
19
  client = Groq(api_key=GROQ_API_KEY)
20
 
21
+ # SEA-specific configurations
22
+ SEA_SUBJECTS = [
23
+ "Mathematics",
24
+ "English Language Arts"
25
+ ]
26
+
27
+ SEA_MATH_TOPICS = [
28
+ "Number Theory (Fractions, Decimals, Percentages)",
29
+ "Measurement (Perimeter, Area, Volume)",
30
+ "Geometry",
31
+ "Algebra Basics",
32
+ "Word Problems",
33
+ "Data Interpretation"
34
+ ]
35
+
36
+ SEA_ENGLISH_TOPICS = [
37
+ "Reading Comprehension",
38
+ "Grammar (Parts of Speech, Tenses)",
39
+ "Vocabulary (Synonyms, Antonyms)",
40
+ "Composition & Writing",
41
+ "Spelling & Punctuation",
42
+ "Listening Comprehension (simulated)"
43
  ]
44
 
45
+ LANG_OPTIONS = ["English"] # Primary language for SEA exam
46
  LEVEL_OPTIONS = ["Beginner", "Intermediate", "Advanced"]
47
 
48
+ # Storage for uploaded documents
49
+ UPLOADED_DOCS_FILE = "sea_exam_documents.json"
50
+
51
+ # -----------------------------
52
+ # Document Processing Functions
53
+ # -----------------------------
54
def extract_text_from_pdf(file_bytes: bytes, filename: str) -> str:
    """Extract text from an uploaded PDF with SEA-specific post-processing.

    Args:
        file_bytes: Raw bytes of the uploaded PDF.
        filename: Original filename; used for metadata mining and error text.

    Returns:
        The extracted, enhanced text, or a string starting with "ERROR"
        when extraction fails.
    """
    tmp_file_path = None
    try:
        # PyPDF2 wants a real file on disk, so stage the bytes in a temp file.
        with tempfile.NamedTemporaryFile(suffix=".pdf", delete=False) as tmp_file:
            tmp_file.write(file_bytes)
            tmp_file_path = tmp_file.name

        full_text = ""
        with open(tmp_file_path, 'rb') as pdf_file:
            pdf_reader = PyPDF2.PdfReader(pdf_file)
            for page_num, page in enumerate(pdf_reader.pages):
                # Page markers let retrieved snippets be traced back to a page.
                full_text += f"\n--- SEA Paper Page {page_num+1} ---\n"
                # extract_text() may return None on image-only pages.
                full_text += (page.extract_text() or "") + "\n"

        # Post-process: detect question patterns and attach file metadata.
        return enhance_sea_text_extraction(full_text, filename)

    except Exception as e:
        # BUG FIX: report the actual filename (was a literal "(unknown)").
        return f"ERROR processing {filename}: {e}"
    finally:
        # BUG FIX: always delete the temp file — the original only removed it
        # on the success path, leaking a file whenever PyPDF2 raised.
        if tmp_file_path and os.path.exists(tmp_file_path):
            os.unlink(tmp_file_path)
85
+
86
def enhance_sea_text_extraction(text: str, filename: str) -> str:
    """Annotate extracted SEA paper text with detected structure and metadata.

    Prepends a [FILE METADATA] header (filename, plus year/subject when they
    can be inferred from the filename) and appends a note describing how many
    SEA-style questions were detected in the body text.

    Args:
        text: Raw text extracted from the document.
        filename: Original filename, mined for year/subject hints.

    Returns:
        The input text wrapped with a metadata header and detection notes.
    """
    enhancements = []

    # Detect common SEA question layouts; stop at the first pattern that hits.
    question_patterns = [
        r"Question\s+\d+[:\.]\s*(.*?)(?=\nQuestion\s+\d+|$)",
        r"\d+\.\s+(.*?)(?=\n\d+\.|\Z)",
        r"Section\s+[A-Z][:\.]\s*(.*?)(?=\nSection\s+[A-Z]|\Z)"
    ]
    for pattern in question_patterns:
        matches = re.findall(pattern, text, re.DOTALL | re.IGNORECASE)
        if matches:
            enhancements.append(f"Detected {len(matches)} SEA-style questions")
            break

    # Mine the filename for an exam year (e.g. 2019) and a subject hint.
    year_match = re.search(r'(20\d{2}|19\d{2})', filename)
    subject_match = re.search(r'(math|english|mathematics|language)', filename, re.IGNORECASE)

    # BUG FIX: interpolate the filename — the original wrote the literal
    # placeholder text instead of using the parameter.
    metadata = f"\n[FILE METADATA]\nFilename: {filename}\n"
    if year_match:
        metadata += f"Year: {year_match.group(1)}\n"
    if subject_match:
        metadata += f"Subject: {subject_match.group(1).title()}\n"

    return metadata + "\n" + text + "\n" + "\n".join(enhancements)
114
+
115
def process_uploaded_documents(files) -> str:
    """Process uploaded SEA exam documents and build a searchable index.

    Each file is read, converted to text (PDFs via extract_text_from_pdf,
    .txt/.md decoded as UTF-8), tagged with an auto-detected subject, and
    the whole set is persisted to UPLOADED_DOCS_FILE plus a preview index.

    Args:
        files: Gradio upload payload; items are either
            (temp_path, original_filename) tuples or bare path values.

    Returns:
        A markdown summary of the processing, or a warning/error string.
    """
    if not files:
        return "⚠️ No files uploaded. Please upload SEA exam PDFs or text files."

    # Local import keeps the fix self-contained: the module does not
    # otherwise import datetime.
    from datetime import datetime

    all_documents = []
    processing_summary = []

    for file_info in files:
        # Gradio may provide (temp_path, original_filename) tuples...
        if isinstance(file_info, tuple) and len(file_info) >= 2:
            file_path, filename = file_info[0], file_info[1]
        else:
            # ...or bare paths, depending on the Gradio version.
            file_path = file_info
            filename = os.path.basename(str(file_info))

        try:
            with open(file_path, 'rb') as f:
                file_bytes = f.read()

            # Route by extension: PDFs get full extraction, plain text is decoded.
            if filename.lower().endswith('.pdf'):
                text_content = extract_text_from_pdf(file_bytes, filename)
                file_type = "PDF"
            elif filename.lower().endswith(('.txt', '.md')):
                text_content = file_bytes.decode('utf-8', errors='replace')
                file_type = "Text"
            else:
                # BUG FIX: name the offending file (was a literal placeholder).
                text_content = f"Unsupported file type: {filename}"
                file_type = "Unknown"

            doc_entry = {
                "filename": filename,
                "content": text_content[:10000],  # cap stored content at 10k chars
                "type": file_type,
                "subject": detect_subject_from_content(text_content),
                "size_chars": len(text_content),
                # BUG FIX: gr.utils has no `datetime` attribute, so the
                # original raised AttributeError here; use the stdlib clock.
                "upload_time": datetime.now().isoformat(),
            }

            all_documents.append(doc_entry)
            # BUG FIX: include the filename in the per-file summary line.
            processing_summary.append(f"✅ {filename} ({file_type}, {len(text_content)} chars)")

        except Exception as e:
            error_msg = f"❌ Failed to process {filename}: {str(e)}"
            processing_summary.append(error_msg)
            print(error_msg)

    # Persist the documents for later RAG retrieval.
    try:
        with open(UPLOADED_DOCS_FILE, 'w', encoding='utf-8') as f:
            json.dump(all_documents, f, ensure_ascii=False, indent=2)

        # Build the lightweight preview index alongside the full store.
        create_search_index(all_documents)

        summary = f"📚 **Processing Complete**\n\n"
        summary += f"**Processed {len(all_documents)} files:**\n"
        summary += "\n".join(processing_summary)
        summary += f"\n\n📁 Documents saved to: `{UPLOADED_DOCS_FILE}`"
        summary += f"\n🔍 Index created for RAG queries."

        return summary

    except Exception as e:
        return f"❌ Error saving documents: {str(e)}"
184
+
185
def detect_subject_from_content(text: str) -> str:
    """Guess the SEA subject of a document from keyword occurrences."""
    lowered = text.lower()

    math_keywords = ['fraction', 'decimal', 'percentage', 'geometry', 'algebra', 'equation', 'calculate', 'sum']
    english_keywords = ['comprehension', 'grammar', 'vocabulary', 'essay', 'reading', 'writing', 'passage']

    def hits(keywords):
        # Number of keywords that appear at least once in the text.
        return len([kw for kw in keywords if kw in lowered])

    math_score = hits(math_keywords)
    english_score = hits(english_keywords)

    if math_score > english_score:
        return "Mathematics"
    if english_score > math_score:
        return "English Language Arts"
    # Tie (including zero hits on both sides) is inconclusive.
    return "General SEA"
201
+
202
def create_search_index(documents: List[Dict]):
    """Write a lightweight preview index of *documents* to sea_document_index.json."""
    index_entries = []

    for doc in documents:
        # Collapse the first ten non-blank lines into a single-space preview.
        head = doc['content'].split('\n')[:10]
        preview = ' '.join(ln.strip() for ln in head if ln.strip())
        if len(preview) > 200:
            preview = preview[:200] + "..."

        index_entries.append({
            "filename": doc['filename'],
            "subject": doc['subject'],
            "preview": preview,
            "size": doc['size_chars'],
        })

    # Persist the index next to the full document store.
    with open("sea_document_index.json", 'w', encoding='utf-8') as f:
        json.dump(index_entries, f, ensure_ascii=False, indent=2)
222
+
223
def get_relevant_context(subject: str, topic: str, max_context: int = 1500) -> str:
    """Retrieve relevant context from uploaded SEA papers.

    Scores every stored document against the requested subject/topic and
    returns a concatenation of best-matching snippets, capped at
    ``max_context`` characters.

    Args:
        subject: Subject name to match (e.g. "Mathematics").
        topic: Topic string searched for inside document content.
        max_context: Maximum number of characters to return.

    Returns:
        Snippet text for prompt injection, or "" when no documents exist,
        nothing matches, or retrieval fails.
    """
    try:
        # No uploads yet -> no context to inject.
        if not os.path.exists(UPLOADED_DOCS_FILE):
            return ""

        with open(UPLOADED_DOCS_FILE, 'r', encoding='utf-8') as f:
            documents = json.load(f)

        relevant_parts = []
        topic_lower = topic.lower()
        subject_lower = subject.lower()

        for doc in documents:
            doc_content = doc.get('content', '').lower()
            doc_subject = doc.get('subject', '').lower()

            # Crude keyword relevance: a topic mention outweighs a subject match.
            relevance_score = 0
            if topic_lower in doc_content:
                relevance_score += 3
            if subject_lower in doc_subject or subject_lower in doc_content:
                relevance_score += 2

            if relevance_score > 0:
                # Extract the most relevant snippet from the document.
                content = doc['content']

                # Prefer a window around the first topic mention
                # (200 chars before, 500 after); otherwise take the head.
                if topic_lower in content.lower():
                    idx = content.lower().find(topic_lower)
                    start = max(0, idx - 200)
                    end = min(len(content), idx + 500)
                    snippet = content[start:end]
                else:
                    # Take the beginning of the document as a fallback.
                    snippet = content[:500] + "..."

                relevant_parts.append(f"\n--- From: {doc['filename']} (Subject: {doc['subject']}) ---\n{snippet}\n")

        # Combine all snippets and enforce the overall size cap.
        combined = "\n".join(relevant_parts)
        if len(combined) > max_context:
            combined = combined[:max_context] + "\n...[context truncated]..."

        return combined if combined else ""

    except Exception as e:
        # Best-effort retrieval: log and fall back to no context rather than crash.
        print(f"Context retrieval error: {e}")
        return ""
273
+
274
+ # -----------------------------
275
+ # Enhanced Generation with RAG
276
+ # -----------------------------
277
def generate_with_context(prompt: str, subject: str, topic: str, language: str, level: str) -> str:
    """Run *prompt* through Groq, prefixed with RAG context from uploaded SEA papers.

    Args:
        prompt: Task text produced by one of the prompt_* builders.
        subject: Subject name, echoed into the prompt and used for retrieval.
        topic: Topic string, echoed into the prompt and used for retrieval.
        language: Reply language, echoed into the prompt.
        level: Student level, echoed into the prompt.

    Returns:
        Model output text (or an error string from generate_with_groq).
    """
    # Retrieve relevant snippets from previously uploaded documents (may be "").
    context = get_relevant_context(subject, topic)

    context_header = ""
    if context:
        context_header = f"""
IMPORTANT CONTEXT FROM UPLOADED SEA EXAM PAPERS:
{context}

BASED ON THE ABOVE SEA EXAM CONTEXT, please respond to the following request:
"""
    else:
        # No documents uploaded yet: tell the model (and implicitly the user).
        context_header = """
NOTE: No SEA exam papers uploaded yet. For more accurate SEA-aligned content, upload past papers using the document upload section.
"""

    # Wrap everything in a single SEA-tutor instruction block.
    enhanced_prompt = f"""
SEA EXAM TUTOR MODE
{context_header}
---
REQUEST DETAILS:
Subject: {subject}
Topic: {topic}
Language: {language}
Student Level: {level}

TASK: {prompt}

SPECIFIC SEA REQUIREMENTS:
1. Align with Trinidad & Tobago SEA exam standards
2. Use appropriate difficulty for {level} level
3. Format similar to actual SEA past papers
4. Include clear, step-by-step explanations where applicable
5. Focus on conceptual understanding rather than rote memorization
"""

    return generate_with_groq(enhanced_prompt)
317
 
318
  # -----------------------------
319
+ # Original Helper Functions (Updated to use RAG)
320
  # -----------------------------
321
def generate_with_groq(prompt: str) -> str:
    """Send *prompt* to the Groq chat API and return the reply text.

    Always returns displayable text: a ❌-prefixed message is returned when
    the API key is missing or the request fails, so the UI never sees an
    exception.
    """
    # Fail fast with a clear message when the key was never configured.
    if not GROQ_API_KEY:
        return "❌ Missing GROQ_API_KEY. Please set it as a secret/environment variable."

    try:
        completion = client.chat.completions.create(
            model="llama-3.1-8b-instant",
            messages=[{"role": "user", "content": prompt}],
            temperature=0.7,
            max_tokens=800,
        )
        return completion.choices[0].message.content
    except Exception as e:
        return f"❌ API error: {e}"
336
 
 
337
def build_system_context(subject: str, topic: str, language: str, level: str) -> str:
    """Assemble the shared context preamble used by every prompt builder."""
    fields = [
        f"Subject: {subject}",
        f"Topic: {topic}",
        f"Language: {language}",
        f"Student Level: {level}",
        "Exam: Trinidad & Tobago Secondary Entrance Assessment (SEA)",
    ]
    # One field per line, with a trailing newline (callers append task text).
    return "\n".join(fields) + "\n"
345
 
 
346
def prompt_explanation(subject: str, topic: str, language: str, level: str) -> str:
    """Build the prompt requesting a step-by-step SEA-oriented explanation."""
    task = (
        "Task: Write a clear, friendly, step-by-step explanation of the topic suitable for SEA exam preparation. "
        "Use examples similar to those found in SEA past papers. "
        "Include common mistakes students make and how to avoid them. "
        "Reply in English only."
    )
    return build_system_context(subject, topic, language, level) + "\n" + task
355
 
 
356
def prompt_resources(subject: str, topic: str, language: str, level: str) -> str:
    """Build the prompt requesting SEA-specific learning resources."""
    task = (
        "Task: Recommend SEA-specific learning resources. "
        "Include official resources, practice papers, and study strategies. "
        "Return as a markdown list with resource type, description, and why it's useful for SEA. "
        "Reply in English only."
    )
    return build_system_context(subject, topic, language, level) + "\n" + task
365
 
 
366
def prompt_roadmap(subject: str, topic: str, language: str, level: str) -> str:
    """Build the prompt requesting a 4-week SEA study roadmap."""
    task = (
        "Task: Create a 4-week study roadmap for this SEA topic. "
        "Include weekly goals, practice activities, and checkpoints. "
        "Add test-taking strategies specific to SEA exam format. "
        "Reply in English only."
    )
    return build_system_context(subject, topic, language, level) + "\n" + task
375
 
 
376
def prompt_quiz(subject: str, topic: str, language: str, level: str) -> str:
    """Build the prompt requesting SEA-style MCQs as strict JSON."""
    task = "Task: Create SEA-style multiple choice questions with 4 options each. "
    schema = (
        "Return STRICT JSON only with this schema:\n"
        "{\n"
        ' "questions": [\n'
        ' {\n'
        ' "question": "string",\n'
        ' "options": ["A", "B", "C", "D"],\n'
        ' "answer_index": 0,\n'
        ' "explanation": "string"\n'
        " }\n"
        " ]\n"
        "}\n"
    )
    rules = (
        "Requirements:\n"
        "- Exactly 3-5 questions\n"
        "- Options A-D only\n"
        "- answer_index is 0-3\n"
        "- Include explanation for answer\n"
        "- Questions must be SEA exam appropriate\n"
    )
    return build_system_context(subject, topic, language, level) + "\n" + task + schema + rules
399
 
400
def prompt_past_paper_question(subject: str, topic: str) -> str:
    """Build the prompt requesting one original question in SEA exam format."""
    requirements = (
        "Task: Create a NEW practice question in the exact format of Trinidad & Tobago SEA exam. "
        "Include:\n"
        "1. The question text\n"
        "2. Multiple choice options (A-D) or structured answer format\n"
        "3. Correct answer\n"
        "4. Step-by-step solution\n"
        "5. Marks allocation\n"
        "6. Common errors to avoid\n"
        "Make it original but consistent with SEA standards."
    )
    return f"Subject: {subject}\n" + f"Topic: {topic}\n" + requirements
415
+
416
+ # -----------------------------
417
+ # Gradio Callbacks (Updated)
418
+ # -----------------------------
419
def on_generate_explanation(subject, topic, language, level):
    """Button handler: RAG-backed explanation generation."""
    return generate_with_context(
        prompt_explanation(subject, topic, language, level),
        subject, topic, language, level,
    )
422
+
423
def on_generate_resources(subject, topic, language, level):
    """Button handler: RAG-backed resource recommendations."""
    return generate_with_context(
        prompt_resources(subject, topic, language, level),
        subject, topic, language, level,
    )
426
+
427
def on_generate_roadmap(subject, topic, language, level):
    """Button handler: RAG-backed study roadmap generation."""
    return generate_with_context(
        prompt_roadmap(subject, topic, language, level),
        subject, topic, language, level,
    )
430
 
431
def on_generate_quiz(subject, topic, language, level):
    """Generate a quiz, update the five radio widgets, and report status.

    Returns a tuple of (quiz list for gr.State, five gr.update objects for
    the question radios, status markdown string).
    """
    raw_json = generate_with_context(
        prompt_quiz(subject, topic, language, level),
        subject, topic, language, level,
    )
    quiz = normalize_quiz(parse_quiz_json(raw_json))

    # Default every slot to hidden with placeholder choices, then fill one
    # slot per generated question (at most five).
    vis = [False] * 5
    labels = [("Question", ["Option 1", "Option 2", "Option 3", "Option 4"])] * 5
    for i, q in enumerate(quiz[:5]):
        vis[i] = True
        labels[i] = (f"Q{i+1}. {q['question']}", q["options"])

    status = f"✅ Generated {len(quiz)} SEA-style questions." if quiz else "⚠️ No valid questions generated."

    radio_updates = [
        gr.update(visible=vis[i], label=labels[i][0], choices=labels[i][1], value=None)
        for i in range(5)
    ]
    return (quiz, *radio_updates, status)
457
+
458
def on_generate_past_paper_question(subject, topic):
    """Button handler: generate a fresh SEA-format practice question."""
    # Past-paper questions are always English / Intermediate by design.
    return generate_with_context(
        prompt_past_paper_question(subject, topic),
        subject, topic, "English", "Intermediate",
    )
461
+
462
+ # -----------------------------
463
+ # Original Quiz Functions (Keep as is)
464
+ # -----------------------------
465
def parse_quiz_json(text: str) -> Dict[str, Any]:
    """Extract and parse the JSON quiz object from model output.

    Tries the whole text as JSON first, then falls back to the outermost
    brace-delimited span. Returns {"questions": []} when nothing parses.
    """
    # Fast path: the model returned pure JSON.
    try:
        parsed = json.loads(text)
        if "questions" in parsed:
            return parsed
    except Exception:
        pass

    # Fallback: parse the outermost {...} span. BUG FIX: the original used
    # the regex r"\{(?:[^{}]|(?R))*\}" — the recursive extension (?R) is not
    # supported by Python's `re` module and raised re.error whenever this
    # fallback was reached.
    start = text.find("{")
    end = text.rfind("}")
    if start != -1 and end > start:
        try:
            parsed = json.loads(text[start:end + 1])
            if "questions" in parsed:
                return parsed
        except Exception:
            pass

    # Nothing usable: return an empty quiz rather than raising.
    return {"questions": []}
484
 
 
485
def normalize_quiz(quiz: Dict[str, Any]) -> List[Dict[str, Any]]:
    """Validate raw quiz entries, keeping at most five well-formed questions.

    A question is kept only when it has a string prompt, a list of 2–5
    options, and an integer answer index that points at one of the options.
    Option values are coerced to stripped strings; a default explanation is
    supplied when missing.
    """
    valid: List[Dict[str, Any]] = []

    for entry in quiz.get("questions", []):
        prompt = entry.get("question")
        choices = entry.get("options", [])
        idx = entry.get("answer_index")

        # Guard clauses: skip anything malformed rather than failing later.
        if not isinstance(prompt, str) or not isinstance(choices, list):
            continue
        if not (2 <= len(choices) <= 5):
            continue
        if not isinstance(idx, int) or not (0 <= idx < len(choices)):
            continue

        valid.append(
            {
                "question": prompt.strip(),
                "options": [str(choice).strip() for choice in choices],
                "answer_index": idx,
                "explanation": entry.get("explanation", "No explanation provided."),
            }
        )
        if len(valid) == 5:
            break

    return valid
 
508
 
509
def evaluate_answers(
    user_choices: List[int], quiz_data: List[Dict[str, Any]]
) -> Tuple[str, str]:
    """Score the user's selections against the quiz and build markdown feedback.

    Returns a (score_markdown, feedback_markdown) pair. Missing or
    out-of-range selections are reported as "No answer" and count as wrong.
    """
    if not quiz_data:
        return "No quiz available.", "Generate a quiz first."

    num_correct = 0
    breakdown = []

    for pos, item in enumerate(quiz_data):
        picked = user_choices[pos] if pos < len(user_choices) else None
        expected = item["answer_index"]
        hit = picked == expected
        if hit:
            num_correct += 1

        # Echo back the chosen option's text, or a placeholder when the
        # selection is absent or does not map to a valid option.
        if isinstance(picked, int) and 0 <= picked < len(item["options"]):
            picked_text = f"{item['options'][picked]}"
        else:
            picked_text = "No answer"

        breakdown.append(
            f"**Q{pos+1}:** {'✅ Correct' if hit else '❌ Incorrect'}\n"
            f"Your answer: {picked_text}\n"
            f"Correct answer: {item['options'][expected]}\n"
            f"Explanation: {item.get('explanation', 'No explanation')}\n"
        )

    total = len(quiz_data)
    score_text = f"## 📊 Score: {num_correct} / {total}"

    # Tiered overall message: perfect / >= 70% / below.
    if num_correct == total:
        feedback = "**Excellent!** You've mastered these SEA-style questions."
    elif num_correct >= total * 0.7:
        feedback = "**Good work!** Review the explanations for any mistakes."
    else:
        feedback = "**Keep practicing!** Review the topic and try again."

    feedback += "\n\n### Question Details:\n" + "\n".join(breakdown)
    return score_text, feedback
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
552
 
553
def on_display_results(quiz_state, a1, a2, a3, a4, a5):
    """Map the selected option texts back to option indices and score the quiz.

    Each Radio's value is the option string itself (or None when unanswered);
    a value that no longer matches any option is treated as no answer.
    """
    questions = quiz_state or []
    answers = (a1, a2, a3, a4, a5)

    def to_index(position, question):
        if position >= len(answers) or answers[position] is None:
            return None
        try:
            return question["options"].index(answers[position])
        except ValueError:
            return None

    picked = [to_index(i, q) for i, q in enumerate(questions)]
    return evaluate_answers(picked, questions)
 
 
 
573
 
574
  # -----------------------------
575
+ # Enhanced Gradio UI
576
  # -----------------------------
 
577
# Custom CSS theme for the app: defines the SEA color palette (:root vars),
# card/button/section styling, the dashed upload area, and the badge pill.
# Passed verbatim to gr.Blocks(css=CSS) below — do not edit selectors without
# checking the matching elem_classes in the UI layout.
CSS = """
:root {
    --sea-blue: #1a5f7a;
    --sea-light-blue: #57cc99;
    --card-bg: #f8f9fa;
    --border: #dee2e6;
}
.gradio-container {max-width: 1200px !important; font-family: 'Segoe UI', sans-serif;}
#title h1 {color: var(--sea-blue); margin-bottom: 6px; border-bottom: 3px solid var(--sea-light-blue); padding-bottom: 10px;}
#subtitle {color: #495057; margin-top: 0; font-style: italic;}
.card {
    background: var(--card-bg);
    border: 1px solid var(--border);
    border-radius: 12px;
    padding: 18px;
    box-shadow: 0 4px 12px rgba(26, 95, 122, 0.08);
    margin-bottom: 20px;
}
.btn-primary button {
    background: linear-gradient(135deg, var(--sea-blue), #2a9d8f) !important;
    border: none !important;
    color: white !important;
    font-weight: 600 !important;
    border-radius: 8px !important;
    padding: 10px 24px !important;
}
.btn-primary button:hover {
    background: linear-gradient(135deg, #2a9d8f, var(--sea-blue)) !important;
    transform: translateY(-2px);
    transition: all 0.3s ease;
}
.section-title {
    font-weight: 700;
    color: var(--sea-blue);
    margin-bottom: 12px;
    font-size: 18px;
    display: flex;
    align-items: center;
    gap: 8px;
}
.section-title::before {
    content: "📘";
}
.upload-section {
    border: 2px dashed var(--sea-light-blue) !important;
    background: rgba(87, 204, 153, 0.05) !important;
}
.sea-badge {
    background: var(--sea-light-blue);
    color: white;
    padding: 2px 8px;
    border-radius: 12px;
    font-size: 12px;
    font-weight: 600;
    margin-left: 8px;
}
"""
634
 
635
# Build the full Gradio UI. Layout: study parameters (left), document upload
# (right), then feature tabs (explanation/resources, roadmap, quiz, system
# info), with all event wiring at the bottom.
with gr.Blocks(css=CSS, theme=gr.themes.Soft(primary_hue="blue")) as demo:
    gr.Markdown(
        """
        <div id='title'>
        <h1>🇹🇹 AI SEA Exam Tutor - Trinidad & Tobago</h1>
        <p id='subtitle'>Secondary Entrance Assessment Preparation Assistant</p>
        </div>
        """
    )

    with gr.Row():
        with gr.Column(scale=1):
            with gr.Group(elem_classes="card"):
                gr.Markdown("### 📝 SEA Study Parameters")

                subject = gr.Dropdown(
                    choices=SEA_SUBJECTS,
                    value="Mathematics",
                    label="SEA Subject",
                    info="Select subject area"
                )

                # Swap the topic list whenever the subject changes.
                def update_topics(subject):
                    if subject == "Mathematics":
                        return gr.Dropdown(choices=SEA_MATH_TOPICS, value=SEA_MATH_TOPICS[0])
                    else:
                        return gr.Dropdown(choices=SEA_ENGLISH_TOPICS, value=SEA_ENGLISH_TOPICS[0])

                topic = gr.Dropdown(
                    choices=SEA_MATH_TOPICS,
                    value=SEA_MATH_TOPICS[0],
                    label="Topic Area"
                )

                subject.change(update_topics, inputs=[subject], outputs=[topic])

                language = gr.Dropdown(
                    choices=LANG_OPTIONS,
                    value="English",
                    label="Language",
                    interactive=False  # SEA is primarily English
                )

                level = gr.Radio(
                    choices=LEVEL_OPTIONS,
                    value="Intermediate",
                    label="Student Level"
                )

        with gr.Column(scale=2):
            # Document upload section (feeds the RAG context).
            with gr.Group(elem_classes="card upload-section"):
                gr.Markdown("### 📤 Upload SEA Exam Papers")
                gr.Markdown("Upload past papers, answer sheets, or study materials. The AI will use these to generate accurate SEA-style content.")

                uploaded_files = gr.Files(
                    label="Upload Files (PDF, TXT)",
                    file_types=[".pdf", ".txt"],
                    file_count="multiple",
                    interactive=True
                )

                with gr.Row():
                    process_btn = gr.Button(
                        "Process Uploaded Documents",
                        variant="primary",
                        scale=2
                    )
                    clear_btn = gr.Button("Clear Files", variant="secondary", scale=1)

                upload_status = gr.Markdown(
                    "**Status:** No documents uploaded yet. Upload SEA papers for enhanced accuracy.",
                    elem_classes="status-text"
                )

                # Upload processing events.
                process_btn.click(
                    fn=process_uploaded_documents,
                    inputs=[uploaded_files],
                    outputs=[upload_status]
                )

                clear_btn.click(
                    fn=lambda: (None, "✅ Files cleared. Upload new documents."),
                    inputs=[],
                    outputs=[uploaded_files, upload_status]
                )

    # Main features in tabs.
    with gr.Tabs():
        with gr.TabItem("📚 Explanation & Resources"):
            with gr.Column():
                with gr.Group(elem_classes="card"):
                    gr.Markdown("<div class='section-title'>Generate SEA-Aligned Explanation</div>")
                    btn_explain = gr.Button("Generate Explanation", variant="primary")
                    explanation = gr.Markdown(
                        label="SEA-Focused Explanation",
                        value="Click 'Generate Explanation' for a topic-specific guide.",
                        elem_classes="output-area"
                    )

                with gr.Group(elem_classes="card"):
                    gr.Markdown("<div class='section-title'>Generate Study Resources</div>")
                    btn_resources = gr.Button("Generate Resources", variant="primary")
                    resources = gr.Markdown(
                        label="Recommended Resources",
                        value="Resources will appear here.",
                        elem_classes="output-area"
                    )

        with gr.TabItem("🗺️ Study Roadmap"):
            with gr.Column():
                with gr.Group(elem_classes="card"):
                    gr.Markdown("<div class='section-title'>Generate 4-Week Study Roadmap</div>")
                    btn_roadmap = gr.Button("Generate Roadmap", variant="primary")
                    roadmap = gr.Markdown(
                        label="Study Roadmap",
                        value="Your personalized roadmap will appear here.",
                        elem_classes="output-area"
                    )

        with gr.TabItem("📝 Quiz & Assessment"):
            with gr.Column():
                with gr.Group(elem_classes="card"):
                    gr.Markdown("<div class='section-title'>Generate SEA-Style Quiz</div>")

                    with gr.Row():
                        btn_quiz = gr.Button("Generate New Quiz", variant="primary", scale=2)
                        btn_past_paper = gr.Button("Generate Past Paper Question", variant="secondary", scale=1)

                    quiz_info = gr.Markdown("Click 'Generate New Quiz' to create SEA-style questions.")

                    # Past paper question output (revealed after generation).
                    past_paper_output = gr.Markdown(visible=False)

                    # Quiz state holds the normalized question dicts.
                    quiz_state = gr.State([])

                    # Question containers (up to 5 Radio slots).
                    with gr.Column(visible=False) as quiz_container:
                        q1 = gr.Radio(label="Question 1", choices=[], visible=False, interactive=True)
                        q2 = gr.Radio(label="Question 2", choices=[], visible=False, interactive=True)
                        q3 = gr.Radio(label="Question 3", choices=[], visible=False, interactive=True)
                        q4 = gr.Radio(label="Question 4", choices=[], visible=False, interactive=True)
                        q5 = gr.Radio(label="Question 5", choices=[], visible=False, interactive=True)

                with gr.Group(elem_classes="card"):
                    gr.Markdown("<div class='section-title'>Evaluate Your Answers</div>")
                    btn_results = gr.Button("Check Answers", variant="primary")

                    with gr.Row():
                        with gr.Column(scale=1):
                            score = gr.Markdown("**Score:** Not assessed yet.")
                        with gr.Column(scale=3):
                            feedback = gr.Markdown("**Feedback:** Submit quiz answers for evaluation.")

        with gr.TabItem("ℹ️ System Info"):
            with gr.Group(elem_classes="card"):
                # FIX: the previous code did `json.load(open(UPLOADED_DOCS_FILE))`
                # inline in the f-string — the file handle was never closed, and
                # a corrupt/empty JSON store raised at UI-build time, preventing
                # the whole app from launching. Count defensively instead.
                doc_count = 0
                if os.path.exists(UPLOADED_DOCS_FILE):
                    try:
                        with open(UPLOADED_DOCS_FILE, "r", encoding="utf-8") as fh:
                            doc_count = len(json.load(fh))
                    except (OSError, ValueError):
                        doc_count = 0

                gr.Markdown("### System Information")
                gr.Markdown(f"""
                **Current Configuration:**
                - Model: Llama 3.1 8B Instant (via Groq)
                - RAG Enabled: {'Yes' if os.path.exists(UPLOADED_DOCS_FILE) else 'No'}
                - Documents Loaded: {doc_count}
                - Subjects Configured: {len(SEA_SUBJECTS)}

                **How to use:**
                1. Upload SEA past papers (PDF format)
                2. Select subject and topic
                3. Generate explanations, resources, or quizzes
                4. The AI will reference uploaded papers for accuracy

                **Note:** All content is generated based on SEA exam standards and any uploaded materials.
                """)

    # Event handlers: wire buttons to the generation callbacks.
    btn_explain.click(
        fn=on_generate_explanation,
        inputs=[subject, topic, language, level],
        outputs=[explanation]
    )

    btn_resources.click(
        fn=on_generate_resources,
        inputs=[subject, topic, language, level],
        outputs=[resources]
    )

    btn_roadmap.click(
        fn=on_generate_roadmap,
        inputs=[subject, topic, language, level],
        outputs=[roadmap]
    )

    # Generate the quiz, then reveal the question container.
    btn_quiz.click(
        fn=on_generate_quiz,
        inputs=[subject, topic, language, level],
        outputs=[quiz_state, q1, q2, q3, q4, q5, quiz_info]
    ).then(
        fn=lambda: gr.update(visible=True),
        inputs=[],
        outputs=[quiz_container]
    )

    # Generate a past-paper question, then reveal its output area.
    btn_past_paper.click(
        fn=on_generate_past_paper_question,
        inputs=[subject, topic],
        outputs=[past_paper_output]
    ).then(
        fn=lambda: gr.update(visible=True),
        inputs=[],
        outputs=[past_paper_output]
    )

    btn_results.click(
        fn=on_display_results,
        inputs=[quiz_state, q1, q2, q3, q4, q5],
        outputs=[score, feedback]
    )
855
 
856
+ # -----------------------------
857
+ # Launch Application
858
+ # -----------------------------
859
if __name__ == "__main__":
    # Ensure working directories exist before the app starts handling uploads.
    os.makedirs("uploads", exist_ok=True)
    os.makedirs("data", exist_ok=True)

    # Bind on all interfaces for container/Space deployment; cap upload size
    # so oversized files are rejected by Gradio before processing.
    demo.launch(
        server_name="0.0.0.0",
        server_port=7860,
        share=False,
        max_file_size="20mb",  # Limit file size for safety
        show_error=True
    )