FocusFlow Assistant committed on
Commit
a689464
·
1 Parent(s): e60bafd

Major improvements: Multi-subject plan generation, enhanced quiz/lesson quality, fixed UI layout

Browse files

- Restructured plan to create multiple topics per day (one per subject)
- Improved lesson generation with comprehensive 600-800 word content
- Enhanced quiz generation with realistic answer choices and context-based fallbacks
- Fixed Study Assistant layout: contained chat and scrollable lesson panel
- Removed multilingual support and reverted to English-only
- Fixed analytics subject tab extraction
- Removed dialog decorator error
- All topics now unlock automatically after quiz completion

Files changed (3) hide show
  1. app.py +368 -157
  2. backend/main.py +26 -1
  3. backend/rag_engine.py +332 -101
app.py CHANGED
@@ -248,6 +248,7 @@ if "chat_history" not in st.session_state: st.session_state.chat_history = []
248
  if "mastery_data" not in st.session_state: st.session_state.mastery_data = {"S1": 0, "S2": 0, "S3": 0, "S4": 0}
249
  if "expanded_topics" not in st.session_state: st.session_state.expanded_topics = set()
250
  if "show_analytics" not in st.session_state: st.session_state.show_analytics = False
 
251
 
252
  # Focus Mode State
253
  if "focus_mode" not in st.session_state: st.session_state.focus_mode = False
@@ -298,40 +299,155 @@ if st.session_state.show_analytics:
298
  # If st.dialog is available (it was in the previous app.py), we should use it.
299
  pass
300
 
301
- @st.dialog("Analytics Overview", width="large")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
302
  def show_analytics_dialog():
303
- # Header Tabs
304
- st.markdown("""
305
- <div style="display: flex; gap: 10px; margin-bottom: 30px; background: #F3F4F6; padding: 5px; border-radius: 8px;">
306
- <button style="flex: 1; padding: 8px; border-radius: 6px; border: none; background: #3B82F6; color: white; font-weight: 600;">S1</button>
307
- <button style="flex: 1; padding: 8px; border-radius: 6px; border: none; background: transparent; color: #6B7280; font-weight: 600;">S2</button>
308
- <button style="flex: 1; padding: 8px; border-radius: 6px; border: none; background: transparent; color: #6B7280; font-weight: 600;">S3</button>
309
- <button style="flex: 1; padding: 8px; border-radius: 6px; border: none; background: transparent; color: #6B7280; font-weight: 600;">S4</button>
310
- </div>
311
- """, unsafe_allow_html=True)
312
 
313
- # Hero Score
314
- st.markdown("""
315
- <div style="text-align: center; margin-bottom: 40px;">
316
- <h1 style="font-size: 5rem; color: #111827; margin: 0;">67%</h1>
317
- <p style="color: #6B7280; font-size: 1.2rem;">Overall Mastery</p>
318
- </div>
319
- """, unsafe_allow_html=True)
320
 
321
- # Columns
322
- c1, c2, c3 = st.columns(3)
 
323
 
324
- with c1:
325
- st.markdown("**G** <span style='float:right; background:#d1d5db; color:white; padding:2px 8px; border-radius:99px; font-size:0.8rem'>N/A</span>", unsafe_allow_html=True)
326
- st.markdown("<p style='color:#4B5563; margin-top:10px'>--</p>", unsafe_allow_html=True)
327
-
328
- with c2:
329
- st.markdown("**M** <span style='float:right; background:#d1d5db; color:white; padding:2px 8px; border-radius:99px; font-size:0.8rem'>N/A</span>", unsafe_allow_html=True)
330
- st.markdown("<p style='color:#4B5563; margin-top:10px'>--</p>", unsafe_allow_html=True)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
331
 
332
- with c3:
333
- st.markdown("**P** <span style='float:right; background:#d1d5db; color:white; padding:2px 8px; border-radius:99px; font-size:0.8rem'>N/A</span>", unsafe_allow_html=True)
334
- st.markdown("<p style='color:#4B5563; margin-top:10px'>--</p>", unsafe_allow_html=True)
335
 
336
  # -----------------------------------------------------------------------------
337
  # 3. QUIZ TO UNLOCK (Dialog)
@@ -655,6 +771,7 @@ if not st.session_state.focus_mode:
655
  st.session_state.expiry_time = time.time() + total_seconds
656
  st.rerun()
657
 
 
658
  # Sources Widget
659
  with st.container(border=True):
660
  # Connectivity Check
@@ -765,57 +882,75 @@ if st.session_state.focus_mode:
765
  # FOCUS: LEFT COLUMN (CHAT)
766
  with left_col:
767
  st.markdown("### 💬 Study Assistant")
768
- # Reuse existing chat logic or a simplified version
769
- messages = st.container(height=600)
 
770
  with messages:
771
  for msg in st.session_state.chat_history:
772
  with st.chat_message(msg["role"]):
773
  st.write(msg["content"])
774
 
775
- # New Chat Input
776
  if prompt := st.chat_input(f"Ask about {st.session_state.active_topic}..."):
777
  st.session_state.chat_history.append({"role": "user", "content": prompt})
778
- with st.chat_message("user"):
779
- st.write(prompt)
780
 
781
  # Call AI
782
- with st.chat_message("assistant"):
783
- with st.spinner("Thinking..."):
784
- try:
785
- # Prepare history
786
- history = [{"role": m["role"], "content": m["content"]} for m in st.session_state.chat_history[:-1][-5:]]
787
- resp = requests.post(f"{API_URL}/query", json={"question": prompt, "history": history})
788
- if resp.status_code == 200:
789
- data = resp.json()
790
- ans = data.get("answer", "No answer.")
791
- st.write(ans)
792
- st.session_state.chat_history.append({"role": "assistant", "content": ans})
793
- else:
794
- st.error("Error.")
795
- except Exception as e:
796
- st.error(f"Connection Error: {e}")
797
 
798
- # FOCUS: RIGHT COLUMN (CONTENT) - (Technically mid_col in layout)
799
  with mid_col:
800
- st.markdown(f"## 📖 {st.session_state.active_topic}")
801
- st.info("Here is the learning material for this topic.")
 
 
 
 
 
802
 
803
- # Placeholder Content
804
- st.markdown("""
805
- ### Key Concepts
806
- - **Concept 1:** Definition and importance.
807
- - **Concept 2:** Real-world application.
808
- - **Concept 3:** Detailed analysis.
809
- """)
810
 
811
- # Exit Button
812
- st.markdown("---")
813
- if st.button("⬅️ Exit Focus Mode", use_container_width=True):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
814
  st.session_state.focus_mode = False
815
- st.session_state.active_topic = None
816
  st.rerun()
817
 
818
 
 
819
  # --- MIDDLE COLUMN: Intelligent Workspace ---
820
  # --- MIDDLE COLUMN: Intelligent Workspace ---
821
  if not st.session_state.focus_mode:
@@ -912,44 +1047,29 @@ if not st.session_state.focus_mode:
912
  # --- RIGHT COLUMN: Scheduler ---
913
  if right_col:
914
  with right_col:
915
- # Scheduler Header Removed to save space
916
- # st.markdown("### Scheduler")
917
-
918
- # Calendar Agent
919
- # Calendar Agent (Minimalist)
920
- # Removing st.container() wrapper to reduce vertical gap/white block
921
-
922
- # Calculate Start Date: 1st of Previous Month
923
  today = date.today()
924
- # Logic to go back 1 month
925
- last_month_year = today.year if today.month > 1 else today.year - 1
926
- last_month = today.month - 1 if today.month > 1 else 12
927
- start_date_str = f"{last_month_year}-{last_month:02d}-01"
928
 
929
- calendar_options = {
930
- # User requested arrows "on both sides"
931
- "headerToolbar": {"left": "prev", "center": "title", "right": "next"},
 
 
 
 
 
 
 
 
 
 
 
 
932
 
933
- "initialView": "multiMonthYear",
934
- "initialDate": start_date_str,
935
- "views": {
936
- "multiMonthYear": {
937
- "type": "multiMonthYear",
938
- "duration": {"months": 3},
939
- "multiMonthMaxColumns": 3,
940
- # FIXED: 280px ensures text is readable. 100px was too small!
941
- # This will force the container to scroll horizontally.
942
- "multiMonthMinWidth": 280,
943
- }
944
- },
945
- # JS Option to format title shorter (e.g. "Dec 2025 - Feb 2026")
946
- "titleFormat": {"year": "numeric", "month": "short"},
947
- # "contentHeight": "auto",
948
- }
949
-
950
- calendar(events=[], options=calendar_options, key="mini_cal")
951
-
952
- # --- B. TALK TO CALENDAR (Fixed: No Loop) ---
953
  with st.form("calendar_chat_form", clear_on_submit=True):
954
  plan_query = st.text_input("Talk to Calendar...", placeholder="e.g., 'Make a 3 day plan'")
955
  submitted = st.form_submit_button("🚀 Generate Plan")
@@ -966,13 +1086,19 @@ if right_col:
966
 
967
  # ROBUST SANITIZATION LOOP
968
  for index, task in enumerate(raw_plan):
969
- # 1. Fix Missing ID (Use index + 1 if missing)
970
- if "id" not in task:
971
- task["id"] = index + 1
972
-
973
- # 2. Fix Missing Keys
 
 
 
 
 
 
 
974
  task["quiz_passed"] = task.get("quiz_passed", False)
975
- task["status"] = task.get("status", "locked" if task.get("locked", True) else "unlocked")
976
  task["title"] = task.get("topic", f"Topic {task['id']}") # Fallback title
977
 
978
  st.session_state.study_plan = raw_plan
@@ -982,63 +1108,148 @@ if right_col:
982
  st.error(f"Failed: {resp.text}")
983
  except Exception as e:
984
  st.error(f"Error: {e}")
985
- # NO SPACER here
986
 
987
- # Removed spacer to satisfy "remove white box" request
988
- # st.markdown("<br>", unsafe_allow_html=True) # Spacer
989
-
990
- # Today's Topics (Gamified)
991
- # Merging the opening DIV and the Header into ONE markdown call to ensure they render together.
992
- st.markdown("""
993
- <div class="custom-card">
994
- <div style="display:flex; justify-content:space-between; align-items:center;"><h4>Today's Topics</h4></div>
995
- """, unsafe_allow_html=True)
996
 
997
- if not st.session_state.study_plan:
998
- # EMPTY STATE
999
- st.info("Tell the calendar to make a plan 📅")
 
 
1000
  else:
1001
- # Render Plan
1002
- st.markdown(f'<span style="font-size:0.8rem; color:#6B7280">{len([t for t in st.session_state.study_plan if t["quiz_passed"]])}/{len(st.session_state.study_plan)} Done</span>', unsafe_allow_html=True)
1003
- st.markdown("<br>", unsafe_allow_html=True)
 
 
 
 
 
 
1004
 
1005
- # Iterate through Mock Plan
1006
- for i, topic in enumerate(st.session_state.study_plan):
1007
- t_id = topic["id"]
1008
- title = topic["title"]
1009
- status = topic["status"]
1010
- passed = topic["quiz_passed"]
1011
-
1012
- # Styles
1013
- opacity = "1.0" if status == "unlocked" else "0.5"
1014
- icon = "🔒" if status == "locked" else ("✅" if passed else "🟦")
1015
-
1016
- # Card Container
1017
- with st.container():
1018
- c1, c2 = st.columns([0.15, 0.85])
1019
- with c1:
1020
- st.markdown(f"<div style='font-size:1.5rem; opacity:{opacity}'>{icon}</div>", unsafe_allow_html=True)
1021
- with c2:
1022
- # Title
1023
- st.markdown(f"<div style='font-weight:600; opacity:{opacity}'>{title}</div>", unsafe_allow_html=True)
 
 
 
 
 
 
 
 
 
1024
 
1025
- # Unlocked & Not Passed -> Show Actions
1026
- if status == "unlocked" and not passed:
1027
- # Dropdown / Expandable Area
1028
- with st.expander("Start Learning", expanded=True):
1029
- # FOCUS MODE TRIGGER
1030
- if st.button("🚀 Enter Focus Mode", key=f"focus_{t_id}", use_container_width=True):
1031
- st.session_state.focus_mode = True
1032
- st.session_state.active_topic = title
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1033
  st.rerun()
1034
 
1035
- st.info("Mastery Required: 80%")
1036
- if st.button("Take Mandatory Quiz", key=f"q_{t_id}", type="primary", use_container_width=True):
1037
- show_quiz_dialog(t_id, title)
1038
-
1039
- if st.button("Flashcards (Optional)", key=f"fc_{t_id}", use_container_width=True):
1040
- show_flashcard_dialog(t_id, title)
1041
-
1042
- st.markdown("<hr style='margin: 10px 0;'>", unsafe_allow_html=True)
1043
-
1044
- st.markdown("</div>", unsafe_allow_html=True)
 
248
  if "mastery_data" not in st.session_state: st.session_state.mastery_data = {"S1": 0, "S2": 0, "S3": 0, "S4": 0}
249
  if "expanded_topics" not in st.session_state: st.session_state.expanded_topics = set()
250
  if "show_analytics" not in st.session_state: st.session_state.show_analytics = False
251
+ if "topic_scores" not in st.session_state: st.session_state.topic_scores = {} # Track quiz performance by topic_id
252
 
253
  # Focus Mode State
254
  if "focus_mode" not in st.session_state: st.session_state.focus_mode = False
 
299
  # If st.dialog is available (it was in the previous app.py), we should use it.
300
  pass
301
 
302
+ def extract_subjects_and_topics():
303
+ """
304
+ Extract subjects from study plan topics.
305
+ Returns: {subject_name: [topic_data_with_scores]}
306
+ """
307
+ import re
308
+ subjects = {}
309
+ for topic in st.session_state.study_plan:
310
+ title = topic.get("title", "")
311
+
312
+ # Remove "Day X:" prefix if present
313
+ title_cleaned = re.sub(r'^Day\s+\d+:\s*', '', title)
314
+
315
+ # Try to extract subject from remaining text
316
+ # Look for patterns like "OOPS:" or "Manufacturing:" or just use first few words
317
+ if ":" in title_cleaned:
318
+ # Get first part before colon as subject
319
+ subject = title_cleaned.split(":")[0].strip()
320
+ elif " - " in title_cleaned:
321
+ # Alternative separator
322
+ subject = title_cleaned.split(" - ")[0].strip()
323
+ else:
324
+ # Use first 2-3 capitalized words as subject
325
+ words = title_cleaned.split()
326
+ # Take first 1-2 capitalized words as subject name
327
+ subject_words = []
328
+ for word in words[:3]:
329
+ if word[0].isupper() or word.isupper():
330
+ subject_words.append(word)
331
+ else:
332
+ break
333
+ subject = " ".join(subject_words) if subject_words else "General"
334
+
335
+ # Clean up subject name
336
+ subject = subject.strip()
337
+ if not subject or subject.startswith("Day"):
338
+ subject = "General"
339
+
340
+ if subject not in subjects:
341
+ subjects[subject] = []
342
+
343
+ # Add topic with its score data
344
+ topic_data = {
345
+ "title": title,
346
+ "id": topic.get("id"),
347
+ "status": topic.get("status", "locked"),
348
+ "quiz_passed": topic.get("quiz_passed", False)
349
+ }
350
+
351
+ # Add score if available
352
+ if topic.get("id") in st.session_state.topic_scores:
353
+ topic_data["score_data"] = st.session_state.topic_scores[topic.get("id")]
354
+
355
+ subjects[subject].append(topic_data)
356
+
357
+ return subjects
358
+
359
+
360
+ @st.dialog("📊 Analytics Overview", width="large")
361
  def show_analytics_dialog():
362
+ subjects_data = extract_subjects_and_topics()
 
 
 
 
 
 
 
 
363
 
364
+ if not subjects_data:
365
+ st.info("📚 No subjects found. Create a study plan to see analytics.")
366
+ return
 
 
 
 
367
 
368
+ # Create dynamic tabs
369
+ subject_names = list(subjects_data.keys())
370
+ tabs = st.tabs(subject_names)
371
 
372
+ for idx, subject_name in enumerate(subject_names):
373
+ with tabs[idx]:
374
+ topics = subjects_data[subject_name]
375
+
376
+ # Calculate subject mastery
377
+ completed_topics = [t for t in topics if t.get("status") == "completed"]
378
+ total_topics = len(topics)
379
+ completion_pct = (len(completed_topics) / total_topics * 100) if total_topics > 0 else 0
380
+
381
+ # Calculate average score for topics with quiz data
382
+ topics_with_scores = [t for t in topics if "score_data" in t]
383
+ if topics_with_scores:
384
+ avg_score = sum(t["score_data"]["percentage"] for t in topics_with_scores) / len(topics_with_scores)
385
+ else:
386
+ avg_score = 0
387
+
388
+ # Display mastery header
389
+ st.markdown(f"""
390
+ <div style="text-align: center; margin-bottom: 30px;">
391
+ <h1 style="font-size: 4rem; color: #111827; margin: 0;">{avg_score:.1f}%</h1>
392
+ <p style="color: #6B7280; font-size: 1.2rem;">Overall Mastery</p>
393
+ </div>
394
+ """, unsafe_allow_html=True)
395
+
396
+ # Progress metrics
397
+ col1, col2 = st.columns(2)
398
+ with col1:
399
+ st.metric("Topics Completed", f"{len(completed_topics)}/{total_topics}")
400
+ st.progress(completion_pct / 100)
401
+
402
+ with col2:
403
+ st.metric("Quizzes Taken", f"{len(topics_with_scores)}/{total_topics}")
404
+ quiz_completion = (len(topics_with_scores) / total_topics * 100) if total_topics > 0 else 0
405
+ st.progress(quiz_completion / 100)
406
+
407
+ st.markdown("---")
408
+ st.markdown("### 📈 Performance Breakdown")
409
+
410
+ # Classify topics by performance
411
+ strong = [t for t in topics_with_scores if t["score_data"]["percentage"] >= 75]
412
+ moderate = [t for t in topics_with_scores if 50 <= t["score_data"]["percentage"] < 75]
413
+ needs_work = [t for t in topics_with_scores if t["score_data"]["percentage"] < 50]
414
+
415
+ # Display classifications
416
+ col1, col2, col3 = st.columns(3)
417
+
418
+ with col1:
419
+ st.markdown("#### 💚 Strong Topics")
420
+ st.caption(f"{len(strong)} topic(s)")
421
+ if strong:
422
+ for t in strong:
423
+ score_pct = t["score_data"]["percentage"]
424
+ score_str = f"{t['score_data']['score']}/{t['score_data']['total']}"
425
+ st.success(f"**{t['title']}**\n{score_pct:.0f}% ({score_str})")
426
+ else:
427
+ st.info("No strong topics yet. Keep studying!")
428
+
429
+ with col2:
430
+ st.markdown("#### 🟡 Moderate Topics")
431
+ st.caption(f"{len(moderate)} topic(s)")
432
+ if moderate:
433
+ for t in moderate:
434
+ score_pct = t["score_data"]["percentage"]
435
+ score_str = f"{t['score_data']['score']}/{t['score_data']['total']}"
436
+ st.warning(f"**{t['title']}**\n{score_pct:.0f}% ({score_str})")
437
+ else:
438
+ st.info("No moderate topics yet")
439
+
440
+ with col3:
441
+ st.markdown("#### 🔴 Needs Work")
442
+ st.caption(f"{len(needs_work)} topic(s)")
443
+ if needs_work:
444
+ for t in needs_work:
445
+ score_pct = t["score_data"]["percentage"]
446
+ score_str = f"{t['score_data']['score']}/{t['score_data']['total']}"
447
+ st.error(f"**{t['title']}**\n{score_pct:.0f}% ({score_str})")
448
+ else:
449
+ st.info("Great! No topics need extra work")
450
 
 
 
 
451
 
452
  # -----------------------------------------------------------------------------
453
  # 3. QUIZ TO UNLOCK (Dialog)
 
771
  st.session_state.expiry_time = time.time() + total_seconds
772
  st.rerun()
773
 
774
+
775
  # Sources Widget
776
  with st.container(border=True):
777
  # Connectivity Check
 
882
  # FOCUS: LEFT COLUMN (CHAT)
883
  with left_col:
884
  st.markdown("### 💬 Study Assistant")
885
+
886
+ # Fixed-height chat container to keep messages inside
887
+ messages = st.container(height=600, border=True)
888
  with messages:
889
  for msg in st.session_state.chat_history:
890
  with st.chat_message(msg["role"]):
891
  st.write(msg["content"])
892
 
893
+ # Chat input at bottom - messages will appear in container above
894
  if prompt := st.chat_input(f"Ask about {st.session_state.active_topic}..."):
895
  st.session_state.chat_history.append({"role": "user", "content": prompt})
 
 
896
 
897
  # Call AI
898
+ with st.spinner("Thinking..."):
899
+ try:
900
+ # Prepare history
901
+ history = [{"role": m["role"], "content": m["content"]} for m in st.session_state.chat_history[:-1][-5:]]
902
+ resp = requests.post(f"{API_URL}/query", json={"question": prompt, "history": history})
903
+ if resp.status_code == 200:
904
+ data = resp.json()
905
+ ans = data.get("answer", "No answer.")
906
+ st.session_state.chat_history.append({"role": "assistant", "content": ans})
907
+ else:
908
+ st.session_state.chat_history.append({"role": "assistant", "content": "Error processing request."})
909
+ except Exception as e:
910
+ st.session_state.chat_history.append({"role": "assistant", "content": f"Connection Error: {e}"})
911
+
912
+ st.rerun()
913
 
914
+ # FOCUS: RIGHT COLUMN (LESSON CONTENT) - Scrollable Document Viewer
915
  with mid_col:
916
+ topic_title = st.session_state.active_topic
917
+ # Handle case where active_topic is dict or string
918
+ if isinstance(topic_title, dict):
919
+ topic_title = topic_title.get('title', 'Unknown Topic')
920
+
921
+ st.markdown(f"### 📖 {topic_title}")
922
+ st.markdown("---")
923
 
924
+ # Unique key for this topic's content
925
+ t_id = st.session_state.active_topic['id'] if isinstance(st.session_state.active_topic, dict) else hash(topic_title)
926
+ content_key = f"content_{t_id}"
 
 
 
 
927
 
928
+ # 1. Fetch Content if missing
929
+ if content_key not in st.session_state:
930
+ with st.spinner(f"🤖 AI is writing a lesson for '{topic_title}'..."):
931
+ try:
932
+ resp = requests.post(f"{API_URL}/generate_lesson", json={"topic": topic_title}, timeout=300)
933
+
934
+ if resp.status_code == 200:
935
+ st.session_state[content_key] = resp.json()["content"]
936
+ else:
937
+ st.session_state[content_key] = f"⚠️ Server Error: {resp.text}"
938
+
939
+ except Exception as e:
940
+ st.session_state[content_key] = f"⚠️ Connection Error: {e}"
941
+
942
+ # 2. Render Content in Scrollable Container (like a document viewer)
943
+ lesson_container = st.container(height=650, border=True)
944
+ with lesson_container:
945
+ st.markdown(st.session_state[content_key])
946
+
947
+ # 3. Exit Button (stays fixed below the scrollable content)
948
+ if st.button("⬅ Finish & Return", use_container_width=True):
949
  st.session_state.focus_mode = False
 
950
  st.rerun()
951
 
952
 
953
+
954
  # --- MIDDLE COLUMN: Intelligent Workspace ---
955
  # --- MIDDLE COLUMN: Intelligent Workspace ---
956
  if not st.session_state.focus_mode:
 
1047
  # --- RIGHT COLUMN: Scheduler ---
1048
  if right_col:
1049
  with right_col:
1050
+ # --- CALENDAR WIDGET ---
 
 
 
 
 
 
 
1051
  today = date.today()
1052
+ selected_date = st.date_input("📅 Calendar", value=today)
 
 
 
1053
 
1054
+ # --- LOGIC: POPUP FOR OTHER DATES ---
1055
+ # If user selects a future date, show its plan in a dialog
1056
+ if selected_date != today:
1057
+ @st.dialog(f"Plan for {selected_date}")
1058
+ def show_future_plan():
1059
+ delta = selected_date - today
1060
+ day_offset = delta.days + 1
1061
+ # Filter plan for this hypothetical day
1062
+ day_tasks = [t for t in st.session_state.study_plan if t.get("day") == day_offset]
1063
+
1064
+ if day_tasks:
1065
+ for t in day_tasks:
1066
+ st.markdown(f"- **{t['title']}**")
1067
+ else:
1068
+ st.info("No plan generated for this specific date yet.")
1069
 
1070
+ show_future_plan()
1071
+
1072
+ # --- B. TALK TO CALENDAR ---
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1073
  with st.form("calendar_chat_form", clear_on_submit=True):
1074
  plan_query = st.text_input("Talk to Calendar...", placeholder="e.g., 'Make a 3 day plan'")
1075
  submitted = st.form_submit_button("🚀 Generate Plan")
 
1086
 
1087
  # ROBUST SANITIZATION LOOP
1088
  for index, task in enumerate(raw_plan):
1089
+ # 1. FORCE UNLOCK DAY 1 (The Fix)
1090
+ if index == 0:
1091
+ task["status"] = "unlocked"
1092
+ task["locked"] = False
1093
+ else:
1094
+ # Default logic for others: Trust 'status' or default to 'locked'
1095
+ # We ignore the 'locked' boolean fallback to be stricter,
1096
+ # ensuring only Day 1 is open initially if not specified.
1097
+ task["status"] = task.get("status", "locked")
1098
+
1099
+ # 2. Fix IDs & Keys
1100
+ if "id" not in task: task["id"] = index + 1
1101
  task["quiz_passed"] = task.get("quiz_passed", False)
 
1102
  task["title"] = task.get("topic", f"Topic {task['id']}") # Fallback title
1103
 
1104
  st.session_state.study_plan = raw_plan
 
1108
  st.error(f"Failed: {resp.text}")
1109
  except Exception as e:
1110
  st.error(f"Error: {e}")
 
1111
 
1112
+ # --- TODAY'S TOPICS (FILTERED) ---
1113
+ st.markdown("### Today's Topics")
 
 
 
 
 
 
 
1114
 
1115
+ # FILTER: Only show Day 1 tasks for "Today"
1116
+ todays_tasks = [t for t in st.session_state.study_plan if t.get("day") == 1]
1117
+
1118
+ if not todays_tasks:
1119
+ st.caption("No tasks for today. Ask the calendar to make a plan!")
1120
  else:
1121
+ # Group tasks by subject if multiple topics per day
1122
+ if len(todays_tasks) > 1:
1123
+ st.caption(f"📚 {len(todays_tasks)} topics to cover today")
1124
+
1125
+ for i, task in enumerate(todays_tasks):
1126
+ # Display subject badge if available
1127
+ subject_badge = ""
1128
+ if "subject" in task and task["subject"]:
1129
+ subject_badge = f"**{task['subject']}** • "
1130
 
1131
+ # 1. COMPLETED
1132
+ if task["status"] == "completed":
1133
+ st.success(f"✅ {subject_badge}{task['title']}")
1134
+ # (Flashcards button REMOVED as requested)
1135
+
1136
+ # 2. ACTIVE / UNLOCKED
1137
+ elif task["status"] == "unlocked":
1138
+ with st.container(border=True):
1139
+ # Show subject badge prominently
1140
+ if "subject" in task and task["subject"]:
1141
+ st.caption(f"📘 {task['subject']}")
1142
+ st.markdown(f"**{task['title']}**")
1143
+
1144
+ # The Focus Mode Button
1145
+ if st.button(f"🚀 Start Learning", key=f"start_{task['id']}"):
1146
+ st.session_state.focus_mode = True
1147
+ st.session_state.active_topic = task['title']
1148
+ st.rerun()
1149
+
1150
+ # 1. THE QUIZ BUTTON
1151
+ if st.button(f"📝 Take Quiz (Unlock Next)", key=f"quiz_btn_{task['id']}"):
1152
+ st.session_state[f"show_quiz_{task['id']}"] = True
1153
+ st.rerun()
1154
+
1155
+ # 2. THE QUIZ (Inline - no dialog to avoid Streamlit error)
1156
+ if st.session_state.get(f"show_quiz_{task['id']}", False):
1157
+ st.markdown("---")
1158
+ st.write("### 🧠 Knowledge Check")
1159
 
1160
+ # 1. FETCH QUIZ DATA (Dynamic)
1161
+ quiz_key = f"quiz_data_{task['id']}"
1162
+ if quiz_key not in st.session_state:
1163
+ with st.spinner(f"🤖 Generating quiz for '{task['title']}'..."):
1164
+ try:
1165
+ resp = requests.post(f"{API_URL}/generate_quiz", json={"topic": task['title']}, timeout=120)
1166
+ if resp.status_code == 200:
1167
+ st.session_state[quiz_key] = resp.json().get("quiz", [])
1168
+ else:
1169
+ st.error("Failed to generate quiz.")
1170
+ except Exception as e:
1171
+ st.error(f"Connection error: {e}")
1172
+
1173
+ quiz_data = st.session_state.get(quiz_key, [])
1174
+ if quiz_data:
1175
+ st.caption("Answer the questions below. Next topic unlocks automatically.")
1176
+
1177
+ score = 0
1178
+ user_answers = {}
1179
+
1180
+ # 2. RENDER QUESTIONS
1181
+ for i, q in enumerate(quiz_data):
1182
+ st.markdown(f"**Q{i+1}: {q['question']}**")
1183
+ user_answers[i] = st.radio(
1184
+ "Select one:",
1185
+ q['options'],
1186
+ key=f"q_{task['id']}_{i}"
1187
+ )
1188
+
1189
+ st.markdown("---")
1190
+
1191
+ col1, col2 = st.columns([1, 1])
1192
+ with col1:
1193
+ if st.button("🚀 Submit Quiz", key=f"submit_{task['id']}", use_container_width=True):
1194
+ # GRADING LOGIC
1195
+ for i, q in enumerate(quiz_data):
1196
+ if user_answers[i] == q['answer']:
1197
+ score += 1
1198
+
1199
+ # STORE SCORE FOR ANALYTICS
1200
+ st.session_state.topic_scores[task['id']] = {
1201
+ "topic_title": task['title'],
1202
+ "score": score,
1203
+ "total": len(quiz_data),
1204
+ "percentage": (score / len(quiz_data)) * 100
1205
+ }
1206
+
1207
+ st.info(f"📊 Your Score: {score}/{len(quiz_data)}")
1208
+
1209
+ # ALWAYS UNLOCK NEXT TOPIC
1210
+ st.balloons()
1211
+
1212
+ # --- ADAPTIVE LOGIC (Optional based on score) ---
1213
+ if score == 3:
1214
+ st.toast("🚀 Perfect Score! Accelerating future plan...", icon="⚡")
1215
+ for future_task in st.session_state.study_plan:
1216
+ if future_task["id"] > task["id"]:
1217
+ if "Advanced" not in future_task["title"]:
1218
+ future_task["title"] = f"Advanced: {future_task['title']}"
1219
+ future_task["details"] = "Deep dive with complex examples. (AI Adjusted)"
1220
+
1221
+ elif score == 2:
1222
+ st.toast("⚠️ Good effort! Adding revision steps...", icon="🛡️")
1223
+ for future_task in st.session_state.study_plan:
1224
+ if future_task["id"] > task["id"]:
1225
+ if "Review" not in future_task["title"]:
1226
+ future_task["title"] = f"Review & {future_task['title']}"
1227
+ future_task["details"] = "Includes recap of previous concepts. (AI Adjusted)"
1228
+
1229
+ st.success(f"✅ Quiz completed! Unlocking next topic...")
1230
+ time.sleep(1)
1231
+
1232
+ # UNLOCK NEXT TOPIC
1233
+ task["status"] = "completed"
1234
+ task["quiz_passed"] = True
1235
+
1236
+ current_id = task["id"]
1237
+ for next_task in st.session_state.study_plan:
1238
+ if next_task["id"] == current_id + 1:
1239
+ next_task["status"] = "unlocked"
1240
+ next_task["locked"] = False
1241
+ break
1242
+
1243
+ # Close Quiz
1244
+ st.session_state[f"show_quiz_{task['id']}"] = False
1245
+ st.rerun()
1246
+
1247
+ with col2:
1248
+ if st.button("✕ Cancel", key=f"cancel_{task['id']}", use_container_width=True):
1249
+ st.session_state[f"show_quiz_{task['id']}"] = False
1250
  st.rerun()
1251
 
1252
+ # 3. LOCKED
1253
+ else:
1254
+ with st.container(border=True):
1255
+ st.markdown(f"🔒 <span style='color:gray'>{task['title']}</span>", unsafe_allow_html=True)
 
 
 
 
 
 
backend/main.py CHANGED
@@ -179,5 +179,30 @@ async def query_kb(request: QueryRequest):
179
  """
180
  RAG query endpoint.
181
  """
182
- response = query_knowledge_base(request.question,request.history)
 
183
  return response
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
179
  """
180
  RAG query endpoint.
181
  """
182
+ from backend.rag_engine import query_knowledge_base
183
+ response = query_knowledge_base(request.question, request.history)
184
  return response
185
+
186
+ class LessonRequest(BaseModel):
187
+ topic: str
188
+
189
+ @app.post("/generate_lesson")
190
+ def generate_lesson_endpoint(request: LessonRequest, db: Session = Depends(get_db)):
191
+ try:
192
+ from backend.rag_engine import generate_lesson_content
193
+ content = generate_lesson_content(request.topic)
194
+ return {"content": content}
195
+ except Exception as e:
196
+ raise HTTPException(status_code=500, detail=str(e))
197
+
198
+ class QuizRequest(BaseModel):
199
+ topic: str
200
+
201
+ @app.post("/generate_quiz")
202
+ def generate_quiz_endpoint(request: QuizRequest):
203
+ try:
204
+ from backend.rag_engine import generate_quiz_data
205
+ quiz_data = generate_quiz_data(request.topic)
206
+ return {"quiz": quiz_data}
207
+ except Exception as e:
208
+ raise HTTPException(status_code=500, detail=str(e))
backend/rag_engine.py CHANGED
@@ -132,136 +132,367 @@ def query_knowledge_base(question: str, history: list = []):
132
  print(f"⚠️ Rewrite failed: {e}")
133
 
134
  # --- PART 2: THE "ROUTER" (Search or Chat?) ---
135
- # If the rewritten question is just a greeting, skip the PDF search
136
- is_greeting = False
137
- if len(standalone_question.split()) < 5:
138
- greetings = ["hi", "hello", "thanks", "good morning", "hey"]
139
- if any(g in standalone_question.lower() for g in greetings):
140
- is_greeting = True
141
 
142
- if is_greeting:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
143
  return {
144
- "answer": "Hello! I am your FocusFlow assistant. I can help you compare topics or explain concepts from your PDFs.",
145
- "sources": []
 
 
146
  }
147
-
148
- # --- PART 3: SEARCH & ANSWER (Tutor Mode) ---
149
 
150
- # 1. Search the PDF (Increased k=5 and added debug)
151
- # 1. Search the PDF (Increased k=6 and added debug)
152
- docs = vector_store.similarity_search(standalone_question, k=6)
153
- print(f"🔎 Found {len(docs)} relevant chunks")
154
 
155
- # Construct context with explicit Source Labels
156
- context_parts = []
157
- for doc in docs:
158
- # Get a clean source name (e.g., "DSA.pdf" or "Video Title")
159
- src = doc.metadata.get("title") or doc.metadata.get("source", "Unknown").split("/")[-1]
160
- context_parts.append(f"SOURCE: {src}\nCONTENT: {doc.page_content}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
161
 
162
- context_text = "\n\n---\n\n".join(context_parts)
 
 
163
 
164
- # 2. The "Tutor Persona" Prompt
165
- final_prompt = f"""
166
- You are FocusFlow, a friendly and expert AI Tutor.
167
- Your goal is to explain concepts from the provided PDF content clearly and simply.
168
 
169
- GUIDELINES:
170
- - Tone: Encouraging, professional, and educational.
171
- - Format: Use **Bold** for key terms and Bullet points for lists.
172
- - Strategy: Don't just copy the text. Read the context, understand it, and explain it to the student.
173
- - If the context lists problems (like DSA), summarize the types of problems found.
174
- - Source Check: The context now includes 'SOURCE:' labels. If the user asks about a specific file (like 'the PDF' or 'the Video'), ONLY use information from that specific source.
175
 
176
- CONTEXT FROM PDF:
177
- {context_text}
178
 
179
- STUDENT'S QUESTION:
180
- {standalone_question}
181
 
182
- YOUR LESSON:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
183
  """
184
 
185
- # 3. Generate Answer
186
- answer = llm.invoke(final_prompt)
187
 
188
- # 4. Smart Source Formatting
189
  sources_list = []
190
- for doc in docs:
191
- # Check if it's a Video (YoutubeLoader adds 'title')
192
- if "title" in doc.metadata:
193
- source_label = f"📺 {doc.metadata['title']}"
194
- loc_label = "Transcript"
195
- else:
196
- # Fallback for PDFs
197
- source_label = doc.metadata.get("source", "Unknown").split("/")[-1]
198
- loc_label = f"Page {doc.metadata.get('page', 0) + 1}"
199
-
200
- sources_list.append({
201
- "source": source_label,
202
- "location": loc_label
203
- })
204
-
205
  return {
206
- "answer": answer,
207
  "sources": sources_list
208
  }
209
-
210
- def generate_study_plan(user_request: str) -> dict:
211
- print(f"🚀 STARTING PLAN GENERATION for: {user_request}")
212
- import json
213
- import time
214
 
215
- # 1. Setup Retrieval & LLM
216
  vector_store = Chroma(
217
  persist_directory=CACHE_DIR,
218
  embedding_function=OllamaEmbeddings(model="nomic-embed-text")
219
  )
220
  llm = Ollama(model="llama3.2:1b")
221
 
222
- # --- 1. THE BACKUP PLAN (Guaranteed to work) ---
223
- backup_plan = {
224
- "days": [
225
- {"id": 1, "day": 1, "topic": "Fundamentals of the Subject", "details": "Core definitions and basic laws.", "locked": False, "quiz_passed": False},
226
- {"id": 2, "day": 2, "topic": "Advanced Theories", "details": "Applying the laws to complex systems.", "locked": True, "quiz_passed": False},
227
- {"id": 3, "day": 3, "topic": "Practical Applications", "details": "Real-world case studies and problems.", "locked": True, "quiz_passed": False}
228
- ]
229
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
230
 
231
- # --- 2. TRY THE AI ---
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
232
  try:
233
- # Limit context to be very fast
234
- docs = vector_store.similarity_search("Syllabus topics", k=2)
235
- if not docs:
236
- context_text = "General syllabus topics."
237
- else:
238
- context_text = "\n".join([d.page_content[:200] for d in docs])
239
 
240
- prompt = f"""
241
- Context: {context_text}
242
- Task: Create a 3-day study plan (JSON).
243
- Format: {{"days": [{{"id": 1, "day": 1, "topic": "...", "details": "...", "locked": false}}]}}
244
- Output JSON only.
245
- """
246
 
247
- print("🤖 Asking AI (with 15s timeout expectation)...")
248
- # In a real production app we would wrap this in a thread timeout,
249
- # but for now we rely on the try/except block catching format errors.
250
- raw_output = llm.invoke(prompt)
251
- print("✅ AI Responded.")
 
 
 
 
252
 
253
- # Clean & Parse
254
- clean_json = raw_output.replace("```json", "").replace("```", "").strip()
255
- plan = json.loads(clean_json)
256
 
257
- # Validate Keys (The "Sanitizer")
258
- for i, task in enumerate(plan.get("days", [])):
259
- if "id" not in task: task["id"] = i + 1
260
- if "topic" not in task: task["topic"] = f"Day {i+1} Topic"
261
- task["quiz_passed"] = False
262
-
263
- return plan
264
-
265
  except Exception as e:
266
- print(f"⚠️ AI FAILED ({e}). SWITCHING TO BACKUP PLAN.")
267
- return backup_plan
 
 
132
  print(f"⚠️ Rewrite failed: {e}")
133
 
134
  # --- PART 2: THE "ROUTER" (Search or Chat?) ---
 
 
 
 
 
 
135
 
136
def generate_study_plan(user_request: str):
    """Build a multi-day, multi-subject study plan from the indexed documents.

    Parses the requested number of days from ``user_request`` (e.g. "make a
    5 day plan"; defaults to 5), groups retrieved chunks by source document
    (one source == one subject) and emits one topic per subject per day.

    Args:
        user_request: Free-text plan request from the user.

    Returns:
        dict: ``{"days": [...]}`` where each item has "day", "id", "subject",
        "topic", "details", "status" and "quiz_passed" keys. Day-1 topics are
        "unlocked"; everything later starts "locked".
    """
    import re

    print(f"🚀 STARTING PLAN: {user_request}")

    # Only retrieval is needed here — no LLM call, so no Ollama chat model
    # is instantiated (the original created one and never used it).
    vector_store = Chroma(
        persist_directory=CACHE_DIR,
        embedding_function=OllamaEmbeddings(model="nomic-embed-text"),
    )

    # 1. Number of days requested (default 5).
    day_match = re.search(r'(\d+)\s*day', user_request.lower())
    num_days = int(day_match.group(1)) if day_match else 5

    # 2. Pull a broad sample of chunks so every uploaded document is represented.
    docs = vector_store.similarity_search("topics subjects syllabus overview", k=20)

    # 3. Group candidate topic sentences by source document.
    topics_by_source = {}
    for doc in docs:
        source = doc.metadata.get("source", "unknown")
        entry = topics_by_source.setdefault(source, {"topics": [], "subject_name": None})

        content = doc.page_content

        # Derive a human-readable subject name once per source.
        if entry["subject_name"] is None:
            first_part = content[:200].upper()
            if "MANUFACTURING" in first_part:
                entry["subject_name"] = "Manufacturing Technology"
            elif "OOPS" in first_part or "OBJECT" in first_part:
                entry["subject_name"] = "Object-Oriented Programming"
            elif "DATA STRUCT" in first_part:
                entry["subject_name"] = "Data Structures"
            else:
                # Fall back to a prettified filename.
                filename = source.split('/')[-1].replace('.pdf', '').replace('-', ' ').title()
                entry["subject_name"] = filename

        # Heuristic topic extraction: keep short, title-like sentences that
        # either mention a syllabus keyword or look like a heading.
        for sentence in content.split('.'):
            sentence = sentence.strip()
            if not (20 < len(sentence) < 150):
                continue
            keyword_hit = any(
                kw in sentence.lower()
                for kw in ('topic', 'chapter', 'module', 'unit', 'concept',
                           'introduction', 'process', 'method')
            )
            if keyword_hit or (sentence[0].isupper() and len(sentence.split()) > 4):
                entry["topics"].append(sentence)

    # De-duplicate (order-preserving via dict.fromkeys) and cap per source.
    for entry in topics_by_source.values():
        entry["topics"] = list(dict.fromkeys(entry["topics"]))[:num_days * 2]

    all_sources = list(topics_by_source)
    num_subjects = len(all_sources)
    print(f"📚 Found {num_subjects} subjects/sources")

    if num_subjects == 0:
        # Nothing indexed yet: emit a generic placeholder plan.
        return {
            "days": [
                {"day": i, "topic": f"Topic {i}", "details": "Study material",
                 "status": "unlocked" if i == 1 else "locked",
                 "subject": "General", "id": i}
                for i in range(1, num_days + 1)
            ]
        }

    # 4. One topic per subject per day; round-robin through each subject's
    #    topic list so consecutive days get different material.
    plan_days = []
    topic_id = 1
    for day_num in range(1, num_days + 1):
        for source in all_sources:
            subject_name = topics_by_source[source]["subject_name"]
            source_topics = topics_by_source[source]["topics"]

            if source_topics:
                # Modulo already guarantees the index is in range.
                topic_text = source_topics[(day_num - 1) % len(source_topics)][:100]
            else:
                topic_text = "Concepts and Principles"

            plan_days.append({
                "day": day_num,
                "id": topic_id,
                "subject": subject_name,
                "topic": f"{subject_name}: {topic_text}",
                "details": f"Study material for {subject_name}",
                "status": "unlocked" if day_num == 1 else "locked",
                "quiz_passed": False,
            })
            topic_id += 1

    print(f"✅ Generated {len(plan_days)} topics across {num_days} days for {num_subjects} subjects")
    return {"days": plan_days}
245
+
246
def generate_lesson_content(topic_title: str):
    """Generate a Markdown study guide for ``topic_title`` using RAG.

    Retrieves up to 8 relevant chunks from the vector store as context and
    asks the local LLM for a structured, comprehensive (600-800 word) guide.

    Returns:
        str: Markdown lesson text, or an "Error Generating Lesson" section
        if retrieval or generation fails (the caller renders either one).
    """
    print(f"🚀 GENERATING LESSON FOR: {topic_title}")

    vector_store = Chroma(
        persist_directory=CACHE_DIR,
        embedding_function=OllamaEmbeddings(model="nomic-embed-text"),
    )
    llm = Ollama(model="llama3.2:1b")

    # Retrieval is inside the try as well: a vector-store failure should
    # produce the graceful error section, not an unhandled exception.
    try:
        # 1. Broad context: 8 chunks, 500 chars each, for a richer lesson.
        docs = vector_store.similarity_search(topic_title, k=8)
        context_text = "\n".join(d.page_content[:500] for d in docs)

        # 2. Structured educational prompt (section headings steer the model).
        prompt = f"""Create a comprehensive study guide for: {topic_title}

Context from course materials:
{context_text}

Write a DETAILED study guide in Markdown format with these sections:

## Introduction
Explain what this topic is and why it's important (2-3 paragraphs)

## Core Concepts
Break down the main ideas into clear subsections. For each concept:
- Define it clearly
- Explain how it works
- Describe when and why to use it

## Key Points & Rules
List important formulas, rules, syntax, or principles. Include code examples if applicable.

## Practical Examples
Provide 2-3 real-world examples showing:
- The problem scenario
- How the concept solves it
- Step-by-step walkthrough

## Common Mistakes
Highlight typical errors students make and how to avoid them

## Summary
Quick bullet-point recap of key takeaways

Make this comprehensive and educational. Aim for 600-800 words. Use clear explanations a student can understand.

Markdown content:"""

        # 3. Generate, then strip any markdown fencing the model wraps around
        #    its output.
        response = llm.invoke(prompt)
        clean_text = response.replace("```markdown", "").replace("```", "").strip()

        # Very short responses get a pointer back to the source materials.
        if len(clean_text) < 200:
            clean_text += "\n\n*Note: For more detailed information, please refer to your course materials or ask specific questions in the chat.*"

        return clean_text
    except Exception as e:
        return f"### Error Generating Lesson\nCould not retrieve content: {e}"
309
+
310
def query_knowledge_base(question: str, history=None):
    """Answer ``question`` from the vector store, with chat-history context.

    Args:
        question: The user's question.
        history: Optional list of ``{"role": ..., "content": ...}`` chat
            messages; defaults to no history.

    Returns:
        dict: ``{"answer": <LLM text>, "sources": [{"source", "page"}, ...]}``.
    """
    # Normalize here instead of using the mutable-default pitfall
    # ``history: list = []`` in the signature.
    if history is None:
        history = []

    print(f"📡 QUERY: {question}")

    # Init retrieval + generation resources.
    vector_store = Chroma(
        persist_directory=CACHE_DIR,
        embedding_function=OllamaEmbeddings(model="nomic-embed-text"),
    )
    llm = Ollama(model="llama3.2:1b")

    # 1. Retrieve the 3 most relevant chunks (truncated to 500 chars each).
    docs = vector_store.similarity_search(question, k=3)
    context = "\n".join(d.page_content[:500] for d in docs)

    # 2. Flatten chat history into the prompt.
    history_text = "\n".join(f"{msg['role']}: {msg['content']}" for msg in history)

    # 3. Grounded prompt; the model is told to admit ignorance.
    prompt = f"""
    Context: {context}
    Chat History:
    {history_text}

    User Question: {question}

    TASK: Answer the user's question based on the context.
    If you don't know, say "I don't know".
    """

    res = llm.invoke(prompt)

    # Attach source metadata. NOTE(review): "page" defaults to 1 when
    # missing (e.g. video transcripts) — confirm pages are 1-based upstream.
    sources_list = [
        {"source": d.metadata.get("source", "Unknown"), "page": d.metadata.get("page", 1)}
        for d in docs
    ]

    return {
        "answer": res,
        "sources": sources_list,
    }
351
def generate_quiz_data(topic_title: str):
    """Generate a 3-question multiple-choice quiz for ``topic_title``.

    Asks the local LLM for JSON questions grounded in retrieved context.
    Structurally broken items (missing keys, answer not among the options)
    are filtered out; shortfalls are padded from a context-derived fallback,
    so callers always receive exactly 3 well-formed questions.

    Returns:
        list[dict]: 3 items, each with "question", "options" and "answer".
    """
    import json

    print(f"🚀 GENERATING QUIZ FOR: {topic_title}")

    # Initialize resources
    vector_store = Chroma(
        persist_directory=CACHE_DIR,
        embedding_function=OllamaEmbeddings(model="nomic-embed-text"),
    )
    llm = Ollama(model="llama3.2:1b")

    # 1. Retrieve grounding context (3 chunks, 300 chars each).
    docs = vector_store.similarity_search(topic_title, k=3)
    context_text = "\n".join(d.page_content[:300] for d in docs)

    def create_context_based_fallback():
        """Generate realistic quiz questions from context when LLM fails."""
        # Extract key sentences from the retrieved context.
        sentences = context_text.split('.')
        key_concepts = []
        for sentence in sentences[:10]:  # Look at first 10 sentences only.
            if len(sentence.strip().split()) > 3:
                key_concepts.append(sentence.strip())

        if not key_concepts or len(key_concepts) < 3:
            # Ultimate fallback when there is no usable context at all.
            return [
                {
                    "question": f"Which statement best describes {topic_title}?",
                    "options": [
                        "A core concept that requires understanding of fundamentals",
                        "An advanced technique used in specialized applications",
                        "A theoretical framework with practical implementations"
                    ],
                    "answer": "A core concept that requires understanding of fundamentals"
                },
                {
                    "question": f"What is the primary purpose of {topic_title}?",
                    "options": [
                        "To optimize performance and efficiency",
                        "To provide structure and organization",
                        "To enable complex problem solving"
                    ],
                    "answer": "To provide structure and organization"
                },
                {
                    "question": f"When should you apply {topic_title}?",
                    "options": [
                        "When dealing with large-scale systems",
                        "During the initial design phase",
                        "When specific requirements are identified"
                    ],
                    "answer": "When specific requirements are identified"
                }
            ]

        # Build questions from extracted concepts; distractors are slight
        # rearrangements of the concept so they stay plausible.
        fallback_quiz = []
        for concept in key_concepts[:3]:
            words = concept.split()
            if len(words) > 5:
                correct_answer = ' '.join(words[:15])  # First part as correct.
                distractor1 = ' '.join(words[2:10] + words[:2]) if len(words) > 10 else "Alternative interpretation of the concept"
                distractor2 = ' '.join(words[5:15]) if len(words) > 15 else "Related but distinct concept"

                fallback_quiz.append({
                    "question": f"Regarding {topic_title}, which statement is most accurate?",
                    "options": [correct_answer, distractor1, distractor2],
                    "answer": correct_answer
                })

        # Pad to exactly 3 questions with generic-but-reasonable items.
        while len(fallback_quiz) < 3:
            fallback_quiz.append({
                "question": f"What is an important aspect of {topic_title}?",
                "options": [
                    "Understanding the underlying principles",
                    "Memorizing specific implementation details",
                    "Following standard industry practices"
                ],
                "answer": "Understanding the underlying principles"
            })

        return fallback_quiz[:3]

    def _is_valid_question(item):
        """A usable question dict: text, an options list, answer among options."""
        return (
            isinstance(item, dict)
            and isinstance(item.get("question"), str)
            and isinstance(item.get("options"), list)
            and len(item["options"]) >= 2
            and item.get("answer") in item["options"]
        )

    # 2. Prompt engineered for plausible distractors.
    prompt = f"""Create 3 challenging multiple choice questions about: {topic_title}

Context: {context_text}

CRITICAL REQUIREMENTS for answer choices:
1. Make wrong answers (distractors) PLAUSIBLE and REALISTIC
2. Use common misconceptions as wrong answers
3. Make distractors similar enough that students need real understanding to choose correctly
4. Avoid obviously wrong or silly options like "Option A", "Option B"
5. Base all options on the actual context provided

Example of GOOD distractors (realistic and plausible):
Q: "What is encapsulation in OOP?"
- "Hiding implementation details and exposing only necessary interfaces" [CORRECT]
- "Combining data and methods that operate on that data into a single unit" [PLAUSIBLE - related to OOP but describes a class]
- "The ability of objects to take multiple forms through inheritance" [PLAUSIBLE - actually polymorphism]

Example of BAD distractors (too obvious):
- "A type of loop"
- "Option A"
- "None of the above"

Output as JSON array with 3 questions:
[
{{
"question": "Specific question text?",
"options": ["Realistic wrong answer 1", "Correct answer", "Realistic wrong answer 2"],
"answer": "Correct answer"
}},
... (2 more questions)
]

JSON:"""

    try:
        response = llm.invoke(prompt)
        clean_json = response.replace("```json", "").replace("```", "").strip()
        quiz_data = json.loads(clean_json)

        # Ensure it's a list
        if not isinstance(quiz_data, list):
            raise ValueError("Quiz data must be a list")

        # Drop structurally broken questions before counting, so a malformed
        # item never reaches the UI.
        quiz_data = [q for q in quiz_data if _is_valid_question(q)]

        # POST-PROCESSING: ensure exactly 3 questions.
        if len(quiz_data) < 3:
            print(f"⚠️ LLM only generated {len(quiz_data)} questions, padding with context-based questions...")
            context_fallback = create_context_based_fallback()
            quiz_data.extend(context_fallback[:3 - len(quiz_data)])
        elif len(quiz_data) > 3:
            quiz_data = quiz_data[:3]  # Trim to exactly 3.

        return quiz_data

    except Exception as e:
        print(f"⚠️ Quiz Gen Failed: {e}. Using context-based fallback...")
        # Return context-based fallback instead of generic placeholders.
        return create_context_based_fallback()