NavyDevilDoc committed on
Commit
6fa9fd5
·
verified ·
1 Parent(s): 174e731

Update src/app.py

Browse files
Files changed (1) hide show
  1. src/app.py +17 -20
src/app.py CHANGED
@@ -481,35 +481,32 @@ with tab3:
481
  else:
482
  valid_question_found = False
483
  attempts = 0
 
 
484
  while not valid_question_found and attempts < 5:
485
  attempts += 1
486
- # Pass the focus topic here!
487
  q_ctx = quiz.get_document_context(st.session_state.username, topic_filter=focus_topic)
488
 
 
 
 
 
 
489
  if q_ctx:
490
  prompt = quiz.construct_question_generation_prompt(q_ctx["context_text"])
491
- response_text, usage = query_model_universal([{"role": "user", "content": prompt}], 300, model_choice, st.session_state.get("user_openai_key"))
492
 
493
- # PARSE OUTPUT
494
- if "UNABLE" not in response_text and "QUOTE:" in response_text:
495
- # Split into Question and Quote
496
- parts = response_text.split("QUOTE:")
497
- q_text = parts[0].replace("QUESTION:", "").strip()
498
- quote_text = parts[1].strip()
499
-
500
- # SAFETY CHECK: Is the quote actually in the text?
501
- # We use a loose check (in case of minor whitespace diffs)
502
- # We take the first 20 chars of the quote to verify location
503
- if quote_text[:20] in q_ctx["context_text"]:
504
- valid_question_found = True
505
- qs["active"] = True
506
- qs["question_data"] = q_ctx
507
- qs["generated_question_text"] = q_text
508
- qs["feedback"] = None
509
 
510
  if not valid_question_found:
511
- if focus_topic: st.warning(f"No documents found containing '{focus_topic}'. Try a different keyword.")
512
- else: st.warning("Could not generate a question. Documents may be too sparse.")
 
 
 
 
 
513
 
514
  if st.session_state.quiz_trigger:
515
  st.session_state.quiz_trigger = False
 
481
  else:
482
  valid_question_found = False
483
  attempts = 0
484
+ last_error = None
485
+
486
  while not valid_question_found and attempts < 5:
487
  attempts += 1
 
488
  q_ctx = quiz.get_document_context(st.session_state.username, topic_filter=focus_topic)
489
 
490
+ # ERROR HANDLING SWITCH
491
+ if q_ctx and "error" in q_ctx:
492
+ last_error = q_ctx["error"]
493
+ break # Stop trying, we know why it failed (e.g., Topic not found)
494
+
495
  if q_ctx:
496
  prompt = quiz.construct_question_generation_prompt(q_ctx["context_text"])
497
+ question_text, usage = query_model_universal([{"role": "user", "content": prompt}], 300, model_choice, st.session_state.get("user_openai_key"))
498
 
499
+ if "UNABLE" not in question_text and len(question_text) > 10:
500
+ valid_question_found = True; qs["active"] = True; qs["question_data"] = q_ctx; qs["generated_question_text"] = question_text; qs["feedback"] = None
 
 
 
 
 
 
 
 
 
 
 
 
 
 
501
 
502
  if not valid_question_found:
503
+ # SPECIFIC USER FEEDBACK
504
+ if last_error == "topic_not_found":
505
+ st.warning(f"Could not find any documents containing the topic: **'{focus_topic}'**. \n\nCheck your spelling or try a broader term.")
506
+ elif focus_topic:
507
+ st.warning(f"Found documents with '{focus_topic}', but the AI struggled to form a question. Try again or check the document content.")
508
+ else:
509
+ st.warning("Could not generate a question. Your 'Knowledge & Tools' local cache might be empty or corrupted. \n\n**Try clicking '🔄 Resync from Pinecone' in Tab 2.**")
510
 
511
  if st.session_state.quiz_trigger:
512
  st.session_state.quiz_trigger = False