ayush2917 committed on
Commit
5b12667
·
verified ·
1 Parent(s): d9e4dd0

Update app/services/orchestrator/build_question_set.py

Browse files
app/services/orchestrator/build_question_set.py CHANGED
@@ -4,13 +4,15 @@ import json
4
  import random
5
  import os
6
 
7
- from app.services.model_a_question_generator import generate_mcqs
8
- from app.services.model_b_answer_predictor import predict_answer
9
- from app.services.model_c_explanation_generator import generate_short_explanation
10
- from app.services.model_d_solution_generator import generate_solution # FIXED
11
 
12
  # Correct path to local_questions.json
13
- DATA_FILE = os.path.join(os.path.dirname(__file__), "..", "..", "data", "local_questions.json")
 
 
 
14
  DATA_FILE = os.path.abspath(DATA_FILE)
15
 
16
  with open(DATA_FILE, "r", encoding="utf-8") as f:
@@ -19,41 +21,36 @@ with open(DATA_FILE, "r", encoding="utf-8") as f:
19
 
20
  async def build_question_set(topic: str, num: int = 10):
21
  """
22
- Returns a question set with:
23
- - question
24
- - options
25
- - correct answer (from local or A/B/C/D)
26
- - short explanation (optional)
27
- - detailed solution (local only)
28
  """
29
 
30
- # STEP 1 — Get questions (local fallback or generated)
31
  if topic in LOCAL_DB["topics"]:
32
  question_pool = LOCAL_DB["topics"][topic]
33
- selected = random.sample(question_pool, min(num, len(question_pool)))
34
  else:
35
- # fallback (HF models)
36
- raw = await generate_mcqs(topic=topic, num=num)
37
- selected = raw
 
38
 
39
  question_set = []
40
 
41
- # STEP 2 — Build each question item
42
  for i, q in enumerate(selected):
43
 
44
- question_text = q["question"]
45
- options = q["options"]
46
- correct = q.get("answer", "") # might be empty for generated questions
47
 
48
- # STEP 3 — Predict answer if needed (uses model_b or empty)
49
- if not correct:
50
- predicted = await predict_answer(question_text, options)
51
- correct = predicted
52
 
53
- # STEP 4 — Short explanation (local or empty)
54
- short_exp = await generate_short_explanation(question_text, correct)
55
 
56
- # STEP 5 — Detailed solution (LOCAL ONLY) — FIXED
57
  detailed = await generate_solution(i + 1)
58
 
59
  question_set.append({
@@ -61,7 +58,7 @@ async def build_question_set(topic: str, num: int = 10):
61
  "question": question_text,
62
  "options": options,
63
  "answer": correct,
64
- "short_explanation": short_exp,
65
  "detailed_solution": detailed
66
  })
67
 
 
4
  import random
5
  import os
6
 
7
+ # No AI models — local mode
8
+ from app.services.model_d_solution_generator import generate_solution
9
+
 
10
 
11
  # Correct path to local_questions.json
12
+ DATA_FILE = os.path.join(
13
+ os.path.dirname(__file__),
14
+ "..", "..", "data", "local_questions.json"
15
+ )
16
  DATA_FILE = os.path.abspath(DATA_FILE)
17
 
18
  with open(DATA_FILE, "r", encoding="utf-8") as f:
 
21
 
22
  async def build_question_set(topic: str, num: int = 10):
23
  """
24
+ Offline-only question builder.
25
+ Uses:
26
+ - local_questions.json
27
+ - local_solutions.json
 
 
28
  """
29
 
30
+ # STEP 1 — Select topic or fallback to empty list
31
  if topic in LOCAL_DB["topics"]:
32
  question_pool = LOCAL_DB["topics"][topic]
 
33
  else:
34
+ return []
35
+
36
+ # Random selection
37
+ selected = random.sample(question_pool, min(num, len(question_pool)))
38
 
39
  question_set = []
40
 
41
+ # STEP 2 — Build objects
42
  for i, q in enumerate(selected):
43
 
44
+ # q is a STRING
45
+ question_text = q
 
46
 
47
+ # Because PDF options were not extracted
48
+ options = ["A", "B", "C", "D"]
 
 
49
 
50
+ # No answer key available
51
+ correct = None
52
 
53
+ # Load detailed solution from local_solutions.json
54
  detailed = await generate_solution(i + 1)
55
 
56
  question_set.append({
 
58
  "question": question_text,
59
  "options": options,
60
  "answer": correct,
61
+ "short_explanation": "",
62
  "detailed_solution": detailed
63
  })
64