chore: prompt improved
Browse files
Backend/app/api/v1/endpoints/prompts.py
CHANGED
|
@@ -9,15 +9,16 @@ Your task is to generate a batch of 10 high-quality MCQ questions strictly based
|
|
| 9 |
-----------------------
|
| 10 |
GENERATION RULES
|
| 11 |
-----------------------
|
| 12 |
-
1.
|
| 13 |
-
2.
|
| 14 |
-
3.
|
| 15 |
-
4. Each
|
| 16 |
-
5.
|
| 17 |
-
6.
|
| 18 |
-
7.
|
| 19 |
-
8.
|
| 20 |
-
9. Output MUST
|
|
|
|
| 21 |
|
| 22 |
-----------------------
|
| 23 |
REQUIRED JSON FORMAT FOR EACH QUESTION
|
|
@@ -44,4 +45,4 @@ ANSWER KEY RULES
|
|
| 44 |
- 'd' -> options[3]
|
| 45 |
|
| 46 |
Strictly follow the JSON structure and generate exactly 10 MCQs.
|
| 47 |
-
"""
|
|
|
|
| 9 |
-----------------------
|
| 10 |
GENERATION RULES
|
| 11 |
-----------------------
|
| 12 |
+
1. Strictly follow the user_prompt instructions without deviation.
|
| 13 |
+
2. Generate exactly 10 MCQs.
|
| 14 |
+
3. Use only information from the provided inputs.
|
| 15 |
+
4. Each question must be unambiguous, factual, and supported by the given data.
|
| 16 |
+
5. Each MCQ MUST have exactly four options.
|
| 17 |
+
6. Only one correct answer is allowed.
|
| 18 |
+
7. Explanations must be short and directly justify the answer.
|
| 19 |
+
8. `User_response` must ALWAYS remain an empty string.
|
| 20 |
+
9. Output MUST be a valid JSON array containing 10 objects.
|
| 21 |
+
10. Output MUST contain nothing except the JSON array (no commentary or markdown).
|
| 22 |
|
| 23 |
-----------------------
|
| 24 |
REQUIRED JSON FORMAT FOR EACH QUESTION
|
|
|
|
| 45 |
- 'd' -> options[3]
|
| 46 |
|
| 47 |
Strictly follow the JSON structure and generate exactly 10 MCQs.
|
| 48 |
+
"""
|
Backend/app/api/v1/endpoints/quiz.py
CHANGED
|
@@ -151,8 +151,8 @@ async def generate_quiz_notes(
|
|
| 151 |
|
| 152 |
async def prompt_builder(parsed_doc:str, user_prompt:str, docs:str=None):
|
| 153 |
prompt = SYSTEM_PROMPT.format(
|
| 154 |
-
parsed_info=parsed_doc,
|
| 155 |
user_prompt=user_prompt,
|
|
|
|
| 156 |
retrieved_docs=docs
|
| 157 |
)
|
| 158 |
return prompt
|
|
|
|
| 151 |
|
| 152 |
async def prompt_builder(parsed_doc:str, user_prompt:str, docs:str=None):
|
| 153 |
prompt = SYSTEM_PROMPT.format(
|
|
|
|
| 154 |
user_prompt=user_prompt,
|
| 155 |
+
parsed_info=parsed_doc,
|
| 156 |
retrieved_docs=docs
|
| 157 |
)
|
| 158 |
return prompt
|
Backend/app/llm.py
CHANGED
|
@@ -22,7 +22,7 @@ async def call_llm(prompt:str):
|
|
| 22 |
],
|
| 23 |
# Use the OpenAI parameter to request JSON output
|
| 24 |
response_format={"type": "json_object"},
|
| 25 |
-
temperature=0.
|
| 26 |
)
|
| 27 |
|
| 28 |
json_string = response.choices[0].message.content
|
|
|
|
| 22 |
],
|
| 23 |
# Use the OpenAI parameter to request JSON output
|
| 24 |
response_format={"type": "json_object"},
|
| 25 |
+
temperature=0.4,
|
| 26 |
)
|
| 27 |
|
| 28 |
json_string = response.choices[0].message.content
|