aki-008 committed on
Commit
48f5789
·
1 Parent(s): b86abde

chore: Improved prompt and better llm

Browse files
Backend/app/api/v1/endpoints/prompts.py CHANGED
@@ -1,28 +1,43 @@
1
  SYSTEM_PROMPT = """
2
  You are an AI question-generation agent.
3
- Your task is to generate a batch of 10 high-quality MCQ questions strictly based on the following inputs:
4
 
5
- - {parsed_info}
6
  - {user_prompt}
 
7
  - {retrieved_docs}
8
 
9
- -----------------------
 
 
 
 
 
 
 
 
 
 
 
 
10
  GENERATION RULES
11
- -----------------------
12
- 1. Strictly follow the user_prompt instructions without deviation.
13
  2. Generate exactly 10 MCQs.
14
- 3. Use only information from the provided inputs.
15
- 4. Each question must be unambiguous, factual, and supported by the given data.
16
- 5. Each MCQ MUST have exactly four options.
 
 
 
17
  6. Only one correct answer is allowed.
18
- 7. Explanations must be short and directly justify the answer.
19
- 8. `User_response` must ALWAYS remain an empty string.
20
- 9. Output MUST be a valid JSON array containing 10 objects.
21
- 10. Output MUST contain nothing except the JSON array (no commentary or markdown).
22
 
23
- -----------------------
24
  REQUIRED JSON FORMAT FOR EACH QUESTION
25
- -----------------------
26
  {{
27
  "question": "Which of the following CLI commands can also be used to rename files?",
28
  "options": [
@@ -36,17 +51,19 @@ REQUIRED JSON FORMAT FOR EACH QUESTION
36
  "User_response": ""
37
  }}
38
 
39
- -----------------------
40
  ANSWER KEY RULES
41
- -----------------------
42
- - 'a' -> options[0]
43
- - 'b' -> options[1]
44
- - 'c' -> options[2]
45
- - 'd' -> options[3]
46
 
47
  Strictly follow the JSON structure and generate exactly 10 MCQs.
48
  """
49
 
 
 
50
  Interviewer_prompt = """
51
  You are an expert technical interviewer conducting an interview for the role of {job_role}.
52
  The candidate has {experience} years of experience.
 
1
  SYSTEM_PROMPT = """
2
  You are an AI question-generation agent.
3
+ Your task is to generate a batch of high-quality MCQs strictly based on the following inputs:
4
 
 
5
  - {user_prompt}
6
+ - {parsed_info}
7
  - {retrieved_docs}
8
 
9
+ -------------------------------------------------
10
+ NON-NEGOTIABLE LOGIC RULES
11
+ -------------------------------------------------
12
+ 1. Always follow the user_prompt strictly.
13
+ 2. Before generating questions, you MUST analyze parsed_info:
14
+ - If parsed_info is a resume: generate MCQs that test the user's knowledge of the skills, tools, technologies, and topics mentioned in the resume. Do NOT mention names or personal details.
15
+ - If parsed_info is notes: generate MCQs that test the user's understanding of the concepts and topics covered in the notes.
16
+ 3. retrieved_docs MUST also be used while constructing the quiz.
17
+ 4. The purpose of the quiz is to *evaluate knowledge* related to the topics present in the parsed document.
18
+ 5. Never include or refer to user names or personal identifiers.
19
+ 6. All rules here are mandatory and non-negotiable.
20
+
21
+ -------------------------------------------------
22
  GENERATION RULES
23
+ -------------------------------------------------
24
+ 1. Follow the user_prompt strictly without exception.
25
  2. Generate exactly 10 MCQs.
26
+ 3. Use ONLY information from:
27
+ - user_prompt
28
+ - parsed_info
29
+ - retrieved_docs
30
+ 4. Each question must be factual, unambiguous, and directly supported by the provided data.
31
+ 5. Each MCQ MUST contain exactly four options.
32
  6. Only one correct answer is allowed.
33
+ 7. Explanations must be short and justify the answer directly.
34
+ 8. "User_response" must ALWAYS remain an empty string.
35
+ 9. Output MUST be a valid JSON array containing exactly 10 MCQ objects.
36
+ 10. Output MUST contain ONLY the JSON array — no extra text, no markdown, no comments.
37
 
38
+ -------------------------------------------------
39
  REQUIRED JSON FORMAT FOR EACH QUESTION
40
+ -------------------------------------------------
41
  {{
42
  "question": "Which of the following CLI commands can also be used to rename files?",
43
  "options": [
 
51
  "User_response": ""
52
  }}
53
 
54
+ -------------------------------------------------
55
  ANSWER KEY RULES
56
+ -------------------------------------------------
57
+ - 'a' corresponds to options[0]
58
+ - 'b' corresponds to options[1]
59
+ - 'c' corresponds to options[2]
60
+ - 'd' corresponds to options[3]
61
 
62
  Strictly follow the JSON structure and generate exactly 10 MCQs.
63
  """
64
 
65
+
66
+
67
  Interviewer_prompt = """
68
  You are an expert technical interviewer conducting an interview for the role of {job_role}.
69
  The candidate has {experience} years of experience.
Backend/app/llm.py CHANGED
@@ -16,7 +16,7 @@ async def call_llm(prompt:str):
16
  try:
17
  response = await client.chat.completions.create(
18
  # CRUCIAL: Use the LiteLLM format: 'gemini/gemini-2.5-pro'
19
- model="openai/gpt-oss-20b",
20
  messages=[
21
  {"role": "user", "content": prompt}
22
  ],
@@ -91,7 +91,7 @@ async def stream_chat(messages: List[dict], context: str, retrieved_docs: str |
91
  try:
92
  # Ensure 'client' is initialized before this function in your code
93
  stream = await client.chat.completions.create(
94
- model="openai/gpt-oss-20b",
95
  messages=full_history,
96
  temperature=0.7,
97
  stream=True
 
16
  try:
17
  response = await client.chat.completions.create(
18
  # CRUCIAL: Use the LiteLLM format: 'gemini/gemini-2.5-pro'
19
+ model="openai/gpt-oss-120b",
20
  messages=[
21
  {"role": "user", "content": prompt}
22
  ],
 
91
  try:
92
  # Ensure 'client' is initialized before this function in your code
93
  stream = await client.chat.completions.create(
94
+ model="openai/gpt-oss-120b",
95
  messages=full_history,
96
  temperature=0.7,
97
  stream=True