# SNS / prompts.py
# Provenance: Hugging Face repo "SNS" by codeboosterstech, commit be56221 (verified), 6.99 kB.
"""
Prompt templates for the multi-agent exam generation system
"""
# Agent 1 - Generator Prompt (Llama 3.1 70B)
# Filled in by get_generator_prompt() via str.format().
# Placeholders: subject, stream, syllabus_text, reference_text, realtime_updates,
#   part_a_count, part_b_count, part_c_count, bloom_mix, tag_requirements,
#   stream_specific_template, total_marks, total_questions, tag_example.
# NOTE: literal JSON braces in the example output are doubled ('{{' / '}}') so
#   that str.format() emits single braces instead of treating them as fields.
GENERATOR_PROMPT = """
You are an expert exam paper generator for engineering education. Create a structured question paper based on the following inputs:
SUBJECT: {subject}
STREAM: {stream}
SYLLABUS: {syllabus_text}
REFERENCE CONTEXT: {reference_text}
REALTIME UPDATES: {realtime_updates}
QUESTION DISTRIBUTION:
- Part A: {part_a_count} questions × 2 marks each
- Part B: {part_b_count} questions × 13 marks each (Either/Or pattern)
- Part C: {part_c_count} questions × 14 marks each (Case studies)
CRITICAL REQUIREMENTS:
1. Difficulty Index: Maintain between 1.8-2.5
2. Unit Distribution: Even coverage across all syllabus units
3. Bloom's Taxonomy: {bloom_mix}
4. Tags: {tag_requirements}
{stream_specific_template}
OUTPUT FORMAT - MUST BE VALID JSON:
{{
"metadata": {{
"subject": "{subject}",
"stream": "{stream}",
"difficulty_index": 2.1,
"total_marks": {total_marks},
"units_covered": [1, 2, 3, 4, 5]
}},
"questions": [
{{
"part": "A",
"question_no": 1,
"sub_no": null,
"marks": 2,
"unit": 1,
"bloom_level": "Remember",
"tags": ["{tag_example}"],
"course_outcome": "CO1",
"question_text": "Define key concept from unit 1"
}},
{{
"part": "B",
"question_no": 1,
"sub_no": "a",
"marks": 13,
"unit": 2,
"bloom_level": "Apply",
"tags": ["{tag_example}"],
"course_outcome": "CO2",
"question_text": "Explain concept with example OR Solve this problem"
}},
{{
"part": "C",
"question_no": 1,
"sub_no": null,
"marks": 14,
"unit": 3,
"bloom_level": "Evaluate",
"tags": ["Case Study", "{tag_example}"],
"course_outcome": "CO3",
"question_text": "Analyze the given case study and provide solutions"
}}
]
}}
Generate exactly {total_questions} questions following this structure. Ensure even unit distribution and proper bloom level mixing.
"""
# Agent 2 - Verifier Prompt (Gemma 2 27B)
# Second pipeline stage: receives the generator's raw output and emits a JSON
# report of corrections that the formatter (Agent 3) applies.
# Placeholders: generated_content, bloom_mix, part_a_count, part_b_count,
#   part_c_count, tag_requirements. Literal JSON braces are doubled for
#   str.format() — no helper builder is visible in this file; presumably the
#   caller formats it directly (TODO confirm against the pipeline code).
VERIFIER_PROMPT = """
You are a quality verification agent for exam papers. Analyze the generated question paper and identify issues:
GENERATED CONTENT:
{generated_content}
VERIFICATION CHECKLIST:
1. Bloom's Taxonomy Correctness: {bloom_mix}
2. Unit Distribution: Even across all syllabus units
3. Question Count: Part A: {part_a_count}, Part B: {part_b_count}, Part C: {part_c_count}
4. Tag Completeness: {tag_requirements}
5. Difficulty Index: Between 1.8-2.5
6. JSON Validity: Proper structure and formatting
7. Duplicate Check: No repeated concepts
8. Ambiguity Check: Clear, unambiguous questions
OUTPUT FORMAT:
{{
"status": "valid|needs_correction",
"corrections": [
{{
"target": "question_1",
"issue": "Bloom level incorrect",
"fix": "Change from 'Remember' to 'Apply'",
"priority": "high|medium|low"
}}
],
"summary": {{
"unit_coverage_score": "X/Y units covered",
"bloom_distribution": {{"Remember": "X%", "Understand": "Y%", ...}},
"difficulty_estimate": 2.1,
"overall_quality": "excellent|good|needs_improvement"
}}
}}
Provide specific, actionable corrections.
"""
# Agent 3 - Formatter Prompt (Mixtral-8x7B)
# Final pipeline stage: applies the verifier's corrections and emits the
# question paper, answer key, and OBE (outcome-based education) summary.
# Placeholders: original_content, corrections. Note that {original_content}
# appears twice — once as the input section and again inside the "final_qp"
# field of the output skeleton, so the same content is injected in both spots
# when this template is formatted. Literal JSON braces are doubled for
# str.format().
FORMATTER_PROMPT = """
You are the final formatting and output agent. Take the verified content and produce the final structured output.
ORIGINAL CONTENT:
{original_content}
VERIFICATION CORRECTIONS:
{corrections}
FINAL OUTPUT REQUIREMENTS:
1. Apply all corrections from verification
2. Ensure valid JSON structure
3. Generate three complete blocks:
- Final Question Paper
- Answer Key
- OBE Summary
ANSWER KEY FORMAT:
For each question, provide:
- Model answer
- Step-by-step solution (where applicable)
- Marking scheme breakdown
OBE SUMMARY FORMAT:
- Course outcome mapping
- Bloom's taxonomy distribution
- Difficulty analysis
- Unit coverage report
OUTPUT STRUCTURE:
{{
"final_qp": {original_content},
"answers": [
{{
"question_ref": "A1",
"model_answer": "Detailed answer here...",
"marking_scheme": ["Point 1: 1 mark", "Point 2: 1 mark"],
"bloom_level": "Remember",
"course_outcome": "CO1"
}}
],
"obe": {{
"course_outcomes": {{
"CO1": {{"coverage": "excellent", "questions": ["A1", "B1a"]}},
"CO2": {{"coverage": "good", "questions": ["A2", "B2a"]}}
}},
"bloom_distribution": {{
"Remember": "30%",
"Understand": "25%",
"Apply": "20%",
"Analyze": "15%",
"Evaluate": "10%"
}},
"difficulty_index": 2.1,
"unit_coverage": "5/5 units covered",
"recommendations": "Suggestions for improvement"
}}
}}
"""
# Stream-specific templates
# Injected into GENERATOR_PROMPT's {stream_specific_template} slot by
# get_generator_prompt() when stream == "CSE". Plain text — no format fields.
CSE_TEMPLATE = """
CSE-SPECIFIC REQUIREMENTS:
- Company Tags: MAANGO BIG15 (Microsoft, Amazon, Apple, Netflix, Google, Oracle, Bloomberg, IBM, Goldman Sachs, etc.)
- Focus: Real-world coding problems, system design, algorithms
- Part B: Either/Or should include coding problems vs theory questions
- Part C: Case studies from recent tech industry scenarios
- Bloom's Mix: 60% Remember/Understand, 40% Apply/Analyze/Evaluate
"""
# Injected into GENERATOR_PROMPT's {stream_specific_template} slot by
# get_generator_prompt() for every stream other than "CSE". Plain text —
# no format fields.
NON_CSE_TEMPLATE = """
NON-CSE SPECIFIC REQUIREMENTS:
- GATE Reference Tags: All questions must reference GATE patterns
- Focus: Fundamental concepts, problem-solving, theoretical understanding
- Part B: Either/Or should include derivation vs application problems
- Part C: Engineering case studies with real-world applications
- Bloom's Mix: 50% Remember/Understand, 50% Apply/Analyze/Evaluate
"""
def get_generator_prompt(subject: str, stream: str, syllabus_text: str,
                         reference_text: str, realtime_updates: str,
                         part_a_count: int, part_b_count: int,
                         part_c_count: int) -> str:
    """Build the complete generator prompt for Agent 1.

    Computes the total marks (2/13/14 marks per Part A/B/C question) and total
    question count (Part B questions are Either/Or pairs, so each counts
    twice), selects the stream-specific template and tagging rules, and fills
    GENERATOR_PROMPT.

    Args:
        subject: Subject name inserted into the prompt metadata.
        stream: Academic stream; "CSE" selects CSE_TEMPLATE and MAANGO/BIG15
            company tags, anything else selects NON_CSE_TEMPLATE and GATE tags.
        syllabus_text: Syllabus content; truncated to 2000 chars to bound
            prompt size. None is treated as empty.
        reference_text: Reference material; truncated to 1500 chars. None is
            treated as empty.
        realtime_updates: Free-form recent-context text, inserted verbatim.
        part_a_count: Number of 2-mark Part A questions.
        part_b_count: Number of 13-mark Part B Either/Or questions.
        part_c_count: Number of 14-mark Part C case-study questions.

    Returns:
        The fully formatted prompt string.
    """
    # Mark weights match the distribution stated inside GENERATOR_PROMPT.
    total_marks = (part_a_count * 2) + (part_b_count * 13) + (part_c_count * 14)
    # Each Part B item is an Either/Or pair, hence the factor of 2.
    total_questions = part_a_count + (part_b_count * 2) + part_c_count

    if stream == "CSE":
        stream_template = CSE_TEMPLATE
        bloom_mix = "60% Remember/Understand, 40% Apply/Analyze/Evaluate"
        tag_requirements = "MAANGO BIG15 company tags required"
        tag_example = "Amazon"
    else:
        stream_template = NON_CSE_TEMPLATE
        bloom_mix = "50% Remember/Understand, 50% Apply/Analyze/Evaluate"
        tag_requirements = "GATE reference tags required"
        tag_example = "GATE-2024"

    return GENERATOR_PROMPT.format(
        subject=subject,
        stream=stream,
        # Guard against None so a missing document degrades to an empty
        # section instead of raising TypeError on the slice; limits keep the
        # prompt within the model's context budget.
        syllabus_text=(syllabus_text or "")[:2000],
        reference_text=(reference_text or "")[:1500],
        realtime_updates=realtime_updates,
        part_a_count=part_a_count,
        part_b_count=part_b_count,
        part_c_count=part_c_count,
        total_marks=total_marks,
        total_questions=total_questions,
        bloom_mix=bloom_mix,
        tag_requirements=tag_requirements,
        stream_specific_template=stream_template,
        tag_example=tag_example
    )