# Source: quamble/test.py — Hugging Face upload "Upload 20 files" (commit ed363f8, user raj-tomar001)
import os
import openai
import logging
import re
# OpenAI-compatible client pointed at Groq's inference endpoint; used by the
# generation path below.
# NOTE(review): os.getenv returns None when GROQ_API_KEY is unset, so a
# missing key fails at the first request, not at construction — confirm the
# deployment always sets this variable.
client = openai.OpenAI(
base_url="https://api.groq.com/openai/v1",
api_key=os.getenv("GROQ_API_KEY")
)
# Function to generate a question
def generate_ques(theme):
    """Request one formatted multiple-choice quiz question on *theme*.

    Returns the model's raw response text (question, A-D options, correct
    answer, difficulty level, and weighted score), stripped of surrounding
    whitespace, in the exact layout demanded by the prompt.
    """
    prompt = (
        f"Generate a unique, factually accurate quiz question on the theme of {theme}.\n"
        "Ensure that:\n"
        "1. The question is factually accurate, unique, and not similar to previous questions.\n"
        "2. All options are clearly labeled as A, B, C, and D.\n"
        "3. The correct answer is accurate and clearly marked.\n"
        "4. Assign a difficulty level (Easy, Medium, or Hard) to the question.\n"
        " Evaluate the difficulty using the following weighted criteria (total score out of 10):\n"
        " - Concept Complexity (30%)\n"
        " - Application Level (25%)\n"
        " - Ambiguity (15%)\n"
        " - Time Requirement (15%)\n"
        " - Prerequisite Knowledge (15%)\n"
        "Format the response exactly as follows:\n"
        "Question: [Your question here]\n"
        "A) [Option A]\n"
        "B) [Option B]\n"
        "C) [Option C]\n"
        "D) [Option D]\n"
        "Correct answer: [Correct option letter]\n"
        "Difficulty level: [Easy/Medium/Hard]\n"
        "total weighted score: [Total score out of 10]\n\n"
        "Do not include any explanations or comments outside of this format."
    )
    chat_messages = [
        {"role": "system", "content": "You are a helpful assistant who generates quizzes."},
        {"role": "user", "content": prompt},
    ]
    completion = client.chat.completions.create(
        model="llama-3.3-70b-versatile",
        messages=chat_messages,
        temperature=0.5,
        max_tokens=1024,
        top_p=1,
    )
    return completion.choices[0].message.content.strip()
# Function to verify a generated question
def verify_question(full_question_text):
    """Verify a generated quiz question for accuracy and formatting.

    Parameters
    ----------
    full_question_text : str
        The full formatted question block produced by ``generate_ques``.

    Returns
    -------
    tuple of (str, str, str)
        ``(validation, feedback, difficulty)`` where ``validation`` is
        ``"valid"``/``"invalid"``/``"error"``, ``feedback`` is the model's
        message or ``"N/A"``, and ``difficulty`` is
        ``"Easy"``/``"Medium"``/``"Hard"`` or ``"N/A"``.
    """
    try:
        # Cheap structural pre-check before spending an API call: the block
        # must at least contain a marked correct answer.
        # (The original also extracted the answer letter and difficulty into
        # locals here, but never used them — dropped as dead code.)
        if not re.search(r"Correct answer:\s*([A-D])", full_question_text):
            return "invalid", "Missing correct answer or required structure.", "N/A"
        # Pass the entire question to the model
        verification_prompt = (
            f"Verify the following quiz question for:\n"
            f"- Factual accuracy of the question must be correct .\n"
            f"- There should be no spelling or grammatical errors.\n"
            f"-The question should be unique.\n"
            f"- There should be no ambiguity in the question.\n"
            f"- Proper formatting (includes labels: Question, A-D options, Correct answer)\n"
            f"- Reject if:\n"
            f"  - The format is incorrect or incomplete\n"
            f"  - The correct answer is factually wrong\n"
            f"- There is a spelling or grammatical mistake\n"
            f"- The question is not unique\n"
            f"- The question is ambiguous\n"
            f"\n{full_question_text}\n"
            f"\nRespond only in this format:\n"
            f"Validation: [valid/invalid]\n"
            f"Feedback: [Feedback message or N/A]\n"
            f"Difficulty: [Easy/Medium/Hard]"
        )
        # BUG FIX: the original called the legacy openai.ChatCompletion.create
        # API with dict-style response access; both were removed in
        # openai>=1.0, which this file already requires (it constructs an
        # openai.OpenAI client for generation). Route verification through the
        # same module-level `client` so it hits the same Groq endpoint.
        response = client.chat.completions.create(
            model="llama-3.3-70b-versatile",
            messages=[
                {"role": "system", "content": "You are a quiz verification assistant."},
                {"role": "user", "content": verification_prompt}
            ],
            temperature=0.3,
            max_tokens=512,
            top_p=1
        )
        result = response.choices[0].message.content.strip()
        # Parse the three expected "Key: value" lines with regex; unlike the
        # original positional splitlines() indexing, this tolerates blank
        # lines or reordering in the model's reply instead of raising.
        validation_m = re.search(r"Validation:\s*(.+)", result)
        feedback_m = re.search(r"Feedback:\s*(.+)", result)
        difficulty_m = re.search(r"Difficulty:\s*(.+)", result)
        if not (validation_m and feedback_m and difficulty_m):
            return "error", "Malformed verification response", "N/A"
        return (
            validation_m.group(1).strip().lower(),
            feedback_m.group(1).strip(),
            difficulty_m.group(1).strip(),
        )
    except Exception as e:
        logging.error(f"Error in verification: {e}")
        return "error", "Exception occurred", "N/A"
# Driver code
if __name__ == "__main__":
    # Generate one question on a fixed theme, then run it through the
    # verification pass and report the outcome.
    quiz_theme = "science"  # Change this theme as needed
    question_block = generate_ques(quiz_theme)
    print("\nGenerated Question:\n", question_block)
    status, note, level = verify_question(question_block)
    print("\nVerification Result:")
    print("Validation:", status)
    print("Feedback:", note)
    print("Difficulty:", level)