import os
import openai
import logging

# Groq exposes an OpenAI-compatible endpoint, so the stock OpenAI SDK client
# is reused with a swapped base_url.
client = openai.OpenAI(
    base_url="https://api.groq.com/openai/v1",
    api_key=os.getenv("GROQ_API_KEY"),
)


def verify_question(question, correct_option):
    """Ask the LLM to validate a quiz question and grade its difficulty.

    Args:
        question: The quiz question text.
        correct_option: The option claimed to be the correct answer.

    Returns:
        A ``(validation, feedback, difficulty)`` tuple of strings.
        ``validation`` is lower-cased (expected ``"valid"``/``"invalid"``).
        On any API or parsing failure, returns ``("error", "N/A", "N/A")``.
    """
    verification_prompt = (f"Verify if the following question and correct option are appropriate:\n\n"
                           f"Question: {question}\n"
                           f"Correct Option: {correct_option}\n"
                           f"Please respond with 'valid' only if the question and option are correct, "
                           f"and provide feedback if they are not. "
                           f"Also assign a difficulty level (Easy, Medium, or Hard) using these parameters:\n"
                           f"- Concept Complexity\n"
                           f"- Application Level\n"
                           f"- Ambiguity\n"
                           f"- Time Requirement\n"
                           f"- Prerequisite Knowledge\n"
                           f"Respond in the following format:\n"
                           f"Validation: [valid/invalid]\n"
                           f"Feedback: [Feedback message or N/A]\n"
                           f"Difficulty: [Easy/Medium/Hard]\n")
    try:
        response = client.chat.completions.create(
            model="llama-3.3-70b-versatile",
            messages=[
                {"role": "system", "content": "You are a quiz verification assistant."},
                {"role": "user", "content": verification_prompt},
            ],
            temperature=0.5,
            max_tokens=1024,
            top_p=1,
            stop=None,
            stream=False,
        )
        result = response.choices[0].message.content.strip()
        # BUG FIX: the prompt instructs the model to reply with three
        # newline-separated "Key: value" lines, but the old code split the
        # response on commas, so parsing could never succeed on a
        # well-formed reply. Split on newlines instead.
        lines = result.split("\n")
        # maxsplit=1 so that colons inside the feedback text are preserved
        # rather than silently truncating the message.
        validation = lines[0].split(":", 1)[1].strip().lower()
        feedback = lines[1].split(":", 1)[1].strip()
        difficulty = lines[2].split(":", 1)[1].strip()
        return validation, feedback, difficulty
    except Exception as e:
        # Covers API/network failures and malformed responses (IndexError
        # from the parsing above); callers get a sentinel tuple instead.
        logging.error(f"Error verifying question: {e}")
        return "error", "N/A", "N/A"