# NOTE(review): the following lines were non-code paste residue from the
# Hugging Face Spaces UI ("Spaces: / Sleeping / Sleeping") and have been
# converted to comments so the module parses.
import os

import openai

# Groq exposes an OpenAI-compatible API, so the official OpenAI client is
# reused with Groq's base URL.
# NOTE(review): os.getenv returns None when GROQ_API_KEY is unset; the client
# is still constructed and only fails on the first request — confirm this is
# the intended failure mode.
client = openai.OpenAI(
    base_url="https://api.groq.com/openai/v1",
    api_key=os.getenv("GROQ_API_KEY"),
)
def generate_ques(theme):
    """Generate one multiple-choice quiz question on *theme* via Groq's LLM.

    Builds a detailed prompt that asks the model for a single A-D
    multiple-choice question with a marked correct answer and a rubric-based
    difficulty rating, then returns the raw model response text.

    Args:
        theme: Topic for the quiz question (interpolated into the prompt).

    Returns:
        str: The model's response. The prompt demands a strict
        "Question:/A)/B)/C)/D)/Correct answer:/Difficulty level:" layout,
        but the format is requested, not validated here.
    """
    # Only the first line interpolates {theme}; the remaining lines are plain
    # literals. Fixed mojibake: "15β45" was a garbled dash, now "15-45".
    ques_prompt = (
        f"Generate a unique, factually accurate quiz question on the theme of {theme}. Make the question as innovative as possible so that it doesn't get repeated.\n"
        "Ensure that:\n"
        "1. The question is factually accurate, unique, and not similar to previous questions.\n"
        "2. All options are clearly labeled as A, B, C, and D.\n"
        "3. The correct answer is accurate and clearly marked.\n"
        "4. Assign a difficulty level (Easy, Medium, or Hard) to the question.\n"
        "   Evaluate the difficulty using the following weighted criteria (total score out of 10):\n"
        "   - Concept Complexity (30%):\n"
        "     * 1 = Basic, 2 = Intermediate, 3 = Advanced\n"
        "   - Application Level (25%):\n"
        "     * 1 = Pure recall, 2 = Application-based, 3 = Higher-order thinking\n"
        "   - Ambiguity (15%):\n"
        "     * 1 = Clear and straightforward, 2 = Moderate trickiness, 3 = Ambiguous or misleading\n"
        "   - Time Requirement (15%):\n"
        "     * 1 = <15 seconds, 2 = 15-45 seconds, 3 = >45 seconds to solve\n"
        "   - Prerequisite Knowledge (15%):\n"
        "     * 1 = None or very basic, 2 = Requires some prior understanding, 3 = Requires strong foundational knowledge\n"
        "   Calculate the total weighted score and assign difficulty as follows:\n"
        "   * 1.0 - 3.5 = Easy\n"
        "   * 3.6 - 6.5 = Medium\n"
        "   * 6.6 - 10.0 = Hard\n\n"
        "Format the response exactly as follows:\n"
        "Question: [Your question here]\n"
        "A) [Option A]\n"
        "B) [Option B]\n"
        "C) [Option C]\n"
        "D) [Option D]\n"
        "Correct answer: [Correct option letter]\n"
        "Difficulty level: [Easy/Medium/Hard]\n\n"
        "total weighted score: [Total score out of 10]\n\n"
        "Do not include any explanations or comments outside of this format."
    )
    # Non-streaming, single-completion call; temperature 0.5 balances variety
    # against the prompt's demand for factual accuracy.
    response = client.chat.completions.create(
        model="llama-3.3-70b-versatile",
        messages=[
            {
                "role": "system",
                "content": "You are a helpful assistant who generates quizzes.",
            },
            {
                "role": "user",
                "content": ques_prompt,
            },
        ],
        temperature=0.5,
        max_tokens=1024,
        top_p=1,
        stop=None,
        stream=False,
    )
    # Return only the generated text, not the full API response object.
    return response.choices[0].message.content
| #theme = "History" | |
| #print(generate_ques(theme)) | |