# models/questions.py — Pydantic schemas for the AI Course Assessment Generator
# (quiz-generator-v3): multiple choice questions plus their ranked/grouped
# variants and the response wrappers returned by the LLM pipeline steps.
from typing import List, Optional, Union
from pydantic import BaseModel, Field
class MultipleChoiceOption(BaseModel):
    """A single answer option belonging to a multiple choice question."""

    # `...` marks each field as required, matching the implicit default.
    option_text: str = Field(..., description="Text of the option")
    is_correct: bool = Field(..., description="Whether this option is correct")
    feedback: str = Field(..., description="Feedback for this option")
class MultipleChoiceQuestion(BaseModel):
    """A multiple choice question with provenance and LLM-judge review fields."""

    id: int = Field(..., description="Unique identifier for the question")
    question_text: str = Field(..., description="Text of the question")
    options: List[MultipleChoiceOption] = Field(
        ..., description="List of options for the question"
    )
    learning_objective_id: int = Field(
        ..., description="ID of the learning objective this question addresses"
    )
    learning_objective: str = Field(
        ..., description="Learning objective this question addresses"
    )
    correct_answer: str = Field(..., description="Correct answer to the question")
    source_reference: Union[List[str], str] = Field(
        ..., description="Paths to the files from which this question was extracted"
    )
    # The judge fields default to None: a freshly generated question has not
    # been reviewed yet.
    judge_feedback: Optional[str] = Field(
        None, description="Feedback from the LLM judge"
    )
    approved: Optional[bool] = Field(
        None, description="Whether this question has been approved by the LLM judge"
    )
class RankedNoGroupMultipleChoiceQuestion(MultipleChoiceQuestion):
    """A multiple choice question carrying a rank but no grouping information."""

    rank: int = Field(..., description="Rank assigned to the question (1 = best)")
    ranking_reasoning: str = Field(
        ..., description="Reasoning for the assigned rank"
    )
class RankedMultipleChoiceQuestion(MultipleChoiceQuestion):
    """A multiple choice question carrying both rank and similarity-group data."""

    rank: int = Field(..., description="Rank assigned to the question (1 = best)")
    ranking_reasoning: str = Field(
        ..., description="Reasoning for the assigned rank"
    )
    in_group: bool = Field(
        ...,
        description="Whether this question is part of a group of similar questions",
    )
    group_members: List[int] = Field(
        ..., description="IDs of questions in the same group"
    )
    best_in_group: bool = Field(
        ..., description="Whether this is the best question in its group"
    )
class GroupedMultipleChoiceQuestion(MultipleChoiceQuestion):
    """A multiple choice question assigned to a similarity group, not yet ranked."""

    in_group: bool = Field(
        ...,
        description="Whether this question is part of a group of similar questions",
    )
    group_members: List[int] = Field(
        ..., description="IDs of questions in the same group"
    )
    best_in_group: bool = Field(
        ..., description="Whether this is the best question in its group"
    )
class MultipleChoiceQuestionFromFeedback(BaseModel):
    """Model for a multiple choice question regenerated from user criticism.

    Unlike MultipleChoiceQuestion, this variant has no learning_objective_id,
    correct_answer, or judge fields; instead it carries the user feedback
    (taken from a <QUESTION FOLLOWED BY USER CRITICISM> prompt section) that
    motivates revising the question.
    """
    id: int = Field(description="Unique identifier for the question")
    question_text: str = Field(description="Text of the question")
    options: List[MultipleChoiceOption] = Field(description="List of options for the question")
    learning_objective: str = Field(description="Learning objective this question addresses")
    source_reference: Union[List[str], str] = Field(description="Paths to the files from which this question was extracted")
    feedback: str = Field(description="User criticism for this question, this will be found at the bottom of <QUESTION FOLLOWED BY USER CRITICISM> and it is a criticism of something which suggests a change.")
# Response models for questions
class RankedNoGroupMultipleChoiceQuestionsResponse(BaseModel):
    """Response payload for the ranking step that does not group questions."""

    ranked_questions: List[RankedNoGroupMultipleChoiceQuestion] = Field(
        ..., description="List of ranked multiple choice questions without grouping"
    )
class RankedMultipleChoiceQuestionsResponse(BaseModel):
    """Response payload for the ranking step that includes grouping data."""

    ranked_questions: List[RankedMultipleChoiceQuestion] = Field(
        ..., description="List of ranked multiple choice questions"
    )
class GroupedMultipleChoiceQuestionsResponse(BaseModel):
    """Response payload for the grouping step."""

    grouped_questions: List[GroupedMultipleChoiceQuestion] = Field(
        ..., description="List of grouped multiple choice questions"
    )