# Scrape artifact (HuggingFace Space page header), preserved as comments:
# Spaces: Sleeping
# File size: 3,997 Bytes
from typing import List, Optional, Union
from pydantic import BaseModel, Field
class MultipleChoiceOption(BaseModel):
    """Model for a multiple choice option.

    One answer option belonging to a multiple choice question: its display
    text, whether it is the correct choice, and feedback text tied to it.
    """

    # Text of the option as presented to the user.
    option_text: str = Field(description="Text of the option")
    # True if choosing this option is the correct answer.
    is_correct: bool = Field(description="Whether this option is correct")
    # Feedback text associated with this specific option.
    feedback: str = Field(description="Feedback for this option")
class MultipleChoiceQuestion(BaseModel):
    """Model for a multiple choice question.

    Base question schema: the question text, its options, the learning
    objective it targets, provenance (source files), and optional fields
    filled in by an LLM judge (``judge_feedback``/``approved``).
    """

    # Unique identifier for the question.
    id: int = Field(description="Unique identifier for the question")
    # The question stem shown to the user.
    question_text: str = Field(description="Text of the question")
    # The candidate answers (see MultipleChoiceOption).
    options: List[MultipleChoiceOption] = Field(description="List of options for the question")
    # Numeric ID of the learning objective; kept alongside the free-text
    # `learning_objective` below (intentionally both — ID for joins, text for prompts).
    learning_objective_id: int = Field(description="ID of the learning objective this question addresses")
    learning_objective: str = Field(description="Learning objective this question addresses")
    # Correct answer stored as text (not as an option index).
    correct_answer: str = Field(description="Correct answer to the question")
    # One or several file paths the question was derived from.
    source_reference: Union[List[str], str] = Field(description="Paths to the files from which this question was extracted")
    # Populated later by the LLM judge pass; None until judged.
    judge_feedback: Optional[str] = Field(None, description="Feedback from the LLM judge")
    # None = not yet judged; True/False once the judge has ruled.
    approved: Optional[bool] = Field(None, description="Whether this question has been approved by the LLM judge")
class RankedNoGroupMultipleChoiceQuestion(MultipleChoiceQuestion):
    """Model for a multiple choice question that has been ranked but not grouped.

    Extends MultipleChoiceQuestion with a rank and the reasoning behind it.
    """

    # 1 is the best-ranked question; larger numbers rank lower.
    rank: int = Field(description="Rank assigned to the question (1 = best)")
    # Free-text justification for the assigned rank.
    ranking_reasoning: str = Field(description="Reasoning for the assigned rank")
class RankedMultipleChoiceQuestion(MultipleChoiceQuestion):
    """Model for a multiple choice question that has been ranked.

    Combines ranking fields with similarity-grouping fields.
    NOTE(review): the rank/group fields duplicate those on
    RankedNoGroupMultipleChoiceQuestion and GroupedMultipleChoiceQuestion;
    merging via inheritance would change the generated schema, so the
    duplication is kept as-is.
    """

    # 1 is the best-ranked question; larger numbers rank lower.
    rank: int = Field(description="Rank assigned to the question (1 = best)")
    # Free-text justification for the assigned rank.
    ranking_reasoning: str = Field(description="Reasoning for the assigned rank")
    # True when this question belongs to a group of near-duplicate questions.
    in_group: bool = Field(description="Whether this question is part of a group of similar questions")
    # IDs of the other questions in the same similarity group.
    group_members: List[int] = Field(description="IDs of questions in the same group")
    # True for the single question chosen as best within its group.
    best_in_group: bool = Field(description="Whether this is the best question in its group")
class GroupedMultipleChoiceQuestion(MultipleChoiceQuestion):
    """Model for a multiple choice question that has been grouped but not ranked.

    Extends MultipleChoiceQuestion with similarity-grouping metadata only.
    """

    # True when this question belongs to a group of near-duplicate questions.
    in_group: bool = Field(description="Whether this question is part of a group of similar questions")
    # IDs of the other questions in the same similarity group.
    group_members: List[int] = Field(description="IDs of questions in the same group")
    # True for the single question chosen as best within its group.
    best_in_group: bool = Field(description="Whether this is the best question in its group")
class MultipleChoiceQuestionFromFeedback(BaseModel):
    """Model for a multiple choice question revised from user feedback.

    Standalone schema (not a MultipleChoiceQuestion subclass — it has no
    learning_objective_id or judge fields) carrying the user's criticism
    alongside the question content.
    """

    # Unique identifier for the question.
    id: int = Field(description="Unique identifier for the question")
    # The question stem shown to the user.
    question_text: str = Field(description="Text of the question")
    # The candidate answers (see MultipleChoiceOption).
    options: List[MultipleChoiceOption] = Field(description="List of options for the question")
    learning_objective: str = Field(description="Learning objective this question addresses")
    # One or several file paths the question was derived from.
    source_reference: Union[List[str], str] = Field(description="Paths to the files from which this question was extracted")
    # The description below is prompt-facing: it tells the LLM where to find
    # the criticism in the <QUESTION FOLLOWED BY USER CRITICISM> block.
    feedback: str = Field(description="User criticism for this question, this will be found at the bottom of <QUESTION FOLLOWED BY USER CRITICISM> and it is a criticism of something which suggests a change.")
# Response models for questions
class RankedNoGroupMultipleChoiceQuestionsResponse(BaseModel):
    """Response wrapper for a batch of ranked (ungrouped) questions."""

    ranked_questions: List[RankedNoGroupMultipleChoiceQuestion] = Field(description="List of ranked multiple choice questions without grouping")
class RankedMultipleChoiceQuestionsResponse(BaseModel):
    """Response wrapper for a batch of ranked (and grouped) questions."""

    ranked_questions: List[RankedMultipleChoiceQuestion] = Field(description="List of ranked multiple choice questions")
class GroupedMultipleChoiceQuestionsResponse(BaseModel):
    """Response wrapper for a batch of grouped (unranked) questions."""

    grouped_questions: List[GroupedMultipleChoiceQuestion] = Field(description="List of grouped multiple choice questions")